diff --git a/.lagoon.secrets.yaml b/.lagoon.secrets.yaml index 41928b1a09..6a411006cb 100644 --- a/.lagoon.secrets.yaml +++ b/.lagoon.secrets.yaml @@ -32,6 +32,10 @@ parameters: description: Password used for connecting to the keycloak-db generate: expression from: "[a-zA-Z0-9]{32}" + - name: API_REDIS_PASSWORD + description: Password used for connecting to the api-redis + generate: expression + from: "[a-zA-Z0-9]{32}" - name: SAFE_BRANCH description: Which branch this belongs to, special chars replaced with dashes required: true @@ -99,3 +103,9 @@ objects: name: opendistro-security-cookie-password stringData: OPENDISTRO_SECURITY_COOKIE_PASSWORD: ${OPENDISTRO_SECURITY_COOKIE_PASSWORD} +- kind: Secret + apiVersion: v1 + metadata: + name: api-redis-password + stringData: + API_REDIS_PASSWORD: ${API_REDIS_PASSWORD} diff --git a/.lagoon.yml b/.lagoon.yml index 9e2b4e48dc..b5fbfed2f0 100644 --- a/.lagoon.yml +++ b/.lagoon.yml @@ -50,15 +50,6 @@ environments: rollouts: logs-db: statefulset logs-forwarder: statefulset - cronjobs: - - name: idle-clis - schedule: '*/15 * * * *' - command: /idle-clis.sh - service: auto-idler - - name: idle-services - schedule: '*/30 * * * *' - command: /idle-services.sh - service: auto-idler develop: types: logs-db: elasticsearch-cluster diff --git a/Makefile b/Makefile index ac2a47bae4..7da9c940f6 100644 --- a/Makefile +++ b/Makefile @@ -426,7 +426,9 @@ services := api \ backup-handler \ broker \ broker-single \ + logs-concentrator \ logs-dispatcher \ + logs-tee \ logs-forwarder \ logs-db \ logs-db-ui \ @@ -447,7 +449,8 @@ services := api \ harbor-redis \ harborregistry \ harborregistryctl \ - harbor-trivy + harbor-trivy \ + api-redis service-images += $(services) @@ -480,6 +483,7 @@ build/harbor-nginx: build/harborregistryctl services/harbor-core/Dockerfile serv build/tests-kubernetes: build/tests build/tests-openshift: build/tests build/toolbox: build/mariadb +build/api-redis: build/redis # Auth SSH needs the context of the root folder, 
so we have it individually build/ssh: build/commons @@ -529,7 +533,8 @@ build-list: # Define list of all tests all-k8s-tests-list:= features-kubernetes \ nginx \ - drupal + drupal \ + active-standby-kubernetes all-k8s-tests = $(foreach image,$(all-k8s-tests-list),k8s-tests/$(image)) # Run all k8s tests @@ -576,7 +581,7 @@ all-openshift-tests-list:= features-openshift \ bitbucket \ nginx \ elasticsearch \ - active-standby + active-standby-openshift all-openshift-tests = $(foreach image,$(all-openshift-tests-list),openshift-tests/$(image)) .PHONY: openshift-tests @@ -611,7 +616,7 @@ drupal-test-services = drush-alias webhook-tests = github gitlab bitbucket # All Tests that use API endpoints -api-tests = node features-openshift features-kubernetes nginx elasticsearch active-standby +api-tests = node features-openshift features-kubernetes nginx elasticsearch active-standby-openshift active-standby-kubernetes # All drupal tests drupal-tests = drupal drupal-postgres @@ -1025,28 +1030,31 @@ endif --volume $$PWD/local-dev/k3d-nginx-ingress.yaml:/var/lib/rancher/k3s/server/manifests/k3d-nginx-ingress.yaml echo "$(K3D_NAME)" > $@ export KUBECONFIG="$$(./local-dev/k3d get-kubeconfig --name='$(K3D_NAME)')"; \ - local-dev/kubectl apply -f $$PWD/local-dev/k3d-storageclass-bulk.yaml; \ + local-dev/kubectl --kubeconfig="$$(./local-dev/k3d get-kubeconfig --name='$(K3D_NAME)')" apply -f $$PWD/local-dev/k3d-storageclass-bulk.yaml; \ docker tag $(CI_BUILD_TAG)/docker-host localhost:5000/lagoon/docker-host; \ docker push localhost:5000/lagoon/docker-host; \ - local-dev/kubectl create namespace k8up; \ - local-dev/helm/helm repo add appuio https://charts.appuio.ch; \ - local-dev/helm/helm upgrade --install -n k8up k8up appuio/k8up; \ - local-dev/kubectl create namespace dbaas-operator; \ - local-dev/helm/helm repo add dbaas-operator https://raw.githubusercontent.com/amazeeio/dbaas-operator/master/charts ; \ - local-dev/helm/helm upgrade --install -n dbaas-operator dbaas-operator 
dbaas-operator/dbaas-operator ; \ - local-dev/helm/helm upgrade --install -n dbaas-operator mariadbprovider dbaas-operator/mariadbprovider -f local-dev/helm-values-mariadbprovider.yml ; \ - local-dev/kubectl create namespace lagoon; \ - local-dev/helm/helm upgrade --install -n lagoon lagoon-remote ./charts/lagoon-remote --set dockerHost.image.name=172.17.0.1:5000/lagoon/docker-host --set dockerHost.registry=172.17.0.1:5000; \ - local-dev/kubectl -n lagoon rollout status deployment docker-host -w; + local-dev/kubectl --kubeconfig="$$(./local-dev/k3d get-kubeconfig --name='$(K3D_NAME)')" --context='$(K3D_NAME)' create namespace k8up; \ + local-dev/helm/helm --kubeconfig="$$(./local-dev/k3d get-kubeconfig --name='$(K3D_NAME)')" --kube-context='$(K3D_NAME)' repo add appuio https://charts.appuio.ch; \ + local-dev/helm/helm --kubeconfig="$$(./local-dev/k3d get-kubeconfig --name='$(K3D_NAME)')" --kube-context='$(K3D_NAME)' upgrade --install -n k8up k8up appuio/k8up; \ + local-dev/kubectl --kubeconfig="$$(./local-dev/k3d get-kubeconfig --name='$(K3D_NAME)')" --context='$(K3D_NAME)' create namespace dioscuri; \ + local-dev/helm/helm --kubeconfig="$$(./local-dev/k3d get-kubeconfig --name='$(K3D_NAME)')" --kube-context='$(K3D_NAME)' repo add dioscuri https://raw.githubusercontent.com/amazeeio/dioscuri/ingress/charts ; \ + local-dev/helm/helm --kubeconfig="$$(./local-dev/k3d get-kubeconfig --name='$(K3D_NAME)')" --kube-context='$(K3D_NAME)' upgrade --install -n dioscuri dioscuri dioscuri/dioscuri ; \ + local-dev/kubectl --kubeconfig="$$(./local-dev/k3d get-kubeconfig --name='$(K3D_NAME)')" --context='$(K3D_NAME)' create namespace dbaas-operator; \ + local-dev/helm/helm --kubeconfig="$$(./local-dev/k3d get-kubeconfig --name='$(K3D_NAME)')" --kube-context='$(K3D_NAME)' repo add dbaas-operator https://raw.githubusercontent.com/amazeeio/dbaas-operator/master/charts ; \ + local-dev/helm/helm --kubeconfig="$$(./local-dev/k3d get-kubeconfig --name='$(K3D_NAME)')" 
--kube-context='$(K3D_NAME)' upgrade --install -n dbaas-operator dbaas-operator dbaas-operator/dbaas-operator ; \ + local-dev/helm/helm --kubeconfig="$$(./local-dev/k3d get-kubeconfig --name='$(K3D_NAME)')" --kube-context='$(K3D_NAME)' upgrade --install -n dbaas-operator mariadbprovider dbaas-operator/mariadbprovider -f local-dev/helm-values-mariadbprovider.yml ; \ + local-dev/kubectl --kubeconfig="$$(./local-dev/k3d get-kubeconfig --name='$(K3D_NAME)')" --context='$(K3D_NAME)' create namespace lagoon; \ + local-dev/helm/helm --kubeconfig="$$(./local-dev/k3d get-kubeconfig --name='$(K3D_NAME)')" --kube-context='$(K3D_NAME)' upgrade --install -n lagoon lagoon-remote ./charts/lagoon-remote --set dockerHost.image.name=172.17.0.1:5000/lagoon/docker-host --set dockerHost.registry=172.17.0.1:5000; \ + local-dev/kubectl --kubeconfig="$$(./local-dev/k3d get-kubeconfig --name='$(K3D_NAME)')" --context='$(K3D_NAME)' -n lagoon rollout status deployment docker-host -w; ifeq ($(ARCH), darwin) export KUBECONFIG="$$(./local-dev/k3d get-kubeconfig --name='$(K3D_NAME)')"; \ - KUBERNETESBUILDDEPLOY_TOKEN=$$(local-dev/kubectl -n lagoon describe secret $$(local-dev/kubectl -n lagoon get secret | grep kubernetesbuilddeploy | awk '{print $$1}') | grep token: | awk '{print $$2}'); \ + KUBERNETESBUILDDEPLOY_TOKEN=$$(local-dev/kubectl --kubeconfig="$$(./local-dev/k3d get-kubeconfig --name='$(K3D_NAME)')" --context='$(K3D_NAME)' -n lagoon describe secret $$(local-dev/kubectl --kubeconfig="$$(./local-dev/k3d get-kubeconfig --name='$(K3D_NAME)')" --context='$(K3D_NAME)' -n lagoon get secret | grep kubernetesbuilddeploy | awk '{print $$1}') | grep token: | awk '{print $$2}'); \ sed -i '' -e "s/\".*\" # make-kubernetes-token/\"$${KUBERNETESBUILDDEPLOY_TOKEN}\" # make-kubernetes-token/g" local-dev/api-data/03-populate-api-data-kubernetes.gql; \ DOCKER_IP="$$(docker network inspect bridge --format='{{(index .IPAM.Config 0).Gateway}}')"; \ sed -i '' -e 
"s/172\.[0-9]\{1,3\}\.[0-9]\{1,3\}\.[0-9]\{1,3\}/$${DOCKER_IP}/g" local-dev/api-data/03-populate-api-data-kubernetes.gql docker-compose.yaml; else export KUBECONFIG="$$(./local-dev/k3d get-kubeconfig --name='$(K3D_NAME)')"; \ - KUBERNETESBUILDDEPLOY_TOKEN=$$(local-dev/kubectl -n lagoon describe secret $$(local-dev/kubectl -n lagoon get secret | grep kubernetesbuilddeploy | awk '{print $$1}') | grep token: | awk '{print $$2}'); \ + KUBERNETESBUILDDEPLOY_TOKEN=$$(local-dev/kubectl --kubeconfig="$$(./local-dev/k3d get-kubeconfig --name='$(K3D_NAME)')" --context='$(K3D_NAME)' -n lagoon describe secret $$(local-dev/kubectl --kubeconfig="$$(./local-dev/k3d get-kubeconfig --name='$(K3D_NAME)')" --context='$(K3D_NAME)' -n lagoon get secret | grep kubernetesbuilddeploy | awk '{print $$1}') | grep token: | awk '{print $$2}'); \ sed -i "s/\".*\" # make-kubernetes-token/\"$${KUBERNETESBUILDDEPLOY_TOKEN}\" # make-kubernetes-token/g" local-dev/api-data/03-populate-api-data-kubernetes.gql; \ DOCKER_IP="$$(docker network inspect bridge --format='{{(index .IPAM.Config 0).Gateway}}')"; \ sed -i "s/172\.[0-9]\{1,3\}\.[0-9]\{1,3\}\.[0-9]\{1,3\}/$${DOCKER_IP}/g" local-dev/api-data/03-populate-api-data-kubernetes.gql docker-compose.yaml; @@ -1068,27 +1076,27 @@ k3d-kubeconfig: k3d-dashboard: export KUBECONFIG="$$(./local-dev/k3d get-kubeconfig --name=$$(cat k3d))"; \ - local-dev/kubectl apply -f https://raw.githubusercontent.com/kubernetes/dashboard/v2.0.0-rc2/aio/deploy/recommended/00_dashboard-namespace.yaml; \ - local-dev/kubectl apply -f https://raw.githubusercontent.com/kubernetes/dashboard/v2.0.0-rc2/aio/deploy/recommended/01_dashboard-serviceaccount.yaml; \ - local-dev/kubectl apply -f https://raw.githubusercontent.com/kubernetes/dashboard/v2.0.0-rc2/aio/deploy/recommended/02_dashboard-service.yaml; \ - local-dev/kubectl apply -f https://raw.githubusercontent.com/kubernetes/dashboard/v2.0.0-rc2/aio/deploy/recommended/03_dashboard-secret.yaml; \ - local-dev/kubectl apply -f 
https://raw.githubusercontent.com/kubernetes/dashboard/v2.0.0-rc2/aio/deploy/recommended/04_dashboard-configmap.yaml; \ - echo '{"apiVersion": "rbac.authorization.k8s.io/v1","kind": "ClusterRoleBinding","metadata": {"name": "kubernetes-dashboard","namespace": "kubernetes-dashboard"},"roleRef": {"apiGroup": "rbac.authorization.k8s.io","kind": "ClusterRole","name": "cluster-admin"},"subjects": [{"kind": "ServiceAccount","name": "kubernetes-dashboard","namespace": "kubernetes-dashboard"}]}' | local-dev/kubectl -n kubernetes-dashboard apply -f - ; \ - local-dev/kubectl apply -f https://raw.githubusercontent.com/kubernetes/dashboard/v2.0.0-rc2/aio/deploy/recommended/06_dashboard-deployment.yaml; \ - local-dev/kubectl apply -f https://raw.githubusercontent.com/kubernetes/dashboard/v2.0.0-rc2/aio/deploy/recommended/07_scraper-service.yaml; \ - local-dev/kubectl apply -f https://raw.githubusercontent.com/kubernetes/dashboard/v2.0.0-rc2/aio/deploy/recommended/08_scraper-deployment.yaml; \ - local-dev/kubectl -n kubernetes-dashboard patch deployment kubernetes-dashboard --patch '{"spec": {"template": {"spec": {"containers": [{"name": "kubernetes-dashboard","args": ["--auto-generate-certificates","--namespace=kubernetes-dashboard","--enable-skip-login"]}]}}}}'; \ - local-dev/kubectl -n kubernetes-dashboard rollout status deployment kubernetes-dashboard -w; \ + local-dev/kubectl --kubeconfig="$$(./local-dev/k3d get-kubeconfig --name='$(K3D_NAME)')" --context='$(K3D_NAME)' apply -f https://raw.githubusercontent.com/kubernetes/dashboard/v2.0.0-rc2/aio/deploy/recommended/00_dashboard-namespace.yaml; \ + local-dev/kubectl --kubeconfig="$$(./local-dev/k3d get-kubeconfig --name='$(K3D_NAME)')" --context='$(K3D_NAME)' apply -f https://raw.githubusercontent.com/kubernetes/dashboard/v2.0.0-rc2/aio/deploy/recommended/01_dashboard-serviceaccount.yaml; \ + local-dev/kubectl --kubeconfig="$$(./local-dev/k3d get-kubeconfig --name='$(K3D_NAME)')" --context='$(K3D_NAME)' apply -f 
https://raw.githubusercontent.com/kubernetes/dashboard/v2.0.0-rc2/aio/deploy/recommended/02_dashboard-service.yaml; \ + local-dev/kubectl --kubeconfig="$$(./local-dev/k3d get-kubeconfig --name='$(K3D_NAME)')" --context='$(K3D_NAME)' apply -f https://raw.githubusercontent.com/kubernetes/dashboard/v2.0.0-rc2/aio/deploy/recommended/03_dashboard-secret.yaml; \ + local-dev/kubectl --kubeconfig="$$(./local-dev/k3d get-kubeconfig --name='$(K3D_NAME)')" --context='$(K3D_NAME)' apply -f https://raw.githubusercontent.com/kubernetes/dashboard/v2.0.0-rc2/aio/deploy/recommended/04_dashboard-configmap.yaml; \ + echo '{"apiVersion": "rbac.authorization.k8s.io/v1","kind": "ClusterRoleBinding","metadata": {"name": "kubernetes-dashboard","namespace": "kubernetes-dashboard"},"roleRef": {"apiGroup": "rbac.authorization.k8s.io","kind": "ClusterRole","name": "cluster-admin"},"subjects": [{"kind": "ServiceAccount","name": "kubernetes-dashboard","namespace": "kubernetes-dashboard"}]}' | local-dev/kubectl --kubeconfig="$$(./local-dev/k3d get-kubeconfig --name='$(K3D_NAME)')" --context='$(K3D_NAME)' -n kubernetes-dashboard apply -f - ; \ + local-dev/kubectl --kubeconfig="$$(./local-dev/k3d get-kubeconfig --name='$(K3D_NAME)')" --context='$(K3D_NAME)' apply -f https://raw.githubusercontent.com/kubernetes/dashboard/v2.0.0-rc2/aio/deploy/recommended/06_dashboard-deployment.yaml; \ + local-dev/kubectl --kubeconfig="$$(./local-dev/k3d get-kubeconfig --name='$(K3D_NAME)')" --context='$(K3D_NAME)' apply -f https://raw.githubusercontent.com/kubernetes/dashboard/v2.0.0-rc2/aio/deploy/recommended/07_scraper-service.yaml; \ + local-dev/kubectl --kubeconfig="$$(./local-dev/k3d get-kubeconfig --name='$(K3D_NAME)')" --context='$(K3D_NAME)' apply -f https://raw.githubusercontent.com/kubernetes/dashboard/v2.0.0-rc2/aio/deploy/recommended/08_scraper-deployment.yaml; \ + local-dev/kubectl --kubeconfig="$$(./local-dev/k3d get-kubeconfig --name='$(K3D_NAME)')" --context='$(K3D_NAME)' -n kubernetes-dashboard 
patch deployment kubernetes-dashboard --patch '{"spec": {"template": {"spec": {"containers": [{"name": "kubernetes-dashboard","args": ["--auto-generate-certificates","--namespace=kubernetes-dashboard","--enable-skip-login"]}]}}}}'; \ + local-dev/kubectl --kubeconfig="$$(./local-dev/k3d get-kubeconfig --name='$(K3D_NAME)')" --context='$(K3D_NAME)' -n kubernetes-dashboard rollout status deployment kubernetes-dashboard -w; \ open http://localhost:8001/api/v1/namespaces/kubernetes-dashboard/services/https:kubernetes-dashboard:/proxy/ ; \ - local-dev/kubectl proxy + local-dev/kubectl --kubeconfig="$$(./local-dev/k3d get-kubeconfig --name='$(K3D_NAME)')" --context='$(K3D_NAME)' proxy k8s-dashboard: - kubectl apply -f https://raw.githubusercontent.com/kubernetes/dashboard/v2.0.0-rc2/aio/deploy/recommended.yaml; \ - kubectl -n kubernetes-dashboard rollout status deployment kubernetes-dashboard -w; \ + kubectl --kubeconfig="$$(./local-dev/k3d get-kubeconfig --name='$(K3D_NAME)')" --context='$(K3D_NAME)' apply -f https://raw.githubusercontent.com/kubernetes/dashboard/v2.0.0-rc2/aio/deploy/recommended.yaml; \ + kubectl --kubeconfig="$$(./local-dev/k3d get-kubeconfig --name='$(K3D_NAME)')" --context='$(K3D_NAME)' -n kubernetes-dashboard rollout status deployment kubernetes-dashboard -w; \ echo -e "\nUse this token:"; \ - kubectl -n lagoon describe secret $$(local-dev/kubectl -n lagoon get secret | grep kubernetesbuilddeploy | awk '{print $$1}') | grep token: | awk '{print $$2}'; \ + kubectl --kubeconfig="$$(./local-dev/k3d get-kubeconfig --name='$(K3D_NAME)')" --context='$(K3D_NAME)' -n lagoon describe secret $$(local-dev/kubectl --kubeconfig="$$(./local-dev/k3d get-kubeconfig --name='$(K3D_NAME)')" --context='$(K3D_NAME)' -n lagoon get secret | grep kubernetesbuilddeploy | awk '{print $$1}') | grep token: | awk '{print $$2}'; \ open http://localhost:8001/api/v1/namespaces/kubernetes-dashboard/services/https:kubernetes-dashboard:/proxy/ ; \ - kubectl proxy + kubectl 
--kubeconfig="$$(./local-dev/k3d get-kubeconfig --name='$(K3D_NAME)')" --context='$(K3D_NAME)' proxy # Stop k3d .PHONY: k3d/stop @@ -1134,3 +1142,7 @@ rebuild-push-oc-build-deploy-dind: .PHONY: ui-development ui-development: build/api build/api-db build/local-api-data-watcher-pusher build/ui build/keycloak build/keycloak-db build/broker build/broker-single IMAGE_REPO=$(CI_BUILD_TAG) docker-compose -p $(CI_BUILD_TAG) --compatibility up -d api api-db local-api-data-watcher-pusher ui keycloak keycloak-db broker + +.PHONY: api-development +api-development: build/api build/api-db build/local-api-data-watcher-pusher build/keycloak build/keycloak-db build/broker build/broker-single + IMAGE_REPO=$(CI_BUILD_TAG) docker-compose -p $(CI_BUILD_TAG) --compatibility up -d api api-db local-api-data-watcher-pusher keycloak keycloak-db broker diff --git a/charts/index.yaml b/charts/index.yaml index 82a4943898..f49ac5c23d 100644 --- a/charts/index.yaml +++ b/charts/index.yaml @@ -3,7 +3,67 @@ entries: lagoon-logging: - apiVersion: v2 appVersion: 0.1.0 - created: "2020-05-20T21:04:11.988795-04:00" + created: "2020-07-15T13:20:44.801012001+08:00" + dependencies: + - name: logging-operator + repository: https://kubernetes-charts.banzaicloud.com + version: ~3.4.0 + description: | + A Helm chart for Kubernetes which installs the lagoon container and router logs collection system. + digest: e06440b9199bc69f46c2fb66a20d3ed153c2f0fe749c97a40b2bf137f97c7205 + name: lagoon-logging + type: application + urls: + - lagoon-logging-0.6.3.tgz + version: 0.6.3 + - apiVersion: v2 + appVersion: 0.1.0 + created: "2020-07-15T13:20:44.793677497+08:00" + dependencies: + - name: logging-operator + repository: https://kubernetes-charts.banzaicloud.com + version: ~3.3.0 + description: | + A Helm chart for Kubernetes which installs the lagoon container and router logs collection system. 
+ digest: d944b8a7dd5ba927eab5be5df30ebbd0fafb5c45277a550c89c300853b80167a + name: lagoon-logging + type: application + urls: + - lagoon-logging-0.6.2.tgz + version: 0.6.2 + - apiVersion: v2 + appVersion: 0.1.0 + created: "2020-07-15T13:20:44.786136365+08:00" + dependencies: + - name: logging-operator + repository: https://kubernetes-charts.banzaicloud.com + version: ~3.3.0 + description: | + A Helm chart for Kubernetes which installs the lagoon container and router logs collection system. + digest: 516961903c4c2fc2d8b39b3a9c8f594bdb5db7a1e70c480b89554e64d7303902 + name: lagoon-logging + type: application + urls: + - lagoon-logging-0.6.1.tgz + version: 0.6.1 + - apiVersion: v2 + appVersion: 0.1.0 + created: "2020-07-15T13:20:44.777543065+08:00" + dependencies: + - name: logging-operator + repository: https://kubernetes-charts.banzaicloud.com + version: ~3.2.0 + description: | + A Helm chart for Kubernetes which installs the lagoon container and router logs collection system. + digest: 94c4a3b92dad2f23f61a750d3b9b6e69084c93a6775a0051ce990c3528e90f25 + name: lagoon-logging + type: application + urls: + - lagoon-logging-0.2.0.tgz + version: 0.2.0 + - apiVersion: v2 + appVersion: 0.1.0 + created: "2020-07-15T13:20:44.769678229+08:00" dependencies: - name: logging-operator repository: https://kubernetes-charts.banzaicloud.com @@ -16,10 +76,33 @@ entries: urls: - lagoon-logging-0.1.0.tgz version: 0.1.0 + lagoon-logs-concentrator: + - apiVersion: v2 + appVersion: 1.16.0 + created: "2020-07-15T13:20:44.802816825+08:00" + description: A Helm chart for Kubernetes which installs the Lagoon logs-concentrator + service. 
+ digest: a4373f224b6435b3c4b4556c99a081c9467edc7748991446a11b1735789bbdcb + name: lagoon-logs-concentrator + type: application + urls: + - lagoon-logs-concentrator-0.2.1.tgz + version: 0.2.1 + - apiVersion: v2 + appVersion: 1.16.0 + created: "2020-07-15T13:20:44.802021602+08:00" + description: A Helm chart for Kubernetes which installs the Lagoon logs-concentrator + service. + digest: c66bc7450f61a74cb1e8742c4feb5146c7361e2c04e3171235c1e776ca958327 + name: lagoon-logs-concentrator + type: application + urls: + - lagoon-logs-concentrator-0.2.0.tgz + version: 0.2.0 lagoon-remote: - apiVersion: v2 appVersion: 1.4.0 - created: "2020-05-20T21:04:11.990249-04:00" + created: "2020-07-15T13:20:44.803852312+08:00" description: A Helm chart to run a lagoon-remote digest: 96bc41bc9985cd6a7fbd85a32affea3bbbabdf4baa0cd829e7e3d33fb975ceeb name: lagoon-remote @@ -29,7 +112,7 @@ entries: version: 0.1.3 - apiVersion: v2 appVersion: 1.4.0 - created: "2020-05-20T21:04:11.989691-04:00" + created: "2020-07-15T13:20:44.803424311+08:00" description: A Helm chart to run a lagoon-remote digest: 5756a3fbb46a11f2f43fdcadb41d709d90c70208b90fa0257d48dcacc4df3040 name: lagoon-remote @@ -37,4 +120,4 @@ entries: urls: - lagoon-remote-0.1.2.tgz version: 0.1.2 -generated: "2020-05-20T21:04:11.982298-04:00" +generated: "2020-07-15T13:20:44.761029818+08:00" diff --git a/charts/lagoon-logging-0.2.0.tgz b/charts/lagoon-logging-0.2.0.tgz new file mode 100644 index 0000000000..56c9dd0a4d Binary files /dev/null and b/charts/lagoon-logging-0.2.0.tgz differ diff --git a/charts/lagoon-logging-0.6.1.tgz b/charts/lagoon-logging-0.6.1.tgz new file mode 100644 index 0000000000..74d0031b53 Binary files /dev/null and b/charts/lagoon-logging-0.6.1.tgz differ diff --git a/charts/lagoon-logging-0.6.2.tgz b/charts/lagoon-logging-0.6.2.tgz new file mode 100644 index 0000000000..1fdee215a2 Binary files /dev/null and b/charts/lagoon-logging-0.6.2.tgz differ diff --git a/charts/lagoon-logging-0.6.3.tgz 
b/charts/lagoon-logging-0.6.3.tgz new file mode 100644 index 0000000000..b23e145f71 Binary files /dev/null and b/charts/lagoon-logging-0.6.3.tgz differ diff --git a/charts/lagoon-logging.values.yaml b/charts/lagoon-logging.values.yaml deleted file mode 100644 index 2abc7accf5..0000000000 --- a/charts/lagoon-logging.values.yaml +++ /dev/null @@ -1,91 +0,0 @@ -# Default values for lagoon-logging. -# This is a YAML-formatted file. -# Declare variables to be passed into your templates. - -nameOverride: "" -fullnameOverride: "" - -logsDispatcher: - - name: logs-dispatcher - - replicaCount: 2 - - image: - repository: amazeeiolagoon/logs-dispatcher - pullPolicy: Always - # Overrides the image tag whose default is the chart version. - tag: v1-5-0 - - serviceAccount: - # Specifies whether a service account should be created - create: true - # Annotations to add to the service account - annotations: {} - # The name of the service account to use. - # If not set and create is true, a name is generated using the fullname - # template - # If this value is set, the serviceAccount named must have clusterrole - # view. - name: "" - - podAnnotations: {} - - podSecurityContext: {} - # fsGroup: 2000 - - securityContext: {} - # capabilities: - # drop: - # - ALL - # readOnlyRootFilesystem: true - # runAsNonRoot: true - # runAsUser: 1000 - - resources: {} - # If you want to specify resources, uncomment the following lines, adjust - # them as necessary, and remove the curly braces after 'resources:'. - # limits: - # cpu: 100m - # memory: 128Mi - # requests: - # cpu: 100m - # memory: 128Mi - - nodeSelector: {} - - tolerations: [] - - affinity: {} - -# Don't collect logs from these namespaces. -# Comment out this field to collect from all namespaces. -excludeNamespaces: -- cattle-prometheus -- kube-system -- syn -- syn-cert-manager -- syn-synsights -- syn-cluster-autoscaler - -# Configure the cluster output buffer. -# This may require tweaking to handle high volumes of logs. 
-clusterOutputBuffer: - flush_thread_count: 256 - timekey: 1m - timekey_wait: 10s - timekey_use_utc: true - -# Elasticsearch output config. -elasticsearchHostPort: "443" -elasticsearchScheme: https -# The values below must be supplied during installation as they have no sane -# defaults. -elasticsearchAdminPassword: SOp1qe31Bb6jqIjjpPaqNURtMbBIo7Ah -elasticsearchHost: logs-db.ch2.amazee.io -clusterName: amazeeio-de3 - -# chart dependency on logging-operator -logging-operator: - enabled: true - createCustomResource: false diff --git a/charts/lagoon-logging/Chart.lock b/charts/lagoon-logging/Chart.lock index dfc19bef5c..861f97b626 100644 --- a/charts/lagoon-logging/Chart.lock +++ b/charts/lagoon-logging/Chart.lock @@ -1,6 +1,6 @@ dependencies: - name: logging-operator repository: https://kubernetes-charts.banzaicloud.com - version: 3.0.5 -digest: sha256:b5f1e93500944b39e9f49083594eaecdb4e584ec94dfcd8a38ef4c4835377e35 -generated: "2020-05-07T22:37:40.078678817+08:00" + version: 3.4.0 +digest: sha256:d248221846af4df24cb1402d84c6bf8d8db6c26a6348d10345bd5572ed6d8ab7 +generated: "2020-07-15T11:52:39.381243481+08:00" diff --git a/charts/lagoon-logging/Chart.yaml b/charts/lagoon-logging/Chart.yaml index 71155cb9f5..7e2d3e6760 100644 --- a/charts/lagoon-logging/Chart.yaml +++ b/charts/lagoon-logging/Chart.yaml @@ -12,7 +12,7 @@ type: application # time you make changes to the chart and its templates, including the app # version. # Versions are expected to follow Semantic Versioning (https://semver.org/) -version: 0.1.0 +version: 0.6.3 # This is the version number of the application being deployed. This version # number should be incremented each time you make changes to the application. 
@@ -23,4 +23,4 @@ appVersion: 0.1.0 dependencies: - name: logging-operator repository: https://kubernetes-charts.banzaicloud.com - version: ~3.0.5 + version: ~3.4.0 diff --git a/charts/lagoon-logging/README.md b/charts/lagoon-logging/README.md index 6fcb5088a2..b881442bee 100644 --- a/charts/lagoon-logging/README.md +++ b/charts/lagoon-logging/README.md @@ -19,13 +19,15 @@ helm repo add banzaicloud-stable https://kubernetes-charts.banzaicloud.com helm dependency build lagoon-logging ``` -1. Create a `lagoon-logging.values.yaml` file inside `charts/` directory containing these fields with the - relevant values added. +1. Create a `lagoon-logging.values.yaml` file inside `charts/` directory containing these fields with the relevant values added. + For required values and documentation see the comment block at the end of the chart's `values.yaml`. + +**OpenShift only** + +You must allow the fluentbit pods to run in privileged mode: ``` -elasticsearchHost: ... -elasticsearchAdminPassword: ... -clusterName: ... +fluentbitPrivileged: true ``` 2. Test installation. @@ -38,12 +40,39 @@ helm template --debug --namespace lagoon-logging -f ./lagoon-logging.values.yaml helm upgrade --dry-run --install --debug --create-namespace --namespace lagoon-logging -f ./lagoon-logging.values.yaml lagoon-logging lagoon-logging ``` -2. Run installation. +3. Run installation. 
``` helm upgrade --install --debug --create-namespace --namespace lagoon-logging -f ./lagoon-logging.values.yaml lagoon-logging lagoon-logging ``` +**OpenShift only** + +Give the various serviceaccounts permissions required: +``` +oc project lagoon-logging + +# fluentd statefulset serviceaccount (logging-operator chart) +oc adm policy add-scc-to-user nonroot -z lagoon-logging-fluentd + +# fluentbit daemonset serviceaccount (logging-operator chart) +oc adm policy add-scc-to-user privileged -z lagoon-logging-fluentbit + +# logs-dispatcher statefulset serviceaccount (lagoon-logging chart) +oc adm policy add-scc-to-user anyuid -z lagoon-logging-logs-dispatcher +``` + +And make the project network global: +``` +oc adm pod-network make-projects-global lagoon-logging +``` + +4. Update application-logs and router-logs services + +The `application-logs` and `router-logs` services in the `lagoon` namespace need to be updated to point their `externalName` to the `lagoon-logging-logs-dispatcher` service in the `lagoon-logging` namespace (or wherever you've installed it). + +If you are migrating from the old lagoon logging infrastructure and want to keep logs flowing to both old and new infrastructure, point these services at the relevant `logs-tee` service in the `lagoon-logging` namespace. The `logs-tee` services then need to have the legacy `endpoint` configured. See the comments in the chart `values.yaml` for an example. + ## View logs ### For namespaces without a lagoon.sh/project label @@ -51,6 +80,7 @@ helm upgrade --install --debug --create-namespace --namespace lagoon-logging -f Logs will appear in indices matching these patterns: ``` +application-logs-*_$CLUSTERNAME-* container-logs-*_$CLUSTERNAME-* router-logs-*_$CLUSTERNAME-* ``` @@ -58,6 +88,7 @@ router-logs-*_$CLUSTERNAME-* e.g. 
if `clusterName: test1` ``` +application-logs-*_test1-* container-logs-*_test1-* router-logs-*_test1-* ``` @@ -67,6 +98,7 @@ router-logs-*_test1-* Logs will appear in indices matching these patterns: ``` +application-logs-$PROJECT-* container-logs-$PROJECT-* router-logs-$PROJECT-* ``` @@ -74,6 +106,38 @@ router-logs-$PROJECT-* e.g. if `lagoon.sh/project: drupal-example` ``` +application-logs-drupal-example-* container-logs-drupal-example-* router-logs-drupal-example-* ``` + +## How to upgrade + +NOTE: If the `logging-operator` chart upgrade doesn't work, just uninstall the helm release and install it again. Logs won't be lost since fluentbit will send the contents of the log files once it is reinstalled. + +``` +helm upgrade --debug --namespace lagoon-logging --reuse-values lagoon-logging lagoon-logging +``` + +## Log export + +The `logs-dispatcher` includes support for sending logs to external sinks such as [cloudwatch](https://github.com/fluent-plugins-nursery/fluent-plugin-cloudwatch-logs) or [S3](https://docs.fluentd.org/output/s3). +This feature uses the [fluentd copy plugin](https://docs.fluentd.org/output/copy), so see that link for syntax. + +For example configure the `exportLogs` value like so: + +``` +exportLogs: + s3.conf: | + + @type s3 + ... + + cloudwatch.conf: | + + @type cloudwatch_logs + ... + +``` + +IMPORTANT: use `ignore_error` so that the main log flow to elasticsearch is not interrupted. diff --git a/charts/lagoon-logging/templates/NOTES.txt b/charts/lagoon-logging/templates/NOTES.txt index 21d6d73b23..311f0a2bee 100644 --- a/charts/lagoon-logging/templates/NOTES.txt +++ b/charts/lagoon-logging/templates/NOTES.txt @@ -2,4 +2,4 @@ Thank you for installing {{ .Chart.Name }}. Your release is named {{ .Release.Name }}. 
-Your logs are now being sent to {{ .Values.elasticsearchScheme }}://{{ .Values.elasticsearchHost }}:{{ .Values.elasticsearchHostPort }} +Your logs are now being sent to {{ coalesce .Values.forward.host }}:{{ .Values.forward.hostPort }}. diff --git a/charts/lagoon-logging/templates/_helpers.tpl b/charts/lagoon-logging/templates/_helpers.tpl index 55c9d789d8..6431344a78 100644 --- a/charts/lagoon-logging/templates/_helpers.tpl +++ b/charts/lagoon-logging/templates/_helpers.tpl @@ -92,3 +92,85 @@ Create the name of the service account to use {{- default "default" .Values.logsDispatcher.serviceAccount.name }} {{- end }} {{- end }} + +{{/* +Create a default fully qualified app name for logs-tee-router +We truncate at 63 chars because some Kubernetes name fields are limited to this +(by the DNS naming spec). +*/}} +{{- define "lagoon-logging.logsTeeRouter.fullname" -}} +{{- include "lagoon-logging.fullname" . }}-{{ .Values.logsTeeRouter.name }} +{{- end }} + +{{/* +Selector labels +*/}} +{{- define "lagoon-logging.logsTeeRouter.selectorLabels" -}} +app.kubernetes.io/name: {{ include "lagoon-logging.name" . }} +app.kubernetes.io/component: {{ include "lagoon-logging.logsTeeRouter.fullname" . }} +app.kubernetes.io/instance: {{ .Release.Name }} +{{- end }} + +{{/* +Common labels +*/}} +{{- define "lagoon-logging.logsTeeRouter.labels" -}} +helm.sh/chart: {{ include "lagoon-logging.chart" . }} +{{ include "lagoon-logging.logsTeeRouter.selectorLabels" . }} +{{- if .Chart.AppVersion }} +app.kubernetes.io/version: {{ .Chart.AppVersion | quote }} +{{- end }} +app.kubernetes.io/managed-by: {{ .Release.Service }} +{{- end }} + +{{/* +Create the name of the service account to use +*/}} +{{- define "lagoon-logging.logsTeeRouter.serviceAccountName" -}} +{{- if .Values.logsTeeRouter.serviceAccount.create }} +{{- default (include "lagoon-logging.logsTeeRouter.fullname" .) 
.Values.logsTeeRouter.serviceAccount.name }} +{{- else }} +{{- default "default" .Values.logsTeeRouter.serviceAccount.name }} +{{- end }} +{{- end }} + +{{/* +Create a default fully qualified app name for logs-tee-application +We truncate at 63 chars because some Kubernetes name fields are limited to this +(by the DNS naming spec). +*/}} +{{- define "lagoon-logging.logsTeeApplication.fullname" -}} +{{- include "lagoon-logging.fullname" . }}-{{ .Values.logsTeeApplication.name }} +{{- end }} + +{{/* +Selector labels +*/}} +{{- define "lagoon-logging.logsTeeApplication.selectorLabels" -}} +app.kubernetes.io/name: {{ include "lagoon-logging.name" . }} +app.kubernetes.io/component: {{ include "lagoon-logging.logsTeeApplication.fullname" . }} +app.kubernetes.io/instance: {{ .Release.Name }} +{{- end }} + +{{/* +Common labels +*/}} +{{- define "lagoon-logging.logsTeeApplication.labels" -}} +helm.sh/chart: {{ include "lagoon-logging.chart" . }} +{{ include "lagoon-logging.logsTeeApplication.selectorLabels" . }} +{{- if .Chart.AppVersion }} +app.kubernetes.io/version: {{ .Chart.AppVersion | quote }} +{{- end }} +app.kubernetes.io/managed-by: {{ .Release.Service }} +{{- end }} + +{{/* +Create the name of the service account to use +*/}} +{{- define "lagoon-logging.logsTeeApplication.serviceAccountName" -}} +{{- if .Values.logsTeeApplication.serviceAccount.create }} +{{- default (include "lagoon-logging.logsTeeApplication.fullname" .) 
.Values.logsTeeApplication.serviceAccount.name }} +{{- else }} +{{- default "default" .Values.logsTeeApplication.serviceAccount.name }} +{{- end }} +{{- end }} diff --git a/charts/lagoon-logging/templates/clusterflow.yaml b/charts/lagoon-logging/templates/clusterflow.yaml index 5ced60dfce..4b19d295cb 100644 --- a/charts/lagoon-logging/templates/clusterflow.yaml +++ b/charts/lagoon-logging/templates/clusterflow.yaml @@ -1,7 +1,6 @@ apiVersion: logging.banzaicloud.io/v1beta1 kind: ClusterFlow metadata: - namespace: {{ .Release.Namespace | quote }} name: {{ include "lagoon-logging.fullname" . }} labels: {{- include "lagoon-logging.labels" . | nindent 4 }} @@ -9,12 +8,19 @@ spec: # match entries are considered in order # the empty "select: {}" indicates all namespaces/labels match: - {{- with .Values.excludeNamespaces }} - exclude: namespaces: + - {{ .Release.Namespace }} + {{- with .Values.excludeNamespaces }} {{- toYaml . | nindent 6 }} - {{- end }} + {{- end }} + {{- with .Values.selectNamespaces }} + - select: + namespaces: + {{- toYaml . | nindent 6 }} + {{- else }} - select: {} + {{- end }} filters: - tag_normaliser: {} outputRefs: diff --git a/charts/lagoon-logging/templates/clusteroutput.yaml b/charts/lagoon-logging/templates/clusteroutput.yaml index 05cb76dcf8..f30fb1ead6 100644 --- a/charts/lagoon-logging/templates/clusteroutput.yaml +++ b/charts/lagoon-logging/templates/clusteroutput.yaml @@ -1,7 +1,6 @@ apiVersion: logging.banzaicloud.io/v1beta1 kind: ClusterOutput metadata: - namespace: {{ .Release.Namespace | quote }} name: {{ include "lagoon-logging.fullname" . }} labels: {{- include "lagoon-logging.labels" . 
| nindent 4 }} diff --git a/charts/lagoon-logging/templates/logging.yaml b/charts/lagoon-logging/templates/logging.yaml index 62c6424c2d..0150941c18 100644 --- a/charts/lagoon-logging/templates/logging.yaml +++ b/charts/lagoon-logging/templates/logging.yaml @@ -1,11 +1,23 @@ apiVersion: logging.banzaicloud.io/v1beta1 kind: Logging metadata: - namespace: {{ .Release.Namespace | quote }} name: {{ include "lagoon-logging.fullname" . }} labels: {{- include "lagoon-logging.labels" . | nindent 4 }} spec: - fluentd: {} + fluentd: + security: + podSecurityContext: + runAsUser: 100 + fsGroup: 0 + scaling: + replicas: 3 + {{- with .Values.fluentbitPrivileged }} + fluentbit: + security: + securityContext: + privileged: {{ . }} + {{- else }} fluentbit: {} + {{- end }} controlNamespace: {{ .Release.Namespace | quote }} diff --git a/charts/lagoon-logging/templates/logs-dispatcher.clusterrolebinding.yaml b/charts/lagoon-logging/templates/logs-dispatcher.clusterrolebinding.yaml index 6ae0578013..8d6228be38 100644 --- a/charts/lagoon-logging/templates/logs-dispatcher.clusterrolebinding.yaml +++ b/charts/lagoon-logging/templates/logs-dispatcher.clusterrolebinding.yaml @@ -2,7 +2,6 @@ apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRoleBinding metadata: - namespace: {{ .Release.Namespace | quote }} name: {{ include "lagoon-logging.logsDispatcher.fullname" . }} labels: {{- include "lagoon-logging.logsDispatcher.labels" . | nindent 4 }} diff --git a/charts/lagoon-logging/templates/logs-dispatcher.env.configmap.yaml b/charts/lagoon-logging/templates/logs-dispatcher.env.configmap.yaml index 0bb3be47c7..bea40f2150 100644 --- a/charts/lagoon-logging/templates/logs-dispatcher.env.configmap.yaml +++ b/charts/lagoon-logging/templates/logs-dispatcher.env.configmap.yaml @@ -1,12 +1,18 @@ apiVersion: v1 kind: ConfigMap metadata: - namespace: {{ .Release.Namespace | quote }} name: {{ include "lagoon-logging.logsDispatcher.fullname" . 
}}-env labels: {{- include "lagoon-logging.logsDispatcher.labels" . | nindent 4 }} data: - CLUSTER_NAME: {{ required "A valid .Values.clusterName required!" .Values.clusterName }} - ELASTICSEARCH_HOST: {{ required "A valid .Values.elasticsearchHost required!" .Values.elasticsearchHost }} - ELASTICSEARCH_HOST_PORT: {{ .Values.elasticsearchHostPort | quote }} - ELASTICSEARCH_SCHEME: {{ .Values.elasticsearchScheme }} + CLUSTER_NAME: >- + {{ required "A valid .Values.clusterName required!" .Values.clusterName }} + LOGS_FORWARD_HOST: >- + {{ required "A valid .Values.forward.host required!" .Values.forward.host }} + LOGS_FORWARD_HOSTNAME: >- + {{ coalesce .Values.forward.hostName .Values.forward.host }} + LOGS_FORWARD_HOST_PORT: >- + {{ default "24224" .Values.forward.hostPort }} +{{- if .Values.lagoonLogs.enabled }} + RABBITMQ_HOST: {{ required "If .Values.lagoonLogs.enabled is true, a valid .Values.lagoonLogs.rabbitMQHost required!" .Values.lagoonLogs.rabbitMQHost }} +{{- end }} diff --git a/charts/lagoon-logging/templates/logs-dispatcher.fluent-conf.configmap.yaml b/charts/lagoon-logging/templates/logs-dispatcher.fluent-conf.configmap.yaml index f8df6425cc..0489d27405 100644 --- a/charts/lagoon-logging/templates/logs-dispatcher.fluent-conf.configmap.yaml +++ b/charts/lagoon-logging/templates/logs-dispatcher.fluent-conf.configmap.yaml @@ -1,7 +1,6 @@ apiVersion: v1 kind: ConfigMap metadata: - namespace: {{ .Release.Namespace | quote }} name: {{ include "lagoon-logging.logsDispatcher.fullname" . }}-fluent-conf labels: {{- include "lagoon-logging.logsDispatcher.labels" . 
| nindent 4 }} @@ -12,21 +11,94 @@ data: workers 2 + # prometheus metrics + + @type prometheus + + + @type prometheus_monitor + + + @type prometheus_output_monitor + + + # container logs collected by the logging-operator - # fluentd parameters @type forward - @id container - tag "lagoon.#{ENV['CLUSTER_NAME']}.raw" + @id in_container + tag process.container + + + # application logs emitted by the lagoon_logs drupal module + + @type udp + @id in_application + tag "lagoon.#{ENV['CLUSTER_NAME']}.application" + port 5140 + # max IPv4 UDP payload size + message_length_limit 65507 + + @type json + + + + # router logs emitted by the openshift routers + + @type syslog + @id in_router_openshift + tag "lagoon.#{ENV['CLUSTER_NAME']}.router.openshift" + emit_unmatched_lines true + # syslog parameters + port 5141 + severity_key severity + # max IPv4 UDP payload size + message_length_limit 65507 + + @type regexp + # parse HTTP logs based on the haproxy documentation + # As per the documentation here + # https://www.haproxy.com/documentation/hapee/1-8r1/onepage/#8.2.3, except + # we split the frontend_name into its constituent parts as used by + # openshift. 
+ expression /^.{,15} (?\w+)\[(?\d+)\]: (?\S+):(?\d+) \[(?\S+)\] (?\S+) (?\S+):(?(?\S+):\S+\/pod:(?[^:]+):(?[^:]+)):\S+ (?[\d-]+)\/(?[\d-]+)\/(?[\d-]+)\/(?[\d-]+)\/(?[\d-]+) (?\d+) (?\d+) (?\S+) (?\S+) (?\S+) (?\d+)\/(?\d+)\/(?\d+)\/(?\d+)\/(?\d+) (?\d+)\/(?\d+) "(?.+)"/ + time_key request_date + time_format %d/%b/%Y:%T.%L + types pid:integer,client_port:integer,TR:integer,Tw:integer,Tc:integer,Tr:integer,Ta:integer,bytes_read:integer,actconn:integer,feconn:integer,beconn:integer,srv_conn:integer,retries:integer,srv_queue:integer,backend_queue:integer + - # relabel router logs + # + # optional sources which can be enabled in the chart + # + @include source.d/*.conf + + # + # capture unmatched openshift router logs + # + + @type record_modifier + + index_name router-logs-openshift_parse_error_${ENV['CLUSTER_NAME']}-${Time.at(time).strftime("%Y.%m")} + + + + # + # pre-processing for nginx_router logs + # + # the reason for having the two match blocks is because we have two checks + # to distinguish nginx_router logs: + # * app label is "nginx-ingress" + # * namespace is "syn-nginx-ingress" + # if either of those checks fails the message is tagged as a regular + # container log. + # # check app name first. if app name didn't match, set tag to container log. - + @type rewrite_tag_filter key $.kubernetes.labels.app pattern ^nginx-ingress$ - tag "app-nginx-ingress" + tag "process.app_nginx_ingress" invert true @@ -37,12 +109,12 @@ data: # check namespace_name. if it is okay too, tag as router log. # if namespace didn't match, set tag to container log. 
- + @type rewrite_tag_filter key $.kubernetes.namespace_name pattern ^syn-nginx-ingress$ - tag "lagoon.#{ENV['CLUSTER_NAME']}.router" + tag "lagoon.#{ENV['CLUSTER_NAME']}.router.nginx" invert true @@ -52,135 +124,203 @@ data: - # strip the duplicated log field from router logs - + # + # process container logs + # + # restructure so the kubernetes_metadata plugin can find the keys it needs + + @type record_modifier + remove_keys _dummy_ + + _dummy_ ${record['docker'] = {'container_id' => "#{record.dig('kubernetes','docker_id')}"}; nil} + + + # enrich with k8s metadata (will get the namespace labels) + + @type kubernetes_metadata + @log_level warn + skip_container_metadata true + skip_master_url true + + # strip the duplicate information so that it doesn't appear in logs + + @type record_modifier + remove_keys docker + + # add the index name + + @type record_modifier + + index_name container-logs-${record.dig('kubernetes','namespace_labels','lagoon_sh/project') || "#{record.dig('kubernetes','namespace_name') || 'unknown_project'}_#{ENV['CLUSTER_NAME']}"}-_-${record.dig('kubernetes','namespace_labels','lagoon_sh/environmentType') || "unknown_environmenttype"}-_-${Time.at(time).strftime("%Y.%m")} + + + # post-process to try to eke some more structure out of the logs. + # the last "format none" block is a catch-all for unmatched messages. + + @type parser + key_name log + reserve_data true + + @type multi_format + + format nginx + types size:integer + + + format none + + + + # some container logs have a duplicate message field for some reason, so + # remove that. 
+ + @type record_modifier + remove_keys message + + + # + # process application logs + # + # restructure so the kubernetes_metadata plugin can find the keys it needs + @type record_modifier - remove_keys log + remove_keys _dummy_,type + + _dummy_ ${record['openshift_project'] = record['type']; record['kubernetes'] = {'namespace_name' => record['type'], 'pod_name' => record['host'], 'container_name' => 'unknown'}; record['docker'] = {'container_id' => "#{record['type']}_#{record['host']}"}; nil} + + + # enrich with k8s metadata (will get the namespace labels) + + @type kubernetes_metadata + @log_level warn + skip_container_metadata true + skip_master_url true + + # add the index_name + + @type record_modifier + + index_name application-logs-${record.dig('kubernetes','namespace_labels','lagoon_sh/project') || "#{record.dig('kubernetes','namespace_name') || 'unknown_project'}_#{ENV['CLUSTER_NAME']}"}-_-${record.dig('kubernetes','namespace_labels','lagoon_sh/environmentType') || "unknown_environmenttype"}-_-${Time.at(time).strftime("%Y.%m")} + + + # strip the kubernetes data as it's duplicated in container/router logs and + # not really relevant for application logs + + @type record_modifier + remove_keys docker,kubernetes - # logs are now tagged appropriately, so route to labels based on the tag - - @type route - # route _all_ logs container logs (even nginx-ingress) to @container - - copy - @label @container - - # route just the router logs to @router - - copy - @label @router - - + # + # process nginx_router logs + # + # Strip the nginx-ingress namespace info and add enough dummy information + # so that kubernetes_metadata plugin can get the namespace labels. + # Also strip the duplicated log field. 
+ + @type record_modifier + remove_keys _dummy_,log + + _dummy_ ${record['kubernetes'] = {'namespace_name' => record['namespace'], 'pod_name' => 'nopod', 'container_name' => 'nocontainer'}; record['docker'] = {'container_id' => "#{record['namespace']}_#{record['ingress_name']}"}; nil} + + + # enrich with k8s metadata (will get the namespace labels) + + @type kubernetes_metadata + @log_level warn + skip_container_metadata true + skip_master_url true + + # strip the dummy information so that it doesn't appear in logs + + @type record_modifier + remove_keys _dummy_,docker + + _dummy_ ${record['kubernetes'].delete('pod_name'); record['kubernetes'].delete('container_name'); record['kubernetes'].delete('pod_id'); nil} + + + + # + # process openshift router logs + # + # retructure the record enough for the kubernetes_metadata plugin to get + # namespace labels + + @type record_modifier + remove_keys _dummy_,kubernetes_namespace_name,kubernetes_pod_name,kubernetes_container_name,docker_container_id + + _dummy_ ${record['kubernetes'] = {'namespace_name' => record['kubernetes_namespace_name'], 'pod_name' => record['kubernetes_pod_name'], 'container_name' => record['kubernetes_container_name']}; record['docker'] = {'container_id' => record['docker_container_id']}; nil} + + + # enrich with k8s metadata + + @type kubernetes_metadata + @log_level warn + skip_container_metadata true + skip_master_url true + - + # + # add the router index_name + # + + @type record_modifier + + index_name router-logs-${record.dig('kubernetes','namespace_labels','lagoon_sh/project') || "#{record.dig('kubernetes','namespace_name') || 'unknown_project'}_#{ENV['CLUSTER_NAME']}"}-_-${record.dig('kubernetes','namespace_labels','lagoon_sh/environmentType') || "unknown_environmenttype"}-_-${Time.at(time).strftime("%Y.%m")} + + - + # + # add the lagoon index_name + # the source for this tag is included when lagoonLogs.enabled is true + # + + @type record_modifier + + index_name 
lagoon-logs-${record['project']}-_-all_environments-_-${Time.at(time).strftime("%Y.%m")} + + - + + @include store.d/*.conf + diff --git a/charts/lagoon-logging/templates/logs-dispatcher.secret.yaml b/charts/lagoon-logging/templates/logs-dispatcher.secret.yaml index eabf555307..bd128655e8 100644 --- a/charts/lagoon-logging/templates/logs-dispatcher.secret.yaml +++ b/charts/lagoon-logging/templates/logs-dispatcher.secret.yaml @@ -2,9 +2,31 @@ apiVersion: v1 kind: Secret type: Opaque metadata: - namespace: {{ .Release.Namespace | quote }} - name: {{ include "lagoon-logging.logsDispatcher.fullname" . }} + name: {{ include "lagoon-logging.logsDispatcher.fullname" . }}-env labels: {{- include "lagoon-logging.logsDispatcher.labels" . | nindent 4 }} stringData: - LOGSDB_ADMIN_PASSWORD: {{ required "A valid .Values.elasticsearchAdminPassword required!" .Values.elasticsearchAdminPassword }} + LOGS_FORWARD_USERNAME: {{ required "A valid .Values.forward.username required!" .Values.forward.username }} + LOGS_FORWARD_PASSWORD: {{ required "A valid .Values.forward.password required!" .Values.forward.password }} + # self hostname should match the CN on the client certificate + LOGS_FORWARD_SELF_HOSTNAME: {{ required "A valid .Values.forward.selfHostname required!" .Values.forward.selfHostname }} + LOGS_FORWARD_SHARED_KEY: {{ required "A valid .Values.forward.sharedKey required!" .Values.forward.sharedKey }} +{{- if .Values.lagoonLogs.enabled }} + RABBITMQ_USER: {{ required "If .Values.lagoonLogs.enabled is true, a valid .Values.lagoonLogs.rabbitMQUser required!" .Values.lagoonLogs.rabbitMQUser }} + RABBITMQ_PASSWORD: {{ required "If .Values.lagoonLogs.enabled is true, a valid .Values.lagoonLogs.rabbitMQPassword required!" .Values.lagoonLogs.rabbitMQPassword }} +{{- end }} +--- +apiVersion: v1 +kind: Secret +type: Opaque +metadata: + name: {{ include "lagoon-logging.logsDispatcher.fullname" . }}-tls + labels: + {{- include "lagoon-logging.logsDispatcher.labels" . 
| nindent 4 }} +stringData: + ca.crt: | + {{- required "A valid .Values.tls.caCert required!" .Values.tls.caCert | nindent 4 }} + client.crt: | + {{- required "A valid .Values.tls.clientCert required!" .Values.tls.clientCert | nindent 4 }} + client.key: | + {{- required "A valid .Values.tls.clientKey required!" .Values.tls.clientKey | nindent 4 }} diff --git a/charts/lagoon-logging/templates/logs-dispatcher.service.yaml b/charts/lagoon-logging/templates/logs-dispatcher.service.yaml index 2503031345..6a777b09e5 100644 --- a/charts/lagoon-logging/templates/logs-dispatcher.service.yaml +++ b/charts/lagoon-logging/templates/logs-dispatcher.service.yaml @@ -1,7 +1,6 @@ apiVersion: v1 kind: Service metadata: - namespace: {{ .Release.Namespace | quote }} name: {{ include "lagoon-logging.logsDispatcher.fullname" . }} labels: {{- include "lagoon-logging.logsDispatcher.labels" . | nindent 4 }} @@ -13,8 +12,16 @@ spec: - name: forward port: 24224 protocol: TCP - targetPort: 24224 + targetPort: forward - name: metrics port: 24231 protocol: TCP - targetPort: 24231 + targetPort: metrics + - name: application + port: 5140 + protocol: UDP + targetPort: application + - name: syslog-router + port: 5141 + protocol: UDP + targetPort: syslog-router diff --git a/charts/lagoon-logging/templates/logs-dispatcher.serviceaccount.yaml b/charts/lagoon-logging/templates/logs-dispatcher.serviceaccount.yaml index 5ce527bb83..35e99d41f2 100644 --- a/charts/lagoon-logging/templates/logs-dispatcher.serviceaccount.yaml +++ b/charts/lagoon-logging/templates/logs-dispatcher.serviceaccount.yaml @@ -2,7 +2,6 @@ apiVersion: v1 kind: ServiceAccount metadata: - namespace: {{ .Release.Namespace | quote }} name: {{ include "lagoon-logging.logsDispatcher.serviceAccountName" . }} labels: {{- include "lagoon-logging.logsDispatcher.labels" . 
| nindent 4 }} diff --git a/charts/lagoon-logging/templates/logs-dispatcher.source-lagoon.configmap.yaml b/charts/lagoon-logging/templates/logs-dispatcher.source-lagoon.configmap.yaml new file mode 100644 index 0000000000..b941adf64b --- /dev/null +++ b/charts/lagoon-logging/templates/logs-dispatcher.source-lagoon.configmap.yaml @@ -0,0 +1,26 @@ +{{- if .Values.lagoonLogs.enabled }} +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ include "lagoon-logging.logsDispatcher.fullname" . }}-source-lagoon + labels: + {{- include "lagoon-logging.logsDispatcher.labels" . | nindent 4 }} +data: + lagoon.conf: | + # lagoon logs from rabbitmq + + @type rabbitmq + @id in_lagoon + tag "lagoon.#{ENV['CLUSTER_NAME']}.lagoon" + host "#{ENV['RABBITMQ_HOST']}" + user "#{ENV['RABBITMQ_USER']}" + pass "#{ENV['RABBITMQ_PASSWORD']}" + # logstash default vhost + vhost / + exchange lagoon-logs + exchange_type direct + routing_key "" + queue logs-dispatcher + durable true + +{{- end }} diff --git a/charts/lagoon-logging/templates/logs-dispatcher.statefulset.yaml b/charts/lagoon-logging/templates/logs-dispatcher.statefulset.yaml index 6c576c9166..cea31ef5ec 100644 --- a/charts/lagoon-logging/templates/logs-dispatcher.statefulset.yaml +++ b/charts/lagoon-logging/templates/logs-dispatcher.statefulset.yaml @@ -1,7 +1,6 @@ apiVersion: apps/v1 kind: StatefulSet metadata: - namespace: {{ .Release.Namespace | quote }} name: {{ include "lagoon-logging.logsDispatcher.fullname" . }} labels: {{- include "lagoon-logging.logsDispatcher.labels" . | nindent 4 }} @@ -13,8 +12,17 @@ spec: {{- include "lagoon-logging.logsDispatcher.selectorLabels" . | nindent 6 }} template: metadata: - {{- with .Values.logsDispatcher.podAnnotations }} annotations: + checksum/secret: {{ include (print $.Template.BasePath "/logs-dispatcher.secret.yaml") . | sha256sum }} + checksum/env-configmap: {{ include (print $.Template.BasePath "/logs-dispatcher.env.configmap.yaml") . 
| sha256sum }} + checksum/fluent-conf-configmap: {{ include (print $.Template.BasePath "/logs-dispatcher.fluent-conf.configmap.yaml") . | sha256sum }} + {{- if .Values.exportLogs }} + checksum/store-configmap: {{ include (print $.Template.BasePath "/logs-dispatcher.store.configmap.yaml") . | sha256sum }} + {{- end }} + {{- if .Values.lagoonLogs.enabled }} + checksum/source-lagoon-configmap: {{ include (print $.Template.BasePath "/logs-dispatcher.source-lagoon.configmap.yaml") . | sha256sum }} + {{- end }} + {{- with .Values.logsDispatcher.podAnnotations }} {{- toYaml . | nindent 8 }} {{- end }} labels: @@ -47,6 +55,12 @@ spec: - containerPort: 24231 protocol: TCP name: metrics + - containerPort: 5140 + protocol: UDP + name: application + - containerPort: 5141 + protocol: UDP + name: syslog-router readinessProbe: tcpSocket: port: 24224 @@ -59,11 +73,21 @@ spec: - mountPath: /fluentd/etc/fluent.conf name: {{ include "lagoon-logging.logsDispatcher.fullname" . }}-fluent-conf subPath: fluent.conf + {{- if .Values.exportLogs }} + - mountPath: /fluentd/etc/store.d + name: {{ include "lagoon-logging.logsDispatcher.fullname" . }}-store + {{- end }} + {{- if .Values.lagoonLogs.enabled }} + - mountPath: /fluentd/etc/source.d + name: {{ include "lagoon-logging.logsDispatcher.fullname" . }}-source-lagoon + {{- end }} - mountPath: /fluentd/buffer/ name: buffer + - mountPath: /fluentd/tls/ + name: {{ include "lagoon-logging.logsDispatcher.fullname" . }}-tls envFrom: - secretRef: - name: {{ include "lagoon-logging.logsDispatcher.fullname" . }} + name: {{ include "lagoon-logging.logsDispatcher.fullname" . }}-env - configMapRef: name: {{ include "lagoon-logging.logsDispatcher.fullname" . }}-env resources: @@ -88,6 +112,22 @@ spec: path: fluent.conf name: {{ include "lagoon-logging.logsDispatcher.fullname" . }}-fluent-conf name: {{ include "lagoon-logging.logsDispatcher.fullname" . 
}}-fluent-conf + {{- if .Values.exportLogs }} + - configMap: + defaultMode: 420 + name: {{ include "lagoon-logging.logsDispatcher.fullname" . }}-store + name: {{ include "lagoon-logging.logsDispatcher.fullname" . }}-store + {{- end }} + {{- if .Values.lagoonLogs.enabled }} + - configMap: + defaultMode: 420 + name: {{ include "lagoon-logging.logsDispatcher.fullname" . }}-source-lagoon + name: {{ include "lagoon-logging.logsDispatcher.fullname" . }}-source-lagoon + {{- end }} + - secret: + defaultMode: 420 + secretName: {{ include "lagoon-logging.logsDispatcher.fullname" . }}-tls + name: {{ include "lagoon-logging.logsDispatcher.fullname" . }}-tls volumeClaimTemplates: - metadata: name: buffer diff --git a/charts/lagoon-logging/templates/logs-dispatcher.store.configmap.yaml b/charts/lagoon-logging/templates/logs-dispatcher.store.configmap.yaml new file mode 100644 index 0000000000..f3bb1cd1c8 --- /dev/null +++ b/charts/lagoon-logging/templates/logs-dispatcher.store.configmap.yaml @@ -0,0 +1,10 @@ +{{- if .Values.exportLogs }} +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ include "lagoon-logging.logsDispatcher.fullname" . }}-store + labels: + {{- include "lagoon-logging.logsDispatcher.labels" . | nindent 4 }} +data: + {{- toYaml .Values.exportLogs | nindent 2 }} +{{- end }} diff --git a/charts/lagoon-logging/templates/logs-tee.deployment.yaml b/charts/lagoon-logging/templates/logs-tee.deployment.yaml new file mode 100644 index 0000000000..bafcb90776 --- /dev/null +++ b/charts/lagoon-logging/templates/logs-tee.deployment.yaml @@ -0,0 +1,132 @@ +{{- if .Values.logsTeeRouter.enabled }} +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ include "lagoon-logging.logsTeeRouter.fullname" . }} + labels: + {{- include "lagoon-logging.logsTeeRouter.labels" . | nindent 4 }} +spec: + replicas: {{ .Values.logsTeeRouter.replicaCount }} + selector: + matchLabels: + {{- include "lagoon-logging.logsTeeRouter.selectorLabels" . 
| nindent 6 }} + template: + metadata: + labels: + {{- include "lagoon-logging.logsTeeRouter.selectorLabels" . | nindent 8 }} + spec: + serviceAccountName: {{ include "lagoon-logging.logsTeeRouter.serviceAccountName" . }} + securityContext: + {{- toYaml .Values.logsTeeRouter.podSecurityContext | nindent 8 }} + containers: + - name: socat + securityContext: + {{- toYaml .Values.logsTeeRouter.securityContext | nindent 12 }} + image: "{{ .Values.logsTeeRouter.image.repository }}:{{ .Values.logsTeeRouter.image.tag | default .Chart.AppVersion }}" + imagePullPolicy: {{ .Values.logsTeeRouter.image.pullPolicy }} + args: + # UDP port in + - {{ .Values.logsTeeRouter.listenPort | quote }} + # UDP endpoints out + - {{ include "lagoon-logging.logsDispatcher.fullname" . }}.{{ .Release.Namespace }}.svc.cluster.local:5141 + {{- with .Values.logsTeeRouter.endpoints }} + {{- toYaml . | nindent 10 }} + {{- end }} + ports: + - containerPort: {{ .Values.logsTeeRouter.listenPort }} + protocol: UDP + name: syslog-router + readinessProbe: + exec: + command: + - pgrep + - socat + initialDelaySeconds: 20 + livenessProbe: + exec: + command: + - pgrep + - socat + initialDelaySeconds: 120 + resources: + {{- toYaml .Values.logsTeeRouter.resources | nindent 12 }} + {{- with .Values.logsTeeRouter.nodeSelector }} + nodeSelector: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.logsTeeRouter.affinity }} + affinity: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.logsTeeRouter.tolerations }} + tolerations: + {{- toYaml . | nindent 8 }} + {{- end }} +{{- end }} +{{- if .Values.logsTeeApplication.enabled }} +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ include "lagoon-logging.logsTeeApplication.fullname" . }} + labels: + {{- include "lagoon-logging.logsTeeApplication.labels" . | nindent 4 }} +spec: + replicas: {{ .Values.logsTeeApplication.replicaCount }} + selector: + matchLabels: + {{- include "lagoon-logging.logsTeeApplication.selectorLabels" . 
| nindent 6 }} + template: + metadata: + labels: + {{- include "lagoon-logging.logsTeeApplication.selectorLabels" . | nindent 8 }} + spec: + serviceAccountName: {{ include "lagoon-logging.logsTeeApplication.serviceAccountName" . }} + securityContext: + {{- toYaml .Values.logsTeeApplication.podSecurityContext | nindent 8 }} + containers: + - name: socat + securityContext: + {{- toYaml .Values.logsTeeApplication.securityContext | nindent 12 }} + image: "{{ .Values.logsTeeApplication.image.repository }}:{{ .Values.logsTeeApplication.image.tag | default .Chart.AppVersion }}" + imagePullPolicy: {{ .Values.logsTeeApplication.image.pullPolicy }} + args: + # UDP port in + - {{ .Values.logsTeeApplication.listenPort | quote }} + # UDP endpoints out + - {{ include "lagoon-logging.logsDispatcher.fullname" . }}.{{ .Release.Namespace }}.svc.cluster.local:5140 + {{- with .Values.logsTeeApplication.endpoints }} + {{- toYaml . | nindent 10 }} + {{- end }} + ports: + - containerPort: {{ .Values.logsTeeApplication.listenPort }} + protocol: UDP + name: application + readinessProbe: + exec: + command: + - pgrep + - socat + initialDelaySeconds: 20 + livenessProbe: + exec: + command: + - pgrep + - socat + initialDelaySeconds: 120 + resources: + {{- toYaml .Values.logsTeeApplication.resources | nindent 12 }} + {{- with .Values.logsTeeApplication.nodeSelector }} + nodeSelector: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.logsTeeApplication.affinity }} + affinity: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.logsTeeApplication.tolerations }} + tolerations: + {{- toYaml . 
| nindent 8 }} + {{- end }} +{{- end }} diff --git a/charts/lagoon-logging/templates/logs-tee.service.yaml b/charts/lagoon-logging/templates/logs-tee.service.yaml new file mode 100644 index 0000000000..6ca5133dd8 --- /dev/null +++ b/charts/lagoon-logging/templates/logs-tee.service.yaml @@ -0,0 +1,36 @@ +{{- if .Values.logsTeeRouter.enabled }} +--- +apiVersion: v1 +kind: Service +metadata: + name: {{ include "lagoon-logging.logsTeeRouter.fullname" . }} + labels: + {{- include "lagoon-logging.logsTeeRouter.labels" . | nindent 4 }} +spec: + type: ClusterIP + selector: + {{- include "lagoon-logging.logsTeeRouter.selectorLabels" . | nindent 4 }} + ports: + - name: syslog-router + port: {{ .Values.logsTeeRouter.listenPort }} + protocol: UDP + targetPort: syslog-router +{{- end }} +{{- if .Values.logsTeeApplication.enabled }} +--- +apiVersion: v1 +kind: Service +metadata: + name: {{ include "lagoon-logging.logsTeeApplication.fullname" . }} + labels: + {{- include "lagoon-logging.logsTeeApplication.labels" . | nindent 4 }} +spec: + type: ClusterIP + selector: + {{- include "lagoon-logging.logsTeeApplication.selectorLabels" . | nindent 4 }} + ports: + - name: application + port: {{ .Values.logsTeeApplication.listenPort }} + protocol: UDP + targetPort: application +{{- end }} diff --git a/charts/lagoon-logging/values.yaml b/charts/lagoon-logging/values.yaml index 8ee43e35ab..18d44d330f 100644 --- a/charts/lagoon-logging/values.yaml +++ b/charts/lagoon-logging/values.yaml @@ -9,13 +9,13 @@ logsDispatcher: name: logs-dispatcher - replicaCount: 2 + replicaCount: 3 image: repository: amazeeiolagoon/logs-dispatcher pullPolicy: Always # Overrides the image tag whose default is the chart version. 
- tag: v1-5-0 + tag: master serviceAccount: # Specifies whether a service account should be created @@ -58,34 +58,259 @@ logsDispatcher: affinity: {} +logsTeeRouter: + + enabled: false + + name: logs-tee-router + + replicaCount: 3 + + image: + repository: amazeeiolagoon/logs-tee + pullPolicy: Always + # Overrides the image tag whose default is the chart version. + tag: master + + serviceAccount: + # Specifies whether a service account should be created + create: false + # Annotations to add to the service account + annotations: {} + # The name of the service account to use. + # If not set and create is true, a name is generated using the fullname + # template + # If this value is set, the serviceAccount named must have clusterrole + # view. + name: "" + + listenPort: 5140 + # the logs-dispatcher endpoint is automatically added to this list + # define other endpoints here + #endpoints: + #- logs2logs-db.lagoon.svc.cluster.local + + podAnnotations: {} + + podSecurityContext: {} + # fsGroup: 2000 + + securityContext: {} + # capabilities: + # drop: + # - ALL + # readOnlyRootFilesystem: true + # runAsNonRoot: true + # runAsUser: 1000 + + resources: {} + # If you want to specify resources, uncomment the following lines, adjust + # them as necessary, and remove the curly braces after 'resources:'. + # limits: + # cpu: 100m + # memory: 128Mi + # requests: + # cpu: 100m + # memory: 128Mi + + nodeSelector: {} + + tolerations: [] + + affinity: {} + +logsTeeApplication: + + enabled: false + + name: logs-tee-application + + replicaCount: 3 + + image: + repository: amazeeiolagoon/logs-tee + pullPolicy: Always + # Overrides the image tag whose default is the chart version. + tag: master + + serviceAccount: + # Specifies whether a service account should be created + create: false + # Annotations to add to the service account + annotations: {} + # The name of the service account to use. 
+ # If not set and create is true, a name is generated using the fullname + # template + # If this value is set, the serviceAccount named must have clusterrole + # view. + name: "" + + listenPort: 5140 + # the logs-dispatcher endpoint is automatically added to this list + # define other endpoints here + #endpoints: + #- logs2logs-db.lagoon.svc.cluster.local + + podAnnotations: {} + + podSecurityContext: {} + # fsGroup: 2000 + + securityContext: {} + # capabilities: + # drop: + # - ALL + # readOnlyRootFilesystem: true + # runAsNonRoot: true + # runAsUser: 1000 + + resources: {} + # If you want to specify resources, uncomment the following lines, adjust + # them as necessary, and remove the curly braces after 'resources:'. + # limits: + # cpu: 100m + # memory: 128Mi + # requests: + # cpu: 100m + # memory: 128Mi + + nodeSelector: {} + + tolerations: [] + + affinity: {} + # Don't collect logs from these namespaces. # Comment out this field to collect from all namespaces. excludeNamespaces: +# k8s - cattle-prometheus +- cattle-system +- dbaas-operator +- default +- kube-cleanup-operator +- kube-node-lease +- kube-public - kube-system +- metrics-server - syn +- syn-backup - syn-cert-manager -- syn-synsights - syn-cluster-autoscaler +- syn-efs-provisioner +- syn-resource-locker +- syn-synsights +# openshift +- acme-controller +- appuio-baas-operator +- appuio-dnsmonitor +- appuio-espejo +- appuio-infra +- appuio-monitoring +- appuio-pruner +- appuio-tiller +- dioscuri-controller +- kube-service-catalog +- management-infra +- monitoring-infra +- openshift +- openshift-ansible-service-broker +- openshift-console +- openshift-infra +- openshift-logging +- openshift-metrics-server +- openshift-monitoring +- openshift-node +- openshift-sdn +- openshift-web-console +- tiller # Configure the cluster output buffer. # This may require tweaking to handle high volumes of logs. 
clusterOutputBuffer: - flush_thread_count: 256 + flush_thread_count: 32 timekey: 1m timekey_wait: 10s timekey_use_utc: true -# Elasticsearch output config. -elasticsearchHostPort: "443" -elasticsearchScheme: https -# The values below must be supplied during installation as they have no sane -# defaults. -elasticsearchHost: "" -elasticsearchAdminPassword: "" -clusterName: "" - # chart dependency on logging-operator logging-operator: enabled: true createCustomResource: false + +# lagoon logs collection disabled by default. see below for instructions on +# enabling this. +lagoonLogs: + enabled: false + +# The values below must be supplied during installation. +# Certificates should be provided in PEM format, and are generated as described +# in the README for the lagoon-logs-concentrator chart. +# Sample data shown below. + +#tls: +# caCert: | +# -----BEGIN CERTIFICATE----- +# ... +# -----END CERTIFICATE----- +# clientCert: | +# -----BEGIN CERTIFICATE----- +# ... +# -----END CERTIFICATE----- +# clientKey: | +# -----BEGIN EC PRIVATE KEY----- +# ... +# -----END EC PRIVATE KEY----- +# +#forward: +# username: "example1" +# password: "securepass" +# host: "203.0.113.9" +# # hostName is optional - it is used for TLS verification for when host is an +# # IP address. +# # NOTE: if host is _not_ an IP address and it is presents a certificate +# # without that hostname, you'll also need to set tlsVerifyHostname to +# # false. The hostName field does _not_ override the host field for TLS +# # verification when host is not an IP address. +# hostName: "logs.server.example.com" +# # tlsVerifyHostname: false +# # hostPort is optional, default 24224 +# hostPort: "24224" +# selfHostname: "logs-dispatcher.example1.lagoon.example.com" +# sharedKey: "supersecurekey" +# +#clusterName: "example1" + +# Optional lagoon logs configuration. This should be enabled on a full lagoon +# install, but not in a lagoon-remote install. If enabled, the rabbitMQ* values +# are required. 
+# +#lagoonLogs: +# enabled: true +# rabbitMQHost: secureuser +# rabbitMQUser: secureuser +# rabbitMQPassword: secureuser + +# Optional namespace selection. Logs will _only_ be collected from these +# namespaces. You probably don't want to configure this, except for debugging. +# +#selectNamespaces: +#- drupal-example + +# Optional log export configuration + +#exportLogs: +# s3.conf: | +# +# @type s3 +# ... +# +# cloudwatch.conf: | +# +# @type cloudwatch_logs +# ... +# + +# Openshift only! + +#fluentbitPrivileged: true diff --git a/charts/lagoon-logs-concentrator-0.2.0.tgz b/charts/lagoon-logs-concentrator-0.2.0.tgz new file mode 100644 index 0000000000..f85f353c1b Binary files /dev/null and b/charts/lagoon-logs-concentrator-0.2.0.tgz differ diff --git a/charts/lagoon-logs-concentrator-0.2.1.tgz b/charts/lagoon-logs-concentrator-0.2.1.tgz new file mode 100644 index 0000000000..9eba50868b Binary files /dev/null and b/charts/lagoon-logs-concentrator-0.2.1.tgz differ diff --git a/charts/lagoon-logs-concentrator/.helmignore b/charts/lagoon-logs-concentrator/.helmignore new file mode 100644 index 0000000000..0e8a0eb36f --- /dev/null +++ b/charts/lagoon-logs-concentrator/.helmignore @@ -0,0 +1,23 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. +.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*.orig +*~ +# Various IDEs +.project +.idea/ +*.tmproj +.vscode/ diff --git a/charts/lagoon-logs-concentrator/Chart.yaml b/charts/lagoon-logs-concentrator/Chart.yaml new file mode 100644 index 0000000000..0fdc4d2301 --- /dev/null +++ b/charts/lagoon-logs-concentrator/Chart.yaml @@ -0,0 +1,23 @@ +apiVersion: v2 +name: lagoon-logs-concentrator +description: A Helm chart for Kubernetes which installs the Lagoon logs-concentrator service. 
+ +# A chart can be either an 'application' or a 'library' chart. +# +# Application charts are a collection of templates that can be packaged into versioned archives +# to be deployed. +# +# Library charts provide useful utilities or functions for the chart developer. They're included as +# a dependency of application charts to inject those utilities and functions into the rendering +# pipeline. Library charts do not define any templates and therefore cannot be deployed. +type: application + +# This is the chart version. This version number should be incremented each time you make changes +# to the chart and its templates, including the app version. +# Versions are expected to follow Semantic Versioning (https://semver.org/) +version: 0.2.1 + +# This is the version number of the application being deployed. This version number should be +# incremented each time you make changes to the application. Versions are not expected to +# follow Semantic Versioning. They should reflect the version the application is using. +appVersion: 1.16.0 diff --git a/charts/lagoon-logs-concentrator/README.md b/charts/lagoon-logs-concentrator/README.md new file mode 100644 index 0000000000..4335be7c4f --- /dev/null +++ b/charts/lagoon-logs-concentrator/README.md @@ -0,0 +1,53 @@ +# Logs Concentrator + +This service collects logs from logs-dispatchers (both local and remote) using +fluentd's forward protocol, and sends them to Elasticsearch. + +## Configuration + +See the commented sample configuration at the end of `values.yaml`. + +## TLS + +Clients connect to this service via TLS. Mutual TLS authentication is performed by the client and server. + +Important notes: + +* We run our own CA since the in-cluster CA signs certificates with only one year expiry. +* The instructions below require [cfssl](https://github.com/cloudflare/cfssl). +* Refer to [this documentation](https://coreos.com/os/docs/latest/generate-self-signed-certificates.html) for further details. 
+ +### Generate a CA certificate + +This is only required the first time you set up this chart. + +Edit the `ca-csr.json` as required and run this command: + +``` +cfssl gencert -initca ca-csr.json | cfssljson -bare ca - +rm ca.csr +``` + +You'll end up with `ca-key.pem` and `ca.pem`, which are the CA key and certificate. Store these somewhere safe, they'll be used to generate all future certificates. + +### Generate a server certificate + +This will be the certificate used by the `logs-concentrator`. + +Edit the `server.json` as required and run this command: + +``` +cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=server server.json | cfssljson -bare server +rm server.csr +``` + +### Generate a client certificate + +This will be the certificate used by the `lagoon-logging` chart's `logs-dispatcher`. + +Edit the `client.json` as required and run this command: + +``` +cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=client client.json | cfssljson -bare client +rm client.csr +``` diff --git a/charts/lagoon-logs-concentrator/ca-config.json b/charts/lagoon-logs-concentrator/ca-config.json new file mode 100644 index 0000000000..213ea49e67 --- /dev/null +++ b/charts/lagoon-logs-concentrator/ca-config.json @@ -0,0 +1,25 @@ +{ + "signing": { + "default": { + "expiry": "87600h" + }, + "profiles": { + "server": { + "expiry": "87600h", + "usages": [ + "signing", + "key encipherment", + "server auth" + ] + }, + "client": { + "expiry": "87600h", + "usages": [ + "signing", + "key encipherment", + "client auth" + ] + } + } + } +} diff --git a/charts/lagoon-logs-concentrator/ca-csr.json b/charts/lagoon-logs-concentrator/ca-csr.json new file mode 100644 index 0000000000..91122dfc61 --- /dev/null +++ b/charts/lagoon-logs-concentrator/ca-csr.json @@ -0,0 +1,13 @@ +{ + "CN": "logs-ca.cluster1.example.com", + "hosts": [ + "logs-ca.cluster1.example.com" + ], + "key": { + "algo": "ecdsa", + "size": 256 + }, + "ca": { + "expiry": 
"87600h" + } +} diff --git a/charts/lagoon-logs-concentrator/client.json b/charts/lagoon-logs-concentrator/client.json new file mode 100644 index 0000000000..4813dad0cc --- /dev/null +++ b/charts/lagoon-logs-concentrator/client.json @@ -0,0 +1,10 @@ +{ + "hosts": [ + "logs-dispatcher.cluster2.example.com" + ], + "CN": "logs-dispatcher.cluster2.example.com", + "key": { + "algo": "ecdsa", + "size": 256 + } +} diff --git a/charts/lagoon-logs-concentrator/server.json b/charts/lagoon-logs-concentrator/server.json new file mode 100644 index 0000000000..326e3580a0 --- /dev/null +++ b/charts/lagoon-logs-concentrator/server.json @@ -0,0 +1,10 @@ +{ + "hosts": [ + "logs-concentrator.cluster1.example.com" + ], + "CN": "logs-concentrator.cluster1.example.com", + "key": { + "algo": "ecdsa", + "size": 256 + } +} diff --git a/charts/lagoon-logs-concentrator/templates/NOTES.txt b/charts/lagoon-logs-concentrator/templates/NOTES.txt new file mode 100644 index 0000000000..a92a735533 --- /dev/null +++ b/charts/lagoon-logs-concentrator/templates/NOTES.txt @@ -0,0 +1,5 @@ +Thank you for installing {{ .Chart.Name }}. + +Your release is named {{ .Release.Name }}. + +Your logs are now being sent to {{ default "http" .Values.elasticsearchScheme }}://{{ .Values.elasticsearchHost }}:{{ default "9200" .Values.elasticsearchHostPort }} diff --git a/charts/lagoon-logs-concentrator/templates/_helpers.tpl b/charts/lagoon-logs-concentrator/templates/_helpers.tpl new file mode 100644 index 0000000000..e9dfc9e1f8 --- /dev/null +++ b/charts/lagoon-logs-concentrator/templates/_helpers.tpl @@ -0,0 +1,63 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Expand the name of the chart. +*/}} +{{- define "lagoon-logs-concentrator.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }} +{{- end }} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). 
+If release name contains chart name it will be used as a full name. +*/}} +{{- define "lagoon-logs-concentrator.fullname" -}} +{{- if .Values.fullnameOverride }} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }} +{{- else }} +{{- $name := default .Chart.Name .Values.nameOverride }} +{{- if contains $name .Release.Name }} +{{- .Release.Name | trunc 63 | trimSuffix "-" }} +{{- else }} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }} +{{- end }} +{{- end }} +{{- end }} + +{{/* +Create chart name and version as used by the chart label. +*/}} +{{- define "lagoon-logs-concentrator.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }} +{{- end }} + +{{/* +Common labels +*/}} +{{- define "lagoon-logs-concentrator.labels" -}} +helm.sh/chart: {{ include "lagoon-logs-concentrator.chart" . }} +{{ include "lagoon-logs-concentrator.selectorLabels" . }} +{{- if .Chart.AppVersion }} +app.kubernetes.io/version: {{ .Chart.AppVersion | quote }} +{{- end }} +app.kubernetes.io/managed-by: {{ .Release.Service }} +{{- end }} + +{{/* +Selector labels +*/}} +{{- define "lagoon-logs-concentrator.selectorLabels" -}} +app.kubernetes.io/name: {{ include "lagoon-logs-concentrator.name" . }} +app.kubernetes.io/instance: {{ .Release.Name }} +{{- end }} + +{{/* +Create the name of the service account to use +*/}} +{{- define "lagoon-logs-concentrator.serviceAccountName" -}} +{{- if .Values.serviceAccount.create }} +{{- default (include "lagoon-logs-concentrator.fullname" .) 
.Values.serviceAccount.name }} +{{- else }} +{{- default "default" .Values.serviceAccount.name }} +{{- end }} +{{- end }} diff --git a/charts/lagoon-logs-concentrator/templates/env.configmap.yaml b/charts/lagoon-logs-concentrator/templates/env.configmap.yaml new file mode 100644 index 0000000000..01002ee081 --- /dev/null +++ b/charts/lagoon-logs-concentrator/templates/env.configmap.yaml @@ -0,0 +1,14 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ include "lagoon-logs-concentrator.fullname" . }}-env + labels: + {{- include "lagoon-logs-concentrator.labels" . | nindent 4 }} +data: + ELASTICSEARCH_HOST: {{ required "A valid .Values.elasticsearchHost required!" .Values.elasticsearchHost }} +{{- if .Values.elasticsearchHostPort }} + ELASTICSEARCH_HOST_PORT: {{ .Values.elasticsearchHostPort | quote }} +{{- end }} +{{- if .Values.elasticsearchScheme }} + ELASTICSEARCH_SCHEME: {{ .Values.elasticsearchScheme }} +{{- end }} diff --git a/charts/lagoon-logs-concentrator/templates/fluent-conf.configmap.yaml b/charts/lagoon-logs-concentrator/templates/fluent-conf.configmap.yaml new file mode 100644 index 0000000000..f90259838a --- /dev/null +++ b/charts/lagoon-logs-concentrator/templates/fluent-conf.configmap.yaml @@ -0,0 +1,76 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ include "lagoon-logs-concentrator.fullname" . }}-fluent-conf + labels: + {{- include "lagoon-logs-concentrator.labels" . 
| nindent 4 }} +data: + fluent.conf: | + # vi: ft=fluentd + + workers 4 + + # prometheus metrics + + @type prometheus + + + @type prometheus_monitor + + + @type prometheus_output_monitor + + + @type forward + @id in_forward + add_tag_prefix in_forward + # avoid verbose OpenSSL warnings in fluentd logs due to liveness probes + # @log_level error + + self_hostname logs-concentrator + user_auth true + shared_key "#{ENV['FORWARD_SHARED_KEY']}" + @include user.d/*.conf + + + ca_path /fluentd/tls/ca.crt + cert_path /fluentd/tls/server.crt + private_key_path /fluentd/tls/server.key + client_cert_auth true + + + # send to elasticsearch + + @type elasticsearch + @id out_elasticsearch + # ingestion + target_index_key index_name + include_timestamp true + time_key time + # endpoint + host "#{ENV['ELASTICSEARCH_HOST']}" + port "#{ENV.fetch('ELASTICSEARCH_HOST_PORT','9200')}" + scheme "#{ENV.fetch('ELASTICSEARCH_SCHEME','http')}" + ssl_min_version TLSv1_2 + ssl_max_version TLSv1_3 + user admin + password "#{ENV['LOGSDB_ADMIN_PASSWORD']}" + # endpoint error handling + reconnect_on_error true + reload_on_failure true + request_timeout 600s + slow_flush_log_threshold 300s + log_es_400_reason true + + @type file + path /fluentd/buffer/elasticsearch + # buffer params (per worker) + total_limit_size 8GB + # flush params + flush_thread_count 4 + overflow_action drop_oldest_chunk + + # silence warnings (these have no effect) + type_name _doc + ssl_version TLSv1_2 + diff --git a/charts/lagoon-logs-concentrator/templates/hpa.yaml b/charts/lagoon-logs-concentrator/templates/hpa.yaml new file mode 100644 index 0000000000..dd328ef938 --- /dev/null +++ b/charts/lagoon-logs-concentrator/templates/hpa.yaml @@ -0,0 +1,28 @@ +{{- if .Values.autoscaling.enabled }} +apiVersion: autoscaling/v2beta1 +kind: HorizontalPodAutoscaler +metadata: + name: {{ include "lagoon-logs-concentrator.fullname" . }} + labels: + {{- include "lagoon-logs-concentrator.labels" . 
| nindent 4 }} +spec: + scaleTargetRef: + apiVersion: apps/v1 + kind: StatefulSet + name: {{ include "lagoon-logs-concentrator.fullname" . }} + minReplicas: {{ .Values.autoscaling.minReplicas }} + maxReplicas: {{ .Values.autoscaling.maxReplicas }} + metrics: + {{- if .Values.autoscaling.targetCPUUtilizationPercentage }} + - type: Resource + resource: + name: cpu + targetAverageUtilization: {{ .Values.autoscaling.targetCPUUtilizationPercentage }} + {{- end }} + {{- if .Values.autoscaling.targetMemoryUtilizationPercentage }} + - type: Resource + resource: + name: memory + targetAverageUtilization: {{ .Values.autoscaling.targetMemoryUtilizationPercentage }} + {{- end }} +{{- end }} diff --git a/charts/lagoon-logs-concentrator/templates/ingress.yaml b/charts/lagoon-logs-concentrator/templates/ingress.yaml new file mode 100644 index 0000000000..cb4b4defe2 --- /dev/null +++ b/charts/lagoon-logs-concentrator/templates/ingress.yaml @@ -0,0 +1,41 @@ +{{- if .Values.ingress.enabled -}} +{{- $fullName := include "lagoon-logs-concentrator.fullname" . -}} +{{- $svcPort := .Values.service.port -}} +{{- if semverCompare ">=1.14-0" .Capabilities.KubeVersion.GitVersion -}} +apiVersion: networking.k8s.io/v1beta1 +{{- else -}} +apiVersion: extensions/v1beta1 +{{- end }} +kind: Ingress +metadata: + name: {{ $fullName }} + labels: + {{- include "lagoon-logs-concentrator.labels" . | nindent 4 }} + {{- with .Values.ingress.annotations }} + annotations: + {{- toYaml . | nindent 4 }} + {{- end }} +spec: + {{- if .Values.ingress.tls }} + tls: + {{- range .Values.ingress.tls }} + - hosts: + {{- range .hosts }} + - {{ . | quote }} + {{- end }} + secretName: {{ .secretName }} + {{- end }} + {{- end }} + rules: + {{- range .Values.ingress.hosts }} + - host: {{ .host | quote }} + http: + paths: + {{- range .paths }} + - path: {{ . 
}} + backend: + serviceName: {{ $fullName }} + servicePort: {{ $svcPort }} + {{- end }} + {{- end }} + {{- end }} diff --git a/charts/lagoon-logs-concentrator/templates/secret.yaml b/charts/lagoon-logs-concentrator/templates/secret.yaml new file mode 100644 index 0000000000..dcaf596bf6 --- /dev/null +++ b/charts/lagoon-logs-concentrator/templates/secret.yaml @@ -0,0 +1,41 @@ +apiVersion: v1 +kind: Secret +type: Opaque +metadata: + name: {{ include "lagoon-logs-concentrator.fullname" . }}-env + labels: + {{- include "lagoon-logs-concentrator.labels" . | nindent 4 }} +stringData: + FORWARD_SHARED_KEY: {{ required "A valid .Values.forwardSharedKey required!" .Values.forwardSharedKey }} + LOGSDB_ADMIN_PASSWORD: {{ required "A valid .Values.elasticsearchAdminPassword required!" .Values.elasticsearchAdminPassword }} +--- +apiVersion: v1 +kind: Secret +type: Opaque +metadata: + name: {{ include "lagoon-logs-concentrator.fullname" . }}-tls + labels: + {{- include "lagoon-logs-concentrator.labels" . | nindent 4 }} +stringData: + ca.crt: | + {{- required "A valid .Values.tls.caCert required!" .Values.tls.caCert | nindent 4}} + server.crt: | + {{- required "A valid .Values.tls.serverCert required!" .Values.tls.serverCert | nindent 4}} + server.key: | + {{- required "A valid .Values.tls.serverKey required!" .Values.tls.serverKey | nindent 4}} +--- +apiVersion: v1 +kind: Secret +type: Opaque +metadata: + name: {{ include "lagoon-logs-concentrator.fullname" . }}-users + labels: + {{- include "lagoon-logs-concentrator.labels" . 
| nindent 4 }} +stringData: + user.conf: | + {{- range .Values.users }} + + username "{{ .username }}" + password "{{ .password }}" + + {{- end }} diff --git a/charts/lagoon-logs-concentrator/templates/service.yaml b/charts/lagoon-logs-concentrator/templates/service.yaml new file mode 100644 index 0000000000..ffaaa3cc9a --- /dev/null +++ b/charts/lagoon-logs-concentrator/templates/service.yaml @@ -0,0 +1,19 @@ +apiVersion: v1 +kind: Service +metadata: +{{- with .Values.service.annotations }} + annotations: + {{- toYaml . | nindent 4 }} +{{- end }} + name: {{ include "lagoon-logs-concentrator.fullname" . }} + labels: + {{- include "lagoon-logs-concentrator.labels" . | nindent 4 }} +spec: + type: {{ .Values.service.type }} + ports: + - port: {{ .Values.service.port }} + targetPort: forward + protocol: TCP + name: forward + selector: + {{- include "lagoon-logs-concentrator.selectorLabels" . | nindent 4 }} diff --git a/charts/lagoon-logs-concentrator/templates/serviceaccount.yaml b/charts/lagoon-logs-concentrator/templates/serviceaccount.yaml new file mode 100644 index 0000000000..b29517f125 --- /dev/null +++ b/charts/lagoon-logs-concentrator/templates/serviceaccount.yaml @@ -0,0 +1,12 @@ +{{- if .Values.serviceAccount.create -}} +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ include "lagoon-logs-concentrator.serviceAccountName" . }} + labels: + {{- include "lagoon-logs-concentrator.labels" . | nindent 4 }} + {{- with .Values.serviceAccount.annotations }} + annotations: + {{- toYaml . | nindent 4 }} + {{- end }} +{{- end }} diff --git a/charts/lagoon-logs-concentrator/templates/statefulset.yaml b/charts/lagoon-logs-concentrator/templates/statefulset.yaml new file mode 100644 index 0000000000..0f41ac058e --- /dev/null +++ b/charts/lagoon-logs-concentrator/templates/statefulset.yaml @@ -0,0 +1,116 @@ +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: {{ include "lagoon-logs-concentrator.fullname" . 
}} + labels: + {{- include "lagoon-logs-concentrator.labels" . | nindent 4 }} +spec: +{{- if not .Values.autoscaling.enabled }} + replicas: {{ .Values.replicaCount }} +{{- end }} + serviceName: {{ include "lagoon-logs-concentrator.fullname" . }} + selector: + matchLabels: + {{- include "lagoon-logs-concentrator.selectorLabels" . | nindent 6 }} + template: + metadata: + annotations: + checksum/secret: {{ include (print $.Template.BasePath "/secret.yaml") . | sha256sum }} + checksum/env-configmap: {{ include (print $.Template.BasePath "/env.configmap.yaml") . | sha256sum }} + checksum/fluent-conf-configmap: {{ include (print $.Template.BasePath "/fluent-conf.configmap.yaml") . | sha256sum }} + {{- with .Values.podAnnotations }} + {{- toYaml . | nindent 8 }} + {{- end }} + labels: + {{- include "lagoon-logs-concentrator.selectorLabels" . | nindent 8 }} + spec: + {{- with .Values.imagePullSecrets }} + imagePullSecrets: + {{- toYaml . | nindent 8 }} + {{- end }} + serviceAccountName: {{ include "lagoon-logs-concentrator.serviceAccountName" . }} + securityContext: + {{- toYaml .Values.podSecurityContext | nindent 8 }} + initContainers: + - name: chown-buffer + image: busybox:musl + imagePullPolicy: {{ .Values.image.pullPolicy }} + securityContext: + {{- toYaml .Values.securityContext | nindent 12 }} + command: + - chown + - '100:0' + - /fluentd/buffer + volumeMounts: + - mountPath: /fluentd/buffer/ + name: {{ include "lagoon-logs-concentrator.fullname" . }}-buffer + containers: + - name: fluentd + securityContext: + {{- toYaml .Values.securityContext | nindent 12 }} + image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}" + imagePullPolicy: {{ .Values.image.pullPolicy }} + ports: + - name: forward + containerPort: 24224 + protocol: TCP + livenessProbe: + tcpSocket: + port: forward + readinessProbe: + tcpSocket: + port: forward + envFrom: + - secretRef: + name: {{ include "lagoon-logs-concentrator.fullname" . 
}}-env + - configMapRef: + name: {{ include "lagoon-logs-concentrator.fullname" . }}-env + volumeMounts: + - mountPath: /fluentd/etc/fluent.conf + name: {{ include "lagoon-logs-concentrator.fullname" . }}-fluent-conf + subPath: fluent.conf + - mountPath: /fluentd/etc/user.d/ + name: {{ include "lagoon-logs-concentrator.fullname" . }}-users + - mountPath: /fluentd/buffer/ + name: {{ include "lagoon-logs-concentrator.fullname" . }}-buffer + - mountPath: /fluentd/tls/ + name: {{ include "lagoon-logs-concentrator.fullname" . }}-tls + resources: + {{- toYaml .Values.resources | nindent 12 }} + {{- with .Values.nodeSelector }} + nodeSelector: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.affinity }} + affinity: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.tolerations }} + tolerations: + {{- toYaml . | nindent 8 }} + {{- end }} + volumes: + - configMap: + defaultMode: 420 + items: + - key: fluent.conf + path: fluent.conf + name: {{ include "lagoon-logs-concentrator.fullname" . }}-fluent-conf + name: {{ include "lagoon-logs-concentrator.fullname" . }}-fluent-conf + - secret: + defaultMode: 420 + secretName: {{ include "lagoon-logs-concentrator.fullname" . }}-tls + name: {{ include "lagoon-logs-concentrator.fullname" . }}-tls + - secret: + defaultMode: 420 + secretName: {{ include "lagoon-logs-concentrator.fullname" . }}-users + name: {{ include "lagoon-logs-concentrator.fullname" . }}-users + volumeClaimTemplates: + - metadata: + name: {{ include "lagoon-logs-concentrator.fullname" . 
}}-buffer + spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 32Gi diff --git a/charts/lagoon-logs-concentrator/templates/tests/test-connection.yaml b/charts/lagoon-logs-concentrator/templates/tests/test-connection.yaml new file mode 100644 index 0000000000..adac30105c --- /dev/null +++ b/charts/lagoon-logs-concentrator/templates/tests/test-connection.yaml @@ -0,0 +1,18 @@ +apiVersion: v1 +kind: Pod +metadata: + name: "{{ include "lagoon-logs-concentrator.fullname" . }}-test-connection" + labels: + {{- include "lagoon-logs-concentrator.labels" . | nindent 4 }} + annotations: + "helm.sh/hook": test-success +spec: + containers: + - name: nc + image: busybox + command: ['nc'] + args: + - "-zvw5" + - {{ include "lagoon-logs-concentrator.fullname" . }} + - {{ .Values.service.port | quote }} + restartPolicy: Never diff --git a/charts/lagoon-logs-concentrator/values.yaml b/charts/lagoon-logs-concentrator/values.yaml new file mode 100644 index 0000000000..11ec47fbf8 --- /dev/null +++ b/charts/lagoon-logs-concentrator/values.yaml @@ -0,0 +1,109 @@ +# Default values for lagoon-logs-concentrator. +# This is a YAML-formatted file. +# Declare variables to be passed into your templates. + +replicaCount: 1 + +image: + repository: amazeeiolagoon/logs-concentrator + pullPolicy: IfNotPresent + # Overrides the image tag whose default is the chart version. + tag: logs-concentrator + +imagePullSecrets: [] +nameOverride: "" +fullnameOverride: "" + +serviceAccount: + # Specifies whether a service account should be created + create: false + # Annotations to add to the service account + annotations: {} + # The name of the service account to use. 
+ # If not set and create is true, a name is generated using the fullname template + name: "" + +podAnnotations: {} + +podSecurityContext: {} + # fsGroup: 2000 + +securityContext: {} + # capabilities: + # drop: + # - ALL + # readOnlyRootFilesystem: true + # runAsNonRoot: true + # runAsUser: 1000 + +service: + type: ClusterIP + port: 24224 + # Annotations to add to the service + annotations: {} + +ingress: + enabled: false + annotations: {} + # kubernetes.io/ingress.class: nginx + # kubernetes.io/tls-acme: "true" + hosts: + - host: chart-example.local + paths: [] + tls: [] + # - secretName: chart-example-tls + # hosts: + # - chart-example.local + +resources: + requests: + cpu: 250m + memory: 256Mi + +autoscaling: + enabled: true + minReplicas: 2 + maxReplicas: 4 + targetCPUUtilizationPercentage: 80 + +nodeSelector: {} + +tolerations: [] + +affinity: {} + +# The values below must be supplied during installation. +# Certificates should be provided in PEM format, and are generated as described +# in the README. +# Sample data shown below. + +#elasticsearchHost: "logs-db-service.elasticsearch.svc.cluster.local" +#elasticsearchAdminPassword: "securepass" +#tls: +# caCert: | +# -----BEGIN CERTIFICATE----- +# ... +# -----END CERTIFICATE----- +# serverCert: | +# -----BEGIN CERTIFICATE----- +# ... +# -----END CERTIFICATE----- +# serverKey: | +# -----BEGIN EC PRIVATE KEY----- +# ... +# -----END EC PRIVATE KEY----- +#forwardSharedKey: "securekey" +#users: +#- username: "example1" +# password: "securepass" +#- username: "example2" +# password: "securepass" + +# The values below are optional. + +#elasticsearchHostPort: "443" # default 9200 +#elasticsearchScheme: https # default http +#service: +# type: LoadBalancer # default ClusterIP. Set to LoadBalancer to +# # expose the logs-concentrator service +# # publicly. 
diff --git a/docker-compose.yaml b/docker-compose.yaml index 05feea5519..d46cfb244d 100644 --- a/docker-compose.yaml +++ b/docker-compose.yaml @@ -10,7 +10,7 @@ services: labels: lagoon.type: custom lagoon.template: services/api-db/.lagoon.app.yml - lagoon.image: amazeeiolagoon/api-db:v1-6-0 + lagoon.image: amazeeiolagoon/api-db:v1-8-1 webhook-handler: image: ${IMAGE_REPO:-lagoon}/webhook-handler command: yarn run dev @@ -22,7 +22,7 @@ services: labels: lagoon.type: custom lagoon.template: services/webhook-handler/.lagoon.app.yml - lagoon.image: amazeeiolagoon/webhook-handler:v1-6-0 + lagoon.image: amazeeiolagoon/webhook-handler:v1-8-1 backup-handler: image: ${IMAGE_REPO:-lagoon}/backup-handler restart: on-failure @@ -31,7 +31,7 @@ services: labels: lagoon.type: custom lagoon.template: services/backup-handler/.lagoon.app.yml - lagoon.image: amazeeiolagoon/backup-handler:v1-6-0 + lagoon.image: amazeeiolagoon/backup-handler:v1-8-1 depends_on: - broker broker: @@ -42,7 +42,7 @@ services: labels: lagoon.type: rabbitmq-cluster lagoon.template: services/broker/.lagoon.app.yml - lagoon.image: amazeeiolagoon/broker:v1-6-0 + lagoon.image: amazeeiolagoon/broker:v1-8-1 openshiftremove: image: ${IMAGE_REPO:-lagoon}/openshiftremove command: yarn run dev @@ -52,7 +52,7 @@ services: labels: lagoon.type: custom lagoon.template: services/openshiftremove/.lagoon.app.yml - lagoon.image: amazeeiolagoon/openshiftremove:v1-6-0 + lagoon.image: amazeeiolagoon/openshiftremove:v1-8-1 openshiftbuilddeploy: image: ${IMAGE_REPO:-lagoon}/openshiftbuilddeploy command: yarn run dev @@ -64,7 +64,7 @@ services: labels: lagoon.type: custom lagoon.template: services/openshiftbuilddeploy/.lagoon.app.yml - lagoon.image: amazeeiolagoon/openshiftbuilddeploy:v1-6-0 + lagoon.image: amazeeiolagoon/openshiftbuilddeploy:v1-8-1 openshiftbuilddeploymonitor: image: ${IMAGE_REPO:-lagoon}/openshiftbuilddeploymonitor command: yarn run dev @@ -78,7 +78,7 @@ services: labels: lagoon.type: custom lagoon.template: 
services/openshiftbuilddeploymonitor/.lagoon.app.yml - lagoon.image: amazeeiolagoon/openshiftbuilddeploymonitor:v1-6-0 + lagoon.image: amazeeiolagoon/openshiftbuilddeploymonitor:v1-8-1 openshiftjobs: image: ${IMAGE_REPO:-lagoon}/openshiftjobs command: yarn run dev @@ -92,7 +92,7 @@ services: labels: lagoon.type: custom lagoon.template: services/openshiftjobs/.lagoon.app.yml - lagoon.image: amazeeiolagoon/openshiftjobs:v1-6-0 + lagoon.image: amazeeiolagoon/openshiftjobs:v1-8-1 openshiftjobsmonitor: image: ${IMAGE_REPO:-lagoon}/openshiftjobsmonitor command: yarn run dev @@ -102,7 +102,7 @@ services: labels: lagoon.type: custom lagoon.template: services/openshiftjobsmonitor/.lagoon.app.yml - lagoon.image: amazeeiolagoon/openshiftjobsmonitor:v1-6-0 + lagoon.image: amazeeiolagoon/openshiftjobsmonitor:v1-8-1 openshiftmisc: image: ${IMAGE_REPO:-lagoon}/openshiftmisc command: yarn run dev @@ -112,7 +112,7 @@ services: labels: lagoon.type: custom lagoon.template: services/openshiftmisc/.lagoon.app.yml - lagoon.image: amazeeiolagoon/openshiftmisc:v1-6-0 + lagoon.image: amazeeiolagoon/openshiftmisc:v1-8-1 kubernetesmisc: image: ${IMAGE_REPO:-lagoon}/kubernetesmisc command: yarn run dev @@ -122,7 +122,7 @@ services: labels: lagoon.type: custom lagoon.template: services/kubernetesmisc/.lagoon.app.yml - lagoon.image: amazeeiolagoon/kubernetesmisc:v1-6-0 + lagoon.image: amazeeiolagoon/kubernetesmisc:v1-8-1 kubernetesbuilddeploy: image: ${IMAGE_REPO:-lagoon}/kubernetesbuilddeploy command: yarn run dev @@ -135,7 +135,7 @@ services: labels: lagoon.type: custom lagoon.template: services/kubernetesbuilddeploy/.lagoon.app.yml - lagoon.image: amazeeiolagoon/kubernetesbuilddeploy:v1-6-0 + lagoon.image: amazeeiolagoon/kubernetesbuilddeploy:v1-8-1 kubernetesdeployqueue: image: ${IMAGE_REPO:-lagoon}/kubernetesdeployqueue command: yarn run dev @@ -145,7 +145,7 @@ services: labels: lagoon.type: custom lagoon.template: services/kubernetesdeployqueue/.lagoon.app.yml - lagoon.image: 
amazeeiolagoon/kubernetesdeployqueue:v1-6-0 + lagoon.image: amazeeiolagoon/kubernetesdeployqueue:v1-8-1 kubernetesbuilddeploymonitor: image: ${IMAGE_REPO:-lagoon}/kubernetesbuilddeploymonitor command: yarn run dev @@ -159,7 +159,7 @@ services: labels: lagoon.type: custom lagoon.template: services/kubernetesbuilddeploymonitor/.lagoon.app.yml - lagoon.image: amazeeiolagoon/kubernetesbuilddeploymonitor:v1-6-0 + lagoon.image: amazeeiolagoon/kubernetesbuilddeploymonitor:v1-8-1 kubernetesjobs: image: ${IMAGE_REPO:-lagoon}/kubernetesjobs command: yarn run dev @@ -173,7 +173,7 @@ services: labels: lagoon.type: custom lagoon.template: services/kubernetesjobs/.lagoon.app.yml - lagoon.image: amazeeiolagoon/kubernetesjobs:v1-6-0 + lagoon.image: amazeeiolagoon/kubernetesjobs:v1-8-1 kubernetesjobsmonitor: image: ${IMAGE_REPO:-lagoon}/kubernetesjobsmonitor command: yarn run dev @@ -187,7 +187,7 @@ services: labels: lagoon.type: custom lagoon.template: services/kubernetesjobsmonitor/.lagoon.app.yml - lagoon.image: amazeeiolagoon/kubernetesjobsmonitor:v1-6-0 + lagoon.image: amazeeiolagoon/kubernetesjobsmonitor:v1-8-1 kubernetesremove: image: ${IMAGE_REPO:-lagoon}/kubernetesremove command: yarn run dev @@ -197,7 +197,7 @@ services: labels: lagoon.type: custom lagoon.template: services/kubernetesremove/.lagoon.app.yml - lagoon.image: amazeeiolagoon/kubernetesremove:v1-6-0 + lagoon.image: amazeeiolagoon/kubernetesremove:v1-8-1 logs2rocketchat: image: ${IMAGE_REPO:-lagoon}/logs2rocketchat command: yarn run dev @@ -207,7 +207,7 @@ services: labels: lagoon.type: custom lagoon.template: services/logs2rocketchat/.lagoon.app.yml - lagoon.image: amazeeiolagoon/logs2rocketchat:v1-6-0 + lagoon.image: amazeeiolagoon/logs2rocketchat:v1-8-1 logs2slack: image: ${IMAGE_REPO:-lagoon}/logs2slack command: yarn run dev @@ -217,7 +217,7 @@ services: labels: lagoon.type: custom lagoon.template: services/logs2slack/.lagoon.app.yml - lagoon.image: amazeeiolagoon/logs2slack:v1-6-0 + lagoon.image: 
amazeeiolagoon/logs2slack:v1-8-1 logs2microsoftteams: image: ${IMAGE_REPO:-lagoon}/logs2microsoftteams command: yarn run dev @@ -227,7 +227,7 @@ services: labels: lagoon.type: custom lagoon.template: services/logs2microsoftteams/.lagoon.app.yml - lagoon.image: amazeeiolagoon/logs2microsoftteams:v1-6-0 + lagoon.image: amazeeiolagoon/logs2microsoftteams:v1-8-1 logs2email: image: ${IMAGE_REPO:-lagoon}/logs2email command: yarn run dev @@ -237,7 +237,7 @@ services: labels: lagoon.type: custom lagoon.template: services/logs2slack/.lagoon.app.yml - lagoon.image: amazeeiolagoon/logs2email:v1-6-0 + lagoon.image: amazeeiolagoon/logs2email:v1-8-1 depends_on: - mailhog mailhog: @@ -255,7 +255,7 @@ services: labels: lagoon.type: custom lagoon.template: services/webhooks2tasks/.lagoon.app.yml - lagoon.image: amazeeiolagoon/webhooks2tasks:v1-6-0 + lagoon.image: amazeeiolagoon/webhooks2tasks:v1-8-1 api: image: ${IMAGE_REPO:-lagoon}/api command: yarn run dev @@ -267,10 +267,14 @@ services: - keycloak ports: - '3000:3000' + # Uncomment for local new relic tracking + # environment: + # - NEW_RELIC_LICENSE_KEY= + # - NEW_RELIC_APP_NAME=api-local labels: lagoon.type: custom lagoon.template: services/api/.lagoon.app.yml - lagoon.image: amazeeiolagoon/api:v1-6-0 + lagoon.image: amazeeiolagoon/api:v1-8-1 ui: image: ${IMAGE_REPO:-lagoon}/ui command: yarn run dev @@ -284,7 +288,7 @@ services: labels: lagoon.type: custom lagoon.template: services/ui/.lagoon.app.yml - lagoon.image: amazeeiolagoon/ui:v1-6-0 + lagoon.image: amazeeiolagoon/ui:v1-8-1 ssh: image: ${IMAGE_REPO:-lagoon}/ssh depends_on: @@ -305,7 +309,7 @@ services: labels: lagoon.type: custom lagoon.template: services/ssh/.lagoon.app.yml - lagoon.image: amazeeiolagoon/ssh:v1-6-0 + lagoon.image: amazeeiolagoon/ssh:v1-8-1 auth-server: image: ${IMAGE_REPO:-lagoon}/auth-server command: yarn run dev @@ -319,7 +323,7 @@ services: labels: lagoon.type: custom lagoon.template: services/auth-server/.lagoon.app.yml - lagoon.image: 
amazeeiolagoon/auth-server:v1-6-0 + lagoon.image: amazeeiolagoon/auth-server:v1-8-1 keycloak: image: ${IMAGE_REPO:-lagoon}/keycloak user: '111111111' @@ -330,7 +334,7 @@ services: labels: lagoon.type: custom lagoon.template: services/keycloak/.lagoon.app.yml - lagoon.image: amazeeiolagoon/keycloak:v1-6-0 + lagoon.image: amazeeiolagoon/keycloak:v1-8-1 keycloak-db: image: ${IMAGE_REPO:-lagoon}/keycloak-db ports: @@ -338,7 +342,7 @@ services: labels: lagoon.type: custom lagoon.template: services/keycloak-db/.lagoon.app.yml - lagoon.image: amazeeiolagoon/keycloak-db:v1-6-0 + lagoon.image: amazeeiolagoon/keycloak-db:v1-8-1 tests-kubernetes: image: ${IMAGE_REPO:-lagoon}/tests environment: @@ -454,7 +458,7 @@ services: labels: lagoon.type: custom lagoon.template: services/drush-alias/.lagoon.app.yml - lagoon.image: amazeeiolagoon/drush-alias:v1-6-0 + lagoon.image: amazeeiolagoon/drush-alias:v1-8-1 version: '2' logs-db: image: ${IMAGE_REPO:-lagoon}/logs-db @@ -470,14 +474,14 @@ services: labels: lagoon.type: elasticsearch lagoon.template: services/logs-db/.lagoon.single.yml - lagoon.image: amazeeiolagoon/logs-db:v1-6-0 + lagoon.image: amazeeiolagoon/logs-db:v1-8-1 logs-forwarder: image: ${IMAGE_REPO:-lagoon}/logs-forwarder user: '111111111' labels: lagoon.type: custom lagoon.template: services/logs-forwarder/.lagoon.single.yml - lagoon.image: amazeeiolagoon/logs-forwarder:v1-6-0 + lagoon.image: amazeeiolagoon/logs-forwarder:v1-8-1 logs-db-ui: image: ${IMAGE_REPO:-lagoon}/logs-db-ui user: '111111111' @@ -489,14 +493,14 @@ services: labels: lagoon.type: kibana lagoon.template: services/logs-db-ui/.lagoon.yml - lagoon.image: amazeeiolagoon/logs-db-ui:v1-6-0 + lagoon.image: amazeeiolagoon/logs-db-ui:v1-8-1 logs-db-curator: image: ${IMAGE_REPO:-lagoon}/logs-db-curator user: '111111111' labels: lagoon.type: cli lagoon.template: services/logs-db-curator/.lagoon.app.yml - lagoon.image: amazeeiolagoon/logs-db-curator:v1-6-0 + lagoon.image: amazeeiolagoon/logs-db-curator:v1-8-1 
logs2logs-db: image: ${IMAGE_REPO:-lagoon}/logs2logs-db user: '111111111' @@ -512,7 +516,7 @@ services: labels: lagoon.type: logstash lagoon.template: services/logs2logs-db/.lagoon.yml - lagoon.image: amazeeiolagoon/logs2logs-db:v1-6-0 + lagoon.image: amazeeiolagoon/logs2logs-db:v1-8-1 auto-idler: image: ${IMAGE_REPO:-lagoon}/auto-idler user: '111111111' @@ -521,11 +525,11 @@ services: - './services/auto-idler/idle-clis.sh:/idle-clis.sh' - './services/auto-idler/openshift-services.sh:/openshift-services.sh' - './services/auto-idler/openshift-clis.sh:/openshift-clis.sh' - - './services/auto-idler/create_jwt.sh:/create_jwt.sh' + - './services/auto-idler/create_jwt.py:/create_jwt.py' labels: lagoon.type: custom lagoon.template: services/auto-idler/.lagoon.yml - lagoon.image: amazeeiolagoon/auto-idler:v1-6-0 + lagoon.image: amazeeiolagoon/auto-idler:v1-8-1 storage-calculator: image: ${IMAGE_REPO:-lagoon}/storage-calculator user: '111111111' @@ -534,7 +538,7 @@ services: labels: lagoon.type: custom lagoon.template: services/storage-calculator/.lagoon.yml - lagoon.image: amazeeiolagoon/storage-calculator:v1-6-0 + lagoon.image: amazeeiolagoon/storage-calculator:v1-8-1 logs-collector: image: openshift/origin-logging-fluentd:v3.6.1 labels: @@ -594,7 +598,7 @@ services: - HTTPS_PROXY= - NO_PROXY=harbor-core,harbor-jobservice,harbor-database,harborregistry,harbor-portal,harbor-trivy,127.0.0.1,localhost,.local,.internal - HARBOR_NGINX_ENDPOINT=http://harbor-nginx:8080 - - ROBOT_TOKEN_DURATION=999 + - ROBOT_TOKEN_DURATION=500 - CORE_SECRET=secret123 - JOBSERVICE_SECRET=secret123 - REGISTRY_HTTP_SECRET=secret123 @@ -606,7 +610,7 @@ services: labels: lagoon.type: custom lagoon.template: services/harbor-core/harbor-core.yml - lagoon.image: amazeeiolagoon/harbor-core:v1-6-0 + lagoon.image: amazeeiolagoon/harbor-core:v1-8-1 harbor-database: image: ${IMAGE_REPO:-lagoon}/harbor-database hostname: harbor-database @@ -620,7 +624,7 @@ services: labels: lagoon.type: custom 
lagoon.template: services/harbor-database/harbor-database.yml - lagoon.image: amazeeiolagoon/harbor-database:v1-6-0 + lagoon.image: amazeeiolagoon/harbor-database:v1-8-1 harbor-jobservice: image: ${IMAGE_REPO:-lagoon}/harbor-jobservice hostname: harbor-jobservice @@ -649,7 +653,7 @@ services: labels: lagoon.type: custom lagoon.template: services/harbor-jobservice/harbor-jobservice.yml - lagoon.image: amazeeiolagoon/harbor-jobservice:v1-6-0 + lagoon.image: amazeeiolagoon/harbor-jobservice:v1-8-1 harbor-nginx: image: ${IMAGE_REPO:-lagoon}/harbor-nginx hostname: harbor-nginx @@ -665,7 +669,7 @@ services: labels: lagoon.type: custom lagoon.template: services/harbor-nginx/harbor-nginx.yml - lagoon.image: amazeeiolagoon/harbor-nginx:v1-6-0 + lagoon.image: amazeeiolagoon/harbor-nginx:v1-8-1 harbor-portal: image: ${IMAGE_REPO:-lagoon}/harbor-portal hostname: harbor-portal @@ -675,7 +679,7 @@ services: labels: lagoon.type: custom lagoon.template: services/harbor-portal/harbor-portal.yml - lagoon.image: amazeeiolagoon/harbor-portal:v1-6-0 + lagoon.image: amazeeiolagoon/harbor-portal:v1-8-1 harbor-redis: image: ${IMAGE_REPO:-lagoon}/harbor-redis hostname: harbor-redis @@ -685,7 +689,7 @@ services: labels: lagoon.type: custom lagoon.template: services/harbor-redis/harbor-redis.yml - lagoon.image: amazeeiolagoon/harbor-redis:v1-6-0 + lagoon.image: amazeeiolagoon/harbor-redis:v1-8-1 harbor-trivy: image: ${IMAGE_REPO:-lagoon}/harbor-trivy hostname: harbor-trivy @@ -717,7 +721,7 @@ services: lagoon.type: custom lagoon.template: services/harbor-trivy/harbor-trivy.yml lagoon.name: harbor-trivy - lagoon.image: amazeeiolagoon/harbor-trivy:v1-6-0 + lagoon.image: amazeeiolagoon/harbor-trivy:v1-8-1 harborregistry: image: ${IMAGE_REPO:-lagoon}/harborregistry hostname: harborregistry @@ -739,7 +743,7 @@ services: lagoon.type: custom lagoon.template: services/harborregistry/harborregistry.yml lagoon.name: harborregistry - lagoon.image: amazeeiolagoon/harborregistry:v1-6-0 + lagoon.image: 
amazeeiolagoon/harborregistry:v1-8-1 harborregistryctl: image: ${IMAGE_REPO:-lagoon}/harborregistryctl hostname: harborregistryctl @@ -754,4 +758,10 @@ services: lagoon.type: custom lagoon.template: services/harborregistryctl/harborregistry.yml lagoon.name: harborregistry - lagoon.image: amazeeiolagoon/harborregistryctl:v1-6-0 + lagoon.image: amazeeiolagoon/harborregistryctl:v1-8-1 + api-redis: + image: ${IMAGE_REPO:-lagoon}/api-redis + labels: + lagoon.type: custom + lagoon.template: services/api-redis/.lagoon.app.yml + lagoon.image: amazeeiolagoon/api-redis:v1-8-1 diff --git a/docs/administering_lagoon/install_k8s.md b/docs/administering_lagoon/install_k8s.md new file mode 100644 index 0000000000..24ea63f34e --- /dev/null +++ b/docs/administering_lagoon/install_k8s.md @@ -0,0 +1,125 @@ +# Install local Kubernetes cluster for Lagoon + +Let's see how to install a local lightweight k8s cluster using +k3s by Rancher: [rancher/k3s](https://github.com/rancher/k3s) + +!!!hint + In order to have the best experience we recommend the following: + Linux or Mac OSX + 32 GB+ RAM total + 12 GB+ RAM allocated to Docker + 6+ cores allocated to Docker + SSD disk with 25GB+ free + +## Installation checklist +1. Make sure you have a clean state checking the following (use `-n` option for dry-run): + 1. Make sure no lagoon containers are running by running `make kill`. + 2. Make sure to clean any old lagoon containers and volumes by running `make down`. + 3. Now your `build` dir should be empty and `docker ps` should show no containers running. +2. Make sure to allow `172.17.0.1:5000` as insecure registry, check the [docker docs](https://docs.docker.com/registry/insecure/) for more information. + 1. Edit `insecure-registries` key in your `/etc/docker/daemon.json` and add `"insecure-registries":["172.17.0.1:5000"]` then restart docker service with `systemctl restart docker`. +3.
Using `sysctl vm.max_map_count` check the value of `vm.max_map_count` is at least `262144` or set it using `sysctl -w vm.max_map_count=262144`. We need to increase this value to avoid error [`max virtual memory areas is too low`](https://stackoverflow.com/questions/51445846/elasticsearch-max-virtual-memory-areas-vm-max-map-count-65530-is-too-low-inc/51448773#51448773) on `logs-db` Elasticsearch service. + +## Create a local k8s cluster +1. Now you can create a local k3s Kubernetes cluster by running `make k3d` and see the following notable outputs: + * + ``` + INFO[0000] Creating cluster [k3s-lagoon] + INFO[0000] Creating server using docker.io/rancher/k3s:v1.17.0-k3s.1... + INFO[0008] SUCCESS: created cluster [k3s-lagoon] + ... + The push refers to repository [localhost:5000/lagoon/docker-host] + ... + The push refers to repository [localhost:5000/lagoon/kubectl-build-deploy-dind] + ... + Release "k8up" does not exist. Installing it now. + NAME: k8up + LAST DEPLOYED: Thu May 7 10:45:46 2020 + NAMESPACE: k8up + STATUS: deployed + REVISION: 1 + TEST SUITE: None + namespace/dbaas-operator created + "dbaas-operator" has been added to your repositories + Release "dbaas-operator" does not exist. Installing it now. + NAME: dbaas-operator + LAST DEPLOYED: Thu May 7 10:45:47 2020 + NAMESPACE: dbaas-operator + STATUS: deployed + REVISION: 1 + TEST SUITE: None + Release "mariadbprovider" does not exist. Installing it now. + coalesce.go:165: warning: skipped value for providers: Not a table. + NAME: mariadbprovider + LAST DEPLOYED: Thu May 7 10:45:48 2020 + NAMESPACE: dbaas-operator + STATUS: deployed + REVISION: 1 + TEST SUITE: None + namespace/lagoon created + Release "lagoon-remote" does not exist. Installing it now. + NAME: lagoon-remote + LAST DEPLOYED: Thu May 7 10:45:48 2020 + NAMESPACE: lagoon + STATUS: deployed + REVISION: 1 + TEST SUITE: None + + ``` +2.
At the end of the script, using `docker ps` you should see an output like the following: + * + ``` + CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES + 0d61e8ba168e rancher/k3s:v1.17.0-k3s.1 "/bin/k3s server --h…" 28 minutes ago Up 28 minutes 0.0.0.0:16643->16643/tcp, 0.0.0.0:18080->80/tcp, 0.0.0.0:18443->443/tcp k3d-k3s-lagoon-server + a7960981caaa lagoon/local-registry "/entrypoint.sh /etc…" 30 minutes ago Up 30 minutes 0.0.0.0:5000->5000/tcp lagoon_local-registry_1 + + ``` +3. `make k3d-kubeconfig` will print the `KUBECONFIG` env var you need to start using the cluster. + 1. Execute `export KUBECONFIG="$(./local-dev/k3d get-kubeconfig --name=$(cat k3d))"` inside the terminal. + 2. Now you should be able to use the cluster via an already installed `kubectl` or making a symbolic link to `/usr/local/bin/kubectl -> /your/path/amazee/lagoon/local-dev/kubectl` + 3. If you prefer to use something more visual you could install [k9s](https://k9scli.io/topics/install/) cli tool. + 4. Here is the complete list of pods you should see with `kubectl get pod -A` + ``` + NAMESPACE NAME + kube-system local-path-provisioner + kube-system metrics-server + k8up k8up-operator + dbaas-operator kube-rbac-proxy,manager + kube-system coredns + lagoon docker-host + kube-system helm + kube-system nginx-ingress-default-backend + kube-system lb-port-80,lb-port-443 + kube-system nginx-ingress-controller + ``` + 5. Here is the complete list of deployed helm [releases](https://helm.sh/docs/helm/helm_list/) you should see with `local-dev/helm/helm ls --all-namespaces`. + ``` + NAME NAMESPACE + dbaas-operator dbaas-operator + k8up k8up + lagoon-remote lagoon + mariadbprovider dbaas-operator + nginx kube-system + ``` + +## Deploy Lagoon on Kubernetes +1. TODO + +## Configure Installed Lagoon + +We have a fully running Kubernetes cluster. Now it's time to configure the first project inside of it. Follow the examples in [GraphQL API](graphql_api.md).
+ +## Clean up + +Clean up k3s cluster with `make k3d/stop`. + + +## Troubleshooting + +⚠ **Unable to connect to the server: x509: certificate signed by unknown authority** + +Rebuild the cluster via +``` +make k3d/stop +make k3d +``` diff --git a/docs/developing_lagoon/code_of_conduct.md b/docs/code_of_conduct.md similarity index 100% rename from docs/developing_lagoon/code_of_conduct.md rename to docs/code_of_conduct.md diff --git a/docs/developing_lagoon/contributing.md b/docs/contributing.md similarity index 100% rename from docs/developing_lagoon/contributing.md rename to docs/contributing.md diff --git a/docs/using_lagoon/active_standby.md b/docs/using_lagoon/active_standby.md index ea479a0566..e31567308c 100644 --- a/docs/using_lagoon/active_standby.md +++ b/docs/using_lagoon/active_standby.md @@ -3,10 +3,10 @@ Lagoon supports Active/Standby (also known as blue/green) deployments. ## Configuration -To change an existing project to support active/standby you'll need to configure some project settings in the Lagoon API +To change an existing project to support active/standby you'll need to configure some project settings in the Lagoon API. -`productionEnviromment` should be set to the branch name of the current environment that is active -`standbyProductionEnvironment` should be set to the branch name of the current environment that is in standby +* `productionEnvironment` should be set to the branch name of the current environment that is active. +* `standbyProductionEnvironment` should be set to the branch name of the environment that will be in standby. ``` mutation updateProject { @@ -23,6 +23,7 @@ mutation updateProject { } } ``` + ### `.lagoon.yml` - `production_routes` To configure a project for active/standby in the `.lagoon.yml` file, you'll need to configure the `production_routes` section with any routes you want to attach to the `active` environment, and any routes to the `standby` environment.
During an Active/Standby switch, these routes will migrate between the two environments. @@ -52,10 +53,14 @@ production_routes: > Note: Any routes that are under the section `environments..routes` will not be moved as part of active/standby, these routes will always be attached to the environment as defined. Ensure that if you do need a specific route to be migrated during an active/standby switch, that you remove them from the `environments` section and place them under the `production_routes` section specific to if it should be an `active` or `standby` route. -## Triggering a switch event +## Triggering the active/standby switch +### via the UI +To trigger the switching of environment routes, you can visit the standby environment in the Lagoon UI and click on the button labeled `Switch Active/Standby environments`. You will be prompted to confirm your action. -To trigger an event to switch the environments, you can run the following graphQL mutation, this will inform lagoon to begin the process. +Once confirmed, it will take you to the tasks page where you can view the progress of the switch. +### via the API +The following graphQL mutation can be executed which will start the process of switching the environment routes. ``` mutation ActiveStandby { switchActiveStandby( @@ -94,7 +99,9 @@ By default, projects will be created with the following aliases that will be ava * `lagoon-production` * `lagoon-standby` -The `lagoon-production` alias will resolve point to whichever site is defined as `productionEnvironment`, where `lagoon-standby` will always resolve to the site that is defined as `standbyProductionEnvironment` +The `lagoon-production` alias will resolve to whichever environment is currently in the API as `productionEnvironment`, where `lagoon-standby` will always resolve to the environment that is defined as `standbyProductionEnvironment`. 
+ +> As the active/standby switch updates these as required, `lagoon-production` will always be the `active` environment. These alias are configurable by updating the project, but be aware that changing them may require you to update any scripts that rely on them. @@ -112,4 +119,39 @@ mutation updateProject { standbyAlias } } +``` + +## Notes + +When the active/standby trigger has been executed, the `productionEnvironment` and `standbyProductionEnvironments` will switch within the Lagoon API. Both environments are still classed as `production` environment types. We use the `productionEnvironment` to determine which one is labelled as `active`. For more information on the differences between environment types, read the [documentation for `environment types`](environment_types.md#environment-types) + +``` +query projectByName { + projectByName(name:"drupal-example"){ + productionEnvironment + standbyProductionEnvironment + } +} +``` +Before switching environments +``` +{ + "data": { + "projectByName": { + "productionEnvironment": "production-brancha", + "standbyProductionEnvironment": "production-branchb" + } + } +} +``` +After switching environments +``` +{ + "data": { + "projectByName": { + "productionEnvironment": "production-branchb", + "standbyProductionEnvironment": "production-brancha" + } + } +} ``` \ No newline at end of file diff --git a/docs/using_lagoon/configure_webhooks.md b/docs/using_lagoon/configure_webhooks.md index 0f3dfe440e..27f6c5b2ef 100644 --- a/docs/using_lagoon/configure_webhooks.md +++ b/docs/using_lagoon/configure_webhooks.md @@ -7,7 +7,7 @@ Your Lagoon administrator will also give you the route to the webhook-handler. Y - [Bitbucket](#bitbucket) !!!hint - If you are an amazee.io customer, the route to the webhook-handler is: [`https://hooks.lagoon.amazeeio.cloud`](https://hooks.lagoon.amazeeio.cloud). 
+ If you are an amazee.io customer, the route to the webhook-handler is: [`https://webhook.amazeeio.cloud`](https://webhook.amazeeio.cloud). !!!warning Managing the following settings will require you to have a high level of access to these repositories, which will be controlled by your organization. If you cannot access these settings, please contact your systems administrator or the appropriate person within your organization . diff --git a/docs/using_lagoon/docker_images/mariadb.md b/docs/using_lagoon/docker_images/mariadb.md index 00fc6edcea..2129f3bc2a 100644 --- a/docs/using_lagoon/docker_images/mariadb.md +++ b/docs/using_lagoon/docker_images/mariadb.md @@ -42,6 +42,8 @@ Environment variables defined in MariaDB base image: | `MARIADB_LOG_SLOW` | empty | Variable to control the save of slow queries. | | `MARIADB_LOG_QUERIES` | empty | Variable to control the save of ALL queries. | | `BACKUPS_DIR` | /var/lib/mysql/backup | Default path for databases backups. | +| `MARIADB_DATA_DIR` | /var/lib/mysql | Path of the mariadb data dir, be careful, changing this can cause data loss! | +| `MARIADB_COPY_DATA_DIR_SOURCE` | unset | Path which the entrypoint script of mariadb will use to copy into the defined `MARIADB_DATA_DIR`, this can be used for prepopulating the MariaDB with a database. The script expects actual MariaDB data files and not a sql file! Plus it only copies data if the destination does not already have a mysql datadir in it. | If the `LAGOON_ENVIRONMENT_TYPE` variable is set to `production`, performances are set accordingly by using `MARIADB_INNODB_BUFFER_POOL_SIZE=1024` and `MARIADB_INNODB_LOG_FILE_SIZE=256`.
diff --git a/docs/using_lagoon/docker_images/nginx.md b/docs/using_lagoon/docker_images/nginx.md index 24472d9a8b..3f107510b9 100644 --- a/docs/using_lagoon/docker_images/nginx.md +++ b/docs/using_lagoon/docker_images/nginx.md @@ -60,3 +60,4 @@ Environment variables are meant to contain common information for the `Nginx` co | `BASIC_AUTH` | `restricted` | By not setting `BASIC_AUTH` this will instruct Lagoon to automatically enable basic authentication if `BASIC_AUTH_USERNAME` and `BASIC_AUTH_PASSWORD` are set. To disable basic authentication even if `BASIC_AUTH_USERNAME` and `BASIC_AUTH_PASSWORD` are set, set `BASIC_AUTH` to `off`. | | `BASIC_AUTH_USERNAME` | \(not set\) | Username for basic authentication | | `BASIC_AUTH_PASSWORD` | \(not set\) | Password for basic authentication \(unencrypted\) | +| `FAST_HEALTH_CHECK` | \(not set\) | If set to `true` this will redirect GET requests from certain user agents (StatusCake, Pingdom, Site25x7, Uptime, nagios) to the lightweight Lagoon service healthcheck. | diff --git a/docs/using_lagoon/docker_images/php-fpm.md b/docs/using_lagoon/docker_images/php-fpm.md index 4d72a41d19..2e4466ddb7 100644 --- a/docs/using_lagoon/docker_images/php-fpm.md +++ b/docs/using_lagoon/docker_images/php-fpm.md @@ -88,6 +88,7 @@ Environment variables are meant to contain common information for the PHP contai | :--- | :--- | :--- | | `NEWRELIC_ENABLED` | `false` | Enable NewRelic performance monitoring, needs `NEWRELIC_LICENSE` be configured. | | `NEWRELIC_LICENSE` | \(not set\) | NewRelic license to be used, Important: `NEWRELIC_ENABLED` needs to be set to`true` in order for NewRelic to be enabled. | +| `NEWRELIC_BROWSER_MONITORING_ENABLED` | `true` | This enables auto-insertion of the JavaScript fragments for NewRelic browser monitoring, Important: `NEWRELIC_ENABLED` needs to be set to `true` in order for NewRelic to be enabled. | | `PHP_APC_ENABLED` | `1` | Can be set to 0 to disable APC.
[See php.net](http://php.net/manual/en/apc.configuration.php#ini.apc.enabled). | | `PHP_APC_SHM_SIZE` | `32m` | The size of each shared memory segment given. [See php.net](http://php.net/manual/en/apc.configuration.php#ini.apc.shm-size). | | `PHP_DISPLAY_ERRORS` | `Off` | This determines whether errors should be printed to the screen as part of the output or if they should be hidden from the user. [See php.net](http://php.net/display-errors). | @@ -104,4 +105,3 @@ Environment variables are meant to contain common information for the PHP contai | `PHP_MAX_INPUT_VARS` | `2000` | How many input variables will be accepted. [See php.net](http://php.net/manual/en/info.configuration.php#ini.max-input-vars). | | `PHP_MEMORY_LIMIT` | `400M` | Maximum amount of memory a script may consume. [See php.net](http://php.net/memory-limit). | | `XDEBUG_ENABLE` | \(not set\) | Used to enable `xdebug` extension. [See php.net](http://php.net/manual/en/apc.configuration.php#ini.apc.enabled). | - diff --git a/docs/using_lagoon/docker_images/redis.md b/docs/using_lagoon/docker_images/redis.md index 635ef54e02..ee89cf3896 100644 --- a/docs/using_lagoon/docker_images/redis.md +++ b/docs/using_lagoon/docker_images/redis.md @@ -25,7 +25,7 @@ Environment variables defined in Redis base image. 
See also [https://raw.githubu | Environment Variable | Default | Description | | :--- | :--- | :--- | -| `LOGLEVEL` | notice | Define the level of logs | | `DATABASES` | -1 | Default number of databases created at startup | +| `LOGLEVEL` | notice | Define the level of logs | | `MAXMEMORY` | 100mb | Maximum amount of memory | - +| `REDIS_PASSWORD` | disabled | Enables [authentication feature](https://redis.io/topics/security#authentication-feature) | diff --git a/docs/using_lagoon/docker_images/solr.md b/docs/using_lagoon/docker_images/solr.md index 5ed29c1b06..55b8fb2883 100644 --- a/docs/using_lagoon/docker_images/solr.md +++ b/docs/using_lagoon/docker_images/solr.md @@ -25,4 +25,5 @@ Environment variables defined in `Solr` base image. | Environment Variable | Default | Description | | :--- | :--- | :--- | | `SOLR_JAVA_MEM` | 512M | Default Java HEAP size \(ie. `SOLR_JAVA_MEM="-Xms10g -Xmx10g"` \). | - +| `SOLR_DATA_DIR` | /var/solr | Path of the solr data dir, be careful, changing this can occur data loss! | +| `SOLR_COPY_DATA_DIR_SOURCE` | unset | Path which the entrypoint script of solr will use to copy into the defined `SOLR_DATA_DIR`, this can be used for prepopulating the Solr with a core. The scripts expects actual Solr data files! Plus it only copies data if the destination does not already have a solr core in it. | \ No newline at end of file diff --git a/docs/using_lagoon/drupal/lagoonize.md b/docs/using_lagoon/drupal/lagoonize.md index 8be7a68f45..90826dc72b 100644 --- a/docs/using_lagoon/drupal/lagoonize.md +++ b/docs/using_lagoon/drupal/lagoonize.md @@ -4,14 +4,13 @@ In order for Drupal to work with Lagoon, we need to teach Drupal about Lagoon and Lagoon about Drupal. This happens by copying specific YAML and PHP Files into your Git repository. 
-You find [these Files in our GitHub repository](https://github.com/amazeeio/lagoon/tree/master/docs/using_lagoon/drupal); the easiest way is to [download these files as a ZIP file](https://minhaskamal.github.io/DownGit/#/home?url=https://github.com/amazeeio/lagoon/tree/master/docs/using_lagoon/drupal) and copy them into your Git repository. For each Drupal version and database type you will find an individual folder. A short overview of what they are: +You find [these Files in our GitHub repository](https://github.com/amazeeio/lagoon/tree/master/docs/using_lagoon/drupal); the easiest way is to [download these files as a ZIP file](https://downgit.github.io/#/home?url=https://github.com/amazeeio/lagoon/tree/master/docs/using_lagoon/drupal) and copy them into your Git repository. For each Drupal version and database type you will find an individual folder. A short overview of what they are: - `.lagoon.yml` - The main file that will be used by Lagoon to understand what should be deployed and many more things. This file has some sensible Drupal defaults, if you would like to edit or modify, please check the specific [Documentation for .lagoon.yml](../lagoon_yml.md) - `docker-compose.yml`, `.dockerignore`, and `*.dockerfile` (or `Dockerfile`) - These files are used to run your local Drupal development environment, they tell Docker which services to start and how to build them. They contain sensible defaults and many commented lines. iWe hope that it's well-commented enough to be self-describing. If you would like to find out more, see [Documentation for docker-compose.yml](../docker-compose_yml.md) - `sites/default/*` - These .php and .yml files teach Drupal how to communicate with Lagoon containers both locally and in production. It also provides an easy system for specific overrides in development and production environments. Unlike other Drupal hosting systems, Lagoon never ever injects Drupal settings files into your Drupal. 
Therefore you can edit them however you like. Like all other files, they contain sensible defaults and some commented parts. - `drush/aliases.drushrc.php` - These files are specific to Drush and tell Drush how to talk to the Lagoon GraphQL API in order to learn about all Site Aliases there are. - `drush/drushrc.php` - Some sensible defaults for Drush commands. -- Add `patches` directory if you choose [drupal8-composer-mariadb](../drupal/services/mariadb.md). ### Update your `.gitignore` Settings diff --git a/docs/using_lagoon/drupal/services/solr.md b/docs/using_lagoon/drupal/services/solr.md index e865fc1c4c..f06216f5d2 100644 --- a/docs/using_lagoon/drupal/services/solr.md +++ b/docs/using_lagoon/drupal/services/solr.md @@ -1,10 +1,14 @@ # Solr ## Standard use - For Solr 5.5 and 6.6 we ship the default schema files provided by [search_api_solr](https://www.drupal.org/project/search_api_solr) version 8.x-1.2. Add the Solr version you would like to use in your docker-compose.yml file, following [our example](https://github.com/amazeeio/drupal-example/blob/master/docker-compose.yml#L103-L111). +We provide you with the default schema files provided by [search_api_solr](https://www.drupal.org/project/search_api_solr) version 8.x-1.2. This works for Solr 5.5 and 6.6 + +Specify the Solr version you would like to use in your docker-compose.yml file, following [our example](https://github.com/amazeeio/drupal-example/blob/master/docker-compose.yml#L103-L111). ## Custom schema -To implement schema customizations for Solr in your project, look to how Lagoon [creates our standard images](https://github.com/amazeeio/lagoon/blob/master/images/solr-drupal/Dockerfile). +If you use a different version of the search_api_solr module, you may need to add your own custom schema. The module allows you to download an easy config.zip file containing what you need. 
+ +Also if for any other reason you would like to implement schema customizations for Solr in your project, look to how Lagoon [creates our standard images](https://github.com/amazeeio/lagoon/blob/master/images/solr-drupal/Dockerfile). * In the `solr` section of your docker-compose file replace `image: amazeeio/solr:6.6` with: diff --git a/docs/using_lagoon/harbor/using_harbor.md b/docs/using_lagoon/harbor/using_harbor.md index a6a74d1c50..653f43e0bb 100644 --- a/docs/using_lagoon/harbor/using_harbor.md +++ b/docs/using_lagoon/harbor/using_harbor.md @@ -4,7 +4,7 @@ If you are running Lagoon locally, you can access that UI at [localhost:8084](https://localhost:8084/). The username is `admin` and the password is `admin` -**Note:** If you are hosting a site with amazee.io, we do not allow customer access to the Harbor UI within amazee.io's Lagoon. +**Note:** If you are hosting a site with amazee.io, we do not allow customer access to the Harbor UI within amazee.io's Lagoon. Once logged in, the first screen is a list of all repositories your user has access to. Each "repository" in Harbor correlates to a project in Lagoon. @@ -17,3 +17,31 @@ Within each Harbor repository, you'll see a list of container images from all en From here, you can drill down into an individual container in order to see its details, including an overview of its security scan results. ![Harbor Container Overview](container_overview.png) + + +## How Harbor interacts with the Problems Database + +Lagoon can respond to Harbor webhook scan events and, if the result set matches a Project environment, will use the results and generate entries in the Problems system. 
+ +By default, Lagoon tries to parse out the incoming Harbor repo to match the pattern `PROJECT/ENVIRONMENT/SERVICE` - so if we had a project FOO, with environments PROD and DEV, and each of these environments had services `nodejs` and `mariadb` - an incoming Harbor repo name of `FOO/DEV/mariadb` will tell us precisely which service this image scan corresponds to. + +In the case where the Harbor repo name does not correspond to this schema, we need some way of mapping scans to the right Project/environment/service. To allow this, we introduce an object called a Harbor Scan Matcher. This is essentially a regex that is meant to match against incoming Harbor repo names that associates the scan with existing projects and environments (or a range of them).We make use named capture groups in our regexes to match Project, Environment, and Service names. If the name of an environment, project, or service is not present in your regex, you can assign a default name for each of these that will be used instead. This is best illustrated by an example. 
+ +Below we create a Harbor Scan Matcher that matches an incoming repo name for the FOO project - let's assume that the incoming repo name from Harbor does not contain anything corresponding to the environment name, so we'll by default assume that this is going to go to an environment named PROD.If the harbor repo name is something like `MY_ENTERPRISE_PROJECT-` we could match with by creating the following Harbor Scan Matcher: + +```graphql +mutation addProblemHarborScan { + addProblemHarborScanMatch(input: {name: "EnterpriseProjectMatcher", description:"Matches incoming Harbor Scans for FOO", + defaultLagoonProject: "FOO" + defaultLagoonService: "nodejs" + regex: "^MY_ENTERPRISE_PROJECT-(?.+)$" + }) { + id + name + description + regex + } +} +``` + +This will now match any incoming string of the form `MY_ENTERPRISE_PROJECT-` - assign it to the FOO project and attach any vulnerabilities found to the `nodejs` service.This is an ADMIN ONLY function since the Harbor Scan Matchers need to be set across the entire Lagoon instance. If non-admin users could set these then it would be possible to reroute Harbor scans from one project to another. \ No newline at end of file diff --git a/docs/using_lagoon/lagoon_yml.md b/docs/using_lagoon/lagoon_yml.md index 06ef55ee23..8c91ec1cc4 100644 --- a/docs/using_lagoon/lagoon_yml.md +++ b/docs/using_lagoon/lagoon_yml.md @@ -126,6 +126,17 @@ Note: If you would like to temporarily disable pre/post-rollout tasks during a d This allows for the disabling of the automatically created routes \(NOT the custom routes per environment, see below for them\) all together. +### `routes.autogenerate.allowPullrequests` + +This allows pull request to get autogenerated routes when route autogeneration is disabled. 
+ +``` +routes: + autogenerate: + enabled: false + allowPullrequests: true +``` + ### `routes.autogenerate.insecure` This allows you to define the behavior of the automatic creates routes \(NOT the custom routes per environment, see below for more\). The following options are allowed: @@ -140,10 +151,10 @@ Environment names match your deployed branches or pull requests. This allows for #### `environments.[name].monitoring_urls` -At the end of a deploy, Lagoon will check this field for any URLs which you have specified to add to the API for the purpose of monitoring. The default value for this field is the first route for a project. It is useful for adding specific paths of a project to the API, for consumption by a monitoring service. +!!!danger + This feature will be removed in an upcoming release of Lagoon. Please use the newer [`monitoring-path` method](lagoon_yml.md#monitoring-a-specific-path) on your specific route. -!!!hint - Please note, Lagoon does not provide any direct integration to a monitoring service, this just adds the URLs to the API. On amazee.io, we take the `monitoring_urls` and add them to our StatusCake account. +At the end of a deploy, Lagoon will check this field for any URLs which you have specified to add to the API for the purpose of monitoring. The default value for this field is the first route for a project. It is useful for adding specific paths of a project to the API, for consumption by a monitoring service. #### `environments.[name].routes` @@ -176,6 +187,14 @@ In the `"www.example.com"` example repeated below, we see two more options \(als hsts: max-age=31536000 ``` +#### Monitoring a specific path +When [UptimeRobot](https://uptimerobot.com/) is configured for your cluster (OpenShift or Kubernetes), Lagoon will inject annotations to each route/ingress for use by the `stakater/IngressControllerMonitor`. The default action is to monitor the homepage of the route. 
If you have a specific route to be monitored, this can be overriden by adding a `monitoring-path` to your route specification. A common use is to set up a path for monitoring which bypasses caching to give a more real-time monitoring of your site. + +``` + - "www.example.com": + monitoring-path: "/bypass-cache" +``` + #### Ingress annotations (Redirects) !!!hint @@ -264,6 +283,18 @@ environments: mariadb: statefulset ``` +### `environments.[name].autogenerateRoutes` + +This allows for any environments to get autogenerated routes when route autogeneration is disabled. + +``` +routes: + autogenerate: + enabled: false +environments: + develop: + autogenerateRoutes: true +``` #### Cron jobs - `environments.[name].cronjobs` diff --git a/helpers/check_acme_routes.sh b/helpers/check_acme_routes.sh new file mode 100755 index 0000000000..c4dc864235 --- /dev/null +++ b/helpers/check_acme_routes.sh @@ -0,0 +1,301 @@ +#!/bin/bash + +# Description: script to check routes with exposer pods. +# In case of no DNS record or mis-configuration, script will update the route +# by disabling the tls-acme, removing other acme related annotations and add +# an interal one for filtering purpose + +set -eu -o pipefail + +# Set DEBUG variable to true, to start bash in debug mode +DEBUG="${DEBUG:-"false"}" +if [ "$DEBUG" = "true" ]; then + set -x +fi + +# Some variables + +# Cluster full hostname and API hostname +CLUSTER_HOSTNAME="${CLUSTER_HOSTNAME:-""}" +CLUSTER_API_HOSTNAME="${CLUSTER_API_HOSTNAME:-"$CLUSTER_HOSTNAME"}" + +# Default command +COMMAND=${1:-"help"} + +# Set DRYRUN variable to true to run in dry-run mode +DRYRUN="${DRYRUN:-"false"}" + + +# Set a REGEX variable to filter the execution of the script +REGEX=${REGEX:-".*"} + +# Set NOTIFYONLY to true if you want to send customers a notification +# explaining why Lagoon is not able to issue Let'S Encrypt certificate for +# some routes defined in customer's .lagoon.yml file. 
+# If set to true, no other action rather than notification is done (ie: no annotation or deletion) +NOTIFYONLY=${NOTIFYONLY:-"false"} + +# Help function +function usage() { + echo -e "The available commands are: + - help (get this help) + - getpendingroutes (get a list of routes with acme \"orderStatus\" in Pending + - getdisabledroutes (get a list of routes with \"administratively-disabled\" annotation + - getbrokenroutes (get a list of all possible broken routes) + - updateroutes (update broken routes) + + By default, script doesn't set any default cluster to run routes' checks. Please set CLUSTER_HOSTNAME and CLUSTER_API_HOSTNAME variables. + If you want to change the API endpoint, set CLUSTER_API_HOSTNAME variable. + If you want to change the cluster's hostname, set CLUSTER_HOSTNAME variable. + If you want to filter the execution of the script only for certain projects, set the REGEX variable. + If you want to test against a specific IP, set the CLUSTER_IPS array. + + Examples: + CLUSTER_HOSTNAME=\"ch.amazee.io\" CLUSTER_API_HOSTNAME=\"ch.amazeeio.cloud\" ./check_acme_routes.sh getpendingroutes (Returns a list of all routes witl TLS in Pending status for the defined cluster) + REGEX=\"drupal-example\" ./check_acme_routes.sh getpendingroutes (Returns a list of all routes for all projects matchiing the regex \`drupal-example\` with TLS in Pending status) + REGEX=\"drupal-example-master\" DRYRUN=true ./check_acme_routes.sh updateroutes (Will run in DRYRUN mode to check and update all broken routes in \`drupal-example-master\` project)" + +} + +# Function that performs mandatory variales and dependencies checks +function initial_checks() { + # By default script doesn't set CLUSTER_HOSTNAME and CLUSTER_API_HOSTNAME. At least CLUSTER_HOSTNAME must be set + if [ -z "$CLUSTER_HOSTNAME" ]; then + echo "Please set CLUSTER_HOSTNAME variable" + usage + exit 1 + fi + + # Script depends on `lagoon-cli`. Check if it in installed + if [[ ! 
$(command -v lagoon) ]]; then + echo "Please install \`lagoon-cli\` from https://github.com/amazeeio/lagoon-cli because the script relys on it" + exit 1 + fi +} + +# function to get a list of all "administratively-disabled" routes +function get_all_disabled_routes() { + echo -e "List of routes administratively disabled\n" + oc get route --all-namespaces -o=jsonpath="{range .items[?(@.metadata.annotations.amazee\.io/administratively-disabled)]}{.metadata.namespace}{'\t'}{.metadata.name}{'\n'}{end}" + exit 0 +} + +# Function to check if you are running the script on the right cluster and if you're logged in correctly +function check_cluster_api() { + # Check on which cluster you're going to run commands + if oc whoami --show-server | grep -q -v "$CLUSTER_API_HOSTNAME"; then + echo "Please connect to the right cluster" + exit 1 + fi + + # Check if you're logged in correctly + if [ $(oc status|grep -q "Unauthorized";echo $?) -eq 0 ]; then + echo "Please login into the cluster" + exit 1 + fi +} + +# Function to get a list of all routes with acme.openshift.io/status.provisioningStatus.orderStatus=pending +function get_pending_routes() { + for namespace in $(oc get projects --no-headers=true |awk '{print $1}'|sort -u|grep -E "$REGEX") + do + IFS=$';' + # For each route in a namespace with `tls-acme` set to true, check the `orderStatus` if in pending status + for routelist in $(oc get route -n "$namespace" -o=jsonpath="{range .items[?(@.metadata.annotations.kubernetes\.io/tls-acme=='true')]}{.metadata.name}{'\n'}{.metadata.annotations.acme\.openshift\.io/status}{';'}{end}"|sed "s/^[[:space:]]*//") + do + PENDING_ROUTE_NAME=$(echo "$routelist"|sed -n 1p) + if echo "$routelist"|sed -n 4p | grep -q pending; then + STATUS="Pending" + echo "Route $PENDING_ROUTE_NAME in $namespace is in $STATUS status" + fi + + done + unset IFS + done +} + +# Function for creating an array with all routes that might be updated +function create_routes_array() { + # Get the list of namespaces with 
broker routes, according to REGEX + for namespace in $(oc get routes --all-namespaces|grep exposer|awk '{print $1}'|sort -u|grep -E "$REGEX") + do + # Raw JSON Openshift project output + PROJECTJSON="$(oc get project "$namespace" -o json)" + + # Gather project name based on a label or an annotation + if [ $(echo $PROJECTJSON |grep -q 'lagoon.sh/project'; echo $?) -eq 0 ]; then + PROJECTNAME=$(echo "${PROJECTJSON}" | grep 'lagoon.sh/project' | awk -F'"' '{print $4}') + else + PROJECTNAME=$(echo "${PROJECTJSON}" |grep display-name|awk -F'[][]' '{print $2}'|tr "_" "-") + fi + + # Get the list of broken unique routes for each namespace + for routelist in $(oc get -n "$namespace" route|grep exposer|awk -vNAMESPACE="$namespace" -vPROJECTNAME="$PROJECTNAME" '{print $1";"$2";"NAMESPACE";"PROJECTNAME}'|sort -u -k2 -t ";") + do + # Put the list into an array + ROUTES_ARRAY+=("$routelist") + done + done + + # Create a sorted array of unique route to check + ROUTES_ARRAY_SORTED=($(sort -u -k 2 -t ";"<<<"${ROUTES_ARRAY[*]}")) +} + +# Function to check the routes, update them and delete the exposer's routes +function check_routes() { + + # Cluster array of IPs + CLUSTER_IPS=($(dig +short "$CLUSTER_HOSTNAME")) + for i in "${ROUTES_ARRAY_SORTED[@]}" + do + # Tranform the item into an array + route=($(echo "$i" | tr ";" "\n")) + + # Gather some useful variables + ROUTE_NAME=${route[0]} + ROUTE_HOSTNAME=${route[1]} + ROUTE_NAMESPACE=${route[2]} + ROUTE_PROJECTNAME=${route[3]} + + # Get route DNS record(s) + if [[ $(dig +short "$ROUTE_HOSTNAME" &> /dev/null; echo $?) 
-ne 0 ]]; then + ROUTE_HOSTNAME_IP="null" + else + ROUTE_HOSTNAME_IP=$(dig +short "$ROUTE_HOSTNAME") + fi + + # Check if the route matches the Cluster's IP(s) + if echo "$ROUTE_HOSTNAME_IP" | grep -E -q -v "${CLUSTER_IPS[*]}"; then + + # If IP is empty, then no DNS record set + if [ -z "$ROUTE_HOSTNAME_IP" ]; then + DNS_ERROR="No A or CNAME record set" + else + DNS_ERROR="$ROUTE_HOSTNAME in $ROUTE_NAMESPACE has no DNS record poiting to ${CLUSTER_IPS[*]} and going to disable tls-acme" + fi + + # Print the error on stdout + echo "$DNS_ERROR" + + if [[ "$NOTIFYONLY" = "true" ]]; then + notify_customer "$ROUTE_PROJECTNAME" + else + # Call the update function to update the route + update_annotation "$ROUTE_HOSTNAME" "$ROUTE_NAMESPACE" + notify_customer "$ROUTE_PROJECTNAME" + + # Now once the main route is updated, it's time to get rid of exposers' routes + for j in $(oc get -n "$ROUTE_NAMESPACE" route|grep exposer|grep -E '(^|\s)'"$ROUTE_HOSTNAME"'($|\s)'|awk '{print $1";"$2}') + do + ocroute=($(echo "$j" | tr ";" "\n")) + OCROUTE_NAME=${ocroute[0]} + if [[ $DRYRUN = true ]]; then + echo -e "DRYRUN oc delete -n $ROUTE_NAMESPACE route $OCROUTE_NAME" + else + echo -e "\nDelete route $OCROUTE_NAME" + oc delete -n "$ROUTE_NAMESPACE" route "$OCROUTE_NAME" + fi + done + fi + fi + echo -e "\n" + + + done +} + +# Function to update route's annotation (ie: update tls-amce, remove tls-acme-awaiting-* and set a new one for internal purpose) +function update_annotation() { + echo "Update route's annotations" + OCOPTIONS="--overwrite" + if [[ "$DRYRUN" = "true" ]]; then + OCOPTIONS="--dry-run --overwrite" + fi + + # Annotate the route + oc annotate -n "$2" $OCOPTIONS route "$1" acme.openshift.io/status- kubernetes.io/tls-acme-awaiting-authorization-owner- kubernetes.io/tls-acme-awaiting-authorization-at-url- kubernetes.io/tls-acme="false" amazee.io/administratively-disabled="$(date +%s)" +} + + +# Function to notify customer about the misconfiguration of their routes +function 
notify_customer() { + + # Get Slack|Rocketchat channel and webhook + if [ $(TEST=$(lagoon list slack -p "$1" --no-header|awk '{print $3";"$4}'); echo $?) -eq 0 ]; then + NOTIFICATION="slack" + elif [ $(TEST=$(lagoon list rocketchat -p "$1" --no-header|awk '{print $3";"$4}'); echo $?) -eq 0 ]; then + NOTIFICATION="rocketchat" + else + echo "No notification set" + return 0 + fi + + MESSAGE="Your $ROUTE_HOSTNAME route is configured in the \`.lagoon.yml\` file to issue an TLS certificate from Lets Encrypt. Unfortunately Lagoon is unable to issue a certificate as $DNS_ERROR.\nTo be issued correctly, the DNS records for $ROUTE_HOSTNAME should point to $CLUSTER_HOSTNAME with an CNAME record (preferred) or to ${CLUSTER_IPS[*]} via an A record (also possible but not preferred).\nIf you don't need the SSL certificate or you are using a CDN that provides you with an TLS certificate, please update your .lagoon.yml file by setting the tls-acme parameter to false for $ROUTE_HOSTNAME, as described here: https://lagoon.readthedocs.io/en/latest/using_lagoon/lagoon_yml/#ssl-configuration-tls-acme.\nWe have now administratively disabled the issuing of Lets Encrypt certificate for $ROUTE_HOSTNAME in order to protect the cluster, this will be reset during the next deployment, therefore we suggest to resolve this issue as soon as possible. 
Feel free to reach out to us for further information.\nThanks you.\namazee.io team" + + NOTIFICATION_DATA=($(lagoon list $NOTIFICATION -p "$1" --no-header|awk '{print $3";"$4}')) + for notification in ${NOTIFICATION_DATA[@]} + do + CHANNEL=$(echo "$notification"|cut -f1 -d ";") + WEBHOOK=$(echo "$notification"|cut -f2 -d ";") + + # json Payload + PAYLOAD="\"channel\": \"$CHANNEL\", \"text\": \"${MESSAGE}\"" + + echo -e "Sending notification into ${CHANNEL}" + + # Execute curl to send message into the channel + if [[ $DRYRUN = true ]]; then + echo "DRYRUN Sending notification on \"$NOTIFICATION\" curl -X POST -H 'Content-type: application/json' --data '{'"$PAYLOAD"'}' "$WEBHOOK"" + else + curl -X POST -H 'Content-type: application/json' --data '{'"${PAYLOAD}"'}' ${WEBHOOK} + fi + done +} + +# Main function +function main() { + + COMMAND="$1" + + # Check first the cluster you're connected to + echo -e "You're running the script on $CLUSTER_HOSTNAME\nDRYRUN mode is set to \"$DRYRUN\"" + check_cluster_api + + case "$COMMAND" in + help) + usage + ;; + getpendingroutes) + get_pending_routes + ;; + getdisabledroutes) + get_all_disabled_routes + ;; + getbrokenroutes) + echo -e "\nCreating a list of possible broken routes" + create_routes_array + echo -e "ROUTE_NAMESPACE;ROUTE_NAME;ROUTE_HOSTNAME"|column -t -s ";" + for i in "${ROUTES_ARRAY_SORTED[@]}" + do + # Tranform the item into an array + route=($(echo "$i" | tr ";" "\n")) + # Gather some useful variables + ROUTE_NAME=${route[0]} + ROUTE_HOSTNAME=${route[1]} + ROUTE_NAMESPACE=${route[2]} + echo -e "$ROUTE_NAMESPACE;$ROUTE_NAME;$ROUTE_HOSTNAME"|column -t -s ";" + done + ;; + updateroutes) + echo -e "Checking routes\n" + create_routes_array + check_routes + ;; + *) + usage + ;; + esac +} + +initial_checks "$COMMAND" +main "$COMMAND" diff --git a/helpers/label-namespaces.sh b/helpers/label-namespaces.sh new file mode 100755 index 0000000000..e9f1f93c8e --- /dev/null +++ b/helpers/label-namespaces.sh @@ -0,0 +1,39 @@ 
+#!/usr/bin/env bash + +## +# Label all namespaces with lagoon info +# +# Old environments weren't labelled the way that Lagoon expects. This script +# can be run against a cluster to add the missing labels. + +set -euo pipefail +#set -x + +# Loop through all oc projects. +while read -r project ; do + + # Check if lagoon-env configmap exists. + if oc get configmap -n "$project" lagoon-env >/dev/null 2>&1; then + + echo "################################################" + echo "Annotating project: $project..." + echo "################################################" + + LAGOON_PROJECT=$(oc get configmaps -n "$project" lagoon-env -o yaml | awk '/LAGOON_PROJECT:/ { print $2 }') + LAGOON_ENVIRONMENT_TYPE=$(oc get configmaps -n "$project" lagoon-env -o yaml | awk '/LAGOON_ENVIRONMENT_TYPE:/ { print $2 }') + LAGOON_GIT_SAFE_BRANCH=$(oc get configmaps -n "$project" lagoon-env -o yaml | awk '/LAGOON_GIT_SAFE_BRANCH:/ { print $2 }') + MARIADB_DATABASE=$(oc get configmaps -n "$project" lagoon-env -o yaml | awk '/MARIADB_DATABASE:/ { print $2 }') + MARIADB_USERNAME=$(oc get configmaps -n "$project" lagoon-env -o yaml | awk '/MARIADB_USERNAME:/ { print $2 }') + + oc label namespace "$project" "lagoon.sh/project=$LAGOON_PROJECT" --overwrite + oc label namespace "$project" "lagoon.sh/environmentType=$LAGOON_ENVIRONMENT_TYPE" --overwrite + oc label namespace "$project" "lagoon.sh/environment=$LAGOON_GIT_SAFE_BRANCH" --overwrite + oc label namespace "$project" "lagoon.sh/mariadb-schema=$MARIADB_DATABASE" --overwrite + oc label namespace "$project" "lagoon.sh/mariadb-username=$MARIADB_USERNAME" --overwrite + else + + echo "No lagoon-env configmap found for $project" + + fi + +done < <(oc get ns -l '!lagoon.sh/project' | sed '1d' | awk '{print $1}') diff --git a/helpers/shared-to-shared-migrate.sh b/helpers/shared-to-shared-migrate.sh index a3878c909a..f734e3d663 100755 --- a/helpers/shared-to-shared-migrate.sh +++ b/helpers/shared-to-shared-migrate.sh @@ -4,7 +4,7 @@ # What this 
script is for # ======================= # This script will migrate a database user, access, database and contents from -# a source cluster to a destination cluster. +# an existing cluster to a destination cluster. # # At the moment, this is geared towards the Ansible Service Broker, but likely # can be modified in the future to work with the DBaaS operator. @@ -18,13 +18,13 @@ # ============ # * You are logged into OpenShift CLI and have access to the NAMESPACE you want # to migrate. -# * You have a `.my.cnf` file for the source and desintation database clusters. -# * If your database clusters are not directly accessible, then you have -# created SSH tunnels to expose them on a local port. +# * You have a `.my.cnf` file for the destination database cluster. +# * If your destination database cluster is not directly accessible, then you +# have created SSH tunnels to expose them on a local port. # # How to get your existing ASB root credentials # ============================================= -# oc -n openshift-ansible-service-broker get secret/lagoon-dbaas-db-credentials -o JSON | jq '.data' +# oc -n openshift-ansible-service-broker get secret/lagoon-dbaas-db-credentials -o json | jq '.data | map_values(@base64d)' # # How to create a `.my.cnf` file # ============================== @@ -39,22 +39,33 @@ # ======================================================================= # ssh -L 33007:shared-cluster.cluster-banana.ap-southeast-2.rds.amazonaws.com:3306 jumpbox.aws.amazee.io # -# Example commands -# ================ +# Example command 1 +# ================= # ./helpers/shared-to-shared-migrate.sh \ -# --source shared-cluster.cluster-banana.ap-southeast-2.rds.amazonaws.com \ # --destination shared-cluster.cluster-apple.ap-southeast-2.rds.amazonaws.com \ # --replica shared-cluster.cluster-r0-apple.ap-southeast-2.rds.amazonaws.com \ # --namespace NAMESPACE \ # --dry-run # +# Example command 2 +# ================= +# namespaces=" +# foo-example-com-production +# 
bar-example-com-production +# baz-example-com-production +# quux-example-com-production +# " +# for namespace in $namespaces; do +# ./helpers/shared-to-shared-migrate.sh \ +# --dry-run \ +# --namespace "$namespace" \ +# --destination shared-mysql-production-1-cluster.cluster-plum.ap-southeast-2.rds.amazonaws.com \ +# --replica shared-mysql-production-1-cluster.cluster-ro-plum.ap-southeast-2.rds.amazonaws.com +# done +# set -euo pipefail -# Reset in case getopts has been used previously in the shell. -OPTIND=1 - # Initialize our own variables: -SOURCE_CLUSTER="" DESTINATION_CLUSTER="" REPLICA_CLUSTER="" NAMESPACE="" @@ -63,31 +74,39 @@ TIMESTAMP=$(date +%s) # Colours. shw_grey () { - echo $(tput bold)$(tput setaf 0) $@ $(tput sgr 0) + tput bold + tput setaf 0 + echo "$@" + tput sgr0 } shw_norm () { - echo $(tput bold)$(tput setaf 9) $@ $(tput sgr 0) + tput bold + tput setaf 9 + echo "$@" + tput sgr0 } shw_info () { - echo $(tput bold)$(tput setaf 4) $@ $(tput sgr 0) + tput bold + tput setaf 4 + echo "$@" + tput sgr0 } shw_warn () { - echo $(tput bold)$(tput setaf 2) $@ $(tput sgr 0) + tput bold + tput setaf 2 + echo "$@" + tput sgr0 } shw_err () { - echo $(tput bold)$(tput setaf 1) $@ $(tput sgr 0) + tput bold + tput setaf 1 + echo "$@" + tput sgr0 } # Parse input arguments. 
while [[ $# -gt 0 ]] ; do - key="$1" - - case $key in - -s|--source) - SOURCE_CLUSTER="$2" - shift # past argument - shift # past value - ;; + case $1 in -d|--destination) DESTINATION_CLUSTER="$2" shift # past argument @@ -107,53 +126,51 @@ while [[ $# -gt 0 ]] ; do DRY_RUN="TRUE" shift # past argument ;; + *) + echo "Invalid Argument: $1" + exit 3 + ;; esac done shw_grey "================================================" -shw_grey " SOURCE_CLUSTER=$SOURCE_CLUSTER" +shw_grey " START_TIMESTAMP='$(date +%Y-%m-%dT%H:%M:%S%z)'" +shw_grey "================================================" shw_grey " DESTINATION_CLUSTER=$DESTINATION_CLUSTER" shw_grey " REPLICA_CLUSTER=$REPLICA_CLUSTER" shw_grey " NAMESPACE=$NAMESPACE" shw_grey "================================================" -for util in oc jq mysql shyaml; do +for util in oc jq mysql; do if ! command -v ${util} > /dev/null; then shw_err "Please install ${util}" exit 1 fi done -CONF_FILE=${HOME}/.my.cnf-${SOURCE_CLUSTER} -if [ ! -f "$CONF_FILE" ]; then - shw_err "ERROR: please create $CONF_FILE so I can know how to connect to ${SOURCE_CLUSTER}" - exit 2 -fi - CONF_FILE=${HOME}/.my.cnf-${DESTINATION_CLUSTER} if [ ! -f "$CONF_FILE" ]; then shw_err "ERROR: please create $CONF_FILE so I can know how to connect to ${DESTINATION_CLUSTER}" exit 2 fi -if [ ! -z "${DRY_RUN}" ] ; then +if [ "$DRY_RUN" ] ; then shw_warn "Dry run is enabled, so no network service changes will take place." 
fi # Load the DBaaS credentials for the project -SECRETS=/tmp/${NAMESPACE}-migration.yaml -oc -n ${NAMESPACE} get secret mariadb-servicebroker-credentials -o yaml > $SECRETS +SECRETS=$(oc -n "$NAMESPACE" get secret mariadb-servicebroker-credentials -o json) -DB_NETWORK_SERVICE=$(cat $SECRETS | shyaml get-value data.DB_HOST | base64 -D) -if cat ${SECRETS} | grep DB_READREPLICA_HOSTS > /dev/null ; then - DB_READREPLICA_HOSTS=$(cat $SECRETS | shyaml get-value data.DB_READREPLICA_HOSTS | base64 -D) +DB_NETWORK_SERVICE=$(echo "$SECRETS" | jq -er '.data.DB_HOST | @base64d') +if echo "$SECRETS" | grep -q DB_READREPLICA_HOSTS ; then + DB_READREPLICA_HOSTS=$(echo "$SECRETS" | jq -er '.data.DB_READREPLICA_HOSTS | @base64d') else DB_READREPLICA_HOSTS="" fi -DB_USER=$(cat $SECRETS | shyaml get-value data.DB_USER | base64 -D) -DB_PASSWORD=$(cat $SECRETS | shyaml get-value data.DB_PASSWORD | base64 -D) -DB_NAME=$(cat $SECRETS | shyaml get-value data.DB_NAME | base64 -D) -DB_PORT=$(cat $SECRETS | shyaml get-value data.DB_PORT | base64 -D) +DB_USER=$(echo "$SECRETS" | jq -er '.data.DB_USER | @base64d') +DB_PASSWORD=$(echo "$SECRETS" | jq -er '.data.DB_PASSWORD | @base64d') +DB_NAME=$(echo "$SECRETS" | jq -er '.data.DB_NAME | @base64d') +DB_PORT=$(echo "$SECRETS" | jq -er '.data.DB_PORT | @base64d') shw_grey "================================================" shw_grey " DB_NETWORK_SERVICE=$DB_NETWORK_SERVICE" @@ -161,10 +178,11 @@ shw_grey " DB_READREPLICA_HOSTS=$DB_READREPLICA_HOSTS" shw_grey " DB_USER=$DB_USER" shw_grey " DB_PASSWORD=$DB_PASSWORD" shw_grey " DB_NAME=$DB_NAME" +shw_grey " DB_PORT=$DB_PORT" shw_grey "================================================" # Ensure there is a database in the destination. 
-shw_info "> Setting up the MySQL bits" +shw_info "> Preparing Database, User, and permissions on destination" shw_info "================================================" CONF_FILE=${HOME}/.my.cnf-${DESTINATION_CLUSTER} mysql --defaults-file="$CONF_FILE" -se "CREATE DATABASE IF NOT EXISTS \`${DB_NAME}\`;" @@ -178,43 +196,49 @@ shw_info "================================================" mysql --defaults-file="$CONF_FILE" -e "SELECT * FROM mysql.db WHERE Db = '${DB_NAME}'\G;" # Dump the database inside the CLI pod. -POD=$(oc -n ${NAMESPACE} get pods -o json --show-all=false -l service=cli | jq -r '.items[].metadata.name') -shw_info "> Dumping database ${DB_NAME} on pod ${POD} on host ${DB_NETWORK_SERVICE}" +POD=$(oc -n "$NAMESPACE" get pods -o json --field-selector=status.phase=Running -l service=cli | jq -r '.items[0].metadata.name // empty') +if [ -z "$POD" ]; then + shw_warn "No running cli pod in namespace $NAMESPACE" + shw_warn "Scaling up 1 cli DeploymentConfig pod" + oc -n "$NAMESPACE" scale dc cli --replicas=1 --timeout=2m + sleep 32 # hope for timely scheduling + POD=$(oc -n "$NAMESPACE" get pods -o json --field-selector=status.phase=Running -l service=cli | jq -er '.items[0].metadata.name') +fi +shw_info "> Dumping database $DB_NAME on pod $POD on host $DB_NETWORK_SERVICE" shw_info "================================================" -oc -n ${NAMESPACE} exec ${POD} -- bash -c "time mysqldump -h ${DB_NETWORK_SERVICE} -u ${DB_USER} -p${DB_PASSWORD} ${DB_NAME} > /tmp/migration.sql" -oc -n ${NAMESPACE} exec ${POD} -- ls -lath /tmp/migration.sql || exit 1 -oc -n ${NAMESPACE} exec ${POD} -- head -n 5 /tmp/migration.sql -oc -n ${NAMESPACE} exec ${POD} -- tail -n 5 /tmp/migration.sql || exit 1 +oc -n "$NAMESPACE" exec "$POD" -- bash -c "time mysqldump -h '$DB_NETWORK_SERVICE' -u '$DB_USER' -p'$DB_PASSWORD' '$DB_NAME' > /tmp/migration.sql" +oc -n "$NAMESPACE" exec "$POD" -- ls -lh /tmp/migration.sql +oc -n "$NAMESPACE" exec "$POD" -- head -n 5 /tmp/migration.sql +oc 
-n "$NAMESPACE" exec "$POD" -- tail -n 5 /tmp/migration.sql shw_norm "> Dump is done" shw_norm "================================================" # Import to new database. shw_info "> Importing the dump into ${DESTINATION_CLUSTER}" shw_info "================================================" -oc -n ${NAMESPACE} exec ${POD} -- bash -c "time mysql -h ${DESTINATION_CLUSTER} -u ${DB_USER} -p${DB_PASSWORD} ${DB_NAME} < /tmp/migration.sql" -oc -n ${NAMESPACE} exec ${POD} -- bash -c "rm /tmp/migration.sql" +oc -n "$NAMESPACE" exec "$POD" -- bash -c "time mysql -h '$DESTINATION_CLUSTER' -u '$DB_USER' -p'$DB_PASSWORD' '$DB_NAME' < /tmp/migration.sql" +oc -n "$NAMESPACE" exec "$POD" -- rm /tmp/migration.sql shw_norm "> Import is done" shw_norm "================================================" # Alter the network service(s). -shw_info "> Altering the Network Service ${DB_NETWORK_SERVICE} to point at ${DESTINATION_CLUSTER}" +shw_info "> Altering the Network Service $DB_NETWORK_SERVICE to point at $DESTINATION_CLUSTER" shw_info "================================================" -oc -n ${NAMESPACE} get svc/${DB_NETWORK_SERVICE} -o yaml > /tmp/${NAMESPACE}-svc.yaml -if [ -z "${DRY_RUN}" ] ; then - oc -n ${NAMESPACE} patch svc/${DB_NETWORK_SERVICE} -p "{\"spec\":{\"externalName\": \"${DESTINATION_CLUSTER}\"}}" -else +ORIGINAL_DB_HOST=$(oc -n "$NAMESPACE" get "svc/$DB_NETWORK_SERVICE" -o json --export | tee "/tmp/$NAMESPACE-svc.json" | jq -er '.spec.externalName') +if [ "$DRY_RUN" ] ; then echo "**DRY RUN**" +else + oc -n "$NAMESPACE" patch "svc/$DB_NETWORK_SERVICE" -p "{\"spec\":{\"externalName\": \"$DESTINATION_CLUSTER\"}}" fi -if [ ! 
-z "${DB_READREPLICA_HOSTS}" ]; then - shw_info "> Altering the Network Service ${DB_READREPLICA_HOSTS} to point at ${REPLICA_CLUSTER}" +if [ "$DB_READREPLICA_HOSTS" ]; then + shw_info "> Altering the Network Service $DB_READREPLICA_HOSTS to point at $REPLICA_CLUSTER" shw_info "================================================" - oc -n ${NAMESPACE} get svc/${DB_READREPLICA_HOSTS} -o yaml > /tmp/${NAMESPACE}-svc-replica.yaml - ORIGINAL_DB_READREPLICA_HOSTS=$(cat /tmp/${NAMESPACE}-svc-replica.yaml | shyaml get-value spec.externalName) - if [ -z "${DRY_RUN}" ] ; then - oc -n ${NAMESPACE} patch svc/${DB_READREPLICA_HOSTS} -p "{\"spec\":{\"externalName\": \"${REPLICA_CLUSTER}\"}}" - else + ORIGINAL_DB_READREPLICA_HOSTS=$(oc -n "$NAMESPACE" get "svc/$DB_READREPLICA_HOSTS" -o json --export | tee "/tmp/$NAMESPACE-svc-replica.json" | jq -er '.spec.externalName') + if [ "$DRY_RUN" ] ; then echo "**DRY RUN**" + else + oc -n "$NAMESPACE" patch "svc/$DB_READREPLICA_HOSTS" -p "{\"spec\":{\"externalName\": \"$REPLICA_CLUSTER\"}}" fi fi @@ -225,30 +249,33 @@ sleep 1 # Verify the correct RDS cluster. shw_info "> Output the RDS cluster that Drush is connecting to" shw_info "================================================" -oc -n ${NAMESPACE} exec ${POD} -- bash -c "drush sqlq 'SELECT @@aurora_server_id;'" +oc -n "$NAMESPACE" exec "$POD" -- bash -c "drush sqlq 'SELECT @@aurora_server_id;'" # Drush status. shw_info "> Drush status" shw_info "================================================" -oc -n ${NAMESPACE} exec ${POD} -- bash -c "drush status" +oc -n "$NAMESPACE" exec "$POD" -- bash -c "drush status" # Get routes, and ensure a cache bust works. 
-ROUTE=$(oc -n ${NAMESPACE} get routes -o json | jq --raw-output '.items[0].spec.host') +ROUTE=$(oc -n "$NAMESPACE" get routes -o json | jq -er '.items[0].spec.host') shw_info "> Testing the route https://${ROUTE}/?${TIMESTAMP}" shw_info "================================================" curl -skLIXGET "https://${ROUTE}/?${TIMESTAMP}" \ -A "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/79.0.3945.130 Safari/537.36" \ - --cookie "NO_CACHE=1" | grep -E "HTTP|Cache|Location|LAGOON" || TRUE + --cookie "NO_CACHE=1" | grep -E "HTTP|Cache|Location|LAGOON" || true shw_grey "================================================" shw_grey "" shw_grey "In order to rollback this change, edit the Network Service(s) like so:" shw_grey "" -shw_grey "oc -n ${NAMESPACE} patch svc/${DB_NETWORK_SERVICE} -p \"{\\\"spec\\\":{\\\"externalName': \\\"${SOURCE_CLUSTER}\\\"}}\"" -if [ ! -z "${DB_READREPLICA_HOSTS}" ]; then - shw_grey "oc -n ${NAMESPACE} patch svc/${DB_READREPLICA_HOSTS} -p \"{\\\"spec\\\":{\\\"externalName': \\\"${ORIGINAL_DB_READREPLICA_HOSTS}\\\"}}\"" +shw_grey "oc -n $NAMESPACE patch svc/$DB_NETWORK_SERVICE -p '{\"spec\":{\"externalName\": \"$ORIGINAL_DB_HOST\"}}'" +if [ "$DB_READREPLICA_HOSTS" ]; then + shw_grey "oc -n $NAMESPACE patch svc/$DB_READREPLICA_HOSTS -p '{\"spec\":{\"externalName\": \"$ORIGINAL_DB_READREPLICA_HOSTS\"}}'" fi echo "" -shw_norm "Done" +shw_grey "================================================" +shw_grey " END_TIMESTAMP='$(date +%Y-%m-%dT%H:%M:%S%z)'" +shw_grey "================================================" +shw_norm "Done in $SECONDS seconds" exit 0 diff --git a/helpers/update-versions.yml b/helpers/update-versions.yml new file mode 100644 index 0000000000..33d5180709 --- /dev/null +++ b/helpers/update-versions.yml @@ -0,0 +1,58 @@ +# Lagoon Version Update Helper +# +# Helper to update Version inside Dockerfiles +# Update versions below in `vars` and execute locally +# +# ansible-playbook 
helpers/update-versions.yml +- name: update versions + hosts: 127.0.0.1 + connection: local + vars: + # Newrelic - https://docs.newrelic.com/docs/release-notes/agent-release-notes/php-release-notes/ + NEWRELIC_VERSION: '9.11.0.267' + # Composer - https://getcomposer.org/download/ + COMPOSER_VERSION: '1.10.8' + COMPOSER_HASH_SHA256: '4c40737f5d5f36d04f8b2df37171c6a1ff520efcadcb8626cc7c30bd4c5178e5' + # Drupal Console Launcher - https://github.com/hechoendrupal/drupal-console-launcher/releases + DRUPAL_CONSOLE_LAUNCHER_VERSION: 1.9.4 + DRUPAL_CONSOLE_LAUNCHER_SHA: b7759279668caf915b8e9f3352e88f18e4f20659 + # Drush - https://github.com/drush-ops/drush/releases + DRUSH_VERSION: 8.3.5 + # Drush Launcher Version - https://github.com/drush-ops/drush-launcher/releases + DRUSH_LAUNCHER_VERSION: 0.6.0 + tasks: + - name: update NEWRELIC_VERSION + lineinfile: + path: "{{ lookup('env', 'PWD') }}/images/php/fpm/Dockerfile" + regexp: 'ENV NEWRELIC_VERSION=' + line: 'ENV NEWRELIC_VERSION={{ NEWRELIC_VERSION }}' + - name: update COMPOSER_VERSION + lineinfile: + path: "{{ lookup('env', 'PWD') }}/images/php/cli/Dockerfile" + regexp: 'ENV COMPOSER_VERSION=' + line: 'ENV COMPOSER_VERSION={{ COMPOSER_VERSION }} \' + - name: update COMPOSER_HASH_SHA256 + lineinfile: + path: "{{ lookup('env', 'PWD') }}/images/php/cli/Dockerfile" + regexp: 'COMPOSER_HASH_SHA256=' + line: ' COMPOSER_HASH_SHA256={{ COMPOSER_HASH_SHA256 }}' + - name: update DRUPAL_CONSOLE_LAUNCHER_VERSION + lineinfile: + path: "{{ lookup('env', 'PWD') }}/images/php/cli-drupal/Dockerfile" + regexp: 'ENV DRUPAL_CONSOLE_LAUNCHER_VERSION=' + line: 'ENV DRUPAL_CONSOLE_LAUNCHER_VERSION={{ DRUPAL_CONSOLE_LAUNCHER_VERSION }} \' + - name: update DRUPAL_CONSOLE_LAUNCHER_SHA + lineinfile: + path: "{{ lookup('env', 'PWD') }}/images/php/cli-drupal/Dockerfile" + regexp: 'DRUPAL_CONSOLE_LAUNCHER_SHA=' + line: ' DRUPAL_CONSOLE_LAUNCHER_SHA={{ DRUPAL_CONSOLE_LAUNCHER_SHA }} \' + - name: update DRUSH_VERSION + lineinfile: + path: "{{ 
lookup('env', 'PWD') }}/images/php/cli-drupal/Dockerfile" + regexp: 'DRUSH_VERSION=' + line: ' DRUSH_VERSION={{ DRUSH_VERSION }} \' + - name: update DRUSH_LAUNCHER_VERSION + lineinfile: + path: "{{ lookup('env', 'PWD') }}/images/php/cli-drupal/Dockerfile" + regexp: 'DRUSH_LAUNCHER_VERSION=' + line: ' DRUSH_LAUNCHER_VERSION={{ DRUSH_LAUNCHER_VERSION }} \' diff --git a/images/kubectl-build-deploy-dind/build-deploy-docker-compose.sh b/images/kubectl-build-deploy-dind/build-deploy-docker-compose.sh index 587e502a9f..c57454f145 100755 --- a/images/kubectl-build-deploy-dind/build-deploy-docker-compose.sh +++ b/images/kubectl-build-deploy-dind/build-deploy-docker-compose.sh @@ -183,8 +183,8 @@ if [[ ( "$BUILD_TYPE" == "pullrequest" || "$BUILD_TYPE" == "branch" ) && ! $TH LAGOON_POSTROLLOUT_DISABLED=($(echo $LAGOON_PROJECT_VARIABLES | jq -r '.[] | select(.name == "LAGOON_POSTROLLOUT_DISABLED") | "\(.value)"')) fi if [ ! -z "$LAGOON_ENVIRONMENT_VARIABLES" ]; then - LAGOON_PREROLLOUT_DISABLED=($(echo $LAGOON_PROJECT_VARIABLES | jq -r '.[] | select(.name == "LAGOON_PREROLLOUT_DISABLED") | "\(.value)"')) - LAGOON_POSTROLLOUT_DISABLED=($(echo $LAGOON_PROJECT_VARIABLES | jq -r '.[] | select(.name == "LAGOON_POSTROLLOUT_DISABLED") | "\(.value)"')) + LAGOON_PREROLLOUT_DISABLED=($(echo $LAGOON_ENVIRONMENT_VARIABLES | jq -r '.[] | select(.name == "LAGOON_PREROLLOUT_DISABLED") | "\(.value)"')) + LAGOON_POSTROLLOUT_DISABLED=($(echo $LAGOON_ENVIRONMENT_VARIABLES | jq -r '.[] | select(.name == "LAGOON_POSTROLLOUT_DISABLED") | "\(.value)"')) fi set -x @@ -240,6 +240,13 @@ if [[ ( "$BUILD_TYPE" == "pullrequest" || "$BUILD_TYPE" == "branch" ) && ! $TH do DOCKERFILE=$(cat $DOCKER_COMPOSE_YAML | shyaml get-value services.$IMAGE_NAME.build.dockerfile false) + + # allow to overwrite build dockerfile for this environment and service + ENVIRONMENT_DOCKERFILE_OVERRIDE=$(cat .lagoon.yml | shyaml get-value environments.${BRANCH//./\\.}.overrides.$IMAGE_NAME.build.dockerfile false) + if [ ! 
$ENVIRONMENT_DOCKERFILE_OVERRIDE == "false" ]; then + DOCKERFILE=$ENVIRONMENT_DOCKERFILE_OVERRIDE + fi + if [ $DOCKERFILE == "false" ]; then # No Dockerfile defined, assuming to download the Image directly @@ -250,6 +257,13 @@ if [[ ( "$BUILD_TYPE" == "pullrequest" || "$BUILD_TYPE" == "branch" ) && ! $TH # allow to overwrite image that we pull OVERRIDE_IMAGE=$(cat $DOCKER_COMPOSE_YAML | shyaml get-value services.$IMAGE_NAME.labels.lagoon\\.image false) + + # allow to overwrite image that we pull for this environment and service + ENVIRONMENT_IMAGE_OVERRIDE=$(cat .lagoon.yml | shyaml get-value environments.${BRANCH//./\\.}.overrides.$IMAGE_NAME.image false) + if [ ! $ENVIRONMENT_IMAGE_OVERRIDE == "false" ]; then + OVERRIDE_IMAGE=$ENVIRONMENT_IMAGE_OVERRIDE + fi + if [ ! $OVERRIDE_IMAGE == "false" ]; then # expand environment variables from ${OVERRIDE_IMAGE} PULL_IMAGE=$(echo "${OVERRIDE_IMAGE}" | envsubst) @@ -269,6 +283,13 @@ if [[ ( "$BUILD_TYPE" == "pullrequest" || "$BUILD_TYPE" == "branch" ) && ! $TH TEMPORARY_IMAGE_NAME="${NAMESPACE}-${IMAGE_NAME}" BUILD_CONTEXT=$(cat $DOCKER_COMPOSE_YAML | shyaml get-value services.$IMAGE_NAME.build.context .) + + # allow to overwrite build context for this environment and service + ENVIRONMENT_BUILD_CONTEXT_OVERRIDE=$(cat .lagoon.yml | shyaml get-value environments.${BRANCH//./\\.}.overrides.$IMAGE_NAME.build.context false) + if [ ! $ENVIRONMENT_BUILD_CONTEXT_OVERRIDE == "false" ]; then + BUILD_CONTEXT=$ENVIRONMENT_BUILD_CONTEXT_OVERRIDE + fi + if [ ! 
-f $BUILD_CONTEXT/$DOCKERFILE ]; then echo "defined Dockerfile $DOCKERFILE for service $IMAGE_NAME not found"; exit 1; fi @@ -348,6 +369,15 @@ else fi ROUTES_AUTOGENERATE_ENABLED=$(cat .lagoon.yml | shyaml get-value routes.autogenerate.enabled true) +ROUTES_AUTOGENERATE_ALLOW_PRS=$(cat .lagoon.yml | shyaml get-value routes.autogenerate.allowPullrequests $ROUTES_AUTOGENERATE_ENABLED) +if [[ "$TYPE" == "pullrequest" && "$ROUTES_AUTOGENERATE_ALLOW_PRS" == "true" ]]; then + ROUTES_AUTOGENERATE_ENABLED=true +fi +## fail silently if the key autogenerateRoutes doesn't exist and default to whatever ROUTES_AUTOGENERATE_ENABLED is set to +ROUTES_AUTOGENERATE_BRANCH=$(cat .lagoon.yml | shyaml -q get-value environments.${BRANCH//./\\.}.autogenerateRoutes $ROUTES_AUTOGENERATE_ENABLED) +if [ "$ROUTES_AUTOGENERATE_BRANCH" =~ [Tt]rue ]; then + ROUTES_AUTOGENERATE_ENABLED=true +fi touch /kubectl-build-deploy/values.yaml @@ -424,11 +454,6 @@ do touch /kubectl-build-deploy/${SERVICE_NAME}-values.yaml - SERVICE_TYPE_OVERRIDE=$(cat .lagoon.yml | shyaml get-value environments.${BRANCH//./\\.}.types.$SERVICE_NAME false) - if [ ! 
$SERVICE_TYPE_OVERRIDE == "false" ]; then - SERVICE_TYPE=$SERVICE_TYPE_OVERRIDE - fi - HELM_SERVICE_TEMPLATE="templates/service.yaml" if [ -f /kubectl-build-deploy/helmcharts/${SERVICE_TYPE}/$HELM_SERVICE_TEMPLATE ]; then cat /kubectl-build-deploy/values.yaml @@ -464,6 +489,128 @@ TEMPLATE_PARAMETERS=() ### CUSTOM ROUTES FROM .lagoon.yml ############################################## + +ROUTES_SERVICE_COUNTER=0 +# we need to check for production routes for active/standby if they are defined, as these will get migrated between environments as required +if [ "${ENVIRONMENT_TYPE}" == "production" ]; then + if [ "${BRANCH//./\\.}" == "${ACTIVE_ENVIRONMENT}" ]; then + if [ -n "$(cat .lagoon.yml | shyaml keys production_routes.active.routes.$ROUTES_SERVICE_COUNTER 2> /dev/null)" ]; then + while [ -n "$(cat .lagoon.yml | shyaml keys production_routes.active.routes.$ROUTES_SERVICE_COUNTER 2> /dev/null)" ]; do + ROUTES_SERVICE=$(cat .lagoon.yml | shyaml keys production_routes.active.routes.$ROUTES_SERVICE_COUNTER) + + ROUTE_DOMAIN_COUNTER=0 + while [ -n "$(cat .lagoon.yml | shyaml get-value production_routes.active.routes.$ROUTES_SERVICE_COUNTER.$ROUTES_SERVICE.$ROUTE_DOMAIN_COUNTER 2> /dev/null)" ]; do + # Routes can either be a key (when the have additional settings) or just a value + if cat .lagoon.yml | shyaml keys production_routes.active.routes.$ROUTES_SERVICE_COUNTER.$ROUTES_SERVICE.$ROUTE_DOMAIN_COUNTER &> /dev/null; then + ROUTE_DOMAIN=$(cat .lagoon.yml | shyaml keys production_routes.active.routes.$ROUTES_SERVICE_COUNTER.$ROUTES_SERVICE.$ROUTE_DOMAIN_COUNTER) + # Route Domains include dots, which need to be esacped via `\.` in order to use them within shyaml + ROUTE_DOMAIN_ESCAPED=$(cat .lagoon.yml | shyaml keys production_routes.active.routes.$ROUTES_SERVICE_COUNTER.$ROUTES_SERVICE.$ROUTE_DOMAIN_COUNTER | sed 's/\./\\./g') + ROUTE_TLS_ACME=$(cat .lagoon.yml | shyaml get-value 
production_routes.active.routes.$ROUTES_SERVICE_COUNTER.$ROUTES_SERVICE.$ROUTE_DOMAIN_COUNTER.$ROUTE_DOMAIN_ESCAPED.tls-acme true) + ROUTE_MIGRATE=$(cat .lagoon.yml | shyaml get-value production_routes.active.routes.$ROUTES_SERVICE_COUNTER.$ROUTES_SERVICE.$ROUTE_DOMAIN_COUNTER.$ROUTE_DOMAIN_ESCAPED.migrate true) + ROUTE_INSECURE=$(cat .lagoon.yml | shyaml get-value production_routes.active.routes.$ROUTES_SERVICE_COUNTER.$ROUTES_SERVICE.$ROUTE_DOMAIN_COUNTER.$ROUTE_DOMAIN_ESCAPED.insecure Redirect) + ROUTE_HSTS=$(cat .lagoon.yml | shyaml get-value production_routes.active.routes.$ROUTES_SERVICE_COUNTER.$ROUTES_SERVICE.$ROUTE_DOMAIN_COUNTER.$ROUTE_DOMAIN_ESCAPED.hsts null) + else + # Only a value given, assuming some defaults + ROUTE_DOMAIN=$(cat .lagoon.yml | shyaml get-value production_routes.active.routes.$ROUTES_SERVICE_COUNTER.$ROUTES_SERVICE.$ROUTE_DOMAIN_COUNTER) + ROUTE_TLS_ACME=true + ROUTE_MIGRATE=true + ROUTE_INSECURE=Redirect + ROUTE_HSTS=null + fi + + touch /kubectl-build-deploy/${ROUTE_DOMAIN}-values.yaml + echo "$ROUTE_ANNOTATIONS" | yq p - annotations > /kubectl-build-deploy/${ROUTE_DOMAIN}-values.yaml + + # The very first found route is set as MAIN_CUSTOM_ROUTE + if [ -z "${MAIN_CUSTOM_ROUTE+x}" ]; then + MAIN_CUSTOM_ROUTE=$ROUTE_DOMAIN + fi + + ROUTE_SERVICE=$ROUTES_SERVICE + + cat /kubectl-build-deploy/${ROUTE_DOMAIN}-values.yaml + + helm template ${ROUTE_DOMAIN} \ + /kubectl-build-deploy/helmcharts/custom-ingress \ + --set host="${ROUTE_DOMAIN}" \ + --set service="${ROUTE_SERVICE}" \ + --set tls_acme="${ROUTE_TLS_ACME}" \ + --set insecure="${ROUTE_INSECURE}" \ + --set hsts="${ROUTE_HSTS}" \ + --set routeMigrate="${ROUTE_MIGRATE}" \ + -f /kubectl-build-deploy/values.yaml -f /kubectl-build-deploy/${ROUTE_DOMAIN}-values.yaml "${HELM_ARGUMENTS[@]}" > $YAML_FOLDER/${ROUTE_DOMAIN}.yaml + + let ROUTE_DOMAIN_COUNTER=ROUTE_DOMAIN_COUNTER+1 + done + + let ROUTES_SERVICE_COUNTER=ROUTES_SERVICE_COUNTER+1 + done + fi + fi + if [ "${BRANCH//./\\.}" == 
"${STANDBY_ENVIRONMENT}" ]; then + if [ -n "$(cat .lagoon.yml | shyaml keys production_routes.standby.routes.$ROUTES_SERVICE_COUNTER 2> /dev/null)" ]; then + while [ -n "$(cat .lagoon.yml | shyaml keys production_routes.standby.routes.$ROUTES_SERVICE_COUNTER 2> /dev/null)" ]; do + ROUTES_SERVICE=$(cat .lagoon.yml | shyaml keys production_routes.standby.routes.$ROUTES_SERVICE_COUNTER) + + ROUTE_DOMAIN_COUNTER=0 + while [ -n "$(cat .lagoon.yml | shyaml get-value production_routes.standby.routes.$ROUTES_SERVICE_COUNTER.$ROUTES_SERVICE.$ROUTE_DOMAIN_COUNTER 2> /dev/null)" ]; do + # Routes can either be a key (when the have additional settings) or just a value + if cat .lagoon.yml | shyaml keys production_routes.standby.routes.$ROUTES_SERVICE_COUNTER.$ROUTES_SERVICE.$ROUTE_DOMAIN_COUNTER &> /dev/null; then + ROUTE_DOMAIN=$(cat .lagoon.yml | shyaml keys production_routes.standby.routes.$ROUTES_SERVICE_COUNTER.$ROUTES_SERVICE.$ROUTE_DOMAIN_COUNTER) + # Route Domains include dots, which need to be esacped via `\.` in order to use them within shyaml + ROUTE_DOMAIN_ESCAPED=$(cat .lagoon.yml | shyaml keys production_routes.standby.routes.$ROUTES_SERVICE_COUNTER.$ROUTES_SERVICE.$ROUTE_DOMAIN_COUNTER | sed 's/\./\\./g') + ROUTE_TLS_ACME=$(cat .lagoon.yml | shyaml get-value production_routes.standby.routes.$ROUTES_SERVICE_COUNTER.$ROUTES_SERVICE.$ROUTE_DOMAIN_COUNTER.$ROUTE_DOMAIN_ESCAPED.tls-acme true) + ROUTE_MIGRATE=$(cat .lagoon.yml | shyaml get-value production_routes.standby.routes.$ROUTES_SERVICE_COUNTER.$ROUTES_SERVICE.$ROUTE_DOMAIN_COUNTER.$ROUTE_DOMAIN_ESCAPED.migrate true) + ROUTE_INSECURE=$(cat .lagoon.yml | shyaml get-value production_routes.standby.routes.$ROUTES_SERVICE_COUNTER.$ROUTES_SERVICE.$ROUTE_DOMAIN_COUNTER.$ROUTE_DOMAIN_ESCAPED.insecure Redirect) + ROUTE_HSTS=$(cat .lagoon.yml | shyaml get-value production_routes.standby.routes.$ROUTES_SERVICE_COUNTER.$ROUTES_SERVICE.$ROUTE_DOMAIN_COUNTER.$ROUTE_DOMAIN_ESCAPED.hsts null) + else + # Only a value given, 
assuming some defaults + ROUTE_DOMAIN=$(cat .lagoon.yml | shyaml get-value production_routes.standby.routes.$ROUTES_SERVICE_COUNTER.$ROUTES_SERVICE.$ROUTE_DOMAIN_COUNTER) + ROUTE_TLS_ACME=true + ROUTE_MIGRATE=true + ROUTE_INSECURE=Redirect + ROUTE_HSTS=null + fi + + touch /kubectl-build-deploy/${ROUTE_DOMAIN}-values.yaml + echo "$ROUTE_ANNOTATIONS" | yq p - annotations > /kubectl-build-deploy/${ROUTE_DOMAIN}-values.yaml + + # The very first found route is set as MAIN_CUSTOM_ROUTE + if [ -z "${MAIN_CUSTOM_ROUTE+x}" ]; then + MAIN_CUSTOM_ROUTE=$ROUTE_DOMAIN + fi + + ROUTE_SERVICE=$ROUTES_SERVICE + + cat /kubectl-build-deploy/${ROUTE_DOMAIN}-values.yaml + + helm template ${ROUTE_DOMAIN} \ + /kubectl-build-deploy/helmcharts/custom-ingress \ + --set host="${ROUTE_DOMAIN}" \ + --set service="${ROUTE_SERVICE}" \ + --set tls_acme="${ROUTE_TLS_ACME}" \ + --set insecure="${ROUTE_INSECURE}" \ + --set hsts="${ROUTE_HSTS}" \ + --set routeMigrate="${ROUTE_MIGRATE}" \ + -f /kubectl-build-deploy/values.yaml -f /kubectl-build-deploy/${ROUTE_DOMAIN}-values.yaml "${HELM_ARGUMENTS[@]}" > $YAML_FOLDER/${ROUTE_DOMAIN}.yaml + + let ROUTE_DOMAIN_COUNTER=ROUTE_DOMAIN_COUNTER+1 + done + + let ROUTES_SERVICE_COUNTER=ROUTES_SERVICE_COUNTER+1 + done + fi + fi +fi + +# set some monitoring defaults +if [ "${ENVIRONMENT_TYPE}" == "production" ]; then + MONITORING_ENABLED="true" +else + MONITORING_ENABLED="false" + +fi + # Two while loops as we have multiple services that want routes and each service has multiple routes ROUTES_SERVICE_COUNTER=0 if [ -n "$(cat .lagoon.yml | shyaml keys ${PROJECT}.environments.${BRANCH//./\\.}.routes.$ROUTES_SERVICE_COUNTER 2> /dev/null)" ]; then @@ -478,13 +625,16 @@ if [ -n "$(cat .lagoon.yml | shyaml keys ${PROJECT}.environments.${BRANCH//./\\. 
# Route Domains include dots, which need to be esacped via `\.` in order to use them within shyaml ROUTE_DOMAIN_ESCAPED=$(cat .lagoon.yml | shyaml keys ${PROJECT}.environments.${BRANCH//./\\.}.routes.$ROUTES_SERVICE_COUNTER.$ROUTES_SERVICE.$ROUTE_DOMAIN_COUNTER | sed 's/\./\\./g') ROUTE_TLS_ACME=$(cat .lagoon.yml | shyaml get-value ${PROJECT}.environments.${BRANCH//./\\.}.routes.$ROUTES_SERVICE_COUNTER.$ROUTES_SERVICE.$ROUTE_DOMAIN_COUNTER.$ROUTE_DOMAIN_ESCAPED.tls-acme true) + ROUTE_MIGRATE=$(cat .lagoon.yml | shyaml get-value ${PROJECT}.environments.${BRANCH//./\\.}.routes.$ROUTES_SERVICE_COUNTER.$ROUTES_SERVICE.$ROUTE_DOMAIN_COUNTER.$ROUTE_DOMAIN_ESCAPED.migrate false) ROUTE_INSECURE=$(cat .lagoon.yml | shyaml get-value ${PROJECT}.environments.${BRANCH//./\\.}.routes.$ROUTES_SERVICE_COUNTER.$ROUTES_SERVICE.$ROUTE_DOMAIN_COUNTER.$ROUTE_DOMAIN_ESCAPED.insecure Redirect) ROUTE_HSTS=$(cat .lagoon.yml | shyaml get-value ${PROJECT}.environments.${BRANCH//./\\.}.routes.$ROUTES_SERVICE_COUNTER.$ROUTES_SERVICE.$ROUTE_DOMAIN_COUNTER.$ROUTE_DOMAIN_ESCAPED.hsts null) + MONITORING_PATH=$(cat .lagoon.yml | shyaml get-value ${PROJECT}.environments.${BRANCH//./\\.}.routes.$ROUTES_SERVICE_COUNTER.$ROUTES_SERVICE.$ROUTE_DOMAIN_COUNTER.$ROUTE_DOMAIN_ESCAPED.monitoring-path "") ROUTE_ANNOTATIONS=$(cat .lagoon.yml | shyaml get-value ${PROJECT}.environments.${BRANCH//./\\.}.routes.$ROUTES_SERVICE_COUNTER.$ROUTES_SERVICE.$ROUTE_DOMAIN_COUNTER.$ROUTE_DOMAIN_ESCAPED.annotations {}) else # Only a value given, assuming some defaults ROUTE_DOMAIN=$(cat .lagoon.yml | shyaml get-value ${PROJECT}.environments.${BRANCH//./\\.}.routes.$ROUTES_SERVICE_COUNTER.$ROUTES_SERVICE.$ROUTE_DOMAIN_COUNTER) ROUTE_TLS_ACME=true + ROUTE_MIGRATE=false ROUTE_INSECURE=Redirect ROUTE_HSTS=null ROUTE_ANNOTATIONS="{}" @@ -509,6 +659,11 @@ if [ -n "$(cat .lagoon.yml | shyaml keys ${PROJECT}.environments.${BRANCH//./\\. 
--set tls_acme="${ROUTE_TLS_ACME}" \ --set insecure="${ROUTE_INSECURE}" \ --set hsts="${ROUTE_HSTS}" \ + --set routeMigrate="${ROUTE_MIGRATE}" \ + --set ingressmonitorcontroller.enabled="${MONITORING_ENABLED}" \ + --set ingressmonitorcontroller.path="${MONITORING_PATH}" \ + --set ingressmonitorcontroller.alertContacts="${MONITORING_ALERTCONTACT}" \ + --set ingressmonitorcontroller.statuspageId="${MONITORING_STATUSPAGEID}" \ -f /kubectl-build-deploy/values.yaml -f /kubectl-build-deploy/${ROUTE_DOMAIN}-values.yaml "${HELM_ARGUMENTS[@]}" > $YAML_FOLDER/${ROUTE_DOMAIN}.yaml let ROUTE_DOMAIN_COUNTER=ROUTE_DOMAIN_COUNTER+1 @@ -528,13 +683,16 @@ else # Route Domains include dots, which need to be esacped via `\.` in order to use them within shyaml ROUTE_DOMAIN_ESCAPED=$(cat .lagoon.yml | shyaml keys environments.${BRANCH//./\\.}.routes.$ROUTES_SERVICE_COUNTER.$ROUTES_SERVICE.$ROUTE_DOMAIN_COUNTER | sed 's/\./\\./g') ROUTE_TLS_ACME=$(cat .lagoon.yml | shyaml get-value environments.${BRANCH//./\\.}.routes.$ROUTES_SERVICE_COUNTER.$ROUTES_SERVICE.$ROUTE_DOMAIN_COUNTER.$ROUTE_DOMAIN_ESCAPED.tls-acme true) + ROUTE_MIGRATE=$(cat .lagoon.yml | shyaml get-value environments.${BRANCH//./\\.}.routes.$ROUTES_SERVICE_COUNTER.$ROUTES_SERVICE.$ROUTE_DOMAIN_COUNTER.$ROUTE_DOMAIN_ESCAPED.migrate false) ROUTE_INSECURE=$(cat .lagoon.yml | shyaml get-value environments.${BRANCH//./\\.}.routes.$ROUTES_SERVICE_COUNTER.$ROUTES_SERVICE.$ROUTE_DOMAIN_COUNTER.$ROUTE_DOMAIN_ESCAPED.insecure Redirect) ROUTE_HSTS=$(cat .lagoon.yml | shyaml get-value environments.${BRANCH//./\\.}.routes.$ROUTES_SERVICE_COUNTER.$ROUTES_SERVICE.$ROUTE_DOMAIN_COUNTER.$ROUTE_DOMAIN_ESCAPED.hsts null) + MONITORING_PATH=$(cat .lagoon.yml | shyaml get-value environments.${BRANCH//./\\.}.routes.$ROUTES_SERVICE_COUNTER.$ROUTES_SERVICE.$ROUTE_DOMAIN_COUNTER.$ROUTE_DOMAIN_ESCAPED.monitoring-path "") ROUTE_ANNOTATIONS=$(cat .lagoon.yml | shyaml get-value 
environments.${BRANCH//./\\.}.routes.$ROUTES_SERVICE_COUNTER.$ROUTES_SERVICE.$ROUTE_DOMAIN_COUNTER.$ROUTE_DOMAIN_ESCAPED.annotations {}) else # Only a value given, assuming some defaults ROUTE_DOMAIN=$(cat .lagoon.yml | shyaml get-value environments.${BRANCH//./\\.}.routes.$ROUTES_SERVICE_COUNTER.$ROUTES_SERVICE.$ROUTE_DOMAIN_COUNTER) ROUTE_TLS_ACME=true + ROUTE_MIGRATE=false ROUTE_INSECURE=Redirect ROUTE_HSTS=null ROUTE_ANNOTATIONS="{}" @@ -559,6 +717,11 @@ else --set tls_acme="${ROUTE_TLS_ACME}" \ --set insecure="${ROUTE_INSECURE}" \ --set hsts="${ROUTE_HSTS}" \ + --set routeMigrate="${ROUTE_MIGRATE}" \ + --set ingressmonitorcontroller.enabled="${MONITORING_ENABLED}" \ + --set ingressmonitorcontroller.path="${MONITORING_PATH}" \ + --set ingressmonitorcontroller.alertContacts="${MONITORING_ALERTCONTACT}" \ + --set ingressmonitorcontroller.statuspageId="${MONITORING_STATUSPAGEID}" \ -f /kubectl-build-deploy/values.yaml -f /kubectl-build-deploy/${ROUTE_DOMAIN}-values.yaml "${HELM_ARGUMENTS[@]}" > $YAML_FOLDER/${ROUTE_DOMAIN}.yaml let ROUTE_DOMAIN_COUNTER=ROUTE_DOMAIN_COUNTER+1 @@ -605,20 +768,6 @@ if [ "$(ls -A $YAML_FOLDER/)" ]; then kubectl apply --insecure-skip-tls-verify -n ${NAMESPACE} -f $YAML_FOLDER/ fi -############################################## -### CUSTOM MONITORING_URLS FROM .lagoon.yml -############################################## -URL_COUNTER=0 -while [ -n "$(cat .lagoon.yml | shyaml get-value environments.${BRANCH//./\\.}.monitoring_urls.$URL_COUNTER 2> /dev/null)" ]; do - MONITORING_URL="$(cat .lagoon.yml | shyaml get-value environments.${BRANCH//./\\.}.monitoring_urls.$URL_COUNTER)" - if [[ $URL_COUNTER > 0 ]]; then - MONITORING_URLS="${MONITORING_URLS}, ${MONITORING_URL}" - else - MONITORING_URLS="${MONITORING_URL}" - fi - let URL_COUNTER=URL_COUNTER+1 -done - ############################################## ### PROJECT WIDE ENV VARIABLES ############################################## @@ -639,25 +788,25 @@ fi # Load all routes with correct 
schema and comma separated ROUTES=$(kubectl -n ${NAMESPACE} get ingress --sort-by='{.metadata.name}' -l "acme.openshift.io/exposer!=true" -o=go-template --template='{{range $indexItems, $ingress := .items}}{{if $indexItems}},{{end}}{{$tls := .spec.tls}}{{range $indexRule, $rule := .spec.rules}}{{if $indexRule}},{{end}}{{if $tls}}https://{{else}}http://{{end}}{{.host}}{{end}}{{end}}') +# Active / Standby routes +ACTIVE_ROUTES="" +STANDBY_ROUTES="" +if [ ! -z "${STANDBY_ENVIRONMENT}" ]; then +ACTIVE_ROUTES=$(kubectl -n ${NAMESPACE} get ingress --sort-by='{.metadata.name}' -l "dioscuri.amazee.io/migrate=true" -o=go-template --template='{{range $indexItems, $ingress := .items}}{{if $indexItems}},{{end}}{{$tls := .spec.tls}}{{range $indexRule, $rule := .spec.rules}}{{if $indexRule}},{{end}}{{if $tls}}https://{{else}}http://{{end}}{{.host}}{{end}}{{end}}') +STANDBY_ROUTES=$(kubectl -n ${NAMESPACE} get ingress --sort-by='{.metadata.name}' -l "dioscuri.amazee.io/migrate=true" -o=go-template --template='{{range $indexItems, $ingress := .items}}{{if $indexItems}},{{end}}{{$tls := .spec.tls}}{{range $indexRule, $rule := .spec.rules}}{{if $indexRule}},{{end}}{{if $tls}}https://{{else}}http://{{end}}{{.host}}{{end}}{{end}}') +fi + # Get list of autogenerated routes AUTOGENERATED_ROUTES=$(kubectl -n ${NAMESPACE} get ingress --sort-by='{.metadata.name}' -l "lagoon.sh/autogenerated=true" -o=go-template --template='{{range $indexItems, $ingress := .items}}{{if $indexItems}},{{end}}{{$tls := .spec.tls}}{{range $indexRule, $rule := .spec.rules}}{{if $indexRule}},{{end}}{{if $tls}}https://{{else}}http://{{end}}{{.host}}{{end}}{{end}}') -# If no MONITORING_URLS were specified, fall back to the ROUTE of the project -if [ -z "$MONITORING_URLS"]; then - echo "No monitoring_urls provided, using ROUTE" - MONITORING_URLS="${ROUTE}" -fi - yq write -i /kubectl-build-deploy/values.yaml 'route' "$ROUTE" yq write -i /kubectl-build-deploy/values.yaml 'routes' "$ROUTES" yq write -i 
/kubectl-build-deploy/values.yaml 'autogeneratedRoutes' "$AUTOGENERATED_ROUTES" -yq write -i /kubectl-build-deploy/values.yaml 'monitoringUrls' "$MONITORING_URLS" echo -e "\ LAGOON_ROUTE=${ROUTE}\n\ LAGOON_ROUTES=${ROUTES}\n\ LAGOON_AUTOGENERATED_ROUTES=${AUTOGENERATED_ROUTES}\n\ -LAGOON_MONITORING_URLS=${MONITORING_URLS}\n\ " >> /kubectl-build-deploy/values.env # Generate a Config Map with project wide env variables @@ -776,10 +925,13 @@ elif [ "$BUILD_TYPE" == "pullrequest" ] || [ "$BUILD_TYPE" == "branch" ]; then parallel --retries 4 < /kubectl-build-deploy/lagoon/push fi + + # load the image hashes for just pushed Images for IMAGE_NAME in "${!IMAGES_BUILD[@]}" do - IMAGE_HASHES[${IMAGE_NAME}]=$(docker inspect ${REGISTRY}/${PROJECT}/${ENVIRONMENT}/${IMAGE_NAME}:${IMAGE_TAG:-latest} --format '{{index .RepoDigests 0}}') + JQ_QUERY=(jq -r ".[]|select(test(\"${REGISTRY}/${PROJECT}/${ENVIRONMENT}/${IMAGE_NAME}\"))") + IMAGE_HASHES[${IMAGE_NAME}]=$(docker inspect ${REGISTRY}/${PROJECT}/${ENVIRONMENT}/${IMAGE_NAME}:${IMAGE_TAG:-latest} --format '{{json .RepoDigests}}' | "${JQ_QUERY[@]}") done # elif [ "$BUILD_TYPE" == "promote" ]; then @@ -1046,4 +1198,4 @@ if [ "${LAGOON_POSTROLLOUT_DISABLED}" != "true" ]; then done else echo "post-rollout tasks are currently disabled LAGOON_POSTROLLOUT_DISABLED is set to true" -fi \ No newline at end of file +fi diff --git a/images/kubectl-build-deploy-dind/build-deploy.sh b/images/kubectl-build-deploy-dind/build-deploy.sh index 4bd8c2b414..ff7e1a557a 100755 --- a/images/kubectl-build-deploy-dind/build-deploy.sh +++ b/images/kubectl-build-deploy-dind/build-deploy.sh @@ -106,15 +106,15 @@ do if [ $PRIVATE_CONTAINER_REGISTRY_URL != "false" ]; then echo "Attempting to log in to $PRIVATE_CONTAINER_REGISTRY_URL with user $PRIVATE_CONTAINER_REGISTRY_USERNAME - $PRIVATE_CONTAINER_REGISTRY_PASSWORD" docker login --username $PRIVATE_CONTAINER_REGISTRY_USERNAME --password $PRIVATE_REGISTRY_CREDENTIAL $PRIVATE_CONTAINER_REGISTRY_URL - kubectl 
create secret docker-registry "lagoon-private-registry-${PRIVATE_REGISTRY_COUNTER}-secret" --docker-server=$PRIVATE_CONTAINER_REGISTRY_URL --docker-username=PRIVATE_CONTAINER_REGISTRY_USERNAME --docker-password=$PRIVATE_REGISTRY_REGISTRY_PASSWORD --dry-run -o yaml | kubectl apply -f - + kubectl create secret docker-registry "lagoon-private-registry-${PRIVATE_REGISTRY_COUNTER}-secret" --docker-server=$PRIVATE_CONTAINER_REGISTRY_URL --docker-username=$PRIVATE_CONTAINER_REGISTRY_USERNAME --docker-password=$PRIVATE_REGISTRY_CREDENTIAL --dry-run -o yaml | kubectl apply -f - REGISTRY_SECRETS+=("lagoon-private-registry-${PRIVATE_REGISTRY_COUNTER}-secret") - let PRIVATE_REGISTRY_COUNTER++ + let ++PRIVATE_REGISTRY_COUNTER else echo "Attempting to log in to docker hub with user $PRIVATE_CONTAINER_REGISTRY_USERNAME - $PRIVATE_CONTAINER_REGISTRY_PASSWORD" docker login --username $PRIVATE_CONTAINER_REGISTRY_USERNAME --password $PRIVATE_REGISTRY_CREDENTIAL - kubectl create secret docker-registry "lagoon-private-registry-${PRIVATE_REGISTRY_COUNTER}-secret" --docker-server="https://index.docker.io/v1/" --docker-username=PRIVATE_CONTAINER_REGISTRY_USERNAME --docker-password=$PRIVATE_REGISTRY_REGISTRY_PASSWORD --dry-run -o yaml | kubectl apply -f - + kubectl create secret docker-registry "lagoon-private-registry-${PRIVATE_REGISTRY_COUNTER}-secret" --docker-server="https://index.docker.io/v1/" --docker-username=$PRIVATE_CONTAINER_REGISTRY_USERNAME --docker-password=$PRIVATE_REGISTRY_CREDENTIAL --dry-run -o yaml | kubectl apply -f - REGISTRY_SECRETS+=("lagoon-private-registry-${PRIVATE_REGISTRY_COUNTER}-secret") - let PRIVATE_REGISTRY_COUNTER++ + let ++PRIVATE_REGISTRY_COUNTER fi fi done diff --git a/images/kubectl-build-deploy-dind/helmcharts/cli-persistent/templates/_helpers.tpl b/images/kubectl-build-deploy-dind/helmcharts/cli-persistent/templates/_helpers.tpl index d1cc78cae5..c05589bb6b 100644 --- a/images/kubectl-build-deploy-dind/helmcharts/cli-persistent/templates/_helpers.tpl 
+++ b/images/kubectl-build-deploy-dind/helmcharts/cli-persistent/templates/_helpers.tpl @@ -73,4 +73,18 @@ lagoon.sh/prNumber: {{ .Values.prNumber | quote }} lagoon.sh/prHeadBranch: {{ .Values.prHeadBranch | quote }} lagoon.sh/prBaseBranch: {{ .Values.prBaseBranch | quote }} {{- end }} -{{- end -}} \ No newline at end of file +{{- end -}} + +{{/* +Generate name for twig storage emptyDir +*/}} +{{- define "cli-persistent.twig-storage.name" -}} +{{- printf "%s-twig" .Values.persistentStorage.name }} +{{- end -}} + +{{/* +Generate path for twig storage emptyDir +*/}} +{{- define "cli-persistent.twig-storage.path" -}} +{{- printf "%s/php/twig" .Values.persistentStorage.path }} +{{- end -}} diff --git a/images/kubectl-build-deploy-dind/helmcharts/cli-persistent/templates/deployment.yaml b/images/kubectl-build-deploy-dind/helmcharts/cli-persistent/templates/deployment.yaml index 557e3fb05e..4faf4e1241 100644 --- a/images/kubectl-build-deploy-dind/helmcharts/cli-persistent/templates/deployment.yaml +++ b/images/kubectl-build-deploy-dind/helmcharts/cli-persistent/templates/deployment.yaml @@ -31,6 +31,8 @@ spec: - name: {{ .Values.persistentStorage.name }} persistentVolumeClaim: claimName: {{ .Values.persistentStorage.name }} + - name: {{ include "cli-persistent.twig-storage.name" . | quote }} + emptyDir: {} priorityClassName: {{ include "cli-persistent.lagoonPriority" . }} enableServiceLinks: false securityContext: @@ -59,6 +61,8 @@ spec: readOnly: true - name: {{ .Values.persistentStorage.name }} mountPath: {{ .Values.persistentStorage.path | quote }} + - name: {{ include "cli-persistent.twig-storage.name" . | quote }} + mountPath: {{ include "cli-persistent.twig-storage.path" . 
| quote }} resources: {{- toYaml .Values.resources | nindent 12 }} {{- with .Values.nodeSelector }} diff --git a/images/kubectl-build-deploy-dind/helmcharts/custom-ingress/templates/ingress.yaml b/images/kubectl-build-deploy-dind/helmcharts/custom-ingress/templates/ingress.yaml index 06489fb824..9504bc6258 100644 --- a/images/kubectl-build-deploy-dind/helmcharts/custom-ingress/templates/ingress.yaml +++ b/images/kubectl-build-deploy-dind/helmcharts/custom-ingress/templates/ingress.yaml @@ -7,6 +7,7 @@ kind: Ingress metadata: name: {{ include "custom-ingress.fullname" . }} labels: + dioscuri.amazee.io/migrate: {{ .Values.routeMigrate | quote }} lagoon.sh/autogenerated: "false" {{- include "custom-ingress.labels" . | nindent 4 }} annotations: @@ -21,6 +22,15 @@ metadata: nginx.ingress.kubernetes.io/ssl-redirect: "true" ingress.kubernetes.io/ssl-redirect: "true" {{- end }} + monitor.stakater.com/enabled: "{{ .Values.ingressmonitorcontroller.enabled }}" + uptimerobot.monitor.stakater.com/interval: "{{ .Values.ingressmonitorcontroller.interval }}" + uptimerobot.monitor.stakater.com/alert-contacts: "{{ .Values.ingressmonitorcontroller.alertContacts }}" + {{- if .Values.ingressmonitorcontroller.path }} + monitor.stakater.com/overridePath: "{{ .Values.ingressmonitorcontroller.path }}" + {{- end }} + {{- if .Values.ingressmonitorcontroller.statuspageId }} + uptimerobot.monitor.stakater.com/status-pages: "{{ .Values.ingressmonitorcontroller.statuspageId }}" + {{- end }} # HSTS Handling {{- if .Values.hsts}} # haproxy.router.openshift.io/hsts_header: {{ .Values.route_hsts }} @@ -42,5 +52,3 @@ spec: - backend: serviceName: {{ .Values.service }} servicePort: http - - diff --git a/images/kubectl-build-deploy-dind/helmcharts/custom-ingress/values.yaml b/images/kubectl-build-deploy-dind/helmcharts/custom-ingress/values.yaml index 1190022f2d..59eec1e5ee 100644 --- a/images/kubectl-build-deploy-dind/helmcharts/custom-ingress/values.yaml +++ 
b/images/kubectl-build-deploy-dind/helmcharts/custom-ingress/values.yaml @@ -7,4 +7,10 @@ hsts: 'null' tls_acme: true insecure: Allow service: '' -annotations: {} \ No newline at end of file +annotations: {} +routeMigrate: false + +ingressmonitorcontroller: + enabled: 'false' + interval: '60' + alertContacts: 'unconfigured' diff --git a/images/kubectl-build-deploy-dind/helmcharts/mariadb-single/values.yaml b/images/kubectl-build-deploy-dind/helmcharts/mariadb-single/values.yaml index 799d97a64f..3ce2edbb09 100644 --- a/images/kubectl-build-deploy-dind/helmcharts/mariadb-single/values.yaml +++ b/images/kubectl-build-deploy-dind/helmcharts/mariadb-single/values.yaml @@ -7,7 +7,6 @@ replicaCount: 1 image: "" environmentType: production persistentStorage: - # name: "mariadb" #don't use this, use the fullname and append -data to it in pvc.yaml so that if one or more are deployed there is no clash size: 5Gi path: "/var/lib/mysql" diff --git a/images/kubectl-build-deploy-dind/helmcharts/nginx-php-persistent/templates/_helpers.tpl b/images/kubectl-build-deploy-dind/helmcharts/nginx-php-persistent/templates/_helpers.tpl index d3eec8be18..98ba6e299d 100644 --- a/images/kubectl-build-deploy-dind/helmcharts/nginx-php-persistent/templates/_helpers.tpl +++ b/images/kubectl-build-deploy-dind/helmcharts/nginx-php-persistent/templates/_helpers.tpl @@ -28,6 +28,14 @@ Create full hostname for autogenerated hosts {{- printf "%s.%s" .Release.Name .Values.routesAutogenerateSuffix | trimSuffix "-" -}} {{- end -}} +{{/* +Generate name of Persistent Storage +Uses the Release Name (Lagoon Service Name) unless it's overwritten via .Values.persistentStorage.name +*/}} +{{- define "nginx-php-persistent.persistentStorageName" -}} +{{- default .Release.Name .Values.persistentStorage.name | trunc 63 | trimSuffix "-" -}} +{{- end -}} + {{/* Common labels */}} @@ -79,4 +87,18 @@ lagoon.sh/prNumber: {{ .Values.prNumber | quote }} lagoon.sh/prHeadBranch: {{ .Values.prHeadBranch | quote }} 
lagoon.sh/prBaseBranch: {{ .Values.prBaseBranch | quote }} {{- end }} -{{- end -}} \ No newline at end of file +{{- end -}} + +{{/* +Generate name for twig storage emptyDir +*/}} +{{- define "nginx-php-persistent.twig-storage.name" -}} +{{- printf "%s-twig" (include "nginx-php-persistent.persistentStorageName" .) }} +{{- end -}} + +{{/* +Generate path for twig storage emptyDir +*/}} +{{- define "nginx-php-persistent.twig-storage.path" -}} +{{- printf "%s/php/twig" .Values.persistentStorage.path }} +{{- end -}} diff --git a/images/kubectl-build-deploy-dind/helmcharts/nginx-php-persistent/templates/deployment.yaml b/images/kubectl-build-deploy-dind/helmcharts/nginx-php-persistent/templates/deployment.yaml index 37ad0dd03d..975c6fac64 100644 --- a/images/kubectl-build-deploy-dind/helmcharts/nginx-php-persistent/templates/deployment.yaml +++ b/images/kubectl-build-deploy-dind/helmcharts/nginx-php-persistent/templates/deployment.yaml @@ -24,9 +24,11 @@ spec: {{- toYaml . | nindent 8 }} {{- end }} volumes: - - name: {{ .Values.persistentStorage.name }} + - name: {{ include "nginx-php-persistent.persistentStorageName" . }} persistentVolumeClaim: - claimName: {{ .Values.persistentStorage.name }} + claimName: {{ include "nginx-php-persistent.persistentStorageName" . }} + - name: {{ include "nginx-php-persistent.twig-storage.name" . | quote }} + emptyDir: {} priorityClassName: {{ include "nginx-php-persistent.lagoonPriority" . }} enableServiceLinks: false securityContext: @@ -65,7 +67,7 @@ spec: - configMapRef: name: lagoon-env volumeMounts: - - name: {{ .Values.persistentStorage.name }} + - name: {{ include "nginx-php-persistent.persistentStorageName" . }} mountPath: {{ .Values.persistentStorage.path | quote }} resources: {{- toYaml .Values.resources.nginx | nindent 12 }} @@ -97,8 +99,10 @@ spec: - name: NGINX_FASTCGI_PASS value: '127.0.0.1' volumeMounts: - - name: {{ .Values.persistentStorage.name }} + - name: {{ include "nginx-php-persistent.persistentStorageName" . 
}} mountPath: {{ .Values.persistentStorage.path | quote }} + - name: {{ include "nginx-php-persistent.twig-storage.name" . | quote }} + mountPath: {{ include "nginx-php-persistent.twig-storage.path" . | quote }} resources: {{- toYaml .Values.resources.php | nindent 12 }} {{- with .Values.nodeSelector }} @@ -112,4 +116,4 @@ spec: {{- with .Values.tolerations }} tolerations: {{- toYaml . | nindent 8 }} - {{- end }} \ No newline at end of file + {{- end }} diff --git a/images/kubectl-build-deploy-dind/helmcharts/nginx-php-persistent/templates/pvc.yaml b/images/kubectl-build-deploy-dind/helmcharts/nginx-php-persistent/templates/pvc.yaml index 311e16f9e8..286c758d02 100644 --- a/images/kubectl-build-deploy-dind/helmcharts/nginx-php-persistent/templates/pvc.yaml +++ b/images/kubectl-build-deploy-dind/helmcharts/nginx-php-persistent/templates/pvc.yaml @@ -1,7 +1,7 @@ apiVersion: v1 kind: PersistentVolumeClaim metadata: - name: {{ .Values.persistentStorage.name | quote }} + name: {{ include "nginx-php-persistent.persistentStorageName" . }} labels: {{- include "nginx-php-persistent.labels" . 
| nindent 4 }} annotations: diff --git a/images/kubectl-build-deploy-dind/helmcharts/node-persistent/templates/_helpers.tpl b/images/kubectl-build-deploy-dind/helmcharts/node-persistent/templates/_helpers.tpl index 7b1917e7d3..e90cd35afd 100644 --- a/images/kubectl-build-deploy-dind/helmcharts/node-persistent/templates/_helpers.tpl +++ b/images/kubectl-build-deploy-dind/helmcharts/node-persistent/templates/_helpers.tpl @@ -28,6 +28,14 @@ Create full hostname for autogenerated hosts {{- printf "%s.%s" .Release.Name .Values.routesAutogenerateSuffix | trimSuffix "-" -}} {{- end -}} +{{/* +Generate name of Persistent Storage +Uses the Release Name (Lagoon Service Name) unless it's overwritten via .Values.persistentStorage.name +*/}} +{{- define "node-persistent.persistentStorageName" -}} +{{- default .Release.Name .Values.persistentStorage.name | trunc 63 | trimSuffix "-" -}} +{{- end -}} + {{/* Common labels */}} diff --git a/images/kubectl-build-deploy-dind/helmcharts/node-persistent/templates/deployment.yaml b/images/kubectl-build-deploy-dind/helmcharts/node-persistent/templates/deployment.yaml index 8dc14d5aa1..48fc911bf5 100644 --- a/images/kubectl-build-deploy-dind/helmcharts/node-persistent/templates/deployment.yaml +++ b/images/kubectl-build-deploy-dind/helmcharts/node-persistent/templates/deployment.yaml @@ -24,9 +24,9 @@ spec: {{- toYaml . | nindent 8 }} {{- end }} volumes: - - name: {{ .Values.persistentStorage.name }} + - name: {{ include "node-persistent.persistentStorageName" . }} persistentVolumeClaim: - claimName: {{ .Values.persistentStorage.name }} + claimName: {{ include "node-persistent.persistentStorageName" . }} priorityClassName: {{ include "node-persistent.lagoonPriority" . }} enableServiceLinks: false securityContext: @@ -61,7 +61,7 @@ spec: - configMapRef: name: lagoon-env volumeMounts: - - name: {{ .Values.persistentStorage.name }} + - name: {{ include "node-persistent.persistentStorageName" . 
}} mountPath: {{ .Values.persistentStorage.path | quote }} resources: {{- toYaml .Values.resources | nindent 12 }} diff --git a/images/kubectl-build-deploy-dind/helmcharts/node-persistent/templates/pvc.yaml b/images/kubectl-build-deploy-dind/helmcharts/node-persistent/templates/pvc.yaml index 0352448414..97adc712a7 100644 --- a/images/kubectl-build-deploy-dind/helmcharts/node-persistent/templates/pvc.yaml +++ b/images/kubectl-build-deploy-dind/helmcharts/node-persistent/templates/pvc.yaml @@ -1,7 +1,7 @@ apiVersion: v1 kind: PersistentVolumeClaim metadata: - name: {{ .Values.persistentStorage.name | quote }} + name: {{ include "node-persistent.persistentStorageName" . }} labels: {{- include "node-persistent.labels" . | nindent 4 }} annotations: diff --git a/images/kubectl-build-deploy-dind/helmcharts/redis-persistent/templates/_helpers.tpl b/images/kubectl-build-deploy-dind/helmcharts/redis-persistent/templates/_helpers.tpl index cabff1e753..fdaf4b55cf 100644 --- a/images/kubectl-build-deploy-dind/helmcharts/redis-persistent/templates/_helpers.tpl +++ b/images/kubectl-build-deploy-dind/helmcharts/redis-persistent/templates/_helpers.tpl @@ -28,6 +28,14 @@ Create full hostname for autogenerated hosts {{- printf "%s.%s" .Release.Name .Values.routesAutogenerateSuffix | trimSuffix "-" -}} {{- end -}} +{{/* +Generate name of Persistent Storage +Uses the Release Name (Lagoon Service Name) unless it's overwritten via .Values.persistentStorage.name +*/}} +{{- define "redis-persistent.persistentStorageName" -}} +{{- default .Release.Name .Values.persistentStorage.name | trunc 63 | trimSuffix "-" -}} +{{- end -}} + {{/* Common labels */}} diff --git a/images/kubectl-build-deploy-dind/helmcharts/redis-persistent/templates/deployment.yaml b/images/kubectl-build-deploy-dind/helmcharts/redis-persistent/templates/deployment.yaml index 16f677f02c..df5e291d7b 100644 --- a/images/kubectl-build-deploy-dind/helmcharts/redis-persistent/templates/deployment.yaml +++ 
b/images/kubectl-build-deploy-dind/helmcharts/redis-persistent/templates/deployment.yaml @@ -26,9 +26,9 @@ spec: {{- toYaml . | nindent 8 }} {{- end }} volumes: - - name: {{ .Values.persistentStorage.name }} + - name: {{ include "redis-persistent.persistentStorageName" . }} persistentVolumeClaim: - claimName: {{ .Values.persistentStorage.name }} + claimName: {{ include "redis-persistent.persistentStorageName" . }} priorityClassName: {{ include "redis-persistent.lagoonPriority" . }} enableServiceLinks: false securityContext: @@ -64,7 +64,7 @@ spec: - configMapRef: name: lagoon-env volumeMounts: - - name: {{ .Values.persistentStorage.name }} + - name: {{ include "redis-persistent.persistentStorageName" . }} mountPath: {{ .Values.persistentStorage.path | quote }} resources: {{- toYaml .Values.resources | nindent 12 }} diff --git a/images/kubectl-build-deploy-dind/helmcharts/redis-persistent/templates/pvc.yaml b/images/kubectl-build-deploy-dind/helmcharts/redis-persistent/templates/pvc.yaml index c22ee60730..a2e665a74f 100644 --- a/images/kubectl-build-deploy-dind/helmcharts/redis-persistent/templates/pvc.yaml +++ b/images/kubectl-build-deploy-dind/helmcharts/redis-persistent/templates/pvc.yaml @@ -1,7 +1,7 @@ apiVersion: v1 kind: PersistentVolumeClaim metadata: - name: {{ .Values.persistentStorage.name | quote }} + name: {{ include "redis-persistent.persistentStorageName" . }} labels: {{- include "redis-persistent.labels" . 
| nindent 4 }} annotations: diff --git a/images/kubectl-build-deploy-dind/helmcharts/redis-persistent/values.yaml b/images/kubectl-build-deploy-dind/helmcharts/redis-persistent/values.yaml index ef13f0a93b..b69515e9b4 100644 --- a/images/kubectl-build-deploy-dind/helmcharts/redis-persistent/values.yaml +++ b/images/kubectl-build-deploy-dind/helmcharts/redis-persistent/values.yaml @@ -9,7 +9,6 @@ image: "" environmentType: production persistentStorage: - name: "redis" size: 5Gi path: "/data" diff --git a/images/kubectl-build-deploy-dind/helmcharts/solr/templates/_helpers.tpl b/images/kubectl-build-deploy-dind/helmcharts/solr/templates/_helpers.tpl index 5dccb65c6c..469558e333 100644 --- a/images/kubectl-build-deploy-dind/helmcharts/solr/templates/_helpers.tpl +++ b/images/kubectl-build-deploy-dind/helmcharts/solr/templates/_helpers.tpl @@ -28,6 +28,14 @@ Create full hostname for autogenerated hosts {{- printf "%s.%s" .Release.Name .Values.routesAutogenerateSuffix | trimSuffix "-" -}} {{- end -}} +{{/* +Generate name of Persistent Storage +Uses the Release Name (Lagoon Service Name) unless it's overwritten via .Values.persistentStorage.name +*/}} +{{- define "solr.persistentStorageName" -}} +{{- default .Release.Name .Values.persistentStorage.name | trunc 63 | trimSuffix "-" -}} +{{- end -}} + {{/* Common labels */}} diff --git a/images/kubectl-build-deploy-dind/helmcharts/solr/templates/deployment.yaml b/images/kubectl-build-deploy-dind/helmcharts/solr/templates/deployment.yaml index d08626aa01..4c89c90633 100644 --- a/images/kubectl-build-deploy-dind/helmcharts/solr/templates/deployment.yaml +++ b/images/kubectl-build-deploy-dind/helmcharts/solr/templates/deployment.yaml @@ -26,9 +26,9 @@ spec: {{- toYaml . | nindent 8 }} {{- end }} volumes: - - name: {{ .Values.persistentStorage.name }} + - name: {{ include "solr.persistentStorageName" . 
}} persistentVolumeClaim: - claimName: {{ .Values.persistentStorage.name }} + claimName: {{ include "solr.persistentStorageName" . }} priorityClassName: {{ include "solr.lagoonPriority" . }} enableServiceLinks: false securityContext: @@ -60,7 +60,7 @@ spec: - name: CRONJOBS value: {{ .Values.inPodCronjobs | quote }} volumeMounts: - - name: {{ .Values.persistentStorage.name }} + - name: {{ include "solr.persistentStorageName" . }} mountPath: {{ .Values.persistentStorage.path | quote }} resources: {{- toYaml .Values.resources | nindent 12 }} diff --git a/images/kubectl-build-deploy-dind/helmcharts/solr/templates/pvc.yaml b/images/kubectl-build-deploy-dind/helmcharts/solr/templates/pvc.yaml index 95037b14f5..5c9a313b1c 100644 --- a/images/kubectl-build-deploy-dind/helmcharts/solr/templates/pvc.yaml +++ b/images/kubectl-build-deploy-dind/helmcharts/solr/templates/pvc.yaml @@ -1,7 +1,7 @@ apiVersion: v1 kind: PersistentVolumeClaim metadata: - name: {{ .Values.persistentStorage.name | quote }} + name: {{ include "solr.persistentStorageName" . }} labels: {{- include "solr.labels" . 
| nindent 4 }} annotations: diff --git a/images/kubectl-build-deploy-dind/helmcharts/solr/values.yaml b/images/kubectl-build-deploy-dind/helmcharts/solr/values.yaml index 00c2ee294e..825122a659 100644 --- a/images/kubectl-build-deploy-dind/helmcharts/solr/values.yaml +++ b/images/kubectl-build-deploy-dind/helmcharts/solr/values.yaml @@ -9,7 +9,6 @@ image: "" environmentType: production persistentStorage: size: 5Gi - name: solr path: '/var/solr' imagePullPolicy: Always diff --git a/images/kubectl-build-deploy-dind/helmcharts/varnish-persistent/templates/_helpers.tpl b/images/kubectl-build-deploy-dind/helmcharts/varnish-persistent/templates/_helpers.tpl index 4f9b22221c..38b8033a93 100644 --- a/images/kubectl-build-deploy-dind/helmcharts/varnish-persistent/templates/_helpers.tpl +++ b/images/kubectl-build-deploy-dind/helmcharts/varnish-persistent/templates/_helpers.tpl @@ -28,6 +28,14 @@ Create full hostname for autogenerated hosts {{- printf "%s.%s" .Release.Name .Values.routesAutogenerateSuffix | trimSuffix "-" -}} {{- end -}} +{{/* +Generate name of Persistent Storage +Uses the Release Name (Lagoon Service Name) unless it's overwritten via .Values.persistentStorage.name +*/}} +{{- define "varnish-persistent.persistentStorageName" -}} +{{- default .Release.Name .Values.persistentStorage.name | trunc 63 | trimSuffix "-" -}} +{{- end -}} + {{/* Common labels */}} diff --git a/images/kubectl-build-deploy-dind/helmcharts/varnish-persistent/templates/deployment.yaml b/images/kubectl-build-deploy-dind/helmcharts/varnish-persistent/templates/deployment.yaml index b93c1aed0a..ce5974dc14 100644 --- a/images/kubectl-build-deploy-dind/helmcharts/varnish-persistent/templates/deployment.yaml +++ b/images/kubectl-build-deploy-dind/helmcharts/varnish-persistent/templates/deployment.yaml @@ -26,9 +26,9 @@ spec: {{- toYaml . | nindent 8 }} {{- end }} volumes: - - name: {{ .Values.persistentStorage.name }} + - name: {{ include "varnish-persistent.persistentStorageName" . 
}} persistentVolumeClaim: - claimName: {{ .Values.persistentStorage.name }} + claimName: {{ include "varnish-persistent.persistentStorageName" . }} priorityClassName: {{ include "varnish-persistent.lagoonPriority" . }} enableServiceLinks: false securityContext: @@ -53,6 +53,9 @@ spec: - name: http containerPort: 8080 protocol: TCP + - name: controlport + containerPort: 6082 + protocol: TCP livenessProbe: tcpSocket: port: http @@ -60,7 +63,7 @@ spec: tcpSocket: port: http volumeMounts: - - name: {{ .Values.persistentStorage.name }} + - name: {{ include "varnish-persistent.persistentStorageName" . }} mountPath: {{ .Values.persistentStorage.path | quote }} resources: {{- toYaml .Values.resources | nindent 12 }} diff --git a/images/kubectl-build-deploy-dind/helmcharts/varnish-persistent/templates/pvc.yaml b/images/kubectl-build-deploy-dind/helmcharts/varnish-persistent/templates/pvc.yaml index bd7532fa15..9e956adfa5 100644 --- a/images/kubectl-build-deploy-dind/helmcharts/varnish-persistent/templates/pvc.yaml +++ b/images/kubectl-build-deploy-dind/helmcharts/varnish-persistent/templates/pvc.yaml @@ -1,7 +1,7 @@ apiVersion: v1 kind: PersistentVolumeClaim metadata: - name: {{ .Values.persistentStorage.name | quote }} + name: {{ include "varnish-persistent.persistentStorageName" . }} labels: {{- include "varnish-persistent.labels" . | nindent 4 }} annotations: diff --git a/images/kubectl-build-deploy-dind/helmcharts/varnish-persistent/templates/service.yaml b/images/kubectl-build-deploy-dind/helmcharts/varnish-persistent/templates/service.yaml index e8902fe154..623ddf32e5 100644 --- a/images/kubectl-build-deploy-dind/helmcharts/varnish-persistent/templates/service.yaml +++ b/images/kubectl-build-deploy-dind/helmcharts/varnish-persistent/templates/service.yaml @@ -13,5 +13,9 @@ spec: targetPort: http protocol: TCP name: http + - port: 6082 + targetPort: controlport + protocol: TCP + name: controlport selector: {{- include "varnish-persistent.selectorLabels" . 
| nindent 4 }} diff --git a/images/kubectl-build-deploy-dind/helmcharts/varnish-persistent/values.yaml b/images/kubectl-build-deploy-dind/helmcharts/varnish-persistent/values.yaml index 124dcfc89b..ebe5dea5cc 100644 --- a/images/kubectl-build-deploy-dind/helmcharts/varnish-persistent/values.yaml +++ b/images/kubectl-build-deploy-dind/helmcharts/varnish-persistent/values.yaml @@ -9,7 +9,6 @@ image: "" environmentType: production persistentStorage: - name: "varnish" size: 5Gi path: "/var/cache/varnish" diff --git a/images/kubectl-build-deploy-dind/helmcharts/varnish/templates/deployment.yaml b/images/kubectl-build-deploy-dind/helmcharts/varnish/templates/deployment.yaml index 7ebcabdda1..dd5feb812c 100644 --- a/images/kubectl-build-deploy-dind/helmcharts/varnish/templates/deployment.yaml +++ b/images/kubectl-build-deploy-dind/helmcharts/varnish/templates/deployment.yaml @@ -45,6 +45,9 @@ spec: - name: http containerPort: 8080 protocol: TCP + - name: controlport + containerPort: 6082 + protocol: TCP livenessProbe: tcpSocket: port: http diff --git a/images/kubectl-build-deploy-dind/helmcharts/varnish/templates/service.yaml b/images/kubectl-build-deploy-dind/helmcharts/varnish/templates/service.yaml index 17d9aa366e..79d3f6bc95 100644 --- a/images/kubectl-build-deploy-dind/helmcharts/varnish/templates/service.yaml +++ b/images/kubectl-build-deploy-dind/helmcharts/varnish/templates/service.yaml @@ -13,5 +13,9 @@ spec: targetPort: http protocol: TCP name: http + - port: 6082 + targetPort: controlport + protocol: TCP + name: controlport selector: {{- include "varnish.selectorLabels" . 
| nindent 4 }} diff --git a/images/mariadb/Dockerfile b/images/mariadb/Dockerfile index c3bf44e43d..bb400b8b70 100644 --- a/images/mariadb/Dockerfile +++ b/images/mariadb/Dockerfile @@ -53,10 +53,9 @@ COPY entrypoints/ /lagoon/entrypoints/ COPY mysql-backup.sh /lagoon/ COPY my.cnf /etc/mysql/my.cnf -RUN for i in /var/run/mysqld /var/lib/mysql /etc/mysql/conf.d /docker-entrypoint-initdb.d/ "${BACKUPS_DIR}"; \ +RUN for i in /var/run/mysqld /var/lib/mysql /etc/mysql/conf.d /docker-entrypoint-initdb.d/ "${BACKUPS_DIR}" /home; \ do mkdir -p $i; chown mysql $i; /bin/fix-permissions $i; \ - done && \ - ln -s /var/lib/mysql/.my.cnf /home/.my.cnf + done COPY root/usr/share/container-scripts/mysql/readiness-probe.sh /usr/share/container-scripts/mysql/readiness-probe.sh RUN /bin/fix-permissions /usr/share/container-scripts/mysql/ \ diff --git a/images/mariadb/entrypoints/9999-mariadb-init.bash b/images/mariadb/entrypoints/9999-mariadb-init.bash index 1540ff34bf..72dc40ad22 100755 --- a/images/mariadb/entrypoints/9999-mariadb-init.bash +++ b/images/mariadb/entrypoints/9999-mariadb-init.bash @@ -23,7 +23,23 @@ for arg; do esac done +# check if MARIADB_COPY_DATA_DIR_SOURCE is set, if yes we're coping the contents of the given folder into the data dir folder +# this allows to prefill the datadir with a provided datadir (either added in a Dockerfile build, or mounted into the running container). +# This is different than just setting $MARIADB_DATA_DIR to the source folder, as only /var/lib/mysql is a persistent folder, so setting +# $MARIADB_DATA_DIR to another folder will make mariadb to not store the datadir across container restarts, while with this copy system +# the data will be prefilled and persistent across container restarts. 
+if [ -n "$MARIADB_COPY_DATA_DIR_SOURCE" ]; then + if [ -d ${MARIADB_DATA_DIR:-/var/lib/mysql}/mysql ]; then + echo "MARIADB_COPY_DATA_DIR_SOURCE is set, but MySQL directory already present in '${MARIADB_DATA_DIR:-/var/lib/mysql}/mysql' skipping copying" + else + echo "MARIADB_COPY_DATA_DIR_SOURCE is set, copying datadir contents from '$MARIADB_COPY_DATA_DIR_SOURCE' to '${MARIADB_DATA_DIR:-/var/lib/mysql}'" + CUR_DIR=${PWD} + cd ${MARIADB_COPY_DATA_DIR_SOURCE}/; tar cf - . | (cd ${MARIADB_DATA_DIR:-/var/lib/mysql}; tar xvf -) + cd $CUR_DIR + fi +fi +ln -sf ${MARIADB_DATA_DIR:-/var/lib/mysql}/.my.cnf /home/.my.cnf if [ "$1" = 'mysqld' -a -z "$wantHelp" ]; then if [ ! -d "/run/mysqld" ]; then @@ -31,7 +47,7 @@ if [ "$1" = 'mysqld' -a -z "$wantHelp" ]; then chown -R mysql:mysql /run/mysqld fi - if [ -d /var/lib/mysql/mysql ]; then + if [ -d ${MARIADB_DATA_DIR:-/var/lib/mysql}/mysql ]; then echo "MySQL directory already present, skipping creation" echo "starting mysql for mysql upgrade." @@ -56,7 +72,7 @@ if [ "$1" = 'mysqld' -a -z "$wantHelp" ]; then else echo "MySQL data directory not found, creating initial DBs" - mysql_install_db --skip-name-resolve --skip-test-db --auth-root-authentication-method=normal --datadir=/var/lib/mysql --basedir=/usr + mysql_install_db --skip-name-resolve --skip-test-db --auth-root-authentication-method=normal --datadir=${MARIADB_DATA_DIR:-/var/lib/mysql} --basedir=/usr echo "starting mysql for initdb.d import." 
/usr/bin/mysqld --skip-networking --wsrep_on=OFF & @@ -107,11 +123,11 @@ EOF cat $tfile | mysql -v -u root rm -v -f $tfile - echo "[client]" >> /var/lib/mysql/.my.cnf - echo "user=root" >> /var/lib/mysql/.my.cnf - echo "password=${MARIADB_ROOT_PASSWORD}" >> /var/lib/mysql/.my.cnf - echo "[mysql]" >> /var/lib/mysql/.my.cnf - echo "database=${MARIADB_DATABASE}" >> /var/lib/mysql/.my.cnf + echo "[client]" >> ${MARIADB_DATA_DIR:-/var/lib/mysql}/.my.cnf + echo "user=root" >> ${MARIADB_DATA_DIR:-/var/lib/mysql}/.my.cnf + echo "password=${MARIADB_ROOT_PASSWORD}" >> ${MARIADB_DATA_DIR:-/var/lib/mysql}/.my.cnf + echo "[mysql]" >> ${MARIADB_DATA_DIR:-/var/lib/mysql}/.my.cnf + echo "database=${MARIADB_DATABASE}" >> ${MARIADB_DATA_DIR:-/var/lib/mysql}/.my.cnf for f in `ls /docker-entrypoint-initdb.d/*`; do case "$f" in @@ -129,6 +145,6 @@ EOF fi -echo "done, now starting daemon" + echo "done, now starting daemon" fi diff --git a/images/mariadb/my.cnf b/images/mariadb/my.cnf index d7714558e0..2a727bb2d0 100644 --- a/images/mariadb/my.cnf +++ b/images/mariadb/my.cnf @@ -9,6 +9,7 @@ socket = /run/mysqld/mysqld.sock [mysqld] port = 3306 socket = /run/mysqld/mysqld.sock +datadir = ${MARIADB_DATA_DIR:-/var/lib/mysql} character_set_server = ${MARIADB_CHARSET:-utf8mb4} collation_server = ${MARIADB_COLLATION:-utf8mb4_bin} expire_logs_days = 10 diff --git a/images/mariadb/mysql-backup.sh b/images/mariadb/mysql-backup.sh index bf66958096..bea76622ef 100755 --- a/images/mariadb/mysql-backup.sh +++ b/images/mariadb/mysql-backup.sh @@ -21,7 +21,7 @@ set -eu -o pipefail # directory to put the backup files -BACKUP_DIR=/var/lib/mysql/backup +BACKUP_DIR=${MARIADB_DATA_DIR:-/var/lib/mysql}/backup # MYSQL Parameters MARIADB_USER=${MARIADB_USER:-lagoon} diff --git a/images/mariadb/root/usr/share/container-scripts/mysql/readiness-probe.sh b/images/mariadb/root/usr/share/container-scripts/mysql/readiness-probe.sh index d8c6d827c3..368be4374b 100755 --- 
a/images/mariadb/root/usr/share/container-scripts/mysql/readiness-probe.sh +++ b/images/mariadb/root/usr/share/container-scripts/mysql/readiness-probe.sh @@ -3,7 +3,7 @@ # openshift-mariadb: mysqld readinessProbe # -mysql --defaults-file=/var/lib/mysql/.my.cnf -e"SHOW DATABASES;" +mysql --defaults-file=${MARIADB_DATA_DIR:-/var/lib/mysql}/.my.cnf -e"SHOW DATABASES;" if [ $? -ne 0 ]; then exit 1 diff --git a/images/nginx/Dockerfile b/images/nginx/Dockerfile index ceb5c8c1b2..311bf2625f 100644 --- a/images/nginx/Dockerfile +++ b/images/nginx/Dockerfile @@ -36,6 +36,7 @@ COPY fastcgi.conf /etc/nginx/fastcgi_params COPY helpers/ /etc/nginx/helpers/ COPY static-files.conf /etc/nginx/conf.d/app.conf COPY redirects-map.conf /etc/nginx/redirects-map.conf +COPY healthcheck/healthz.locations healthcheck/healthz.locations.php.disable /etc/nginx/conf.d/ RUN mkdir -p /app \ && rm -f /etc/nginx/conf.d/default.conf \ diff --git a/images/nginx/docker-entrypoint b/images/nginx/docker-entrypoint index 602171d227..2f8458ec4a 100755 --- a/images/nginx/docker-entrypoint +++ b/images/nginx/docker-entrypoint @@ -15,8 +15,20 @@ ep /etc/nginx/* # Find all folders within /etc/nginx/conf.d/ find /etc/nginx/conf.d/ -type d | while read DIR; do # envplate if found folder is not empty - if find $DIR -mindepth 1 | read; then + if find $DIR -mindepth 1 | read; then ep $DIR/*; fi done ep /etc/nginx/helpers/* + +# If PHP is enabled, we override the Luascript /healthz check +echo "Setting up Healthz routing" +if [ ! 
-z "$NGINX_FASTCGI_PASS" ]; then + echo "Healthz routing - using PHP" + cp /etc/nginx/conf.d/healthz.locations.php.disable /etc/nginx/conf.d/healthz.locations +fi + +if [ "$FAST_HEALTH_CHECK" == "true" ]; then + echo "FAST HEALTH CHECK ENABLED" + cp /etc/nginx/helpers/90_healthz_fast_check.conf.disabled /etc/nginx/helpers/90_healthz_fast_check.conf +fi \ No newline at end of file diff --git a/images/nginx/healthcheck/README.md b/images/nginx/healthcheck/README.md new file mode 100644 index 0000000000..43751e2e11 --- /dev/null +++ b/images/nginx/healthcheck/README.md @@ -0,0 +1,10 @@ +# Healthcheck + +In this directory you'll find two files + +- healthz.locations.php.disable +- healthz.locations + +Both are designed to expose a `/.lagoonhealthz` location from the nginx service. The difference being that the `.php.disable` file is used to point to the [healthz-php](https://github.com/amazeeio/healthz-php) application _if_ there is a PHP service attached to this application. + +The logic for which of the two files are enabled is contained in this image's `docker-entrypoint` file - there we check for the existence of the env var `NGINX_FASTCGI_PASS`, which indicates (or should indicate) the presence of a PHP-fpm service. 
\ No newline at end of file diff --git a/images/nginx/healthcheck/healthz.locations b/images/nginx/healthcheck/healthz.locations new file mode 100644 index 0000000000..95cf2ed753 --- /dev/null +++ b/images/nginx/healthcheck/healthz.locations @@ -0,0 +1,8 @@ +location /.lagoonhealthz { + content_by_lua_block { + ngx.status = ngx.HTTP_OK; + ngx.header.content_type = 'application/json'; + ngx.say('{"check_nginx":"pass"}'); + ngx.exit(ngx.OK); + } +} diff --git a/images/nginx/healthcheck/healthz.locations.php.disable b/images/nginx/healthcheck/healthz.locations.php.disable new file mode 100644 index 0000000000..dd6be8e7ea --- /dev/null +++ b/images/nginx/healthcheck/healthz.locations.php.disable @@ -0,0 +1,10 @@ +location /.lagoonhealthz { + rewrite ^/.lagoonhealthz(/.*)?$ /.lagoonhealthz/index.php; + + location ~* \.php(/|$) { + include /etc/nginx/fastcgi.conf; + fastcgi_param SCRIPT_NAME /index.php; + fastcgi_param SCRIPT_FILENAME /healthz-php/index.php; + fastcgi_pass ${NGINX_FASTCGI_PASS:-php}:9000; + } +} diff --git a/images/nginx/helpers/90_healthz.conf b/images/nginx/helpers/90_healthz.conf new file mode 100644 index 0000000000..33356eda06 --- /dev/null +++ b/images/nginx/helpers/90_healthz.conf @@ -0,0 +1 @@ +include /etc/nginx/conf.d/healthz.locations; diff --git a/images/nginx/helpers/90_healthz_fast_check.conf.disabled b/images/nginx/helpers/90_healthz_fast_check.conf.disabled new file mode 100644 index 0000000000..78cb43761e --- /dev/null +++ b/images/nginx/helpers/90_healthz_fast_check.conf.disabled @@ -0,0 +1,13 @@ +set $fhcc none; + +if ( $http_user_agent ~* "StatusCake|Pingdom|Site25x7|Uptime|nagios" ) { + set $fhcc "A"; +} + +if ( $request_method = 'GET' ) { + set $fhcc "$fhcc G"; +} + +if ( $fhcc = 'A G' ) { + rewrite ~* /.lagoonhealthz last; +} \ No newline at end of file diff --git a/images/oc-build-deploy-dind/build-deploy-docker-compose.sh b/images/oc-build-deploy-dind/build-deploy-docker-compose.sh index 3144102fbe..fadb00cda6 100755 --- 
a/images/oc-build-deploy-dind/build-deploy-docker-compose.sh +++ b/images/oc-build-deploy-dind/build-deploy-docker-compose.sh @@ -91,7 +91,7 @@ do if oc --insecure-skip-tls-verify -n ${OPENSHIFT_PROJECT} get service "$SERVICE_NAME" &> /dev/null; then SERVICE_TYPE="mariadb-single" # check if an existing mariadb service instance already exists - elif oc -insecure-skip-tls-verify -n ${OPENSHIFT_PROJECT} get serviceinstance "$SERVICE_NAME" &> /dev/null; then + elif oc --insecure-skip-tls-verify -n ${OPENSHIFT_PROJECT} get serviceinstance "$SERVICE_NAME" &> /dev/null; then SERVICE_TYPE="mariadb-shared" # check if we can use the dbaas operator elif oc --insecure-skip-tls-verify -n ${OPENSHIFT_PROJECT} get mariadbconsumer.v1.mariadb.amazee.io &> /dev/null; then @@ -272,8 +272,8 @@ if [[ ( "$TYPE" == "pullrequest" || "$TYPE" == "branch" ) && ! $THIS_IS_TUG == LAGOON_POSTROLLOUT_DISABLED=($(echo $LAGOON_PROJECT_VARIABLES | jq -r '.[] | select(.name == "LAGOON_POSTROLLOUT_DISABLED") | "\(.value)"')) fi if [ ! 
-z "$LAGOON_ENVIRONMENT_VARIABLES" ]; then - LAGOON_PREROLLOUT_DISABLED=($(echo $LAGOON_PROJECT_VARIABLES | jq -r '.[] | select(.name == "LAGOON_PREROLLOUT_DISABLED") | "\(.value)"')) - LAGOON_POSTROLLOUT_DISABLED=($(echo $LAGOON_PROJECT_VARIABLES | jq -r '.[] | select(.name == "LAGOON_POSTROLLOUT_DISABLED") | "\(.value)"')) + LAGOON_PREROLLOUT_DISABLED=($(echo $LAGOON_ENVIRONMENT_VARIABLES | jq -r '.[] | select(.name == "LAGOON_PREROLLOUT_DISABLED") | "\(.value)"')) + LAGOON_POSTROLLOUT_DISABLED=($(echo $LAGOON_ENVIRONMENT_VARIABLES | jq -r '.[] | select(.name == "LAGOON_POSTROLLOUT_DISABLED") | "\(.value)"')) fi set -x @@ -429,7 +429,15 @@ else fi ROUTES_AUTOGENERATE_ENABLED=$(cat .lagoon.yml | shyaml get-value routes.autogenerate.enabled true) - +ROUTES_AUTOGENERATE_ALLOW_PRS=$(cat .lagoon.yml | shyaml get-value routes.autogenerate.allowPullrequests $ROUTES_AUTOGENERATE_ENABLED) +if [[ "$TYPE" == "pullrequest" && "$ROUTES_AUTOGENERATE_ALLOW_PRS" == "true" ]]; then + ROUTES_AUTOGENERATE_ENABLED=true +fi +## fail silently if the key autogenerateRoutes doesn't exist and default to whatever ROUTES_AUTOGENERATE_ENABLED is set to +ROUTES_AUTOGENERATE_BRANCH=$(cat .lagoon.yml | shyaml -q get-value environments.${BRANCH//./\\.}.autogenerateRoutes $ROUTES_AUTOGENERATE_ENABLED) +if [ "$ROUTES_AUTOGENERATE_BRANCH" =~ [Tt]rue ]; then + ROUTES_AUTOGENERATE_ENABLED=true +fi for SERVICE_TYPES_ENTRY in "${SERVICE_TYPES[@]}" do @@ -505,7 +513,15 @@ TEMPLATE_PARAMETERS=() ### CUSTOM ROUTES FROM .lagoon.yml ############################################## +if [ "${ENVIRONMENT_TYPE}" == "production" ]; then + MONITORING_ENABLED="true" +else + MONITORING_ENABLED="false" +fi +MONITORING_INTERVAL=60 + ROUTES_SERVICE_COUNTER=0 + # we need to check for production routes for active/standby if they are defined, as these will get migrated between environments as required if [ "${ENVIRONMENT_TYPE}" == "production" ]; then if [ "${BRANCH//./\\.}" == "${ACTIVE_ENVIRONMENT}" ]; then @@ -524,6 
+540,7 @@ if [ "${ENVIRONMENT_TYPE}" == "production" ]; then ROUTE_MIGRATE=$(cat .lagoon.yml | shyaml get-value production_routes.active.routes.$ROUTES_SERVICE_COUNTER.$ROUTES_SERVICE.$ROUTE_DOMAIN_COUNTER.$ROUTE_DOMAIN_ESCAPED.migrate true) ROUTE_INSECURE=$(cat .lagoon.yml | shyaml get-value production_routes.active.routes.$ROUTES_SERVICE_COUNTER.$ROUTES_SERVICE.$ROUTE_DOMAIN_COUNTER.$ROUTE_DOMAIN_ESCAPED.insecure Redirect) ROUTE_HSTS=$(cat .lagoon.yml | shyaml get-value production_routes.active.routes.$ROUTES_SERVICE_COUNTER.$ROUTES_SERVICE.$ROUTE_DOMAIN_COUNTER.$ROUTE_DOMAIN_ESCAPED.hsts null) + MONITORING_PATH=$(cat .lagoon.yml | shyaml get-value production_routes.active.routes.$ROUTES_SERVICE_COUNTER.$ROUTES_SERVICE.$ROUTE_DOMAIN_COUNTER.$ROUTE_DOMAIN_ESCAPED.monitoring-path "") else # Only a value given, assuming some defaults ROUTE_DOMAIN=$(cat .lagoon.yml | shyaml get-value production_routes.active.routes.$ROUTES_SERVICE_COUNTER.$ROUTES_SERVICE.$ROUTE_DOMAIN_COUNTER) @@ -565,6 +582,7 @@ if [ "${ENVIRONMENT_TYPE}" == "production" ]; then ROUTE_MIGRATE=$(cat .lagoon.yml | shyaml get-value production_routes.standby.routes.$ROUTES_SERVICE_COUNTER.$ROUTES_SERVICE.$ROUTE_DOMAIN_COUNTER.$ROUTE_DOMAIN_ESCAPED.migrate true) ROUTE_INSECURE=$(cat .lagoon.yml | shyaml get-value production_routes.standby.routes.$ROUTES_SERVICE_COUNTER.$ROUTES_SERVICE.$ROUTE_DOMAIN_COUNTER.$ROUTE_DOMAIN_ESCAPED.insecure Redirect) ROUTE_HSTS=$(cat .lagoon.yml | shyaml get-value production_routes.standby.routes.$ROUTES_SERVICE_COUNTER.$ROUTES_SERVICE.$ROUTE_DOMAIN_COUNTER.$ROUTE_DOMAIN_ESCAPED.hsts null) + MONITORING_PATH=$(cat .lagoon.yml | shyaml get-value production_routes.standby.routes.$ROUTES_SERVICE_COUNTER.$ROUTES_SERVICE.$ROUTE_DOMAIN_COUNTER.$ROUTE_DOMAIN_ESCAPED.monitoring-path "") else # Only a value given, assuming some defaults ROUTE_DOMAIN=$(cat .lagoon.yml | shyaml get-value production_routes.standby.routes.$ROUTES_SERVICE_COUNTER.$ROUTES_SERVICE.$ROUTE_DOMAIN_COUNTER) @@ 
-609,6 +627,7 @@ if [ -n "$(cat .lagoon.yml | shyaml keys ${PROJECT}.environments.${BRANCH//./\\. ROUTE_MIGRATE=$(cat .lagoon.yml | shyaml get-value ${PROJECT}.environments.${BRANCH//./\\.}.routes.$ROUTES_SERVICE_COUNTER.$ROUTES_SERVICE.$ROUTE_DOMAIN_COUNTER.$ROUTE_DOMAIN_ESCAPED.migrate false) ROUTE_INSECURE=$(cat .lagoon.yml | shyaml get-value ${PROJECT}.environments.${BRANCH//./\\.}.routes.$ROUTES_SERVICE_COUNTER.$ROUTES_SERVICE.$ROUTE_DOMAIN_COUNTER.$ROUTE_DOMAIN_ESCAPED.insecure Redirect) ROUTE_HSTS=$(cat .lagoon.yml | shyaml get-value ${PROJECT}.environments.${BRANCH//./\\.}.routes.$ROUTES_SERVICE_COUNTER.$ROUTES_SERVICE.$ROUTE_DOMAIN_COUNTER.$ROUTE_DOMAIN_ESCAPED.hsts null) + MONITORING_PATH=$(cat .lagoon.yml | shyaml get-value ${PROJECT}.environments.${BRANCH//./\\.}.routes.$ROUTES_SERVICE_COUNTER.$ROUTES_SERVICE.$ROUTE_DOMAIN_COUNTER.$ROUTE_DOMAIN_ESCAPED.monitoring-path "") else # Only a value given, assuming some defaults ROUTE_DOMAIN=$(cat .lagoon.yml | shyaml get-value ${PROJECT}.environments.${BRANCH//./\\.}.routes.$ROUTES_SERVICE_COUNTER.$ROUTES_SERVICE.$ROUTE_DOMAIN_COUNTER) @@ -647,6 +666,7 @@ else ROUTE_MIGRATE=$(cat .lagoon.yml | shyaml get-value environments.${BRANCH//./\\.}.routes.$ROUTES_SERVICE_COUNTER.$ROUTES_SERVICE.$ROUTE_DOMAIN_COUNTER.$ROUTE_DOMAIN_ESCAPED.migrate false) ROUTE_INSECURE=$(cat .lagoon.yml | shyaml get-value environments.${BRANCH//./\\.}.routes.$ROUTES_SERVICE_COUNTER.$ROUTES_SERVICE.$ROUTE_DOMAIN_COUNTER.$ROUTE_DOMAIN_ESCAPED.insecure Redirect) ROUTE_HSTS=$(cat .lagoon.yml | shyaml get-value environments.${BRANCH//./\\.}.routes.$ROUTES_SERVICE_COUNTER.$ROUTES_SERVICE.$ROUTE_DOMAIN_COUNTER.$ROUTE_DOMAIN_ESCAPED.hsts null) + MONITORING_PATH=$(cat .lagoon.yml | shyaml get-value environments.${BRANCH//./\\.}.routes.$ROUTES_SERVICE_COUNTER.$ROUTES_SERVICE.$ROUTE_DOMAIN_COUNTER.$ROUTE_DOMAIN_ESCAPED.monitoring-path "") else # Only a value given, assuming some defaults ROUTE_DOMAIN=$(cat .lagoon.yml | shyaml get-value 
environments.${BRANCH//./\\.}.routes.$ROUTES_SERVICE_COUNTER.$ROUTES_SERVICE.$ROUTE_DOMAIN_COUNTER) @@ -707,8 +727,10 @@ fi ############################################## ### CUSTOM MONITORING_URLS FROM .lagoon.yml ############################################## +# @DEPRECATED - to be removed with Lagoon 2.0 URL_COUNTER=0 while [ -n "$(cat .lagoon.yml | shyaml get-value environments.${BRANCH//./\\.}.monitoring_urls.$URL_COUNTER 2> /dev/null)" ]; do + echo "DEPRECATION WARNING: 'monitoring_urls' is being moved to a per-route 'monitoring-path', please update your route" MONITORING_URL="$(cat .lagoon.yml | shyaml get-value environments.${BRANCH//./\\.}.monitoring_urls.$URL_COUNTER)" if [[ $URL_COUNTER > 0 ]]; then MONITORING_URLS="${MONITORING_URLS}, ${MONITORING_URL}" @@ -741,10 +763,8 @@ ROUTES=$(oc --insecure-skip-tls-verify -n ${OPENSHIFT_PROJECT} get routes -l "ac # Active / Standby routes ACTIVE_ROUTES="" STANDBY_ROUTES="" -if [ "${BRANCH//./\\.}" == "${ACTIVE_ENVIRONMENT}" ]; then +if [ ! 
-z "${STANDBY_ENVIRONMENT}" ]; then ACTIVE_ROUTES=$(oc --insecure-skip-tls-verify -n ${OPENSHIFT_PROJECT} get routes -l "dioscuri.amazee.io/migrate=true" -o=go-template --template='{{range $index, $route := .items}}{{if $index}},{{end}}{{if $route.spec.tls.termination}}https://{{else}}http://{{end}}{{$route.spec.host}}{{end}}') -fi -if [ "${BRANCH//./\\.}" == "${STANDBY_ENVIRONMENT}" ]; then STANDBY_ROUTES=$(oc --insecure-skip-tls-verify -n ${OPENSHIFT_PROJECT} get routes -l "dioscuri.amazee.io/migrate=true" -o=go-template --template='{{range $index, $route := .items}}{{if $index}},{{end}}{{if $route.spec.tls.termination}}https://{{else}}http://{{end}}{{$route.spec.host}}{{end}}') fi diff --git a/images/oc-build-deploy-dind/openshift-templates/cli-persistent/deployment.yml b/images/oc-build-deploy-dind/openshift-templates/cli-persistent/deployment.yml index 8312fbc933..dfe504fa80 100644 --- a/images/oc-build-deploy-dind/openshift-templates/cli-persistent/deployment.yml +++ b/images/oc-build-deploy-dind/openshift-templates/cli-persistent/deployment.yml @@ -103,6 +103,8 @@ objects: secret: defaultMode: 420 secretName: lagoon-sshkey + - name: ${PERSISTENT_STORAGE_NAME}-twig + emptyDir: {} priorityClassName: lagoon-priority-${ENVIRONMENT_TYPE} containers: - image: ${SERVICE_IMAGE} @@ -125,6 +127,8 @@ objects: - mountPath: /var/run/secrets/lagoon/sshkey/ name: lagoon-sshkey readOnly: true + - name: ${PERSISTENT_STORAGE_NAME}-twig + mountPath: ${PERSISTENT_STORAGE_PATH}/php/twig resources: requests: cpu: 10m diff --git a/images/oc-build-deploy-dind/openshift-templates/nginx-php-persistent/deployment.yml b/images/oc-build-deploy-dind/openshift-templates/nginx-php-persistent/deployment.yml index 82da081d48..0f089522e3 100644 --- a/images/oc-build-deploy-dind/openshift-templates/nginx-php-persistent/deployment.yml +++ b/images/oc-build-deploy-dind/openshift-templates/nginx-php-persistent/deployment.yml @@ -103,6 +103,8 @@ objects: - name: ${SERVICE_NAME} 
persistentVolumeClaim: claimName: ${SERVICE_NAME} + - name: ${SERVICE_NAME}-twig + emptyDir: {} priorityClassName: lagoon-priority-${ENVIRONMENT_TYPE} containers: - image: ${NGINX_SERVICE_IMAGE} @@ -170,6 +172,8 @@ objects: volumeMounts: - name: ${SERVICE_NAME} mountPath: ${PERSISTENT_STORAGE_PATH} + - name: ${SERVICE_NAME}-twig + mountPath: ${PERSISTENT_STORAGE_PATH}/php/twig resources: requests: cpu: 10m diff --git a/images/oc-build-deploy-dind/openshift-templates/nginx-php-redis-persistent/deployment.yml b/images/oc-build-deploy-dind/openshift-templates/nginx-php-redis-persistent/deployment.yml index 7e300f35c0..d81fbff00e 100644 --- a/images/oc-build-deploy-dind/openshift-templates/nginx-php-redis-persistent/deployment.yml +++ b/images/oc-build-deploy-dind/openshift-templates/nginx-php-redis-persistent/deployment.yml @@ -109,6 +109,8 @@ objects: - name: ${SERVICE_NAME} persistentVolumeClaim: claimName: ${SERVICE_NAME} + - name: ${SERVICE_NAME}-twig + emptyDir: {} priorityClassName: lagoon-priority-${ENVIRONMENT_TYPE} containers: - image: ${NGINX_SERVICE_IMAGE} @@ -180,6 +182,8 @@ objects: volumeMounts: - name: ${SERVICE_NAME} mountPath: ${PERSISTENT_STORAGE_PATH} + - name: ${SERVICE_NAME}-twig + mountPath: ${PERSISTENT_STORAGE_PATH}/php/twig resources: requests: cpu: 10m diff --git a/images/oc-build-deploy-dind/openshift-templates/rabbitmq/deployment.yml b/images/oc-build-deploy-dind/openshift-templates/rabbitmq/deployment.yml index 20d2c87446..58e8ea4b4e 100644 --- a/images/oc-build-deploy-dind/openshift-templates/rabbitmq/deployment.yml +++ b/images/oc-build-deploy-dind/openshift-templates/rabbitmq/deployment.yml @@ -68,8 +68,8 @@ objects: service: ${SERVICE_NAME} branch: ${SAFE_BRANCH} project: ${SAFE_PROJECT} - annotations: - lagoon.sh/configMapSha: ${CONFIG_MAP_SHA} + annotations: + lagoon.sh/configMapSha: ${CONFIG_MAP_SHA} spec: priorityClassName: lagoon-priority-${ENVIRONMENT_TYPE} containers: diff --git 
a/images/oc-build-deploy-dind/openshift-templates/route.yml b/images/oc-build-deploy-dind/openshift-templates/route.yml index 2f3c1e04d6..1f8ecca4ea 100644 --- a/images/oc-build-deploy-dind/openshift-templates/route.yml +++ b/images/oc-build-deploy-dind/openshift-templates/route.yml @@ -40,6 +40,21 @@ parameters: - name: ROUTE_MIGRATE description: Setting to determine if this route should be migratable for active/standby purposes required: true + - name: MONITORING_ENABLED + description: Default to monitoring disabled, only enabled on production routes + value: "false" + - name: MONITORING_INTERVAL + description: Frequency of checks by monitoring + value: "" + - name: MONITOR_ALERTCONTACTS + description: Alertcontacts to associate to this monitor + value: "" + - name: MONITORING_PATH + description: Path for monitoring of this route + value: "" + - name: MONITORING_STATUSPAGEID + description: Uptime Robot status page ID + value: "" objects: - apiVersion: v1 kind: Route @@ -48,6 +63,11 @@ objects: haproxy.router.openshift.io/disable_cookies: 'true' haproxy.router.openshift.io/hsts_header: '${ROUTE_HSTS}' kubernetes.io/tls-acme: '${ROUTE_TLS_ACME}' + monitor.stakater.com/enabled: '${MONITORING_ENABLED}' + uptimerobot.monitor.stakater.com/interval: '${MONITORING_INTERVAL}' + uptimerobot.monitor.stakater.com/alert-contacts: '${MONITOR_ALERTCONTACTS}' + monitor.stakater.com/overridePath: '${MONITORING_PATH}' + uptimerobot.monitor.stakater.com/status-pages: '${MONITORING_STATUSPAGEID}' creationTimestamp: null labels: branch: ${SAFE_BRANCH} diff --git a/images/oc-build-deploy-dind/scripts/exec-openshift-create-route.sh b/images/oc-build-deploy-dind/scripts/exec-openshift-create-route.sh index 013045e6a3..0d68a1cc44 100644 --- a/images/oc-build-deploy-dind/scripts/exec-openshift-create-route.sh +++ b/images/oc-build-deploy-dind/scripts/exec-openshift-create-route.sh @@ -2,7 +2,7 @@ # TODO: find out why we are using the if/else and if it's still needed for kubernetes if oc 
--insecure-skip-tls-verify -n ${OPENSHIFT_PROJECT} get route "$ROUTE_DOMAIN" &> /dev/null; then - oc --insecure-skip-tls-verify -n ${OPENSHIFT_PROJECT} patch route "$ROUTE_DOMAIN" -p "{\"metadata\":{\"labels\":{\"dioscuri.amazee.io/migrate\": \"${ROUTE_MIGRATE}\"},\"annotations\":{\"kubernetes.io/tls-acme\":\"${ROUTE_TLS_ACME}\",\"haproxy.router.openshift.io/hsts_header\":\"${ROUTE_HSTS}\"}},\"spec\":{\"to\":{\"name\":\"${ROUTE_SERVICE}\"},\"tls\":{\"insecureEdgeTerminationPolicy\":\"${ROUTE_INSECURE}\"}}}" + oc --insecure-skip-tls-verify -n ${OPENSHIFT_PROJECT} patch route "$ROUTE_DOMAIN" -p "{\"metadata\":{\"labels\":{\"dioscuri.amazee.io/migrate\":\"${ROUTE_MIGRATE}\"},\"annotations\":{\"haproxy.router.openshift.io/disable_cookies\":\"true\",\"kubernetes.io/tls-acme\":\"${ROUTE_TLS_ACME}\",\"haproxy.router.openshift.io/hsts_header\":\"${ROUTE_HSTS}\",\"monitor.stakater.com/enabled\":\"${MONITORING_ENABLED}\",\"uptimerobot.monitor.stakater.com/interval\":\"${MONITORING_INTERVAL}\",\"uptimerobot.monitor.stakater.com/alert-contacts\":\"${MONITORING_ALERTCONTACT}\",\"monitor.stakater.com/overridePath\":\"${MONITORING_PATH}\",\"uptimerobot.monitor.stakater.com/status-pages\":\"${MONITORING_STATUSPAGEID}\"}},\"spec\":{\"to\":{\"name\":\"${ROUTE_SERVICE}\"},\"tls\":{\"insecureEdgeTerminationPolicy\":\"${ROUTE_INSECURE}\"}}}" else oc process --local -o yaml --insecure-skip-tls-verify \ -n ${OPENSHIFT_PROJECT} \ @@ -19,5 +19,10 @@ else -p ROUTE_INSECURE="${ROUTE_INSECURE}" \ -p ROUTE_HSTS="${ROUTE_HSTS}" \ -p ROUTE_MIGRATE="${ROUTE_MIGRATE}" \ + -p MONITORING_ENABLED="${MONITORING_ENABLED}" \ + -p MONITOR_ALERTCONTACTS="${MONITOR_ALERTCONTACTS}" \ + -p MONITORING_PATH="${MONITORING_PATH}" \ + -p MONITORING_INTERVAL="${MONITORING_INTERVAL}" \ + -p MONITORING_STATUSPAGEID="${MONITORING_STATUSPAGEID}" \ | outputToYaml fi diff --git a/images/php/cli-drupal/Dockerfile b/images/php/cli-drupal/Dockerfile index bb284c4ea3..bf2a8a4fa2 100644 --- a/images/php/cli-drupal/Dockerfile 
+++ b/images/php/cli-drupal/Dockerfile @@ -9,7 +9,7 @@ ENV LAGOON=cli-drupal # Defining Versions - https://github.com/hechoendrupal/drupal-console-launcher/releases ENV DRUPAL_CONSOLE_LAUNCHER_VERSION=1.9.4 \ DRUPAL_CONSOLE_LAUNCHER_SHA=b7759279668caf915b8e9f3352e88f18e4f20659 \ - DRUSH_VERSION=8.3.1 \ + DRUSH_VERSION=8.3.5 \ DRUSH_LAUNCHER_VERSION=0.6.0 \ DRUSH_LAUNCHER_FALLBACK=/opt/drush8/vendor/bin/drush diff --git a/images/php/cli/Dockerfile b/images/php/cli/Dockerfile index 419b38be4b..07254ebb97 100644 --- a/images/php/cli/Dockerfile +++ b/images/php/cli/Dockerfile @@ -8,8 +8,8 @@ ENV LAGOON=cli # Defining Versions - Composer # @see https://getcomposer.org/download/ -ENV COMPOSER_VERSION=1.10.1 \ - COMPOSER_HASH_SHA256=1831f266580f221189dc04d4b58d7fc50c934ffc3a0eca89ecb4a53aa44867e2 +ENV COMPOSER_VERSION=1.10.8 \ + COMPOSER_HASH_SHA256=4c40737f5d5f36d04f8b2df37171c6a1ff520efcadcb8626cc7c30bd4c5178e5 RUN apk add --no-cache git \ unzip \ diff --git a/images/php/fpm/Dockerfile b/images/php/fpm/Dockerfile index 6eb59880bd..c5462d2b7d 100644 --- a/images/php/fpm/Dockerfile +++ b/images/php/fpm/Dockerfile @@ -3,6 +3,11 @@ ARG PHP_IMAGE_VERSION ARG ALPINE_VERSION ARG IMAGE_REPO FROM ${IMAGE_REPO:-lagoon}/commons as commons + +FROM composer:latest as healthcheckbuilder + +RUN composer create-project --no-dev amazeeio/healthz-php /healthz-php v0.0.3 + FROM php:${PHP_IMAGE_VERSION}-fpm-alpine${ALPINE_VERSION} LABEL maintainer="amazee.io" @@ -17,6 +22,10 @@ COPY --from=commons /bin/fix-permissions /bin/ep /bin/docker-sleep /bin/ COPY --from=commons /sbin/tini /sbin/ COPY --from=commons /home /home +# Copy healthcheck files + +COPY --from=healthcheckbuilder /healthz-php /healthz-php + RUN chmod g+w /etc/passwd \ && mkdir -p /home @@ -39,12 +48,9 @@ COPY ssmtp.conf /etc/ssmtp/ssmtp.conf # New Relic PHP Agent. 
# @see https://docs.newrelic.com/docs/release-notes/agent-release-notes/php-release-notes/ # @see https://docs.newrelic.com/docs/agents/php-agent/getting-started/php-agent-compatibility-requirements -ENV NEWRELIC_VERSION=9.10.1.263 +ENV NEWRELIC_VERSION=9.11.0.267 -# Pin curl to Version 7.69.1-r0 as the current shipped one 7.67.0 has a bug, see -# https://github.com/curl/curl/issues/4624 -# TODO: Remove as soon as Alpine 3.11 is shipped with a version higher than 7.67.0 -RUN apk add --no-cache curl=7.69.1-r0 libcurl=7.69.1-r0 --repository http://dl-cdn.alpinelinux.org/alpine/edge/main/ +RUN apk add --no-cache curl --repository http://dl-cdn.alpinelinux.org/alpine/edge/main/ RUN apk add --no-cache fcgi \ ssmtp \ @@ -64,6 +70,8 @@ RUN apk add --no-cache fcgi \ # for webp libwebp-dev \ postgresql-dev \ + # for yaml + yaml-dev \ # for imagemagick imagemagick \ imagemagick-libs \ @@ -71,6 +79,7 @@ RUN apk add --no-cache fcgi \ && apk add --no-cache --virtual .phpize-deps $PHPIZE_DEPS \ && yes '' | pecl install -f apcu \ && yes '' | pecl install -f xdebug \ + && yes '' | pecl install -f yaml \ && yes '' | pecl install -f redis-4.3.0 \ && yes '' | pecl install -f imagick \ && docker-php-ext-enable apcu redis xdebug imagick \ @@ -86,12 +95,14 @@ RUN apk add --no-cache fcgi \ && sed -i '1s/^/;Intentionally disabled. 
Enable via setting env variable XDEBUG_ENABLE to true\n;/' /usr/local/etc/php/conf.d/docker-php-ext-xdebug.ini \ && rm -rf /var/cache/apk/* /tmp/pear/ \ && apk del .phpize-deps \ + && echo "extension=yaml.so" > /usr/local/etc/php/conf.d/yaml.ini \ && mkdir -p /tmp/newrelic && cd /tmp/newrelic \ && wget https://download.newrelic.com/php_agent/archive/${NEWRELIC_VERSION}/newrelic-php5-${NEWRELIC_VERSION}-linux-musl.tar.gz \ && gzip -dc newrelic-php5-${NEWRELIC_VERSION}-linux-musl.tar.gz | tar --strip-components=1 -xf - \ && NR_INSTALL_USE_CP_NOT_LN=1 NR_INSTALL_SILENT=1 ./newrelic-install install \ && sed -i -e "s/newrelic.appname = .*/newrelic.appname = \"\${LAGOON_PROJECT:-noproject}-\${LAGOON_GIT_SAFE_BRANCH:-nobranch}\"/" /usr/local/etc/php/conf.d/newrelic.ini \ && sed -i -e "s/;newrelic.enabled = .*/newrelic.enabled = \${NEWRELIC_ENABLED:-false}/" /usr/local/etc/php/conf.d/newrelic.ini \ + && sed -i -e "s/;newrelic.browser_monitoring.auto_instrument = .*/newrelic.browser_monitoring.auto_instrument = \${NEWRELIC_BROWSER_MONITORING_ENABLED:-true}/" /usr/local/etc/php/conf.d/newrelic.ini \ && sed -i -e "s/newrelic.license = .*/newrelic.license = \"\${NEWRELIC_LICENSE:-}\"/" /usr/local/etc/php/conf.d/newrelic.ini \ && sed -i -e "s/;newrelic.loglevel = .*/newrelic.loglevel = \"\${NEWRELIC_LOG_LEVEL:-warning}\"/" /usr/local/etc/php/conf.d/newrelic.ini \ && sed -i -e "s/;newrelic.daemon.loglevel = .*/newrelic.daemon.loglevel = \"\${NEWRELIC_DAEMON_LOG_LEVEL:-warning}\"/" /usr/local/etc/php/conf.d/newrelic.ini \ diff --git a/images/php/fpm/entrypoints/60-php-xdebug.sh b/images/php/fpm/entrypoints/60-php-xdebug.sh index 5bd0959a0e..1d05a274f1 100755 --- a/images/php/fpm/entrypoints/60-php-xdebug.sh +++ b/images/php/fpm/entrypoints/60-php-xdebug.sh @@ -3,7 +3,7 @@ # Tries to find the Dockerhost get_dockerhost() { # https://docs.docker.com/docker-for-mac/networking/#known-limitations-use-cases-and-workarounds - if busybox timeout 1 busybox nslookup host.docker.internal &> 
/dev/null; then + if busybox timeout 1 busybox nslookup -query=A host.docker.internal &> /dev/null; then echo "host.docker.internal" return fi diff --git a/images/redis/conf/redis.conf b/images/redis/conf/redis.conf index 1c82d74c42..06425ea1c6 100644 --- a/images/redis/conf/redis.conf +++ b/images/redis/conf/redis.conf @@ -11,4 +11,6 @@ maxmemory-policy allkeys-lru protected-mode no bind 0.0.0.0 +${REQUIREPASS_CONF:-} + include /etc/redis/${FLAVOR:-ephemeral}.conf diff --git a/images/redis/docker-entrypoint b/images/redis/docker-entrypoint index 93bcd95616..fafbb758ef 100755 --- a/images/redis/docker-entrypoint +++ b/images/redis/docker-entrypoint @@ -1,5 +1,13 @@ #!/bin/sh +if [[ -n "${REDIS_PASSWORD}" ]]; then + export REQUIREPASS_CONF="# Enable basic/simple authentication +# Warning: since Redis is pretty fast an outside user can try up to +# 150k passwords per second against a good box. This means that you should +# use a very strong password otherwise it will be very easy to break. +requirepass ${REDIS_PASSWORD}" +fi + ep /etc/redis/* exec "$@" diff --git a/images/solr/20-solr-datadir.sh b/images/solr/20-solr-datadir.sh index c9e9e955aa..d7c1f486cc 100755 --- a/images/solr/20-solr-datadir.sh +++ b/images/solr/20-solr-datadir.sh @@ -1,6 +1,27 @@ #!/bin/sh set -eo pipefail +# check if SOLR_COPY_DATA_DIR_SOURCE is set, if yes we're coping the contents of the given folder into the data dir folder +# this allows to prefill the datadir with a provided datadir (either added in a Dockerfile build, or mounted into the running container). +# This is different than just setting $SOLR_DATA_DIR to the source folder, as only /var/solr is a persistent folder, so setting +# $SOLR_DATA_DIR to another folder will make solr to not store the datadir across container restarts, while with this copy system +# the data will be prefilled and persistent across container restarts. 
+if [ -n "$SOLR_COPY_DATA_DIR_SOURCE" ]; then + echo "MARIADB_COPY_DATA_DIR_SOURCE is set, start copying from source location" + for solrcorepath in $(ls -d $SOLR_COPY_DATA_DIR_SOURCE/*/ | grep -v lost+found) ; do + corename=$(basename $solrcorepath) + if [ -d ${SOLR_DATA_DIR:-/var/solr}/$corename ]; then + echo "core $corename already present in destination, skipping copying" + else + echo "copying datadir contents from '$SOLR_COPY_DATA_DIR_SOURCE/$corename to '${SOLR_DATA_DIR:-/var/solr}/$corename'" + CUR_DIR=${PWD} + mkdir ${SOLR_DATA_DIR:-/var/solr}/$corename + cd $SOLR_COPY_DATA_DIR_SOURCE/$corename; tar cf - . | (cd ${SOLR_DATA_DIR:-/var/solr}/$corename; tar xvf -) + cd $CUR_DIR + fi + done +fi + # Previously the Solr Config and Solr Data Dir was both kept in the persistent volume: # - Solr data: /opt/solr/server/solr/mycores/${corename}/data # - Solr config: /opt/solr/server/solr/mycores/${corename}/config @@ -41,9 +62,9 @@ if [ ! -n "$(ls /opt/solr/server/solr/mycores)" ]; then printf "\n\n" fi -if [ -n "$(ls /var/solr)" ]; then +if [ -n "$(ls ${SOLR_DATA_DIR:-/var/solr})" ]; then # Iterate through all existing solr cores - for solrcorepath in $(ls -d /var/solr/*/ | grep -v lost+found) ; do + for solrcorepath in $(ls -d ${SOLR_DATA_DIR:-/var/solr}/*/ | grep -v lost+found) ; do corename=$(basename $solrcorepath) if [ -d ${solrcorepath}data ]; then echo "${solrcorepath} has it's data in deprecated location ${solrcorepath}data, moving to ${solrcorepath}." 
@@ -72,17 +93,19 @@ fi function fixConfig { fail=0 - if cat $1/solrconfig.xml | grep dataDir | grep -qv '/var/solr/${solr.core.name}'; then + if cat $1/solrconfig.xml | grep dataDir | grep -qv "${SOLR_DATA_DIR:-/var/solr}/\${solr.core.name}"; then echo "Found old non lagoon compatible dataDir config in solrconfig.xml:" cat $1/solrconfig.xml | grep dataDir + SOLR_DATA_DIR=${SOLR_DATA_DIR:-/var/solr} + SOLR_DATA_DIR_ESCAPED=${SOLR_DATA_DIR//\//\\/} # escaping the forward slashes with backslashes if [ -w $1/ ]; then - sed -ibak 's/.*/\/var\/solr\/${solr.core.name}<\/dataDir>/' $1/solrconfig.xml + sed -ibak "s/.*/$SOLR_DATA_DIR_ESCAPED\/\${solr.core.name}<\/dataDir>/" $1/solrconfig.xml echo "automagically updated to compatible config: " - echo ' /var/solr/${solr.core.name}' + echo " ${SOLR_DATA_DIR:-/var/solr}/\${solr.core.name}" echo "Please update your solrconfig.xml to make this persistent." else echo "but no write permission to automagically change to compatible config: " - echo ' /var/solr/${solr.core.name}' + echo " ${SOLR_DATA_DIR:-/var/solr}/\${solr.core.name}" echo "Please update your solrconfig.xml and commit again." fail=1 fi diff --git a/images/varnish-drupal/drupal.vcl b/images/varnish-drupal/drupal.vcl index b94c7b4551..02d5e2692d 100644 --- a/images/varnish-drupal/drupal.vcl +++ b/images/varnish-drupal/drupal.vcl @@ -14,10 +14,10 @@ backend default { # Allow purging from localhost # @TODO allow from openshift network acl purge { - "127.0.0.1"; - "10.0.0.0"/8; - "172.16.0.0"/12; - "192.168.0.0"/16; + "127.0.0.1"; + "10.0.0.0"/8; + "172.16.0.0"/12; + "192.168.0.0"/16; } sub vcl_init { @@ -31,52 +31,55 @@ sub vcl_init { # This configuration is optimized for Drupal hosting: # Respond to incoming requests. 
sub vcl_recv { - if (req.url ~ "^/varnish_status$") { + if (req.url ~ "^/varnish_status$") { return (synth(200,"OK")); } # set the backend, which should be used: set req.backend_hint = www_dir.backend("${VARNISH_BACKEND_HOST:-nginx}"); # Always set the forward ip. - if (req.restarts == 0) { - if (req.http.x-forwarded-for) { - set req.http.X-Forwarded-For = req.http.X-Forwarded-For + ", " + client.ip; - } else { - set req.http.X-Forwarded-For = client.ip; - } - } - - + if (req.restarts == 0) { + if (req.http.x-forwarded-for) { + set req.http.X-Forwarded-For = req.http.X-Forwarded-For + ", " + client.ip; + } + else { + set req.http.X-Forwarded-For = client.ip; + } + } if (req.http.X-LAGOON-VARNISH ) { - ## Pass all Requests which are handled via an upstream Varnish + # Pass all Requests which are handled via an upstream Varnish set req.http.X-LAGOON-VARNISH = "${HOSTNAME}-${LAGOON_GIT_BRANCH:-undef}-${LAGOON_PROJECT}, " + req.http.X-LAGOON-VARNISH; set req.http.X-LAGOON-VARNISH-BYPASS = "true"; - } else if (req.http.Fastly-FF) { - ## Pass all Requests which are handled via Fastly + } + else if (req.http.Fastly-FF) { + # Pass all Requests which are handled via Fastly set req.http.X-LAGOON-VARNISH = "${HOSTNAME}-${LAGOON_GIT_BRANCH:-undef}-${LAGOON_PROJECT}, fastly"; set req.http.X-LAGOON-VARNISH-BYPASS = "true"; set req.http.X-Forwarded-For = req.http.Fastly-Client-IP; - } else if (req.http.CF-RAY) { - ## Pass all Requests which are handled via CloudFlare + } + else if (req.http.CF-RAY) { + # Pass all Requests which are handled via CloudFlare set req.http.X-LAGOON-VARNISH = "${HOSTNAME}-${LAGOON_GIT_BRANCH:-undef}-${LAGOON_PROJECT}, cloudflare"; set req.http.X-LAGOON-VARNISH-BYPASS = "true"; set req.http.X-Forwarded-For = req.http.CF-Connecting-IP; - } else if (req.http.X-Pull) { - ## Pass all Requests which are handled via KeyCDN + } + else if (req.http.X-Pull) { + # Pass all Requests which are handled via KeyCDN set req.http.X-LAGOON-VARNISH = 
"${HOSTNAME}-${LAGOON_GIT_BRANCH:-undef}-${LAGOON_PROJECT}, keycdn"; set req.http.X-LAGOON-VARNISH-BYPASS = "true"; - } else { - ## We set a header to let a Varnish Chain know that it already has been varnishcached + } + else { + # We set a header to let a Varnish chain know that it already has been varnishcached set req.http.X-LAGOON-VARNISH = "${HOSTNAME}-${LAGOON_GIT_BRANCH:-undef}-${LAGOON_PROJECT}"; - ## Allow to bypass based on env variable `VARNISH_BYPASS` + # Allow to bypass based on env variable `VARNISH_BYPASS` set req.http.X-LAGOON-VARNISH-BYPASS = "${VARNISH_BYPASS:-false}"; } # Websockets are piped if (req.http.Upgrade ~ "(?i)websocket") { - return (pipe); + return (pipe); } if (req.http.X-LAGOON-VARNISH-BYPASS == "true" || req.http.X-LAGOON-VARNISH-BYPASS == "TRUE") { @@ -98,42 +101,43 @@ sub vcl_recv { # Bypass a cache hit (the request is still sent to the backend) if (req.method == "REFRESH") { - if (!client.ip ~ purge) { return (synth(405, "Not allowed")); } - set req.method = "GET"; - set req.hash_always_miss = true; + if (!client.ip ~ purge) { + return (synth(405, "Not allowed")); + } + set req.method = "GET"; + set req.hash_always_miss = true; } # Only allow BAN requests from IP addresses in the 'purge' ACL. if (req.method == "BAN" || req.method == "URIBAN" || req.method == "PURGE") { - # Only allow BAN from defined ACL - if (!client.ip ~ purge) { - return (synth(403, "Your IP is not allowed.")); - } - - # Only allows BAN if the Host Header has the style of with "${SERVICE_NAME:-varnish}:8080" or "${SERVICE_NAME:-varnish}". 
- # Such a request is only possible from within the Docker network, as a request from external goes trough the Kubernetes Router and for that needs a proper Host Header - if (!req.http.host ~ "^${SERVICE_NAME:-varnish}(:\d+)?$") { - return (synth(403, "Only allowed from within own network.")); - } + # Only allow BAN from defined ACL + if (!client.ip ~ purge) { + return (synth(403, "Your IP is not allowed.")); + } - if (req.method == "BAN") { - # Logic for the ban, using the Cache-Tags header. - if (req.http.Cache-Tags) { - ban("obj.http.Cache-Tags ~ " + req.http.Cache-Tags); - # Throw a synthetic page so the request won't go to the backend. - return (synth(200, "Ban added.")); - } - else { - return (synth(403, "Cache-Tags header missing.")); - } - } + # Only allows BAN if the Host Header has the style of with "${SERVICE_NAME:-varnish}:8080" or "${SERVICE_NAME:-varnish}". + # Such a request is only possible from within the Docker network, as a request from external goes trough the Kubernetes Router and for that needs a proper Host Header + if (!req.http.host ~ "^${SERVICE_NAME:-varnish}(:\d+)?$") { + return (synth(403, "Only allowed from within own network.")); + } - if (req.method == "URIBAN" || req.method == "PURGE") { - ban("req.url ~ " + req.url); + if (req.method == "BAN") { + # Logic for the ban, using the Cache-Tags header. + if (req.http.Cache-Tags) { + ban("obj.http.Cache-Tags ~ " + req.http.Cache-Tags); # Throw a synthetic page so the request won't go to the backend. return (synth(200, "Ban added.")); } + else { + return (synth(403, "Cache-Tags header missing.")); + } + } + if (req.method == "URIBAN" || req.method == "PURGE") { + ban("req.url ~ " + req.url); + # Throw a synthetic page so the request won't go to the backend. 
+ return (synth(200, "Ban added.")); + } } # Non-RFC2616 or CONNECT which is weird, we pipe that @@ -147,23 +151,27 @@ sub vcl_recv { return (pipe); } - # We only try to cache GET and HEAD, other things are passed + # Large binary files are passed. + if (req.url ~ "\.(msi|exe|dmg|zip|tgz|gz|pkg)$") { + return(pass); + } + + # We only try to cache GET and HEAD, other things are passed. if (req.method != "GET" && req.method != "HEAD") { return (pass); } - # Any requests with Basic Auth are passed - if (req.http.Authorization || req.http.Authenticate) - { + # Any requests with Basic Authentication are passed. + if (req.http.Authorization || req.http.Authenticate) { return (pass); } - ## Pass requests which are from blackfire + # Blackfire requests are passed. if (req.http.X-Blackfire-Query) { return (pass); } - # Some URLs should never be cached + # Some URLs should never be cached. if (req.url ~ "^/status\.php$" || req.url ~ "^/update\.php$" || req.url ~ "^/admin([/?]|$).*$" || @@ -176,9 +184,8 @@ sub vcl_recv { return (pass); } - # Plupload likes to get piped - if (req.url ~ "^.*/plupload-handle-uploads.*$" - ) { + # Plupload likes to get piped. + if (req.url ~ "^.*/plupload-handle-uploads.*$") { return (pipe); } @@ -259,31 +266,32 @@ sub vcl_pipe { } sub vcl_hit { - if (obj.ttl >= 0s) { - # normal hit - return (deliver); + if (obj.ttl >= 0s) { + # normal hit + return (deliver); + } + # We have no fresh fish. Lets look at the stale ones. + if (std.healthy(req.backend_hint)) { + # Backend is healthy. If the object is not older then 30secs, deliver it to the client + # and automatically create a separate backend request to warm the cache for this request. + if (obj.ttl + 30s > 0s) { + set req.http.grace = "normal(limited)"; + return (deliver); + } else { + # No candidate for grace. Fetch a fresh object. + return (miss); } - # We have no fresh fish. Lets look at the stale ones. - if (std.healthy(req.backend_hint)) { - # Backend is healthy. 
If the object is not older then 30secs, deliver it to the client - # and automatically create a separate backend request to warm the cache for this request. - if (obj.ttl + 30s > 0s) { - set req.http.grace = "normal(limited)"; - return (deliver); - } else { - # No candidate for grace. Fetch a fresh object. - return(miss); - } + } + else { + # backend is sick - use full grace + if (obj.ttl + obj.grace > 0s) { + set req.http.grace = "full"; + return (deliver); } else { - # backend is sick - use full grace - if (obj.ttl + obj.grace > 0s) { - set req.http.grace = "full"; - return (deliver); - } else { - # no graced object. - return (miss); - } + # no graced object. + return (miss); } + } } sub vcl_backend_response { @@ -295,19 +303,18 @@ sub vcl_backend_response { set beresp.http.X-Host = bereq.http.host; # If the backend sends a X-LAGOON-VARNISH-BACKEND-BYPASS header we directly deliver - if(beresp.http.X-LAGOON-VARNISH-BACKEND-BYPASS == "TRUE") { + if (beresp.http.X-LAGOON-VARNISH-BACKEND-BYPASS == "TRUE") { return (deliver); } # Cache 404 and 403 for 10 seconds - if(beresp.status == 404 || beresp.status == 403) { + if (beresp.status == 404 || beresp.status == 403) { set beresp.ttl = 10s; return (deliver); } # Don't allow static files to set cookies. if (bereq.url ~ "(?i)\.(css|js|jpg|jpeg|gif|ico|png|tiff|tif|img|tga|wmf|swf|html|htm|woff|woff2|mp4|ttf|eot|svg)(\?.*)?$") { - # beresp == Back-end response from the web server. unset beresp.http.set-cookie; unset beresp.http.Cache-Control; @@ -321,6 +328,14 @@ sub vcl_backend_response { set beresp.http.Cache-Control = "public, max-age=${VARNISH_ASSETS_TTL:-2628001}"; set beresp.http.Expires = "" + (now + beresp.ttl); } + + # Files larger than 10 MB get streamed. 
+ if (beresp.http.Content-Length ~ "[0-9]{8,}") { + set beresp.do_stream = true; + set beresp.uncacheable = true; + set beresp.ttl = 120s; + } + # Disable buffering only for BigPipe responses if (beresp.http.Surrogate-Control ~ "BigPipe/1.0") { set beresp.do_stream = true; @@ -359,18 +374,19 @@ sub vcl_deliver { } sub vcl_hash { - hash_data(req.url); - if (req.http.host) { - hash_data(req.http.host); - } else { - hash_data(server.ip); - } - if (req.http.X-Forwarded-Proto) { - hash_data(req.http.X-Forwarded-Proto); - } - if (req.http.HTTPS) { - hash_data(req.http.HTTPS); - } + hash_data(req.url); + if (req.http.host) { + hash_data(req.http.host); + } + else { + hash_data(server.ip); + } + if (req.http.X-Forwarded-Proto) { + hash_data(req.http.X-Forwarded-Proto); + } + if (req.http.HTTPS) { + hash_data(req.http.HTTPS); + } return (lookup); } @@ -387,20 +403,20 @@ sub vcl_synth { # Create our synthetic response synthetic(""); return(deliver); -} + } return (deliver); } sub vcl_backend_error { - # Restart the request, when we have a backend server error, to try another backend. - # Restart max twice. - if (bereq.retries < 2) { - return(retry); - } + # Restart the request, when we have a backend server error, to try another backend. + # Restart max twice. + if (bereq.retries < 2) { + return(retry); + } - set beresp.http.Content-Type = "text/html; charset=utf-8"; - set beresp.http.Retry-After = "5"; - synthetic( {" + set beresp.http.Content-Type = "text/html; charset=utf-8"; + set beresp.http.Retry-After = "5"; + synthetic({" @@ -443,6 +459,6 @@ sub vcl_backend_error { -"} ); - return (deliver); +"}); + return (deliver); } diff --git a/images/varnish/default.vcl b/images/varnish/default.vcl index d5e2336927..6dcd0a8118 100644 --- a/images/varnish/default.vcl +++ b/images/varnish/default.vcl @@ -43,18 +43,31 @@ sub vcl_recv { return (synth(200,"OK")); } + # Large binary files are passed. 
+ if (req.url ~ "\.(msi|exe|dmg|zip|tgz|gz|pkg)$") { + return(pass); + } } sub vcl_backend_response { - # Happens after we have read the response headers from the backend. - # - # Here you clean the response headers, removing silly Set-Cookie headers - # and other mistakes your backend does. + # Happens after we have read the response headers from the backend. + # + # Here you clean the response headers, removing silly Set-Cookie headers + # and other mistakes your backend does. + + # Files larger than 10 MB get streamed. + if (beresp.http.Content-Length ~ "[0-9]{8,}") { + set beresp.do_stream = true; + set beresp.uncacheable = true; + set beresp.ttl = 120s; + } + + return (deliver); } sub vcl_deliver { - # Happens when we have all the pieces we need, and are about to send the - # response to the client. - # - # You can do accounting or modifying the final object here. + # Happens when we have all the pieces we need, and are about to send the + # response to the client. + # + # You can do accounting or modifying the final object here. 
} diff --git a/lagoon-remote/docker-compose.yaml b/lagoon-remote/docker-compose.yaml index 32008d281e..c833ea9320 100644 --- a/lagoon-remote/docker-compose.yaml +++ b/lagoon-remote/docker-compose.yaml @@ -35,61 +35,61 @@ services: lagoon.type: custom lagoon.template: harborclair/harborclair.yml lagoon.name: harborclair - lagoon.image: amazeeiolagoon/harborclair:v1-6-0 + lagoon.image: amazeeiolagoon/harborclair:v1-8-1 harborclairadapter: image: ${IMAGE_REPO:-lagoon}/harborclairadapter labels: lagoon.type: custom lagoon.template: harborclairadapter/harborclair.yml lagoon.name: harborclair - lagoon.image: amazeeiolagoon/harborclairadapter:v1-6-0 + lagoon.image: amazeeiolagoon/harborclairadapter:v1-8-1 harbor-core: image: ${IMAGE_REPO:-lagoon}/harbor-core labels: lagoon.type: custom lagoon.template: harbor-core/harbor-core.yml - lagoon.image: amazeeiolagoon/harbor-core:v1-6-0 + lagoon.image: amazeeiolagoon/harbor-core:v1-8-1 harbor-database: image: ${IMAGE_REPO:-lagoon}/harbor-database labels: lagoon.type: custom lagoon.template: harbor-database/harbor-database.yml - lagoon.image: amazeeiolagoon/harbor-database:v1-6-0 + lagoon.image: amazeeiolagoon/harbor-database:v1-8-1 harbor-jobservice: image: ${IMAGE_REPO:-lagoon}/harbor-jobservice labels: lagoon.type: custom lagoon.template: harbor-jobservice/harbor-jobservice.yml - lagoon.image: amazeeiolagoon/harbor-jobservice:v1-6-0 + lagoon.image: amazeeiolagoon/harbor-jobservice:v1-8-1 harbor-nginx: image: ${IMAGE_REPO:-lagoon}/harbor-nginx labels: lagoon.type: custom lagoon.template: harbor-nginx/harbor-nginx.yml - lagoon.image: amazeeiolagoon/harbor-nginx:v1-6-0 + lagoon.image: amazeeiolagoon/harbor-nginx:v1-8-1 harbor-portal: image: ${IMAGE_REPO:-lagoon}/harbor-portal labels: lagoon.type: custom lagoon.template: harbor-portal/harbor-portal.yml - lagoon.image: amazeeiolagoon/harbor-portal:v1-6-0 + lagoon.image: amazeeiolagoon/harbor-portal:v1-8-1 harbor-redis: image: ${IMAGE_REPO:-lagoon}/harbor-redis labels: lagoon.type: 
custom lagoon.template: harbor-redis/harbor-redis.yml - lagoon.image: amazeeiolagoon/harbor-redis:v1-6-0 + lagoon.image: amazeeiolagoon/harbor-redis:v1-8-1 harborregistry: image: ${IMAGE_REPO:-lagoon}/harborregistry labels: lagoon.type: custom lagoon.template: harborregistry/harborregistry.yml lagoon.name: harborregistry - lagoon.image: amazeeiolagoon/harborregistry:v1-6-0 + lagoon.image: amazeeiolagoon/harborregistry:v1-8-1 harborregistryctl: image: ${IMAGE_REPO:-lagoon}/harborregistryctl labels: lagoon.type: custom lagoon.template: harborregistryctl/harborregistry.yml lagoon.name: harborregistry - lagoon.image: amazeeiolagoon/harborregistryctl:v1-6-0 + lagoon.image: amazeeiolagoon/harborregistryctl:v1-8-1 diff --git a/local-dev/api-data/03-populate-api-data-kubernetes.gql b/local-dev/api-data/03-populate-api-data-kubernetes.gql index d6a389eb16..3020fd9fa3 100644 --- a/local-dev/api-data/03-populate-api-data-kubernetes.gql +++ b/local-dev/api-data/03-populate-api-data-kubernetes.gql @@ -1119,4 +1119,55 @@ mutation PopulateApi { ) { id } + + CiActiveStandby: addProject( + input: { + id: 1024 + name: "ci-active-standby-k8s" + openshift: 1002 + gitUrl: "ssh://git@192.168.42.1:2222/git/active-standby.git" + productionEnvironment:"master-a" + standbyProductionEnvironment:"master-b" + activeSystemsTask: "lagoon_kubernetesJob" + activeSystemsMisc: "lagoon_kubernetesMisc" + activeSystemsDeploy: "lagoon_kubernetesBuildDeploy" + activeSystemsRemove: "lagoon_kubernetesRemove" + privateKey: "-----BEGIN RSA PRIVATE 
KEY-----\nMIIJKAIBAAKCAgEAxGZZrOV7Islo5p51Moabfd1YB8qbHvQZfJDZJmSU4jNxMf8G\nQH8KIM6ndi60xIiDlkh9R50Gs0fgnPaBwpjMjcUay5EvzBJdMmd/EPhg359+4f5Z\nQiGTVaB5UoGJKg9DEK4Xxi+QLpQ1CiJXvd3QOqnQlJZ2WYwz4kdLxF0d+sRrl+0G\nAISt9Gg9kq6wa7k7RRr4/OyD/9DhDr1GXvHXFPRv6QqKq084CqiUaarP7OcbZKi5\nEyMkf0s86ZTttQPqQijWsenLAw6t7J0vM38eojPDMFX4fJ7wVvbHmsdcwb2YxwD0\nk7I85mV5uM99v7owJ/0YSJGcN2YESq8c68rbRp1iSfDhchd9SUyYMFlrj3R+dxte\nTwvN2W5hIEld8Ip+dUWUVaaTeLkFLmqmVhvcMJNmuj+Wnp9USwki6U5HdpgBJPT5\nYJia3/LyE5IlPaRfZ+vBDQqKOeeqkncVtqcZ468ylT0qpqjtV4OSuCzl+P/TeJ+K\npCxDoqp88yQpYRYn9ztvEVvOkT8RERnT0/NVCNKAwHFOXrkK/BJs/h3fj2NddeVC\nJXdwiB4m+T2C/RHtGxVColqUf2nEntXxweuJgqBz+4YxXqRrKu4p5L4NuudcFAyg\nbIRpbaTZDI+vmmXnTXNP6ymMo5fNJ0/IPEBAoDkkc6ZmKdM5Yth6RaCzolsCAwEA\nAQKCAgBRL4n0SaxcFevyISCLMU+HeP8RwXPcGMWMU4ggMcXFt8QcCbK46Ir0QtjA\nps/8yf2zGuYGu2dwps63s8KyAV3VYNwRuEOM1S6HTncdOb850YGuw+h7yMtRwxND\nwt5Db6mmdIDJYRH13zgJp2ajytRv25CoS4ZCwA0WhahRVLi214kudUFc53vNI2YI\ng4PUE+7nQx4X12E9V0ghQqabStdBB0ZXjA8Ef6vH5CXthDmNUX9mXcSbn5RPtndI\ni1Kz2Bl3HdCaHO3ZprDItbU0UWEFZeZSzcb5JO5u1HZwiebTA5/q638uqqg4CUyG\n0+bEYZ/Ud/AY13A/CkHN6ZNH+UQotCH3GLyaTQq6OhyXlgMBojn3xs9iMUclFcuy\nkbZ1jAxqci25pxCIeNDHBDKRyxgSkDPna8ZZ4eKGXjIZzsds4+IDkYJLMg0OCtd2\nKm+ZPM2CFU2YCqt11dlr0higGK/9gdpajJMVgEYAmigQ670LdcBc4XIOpPMrR70a\nPjbF9ze/UqtKDWtz8KMIUcvr0CjhHa3XRgMJiM34hIOZU6xf6rjEgkN2Geq24u1b\nhUW8URb+5dcasQ9iIfkNn3R+zK5PzyvQNf6+XrbVjpLqPHXJYrD85EKlXkPqiE6y\n3ehYMrIaZIY6gMuIVcg8AEtsm5EwQY7ETw4YnMQLKfyweLsHEQKCAQEA5vavDyQj\nn6PqfF1Ntr3N/FfwRr/167HB+/3wHT+FwRpCw62ACeBC0iFXmzG2wfQfot60fUGa\nQEJiUDGZFvBM0tiTpzmgGG7QpRbPe1919Sl5LZeLA9bngRnmqn5zAkmVTeBCO/4F\nMSk9hnBZ0v0W3DqNmjuIH298g3gl4VJeKpILd62LbhjvhjT/LXlekYDEj3p9Xob8\n1OQxFJgftFyg4P07RRaUzsNLhqEdY3VxDcUMb9459fEYeb7sYig+zVPaZQ31aMVK\nj6K+XiH5M5uKJWkPdMDUG84jreFAdBY9kGCuSulTWgmTLlMKSI85q5hkckY2EQQL\n5u456xfyuFcnEwKCAQEA2bCCcqcGIAiMwk/6z7XIJeUsSUg+ObvkEdogk5n6Y1Ea\nt5LxMpQzhG6SHJ2f38VyKgv9e/jnwXI8eiejper6OeQEBG4+AedcLYi0V5SUMIgD\nX4bxT9+qCwYrwt9YHkJySk237WZUWJPVfxHg0vqNYyD/CXBowx0nm8jEuZ8iT+CW\nO2uZq+0DO2
WqoYT54lZux6aEzm+oAkzwJJVXJcUVPg7bJXK1ObOzvHpkZJxHL8+S\nKufzew/CXALKWHoCkEP/P8b7oxjcjQI3KK0EM2fABNvN28+qscqTqQjfAsNw24Ob\nP8rL8amgd/x7iddIbEpOSoLAH1gVoxJXA0oqkC6YmQKCAQEAiIeoomW1nUgTdCLf\nrrfyzUnaoqgVIpf42RKa319OnQD+GJg2iSAFwBuvovE3XN4H2YqW3Thri7LyVP+M\nxM+WSkl2tzLMXcUcF4staXvbyeoTVQ0xQzrFrT53aa/IIsEGvntkC/y0awQ6937w\nylWMLvF6BYNNi2+nBjx+//xl5/pfRwbS1mltJkOr2ODXM2IQT9STyc44JU0jak4m\n58Kd44IuiD+6BaPSwKn7KnEhPIeQO4z9bFJyKn3fVIL/5Pa9smEXAjEmS1Rj/ldM\n7eHzPvwlA9p9SFaKJt5x8G25ROCyB1x4RlBEreyutofcCoDSV+8DRPnEY2XN3RhS\nBgCW+wKCAQAyHrqaDSyS2YUXA/UaN59CYlZk9PYmssTa+16+vRfyH+1H0NQpsgd+\neTq4ze3ORpZ3adVN4svxNQ0JjvDVtZkxFPd70aoDJDKL5UqoU3QfDGHCb75FhpiO\n+ze+IVAXf3Ly+pvbU9Edp8PjAsnBqaA9743orXHhYmgJLRtmJWZv/6R3P9ncxLAW\nz9yOXaBu5TmSTBuNsBV9mhs8QQoUjyDJ7f+0yolBJMfAIGnW5EhbkK31pPvhHIrC\nRn4yCr1mW9F77KNpNMMq0BTFD7jE4SFLvRPThSem0Z5Xr8wwxbDJSa7H7DgyhryE\ng6Qp42AwVpdZ/mqfjNdGeWWBQ2UzVxcZAoIBAHNXgjD3umcKciYRAbltNJrXx9xk\ndAv8I69oEMCy4hCmvKUjy/UI9NqXFjBb/G6VGgh6NUE9o9o27t1Y5Plm0izyCA1M\nDFruaRfjyJ8qjbEifcqRtcF3rzsBiXIwdmbN6qT4PUipN2elpUAd7J1OIwGIIe3u\nCWNyOTNL+2+oQ/Eg1Y99mg3yrsVyOwhynVE80/X5cy07bXXR5rv1x4NKSVbPhlnt\nL6J5iAoqoDKICzjcgF5x3mj9YFWZrC3aRxRrN5RoEgeVdcXeK56UJqXHjmKN++m3\nc8OPEIBZiD8UJuhSNSOLiBFrGz6toy6rpHavqqknGhVWotXsAs1h8LNkBe8=\n-----END RSA PRIVATE KEY-----" + } + ) { + id + } + CiActiveStandbyGroup3: addGroupsToProject( + input: { + project: { + name: "ci-active-standby-k8s" + } + groups: [ + { + name: "ci-group" + } + ] + } + ) { + id + } + CiActiveStandbyRocketChat: addNotificationToProject( + input: { + project: "ci-active-standby-k8s" + notificationType: ROCKETCHAT + notificationName: "amazeeio--lagoon-local-ci" + } + ) { + id + } + CiActiveStandbyEmail: addNotificationToProject( + input: { + project: "ci-active-standby-k8s" + notificationType: EMAIL + notificationName: "local-email-testing" + } + ) { + id + } + } diff --git a/mkdocs.yml b/mkdocs.yml index bbf3eb7f67..a2ef1d6f68 100644 --- a/mkdocs.yml +++ b/mkdocs.yml @@ -76,9 +76,10 @@ pages: - Development: - 
Developing Lagoon: developing_lagoon/index.md - Tests: developing_lagoon/tests.md - - Contributing: developing_lagoon/contributing.md - API Debugging: developing_lagoon/api-debugging.md - - Code Of Conduct: developing_lagoon/code_of_conduct.md +- Code Of Conduct: code_of_conduct.md +- Contributing: contributing.md + theme: readthedocs markdown_extensions: diff --git a/node-packages/commons/src/api.ts b/node-packages/commons/src/api.ts index c33f6a3192..6e8d0af8c9 100644 --- a/node-packages/commons/src/api.ts +++ b/node-packages/commons/src/api.ts @@ -105,7 +105,7 @@ const options = { const transport = new Transport(`${API_HOST}/graphql`, options); -const graphqlapi = new Lokka({ transport }); +export const graphqlapi = new Lokka({ transport }); class ProjectNotFound extends Error { constructor(message) { @@ -1000,7 +1000,9 @@ export const getOpenShiftInfoForProject = (project: string): Promise => token projectUser routerPattern + monitoringConfig } + availability gitUrl privateKey subfolder @@ -1020,6 +1022,20 @@ export const getOpenShiftInfoForProject = (project: string): Promise => } `); +export const getBillingGroupForProject = (project: string): Promise => + graphqlapi.query(` + { + project:projectByName(name: "${project}"){ + groups { + ... 
on BillingGroup { + type + uptimeRobotStatusPageId + } + } + } + } +`); + interface GetEnvironentsForProjectEnvironmentResult { name: string; environmentType: EnvType; @@ -1281,5 +1297,154 @@ export const getGroupMembersByGroupName = groupName => } } }`, - { name: groupName } - ); + { name: groupName } +); + +export const addProblem = ({ + id = null, + environment, + identifier, + severity, + source, + severityScore, + data, + service, + associatedPackage, + description, + version, + fixedVersion, + links +}) => { + return graphqlapi.mutate( + `($id: Int, + $environment: Int!, + $identifier: String!, + $severity: ProblemSeverityRating!, + $source: String!, + $severityScore: SeverityScore, + $data: String!, + $service: String, + $associatedPackage: String, + $description: String, + $version: String, + $fixedVersion: String, + $links: String) { + addProblem(input: { + id: $id + environment: $environment + identifier: $identifier + severity: $severity + source: $source + severityScore: $severityScore + data: $data + service: $service + associatedPackage: $associatedPackage + description: $description + version: $version + fixedVersion: $fixedVersion + links: $links + }) { + id + environment { + id + } + identifier + severity + source + severityScore + data + associatedPackage + description + version + fixedVersion + links + } + }`, + { + id, + environment, + identifier, + severity, + source, + severityScore, + data, + service, + associatedPackage, + description, + version, + fixedVersion, + links + }, +)}; + +export const deleteProblemsFromSource = ( + environment, + source, + service +) => { + return graphqlapi.mutate( + `($environment: Int!, $source: String!, $service: String!) 
{ + deleteProblemsFromSource(input: {environment: $environment, source: $source, service: $service }) + } + `, + { + environment, + source, + service + } + )}; + +const problemFragment = graphqlapi.createFragment(` +fragment on Problem { + id + severity + severityScore + identifier + service + source + associatedPackage + description + links + version + fixedVersion + data + created + deleted +} +`); + +export const getProblemsforProjectEnvironment = async ( + environmentName, + project +) => { + const response = await graphqlapi.query( + `query getProject($environmentName: String!, $project: Int!) { + environmentByName(name: $environmentName, project: $project) { + id + name + problems { + ...${problemFragment} + } + } + }` + , + { + environmentName, + project + }); + return response.environmentByName.problems; +}; + +export const getProblemHarborScanMatches = () => graphqlapi.query( + `query getProblemHarborScanMatches { + allProblemHarborScanMatchers { + id + name + description + defaultLagoonProject + defaultLagoonEnvironment + defaultLagoonService + regex + } + }` +); \ No newline at end of file diff --git a/node-packages/commons/src/harborApi.ts b/node-packages/commons/src/harborApi.ts new file mode 100644 index 0000000000..0bb146e2ba --- /dev/null +++ b/node-packages/commons/src/harborApi.ts @@ -0,0 +1,30 @@ +// @flow + +import axios from 'axios'; +import * as R from 'ramda'; + +const HARBOR_BASE_API_URL = + process.env.HARBOR_BASE_API_URL || + 'https://harbor-nginx-lagoon-master.ch.amazee.io/api/repositories/'; +const HARBOR_BASE_URL_POSTFIX = '/tags/latest/scan'; +const HARBOR_ACCEPT_HEADER = + 'application/vnd.scanner.adapter.vuln.report.harbor+json; version=1.0'; +const HARBOR_USERNAME = process.env.HARBOR_USERNAME || 'admin'; +const HARBOR_PASSWORD = process.env.HARBOR_ADMIN_PASSWORD; + +export const getVulnerabilitiesPayloadFromHarbor = async (repoFullName) => { + const endpoint = + HARBOR_BASE_API_URL + repoFullName + HARBOR_BASE_URL_POSTFIX; + const 
options = { + timeout: 30000, + headers: { + Accept: HARBOR_ACCEPT_HEADER, + Authorization: + 'Basic ' + + Buffer.from(HARBOR_USERNAME + ':' + (HARBOR_PASSWORD)).toString('base64'), + }, + }; + + const response = await axios.get(endpoint, options); + return response.data; +}; diff --git a/node-packages/commons/src/openshiftApi.ts b/node-packages/commons/src/openshiftApi.ts index 1012971d34..43c23ea8b8 100644 --- a/node-packages/commons/src/openshiftApi.ts +++ b/node-packages/commons/src/openshiftApi.ts @@ -18,7 +18,7 @@ export class RouteMigration extends ApiGroup { path: 'apis/dioscuri.amazee.io', version: options.version || 'v1', groupResources: [], - namespaceResources: ['routemigrates'], + namespaceResources: ['routemigrates','ingressmigrates'], }); super(options); } diff --git a/node-packages/commons/src/tasks.ts b/node-packages/commons/src/tasks.ts index e02d9e3668..fb61685893 100644 --- a/node-packages/commons/src/tasks.ts +++ b/node-packages/commons/src/tasks.ts @@ -71,6 +71,9 @@ const rabbitmqHost = process.env.RABBITMQ_HOST || 'broker'; const rabbitmqUsername = process.env.RABBITMQ_USERNAME || 'guest'; const rabbitmqPassword = process.env.RABBITMQ_PASSWORD || 'guest'; +const taskPrefetch = process.env.TASK_PREFETCH_COUNT ? Number(process.env.TASK_PREFETCH_COUNT) : 2; +const taskMonitorPrefetch = process.env.TASKMONITOR_PREFETCH_COUNT ? 
Number(process.env.TASKMONITOR_PREFETCH_COUNT) : 1; + class UnknownActiveSystem extends Error { constructor(message) { super(message); @@ -761,7 +764,7 @@ export const consumeTasks = async function( 'lagoon-tasks', taskQueueName ), - channel.prefetch(2), + channel.prefetch(taskPrefetch), channel.consume(`lagoon-tasks:${taskQueueName}`, onMessage, { noAck: false }) @@ -793,7 +796,13 @@ export const consumeTaskMonitor = async function( return; } - const retryDelayMilisecs = 5000; + let retryDelaySecs = 5; + + if (error.delayFn) { + retryDelaySecs = error.delayFn(retryCount); + } + + const retryDelayMilisecs = retryDelaySecs * 1000; // copying options from the original message const retryMsgOptions = { @@ -834,7 +843,7 @@ export const consumeTaskMonitor = async function( 'lagoon-tasks-monitor', taskMonitorQueueName ), - channel.prefetch(1), + channel.prefetch(taskMonitorPrefetch), channel.consume( `lagoon-tasks-monitor:${taskMonitorQueueName}`, onMessage, diff --git a/services/api-db/docker-entrypoint-initdb.d/00-tables.sql b/services/api-db/docker-entrypoint-initdb.d/00-tables.sql index f396532e71..9d5979fea9 100644 --- a/services/api-db/docker-entrypoint-initdb.d/00-tables.sql +++ b/services/api-db/docker-entrypoint-initdb.d/00-tables.sql @@ -29,15 +29,16 @@ CREATE TABLE IF NOT EXISTS customer ( ); CREATE TABLE IF NOT EXISTS openshift ( - id int NOT NULL auto_increment PRIMARY KEY, - name varchar(50) UNIQUE, - console_url varchar(300), - token varchar(2000), - router_pattern varchar(300), - project_user varchar(100), - ssh_host varchar(300), - ssh_port varchar(50), - created timestamp DEFAULT CURRENT_TIMESTAMP + id int NOT NULL auto_increment PRIMARY KEY, + name varchar(50) UNIQUE, + console_url varchar(300), + token varchar(2000), + router_pattern varchar(300), + project_user varchar(100), + ssh_host varchar(300), + ssh_port varchar(50), + monitoring_config varchar(2048), + created timestamp DEFAULT CURRENT_TIMESTAMP ); CREATE TABLE IF NOT EXISTS 
notification_microsoftteams ( @@ -89,6 +90,7 @@ CREATE TABLE IF NOT EXISTS project ( standby_alias varchar(100) NOT NULL DEFAULT 'lagoon-standby', auto_idle int(1) NOT NULL default 1, storage_calc int(1) NOT NULL default 1, + problems_ui int(1) NOT NULL default 0, openshift int REFERENCES openshift (id), openshift_project_pattern varchar(300), development_environments_limit int DEFAULT NULL, @@ -98,14 +100,16 @@ CREATE TABLE IF NOT EXISTS project ( CREATE TABLE IF NOT EXISTS billing_modifier ( id int NOT NULL auto_increment PRIMARY KEY, - group_id varchar(36), + group_id varchar(36), weight int NOT NULL DEFAULT 0, start_date datetime NOT NULL DEFAULT CURRENT_TIMESTAMP, end_date datetime NOT NULL DEFAULT CURRENT_TIMESTAMP, - discount_fixed DECIMAL NULL DEFAULT 0.0, - discount_percentage FLOAT NULL DEFAULT 0.0, - extra_fixed DECIMAL NULL DEFAULT 0.0, - extra_percentage FLOAT NULL DEFAULT 0.0, + discount_fixed DECIMAL NULL DEFAULT 0, + discount_percentage FLOAT NULL DEFAULT 0, + extra_fixed DECIMAL NULL DEFAULT 0, + extra_percentage FLOAT NULL DEFAULT 0, + min FLOAT NULL DEFAULT 0, + max FLOAT NULL DEFAULT 0, customer_comments text, admin_comments text ); @@ -207,6 +211,35 @@ CREATE TABLE IF NOT EXISTS s3_file ( deleted datetime NOT NULL DEFAULT '0000-00-00 00:00:00' ); +CREATE TABLE IF NOT EXISTS environment_problem ( + id int NOT NULL auto_increment PRIMARY KEY, + environment int REFERENCES environment (id), + severity varchar(300) DEFAULT '', + severity_score DECIMAL(1,1) DEFAULT 0.0, + identifier varchar(300) NOT NULL, + lagoon_service varchar(300) DEFAULT '', + source varchar(300) DEFAULT '', + associated_package varchar(300) DEFAULT '', + description TEXT NULL DEFAULT '', + version varchar(300) DEFAULT '', + fixed_version varchar(300) DEFAULT '', + links varchar(300) DEFAULT '', + data JSON, + created timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP, + deleted timestamp NOT NULL DEFAULT '0000-00-00 00:00:00', + UNIQUE(environment, lagoon_service, version, 
identifier, deleted) +); + +CREATE TABLE IF NOT EXISTS problem_harbor_scan_matcher ( + id int NOT NULL auto_increment PRIMARY KEY, + name varchar(100) NOT NULL, + description text NULL, + default_lagoon_project varchar(300) NULL, + default_lagoon_environment varchar(300) NULL, + default_lagoon_service_name varchar(300) NULL, + regex varchar(300) NOT NULL +); + -- Junction Tables CREATE TABLE IF NOT EXISTS project_notification ( diff --git a/services/api-db/docker-entrypoint-initdb.d/01-migrations.sql b/services/api-db/docker-entrypoint-initdb.d/01-migrations.sql index 16ad2bbbf5..14f8c34eba 100644 --- a/services/api-db/docker-entrypoint-initdb.d/01-migrations.sql +++ b/services/api-db/docker-entrypoint-initdb.d/01-migrations.sql @@ -30,6 +30,7 @@ CREATE OR REPLACE PROCEDURE IN standby_alias varchar(100), IN auto_idle int(1), IN storage_calc int(1), + IN problems_ui int(1), IN development_environments_limit int ) BEGIN @@ -73,6 +74,7 @@ CREATE OR REPLACE PROCEDURE standby_alias, auto_idle, storage_calc, + problems_ui, pullrequests, openshift, openshift_project_pattern, @@ -99,6 +101,7 @@ CREATE OR REPLACE PROCEDURE standby_alias, auto_idle, storage_calc, + problems_ui, pullrequests, os.id, openshift_project_pattern, @@ -1042,6 +1045,68 @@ CREATE OR REPLACE PROCEDURE END; $$ +CREATE OR REPLACE PROCEDURE + add_monitoring_config_to_openshift() + + BEGIN + IF NOT EXISTS ( + SELECT NULL + FROM INFORMATION_SCHEMA.COLUMNS + WHERE + table_name = 'openshift' + AND table_schema = 'infrastructure' + AND column_name = 'monitoring_config' + ) THEN + ALTER TABLE `openshift` + ADD `monitoring_config` varchar(2048); + END IF; + END; +$$ + +CREATE OR REPLACE PROCEDURE + add_additional_harbor_scan_fields_to_environment_problem() + + BEGIN + IF NOT EXISTS( + SELECT NULL + FROM INFORMATION_SCHEMA.COLUMNS + WHERE + table_name = 'environment_problem' + AND table_schema = 'infrastructure' + AND column_name = 'associated_package' + ) THEN + ALTER TABLE `environment_problem` + ADD 
`associated_package` varchar(300) DEFAULT '', + ADD `description` TEXT NULL DEFAULT '', + ADD `version` varchar(300) DEFAULT '', + ADD `fixed_version` varchar(300) DEFAULT '', + ADD `links` varchar(300) DEFAULT ''; + ALTER TABLE `environment_problem` + DROP INDEX environment; + ALTER TABLE `environment_problem` + ADD UNIQUE KEY `environment` (`environment`, `lagoon_service`, `version`, `identifier`, `deleted`); + END IF; + END; +$$ + +CREATE OR REPLACE PROCEDURE + add_problems_ui_to_project() + + BEGIN + IF NOT EXISTS( + SELECT NULL + FROM INFORMATION_SCHEMA.COLUMNS + WHERE + table_name = 'project' + AND table_schema = 'infrastructure' + AND column_name = 'problems_ui' + ) THEN + ALTER TABLE `project` + ADD `problems_ui` int(1) NOT NULL default '0'; + END IF; + END; +$$ + CREATE OR REPLACE PROCEDURE update_user_password() @@ -1062,6 +1127,25 @@ CREATE OR REPLACE PROCEDURE END; $$ +CREATE OR REPLACE PROCEDURE + add_min_max_to_billing_modifier() + + BEGIN + IF NOT EXISTS ( + SELECT NULL + FROM INFORMATION_SCHEMA.COLUMNS + WHERE + table_name = 'billing_modifier' + AND table_schema = 'infrastructure' + AND column_name = 'min' + ) THEN + ALTER TABLE `billing_modifier` + ADD `min` FLOAT DEFAULT 0, + ADD `max` FLOAT DEFAULT 0; + END IF; + END; +$$ + DELIMITER ; -- If adding new procedures, add them to the bottom of this list @@ -1102,6 +1186,7 @@ CALL convert_user_ssh_key_usid_to_char(); CALL add_private_key_to_project(); CALL add_index_for_environment_backup_environment(); CALL add_enum_email_microsoftteams_to_type_in_project_notification(); +CALL add_monitoring_config_to_openshift(); CALL add_standby_production_environment_to_project(); CALL add_standby_routes_to_project(); CALL add_production_routes_to_project(); @@ -1110,8 +1195,11 @@ CALL add_production_alias_to_project(); CALL add_active_systems_misc_to_project(); CALL add_container_registry_scope_to_env_vars(); CALL add_internal_container_registry_scope_to_env_vars(); +CALL 
add_additional_harbor_scan_fields_to_environment_problem(); CALL update_user_password(); +CALL add_problems_ui_to_project(); CALL add_metadata_to_project(); +CALL add_min_max_to_billing_modifier(); -- Drop legacy SSH key procedures DROP PROCEDURE IF EXISTS CreateProjectSshKey; @@ -1119,4 +1207,3 @@ DROP PROCEDURE IF EXISTS DeleteProjectSshKey; DROP PROCEDURE IF EXISTS CreateCustomerSshKey; DROP PROCEDURE IF EXISTS DeleteCustomerSshKey; DROP PROCEDURE IF EXISTS CreateSshKey; - diff --git a/services/api-db/docker-entrypoint-initdb.d/03-procedures.sql b/services/api-db/docker-entrypoint-initdb.d/03-procedures.sql index 7055e731dd..ff1c39b6bc 100644 --- a/services/api-db/docker-entrypoint-initdb.d/03-procedures.sql +++ b/services/api-db/docker-entrypoint-initdb.d/03-procedures.sql @@ -35,6 +35,7 @@ CREATE OR REPLACE PROCEDURE IN standby_alias varchar(100), IN auto_idle int(1), IN storage_calc int(1), + IN problems_ui int(1), IN development_environments_limit int ) BEGIN @@ -73,6 +74,7 @@ CREATE OR REPLACE PROCEDURE standby_routes, standby_alias, auto_idle, + problems_ui, storage_calc, pullrequests, openshift, @@ -99,6 +101,7 @@ CREATE OR REPLACE PROCEDURE standby_routes, standby_alias, auto_idle, + problems_ui, storage_calc, pullrequests, os.id, @@ -282,14 +285,15 @@ $$ CREATE OR REPLACE PROCEDURE CreateOpenshift ( - IN id int, - IN name varchar(50), - IN console_url varchar(300), - IN token varchar(2000), - IN router_pattern varchar(300), - IN project_user varchar(100), - IN ssh_host varchar(300), - IN ssh_port varchar(50) + IN id int, + IN name varchar(50), + IN console_url varchar(300), + IN token varchar(2000), + IN router_pattern varchar(300), + IN project_user varchar(100), + IN ssh_host varchar(300), + IN ssh_port varchar(50), + IN monitoring_config varchar(2048) ) BEGIN DECLARE new_oid int; @@ -306,7 +310,8 @@ CREATE OR REPLACE PROCEDURE router_pattern, project_user, ssh_host, - ssh_port + ssh_port, + monitoring_config ) VALUES ( id, name, @@ -315,7 +320,8 @@ 
CREATE OR REPLACE PROCEDURE router_pattern, project_user, ssh_host, - ssh_port + ssh_port, + monitoring_config ); IF (id = 0) THEN diff --git a/services/api-redis/.lagoon.app.yml b/services/api-redis/.lagoon.app.yml new file mode 100644 index 0000000000..e4ec8adba4 --- /dev/null +++ b/services/api-redis/.lagoon.app.yml @@ -0,0 +1,130 @@ +apiVersion: v1 +kind: Template +metadata: + creationTimestamp: null + name: lagoon-openshift-template-redis +parameters: + - name: SERVICE_NAME + description: Name of this service + required: true + - name: SAFE_BRANCH + description: Which branch this belongs to, special chars replaced with dashes + required: true + - name: SAFE_PROJECT + description: Which project this belongs to, special chars replaced with dashes + required: true + - name: BRANCH + description: Which branch this belongs to, original value + required: true + - name: PROJECT + description: Which project this belongs to, original value + required: true + - name: LAGOON_GIT_SHA + description: git hash sha of the current deployment + required: true + - name: SERVICE_ROUTER_URL + description: URL of the Router for this service + value: "" + - name: OPENSHIFT_PROJECT + description: Name of the Project that this service is in + required: true + - name: REGISTRY + description: Registry where Images are pushed to + required: true + - name: DEPLOYMENT_STRATEGY + description: Strategy of Deploymentconfig + value: "Rolling" + - name: SERVICE_IMAGE + description: Pullable image of service + required: true + - name: CRONJOBS + description: Oneliner of Cronjobs + value: "" + - name: ENVIRONMENT_TYPE + description: production level of this environment + value: 'production' + - name: CONFIG_MAP_SHA + description: SHA sum of the configmap + value: '' +objects: +- apiVersion: v1 + kind: DeploymentConfig + metadata: + creationTimestamp: null + labels: + service: ${SERVICE_NAME} + branch: ${SAFE_BRANCH} + project: ${SAFE_PROJECT} + name: ${SERVICE_NAME} + spec: + replicas: 1 + 
selector: + service: ${SERVICE_NAME} + strategy: + type: ${DEPLOYMENT_STRATEGY} + template: + metadata: + creationTimestamp: null + labels: + service: ${SERVICE_NAME} + branch: ${SAFE_BRANCH} + project: ${SAFE_PROJECT} + annotations: + lagoon.sh/configMapSha: ${CONFIG_MAP_SHA} + spec: + priorityClassName: lagoon-priority-${ENVIRONMENT_TYPE} + containers: + - image: ${SERVICE_IMAGE} + name: ${SERVICE_NAME} + ports: + - containerPort: 6379 + protocol: TCP + readinessProbe: + tcpSocket: + port: 6379 + initialDelaySeconds: 15 + timeoutSeconds: 1 + livenessProbe: + tcpSocket: + port: 6379 + initialDelaySeconds: 120 + periodSeconds: 10 + envFrom: + - configMapRef: + name: lagoon-env + env: + - name: SERVICE_NAME + value: ${SERVICE_NAME} + - name: CRONJOBS + value: ${CRONJOBS} + - name: REDIS_PASSWORD + valueFrom: + secretKeyRef: + name: api-redis-password + key: API_REDIS_PASSWORD + resources: + requests: + cpu: 10m + memory: 10Mi + test: false + triggers: + - type: ConfigChange +- apiVersion: v1 + kind: Service + metadata: + creationTimestamp: null + labels: + service: ${SERVICE_NAME} + branch: ${SAFE_BRANCH} + project: ${SAFE_PROJECT} + name: ${SERVICE_NAME} + spec: + ports: + - name: 6379-tcp + port: 6379 + protocol: TCP + targetPort: 6379 + selector: + service: ${SERVICE_NAME} + status: + loadBalancer: {} diff --git a/services/api-redis/Dockerfile b/services/api-redis/Dockerfile new file mode 100644 index 0000000000..3392398948 --- /dev/null +++ b/services/api-redis/Dockerfile @@ -0,0 +1,4 @@ +ARG IMAGE_REPO +FROM ${IMAGE_REPO:-lagoon}/redis + +ENV REDIS_PASSWORD=admin \ No newline at end of file diff --git a/services/api/.lagoon.app.yml b/services/api/.lagoon.app.yml index fd87b0aa1c..005463723f 100644 --- a/services/api/.lagoon.app.yml +++ b/services/api/.lagoon.app.yml @@ -124,11 +124,21 @@ objects: secretKeyRef: name: keycloak-api-client-secret key: KEYCLOAK_API_CLIENT_SECRET + - name: HARBOR_ADMIN_PASSWORD + valueFrom: + secretKeyRef: + name: 
harbor-admin-password + key: HARBOR_ADMIN_PASSWORD - name: API_DB_PASSWORD valueFrom: secretKeyRef: name: api-db-password key: API_DB_PASSWORD + - name: REDIS_PASSWORD + valueFrom: + secretKeyRef: + name: api-redis-password + key: API_REDIS_PASSWORD - name: SERVICE_NAME value: ${SERVICE_NAME} - name: CRONJOBS diff --git a/services/api/Dockerfile b/services/api/Dockerfile index 8225d75107..ba213b25ca 100644 --- a/services/api/Dockerfile +++ b/services/api/Dockerfile @@ -27,7 +27,9 @@ ENV NODE_ENV=production \ KEYCLOAK_ADMIN_PASSWORD=admin \ ELASTICSEARCH_HOST=logs-db-service:9200 \ ELASTICSEARCH_URL=http://logs-db-service:9200 \ - KEYCLOAK_API_CLIENT_SECRET=39d5282d-3684-4026-b4ed-04bbc034b61a + KEYCLOAK_API_CLIENT_SECRET=39d5282d-3684-4026-b4ed-04bbc034b61a \ + HARBOR_ADMIN_PASSWORD=admin \ + REDIS_PASSWORD=admin # The API is not very resilient to sudden mariadb restarts which can happen when the api and mariadb are starting # at the same time. So we have a small entrypoint which waits for mariadb to be fully ready. 
diff --git a/services/api/package.json b/services/api/package.json index 1b4aa8f28c..b9266c048d 100644 --- a/services/api/package.json +++ b/services/api/package.json @@ -23,8 +23,14 @@ "sync:harbor:projects": "node dist/migrations/2-harbor/harborSync.js" }, "nodemonConfig": { - "ignore": ["*.test.js", "../../node-packages/commons/dist/"], - "watch": ["src", "../../node-packages/"], + "ignore": [ + "*.test.js", + "../../node-packages/commons/dist/" + ], + "watch": [ + "src", + "../../node-packages/" + ], "ext": "js,ts,json", "exec": "yarn build --incremental && yarn start --inspect=0.0.0.0:9229" }, @@ -33,7 +39,8 @@ "license": "MIT", "dependencies": { "@lagoon/commons": "4.0.0", - "apollo-server-express": "^2.2.5", + "@types/redis": "^2.8.22", + "apollo-server-express": "^2.14.2", "aws-sdk": "^2.378.0", "body-parser": "^1.18.2", "camelcase-keys": "^4.2.0", @@ -60,8 +67,10 @@ "mariasql": "^0.2.6", "moment": "^2.24.0", "morgan": "^1.9.0", + "newrelic": "^6.9.0", "node-cache": "^4.2.1", "ramda": "^0.25.0", + "redis": "^3.0.2", "snakecase-keys": "^1.2.0", "sshpk": "^1.14.2", "validator": "^10.8.0", diff --git a/services/api/src/apolloServer.js b/services/api/src/apolloServer.js index 4fcc62484f..b0ab7b474c 100644 --- a/services/api/src/apolloServer.js +++ b/services/api/src/apolloServer.js @@ -2,16 +2,19 @@ const R = require('ramda'); const { ApolloServer, AuthenticationError, - makeExecutableSchema, + makeExecutableSchema } = require('apollo-server-express'); const NodeCache = require('node-cache'); +const gql = require('graphql-tag'); +const newrelic = require('newrelic'); const { getCredentialsForLegacyToken, getGrantForKeycloakToken, legacyHasPermission, - keycloakHasPermission, + keycloakHasPermission } = require('./util/auth'); const { getSqlClient } = require('./clients/sqlClient'); +const redisClient = require('./clients/redisClient'); const { getKeycloakAdminClient } = require('./clients/keycloak-admin'); const logger = require('./logger'); const typeDefs = 
require('./typeDefs'); @@ -54,7 +57,7 @@ const apolloServer = new ApolloServer({ if (!grant) { legacyCredentials = await getCredentialsForLegacyToken( sqlClientLegacy, - token, + token ); sqlClientLegacy.end(); } @@ -66,7 +69,7 @@ const apolloServer = new ApolloServer({ const keycloakAdminClient = await getKeycloakAdminClient(); const requestCache = new NodeCache({ stdTTL: 0, - checkperiod: 0, + checkperiod: 0 }); const sqlClient = getSqlClient(); @@ -80,12 +83,18 @@ const apolloServer = new ApolloServer({ keycloakGrant: grant, requestCache, models: { - UserModel: User.User({ keycloakAdminClient }), - GroupModel: Group.Group({ keycloakAdminClient }), - BillingModel: BillingModel.BillingModel({ keycloakAdminClient, sqlClient }), - ProjectModel: ProjectModel.ProjectModel({ keycloakAdminClient, sqlClient }), + UserModel: User.User({ keycloakAdminClient, redisClient }), + GroupModel: Group.Group({ keycloakAdminClient, sqlClient, redisClient }), + BillingModel: BillingModel.BillingModel({ + keycloakAdminClient, + sqlClient + }), + ProjectModel: ProjectModel.ProjectModel({ + keycloakAdminClient, + sqlClient + }), EnvironmentModel: EnvironmentModel.EnvironmentModel({ sqlClient }) - }, + } }; }, onDisconnect: (websocket, context) => { @@ -95,14 +104,14 @@ const apolloServer = new ApolloServer({ if (context.requestCache) { context.requestCache.flushAll(); } - }, + } }, context: async ({ req, connection }) => { // Websocket requests if (connection) { // onConnect must always provide connection.context. return { - ...connection.context, + ...connection.context }; } @@ -111,10 +120,10 @@ const apolloServer = new ApolloServer({ const keycloakAdminClient = await getKeycloakAdminClient(); const requestCache = new NodeCache({ stdTTL: 0, - checkperiod: 0, + checkperiod: 0 }); - const sqlClient = getSqlClient() + const sqlClient = getSqlClient(); return { keycloakAdminClient, @@ -123,18 +132,27 @@ const apolloServer = new ApolloServer({ ? 
keycloakHasPermission( req.kauth.grant, requestCache, - keycloakAdminClient, + keycloakAdminClient ) : legacyHasPermission(req.legacyCredentials), keycloakGrant: req.kauth ? req.kauth.grant : null, requestCache, models: { - UserModel: User.User({ keycloakAdminClient }), - GroupModel: Group.Group({ keycloakAdminClient, sqlClient }), - BillingModel: BillingModel.BillingModel({ keycloakAdminClient, sqlClient }), - ProjectModel: ProjectModel.ProjectModel({ keycloakAdminClient, sqlClient }), - EnvironmentModel: EnvironmentModel.EnvironmentModel({ keycloakAdminClient, sqlClient }) - }, + UserModel: User.User({ keycloakAdminClient, redisClient }), + GroupModel: Group.Group({ keycloakAdminClient, sqlClient, redisClient }), + BillingModel: BillingModel.BillingModel({ + keycloakAdminClient, + sqlClient + }), + ProjectModel: ProjectModel.ProjectModel({ + keycloakAdminClient, + sqlClient + }), + EnvironmentModel: EnvironmentModel.EnvironmentModel({ + keycloakAdminClient, + sqlClient + }) + } }; } }, @@ -146,10 +164,11 @@ const apolloServer = new ApolloServer({ path: error.path, ...(process.env.NODE_ENV === 'development' ? { extensions: error.extensions } - : {}), + : {}) }; }, plugins: [ + // mariasql client closer plugin { requestDidStart: () => ({ willSendResponse: response => { @@ -159,10 +178,50 @@ const apolloServer = new ApolloServer({ if (response.context.requestCache) { response.context.requestCache.flushAll(); } - }, - }), + } + }) }, - ], + // newrelic instrumentation plugin. Based heavily on https://github.com/essaji/apollo-newrelic-extension-plus + { + requestDidStart({ request }) { + const operationName = R.prop('operationName', request); + const queryString = R.prop('query', request); + const variables = R.prop('variables', request); + + const queryObject = gql` + ${queryString} + `; + const rootFieldName = queryObject.definitions[0].selectionSet.selections.reduce( + (init, q, idx) => + idx === 0 ? 
`${q.name.value}` : `${init}, ${q.name.value}`, + '' + ); + + // operationName is set by the client and optional. rootFieldName is + // set by the API type defs. + // operationName would be "getHighCottonProjectId" and rootFieldName + // would be "getProjectByName" with a query like: + // query getHighCottonProjectId { getProjectByName(name: "high-cotton") { id } } + const transactionName = operationName ? operationName : rootFieldName; + newrelic.setTransactionName(`graphql (${transactionName})`); + newrelic.addCustomAttribute('gqlQuery', queryString); + newrelic.addCustomAttribute('gqlVars', JSON.stringify(variables)); + + return { + willSendResponse: data => { + const { response } = data; + newrelic.addCustomAttribute( + 'errorCount', + R.pipe( + R.propOr([], 'errors'), + R.length + )(response) + ); + } + }; + } + } + ] }); module.exports = apolloServer; diff --git a/services/api/src/bitbucket-sync/repo-permissions.ts b/services/api/src/bitbucket-sync/repo-permissions.ts index fbf4c89bd5..4815443bb5 100644 --- a/services/api/src/bitbucket-sync/repo-permissions.ts +++ b/services/api/src/bitbucket-sync/repo-permissions.ts @@ -116,7 +116,9 @@ const addUser = async (email: string): Promise => { R.pluck('user'), // @ts-ignore R.pluck('email'), - )(currentMembersQuery) as [string]; + // @ts-ignore + R.map(R.toLower), + )(currentMembersQuery) as [string]; // Get current bitbucket uers const bitbucketUsers = R.pipe( @@ -124,6 +126,8 @@ const addUser = async (email: string): Promise => { R.pluck('user'), // @ts-ignore R.pluck('emailAddress'), + // @ts-ignore + R.map(R.toLower), )(userPermissions) as [string]; // Remove users from lagoon project that are removed in bitbucket repo diff --git a/services/api/src/clients/esClient.js b/services/api/src/clients/esClient.js index 312b8fe675..925e036848 100644 --- a/services/api/src/clients/esClient.js +++ b/services/api/src/clients/esClient.js @@ -1,9 +1,9 @@ const elasticsearch = require('elasticsearch'); -const { 
LOGSDB_ADMIN_PASSWORD, ELASTCISEARCH_HOST } = process.env; +const { LOGSDB_ADMIN_PASSWORD, ELASTICSEARCH_HOST } = process.env; const esClient = new elasticsearch.Client({ - host: ELASTCISEARCH_HOST || 'logs-db-service:9200', + host: ELASTICSEARCH_HOST || 'logs-db-service:9200', log: 'warning', httpAuth: `admin:${LOGSDB_ADMIN_PASSWORD || ''}`, }); diff --git a/services/api/src/clients/redisClient.ts b/services/api/src/clients/redisClient.ts new file mode 100644 index 0000000000..d15dcb02bb --- /dev/null +++ b/services/api/src/clients/redisClient.ts @@ -0,0 +1,78 @@ +import * as R from 'ramda'; +import redis, { ClientOpts } from 'redis'; +import { promisify } from 'util'; + +const { REDIS_HOST, REDIS_PASSWORD, REDIS_PORT } = process.env; + +let clientOptions: ClientOpts = { + host: REDIS_HOST || 'api-redis', + port: parseInt(REDIS_PORT, 10) || 6379, + enable_offline_queue: false +}; + +if (typeof REDIS_PASSWORD !== undefined) { + clientOptions.password = REDIS_PASSWORD; +} + +const redisClient = redis.createClient(clientOptions); + +redisClient.on('error', function(error) { + console.error(error); +}); + +const hgetall = promisify(redisClient.hgetall).bind(redisClient); +const smembers = promisify(redisClient.smembers).bind(redisClient); +const sadd = promisify(redisClient.sadd).bind(redisClient); +const del = promisify(redisClient.del).bind(redisClient); + +interface IUserResourceScope { + resource: string; + scope: string; + currentUserId: string; + project?: number; + group?: string; + users?: number[]; +} + +const hashKey = ({ resource, project, group, scope }: IUserResourceScope) => + `${resource}:${project ? `${project}:` : ''}${ + group ? 
`${group}:` : '' + }${scope}`; + +export const getRedisCache = async (resourceScope: IUserResourceScope) => { + const redisHash = await hgetall( + `cache:authz:${resourceScope.currentUserId}` + ); + const key = hashKey(resourceScope); + + return R.prop(key, redisHash); +}; + +export const saveRedisCache = async ( + resourceScope: IUserResourceScope, + value: number | string +) => { + const key = hashKey(resourceScope); + await redisClient.hmset( + `cache:authz:${resourceScope.currentUserId}`, + key, + value + ); +}; + +export const deleteRedisUserCache = userId => del(`cache:authz:${userId}`); + +export const getProjectGroupsCache = async projectId => + smembers(`project-groups:${projectId}`); +export const saveProjectGroupsCache = async (projectId, groupIds) => + sadd(`project-groups:${projectId}`, groupIds); +export const deleteProjectGroupsCache = async projectId => + del(`project-groups:${projectId}`); + +export default { + getRedisCache, + saveRedisCache, + deleteRedisUserCache, + getProjectGroupsCache, + saveProjectGroupsCache +}; diff --git a/services/api/src/data/mock-data.js b/services/api/src/data/mock-data.js new file mode 100644 index 0000000000..e61a42625a --- /dev/null +++ b/services/api/src/data/mock-data.js @@ -0,0 +1,50 @@ +export const packages = [ + 'ansible', + 'apache-log', + 'awl', + 'cacti', + 'chromium', + 'commons-configuration2', + 'consul', + 'dom4j', + 'drupal', + 'file-roller', + 'glibc', + 'golang-go.crypto', + 'graphicsmagick', + 'http-parser', + 'imagemagick', + 'jruby', + 'ksh', + 'libmicrodns', + 'libxml-security-java', + 'linux', + 'lucene-solr', + 'lxc-templates', + 'matrix-synapse', + 'mbedtls', + 'netty', + 'nginx', + 'node-yarnpkg', + 'nodejs', + 'nss', + 'openjdk-11', + 'phantomjs', + 'php7.1', + 'php7.3', + 'python', + 'rmysql', + 'ruby-json-jwt', + 'ruby-omniauth', + 'salt', + 'shiro', + 'slirp', + 'squid', + 'ssvnc', + 'thrift', + 'tomcat9', + 'trafficserver', + 'varnish', + 'xerces-c', + 'yubikey-val', +]; \ No newline at 
end of file diff --git a/services/api/src/helpers/billingGroups.ts b/services/api/src/helpers/billingGroups.ts index 26e040dce3..6808daf9ca 100644 --- a/services/api/src/helpers/billingGroups.ts +++ b/services/api/src/helpers/billingGroups.ts @@ -65,7 +65,7 @@ export const getAllBillingGroupsWithoutProjects = async () => { const GroupModel = Group({keycloakAdminClient }); // Get All Billing Groups - const groupTypeFilterFn = ({ name, value }, group) => { + const groupTypeFilterFn = ({ name, value }) => { return name === 'type' && value[0] === 'billing'; }; const groups = await GroupModel.loadGroupsByAttribute(groupTypeFilterFn); diff --git a/services/api/src/index.js b/services/api/src/index.js index 4ac227a914..0adf55220b 100644 --- a/services/api/src/index.js +++ b/services/api/src/index.js @@ -1,3 +1,4 @@ +require('newrelic'); const { initSendToLagoonLogs } = require('@lagoon/commons/dist/logs'); const { initSendToLagoonTasks } = require('@lagoon/commons/dist/tasks'); const waitForKeycloak = require('./util/waitForKeycloak'); diff --git a/services/api/src/mocks.js b/services/api/src/mocks.js index 33a371939a..8a16c8d496 100644 --- a/services/api/src/mocks.js +++ b/services/api/src/mocks.js @@ -1,5 +1,6 @@ import { MockList } from 'graphql-tools'; import faker from 'faker/locale/en'; +import { packages } from './data/mock-data'; // The mocks object is an Apollo Resolver Map where each mock function has the // following definition: (parent, args, context, info) => {} @@ -23,6 +24,29 @@ const addTime = (originalDate, hoursLimit) => { return date.toISOString(); }; +// Helper function to build an array of a given schema. 
+export const generator = (schema, min = 1, max) => { + max = max || min; + return Array.from({ + length: faker.random.number({ + min, + max, + }), + }).map(() => { + const innerGen = (anySchema) => Object.keys(anySchema).reduce((entity, key) => { + if (Object.prototype.toString.call(anySchema[key]) === '[object Object]') { + entity[key] = innerGen(anySchema[key]); + return entity; + } + entity[key] = faker.fake(anySchema[key]); + + return entity; + }, {}); + + return innerGen(schema()); + }); +}; + // // 'scalar' and 'enum' mocks from typeDefs. // @@ -209,6 +233,7 @@ MIIJKQIBAAKCAgEA+o[...]P0yoL8BoQQG2jCvYfWh6vyglQdrDYx/o6/8ecTwXokKKh6fg1q productionEnvironment: 'master', autoIdle: faker.random.arrayElement([0, 1]), storageCalc: faker.random.arrayElement([0, 1]), + problemsUi: faker.random.arrayElement([0, 1]), openshift: mocks.Openshift(), openshiftProjectPattern: '${project}-${name}', developmentEnvironmentsLimit: 10, @@ -385,6 +410,56 @@ mocks.Task = (parent, args = {}, context, info) => { }; }; +mocks.ProblemIdentifier = () => { + const recentYear = faker.random.arrayElement(['2019', '2020']); + const vuln_id = `CVE-${recentYear}-${faker.random.number({min: 1000, max: 99999})}`; + + return { + identifier: vuln_id, + problem: mocks.Problem(), + }; +}; + +mocks.Problem = () => { + const recentYear = faker.random.arrayElement(['2019', '2020']); + const vuln_id = `CVE-${recentYear}-${faker.random.number({min: 1000, max: 99999})}`; + const source = faker.random.arrayElement(['Harbor', 'Drutiny']); + const created = faker.date.between('2019-10-01 00:00:00', '2020-03-31 23:59:59').toUTCString(); + const associatedPackage = faker.random.arrayElement(packages); + const version = `${faker.random.number(4)}.${faker.random.number(9)}.${faker.random.number(49)}`; + const fixedVersion = `${version}+deb8u${faker.random.number(9)}`; + const severity = faker.random.arrayElement(['UNKNOWN', 'NEGLIGIBLE', 'LOW', 'MEDIUM', 'HIGH', 'CRITICAL']); + const description = 
faker.lorem.paragraph(); + const links = `https://security-tracker.debian.org/tracker/${vuln_id}`; + const severityScore = `0.${faker.random.number({min:1, max:9})}`; + const data = ({ id: faker.random.number(), hello: 'hello', world: 'world' }); + + return { + identifier: vuln_id, + severity: severity, + source: source, + severityScore: severityScore, + associatedPackage: associatedPackage, + description, + links, + data + }; +}; + +mocks.ProblemMutation = (schema) => { + return Array.from({ + length: faker.random.number({ + min: 1, + max: 500, + }), + }).map(() => { + let temp = schema(); + return ( + `problem${faker.random.number(1000000)}: addProblem(input: ${JSON.stringify(temp, 2, null)}) { identifier }` + ); + }); +}; + // // Query 'type' mock from typeDefs. // @@ -399,8 +474,9 @@ mocks.Query = () => ({ userCanSshToEnvironment: () => mocks.Environment(), deploymentByRemoteId: () => mocks.Deployment(), taskByRemoteId: () => mocks.Task(), - allProjects: () => new MockList(9), + allProjects: () => new MockList(600), allOpenshifts: () => new MockList(9), + allProblems: () => new MockList(5), allEnvironments: (parent, args = {}, context, info) => { const project = args.hasOwnProperty('project') ? 
args.project diff --git a/services/api/src/models/billing.ts b/services/api/src/models/billing.ts index 481662cd9e..6680042589 100644 --- a/services/api/src/models/billing.ts +++ b/services/api/src/models/billing.ts @@ -13,6 +13,8 @@ export interface BillingModifierBase { discountPercentage?: number; extraFixed?: number; extraPercentage?: number; + min?:number; + max?:number; customerComments?: string; adminComments?: string; weight?: number; @@ -86,7 +88,7 @@ export const BillingModel = (clients) => { const sql = Sql.getAllBillingModifierByBillingGroup(group.id, monthStart, monthEnd ); const result = (await query(sqlClient, sql)); - return result.map(({weight, discountFixed, discountPercentage, extraFixed, extraPercentage, ...rest}) => + return result.map(({weight, discountFixed, discountPercentage, extraFixed, extraPercentage, min, max, ...rest}) => ({ ...rest, group, @@ -94,7 +96,10 @@ export const BillingModel = (clients) => { discountFixed: parseFloat(discountFixed), discountPercentage: parseFloat(discountPercentage), extraFixed: parseFloat(extraFixed), - extraPercentage: parseFloat(extraPercentage) })); + extraPercentage: parseFloat(extraPercentage), + min: parseFloat(min), + max: parseFloat(max) + })); }; /** diff --git a/services/api/src/models/group.ts b/services/api/src/models/group.ts index 617a523113..19fbc5d6aa 100644 --- a/services/api/src/models/group.ts +++ b/services/api/src/models/group.ts @@ -41,6 +41,7 @@ export interface BillingGroup extends Group { currency?: string; billingSoftware?: string; type?: string; + uptimeRobotStatusPageId?: string; } interface GroupMembership { @@ -61,7 +62,7 @@ interface GroupEdit { } interface AttributeFilterFn { - (attribute: { name: string; value: string[] }, group: Group): boolean; + (attribute: { name: string; value: string[] }): boolean; } export class GroupExistsError extends Error { @@ -88,6 +89,15 @@ const attrLagoonProjectsLens = R.compose( R.lensPath([0]), ); +const getProjectIdsFromGroup = R.pipe( + // 
@ts-ignore + R.view(attrLagoonProjectsLens), + R.defaultTo(''), + R.split(','), + R.reject(R.isEmpty), + R.map(id => parseInt(id, 10)), +); + export const isRoleSubgroup = R.pathEq( ['attributes', 'type', 0], 'role-subgroup', @@ -97,7 +107,7 @@ const attributeKVOrNull = (key: string, group: GroupRepresentation) => String(R.pathOr(null, ['attributes', key], group)); export const Group = (clients) => { - const { keycloakAdminClient } = clients; + const { keycloakAdminClient, redisClient } = clients; const transformKeycloakGroups = async ( keycloakGroups: GroupRepresentation[], @@ -111,6 +121,7 @@ export const Group = (clients) => { type: attributeKVOrNull('type', keycloakGroup), currency: attributeKVOrNull('currency', keycloakGroup), billingSoftware: attributeKVOrNull('billingSoftware', keycloakGroup), + uptimeRobotStatusPageId: attributeKVOrNull('uptimeRobotStatusPageId', keycloakGroup), path: keycloakGroup.path, attributes: keycloakGroup.attributes, subGroups: keycloakGroup.subGroups, @@ -119,11 +130,10 @@ export const Group = (clients) => { let groupsWithGroupsAndMembers = []; for (const group of groups) { + const subGroups = R.reject(isRoleSubgroup)(group.subGroups); groupsWithGroupsAndMembers.push({ ...group, - groups: await transformKeycloakGroups( - R.reject(isRoleSubgroup)(group.subGroups), - ), + groups: R.isEmpty(subGroups) ? 
[] : await transformKeycloakGroups(subGroups), members: await getGroupMembership(group), }); } @@ -214,12 +224,11 @@ export const Group = (clients) => { R.cond([[R.isEmpty, R.always(null)], [R.T, loadGroupByName]]), )(groupInput); - const loadGroupsByAttribute = async ( + const filterGroupsByAttribute = ( + groups: Group[] | BillingGroup[], filterFn: AttributeFilterFn, - ): Promise => { - const allGroups = await loadAllGroups(); - - const filteredGroups = R.filter((group: Group) => + ): Group[] | BillingGroup[] => + R.filter((group: Group) => R.pipe( R.toPairs, R.reduce((isMatch: boolean, attribute: [string, string[]]): boolean => { @@ -229,16 +238,33 @@ export const Group = (clients) => { name: attribute[0], value: attribute[1], }, - group, ); } return isMatch; }, false), )(group.attributes), - )(allGroups); + )(groups); + + const loadGroupsByAttribute = async ( + filterFn: AttributeFilterFn, + ): Promise => { + const keycloakGroups = await keycloakAdminClient.groups.find(); - return filteredGroups; + let fullGroups: Group[] | BillingGroup[] = []; + for (const group of keycloakGroups) { + const fullGroup = await keycloakAdminClient.groups.findOne({ + id: group.id, + }); + + fullGroups = [...fullGroups, fullGroup]; + } + + const filteredGroups = filterGroupsByAttribute(fullGroups, filterFn); + + const groups = await transformKeycloakGroups(filteredGroups); + + return groups; }; const loadGroupsByProjectId = async ( @@ -255,20 +281,67 @@ export const Group = (clients) => { return false; }; - return loadGroupsByAttribute(filterFn); + let groupIds = []; + + // This function is called often and is expensive to compute so prefer + // performance over DRY + try { + groupIds = await redisClient.getProjectGroupsCache(projectId); + } catch (err) { + logger.warn(`Error loading project groups from cache: ${err.message}`); + groupIds = []; + } + + if (R.isEmpty(groupIds)) { + const keycloakGroups = await keycloakAdminClient.groups.find(); + // @ts-ignore + groupIds = 
R.pluck('id', keycloakGroups); + } + + let fullGroups = []; + for (const id of groupIds) { + const fullGroup = await keycloakAdminClient.groups.findOne({ + id, + }); + + fullGroups = [...fullGroups, fullGroup]; + } + + const filteredGroups = filterGroupsByAttribute(fullGroups, filterFn); + try { + const filteredGroupIds = R.pluck('id', filteredGroups); + await redisClient.saveProjectGroupsCache(projectId, filteredGroupIds); + } catch (err) { + logger.warn(`Error saving project groups to cache: ${err.message}`); + } + + const groups = await transformKeycloakGroups(filteredGroups); + + return groups; + }; + + // Recursive function to load membership "up" the group chain + const getMembersFromGroupAndParents = async ( + group: Group, + ): Promise => { + const members = R.prop('members', group); + + const parentGroup = await loadParentGroup(group); + const parentMembers = parentGroup + ? await getMembersFromGroupAndParents(parentGroup) + : []; + + return [ + ...members, + ...parentMembers, + ]; }; // Recursive function to load projects "up" the group chain const getProjectsFromGroupAndParents = async ( group: Group, ): Promise => { - const projectIds = R.pipe( - R.view(attrLagoonProjectsLens), - R.defaultTo(''), - R.split(','), - R.reject(R.isEmpty), - R.map(id => parseInt(id, 10)), - )(group); + const projectIds = getProjectIdsFromGroup(group); const parentGroup = await loadParentGroup(group); const parentProjectIds = parentGroup @@ -286,13 +359,7 @@ export const Group = (clients) => { const getProjectsFromGroupAndSubgroups = async ( group: Group, ): Promise => { - const groupProjectIds = R.pipe( - R.view(attrLagoonProjectsLens), - R.defaultTo(''), - R.split(','), - R.reject(R.isEmpty), - R.map(id => parseInt(id, 10)), - )(group); + const groupProjectIds = getProjectIdsFromGroup(group); let subGroupProjectIds = []; for (const subGroup of group.groups) { @@ -449,6 +516,10 @@ export const Group = (clients) => { }; const deleteGroup = async (id: string): Promise => { + 
const group = loadGroupById(id); + // @ts-ignore + const projectIds = getProjectIdsFromGroup(group); + try { await keycloakAdminClient.groups.del({ id }); } catch (err) { @@ -458,6 +529,14 @@ export const Group = (clients) => { throw new Error(`Error deleting group ${id}: ${err}`); } } + + for (const projectId of projectIds) { + try { + await redisClient.deleteProjectGroupsCache(projectId); + } catch (err) { + logger.warn(`Error deleting project groups cache: ${err.message}`); + } + } }; const addUserToGroup = async ( @@ -499,6 +578,12 @@ export const Group = (clients) => { throw new Error(`Could not add user to group: ${err.message}`); } + try { + await redisClient.deleteRedisUserCache(user.id) + } catch(err) { + logger.warn(`Error deleting user cache ${user.id}: ${err}`); + } + return await loadGroupById(group.id); }; @@ -521,6 +606,12 @@ export const Group = (clients) => { } catch (err) { throw new Error(`Could not remove user from group: ${err.message}`); } + + try { + await redisClient.deleteRedisUserCache(user.id) + } catch(err) { + logger.warn(`Error deleting user cache ${user.id}: ${err}`); + } } return await loadGroupById(group.id); @@ -556,6 +647,23 @@ export const Group = (clients) => { throw new Error( `Error setting projects for group ${group.name}: ${err.message}`, ); + }; + + // Clear the cache for users that gained access to the project + const groupAndParentsMembers = await getMembersFromGroupAndParents(group); + const userIds = R.map(R.path(['user', 'id']), groupAndParentsMembers); + for (const userId of userIds) { + try { + await redisClient.deleteRedisUserCache(userId) + } catch(err) { + logger.warn(`Error deleting user cache ${userId}: ${err}`); + } + } + + try { + await redisClient.deleteProjectGroupsCache(projectId); + } catch (err) { + logger.warn(`Error deleting project groups cache: ${err.message}`); } }; @@ -588,6 +696,23 @@ export const Group = (clients) => { throw new Error( `Error setting projects for group ${group.name}: 
${err.message}`, ); + }; + + // Clear the cache for users that lost access to the project + const groupAndParentsMembers = await getMembersFromGroupAndParents(group); + const userIds = R.map(R.path(['user', 'id']), groupAndParentsMembers); + for (const userId of userIds) { + try { + await redisClient.deleteRedisUserCache(userId) + } catch(err) { + logger.warn(`Error deleting user cache ${userId}: ${err}`); + } + } + + try { + await redisClient.deleteProjectGroupsCache(projectId); + } catch (err) { + logger.warn(`Error deleting project groups cache: ${err.message}`); } }; diff --git a/services/api/src/models/user.ts b/services/api/src/models/user.ts index 659fede608..c1262625e7 100644 --- a/services/api/src/models/user.ts +++ b/services/api/src/models/user.ts @@ -1,5 +1,6 @@ import * as R from 'ramda'; import pickNonNil from '../util/pickNonNil'; +import * as logger from '../logger'; import UserRepresentation from 'keycloak-admin/lib/defs/userRepresentation'; import { Group, isRoleSubgroup } from './group'; @@ -64,7 +65,7 @@ const attrCommentLens = R.compose( ); export const User = (clients): UserModel => { - const { keycloakAdminClient } = clients; + const { keycloakAdminClient, redisClient } = clients; const fetchGitlabId = async (user: User): Promise => { const identities = await keycloakAdminClient.users.listFederatedIdentities({ @@ -351,6 +352,11 @@ export const User = (clients): UserModel => { throw new Error(`Error deleting user ${id}: ${err}`); } } + try { + await redisClient.deleteRedisUserCache(id) + } catch(err) { + logger.error(`Error deleting user cache ${id}: ${err}`); + } }; return { @@ -363,6 +369,6 @@ export const User = (clients): UserModel => { getUserRolesForProject, addUser, updateUser, - deleteUser, + deleteUser } }; diff --git a/services/api/src/newrelic.js b/services/api/src/newrelic.js new file mode 100644 index 0000000000..cfaf8705a5 --- /dev/null +++ b/services/api/src/newrelic.js @@ -0,0 +1,50 @@ +'use strict' +/** + * New Relic agent 
configuration. + * + * See lib/config/default.js in the agent distribution for a more complete + * description of configuration variables and their potential values. + */ +exports.config = { + /** + * Array of application names. + */ + app_name: ['api'], + logging: { + /** + * Level at which to log. 'trace' is most useful to New Relic when diagnosing + * issues with the agent, 'info' and higher will impose the least overhead on + * production applications. + */ + level: 'info', + enabled: false, + }, + /** + * When true, all request headers except for those listed in attributes.exclude + * will be captured for all traces, unless otherwise specified in a destination's + * attributes include/exclude lists. + */ + allow_all_headers: true, + attributes: { + /** + * Prefix of attributes to exclude from all destinations. Allows * as wildcard + * at end. + * + * NOTE: If excluding headers, they must be in camelCase form to be filtered. + * + * @env NEW_RELIC_ATTRIBUTES_EXCLUDE + */ + exclude: [ + 'request.headers.cookie', + 'request.headers.authorization', + 'request.headers.proxyAuthorization', + 'request.headers.setCookie*', + 'request.headers.x*', + 'response.headers.cookie', + 'response.headers.authorization', + 'response.headers.proxyAuthorization', + 'response.headers.setCookie*', + 'response.headers.x*' + ] + } +} diff --git a/services/api/src/resolvers.js b/services/api/src/resolvers.js index 765355d1f6..13483f971b 100644 --- a/services/api/src/resolvers.js +++ b/services/api/src/resolvers.js @@ -1,6 +1,23 @@ const GraphQLDate = require('graphql-iso-date'); const GraphQLJSON = require('graphql-type-json'); +const { + getAllProblems, + getProblemsByEnvironmentId, + addProblem, + deleteProblem, + deleteProblemsFromSource, + addProblemsFromSource, + getProblemSources, + getProblemHarborScanMatches, + addProblemHarborScanMatch, + deleteProblemHarborScanMatch, +} = require('./resources/problem/resolvers'); + +const { + SeverityScoreType +} = 
require('./resources/problem/types'); + const { getLagoonVersion, } = require('./resources/lagoon/resolvers'); @@ -47,6 +64,7 @@ const { addOrUpdateEnvironment, addOrUpdateEnvironmentStorage, getEnvironmentByName, + getEnvironmentById, getEnvironmentByOpenshiftProjectName, getEnvironmentHoursMonthByEnvironmentId, getEnvironmentStorageByEnvironmentId, @@ -226,6 +244,7 @@ const resolvers = { backups: getBackupsByEnvironmentId, envVariables: getEnvVarsByEnvironmentId, services: getEnvironmentServicesByEnvironmentId, + problems: getProblemsByEnvironmentId, }, Deployment: { environment: getEnvironmentByDeploymentId, @@ -266,7 +285,9 @@ const resolvers = { projectByGitUrl: getProjectByGitUrl, projectByName: getProjectByName, groupByName: getGroupByName, + problemSources: getProblemSources, environmentByName: getEnvironmentByName, + environmentById: getEnvironmentById, environmentByOpenshiftProjectName: getEnvironmentByOpenshiftProjectName, userCanSshToEnvironment, deploymentByRemoteId: getDeploymentByRemoteId, @@ -274,14 +295,21 @@ const resolvers = { allProjects: getAllProjects, allOpenshifts: getAllOpenshifts, allEnvironments: getAllEnvironments, + allProblems: getAllProblems, allGroups: getAllGroups, allProjectsInGroup: getAllProjectsInGroup, billingGroupCost: getBillingGroupCost, allBillingGroupsCost: getAllBillingGroupsCost, allBillingModifiers: getBillingModifiers, + allProblemHarborScanMatchers: getProblemHarborScanMatches, projectsByMetadata: getProjectsByMetadata }, Mutation: { + addProblem, + addProblemHarborScanMatch, + deleteProblem, + deleteProblemsFromSource, + deleteProblemHarborScanMatch, addOrUpdateEnvironment, updateEnvironment, deleteEnvironment, @@ -381,6 +409,7 @@ const resolvers = { }, Date: GraphQLDate, JSON: GraphQLJSON, + SeverityScore: SeverityScoreType, }; module.exports = resolvers; diff --git a/services/api/src/resources/billing/billingCalculations.test.ts b/services/api/src/resources/billing/billingCalculations.test.ts index 
633e323a07..5d0b4c6c79 100644 --- a/services/api/src/resources/billing/billingCalculations.test.ts +++ b/services/api/src/resources/billing/billingCalculations.test.ts @@ -440,6 +440,223 @@ const mockData: IMockDataType = { } ] }, + { + name: 'RC', + expectations: { + hits: 87.78, + storage: 7.69, + prod: 30.02, + dev: 20.02 + }, + currency: CURRENCIES.CHF, + projects: [ + { + name: "srf_ch", + availability: "POLYSITE", + month: 6, + year: 2020, + hits: 59937, + storageDays: 454.666423, + prodHours: 720, + devHours: 720, + }, + { + name: "1cms-zh", + availability: "POLYSITE", + month: 6, + year: 2020, + hits: 3600, + storageDays: 56.651285, + prodHours: 720, + devHours: 1440, + }, + { + name: "1cms-bl", + availability: "POLYSITE", + month: 6, + year: 2020, + hits: 720, + storageDays: 0.005496, + prodHours: 720, + devHours: 720, + }, + { + name: "1cms-lu", + availability: "POLYSITE", + month: 6, + year: 2020, + hits: 0, + storageDays: 0.0049640000000000005, + prodHours: 720, + devHours: 720, + }, + { + name: "1cms-zg", + availability: "POLYSITE", + month: 6, + year: 2020, + hits: 0, + storageDays: 0.0016020000000000001, + prodHours: 720, + devHours: 720, + + }, + { + name: "1cms-sg", + availability: "POLYSITE", + month: 6, + year: 2020, + hits: 0, + storageDays: 0.0016020000000000001, + prodHours: 720, + devHours: 720, + + }, + { + name: "1cms-tg", + availability: "POLYSITE", + month: 6, + year: 2020, + hits: 0, + storageDays: 0.0049640000000000005, + prodHours: 720, + devHours: 720, + + }, + { + name: "1cms-uw", + availability: "POLYSITE", + month: 6, + year: 2020, + hits: 0, + storageDays: 0.001588, + prodHours: 720, + devHours: 720, + + }, + { + name: "1cms-ag", + availability: "POLYSITE", + month: 6, + year: 2020, + hits: 0, + storageDays: 0.003363, + prodHours: 720, + devHours: 720, + + }, + { + name: "1cms-ge", + availability: "POLYSITE", + month: 6, + year: 2020, + hits: 0, + storageDays: 0.004966, + prodHours: 720, + devHours: 720, + + }, + { + name: 
"1cms-ju", + availability: "POLYSITE", + month: 6, + year: 2020, + hits: 0, + storageDays: 0.004966, + prodHours: 720, + devHours: 720, + + }, + { + name: "1cms-ti", + availability: "POLYSITE", + month: 6, + year: 2020, + hits: 0, + storageDays: 0.0022919999999999998, + prodHours: 720, + devHours: 720, + + }, + { + name: "1cms-gr", + availability: "POLYSITE", + month: 6, + year: 2020, + hits: 0, + storageDays: 0.004966, + prodHours: 720, + devHours: 720, + + }, + { + name: "1cms-hs", + availability: "POLYSITE", + month: 6, + year: 2020, + hits: 0, + storageDays: 0.001588, + prodHours: 720, + devHours: 720, + + }, + { + name: "1cms-lt", + availability: "POLYSITE", + month: 6, + year: 2020, + hits: 0, + storageDays: 0.004966, + prodHours: 720, + devHours: 720, + + }, + { + name: "1cms-so", + availability: "POLYSITE", + month: 6, + year: 2020, + hits: 0, + storageDays: 0.004067, + prodHours: 720, + devHours: 720, + + }, + { + name: "1cms-smsv", + availability: "POLYSITE", + month: 6, + year: 2020, + hits: 360921, + storageDays: 19.414938, + prodHours: 720, + devHours: 720, + + }, + { + name: "1cms-sh", + availability: "POLYSITE", + month: 6, + year: 2020, + hits: 0, + storageDays: 0.001588, + prodHours: 720, + devHours: 720, + + }, + { + name: "1cms-sz", + availability: "POLYSITE", + month: 6, + year: 2020, + hits: 0, + storageDays: 0.004951, + prodHours: 720, + devHours: 720, + + } + ] + } ], }; @@ -764,7 +981,7 @@ describe('Billing Calculations #only-billing-calculations', () => { }); }); - describe('Billing Cost Modifiers', () => { + describe('Billing Cost Modifiers #modifiers', () => { beforeAll(async () => { await initializeGraphQL(); }); @@ -939,6 +1156,90 @@ describe('Billing Calculations #only-billing-calculations', () => { expect(result).toMatchObject(expected); }); + it('Total Costs with `max` modifier. (100). 
#modifiers, #max', () => { + // Arrange + const projects = [ + { + availability: 'STANDARD', + month: '11', + year: '2019', + hits: 1000000, + storageDays: 10000, + prodHours: 720, + devHours: 1500 + }, + { + availability: 'STANDARD', + month: '11', + year: '2019', + hits: 1000000, + storageDays: 10000, + prodHours: 720, + devHours: 1500 + } + ]; + + // Act + const result = getProjectsCosts('USD', projects, [ + { max: 100 } + ]); + + // Assert + const expected = { + hitCost: 324, + storageCost: 656.01, + environmentCost: { + prod: 60.05, + dev: 1.67 + }, + total: 100 // 1041.73 before extra + }; + + expect(result).toMatchObject(expected); + }); + + it('Total Costs with `min` modifier. (1000). #modifiers, #min', () => { + // Arrange + const projects = [ + { + availability: 'STANDARD', + month: '06', + year: '2020', + hits: 1, + storageDays: 1, + prodHours: 0, + devHours: 0 + }, + { + availability: 'STANDARD', + month: '06', + year: '2020', + hits: 0, + storageDays: 1, + prodHours: 0, + devHours: 0 + } + ]; + + // Act + const result = getProjectsCosts('USD', projects, [ + { min: 1000 } + ]); + + // Assert + const expected = { + hitCost: 69, + storageCost: 0, + environmentCost: { + prod: 0, + dev: 0 + }, + total: 1000 // 1041.73 before extra + }; + + expect(result).toMatchObject(expected); + }); + it('Total Costs with Multiple modifiers. (100% + $100 with lower weight used first). 
#modifiers, #multiple', () => { // Arrange const projects = [ @@ -1148,7 +1449,7 @@ describe('Billing Calculations #only-billing-calculations', () => { const lastMonth = moment().subtract(1, 'M').format('YYYY-MM').toString(); const nextMonth = moment().add(1, 'M').format('YYYY-MM').toString(); - const nextYear = moment().add(1, "year").format('YYYY-MM').toString(); + const nextYear = moment().add(1, 'year').format('YYYY-MM').toString(); const currMonth = moment().format('YYYY-MM').toString(); // Act @@ -1177,9 +1478,6 @@ describe('Billing Calculations #only-billing-calculations', () => { expect(nextYearBillingGroupModifiers.length).toBe(1); }); - - - it('Given a single, or multiple, Billing modifiers that would generage a negative total, ensure it does not go below 0 (zero). #belowZero', async() => { // Arrange diff --git a/services/api/src/resources/billing/billingCalculations.ts b/services/api/src/resources/billing/billingCalculations.ts index 45312c3c85..0675676cb3 100644 --- a/services/api/src/resources/billing/billingCalculations.ts +++ b/services/api/src/resources/billing/billingCalculations.ts @@ -37,6 +37,7 @@ export interface BillingGroupCosts { environmentCostDescription?: any; total?: number; modifiers?: [BillingModifier]; + modifiersDescription?: [string]; projects?: [any]; } @@ -103,17 +104,46 @@ export const getProjectsCosts = (currency, projects, modifiers: BillingModifier[ // Apply Modifiers const modifiersSortFn = (a:BillingModifier, b:BillingModifier) => a.weight < b.weight? 
-1 : 1 - const reducerFn: (previousValue: number, currentValue: BillingModifier) => number = - (total, modifier) => { - const { discountFixed, extraFixed, discountPercentage, extraPercentage } = modifier; + + const reducerFn: (acc: {total: number, description: any[]}, obj: BillingModifier) => { total: number, description: any[] } = + ({total, description}, modifier) => { + const { discountFixed, extraFixed, discountPercentage, extraPercentage, min, max } = modifier; + + if (discountFixed){ + description = [...description, { type: 'discountFixed', amt: discountFixed, subTotal: total}]; + } total = discountFixed ? total - discountFixed : total; - total = extraFixed ? total + extraFixed : total; + + if (extraFixed){ + description = [...description, { type: 'extraFixed', amt: extraFixed, subTotal: total}]; + } + total = extraFixed ? total + extraFixed : total; + + if (discountPercentage){ + description = [...description, { type: 'discountPercentage', amt: (total * (discountPercentage / 100)), subTotal: total}]; + } total = discountPercentage ? total - (total * (discountPercentage / 100)) : total; + + if (extraPercentage){ + description = [...description, { type: 'extraPercentage', amt: (total * (extraPercentage / 100)), subTotal: total}]; + } total = extraPercentage ? total + (total * (extraPercentage / 100)) : total; - return total; + + if (min){ + description = [...description, { type: 'min', amt: min, subTotal: total}]; + } + total = min ? Math.max(total, min) : total; + + if (max){ + description = [...description, { type: 'max', amt: max, subTotal: total}]; + } + total = max ? 
Math.min(total, max) : total; + + return { total, description }; } + const sortedModifiers = modifiers.sort(modifiersSortFn); const subTotal = hitCost.cost + storage.cost + prod.cost + dev.cost; - const total = Math.max(0, modifiers.sort(modifiersSortFn).reduce(reducerFn, subTotal)); + const { total, description } = sortedModifiers.reduce(reducerFn, {total: subTotal, description: []}); return ({ hitCost: hitCost.cost, @@ -122,8 +152,9 @@ export const getProjectsCosts = (currency, projects, modifiers: BillingModifier[ storageCostDescription, environmentCost, environmentCostDescription, - total, + total: Math.max(0, total), modifiers, + modifiersDescription: description, projects, }) as BillingGroupCosts; }; @@ -186,22 +217,16 @@ export const storageCost = ({ projects, currency }: IBillingGroup) => { const description = { projects: projects.map(({name, storageDays}) => ({name, storage: storageDays/days})), included: freeGBDays/days, - additional: storageToBill/days + additional: storageToBill/days, + qty: storageToBill } - return storageDays > freeGBDays - ? { - cost: Number((storageToBill * storagePerDay).toFixed(2)), - description, - unitPrice: storagePerDay, - quantity: averageGBsPerDay - } - : { - cost: 0, - description, - unitPrice: storagePerDay, - quantity: averageGBsPerDay - }; + return { + cost: storageDays > freeGBDays ? 
Number((storageToBill * storagePerDay).toFixed(2)) : 0, + description, + unitPrice: storagePerDay, + quantity: averageGBsPerDay + } }; /** diff --git a/services/api/src/resources/billing/graphql.ts b/services/api/src/resources/billing/graphql.ts index eb3c4efd8d..f1afb5a406 100644 --- a/services/api/src/resources/billing/graphql.ts +++ b/services/api/src/resources/billing/graphql.ts @@ -21,10 +21,11 @@ const requestConfig = (token) => ({ const getJWTToken = async () => { try { const { stdout: jwtToken, stderr } = await exec( - 'docker-compose exec -T auto-idler /create_jwt.sh', + 'docker-compose exec -T auto-idler /create_jwt.py', ); if (stderr) { - throw stderr; + // throw stderr; + console.error(stderr); } return jwtToken; } catch (err) { @@ -68,7 +69,7 @@ const graphql: AxiosGraphQL = async (query: String, variables?: any) => { } -const BILLING_MODIFIER_FIELDS = 'id, group { id, name, type }, startDate, endDate, discountFixed, discountPercentage, extraFixed, extraPercentage, customerComments, adminComments'; +const BILLING_MODIFIER_FIELDS = 'id, group { id, name, type }, startDate, endDate, discountFixed, discountPercentage, extraFixed, extraPercentage, min, max, customerComments, adminComments'; const ADD_BILLING_MODIFIER = ` mutation addBillingModifier($input: AddBillingModifierInput!) 
{ diff --git a/services/api/src/resources/billing/resolvers.test.ts b/services/api/src/resources/billing/resolvers.test.ts index bb28e1b958..1eeddf9275 100644 --- a/services/api/src/resources/billing/resolvers.test.ts +++ b/services/api/src/resources/billing/resolvers.test.ts @@ -39,6 +39,8 @@ export const defaultModifier: BillingModifierInput = { discountPercentage: 0, extraFixed: 0, extraPercentage: 0, + min: 0, + max: 0, customerComments: 'xxx', adminComments: 'xxx' }; diff --git a/services/api/src/resources/deployment/resolvers.ts b/services/api/src/resources/deployment/resolvers.ts index 6fb62f5bc3..3fc893555b 100644 --- a/services/api/src/resources/deployment/resolvers.ts +++ b/services/api/src/resources/deployment/resolvers.ts @@ -805,13 +805,6 @@ export const switchActiveStandby: ResolverFn = async ( project: project.id, }); - const environmentRows = await query( - sqlClient, - environmentSql.selectEnvironmentByNameAndProject(project.productionEnvironment, project.id), - ); - const environment = environmentRows[0]; - var environmentId = parseInt(environment.id); - if (project.standbyProductionEnvironment == null) { sendToLagoonLogs( 'error', @@ -824,6 +817,14 @@ export const switchActiveStandby: ResolverFn = async ( return `Error: no standbyProductionEnvironment configured`; } + // we want the task to show in the standby environment, as this is where the task will be initiated. 
+ const environmentRows = await query( + sqlClient, + environmentSql.selectEnvironmentByNameAndProject(project.standbyProductionEnvironment, project.id), + ); + const environment = environmentRows[0]; + var environmentId = parseInt(environment.id); + // construct the data for the misc task let uuid = uuid4(); diff --git a/services/api/src/resources/environment/resolvers.ts b/services/api/src/resources/environment/resolvers.ts index f00417f8f0..69951f8638 100644 --- a/services/api/src/resources/environment/resolvers.ts +++ b/services/api/src/resources/environment/resolvers.ts @@ -49,6 +49,25 @@ export const getEnvironmentByName: ResolverFn = async ( return environment; }; +export const getEnvironmentById = async ( + root, + args, + { sqlClient, hasPermission }, +) => { + const environment = await Helpers(sqlClient).getEnvironmentById(args.id); + + if (!environment) { + return null; + } + + await hasPermission('environment', 'view', { + project: environment.project, + }); + + const rows = await query(sqlClient, Sql.selectEnvironmentById(args.id)); + return rows[0]; +}; + export const getEnvironmentsByProjectId: ResolverFn = async ( project, unformattedArgs, @@ -260,6 +279,7 @@ export const getEnvironmentByOpenshiftProjectName: ResolverFn = async ( environment e JOIN project p ON e.project = p.id WHERE e.openshift_project_name = :openshift_project_name + AND e.deleted = "0000-00-00 00:00:00" `; const prep = prepare(sqlClient, str); diff --git a/services/api/src/resources/group/resolvers.test.ts b/services/api/src/resources/group/resolvers.test.ts index 2d4570f751..64259777f4 100644 --- a/services/api/src/resources/group/resolvers.test.ts +++ b/services/api/src/resources/group/resolvers.test.ts @@ -75,7 +75,7 @@ const requestConfig = { const getJWTToken = async () => { try { const { stdout: jwtToken, stderr } = await exec( - 'docker-compose exec -T auto-idler /create_jwt.sh', + 'docker-compose exec -T auto-idler /create_jwt.py', ); if (stderr) { throw stderr; diff 
--git a/services/api/src/resources/group/resolvers.ts b/services/api/src/resources/group/resolvers.ts index 985919b3e8..0a76449645 100644 --- a/services/api/src/resources/group/resolvers.ts +++ b/services/api/src/resources/group/resolvers.ts @@ -340,7 +340,7 @@ export const addGroupsToProject: ResolverFn = async ( export const addBillingGroup: ResolverFn = async ( _root, - { input: { name, currency, billingSoftware } }, + { input: { name, currency, billingSoftware, uptimeRobotStatusPageId } }, { models, hasPermission }, ) => { await hasPermission('group', 'add'); @@ -358,6 +358,7 @@ export const addBillingGroup: ResolverFn = async ( attributes: { type: ['billing'], currency: [currency], + uptimeRobotStatusPageId: [uptimeRobotStatusPageId], ...(billingSoftware ? { billingSoftware: [billingSoftware] } : {}), }, }); @@ -377,11 +378,12 @@ export const updateBillingGroup: ResolverFn = async ( throw new Error('Input patch requires at least 1 attribute'); } - const { name, currency, billingSoftware } = patch; + const { name, currency, billingSoftware, uptimeRobotStatusPageId } = patch; const updatedAttributes = { ...attributes, type: ['billing'], ...(currency ? { currency: [currency] } : {}), + ...(uptimeRobotStatusPageId ? {uptimeRobotStatusPageId: [uptimeRobotStatusPageId] }: {}), ...(billingSoftware ? { billingSoftware: [billingSoftware] } : {}), }; diff --git a/services/api/src/resources/openshift/resolvers.ts b/services/api/src/resources/openshift/resolvers.ts index 9bec4b8f3a..3e40957b93 100644 --- a/services/api/src/resources/openshift/resolvers.ts +++ b/services/api/src/resources/openshift/resolvers.ts @@ -33,7 +33,8 @@ export const addOpenshift: ResolverFn = async ( ${input.routerPattern ? ':router_pattern' : 'NULL'}, ${input.projectUser ? ':project_user' : 'NULL'}, ${input.sshHost ? ':ssh_host' : 'NULL'}, - ${input.sshPort ? ':ssh_port' : 'NULL'} + ${input.sshPort ? ':ssh_port' : 'NULL'}, + ${input.monitoringConfig ? 
':monitoring_config' : 'NULL'} ); `, ); diff --git a/services/api/src/resources/problem/helpers.ts b/services/api/src/resources/problem/helpers.ts new file mode 100644 index 0000000000..bfb4f15914 --- /dev/null +++ b/services/api/src/resources/problem/helpers.ts @@ -0,0 +1,61 @@ +import * as R from 'ramda'; +import { MariaClient } from 'mariasql'; +import { query } from '../../util/db'; +import { Helpers as projectHelpers } from '../project/helpers'; +import { Sql } from './sql'; + +export const Helpers = (sqlClient: MariaClient) => { + const groupByProblemIdentifier = (problems) => problems.reduce((obj, problem) => { + obj[problem.identifier] = obj[problem.identifier] || []; + obj[problem.identifier].push(problem); + return obj; + }, {}); + + const getAllProblems = async (source, environment, envType, severity) => { + const environmentType = envType && envType.map(t => t.toLowerCase() || []); + + return await query( + sqlClient, + Sql.selectAllProblems({ + source, + environmentId: environment, + environmentType, + severity, + }) + ); + }; + + const getSeverityOptions = async () => ( + R.map( + R.prop('severity'), + await query(sqlClient, Sql.selectSeverityOptions()), + ) + ); + + const getProblemsWithProjects = async (problems, hasPermission, args: any = []) => { + const withProjects = await Object.keys(problems).map((key) => { + let projects = problems[key].map(async (problem) => { + const envType = !R.isEmpty(args.envType) && args.envType; + const {id, project, openshiftProjectName, name, envName, environmentType}: any = + await projectHelpers(sqlClient).getProjectByEnvironmentId(problem.environment, envType) || {}; + + hasPermission('project', 'view', { + project: !R.isNil(project) && project, + }); + + return (!R.isNil(id)) && {id, project, openshiftProjectName, name, environments: {name: envName}, type: environmentType}; + }); + const {...problem} = R.prop(0, problems[key]); + return {identifier: key, problem: {...problem}, projects: projects, problems: 
problems[key]}; + }); + + return await Promise.all(withProjects); + }; + + return { + getAllProblems, + getSeverityOptions, + groupByProblemIdentifier, + getProblemsWithProjects + }; +}; \ No newline at end of file diff --git a/services/api/src/resources/problem/resolvers.ts b/services/api/src/resources/problem/resolvers.ts new file mode 100644 index 0000000000..fe10ee8121 --- /dev/null +++ b/services/api/src/resources/problem/resolvers.ts @@ -0,0 +1,248 @@ +import * as R from 'ramda'; +import { query, prepare } from '../../util/db'; +import { Sql } from './sql'; +import { Helpers as problemHelpers } from './helpers'; +import { Helpers as environmentHelpers } from '../environment/helpers'; +import { ResolverFn } from '../'; +const logger = require('../../logger'); + +export const getAllProblems: ResolverFn = async ( + root, + args, + { sqlClient, hasPermission } +) => { + let rows = []; + + try { + if (!R.isEmpty(args)) { + rows = await problemHelpers(sqlClient).getAllProblems(args.source, args.environment, args.envType, args.severity); + } + else { + rows = await query(sqlClient, Sql.selectAllProblems({source: [], environmentId: 0, environmentType: [], severity: []})); + } + } + catch (err) { + if (err) { + logger.warn(err); + return []; + } + } + + const problems: any = rows && rows.map(async problem => { + const { environment: envId, name, project, environmentType, openshiftProjectName, ...rest} = problem; + + await hasPermission('problem', 'view', { + project: project, + }); + + return { ...rest, environment: { id: envId, name, project, environmentType, openshiftProjectName }}; + }); + + return Promise.all(problems).then((completed) => { + const sorted = R.sort(R.descend(R.prop('severity')), completed); + return sorted.map((row: any) => ({ ...(row as Object) })); + }); +}; + +export const getSeverityOptions = async ( + root, + args, + { sqlClient }, +) => { + return await problemHelpers(sqlClient).getSeverityOptions(); +}; + +export const getProblemSources = 
async ( + root, + args, + { sqlClient }, +) => { + const preparedQuery = prepare( + sqlClient, + `SELECT DISTINCT source FROM environment_problem`, + ); + + return R.map( + R.prop('source'), + await query(sqlClient, preparedQuery(args)) + ); +}; + +export const getProblemsByEnvironmentId = async ( + { id: environmentId }, + {severity, source}, + { sqlClient, hasPermission }, +) => { + const environment = await environmentHelpers(sqlClient).getEnvironmentById(environmentId); + + await hasPermission('problem', 'view', { + project: environment.project, + }); + + const rows = await query( + sqlClient, + Sql.selectProblemsByEnvironmentId({ + environmentId, + severity, + source, + }), + ); + + return R.sort(R.descend(R.prop('created')), rows); +}; + +export const addProblem = async ( + root, + { + input: { + id, severity, environment: environmentId, identifier, service, source, data, created, + severityScore, associatedPackage, description, version, fixedVersion, links + }, + }, + { sqlClient, hasPermission }, +) => { + const environment = await environmentHelpers(sqlClient).getEnvironmentById(environmentId); + + await hasPermission('problem', 'add', { + project: environment.project, + }); + + const { + info: { insertId }, + } = await query( + sqlClient, + Sql.insertProblem({ + severity, + severity_score: severityScore, + lagoon_service: service, + identifier, + environment: environmentId, + source, + associated_package: associatedPackage, + description, + version, + fixed_version: fixedVersion, + links: links, + data, + created, + }), + ); + + const rows = await query(sqlClient, Sql.selectProblemByDatabaseId(insertId)); + return R.prop(0, rows); +}; + +export const deleteProblem = async ( + root, + { + input : { + environment: environmentId, + identifier, + } + }, + { sqlClient, hasPermission }, +) => { + const environment = await environmentHelpers(sqlClient).getEnvironmentById(environmentId); + + await hasPermission('problem', 'delete', { + project: 
environment.project, + }); + + await query(sqlClient, Sql.deleteProblem(environmentId, identifier)); + + return 'success'; +}; + +export const deleteProblemsFromSource = async ( + root, + { + input : { + environment: environmentId, + source, + service, + } + }, + { sqlClient, hasPermission }, +) => { + const environment = await environmentHelpers(sqlClient).getEnvironmentById(environmentId); + + await hasPermission('problem', 'delete', { + project: environment.project, + }); + + await query(sqlClient, Sql.deleteProblemsFromSource(environmentId, source, service)); + + return 'success'; +} + +export const getProblemHarborScanMatches = async ( + root, + args, + { sqlClient, hasPermission }, +) => { + + await hasPermission('harbor_scan_match', 'view', {}); + + const rows = await query( + sqlClient, + Sql.selectAllProblemHarborScanMatches(), + ); + + return rows; +}; + +export const addProblemHarborScanMatch = async ( + root, + { + input: { + name, + description, + defaultLagoonProject, + defaultLagoonEnvironment, + defaultLagoonServiceName, + regex + }, + }, + { sqlClient, hasPermission }, +) => { + + await hasPermission('harbor_scan_match', 'add', {}); + + const { + info: { insertId }, + } = await query( + sqlClient, + Sql.insertProblemHarborScanMatch( + { + id: null, + name, + description, + default_lagoon_project: defaultLagoonProject, + default_lagoon_environment: defaultLagoonEnvironment, + default_lagoon_service_name: defaultLagoonServiceName, + regex + } + ), + ); + + const rows = await query(sqlClient, Sql.selectAllProblemHarborScanMatchByDatabaseId(insertId)); + return R.prop(0, rows); +}; + + +export const deleteProblemHarborScanMatch = async ( + root, + { + input : { + id + } + }, + { sqlClient, hasPermission }, +) => { + + await hasPermission('harbor_scan_match', 'delete', {}); + + await query(sqlClient, Sql.deleteProblemHarborScanMatch(id)); + + return 'success'; +}; diff --git a/services/api/src/resources/problem/sql.ts 
b/services/api/src/resources/problem/sql.ts new file mode 100644 index 0000000000..32323e3a16 --- /dev/null +++ b/services/api/src/resources/problem/sql.ts @@ -0,0 +1,132 @@ +const { knex } = require('../../util/db'); + +const standardEnvironmentReturn = { + id: 'id', + environment: 'environment', + severity: 'severity', + severityScore: 'severity_score', + identifier: 'identifier', + service: 'lagoon_service', + source: 'source', + associatedPackage: 'associated_package', + description: 'description', + version: 'version', + fixedVersion: 'fixed_version', + links: 'links', + data: 'data', + created: 'created', + deleted: 'deleted' +}; + +const standardProblemHarborScanMatchReturn = { + id: 'id', + name: 'name', + description: 'description', + default_lagoon_project: 'defaultLagoonProject', + default_lagoon_environment: 'defaultLagoonEnvironment', + default_lagoon_service_name: 'defaultLagoonServiceName', + regex: 'regex' +}; + +export const Sql = { + selectAllProblems: ({ + source = [], + environmentId, + environmentType = [], + severity = [], + }: { source: string[], environmentId: number, environmentType: string[], severity: string[]}) => { + let q = knex('environment_problem as p') + .join('environment as e', {environment: 'e.id'}, '=', {environment: 'p.environment'}) + .where('p.deleted', '=', '0000-00-00 00:00:00') + .select('p.*', {environment: 'e.id'}, { name: 'e.name', project: 'e.project', + environmentType: 'e.environment_type', openshiftProjectName: 'e.openshift_project_name'}); + + if (environmentType.length > 0) { + q.whereIn('e.environment_type', environmentType); + } + if (source.length > 0) { + q.whereIn('p.source', source); + } + if (environmentId) { + q.where('p.environment', environmentId); + } + if (severity.length > 0) { + q.whereIn('p.severity', severity); + } + return q.toString(); + }, + selectSeverityOptions: () => + knex('environment_problem') + .select('severity') + .toString(), + selectProblemByDatabaseId: (id) => + 
knex('environment_problem').where('id', id).toString(), + selectProblemsByEnvironmentId: ({ + environmentId, + severity = [], + source = [], + }) => { + let q = knex('environment_problem').select(standardEnvironmentReturn) + .where('environment', environmentId) + .where('deleted', '=', '0000-00-00 00:00:00'); + if (severity.length > 0) { + q.whereIn('severity', severity); + } + if (source.length > 0) { + q.whereIn('source', source); + } + return q.toString() + }, + insertProblem: ({environment, severity, severity_score, identifier, lagoon_service, source, + associated_package, description, version, fixed_version, links, data, created}) => + knex('environment_problem').insert({environment, severity, severity_score, identifier, lagoon_service, source, + associated_package, description, version, fixed_version, links, data, created}).toString(), + deleteProblem: (environment, identifier) => + knex('environment_problem') + .where({ + environment: environment, + identifier: identifier + }) + .where('deleted', '=', '0000-00-00 00:00:00') + .update({ deleted: knex.fn.now() }) + .toString(), + deleteProblemsFromSource: (environment, source, service) => + knex('environment_problem') + .where({ + environment: environment, + source: source, + lagoon_service: service, + }) + .where('deleted', '=', '0000-00-00 00:00:00') + .update({ deleted: knex.fn.now() }) + .toString(), + selectAllProblemHarborScanMatches: () => + knex('problem_harbor_scan_matcher') + .select(standardProblemHarborScanMatchReturn).toString(), + selectAllProblemHarborScanMatchByDatabaseId: (id) => + knex('problem_harbor_scan_matcher') + .select(standardProblemHarborScanMatchReturn). 
+ where({ id: id }).toString(), + insertProblemHarborScanMatch: ({ + id, + name, + description, + default_lagoon_project, + default_lagoon_environment, + default_lagoon_service_name, + regex + }) => + knex('problem_harbor_scan_matcher').insert({ + name, + description, + default_lagoon_project, + default_lagoon_environment, + default_lagoon_service_name, + regex + }).toString(), + deleteProblemHarborScanMatch: (id) => + knex('problem_harbor_scan_matcher') + .where({ + id: id, + }).delete().toString(), +}; diff --git a/services/api/src/resources/problem/types.js b/services/api/src/resources/problem/types.js new file mode 100644 index 0000000000..ba799ecfa5 --- /dev/null +++ b/services/api/src/resources/problem/types.js @@ -0,0 +1,30 @@ +import { GraphQLScalarType, GraphQLError } from 'graphql'; +import { makeExecutableSchema } from 'graphql-tools'; +import { Kind } from 'graphql/language'; + +const SeverityScoreType = new GraphQLScalarType({ + name: 'SeverityScore', + description: 'Severity score is a numeric measure (0-1) of a problems severity', + serialize: parseFloat, + parseValue: parseFloat, + parseLiteral(ast) { + switch (ast.kind) { + case(Kind.NULL): return null; break; + case(Kind.INT): if(ast.value == 0 || ast.value == 1) { + return ast.value; + } + break; + case(Kind.FLOAT): + if(ast.value >= 0 && ast.value <= 1) { + return ast.value; + } + } + throw new GraphQLError('Severity Score is invalid - should be a one place decimal between 0 and 1 or null'); + } +}); + +const types = { + SeverityScoreType, +}; + +module.exports = types; \ No newline at end of file diff --git a/services/api/src/resources/project/harborSetup.ts b/services/api/src/resources/project/harborSetup.ts index 0897655ebb..61679c5e08 100644 --- a/services/api/src/resources/project/harborSetup.ts +++ b/services/api/src/resources/project/harborSetup.ts @@ -58,7 +58,7 @@ async function createHarborProject(sqlClient: MariaClient, harborClient, lagoonP logger.error(`Unable to get the harbor 
project id of "${lagoonProjectName}", as it does not exist in harbor!`) return } else { - logger.error(`Unable to get the harbor project id of "${lagoonProjectName}" !!`) + logger.error(`Unable to get the harbor project id of "${lagoonProjectName}", error: ${err}`) return "" } } @@ -92,7 +92,7 @@ async function createRobot(sqlClient: MariaClient, harborClient, lagoonProjectNa if (err.statusCode == 409) { logger.warn(`Unable to create a robot account for harbor project "${lagoonProjectName}", as a robot account of the same name already exists!`) } else { - logger.error(`Unable to create a robot account for harbor project "${lagoonProjectName}"!`, err) + logger.error(`Unable to create a robot account for harbor project "${lagoonProjectName}", error: ${err}`) return false } } @@ -239,5 +239,59 @@ export const createHarborOperations = (sqlClient /* : MariaSQL */) => ({ // Reset harbor project webhook to point to this Lagoon's Harbor if (! await resetHarborWebhook(sqlClient, harborClient, lagoonProjectName, lagoonWebhookAddress, harborProjectID)) {return} + }, + + deleteProject: async (lagoonProjectName) => { + const harborRepos = [] + + // Get existing harbor project's id + try { + const res = await harborClient.get(`projects?name=${lagoonProjectName}`) + var harborProjectID = res.body[0].project_id + logger.debug(`Got the harbor project id for project ${lagoonProjectName} successfully!`) + } catch (err) { + if (err.statusCode == 404) { + // This case could come to pass if a project was created + // before we began using Harbor as our container registry + logger.warn(`Unable to get the harbor project id of "${lagoonProjectName}", as it does not exist in harbor!`) + return + } else { + logger.error(`Unable to get the harbor project id of "${lagoonProjectName}", error: ${err}`) + return + } + } + logger.debug(`Harbor project id for ${lagoonProjectName}: ${harborProjectID}`) + + // Check for existing repositories within the project + try { + const res = await 
harborClient.get(`search?name=${lagoonProjectName}`) + for (var i = 0; i < res.repository.length; i++) { + if (res.repository[i].project_name == lagoonProjectName){ + harborRepos.push(res.repository[i]) + } + } + } catch (err) { + logger.error(`Unable to search for repositories within the harbor project "${lagoonProjectName}", error: ${err}`) + } + + // Delete any repositories within this project + try { + for (var i = 0; i < harborRepos.length; i++) { + var res = await harborClient.delete(`repositories/${harborRepos[i].repository_name}`) + } + } catch (err) { + logger.error(`Unable to delete repositories within the harbor project "${lagoonProjectName}", error: ${err}`) + } + + // Delete harbor project + try { + var res = await harborClient.delete(`projects/${harborProjectID}`); + logger.debug(`Harbor project ${lagoonProjectName} deleted!`) + } catch (err) { + // 400 means the project id is invalid + // 404 means project doesn't exist + // 412 means project still contains repositories + logger.info(`Unable to delete the harbor project "${lagoonProjectName}", error: ${err}`) + } } }) diff --git a/services/api/src/resources/project/helpers.ts b/services/api/src/resources/project/helpers.ts index 7b8b30e69c..985f7eb128 100644 --- a/services/api/src/resources/project/helpers.ts +++ b/services/api/src/resources/project/helpers.ts @@ -1,8 +1,8 @@ -import * as R from 'ramda'; +const R = require('ramda'); import { MariaClient } from 'mariasql'; -import { asyncPipe } from '@lagoon/commons/dist/util'; -import { query } from '../../util/db'; -import { Sql } from './sql'; +const { asyncPipe } = require('@lagoon/commons/dist/util'); +const { query } = require('../../util/db'); +const { Sql } = require('./sql'); export const Helpers = (sqlClient: MariaClient) => { const getProjectById = async (id: number) => { @@ -10,10 +10,15 @@ export const Helpers = (sqlClient: MariaClient) => { return R.prop(0, rows); }; - const getProjectByEnvironmentId = async (environmentId) => { - const 
rows = await query(sqlClient, Sql.selectProjectByEnvironmentId(environmentId)); + const getProjectByName = async (name: string) => { + const rows = await query(sqlClient, Sql.selectProjectByName(name)); return R.prop(0, rows); - } + }; + + const getProjectByEnvironmentId = async (environmentId: number, environmentType = null) => { + const rows = await query(sqlClient, Sql.selectProjectByEnvironmentId(environmentId, environmentType)); + return R.prop(0, rows); + }; const getProjectsByIds = (projectIds: number[]) => query(sqlClient, Sql.selectProjectsByIds(projectIds)); diff --git a/services/api/src/resources/project/resolvers.ts b/services/api/src/resources/project/resolvers.ts index f568bcf5eb..6613890bf3 100644 --- a/services/api/src/resources/project/resolvers.ts +++ b/services/api/src/resources/project/resolvers.ts @@ -168,6 +168,10 @@ export const getProjectByName: ResolverFn = async ( const rows = await query(sqlClient, prep(args)); const project = rows[0]; + if (!project) { + return null; + } + await hasPermission('project', 'view', { project: project.id, }); @@ -335,6 +339,7 @@ export const addProject = async ( ${input.standbyAlias ? ':standby_alias' : '"lagoon-standby"'}, ${input.autoIdle ? ':auto_idle' : '1'}, ${input.storageCalc ? ':storage_calc' : '1'}, + ${input.problemsUi ? ':problems_ui' : '0'}, ${ input.developmentEnvironmentsLimit ? 
':development_environments_limit' @@ -475,6 +480,11 @@ export const deleteProject: ResolverFn = async ( logger.error(`Could not delete default user for project ${project.name}: ${err.message}`); } + // @TODO discuss if we want to delete projects in harbor or not + //const harborOperations = createHarborOperations(sqlClient); + + //const harborResults = await harborOperations.deleteProject(project.name) + return 'success'; }; @@ -503,6 +513,7 @@ export const updateProject: ResolverFn = async ( standbyAlias, autoIdle, storageCalc, + problemsUi, pullrequests, openshift, openshiftProjectPattern, @@ -591,6 +602,7 @@ export const updateProject: ResolverFn = async ( standbyAlias, autoIdle, storageCalc, + problemsUi, pullrequests, openshift, openshiftProjectPattern, diff --git a/services/api/src/resources/project/sql.ts b/services/api/src/resources/project/sql.ts index aea8d90b4a..b787cc531d 100644 --- a/services/api/src/resources/project/sql.ts +++ b/services/api/src/resources/project/sql.ts @@ -29,12 +29,19 @@ export const Sql = { knex('project as p') .whereIn('p.id', projectIds) .toString(), - selectProjectByEnvironmentId: (environmentId) => - knex('environment as e') - .select('e.id', 'e.project', 'e.openshift_project_name', 'p.name') - .leftJoin('project as p', 'p.id', '=', 'e.project') - .where('e.id', environmentId) - .toString(), + selectProjectByEnvironmentId: ( + environmentId, + environmentType = [] + ): {environmentId: number, environmentType: string} => { + let q = knex('environment as e') + .select('e.id', {envName: 'e.name'}, 'e.environment_type', 'e.project', 'e.openshift_project_name', 'p.name') + .leftJoin('project as p', 'p.id', '=', 'e.project'); + if (environmentType && environmentType.length > 0) { + q.where('e.environment_type', environmentType); + } + q.where('e.id', environmentId); + return q.toString(); + }, updateProject: ({ id, patch }: { id: number, patch: { [key: string]: any } }) => knex('project') .where('id', '=', id) diff --git 
a/services/api/src/resources/user/resolvers.ts b/services/api/src/resources/user/resolvers.ts index dcc4619b58..bc40c3436b 100644 --- a/services/api/src/resources/user/resolvers.ts +++ b/services/api/src/resources/user/resolvers.ts @@ -97,7 +97,6 @@ export const deleteUser: ResolverFn = async ( }); await models.UserModel.deleteUser(user.id); - // TODO remove user ssh keys return 'success'; diff --git a/services/api/src/typeDefs.js b/services/api/src/typeDefs.js index 8551583e4c..3c9eba1d4d 100644 --- a/services/api/src/typeDefs.js +++ b/services/api/src/typeDefs.js @@ -102,6 +102,94 @@ const typeDefs = gql` ZAR } + enum ProblemSeverityRating { + NONE + UNKNOWN + NEGLIGIBLE + LOW + MEDIUM + HIGH + CRITICAL + } + + scalar SeverityScore + + type Problem { + id: Int + environment: Environment + severity: ProblemSeverityRating + severityScore: SeverityScore + identifier: String + service: String + source: String + associatedPackage: String + description: String + links: String + version: String + fixedVersion: String + data: String + created: String + deleted: String + } + + type ProblemHarborScanMatch { + id: Int + name: String + description: String + defaultLagoonProject: String + defaultLagoonEnvironment: String + defaultLagoonService: String + regex: String + } + + input AddProblemHarborScanMatchInput { + name: String! + description: String! + defaultLagoonProject: String + defaultLagoonEnvironment: String + defaultLagoonService: String + regex: String! + } + + input DeleteProblemHarborScanMatchInput { + id: Int! + } + + input AddProblemInput { + id: Int + environment: Int! + severity: ProblemSeverityRating + severityScore: SeverityScore + identifier: String! + service: String + source: String! + associatedPackage: String + description: String + links: String + version: String + fixedVersion: String + data: String! 
+ created: String + } + + input BulkProblem { + severity: ProblemSeverityRating + severityScore: SeverityScore + identifier: String + data: String + } + + input DeleteProblemInput { + environment: Int! + identifier: String! + } + + input DeleteProblemsFromSourceInput { + environment: Int! + source: String! + service: String! + } + type File { id: Int filename: String @@ -162,6 +250,7 @@ const typeDefs = gql` currency: String billingSoftware: String modifiers: [BillingModifier] + uptimeRobotStatusPageId: String } type Openshift { @@ -174,6 +263,7 @@ const typeDefs = gql` sshHost: String sshPort: String created: String + monitoringConfig: JSON } type NotificationMicrosoftTeams { @@ -326,6 +416,10 @@ const typeDefs = gql` """ storageCalc: Int """ + Should the Problems UI be available for this Project (\`1\` or \`0\`) + """ + problemsUi: Int + """ Reference to OpenShift Object this Project should be deployed to """ openshift: Openshift @@ -338,6 +432,10 @@ const typeDefs = gql` """ developmentEnvironmentsLimit: Int """ + Name of the OpenShift Project/Namespace + """ + openshiftProjectName: String + """ Deployed Environments for this Project """ environments( @@ -451,6 +549,7 @@ const typeDefs = gql` backups(includeDeleted: Boolean): [Backup] tasks(id: Int): [Task] services: [EnvironmentService] + problems(severity: [ProblemSeverityRating], source: [String]): [Problem] } type EnvironmentHitsMonth { @@ -545,6 +644,8 @@ const typeDefs = gql` discountPercentage: Float extraFixed: Float extraPercentage: Float + min: Float + max: Float customerComments: String adminComments: String weight: Int @@ -594,6 +695,7 @@ const typeDefs = gql` """ projectByGitUrl(gitUrl: String!): Project environmentByName(name: String!, project: Int!): Environment + environmentById(id: Int!): Environment """ Returns Environment Object by a given openshiftProjectName """ @@ -622,6 +724,11 @@ const typeDefs = gql` """ allEnvironments(createdAfter: String, type: EnvType, order: EnvOrderType): 
[Environment] """ + Returns all Problems matching given filter (all if no filter defined) + """ + allProblems(source: [String], project: Int, environment: Int, envType: [EnvType], identifier: String, severity: [ProblemSeverityRating]): [Problem] + problemSources: [String] + """ Returns all Groups matching given filter (all if no filter defined) """ allGroups(name: String, type: String): [GroupInterface] @@ -645,6 +752,10 @@ const typeDefs = gql` Returns LAGOON_VERSION """ lagoonVersion: JSON + """ + Returns all ProblemHarborScanMatchers + """ + allProblemHarborScanMatchers: [ProblemHarborScanMatch] } # Must provide id OR name @@ -708,6 +819,7 @@ const typeDefs = gql` storageCalc: Int developmentEnvironmentsLimit: Int privateKey: String + problemsUi: Int } input AddEnvironmentInput { @@ -839,6 +951,7 @@ const typeDefs = gql` projectUser: String sshHost: String sshPort: String + monitoringConfig: JSON } input DeleteOpenshiftInput { @@ -945,6 +1058,7 @@ const typeDefs = gql` openshift: Int openshiftProjectPattern: String developmentEnvironmentsLimit: Int + problemsUi: Int } input UpdateProjectInput { @@ -960,6 +1074,7 @@ const typeDefs = gql` projectUser: String sshHost: String sshPort: String + monitoringConfig: JSON } input UpdateOpenshiftInput { @@ -1105,8 +1220,6 @@ const typeDefs = gql` parentGroup: GroupInput } - - input AddBillingModifierInput { """ The existing billing group for this modifier @@ -1121,7 +1234,7 @@ const typeDefs = gql` """ endDate: String! 
""" - The amount that the total monthly bill should be discounted - Format (Int) + The amount that the total monthly bill should be discounted - Format (Float) """ discountFixed: Float """ @@ -1129,7 +1242,7 @@ const typeDefs = gql` """ discountPercentage: Float """ - The amount of exta cost that should be added to the total- Format (Int) + The amount of exta cost that should be added to the total- Format (Float) """ extraFixed: Float """ @@ -1137,6 +1250,14 @@ const typeDefs = gql` """ extraPercentage: Float """ + The minimum amount of the invoice applied to the total- Format (Float) + """ + min: Float + """ + The maximum amount of the invoice applied to the total- Format (Float) + """ + max: Float + """ Customer comments are visible to the customer """ customerComments: String @@ -1158,6 +1279,8 @@ const typeDefs = gql` discountPercentage: Float extraFixed: Float extraPercentage: Float + min: Float + max: Float customerComments: String adminComments: String weight: Int @@ -1210,6 +1333,7 @@ const typeDefs = gql` name: String! currency: Currency! billingSoftware: String + uptimeRobotStatusPageId: String } input ProjectBillingGroupInput { @@ -1221,6 +1345,7 @@ const typeDefs = gql` name: String! 
currency: Currency billingSoftware: String + uptimeRobotStatusPageId: String } input UpdateBillingGroupInput { @@ -1309,6 +1434,11 @@ const typeDefs = gql` updateDeployment(input: UpdateDeploymentInput): Deployment cancelDeployment(input: CancelDeploymentInput!): String addBackup(input: AddBackupInput!): Backup + addProblem(input: AddProblemInput!): Problem + addProblemHarborScanMatch(input: AddProblemHarborScanMatchInput!): ProblemHarborScanMatch + deleteProblem(input: DeleteProblemInput!): String + deleteProblemsFromSource(input: DeleteProblemsFromSourceInput!): String + deleteProblemHarborScanMatch(input: DeleteProblemHarborScanMatchInput!): String deleteBackup(input: DeleteBackupInput!): String deleteAllBackups: String addRestore(input: AddRestoreInput!): Restore @@ -1355,7 +1485,6 @@ const typeDefs = gql` removeGroupsFromProject(input: ProjectGroupsInput!): Project updateProjectMetadata(input: UpdateMetadataInput!): Project removeProjectMetadataByKey(input: RemoveMetadataInput!): Project - addBillingModifier(input: AddBillingModifierInput!): BillingModifier updateBillingModifier(input: UpdateBillingModifierInput!): BillingModifier deleteBillingModifier(input: DeleteBillingModifierInput!): String diff --git a/services/api/src/util/auth.ts b/services/api/src/util/auth.ts index 0406935fa9..c11b93a50e 100644 --- a/services/api/src/util/auth.ts +++ b/services/api/src/util/auth.ts @@ -1,11 +1,11 @@ import * as R from 'ramda'; +import { getRedisCache, saveRedisCache } from '../clients/redisClient'; import { verify } from 'jsonwebtoken'; import * as logger from '../logger'; import { keycloakGrantManager } from'../clients/keycloakClient'; import { User } from '../models/user'; import { Group } from '../models/group'; - const { JWTSECRET, JWTAUDIENCE } = process.env; interface ILegacyToken { @@ -105,7 +105,6 @@ export const keycloakHasPermission = (grant, requestCache, keycloakAdminClient) return async (resource, scope, attributes: IKeycloakAuthAttributes = {}) => { 
const currentUserId: string = grant.access_token.content.sub; - const currentUser = await UserModel.loadUserById(currentUserId); // Check if the same set of permissions has been granted already for this // api query. @@ -113,10 +112,31 @@ export const keycloakHasPermission = (grant, requestCache, keycloakAdminClient) // or group context) and cache a single query instead? const cacheKey = `${currentUserId}:${resource}:${scope}:${JSON.stringify(attributes)}`; const cachedPermissions = requestCache.get(cacheKey); - if (cachedPermissions !== undefined) { - return cachedPermissions; + if (cachedPermissions === true) { + return true; + } else if (!cachedPermissions === false) { + throw new KeycloakUnauthorizedError(`Unauthorized: You don't have permission to "${scope}" on "${resource}".`); + } + + // Check the redis cache before doing a full keycloak lookup. + const resourceScope = {resource, scope, currentUserId, ...attributes }; + let redisCacheResult: number; + try { + const data = await getRedisCache(resourceScope); + redisCacheResult = parseInt(data, 10); + } catch (err) { + logger.warn(`Could not lookup authz cache: ${err.message}`); } + if (redisCacheResult === 1) { + return true; + } else if (redisCacheResult === 0) { + logger.debug(`Redis authz cache returned denied for ${JSON.stringify(resourceScope)}`); + throw new KeycloakUnauthorizedError(`Unauthorized: You don't have permission to "${scope}" on "${resource}".`); + } + + + const currentUser = await UserModel.loadUserById(currentUserId); const serviceAccount = await keycloakGrantManager.obtainFromClientCredentials(); let claims: { @@ -249,6 +269,12 @@ export const keycloakHasPermission = (grant, requestCache, keycloakAdminClient) if (newGrant.access_token.hasPermission(resource, scope)) { requestCache.set(cacheKey, true); + try { + await saveRedisCache(resourceScope, 1); + } catch (err) { + logger.warn(`Could not save authz cache: ${err.message}`); + } + return; } } catch (err) { @@ -258,6 +284,12 @@ export 
const keycloakHasPermission = (grant, requestCache, keycloakAdminClient) } requestCache.set(cacheKey, false); + // TODO: Re-enable when we can distinguish between error and access denied + // try { + // await saveRedisCache(resourceScope, 0); + // } catch (err) { + // logger.warn(`Could not save authz cache: ${err.message}`); + // } throw new KeycloakUnauthorizedError(`Unauthorized: You don't have permission to "${scope}" on "${resource}".`); }; }; diff --git a/services/auto-idler/.lagoon.yml b/services/auto-idler/.lagoon.yml index 3f25655741..0384f6bad7 100644 --- a/services/auto-idler/.lagoon.yml +++ b/services/auto-idler/.lagoon.yml @@ -39,7 +39,9 @@ parameters: required: true - name: CRONJOBS description: Oneliner of Cronjobs - value: "" + value: |- + 30 * * * * /idle-clis.sh + 0 */4 * * * /idle-services.sh objects: - apiVersion: v1 kind: DeploymentConfig diff --git a/services/auto-idler/idle-clis.sh b/services/auto-idler/idle-clis.sh index 52f98f14e9..531285ac22 100755 --- a/services/auto-idler/idle-clis.sh +++ b/services/auto-idler/idle-clis.sh @@ -2,6 +2,8 @@ # set -e -o pipefail +if [ "${LAGOON_ENVIRONMENT_TYPE}" == "production" ]; then + prefixwith() { local prefix="$1" shift @@ -46,3 +48,5 @@ done sleep 5 # clean up the tmp file rm $TMP_DATA + +fi \ No newline at end of file diff --git a/services/auto-idler/idle-services.sh b/services/auto-idler/idle-services.sh index 8eb04d1b20..ec48759837 100755 --- a/services/auto-idler/idle-services.sh +++ b/services/auto-idler/idle-services.sh @@ -3,6 +3,8 @@ # make sure we stop if we fail set -eo pipefail +if [ "${LAGOON_ENVIRONMENT_TYPE}" == "production" ]; then + prefixwith() { local prefix="$1" shift @@ -52,3 +54,5 @@ done sleep 5 # clean up the tmp file rm $TMP_DATA + +fi \ No newline at end of file diff --git a/services/harbor-core/harbor-core.yml b/services/harbor-core/harbor-core.yml index 7a8d97075d..8b9626d6c4 100644 --- a/services/harbor-core/harbor-core.yml +++ b/services/harbor-core/harbor-core.yml @@ 
-190,6 +190,7 @@ objects: CLAIR_HEALTH_CHECK_SERVER_URL: "http://harborclair:6061" WITH_TRIVY: "true" TRIVY_ADAPTER_URL: "harbor-trivy:8080" + ROBOT_TOKEN_DURATION: "500" HTTP_PROXY: "" HTTPS_PROXY: "" NO_PROXY: "harbor-core,harbor-jobservice,harbor-database,harborclair,harborclairadapter,harborregistry,harbor-portal,harbor-trivy,127.0.0.1,localhost,.local,.internal" diff --git a/services/harbor-trivy/Dockerfile b/services/harbor-trivy/Dockerfile index ab569d0ac1..4e45815911 100644 --- a/services/harbor-trivy/Dockerfile +++ b/services/harbor-trivy/Dockerfile @@ -1,4 +1,4 @@ -FROM goharbor/trivy-adapter-photon:v2.0.0 +FROM aquasec/harbor-scanner-trivy:0.11.0 LABEL maintainer="amazee.io" COPY install_cert.sh /home/scanner @@ -9,8 +9,6 @@ RUN mkdir /home/scanner/.cache \ && mkdir /home/scanner/.cache/reports \ && chown scanner:root -R /home/scanner/.cache \ && chown scanner:root -R /home/scanner/.cache \ - && chown scanner:root /etc/pki/tls/certs/ca-bundle.crt \ - && chmod g+w /etc/pki/tls/certs/ca-bundle.crt \ && chown -R scanner:root /home/scanner \ && chmod g+rw /home/scanner USER scanner \ No newline at end of file diff --git a/services/keycloak/start.sh b/services/keycloak/start.sh index e9c1b865ae..a24b5f9673 100755 --- a/services/keycloak/start.sh +++ b/services/keycloak/start.sh @@ -1472,6 +1472,124 @@ EOF } +function configure_problems_system { + + echo "configure_problems_system running" + + CLIENT_ID=$(/opt/jboss/keycloak/bin/kcadm.sh get -r lagoon clients?clientId=api --config $CONFIG_PATH | python -c 'import sys, json; print json.load(sys.stdin)[0]["id"]') + problems_system=$(/opt/jboss/keycloak/bin/kcadm.sh get -r lagoon clients/$CLIENT_ID/authz/resource-server/permission?name=View+Problems --config $CONFIG_PATH) + echo Checking task:manageProblems + + if [ "$problems_system" != "[ ]" ]; then + echo "Problems Permissions already configured" + return 0 + fi + + echo Configuring Problems Permissions + + echo Creating resource problem + + echo 
'{"name":"problem","displayName":"problem","scopes":[{"name":"view"},{"name":"add"},{"name":"delete"}],"attributes":{},"uris":[],"ownerManagedAccess":""}' | /opt/jboss/keycloak/bin/kcadm.sh create clients/$CLIENT_ID/authz/resource-server/resource --config $CONFIG_PATH -r ${KEYCLOAK_REALM:-master} -f - + + # Create new permissions + /opt/jboss/keycloak/bin/kcadm.sh create clients/$CLIENT_ID/authz/resource-server/permission/scope --config $CONFIG_PATH -r lagoon -f - < { const result = await getOpenShiftInfoForProject(projectName); const projectOpenShift = result.project + const billingGroupResult = await getBillingGroupForProject(projectName); + const projectBillingGroup = billingGroupResult.project try { @@ -55,7 +57,13 @@ const messageConsumer = async msg => { environmentName = environmentName.concat('-' + hash) } - var environmentType = branch === projectOpenShift.productionEnvironment ? 'production' : 'development'; + var environmentType = 'development' + if ( + projectOpenShift.productionEnvironment === environmentName + || projectOpenShift.standbyProductionEnvironment === environmentName + ) { + environmentType = 'production' + } var gitSha = sha as string var projectId = projectOpenShift.id var openshiftConsole = projectOpenShift.openshift.consoleUrl.replace(/\/$/, ""); @@ -65,6 +73,8 @@ const messageConsumer = async msg => { var openshiftProjectUser = projectOpenShift.openshift.projectUser || "" var deployPrivateKey = projectOpenShift.privateKey var gitUrl = projectOpenShift.gitUrl + var projectProductionEnvironment = projectOpenShift.productionEnvironment + var projectStandbyEnvironment = projectOpenShift.standbyProductionEnvironment var subfolder = projectOpenShift.subfolder || "" var routerPattern = projectOpenShift.openshift.routerPattern ? 
projectOpenShift.openshift.routerPattern.replace('${environment}',environmentName).replace('${project}', projectName) : "" var prHeadBranch = headBranch || "" @@ -78,6 +88,16 @@ const messageConsumer = async msg => { var openshiftPromoteSourceProject = promoteSourceEnvironment ? `${projectName}-${ocsafety(promoteSourceEnvironment)}` : "" // A secret which is the same across all Environments of this Lagoon Project var projectSecret = crypto.createHash('sha256').update(`${projectName}-${jwtSecret}`).digest('hex'); + var alertContactHA = "" + var alertContactSA = "" + var monitoringConfig = JSON.parse(projectOpenShift.openshift.monitoringConfig) || "invalid" + if (monitoringConfig != "invalid"){ + alertContactHA = monitoringConfig.uptimerobot.alertContactHA || "" + alertContactSA = monitoringConfig.uptimerobot.alertContactSA || "" + } + var availability = projectOpenShift.availability || "STANDARD" + const billingGroup = projectBillingGroup.groups.find(i => i.type == "billing" ) || "" + var uptimeRobotStatusPageId = billingGroup.uptimeRobotStatusPageId || "" } catch(error) { logger.error(`Error while loading information for project ${projectName}`) logger.error(error) @@ -125,7 +145,7 @@ const messageConsumer = async msg => { buildImage = `amazeeio/kubectl-build-deploy-dind:${lagoonVersion}` } else { // we are a development enviornment, use the amazeeiolagoon image with the same branch name - buildImage = `amazeeiolagoon/oc-build-deploy-dind:${lagoonGitSafeBranch}` + buildImage = `amazeeiolagoon/kubectl-build-deploy-dind:${lagoonGitSafeBranch}` } let jobconfig = { @@ -213,6 +233,14 @@ const messageConsumer = async msg => { "name": "ENVIRONMENT_TYPE", "value": environmentType }, + { + "name": "ACTIVE_ENVIRONMENT", + "value": projectProductionEnvironment + }, + { + "name": "STANDBY_ENVIRONMENT", + "value": projectStandbyEnvironment + }, { "name": "KUBERNETES", "value": openshiftName @@ -265,6 +293,19 @@ const messageConsumer = async msg => { if 
(!R.isEmpty(environment.envVariables)) { jobconfig.spec.template.spec.containers[0].env.push({"name": "LAGOON_ENVIRONMENT_VARIABLES", "value": JSON.stringify(environment.envVariables)}) } + if (alertContactHA != undefined && alertContactSA != undefined){ + if (availability == "HIGH") { + jobconfig.spec.template.spec.containers[0].env.push({"name": "MONITORING_ALERTCONTACT","value": alertContactHA}) + } else { + jobconfig.spec.template.spec.containers[0].env.push({"name": "MONITORING_ALERTCONTACT","value": alertContactSA}) + } + } else { + jobconfig.spec.template.spec.containers[0].env.push({"name": "MONITORING_ALERTCONTACT","value": "unconfigured"}) + } + if (uptimeRobotStatusPageId){ + jobconfig.spec.template.spec.containers[0].env.push({"name": "MONITORING_STATUSPAGEID","value": uptimeRobotStatusPageId}) + } + return jobconfig } @@ -318,19 +359,19 @@ const messageConsumer = async msg => { "name":openshiftProject, "labels": { "lagoon.sh/project": projectName, - "lagoon.sh/environment": environmentName + "lagoon.sh/environment": environmentName, + "lagoon.sh/environmentType": environmentType } } } }) logger.info(`${openshiftProject}: Namespace ${openshiftProject} created`) } catch (err) { - console.log(err.code) // an already existing namespace throws an error, we check if it's a 409, means it does already exist, so we ignore that error. 
if (err.code == 409) { logger.info(`${openshiftProject}: Namespace ${openshiftProject} already exists`) } else { - logger.error(err) + logger.error(`Could not create namespace '${openshiftProject}': ${err.code} ${err.message}`); throw new Error } } diff --git a/services/kubernetesbuilddeploymonitor/src/index.ts b/services/kubernetesbuilddeploymonitor/src/index.ts index 54d95adfc2..2bc0de8e65 100644 --- a/services/kubernetesbuilddeploymonitor/src/index.ts +++ b/services/kubernetesbuilddeploymonitor/src/index.ts @@ -1,6 +1,7 @@ import { promisify } from 'util'; import kubernetesClient from 'kubernetes-client'; import R from 'ramda'; +import moment from 'moment'; import { logger } from '@lagoon/commons/dist/local-logging'; import { @@ -182,7 +183,6 @@ ${podLog}`; await updateDeployment(deployment.deploymentByRemoteId.id, { status: status.toUpperCase(), - created: convertDateFormat(jobInfo.metadata.creationTimestamp), started: dateOrNull(jobInfo.status.startTime), completed: dateOrNull(jobInfo.status.completionTime), }); @@ -342,12 +342,25 @@ const saveBuildLog = async(jobName, projectName, branchName, buildLog, status, r const deathHandler = async (msg, lastError) => { const { - jobName, + buildName: jobName, projectName, - openshiftProject, branchName, - sha - } = JSON.parse(msg.content.toString()) + sha, + deployment + } = JSON.parse(msg.content.toString()); + + // Don't leave the deployment in an active state + try { + const now = moment.utc(); + await updateDeployment(deployment.id, { + status: 'ERROR', + completed: now.format('YYYY-MM-DDTHH:mm:ss'), + }); + } catch (error) { + logger.error( + `Could not update deployment ${projectName} ${jobName}. 
Message: ${error}` + ); + } let logMessage = '' if (sha) { diff --git a/services/kubernetesdeployqueue/src/index.ts b/services/kubernetesdeployqueue/src/index.ts index 0570260470..24505d3e7b 100644 --- a/services/kubernetesdeployqueue/src/index.ts +++ b/services/kubernetesdeployqueue/src/index.ts @@ -1,8 +1,10 @@ const promisify = require('util').promisify; import KubernetesClient from 'kubernetes-client'; import R from 'ramda'; +import moment from 'moment'; import { logger } from '@lagoon/commons/dist/local-logging'; import { + graphqlapi, getOpenShiftInfoForProject, updateDeployment } from '@lagoon/commons/dist/api'; @@ -20,14 +22,55 @@ import { initSendToLagoonLogs(); initSendToLagoonTasks(); -const pause = duration => new Promise(res => setTimeout(res, duration)); +class AnotherBuildAlreadyRunning extends Error { + constructor(message) { + super(message); + this.name = 'AnotherBuildAlreadyRunning'; + } +} + +class BuildOutOfOrder extends Error { + delayFn: (retryCount: number) => number; -const retry = (retries, fn, delay = 1000) => - fn().catch(err => - retries > 1 - ? pause(delay).then(() => retry(retries - 1, fn, delay)) - : Promise.reject(err) + constructor(message) { + super(message); + this.name = 'BuildOutOfOrder'; + // Wait 30 seconds before checking again. + this.delayFn = () => 30; + } +} + +const getEnvironmentDeployments = async ( + openshiftProjectName: string +): Promise => { + const result = await graphqlapi.query( + ` + query getEnvironmentDeployments($openshiftProjectName: String!) 
{ + environmentByOpenshiftProjectName(openshiftProjectName: $openshiftProjectName) { + deployments { + name + status + created + } + } + }`, + { openshiftProjectName } + ); + + return R.pathOr( + [], + ['environmentByOpenshiftProjectName', 'deployments'], + result ); +}; + +const filterByNewStatus = R.filter(R.propEq('status', 'new')); +const sortByCreatedDate = R.sort(R.ascend(R.prop('created'))); +const oldestNewDeployment = R.pipe( + filterByNewStatus, + sortByCreatedDate, + R.head +); const messageConsumer = async msg => { const { @@ -81,40 +124,41 @@ const messageConsumer = async msg => { } }); - const jobsGet = promisify( - kubernetesBatchApi.namespaces(openshiftProject).jobs.get - ); + const deployments = await getEnvironmentDeployments(openshiftProject); + const nextDeploymentToRun = oldestNewDeployment(deployments); - const hasNoActiveBuilds = () => - new Promise(async (resolve, reject) => { - const namespaceJobs = await jobsGet({ - qs: { - labelSelector: 'lagoon.sh/jobType=build' - } - }); - const activeBuilds: any = R.pipe( - R.propOr([], 'items'), - R.filter(R.pathSatisfies(R.lt(0), ['status', 'active'])) - )(namespaceJobs); - - if (R.isEmpty(activeBuilds)) { - resolve(); - } else { - logger.info( - `Delaying build of ${buildName} due to ${activeBuilds.length} pending builds` - ); - reject(); - } - }); + if (R.prop('name', nextDeploymentToRun) !== buildName) { + const msg = `The build "${buildName}" is not next in line for project "${openshiftProject}"`; + logger.debug(msg); + throw new BuildOutOfOrder(msg); + } - // Wait until an there are no active builds in this namespace running + // Check that there are no active builds in this namespace running + let activeBuilds; try { - // Check every minute for 30 minutes - await retry(30, hasNoActiveBuilds, 1 * 60 * 1000); + const jobsGetAll = promisify( + kubernetesBatchApi.namespaces(openshiftProject).jobs.get + ); + const namespaceJobs = await jobsGetAll({ + qs: { + labelSelector: 'lagoon.sh/jobType=build' 
+ } + }); + activeBuilds = R.pipe( + R.propOr([], 'items'), + R.filter(R.pathSatisfies(R.lt(0), ['status', 'active'])) + )(namespaceJobs); } catch (err) { - throw new Error( - `${openshiftProject}: Requeue build due to error: ${err.message}` + logger.error( + `${openshiftProject}: Unexpected error loading current running Jobs, unable to build ${buildName}: ${err}` ); + return; + } + + if (!R.isEmpty(activeBuilds)) { + const msg = `${openshiftProject}: ${buildName} is waiting on ${activeBuilds.length} active builds`; + logger.debug(msg); + throw new AnotherBuildAlreadyRunning(msg); } // Load job, if not exists create @@ -131,7 +175,6 @@ const messageConsumer = async msg => { const jobPost = promisify( kubernetesApi.group(jobConfig).ns(openshiftProject).jobs.post ); - console.log(JSON.stringify(jobConfig, null, 4)); jobInfo = await jobPost({ body: jobConfig }); logger.info(`${openshiftProject}: Created build ${buildName}`); @@ -168,7 +211,12 @@ const messageConsumer = async msg => { projectName, openshiftProject, branchName, - sha + sha, + deployment: { + ...deployment, + status: 'PENDING', + remoteId: jobInfo.metadata.uid, + } }; const taskMonitorLogs = await createTaskMonitor( @@ -197,12 +245,24 @@ const deathHandler = async (msg, lastError) => { const { buildName, projectName, - openshiftProject, branchName, sha, - jobConfig + deployment } = JSON.parse(msg.content.toString()); + // Don't leave the deployment in an active state + try { + const now = moment.utc(); + await updateDeployment(deployment.id, { + status: 'ERROR', + completed: now.format('YYYY-MM-DDTHH:mm:ss'), + }); + } catch (error) { + logger.error( + `Could not update deployment ${projectName} ${buildName}. 
Message: ${error}` + ); + } + let logMessage = ''; if (sha) { logMessage = `\`${branchName}\` (${sha.substring(0, 7)})`; diff --git a/services/kubernetesmisc/src/handlers/ingressMigration.ts b/services/kubernetesmisc/src/handlers/ingressMigration.ts new file mode 100644 index 0000000000..21065e5cec --- /dev/null +++ b/services/kubernetesmisc/src/handlers/ingressMigration.ts @@ -0,0 +1,301 @@ +const promisify = require("util").promisify; +const R = require("ramda"); +import { logger } from "@lagoon/commons/dist/local-logging"; +import { sendToLagoonLogs } from '@lagoon/commons/dist/logs'; +import { getOpenShiftInfoForProject, updateProject, updateTask } from '@lagoon/commons/dist/api'; +import { RouteMigration } from '@lagoon/commons/dist/openshiftApi'; +const convertDateFormat = R.init; + +import Api, { ClientConfiguration } from 'kubernetes-client'; +const Client = Api.Client1_13; + +const getConfig = (url, token) => ({ + url, + insecureSkipTlsVerify: true, + auth: { + bearer: token + } +}); + +const pause = duration => new Promise(res => setTimeout(res, duration)); +const retry = (retries, fn, delay = 1000) => + fn().catch( + err => + retries > 1 + ? 
pause(delay).then(() => retry(retries - 1, fn, delay)) + : Promise.reject(err) + ); + +export async function ingressMigration (data) { + const { projectName, productionEnvironment, standbyProductionEnvironment, task } = data; + + const result = await getOpenShiftInfoForProject(projectName); + const projectOpenShift = result.project; + const ocsafety = string => + string.toLocaleLowerCase().replace(/[^0-9a-z-]/g, '-'); + + try { + var safeActiveProductionEnvironment = ocsafety(productionEnvironment); + var safeStandbyProductionEnvironment = ocsafety(standbyProductionEnvironment); + var safeProjectName = ocsafety(projectName); + var openshiftConsole = projectOpenShift.openshift.consoleUrl.replace( + /\/$/, + '' + ); + var openshiftToken = projectOpenShift.openshift.token || ''; + var openshiftProject = projectOpenShift.openshiftProjectPattern + ? projectOpenShift.openshiftProjectPattern + .replace('${branch}', safeActiveProductionEnvironment) + .replace('${project}', safeProjectName) + : `${safeProjectName}-${safeActiveProductionEnvironment}`; + // create the destination openshift project name + var destinationOpenshiftProject = projectOpenShift.openshiftProjectPattern + ? projectOpenShift.openshiftProjectPattern + .replace('${branch}', safeStandbyProductionEnvironment) + .replace('${project}', safeProjectName) + : `${safeProjectName}-${safeStandbyProductionEnvironment}`; + } catch (error) { + logger.error(`Error while loading information for project ${projectName}`); + logger.error(error); + throw error; + } + + // define the ingressmigration. 
the annotation being set to true is what actually triggers the switch + const migrateRoutes = (openshiftProject, destinationOpenshiftProject) => { + let config = { + apiVersion: 'dioscuri.amazee.io/v1', + kind: 'IngressMigrate', + metadata: { + name: openshiftProject, + annotations: { + 'dioscuri.amazee.io/migrate':'true' + } + }, + spec: { + destinationNamespace: destinationOpenshiftProject, + activeEnvironment: safeActiveProductionEnvironment, + }, + }; + + return config; + }; + + // Kubernetes API Object - needed as some API calls are done to the Kubernetes API part of OpenShift and + // the OpenShift API does not support them. + const dioscuri: any = new RouteMigration({ + url: openshiftConsole, + insecureSkipTlsVerify: true, + auth: { + bearer: openshiftToken + } + }); + + const config: ClientConfiguration = getConfig(openshiftConsole, openshiftToken); + const client = new Client({ config }); + + const ingressMigratesGet = promisify( + dioscuri.ns(openshiftProject).ingressmigrates.get + ); + + const ingressMigrateDelete = async name => { + const deleteFn = promisify(dioscuri.ns(openshiftProject).ingressmigrates(openshiftProject).delete); + return deleteFn({ + body: {} + }); + }; + + const hasNoRouteMigrate = () => + new Promise(async (resolve, reject) => { + const ingressMigrates = await ingressMigratesGet(); + if (ingressMigrates.items.length === 0) { + logger.info(`${openshiftProject}: RouteMigrate deleted`); + resolve(); + } else { + logger.info( + `${openshiftProject}: RouteMigrate not deleted yet, will try again in 2sec` + ); + reject(); + } + }); + + const projectExists = async (client, namespace) => { + const namespaces = await client.api.v1.namespaces(namespace).get(); + if ( + namespaces.statusCode !== 200 && + namespaces.body.metadata.name !== namespace + ) { + return false; + } + + return true; + }; + + if (!(await projectExists(client, openshiftProject))) { + logger.error(`Project ${openshiftProject} does not exist, bailing`); + return; + } + if 
(!(await projectExists(client, destinationOpenshiftProject))) { + logger.error(`Project ${destinationOpenshiftProject} does not exist, bailing`); + return; + } + + // check if there is already a ingress migrate resource, delete it if there is + try { + const ingressMigrates = await ingressMigratesGet(); + for (let ingressMigrate of ingressMigrates.items) { + await ingressMigrateDelete(ingressMigrate.metadata.name); + logger.info( + `${openshiftProject}: Deleting IngressMigrate ${ingressMigrate.metadata.name}` + ); + } + // RouteMigrates are deleted quickly, but we still have to wait before we attempt to create the new one + try { + await retry(10, hasNoRouteMigrate, 2000); + } catch (err) { + throw new Error( + `${openshiftProject}: IngressMigrate not deleted` + ); + } + } catch (err) { + logger.info(`${openshiftProject}: IngressMigrate doesn't exist`); // proceed if it doesn't exist + } + + // add the ingressmigrate resource + try { + const ingressMigratePost = promisify( + dioscuri.ns(openshiftProject).ingressmigrates.post + ); + await ingressMigratePost({ + body: migrateRoutes(openshiftProject, destinationOpenshiftProject) + }); + logger.verbose(`${openshiftProject}: IngressMigrate resource created`); + } catch (err) { + logger.error(err); + throw new Error(); + } + + sendToLagoonLogs( + 'info', + projectName, + '', + 'task:misc-kubernetes:route:migrate', + data, + `*[${projectName}]* Ingress Migration between environments *${destinationOpenshiftProject}* started` + ); + + const ingressMigrateGet = promisify( + dioscuri.ns(openshiftProject).ingressmigrates(openshiftProject).get + ); + + // this will check the resource in openshift, then updates the task in the api + const updateActiveStandbyTask = () => { + return (new Promise(async (resolve, reject) => { + let exitResolve = false; + const ingressMigrateStatus = await ingressMigrateGet(); + if (ingressMigrateStatus === undefined || ingressMigrateStatus.status === undefined || 
ingressMigrateStatus.status.conditions === undefined) { + logger.info(`${openshiftProject}: active/standby switch not ready, will try again in 2sec`); + } else { + for (let i = 0; i < ingressMigrateStatus.status.conditions.length; i++) { + switch (ingressMigrateStatus.status.conditions[i].type ) { + case 'started': + // update the task to started + var created = convertDateFormat(ingressMigrateStatus.status.conditions[i].lastTransitionTime) + await updateTask(task.id, { + status: 'ACTIVE', + created: created, + }); + break; + case 'failed': + // update the task to failed + var created = convertDateFormat(ingressMigrateStatus.status.conditions[i].lastTransitionTime) + await updateTask(task.id, { + status: 'FAILED', + completed: created, + }); + var condition: any = new Object(); + // send a log off with the status information + condition.condition = ingressMigrateStatus.status.conditions[i].condition + condition.activeRoutes = ingressMigrateStatus.spec.ingress.activeIngress + condition.standbyRoutes = ingressMigrateStatus.spec.ingress.standbyIngress + var conditionStr= JSON.stringify(condition); + await saveTaskLog( + 'active-standby-switch', + projectOpenShift.name, + 'failed', + task.uuid, + conditionStr, + ); + logger.info(`${openshiftProject}: active/standby switch failed`); + exitResolve = true; + break; + case 'completed': + // swap the active/standby in lagoon by updating the project + const response = await updateProject(projectOpenShift.id, { + productionEnvironment: safeStandbyProductionEnvironment, + standbyProductionEnvironment: safeActiveProductionEnvironment, + productionRoutes: ingressMigrateStatus.spec.ingress.activeIngress, + standbyRoutes: ingressMigrateStatus.spec.ingress.standbyIngress, + }); + // update the task to completed + var created = convertDateFormat(ingressMigrateStatus.status.conditions[i].lastTransitionTime) + await updateTask(task.id, { + status: 'SUCCEEDED', + completed: created, + }); + // send a log off with the status information 
+ var condition: any = new Object(); + condition.condition = ingressMigrateStatus.status.conditions[i].condition + condition.activeRoutes = ingressMigrateStatus.spec.ingress.activeIngress + condition.standbyRoutes = ingressMigrateStatus.spec.ingress.standbyIngress + var conditionStr= JSON.stringify(condition); + await saveTaskLog( + 'active-standby-switch', + projectOpenShift.name, + 'succeeded', + task.uuid, + conditionStr, + ); + logger.info(`${openshiftProject}: active/standby switch completed`); + exitResolve = true; + break; + } + } + } + // handle the exit here + if (exitResolve == true) { + resolve(); + } else { + logger.info(`${openshiftProject}: active/standby switch not ready, will try again in 2sec`); + reject(); + } + })); + } + + try { + // actually run the task that updates the task + await retry(10, updateActiveStandbyTask, 2000); + } catch (err) { + throw new Error( + `${openshiftProject}: active/standby task is taking too long ${err}` + ); + } +} + +const saveTaskLog = async (jobName, projectName, status, uid, log) => { + const meta = { + jobName, + jobStatus: status, + remoteId: uid + }; + + sendToLagoonLogs( + 'info', + projectName, + '', + `task:misc-kubernetes:route:migrate:${jobName}`, + meta, + log + ); +}; + +export default ingressMigration; \ No newline at end of file diff --git a/services/kubernetesmisc/src/index.ts b/services/kubernetesmisc/src/index.ts index aeda21fefd..dea756148c 100644 --- a/services/kubernetesmisc/src/index.ts +++ b/services/kubernetesmisc/src/index.ts @@ -4,6 +4,7 @@ import { consumeTasks, initSendToLagoonTasks } from '@lagoon/commons/dist/tasks' import resticRestore from './handlers/resticRestore'; import kubernetesBuildCancel from "./handlers/kubernetesBuildCancel"; +import ingressMigration from "./handlers/ingressMigration"; initSendToLagoonLogs(); initSendToLagoonTasks(); @@ -25,6 +26,10 @@ const messageConsumer = async msg => { kubernetesBuildCancel(data); break; + case 'kubernetes:route:migrate': + 
ingressMigration(data); + break; + default: const meta = { msg: JSON.parse(msg.content.toString()), diff --git a/services/logs-concentrator/Dockerfile b/services/logs-concentrator/Dockerfile new file mode 100644 index 0000000000..189d07d4d3 --- /dev/null +++ b/services/logs-concentrator/Dockerfile @@ -0,0 +1,22 @@ +FROM fluent/fluentd:v1.11-1 +LABEL maintainer="support@amazee.io" + +USER root + +RUN apk add --no-cache --update --virtual .build-deps \ + build-base ruby-dev \ + && gem install fluent-plugin-elasticsearch \ + && gem install fluent-plugin-prometheus \ + && gem sources --clear-all \ + && apk del .build-deps \ + && rm -rf /tmp/* /var/tmp/* /usr/lib/ruby/gems/*/cache/*.gem \ + && apk add --no-cache curl + +COPY fluent.conf /fluentd/etc/ +COPY entrypoint.sh /bin/ + +USER fluent + +# environment variables that must be defined to point to the k8s api +# these are set by default when running in k8s +ENV KUBERNETES_SERVICE_HOST KUBERNETES_SERVICE_PORT diff --git a/services/logs-concentrator/entrypoint.sh b/services/logs-concentrator/entrypoint.sh new file mode 100755 index 0000000000..b93e1150f4 --- /dev/null +++ b/services/logs-concentrator/entrypoint.sh @@ -0,0 +1,30 @@ +#!/bin/sh + +# https://github.com/fluent/fluentd-docker-image/blob/master/v1.10/alpine/entrypoint.sh + +#source vars if file exists +DEFAULT=/etc/default/fluentd + +if [ -r $DEFAULT ]; then + set -o allexport + . $DEFAULT + set +o allexport +fi + +# If the user has supplied only arguments append them to `fluentd` command +if [ "${1#-}" != "$1" ]; then + set -- fluentd "$@" +fi + +# If user does not supply config file or plugins, use the default +if [ "$1" = "fluentd" ]; then + if ! echo "$@" | grep ' \-c' ; then + set -- "$@" -c "/fluentd/etc/${FLUENTD_CONF}" + fi + + if ! 
echo "$@" | grep ' \-p' ; then + set -- "$@" -p /fluentd/plugins + fi +fi + +exec "$@" diff --git a/services/logs-concentrator/fluent.conf b/services/logs-concentrator/fluent.conf new file mode 100644 index 0000000000..c09fe65d6b --- /dev/null +++ b/services/logs-concentrator/fluent.conf @@ -0,0 +1,19 @@ +# vi: ft=fluentd + +# NOTE: this is just a placeholder. When running in k8s a configmap is mounted +# over this file. That configmap is configured in the lagoon-logs-concentrator +# helm chart. + + + workers 2 + + + + @type forward + @id in_forward + add_tag_prefix in_forward + + + + @type stdout + diff --git a/services/logs-dispatcher/Dockerfile b/services/logs-dispatcher/Dockerfile index 09c7287561..7b9f0d7f05 100644 --- a/services/logs-dispatcher/Dockerfile +++ b/services/logs-dispatcher/Dockerfile @@ -1,16 +1,20 @@ -FROM fluent/fluentd:v1.10-1 +FROM fluent/fluentd:v1.11-1 LABEL maintainer="support@amazee.io" USER root RUN apk add --no-cache --update --virtual .build-deps \ build-base ruby-dev \ - && gem install fluent-plugin-elasticsearch \ + && gem install fluent-plugin-cloudwatch-logs \ && gem install fluent-plugin-kubernetes_metadata_filter \ + && gem install fluent-plugin-multi-format-parser \ && gem install fluent-plugin-prometheus \ + && gem install fluent-plugin-rabbitmq \ && gem install fluent-plugin-record-modifier \ + && gem install fluent-plugin-remote-syslog \ && gem install fluent-plugin-rewrite-tag-filter \ && gem install fluent-plugin-route \ + && gem install fluent-plugin-s3 --no-document \ && gem sources --clear-all \ && apk del .build-deps \ && rm -rf /tmp/* /var/tmp/* /usr/lib/ruby/gems/*/cache/*.gem \ diff --git a/services/logs-dispatcher/fluent.conf b/services/logs-dispatcher/fluent.conf index cda858f17f..c09fe65d6b 100644 --- a/services/logs-dispatcher/fluent.conf +++ b/services/logs-dispatcher/fluent.conf @@ -1,181 +1,19 @@ # vi: ft=fluentd -# NOTE: when running in k8s a configmap is mounted over this file. 
That -# configmap is configured in the lagoon-logging helm chart. +# NOTE: this is just a placeholder. When running in k8s a configmap is mounted +# over this file. That configmap is configured in the lagoon-logs-concentrator +# helm chart. workers 2 - # fluentd parameters - @type forward - @id container - tag "lagoon.#{ENV['CLUSTER_NAME']}.raw" + @type forward + @id in_forward + add_tag_prefix in_forward -# relabel router logs -# check app name first. if app name didn't match, set tag to container log. - - @type rewrite_tag_filter - - key $.kubernetes.labels.app - pattern ^nginx-ingress$ - tag "app-nginx-ingress" - - - invert true - key $.kubernetes.labels.app - pattern ^nginx-ingress$ - tag "lagoon.#{ENV['CLUSTER_NAME']}.container" - + + @type stdout -# check namespace_name. if it is okay too, tag as router log. -# if namespace didn't match, set tag to container log. - - @type rewrite_tag_filter - - key $.kubernetes.namespace_name - pattern ^syn-nginx-ingress$ - tag "lagoon.#{ENV['CLUSTER_NAME']}.router" - - - invert true - key $.kubernetes.namespace_name - pattern ^syn-nginx-ingress$ - tag "lagoon.#{ENV['CLUSTER_NAME']}.container" - - - -# strip the duplicated log field from router logs - - @type record_modifier - remove_keys log - - -# logs are now tagged appropriately, so route to labels based on the tag - - @type route - # route _all_ logs container logs (even nginx-ingress) to @container - - copy - @label @container - - # route just the router logs to @router - - copy - @label @router - - - - - - - - diff --git a/services/logs-forwarder/.lagoon.multi.yml b/services/logs-forwarder/.lagoon.multi.yml index 1210ee26e3..ff6ecc392f 100644 --- a/services/logs-forwarder/.lagoon.multi.yml +++ b/services/logs-forwarder/.lagoon.multi.yml @@ -266,7 +266,10 @@ objects: ssl_version TLSv1_2 request_timeout 600s slow_flush_log_threshold 300s - + log_es_400_reason true + # mapping exception ignore. 
these chunks will be dropped + ignore_exceptions ["Fluent::Plugin::ElasticsearchErrorHandler::ElasticsearchError"] + @type file path /fluentd/buffer/elasticsearch # buffer params (per worker) @@ -316,7 +319,8 @@ objects: "index.refresh_interval" : "5s", "number_of_shards": 1, "number_of_replicas": 1, - "index.routing.allocation.require.box_type": "live" + "index.routing.allocation.require.box_type": "live", + "index.mapping.ignore_malformed": true }, "mappings" : { "dynamic_templates" : [ { diff --git a/services/logs-forwarder/.lagoon.single.yml b/services/logs-forwarder/.lagoon.single.yml index db76e1ea75..c450fbfabf 100644 --- a/services/logs-forwarder/.lagoon.single.yml +++ b/services/logs-forwarder/.lagoon.single.yml @@ -249,7 +249,10 @@ objects: ssl_version TLSv1_2 request_timeout 600s slow_flush_log_threshold 300s - + log_es_400_reason true + # mapping exception ignore. these chunks will be dropped + ignore_exceptions ["Fluent::Plugin::ElasticsearchErrorHandler::ElasticsearchError"] + @type file path /fluentd/buffer/elasticsearch # buffer params (per worker) @@ -299,7 +302,8 @@ objects: "index.refresh_interval" : "5s", "number_of_shards": 1, "number_of_replicas": 1, - "index.routing.allocation.require.box_type": "live" + "index.routing.allocation.require.box_type": "live", + "index.mapping.ignore_malformed": true }, "mappings" : { "dynamic_templates" : [ { diff --git a/services/logs-tee/Dockerfile b/services/logs-tee/Dockerfile new file mode 100644 index 0000000000..812a8cf7c5 --- /dev/null +++ b/services/logs-tee/Dockerfile @@ -0,0 +1,13 @@ +ARG ALPINE_VERSION +FROM alpine:${ALPINE_VERSION} +LABEL maintainer="support@amazee.io" + +RUN addgroup -g 1000 -S socat && \ + adduser -u 1000 -S socat -G socat && \ + apk add --no-cache socat bash + +COPY entrypoint.sh / + +USER socat + +ENTRYPOINT ["/entrypoint.sh"] diff --git a/services/logs-tee/entrypoint.sh b/services/logs-tee/entrypoint.sh new file mode 100755 index 0000000000..4448632d2c --- /dev/null +++ 
b/services/logs-tee/entrypoint.sh @@ -0,0 +1,29 @@ +#!/usr/bin/env bash + +# this script: +# - listens on the UDP port given in the first argument (all interfaces) +# - assumes the remaining arguments are UDP endpoints +# - duplicates received traffic to each UDP endpoints +# - ensures that each defined endpoint resolves before starting +# - echoes to STDOUT if $DEBUG is set to "true" + +set -euo pipefail +set -x + +socat -b 65507 -u "udp-recvfrom:$1,fork" udp-sendto:127.255.255.255:9999,broadcast & + +shift + +for endpoint in "$@"; do + while ! nslookup "${endpoint/:[0-9]*/}" &> /dev/null; do + echo "${endpoint/:[0-9]*/} doesn't resolve. retrying in 2 seconds.." + sleep 2 + done + socat -b 65507 -u udp-recvfrom:9999,reuseaddr,fork udp-sendto:$endpoint & +done + +if [[ ${DEBUG:-} = true ]]; then + socat -b 65507 -u udp-recvfrom:9999,reuseaddr,fork - & +fi + +wait -n diff --git a/services/logs2rocketchat/src/readFromRabbitMQ.ts b/services/logs2rocketchat/src/readFromRabbitMQ.ts index 58429bbe72..858ea89663 100644 --- a/services/logs2rocketchat/src/readFromRabbitMQ.ts +++ b/services/logs2rocketchat/src/readFromRabbitMQ.ts @@ -112,6 +112,7 @@ export async function readFromRabbitMQ (msg: ConsumeMessage, channelWrapperLogs: case "task:deploy-openshift:finished": case "task:remove-openshift-resources:finished": case "task:builddeploy-openshift:complete": + case "task:builddeploy-kubernetes:complete": text = `*[${meta.projectName}]* ` if (meta.shortSha) { text = `${text} \`${meta.branchName}\` (${meta.shortSha})` diff --git a/services/openshiftbuilddeploy/src/index.ts b/services/openshiftbuilddeploy/src/index.ts index 94a86adbba..f7bfd715d9 100644 --- a/services/openshiftbuilddeploy/src/index.ts +++ b/services/openshiftbuilddeploy/src/index.ts @@ -5,7 +5,7 @@ import R from 'ramda'; import sha1 from 'sha1'; import crypto from 'crypto'; import { logger } from '@lagoon/commons/dist/local-logging'; -import { getOpenShiftInfoForProject, addOrUpdateEnvironment, 
getEnvironmentByName, addDeployment } from '@lagoon/commons/dist/api'; +import { getOpenShiftInfoForProject, addOrUpdateEnvironment, getEnvironmentByName, addDeployment, getBillingGroupForProject } from '@lagoon/commons/dist/api'; import { sendToLagoonLogs, initSendToLagoonLogs } from '@lagoon/commons/dist/logs'; import { consumeTasks, initSendToLagoonTasks, createTaskMonitor } from '@lagoon/commons/dist/tasks'; @@ -39,6 +39,8 @@ const messageConsumer = async msg => { const result = await getOpenShiftInfoForProject(projectName); const projectOpenShift = result.project + const billingGroupResult = await getBillingGroupForProject(projectName); + const projectBillingGroup = billingGroupResult.project const ocsafety = string => string.toLocaleLowerCase().replace(/[^0-9a-z-]/g,'-') @@ -85,8 +87,18 @@ const messageConsumer = async msg => { var graphqlEnvironmentType = environmentType.toUpperCase() var graphqlGitType = type.toUpperCase() var openshiftPromoteSourceProject = promoteSourceEnvironment ? 
`${safeProjectName}-${ocsafety(promoteSourceEnvironment)}` : "" - // A secret which is the same across all Environments of this Lagoon Project + // A secret which is the same across all Environments of this Lagoon Project var projectSecret = crypto.createHash('sha256').update(`${projectName}-${jwtSecret}`).digest('hex'); + var alertContactHA = "" + var alertContactSA = "" + var monitoringConfig = JSON.parse(projectOpenShift.openshift.monitoringConfig) || "invalid" + if (monitoringConfig != "invalid"){ + alertContactHA = monitoringConfig.uptimerobot.alertContactHA || "" + alertContactSA = monitoringConfig.uptimerobot.alertContactSA || "" + } + var availability = projectOpenShift.availability || "STANDARD" + const billingGroup = projectBillingGroup.groups.find(i => i.type == "billing" ) || "" + var uptimeRobotStatusPageId = billingGroup.uptimeRobotStatusPageId || "" } catch(error) { logger.error(`Error while loading information for project ${projectName}`) logger.error(error) @@ -272,6 +284,18 @@ const messageConsumer = async msg => { if (!R.isEmpty(environment.envVariables)) { buildconfig.spec.strategy.customStrategy.env.push({"name": "LAGOON_ENVIRONMENT_VARIABLES", "value": JSON.stringify(environment.envVariables)}) } + if (alertContactHA != undefined && alertContactSA != undefined){ + if (availability == "HIGH") { + buildconfig.spec.strategy.customStrategy.env.push({"name": "MONITORING_ALERTCONTACT","value": alertContactHA}) + } else { + buildconfig.spec.strategy.customStrategy.env.push({"name": "MONITORING_ALERTCONTACT","value": alertContactSA}) + } + } else { + buildconfig.spec.strategy.customStrategy.env.push({"name": "MONITORING_ALERTCONTACT","value": "unconfigured"}) + } + if (uptimeRobotStatusPageId){ + buildconfig.spec.strategy.customStrategy.env.push({"name": "MONITORING_STATUSPAGEID","value": uptimeRobotStatusPageId}) + } return buildconfig } @@ -333,7 +357,8 @@ const messageConsumer = async msg => { "name":openshiftProject, "labels": { 
"lagoon.sh/project": safeProjectName, - "lagoon.sh/environment": safeBranchName + "lagoon.sh/environment": safeBranchName, + "lagoon.sh/environmentType": environmentType } }, "displayName":`[${projectName}] ${branchName}` diff --git a/services/ui/.gitignore b/services/ui/.gitignore index c1733462af..80e2398775 100644 --- a/services/ui/.gitignore +++ b/services/ui/.gitignore @@ -8,3 +8,4 @@ storybook-static #environment .env +.python-version diff --git a/services/ui/.storybook/decorators/ApiConnection.js b/services/ui/.storybook/decorators/ApiConnection.js index 6390875749..ed5aa163a4 100644 --- a/services/ui/.storybook/decorators/ApiConnection.js +++ b/services/ui/.storybook/decorators/ApiConnection.js @@ -6,7 +6,7 @@ import { InMemoryCache, IntrospectionFragmentMatcher } from 'apollo-cache-inmemo import { SchemaLink } from 'apollo-link-schema'; import { ApolloProvider } from 'react-apollo'; import { makeExecutableSchema, addMockFunctionsToSchema } from 'graphql-tools'; -import typeDefs from 'api/src/typeDefs'; +import typeDefs from 'api/dist/typeDefs'; import mocks, { seed } from 'api/src/mocks'; import introspectionQueryResultData from 'api/src/fragmentTypes.json'; diff --git a/services/ui/.storybook/presets.js b/services/ui/.storybook/presets.js index 9531befa38..c32795a60c 100644 --- a/services/ui/.storybook/presets.js +++ b/services/ui/.storybook/presets.js @@ -1,3 +1,4 @@ module.exports = [ '@storybook/addon-docs/preset', + '@storybook/preset-typescript' ]; diff --git a/services/ui/.storybook/webpack.config.js b/services/ui/.storybook/webpack.config.js index 5024c45895..0564ff7802 100644 --- a/services/ui/.storybook/webpack.config.js +++ b/services/ui/.storybook/webpack.config.js @@ -9,8 +9,19 @@ module.exports = async ({ config, mode }) => { // Add alias for storybook decorators and components. 
config.resolve.alias.storybook = __dirname; + config.module.rules.push({ + test: /\.(ts|tsx)$/, + use: [ + { + loader: require.resolve('ts-loader'), + }, + ], + }); + + config.resolve.extensions.push('.ts', '.tsx'); + // Debug config. - // console.dir(config, { depth: null }); + //console.dir(config, { depth: null }); return config; }; diff --git a/services/ui/package.json b/services/ui/package.json index a6da1b02f6..d0b93473ab 100644 --- a/services/ui/package.json +++ b/services/ui/package.json @@ -14,7 +14,9 @@ "serve-storybook": "storybook-server -s ./src" }, "dependencies": { - "@apollo/react-hooks": "^3.1.3", + "@apollo/react-hooks": "^3.1.5", + "@emotion/core": "^10.0.28", + "@emotion/styled": "^10.0.27", "@zeit/next-css": "^1.0.1", "apollo-cache-inmemory": "^1.3.9", "apollo-client": "^2.4.5", @@ -37,12 +39,14 @@ "ramda": "^0.25.0", "react": "^16.8.4", "react-apollo": "^2.1.11", + "react-beautiful-dnd": "^13.0.0", "react-copy-to-clipboard": "^5.0.1", "react-dom": "^16.8.4", + "react-hexgrid": "^1.0.3", "react-highlight-words": "^0.14.0", "react-modal": "^3.8.1", "react-nice-dates": "^1.0.2", - "react-select": "^2.1.1", + "react-select": "^3.0.0", "react-typekit": "^1.1.3", "recompose": "^0.30.0", "resize-observer-polyfill": "^1.5.1", @@ -60,6 +64,7 @@ "@storybook/addon-viewport": "^5.3.0-beta.16", "@storybook/addons": "^5.3.0-beta.16", "@storybook/core": "^5.3.0-beta.16", + "@storybook/preset-typescript": "^3.0.0", "@storybook/react": "^5.3.0-beta.16", "apollo-link-schema": "^1.2.4", "babel-loader": "^8.0.6", @@ -68,7 +73,8 @@ "prop-types": "^15.7.2", "react-is": "^16.12.0", "regenerator-runtime": "^0.13.3", - "require-context.macro": "^1.2.2" + "require-context.macro": "^1.2.2", + "ts-loader": "^7.0.5" }, "postcss": { "plugins": { diff --git a/services/ui/server.js b/services/ui/server.js index 8b5a9d19ac..f4631870cf 100644 --- a/services/ui/server.js +++ b/services/ui/server.js @@ -27,7 +27,6 @@ app app.render(req, res, '/project', { projectName: 
req.params.projectSlug }); }); - server.get('/admin/billing/:billingGroupSlug', (req, res) => { app.render(req, res, '/admin/billing', { billingGroupName: req.params.billingGroupSlug }); }); @@ -44,8 +43,6 @@ app app.render(req, res, '/admin/billing', { billingGroupName: req.params.billingGroupSlug, year: req.params.yearSlug, month: req.params.monthSlug, lang: req.params.lang }); }); - - server.get('/projects/:projectSlug/:environmentSlug', (req, res) => { app.render(req, res, '/environment', { openshiftProjectName: req.params.environmentSlug @@ -96,6 +93,36 @@ app } ); + server.get( + '/projects/:projectSlug/:environmentSlug/problems', + (req, res) => { + app.render(req, res, '/problems', { + openshiftProjectName: req.params.environmentSlug + }); + } + ); + + server.get( + '/problems/project', + (req, res) => { + app.render(req, res, '/problems-dashboard-by-project'); + } + ); + + server.get( + '/problems', + (req, res) => { + app.render(req, res, '/problems-dashboard-by-project-hex'); + } + ); + + server.get( + '/problems/identifier', + (req, res) => { + app.render(req, res, '/problems-dashboard'); + } + ); + server.get('*', (req, res) => { return handle(req, res); }); diff --git a/services/ui/src/components/Accordion/index.js b/services/ui/src/components/Accordion/index.js new file mode 100644 index 0000000000..3b1f9de96f --- /dev/null +++ b/services/ui/src/components/Accordion/index.js @@ -0,0 +1,89 @@ +import React, { useState, Fragment } from "react"; +import PropTypes from "prop-types"; + +const Accordion = ({ children, defaultValue = true, minified = false, className = "", onToggle, columns }) => { + const [visibility, setVisibility] = useState(defaultValue); + const accordionType = minified ? 'minified' : 'wide'; + const colCountClass = columns && 'cols-'+Object.keys(columns).length; + + return ( +
+
{ + setVisibility(!visibility); + if (onToggle) onToggle(!visibility); + }}> + {Object.keys(columns).map((item, i) =>
{columns[item]}
)} +
+ + {visibility ? {children} : null} + +
+ ); +}; + +Accordion.propTypes = { + children: PropTypes.any.isRequired, + defaultValue: PropTypes.bool, + className: PropTypes.string, + onToggle: PropTypes.func, + columns: PropTypes.any.isRequired +}; + +export default Accordion; \ No newline at end of file diff --git a/services/ui/src/components/Accordion/index.stories.js b/services/ui/src/components/Accordion/index.stories.js new file mode 100644 index 0000000000..65244e1b57 --- /dev/null +++ b/services/ui/src/components/Accordion/index.stories.js @@ -0,0 +1,12 @@ +import React from 'react'; +import { action } from '@storybook/addon-actions'; +import Accordion from './index'; + +export default { + component: Accordion, + title: 'Components/Accordion', +}; + +export const Default = () => ( + Some default body content. +); \ No newline at end of file diff --git a/services/ui/src/components/ActiveStandbyConfirm/index.js b/services/ui/src/components/ActiveStandbyConfirm/index.js index 4f24569817..9740847c42 100644 --- a/services/ui/src/components/ActiveStandbyConfirm/index.js +++ b/services/ui/src/components/ActiveStandbyConfirm/index.js @@ -1,9 +1,9 @@ import React from 'react'; import Modal from 'components/Modal'; import Button from 'components/Button'; -import { color } from 'lib/variables'; +import { bp, color } from 'lib/variables'; // @TODO: add this once the logic exists -import withLogic from 'components/ActiveStandbyConfirm/logic'; +//import withLogic from 'components/ActiveStandbyConfirm/logic'; import ActiveStandby from 'components/ActiveStandbyConfirm'; /** @@ -19,7 +19,9 @@ export const ActiveStandbyConfirm = ({ }) => { return ( - +

- Are you sure you want to switch the "{standbyEnvironment}" environment to active?
+ This will replace the current active environment {activeEnvironment}
+ with the selected standby environment {standbyEnvironment}.

+ Are you sure you want to do this?

Upon confirmation you will be taken to the task page to monitor execution.

- cancel + cancel
diff --git a/services/ui/src/components/BillingGroupProjects/data.json b/services/ui/src/components/BillingGroupProjects/data.json index 9c75d2381e..eaa2350e9b 100644 --- a/services/ui/src/components/BillingGroupProjects/data.json +++ b/services/ui/src/components/BillingGroupProjects/data.json @@ -53,7 +53,9 @@ "discountFixed": 0, "discountPercentage": 0, "extraFixed": 1517, - "extraPercentage": 0 + "extraPercentage": 0, + "min": 0, + "max": 0 }, { "id": "18", @@ -88,7 +90,9 @@ "discountFixed": 0, "discountPercentage": 0, "extraFixed": 540, - "extraPercentage": 0 + "extraPercentage": 0, + "min": 0, + "max": 0 } ], "projects": [ diff --git a/services/ui/src/components/BillingGroups/index.js b/services/ui/src/components/BillingGroups/index.js index cf4eb31e4c..bd2e7e6807 100644 --- a/services/ui/src/components/BillingGroups/index.js +++ b/services/ui/src/components/BillingGroups/index.js @@ -13,10 +13,10 @@ const BillingGroups = ({ billingGroups }) => (
{!billingGroups.length &&
No BillingGroups
} { - billingGroups.map(({name, id, currency}) => ( + billingGroups.map(({name, id, currency, projects}) => (
-
{name}
+
{name}
{projects[0].openshift.name}
{currency}
@@ -61,6 +61,10 @@ const BillingGroups = ({ billingGroups }) => ( } } + .cluster { + color: gray; + } + .data-table { background-color: ${color.white}; border: 1px solid ${color.lightestGrey}; diff --git a/services/ui/src/components/BillingModifiers/AddBillingModifier.js b/services/ui/src/components/BillingModifiers/AddBillingModifier.js index b3f63308fa..77a66618f5 100644 --- a/services/ui/src/components/BillingModifiers/AddBillingModifier.js +++ b/services/ui/src/components/BillingModifiers/AddBillingModifier.js @@ -1,45 +1,20 @@ +import * as R from 'ramda'; import React from 'react'; import css from 'styled-jsx/css'; -import { Mutation } from 'react-apollo'; - import { color } from 'lib/variables'; -import AddBillingModifierMutation from '../../lib/mutation/AddBillingModifier'; -import AllBillingModifiersQuery from 'lib/query/AllBillingModifiers'; -import BillingGroupCostsQuery from 'lib/query/BillingGroupCosts'; + import BillingModifierForm from "./BillingModifierForm"; -const AddBillingModifier = ({ group, month }) => { +const AddBillingModifier = ({ group, month, editBillingModifier, editHandler }) => { return(
- - {(addBillingModifier, { loading, called, error, data }) => { - - const addBillingModifierHandler = (input) => { - addBillingModifier({ variables: { input } }); - }; - - if (!error && called && loading) { - return
Adding Billing Modifier...
; - } - - return ( -
-

Add Billing Modifier

- { error ?
{error.message.replace('GraphQL error:', '').trim()}
: "" } - -
- ); - }} -
+
+

{R.isEmpty(editBillingModifier) ? 'Add' : 'Edit' } Billing Modifier

+ +
+ + + )} + + + ); +} + +const ModifierList = React.memo(function ModifierList({ modifiers, editHandler }) { + return modifiers.map((modifier, index) => ()); +}); + + +const reorder = (list, startIndex, endIndex) => { + const result = Array.from(list); + const [removed] = result.splice(startIndex, 1); + result.splice(endIndex, 0, removed); + + return result.map((item, index) => ({...item, weight: index})); +}; + +const AllBillingModifiers = ({group, modifiers, month, editHandler}) => { + + const client = useApolloClient(); + + const [updateModifier] = useMutation( + UpdateBillingModifierMutation, + { + update(cache, { data: { updateBillingModifier } }){ + const variables = { input: { name: group } }; + const { allBillingModifiers } = cache.readQuery({ query: AllBillingModifiersQuery, variables}); + const { id, weight } = updateBillingModifier; + + const idx = allBillingModifiers.findIndex(({id}) => id === id ); + + if(allBillingModifiers[idx].weight !== weight){ + const data = { allBillingModifiers: allBillingModifiers.map(obj => id === obj.id ? 
updateBillingModifier : obj) }; + cache.writeQuery({ query: AllBillingModifiersQuery, variables, data }); } + } + } + ); - .modifier-value { - font-weight: bold; - } - .comments { - padding-top: 15px; - margin-right: 100px; - } + const onDragEnd = (result) => { - .delete { - position: absolute; - top: 15px; - right: 15px; - } + editHandler({}); + + if (!result.destination) { + return; + } - .data-table { - background-color: ${color.white}; - border: 1px solid ${color.lightestGrey}; - border-radius: 3px; - box-shadow: 0px 4px 8px 0px rgba(0, 0, 0, 0.03); - - .data-none { - border: 1px solid ${color.white}; - border-bottom: 1px solid ${color.lightestGrey}; - border-radius: 3px; - line-height: 1.5rem; - padding: 8px 0 7px 0; - text-align: center; - } + if (result.destination.index === result.source.index) { + return; + } - .data-row { - border: 1px solid ${color.white}; - border-bottom: 1px solid ${color.lightestGrey}; - border-radius: 0; - line-height: 1.5rem; - display: block; - position: relative; - padding: 8px 0 7px 0; + const reorderedModifiers = reorder( + modifiers, + result.source.index, + result.destination.index + ); - & > div { - padding-left: 20px; - @media ${bp.wideDown} { - padding-right: 40px; - } - } + reorderedModifiers.forEach(modifier => { + const {id, weight} = modifier; - &:hover { - border: 1px solid ${color.brightBlue}; + const optimisticResponse = { + updateBillingModifier: { + ...modifier, + __typename: "BillingModifier", + group: { + ...modifier.group, + type: "billing", + __typename: "BillingGroup" } + } + }; - &:first-child { - border-top-left-radius: 3px; - border-top-right-radius: 3px; - } + const variables = { input: { id, patch: { weight }} }; + + updateModifier({variables, optimisticResponse }); + }); - &:last-child { - border-bottom-left-radius: 3px; - border-bottom-right-radius: 3px; - } + + const variables = { input: { name: group } }; + const data = { allBillingModifiers: reorderedModifiers }; + client.writeQuery({ query: 
AllBillingModifiersQuery, variables, data }); + + } // end onDragEnd + + return( +
+ +

Billing Modifiers

+ + {!modifiers.length && ( +
No Billing Modifiers
+ )} + + + + {(provided, snapshot) => ( +
+ + {provided.placeholder} +
+ )} +
+
+ + +
-); + ); + +} export default AllBillingModifiers; diff --git a/services/ui/src/components/BillingModifiers/BillingModifierForm.js b/services/ui/src/components/BillingModifiers/BillingModifierForm.js index c280c2a160..5bc981caaf 100644 --- a/services/ui/src/components/BillingModifiers/BillingModifierForm.js +++ b/services/ui/src/components/BillingModifiers/BillingModifierForm.js @@ -1,7 +1,15 @@ -import React, { useState } from 'react'; +import * as R from 'ramda'; +import React, { useState, useEffect } from 'react'; import css from 'styled-jsx/css'; import Button from 'components/Button'; +import { useMutation } from '@apollo/react-hooks'; + +import AddBillingModifierMutation from '../../lib/mutation/AddBillingModifier'; +import UpdateBillingModifierMutation from 'lib/mutation/UpdateBillingModifier'; +import AllBillingModifiersQuery from 'lib/query/AllBillingModifiers'; +import BillingGroupCostsQuery from 'lib/query/BillingGroupCosts'; + import moment from 'moment'; import { enGB } from 'date-fns/locale' @@ -9,27 +17,121 @@ import { DateRangePicker, START_DATE, END_DATE } from 'react-nice-dates' import 'react-nice-dates/build/style.css' -const BillingModifierForm = ({group, submitHandler}) => { +const BillingModifierForm = ({group, editBillingModifier, editHandler}) => { + + const getModifierType = ({discountFixed, discountPercentage, extraFixed, extraPercentage, min, max}) => { + if(discountFixed !== 0){ + return 'discountFixed' + } + + if (discountPercentage !== 0) { + return 'discountPercentage' + } + + if(extraFixed !== 0){ + return 'extraFixed' + } + + if(extraPercentage !== 0){ + return 'extraPercentage' + } + + if(min !== 0){ + return 'min' + } + + if(max !== 0){ + return 'max' + } + + return ''; + } + + const getModifierValue = ({discountFixed, discountPercentage, extraFixed, extraPercentage, min, max}) => { + if(discountFixed !== 0){ + return discountFixed + } + + if (discountPercentage !== 0) { + return discountPercentage + } + + if(extraFixed !== 0){ + 
return extraFixed + } + + if(extraPercentage !== 0){ + return extraPercentage + } + + if(min !== 0){ + return min + } + + if(max !== 0){ + return max + } + + return ''; + } const defaultValues = { - startDate: '', - endDate: '', - modifierType: 'discountFixed', - modifierValue: '', - customerComments: '', - adminComments: '', - weight: 0, + startDate: !R.isEmpty(editBillingModifier) ? editBillingModifier.startDate : '', + endDate: !R.isEmpty(editBillingModifier) ? editBillingModifier.endDate : '', + modifierType: !R.isEmpty(editBillingModifier) ? getModifierType(editBillingModifier) : 'discountFixed', + modifierValue: !R.isEmpty(editBillingModifier) ? getModifierValue(editBillingModifier) : '', + customerComments: !R.isEmpty(editBillingModifier) ? editBillingModifier.customerComments : '', + adminComments: !R.isEmpty(editBillingModifier) ? editBillingModifier.adminComments : '', + weight: !R.isEmpty(editBillingModifier) ? editBillingModifier.weight : 0, }; const [values, setValues] = useState(defaultValues); + useEffect(() => { + setValues(defaultValues) + }, [editBillingModifier]) + const handleChange = e => { const {name, value} = e.target; setValues({...values, [name]: value}); } + const [addBillingModifier] = useMutation( + AddBillingModifierMutation, + { + update(cache, { data: { addBillingModifier } }){ + + const variables = { input: { name: group } }; + const { allBillingModifiers } = cache.readQuery({ query: AllBillingModifiersQuery, variables}); + const data = { allBillingModifiers: [...allBillingModifiers, {...addBillingModifier}] }; - const isFormValid = values.startDate !== '' && values.endDate !== '' && values.modifierType && values.modifierValue && values.adminComments !== ''; + cache.writeQuery({ query: AllBillingModifiersQuery, variables, data }); + } + } + ); + const [updateModifier] = useMutation( + UpdateBillingModifierMutation, + { + update(cache, { data: { updateBillingModifier } }){ + const variables = { input: { name: group } }; + const { 
allBillingModifiers } = cache.readQuery({ query: AllBillingModifiersQuery, variables}); + const { id, weight } = updateBillingModifier; + + const idx = allBillingModifiers.findIndex(({id}) => id === id ); + + if(allBillingModifiers[idx].weight !== weight){ + const data = { allBillingModifiers: allBillingModifiers.map(obj => id === obj.id ? updateBillingModifier : obj) }; + cache.writeQuery({ query: AllBillingModifiersQuery, variables, data }); + } + + if(updateBillingModifier){ + editHandler({}) + } + } + } + ); + + const isFormValid = values.startDate !== '' && values.endDate !== '' && values.modifierType && values.modifierValue && values.adminComments !== ''; const formSubmitHandler = () => { const variables = { @@ -40,14 +142,44 @@ const BillingModifierForm = ({group, submitHandler}) => { discountPercentage: values.modifierType === 'discountPercentage' ? parseFloat(values.modifierValue) : 0, extraFixed: values.modifierType === 'extraFixed' ? parseFloat(values.modifierValue) : 0, extraPercentage: values.modifierType === 'extraPercentage' ? parseFloat(values.modifierValue) : 0, + min: values.modifierType === 'min' ? parseFloat(values.modifierValue) : 0, + max: values.modifierType === 'max' ? parseFloat(values.modifierValue) : 0, customerComments: values.customerComments, adminComments: values.adminComments, weight: values.weight !== 0 ? 
parseInt(values.weight): 0 }; - submitHandler(variables) + // const optimisticResponse = { + // addBillingModifier: { + // ...variables, + // __typename: "BillingModifier", + // } + // }; + + if(R.isEmpty(editBillingModifier)){ + addBillingModifier({ variables: { input: {...variables } } }) + }else{ + + const optimisticResponse = { + updateBillingModifier: { + ...editBillingModifier, + ...variables, + __typename: "BillingModifier", + group: { + ...editBillingModifier.group, + type:'billing', + __typename: "BillingGroup" + } + } + }; + + const editVariables = { input: { id: editBillingModifier.id, patch: { ...variables }} }; + updateModifier({variables: editVariables, optimisticResponse }); + } } + + return (
@@ -60,6 +192,7 @@ const BillingModifierForm = ({group, submitHandler}) => { className={'input' + (focus === START_DATE ? ' -focused' : '')} placeholder='Start date (YYYY-MM-DD)' onChange={handleChange} + value={values.startDate} /> { className={'input' + (focus === END_DATE ? ' -focused' : '')} placeholder='End date (YYYY-MM-DD)' onChange={handleChange} + value={values.endDate} />
@@ -81,12 +215,15 @@ const BillingModifierForm = ({group, submitHandler}) => { aria-labelledby="modifierType" label='Modifier Type' className="modifierInput" + value={values.modifierType} > {[ {name: 'Discount: Fixed', value: 'discountFixed'}, {name: 'Discount: Percentage (0-100)', value: 'discountPercentage'}, {name: 'Extra: Fixed', value: 'extraFixed'}, - {name: 'Extra: Percentage (0-100)', value: 'extraPercentage'} + {name: 'Extra: Percentage (0-100)', value: 'extraPercentage'}, + {name: 'Minimum Amount', value: 'min'}, + {name: 'Maximum Amount', value: 'max'} ].map(modifier => (
- +
+ + ); +}; + +export default SelectFilter; diff --git a/services/ui/src/components/Header/index.js b/services/ui/src/components/Header/index.js index 29d13acd7f..175420dca4 100644 --- a/services/ui/src/components/Header/index.js +++ b/services/ui/src/components/Header/index.js @@ -14,7 +14,7 @@ const Header = ({ logo }) => (
- Home ( Tasks + {(environment.project.problemsUi == 1) &&
  • + + Problems + +
  • + } diff --git a/services/ui/src/components/Problems/Accordion/index.js b/services/ui/src/components/Problems/Accordion/index.js new file mode 100644 index 0000000000..2902af3f44 --- /dev/null +++ b/services/ui/src/components/Problems/Accordion/index.js @@ -0,0 +1,47 @@ +import React, { useState, Fragment } from "react"; +import PropTypes from "prop-types"; +import moment from 'moment'; + +const Accordion = ({ children, defaultValue = true, className = "", onToggle, heading }) => { + const [visibility, setVisibility] = useState(defaultValue); + return ( +
    +
    { + setVisibility(!visibility); + if (onToggle) onToggle(!visibility); + }}> +
    {heading.identifier}
    +
    {heading.service}
    +
    {heading.associatedPackage || 'UNSET'}
    +
    {heading.source}
    +
    {heading.severity}
    +
    {heading.severityScore}
    +
    + + {visibility ? {children} : null} + +
    + ); +}; + +Accordion.propTypes = { + className: PropTypes.string, + children: PropTypes.any.isRequired, + onToggle: PropTypes.func, +}; + +export default Accordion; \ No newline at end of file diff --git a/services/ui/src/components/Problems/index.js b/services/ui/src/components/Problems/index.js new file mode 100644 index 0000000000..dc0f22a916 --- /dev/null +++ b/services/ui/src/components/Problems/index.js @@ -0,0 +1,273 @@ +import React, { useState, useEffect } from 'react'; +import { bp, color, fontSize } from 'lib/variables'; +import useSortableProblemsData from './sortedItems'; +import Accordion from 'components/Accordion'; +import * as moment from "moment"; + +const Problems = ({ problems }) => { + const { sortedItems, requestSort, getClassNamesFor } = useSortableProblemsData(problems); + + const [problemTerm, setProblemTerm] = useState(''); + const [hasFilter, setHasFilter] = React.useState(false); + + const handleProblemFilterChange = (event) => { + setHasFilter(false); + + if (event.target.value !== null || event.target.value !== '') { + setHasFilter(true); + } + setProblemTerm(event.target.value); + }; + + const handleSort = (key) => { + return requestSort(key); + }; + + const filterResults = (item) => { + const lowercasedFilter = problemTerm.toLowerCase(); + if (problemTerm == null || problemTerm === '') { + return problems; + } + + return Object.keys(item).some(key => { + if (item[key] !== null) { + return item[key].toString().toLowerCase().includes(lowercasedFilter); + } + }); + }; + + return ( +
    +
    + +
    +
    + + + + + + +
    +
    + {!sortedItems.filter(item => filterResults(item)) &&
    No Problems
    } + {sortedItems.filter(item => filterResults(item)).map((problem) => { + + const {id, description, environment, project, data, service, deleted, version, fixedVersion, + links, __typename, created, ...selectedColumns} = problem; + const formatCreated = moment.utc(created) + .local() + .format('DD MM YYYY, HH:mm:ss'); + const { identifier, severity, source, severityScore, associatedPackage } = selectedColumns; + const columns = {identifier, severity, source, created: formatCreated, severityScore, associatedPackage}; + + return ( + +
    + + ); + })} +
    + +
    + ); +}; + +export default Problems; diff --git a/services/ui/src/components/Problems/index.stories.js b/services/ui/src/components/Problems/index.stories.js new file mode 100644 index 0000000000..0033d8110f --- /dev/null +++ b/services/ui/src/components/Problems/index.stories.js @@ -0,0 +1,16 @@ +import React from 'react'; +import Problems from './index'; +import mocks, { generator } from 'api/src/mocks'; + +export default { + component: Problems, + title: 'Components/Problems', +} + +export const Default = () => ( + +); + +export const NoProblems = () => ( + +); diff --git a/services/ui/src/components/Problems/sortedItems.js b/services/ui/src/components/Problems/sortedItems.js new file mode 100644 index 0000000000..b9ba246ca6 --- /dev/null +++ b/services/ui/src/components/Problems/sortedItems.js @@ -0,0 +1,59 @@ +import React, {useState} from "react"; +import moment from 'moment'; +import hash from 'object-hash'; + +const useSortableProblemsData = (initialItems) => { + const initialConfig = {key: 'identifier', direction: 'ascending'}; + const [sortConfig, setSortConfig] = React.useState(initialConfig); + const [currentItems, setCurrentItems] = useState(initialItems); + + const getClassNamesFor = (name) => { + if (!sortConfig) return; + return sortConfig.key === name && sortConfig.direction || 'no-sort'; + }; + + const sortedItems = React.useMemo(() => { + let sortableItems = [...currentItems]; + + if (sortConfig !== null) { + sortableItems.sort((a, b) => { + let aParsed = sortConfig.key === 'created' ? new moment(a[sortConfig.key]).format('YYYYMMDD') + : (a[sortConfig.key] ? a[sortConfig.key].toString().toLowerCase().trim() : null); + let bParsed = sortConfig.key === 'created' ? new moment(b[sortConfig.key]).format('YYYYMMDD') + : (b[sortConfig.key] ? b[sortConfig.key].toString().toLowerCase().trim() : null); + + if (aParsed < bParsed) { + return sortConfig.direction === 'ascending' ? 
-1 : 1; + } + if (aParsed > bParsed) { + return sortConfig.direction === 'ascending' ? 1 : -1; + } + + return 0; + }); + } + + return sortableItems; + }, [currentItems, sortConfig]); + + if (hash(sortedItems) !== hash(currentItems)) { + setCurrentItems(sortedItems); + } + + const requestSort = (key) => { + let direction = 'ascending'; + + if (sortConfig && sortConfig.key === key && sortConfig.direction === 'ascending') { + direction = 'descending'; + } + + setCurrentItems(sortedItems); + setSortConfig({ key, direction }); + + return { sortedItems: currentItems }; + }; + + return { sortedItems: currentItems, getClassNamesFor, requestSort }; +}; + +export default useSortableProblemsData; diff --git a/services/ui/src/components/ProblemsByIdentifier/index.js b/services/ui/src/components/ProblemsByIdentifier/index.js new file mode 100644 index 0000000000..f2b9a77639 --- /dev/null +++ b/services/ui/src/components/ProblemsByIdentifier/index.js @@ -0,0 +1,327 @@ +import React, { useState } from 'react'; +import { bp, color, fontSize } from 'lib/variables'; +import useSortableData from './sortedItems'; +import Accordion from 'components/Accordion'; +import ProblemsLink from 'components/link/Problems'; + +const ProblemsByIdentifier = ({ problems }) => { + const { sortedItems, getClassNamesFor, requestSort } = useSortableData(problems); + + const [problemTerm, setProblemTerm] = useState(''); + const [hasFilter, setHasFilter] = React.useState(false); + const [moreProjectsLimit, setMoreProjectsLimit] = React.useState(5); + + const handleProblemFilterChange = (event) => { + setHasFilter(false); + + if (event.target.value !== null || event.target.value !== '') { + setHasFilter(true); + } + setProblemTerm(event.target.value); + }; + + const handleSort = (key) => { + return requestSort(key); + }; + + const filterResults = (item) => { + const lowercasedFilter = problemTerm.toLowerCase(); + if (problemTerm == null || problemTerm === '') { + return problems; + } + + return 
Object.keys(item).some(key => { + if (item[key] !== null) { + return item[key].toString().toLowerCase().includes(lowercasedFilter); + } + }); + }; + + const onLoadMore = () => { + setMoreProjectsLimit(moreProjectsLimit+moreProjectsLimit); + }; + + return ( +
    +
    + +
    +
    + + + + +
    +
    + {!sortedItems.filter(item => filterResults(item)).length &&
    No Problems
    } + {sortedItems.filter(item => filterResults(item)).map((item) => { + const {identifier, source, severity, problems, environment } = item; + const { description, associatedPackage, links } = problems[0] || ''; + + const columns = { + identifier: identifier, source, severity, + projectsAffected: problems && problems.filter(p => p != null).length || 0 + }; + + return ( + +
    +
    +
    + + {description &&
    + {description.length > 250 ? description.substring(0, 247)+'...' : description} +
    } +
    +
    + + {associatedPackage &&
    {associatedPackage}
    } +
    +
    + + {links && } +
    +
    +
    +
    + + {problems && problems.filter(p => p != null).slice(0, moreProjectsLimit).map(problem => { + const { id, name: envName, openshiftProjectName, environmentType, project } = problem.environment || ''; + + return ( +
    + + {project ? `${project.name}` : ''}{envName ? ` : ${envName.toLowerCase()}` : ''} + +
    + ) + })} + {problems && problems.filter(p => p != null).length > moreProjectsLimit && + + } +
    +
    +
    +
    + ); + })} +
    + +
    + ); +}; + +export default ProblemsByIdentifier; diff --git a/services/ui/src/components/ProblemsByIdentifier/index.stories.js b/services/ui/src/components/ProblemsByIdentifier/index.stories.js new file mode 100644 index 0000000000..66f1ec571c --- /dev/null +++ b/services/ui/src/components/ProblemsByIdentifier/index.stories.js @@ -0,0 +1,25 @@ +import React from 'react'; +import { Query } from 'react-apollo'; +import AllProblemsQuery from 'lib/query/AllProblems'; +import mocks, { generator } from 'api/src/mocks'; +import ProblemsByIdentifier from './index'; + +export default { + component: ProblemsByIdentifier, + title: 'Components/ProblemsByIdentifier', +} + +export const Default = ({ problems }) => ; +Default.story = { + decorators: [ + storyFn => ( + + {({data}) => storyFn({problems: data.problems})} + + ), + ], +}; + +export const NoProblems = () => ( + +); diff --git a/services/ui/src/components/ProblemsByIdentifier/sortedItems.js b/services/ui/src/components/ProblemsByIdentifier/sortedItems.js new file mode 100644 index 0000000000..c67f2c97d8 --- /dev/null +++ b/services/ui/src/components/ProblemsByIdentifier/sortedItems.js @@ -0,0 +1,70 @@ +import React, {useState, useMemo} from "react"; +import hash from 'object-hash'; + +const useSortableData = (initialItems, initialConfig = {key: 'severity', direction: 'ascending'}) => { + const [sortConfig, setSortConfig] = React.useState(initialConfig); + const [currentItems, setCurrentItems] = useState(initialItems); + + const getClassNamesFor = (name) => { + if (!sortConfig) return; + return sortConfig.key === name ? 
sortConfig.direction : undefined; + }; + + const sortedItems = useMemo(() => { + let sortableItems = [...currentItems]; + + if (sortConfig !== null) { + sortableItems.sort((a, b) => { + let aParsed, bParsed = ''; + + if (sortConfig.key === 'identifier') { + aParsed = a[sortConfig.key].toString().toLowerCase().trim(); + bParsed = b[sortConfig.key].toString().toLowerCase().trim(); + } + else if (sortConfig.key === 'projectsAffected') { + aParsed = a.problems.length; + bParsed = b.problems.length; + } + else { + let aProblem, bProblem; + + if (a[sortConfig.key] === undefined) aProblem = a.problem; + if (b[sortConfig.key] === undefined) bProblem = b.problem; + + let aItem = a[sortConfig.key] || aProblem[sortConfig.key]; + aParsed = aItem.toString().toLowerCase().trim(); + + let bItem = b[sortConfig.key] || bProblem[sortConfig.key]; + bParsed = bItem.toString().toLowerCase().trim(); + } + + if (aParsed < bParsed) return sortConfig.direction === 'ascending' ? -1 : 1; + if (aParsed > bParsed) return sortConfig.direction === 'ascending' ? 
1 : -1; + return 0; + }); + } + + return sortableItems; + }, [currentItems, sortConfig]); + + if (hash(sortedItems) !== hash(currentItems)) { + setCurrentItems(sortedItems); + } + + const requestSort = (key) => { + let direction = 'ascending'; + + if (sortConfig && sortConfig.key === key && sortConfig.direction === 'ascending') { + direction = 'descending'; + } + + setCurrentItems(sortedItems); + setSortConfig({ key, direction }); + + return { sortedItems: currentItems }; + }; + + return { sortedItems: currentItems, currentSortConfig: sortConfig, getClassNamesFor, requestSort }; +}; + +export default useSortableData; \ No newline at end of file diff --git a/services/ui/src/components/ProblemsByProject/Honeycomb/index.js b/services/ui/src/components/ProblemsByProject/Honeycomb/index.js new file mode 100644 index 0000000000..69adfd3dc8 --- /dev/null +++ b/services/ui/src/components/ProblemsByProject/Honeycomb/index.js @@ -0,0 +1,202 @@ +import React, {useState, Fragment, useEffect, useRef} from "react"; +import { HexGrid, Layout, Hexagon, Text, GridGenerator, HexUtils } from 'react-hexgrid'; +import * as R from 'ramda'; +import ProblemsByProject from "components/ProblemsByProject"; +import {LoadingPageNoHeader} from 'pages/_loading'; +import {ErrorNoHeader} from 'pages/_error'; +import { bp } from 'lib/variables'; +import './styling.css'; + +const config = { + "width": 1200, + "height": 100, + "layout": {"width": 4, "height": 4, "flat": false, "spacing": 1.08}, + "origin": {"x": 0, "y": 0}, + "map": "rectangle", +}; + +const Honeycomb = ({ data, filter }) => { + const { projectsProblems } = data || []; + const [projects, setProjects] = useState(projects); + const [projectInView, setProjectInView] = useState(false); + const [display, setDisplay] = useState({type: "normal", multiplier: 2}); + + const generator = GridGenerator.getGenerator(config.map); + const projectCount = projectsProblems && parseInt(projectsProblems.length); + const displayMultiple = display && 
parseInt(display.multiplier * 8); + let rows = projectsProblems && parseInt(projectCount / displayMultiple); + + const hexs = generator.apply(config, [displayMultiple, ++rows]); + const layout = config.layout; + const size = { + x: parseInt(display.hexSize * layout.width), + y: parseInt(display.hexSize * layout.height) + }; + + const handleHexClick = (project) => { + const {environments, id, name} = project || []; + const problems = environments && environments.filter(e => e instanceof Object).map(e => { + return e.problems; + }); + + const problemsPerProject = Array.prototype.concat.apply([], problems); + const critical = problemsPerProject.filter(p => p.severity === 'CRITICAL').length; + const high = problemsPerProject.filter(p => p.severity === 'HIGH').length; + const medium = problemsPerProject.filter(p => p.severity === 'MEDIUM').length; + const low = problemsPerProject.filter(p => p.severity === 'LOW').length; + + setProjectInView({name: name, environments: environments, severityCount: {critical: critical, high: high, medium: medium, low: low}}); + }; + + const flattenProblems = (project) => { + const {environments} = project || []; + const filterProblems = environments && environments.filter(e => e instanceof Object).map(e => { + return e.problems; + }); + return Array.prototype.concat.apply([], filterProblems); + }; + + const sortByProjects = (projects) => { + return projects && projects.sort((a, b) => { + const aProblems = flattenProblems(a); + const bProblems = flattenProblems(b); + + return bProblems.length - aProblems.length; + }); + }; + + const getClassName = (critical) => { + if (critical === 0) { return "no-critical" } + if (critical === 1) { return "light-red" } else + if (critical >= 1 && critical <= 5) { return "red" } else + if (critical >= 5 && critical < 10) { return "dark-red" } else + if (critical >= 10 && critical < 15) { return "darker-red" } + }; + + useEffect(() => { + const count = projectsProblems && projectsProblems.length; + if 
(count <= 48) setDisplay({type: "normal", multiplier: 2, hexSize: 4, viewBox: "180 -20 100 100"}); + if (count >= 49 && count <= 96) setDisplay({type: "medium", multiplier: 4, hexSize: 1, viewBox: "65 -30 100 100"}); + if (count >= 97 && count <=479) setDisplay({type: "large", multiplier: 4, hexSize: 1, viewBox: "65 -10 100 100"}); + if (count >= 480) setDisplay({type: "extra-large", multiplier: 5.5, hexSize: 0.66, viewBox: "30 -10 100 100"}); + + const filterProjects = !filter.showCleanProjects ? projectsProblems && projectsProblems.filter(p => { + return !R.isEmpty(flattenProblems(p)) + }) : projectsProblems && projectsProblems; + + const sortProjects = filterProjects && sortByProjects(filterProjects); + + setProjects(sortProjects); + }, [projectsProblems, filter]); + + return ( +
    + {!projects && } + {projects && +
    +
    + +
    +
    + } + {projects && + <> + + + {hexs.slice(0, projects.length).map((hex, i) => { + const project = projects[i] || null; + const {environments, id, name} = project; + const filterProblems = environments && environments.filter(e => e instanceof Object).map(e => { + return e.problems; + }); + + const problemsPerProject = Array.prototype.concat.apply([], filterProblems); + const critical = problemsPerProject.filter(p => p.severity === 'CRITICAL').length; + const problemCount = problemsPerProject.length || 0; + + const HexText = () => { + const classes = display.type !== "normal" ? "no-text" : 'text'; + + if (problemsPerProject.length) { + return ( + {`P: ${problemCount}, C: ${critical}`} + ); + } + else { + return {`P: ${problemCount}`} + } + }; + + return ( + handleHexClick(project)}> + + + )})} + + +
    +
    +
    + {projectInView ? + <> +
    + {projectInView.environments && projectInView.environments.map(environment => ( +
    + + +
    + ))} + + :
    No project selected
    + } +
    +
    +
    + + } + +
    + ); +}; + +export default Honeycomb; \ No newline at end of file diff --git a/services/ui/src/components/ProblemsByProject/Honeycomb/index.stories.js b/services/ui/src/components/ProblemsByProject/Honeycomb/index.stories.js new file mode 100644 index 0000000000..566c734246 --- /dev/null +++ b/services/ui/src/components/ProblemsByProject/Honeycomb/index.stories.js @@ -0,0 +1,26 @@ +import React from 'react'; +import Honeycomb from './index'; +import { Query } from 'react-apollo'; +import AllProjectsProblemsQuery from 'lib/query/AllProjectsProblems'; + +export default { + component: Honeycomb, + title: 'Components/Honeycomb', +} + +export const Default = (projects) => { + return projects && +}; +Default.story = { + decorators: [ + storyFn => ( + + {({data: projectsProblems}) => projectsProblems && storyFn({projects: projectsProblems})} + + ), + ], +}; + +export const NoProjects = () => ( + +); diff --git a/services/ui/src/components/ProblemsByProject/Honeycomb/styling.css b/services/ui/src/components/ProblemsByProject/Honeycomb/styling.css new file mode 100644 index 0000000000..e262451e60 --- /dev/null +++ b/services/ui/src/components/ProblemsByProject/Honeycomb/styling.css @@ -0,0 +1,53 @@ +svg.grid { + margin: 0 auto; + width: 100%; +} +svg.grid g { + fill: #f2f2f2; + fill-opacity: 0.6; +} +svg.grid g:hover { + fill-opacity: 1; +} +svg.grid g:hover text { + fill-opacity: 1; +} +svg.grid g .green g { + fill: #cbf3cf; +} +svg.grid g .light-red g { + fill: #ffa19c; +} +svg.grid g .red g { + fill: #ff6961; +} +svg.grid g .dark-red g { + fill: #c30a00; +} +svg.grid g .darker-red g { + fill: #880700; +} +svg.grid g polygon { + cursor: pointer; + stroke: #4c84ff; + stroke-width: 0.2; + transition: fill-opacity .2s; +} +svg.grid g text { + font-size: 0.325em; + fill: #000; + fill-opacity: 0.5; + transition: fill-opacity .2s; + cursor: pointer; +} +svg.grid g text.no-text { + font-size: 0; +} +svg.grid path { + fill: none; + stroke: hsl(60, 20%, 70%); + stroke-width: 
0.4em; + stroke-opacity: 0.3; + stroke-linecap: round; + stroke-linejoin: round; +} \ No newline at end of file diff --git a/services/ui/src/components/ProblemsByProject/index.js b/services/ui/src/components/ProblemsByProject/index.js new file mode 100644 index 0000000000..56ed6fd0a7 --- /dev/null +++ b/services/ui/src/components/ProblemsByProject/index.js @@ -0,0 +1,309 @@ +import React, { useState, useEffect } from 'react'; +import { bp, color, fontSize } from 'lib/variables'; +import useSortableData from './sortedItems'; +import Accordion from 'components/Accordion'; + +const ProblemsByProject = ({ problems }) => { + const { sortedItems, getClassNamesFor, requestSort } = useSortableData(problems, {key: 'id', direction: 'ascending'}); + + const [problemTerm, setProblemTerm] = useState(''); + const [hasFilter, setHasFilter] = React.useState(false); + + const handleProblemFilterChange = (event) => { + setHasFilter(false); + + if (event.target.value !== null || event.target.value !== '') { + setHasFilter(true); + } + setProblemTerm(event.target.value); + }; + + const handleSort = (key) => { + return requestSort(key); + }; + + const filterResults = (item) => { + const lowercasedFilter = problemTerm.toLowerCase(); + if (problemTerm == null || problemTerm === '') { + return problems; + } + + return Object.keys(item).some(key => { + if (item[key] !== null) { + return item[key].toString().toLowerCase().includes(lowercasedFilter); + } + }); + }; + + return ( +
    +
    + +
    +
    + + + + +
    +
    + {!sortedItems.filter(problem => filterResults(problem)).length &&
    No Problems
    } + {sortedItems.filter(problem => filterResults(problem)).map((problem) => { + + const {identifier, source, severity, associatedPackage, data } = problem; + const columns = {identifier, source, severity, associatedPackage}; + const parsedData = JSON.parse(data); + + return ( + +
    +
    + + {problem &&
    + {(problem.description).length > 250 ? problem.description.substring(0, 247)+'...' : problem.description} +
    } +
    +
    + + {problem &&
    {problem.associatedPackage}
    } +
    +
    + + {problem && } +
    + {problem && (
    + +
    + {parsedData &&
    {Object.keys(parsedData).map((key) => { + return
    {key}: {`${parsedData[key]}`}
    + })}
    } +
    +
    )} +
    +
    + ); + })} +
    + +
    + ); +}; + +export default ProblemsByProject; diff --git a/services/ui/src/components/ProblemsByProject/sortedItems.js b/services/ui/src/components/ProblemsByProject/sortedItems.js new file mode 100644 index 0000000000..3884ed3917 --- /dev/null +++ b/services/ui/src/components/ProblemsByProject/sortedItems.js @@ -0,0 +1,63 @@ +import React, {useState, useEffect, useMemo} from "react"; +import hash from 'object-hash'; + +const useSortableData = (initialItems, initialConfig) => { + const [sortConfig, setSortConfig] = React.useState(initialConfig); + const [currentItems, setCurrentItems] = useState(initialItems); + + const getClassNamesFor = (name) => { + if (!sortConfig) return; + return sortConfig.key === name ? sortConfig.direction : undefined; + }; + + const sortedItems = useMemo(() => { + if (!currentItems) return; + + let sortableItems = [...currentItems]; + + if (sortConfig !== null) { + sortableItems.sort((a, b) => { + let aParsed, bParsed = ''; + + if (sortConfig.key === 'identifier') { + aParsed = a[sortConfig.key].toString().toLowerCase().trim(); + bParsed = b[sortConfig.key].toString().toLowerCase().trim(); + } + else { + let aProblem = a[sortConfig.key]; + aParsed = aProblem.toString().toLowerCase().trim(); + + let bProblem = b[sortConfig.key]; + bParsed = bProblem.toString().toLowerCase().trim(); + } + + if (aParsed < bParsed) return sortConfig.direction === 'ascending' ? -1 : 1; + if (aParsed > bParsed) return sortConfig.direction === 'ascending' ? 
1 : -1; + return 0; + }); + } + + return sortableItems; + }, [currentItems, sortConfig]); + + if (hash(sortedItems) !== hash(currentItems)) { + setCurrentItems(sortedItems); + } + + const requestSort = (key) => { + let direction = 'ascending'; + + if (sortConfig && sortConfig.key === key && sortConfig.direction === 'ascending') { + direction = 'descending'; + } + + setCurrentItems(sortedItems); + setSortConfig({ key, direction }); + + return { sortedItems: currentItems }; + }; + + return { sortedItems: currentItems, currentSortConfig: sortConfig, getClassNamesFor, requestSort }; +}; + +export default useSortableData; \ No newline at end of file diff --git a/services/ui/src/components/ProjectDetailsSidebar/index.js b/services/ui/src/components/ProjectDetailsSidebar/index.js index 483701d0a2..8d46f2d33c 100644 --- a/services/ui/src/components/ProjectDetailsSidebar/index.js +++ b/services/ui/src/components/ProjectDetailsSidebar/index.js @@ -4,14 +4,11 @@ import { CopyToClipboard } from 'react-copy-to-clipboard'; import moment from 'moment'; import giturlparse from 'git-url-parse'; import Environments from 'components/Environments'; -// @TODO: add this once the logic exists -// import ActiveStandbyConfirm from 'components/ActiveStandbyConfirm'; import { bp, color, fontSize } from 'lib/variables'; import { Mutation } from 'react-apollo'; import ProjectByNameQuery from 'lib/query/ProjectByName'; -import SwitchActiveStandbyMutation from 'lib/mutation/SwitchActiveStandby'; const Project = ({ project }) => { const [copied, setCopied] = useState(false); @@ -93,49 +90,6 @@ const Project = ({ project }) => {
    - {/* - @TODO: add this once the logic exists - {project.productionEnvironment && project.standbyProductionEnvironment - ?
    -
    - -
    - - {(switchActiveStandby, { loading, called, error, data }) => { - const switchActiveBranch = () => { - const input = { - project:{ - name: project.name - } - } - - switchActiveStandby({ variables: { input } }); - Router.push(`/projects/${productionEnvironment.project.name}/${productionEnvironment.openshiftProjectName}/tasks`) - } - - if (!error && called && loading) { - return
    Switching Standby Environment to Active...
    ; - } - - return ( - - ); - }} -
    -
    -
    -
    - : null - } */} + + ); +} + /** * Displays a billingGroupCost page, given the billingGroupCost name. */ @@ -58,16 +146,26 @@ export const PageBillingGroup = ({ router }) => { const [costs, setCosts] = useState([]); const queries = []; - for (let i = monthsToGraph; i > 0; i--) { + for (let i = monthsToGraph; i >= 0; i--) { queries.push(useQuery(BillingGroupCostsQuery, { variables: { input: { name: group }, month: moment().subtract(i, 'M').format('YYYY-MM').toString() } })) } + const [editModifier, setEditModifier] = useState({ }); + useEffect(() => { - const result = queries.map(query => { - if (query && query.data && query.data.costs) { - return (query.data.costs) + const result = queries.map(({loading, error, data}) => { + if (error) { + return {error}; + } + + if (loading){ + return {loading}; + } + + if (data && data.costs) { + return (data.costs) } - return ({total: 0}); + return {}; }); // for (let i = 0; i <= 5; i++) { @@ -86,18 +184,29 @@ export const PageBillingGroup = ({ router }) => { } const prevSubmitHandler = () => { - const dateTime = `${values.year}-${values.month}-01 0:00:00.000`; - const date = new Date(dateTime); - const [year, month] = moment(date).subtract(1, 'M').format('YYYY-MM').toString().split('-'); - setValues({month, year}); + //currently we can't go back more than 2 months + const dateTime = `${values.year}-${values.month}-01 0:00:00.000`; + const date = new Date(dateTime); + + // const monthDifference = Math.round(moment(new Date()).diff(date, 'months', true)); + // if(monthDifference < 3){ + const [year, month] = moment(date).subtract(1, 'M').format('YYYY-MM').toString().split('-'); + setValues({month, year}); + // } } + + const nextSubmitHandler = () => { const dateTime = `${values.year}-${values.month}-01 0:00:00.000`; const date = new Date(dateTime); const [year, month] = moment(date).add(1, 'M').format('YYYY-MM').toString().split('-'); setValues({month, year}); } + + const editModifierHandler = (modifier) => { + 
setEditModifier(modifier) + } return( <> @@ -108,11 +217,32 @@ export const PageBillingGroup = ({ router }) => { {auth => { - if (adminAuthChecker(auth)) { + if (!adminAuthChecker(auth)) { + return (
    Seems that you do not have permissions to access this resource.
    ); + } + + if (costs.length > 0 && costs[0].loading){ + return (

    Loading...

    ); + } + + if (costs.length > 0 && costs[0].error){ + if (costs[0].error.message.includes("Projects must have the same availability") ){ + return (); + } + + if (costs[0].error.message.includes("Cannot read property 'availability' of undefined")){ + return (
    This billing group does not seem to have any projedcts.
    ); + } + + return (
    {costs[0].error.message}
    ); + } + + const selectedMonthCosts = costs.find(o => o.yearMonth === `${values.year}-${values.month}`); + return (
    - + { selectedMonthCosts && }
    @@ -153,50 +283,40 @@ export const PageBillingGroup = ({ router }) => {
    - - - - {R.compose(withQueryLoading, withQueryError)( - ({ data: { costs } }) => { - return( - <> -
    -
    -
    - -
    - - -
    - -
    -
    -
    - { - {R.compose(withQueryLoading, withQueryError)( - ({ data: { allBillingModifiers: modifiers } }) => - )} - } - -
    -
    -
    - -
    - - ); + +
    +
    +
    + { selectedMonthCosts && } + { !selectedMonthCosts &&
    Sorry, we don't have billing data for this month.
    } +
    + + +
    + { selectedMonthCosts && } +
    +
    +
    + { + + {R.compose(withQueryLoading, withQueryError)( + ({ data: { allBillingModifiers: modifiers } }) => + )} + } - )} - - + +
    +
    +
    + { selectedMonthCosts && } +
    ); - } - return (
    Seems that you do not have permissions to access this resource.
    ); }}
    + + + ); +}; + +export default ProblemsDashboardProductHexPage; diff --git a/services/ui/src/pages/problems-dashboard-by-project.js b/services/ui/src/pages/problems-dashboard-by-project.js new file mode 100644 index 0000000000..d78a6ab007 --- /dev/null +++ b/services/ui/src/pages/problems-dashboard-by-project.js @@ -0,0 +1,265 @@ +import React, {useEffect, useState} from 'react'; +import * as R from 'ramda'; +import Head from 'next/head'; +import { Query } from 'react-apollo'; +import {useQuery} from "@apollo/react-hooks"; +import AllProblemsByProjectQuery from 'lib/query/AllProblemsByProject'; +import getSeverityEnumQuery, {getProjectOptions, getSourceOptions} from 'components/Filters/helpers'; +import withQueryLoadingNoHeader from 'lib/withQueryLoadingNoHeader'; +import withQueryErrorNoHeader from 'lib/withQueryErrorNoHeader'; +import ProblemsByProject from "components/ProblemsByProject"; +import Accordion from "components/Accordion"; +import MainLayout from 'layouts/MainLayout'; +import SelectFilter from 'components/Filters'; +import { bp } from 'lib/variables'; + +/** + * Displays the problems overview page by project. 
+ */ +const ProblemsDashboardProductPage = () => { + const [projectSelect, setProjectSelect] = useState([]); + const [source, setSource] = useState([]); + const [severity, setSeverity] = useState(['CRITICAL']); + const [envType, setEnvType] = useState('PRODUCTION'); + + const { data: projects, loading: projectsLoading } = useQuery(getProjectOptions); + const { data: severities, loading: severityLoading } = useQuery(getSeverityEnumQuery); + const { data: sources, loading: sourceLoading } = useQuery(getSourceOptions); + + const handleProjectChange = (project) => { + let values = project && project.map(p => p.value) || []; + setProjectSelect(values); + }; + + const handleEnvTypeChange = (envType) => { + setEnvType(envType.value); + }; + + const handleSourceChange = (source) => { + let values = source && source.map(s => s.value) || []; + setSource(values); + }; + + const handleSeverityChange = (severity) => { + let values = severity && severity.map(s => s.value) || []; + setSeverity(values); + }; + + const projectOptions = (projects) => { + return projects && projects.map(p => ({ value: p.name, label: p.name})); + }; + + const sourceOptions = (sources) => { + return sources && sources.map(s => ({ value: s, label: s})); + }; + + const severityOptions = (enums) => { + return enums && enums.map(s => ({ value: s.name, label: s.name})); + }; + + return ( + <> + + Problems Dashboard By Project + + +
    +
    + + +
    +
    + + +
    + +
    +
    + {projects && +
    +
    + +
    +
    + } +
    + {projects && projects.allProjects.map(project => { + const filterProjectSelect = projectSelect.filter(s => { + return s.includes(project.name); + }).toString() || ''; + + return ( + + {R.compose( + withQueryLoadingNoHeader, + withQueryErrorNoHeader + )(({data: { project }}) => { + const {environments, id, name} = project || []; + const filterProblems = environments && environments.filter(e => e instanceof Object).map(e => { + return e.problems; + }); + + const problemsPerProject = Array.prototype.concat.apply([], filterProblems); + const critical = problemsPerProject.filter(p => p.severity === 'CRITICAL').length; + const high = problemsPerProject.filter(p => p.severity === 'HIGH').length; + const medium = problemsPerProject.filter(p => p.severity === 'MEDIUM').length; + const low = problemsPerProject.filter(p => p.severity === 'LOW').length; + + const columns = {name, problemCount: problemsPerProject.length}; + + return ( + <> + {environments && +
    +
    + + {!environments.length &&
    No Environments
    } +
    +
      +
    • {Object.keys(problemsPerProject).length} Problems
    • +
    • {critical}
    • +
    • {high}
    • +
    • {medium}
    • +
    • {low}
    • +
    +
    + {environments.map(environment => ( +
    + + +
    + ))} +
    +
    +
    + } + ); + })}
    + )})} +
    + +
    +
    + ); +}; + +export default ProblemsDashboardProductPage; diff --git a/services/ui/src/pages/problems-dashboard.js b/services/ui/src/pages/problems-dashboard.js new file mode 100644 index 0000000000..debe900cff --- /dev/null +++ b/services/ui/src/pages/problems-dashboard.js @@ -0,0 +1,190 @@ +import React, {useState} from 'react'; +import * as R from 'ramda'; +import Head from 'next/head'; +import { Query } from 'react-apollo'; +import {useQuery} from "@apollo/react-hooks"; +import AllProblemsQuery from 'lib/query/AllProblems'; +import getSeverityEnumQuery, {getSourceOptions} from 'components/Filters/helpers'; +import withQueryLoadingNoHeader from 'lib/withQueryLoadingNoHeader'; +import withQueryErrorNoHeader from 'lib/withQueryErrorNoHeader'; +import ProblemsByIdentifier from "components/ProblemsByIdentifier"; +import MainLayout from 'layouts/MainLayout'; +import SelectFilter from 'components/Filters'; +import { bp } from 'lib/variables'; + +/** + * Displays the problems overview page. 
+ * + */ +const ProblemsDashboardPage = () => { + const [source, setSource] = useState([]); + const [severity, setSeverity] = useState(['CRITICAL']); + const [envType, setEnvType] = useState('PRODUCTION'); + + const { data: severities, loading: severityLoading } = useQuery(getSeverityEnumQuery); + const { data: sources, loading: sourceLoading } = useQuery(getSourceOptions); + + const handleEnvTypeChange = (envType) => setEnvType(envType.value); + + const handleSourceChange = (source) => { + let values = source && source.map(s => s.value) || []; + setSource(values); + }; + + const handleSeverityChange = (severity) => { + let values = severity && severity.map(s => s.value) || []; + setSeverity(values); + }; + + const sourceOptions = (sources) => { + return sources && sources.map(s => ({ value: s, label: s})); + }; + + const severityOptions = (enums) => { + return enums && enums.map(s => ({ value: s.name, label: s.name})); + }; + + const groupByProblemIdentifier = (problems) => problems && problems.reduce((arr, problem) => { + arr[problem.identifier] = arr[problem.identifier] || []; + arr[problem.identifier].push(problem); + return arr; + }, {}); + + + return ( + <> + + Problems Dashboard + + +
    +

    Problems Dashboard By Identifier

    +
    + + + +
    + +
    + + {R.compose( + withQueryLoadingNoHeader, + withQueryErrorNoHeader + )(({data: {problems} }) => { + + // Group problems by identifier + const problemsById = groupByProblemIdentifier(problems) || []; + const problemIdentifiers = problemsById && Object.keys(problemsById).map(p => { + const problem = problemsById[p][0]; + + return {identifier: p, source: problem.source, severity: problem.severity, problems: problemsById[p]}; + }, []); + + const critical = problems && problems.filter(p => p.severity === 'CRITICAL').length; + const high = problems && problems.filter(p => p.severity === 'HIGH').length; + const medium = problems && problems.filter(p => p.severity === 'MEDIUM').length; + const low = problems && problems.filter(p => p.severity === 'LOW').length; + + return ( + <> +
    +
    +
    +
      +
    • {problems && Object.keys(problems).length} Problems
    • +
    • {critical}
    • +
    • {high}
    • +
    • {medium}
    • +
    • {low}
    • +
    +
      +
    • {envType.charAt(0).toUpperCase() + envType.slice(1).toLowerCase()} environments
    • +
    +
    + +
    + +
    + ); + })} +
    +
    + ); +}; + +export default ProblemsDashboardPage; diff --git a/services/ui/src/pages/problems.js b/services/ui/src/pages/problems.js new file mode 100644 index 0000000000..9688d6a17e --- /dev/null +++ b/services/ui/src/pages/problems.js @@ -0,0 +1,77 @@ +import React from 'react'; +import * as R from 'ramda'; +import { withRouter } from 'next/router'; +import Head from 'next/head'; +import { Query } from 'react-apollo'; +import MainLayout from 'layouts/MainLayout'; +import EnvironmentWithProblemsQuery from 'lib/query/EnvironmentWithProblems'; +import Breadcrumbs from 'components/Breadcrumbs'; +import ProjectBreadcrumb from 'components/Breadcrumbs/Project'; +import EnvironmentBreadcrumb from 'components/Breadcrumbs/Environment'; +import NavTabs from 'components/NavTabs'; +import Problems from 'components/Problems'; +import withQueryLoading from 'lib/withQueryLoading'; +import withQueryError from 'lib/withQueryError'; +import { withEnvironmentRequired } from 'lib/withDataRequired'; +import { bp, color } from 'lib/variables'; + +/** + * Displays the problems page, given the name of an openshift project. + */ +export const PageProblems = ({ router }) => ( + <> + + {`${router.query.openshiftProjectName} | Problems`} + + + {R.compose( + withQueryLoading, + withQueryError, + withEnvironmentRequired + )(({ data: { environment } }) => { + + return ( + + + + + +
    + +
    + +
    +
    + +
    + ); + })} +
    + +); + +export default withRouter(PageProblems); diff --git a/services/ui/src/static/images/problems-active.svg b/services/ui/src/static/images/problems-active.svg new file mode 100644 index 0000000000..d772807c41 --- /dev/null +++ b/services/ui/src/static/images/problems-active.svg @@ -0,0 +1 @@ +tasks diff --git a/services/ui/src/static/images/problems.svg b/services/ui/src/static/images/problems.svg new file mode 100644 index 0000000000..35e630c54b --- /dev/null +++ b/services/ui/src/static/images/problems.svg @@ -0,0 +1 @@ +tasks \ No newline at end of file diff --git a/services/ui/src/webpack.shared-config.js b/services/ui/src/webpack.shared-config.js index f26992da42..07aa81c127 100644 --- a/services/ui/src/webpack.shared-config.js +++ b/services/ui/src/webpack.shared-config.js @@ -1,6 +1,7 @@ const path = require('path'); module.exports = { + extensions: ['.ts', '.tsx', '.js'], alias: { components: path.join(__dirname, 'components'), layouts: path.join(__dirname, 'layouts'), diff --git a/services/ui/tsconfig.json b/services/ui/tsconfig.json new file mode 100644 index 0000000000..d610fbcaaa --- /dev/null +++ b/services/ui/tsconfig.json @@ -0,0 +1,31 @@ +{ + "extends": "../../tsconfig.json", + "compilerOptions": { + "outDir": "build/lib", + "module": "commonjs", + "target": "es5", + "lib": ["es5", "es6", "es7", "es2017", "dom"], + "sourceMap": true, + "allowJs": false, + "jsx": "react", + "moduleResolution": "node", + "rootDirs": ["src", "stories"], + "baseUrl": "src", + "forceConsistentCasingInFileNames": true, + "noImplicitReturns": true, + "noImplicitThis": true, + "noImplicitAny": true, + "strictNullChecks": true, + "suppressImplicitAnyIndexErrors": true, + "noUnusedLocals": true, + "declaration": true, + "allowSyntheticDefaultImports": true, + "experimentalDecorators": true, + "emitDecoratorMetadata": true + }, + "exclude": ["node_modules", "build", "scripts", "../api"], + "include": ["./src"], + "references": [ + { "path": 
"../../node-packages/commons" } + ] +} diff --git a/services/webhook-handler/src/extractWebhookData.ts b/services/webhook-handler/src/extractWebhookData.ts index 6455cada50..c96d3dc6b9 100644 --- a/services/webhook-handler/src/extractWebhookData.ts +++ b/services/webhook-handler/src/extractWebhookData.ts @@ -84,6 +84,14 @@ export function extractWebhookData(req: IncomingMessage, body: string): WebhookR webhooktype = 'resticbackup'; event = 'restore:finished'; uuid = uuid4(); + } else if (bodyObj.type && bodyObj.type == 'scanningCompleted') { + webhooktype = 'problems'; + event = 'harbor:scanningcompleted'; + uuid = uuid4(); + } else if (bodyObj.lagoonInfo) { + webhooktype = 'problems'; + event = 'drutiny:resultset'; + uuid = uuid4(); } else { throw new Error('No supported event header found on POST request'); } diff --git a/services/webhooks2tasks/src/handlers/problems/harborScanningCompleted.ts b/services/webhooks2tasks/src/handlers/problems/harborScanningCompleted.ts new file mode 100644 index 0000000000..ef068f6913 --- /dev/null +++ b/services/webhooks2tasks/src/handlers/problems/harborScanningCompleted.ts @@ -0,0 +1,250 @@ +// @flow + +import { sendToLagoonLogs } from '@lagoon/commons/dist/logs'; +import { + getVulnerabilitiesPayloadFromHarbor, +} from '@lagoon/commons/dist/harborApi'; +import * as R from 'ramda'; +import uuid4 from 'uuid4'; + +import { + getProjectByName, + getProblemHarborScanMatches, + getEnvironmentByOpenshiftProjectName, + getOpenShiftInfoForProject, +} from '@lagoon/commons/dist/api'; + +const HARBOR_WEBHOOK_SUCCESSFUL_SCAN = "Success"; + +const DEFAULT_REPO_DETAILS_REGEX = "^(?.+)\/(?.+)\/(?.+)$"; + +const DEFAULT_REPO_DETAILS_MATCHER = { + defaultProjectName: "", + defaultEnvironmentName: "", + defaultServiceName: "", + regex: DEFAULT_REPO_DETAILS_REGEX, +}; + + export async function harborScanningCompleted( + WebhookRequestData, + channelWrapperWebhooks +) { + const { webhooktype, event, uuid, body } = WebhookRequestData; + const 
HARBOR_WEBHOOK_SUCCESSFUL_SCAN = "Success"; + + try { + let { + resources, + repository, + scanOverview, + lagoonProjectName, + lagoonEnvironmentName, + lagoonServiceName, + harborScanId, + } = await validateAndTransformIncomingWebhookdata(body); + + if(scanOverview.scan_status !== HARBOR_WEBHOOK_SUCCESSFUL_SCAN) { + sendToLagoonLogs( + 'error', + '', + uuid, + `${webhooktype}:${event}:unhandled`, + { data: body }, + `Received a scan report of status "${scanOverview.scan_status}" - ignoring` + ); + + return; + } + + let vulnerabilities = []; + vulnerabilities = await getVulnerabilitiesFromHarbor(harborScanId); + + let { id: lagoonProjectId } = await getProjectByName(lagoonProjectName); + + const result = await getOpenShiftInfoForProject(lagoonProjectName); + const projectOpenShift = result.project; + + const ocsafety = string => + string.toLocaleLowerCase().replace(/[^0-9a-z-]/g, '-'); + + let openshiftProjectName = projectOpenShift.openshiftProjectPattern + ? projectOpenShift.openshiftProjectPattern + .replace('${branch}', ocsafety(lagoonEnvironmentName)) + .replace('${project}', ocsafety(lagoonProjectName)) + : ocsafety(`${lagoonProjectName}-${lagoonEnvironmentName}`); + + const environmentResult = await getEnvironmentByOpenshiftProjectName(openshiftProjectName); + const environmentDetails: any = R.prop('environmentByOpenshiftProjectName', environmentResult) + + + let messageBody = { + lagoonProjectId, + lagoonProjectName, + lagoonEnvironmentId: environmentDetails.id, + lagoonEnvironmentName: environmentDetails.name, + lagoonServiceName, + vulnerabilities, + }; + + const webhookData = generateWebhookData( + WebhookRequestData.giturl, + 'problems', + 'harbor:scanningresultfetched', + messageBody + ); + + const buffer = new Buffer(JSON.stringify(webhookData)); + + await channelWrapperWebhooks.publish(`lagoon-webhooks`, '', buffer, { + persistent: true, + }); + + } catch (error) { + sendToLagoonLogs( + 'error', + '', + uuid, + `${webhooktype}:${event}:unhandled`, + 
{ data: body }, + `Could not fetch Harbor scan results, reason: ${error}` + ); + } +} + +/** + * This function will take an incoming Harbor webhook and decompose it + * into a more useable format + * + * @param {*} rawData + */ +const validateAndTransformIncomingWebhookdata = async (rawData) => { + let { resources, repository } = rawData.event_data; + + if (!repository.repo_full_name) { + throw generateError( + 'InvalidHarborInput', + 'Unable to find repo_full_name in body.event_data.repository' + ); + } + + // scan_overview is tricky because the property doesn't have an obvious name. + // We convert it to an array of objects with the old property as a member + let scanOverviewArray = R.toPairs(resources[0].scan_overview).map((e) => { + let obj = e[1]; + obj.scan_key = e[0]; + return obj; + }); + + let harborScanPatternMatchers = await getProblemHarborScanMatches(); + + let { + lagoonProjectName, + lagoonEnvironmentName, + lagoonServiceName, + } = matchRepositoryAgainstPatterns(repository.repo_full_name, harborScanPatternMatchers.allProblemHarborScanMatchers); + + return { + resources, + repository, + scanOverview: scanOverviewArray.pop(), + lagoonProjectName, + lagoonEnvironmentName, + lagoonServiceName, + harborScanId: repository.repo_full_name, + }; +}; + +const generateError = (name, message) => { + let e = new Error(message); + e.name = name; + return e; +}; + +const matchRepositoryAgainstPatterns = (repoFullName, matchPatterns = []) => { + const matchingRes = matchPatterns.filter((e) => generateRegex(e.regex).test(repoFullName)); + + if(matchingRes.length > 1) { + const stringifyMatchingRes = matchingRes.reduce((prevRetString, e) => `${e.regex},${prevRetString}`, ''); + throw generateError("InvalidHarborConfiguration", + `We have multiple matching regexes for '${repoFullName}'` + ); + } else if (matchingRes.length == 0 && !generateRegex(DEFAULT_REPO_DETAILS_MATCHER.regex).test(repoFullName)) { + throw generateError("HarborError", + `We have no matching 
regexes, including default, for '${repoFullName}'` + ); + } + + const matchPatternDetails = matchingRes.pop() || DEFAULT_REPO_DETAILS_MATCHER; + + const { + lagoonProjectName = matchPatternDetails.defaultProjectName, + lagoonEnvironmentName = matchPatternDetails.defaultEnvironmentName, + lagoonServiceName = matchPatternDetails.defaultServiceName, + } = extractRepositoryDetailsGivenRegex(repoFullName, matchPatternDetails.regex); + + return {lagoonProjectName, lagoonEnvironmentName, lagoonServiceName}; +} + +const generateRegex = R.memoizeWith(R.identity, re => new RegExp(re)); + +const extractRepositoryDetailsGivenRegex = (repoFullName, pattern = DEFAULT_REPO_DETAILS_REGEX) => { + const re = generateRegex(pattern); + const match = re.exec(repoFullName); + return match.groups; +} + +const generateWebhookData = ( + webhookGiturl, + webhooktype, + event, + body, + id = null +) => { + return { + webhooktype: webhooktype, + event: event, + giturl: webhookGiturl, + uuid: id ? id : uuid4(), + body: body, + }; +}; + +const extractVulnerabilities = (harborScanResponse) => { + for (let [key, value] of Object.entries(harborScanResponse)) { + let potentialStore: any = value; + if (potentialStore.hasOwnProperty('vulnerabilities')) { + return potentialStore.vulnerabilities; + } + } + throw new ProblemsHarborConnectionError( + "Scan response from Harbor does not contain a 'vulnerabilities' key" + ); +}; + +const getVulnerabilitiesFromHarbor = async (scanId) => { + let harborPayload = null; + try { + harborPayload = await getVulnerabilitiesPayloadFromHarbor( + scanId + ); + } catch (error) { + throw error; + } + + return extractVulnerabilities(harborPayload); +}; + +class ProblemsHarborConnectionError extends Error { + constructor(message) { + super(message); + this.name = 'problems-harborConnectionError'; + } +} + +class ProblemsInvalidWebhookData extends Error { + constructor(message) { + super(message); + this.name = 'problems-invalidWebhookData'; + } +} + diff --git 
a/services/webhooks2tasks/src/handlers/problems/processDrutinyResults.ts b/services/webhooks2tasks/src/handlers/problems/processDrutinyResults.ts new file mode 100644 index 0000000000..f0899a0c66 --- /dev/null +++ b/services/webhooks2tasks/src/handlers/problems/processDrutinyResults.ts @@ -0,0 +1,169 @@ +// @flow + +import { + addProblem, + deleteProblemsFromSource, + getProblemsforProjectEnvironment, +} from'@lagoon/commons/dist/api'; +import { sendToLagoonLogs } from '@lagoon/commons/dist/logs'; +const DRUTINY_VULNERABILITY_SOURCE_BASE = 'Drutiny'; +const DRUTINY_SERVICE_NAME = 'cli'; +const DRUTINY_PACKAGE_NAME = '' +import { + getProjectByName, + getEnvironmentByOpenshiftProjectName, + getOpenShiftInfoForProject, +} from '@lagoon/commons/dist/api'; +import { generateProblemsWebhookEventName } from "./webhookHelpers"; +import * as R from 'ramda'; + +const ERROR_STATES = ["error", "failure"]; +const SEVERITY_LEVELS = [ + "NONE", + "UNKNOWN", + "NEGLIGIBLE", + "LOW", + "MEDIUM", + "HIGH", + "CRITICAL" +]; +const DEFAULT_SEVERITY_LEVEL = "NEGLIGIBLE"; + +export async function processDrutinyResultset( + WebhookRequestData, + channelWrapperWebhooks +) { + + const { webhooktype, event, uuid, body } = WebhookRequestData; + const { lagoonInfo, results, profile: drutinyProfile } = body; + + try { + const lagoonProjectName = + lagoonInfo.LAGOON_DRUTINY_PROJECT_NAME || + lagoonInfo.LAGOON_PROJECT || + lagoonInfo.LAGOON_SAFE_PROJECT; + + if (!lagoonProjectName) { + throw new Error('No project name passed in Drutiny results'); + } + + const lagoonEnvironmentName = + lagoonInfo.LAGOON_DRUTINY_ENVIRONMENT_NAME || + lagoonInfo.LAGOON_ENVIRONMENT || + lagoonInfo.LAGOON_GIT_BRANCH; + + if (!lagoonEnvironmentName) { + throw new Error('No environment name passed in Drutiny results'); + } + + const { id: lagoonProjectId } = await getProjectByName( + lagoonProjectName + ); + + const result = await getOpenShiftInfoForProject(lagoonProjectName); + const projectOpenShift = 
result.project; + + const ocsafety = string => + string.toLocaleLowerCase().replace(/[^0-9a-z-]/g, '-'); + + let openshiftProjectName = projectOpenShift.openshiftProjectPattern + ? projectOpenShift.openshiftProjectPattern + .replace('${branch}', ocsafety(lagoonEnvironmentName)) + .replace('${project}', ocsafety(lagoonProjectName)) + : ocsafety(`${lagoonProjectName}-${lagoonEnvironmentName}`); + + const environmentResult = await getEnvironmentByOpenshiftProjectName(openshiftProjectName); + const environmentDetails: any = R.prop('environmentByOpenshiftProjectName', environmentResult) + + const lagoonEnvironmentId = environmentDetails.id; + const lagoonServiceName = DRUTINY_SERVICE_NAME; + const drutinyVulnerabilitySource = `${DRUTINY_VULNERABILITY_SOURCE_BASE}-${drutinyProfile}`; + + //Let's get the existing problems before removing them ... + const existingProblemSet = ( + await getProblemsforProjectEnvironment( + lagoonEnvironmentName, + lagoonProjectId + ) + ) + .filter((e) => e.service == lagoonServiceName) + .reduce((prev, current) => prev.concat([current.identifier]), []); + + await deleteProblemsFromSource( + environmentDetails.id, + drutinyVulnerabilitySource, + DRUTINY_SERVICE_NAME + ); + + if (results) { + results + .filter((e) => ERROR_STATES.includes(e.type)) + .forEach((element) => { + addProblem({ + environment: lagoonEnvironmentId, + identifier: element.name, + severity: convertSeverityLevels(element.severity), + source: drutinyVulnerabilitySource, + description: element.description, + data: JSON.stringify(element), + service: DRUTINY_SERVICE_NAME, + severityScore: null, + associatedPackage: 'Drupal', + version: null, + fixedVersion: null, + links: null, + }) + .then(() => { + sendToLagoonLogs( + 'info', + lagoonProjectName, + uuid, + generateProblemsWebhookEventName({ + source: 'drutiny', + severity: convertSeverityLevels(element.severity), + isNew: !existingProblemSet.includes(element.name) + }), + { + lagoonProjectId, + lagoonProjectName, + 
lagoonEnvironmentId, + lagoonEnvironmentName, + lagoonServiceName, + severity: convertSeverityLevels(element.severity), + vulnerability: element, + }, + `New problem found for ${lagoonProjectName}:${lagoonEnvironmentName}:${lagoonServiceName}. Severity: ${element.severity}. Description: ${element.description}` + ); + }) + .catch((error) => + sendToLagoonLogs( + 'error', + '', + uuid, + `${webhooktype}:${event}:problem_insert_error`, + { data: body }, + `Error inserting problem id ${element.id} for ${lagoonProjectId}:${environmentDetails.id} -- ${error.message}` + ) + ); + }); + } + } catch (error) { + sendToLagoonLogs( + 'error', + '', + uuid, + `${webhooktype}:${event}:unhandled`, + { data: body }, + `Could not process incoming Drutiny scan results, reason: ${error}` + ); + } +} + +const convertSeverityLevels = (level) => { + level = level.toUpperCase(); + if(SEVERITY_LEVELS.includes(level)) { + return level; + } + + return DEFAULT_SEVERITY_LEVEL; +} diff --git a/services/webhooks2tasks/src/handlers/problems/processHarborVulnerabilityList.ts b/services/webhooks2tasks/src/handlers/problems/processHarborVulnerabilityList.ts new file mode 100644 index 0000000000..89c368612e --- /dev/null +++ b/services/webhooks2tasks/src/handlers/problems/processHarborVulnerabilityList.ts @@ -0,0 +1,93 @@ +// @flow + +import { addProblem, + deleteProblemsFromSource, + getProblemsforProjectEnvironment, +} from '@lagoon/commons/dist/api'; +import { sendToLagoonLogs } from '@lagoon/commons/dist/logs'; +import { generateProblemsWebhookEventName } from "./webhookHelpers"; +const HARBOR_VULNERABILITY_SOURCE = 'Harbor'; + + +export async function processHarborVulnerabilityList( + WebhookRequestData, + channelWrapperWebhooks +) { + const { webhooktype, event, uuid, body } = WebhookRequestData; + + const { + lagoonProjectId, + lagoonProjectName, + lagoonEnvironmentId, + lagoonEnvironmentName, + lagoonServiceName, + vulnerabilities, + } = body; + + //Let's get the existing problems before 
removing them ... + const existingProblemSet = ( + await getProblemsforProjectEnvironment( + lagoonEnvironmentName, + lagoonProjectId + ) + ) + .filter((e) => e.service == lagoonServiceName) + .reduce((prev, current) => prev.concat([current.identifier]), []); + + await deleteProblemsFromSource( + lagoonEnvironmentId, + HARBOR_VULNERABILITY_SOURCE, + lagoonServiceName + ); + + if (vulnerabilities) { + vulnerabilities.forEach((element) => { + addProblem({ + environment: lagoonEnvironmentId, + identifier: element.id, + severity: element.severity.toUpperCase(), + severityScore: null, + source: HARBOR_VULNERABILITY_SOURCE, + description: element.description, + links: element.links.pop(), + data: JSON.stringify(element), + version: element.version, + fixedVersion: element.fix_version, + service: lagoonServiceName, + associatedPackage: element.package, + }) + .then(() => { + sendToLagoonLogs( + 'info', + lagoonProjectName, + uuid, + generateProblemsWebhookEventName({ + source: 'harbor', + severity: element.severity.toUpperCase(), + isNew: !existingProblemSet.includes(element.id), + }), + { + lagoonProjectId, + lagoonProjectName, + lagoonEnvironmentId, + lagoonEnvironmentName, + lagoonServiceName, + severity: element.severity.toUpperCase(), + vulnerability: element, + }, + `New problem found for ${lagoonProjectName}:${lagoonEnvironmentName}:${lagoonServiceName}. Severity: ${element.severity}. 
Description: ${element.description}` + ); + }) + .catch((error) => + sendToLagoonLogs( + 'error', + '', + uuid, + `${webhooktype}:${event}:problem_insert_error`, + { data: body }, + `Error inserting problem id ${element.id} for ${lagoonProjectId}:${lagoonEnvironmentId} -- ${error.message}` + ) + ); + }); + } + } diff --git a/services/webhooks2tasks/src/handlers/problems/webhookHelpers.js b/services/webhooks2tasks/src/handlers/problems/webhookHelpers.js new file mode 100644 index 0000000000..edaa092af9 --- /dev/null +++ b/services/webhooks2tasks/src/handlers/problems/webhookHelpers.js @@ -0,0 +1,15 @@ +const generateProblemsWebhookEventName = ({ + source, + severity, + isSummaryData = false, + isNew = true, +}) => { + const prefix = 'problem'; + const eventType = isNew ? 'insert' : 'update'; + const dataType = isSummaryData ? 'summary' : 'item'; + return `${prefix}:${eventType}:${source}:${dataType}:${severity}`; +}; + +module.exports = { + generateProblemsWebhookEventName +}; \ No newline at end of file diff --git a/services/webhooks2tasks/src/handlers/problems/webhookHelpers.ts b/services/webhooks2tasks/src/handlers/problems/webhookHelpers.ts new file mode 100644 index 0000000000..d101ff562c --- /dev/null +++ b/services/webhooks2tasks/src/handlers/problems/webhookHelpers.ts @@ -0,0 +1,11 @@ +export const generateProblemsWebhookEventName = ({ + source, + severity, + isSummaryData = false, + isNew = true, +}) => { + const prefix = 'problem'; + const eventType = isNew ? 'insert' : 'update'; + const dataType = isSummaryData ? 
'summary' : 'item'; + return `${prefix}:${eventType}:${source}:${dataType}:${severity}`; +}; \ No newline at end of file diff --git a/services/webhooks2tasks/src/processQueue.ts b/services/webhooks2tasks/src/processQueue.ts index 3a4abad44c..34ce759a2e 100644 --- a/services/webhooks2tasks/src/processQueue.ts +++ b/services/webhooks2tasks/src/processQueue.ts @@ -3,7 +3,7 @@ import { ConsumeMessage } from 'amqplib'; import { processProjects } from './webhooks/projects'; import { processDataSync } from './webhooks/dataSync'; import { processBackup } from './webhooks/backup'; - +import { processProblems } from './webhooks/problems'; import { WebhookRequestData } from './types'; export async function processQueue (rabbitMsg: ConsumeMessage, channelWrapperWebhooks: ChannelWrapper): Promise { @@ -21,6 +21,8 @@ export async function processQueue (rabbitMsg: ConsumeMessage, channelWrapperWeb processDataSync(rabbitMsg, channelWrapperWebhooks); } else if (webhooktype == 'resticbackup') { processBackup(rabbitMsg, channelWrapperWebhooks); + } else if (webhooktype == 'problems') { + processProblems(rabbitMsg, channelWrapperWebhooks); } else { processProjects(rabbitMsg, channelWrapperWebhooks); diff --git a/services/webhooks2tasks/src/webhooks/problems.ts b/services/webhooks2tasks/src/webhooks/problems.ts new file mode 100644 index 0000000000..b0bc613de1 --- /dev/null +++ b/services/webhooks2tasks/src/webhooks/problems.ts @@ -0,0 +1,73 @@ +// @flow + +import uuid4 from 'uuid4'; +import { logger } from '@lagoon/commons/dist/local-logging'; +import { sendToLagoonLogs } from '@lagoon/commons/dist/logs'; +import { harborScanningCompleted } from '../handlers/problems/harborScanningCompleted'; +import { processHarborVulnerabilityList } from '../handlers/problems/processHarborVulnerabilityList'; +import { processDrutinyResultset } from '../handlers/problems/processDrutinyResults'; + +import { + WebhookRequestData, + Project +} from '../types'; + +export async function processProblems( + 
rabbitMsg, + channelWrapperWebhooks + ): Promise { + const webhook: WebhookRequestData = JSON.parse(rabbitMsg.content.toString()); + const { + webhooktype, + event + } = webhook; + + switch(webhook.event) { + case 'harbor:scanningcompleted' : + await handle(harborScanningCompleted, webhook, `${webhooktype}:${event}`, channelWrapperWebhooks); + break + case 'harbor:scanningresultfetched' : + await handle(processHarborVulnerabilityList, webhook, `${webhooktype}:${event}`, channelWrapperWebhooks); + break; + case 'drutiny:resultset' : + await handle(processDrutinyResultset, webhook, `${webhooktype}:${event}`, channelWrapperWebhooks); + break; + } + channelWrapperWebhooks.ack(rabbitMsg); +}; + +async function handle(handler, webhook: WebhookRequestData, fullEvent: string, channelWrapperWebhooks) { + const { + uuid + } = webhook; + + logger.info(`Handling ${fullEvent}`, { + uuid + }); + + try { + await handler(webhook, channelWrapperWebhooks); + } catch (error) { + logger.error(`Error handling ${fullEvent}`); + logger.error(error); + } +} + +async function unhandled(webhook: WebhookRequestData, fullEvent: string) { + const { + uuid + } = webhook; + + const meta = { + fullEvent: fullEvent + }; + sendToLagoonLogs( + 'info', + '', + uuid, + `unhandledWebhook`, + meta, + `Unhandled webhook ${fullEvent}` + ); + return; +} diff --git a/tests/tests/active-standby-kubernetes.yaml b/tests/tests/active-standby-kubernetes.yaml new file mode 100644 index 0000000000..c161f35f12 --- /dev/null +++ b/tests/tests/active-standby-kubernetes.yaml @@ -0,0 +1,14 @@ +--- + +- include: features/api-token.yaml + vars: + testname: "API TOKEN" + +- include: active-standby/active-standby.yaml + vars: + testname: "ACTIVE_STANDBY" + git_repo_name: active-standby.git + project: ci-active-standby-k8s + branch: master-a + standby_branch: master-b + error_code_check: 404 \ No newline at end of file diff --git a/tests/tests/active-standby.yaml b/tests/tests/active-standby-openshift.yaml similarity index 
82% rename from tests/tests/active-standby.yaml rename to tests/tests/active-standby-openshift.yaml index 100c7bce6c..612aa65665 100644 --- a/tests/tests/active-standby.yaml +++ b/tests/tests/active-standby-openshift.yaml @@ -10,4 +10,5 @@ git_repo_name: active-standby.git project: ci-active-standby branch: master-a - standby_branch: master-b \ No newline at end of file + standby_branch: master-b + error_code_check: 503 \ No newline at end of file diff --git a/tests/tests/active-standby/active-standby.yaml b/tests/tests/active-standby/active-standby.yaml index f90c12d5d8..92ffdb6c56 100644 --- a/tests/tests/active-standby/active-standby.yaml +++ b/tests/tests/active-standby/active-standby.yaml @@ -76,7 +76,7 @@ serial: 1 vars: url: "http://nginx.{{ project | regex_replace('_', '-') }}.{{ branch | regex_replace('/', '-') }}.{{ lookup('env','ROUTE_SUFFIX_HTTP') }}:{{ lookup('env','ROUTE_SUFFIX_HTTP_PORT') }}" - expected_returncode: 503 + expected_returncode: "{{error_code_check}}" tasks: - include: ../../checks/check-url-returncode.yaml @@ -85,6 +85,6 @@ serial: 1 vars: url: "http://nginx.{{ project | regex_replace('_', '-') }}.{{ standby_branch | regex_replace('/', '-') }}.{{ lookup('env','ROUTE_SUFFIX_HTTP') }}:{{ lookup('env','ROUTE_SUFFIX_HTTP_PORT') }}" - expected_returncode: 503 + expected_returncode: "{{error_code_check}}" tasks: - include: ../../checks/check-url-returncode.yaml diff --git a/yarn.lock b/yarn.lock index 000e24402a..087dce9484 100644 --- a/yarn.lock +++ b/yarn.lock @@ -2,30 +2,49 @@ # yarn lockfile v1 -"@apollo/react-common@^3.1.3": - version "3.1.3" - resolved "https://registry.yarnpkg.com/@apollo/react-common/-/react-common-3.1.3.tgz#ddc34f6403f55d47c0da147fd4756dfd7c73dac5" - integrity sha512-Q7ZjDOeqjJf/AOGxUMdGxKF+JVClRXrYBGVq+SuVFqANRpd68MxtVV2OjCWavsFAN0eqYnRqRUrl7vtUCiJqeg== +"@apollo/protobufjs@^1.0.3": + version "1.0.4" + resolved 
"https://registry.yarnpkg.com/@apollo/protobufjs/-/protobufjs-1.0.4.tgz#cf01747a55359066341f31b5ce8db17df44244e0" + integrity sha512-EE3zx+/D/wur/JiLp6VCiw1iYdyy1lCJMf8CGPkLeDt5QJrN4N8tKFx33Ah4V30AUQzMk7Uz4IXKZ1LOj124gA== + dependencies: + "@protobufjs/aspromise" "^1.1.2" + "@protobufjs/base64" "^1.1.2" + "@protobufjs/codegen" "^2.0.4" + "@protobufjs/eventemitter" "^1.1.0" + "@protobufjs/fetch" "^1.1.0" + "@protobufjs/float" "^1.0.2" + "@protobufjs/inquire" "^1.1.0" + "@protobufjs/path" "^1.1.2" + "@protobufjs/pool" "^1.1.0" + "@protobufjs/utf8" "^1.1.0" + "@types/long" "^4.0.0" + "@types/node" "^10.1.0" + long "^4.0.0" + +"@apollo/react-common@^3.1.4": + version "3.1.4" + resolved "https://registry.yarnpkg.com/@apollo/react-common/-/react-common-3.1.4.tgz#ec13c985be23ea8e799c9ea18e696eccc97be345" + integrity sha512-X5Kyro73bthWSCBJUC5XYQqMnG0dLWuDZmVkzog9dynovhfiVCV4kPSdgSIkqnb++cwCzOVuQ4rDKVwo2XRzQA== dependencies: ts-invariant "^0.4.4" tslib "^1.10.0" -"@apollo/react-hooks@^3.1.3": - version "3.1.3" - resolved "https://registry.yarnpkg.com/@apollo/react-hooks/-/react-hooks-3.1.3.tgz#ad42c7af78e81fee0f30e53242640410d5bd0293" - integrity sha512-reIRO9xKdfi+B4gT/o/hnXuopUnm7WED/ru8VQydPw+C/KG/05Ssg1ZdxFKHa3oxwiTUIDnevtccIH35POanbA== +"@apollo/react-hooks@^3.1.5": + version "3.1.5" + resolved "https://registry.yarnpkg.com/@apollo/react-hooks/-/react-hooks-3.1.5.tgz#7e710be52461255ae7fc0b3b9c2ece64299c10e6" + integrity sha512-y0CJ393DLxIIkksRup4nt+vSjxalbZBXnnXxYbviq/woj+zKa431zy0yT4LqyRKpFy9ahMIwxBnBwfwIoupqLQ== dependencies: - "@apollo/react-common" "^3.1.3" + "@apollo/react-common" "^3.1.4" "@wry/equality" "^0.1.9" ts-invariant "^0.4.4" tslib "^1.10.0" -"@apollographql/apollo-tools@^0.4.0": - version "0.4.0" - resolved "https://registry.yarnpkg.com/@apollographql/apollo-tools/-/apollo-tools-0.4.0.tgz#8a1a0ab7a0bb12ccc03b72e4a104cfa5d969fd5f" - integrity sha512-7wEO+S+zgz/wVe3ilFQqICufRBYYDSNUkd1V03JWvXuSydbYq2SM5EgvWmFF+04iadt+aQ0XCCsRzCzRPQODfQ== 
+"@apollographql/apollo-tools@^0.4.3": + version "0.4.8" + resolved "https://registry.yarnpkg.com/@apollographql/apollo-tools/-/apollo-tools-0.4.8.tgz#d81da89ee880c2345eb86bddb92b35291f6135ed" + integrity sha512-W2+HB8Y7ifowcf3YyPHgDI05izyRtOeZ4MqIr7LbTArtmJ0ZHULWpn84SGMW7NAvTV1tFExpHlveHhnXuJfuGA== dependencies: - apollo-env "0.5.1" + apollo-env "^0.6.5" "@apollographql/graphql-playground-html@1.6.24": version "1.6.24" @@ -46,6 +65,13 @@ dependencies: "@babel/highlight" "^7.0.0" +"@babel/code-frame@^7.10.1": + version "7.10.1" + resolved "https://registry.yarnpkg.com/@babel/code-frame/-/code-frame-7.10.1.tgz#d5481c5095daa1c57e16e54c6f9198443afb49ff" + integrity sha512-IGhtTmpjGbYzcEDOw7DcQtbQSXcG9ftmAXtWTu9V936vDye4xjjekktFAtgZsWpzTj/X01jocB46mTywm/4SZw== + dependencies: + "@babel/highlight" "^7.10.1" + "@babel/core@7.1.2": version "7.1.2" resolved "https://registry.yarnpkg.com/@babel/core/-/core-7.1.2.tgz#f8d2a9ceb6832887329a7b60f9d035791400ba4e" @@ -127,6 +153,16 @@ lodash "^4.17.13" source-map "^0.5.0" +"@babel/generator@^7.10.1": + version "7.10.2" + resolved "https://registry.yarnpkg.com/@babel/generator/-/generator-7.10.2.tgz#0fa5b5b2389db8bfdfcc3492b551ee20f5dd69a9" + integrity sha512-AxfBNHNu99DTMvlUPlt1h2+Hn7knPpH5ayJ8OqDWSeLld+Fi2AYBTC/IejWDM9Edcii4UzZRCsbUt0WlSDsDsA== + dependencies: + "@babel/types" "^7.10.2" + jsesc "^2.5.1" + lodash "^4.17.13" + source-map "^0.5.0" + "@babel/generator@^7.7.4": version "7.7.4" resolved "https://registry.yarnpkg.com/@babel/generator/-/generator-7.7.4.tgz#db651e2840ca9aa66f327dcec1dc5f5fa9611369" @@ -201,6 +237,18 @@ "@babel/traverse" "^7.7.4" "@babel/types" "^7.7.4" +"@babel/helper-create-class-features-plugin@^7.10.1": + version "7.10.2" + resolved "https://registry.yarnpkg.com/@babel/helper-create-class-features-plugin/-/helper-create-class-features-plugin-7.10.2.tgz#7474295770f217dbcf288bf7572eb213db46ee67" + integrity sha512-5C/QhkGFh1vqcziq1vAL6SI9ymzUp8BCYjFpvYVhWP4DlATIb3u5q3iUd35mvlyGs8fO7hckkW7i0tmH+5+bvQ== + 
dependencies: + "@babel/helper-function-name" "^7.10.1" + "@babel/helper-member-expression-to-functions" "^7.10.1" + "@babel/helper-optimise-call-expression" "^7.10.1" + "@babel/helper-plugin-utils" "^7.10.1" + "@babel/helper-replace-supers" "^7.10.1" + "@babel/helper-split-export-declaration" "^7.10.1" + "@babel/helper-create-class-features-plugin@^7.7.4": version "7.7.4" resolved "https://registry.yarnpkg.com/@babel/helper-create-class-features-plugin/-/helper-create-class-features-plugin-7.7.4.tgz#fce60939fd50618610942320a8d951b3b639da2d" @@ -273,6 +321,15 @@ "@babel/template" "^7.1.0" "@babel/types" "^7.0.0" +"@babel/helper-function-name@^7.10.1": + version "7.10.1" + resolved "https://registry.yarnpkg.com/@babel/helper-function-name/-/helper-function-name-7.10.1.tgz#92bd63829bfc9215aca9d9defa85f56b539454f4" + integrity sha512-fcpumwhs3YyZ/ttd5Rz0xn0TpIwVkN7X0V38B9TWNfVF42KEkhkAAuPCQ3oXmtTRtiPJrmZ0TrfS0GKF0eMaRQ== + dependencies: + "@babel/helper-get-function-arity" "^7.10.1" + "@babel/template" "^7.10.1" + "@babel/types" "^7.10.1" + "@babel/helper-function-name@^7.7.4": version "7.7.4" resolved "https://registry.yarnpkg.com/@babel/helper-function-name/-/helper-function-name-7.7.4.tgz#ab6e041e7135d436d8f0a3eca15de5b67a341a2e" @@ -296,6 +353,13 @@ dependencies: "@babel/types" "^7.0.0" +"@babel/helper-get-function-arity@^7.10.1": + version "7.10.1" + resolved "https://registry.yarnpkg.com/@babel/helper-get-function-arity/-/helper-get-function-arity-7.10.1.tgz#7303390a81ba7cb59613895a192b93850e373f7d" + integrity sha512-F5qdXkYGOQUb0hpRaPoetF9AnsXknKjWMZ+wmsIRsp5ge5sFh4c3h1eH2pRTTuy9KKAA2+TTYomGXAtEL2fQEw== + dependencies: + "@babel/types" "^7.10.1" + "@babel/helper-get-function-arity@^7.7.4": version "7.7.4" resolved "https://registry.yarnpkg.com/@babel/helper-get-function-arity/-/helper-get-function-arity-7.7.4.tgz#cb46348d2f8808e632f0ab048172130e636005f0" @@ -324,6 +388,13 @@ dependencies: "@babel/types" "^7.5.5" 
+"@babel/helper-member-expression-to-functions@^7.10.1": + version "7.10.1" + resolved "https://registry.yarnpkg.com/@babel/helper-member-expression-to-functions/-/helper-member-expression-to-functions-7.10.1.tgz#432967fd7e12a4afef66c4687d4ca22bc0456f15" + integrity sha512-u7XLXeM2n50gb6PWJ9hoO5oO7JFPaZtrh35t8RqKLT1jFKj9IWeD1zrcrYp1q1qiZTdEarfDWfTIP8nGsu0h5g== + dependencies: + "@babel/types" "^7.10.1" + "@babel/helper-member-expression-to-functions@^7.7.4": version "7.7.4" resolved "https://registry.yarnpkg.com/@babel/helper-member-expression-to-functions/-/helper-member-expression-to-functions-7.7.4.tgz#356438e2569df7321a8326644d4b790d2122cb74" @@ -376,6 +447,13 @@ dependencies: "@babel/types" "^7.0.0" +"@babel/helper-optimise-call-expression@^7.10.1": + version "7.10.1" + resolved "https://registry.yarnpkg.com/@babel/helper-optimise-call-expression/-/helper-optimise-call-expression-7.10.1.tgz#b4a1f2561870ce1247ceddb02a3860fa96d72543" + integrity sha512-a0DjNS1prnBsoKx83dP2falChcs7p3i8VMzdrSbfLhuQra/2ENC4sbri34dz/rWmDADsmF1q5GbfaXydh0Jbjg== + dependencies: + "@babel/types" "^7.10.1" + "@babel/helper-optimise-call-expression@^7.7.4": version "7.7.4" resolved "https://registry.yarnpkg.com/@babel/helper-optimise-call-expression/-/helper-optimise-call-expression-7.7.4.tgz#034af31370d2995242aa4df402c3b7794b2dcdf2" @@ -388,6 +466,11 @@ resolved "https://registry.yarnpkg.com/@babel/helper-plugin-utils/-/helper-plugin-utils-7.0.0.tgz#bbb3fbee98661c569034237cc03967ba99b4f250" integrity sha512-CYAOUCARwExnEixLdB6sDm2dIJ/YgEAKDM1MOeMeZu9Ld/bDgVo8aiWrXwcY7OBh+1Ea2uUcVRcxKk0GJvW7QA== +"@babel/helper-plugin-utils@^7.10.1": + version "7.10.1" + resolved "https://registry.yarnpkg.com/@babel/helper-plugin-utils/-/helper-plugin-utils-7.10.1.tgz#ec5a5cf0eec925b66c60580328b122c01230a127" + integrity sha512-fvoGeXt0bJc7VMWZGCAEBEMo/HAjW2mP8apF5eXK0wSqwLAVHAISCWRoLMBMUs2kqeaG77jltVqu4Hn8Egl3nA== + "@babel/helper-regex@^7.0.0", "@babel/helper-regex@^7.4.4": version "7.5.5" resolved 
"https://registry.yarnpkg.com/@babel/helper-regex/-/helper-regex-7.5.5.tgz#0aa6824f7100a2e0e89c1527c23936c152cab351" @@ -427,6 +510,16 @@ "@babel/traverse" "^7.5.5" "@babel/types" "^7.5.5" +"@babel/helper-replace-supers@^7.10.1": + version "7.10.1" + resolved "https://registry.yarnpkg.com/@babel/helper-replace-supers/-/helper-replace-supers-7.10.1.tgz#ec6859d20c5d8087f6a2dc4e014db7228975f13d" + integrity sha512-SOwJzEfpuQwInzzQJGjGaiG578UYmyi2Xw668klPWV5n07B73S0a9btjLk/52Mlcxa+5AdIYqws1KyXRfMoB7A== + dependencies: + "@babel/helper-member-expression-to-functions" "^7.10.1" + "@babel/helper-optimise-call-expression" "^7.10.1" + "@babel/traverse" "^7.10.1" + "@babel/types" "^7.10.1" + "@babel/helper-replace-supers@^7.7.4": version "7.7.4" resolved "https://registry.yarnpkg.com/@babel/helper-replace-supers/-/helper-replace-supers-7.7.4.tgz#3c881a6a6a7571275a72d82e6107126ec9e2cdd2" @@ -460,6 +553,13 @@ dependencies: "@babel/types" "7.0.0-beta.44" +"@babel/helper-split-export-declaration@^7.10.1": + version "7.10.1" + resolved "https://registry.yarnpkg.com/@babel/helper-split-export-declaration/-/helper-split-export-declaration-7.10.1.tgz#c6f4be1cbc15e3a868e4c64a17d5d31d754da35f" + integrity sha512-UQ1LVBPrYdbchNhLwj6fetj46BcFwfS4NllJo/1aJsT+1dLTEnXJL0qHqtY7gPzF8S2fXBJamf1biAXV3X077g== + dependencies: + "@babel/types" "^7.10.1" + "@babel/helper-split-export-declaration@^7.4.4": version "7.4.4" resolved "https://registry.yarnpkg.com/@babel/helper-split-export-declaration/-/helper-split-export-declaration-7.4.4.tgz#ff94894a340be78f53f06af038b205c49d993677" @@ -474,6 +574,11 @@ dependencies: "@babel/types" "^7.7.4" +"@babel/helper-validator-identifier@^7.10.1": + version "7.10.1" + resolved "https://registry.yarnpkg.com/@babel/helper-validator-identifier/-/helper-validator-identifier-7.10.1.tgz#5770b0c1a826c4f53f5ede5e153163e0318e94b5" + integrity sha512-5vW/JXLALhczRCWP0PnFDMCJAchlBvM7f4uk/jXritBnIa6E1KmqmtrS3yn1LAnxFBypQ3eneLuXjsnfQsgILw== + 
"@babel/helper-wrap-function@^7.1.0": version "7.2.0" resolved "https://registry.yarnpkg.com/@babel/helper-wrap-function/-/helper-wrap-function-7.2.0.tgz#c4e0012445769e2815b55296ead43a958549f6fa" @@ -530,11 +635,25 @@ esutils "^2.0.2" js-tokens "^4.0.0" +"@babel/highlight@^7.10.1": + version "7.10.1" + resolved "https://registry.yarnpkg.com/@babel/highlight/-/highlight-7.10.1.tgz#841d098ba613ba1a427a2b383d79e35552c38ae0" + integrity sha512-8rMof+gVP8mxYZApLF/JgNDAkdKa+aJt3ZYxF8z6+j/hpeXL7iMsKCPHa2jNMHu/qqBwzQF4OHNoYi8dMA/rYg== + dependencies: + "@babel/helper-validator-identifier" "^7.10.1" + chalk "^2.0.0" + js-tokens "^4.0.0" + "@babel/parser@^7.1.0", "@babel/parser@^7.1.2", "@babel/parser@^7.4.3", "@babel/parser@^7.6.0", "@babel/parser@^7.6.2": version "7.6.2" resolved "https://registry.yarnpkg.com/@babel/parser/-/parser-7.6.2.tgz#205e9c95e16ba3b8b96090677a67c9d6075b70a1" integrity sha512-mdFqWrSPCmikBoaBYMuBulzTIKuXVPtEISFbRRVNwMWpCms/hmE2kRq0bblUHaNRKrjRlmVbx1sDHmjmRgD2Xg== +"@babel/parser@^7.10.1": + version "7.10.2" + resolved "https://registry.yarnpkg.com/@babel/parser/-/parser-7.10.2.tgz#871807f10442b92ff97e4783b9b54f6a0ca812d0" + integrity sha512-PApSXlNMJyB4JiGVhCOlzKIif+TKFTvu0aQAhnTvfP/z3vVSN6ZypH5bfUNwFXXjRQtUEBNFd2PtmCmG2Py3qQ== + "@babel/parser@^7.2.3", "@babel/parser@^7.4.2", "@babel/parser@^7.7.4": version "7.7.4" resolved "https://registry.yarnpkg.com/@babel/parser/-/parser-7.7.4.tgz#75ab2d7110c2cf2fa949959afb05fa346d2231bb" @@ -771,6 +890,13 @@ dependencies: "@babel/helper-plugin-utils" "^7.0.0" +"@babel/plugin-syntax-typescript@^7.10.1": + version "7.10.1" + resolved "https://registry.yarnpkg.com/@babel/plugin-syntax-typescript/-/plugin-syntax-typescript-7.10.1.tgz#5e82bc27bb4202b93b949b029e699db536733810" + integrity sha512-X/d8glkrAtra7CaQGMiGs/OGa6XgUzqPcBXCIGFCpCqnfGlT0Wfbzo/B89xHhnInTaItPK8LALblVXcUOEh95Q== + dependencies: + "@babel/helper-plugin-utils" "^7.10.1" + "@babel/plugin-transform-arrow-functions@^7.0.0": version "7.2.0" resolved 
"https://registry.yarnpkg.com/@babel/plugin-transform-arrow-functions/-/plugin-transform-arrow-functions-7.2.0.tgz#9aeafbe4d6ffc6563bf8f8372091628f00779550" @@ -1315,6 +1441,15 @@ dependencies: "@babel/helper-plugin-utils" "^7.0.0" +"@babel/plugin-transform-typescript@^7.10.1", "@babel/plugin-transform-typescript@^7.3.2": + version "7.10.1" + resolved "https://registry.yarnpkg.com/@babel/plugin-transform-typescript/-/plugin-transform-typescript-7.10.1.tgz#2c54daea231f602468686d9faa76f182a94507a6" + integrity sha512-v+QWKlmCnsaimLeqq9vyCsVRMViZG1k2SZTlcZvB+TqyH570Zsij8nvVUZzOASCRiQFUxkLrn9Wg/kH0zgy5OQ== + dependencies: + "@babel/helper-create-class-features-plugin" "^7.10.1" + "@babel/helper-plugin-utils" "^7.10.1" + "@babel/plugin-syntax-typescript" "^7.10.1" + "@babel/plugin-transform-unicode-regex@^7.0.0": version "7.6.2" resolved "https://registry.yarnpkg.com/@babel/plugin-transform-unicode-regex/-/plugin-transform-unicode-regex-7.6.2.tgz#b692aad888a7e8d8b1b214be6b9dc03d5031f698" @@ -1466,6 +1601,14 @@ "@babel/plugin-transform-react-jsx-self" "^7.7.4" "@babel/plugin-transform-react-jsx-source" "^7.7.4" +"@babel/preset-typescript@^7.3.3", "@babel/preset-typescript@^7.8.3": + version "7.10.1" + resolved "https://registry.yarnpkg.com/@babel/preset-typescript/-/preset-typescript-7.10.1.tgz#a8d8d9035f55b7d99a2461a0bdc506582914d07e" + integrity sha512-m6GV3y1ShiqxnyQj10600ZVOFrSSAa8HQ3qIUk2r+gcGtHTIRw0dJnFLt1WNXpKjtVw7yw1DAPU/6ma2ZvgJuA== + dependencies: + "@babel/helper-plugin-utils" "^7.10.1" + "@babel/plugin-transform-typescript" "^7.10.1" + "@babel/runtime-corejs2@7.1.2": version "7.1.2" resolved "https://registry.yarnpkg.com/@babel/runtime-corejs2/-/runtime-corejs2-7.1.2.tgz#8695811a3fd8091f54f274b9320334e5e8c62200" @@ -1495,6 +1638,20 @@ dependencies: regenerator-runtime "^0.13.2" +"@babel/runtime@^7.8.4": + version "7.10.3" + resolved "https://registry.yarnpkg.com/@babel/runtime/-/runtime-7.10.3.tgz#670d002655a7c366540c67f6fd3342cd09500364" + integrity 
sha512-RzGO0RLSdokm9Ipe/YD+7ww8X2Ro79qiXZF3HU9ljrM+qnJmH1Vqth+hbiQZy761LnMJTMitHDuKVYTk3k4dLw== + dependencies: + regenerator-runtime "^0.13.4" + +"@babel/runtime@^7.8.7": + version "7.10.5" + resolved "https://registry.yarnpkg.com/@babel/runtime/-/runtime-7.10.5.tgz#303d8bd440ecd5a491eae6117fd3367698674c5c" + integrity sha512-otddXKhdNn7d0ptoFRHtMLa8LqDxLYwTjB4nYgM1yy5N6gU/MUf8zqyyLltCH3yAVitBzmwK4us+DD0l/MauAg== + dependencies: + regenerator-runtime "^0.13.4" + "@babel/template@7.0.0-beta.44": version "7.0.0-beta.44" resolved "https://registry.yarnpkg.com/@babel/template/-/template-7.0.0-beta.44.tgz#f8832f4fdcee5d59bf515e595fc5106c529b394f" @@ -1514,6 +1671,15 @@ "@babel/parser" "^7.6.0" "@babel/types" "^7.6.0" +"@babel/template@^7.10.1": + version "7.10.1" + resolved "https://registry.yarnpkg.com/@babel/template/-/template-7.10.1.tgz#e167154a94cb5f14b28dc58f5356d2162f539811" + integrity sha512-OQDg6SqvFSsc9A0ej6SKINWrpJiNonRIniYondK2ViKhB06i3c0s+76XUft71iqBEe9S1OKsHwPAjfHnuvnCig== + dependencies: + "@babel/code-frame" "^7.10.1" + "@babel/parser" "^7.10.1" + "@babel/types" "^7.10.1" + "@babel/template@^7.7.4": version "7.7.4" resolved "https://registry.yarnpkg.com/@babel/template/-/template-7.7.4.tgz#428a7d9eecffe27deac0a98e23bf8e3675d2a77b" @@ -1554,6 +1720,21 @@ globals "^11.1.0" lodash "^4.17.13" +"@babel/traverse@^7.10.1": + version "7.10.1" + resolved "https://registry.yarnpkg.com/@babel/traverse/-/traverse-7.10.1.tgz#bbcef3031e4152a6c0b50147f4958df54ca0dd27" + integrity sha512-C/cTuXeKt85K+p08jN6vMDz8vSV0vZcI0wmQ36o6mjbuo++kPMdpOYw23W2XH04dbRt9/nMEfA4W3eR21CD+TQ== + dependencies: + "@babel/code-frame" "^7.10.1" + "@babel/generator" "^7.10.1" + "@babel/helper-function-name" "^7.10.1" + "@babel/helper-split-export-declaration" "^7.10.1" + "@babel/parser" "^7.10.1" + "@babel/types" "^7.10.1" + debug "^4.1.0" + globals "^11.1.0" + lodash "^4.17.13" + "@babel/traverse@^7.7.4": version "7.7.4" resolved 
"https://registry.yarnpkg.com/@babel/traverse/-/traverse-7.7.4.tgz#9c1e7c60fb679fe4fcfaa42500833333c2058558" @@ -1587,6 +1768,15 @@ lodash "^4.17.13" to-fast-properties "^2.0.0" +"@babel/types@^7.10.1", "@babel/types@^7.10.2": + version "7.10.2" + resolved "https://registry.yarnpkg.com/@babel/types/-/types-7.10.2.tgz#30283be31cad0dbf6fb00bd40641ca0ea675172d" + integrity sha512-AD3AwWBSz0AWF0AkCN9VPiWrvldXq+/e3cHa4J89vo4ymjz1XwrBFFVZmkJTsQIPNk+ZVomPSXUJqq8yyjZsng== + dependencies: + "@babel/helper-validator-identifier" "^7.10.1" + lodash "^4.17.13" + to-fast-properties "^2.0.0" + "@babel/types@^7.7.4": version "7.7.4" resolved "https://registry.yarnpkg.com/@babel/types/-/types-7.7.4.tgz#516570d539e44ddf308c07569c258ff94fde9193" @@ -1624,18 +1814,6 @@ resolved "https://registry.yarnpkg.com/@egoist/vue-to-react/-/vue-to-react-1.1.0.tgz#83c884b8608e8ee62e76c03e91ce9c26063a91ad" integrity sha512-MwfwXHDh6ptZGLEtNLPXp2Wghteav7mzpT2Mcwl3NZWKF814i5hhHnNkVrcQQEuxUroSWQqzxLkMKSb+nhPang== -"@emotion/babel-utils@^0.6.4": - version "0.6.10" - resolved "https://registry.yarnpkg.com/@emotion/babel-utils/-/babel-utils-0.6.10.tgz#83dbf3dfa933fae9fc566e54fbb45f14674c6ccc" - integrity sha512-/fnkM/LTEp3jKe++T0KyTszVGWNKPNOUJfjNKLO17BzQ6QPxgbg3whayom1Qr2oLFH3V92tDymU+dT5q676uow== - dependencies: - "@emotion/hash" "^0.6.6" - "@emotion/memoize" "^0.6.6" - "@emotion/serialize" "^0.9.1" - convert-source-map "^1.5.1" - find-root "^1.1.0" - source-map "^0.7.2" - "@emotion/cache@^10.0.17", "@emotion/cache@^10.0.9": version "10.0.19" resolved "https://registry.yarnpkg.com/@emotion/cache/-/cache-10.0.19.tgz#d258d94d9c707dcadaf1558def968b86bb87ad71" @@ -1646,6 +1824,16 @@ "@emotion/utils" "0.11.2" "@emotion/weak-memoize" "0.2.4" +"@emotion/cache@^10.0.27": + version "10.0.29" + resolved "https://registry.yarnpkg.com/@emotion/cache/-/cache-10.0.29.tgz#87e7e64f412c060102d589fe7c6dc042e6f9d1e0" + integrity 
sha512-fU2VtSVlHiF27empSbxi1O2JFdNWZO+2NFHfwO0pxgTep6Xa3uGb+3pVKfLww2l/IBGLNEZl5Xf/++A4wAYDYQ== + dependencies: + "@emotion/sheet" "0.9.4" + "@emotion/stylis" "0.8.5" + "@emotion/utils" "0.11.3" + "@emotion/weak-memoize" "0.2.5" + "@emotion/core@^10.0.20", "@emotion/core@^10.0.9": version "10.0.22" resolved "https://registry.yarnpkg.com/@emotion/core/-/core-10.0.22.tgz#2ac7bcf9b99a1979ab5b0a876fbf37ab0688b177" @@ -1658,6 +1846,18 @@ "@emotion/sheet" "0.9.3" "@emotion/utils" "0.11.2" +"@emotion/core@^10.0.28": + version "10.0.28" + resolved "https://registry.yarnpkg.com/@emotion/core/-/core-10.0.28.tgz#bb65af7262a234593a9e952c041d0f1c9b9bef3d" + integrity sha512-pH8UueKYO5jgg0Iq+AmCLxBsvuGtvlmiDCOuv8fGNYn3cowFpLN98L8zO56U0H1PjDIyAlXymgL3Wu7u7v6hbA== + dependencies: + "@babel/runtime" "^7.5.5" + "@emotion/cache" "^10.0.27" + "@emotion/css" "^10.0.27" + "@emotion/serialize" "^0.11.15" + "@emotion/sheet" "0.9.4" + "@emotion/utils" "0.11.3" + "@emotion/css@^10.0.22", "@emotion/css@^10.0.9": version "10.0.22" resolved "https://registry.yarnpkg.com/@emotion/css/-/css-10.0.22.tgz#37b1abb6826759fe8ac0af0ac0034d27de6d1793" @@ -1667,15 +1867,24 @@ "@emotion/utils" "0.11.2" babel-plugin-emotion "^10.0.22" +"@emotion/css@^10.0.27": + version "10.0.27" + resolved "https://registry.yarnpkg.com/@emotion/css/-/css-10.0.27.tgz#3a7458198fbbebb53b01b2b87f64e5e21241e14c" + integrity sha512-6wZjsvYeBhyZQYNrGoR5yPMYbMBNEnanDrqmsqS1mzDm1cOTu12shvl2j4QHNS36UaTE0USIJawCH9C8oW34Zw== + dependencies: + "@emotion/serialize" "^0.11.15" + "@emotion/utils" "0.11.3" + babel-plugin-emotion "^10.0.27" + "@emotion/hash@0.7.3": version "0.7.3" resolved "https://registry.yarnpkg.com/@emotion/hash/-/hash-0.7.3.tgz#a166882c81c0c6040975dd30df24fae8549bd96f" integrity sha512-14ZVlsB9akwvydAdaEnVnvqu6J2P6ySv39hYyl/aoB6w/V+bXX0tay8cF6paqbgZsN2n5Xh15uF4pE+GvE+itw== -"@emotion/hash@^0.6.2", "@emotion/hash@^0.6.6": - version "0.6.6" - resolved 
"https://registry.yarnpkg.com/@emotion/hash/-/hash-0.6.6.tgz#62266c5f0eac6941fece302abad69f2ee7e25e44" - integrity sha512-ojhgxzUHZ7am3D2jHkMzPpsBAiB005GF5YU4ea+8DNPybMk01JJUM9V9YRlF/GE95tcOm8DxQvWA2jq19bGalQ== +"@emotion/hash@0.8.0": + version "0.8.0" + resolved "https://registry.yarnpkg.com/@emotion/hash/-/hash-0.8.0.tgz#bbbff68978fefdbe68ccb533bc8cbe1d1afb5413" + integrity sha512-kBJtf7PH6aWwZ6fka3zQ0p6SBYzx4fl1LoZXE2RrnYST9Xljm7WfKJrU4g/Xr3Beg72MLrp1AWNUmuYJTL7Cow== "@emotion/is-prop-valid@0.8.5": version "0.8.5" @@ -1684,15 +1893,22 @@ dependencies: "@emotion/memoize" "0.7.3" +"@emotion/is-prop-valid@0.8.8": + version "0.8.8" + resolved "https://registry.yarnpkg.com/@emotion/is-prop-valid/-/is-prop-valid-0.8.8.tgz#db28b1c4368a259b60a97311d6a952d4fd01ac1a" + integrity sha512-u5WtneEAr5IDG2Wv65yhunPSMLIpuKsbuOktRojfrEiEvRyC85LgPMZI63cr7NUqT8ZIGdSVg8ZKGxIug4lXcA== + dependencies: + "@emotion/memoize" "0.7.4" + "@emotion/memoize@0.7.3": version "0.7.3" resolved "https://registry.yarnpkg.com/@emotion/memoize/-/memoize-0.7.3.tgz#5b6b1c11d6a6dddf1f2fc996f74cf3b219644d78" integrity sha512-2Md9mH6mvo+ygq1trTeVp2uzAKwE2P7In0cRpD/M9Q70aH8L+rxMLbb3JCN2JoSWsV2O+DdFjfbbXoMoLBczow== -"@emotion/memoize@^0.6.1", "@emotion/memoize@^0.6.6": - version "0.6.6" - resolved "https://registry.yarnpkg.com/@emotion/memoize/-/memoize-0.6.6.tgz#004b98298d04c7ca3b4f50ca2035d4f60d2eed1b" - integrity sha512-h4t4jFjtm1YV7UirAFuSuFGyLa+NNxjdkq6DpFLANNQY5rHueFZHVY+8Cu1HYVP6DrheB0kv4m5xPjo7eKT7yQ== +"@emotion/memoize@0.7.4": + version "0.7.4" + resolved "https://registry.yarnpkg.com/@emotion/memoize/-/memoize-0.7.4.tgz#19bf0f5af19149111c40d98bb0cf82119f5d9eeb" + integrity sha512-Ja/Vfqe3HpuzRsG1oBtWTHk2PGZ7GR+2Vz5iYGelAw8dx32K0y7PjVuxK6z1nMpZOqAFsRUPCkK1YjJ56qJlgw== "@emotion/serialize@^0.11.12", "@emotion/serialize@^0.11.14": version "0.11.14" @@ -1705,21 +1921,27 @@ "@emotion/utils" "0.11.2" csstype "^2.5.7" -"@emotion/serialize@^0.9.1": - version "0.9.1" - resolved 
"https://registry.yarnpkg.com/@emotion/serialize/-/serialize-0.9.1.tgz#a494982a6920730dba6303eb018220a2b629c145" - integrity sha512-zTuAFtyPvCctHBEL8KZ5lJuwBanGSutFEncqLn/m9T1a6a93smBStK+bZzcNPgj4QS8Rkw9VTwJGhRIUVO8zsQ== +"@emotion/serialize@^0.11.15", "@emotion/serialize@^0.11.16": + version "0.11.16" + resolved "https://registry.yarnpkg.com/@emotion/serialize/-/serialize-0.11.16.tgz#dee05f9e96ad2fb25a5206b6d759b2d1ed3379ad" + integrity sha512-G3J4o8by0VRrO+PFeSc3js2myYNOXVJ3Ya+RGVxnshRYgsvErfAOglKAiy1Eo1vhzxqtUvjCyS5gtewzkmvSSg== dependencies: - "@emotion/hash" "^0.6.6" - "@emotion/memoize" "^0.6.6" - "@emotion/unitless" "^0.6.7" - "@emotion/utils" "^0.8.2" + "@emotion/hash" "0.8.0" + "@emotion/memoize" "0.7.4" + "@emotion/unitless" "0.7.5" + "@emotion/utils" "0.11.3" + csstype "^2.5.7" "@emotion/sheet@0.9.3": version "0.9.3" resolved "https://registry.yarnpkg.com/@emotion/sheet/-/sheet-0.9.3.tgz#689f135ecf87d3c650ed0c4f5ddcbe579883564a" integrity sha512-c3Q6V7Df7jfwSq5AzQWbXHa5soeE4F5cbqi40xn0CzXxWW9/6Mxq48WJEtqfWzbZtW9odZdnRAkwCQwN12ob4A== +"@emotion/sheet@0.9.4": + version "0.9.4" + resolved "https://registry.yarnpkg.com/@emotion/sheet/-/sheet-0.9.4.tgz#894374bea39ec30f489bbfc3438192b9774d32e5" + integrity sha512-zM9PFmgVSqBw4zL101Q0HrBVTGmpAxFZH/pYx/cjJT5advXguvcgjHFTCaIO3enL/xr89vK2bh0Mfyj9aa0ANA== + "@emotion/styled-base@^10.0.23": version "10.0.24" resolved "https://registry.yarnpkg.com/@emotion/styled-base/-/styled-base-10.0.24.tgz#9497efd8902dfeddee89d24b0eeb26b0665bfe8b" @@ -1730,6 +1952,16 @@ "@emotion/serialize" "^0.11.14" "@emotion/utils" "0.11.2" +"@emotion/styled-base@^10.0.27": + version "10.0.31" + resolved "https://registry.yarnpkg.com/@emotion/styled-base/-/styled-base-10.0.31.tgz#940957ee0aa15c6974adc7d494ff19765a2f742a" + integrity sha512-wTOE1NcXmqMWlyrtwdkqg87Mu6Rj1MaukEoEmEkHirO5IoHDJ8LgCQL4MjJODgxWxXibGR3opGp1p7YvkNEdXQ== + dependencies: + "@babel/runtime" "^7.5.5" + "@emotion/is-prop-valid" "0.8.8" + "@emotion/serialize" "^0.11.15" + 
"@emotion/utils" "0.11.3" + "@emotion/styled@^10.0.17": version "10.0.23" resolved "https://registry.yarnpkg.com/@emotion/styled/-/styled-10.0.23.tgz#2f8279bd59b99d82deade76d1046249ddfab7c1b" @@ -1738,41 +1970,69 @@ "@emotion/styled-base" "^10.0.23" babel-plugin-emotion "^10.0.23" +"@emotion/styled@^10.0.27": + version "10.0.27" + resolved "https://registry.yarnpkg.com/@emotion/styled/-/styled-10.0.27.tgz#12cb67e91f7ad7431e1875b1d83a94b814133eaf" + integrity sha512-iK/8Sh7+NLJzyp9a5+vIQIXTYxfT4yB/OJbjzQanB2RZpvmzBQOHZWhpAMZWYEKRNNbsD6WfBw5sVWkb6WzS/Q== + dependencies: + "@emotion/styled-base" "^10.0.27" + babel-plugin-emotion "^10.0.27" + "@emotion/stylis@0.8.4": version "0.8.4" resolved "https://registry.yarnpkg.com/@emotion/stylis/-/stylis-0.8.4.tgz#6c51afdf1dd0d73666ba09d2eb6c25c220d6fe4c" integrity sha512-TLmkCVm8f8gH0oLv+HWKiu7e8xmBIaokhxcEKPh1m8pXiV/akCiq50FvYgOwY42rjejck8nsdQxZlXZ7pmyBUQ== -"@emotion/stylis@^0.7.0": - version "0.7.1" - resolved "https://registry.yarnpkg.com/@emotion/stylis/-/stylis-0.7.1.tgz#50f63225e712d99e2b2b39c19c70fff023793ca5" - integrity sha512-/SLmSIkN13M//53TtNxgxo57mcJk/UJIDFRKwOiLIBEyBHEcipgR6hNMQ/59Sl4VjCJ0Z/3zeAZyvnSLPG/1HQ== +"@emotion/stylis@0.8.5": + version "0.8.5" + resolved "https://registry.yarnpkg.com/@emotion/stylis/-/stylis-0.8.5.tgz#deacb389bd6ee77d1e7fcaccce9e16c5c7e78e04" + integrity sha512-h6KtPihKFn3T9fuIrwvXXUOwlx3rfUvfZIcP5a6rh8Y7zjE3O06hT5Ss4S/YI1AYhuZ1kjaE/5EaOOI2NqSylQ== "@emotion/unitless@0.7.4": version "0.7.4" resolved "https://registry.yarnpkg.com/@emotion/unitless/-/unitless-0.7.4.tgz#a87b4b04e5ae14a88d48ebef15015f6b7d1f5677" integrity sha512-kBa+cDHOR9jpRJ+kcGMsysrls0leukrm68DmFQoMIWQcXdr2cZvyvypWuGYT7U+9kAExUE7+T7r6G3C3A6L8MQ== -"@emotion/unitless@^0.6.2", "@emotion/unitless@^0.6.7": - version "0.6.7" - resolved "https://registry.yarnpkg.com/@emotion/unitless/-/unitless-0.6.7.tgz#53e9f1892f725b194d5e6a1684a7b394df592397" - integrity 
sha512-Arj1hncvEVqQ2p7Ega08uHLr1JuRYBuO5cIvcA+WWEQ5+VmkOE3ZXzl04NbQxeQpWX78G7u6MqxKuNX3wvYZxg== +"@emotion/unitless@0.7.5": + version "0.7.5" + resolved "https://registry.yarnpkg.com/@emotion/unitless/-/unitless-0.7.5.tgz#77211291c1900a700b8a78cfafda3160d76949ed" + integrity sha512-OWORNpfjMsSSUBVrRBVGECkhWcULOAJz9ZW8uK9qgxD+87M7jHRcvh/A96XXNhXTLmKcoYSQtBEX7lHMO7YRwg== "@emotion/utils@0.11.2": version "0.11.2" resolved "https://registry.yarnpkg.com/@emotion/utils/-/utils-0.11.2.tgz#713056bfdffb396b0a14f1c8f18e7b4d0d200183" integrity sha512-UHX2XklLl3sIaP6oiMmlVzT0J+2ATTVpf0dHQVyPJHTkOITvXfaSqnRk6mdDhV9pR8T/tHc3cex78IKXssmzrA== -"@emotion/utils@^0.8.2": - version "0.8.2" - resolved "https://registry.yarnpkg.com/@emotion/utils/-/utils-0.8.2.tgz#576ff7fb1230185b619a75d258cbc98f0867a8dc" - integrity sha512-rLu3wcBWH4P5q1CGoSSH/i9hrXs7SlbRLkoq9IGuoPYNGQvDJ3pt/wmOM+XgYjIDRMVIdkUWt0RsfzF50JfnCw== +"@emotion/utils@0.11.3": + version "0.11.3" + resolved "https://registry.yarnpkg.com/@emotion/utils/-/utils-0.11.3.tgz#a759863867befa7e583400d322652a3f44820924" + integrity sha512-0o4l6pZC+hI88+bzuaX/6BgOvQVhbt2PfmxauVaYOGgbsAw14wdKyvMCZXnsnsHys94iadcF+RG/wZyx6+ZZBw== "@emotion/weak-memoize@0.2.4": version "0.2.4" resolved "https://registry.yarnpkg.com/@emotion/weak-memoize/-/weak-memoize-0.2.4.tgz#622a72bebd1e3f48d921563b4b60a762295a81fc" integrity sha512-6PYY5DVdAY1ifaQW6XYTnOMihmBVT27elqSjEoodchsGjzYlEsTQMcEhSud99kVawatyTZRTiVkJ/c6lwbQ7nA== +"@emotion/weak-memoize@0.2.5": + version "0.2.5" + resolved "https://registry.yarnpkg.com/@emotion/weak-memoize/-/weak-memoize-0.2.5.tgz#8eed982e2ee6f7f4e44c253e12962980791efd46" + integrity sha512-6U71C2Wp7r5XtFtQzYrW5iKFT67OixrSxjI4MptCHzdSVlgabczzqLe0ZSgnub/5Kp4hSbpDB1tMytZY9pwxxA== + +"@grpc/grpc-js@1.0.3": + version "1.0.3" + resolved "https://registry.yarnpkg.com/@grpc/grpc-js/-/grpc-js-1.0.3.tgz#7fa2ba293ccc1e91b24074c2628c8c68336e18c4" + integrity 
sha512-JKV3f5Bv2TZxK6eJSB9EarsZrnLxrvcFNwI9goq0YRXa3S6NNoCSnI3cG3lkXVIJ03Wng1WXe76kc2JQtRe7AQ== + dependencies: + semver "^6.2.0" + +"@grpc/proto-loader@^0.5.4": + version "0.5.4" + resolved "https://registry.yarnpkg.com/@grpc/proto-loader/-/proto-loader-0.5.4.tgz#038a3820540f621eeb1b05d81fbedfb045e14de0" + integrity sha512-HTM4QpI9B2XFkPz7pjwMyMgZchJ93TVkL3kWPW8GDMDKYxsMnmf4w2TNMJK7+KNiYHS5cJrCEAFlF+AwtXWVPA== + dependencies: + lodash.camelcase "^4.3.0" + protobufjs "^6.8.6" + "@icons/material@^0.2.4": version "0.2.4" resolved "https://registry.yarnpkg.com/@icons/material/-/material-0.2.4.tgz#e90c9f71768b3736e76d7dd6783fc6c2afa88bc8" @@ -2016,6 +2276,33 @@ call-me-maybe "^1.0.1" glob-to-regexp "^0.3.0" +"@newrelic/aws-sdk@^1.1.1": + version "1.1.3" + resolved "https://registry.yarnpkg.com/@newrelic/aws-sdk/-/aws-sdk-1.1.3.tgz#e51adcf0cd3b5a527b96a86ac24b5adaeac1cb0c" + integrity sha512-8O//20g3WxpTWiUcY8EWodfSlQ9qre0smbvA8N1B9sw42DYDfuYq011No/7/yynMPL5taY7cOwKkTUfqzzslCA== + +"@newrelic/koa@^3.0.0": + version "3.0.0" + resolved "https://registry.yarnpkg.com/@newrelic/koa/-/koa-3.0.0.tgz#048f636c6c06ab4e823674a2ed3d67677a22ed50" + integrity sha512-SxfcMqSxiKa3pi7dRmVoCXnh/VLc196GmwyGU2Fr5+vMxS5jPVj2a15v1mn2DGu04XngfXDvyt9Xa6u1JVRDpQ== + dependencies: + methods "^1.1.2" + +"@newrelic/native-metrics@^5.1.0": + version "5.2.0" + resolved "https://registry.yarnpkg.com/@newrelic/native-metrics/-/native-metrics-5.2.0.tgz#db5fb09bc50b73dabe2620629c24dc9fa903a5d5" + integrity sha512-vqqC3uwbiAMsmDkDecqm4Bcn6gwskl31SpYY6X/Zzuee+CcNifry5kcfD2iW7w4ENGDjn53dntvuRLQcxnQyKQ== + dependencies: + nan "^2.14.1" + semver "^5.5.1" + +"@newrelic/superagent@^2.0.1": + version "2.0.1" + resolved "https://registry.yarnpkg.com/@newrelic/superagent/-/superagent-2.0.1.tgz#8a0280598fedefd1b45fc83bdbc2707d129c47d5" + integrity sha512-1kOtaYh00DcK0IZ0LD3M6ja3urvm4a/waplr7TzrT/fDN/zgazpGSuRbYVg+O6zZacE4/Iw7OoKYGZW3bgBjJw== + dependencies: + methods "^1.1.2" + "@nodelib/fs.stat@^1.1.2": version 
"1.1.3" resolved "https://registry.yarnpkg.com/@nodelib/fs.stat/-/fs.stat-1.1.3.tgz#2b5a3ab3f918cca48a8c754c08168e3f03eba61b" @@ -2477,6 +2764,18 @@ pretty-hrtime "^1.0.3" regenerator-runtime "^0.13.3" +"@storybook/node-logger@^5.3.17": + version "5.3.19" + resolved "https://registry.yarnpkg.com/@storybook/node-logger/-/node-logger-5.3.19.tgz#c414e4d3781aeb06298715220012f552a36dff29" + integrity sha512-hKshig/u5Nj9fWy0OsyU04yqCxr0A9pydOHIassr4fpLAaePIN2YvqCqE2V+TxQHjZUnowSSIhbXrGt0DI5q2A== + dependencies: + "@types/npmlog" "^4.1.2" + chalk "^3.0.0" + core-js "^3.0.1" + npmlog "^4.1.2" + pretty-hrtime "^1.0.3" + regenerator-runtime "^0.13.3" + "@storybook/postinstall@5.3.0-beta.16": version "5.3.0-beta.16" resolved "https://registry.yarnpkg.com/@storybook/postinstall/-/postinstall-5.3.0-beta.16.tgz#e679e22147b87f43ee6343a2bd51ea7538410eab" @@ -2484,6 +2783,17 @@ dependencies: core-js "^3.0.1" +"@storybook/preset-typescript@^3.0.0": + version "3.0.0" + resolved "https://registry.yarnpkg.com/@storybook/preset-typescript/-/preset-typescript-3.0.0.tgz#e157baf6f2c4982c3da3328f5e1a640b3d7db9e4" + integrity sha512-tEbFWg5h/8SPfSCNXPxyqY418704K14q5H/xb9t0ARMXK3kZPTkKqKvdTvYg3UEKBBYbc+GA57UWaL+9b+DbDg== + dependencies: + "@babel/preset-typescript" "^7.8.3" + "@storybook/node-logger" "^5.3.17" + "@types/babel__core" "^7.1.6" + babel-preset-typescript-vue "^1.0.3" + fork-ts-checker-webpack-plugin "^4.1.0" + "@storybook/react@^5.3.0-beta.16": version "5.3.0-beta.16" resolved "https://registry.yarnpkg.com/@storybook/react/-/react-5.3.0-beta.16.tgz#2ddd01eda9b8fb214fd24e6e05554b96d12fc8e8" @@ -2753,6 +3063,17 @@ "@types/babel__template" "*" "@types/babel__traverse" "*" +"@types/babel__core@^7.1.6": + version "7.1.8" + resolved "https://registry.yarnpkg.com/@types/babel__core/-/babel__core-7.1.8.tgz#057f725aca3641f49fc11c7a87a9de5ec588a5d7" + integrity sha512-KXBiQG2OXvaPWFPDS1rD8yV9vO0OuWIqAEqLsbfX0oU2REN5KuoMnZ1gClWcBhO5I3n6oTVAmrMufOvRqdmFTQ== + dependencies: + "@babel/parser" 
"^7.1.0" + "@babel/types" "^7.0.0" + "@types/babel__generator" "*" + "@types/babel__template" "*" + "@types/babel__traverse" "*" + "@types/babel__generator@*": version "7.6.0" resolved "https://registry.yarnpkg.com/@types/babel__generator/-/babel__generator-7.6.0.tgz#f1ec1c104d1bb463556ecb724018ab788d0c172a" @@ -2787,10 +3108,10 @@ resolved "https://registry.yarnpkg.com/@types/bluebird/-/bluebird-3.5.27.tgz#61eb4d75dc6bfbce51cf49ee9bbebe941b2cb5d0" integrity sha512-6BmYWSBea18+tSjjSC3QIyV93ZKAeNWGM7R6aYt1ryTZXrlHF+QLV0G2yV0viEGVyRkyQsWfMoJ0k/YghBX5sQ== -"@types/body-parser@*", "@types/body-parser@1.17.1": - version "1.17.1" - resolved "https://registry.yarnpkg.com/@types/body-parser/-/body-parser-1.17.1.tgz#18fcf61768fb5c30ccc508c21d6fd2e8b3bf7897" - integrity sha512-RoX2EZjMiFMjZh9lmYrwgoP9RTpAjSHiJxdp4oidAQVO02T7HER3xj9UKue5534ULWeqVEkujhWcyvUce+d68w== +"@types/body-parser@*", "@types/body-parser@1.19.0": + version "1.19.0" + resolved "https://registry.yarnpkg.com/@types/body-parser/-/body-parser-1.19.0.tgz#0685b3c47eb3006ffed117cdd55164b61f80538f" + integrity sha512-W98JrE0j2K78swW4ukqMleo8R7h/pFETjM2DQ90MF6XK2i4LO4W3gQ71Lt4w3bfm2EvVSyWHplECvB5sK22yFQ== dependencies: "@types/connect" "*" "@types/node" "*" @@ -2869,19 +3190,20 @@ "@types/qs" "*" "@types/range-parser" "*" -"@types/express@*", "@types/express@4.17.1": - version "4.17.1" - resolved "https://registry.yarnpkg.com/@types/express/-/express-4.17.1.tgz#4cf7849ae3b47125a567dfee18bfca4254b88c5c" - integrity sha512-VfH/XCP0QbQk5B5puLqTLEeFgR8lfCJHZJKkInZ9mkYd+u8byX0kztXEQxEk4wZXJs8HI+7km2ALXjn4YKcX9w== +"@types/express@*", "@types/express@^4.17.6": + version "4.17.6" + resolved "https://registry.yarnpkg.com/@types/express/-/express-4.17.6.tgz#6bce49e49570507b86ea1b07b806f04697fac45e" + integrity sha512-n/mr9tZI83kd4azlPG5y997C/M4DNABK9yErhFM6hKdym4kkmd9j0vtsJyjFIwfRBxtrxZtAfGZCNRIBMFLK5w== dependencies: "@types/body-parser" "*" "@types/express-serve-static-core" "*" + "@types/qs" "*" "@types/serve-static" 
"*" -"@types/express@^4.17.6": - version "4.17.6" - resolved "https://registry.yarnpkg.com/@types/express/-/express-4.17.6.tgz#6bce49e49570507b86ea1b07b806f04697fac45e" - integrity sha512-n/mr9tZI83kd4azlPG5y997C/M4DNABK9yErhFM6hKdym4kkmd9j0vtsJyjFIwfRBxtrxZtAfGZCNRIBMFLK5w== +"@types/express@4.17.4": + version "4.17.4" + resolved "https://registry.yarnpkg.com/@types/express/-/express-4.17.4.tgz#e78bf09f3f530889575f4da8a94cd45384520aac" + integrity sha512-DO1L53rGqIDUEvOjJKmbMEQ5Z+BM2cIEPy/eV3En+s166Gz+FeuzRerxcab757u/U4v4XF4RYrZPmqKa+aY/2w== dependencies: "@types/body-parser" "*" "@types/express-serve-static-core" "*" @@ -3013,6 +3335,11 @@ resolved "https://registry.yarnpkg.com/@types/long/-/long-4.0.0.tgz#719551d2352d301ac8b81db732acb6bdc28dbdef" integrity sha512-1w52Nyx4Gq47uuu0EVcsHBxZFJgurQ+rTKS3qMHxR1GY2T8c2AJYd6vZoZ9q1rupaDjU0yT+Jc2XTyXkjeMA+Q== +"@types/long@^4.0.1": + version "4.0.1" + resolved "https://registry.yarnpkg.com/@types/long/-/long-4.0.1.tgz#459c65fa1867dafe6a8f322c4c51695663cc55e9" + integrity sha512-5tXH6Bx/kNGd3MgffdmP4dy2Z+G4eaXw0SE81Tq3BNadtnMR5/ySMzX4SLEzHJzSmPNn4HIdpQsBvXMUykr58w== + "@types/mariasql@^0.1.30": version "0.1.30" resolved "https://registry.yarnpkg.com/@types/mariasql/-/mariasql-0.1.30.tgz#944446a351452169e10a68fbff7f20c6e0bc5b34" @@ -3023,6 +3350,14 @@ resolved "https://registry.yarnpkg.com/@types/mime/-/mime-2.0.1.tgz#dc488842312a7f075149312905b5e3c0b054c79d" integrity sha512-FwI9gX75FgVBJ7ywgnq/P7tw+/o1GUbtP0KzbtusLigAOgIgNISRK0ZPl4qertvXSIE8YbsVJueQ90cDt9YYyw== +"@types/node-fetch@2.5.7": + version "2.5.7" + resolved "https://registry.yarnpkg.com/@types/node-fetch/-/node-fetch-2.5.7.tgz#20a2afffa882ab04d44ca786449a276f9f6bbf3c" + integrity sha512-o2WVNf5UhWRkxlf6eq+jMZDu7kjgpgJfl4xVNlvryc95O/6F2ld8ztKX+qu+Rjyet93WAWm5LjeX9H5FGkODvw== + dependencies: + "@types/node" "*" + form-data "^3.0.0" + "@types/node@*", "@types/node@>=6", "@types/node@>=6.0.0": version "12.7.8" resolved 
"https://registry.yarnpkg.com/@types/node/-/node-12.7.8.tgz#cb1bf6800238898bc2ff6ffa5702c3cadd350708" @@ -3038,6 +3373,16 @@ resolved "https://registry.yarnpkg.com/@types/node/-/node-10.17.15.tgz#bfff4e23e9e70be6eec450419d51e18de1daf8e7" integrity sha512-daFGV9GSs6USfPgxceDA8nlSe48XrVCJfDeYm7eokxq/ye7iuOH87hKXgMtEAVLFapkczbZsx868PMDT1Y0a6A== +"@types/node@^13.7.0": + version "13.13.12" + resolved "https://registry.yarnpkg.com/@types/node/-/node-13.13.12.tgz#9c72e865380a7dc99999ea0ef20fc9635b503d20" + integrity sha512-zWz/8NEPxoXNT9YyF2osqyA9WjssZukYpgI4UYZpOjcyqwIUqWGkcCionaEb9Ki+FULyPyvNFpg/329Kd2/pbw== + +"@types/npmlog@^4.1.2": + version "4.1.2" + resolved "https://registry.yarnpkg.com/@types/npmlog/-/npmlog-4.1.2.tgz#d070fe6a6b78755d1092a3dc492d34c3d8f871c4" + integrity sha512-4QQmOF5KlwfxJ5IGXFIudkeLCdMABz03RcUXu+LCb24zmln8QW6aDjuGl4d4XPVLf2j+FnjelHTP7dvceAFbhA== + "@types/p-cancelable@^1.0.0": version "1.0.1" resolved "https://registry.yarnpkg.com/@types/p-cancelable/-/p-cancelable-1.0.1.tgz#4f0ce8aa3ee0007c2768b9b3e6e22af20a6eecbd" @@ -3123,6 +3468,13 @@ "@types/prop-types" "*" csstype "^2.2.0" +"@types/redis@^2.8.22": + version "2.8.22" + resolved "https://registry.yarnpkg.com/@types/redis/-/redis-2.8.22.tgz#8935227cbe39080506b625276d64974ddbcb9ea4" + integrity sha512-O21YLcAtcSzax8wy4CfxMNjIMNf5X2c1pKTXDWLMa2p77Igvy7wuNjWVv+Db93wTvRvLLev6oq3IE7gxNKFZyg== + dependencies: + "@types/node" "*" + "@types/request@^2.47.1": version "2.48.4" resolved "https://registry.yarnpkg.com/@types/request/-/request-2.48.4.tgz#df3d43d7b9ed3550feaa1286c6eabf0738e6cf7e" @@ -3179,13 +3531,6 @@ "@types/events" "*" "@types/node" "*" -"@types/ws@^6.0.0": - version "6.0.3" - resolved "https://registry.yarnpkg.com/@types/ws/-/ws-6.0.3.tgz#b772375ba59d79066561c8d87500144d674ba6b3" - integrity sha512-yBTM0P05Tx9iXGq00BbJPo37ox68R5vaGTXivs6RGh/BQ6QP5zqZDGWdAO6JbRE/iR1l80xeGAwCQS2nMV9S/w== - dependencies: - "@types/node" "*" - "@types/ws@^6.0.1": version "6.0.4" resolved 
"https://registry.yarnpkg.com/@types/ws/-/ws-6.0.4.tgz#7797707c8acce8f76d8c34b370d4645b70421ff1" @@ -3193,6 +3538,13 @@ dependencies: "@types/node" "*" +"@types/ws@^7.0.0": + version "7.2.5" + resolved "https://registry.yarnpkg.com/@types/ws/-/ws-7.2.5.tgz#513f28b04a1ea1aa9dc2cad3f26e8e37c88aae49" + integrity sha512-4UEih9BI1nBKii385G9id1oFrSkLcClbwtDfcYj8HJLQqZVAtb/42vXVrYvRWCcufNF/a+rZD3MxNwghA7UmCg== + dependencies: + "@types/node" "*" + "@types/yargs-parser@*": version "13.1.0" resolved "https://registry.yarnpkg.com/@types/yargs-parser/-/yargs-parser-13.1.0.tgz#c563aa192f39350a1d18da36c5a8da382bbd8228" @@ -3217,6 +3569,11 @@ resolved "https://registry.yarnpkg.com/@types/zen-observable/-/zen-observable-0.8.0.tgz#8b63ab7f1aa5321248aad5ac890a485656dcea4d" integrity sha512-te5lMAWii1uEJ4FwLjzdlbw3+n0FZNOvFXHxQDKeT0dilh7HOzdMzV2TrJVUzq8ep7J4Na8OUYPRLSQkJHAlrg== +"@tyriar/fibonacci-heap@^2.0.7": + version "2.0.9" + resolved "https://registry.yarnpkg.com/@tyriar/fibonacci-heap/-/fibonacci-heap-2.0.9.tgz#df3dcbdb1b9182168601f6318366157ee16666e9" + integrity sha512-bYuSNomfn4hu2tPiDN+JZtnzCpSpbJ/PNeulmocDy3xN2X5OkJL65zo6rPZp65cPPhLF9vfT/dgE+RtFRCSxOA== + "@webassemblyjs/ast@1.7.11": version "1.7.11" resolved "https://registry.yarnpkg.com/@webassemblyjs/ast/-/ast-1.7.11.tgz#b988582cafbb2b095e8b556526f30c90d057cace" @@ -3643,6 +4000,11 @@ address@1.1.2, address@^1.0.1: resolved "https://registry.yarnpkg.com/address/-/address-1.1.2.tgz#bf1116c9c758c51b7a933d296b72c221ed9428b6" integrity sha512-aT6camzM4xEA54YVJYSqxz1kv4IHnQZRtThJJHhUMRExaU5spC7jX5ugSwTaTgJliIgs4VhZOk7htClvQ/LmRA== +agent-base@5: + version "5.1.1" + resolved "https://registry.yarnpkg.com/agent-base/-/agent-base-5.1.1.tgz#e8fb3f242959db44d63be665db7a8e739537a32c" + integrity sha512-TMeqbNl2fMW0nMjTEPOwe3J/PRFP4vqeoNuQMG0HlMrtm5QxKqdvAkZ1pRBQ/ulIyDD5Yq0nJ7YbdD8ey0TO3g== + agentkeepalive@^3.4.1: version "3.5.2" resolved 
"https://registry.yarnpkg.com/agentkeepalive/-/agentkeepalive-3.5.2.tgz#a113924dd3fa24a0bc3b78108c450c2abee00f67" @@ -3898,13 +4260,13 @@ anymatch@^2.0.0: micromatch "^3.1.4" normalize-path "^2.1.1" -apollo-cache-control@^0.8.4: - version "0.8.4" - resolved "https://registry.yarnpkg.com/apollo-cache-control/-/apollo-cache-control-0.8.4.tgz#a3650d5e4173953e2a3af995bea62147f1ffe4d7" - integrity sha512-IZ1d3AXZtkZhLYo0kWqTbZ6nqLFaeUvLdMESs+9orMadBZ7mvzcAfBwrhKyCWPGeAAZ/jKv8FtYHybpchHgFAg== +apollo-cache-control@^0.11.0: + version "0.11.0" + resolved "https://registry.yarnpkg.com/apollo-cache-control/-/apollo-cache-control-0.11.0.tgz#7075492d04c5424e7c6769380b503e8f75b39d61" + integrity sha512-dmRnQ9AXGw2SHahVGLzB/p4UW/taFBAJxifxubp8hqY5p9qdlSu4MPRq8zvV2ULMYf50rBtZyC4C+dZLqmHuHQ== dependencies: - apollo-server-env "^2.4.3" - graphql-extensions "^0.10.3" + apollo-server-env "^2.4.4" + apollo-server-plugin-base "^0.9.0" apollo-cache-inmemory@^1.3.9: version "1.6.3" @@ -3939,49 +4301,52 @@ apollo-client@^2.4.5: tslib "^1.9.3" zen-observable "^0.8.0" -apollo-datasource@^0.6.3: - version "0.6.3" - resolved "https://registry.yarnpkg.com/apollo-datasource/-/apollo-datasource-0.6.3.tgz#b31e089e52adb92fabb536ab8501c502573ffe13" - integrity sha512-gRYyFVpJgHE2hhS+VxMeOerxXQ/QYxWG7T6QddfugJWYAG9DRCl65e2b7txcGq2NP3r+O1iCm4GNwhRBDJbd8A== +apollo-datasource@^0.7.1: + version "0.7.1" + resolved "https://registry.yarnpkg.com/apollo-datasource/-/apollo-datasource-0.7.1.tgz#0b06da999ace50b7f5fe509f2a03f7de97974334" + integrity sha512-h++/jQAY7GA+4TBM+7ezvctFmmGNLrAPf51KsagZj+NkT9qvxp585rdsuatynVbSl59toPK2EuVmc6ilmQHf+g== dependencies: - apollo-server-caching "^0.5.0" - apollo-server-env "^2.4.3" + apollo-server-caching "^0.5.1" + apollo-server-env "^2.4.4" -apollo-engine-reporting-protobuf@^0.4.0: - version "0.4.0" - resolved "https://registry.yarnpkg.com/apollo-engine-reporting-protobuf/-/apollo-engine-reporting-protobuf-0.4.0.tgz#e34c192d86493b33a73181fd6be75721559111ec" - integrity 
sha512-cXHZSienkis8v4RhqB3YG3DkaksqLpcxApRLTpRMs7IXNozgV7CUPYGFyFBEra1ZFgUyHXx4G9MpelV+n2cCfA== +apollo-engine-reporting-protobuf@^0.5.1: + version "0.5.1" + resolved "https://registry.yarnpkg.com/apollo-engine-reporting-protobuf/-/apollo-engine-reporting-protobuf-0.5.1.tgz#b6e66e6e382f9bcdc2ac8ed168b047eb1470c1a8" + integrity sha512-TSfr9iAaInV8dhXkesdcmqsthRkVcJkzznmiM+1Ob/GScK7r6hBYCjVDt2613EHAg9SUzTOltIKlGD+N+GJRUw== dependencies: - protobufjs "^6.8.6" + "@apollo/protobufjs" "^1.0.3" -apollo-engine-reporting@^1.4.6: - version "1.4.6" - resolved "https://registry.yarnpkg.com/apollo-engine-reporting/-/apollo-engine-reporting-1.4.6.tgz#83af6689c4ab82d1c62c3f5dde7651975508114f" - integrity sha512-acfb7oFnru/8YQdY4x6+7WJbZfzdVETI8Cl+9ImgUrvUnE8P+f2SsGTKXTC1RuUvve4c56PAvaPgE+z8X1a1Mw== - dependencies: - apollo-engine-reporting-protobuf "^0.4.0" - apollo-graphql "^0.3.3" - apollo-server-caching "^0.5.0" - apollo-server-env "^2.4.3" - apollo-server-types "^0.2.4" +apollo-engine-reporting@^2.0.0: + version "2.0.0" + resolved "https://registry.yarnpkg.com/apollo-engine-reporting/-/apollo-engine-reporting-2.0.0.tgz#af007b4a8a481fa97baef0eac51a7824f1ec3310" + integrity sha512-FvNwORsh3nxEfvQqd2xbd468a0q/R3kYar/Bk6YQdBX5qwqUhqmOcOSxLFk8Zb77HpwHij5CPpPWJb53TU1zcA== + dependencies: + apollo-engine-reporting-protobuf "^0.5.1" + apollo-graphql "^0.4.0" + apollo-server-caching "^0.5.1" + apollo-server-env "^2.4.4" + apollo-server-errors "^2.4.1" + apollo-server-plugin-base "^0.9.0" + apollo-server-types "^0.5.0" async-retry "^1.2.1" - graphql-extensions "^0.10.3" + uuid "^8.0.0" -apollo-env@0.5.1: - version "0.5.1" - resolved "https://registry.yarnpkg.com/apollo-env/-/apollo-env-0.5.1.tgz#b9b0195c16feadf0fe9fd5563edb0b9b7d9e97d3" - integrity sha512-fndST2xojgSdH02k5hxk1cbqA9Ti8RX4YzzBoAB4oIe1Puhq7+YlhXGXfXB5Y4XN0al8dLg+5nAkyjNAR2qZTw== +apollo-env@^0.6.5: + version "0.6.5" + resolved 
"https://registry.yarnpkg.com/apollo-env/-/apollo-env-0.6.5.tgz#5a36e699d39e2356381f7203493187260fded9f3" + integrity sha512-jeBUVsGymeTHYWp3me0R2CZRZrFeuSZeICZHCeRflHTfnQtlmbSXdy5E0pOyRM9CU4JfQkKDC98S1YglQj7Bzg== dependencies: + "@types/node-fetch" "2.5.7" core-js "^3.0.1" node-fetch "^2.2.0" sha.js "^2.4.11" -apollo-graphql@^0.3.3: - version "0.3.3" - resolved "https://registry.yarnpkg.com/apollo-graphql/-/apollo-graphql-0.3.3.tgz#ce1df194f6e547ad3ce1e35b42f9c211766e1658" - integrity sha512-t3CO/xIDVsCG2qOvx2MEbuu4b/6LzQjcBBwiVnxclmmFyAxYCIe7rpPlnLHSq7HyOMlCWDMozjoeWfdqYSaLqQ== +apollo-graphql@^0.4.0: + version "0.4.4" + resolved "https://registry.yarnpkg.com/apollo-graphql/-/apollo-graphql-0.4.4.tgz#25f456b28a4419bb6a42071f8a56e19e15bb80be" + integrity sha512-i012iRKT5nfsOaNMx4MTwHw2jrlyaF1zikpejxsGHsKIf3OngGvGh3pyw20bEmwj413OrNQpRxvvIz5A7W/8xw== dependencies: - apollo-env "0.5.1" + apollo-env "^0.6.5" lodash.sortby "^4.7.0" apollo-link-error@^1.1.1: @@ -4037,66 +4402,67 @@ apollo-link@^1.0.0, apollo-link@^1.2.13, apollo-link@^1.2.3: tslib "^1.9.3" zen-observable-ts "^0.8.20" -apollo-server-caching@^0.5.0: - version "0.5.0" - resolved "https://registry.yarnpkg.com/apollo-server-caching/-/apollo-server-caching-0.5.0.tgz#446a37ce2d4e24c81833e276638330a634f7bd46" - integrity sha512-l7ieNCGxUaUAVAAp600HjbUJxVaxjJygtPV0tPTe1Q3HkPy6LEWoY6mNHV7T268g1hxtPTxcdRu7WLsJrg7ufw== +apollo-server-caching@^0.5.1: + version "0.5.1" + resolved "https://registry.yarnpkg.com/apollo-server-caching/-/apollo-server-caching-0.5.1.tgz#5cd0536ad5473abb667cc82b59bc56b96fb35db6" + integrity sha512-L7LHZ3k9Ao5OSf2WStvQhxdsNVplRQi7kCAPfqf9Z3GBEnQ2uaL0EgO0hSmtVHfXTbk5CTRziMT1Pe87bXrFIw== dependencies: lru-cache "^5.0.0" -apollo-server-core@^2.9.4: - version "2.9.4" - resolved "https://registry.yarnpkg.com/apollo-server-core/-/apollo-server-core-2.9.4.tgz#0404455884951804d23ea64e45514c73afd34e5e" - integrity 
sha512-6mzipnn9woJxgo/JQFWTlY13svS7HCr0ZsN035eRmKOsXzROfB9ugXcTuc6MP94ICM7TlB/DtJOP+bLX53mijw== +apollo-server-core@^2.14.2: + version "2.14.2" + resolved "https://registry.yarnpkg.com/apollo-server-core/-/apollo-server-core-2.14.2.tgz#4ab055b96b8be7821a726c81e8aa412deb7f3644" + integrity sha512-8G6Aoz+k+ecuQco1KNLFbMrxhe/8uR4AOaOYEvT/N5m/6lrkPYzvBAxbpRIub5AxEwpBPcIrI452rR3PD9DItA== dependencies: - "@apollographql/apollo-tools" "^0.4.0" + "@apollographql/apollo-tools" "^0.4.3" "@apollographql/graphql-playground-html" "1.6.24" "@types/graphql-upload" "^8.0.0" - "@types/ws" "^6.0.0" - apollo-cache-control "^0.8.4" - apollo-datasource "^0.6.3" - apollo-engine-reporting "^1.4.6" - apollo-server-caching "^0.5.0" - apollo-server-env "^2.4.3" - apollo-server-errors "^2.3.3" - apollo-server-plugin-base "^0.6.4" - apollo-server-types "^0.2.4" - apollo-tracing "^0.8.4" + "@types/ws" "^7.0.0" + apollo-cache-control "^0.11.0" + apollo-datasource "^0.7.1" + apollo-engine-reporting "^2.0.0" + apollo-server-caching "^0.5.1" + apollo-server-env "^2.4.4" + apollo-server-errors "^2.4.1" + apollo-server-plugin-base "^0.9.0" + apollo-server-types "^0.5.0" + apollo-tracing "^0.11.0" fast-json-stable-stringify "^2.0.0" - graphql-extensions "^0.10.3" + graphql-extensions "^0.12.2" graphql-tag "^2.9.2" graphql-tools "^4.0.0" graphql-upload "^8.0.2" + loglevel "^1.6.7" sha.js "^2.4.11" subscriptions-transport-ws "^0.9.11" ws "^6.0.0" -apollo-server-env@^2.4.3: - version "2.4.3" - resolved "https://registry.yarnpkg.com/apollo-server-env/-/apollo-server-env-2.4.3.tgz#9bceedaae07eafb96becdfd478f8d92617d825d2" - integrity sha512-23R5Xo9OMYX0iyTu2/qT0EUb+AULCBriA9w8HDfMoChB8M+lFClqUkYtaTTHDfp6eoARLW8kDBhPOBavsvKAjA== +apollo-server-env@^2.4.4: + version "2.4.4" + resolved "https://registry.yarnpkg.com/apollo-server-env/-/apollo-server-env-2.4.4.tgz#12d2d0896dcb184478cba066c7a683ab18689ca1" + integrity 
sha512-c2oddDS3lwAl6QNCIKCLEzt/dF9M3/tjjYRVdxOVN20TidybI7rAbnT4QOzf4tORnGXtiznEAvr/Kc9ahhKADg== dependencies: node-fetch "^2.1.2" util.promisify "^1.0.0" -apollo-server-errors@^2.3.3: - version "2.3.3" - resolved "https://registry.yarnpkg.com/apollo-server-errors/-/apollo-server-errors-2.3.3.tgz#83763b00352c10dc68fbb0d41744ade66de549ff" - integrity sha512-MO4oJ129vuCcbqwr5ZwgxqGGiLz3hCyowz0bstUF7MR+vNGe4oe3DWajC9lv4CxrhcqUHQOeOPViOdIo1IxE3g== +apollo-server-errors@^2.4.1: + version "2.4.1" + resolved "https://registry.yarnpkg.com/apollo-server-errors/-/apollo-server-errors-2.4.1.tgz#16ad49de6c9134bfb2b7dede9842e73bb239dbe2" + integrity sha512-7oEd6pUxqyWYUbQ9TA8tM0NU/3aGtXSEibo6+txUkuHe7QaxfZ2wHRp+pfT1LC1K3RXYjKj61/C2xEO19s3Kdg== -apollo-server-express@^2.2.5: - version "2.9.4" - resolved "https://registry.yarnpkg.com/apollo-server-express/-/apollo-server-express-2.9.4.tgz#ae7ca0b70a644ba9fa5e3ac395d1e2d9a4b23522" - integrity sha512-diX9n81E0tIJ0Sy2bHvDGPM9QsFBsZ76Nx/dszinY00ViyWG0yIAYEYWeRbsoKTeNDWWTvlMrh/3Eu2oaCIEhQ== +apollo-server-express@^2.14.2: + version "2.14.2" + resolved "https://registry.yarnpkg.com/apollo-server-express/-/apollo-server-express-2.14.2.tgz#662dfeb9c794c1eca59dd93e57e74487a8195ae6" + integrity sha512-iYyZm0kQqkM561i9l0WC9HbJsGZJbHP9bhnWaa1Itd+yNBS2AJFp6mRR3hQacsWXUw7ewaKAracMIggvfSH5Aw== dependencies: "@apollographql/graphql-playground-html" "1.6.24" "@types/accepts" "^1.3.5" - "@types/body-parser" "1.17.1" + "@types/body-parser" "1.19.0" "@types/cors" "^2.8.4" - "@types/express" "4.17.1" + "@types/express" "4.17.4" accepts "^1.3.5" - apollo-server-core "^2.9.4" - apollo-server-types "^0.2.4" + apollo-server-core "^2.14.2" + apollo-server-types "^0.5.0" body-parser "^1.18.3" cors "^2.8.4" express "^4.17.1" @@ -4106,29 +4472,29 @@ apollo-server-express@^2.2.5: subscriptions-transport-ws "^0.9.16" type-is "^1.6.16" -apollo-server-plugin-base@^0.6.4: - version "0.6.4" - resolved 
"https://registry.yarnpkg.com/apollo-server-plugin-base/-/apollo-server-plugin-base-0.6.4.tgz#63ea4fd0bbb6c4510bc8d0d2ad0a0684c8d0da8c" - integrity sha512-4rY+cBAIpQomGWYBtk8hHkLQWHrh5hgIBPQqmhXh00YFdcY+Ob1/cU2/2iqTcIzhtcaezsc8OZ63au6ahSBQqg== +apollo-server-plugin-base@^0.9.0: + version "0.9.0" + resolved "https://registry.yarnpkg.com/apollo-server-plugin-base/-/apollo-server-plugin-base-0.9.0.tgz#777f720a1ee827a66b8c159073ca30645f8bc625" + integrity sha512-LWcPrsy2+xqwlNseh/QaGa/MPNopS8c4qGgh0g0cAn0lZBRrJ9Yab7dq+iQ6vdUBwIhUWYN6s9dwUWCZw2SL8g== dependencies: - apollo-server-types "^0.2.4" + apollo-server-types "^0.5.0" -apollo-server-types@^0.2.4: - version "0.2.4" - resolved "https://registry.yarnpkg.com/apollo-server-types/-/apollo-server-types-0.2.4.tgz#28864900ffc7f9711a859297c143a833fdb6aa43" - integrity sha512-G4FvBVgGQcTW6ZBS2+hvcDQkSfdOIKV+cHADduXA275v+5zl42g+bCaGd/hCCKTDRjmQvObLiMxH/BJ6pDMQgA== +apollo-server-types@^0.5.0: + version "0.5.0" + resolved "https://registry.yarnpkg.com/apollo-server-types/-/apollo-server-types-0.5.0.tgz#51f39c5fa610ece8b07f1fbcf63c47d4ac150340" + integrity sha512-zhtsqqqfdeoJQAfc41Sy6WnnBVxKNgZ34BKXf/Q+kXmw7rbZ/B5SG3SJMvj1iFsbzZxILmWdUsE9aD20lEr0bg== dependencies: - apollo-engine-reporting-protobuf "^0.4.0" - apollo-server-caching "^0.5.0" - apollo-server-env "^2.4.3" + apollo-engine-reporting-protobuf "^0.5.1" + apollo-server-caching "^0.5.1" + apollo-server-env "^2.4.4" -apollo-tracing@^0.8.4: - version "0.8.4" - resolved "https://registry.yarnpkg.com/apollo-tracing/-/apollo-tracing-0.8.4.tgz#0117820c3f0ad3aa6daf7bf13ddbb923cbefa6de" - integrity sha512-DjbFW0IvHicSlTVG+vK+1WINfBMRCdPPHJSW/j65JMir9Oe56WGeqL8qz8hptdUUmLYEb+azvcyyGsJsiR3zpQ== +apollo-tracing@^0.11.0: + version "0.11.0" + resolved "https://registry.yarnpkg.com/apollo-tracing/-/apollo-tracing-0.11.0.tgz#8821eb60692f77c06660fb6bc147446f600aecfe" + integrity sha512-I9IFb/8lkBW8ZwOAi4LEojfT7dMfUSkpnV8LHQI8Rcj0HtzL9HObQ3woBmzyGHdGHLFuD/6/VHyFD67SesSrJg== 
dependencies: - apollo-server-env "^2.4.3" - graphql-extensions "^0.10.3" + apollo-server-env "^2.4.4" + apollo-server-plugin-base "^0.9.0" apollo-utilities@1.3.2, apollo-utilities@^1.0.1, apollo-utilities@^1.3.0, apollo-utilities@^1.3.2: version "1.3.2" @@ -4592,23 +4958,21 @@ babel-plugin-emotion@^10.0.20, babel-plugin-emotion@^10.0.22, babel-plugin-emoti find-root "^1.1.0" source-map "^0.5.7" -babel-plugin-emotion@^9.2.11: - version "9.2.11" - resolved "https://registry.yarnpkg.com/babel-plugin-emotion/-/babel-plugin-emotion-9.2.11.tgz#319c005a9ee1d15bb447f59fe504c35fd5807728" - integrity sha512-dgCImifnOPPSeXod2znAmgc64NhaaOjGEHROR/M+lmStb3841yK1sgaDYAYMnlvWNz8GnpwIPN0VmNpbWYZ+VQ== +babel-plugin-emotion@^10.0.27: + version "10.0.33" + resolved "https://registry.yarnpkg.com/babel-plugin-emotion/-/babel-plugin-emotion-10.0.33.tgz#ce1155dcd1783bbb9286051efee53f4e2be63e03" + integrity sha512-bxZbTTGz0AJQDHm8k6Rf3RQJ8tX2scsfsRyKVgAbiUPUNIRtlK+7JxP+TAd1kRLABFxe0CFm2VdK4ePkoA9FxQ== dependencies: "@babel/helper-module-imports" "^7.0.0" - "@emotion/babel-utils" "^0.6.4" - "@emotion/hash" "^0.6.2" - "@emotion/memoize" "^0.6.1" - "@emotion/stylis" "^0.7.0" + "@emotion/hash" "0.8.0" + "@emotion/memoize" "0.7.4" + "@emotion/serialize" "^0.11.16" babel-plugin-macros "^2.0.0" babel-plugin-syntax-jsx "^6.18.0" convert-source-map "^1.5.0" + escape-string-regexp "^1.0.5" find-root "^1.1.0" - mkdirp "^0.5.1" source-map "^0.5.7" - touch "^2.0.1" babel-plugin-extract-import-names@^1.5.1: version "1.5.1" @@ -4858,6 +5222,16 @@ babel-preset-jest@^24.9.0: babel-plugin-transform-undefined-to-void "^6.9.4" lodash "^4.17.11" +babel-preset-typescript-vue@^1.0.3: + version "1.1.1" + resolved "https://registry.yarnpkg.com/babel-preset-typescript-vue/-/babel-preset-typescript-vue-1.1.1.tgz#6a617dcb0ee26f911735d5f2bbe530286b2c7c02" + integrity sha512-wXeR7Y4xCsRUEdm4t4qlpv4wnxolS6jU0c7P2E6zJRWeG1sR0e6NL7DRN0tNuUwkUt0PU8bqVo4vzoA2VEuxnw== + dependencies: + "@babel/helper-plugin-utils" "^7.0.0" 
+ "@babel/plugin-transform-typescript" "^7.3.2" + "@babel/preset-typescript" "^7.3.3" + vue-template-compiler "^2.6.11" + babel-runtime@6.x.x, babel-runtime@^6.23.0, babel-runtime@^6.26.0: version "6.26.0" resolved "https://registry.yarnpkg.com/babel-runtime/-/babel-runtime-6.26.0.tgz#965c7058668e82b55d7bfe04ff2337bc8b5647fe" @@ -5863,7 +6237,7 @@ colors@1.4.0, colors@^1.1.2: resolved "https://registry.yarnpkg.com/colors/-/colors-1.4.0.tgz#c50491479d4c1bdaed2c9ced32cf7c7dc2360f78" integrity sha512-a+UqTh4kgZg/SlGvfbzDHpgRu7AAQOmmqRHJnxhRZICKFUT91brVhNNt58CMWU9PsBbv3PDCZUHbVxuDiH2mtA== -combined-stream@^1.0.6, combined-stream@~1.0.6: +combined-stream@^1.0.6, combined-stream@^1.0.8, combined-stream@~1.0.6: version "1.0.8" resolved "https://registry.yarnpkg.com/combined-stream/-/combined-stream-1.0.8.tgz#c3d45a8b34fd730631a110a8a2520682b31d5a7f" integrity sha512-FQN4MRfuJeHf7cBbBMJFXhKSDq+2kAArBlmRBvcvFE5BB1HZKXtSFASDhdlz9zOYwxh8lDdnvmMOe/+5cdoEdg== @@ -5957,6 +6331,16 @@ concat-stream@^1.5.0, concat-stream@^1.6.0: readable-stream "^2.2.2" typedarray "^0.0.6" +concat-stream@^2.0.0: + version "2.0.0" + resolved "https://registry.yarnpkg.com/concat-stream/-/concat-stream-2.0.0.tgz#414cf5af790a48c60ab9be4527d56d5e41133cb1" + integrity sha512-MWufYdFw53ccGjCA+Ol7XJYpAlW6/prSMzuPOTRnJGcGzuhLn4Scrz7qf6o8bROZ514ltazcIFJZevcfbo0x7A== + dependencies: + buffer-from "^1.0.0" + inherits "^2.0.3" + readable-stream "^3.0.2" + typedarray "^0.0.6" + configstore@^3.0.0: version "3.1.2" resolved "https://registry.yarnpkg.com/configstore/-/configstore-3.1.2.tgz#c6f25defaeef26df12dd33414b001fe81a543f8f" @@ -6013,7 +6397,7 @@ content-type@~1.0.4: resolved "https://registry.yarnpkg.com/content-type/-/content-type-1.0.4.tgz#e138cc75e040c727b1966fe5e5f8c9aee256fe3b" integrity sha512-hIP3EEPs8tB9AT1L+NUqtwOAps4mk2Zob89MWXMHjHWg9milF/j4osnnQLXBCBFBk/tvIG/tUc9mOUJiPBhPXA== -convert-source-map@1.6.0, convert-source-map@^1.1.0, convert-source-map@^1.4.0, convert-source-map@^1.5.0, 
convert-source-map@^1.5.1: +convert-source-map@1.6.0, convert-source-map@^1.1.0, convert-source-map@^1.4.0, convert-source-map@^1.5.0: version "1.6.0" resolved "https://registry.yarnpkg.com/convert-source-map/-/convert-source-map-1.6.0.tgz#51b537a8c43e0f04dec1993bffcdd504e758ac20" integrity sha512-eFu7XigvxdZ1ETfbgPBohgyQ/Z++C0eEhTor0qRwBw9unw+L0/6V8wkSuGgzdThkiS5lSpdptOQPD8Ak40a+7A== @@ -6157,19 +6541,6 @@ create-ecdh@^4.0.0: bn.js "^4.1.0" elliptic "^6.0.0" -create-emotion@^9.2.12: - version "9.2.12" - resolved "https://registry.yarnpkg.com/create-emotion/-/create-emotion-9.2.12.tgz#0fc8e7f92c4f8bb924b0fef6781f66b1d07cb26f" - integrity sha512-P57uOF9NL2y98Xrbl2OuiDQUZ30GVmASsv5fbsjF4Hlraip2kyAvMm+2PoYUvFFw03Fhgtxk3RqZSm2/qHL9hA== - dependencies: - "@emotion/hash" "^0.6.2" - "@emotion/memoize" "^0.6.1" - "@emotion/stylis" "^0.7.0" - "@emotion/unitless" "^0.6.2" - csstype "^2.5.2" - stylis "^3.5.0" - stylis-rule-sheet "^0.0.10" - create-error-class@^3.0.0: version "3.0.2" resolved "https://registry.yarnpkg.com/create-error-class/-/create-error-class-3.0.2.tgz#06be7abef947a3f14a30fd610671d401bca8b7b6" @@ -6268,6 +6639,13 @@ crypto-random-string@^1.0.0: resolved "https://registry.yarnpkg.com/crypto-random-string/-/crypto-random-string-1.0.0.tgz#a230f64f568310e1498009940790ec99545bca7e" integrity sha1-ojD2T1aDEOFJgAmUB5DsmVRbyn4= +css-box-model@^1.2.0: + version "1.2.1" + resolved "https://registry.yarnpkg.com/css-box-model/-/css-box-model-1.2.1.tgz#59951d3b81fd6b2074a62d49444415b0d2b4d7c1" + integrity sha512-a7Vr4Q/kd/aw96bnJG332W9V9LkJO69JRcaCYDUqjp6/z0w6VcZjgAcTbgFxEPfBgdnAwlh3iwu+hLopa+flJw== + dependencies: + tiny-invariant "^1.0.6" + css-loader@1.0.0: version "1.0.0" resolved "https://registry.yarnpkg.com/css-loader/-/css-loader-1.0.0.tgz#9f46aaa5ca41dbe31860e3b62b8e23c42916bf56" @@ -6400,10 +6778,10 @@ csstype@^2.2.0, csstype@^2.5.7: resolved "https://registry.yarnpkg.com/csstype/-/csstype-2.6.7.tgz#20b0024c20b6718f4eda3853a1f5a1cce7f5e4a5" integrity 
sha512-9Mcn9sFbGBAdmimWb2gLVDtFJzeKtDGIr76TUqmjZrw9LFXBMSU70lcs+C0/7fyCd6iBDqmksUcCOUIkisPHsQ== -csstype@^2.5.2: - version "2.6.6" - resolved "https://registry.yarnpkg.com/csstype/-/csstype-2.6.6.tgz#c34f8226a94bbb10c32cc0d714afdf942291fc41" - integrity sha512-RpFbQGUE74iyPgvr46U9t1xoQBM8T4BL8SxrN66Le2xYAPSaDJJKeztV3awugusb3g3G9iL8StmkBBXhcbbXhg== +csstype@^2.6.7: + version "2.6.10" + resolved "https://registry.yarnpkg.com/csstype/-/csstype-2.6.10.tgz#e63af50e66d7c266edb6b32909cfd0aabe03928b" + integrity sha512-D34BqZU4cIlMCY93rZHbrq9pjTAQJ3U8S8rfBqjwHxkGPThWFjzZDQpgMJY0QViLxth6ZKYiwFBo14RdN44U/w== currently-unhandled@^0.4.1: version "0.4.1" @@ -6721,7 +7099,7 @@ debug@2.6.9, debug@^2.2.0, debug@^2.3.3, debug@^2.6.0, debug@^2.6.8, debug@^2.6. dependencies: ms "2.0.0" -debug@4.1.1, debug@^4.1.0, debug@^4.1.1: +debug@4, debug@4.1.1, debug@^4.1.0, debug@^4.1.1: version "4.1.1" resolved "https://registry.yarnpkg.com/debug/-/debug-4.1.1.tgz#3b72260255109c6b589cee050f1d516139664791" integrity sha512-pYAIzeRo8J6KPEaJ0VWOh5Pzkbw/RetuzehGM7QRRX5he4fPHx2rdKMB256ehJCkX+XRQm16eZLqLNS8RSZXZw== @@ -6866,6 +7244,11 @@ denodeify@^1.2.1: resolved "https://registry.yarnpkg.com/denodeify/-/denodeify-1.2.1.tgz#3a36287f5034e699e7577901052c2e6c94251631" integrity sha1-OjYof1A05pnnV3kBBSwubJQlFjE= +denque@^1.4.1: + version "1.4.1" + resolved "https://registry.yarnpkg.com/denque/-/denque-1.4.1.tgz#6744ff7641c148c3f8a69c307e51235c1f4a37cf" + integrity sha512-OfzPuSZKGcgr96rf1oODnfjqBFmr1DVoc/TrItj3Ohe0Ah1C5WX5Baquw/9U9KovnQ88EqmJbD66rKYUQYN1tQ== + depd@^2.0.0: version "2.0.0" resolved "https://registry.yarnpkg.com/depd/-/depd-2.0.0.tgz#b696163cc757560d09cf22cc8fad1571b79e76df" @@ -7017,6 +7400,14 @@ dom-helpers@^3.4.0: dependencies: "@babel/runtime" "^7.1.2" +dom-helpers@^5.0.1: + version "5.1.4" + resolved "https://registry.yarnpkg.com/dom-helpers/-/dom-helpers-5.1.4.tgz#4609680ab5c79a45f2531441f1949b79d6587f4b" + integrity 
sha512-TjMyeVUvNEnOnhzs6uAn9Ya47GmMo3qq7m+Lr/3ON0Rs5kHvb8I+SQYjLUSYn7qhEm0QjW0yrBkvz9yOrwwz1A== + dependencies: + "@babel/runtime" "^7.8.7" + csstype "^2.6.7" + dom-serializer@0: version "0.2.2" resolved "https://registry.yarnpkg.com/dom-serializer/-/dom-serializer-0.2.2.tgz#1afb81f533717175d478655debc5e332d9f9bb51" @@ -7252,14 +7643,6 @@ emotion-theming@^10.0.19: "@emotion/weak-memoize" "0.2.4" hoist-non-react-statics "^3.3.0" -emotion@^9.1.2: - version "9.2.12" - resolved "https://registry.yarnpkg.com/emotion/-/emotion-9.2.12.tgz#53925aaa005614e65c6e43db8243c843574d1ea9" - integrity sha512-hcx7jppaI8VoXxIWEhxpDW7I+B4kq9RNzQLmsrF6LY8BGKqe2N+gFAQr0EfuFucFlPs2A9HM4+xNj4NeqEWIOQ== - dependencies: - babel-plugin-emotion "^9.2.11" - create-emotion "^9.2.12" - encodeurl@~1.0.1, encodeurl@~1.0.2: version "1.0.2" resolved "https://registry.yarnpkg.com/encodeurl/-/encodeurl-1.0.2.tgz#ad3ff4c86ec2d029322f5a02c3a9a606c95b3f59" @@ -7279,6 +7662,15 @@ end-of-stream@^1.0.0, end-of-stream@^1.1.0: dependencies: once "^1.4.0" +enhanced-resolve@^4.0.0: + version "4.1.1" + resolved "https://registry.yarnpkg.com/enhanced-resolve/-/enhanced-resolve-4.1.1.tgz#2937e2b8066cd0fe7ce0990a98f0d71a35189f66" + integrity sha512-98p2zE+rL7/g/DzMHMTF4zZlCgeVdJ7yr6xzEpJRYwFYrGi9ANdn5DnJURg6RpBkyk60XYDnWIv51VfIhfNGuA== + dependencies: + graceful-fs "^4.1.2" + memory-fs "^0.5.0" + tapable "^1.0.0" + enhanced-resolve@^4.1.0: version "4.1.0" resolved "https://registry.yarnpkg.com/enhanced-resolve/-/enhanced-resolve-4.1.0.tgz#41c7e0bfdfe74ac1ffe1e57ad6a5c6c9f3742a7f" @@ -7417,6 +7809,18 @@ escodegen@^1.12.0, escodegen@^1.9.1: optionalDependencies: source-map "~0.6.1" +escodegen@^1.14.1: + version "1.14.2" + resolved "https://registry.yarnpkg.com/escodegen/-/escodegen-1.14.2.tgz#14ab71bf5026c2aa08173afba22c6f3173284a84" + integrity sha512-InuOIiKk8wwuOFg6x9BQXbzjrQhtyXh46K9bqVTPzSo2FnyMBaYGBMC6PhQy7yxxil9vIedFBweQBMK74/7o8A== + dependencies: + esprima "^4.0.1" + estraverse "^4.2.0" + esutils "^2.0.2" + 
optionator "^0.8.1" + optionalDependencies: + source-map "~0.6.1" + eslint-config-airbnb-base@^12.1.0: version "12.1.0" resolved "https://registry.yarnpkg.com/eslint-config-airbnb-base/-/eslint-config-airbnb-base-12.1.0.tgz#386441e54a12ccd957b0a92564a4bafebd747944" @@ -7548,7 +7952,7 @@ esprima@^3.1.3: resolved "https://registry.yarnpkg.com/esprima/-/esprima-3.1.3.tgz#fdca51cee6133895e3c88d535ce49dbff62a4633" integrity sha1-/cpRzuYTOJXjyI1TXOSdv/YqRjM= -esprima@^4.0.0, esprima@~4.0.0: +esprima@^4.0.0, esprima@^4.0.1, esprima@~4.0.0: version "4.0.1" resolved "https://registry.yarnpkg.com/esprima/-/esprima-4.0.1.tgz#13b04cdb3e6c5d19df91ab6987a8695619b0aa71" integrity sha512-eGuFFw7Upda+g4p+QHvnW0RyTX/SVeJBDM/gCtMARO0cLuT2HcEKnTPvhjV6aGeqrCB/sbNop0Kszm0jsaWU4A== @@ -8164,6 +8568,19 @@ fork-ts-checker-webpack-plugin@1.5.0: tapable "^1.0.0" worker-rpc "^0.1.0" +fork-ts-checker-webpack-plugin@^4.1.0: + version "4.1.6" + resolved "https://registry.yarnpkg.com/fork-ts-checker-webpack-plugin/-/fork-ts-checker-webpack-plugin-4.1.6.tgz#5055c703febcf37fa06405d400c122b905167fc5" + integrity sha512-DUxuQaKoqfNne8iikd14SAkh5uw4+8vNifp6gmA73yYNS6ywLIWSLD/n/mBzHQRpW3J7rbATEakmiA8JvkTyZw== + dependencies: + "@babel/code-frame" "^7.5.5" + chalk "^2.4.1" + micromatch "^3.1.10" + minimatch "^3.0.4" + semver "^5.6.0" + tapable "^1.0.0" + worker-rpc "^0.1.0" + form-data@*, form-data@^2.3.3, form-data@^2.5.0: version "2.5.1" resolved "https://registry.yarnpkg.com/form-data/-/form-data-2.5.1.tgz#f2cbec57b5e59e23716e128fe44d4e5dd23895f4" @@ -8173,6 +8590,15 @@ form-data@*, form-data@^2.3.3, form-data@^2.5.0: combined-stream "^1.0.6" mime-types "^2.1.12" +form-data@^3.0.0: + version "3.0.0" + resolved "https://registry.yarnpkg.com/form-data/-/form-data-3.0.0.tgz#31b7e39c85f1355b7139ee0c647cf0de7f83c682" + integrity sha512-CKMFDglpbMi6PyN+brwB9Q/GOw0eAnsrEZDgcsH5Krhz5Od/haKHAX0NmQfha2zPPz0JpWzA7GJHGSnvCRLWsg== + dependencies: + asynckit "^0.4.0" + combined-stream "^1.0.8" + mime-types 
"^2.1.12" + form-data@~2.3.2: version "2.3.3" resolved "https://registry.yarnpkg.com/form-data/-/form-data-2.3.3.tgz#dcce52c05f644f298c6a7ab936bd724ceffbf3a6" @@ -8652,14 +9078,14 @@ graceful-fs@^4.1.6, graceful-fs@^4.1.9, graceful-fs@^4.2.0, graceful-fs@^4.2.2: resolved "https://registry.yarnpkg.com/graceful-readlink/-/graceful-readlink-1.0.1.tgz#4cafad76bc62f02fa039b2f94e9a3dd3a391a725" integrity sha1-TK+tdrxi8C+gObL5Tpo906ORpyU= -graphql-extensions@^0.10.3: - version "0.10.3" - resolved "https://registry.yarnpkg.com/graphql-extensions/-/graphql-extensions-0.10.3.tgz#9e37f3bd26309c40b03a0be0e63e02b3f99d52ea" - integrity sha512-kwU0gUe+Qdfr8iZYT91qrPSwQNgPhB/ClF1m1LEPdxlptk5FhFmjpxAcbMZ8q7j0kjfnbp2IeV1OhRDCEPqz2w== +graphql-extensions@^0.12.2: + version "0.12.2" + resolved "https://registry.yarnpkg.com/graphql-extensions/-/graphql-extensions-0.12.2.tgz#f22210e812939b7caa2127589f30e6a1c671540f" + integrity sha512-vFaZua5aLiCOOzxfY5qzHZ6S52BCqW7VVOwzvV52Wb5edRm3dn6u+1MR9yYyEqUHSf8LvdhEojYlOkKiaQ4ghA== dependencies: - "@apollographql/apollo-tools" "^0.4.0" - apollo-server-env "^2.4.3" - apollo-server-types "^0.2.4" + "@apollographql/apollo-tools" "^0.4.3" + apollo-server-env "^2.4.4" + apollo-server-types "^0.5.0" graphql-iso-date@^3.5.0: version "3.6.1" @@ -9152,6 +9578,14 @@ https-browserify@^1.0.0: resolved "https://registry.yarnpkg.com/https-browserify/-/https-browserify-1.0.0.tgz#ec06c10e0a34c0f2faf199f7fd7fc78fffd03c73" integrity sha1-7AbBDgo0wPL68Zn3/X/Hj//QPHM= +https-proxy-agent@^4.0.0: + version "4.0.0" + resolved "https://registry.yarnpkg.com/https-proxy-agent/-/https-proxy-agent-4.0.0.tgz#702b71fb5520a132a66de1f67541d9e62154d82b" + integrity sha512-zoDhWrkR3of1l9QAL8/scJZyLu8j/gBkcwcaQOZh7Gyh/+uJQzGVETdgT30akuwkpL8HTRfssqI3BZuV18teDg== + dependencies: + agent-base "5" + debug "4" + humanize-ms@^1.2.1: version "1.2.1" resolved "https://registry.yarnpkg.com/humanize-ms/-/humanize-ms-1.2.1.tgz#c46e3159a293f6b896da29316d8b6fe8bb79bbed" @@ -10469,7 +10903,7 @@ 
json-stream@^1.0.0: resolved "https://registry.yarnpkg.com/json-stream/-/json-stream-1.0.0.tgz#1a3854e28d2bbeeab31cc7ddf683d2ddc5652708" integrity sha1-GjhU4o0rvuqzHMfd9oPS3cVlJwg= -json-stringify-safe@5.0.x, json-stringify-safe@~5.0.1: +json-stringify-safe@5.0.x, json-stringify-safe@^5.0.0, json-stringify-safe@~5.0.1: version "5.0.1" resolved "https://registry.yarnpkg.com/json-stringify-safe/-/json-stringify-safe-5.0.1.tgz#1296a2d58fd45f19a0f6ce01d65701e2c735b6eb" integrity sha1-Epai1Y/UXxmg9s4B1lcB4sc1tus= @@ -11018,6 +11452,11 @@ loglevel@^1.4.1: resolved "https://registry.yarnpkg.com/loglevel/-/loglevel-1.6.4.tgz#f408f4f006db8354d0577dcf6d33485b3cb90d56" integrity sha512-p0b6mOGKcGa+7nnmKbpzR6qloPbrgLcnio++E+14Vo/XffOGwZtRpUhr8dTH/x2oCMmEoIU0Zwm3ZauhvYD17g== +loglevel@^1.6.7: + version "1.6.8" + resolved "https://registry.yarnpkg.com/loglevel/-/loglevel-1.6.8.tgz#8a25fb75d092230ecd4457270d80b54e28011171" + integrity sha512-bsU7+gc9AJ2SqpzxwU3+1fedl8zAntbtC5XYlt3s2j1hJcn2PsXSmgN8TaLG/J1/2mod4+cE/3vNL70/c1RNCA== + lokka@^1.7.0: version "1.7.0" resolved "https://registry.yarnpkg.com/lokka/-/lokka-1.7.0.tgz#ab2e8334612d2afd359aa89047547bd7de21046c" @@ -11292,7 +11731,7 @@ memoize-one@^4.0.0: resolved "https://registry.yarnpkg.com/memoize-one/-/memoize-one-4.1.0.tgz#a2387c58c03fff27ca390c31b764a79addf3f906" integrity sha512-2GApq0yI/b22J2j9rhbrAlsHb0Qcz+7yWxeLG8h+95sl1XPUgeLimQSOdur4Vw7cUhrBHwaUZxWFZueojqNRzA== -memoize-one@^5.0.0: +memoize-one@^5.0.0, memoize-one@^5.1.1: version "5.1.1" resolved "https://registry.yarnpkg.com/memoize-one/-/memoize-one-5.1.1.tgz#047b6e3199b508eaec03504de71229b8eb1d75c0" integrity sha512-HKeeBpWvqiVJD57ZUAsJNm71eHTykffzcLZVYWiVfQeI1rJtuEaS7hQiEpWfVVk18donPwJEcFKIkCmPJNOhHA== @@ -11312,6 +11751,14 @@ memory-fs@^0.4.0, memory-fs@^0.4.1, memory-fs@~0.4.1: errno "^0.1.3" readable-stream "^2.0.1" +memory-fs@^0.5.0: + version "0.5.0" + resolved 
"https://registry.yarnpkg.com/memory-fs/-/memory-fs-0.5.0.tgz#324c01288b88652966d161db77838720845a8e3c" + integrity sha512-jA0rdU5KoQMC0e6ppoNRtpp6vjFq6+NY7r8hywnC7V+1Xj/MtHwGIbB1QaK/dunyjWteJzmkpd7ooeWg10T7GA== + dependencies: + errno "^0.1.3" + readable-stream "^2.0.1" + meow@^4.0.0: version "4.0.1" resolved "https://registry.yarnpkg.com/meow/-/meow-4.0.1.tgz#d48598f6f4b1472f35bf6317a95945ace347f975" @@ -11367,7 +11814,7 @@ messageformat@^1.0.2: nopt "~3.0.6" reserved-words "^0.1.2" -methods@~1.1.2: +methods@^1.1.2, methods@~1.1.2: version "1.1.2" resolved "https://registry.yarnpkg.com/methods/-/methods-1.1.2.tgz#5529a4d67654134edcc5266656835b0f851afcee" integrity sha1-VSmk1nZUE07cxSZmVoNbD4Ua/O4= @@ -11377,7 +11824,7 @@ microevent.ts@~0.1.1: resolved "https://registry.yarnpkg.com/microevent.ts/-/microevent.ts-0.1.1.tgz#70b09b83f43df5172d0205a63025bce0f7357fa0" integrity sha512-jo1OfR4TaEwd5HOrt5+tAZ9mqT4jmpNAusXtyfNzqVm9uiSYFZlKM1wYL4oU7azZW/PxQW53wM0S6OR1JHNa2g== -micromatch@4.x, micromatch@^4.0.2: +micromatch@4.x, micromatch@^4.0.0, micromatch@^4.0.2: version "4.0.2" resolved "https://registry.yarnpkg.com/micromatch/-/micromatch-4.0.2.tgz#4fcb0999bf9fbc2fcbdd212f6d629b9a56c39259" integrity sha512-y7FpHSbMUMoyPbYUSzO6PaZ6FyRnQOpHuKwbo1G+Knck95XVU4QAiKdGEnj5wwoS7PlOgthX/09u5iFJ+aYf5Q== @@ -11719,6 +12166,11 @@ nan@^2.0.9, nan@^2.12.1: resolved "https://registry.yarnpkg.com/nan/-/nan-2.14.0.tgz#7818f722027b2459a86f0295d434d1fc2336c52c" integrity sha512-INOFj37C7k3AfaNTtX8RhsTw7qRy7eLET14cROi9+5HAVbbHuIWUHEauBv5qT4Av2tWasiTY1Jw6puUNqRJXQg== +nan@^2.14.1: + version "2.14.1" + resolved "https://registry.yarnpkg.com/nan/-/nan-2.14.1.tgz#d7be34dfa3105b91494c3147089315eff8874b01" + integrity sha512-isWHgVjnFjh2x2yuJ/tj3JbwoHu3UC2dX5G/88Cm24yB6YopVgxvBObDY7n5xW6ExmFhJpSEQqFPvq9zaXc8Jw== + nano@^6.4.3: version "6.4.4" resolved "https://registry.yarnpkg.com/nano/-/nano-6.4.4.tgz#4902a095e5186cfb23612c78826ea755b76fadf0" @@ -11797,6 +12249,28 @@ newman@^4.5.4: word-wrap 
"1.2.3" xmlbuilder "13.0.2" +newrelic@^6.9.0: + version "6.9.0" + resolved "https://registry.yarnpkg.com/newrelic/-/newrelic-6.9.0.tgz#b117a68cb2564dff917966984e6f3e073eb9712e" + integrity sha512-prgxTJ4sxS8bG87WbRMW9e7/E0TE9gEiMz/bjrLvo9Pgwo9bCoV7bzIFBaxnaLQReLvr5xWuNEQB+lHackSSSQ== + dependencies: + "@grpc/grpc-js" "1.0.3" + "@grpc/proto-loader" "^0.5.4" + "@newrelic/aws-sdk" "^1.1.1" + "@newrelic/koa" "^3.0.0" + "@newrelic/superagent" "^2.0.1" + "@tyriar/fibonacci-heap" "^2.0.7" + async "^2.1.4" + concat-stream "^2.0.0" + escodegen "^1.14.1" + esprima "^4.0.1" + https-proxy-agent "^4.0.0" + json-stringify-safe "^5.0.0" + readable-stream "^3.6.0" + semver "^5.3.0" + optionalDependencies: + "@newrelic/native-metrics" "^5.1.0" + next-server@8.1.0: version "8.1.0" resolved "https://registry.yarnpkg.com/next-server/-/next-server-8.1.0.tgz#50a9f248ede69fb33d83aa5274ec6c66f421556e" @@ -13503,9 +13977,9 @@ property-information@^5.0.0, property-information@^5.3.0: xtend "^4.0.1" protobufjs@^6.8.6: - version "6.8.8" - resolved "https://registry.yarnpkg.com/protobufjs/-/protobufjs-6.8.8.tgz#c8b4f1282fd7a90e6f5b109ed11c84af82908e7c" - integrity sha512-AAmHtD5pXgZfi7GMpllpO3q1Xw1OYldr+dMUlAnffGTAhqkg72WdmSY71uKBF/JuyiKs8psYbtKrhi0ASCD8qw== + version "6.9.0" + resolved "https://registry.yarnpkg.com/protobufjs/-/protobufjs-6.9.0.tgz#c08b2bf636682598e6fabbf0edb0b1256ff090bd" + integrity sha512-LlGVfEWDXoI/STstRDdZZKb/qusoAWUnmLg9R8OLSO473mBLWHowx8clbX5/+mKDEI+v7GzjoK9tRPZMMcoTrg== dependencies: "@protobufjs/aspromise" "^1.1.2" "@protobufjs/base64" "^1.1.2" @@ -13517,8 +13991,8 @@ protobufjs@^6.8.6: "@protobufjs/path" "^1.1.2" "@protobufjs/pool" "^1.1.0" "@protobufjs/utf8" "^1.1.0" - "@types/long" "^4.0.0" - "@types/node" "^10.1.0" + "@types/long" "^4.0.1" + "@types/node" "^13.7.0" long "^4.0.0" protocols@^1.1.0, protocols@^1.4.0: @@ -13797,12 +14271,10 @@ rabbitmq-pub-sub@^0.2.5: "@types/bunyan" "0.0.35" amqplib "^0.5.1" -raf@^3.4.0: - version "3.4.1" - resolved 
"https://registry.yarnpkg.com/raf/-/raf-3.4.1.tgz#0742e99a4a6552f445d73e3ee0328af0ff1ede39" - integrity sha512-Sq4CW4QhwOHE8ucn6J34MqtZCeWFP2aQSmrlroYgqAV1PjStIhJXxYuTgUIfkEk7zTLjmIjLmU5q+fbD1NnOJA== - dependencies: - performance-now "^2.1.0" +raf-schd@^4.0.2: + version "4.0.2" + resolved "https://registry.yarnpkg.com/raf-schd/-/raf-schd-4.0.2.tgz#bd44c708188f2e84c810bf55fcea9231bcaed8a0" + integrity sha512-VhlMZmGy6A6hrkJWHLNTGl5gtgMUm+xfGza6wbwnE914yeQ5Ybm18vgM734RZhMgfw4tacUrWseGZlpUrrakEQ== ramda@^0.21.0: version "0.21.0" @@ -13880,6 +14352,19 @@ react-apollo@^2.1.11: ts-invariant "^0.4.2" tslib "^1.9.3" +react-beautiful-dnd@^13.0.0: + version "13.0.0" + resolved "https://registry.yarnpkg.com/react-beautiful-dnd/-/react-beautiful-dnd-13.0.0.tgz#f70cc8ff82b84bc718f8af157c9f95757a6c3b40" + integrity sha512-87It8sN0ineoC3nBW0SbQuTFXM6bUqM62uJGY4BtTf0yzPl8/3+bHMWkgIe0Z6m8e+gJgjWxefGRVfpE3VcdEg== + dependencies: + "@babel/runtime" "^7.8.4" + css-box-model "^1.2.0" + memoize-one "^5.1.1" + raf-schd "^4.0.2" + react-redux "^7.1.1" + redux "^4.0.4" + use-memo-one "^1.1.1" + react-clientside-effect@^1.2.2: version "1.2.2" resolved "https://registry.yarnpkg.com/react-clientside-effect/-/react-clientside-effect-1.2.2.tgz#6212fb0e07b204e714581dd51992603d1accc837" @@ -14026,6 +14511,13 @@ react-helmet-async@^1.0.2: react-fast-compare "^2.0.4" shallowequal "^1.1.0" +react-hexgrid@^1.0.3: + version "1.0.3" + resolved "https://registry.yarnpkg.com/react-hexgrid/-/react-hexgrid-1.0.3.tgz#fe0749eaa61d159805c913c8fb78502ce71e247a" + integrity sha512-+rXia1Q/tGtEWrRRNGku9dZx+3yVcotr4cs3IF0PddJaaddpxUEMgZDqSway0QGNTT43eVbclR5UScwHBgnx2A== + dependencies: + classnames "^2.2.5" + react-highlight-words@^0.14.0: version "0.14.0" resolved "https://registry.yarnpkg.com/react-highlight-words/-/react-highlight-words-0.14.0.tgz#a1a40ff0a49ce78e7feb375a4e0a5fd1ca9c9609" @@ -14042,13 +14534,6 @@ react-hotkeys@2.0.0: dependencies: prop-types "^15.6.1" -react-input-autosize@^2.2.1: - version 
"2.2.1" - resolved "https://registry.yarnpkg.com/react-input-autosize/-/react-input-autosize-2.2.1.tgz#ec428fa15b1592994fb5f9aa15bb1eb6baf420f8" - integrity sha512-3+K4CD13iE4lQQ2WlF8PuV5htfmTRLH6MDnfndHM6LuBRszuXnuyIfE7nhSKt8AzRBZ50bu0sAhkNMeS5pxQQA== - dependencies: - prop-types "^15.5.8" - react-input-autosize@^2.2.2: version "2.2.2" resolved "https://registry.yarnpkg.com/react-input-autosize/-/react-input-autosize-2.2.2.tgz#fcaa7020568ec206bc04be36f4eb68e647c4d8c2" @@ -14140,18 +14625,30 @@ react-redux@^7.0.2: prop-types "^15.7.2" react-is "^16.9.0" -react-select@^2.1.1: - version "2.4.4" - resolved "https://registry.yarnpkg.com/react-select/-/react-select-2.4.4.tgz#ba72468ef1060c7d46fbb862b0748f96491f1f73" - integrity sha512-C4QPLgy9h42J/KkdrpVxNmkY6p4lb49fsrbDk/hRcZpX7JvZPNb6mGj+c5SzyEtBv1DmQ9oPH4NmhAFvCrg8Jw== +react-redux@^7.1.1: + version "7.2.0" + resolved "https://registry.yarnpkg.com/react-redux/-/react-redux-7.2.0.tgz#f970f62192b3981642fec46fd0db18a074fe879d" + integrity sha512-EvCAZYGfOLqwV7gh849xy9/pt55rJXPwmYvI4lilPM5rUT/1NxuuN59ipdBksRVSvz0KInbPnp4IfoXJXCqiDA== dependencies: - classnames "^2.2.5" - emotion "^9.1.2" + "@babel/runtime" "^7.5.5" + hoist-non-react-statics "^3.3.0" + loose-envify "^1.4.0" + prop-types "^15.7.2" + react-is "^16.9.0" + +react-select@^3.0.0: + version "3.1.0" + resolved "https://registry.yarnpkg.com/react-select/-/react-select-3.1.0.tgz#ab098720b2e9fe275047c993f0d0caf5ded17c27" + integrity sha512-wBFVblBH1iuCBprtpyGtd1dGMadsG36W5/t2Aj8OE6WbByDg5jIFyT7X5gT+l0qmT5TqWhxX+VsKJvCEl2uL9g== + dependencies: + "@babel/runtime" "^7.4.4" + "@emotion/cache" "^10.0.9" + "@emotion/core" "^10.0.9" + "@emotion/css" "^10.0.9" memoize-one "^5.0.0" prop-types "^15.6.0" - raf "^3.4.0" - react-input-autosize "^2.2.1" - react-transition-group "^2.2.1" + react-input-autosize "^2.2.2" + react-transition-group "^4.3.0" react-select@^3.0.8: version "3.0.8" @@ -14213,6 +14710,16 @@ react-transition-group@^2.2.1: prop-types "^15.6.2" 
react-lifecycles-compat "^3.0.4" +react-transition-group@^4.3.0: + version "4.4.1" + resolved "https://registry.yarnpkg.com/react-transition-group/-/react-transition-group-4.4.1.tgz#63868f9325a38ea5ee9535d828327f85773345c9" + integrity sha512-Djqr7OQ2aPUiYurhPalTrVy9ddmFCCzwhqQmtN+J3+3DzLO209Fdr70QrN8Z3DsglWql6iY1lDWAfpFiBtuKGw== + dependencies: + "@babel/runtime" "^7.5.5" + dom-helpers "^5.0.1" + loose-envify "^1.4.0" + prop-types "^15.6.2" + react-typekit@^1.1.3: version "1.1.4" resolved "https://registry.yarnpkg.com/react-typekit/-/react-typekit-1.1.4.tgz#1305675bd8d348eeafc53f013edf1ec164b01f73" @@ -14328,6 +14835,15 @@ read-pkg@^3.0.0: isarray "0.0.1" string_decoder "~0.10.x" +readable-stream@^3.0.2, readable-stream@^3.6.0: + version "3.6.0" + resolved "https://registry.yarnpkg.com/readable-stream/-/readable-stream-3.6.0.tgz#337bbda3adc0706bd3e024426a286d4b4b2c9198" + integrity sha512-BViHy7LKeTz4oNnkcLJ+lVSL6vpiFeX6/d3oSH8zCW7UxP2onchk+vTGB143xuFjHS3deTgkKoXXymXqymiIdA== + dependencies: + inherits "^2.0.3" + string_decoder "^1.1.1" + util-deprecate "^1.0.1" + readable-stream@^3.1.1: version "3.5.0" resolved "https://registry.yarnpkg.com/readable-stream/-/readable-stream-3.5.0.tgz#465d70e6d1087f6162d079cd0b5db7fbebfd1606" @@ -14423,6 +14939,33 @@ redent@^2.0.0: indent-string "^3.0.0" strip-indent "^2.0.0" +redis-commands@^1.5.0: + version "1.5.0" + resolved "https://registry.yarnpkg.com/redis-commands/-/redis-commands-1.5.0.tgz#80d2e20698fe688f227127ff9e5164a7dd17e785" + integrity sha512-6KxamqpZ468MeQC3bkWmCB1fp56XL64D4Kf0zJSwDZbVLLm7KFkoIcHrgRvQ+sk8dnhySs7+yBg94yIkAK7aJg== + +redis-errors@^1.0.0, redis-errors@^1.2.0: + version "1.2.0" + resolved "https://registry.yarnpkg.com/redis-errors/-/redis-errors-1.2.0.tgz#eb62d2adb15e4eaf4610c04afe1529384250abad" + integrity sha1-62LSrbFeTq9GEMBK/hUpOEJQq60= + +redis-parser@^3.0.0: + version "3.0.0" + resolved "https://registry.yarnpkg.com/redis-parser/-/redis-parser-3.0.0.tgz#b66d828cdcafe6b4b8a428a7def4c6bcac31c8b4" 
+ integrity sha1-tm2CjNyv5rS4pCin3vTGvKwxyLQ= + dependencies: + redis-errors "^1.0.0" + +redis@^3.0.2: + version "3.0.2" + resolved "https://registry.yarnpkg.com/redis/-/redis-3.0.2.tgz#bd47067b8a4a3e6a2e556e57f71cc82c7360150a" + integrity sha512-PNhLCrjU6vKVuMOyFu7oSP296mwBkcE6lrAjruBYG5LgdSqtRBoVQIylrMyVZD/lkF24RSNNatzvYag6HRBHjQ== + dependencies: + denque "^1.4.1" + redis-commands "^1.5.0" + redis-errors "^1.2.0" + redis-parser "^3.0.0" + redux@^4.0.1: version "4.0.4" resolved "https://registry.yarnpkg.com/redux/-/redux-4.0.4.tgz#4ee1aeb164b63d6a1bcc57ae4aa0b6e6fa7a3796" @@ -14431,6 +14974,14 @@ redux@^4.0.1: loose-envify "^1.4.0" symbol-observable "^1.2.0" +redux@^4.0.4: + version "4.0.5" + resolved "https://registry.yarnpkg.com/redux/-/redux-4.0.5.tgz#4db5de5816e17891de8a80c424232d06f051d93f" + integrity sha512-VSz1uMAH24DM6MF72vcojpYPtrTUu3ByVWfPL1nPfVRb5mZVTve5GnNCUV53QM/BZ66xfWrm0CTWoM+Xlz8V1w== + dependencies: + loose-envify "^1.4.0" + symbol-observable "^1.2.0" + reflect.ownkeys@^0.2.0: version "0.2.0" resolved "https://registry.yarnpkg.com/reflect.ownkeys/-/reflect.ownkeys-0.2.0.tgz#749aceec7f3fdf8b63f927a04809e90c5c0b3460" @@ -14472,6 +15023,11 @@ regenerator-runtime@^0.13.2, regenerator-runtime@^0.13.3: resolved "https://registry.yarnpkg.com/regenerator-runtime/-/regenerator-runtime-0.13.3.tgz#7cf6a77d8f5c6f60eb73c5fc1955b2ceb01e6bf5" integrity sha512-naKIZz2GQ8JWh///G7L3X6LaQUAMp2lvb1rvwwsURe/VXwD6VMfr+/1NuNw3ag8v2kY1aQ/go5SNn79O9JU7yw== +regenerator-runtime@^0.13.4: + version "0.13.5" + resolved "https://registry.yarnpkg.com/regenerator-runtime/-/regenerator-runtime-0.13.5.tgz#d878a1d094b4306d10b9096484b33ebd55e26697" + integrity sha512-ZS5w8CpKFinUzOwW3c83oPeVXoNsrLsaCoLtJvAClH135j/R77RuymhiSErhm2lKcwSCIpmvIWSbDkIfAqKQlA== + regenerator-transform@^0.14.0: version "0.14.1" resolved "https://registry.yarnpkg.com/regenerator-transform/-/regenerator-transform-0.14.1.tgz#3b2fce4e1ab7732c08f665dfdb314749c7ddd2fb" @@ -15419,7 +15975,7 @@ source-map@0.6.1, 
source-map@^0.6.0, source-map@^0.6.1, source-map@~0.6.0, sourc resolved "https://registry.yarnpkg.com/source-map/-/source-map-0.6.1.tgz#74722af32e9614e9c287a8d0bbde48b5e2f1a263" integrity sha512-UjgapumWlbMhkBgzT7Ykc5YXUT46F0iKu8SGXq0bcwP5dz/h0Plj6enJqjz1Zbq2l5WaqYnrVbwWOWMyF3F47g== -source-map@0.7.3, source-map@^0.7.2: +source-map@0.7.3: version "0.7.3" resolved "https://registry.yarnpkg.com/source-map/-/source-map-0.7.3.tgz#5302f8169031735226544092e64981f751750383" integrity sha512-CkCj6giN3S+n9qrYiBTX5gystlENnRW5jZeNLHpe6aue+SrHcG5VYwujhW9s4dY31mEGsxBDrHR6oI69fTXsaQ== @@ -15881,12 +16437,12 @@ styled-jsx@3.2.1: stylis "3.5.4" stylis-rule-sheet "0.0.10" -stylis-rule-sheet@0.0.10, stylis-rule-sheet@^0.0.10: +stylis-rule-sheet@0.0.10: version "0.0.10" resolved "https://registry.yarnpkg.com/stylis-rule-sheet/-/stylis-rule-sheet-0.0.10.tgz#44e64a2b076643f4b52e5ff71efc04d8c3c4a430" integrity sha512-nTbZoaqoBnmK+ptANthb10ZRZOGC+EmTLLUxeYIuHNkEKcmKgXX1XWKkUBT2Ac4es3NybooPe0SmvKdhKJZAuw== -stylis@3.5.4, stylis@^3.5.0: +stylis@3.5.4: version "3.5.4" resolved "https://registry.yarnpkg.com/stylis/-/stylis-3.5.4.tgz#f665f25f5e299cf3d64654ab949a57c768b73fbe" integrity sha512-8/3pSmthWM7lsPBKv7NXkzn2Uc9W7NotcwGNpJaa3k7WMM1XDCA4MgT5k/8BIexd5ydZdboXtU90XH9Ec4Bv/Q== @@ -16186,6 +16742,11 @@ tiny-emitter@^2.0.0: resolved "https://registry.yarnpkg.com/tiny-emitter/-/tiny-emitter-2.1.0.tgz#1d1a56edfc51c43e863cbb5382a72330e3555423" integrity sha512-NB6Dk1A9xgQPMoGqC5CVXn123gWyte215ONT5Pp5a0yt4nlEoO1ZWeCwpncaekPHXO60i47ihFnZPiRPjRMq4Q== +tiny-invariant@^1.0.6: + version "1.1.0" + resolved "https://registry.yarnpkg.com/tiny-invariant/-/tiny-invariant-1.1.0.tgz#634c5f8efdc27714b7f386c35e6760991d230875" + integrity sha512-ytxQvrb1cPc9WBEI/HSeYYoGD0kWnGEOR8RY6KomWLBVhqz0RgTwVO9dLrGz7dC+nN9llyI7OKAgRq8Vq4ZBSw== + tinycolor2@^1.4.1: version "1.4.1" resolved "https://registry.yarnpkg.com/tinycolor2/-/tinycolor2-1.4.1.tgz#f4fad333447bc0b07d4dc8e9209d8f39a8ac77e8" @@ -16296,13 +16857,6 @@ 
token-stream@0.0.1: resolved "https://registry.yarnpkg.com/token-stream/-/token-stream-0.0.1.tgz#ceeefc717a76c4316f126d0b9dbaa55d7e7df01a" integrity sha1-zu78cXp2xDFvEm0LnbqlXX598Bo= -touch@^2.0.1: - version "2.0.2" - resolved "https://registry.yarnpkg.com/touch/-/touch-2.0.2.tgz#ca0b2a3ae3211246a61b16ba9e6cbf1596287164" - integrity sha512-qjNtvsFXTRq7IuMLweVgFxmEuQ6gLbRs2jQxL80TtZ31dEKWYIxRXquij6w6VimyDek5hD3PytljHmEtAs2u0A== - dependencies: - nopt "~1.0.10" - touch@^3.1.0: version "3.1.0" resolved "https://registry.yarnpkg.com/touch/-/touch-3.1.0.tgz#fe365f5f75ec9ed4e56825e0bb76d24ab74af83b" @@ -16437,6 +16991,17 @@ ts-jest@^26.0.0: semver "7.x" yargs-parser "18.x" +ts-loader@^7.0.5: + version "7.0.5" + resolved "https://registry.yarnpkg.com/ts-loader/-/ts-loader-7.0.5.tgz#789338fb01cb5dc0a33c54e50558b34a73c9c4c5" + integrity sha512-zXypEIT6k3oTc+OZNx/cqElrsbBtYqDknf48OZos0NQ3RTt045fBIU8RRSu+suObBzYB355aIPGOe/3kj9h7Ig== + dependencies: + chalk "^2.3.0" + enhanced-resolve "^4.0.0" + loader-utils "^1.0.2" + micromatch "^4.0.0" + semver "^6.0.0" + ts-map@^1.0.3: version "1.0.3" resolved "https://registry.yarnpkg.com/ts-map/-/ts-map-1.0.3.tgz#1c4d218dec813d2103b7e04e4bcf348e1471c1ff" @@ -16901,6 +17466,11 @@ use-callback-ref@^1.2.1: resolved "https://registry.yarnpkg.com/use-callback-ref/-/use-callback-ref-1.2.1.tgz#898759ccb9e14be6c7a860abafa3ffbd826c89bb" integrity sha512-C3nvxh0ZpaOxs9RCnWwAJ+7bJPwQI8LHF71LzbQ3BvzH5XkdtlkMadqElGevg5bYBDFip4sAnD4m06zAKebg1w== +use-memo-one@^1.1.1: + version "1.1.1" + resolved "https://registry.yarnpkg.com/use-memo-one/-/use-memo-one-1.1.1.tgz#39e6f08fe27e422a7d7b234b5f9056af313bd22c" + integrity sha512-oFfsyun+bP7RX8X2AskHNTxu+R3QdE/RC5IefMbqptmACAA/gfol1KDD5KRzPsGMa62sWxGZw+Ui43u6x4ddoQ== + use-sidecar@^1.0.1: version "1.0.2" resolved "https://registry.yarnpkg.com/use-sidecar/-/use-sidecar-1.0.2.tgz#e72f582a75842f7de4ef8becd6235a4720ad8af6" @@ -16981,6 +17551,11 @@ uuid@^3.0.0, uuid@^3.3.2: resolved 
"https://registry.yarnpkg.com/uuid/-/uuid-3.4.0.tgz#b23e4358afa8a202fe7a100af1f5f883f02007ee" integrity sha512-HjSDRw6gZE5JMggctHBcjVak08+KEVhSIiDzFnT9S9aegmp85S/bReBVTb4QTFaRNptJ9kuYaNhnbNEOkbKb/A== +uuid@^8.0.0: + version "8.1.0" + resolved "https://registry.yarnpkg.com/uuid/-/uuid-8.1.0.tgz#6f1536eb43249f473abc6bd58ff983da1ca30d8d" + integrity sha512-CI18flHDznR0lq54xBycOVmphdCYnQLKn8abKn7PXUiKUGdEd+/l9LWNJmugXel4hXq7S+RMNl34ecyC9TntWg== + uvm@1.7.8: version "1.7.8" resolved "https://registry.yarnpkg.com/uvm/-/uvm-1.7.8.tgz#305b926d73391677318fdc5650b62da600b131d0" @@ -17103,6 +17678,14 @@ vue-template-compiler@^2.0.0: de-indent "^1.0.2" he "^1.1.0" +vue-template-compiler@^2.6.11: + version "2.6.11" + resolved "https://registry.yarnpkg.com/vue-template-compiler/-/vue-template-compiler-2.6.11.tgz#c04704ef8f498b153130018993e56309d4698080" + integrity sha512-KIq15bvQDrcCjpGjrAhx4mUlyyHfdmTaoNfeoATHLAiWB+MU3cx4lOzMwrnUh9cCxy0Lt1T11hAFY6TQgroUAA== + dependencies: + de-indent "^1.0.2" + he "^1.1.0" + w3c-hr-time@^1.0.1: version "1.0.1" resolved "https://registry.yarnpkg.com/w3c-hr-time/-/w3c-hr-time-1.0.1.tgz#82ac2bff63d950ea9e3189a58a65625fedf19045"