diff --git a/.lagoon.secrets.yaml b/.lagoon.secrets.yaml
index 41928b1a09..6a411006cb 100644
--- a/.lagoon.secrets.yaml
+++ b/.lagoon.secrets.yaml
@@ -32,6 +32,10 @@ parameters:
description: Password used for connecting to the keycloak-db
generate: expression
from: "[a-zA-Z0-9]{32}"
+ - name: API_REDIS_PASSWORD
+ description: Password used for connecting to the api-redis
+ generate: expression
+ from: "[a-zA-Z0-9]{32}"
- name: SAFE_BRANCH
description: Which branch this belongs to, special chars replaced with dashes
required: true
@@ -99,3 +103,9 @@ objects:
name: opendistro-security-cookie-password
stringData:
OPENDISTRO_SECURITY_COOKIE_PASSWORD: ${OPENDISTRO_SECURITY_COOKIE_PASSWORD}
+- kind: Secret
+ apiVersion: v1
+ metadata:
+ name: api-redis-password
+ stringData:
+ API_REDIS_PASSWORD: ${API_REDIS_PASSWORD}
diff --git a/.lagoon.yml b/.lagoon.yml
index 9e2b4e48dc..b5fbfed2f0 100644
--- a/.lagoon.yml
+++ b/.lagoon.yml
@@ -50,15 +50,6 @@ environments:
rollouts:
logs-db: statefulset
logs-forwarder: statefulset
- cronjobs:
- - name: idle-clis
- schedule: '*/15 * * * *'
- command: /idle-clis.sh
- service: auto-idler
- - name: idle-services
- schedule: '*/30 * * * *'
- command: /idle-services.sh
- service: auto-idler
develop:
types:
logs-db: elasticsearch-cluster
diff --git a/Makefile b/Makefile
index ac2a47bae4..7da9c940f6 100644
--- a/Makefile
+++ b/Makefile
@@ -426,7 +426,9 @@ services := api \
backup-handler \
broker \
broker-single \
+ logs-concentrator \
logs-dispatcher \
+ logs-tee \
logs-forwarder \
logs-db \
logs-db-ui \
@@ -447,7 +449,8 @@ services := api \
harbor-redis \
harborregistry \
harborregistryctl \
- harbor-trivy
+ harbor-trivy \
+ api-redis
service-images += $(services)
@@ -480,6 +483,7 @@ build/harbor-nginx: build/harborregistryctl services/harbor-core/Dockerfile serv
build/tests-kubernetes: build/tests
build/tests-openshift: build/tests
build/toolbox: build/mariadb
+build/api-redis: build/redis
# Auth SSH needs the context of the root folder, so we have it individually
build/ssh: build/commons
@@ -529,7 +533,8 @@ build-list:
# Define list of all tests
all-k8s-tests-list:= features-kubernetes \
nginx \
- drupal
+ drupal \
+ active-standby-kubernetes
all-k8s-tests = $(foreach image,$(all-k8s-tests-list),k8s-tests/$(image))
# Run all k8s tests
@@ -576,7 +581,7 @@ all-openshift-tests-list:= features-openshift \
bitbucket \
nginx \
elasticsearch \
- active-standby
+ active-standby-openshift
all-openshift-tests = $(foreach image,$(all-openshift-tests-list),openshift-tests/$(image))
.PHONY: openshift-tests
@@ -611,7 +616,7 @@ drupal-test-services = drush-alias
webhook-tests = github gitlab bitbucket
# All Tests that use API endpoints
-api-tests = node features-openshift features-kubernetes nginx elasticsearch active-standby
+api-tests = node features-openshift features-kubernetes nginx elasticsearch active-standby-openshift active-standby-kubernetes
# All drupal tests
drupal-tests = drupal drupal-postgres
@@ -1025,28 +1030,31 @@ endif
--volume $$PWD/local-dev/k3d-nginx-ingress.yaml:/var/lib/rancher/k3s/server/manifests/k3d-nginx-ingress.yaml
echo "$(K3D_NAME)" > $@
export KUBECONFIG="$$(./local-dev/k3d get-kubeconfig --name='$(K3D_NAME)')"; \
- local-dev/kubectl apply -f $$PWD/local-dev/k3d-storageclass-bulk.yaml; \
+ local-dev/kubectl --kubeconfig="$$(./local-dev/k3d get-kubeconfig --name='$(K3D_NAME)')" apply -f $$PWD/local-dev/k3d-storageclass-bulk.yaml; \
docker tag $(CI_BUILD_TAG)/docker-host localhost:5000/lagoon/docker-host; \
docker push localhost:5000/lagoon/docker-host; \
- local-dev/kubectl create namespace k8up; \
- local-dev/helm/helm repo add appuio https://charts.appuio.ch; \
- local-dev/helm/helm upgrade --install -n k8up k8up appuio/k8up; \
- local-dev/kubectl create namespace dbaas-operator; \
- local-dev/helm/helm repo add dbaas-operator https://raw.githubusercontent.com/amazeeio/dbaas-operator/master/charts ; \
- local-dev/helm/helm upgrade --install -n dbaas-operator dbaas-operator dbaas-operator/dbaas-operator ; \
- local-dev/helm/helm upgrade --install -n dbaas-operator mariadbprovider dbaas-operator/mariadbprovider -f local-dev/helm-values-mariadbprovider.yml ; \
- local-dev/kubectl create namespace lagoon; \
- local-dev/helm/helm upgrade --install -n lagoon lagoon-remote ./charts/lagoon-remote --set dockerHost.image.name=172.17.0.1:5000/lagoon/docker-host --set dockerHost.registry=172.17.0.1:5000; \
- local-dev/kubectl -n lagoon rollout status deployment docker-host -w;
+ local-dev/kubectl --kubeconfig="$$(./local-dev/k3d get-kubeconfig --name='$(K3D_NAME)')" --context='$(K3D_NAME)' create namespace k8up; \
+ local-dev/helm/helm --kubeconfig="$$(./local-dev/k3d get-kubeconfig --name='$(K3D_NAME)')" --kube-context='$(K3D_NAME)' repo add appuio https://charts.appuio.ch; \
+ local-dev/helm/helm --kubeconfig="$$(./local-dev/k3d get-kubeconfig --name='$(K3D_NAME)')" --kube-context='$(K3D_NAME)' upgrade --install -n k8up k8up appuio/k8up; \
+ local-dev/kubectl --kubeconfig="$$(./local-dev/k3d get-kubeconfig --name='$(K3D_NAME)')" --context='$(K3D_NAME)' create namespace dioscuri; \
+ local-dev/helm/helm --kubeconfig="$$(./local-dev/k3d get-kubeconfig --name='$(K3D_NAME)')" --kube-context='$(K3D_NAME)' repo add dioscuri https://raw.githubusercontent.com/amazeeio/dioscuri/ingress/charts ; \
+ local-dev/helm/helm --kubeconfig="$$(./local-dev/k3d get-kubeconfig --name='$(K3D_NAME)')" --kube-context='$(K3D_NAME)' upgrade --install -n dioscuri dioscuri dioscuri/dioscuri ; \
+ local-dev/kubectl --kubeconfig="$$(./local-dev/k3d get-kubeconfig --name='$(K3D_NAME)')" --context='$(K3D_NAME)' create namespace dbaas-operator; \
+ local-dev/helm/helm --kubeconfig="$$(./local-dev/k3d get-kubeconfig --name='$(K3D_NAME)')" --kube-context='$(K3D_NAME)' repo add dbaas-operator https://raw.githubusercontent.com/amazeeio/dbaas-operator/master/charts ; \
+ local-dev/helm/helm --kubeconfig="$$(./local-dev/k3d get-kubeconfig --name='$(K3D_NAME)')" --kube-context='$(K3D_NAME)' upgrade --install -n dbaas-operator dbaas-operator dbaas-operator/dbaas-operator ; \
+ local-dev/helm/helm --kubeconfig="$$(./local-dev/k3d get-kubeconfig --name='$(K3D_NAME)')" --kube-context='$(K3D_NAME)' upgrade --install -n dbaas-operator mariadbprovider dbaas-operator/mariadbprovider -f local-dev/helm-values-mariadbprovider.yml ; \
+ local-dev/kubectl --kubeconfig="$$(./local-dev/k3d get-kubeconfig --name='$(K3D_NAME)')" --context='$(K3D_NAME)' create namespace lagoon; \
+ local-dev/helm/helm --kubeconfig="$$(./local-dev/k3d get-kubeconfig --name='$(K3D_NAME)')" --kube-context='$(K3D_NAME)' upgrade --install -n lagoon lagoon-remote ./charts/lagoon-remote --set dockerHost.image.name=172.17.0.1:5000/lagoon/docker-host --set dockerHost.registry=172.17.0.1:5000; \
+ local-dev/kubectl --kubeconfig="$$(./local-dev/k3d get-kubeconfig --name='$(K3D_NAME)')" --context='$(K3D_NAME)' -n lagoon rollout status deployment docker-host -w;
ifeq ($(ARCH), darwin)
export KUBECONFIG="$$(./local-dev/k3d get-kubeconfig --name='$(K3D_NAME)')"; \
- KUBERNETESBUILDDEPLOY_TOKEN=$$(local-dev/kubectl -n lagoon describe secret $$(local-dev/kubectl -n lagoon get secret | grep kubernetesbuilddeploy | awk '{print $$1}') | grep token: | awk '{print $$2}'); \
+ KUBERNETESBUILDDEPLOY_TOKEN=$$(local-dev/kubectl --kubeconfig="$$(./local-dev/k3d get-kubeconfig --name='$(K3D_NAME)')" --context='$(K3D_NAME)' -n lagoon describe secret $$(local-dev/kubectl --kubeconfig="$$(./local-dev/k3d get-kubeconfig --name='$(K3D_NAME)')" --context='$(K3D_NAME)' -n lagoon get secret | grep kubernetesbuilddeploy | awk '{print $$1}') | grep token: | awk '{print $$2}'); \
sed -i '' -e "s/\".*\" # make-kubernetes-token/\"$${KUBERNETESBUILDDEPLOY_TOKEN}\" # make-kubernetes-token/g" local-dev/api-data/03-populate-api-data-kubernetes.gql; \
DOCKER_IP="$$(docker network inspect bridge --format='{{(index .IPAM.Config 0).Gateway}}')"; \
sed -i '' -e "s/172\.[0-9]\{1,3\}\.[0-9]\{1,3\}\.[0-9]\{1,3\}/$${DOCKER_IP}/g" local-dev/api-data/03-populate-api-data-kubernetes.gql docker-compose.yaml;
else
export KUBECONFIG="$$(./local-dev/k3d get-kubeconfig --name='$(K3D_NAME)')"; \
- KUBERNETESBUILDDEPLOY_TOKEN=$$(local-dev/kubectl -n lagoon describe secret $$(local-dev/kubectl -n lagoon get secret | grep kubernetesbuilddeploy | awk '{print $$1}') | grep token: | awk '{print $$2}'); \
+ KUBERNETESBUILDDEPLOY_TOKEN=$$(local-dev/kubectl --kubeconfig="$$(./local-dev/k3d get-kubeconfig --name='$(K3D_NAME)')" --context='$(K3D_NAME)' -n lagoon describe secret $$(local-dev/kubectl --kubeconfig="$$(./local-dev/k3d get-kubeconfig --name='$(K3D_NAME)')" --context='$(K3D_NAME)' -n lagoon get secret | grep kubernetesbuilddeploy | awk '{print $$1}') | grep token: | awk '{print $$2}'); \
sed -i "s/\".*\" # make-kubernetes-token/\"$${KUBERNETESBUILDDEPLOY_TOKEN}\" # make-kubernetes-token/g" local-dev/api-data/03-populate-api-data-kubernetes.gql; \
DOCKER_IP="$$(docker network inspect bridge --format='{{(index .IPAM.Config 0).Gateway}}')"; \
sed -i "s/172\.[0-9]\{1,3\}\.[0-9]\{1,3\}\.[0-9]\{1,3\}/$${DOCKER_IP}/g" local-dev/api-data/03-populate-api-data-kubernetes.gql docker-compose.yaml;
@@ -1068,27 +1076,27 @@ k3d-kubeconfig:
k3d-dashboard:
export KUBECONFIG="$$(./local-dev/k3d get-kubeconfig --name=$$(cat k3d))"; \
- local-dev/kubectl apply -f https://raw.githubusercontent.com/kubernetes/dashboard/v2.0.0-rc2/aio/deploy/recommended/00_dashboard-namespace.yaml; \
- local-dev/kubectl apply -f https://raw.githubusercontent.com/kubernetes/dashboard/v2.0.0-rc2/aio/deploy/recommended/01_dashboard-serviceaccount.yaml; \
- local-dev/kubectl apply -f https://raw.githubusercontent.com/kubernetes/dashboard/v2.0.0-rc2/aio/deploy/recommended/02_dashboard-service.yaml; \
- local-dev/kubectl apply -f https://raw.githubusercontent.com/kubernetes/dashboard/v2.0.0-rc2/aio/deploy/recommended/03_dashboard-secret.yaml; \
- local-dev/kubectl apply -f https://raw.githubusercontent.com/kubernetes/dashboard/v2.0.0-rc2/aio/deploy/recommended/04_dashboard-configmap.yaml; \
- echo '{"apiVersion": "rbac.authorization.k8s.io/v1","kind": "ClusterRoleBinding","metadata": {"name": "kubernetes-dashboard","namespace": "kubernetes-dashboard"},"roleRef": {"apiGroup": "rbac.authorization.k8s.io","kind": "ClusterRole","name": "cluster-admin"},"subjects": [{"kind": "ServiceAccount","name": "kubernetes-dashboard","namespace": "kubernetes-dashboard"}]}' | local-dev/kubectl -n kubernetes-dashboard apply -f - ; \
- local-dev/kubectl apply -f https://raw.githubusercontent.com/kubernetes/dashboard/v2.0.0-rc2/aio/deploy/recommended/06_dashboard-deployment.yaml; \
- local-dev/kubectl apply -f https://raw.githubusercontent.com/kubernetes/dashboard/v2.0.0-rc2/aio/deploy/recommended/07_scraper-service.yaml; \
- local-dev/kubectl apply -f https://raw.githubusercontent.com/kubernetes/dashboard/v2.0.0-rc2/aio/deploy/recommended/08_scraper-deployment.yaml; \
- local-dev/kubectl -n kubernetes-dashboard patch deployment kubernetes-dashboard --patch '{"spec": {"template": {"spec": {"containers": [{"name": "kubernetes-dashboard","args": ["--auto-generate-certificates","--namespace=kubernetes-dashboard","--enable-skip-login"]}]}}}}'; \
- local-dev/kubectl -n kubernetes-dashboard rollout status deployment kubernetes-dashboard -w; \
+ local-dev/kubectl --kubeconfig="$$(./local-dev/k3d get-kubeconfig --name='$(K3D_NAME)')" --context='$(K3D_NAME)' apply -f https://raw.githubusercontent.com/kubernetes/dashboard/v2.0.0-rc2/aio/deploy/recommended/00_dashboard-namespace.yaml; \
+ local-dev/kubectl --kubeconfig="$$(./local-dev/k3d get-kubeconfig --name='$(K3D_NAME)')" --context='$(K3D_NAME)' apply -f https://raw.githubusercontent.com/kubernetes/dashboard/v2.0.0-rc2/aio/deploy/recommended/01_dashboard-serviceaccount.yaml; \
+ local-dev/kubectl --kubeconfig="$$(./local-dev/k3d get-kubeconfig --name='$(K3D_NAME)')" --context='$(K3D_NAME)' apply -f https://raw.githubusercontent.com/kubernetes/dashboard/v2.0.0-rc2/aio/deploy/recommended/02_dashboard-service.yaml; \
+ local-dev/kubectl --kubeconfig="$$(./local-dev/k3d get-kubeconfig --name='$(K3D_NAME)')" --context='$(K3D_NAME)' apply -f https://raw.githubusercontent.com/kubernetes/dashboard/v2.0.0-rc2/aio/deploy/recommended/03_dashboard-secret.yaml; \
+ local-dev/kubectl --kubeconfig="$$(./local-dev/k3d get-kubeconfig --name='$(K3D_NAME)')" --context='$(K3D_NAME)' apply -f https://raw.githubusercontent.com/kubernetes/dashboard/v2.0.0-rc2/aio/deploy/recommended/04_dashboard-configmap.yaml; \
+ echo '{"apiVersion": "rbac.authorization.k8s.io/v1","kind": "ClusterRoleBinding","metadata": {"name": "kubernetes-dashboard","namespace": "kubernetes-dashboard"},"roleRef": {"apiGroup": "rbac.authorization.k8s.io","kind": "ClusterRole","name": "cluster-admin"},"subjects": [{"kind": "ServiceAccount","name": "kubernetes-dashboard","namespace": "kubernetes-dashboard"}]}' | local-dev/kubectl --kubeconfig="$$(./local-dev/k3d get-kubeconfig --name='$(K3D_NAME)')" --context='$(K3D_NAME)' -n kubernetes-dashboard apply -f - ; \
+ local-dev/kubectl --kubeconfig="$$(./local-dev/k3d get-kubeconfig --name='$(K3D_NAME)')" --context='$(K3D_NAME)' apply -f https://raw.githubusercontent.com/kubernetes/dashboard/v2.0.0-rc2/aio/deploy/recommended/06_dashboard-deployment.yaml; \
+ local-dev/kubectl --kubeconfig="$$(./local-dev/k3d get-kubeconfig --name='$(K3D_NAME)')" --context='$(K3D_NAME)' apply -f https://raw.githubusercontent.com/kubernetes/dashboard/v2.0.0-rc2/aio/deploy/recommended/07_scraper-service.yaml; \
+ local-dev/kubectl --kubeconfig="$$(./local-dev/k3d get-kubeconfig --name='$(K3D_NAME)')" --context='$(K3D_NAME)' apply -f https://raw.githubusercontent.com/kubernetes/dashboard/v2.0.0-rc2/aio/deploy/recommended/08_scraper-deployment.yaml; \
+ local-dev/kubectl --kubeconfig="$$(./local-dev/k3d get-kubeconfig --name='$(K3D_NAME)')" --context='$(K3D_NAME)' -n kubernetes-dashboard patch deployment kubernetes-dashboard --patch '{"spec": {"template": {"spec": {"containers": [{"name": "kubernetes-dashboard","args": ["--auto-generate-certificates","--namespace=kubernetes-dashboard","--enable-skip-login"]}]}}}}'; \
+ local-dev/kubectl --kubeconfig="$$(./local-dev/k3d get-kubeconfig --name='$(K3D_NAME)')" --context='$(K3D_NAME)' -n kubernetes-dashboard rollout status deployment kubernetes-dashboard -w; \
open http://localhost:8001/api/v1/namespaces/kubernetes-dashboard/services/https:kubernetes-dashboard:/proxy/ ; \
- local-dev/kubectl proxy
+ local-dev/kubectl --kubeconfig="$$(./local-dev/k3d get-kubeconfig --name='$(K3D_NAME)')" --context='$(K3D_NAME)' proxy
k8s-dashboard:
- kubectl apply -f https://raw.githubusercontent.com/kubernetes/dashboard/v2.0.0-rc2/aio/deploy/recommended.yaml; \
- kubectl -n kubernetes-dashboard rollout status deployment kubernetes-dashboard -w; \
+ kubectl --kubeconfig="$$(./local-dev/k3d get-kubeconfig --name='$(K3D_NAME)')" --context='$(K3D_NAME)' apply -f https://raw.githubusercontent.com/kubernetes/dashboard/v2.0.0-rc2/aio/deploy/recommended.yaml; \
+ kubectl --kubeconfig="$$(./local-dev/k3d get-kubeconfig --name='$(K3D_NAME)')" --context='$(K3D_NAME)' -n kubernetes-dashboard rollout status deployment kubernetes-dashboard -w; \
echo -e "\nUse this token:"; \
- kubectl -n lagoon describe secret $$(local-dev/kubectl -n lagoon get secret | grep kubernetesbuilddeploy | awk '{print $$1}') | grep token: | awk '{print $$2}'; \
+ kubectl --kubeconfig="$$(./local-dev/k3d get-kubeconfig --name='$(K3D_NAME)')" --context='$(K3D_NAME)' -n lagoon describe secret $$(local-dev/kubectl --kubeconfig="$$(./local-dev/k3d get-kubeconfig --name='$(K3D_NAME)')" --context='$(K3D_NAME)' -n lagoon get secret | grep kubernetesbuilddeploy | awk '{print $$1}') | grep token: | awk '{print $$2}'; \
open http://localhost:8001/api/v1/namespaces/kubernetes-dashboard/services/https:kubernetes-dashboard:/proxy/ ; \
- kubectl proxy
+ kubectl --kubeconfig="$$(./local-dev/k3d get-kubeconfig --name='$(K3D_NAME)')" --context='$(K3D_NAME)' proxy
# Stop k3d
.PHONY: k3d/stop
@@ -1134,3 +1142,7 @@ rebuild-push-oc-build-deploy-dind:
.PHONY: ui-development
ui-development: build/api build/api-db build/local-api-data-watcher-pusher build/ui build/keycloak build/keycloak-db build/broker build/broker-single
IMAGE_REPO=$(CI_BUILD_TAG) docker-compose -p $(CI_BUILD_TAG) --compatibility up -d api api-db local-api-data-watcher-pusher ui keycloak keycloak-db broker
+
+.PHONY: api-development
+api-development: build/api build/api-db build/local-api-data-watcher-pusher build/keycloak build/keycloak-db build/broker build/broker-single
+ IMAGE_REPO=$(CI_BUILD_TAG) docker-compose -p $(CI_BUILD_TAG) --compatibility up -d api api-db local-api-data-watcher-pusher keycloak keycloak-db broker
diff --git a/charts/index.yaml b/charts/index.yaml
index 82a4943898..f49ac5c23d 100644
--- a/charts/index.yaml
+++ b/charts/index.yaml
@@ -3,7 +3,67 @@ entries:
lagoon-logging:
- apiVersion: v2
appVersion: 0.1.0
- created: "2020-05-20T21:04:11.988795-04:00"
+ created: "2020-07-15T13:20:44.801012001+08:00"
+ dependencies:
+ - name: logging-operator
+ repository: https://kubernetes-charts.banzaicloud.com
+ version: ~3.4.0
+ description: |
+ A Helm chart for Kubernetes which installs the lagoon container and router logs collection system.
+ digest: e06440b9199bc69f46c2fb66a20d3ed153c2f0fe749c97a40b2bf137f97c7205
+ name: lagoon-logging
+ type: application
+ urls:
+ - lagoon-logging-0.6.3.tgz
+ version: 0.6.3
+ - apiVersion: v2
+ appVersion: 0.1.0
+ created: "2020-07-15T13:20:44.793677497+08:00"
+ dependencies:
+ - name: logging-operator
+ repository: https://kubernetes-charts.banzaicloud.com
+ version: ~3.3.0
+ description: |
+ A Helm chart for Kubernetes which installs the lagoon container and router logs collection system.
+ digest: d944b8a7dd5ba927eab5be5df30ebbd0fafb5c45277a550c89c300853b80167a
+ name: lagoon-logging
+ type: application
+ urls:
+ - lagoon-logging-0.6.2.tgz
+ version: 0.6.2
+ - apiVersion: v2
+ appVersion: 0.1.0
+ created: "2020-07-15T13:20:44.786136365+08:00"
+ dependencies:
+ - name: logging-operator
+ repository: https://kubernetes-charts.banzaicloud.com
+ version: ~3.3.0
+ description: |
+ A Helm chart for Kubernetes which installs the lagoon container and router logs collection system.
+ digest: 516961903c4c2fc2d8b39b3a9c8f594bdb5db7a1e70c480b89554e64d7303902
+ name: lagoon-logging
+ type: application
+ urls:
+ - lagoon-logging-0.6.1.tgz
+ version: 0.6.1
+ - apiVersion: v2
+ appVersion: 0.1.0
+ created: "2020-07-15T13:20:44.777543065+08:00"
+ dependencies:
+ - name: logging-operator
+ repository: https://kubernetes-charts.banzaicloud.com
+ version: ~3.2.0
+ description: |
+ A Helm chart for Kubernetes which installs the lagoon container and router logs collection system.
+ digest: 94c4a3b92dad2f23f61a750d3b9b6e69084c93a6775a0051ce990c3528e90f25
+ name: lagoon-logging
+ type: application
+ urls:
+ - lagoon-logging-0.2.0.tgz
+ version: 0.2.0
+ - apiVersion: v2
+ appVersion: 0.1.0
+ created: "2020-07-15T13:20:44.769678229+08:00"
dependencies:
- name: logging-operator
repository: https://kubernetes-charts.banzaicloud.com
@@ -16,10 +76,33 @@ entries:
urls:
- lagoon-logging-0.1.0.tgz
version: 0.1.0
+ lagoon-logs-concentrator:
+ - apiVersion: v2
+ appVersion: 1.16.0
+ created: "2020-07-15T13:20:44.802816825+08:00"
+ description: A Helm chart for Kubernetes which installs the Lagoon logs-concentrator
+ service.
+ digest: a4373f224b6435b3c4b4556c99a081c9467edc7748991446a11b1735789bbdcb
+ name: lagoon-logs-concentrator
+ type: application
+ urls:
+ - lagoon-logs-concentrator-0.2.1.tgz
+ version: 0.2.1
+ - apiVersion: v2
+ appVersion: 1.16.0
+ created: "2020-07-15T13:20:44.802021602+08:00"
+ description: A Helm chart for Kubernetes which installs the Lagoon logs-concentrator
+ service.
+ digest: c66bc7450f61a74cb1e8742c4feb5146c7361e2c04e3171235c1e776ca958327
+ name: lagoon-logs-concentrator
+ type: application
+ urls:
+ - lagoon-logs-concentrator-0.2.0.tgz
+ version: 0.2.0
lagoon-remote:
- apiVersion: v2
appVersion: 1.4.0
- created: "2020-05-20T21:04:11.990249-04:00"
+ created: "2020-07-15T13:20:44.803852312+08:00"
description: A Helm chart to run a lagoon-remote
digest: 96bc41bc9985cd6a7fbd85a32affea3bbbabdf4baa0cd829e7e3d33fb975ceeb
name: lagoon-remote
@@ -29,7 +112,7 @@ entries:
version: 0.1.3
- apiVersion: v2
appVersion: 1.4.0
- created: "2020-05-20T21:04:11.989691-04:00"
+ created: "2020-07-15T13:20:44.803424311+08:00"
description: A Helm chart to run a lagoon-remote
digest: 5756a3fbb46a11f2f43fdcadb41d709d90c70208b90fa0257d48dcacc4df3040
name: lagoon-remote
@@ -37,4 +120,4 @@ entries:
urls:
- lagoon-remote-0.1.2.tgz
version: 0.1.2
-generated: "2020-05-20T21:04:11.982298-04:00"
+generated: "2020-07-15T13:20:44.761029818+08:00"
diff --git a/charts/lagoon-logging-0.2.0.tgz b/charts/lagoon-logging-0.2.0.tgz
new file mode 100644
index 0000000000..56c9dd0a4d
Binary files /dev/null and b/charts/lagoon-logging-0.2.0.tgz differ
diff --git a/charts/lagoon-logging-0.6.1.tgz b/charts/lagoon-logging-0.6.1.tgz
new file mode 100644
index 0000000000..74d0031b53
Binary files /dev/null and b/charts/lagoon-logging-0.6.1.tgz differ
diff --git a/charts/lagoon-logging-0.6.2.tgz b/charts/lagoon-logging-0.6.2.tgz
new file mode 100644
index 0000000000..1fdee215a2
Binary files /dev/null and b/charts/lagoon-logging-0.6.2.tgz differ
diff --git a/charts/lagoon-logging-0.6.3.tgz b/charts/lagoon-logging-0.6.3.tgz
new file mode 100644
index 0000000000..b23e145f71
Binary files /dev/null and b/charts/lagoon-logging-0.6.3.tgz differ
diff --git a/charts/lagoon-logging.values.yaml b/charts/lagoon-logging.values.yaml
deleted file mode 100644
index 2abc7accf5..0000000000
--- a/charts/lagoon-logging.values.yaml
+++ /dev/null
@@ -1,91 +0,0 @@
-# Default values for lagoon-logging.
-# This is a YAML-formatted file.
-# Declare variables to be passed into your templates.
-
-nameOverride: ""
-fullnameOverride: ""
-
-logsDispatcher:
-
- name: logs-dispatcher
-
- replicaCount: 2
-
- image:
- repository: amazeeiolagoon/logs-dispatcher
- pullPolicy: Always
- # Overrides the image tag whose default is the chart version.
- tag: v1-5-0
-
- serviceAccount:
- # Specifies whether a service account should be created
- create: true
- # Annotations to add to the service account
- annotations: {}
- # The name of the service account to use.
- # If not set and create is true, a name is generated using the fullname
- # template
- # If this value is set, the serviceAccount named must have clusterrole
- # view.
- name: ""
-
- podAnnotations: {}
-
- podSecurityContext: {}
- # fsGroup: 2000
-
- securityContext: {}
- # capabilities:
- # drop:
- # - ALL
- # readOnlyRootFilesystem: true
- # runAsNonRoot: true
- # runAsUser: 1000
-
- resources: {}
- # If you want to specify resources, uncomment the following lines, adjust
- # them as necessary, and remove the curly braces after 'resources:'.
- # limits:
- # cpu: 100m
- # memory: 128Mi
- # requests:
- # cpu: 100m
- # memory: 128Mi
-
- nodeSelector: {}
-
- tolerations: []
-
- affinity: {}
-
-# Don't collect logs from these namespaces.
-# Comment out this field to collect from all namespaces.
-excludeNamespaces:
-- cattle-prometheus
-- kube-system
-- syn
-- syn-cert-manager
-- syn-synsights
-- syn-cluster-autoscaler
-
-# Configure the cluster output buffer.
-# This may require tweaking to handle high volumes of logs.
-clusterOutputBuffer:
- flush_thread_count: 256
- timekey: 1m
- timekey_wait: 10s
- timekey_use_utc: true
-
-# Elasticsearch output config.
-elasticsearchHostPort: "443"
-elasticsearchScheme: https
-# The values below must be supplied during installation as they have no sane
-# defaults.
-elasticsearchAdminPassword: SOp1qe31Bb6jqIjjpPaqNURtMbBIo7Ah
-elasticsearchHost: logs-db.ch2.amazee.io
-clusterName: amazeeio-de3
-
-# chart dependency on logging-operator
-logging-operator:
- enabled: true
- createCustomResource: false
diff --git a/charts/lagoon-logging/Chart.lock b/charts/lagoon-logging/Chart.lock
index dfc19bef5c..861f97b626 100644
--- a/charts/lagoon-logging/Chart.lock
+++ b/charts/lagoon-logging/Chart.lock
@@ -1,6 +1,6 @@
dependencies:
- name: logging-operator
repository: https://kubernetes-charts.banzaicloud.com
- version: 3.0.5
-digest: sha256:b5f1e93500944b39e9f49083594eaecdb4e584ec94dfcd8a38ef4c4835377e35
-generated: "2020-05-07T22:37:40.078678817+08:00"
+ version: 3.4.0
+digest: sha256:d248221846af4df24cb1402d84c6bf8d8db6c26a6348d10345bd5572ed6d8ab7
+generated: "2020-07-15T11:52:39.381243481+08:00"
diff --git a/charts/lagoon-logging/Chart.yaml b/charts/lagoon-logging/Chart.yaml
index 71155cb9f5..7e2d3e6760 100644
--- a/charts/lagoon-logging/Chart.yaml
+++ b/charts/lagoon-logging/Chart.yaml
@@ -12,7 +12,7 @@ type: application
# time you make changes to the chart and its templates, including the app
# version.
# Versions are expected to follow Semantic Versioning (https://semver.org/)
-version: 0.1.0
+version: 0.6.3
# This is the version number of the application being deployed. This version
# number should be incremented each time you make changes to the application.
@@ -23,4 +23,4 @@ appVersion: 0.1.0
dependencies:
- name: logging-operator
repository: https://kubernetes-charts.banzaicloud.com
- version: ~3.0.5
+ version: ~3.4.0
diff --git a/charts/lagoon-logging/README.md b/charts/lagoon-logging/README.md
index 6fcb5088a2..b881442bee 100644
--- a/charts/lagoon-logging/README.md
+++ b/charts/lagoon-logging/README.md
@@ -19,13 +19,15 @@ helm repo add banzaicloud-stable https://kubernetes-charts.banzaicloud.com
helm dependency build lagoon-logging
```
-1. Create a `lagoon-logging.values.yaml` file inside `charts/` directory containing these fields with the
- relevant values added.
+1. Create a `lagoon-logging.values.yaml` file inside `charts/` directory containing these fields with the relevant values added.
+ For required values and documentation see the comment block at the end of the chart's `values.yaml`.
+
+**OpenShift only**
+
+You must allow the fluentbit pods to run in privileged mode by setting:
```
-elasticsearchHost: ...
-elasticsearchAdminPassword: ...
-clusterName: ...
+fluentbitPrivileged: true
```
2. Test installation.
@@ -38,12 +40,39 @@ helm template --debug --namespace lagoon-logging -f ./lagoon-logging.values.yaml
helm upgrade --dry-run --install --debug --create-namespace --namespace lagoon-logging -f ./lagoon-logging.values.yaml lagoon-logging lagoon-logging
```
-2. Run installation.
+3. Run installation.
```
helm upgrade --install --debug --create-namespace --namespace lagoon-logging -f ./lagoon-logging.values.yaml lagoon-logging lagoon-logging
```
+**OpenShift only**
+
+Give the various serviceaccounts permissions required:
+```
+oc project lagoon-logging
+
+# fluentd statefulset serviceaccount (logging-operator chart)
+oc adm policy add-scc-to-user nonroot -z lagoon-logging-fluentd
+
+# fluentbit daemonset serviceaccount (logging-operator chart)
+oc adm policy add-scc-to-user privileged -z lagoon-logging-fluentbit
+
+# logs-dispatcher statefulset serviceaccount (lagoon-logging chart)
+oc adm policy add-scc-to-user anyuid -z lagoon-logging-logs-dispatcher
+```
+
+And make the project network global:
+```
+oc adm pod-network make-projects-global lagoon-logging
+```
+
+4. Update application-logs and router-logs services
+
+The `application-logs` and `router-logs` services in the `lagoon` namespace need to be updated to point their `externalName` to the `lagoon-logging-logs-dispatcher` service in the `lagoon-logging` namespace (or wherever you've installed it).
+
+If you are migrating from the old lagoon logging infrastructure and want to keep logs flowing to both old and new infrastructure, point these services at the relevant `logs-tee` service in the `lagoon-logging` namespace. The `logs-tee` services then need to have the legacy `endpoint` configured. See the comments in the chart `values.yaml` for an example.
+
## View logs
### For namespaces without a lagoon.sh/project label
@@ -51,6 +80,7 @@ helm upgrade --install --debug --create-namespace --namespace lagoon-logging -f
Logs will appear in indices matching these patterns:
```
+application-logs-*_$CLUSTERNAME-*
container-logs-*_$CLUSTERNAME-*
router-logs-*_$CLUSTERNAME-*
```
@@ -58,6 +88,7 @@ router-logs-*_$CLUSTERNAME-*
e.g. if `clusterName: test1`
```
+application-logs-*_test1-*
container-logs-*_test1-*
router-logs-*_test1-*
```
@@ -67,6 +98,7 @@ router-logs-*_test1-*
Logs will appear in indices matching these patterns:
```
+application-logs-$PROJECT-*
container-logs-$PROJECT-*
router-logs-$PROJECT-*
```
@@ -74,6 +106,38 @@ router-logs-$PROJECT-*
e.g. if `lagoon.sh/project: drupal-example`
```
+application-logs-drupal-example-*
container-logs-drupal-example-*
router-logs-drupal-example-*
```
+
+## How to upgrade
+
+NOTE: If the `logging-operator` chart upgrade doesn't work, just uninstall the helm release and install it again. Logs won't be lost since fluentbit will send the contents of the log files once it is reinstalled.
+
+```
+helm upgrade --debug --namespace lagoon-logging --reuse-values lagoon-logging lagoon-logging
+```
+
+## Log export
+
+The `logs-dispatcher` includes support for sending logs to external sinks such as [cloudwatch](https://github.com/fluent-plugins-nursery/fluent-plugin-cloudwatch-logs) or [S3](https://docs.fluentd.org/output/s3).
+This feature uses the [fluentd copy plugin](https://docs.fluentd.org/output/copy), so see that link for syntax.
+
+For example configure the `exportLogs` value like so:
+
+```
+exportLogs:
+  s3.conf: |
+    <store ignore_error>
+      @type s3
+      ...
+    </store>
+  cloudwatch.conf: |
+    <store ignore_error>
+      @type cloudwatch_logs
+      ...
+    </store>
+```
+
+IMPORTANT: use `ignore_error` so that the main log flow to elasticsearch is not interrupted.
diff --git a/charts/lagoon-logging/templates/NOTES.txt b/charts/lagoon-logging/templates/NOTES.txt
index 21d6d73b23..311f0a2bee 100644
--- a/charts/lagoon-logging/templates/NOTES.txt
+++ b/charts/lagoon-logging/templates/NOTES.txt
@@ -2,4 +2,4 @@ Thank you for installing {{ .Chart.Name }}.
Your release is named {{ .Release.Name }}.
-Your logs are now being sent to {{ .Values.elasticsearchScheme }}://{{ .Values.elasticsearchHost }}:{{ .Values.elasticsearchHostPort }}
+Your logs are now being sent to {{ coalesce .Values.forward.host }}:{{ .Values.forward.hostPort }}.
diff --git a/charts/lagoon-logging/templates/_helpers.tpl b/charts/lagoon-logging/templates/_helpers.tpl
index 55c9d789d8..6431344a78 100644
--- a/charts/lagoon-logging/templates/_helpers.tpl
+++ b/charts/lagoon-logging/templates/_helpers.tpl
@@ -92,3 +92,85 @@ Create the name of the service account to use
{{- default "default" .Values.logsDispatcher.serviceAccount.name }}
{{- end }}
{{- end }}
+
+{{/*
+Create a default fully qualified app name for logs-tee-router
+We truncate at 63 chars because some Kubernetes name fields are limited to this
+(by the DNS naming spec).
+*/}}
+{{- define "lagoon-logging.logsTeeRouter.fullname" -}}
+{{- include "lagoon-logging.fullname" . }}-{{ .Values.logsTeeRouter.name }}
+{{- end }}
+
+{{/*
+Selector labels
+*/}}
+{{- define "lagoon-logging.logsTeeRouter.selectorLabels" -}}
+app.kubernetes.io/name: {{ include "lagoon-logging.name" . }}
+app.kubernetes.io/component: {{ include "lagoon-logging.logsTeeRouter.fullname" . }}
+app.kubernetes.io/instance: {{ .Release.Name }}
+{{- end }}
+
+{{/*
+Common labels
+*/}}
+{{- define "lagoon-logging.logsTeeRouter.labels" -}}
+helm.sh/chart: {{ include "lagoon-logging.chart" . }}
+{{ include "lagoon-logging.logsTeeRouter.selectorLabels" . }}
+{{- if .Chart.AppVersion }}
+app.kubernetes.io/version: {{ .Chart.AppVersion | quote }}
+{{- end }}
+app.kubernetes.io/managed-by: {{ .Release.Service }}
+{{- end }}
+
+{{/*
+Create the name of the service account to use
+*/}}
+{{- define "lagoon-logging.logsTeeRouter.serviceAccountName" -}}
+{{- if .Values.logsTeeRouter.serviceAccount.create }}
+{{- default (include "lagoon-logging.logsTeeRouter.fullname" .) .Values.logsTeeRouter.serviceAccount.name }}
+{{- else }}
+{{- default "default" .Values.logsTeeRouter.serviceAccount.name }}
+{{- end }}
+{{- end }}
+
+{{/*
+Create a default fully qualified app name for logs-tee-application
+We truncate at 63 chars because some Kubernetes name fields are limited to this
+(by the DNS naming spec).
+*/}}
+{{- define "lagoon-logging.logsTeeApplication.fullname" -}}
+{{- include "lagoon-logging.fullname" . }}-{{ .Values.logsTeeApplication.name }}
+{{- end }}
+
+{{/*
+Selector labels
+*/}}
+{{- define "lagoon-logging.logsTeeApplication.selectorLabels" -}}
+app.kubernetes.io/name: {{ include "lagoon-logging.name" . }}
+app.kubernetes.io/component: {{ include "lagoon-logging.logsTeeApplication.fullname" . }}
+app.kubernetes.io/instance: {{ .Release.Name }}
+{{- end }}
+
+{{/*
+Common labels
+*/}}
+{{- define "lagoon-logging.logsTeeApplication.labels" -}}
+helm.sh/chart: {{ include "lagoon-logging.chart" . }}
+{{ include "lagoon-logging.logsTeeApplication.selectorLabels" . }}
+{{- if .Chart.AppVersion }}
+app.kubernetes.io/version: {{ .Chart.AppVersion | quote }}
+{{- end }}
+app.kubernetes.io/managed-by: {{ .Release.Service }}
+{{- end }}
+
+{{/*
+Create the name of the service account to use
+*/}}
+{{- define "lagoon-logging.logsTeeApplication.serviceAccountName" -}}
+{{- if .Values.logsTeeApplication.serviceAccount.create }}
+{{- default (include "lagoon-logging.logsTeeApplication.fullname" .) .Values.logsTeeApplication.serviceAccount.name }}
+{{- else }}
+{{- default "default" .Values.logsTeeApplication.serviceAccount.name }}
+{{- end }}
+{{- end }}
diff --git a/charts/lagoon-logging/templates/clusterflow.yaml b/charts/lagoon-logging/templates/clusterflow.yaml
index 5ced60dfce..4b19d295cb 100644
--- a/charts/lagoon-logging/templates/clusterflow.yaml
+++ b/charts/lagoon-logging/templates/clusterflow.yaml
@@ -1,7 +1,6 @@
apiVersion: logging.banzaicloud.io/v1beta1
kind: ClusterFlow
metadata:
- namespace: {{ .Release.Namespace | quote }}
name: {{ include "lagoon-logging.fullname" . }}
labels:
{{- include "lagoon-logging.labels" . | nindent 4 }}
@@ -9,12 +8,19 @@ spec:
# match entries are considered in order
# the empty "select: {}" indicates all namespaces/labels
match:
- {{- with .Values.excludeNamespaces }}
- exclude:
namespaces:
+ - {{ .Release.Namespace }}
+ {{- with .Values.excludeNamespaces }}
{{- toYaml . | nindent 6 }}
- {{- end }}
+ {{- end }}
+ {{- with .Values.selectNamespaces }}
+ - select:
+ namespaces:
+ {{- toYaml . | nindent 6 }}
+ {{- else }}
- select: {}
+ {{- end }}
filters:
- tag_normaliser: {}
outputRefs:
diff --git a/charts/lagoon-logging/templates/clusteroutput.yaml b/charts/lagoon-logging/templates/clusteroutput.yaml
index 05cb76dcf8..f30fb1ead6 100644
--- a/charts/lagoon-logging/templates/clusteroutput.yaml
+++ b/charts/lagoon-logging/templates/clusteroutput.yaml
@@ -1,7 +1,6 @@
apiVersion: logging.banzaicloud.io/v1beta1
kind: ClusterOutput
metadata:
- namespace: {{ .Release.Namespace | quote }}
name: {{ include "lagoon-logging.fullname" . }}
labels:
{{- include "lagoon-logging.labels" . | nindent 4 }}
diff --git a/charts/lagoon-logging/templates/logging.yaml b/charts/lagoon-logging/templates/logging.yaml
index 62c6424c2d..0150941c18 100644
--- a/charts/lagoon-logging/templates/logging.yaml
+++ b/charts/lagoon-logging/templates/logging.yaml
@@ -1,11 +1,23 @@
apiVersion: logging.banzaicloud.io/v1beta1
kind: Logging
metadata:
- namespace: {{ .Release.Namespace | quote }}
name: {{ include "lagoon-logging.fullname" . }}
labels:
{{- include "lagoon-logging.labels" . | nindent 4 }}
spec:
- fluentd: {}
+ fluentd:
+ security:
+ podSecurityContext:
+ runAsUser: 100
+ fsGroup: 0
+ scaling:
+ replicas: 3
+ {{- with .Values.fluentbitPrivileged }}
+ fluentbit:
+ security:
+ securityContext:
+ privileged: {{ . }}
+ {{- else }}
fluentbit: {}
+ {{- end }}
controlNamespace: {{ .Release.Namespace | quote }}
diff --git a/charts/lagoon-logging/templates/logs-dispatcher.clusterrolebinding.yaml b/charts/lagoon-logging/templates/logs-dispatcher.clusterrolebinding.yaml
index 6ae0578013..8d6228be38 100644
--- a/charts/lagoon-logging/templates/logs-dispatcher.clusterrolebinding.yaml
+++ b/charts/lagoon-logging/templates/logs-dispatcher.clusterrolebinding.yaml
@@ -2,7 +2,6 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
- namespace: {{ .Release.Namespace | quote }}
name: {{ include "lagoon-logging.logsDispatcher.fullname" . }}
labels:
{{- include "lagoon-logging.logsDispatcher.labels" . | nindent 4 }}
diff --git a/charts/lagoon-logging/templates/logs-dispatcher.env.configmap.yaml b/charts/lagoon-logging/templates/logs-dispatcher.env.configmap.yaml
index 0bb3be47c7..bea40f2150 100644
--- a/charts/lagoon-logging/templates/logs-dispatcher.env.configmap.yaml
+++ b/charts/lagoon-logging/templates/logs-dispatcher.env.configmap.yaml
@@ -1,12 +1,18 @@
apiVersion: v1
kind: ConfigMap
metadata:
- namespace: {{ .Release.Namespace | quote }}
name: {{ include "lagoon-logging.logsDispatcher.fullname" . }}-env
labels:
{{- include "lagoon-logging.logsDispatcher.labels" . | nindent 4 }}
data:
- CLUSTER_NAME: {{ required "A valid .Values.clusterName required!" .Values.clusterName }}
- ELASTICSEARCH_HOST: {{ required "A valid .Values.elasticsearchHost required!" .Values.elasticsearchHost }}
- ELASTICSEARCH_HOST_PORT: {{ .Values.elasticsearchHostPort | quote }}
- ELASTICSEARCH_SCHEME: {{ .Values.elasticsearchScheme }}
+ CLUSTER_NAME: >-
+ {{ required "A valid .Values.clusterName required!" .Values.clusterName }}
+ LOGS_FORWARD_HOST: >-
+ {{ required "A valid .Values.forward.host required!" .Values.forward.host }}
+ LOGS_FORWARD_HOSTNAME: >-
+ {{ coalesce .Values.forward.hostName .Values.forward.host }}
+ LOGS_FORWARD_HOST_PORT: >-
+ {{ default "24224" .Values.forward.hostPort }}
+{{- if .Values.lagoonLogs.enabled }}
+ RABBITMQ_HOST: {{ required "If .Values.lagoonLogs.enabled is true, a valid .Values.lagoonLogs.rabbitMQHost required!" .Values.lagoonLogs.rabbitMQHost }}
+{{- end }}
diff --git a/charts/lagoon-logging/templates/logs-dispatcher.fluent-conf.configmap.yaml b/charts/lagoon-logging/templates/logs-dispatcher.fluent-conf.configmap.yaml
index f8df6425cc..0489d27405 100644
--- a/charts/lagoon-logging/templates/logs-dispatcher.fluent-conf.configmap.yaml
+++ b/charts/lagoon-logging/templates/logs-dispatcher.fluent-conf.configmap.yaml
@@ -1,7 +1,6 @@
apiVersion: v1
kind: ConfigMap
metadata:
- namespace: {{ .Release.Namespace | quote }}
name: {{ include "lagoon-logging.logsDispatcher.fullname" . }}-fluent-conf
labels:
{{- include "lagoon-logging.logsDispatcher.labels" . | nindent 4 }}
@@ -12,21 +11,94 @@ data:
workers 2
+ # prometheus metrics
+
+ @type prometheus
+
+
+ @type prometheus_monitor
+
+
+ @type prometheus_output_monitor
+
+
+ # container logs collected by the logging-operator
- # fluentd parameters
@type forward
- @id container
- tag "lagoon.#{ENV['CLUSTER_NAME']}.raw"
+ @id in_container
+ tag process.container
+
+
+ # application logs emitted by the lagoon_logs drupal module
+
+ @type udp
+ @id in_application
+ tag "lagoon.#{ENV['CLUSTER_NAME']}.application"
+ port 5140
+ # max IPv4 UDP payload size
+ message_length_limit 65507
+
+ @type json
+
+
+
+ # router logs emitted by the openshift routers
+
+ @type syslog
+ @id in_router_openshift
+ tag "lagoon.#{ENV['CLUSTER_NAME']}.router.openshift"
+ emit_unmatched_lines true
+ # syslog parameters
+ port 5141
+ severity_key severity
+ # max IPv4 UDP payload size
+ message_length_limit 65507
+
+ @type regexp
+ # parse HTTP logs based on the haproxy documentation
+ # As per the documentation here
+ # https://www.haproxy.com/documentation/hapee/1-8r1/onepage/#8.2.3, except
+ # we split the frontend_name into its constituent parts as used by
+ # openshift.
+ expression /^.{,15} (?<process>\w+)\[(?<pid>\d+)\]: (?<client_ip>\S+):(?<client_port>\d+) \[(?<request_date>\S+)\] (?<frontend_name>\S+) (?<backend_name>\S+):(?<server_name>(?<kubernetes_namespace_name>\S+):\S+\/pod:(?<kubernetes_pod_name>[^:]+):(?<kubernetes_container_name>[^:]+)):\S+ (?<TR>[\d-]+)\/(?<Tw>[\d-]+)\/(?<Tc>[\d-]+)\/(?<Tr>[\d-]+)\/(?<Ta>[\d-]+) (?<status_code>\d+) (?<bytes_read>\d+) (?<captured_request_cookie>\S+) (?<captured_response_cookie>\S+) (?<termination_state>\S+) (?<actconn>\d+)\/(?<feconn>\d+)\/(?<beconn>\d+)\/(?<srv_conn>\d+)\/(?<retries>\d+) (?<srv_queue>\d+)\/(?<backend_queue>\d+) "(?<request_header>.+)"/
+ time_key request_date
+ time_format %d/%b/%Y:%T.%L
+ types pid:integer,client_port:integer,TR:integer,Tw:integer,Tc:integer,Tr:integer,Ta:integer,bytes_read:integer,actconn:integer,feconn:integer,beconn:integer,srv_conn:integer,retries:integer,srv_queue:integer,backend_queue:integer
+
- # relabel router logs
+ #
+ # optional sources which can be enabled in the chart
+ #
+ @include source.d/*.conf
+
+ #
+ # capture unmatched openshift router logs
+ #
+
+ @type record_modifier
+
+ index_name router-logs-openshift_parse_error_${ENV['CLUSTER_NAME']}-${Time.at(time).strftime("%Y.%m")}
+
+
+
+ #
+ # pre-processing for nginx_router logs
+ #
+ # the reason for having the two match blocks is that we have two checks
+ # to distinguish nginx_router logs:
+ # * app label is "nginx-ingress"
+ # * namespace is "syn-nginx-ingress"
+ # if either of those checks fails the message is tagged as a regular
+ # container log.
+ #
# check app name first. if app name didn't match, set tag to container log.
-
+
@type rewrite_tag_filter
key $.kubernetes.labels.app
pattern ^nginx-ingress$
- tag "app-nginx-ingress"
+ tag "process.app_nginx_ingress"
invert true
@@ -37,12 +109,12 @@ data:
# check namespace_name. if it is okay too, tag as router log.
# if namespace didn't match, set tag to container log.
-
+
@type rewrite_tag_filter
key $.kubernetes.namespace_name
pattern ^syn-nginx-ingress$
- tag "lagoon.#{ENV['CLUSTER_NAME']}.router"
+ tag "lagoon.#{ENV['CLUSTER_NAME']}.router.nginx"
invert true
@@ -52,135 +124,203 @@ data:
- # strip the duplicated log field from router logs
-
+ #
+ # process container logs
+ #
+ # restructure so the kubernetes_metadata plugin can find the keys it needs
+
+ @type record_modifier
+ remove_keys _dummy_
+
+ _dummy_ ${record['docker'] = {'container_id' => "#{record.dig('kubernetes','docker_id')}"}; nil}
+
+
+ # enrich with k8s metadata (will get the namespace labels)
+
+ @type kubernetes_metadata
+ @log_level warn
+ skip_container_metadata true
+ skip_master_url true
+
+ # strip the duplicate information so that it doesn't appear in logs
+
+ @type record_modifier
+ remove_keys docker
+
+ # add the index name
+
+ @type record_modifier
+
+ index_name container-logs-${record.dig('kubernetes','namespace_labels','lagoon_sh/project') || "#{record.dig('kubernetes','namespace_name') || 'unknown_project'}_#{ENV['CLUSTER_NAME']}"}-_-${record.dig('kubernetes','namespace_labels','lagoon_sh/environmentType') || "unknown_environmenttype"}-_-${Time.at(time).strftime("%Y.%m")}
+
+
+ # post-process to try to eke some more structure out of the logs.
+ # the last "format none" block is a catch-all for unmatched messages.
+
+ @type parser
+ key_name log
+ reserve_data true
+
+ @type multi_format
+
+ format nginx
+ types size:integer
+
+
+ format none
+
+
+
+ # some container logs have a duplicate message field for some reason, so
+ # remove that.
+
+ @type record_modifier
+ remove_keys message
+
+
+ #
+ # process application logs
+ #
+ # restructure so the kubernetes_metadata plugin can find the keys it needs
+
@type record_modifier
- remove_keys log
+ remove_keys _dummy_,type
+
+ _dummy_ ${record['openshift_project'] = record['type']; record['kubernetes'] = {'namespace_name' => record['type'], 'pod_name' => record['host'], 'container_name' => 'unknown'}; record['docker'] = {'container_id' => "#{record['type']}_#{record['host']}"}; nil}
+
+
+ # enrich with k8s metadata (will get the namespace labels)
+
+ @type kubernetes_metadata
+ @log_level warn
+ skip_container_metadata true
+ skip_master_url true
+
+ # add the index_name
+
+ @type record_modifier
+
+ index_name application-logs-${record.dig('kubernetes','namespace_labels','lagoon_sh/project') || "#{record.dig('kubernetes','namespace_name') || 'unknown_project'}_#{ENV['CLUSTER_NAME']}"}-_-${record.dig('kubernetes','namespace_labels','lagoon_sh/environmentType') || "unknown_environmenttype"}-_-${Time.at(time).strftime("%Y.%m")}
+
+
+ # strip the kubernetes data as it's duplicated in container/router logs and
+ # not really relevant for application logs
+
+ @type record_modifier
+ remove_keys docker,kubernetes
- # logs are now tagged appropriately, so route to labels based on the tag
-
- @type route
- # route _all_ logs container logs (even nginx-ingress) to @container
-
- copy
- @label @container
-
- # route just the router logs to @router
-
- copy
- @label @router
-
-
+ #
+ # process nginx_router logs
+ #
+ # Strip the nginx-ingress namespace info and add enough dummy information
+ # so that kubernetes_metadata plugin can get the namespace labels.
+ # Also strip the duplicated log field.
+
+ @type record_modifier
+ remove_keys _dummy_,log
+
+ _dummy_ ${record['kubernetes'] = {'namespace_name' => record['namespace'], 'pod_name' => 'nopod', 'container_name' => 'nocontainer'}; record['docker'] = {'container_id' => "#{record['namespace']}_#{record['ingress_name']}"}; nil}
+
+
+ # enrich with k8s metadata (will get the namespace labels)
+
+ @type kubernetes_metadata
+ @log_level warn
+ skip_container_metadata true
+ skip_master_url true
+
+ # strip the dummy information so that it doesn't appear in logs
+
+ @type record_modifier
+ remove_keys _dummy_,docker
+
+ _dummy_ ${record['kubernetes'].delete('pod_name'); record['kubernetes'].delete('container_name'); record['kubernetes'].delete('pod_id'); nil}
+
+
+
+ #
+ # process openshift router logs
+ #
+ # restructure the record enough for the kubernetes_metadata plugin to get
+ # namespace labels
+
+ @type record_modifier
+ remove_keys _dummy_,kubernetes_namespace_name,kubernetes_pod_name,kubernetes_container_name,docker_container_id
+
+ _dummy_ ${record['kubernetes'] = {'namespace_name' => record['kubernetes_namespace_name'], 'pod_name' => record['kubernetes_pod_name'], 'container_name' => record['kubernetes_container_name']}; record['docker'] = {'container_id' => record['docker_container_id']}; nil}
+
+
+ # enrich with k8s metadata
+
+ @type kubernetes_metadata
+ @log_level warn
+ skip_container_metadata true
+ skip_master_url true
+
-
+ #
+ # add the router index_name
+ #
+
+ @type record_modifier
+
+ index_name router-logs-${record.dig('kubernetes','namespace_labels','lagoon_sh/project') || "#{record.dig('kubernetes','namespace_name') || 'unknown_project'}_#{ENV['CLUSTER_NAME']}"}-_-${record.dig('kubernetes','namespace_labels','lagoon_sh/environmentType') || "unknown_environmenttype"}-_-${Time.at(time).strftime("%Y.%m")}
+
+
-
+ #
+ # add the lagoon index_name
+ # the source for this tag is included when lagoonLogs.enabled is true
+ #
+
+ @type record_modifier
+
+ index_name lagoon-logs-${record['project']}-_-all_environments-_-${Time.at(time).strftime("%Y.%m")}
+
+
-
+
+ @include store.d/*.conf
+
diff --git a/charts/lagoon-logging/templates/logs-dispatcher.secret.yaml b/charts/lagoon-logging/templates/logs-dispatcher.secret.yaml
index eabf555307..bd128655e8 100644
--- a/charts/lagoon-logging/templates/logs-dispatcher.secret.yaml
+++ b/charts/lagoon-logging/templates/logs-dispatcher.secret.yaml
@@ -2,9 +2,31 @@ apiVersion: v1
kind: Secret
type: Opaque
metadata:
- namespace: {{ .Release.Namespace | quote }}
- name: {{ include "lagoon-logging.logsDispatcher.fullname" . }}
+ name: {{ include "lagoon-logging.logsDispatcher.fullname" . }}-env
labels:
{{- include "lagoon-logging.logsDispatcher.labels" . | nindent 4 }}
stringData:
- LOGSDB_ADMIN_PASSWORD: {{ required "A valid .Values.elasticsearchAdminPassword required!" .Values.elasticsearchAdminPassword }}
+ LOGS_FORWARD_USERNAME: {{ required "A valid .Values.forward.username required!" .Values.forward.username }}
+ LOGS_FORWARD_PASSWORD: {{ required "A valid .Values.forward.password required!" .Values.forward.password }}
+ # self hostname should match the CN on the client certificate
+ LOGS_FORWARD_SELF_HOSTNAME: {{ required "A valid .Values.forward.selfHostname required!" .Values.forward.selfHostname }}
+ LOGS_FORWARD_SHARED_KEY: {{ required "A valid .Values.forward.sharedKey required!" .Values.forward.sharedKey }}
+{{- if .Values.lagoonLogs.enabled }}
+ RABBITMQ_USER: {{ required "If .Values.lagoonLogs.enabled is true, a valid .Values.lagoonLogs.rabbitMQUser required!" .Values.lagoonLogs.rabbitMQUser }}
+ RABBITMQ_PASSWORD: {{ required "If .Values.lagoonLogs.enabled is true, a valid .Values.lagoonLogs.rabbitMQPassword required!" .Values.lagoonLogs.rabbitMQPassword }}
+{{- end }}
+---
+apiVersion: v1
+kind: Secret
+type: Opaque
+metadata:
+ name: {{ include "lagoon-logging.logsDispatcher.fullname" . }}-tls
+ labels:
+ {{- include "lagoon-logging.logsDispatcher.labels" . | nindent 4 }}
+stringData:
+ ca.crt: |
+ {{- required "A valid .Values.tls.caCert required!" .Values.tls.caCert | nindent 4 }}
+ client.crt: |
+ {{- required "A valid .Values.tls.clientCert required!" .Values.tls.clientCert | nindent 4 }}
+ client.key: |
+ {{- required "A valid .Values.tls.clientKey required!" .Values.tls.clientKey | nindent 4 }}
diff --git a/charts/lagoon-logging/templates/logs-dispatcher.service.yaml b/charts/lagoon-logging/templates/logs-dispatcher.service.yaml
index 2503031345..6a777b09e5 100644
--- a/charts/lagoon-logging/templates/logs-dispatcher.service.yaml
+++ b/charts/lagoon-logging/templates/logs-dispatcher.service.yaml
@@ -1,7 +1,6 @@
apiVersion: v1
kind: Service
metadata:
- namespace: {{ .Release.Namespace | quote }}
name: {{ include "lagoon-logging.logsDispatcher.fullname" . }}
labels:
{{- include "lagoon-logging.logsDispatcher.labels" . | nindent 4 }}
@@ -13,8 +12,16 @@ spec:
- name: forward
port: 24224
protocol: TCP
- targetPort: 24224
+ targetPort: forward
- name: metrics
port: 24231
protocol: TCP
- targetPort: 24231
+ targetPort: metrics
+ - name: application
+ port: 5140
+ protocol: UDP
+ targetPort: application
+ - name: syslog-router
+ port: 5141
+ protocol: UDP
+ targetPort: syslog-router
diff --git a/charts/lagoon-logging/templates/logs-dispatcher.serviceaccount.yaml b/charts/lagoon-logging/templates/logs-dispatcher.serviceaccount.yaml
index 5ce527bb83..35e99d41f2 100644
--- a/charts/lagoon-logging/templates/logs-dispatcher.serviceaccount.yaml
+++ b/charts/lagoon-logging/templates/logs-dispatcher.serviceaccount.yaml
@@ -2,7 +2,6 @@
apiVersion: v1
kind: ServiceAccount
metadata:
- namespace: {{ .Release.Namespace | quote }}
name: {{ include "lagoon-logging.logsDispatcher.serviceAccountName" . }}
labels:
{{- include "lagoon-logging.logsDispatcher.labels" . | nindent 4 }}
diff --git a/charts/lagoon-logging/templates/logs-dispatcher.source-lagoon.configmap.yaml b/charts/lagoon-logging/templates/logs-dispatcher.source-lagoon.configmap.yaml
new file mode 100644
index 0000000000..b941adf64b
--- /dev/null
+++ b/charts/lagoon-logging/templates/logs-dispatcher.source-lagoon.configmap.yaml
@@ -0,0 +1,26 @@
+{{- if .Values.lagoonLogs.enabled }}
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: {{ include "lagoon-logging.logsDispatcher.fullname" . }}-source-lagoon
+ labels:
+ {{- include "lagoon-logging.logsDispatcher.labels" . | nindent 4 }}
+data:
+ lagoon.conf: |
+ # lagoon logs from rabbitmq
+
+ @type rabbitmq
+ @id in_lagoon
+ tag "lagoon.#{ENV['CLUSTER_NAME']}.lagoon"
+ host "#{ENV['RABBITMQ_HOST']}"
+ user "#{ENV['RABBITMQ_USER']}"
+ pass "#{ENV['RABBITMQ_PASSWORD']}"
+ # logstash default vhost
+ vhost /
+ exchange lagoon-logs
+ exchange_type direct
+ routing_key ""
+ queue logs-dispatcher
+ durable true
+
+{{- end }}
diff --git a/charts/lagoon-logging/templates/logs-dispatcher.statefulset.yaml b/charts/lagoon-logging/templates/logs-dispatcher.statefulset.yaml
index 6c576c9166..cea31ef5ec 100644
--- a/charts/lagoon-logging/templates/logs-dispatcher.statefulset.yaml
+++ b/charts/lagoon-logging/templates/logs-dispatcher.statefulset.yaml
@@ -1,7 +1,6 @@
apiVersion: apps/v1
kind: StatefulSet
metadata:
- namespace: {{ .Release.Namespace | quote }}
name: {{ include "lagoon-logging.logsDispatcher.fullname" . }}
labels:
{{- include "lagoon-logging.logsDispatcher.labels" . | nindent 4 }}
@@ -13,8 +12,17 @@ spec:
{{- include "lagoon-logging.logsDispatcher.selectorLabels" . | nindent 6 }}
template:
metadata:
- {{- with .Values.logsDispatcher.podAnnotations }}
annotations:
+ checksum/secret: {{ include (print $.Template.BasePath "/logs-dispatcher.secret.yaml") . | sha256sum }}
+ checksum/env-configmap: {{ include (print $.Template.BasePath "/logs-dispatcher.env.configmap.yaml") . | sha256sum }}
+ checksum/fluent-conf-configmap: {{ include (print $.Template.BasePath "/logs-dispatcher.fluent-conf.configmap.yaml") . | sha256sum }}
+ {{- if .Values.exportLogs }}
+ checksum/store-configmap: {{ include (print $.Template.BasePath "/logs-dispatcher.store.configmap.yaml") . | sha256sum }}
+ {{- end }}
+ {{- if .Values.lagoonLogs.enabled }}
+ checksum/source-lagoon-configmap: {{ include (print $.Template.BasePath "/logs-dispatcher.source-lagoon.configmap.yaml") . | sha256sum }}
+ {{- end }}
+ {{- with .Values.logsDispatcher.podAnnotations }}
{{- toYaml . | nindent 8 }}
{{- end }}
labels:
@@ -47,6 +55,12 @@ spec:
- containerPort: 24231
protocol: TCP
name: metrics
+ - containerPort: 5140
+ protocol: UDP
+ name: application
+ - containerPort: 5141
+ protocol: UDP
+ name: syslog-router
readinessProbe:
tcpSocket:
port: 24224
@@ -59,11 +73,21 @@ spec:
- mountPath: /fluentd/etc/fluent.conf
name: {{ include "lagoon-logging.logsDispatcher.fullname" . }}-fluent-conf
subPath: fluent.conf
+ {{- if .Values.exportLogs }}
+ - mountPath: /fluentd/etc/store.d
+ name: {{ include "lagoon-logging.logsDispatcher.fullname" . }}-store
+ {{- end }}
+ {{- if .Values.lagoonLogs.enabled }}
+ - mountPath: /fluentd/etc/source.d
+ name: {{ include "lagoon-logging.logsDispatcher.fullname" . }}-source-lagoon
+ {{- end }}
- mountPath: /fluentd/buffer/
name: buffer
+ - mountPath: /fluentd/tls/
+ name: {{ include "lagoon-logging.logsDispatcher.fullname" . }}-tls
envFrom:
- secretRef:
- name: {{ include "lagoon-logging.logsDispatcher.fullname" . }}
+ name: {{ include "lagoon-logging.logsDispatcher.fullname" . }}-env
- configMapRef:
name: {{ include "lagoon-logging.logsDispatcher.fullname" . }}-env
resources:
@@ -88,6 +112,22 @@ spec:
path: fluent.conf
name: {{ include "lagoon-logging.logsDispatcher.fullname" . }}-fluent-conf
name: {{ include "lagoon-logging.logsDispatcher.fullname" . }}-fluent-conf
+ {{- if .Values.exportLogs }}
+ - configMap:
+ defaultMode: 420
+ name: {{ include "lagoon-logging.logsDispatcher.fullname" . }}-store
+ name: {{ include "lagoon-logging.logsDispatcher.fullname" . }}-store
+ {{- end }}
+ {{- if .Values.lagoonLogs.enabled }}
+ - configMap:
+ defaultMode: 420
+ name: {{ include "lagoon-logging.logsDispatcher.fullname" . }}-source-lagoon
+ name: {{ include "lagoon-logging.logsDispatcher.fullname" . }}-source-lagoon
+ {{- end }}
+ - secret:
+ defaultMode: 420
+ secretName: {{ include "lagoon-logging.logsDispatcher.fullname" . }}-tls
+ name: {{ include "lagoon-logging.logsDispatcher.fullname" . }}-tls
volumeClaimTemplates:
- metadata:
name: buffer
diff --git a/charts/lagoon-logging/templates/logs-dispatcher.store.configmap.yaml b/charts/lagoon-logging/templates/logs-dispatcher.store.configmap.yaml
new file mode 100644
index 0000000000..f3bb1cd1c8
--- /dev/null
+++ b/charts/lagoon-logging/templates/logs-dispatcher.store.configmap.yaml
@@ -0,0 +1,10 @@
+{{- if .Values.exportLogs }}
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: {{ include "lagoon-logging.logsDispatcher.fullname" . }}-store
+ labels:
+ {{- include "lagoon-logging.logsDispatcher.labels" . | nindent 4 }}
+data:
+ {{- toYaml .Values.exportLogs | nindent 2 }}
+{{- end }}
diff --git a/charts/lagoon-logging/templates/logs-tee.deployment.yaml b/charts/lagoon-logging/templates/logs-tee.deployment.yaml
new file mode 100644
index 0000000000..bafcb90776
--- /dev/null
+++ b/charts/lagoon-logging/templates/logs-tee.deployment.yaml
@@ -0,0 +1,132 @@
+{{- if .Values.logsTeeRouter.enabled }}
+---
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ name: {{ include "lagoon-logging.logsTeeRouter.fullname" . }}
+ labels:
+ {{- include "lagoon-logging.logsTeeRouter.labels" . | nindent 4 }}
+spec:
+ replicas: {{ .Values.logsTeeRouter.replicaCount }}
+ selector:
+ matchLabels:
+ {{- include "lagoon-logging.logsTeeRouter.selectorLabels" . | nindent 6 }}
+ template:
+ metadata:
+ labels:
+ {{- include "lagoon-logging.logsTeeRouter.selectorLabels" . | nindent 8 }}
+ spec:
+ serviceAccountName: {{ include "lagoon-logging.logsTeeRouter.serviceAccountName" . }}
+ securityContext:
+ {{- toYaml .Values.logsTeeRouter.podSecurityContext | nindent 8 }}
+ containers:
+ - name: socat
+ securityContext:
+ {{- toYaml .Values.logsTeeRouter.securityContext | nindent 12 }}
+ image: "{{ .Values.logsTeeRouter.image.repository }}:{{ .Values.logsTeeRouter.image.tag | default .Chart.AppVersion }}"
+ imagePullPolicy: {{ .Values.logsTeeRouter.image.pullPolicy }}
+ args:
+ # UDP port in
+ - {{ .Values.logsTeeRouter.listenPort | quote }}
+ # UDP endpoints out
+ - {{ include "lagoon-logging.logsDispatcher.fullname" . }}.{{ .Release.Namespace }}.svc.cluster.local:5141
+ {{- with .Values.logsTeeRouter.endpoints }}
+ {{- toYaml . | nindent 10 }}
+ {{- end }}
+ ports:
+ - containerPort: {{ .Values.logsTeeRouter.listenPort }}
+ protocol: UDP
+ name: syslog-router
+ readinessProbe:
+ exec:
+ command:
+ - pgrep
+ - socat
+ initialDelaySeconds: 20
+ livenessProbe:
+ exec:
+ command:
+ - pgrep
+ - socat
+ initialDelaySeconds: 120
+ resources:
+ {{- toYaml .Values.logsTeeRouter.resources | nindent 12 }}
+ {{- with .Values.logsTeeRouter.nodeSelector }}
+ nodeSelector:
+ {{- toYaml . | nindent 8 }}
+ {{- end }}
+ {{- with .Values.logsTeeRouter.affinity }}
+ affinity:
+ {{- toYaml . | nindent 8 }}
+ {{- end }}
+ {{- with .Values.logsTeeRouter.tolerations }}
+ tolerations:
+ {{- toYaml . | nindent 8 }}
+ {{- end }}
+{{- end }}
+{{- if .Values.logsTeeApplication.enabled }}
+---
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ name: {{ include "lagoon-logging.logsTeeApplication.fullname" . }}
+ labels:
+ {{- include "lagoon-logging.logsTeeApplication.labels" . | nindent 4 }}
+spec:
+ replicas: {{ .Values.logsTeeApplication.replicaCount }}
+ selector:
+ matchLabels:
+ {{- include "lagoon-logging.logsTeeApplication.selectorLabels" . | nindent 6 }}
+ template:
+ metadata:
+ labels:
+ {{- include "lagoon-logging.logsTeeApplication.selectorLabels" . | nindent 8 }}
+ spec:
+ serviceAccountName: {{ include "lagoon-logging.logsTeeApplication.serviceAccountName" . }}
+ securityContext:
+ {{- toYaml .Values.logsTeeApplication.podSecurityContext | nindent 8 }}
+ containers:
+ - name: socat
+ securityContext:
+ {{- toYaml .Values.logsTeeApplication.securityContext | nindent 12 }}
+ image: "{{ .Values.logsTeeApplication.image.repository }}:{{ .Values.logsTeeApplication.image.tag | default .Chart.AppVersion }}"
+ imagePullPolicy: {{ .Values.logsTeeApplication.image.pullPolicy }}
+ args:
+ # UDP port in
+ - {{ .Values.logsTeeApplication.listenPort | quote }}
+ # UDP endpoints out
+ - {{ include "lagoon-logging.logsDispatcher.fullname" . }}.{{ .Release.Namespace }}.svc.cluster.local:5140
+ {{- with .Values.logsTeeApplication.endpoints }}
+ {{- toYaml . | nindent 10 }}
+ {{- end }}
+ ports:
+ - containerPort: {{ .Values.logsTeeApplication.listenPort }}
+ protocol: UDP
+ name: application
+ readinessProbe:
+ exec:
+ command:
+ - pgrep
+ - socat
+ initialDelaySeconds: 20
+ livenessProbe:
+ exec:
+ command:
+ - pgrep
+ - socat
+ initialDelaySeconds: 120
+ resources:
+ {{- toYaml .Values.logsTeeApplication.resources | nindent 12 }}
+ {{- with .Values.logsTeeApplication.nodeSelector }}
+ nodeSelector:
+ {{- toYaml . | nindent 8 }}
+ {{- end }}
+ {{- with .Values.logsTeeApplication.affinity }}
+ affinity:
+ {{- toYaml . | nindent 8 }}
+ {{- end }}
+ {{- with .Values.logsTeeApplication.tolerations }}
+ tolerations:
+ {{- toYaml . | nindent 8 }}
+ {{- end }}
+{{- end }}
diff --git a/charts/lagoon-logging/templates/logs-tee.service.yaml b/charts/lagoon-logging/templates/logs-tee.service.yaml
new file mode 100644
index 0000000000..6ca5133dd8
--- /dev/null
+++ b/charts/lagoon-logging/templates/logs-tee.service.yaml
@@ -0,0 +1,36 @@
+{{- if .Values.logsTeeRouter.enabled }}
+---
+apiVersion: v1
+kind: Service
+metadata:
+ name: {{ include "lagoon-logging.logsTeeRouter.fullname" . }}
+ labels:
+ {{- include "lagoon-logging.logsTeeRouter.labels" . | nindent 4 }}
+spec:
+ type: ClusterIP
+ selector:
+ {{- include "lagoon-logging.logsTeeRouter.selectorLabels" . | nindent 4 }}
+ ports:
+ - name: syslog-router
+ port: {{ .Values.logsTeeRouter.listenPort }}
+ protocol: UDP
+ targetPort: syslog-router
+{{- end }}
+{{- if .Values.logsTeeApplication.enabled }}
+---
+apiVersion: v1
+kind: Service
+metadata:
+ name: {{ include "lagoon-logging.logsTeeApplication.fullname" . }}
+ labels:
+ {{- include "lagoon-logging.logsTeeApplication.labels" . | nindent 4 }}
+spec:
+ type: ClusterIP
+ selector:
+ {{- include "lagoon-logging.logsTeeApplication.selectorLabels" . | nindent 4 }}
+ ports:
+ - name: application
+ port: {{ .Values.logsTeeApplication.listenPort }}
+ protocol: UDP
+ targetPort: application
+{{- end }}
diff --git a/charts/lagoon-logging/values.yaml b/charts/lagoon-logging/values.yaml
index 8ee43e35ab..18d44d330f 100644
--- a/charts/lagoon-logging/values.yaml
+++ b/charts/lagoon-logging/values.yaml
@@ -9,13 +9,13 @@ logsDispatcher:
name: logs-dispatcher
- replicaCount: 2
+ replicaCount: 3
image:
repository: amazeeiolagoon/logs-dispatcher
pullPolicy: Always
# Overrides the image tag whose default is the chart version.
- tag: v1-5-0
+ tag: master
serviceAccount:
# Specifies whether a service account should be created
@@ -58,34 +58,259 @@ logsDispatcher:
affinity: {}
+logsTeeRouter:
+
+ enabled: false
+
+ name: logs-tee-router
+
+ replicaCount: 3
+
+ image:
+ repository: amazeeiolagoon/logs-tee
+ pullPolicy: Always
+ # Overrides the image tag whose default is the chart version.
+ tag: master
+
+ serviceAccount:
+ # Specifies whether a service account should be created
+ create: false
+ # Annotations to add to the service account
+ annotations: {}
+ # The name of the service account to use.
+ # If not set and create is true, a name is generated using the fullname
+ # template
+ # If this value is set, the serviceAccount named must have clusterrole
+ # view.
+ name: ""
+
+ listenPort: 5140
+ # the logs-dispatcher endpoint is automatically added to this list
+ # define other endpoints here
+ #endpoints:
+ #- logs2logs-db.lagoon.svc.cluster.local
+
+ podAnnotations: {}
+
+ podSecurityContext: {}
+ # fsGroup: 2000
+
+ securityContext: {}
+ # capabilities:
+ # drop:
+ # - ALL
+ # readOnlyRootFilesystem: true
+ # runAsNonRoot: true
+ # runAsUser: 1000
+
+ resources: {}
+ # If you want to specify resources, uncomment the following lines, adjust
+ # them as necessary, and remove the curly braces after 'resources:'.
+ # limits:
+ # cpu: 100m
+ # memory: 128Mi
+ # requests:
+ # cpu: 100m
+ # memory: 128Mi
+
+ nodeSelector: {}
+
+ tolerations: []
+
+ affinity: {}
+
+logsTeeApplication:
+
+ enabled: false
+
+ name: logs-tee-application
+
+ replicaCount: 3
+
+ image:
+ repository: amazeeiolagoon/logs-tee
+ pullPolicy: Always
+ # Overrides the image tag whose default is the chart version.
+ tag: master
+
+ serviceAccount:
+ # Specifies whether a service account should be created
+ create: false
+ # Annotations to add to the service account
+ annotations: {}
+ # The name of the service account to use.
+ # If not set and create is true, a name is generated using the fullname
+ # template
+ # If this value is set, the serviceAccount named must have clusterrole
+ # view.
+ name: ""
+
+ listenPort: 5140
+ # the logs-dispatcher endpoint is automatically added to this list
+ # define other endpoints here
+ #endpoints:
+ #- logs2logs-db.lagoon.svc.cluster.local
+
+ podAnnotations: {}
+
+ podSecurityContext: {}
+ # fsGroup: 2000
+
+ securityContext: {}
+ # capabilities:
+ # drop:
+ # - ALL
+ # readOnlyRootFilesystem: true
+ # runAsNonRoot: true
+ # runAsUser: 1000
+
+ resources: {}
+ # If you want to specify resources, uncomment the following lines, adjust
+ # them as necessary, and remove the curly braces after 'resources:'.
+ # limits:
+ # cpu: 100m
+ # memory: 128Mi
+ # requests:
+ # cpu: 100m
+ # memory: 128Mi
+
+ nodeSelector: {}
+
+ tolerations: []
+
+ affinity: {}
+
# Don't collect logs from these namespaces.
# Comment out this field to collect from all namespaces.
excludeNamespaces:
+# k8s
- cattle-prometheus
+- cattle-system
+- dbaas-operator
+- default
+- kube-cleanup-operator
+- kube-node-lease
+- kube-public
- kube-system
+- metrics-server
- syn
+- syn-backup
- syn-cert-manager
-- syn-synsights
- syn-cluster-autoscaler
+- syn-efs-provisioner
+- syn-resource-locker
+- syn-synsights
+# openshift
+- acme-controller
+- appuio-baas-operator
+- appuio-dnsmonitor
+- appuio-espejo
+- appuio-infra
+- appuio-monitoring
+- appuio-pruner
+- appuio-tiller
+- dioscuri-controller
+- kube-service-catalog
+- management-infra
+- monitoring-infra
+- openshift
+- openshift-ansible-service-broker
+- openshift-console
+- openshift-infra
+- openshift-logging
+- openshift-metrics-server
+- openshift-monitoring
+- openshift-node
+- openshift-sdn
+- openshift-web-console
+- tiller
# Configure the cluster output buffer.
# This may require tweaking to handle high volumes of logs.
clusterOutputBuffer:
- flush_thread_count: 256
+ flush_thread_count: 32
timekey: 1m
timekey_wait: 10s
timekey_use_utc: true
-# Elasticsearch output config.
-elasticsearchHostPort: "443"
-elasticsearchScheme: https
-# The values below must be supplied during installation as they have no sane
-# defaults.
-elasticsearchHost: ""
-elasticsearchAdminPassword: ""
-clusterName: ""
-
# chart dependency on logging-operator
logging-operator:
enabled: true
createCustomResource: false
+
+# lagoon logs collection disabled by default. see below for instructions on
+# enabling this.
+lagoonLogs:
+ enabled: false
+
+# The values below must be supplied during installation.
+# Certificates should be provided in PEM format, and are generated as described
+# in the README for the lagoon-logs-concentrator chart.
+# Sample data shown below.
+
+#tls:
+# caCert: |
+# -----BEGIN CERTIFICATE-----
+# ...
+# -----END CERTIFICATE-----
+# clientCert: |
+# -----BEGIN CERTIFICATE-----
+# ...
+# -----END CERTIFICATE-----
+# clientKey: |
+# -----BEGIN EC PRIVATE KEY-----
+# ...
+# -----END EC PRIVATE KEY-----
+#
+#forward:
+# username: "example1"
+# password: "securepass"
+# host: "203.0.113.9"
+# # hostName is optional - it is used for TLS verification for when host is an
+# # IP address.
+# # NOTE: if host is _not_ an IP address and it is presents a certificate
+# # without that hostname, you'll also need to set tlsVerifyHostname to
+# # false. The hostName field does _not_ override the host field for TLS
+# # verification when host is not an IP address.
+# hostName: "logs.server.example.com"
+# # tlsVerifyHostname: false
+# # hostPort is optional, default 24224
+# hostPort: "24224"
+# selfHostname: "logs-dispatcher.example1.lagoon.example.com"
+# sharedKey: "supersecurekey"
+#
+#clusterName: "example1"
+
+# Optional lagoon logs configuration. This should be enabled on a full lagoon
+# install, but not in a lagoon-remote install. If enabled, the rabbitMQ* values
+# are required.
+#
+#lagoonLogs:
+# enabled: true
+#  rabbitMQHost: broker.example.com
+#  rabbitMQUser: secureuser
+#  rabbitMQPassword: securepass
+
+# Optional namespace selection. Logs will _only_ be collected from these
+# namespaces. You probably don't want to configure this, except for debugging.
+#
+#selectNamespaces:
+#- drupal-example
+
+# Optional log export configuration
+
+#exportLogs:
+#  s3.conf: |
+#    <match lagoon.**>
+#      @type s3
+#      ...
+#    </match>
+#  cloudwatch.conf: |
+#    <match lagoon.**>
+#      @type cloudwatch_logs
+#      ...
+#    </match>
+#
+# Openshift only!
+
+#fluentbitPrivileged: true
diff --git a/charts/lagoon-logs-concentrator-0.2.0.tgz b/charts/lagoon-logs-concentrator-0.2.0.tgz
new file mode 100644
index 0000000000..f85f353c1b
Binary files /dev/null and b/charts/lagoon-logs-concentrator-0.2.0.tgz differ
diff --git a/charts/lagoon-logs-concentrator-0.2.1.tgz b/charts/lagoon-logs-concentrator-0.2.1.tgz
new file mode 100644
index 0000000000..9eba50868b
Binary files /dev/null and b/charts/lagoon-logs-concentrator-0.2.1.tgz differ
diff --git a/charts/lagoon-logs-concentrator/.helmignore b/charts/lagoon-logs-concentrator/.helmignore
new file mode 100644
index 0000000000..0e8a0eb36f
--- /dev/null
+++ b/charts/lagoon-logs-concentrator/.helmignore
@@ -0,0 +1,23 @@
+# Patterns to ignore when building packages.
+# This supports shell glob matching, relative path matching, and
+# negation (prefixed with !). Only one pattern per line.
+.DS_Store
+# Common VCS dirs
+.git/
+.gitignore
+.bzr/
+.bzrignore
+.hg/
+.hgignore
+.svn/
+# Common backup files
+*.swp
+*.bak
+*.tmp
+*.orig
+*~
+# Various IDEs
+.project
+.idea/
+*.tmproj
+.vscode/
diff --git a/charts/lagoon-logs-concentrator/Chart.yaml b/charts/lagoon-logs-concentrator/Chart.yaml
new file mode 100644
index 0000000000..0fdc4d2301
--- /dev/null
+++ b/charts/lagoon-logs-concentrator/Chart.yaml
@@ -0,0 +1,23 @@
+apiVersion: v2
+name: lagoon-logs-concentrator
+description: A Helm chart for Kubernetes which installs the Lagoon logs-concentrator service.
+
+# A chart can be either an 'application' or a 'library' chart.
+#
+# Application charts are a collection of templates that can be packaged into versioned archives
+# to be deployed.
+#
+# Library charts provide useful utilities or functions for the chart developer. They're included as
+# a dependency of application charts to inject those utilities and functions into the rendering
+# pipeline. Library charts do not define any templates and therefore cannot be deployed.
+type: application
+
+# This is the chart version. This version number should be incremented each time you make changes
+# to the chart and its templates, including the app version.
+# Versions are expected to follow Semantic Versioning (https://semver.org/)
+version: 0.2.1
+
+# This is the version number of the application being deployed. This version number should be
+# incremented each time you make changes to the application. Versions are not expected to
+# follow Semantic Versioning. They should reflect the version the application is using.
+appVersion: 1.16.0
diff --git a/charts/lagoon-logs-concentrator/README.md b/charts/lagoon-logs-concentrator/README.md
new file mode 100644
index 0000000000..4335be7c4f
--- /dev/null
+++ b/charts/lagoon-logs-concentrator/README.md
@@ -0,0 +1,53 @@
+# Logs Concentrator
+
+This service collects logs from logs-dispatchers (both local and remote) using
+fluentd's forward protocol, and sends them to Elasticsearch.
+
+## Configuration
+
+See the commented sample configuration at the end of `values.yaml`.
+
+## TLS
+
+Clients connect to this service via TLS. Mutual TLS authentication is performed by the client and server.
+
+Important notes:
+
+* We run our own CA since the in-cluster CA signs certificates with only one year expiry.
+* The instructions below require [cfssl](https://github.com/cloudflare/cfssl).
+* Refer to [this documentation](https://coreos.com/os/docs/latest/generate-self-signed-certificates.html) for further details.
+
+### Generate a CA certificate
+
+This is only required the first time you set up this chart.
+
+Edit the `ca-csr.json` as required and run this command:
+
+```
+cfssl gencert -initca ca-csr.json | cfssljson -bare ca -
+rm ca.csr
+```
+
+You'll end up with `ca-key.pem` and `ca.pem`, which are the CA key and certificate. Store these somewhere safe, they'll be used to generate all future certificates.
+
+### Generate a server certificate
+
+This will be the certificate used by the `logs-concentrator`.
+
+Edit the `server.json` as required and run this command:
+
+```
+cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=server server.json | cfssljson -bare server
+rm server.csr
+```
+
+### Generate a client certificate
+
+This will be the certificate used by the `lagoon-logging` chart's `logs-dispatcher`.
+
+Edit the `client.json` as required and run this command:
+
+```
+cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=client client.json | cfssljson -bare client
+rm client.csr
+```
diff --git a/charts/lagoon-logs-concentrator/ca-config.json b/charts/lagoon-logs-concentrator/ca-config.json
new file mode 100644
index 0000000000..213ea49e67
--- /dev/null
+++ b/charts/lagoon-logs-concentrator/ca-config.json
@@ -0,0 +1,25 @@
+{
+ "signing": {
+ "default": {
+ "expiry": "87600h"
+ },
+ "profiles": {
+ "server": {
+ "expiry": "87600h",
+ "usages": [
+ "signing",
+ "key encipherment",
+ "server auth"
+ ]
+ },
+ "client": {
+ "expiry": "87600h",
+ "usages": [
+ "signing",
+ "key encipherment",
+ "client auth"
+ ]
+ }
+ }
+ }
+}
diff --git a/charts/lagoon-logs-concentrator/ca-csr.json b/charts/lagoon-logs-concentrator/ca-csr.json
new file mode 100644
index 0000000000..91122dfc61
--- /dev/null
+++ b/charts/lagoon-logs-concentrator/ca-csr.json
@@ -0,0 +1,13 @@
+{
+ "CN": "logs-ca.cluster1.example.com",
+ "hosts": [
+ "logs-ca.cluster1.example.com"
+ ],
+ "key": {
+ "algo": "ecdsa",
+ "size": 256
+ },
+ "ca": {
+ "expiry": "87600h"
+ }
+}
diff --git a/charts/lagoon-logs-concentrator/client.json b/charts/lagoon-logs-concentrator/client.json
new file mode 100644
index 0000000000..4813dad0cc
--- /dev/null
+++ b/charts/lagoon-logs-concentrator/client.json
@@ -0,0 +1,10 @@
+{
+ "hosts": [
+ "logs-dispatcher.cluster2.example.com"
+ ],
+ "CN": "logs-dispatcher.cluster2.example.com",
+ "key": {
+ "algo": "ecdsa",
+ "size": 256
+ }
+}
diff --git a/charts/lagoon-logs-concentrator/server.json b/charts/lagoon-logs-concentrator/server.json
new file mode 100644
index 0000000000..326e3580a0
--- /dev/null
+++ b/charts/lagoon-logs-concentrator/server.json
@@ -0,0 +1,10 @@
+{
+ "hosts": [
+ "logs-concentrator.cluster1.example.com"
+ ],
+ "CN": "logs-concentrator.cluster1.example.com",
+ "key": {
+ "algo": "ecdsa",
+ "size": 256
+ }
+}
diff --git a/charts/lagoon-logs-concentrator/templates/NOTES.txt b/charts/lagoon-logs-concentrator/templates/NOTES.txt
new file mode 100644
index 0000000000..a92a735533
--- /dev/null
+++ b/charts/lagoon-logs-concentrator/templates/NOTES.txt
@@ -0,0 +1,5 @@
+Thank you for installing {{ .Chart.Name }}.
+
+Your release is named {{ .Release.Name }}.
+
+Your logs are now being sent to {{ default "http" .Values.elasticsearchScheme }}://{{ .Values.elasticsearchHost }}:{{ default "9200" .Values.elasticsearchHostPort }}
diff --git a/charts/lagoon-logs-concentrator/templates/_helpers.tpl b/charts/lagoon-logs-concentrator/templates/_helpers.tpl
new file mode 100644
index 0000000000..e9dfc9e1f8
--- /dev/null
+++ b/charts/lagoon-logs-concentrator/templates/_helpers.tpl
@@ -0,0 +1,63 @@
+{{/* vim: set filetype=mustache: */}}
+{{/*
+Expand the name of the chart.
+*/}}
+{{- define "lagoon-logs-concentrator.name" -}}
+{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }}
+{{- end }}
+
+{{/*
+Create a default fully qualified app name.
+We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
+If release name contains chart name it will be used as a full name.
+*/}}
+{{- define "lagoon-logs-concentrator.fullname" -}}
+{{- if .Values.fullnameOverride }}
+{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }}
+{{- else }}
+{{- $name := default .Chart.Name .Values.nameOverride }}
+{{- if contains $name .Release.Name }}
+{{- .Release.Name | trunc 63 | trimSuffix "-" }}
+{{- else }}
+{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }}
+{{- end }}
+{{- end }}
+{{- end }}
+
+{{/*
+Create chart name and version as used by the chart label.
+*/}}
+{{- define "lagoon-logs-concentrator.chart" -}}
+{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }}
+{{- end }}
+
+{{/*
+Common labels
+*/}}
+{{- define "lagoon-logs-concentrator.labels" -}}
+helm.sh/chart: {{ include "lagoon-logs-concentrator.chart" . }}
+{{ include "lagoon-logs-concentrator.selectorLabels" . }}
+{{- if .Chart.AppVersion }}
+app.kubernetes.io/version: {{ .Chart.AppVersion | quote }}
+{{- end }}
+app.kubernetes.io/managed-by: {{ .Release.Service }}
+{{- end }}
+
+{{/*
+Selector labels
+*/}}
+{{- define "lagoon-logs-concentrator.selectorLabels" -}}
+app.kubernetes.io/name: {{ include "lagoon-logs-concentrator.name" . }}
+app.kubernetes.io/instance: {{ .Release.Name }}
+{{- end }}
+
+{{/*
+Create the name of the service account to use
+*/}}
+{{- define "lagoon-logs-concentrator.serviceAccountName" -}}
+{{- if .Values.serviceAccount.create }}
+{{- default (include "lagoon-logs-concentrator.fullname" .) .Values.serviceAccount.name }}
+{{- else }}
+{{- default "default" .Values.serviceAccount.name }}
+{{- end }}
+{{- end }}
diff --git a/charts/lagoon-logs-concentrator/templates/env.configmap.yaml b/charts/lagoon-logs-concentrator/templates/env.configmap.yaml
new file mode 100644
index 0000000000..01002ee081
--- /dev/null
+++ b/charts/lagoon-logs-concentrator/templates/env.configmap.yaml
@@ -0,0 +1,14 @@
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: {{ include "lagoon-logs-concentrator.fullname" . }}-env
+ labels:
+ {{- include "lagoon-logs-concentrator.labels" . | nindent 4 }}
+data:
+ ELASTICSEARCH_HOST: {{ required "A valid .Values.elasticsearchHost required!" .Values.elasticsearchHost }}
+{{- if .Values.elasticsearchHostPort }}
+ ELASTICSEARCH_HOST_PORT: {{ .Values.elasticsearchHostPort | quote }}
+{{- end }}
+{{- if .Values.elasticsearchScheme }}
+ ELASTICSEARCH_SCHEME: {{ .Values.elasticsearchScheme }}
+{{- end }}
diff --git a/charts/lagoon-logs-concentrator/templates/fluent-conf.configmap.yaml b/charts/lagoon-logs-concentrator/templates/fluent-conf.configmap.yaml
new file mode 100644
index 0000000000..f90259838a
--- /dev/null
+++ b/charts/lagoon-logs-concentrator/templates/fluent-conf.configmap.yaml
@@ -0,0 +1,76 @@
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: {{ include "lagoon-logs-concentrator.fullname" . }}-fluent-conf
+ labels:
+ {{- include "lagoon-logs-concentrator.labels" . | nindent 4 }}
+data:
+ fluent.conf: |
+ # vi: ft=fluentd
+
+ workers 4
+
+    # prometheus metrics
+    <source>
+      @type prometheus
+    </source>
+    <source>
+      @type prometheus_monitor
+    </source>
+    <source>
+      @type prometheus_output_monitor
+    </source>
+    <source>
+      @type forward
+      @id in_forward
+      add_tag_prefix in_forward
+      # avoid verbose OpenSSL warnings in fluentd logs due to liveness probes
+      # @log_level error
+      <security>
+        self_hostname logs-concentrator
+        user_auth true
+        shared_key "#{ENV['FORWARD_SHARED_KEY']}"
+        @include user.d/*.conf
+      </security>
+      <transport tls>
+        ca_path /fluentd/tls/ca.crt
+        cert_path /fluentd/tls/server.crt
+        private_key_path /fluentd/tls/server.key
+        client_cert_auth true
+      </transport>
+    </source>
+    # send to elasticsearch
+    <match **>
+      @type elasticsearch
+      @id out_elasticsearch
+      # ingestion
+      target_index_key index_name
+      include_timestamp true
+      time_key time
+      # endpoint
+      host "#{ENV['ELASTICSEARCH_HOST']}"
+      port "#{ENV.fetch('ELASTICSEARCH_HOST_PORT','9200')}"
+      scheme "#{ENV.fetch('ELASTICSEARCH_SCHEME','http')}"
+      ssl_min_version TLSv1_2
+      ssl_max_version TLSv1_3
+      user admin
+      password "#{ENV['LOGSDB_ADMIN_PASSWORD']}"
+      # endpoint error handling
+      reconnect_on_error true
+      reload_on_failure true
+      request_timeout 600s
+      slow_flush_log_threshold 300s
+      log_es_400_reason true
+      <buffer>
+        @type file
+        path /fluentd/buffer/elasticsearch
+        # buffer params (per worker)
+        total_limit_size 8GB
+        # flush params
+        flush_thread_count 4
+        overflow_action drop_oldest_chunk
+      </buffer>
+      # silence warnings (these have no effect)
+      type_name _doc
+      ssl_version TLSv1_2
+    </match>
diff --git a/charts/lagoon-logs-concentrator/templates/hpa.yaml b/charts/lagoon-logs-concentrator/templates/hpa.yaml
new file mode 100644
index 0000000000..dd328ef938
--- /dev/null
+++ b/charts/lagoon-logs-concentrator/templates/hpa.yaml
@@ -0,0 +1,28 @@
+{{- if .Values.autoscaling.enabled }}
+apiVersion: autoscaling/v2beta1
+kind: HorizontalPodAutoscaler
+metadata:
+ name: {{ include "lagoon-logs-concentrator.fullname" . }}
+ labels:
+ {{- include "lagoon-logs-concentrator.labels" . | nindent 4 }}
+spec:
+ scaleTargetRef:
+ apiVersion: apps/v1
+    kind: StatefulSet
+ name: {{ include "lagoon-logs-concentrator.fullname" . }}
+ minReplicas: {{ .Values.autoscaling.minReplicas }}
+ maxReplicas: {{ .Values.autoscaling.maxReplicas }}
+ metrics:
+ {{- if .Values.autoscaling.targetCPUUtilizationPercentage }}
+ - type: Resource
+ resource:
+ name: cpu
+ targetAverageUtilization: {{ .Values.autoscaling.targetCPUUtilizationPercentage }}
+ {{- end }}
+ {{- if .Values.autoscaling.targetMemoryUtilizationPercentage }}
+ - type: Resource
+ resource:
+ name: memory
+ targetAverageUtilization: {{ .Values.autoscaling.targetMemoryUtilizationPercentage }}
+ {{- end }}
+{{- end }}
diff --git a/charts/lagoon-logs-concentrator/templates/ingress.yaml b/charts/lagoon-logs-concentrator/templates/ingress.yaml
new file mode 100644
index 0000000000..cb4b4defe2
--- /dev/null
+++ b/charts/lagoon-logs-concentrator/templates/ingress.yaml
@@ -0,0 +1,41 @@
+{{- if .Values.ingress.enabled -}}
+{{- $fullName := include "lagoon-logs-concentrator.fullname" . -}}
+{{- $svcPort := .Values.service.port -}}
+{{- if semverCompare ">=1.14-0" .Capabilities.KubeVersion.GitVersion -}}
+apiVersion: networking.k8s.io/v1beta1
+{{- else -}}
+apiVersion: extensions/v1beta1
+{{- end }}
+kind: Ingress
+metadata:
+ name: {{ $fullName }}
+ labels:
+ {{- include "lagoon-logs-concentrator.labels" . | nindent 4 }}
+ {{- with .Values.ingress.annotations }}
+ annotations:
+ {{- toYaml . | nindent 4 }}
+ {{- end }}
+spec:
+ {{- if .Values.ingress.tls }}
+ tls:
+ {{- range .Values.ingress.tls }}
+ - hosts:
+ {{- range .hosts }}
+ - {{ . | quote }}
+ {{- end }}
+ secretName: {{ .secretName }}
+ {{- end }}
+ {{- end }}
+ rules:
+ {{- range .Values.ingress.hosts }}
+ - host: {{ .host | quote }}
+ http:
+ paths:
+ {{- range .paths }}
+ - path: {{ . }}
+ backend:
+ serviceName: {{ $fullName }}
+ servicePort: {{ $svcPort }}
+ {{- end }}
+ {{- end }}
+ {{- end }}
diff --git a/charts/lagoon-logs-concentrator/templates/secret.yaml b/charts/lagoon-logs-concentrator/templates/secret.yaml
new file mode 100644
index 0000000000..dcaf596bf6
--- /dev/null
+++ b/charts/lagoon-logs-concentrator/templates/secret.yaml
@@ -0,0 +1,41 @@
+apiVersion: v1
+kind: Secret
+type: Opaque
+metadata:
+ name: {{ include "lagoon-logs-concentrator.fullname" . }}-env
+ labels:
+ {{- include "lagoon-logs-concentrator.labels" . | nindent 4 }}
+stringData:
+ FORWARD_SHARED_KEY: {{ required "A valid .Values.forwardSharedKey required!" .Values.forwardSharedKey }}
+ LOGSDB_ADMIN_PASSWORD: {{ required "A valid .Values.elasticsearchAdminPassword required!" .Values.elasticsearchAdminPassword }}
+---
+apiVersion: v1
+kind: Secret
+type: Opaque
+metadata:
+ name: {{ include "lagoon-logs-concentrator.fullname" . }}-tls
+ labels:
+ {{- include "lagoon-logs-concentrator.labels" . | nindent 4 }}
+stringData:
+ ca.crt: |
+ {{- required "A valid .Values.tls.caCert required!" .Values.tls.caCert | nindent 4}}
+ server.crt: |
+ {{- required "A valid .Values.tls.serverCert required!" .Values.tls.serverCert | nindent 4}}
+ server.key: |
+ {{- required "A valid .Values.tls.serverKey required!" .Values.tls.serverKey | nindent 4}}
+---
+apiVersion: v1
+kind: Secret
+type: Opaque
+metadata:
+ name: {{ include "lagoon-logs-concentrator.fullname" . }}-users
+ labels:
+ {{- include "lagoon-logs-concentrator.labels" . | nindent 4 }}
+stringData:
+  user.conf: |
+    {{- range .Values.users }}
+    <user>
+      username "{{ .username }}"
+      password "{{ .password }}"
+    </user>
+    {{- end }}
diff --git a/charts/lagoon-logs-concentrator/templates/service.yaml b/charts/lagoon-logs-concentrator/templates/service.yaml
new file mode 100644
index 0000000000..ffaaa3cc9a
--- /dev/null
+++ b/charts/lagoon-logs-concentrator/templates/service.yaml
@@ -0,0 +1,19 @@
+apiVersion: v1
+kind: Service
+metadata:
+{{- with .Values.service.annotations }}
+ annotations:
+ {{- toYaml . | nindent 4 }}
+{{- end }}
+ name: {{ include "lagoon-logs-concentrator.fullname" . }}
+ labels:
+ {{- include "lagoon-logs-concentrator.labels" . | nindent 4 }}
+spec:
+ type: {{ .Values.service.type }}
+ ports:
+ - port: {{ .Values.service.port }}
+ targetPort: forward
+ protocol: TCP
+ name: forward
+ selector:
+ {{- include "lagoon-logs-concentrator.selectorLabels" . | nindent 4 }}
diff --git a/charts/lagoon-logs-concentrator/templates/serviceaccount.yaml b/charts/lagoon-logs-concentrator/templates/serviceaccount.yaml
new file mode 100644
index 0000000000..b29517f125
--- /dev/null
+++ b/charts/lagoon-logs-concentrator/templates/serviceaccount.yaml
@@ -0,0 +1,12 @@
+{{- if .Values.serviceAccount.create -}}
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+ name: {{ include "lagoon-logs-concentrator.serviceAccountName" . }}
+ labels:
+ {{- include "lagoon-logs-concentrator.labels" . | nindent 4 }}
+ {{- with .Values.serviceAccount.annotations }}
+ annotations:
+ {{- toYaml . | nindent 4 }}
+ {{- end }}
+{{- end }}
diff --git a/charts/lagoon-logs-concentrator/templates/statefulset.yaml b/charts/lagoon-logs-concentrator/templates/statefulset.yaml
new file mode 100644
index 0000000000..0f41ac058e
--- /dev/null
+++ b/charts/lagoon-logs-concentrator/templates/statefulset.yaml
@@ -0,0 +1,116 @@
+apiVersion: apps/v1
+kind: StatefulSet
+metadata:
+ name: {{ include "lagoon-logs-concentrator.fullname" . }}
+ labels:
+ {{- include "lagoon-logs-concentrator.labels" . | nindent 4 }}
+spec:
+{{- if not .Values.autoscaling.enabled }}
+ replicas: {{ .Values.replicaCount }}
+{{- end }}
+ serviceName: {{ include "lagoon-logs-concentrator.fullname" . }}
+ selector:
+ matchLabels:
+ {{- include "lagoon-logs-concentrator.selectorLabels" . | nindent 6 }}
+ template:
+ metadata:
+ annotations:
+ checksum/secret: {{ include (print $.Template.BasePath "/secret.yaml") . | sha256sum }}
+ checksum/env-configmap: {{ include (print $.Template.BasePath "/env.configmap.yaml") . | sha256sum }}
+ checksum/fluent-conf-configmap: {{ include (print $.Template.BasePath "/fluent-conf.configmap.yaml") . | sha256sum }}
+ {{- with .Values.podAnnotations }}
+ {{- toYaml . | nindent 8 }}
+ {{- end }}
+ labels:
+ {{- include "lagoon-logs-concentrator.selectorLabels" . | nindent 8 }}
+ spec:
+ {{- with .Values.imagePullSecrets }}
+ imagePullSecrets:
+ {{- toYaml . | nindent 8 }}
+ {{- end }}
+ serviceAccountName: {{ include "lagoon-logs-concentrator.serviceAccountName" . }}
+ securityContext:
+ {{- toYaml .Values.podSecurityContext | nindent 8 }}
+ initContainers:
+ - name: chown-buffer
+ image: busybox:musl
+ imagePullPolicy: {{ .Values.image.pullPolicy }}
+ securityContext:
+ {{- toYaml .Values.securityContext | nindent 12 }}
+ command:
+ - chown
+ - '100:0'
+ - /fluentd/buffer
+ volumeMounts:
+ - mountPath: /fluentd/buffer/
+ name: {{ include "lagoon-logs-concentrator.fullname" . }}-buffer
+ containers:
+ - name: fluentd
+ securityContext:
+ {{- toYaml .Values.securityContext | nindent 12 }}
+ image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}"
+ imagePullPolicy: {{ .Values.image.pullPolicy }}
+ ports:
+ - name: forward
+ containerPort: 24224
+ protocol: TCP
+ livenessProbe:
+ tcpSocket:
+ port: forward
+ readinessProbe:
+ tcpSocket:
+ port: forward
+ envFrom:
+ - secretRef:
+ name: {{ include "lagoon-logs-concentrator.fullname" . }}-env
+ - configMapRef:
+ name: {{ include "lagoon-logs-concentrator.fullname" . }}-env
+ volumeMounts:
+ - mountPath: /fluentd/etc/fluent.conf
+ name: {{ include "lagoon-logs-concentrator.fullname" . }}-fluent-conf
+ subPath: fluent.conf
+ - mountPath: /fluentd/etc/user.d/
+ name: {{ include "lagoon-logs-concentrator.fullname" . }}-users
+ - mountPath: /fluentd/buffer/
+ name: {{ include "lagoon-logs-concentrator.fullname" . }}-buffer
+ - mountPath: /fluentd/tls/
+ name: {{ include "lagoon-logs-concentrator.fullname" . }}-tls
+ resources:
+ {{- toYaml .Values.resources | nindent 12 }}
+ {{- with .Values.nodeSelector }}
+ nodeSelector:
+ {{- toYaml . | nindent 8 }}
+ {{- end }}
+ {{- with .Values.affinity }}
+ affinity:
+ {{- toYaml . | nindent 8 }}
+ {{- end }}
+ {{- with .Values.tolerations }}
+ tolerations:
+ {{- toYaml . | nindent 8 }}
+ {{- end }}
+ volumes:
+ - configMap:
+ defaultMode: 420
+ items:
+ - key: fluent.conf
+ path: fluent.conf
+ name: {{ include "lagoon-logs-concentrator.fullname" . }}-fluent-conf
+ name: {{ include "lagoon-logs-concentrator.fullname" . }}-fluent-conf
+ - secret:
+ defaultMode: 420
+ secretName: {{ include "lagoon-logs-concentrator.fullname" . }}-tls
+ name: {{ include "lagoon-logs-concentrator.fullname" . }}-tls
+ - secret:
+ defaultMode: 420
+ secretName: {{ include "lagoon-logs-concentrator.fullname" . }}-users
+ name: {{ include "lagoon-logs-concentrator.fullname" . }}-users
+ volumeClaimTemplates:
+ - metadata:
+ name: {{ include "lagoon-logs-concentrator.fullname" . }}-buffer
+ spec:
+ accessModes:
+ - ReadWriteOnce
+ resources:
+ requests:
+ storage: 32Gi
diff --git a/charts/lagoon-logs-concentrator/templates/tests/test-connection.yaml b/charts/lagoon-logs-concentrator/templates/tests/test-connection.yaml
new file mode 100644
index 0000000000..adac30105c
--- /dev/null
+++ b/charts/lagoon-logs-concentrator/templates/tests/test-connection.yaml
@@ -0,0 +1,18 @@
+apiVersion: v1
+kind: Pod
+metadata:
+ name: "{{ include "lagoon-logs-concentrator.fullname" . }}-test-connection"
+ labels:
+ {{- include "lagoon-logs-concentrator.labels" . | nindent 4 }}
+ annotations:
+ "helm.sh/hook": test-success
+spec:
+ containers:
+ - name: nc
+ image: busybox
+ command: ['nc']
+ args:
+ - "-zvw5"
+ - {{ include "lagoon-logs-concentrator.fullname" . }}
+        - {{ .Values.service.port | quote }}
+ restartPolicy: Never
diff --git a/charts/lagoon-logs-concentrator/values.yaml b/charts/lagoon-logs-concentrator/values.yaml
new file mode 100644
index 0000000000..11ec47fbf8
--- /dev/null
+++ b/charts/lagoon-logs-concentrator/values.yaml
@@ -0,0 +1,109 @@
+# Default values for lagoon-logs-concentrator.
+# This is a YAML-formatted file.
+# Declare variables to be passed into your templates.
+
+replicaCount: 1
+
+image:
+ repository: amazeeiolagoon/logs-concentrator
+ pullPolicy: IfNotPresent
+ # Overrides the image tag whose default is the chart version.
+ tag: logs-concentrator
+
+imagePullSecrets: []
+nameOverride: ""
+fullnameOverride: ""
+
+serviceAccount:
+ # Specifies whether a service account should be created
+ create: false
+ # Annotations to add to the service account
+ annotations: {}
+ # The name of the service account to use.
+ # If not set and create is true, a name is generated using the fullname template
+ name: ""
+
+podAnnotations: {}
+
+podSecurityContext: {}
+ # fsGroup: 2000
+
+securityContext: {}
+ # capabilities:
+ # drop:
+ # - ALL
+ # readOnlyRootFilesystem: true
+ # runAsNonRoot: true
+ # runAsUser: 1000
+
+service:
+ type: ClusterIP
+ port: 24224
+ # Annotations to add to the service
+ annotations: {}
+
+ingress:
+ enabled: false
+ annotations: {}
+ # kubernetes.io/ingress.class: nginx
+ # kubernetes.io/tls-acme: "true"
+ hosts:
+ - host: chart-example.local
+ paths: []
+ tls: []
+ # - secretName: chart-example-tls
+ # hosts:
+ # - chart-example.local
+
+resources:
+ requests:
+ cpu: 250m
+ memory: 256Mi
+
+autoscaling:
+ enabled: true
+ minReplicas: 2
+ maxReplicas: 4
+ targetCPUUtilizationPercentage: 80
+
+nodeSelector: {}
+
+tolerations: []
+
+affinity: {}
+
+# The values below must be supplied during installation.
+# Certificates should be provided in PEM format, and are generated as described
+# in the README.
+# Sample data shown below.
+
+#elasticsearchHost: "logs-db-service.elasticsearch.svc.cluster.local"
+#elasticsearchAdminPassword: "securepass"
+#tls:
+# caCert: |
+# -----BEGIN CERTIFICATE-----
+# ...
+# -----END CERTIFICATE-----
+# serverCert: |
+# -----BEGIN CERTIFICATE-----
+# ...
+# -----END CERTIFICATE-----
+# serverKey: |
+# -----BEGIN EC PRIVATE KEY-----
+# ...
+# -----END EC PRIVATE KEY-----
+#forwardSharedKey: "securekey"
+#users:
+#- username: "example1"
+# password: "securepass"
+#- username: "example2"
+# password: "securepass"
+
+# The values below are optional.
+
+#elasticsearchHostPort: "443" # default 9200
+#elasticsearchScheme: https # default http
+#service:
+# type: LoadBalancer # default ClusterIP. Set to LoadBalancer to
+# # expose the logs-concentrator service
+# # publicly.
diff --git a/docker-compose.yaml b/docker-compose.yaml
index 05feea5519..d46cfb244d 100644
--- a/docker-compose.yaml
+++ b/docker-compose.yaml
@@ -10,7 +10,7 @@ services:
labels:
lagoon.type: custom
lagoon.template: services/api-db/.lagoon.app.yml
- lagoon.image: amazeeiolagoon/api-db:v1-6-0
+ lagoon.image: amazeeiolagoon/api-db:v1-8-1
webhook-handler:
image: ${IMAGE_REPO:-lagoon}/webhook-handler
command: yarn run dev
@@ -22,7 +22,7 @@ services:
labels:
lagoon.type: custom
lagoon.template: services/webhook-handler/.lagoon.app.yml
- lagoon.image: amazeeiolagoon/webhook-handler:v1-6-0
+ lagoon.image: amazeeiolagoon/webhook-handler:v1-8-1
backup-handler:
image: ${IMAGE_REPO:-lagoon}/backup-handler
restart: on-failure
@@ -31,7 +31,7 @@ services:
labels:
lagoon.type: custom
lagoon.template: services/backup-handler/.lagoon.app.yml
- lagoon.image: amazeeiolagoon/backup-handler:v1-6-0
+ lagoon.image: amazeeiolagoon/backup-handler:v1-8-1
depends_on:
- broker
broker:
@@ -42,7 +42,7 @@ services:
labels:
lagoon.type: rabbitmq-cluster
lagoon.template: services/broker/.lagoon.app.yml
- lagoon.image: amazeeiolagoon/broker:v1-6-0
+ lagoon.image: amazeeiolagoon/broker:v1-8-1
openshiftremove:
image: ${IMAGE_REPO:-lagoon}/openshiftremove
command: yarn run dev
@@ -52,7 +52,7 @@ services:
labels:
lagoon.type: custom
lagoon.template: services/openshiftremove/.lagoon.app.yml
- lagoon.image: amazeeiolagoon/openshiftremove:v1-6-0
+ lagoon.image: amazeeiolagoon/openshiftremove:v1-8-1
openshiftbuilddeploy:
image: ${IMAGE_REPO:-lagoon}/openshiftbuilddeploy
command: yarn run dev
@@ -64,7 +64,7 @@ services:
labels:
lagoon.type: custom
lagoon.template: services/openshiftbuilddeploy/.lagoon.app.yml
- lagoon.image: amazeeiolagoon/openshiftbuilddeploy:v1-6-0
+ lagoon.image: amazeeiolagoon/openshiftbuilddeploy:v1-8-1
openshiftbuilddeploymonitor:
image: ${IMAGE_REPO:-lagoon}/openshiftbuilddeploymonitor
command: yarn run dev
@@ -78,7 +78,7 @@ services:
labels:
lagoon.type: custom
lagoon.template: services/openshiftbuilddeploymonitor/.lagoon.app.yml
- lagoon.image: amazeeiolagoon/openshiftbuilddeploymonitor:v1-6-0
+ lagoon.image: amazeeiolagoon/openshiftbuilddeploymonitor:v1-8-1
openshiftjobs:
image: ${IMAGE_REPO:-lagoon}/openshiftjobs
command: yarn run dev
@@ -92,7 +92,7 @@ services:
labels:
lagoon.type: custom
lagoon.template: services/openshiftjobs/.lagoon.app.yml
- lagoon.image: amazeeiolagoon/openshiftjobs:v1-6-0
+ lagoon.image: amazeeiolagoon/openshiftjobs:v1-8-1
openshiftjobsmonitor:
image: ${IMAGE_REPO:-lagoon}/openshiftjobsmonitor
command: yarn run dev
@@ -102,7 +102,7 @@ services:
labels:
lagoon.type: custom
lagoon.template: services/openshiftjobsmonitor/.lagoon.app.yml
- lagoon.image: amazeeiolagoon/openshiftjobsmonitor:v1-6-0
+ lagoon.image: amazeeiolagoon/openshiftjobsmonitor:v1-8-1
openshiftmisc:
image: ${IMAGE_REPO:-lagoon}/openshiftmisc
command: yarn run dev
@@ -112,7 +112,7 @@ services:
labels:
lagoon.type: custom
lagoon.template: services/openshiftmisc/.lagoon.app.yml
- lagoon.image: amazeeiolagoon/openshiftmisc:v1-6-0
+ lagoon.image: amazeeiolagoon/openshiftmisc:v1-8-1
kubernetesmisc:
image: ${IMAGE_REPO:-lagoon}/kubernetesmisc
command: yarn run dev
@@ -122,7 +122,7 @@ services:
labels:
lagoon.type: custom
lagoon.template: services/kubernetesmisc/.lagoon.app.yml
- lagoon.image: amazeeiolagoon/kubernetesmisc:v1-6-0
+ lagoon.image: amazeeiolagoon/kubernetesmisc:v1-8-1
kubernetesbuilddeploy:
image: ${IMAGE_REPO:-lagoon}/kubernetesbuilddeploy
command: yarn run dev
@@ -135,7 +135,7 @@ services:
labels:
lagoon.type: custom
lagoon.template: services/kubernetesbuilddeploy/.lagoon.app.yml
- lagoon.image: amazeeiolagoon/kubernetesbuilddeploy:v1-6-0
+ lagoon.image: amazeeiolagoon/kubernetesbuilddeploy:v1-8-1
kubernetesdeployqueue:
image: ${IMAGE_REPO:-lagoon}/kubernetesdeployqueue
command: yarn run dev
@@ -145,7 +145,7 @@ services:
labels:
lagoon.type: custom
lagoon.template: services/kubernetesdeployqueue/.lagoon.app.yml
- lagoon.image: amazeeiolagoon/kubernetesdeployqueue:v1-6-0
+ lagoon.image: amazeeiolagoon/kubernetesdeployqueue:v1-8-1
kubernetesbuilddeploymonitor:
image: ${IMAGE_REPO:-lagoon}/kubernetesbuilddeploymonitor
command: yarn run dev
@@ -159,7 +159,7 @@ services:
labels:
lagoon.type: custom
lagoon.template: services/kubernetesbuilddeploymonitor/.lagoon.app.yml
- lagoon.image: amazeeiolagoon/kubernetesbuilddeploymonitor:v1-6-0
+ lagoon.image: amazeeiolagoon/kubernetesbuilddeploymonitor:v1-8-1
kubernetesjobs:
image: ${IMAGE_REPO:-lagoon}/kubernetesjobs
command: yarn run dev
@@ -173,7 +173,7 @@ services:
labels:
lagoon.type: custom
lagoon.template: services/kubernetesjobs/.lagoon.app.yml
- lagoon.image: amazeeiolagoon/kubernetesjobs:v1-6-0
+ lagoon.image: amazeeiolagoon/kubernetesjobs:v1-8-1
kubernetesjobsmonitor:
image: ${IMAGE_REPO:-lagoon}/kubernetesjobsmonitor
command: yarn run dev
@@ -187,7 +187,7 @@ services:
labels:
lagoon.type: custom
lagoon.template: services/kubernetesjobsmonitor/.lagoon.app.yml
- lagoon.image: amazeeiolagoon/kubernetesjobsmonitor:v1-6-0
+ lagoon.image: amazeeiolagoon/kubernetesjobsmonitor:v1-8-1
kubernetesremove:
image: ${IMAGE_REPO:-lagoon}/kubernetesremove
command: yarn run dev
@@ -197,7 +197,7 @@ services:
labels:
lagoon.type: custom
lagoon.template: services/kubernetesremove/.lagoon.app.yml
- lagoon.image: amazeeiolagoon/kubernetesremove:v1-6-0
+ lagoon.image: amazeeiolagoon/kubernetesremove:v1-8-1
logs2rocketchat:
image: ${IMAGE_REPO:-lagoon}/logs2rocketchat
command: yarn run dev
@@ -207,7 +207,7 @@ services:
labels:
lagoon.type: custom
lagoon.template: services/logs2rocketchat/.lagoon.app.yml
- lagoon.image: amazeeiolagoon/logs2rocketchat:v1-6-0
+ lagoon.image: amazeeiolagoon/logs2rocketchat:v1-8-1
logs2slack:
image: ${IMAGE_REPO:-lagoon}/logs2slack
command: yarn run dev
@@ -217,7 +217,7 @@ services:
labels:
lagoon.type: custom
lagoon.template: services/logs2slack/.lagoon.app.yml
- lagoon.image: amazeeiolagoon/logs2slack:v1-6-0
+ lagoon.image: amazeeiolagoon/logs2slack:v1-8-1
logs2microsoftteams:
image: ${IMAGE_REPO:-lagoon}/logs2microsoftteams
command: yarn run dev
@@ -227,7 +227,7 @@ services:
labels:
lagoon.type: custom
lagoon.template: services/logs2microsoftteams/.lagoon.app.yml
- lagoon.image: amazeeiolagoon/logs2microsoftteams:v1-6-0
+ lagoon.image: amazeeiolagoon/logs2microsoftteams:v1-8-1
logs2email:
image: ${IMAGE_REPO:-lagoon}/logs2email
command: yarn run dev
@@ -237,7 +237,7 @@ services:
labels:
lagoon.type: custom
lagoon.template: services/logs2slack/.lagoon.app.yml
- lagoon.image: amazeeiolagoon/logs2email:v1-6-0
+ lagoon.image: amazeeiolagoon/logs2email:v1-8-1
depends_on:
- mailhog
mailhog:
@@ -255,7 +255,7 @@ services:
labels:
lagoon.type: custom
lagoon.template: services/webhooks2tasks/.lagoon.app.yml
- lagoon.image: amazeeiolagoon/webhooks2tasks:v1-6-0
+ lagoon.image: amazeeiolagoon/webhooks2tasks:v1-8-1
api:
image: ${IMAGE_REPO:-lagoon}/api
command: yarn run dev
@@ -267,10 +267,14 @@ services:
- keycloak
ports:
- '3000:3000'
+ # Uncomment for local new relic tracking
+ # environment:
+ # - NEW_RELIC_LICENSE_KEY=
+ # - NEW_RELIC_APP_NAME=api-local
labels:
lagoon.type: custom
lagoon.template: services/api/.lagoon.app.yml
- lagoon.image: amazeeiolagoon/api:v1-6-0
+ lagoon.image: amazeeiolagoon/api:v1-8-1
ui:
image: ${IMAGE_REPO:-lagoon}/ui
command: yarn run dev
@@ -284,7 +288,7 @@ services:
labels:
lagoon.type: custom
lagoon.template: services/ui/.lagoon.app.yml
- lagoon.image: amazeeiolagoon/ui:v1-6-0
+ lagoon.image: amazeeiolagoon/ui:v1-8-1
ssh:
image: ${IMAGE_REPO:-lagoon}/ssh
depends_on:
@@ -305,7 +309,7 @@ services:
labels:
lagoon.type: custom
lagoon.template: services/ssh/.lagoon.app.yml
- lagoon.image: amazeeiolagoon/ssh:v1-6-0
+ lagoon.image: amazeeiolagoon/ssh:v1-8-1
auth-server:
image: ${IMAGE_REPO:-lagoon}/auth-server
command: yarn run dev
@@ -319,7 +323,7 @@ services:
labels:
lagoon.type: custom
lagoon.template: services/auth-server/.lagoon.app.yml
- lagoon.image: amazeeiolagoon/auth-server:v1-6-0
+ lagoon.image: amazeeiolagoon/auth-server:v1-8-1
keycloak:
image: ${IMAGE_REPO:-lagoon}/keycloak
user: '111111111'
@@ -330,7 +334,7 @@ services:
labels:
lagoon.type: custom
lagoon.template: services/keycloak/.lagoon.app.yml
- lagoon.image: amazeeiolagoon/keycloak:v1-6-0
+ lagoon.image: amazeeiolagoon/keycloak:v1-8-1
keycloak-db:
image: ${IMAGE_REPO:-lagoon}/keycloak-db
ports:
@@ -338,7 +342,7 @@ services:
labels:
lagoon.type: custom
lagoon.template: services/keycloak-db/.lagoon.app.yml
- lagoon.image: amazeeiolagoon/keycloak-db:v1-6-0
+ lagoon.image: amazeeiolagoon/keycloak-db:v1-8-1
tests-kubernetes:
image: ${IMAGE_REPO:-lagoon}/tests
environment:
@@ -454,7 +458,7 @@ services:
labels:
lagoon.type: custom
lagoon.template: services/drush-alias/.lagoon.app.yml
- lagoon.image: amazeeiolagoon/drush-alias:v1-6-0
+ lagoon.image: amazeeiolagoon/drush-alias:v1-8-1
version: '2'
logs-db:
image: ${IMAGE_REPO:-lagoon}/logs-db
@@ -470,14 +474,14 @@ services:
labels:
lagoon.type: elasticsearch
lagoon.template: services/logs-db/.lagoon.single.yml
- lagoon.image: amazeeiolagoon/logs-db:v1-6-0
+ lagoon.image: amazeeiolagoon/logs-db:v1-8-1
logs-forwarder:
image: ${IMAGE_REPO:-lagoon}/logs-forwarder
user: '111111111'
labels:
lagoon.type: custom
lagoon.template: services/logs-forwarder/.lagoon.single.yml
- lagoon.image: amazeeiolagoon/logs-forwarder:v1-6-0
+ lagoon.image: amazeeiolagoon/logs-forwarder:v1-8-1
logs-db-ui:
image: ${IMAGE_REPO:-lagoon}/logs-db-ui
user: '111111111'
@@ -489,14 +493,14 @@ services:
labels:
lagoon.type: kibana
lagoon.template: services/logs-db-ui/.lagoon.yml
- lagoon.image: amazeeiolagoon/logs-db-ui:v1-6-0
+ lagoon.image: amazeeiolagoon/logs-db-ui:v1-8-1
logs-db-curator:
image: ${IMAGE_REPO:-lagoon}/logs-db-curator
user: '111111111'
labels:
lagoon.type: cli
lagoon.template: services/logs-db-curator/.lagoon.app.yml
- lagoon.image: amazeeiolagoon/logs-db-curator:v1-6-0
+ lagoon.image: amazeeiolagoon/logs-db-curator:v1-8-1
logs2logs-db:
image: ${IMAGE_REPO:-lagoon}/logs2logs-db
user: '111111111'
@@ -512,7 +516,7 @@ services:
labels:
lagoon.type: logstash
lagoon.template: services/logs2logs-db/.lagoon.yml
- lagoon.image: amazeeiolagoon/logs2logs-db:v1-6-0
+ lagoon.image: amazeeiolagoon/logs2logs-db:v1-8-1
auto-idler:
image: ${IMAGE_REPO:-lagoon}/auto-idler
user: '111111111'
@@ -521,11 +525,11 @@ services:
- './services/auto-idler/idle-clis.sh:/idle-clis.sh'
- './services/auto-idler/openshift-services.sh:/openshift-services.sh'
- './services/auto-idler/openshift-clis.sh:/openshift-clis.sh'
- - './services/auto-idler/create_jwt.sh:/create_jwt.sh'
+ - './services/auto-idler/create_jwt.py:/create_jwt.py'
labels:
lagoon.type: custom
lagoon.template: services/auto-idler/.lagoon.yml
- lagoon.image: amazeeiolagoon/auto-idler:v1-6-0
+ lagoon.image: amazeeiolagoon/auto-idler:v1-8-1
storage-calculator:
image: ${IMAGE_REPO:-lagoon}/storage-calculator
user: '111111111'
@@ -534,7 +538,7 @@ services:
labels:
lagoon.type: custom
lagoon.template: services/storage-calculator/.lagoon.yml
- lagoon.image: amazeeiolagoon/storage-calculator:v1-6-0
+ lagoon.image: amazeeiolagoon/storage-calculator:v1-8-1
logs-collector:
image: openshift/origin-logging-fluentd:v3.6.1
labels:
@@ -594,7 +598,7 @@ services:
- HTTPS_PROXY=
- NO_PROXY=harbor-core,harbor-jobservice,harbor-database,harborregistry,harbor-portal,harbor-trivy,127.0.0.1,localhost,.local,.internal
- HARBOR_NGINX_ENDPOINT=http://harbor-nginx:8080
- - ROBOT_TOKEN_DURATION=999
+ - ROBOT_TOKEN_DURATION=500
- CORE_SECRET=secret123
- JOBSERVICE_SECRET=secret123
- REGISTRY_HTTP_SECRET=secret123
@@ -606,7 +610,7 @@ services:
labels:
lagoon.type: custom
lagoon.template: services/harbor-core/harbor-core.yml
- lagoon.image: amazeeiolagoon/harbor-core:v1-6-0
+ lagoon.image: amazeeiolagoon/harbor-core:v1-8-1
harbor-database:
image: ${IMAGE_REPO:-lagoon}/harbor-database
hostname: harbor-database
@@ -620,7 +624,7 @@ services:
labels:
lagoon.type: custom
lagoon.template: services/harbor-database/harbor-database.yml
- lagoon.image: amazeeiolagoon/harbor-database:v1-6-0
+ lagoon.image: amazeeiolagoon/harbor-database:v1-8-1
harbor-jobservice:
image: ${IMAGE_REPO:-lagoon}/harbor-jobservice
hostname: harbor-jobservice
@@ -649,7 +653,7 @@ services:
labels:
lagoon.type: custom
lagoon.template: services/harbor-jobservice/harbor-jobservice.yml
- lagoon.image: amazeeiolagoon/harbor-jobservice:v1-6-0
+ lagoon.image: amazeeiolagoon/harbor-jobservice:v1-8-1
harbor-nginx:
image: ${IMAGE_REPO:-lagoon}/harbor-nginx
hostname: harbor-nginx
@@ -665,7 +669,7 @@ services:
labels:
lagoon.type: custom
lagoon.template: services/harbor-nginx/harbor-nginx.yml
- lagoon.image: amazeeiolagoon/harbor-nginx:v1-6-0
+ lagoon.image: amazeeiolagoon/harbor-nginx:v1-8-1
harbor-portal:
image: ${IMAGE_REPO:-lagoon}/harbor-portal
hostname: harbor-portal
@@ -675,7 +679,7 @@ services:
labels:
lagoon.type: custom
lagoon.template: services/harbor-portal/harbor-portal.yml
- lagoon.image: amazeeiolagoon/harbor-portal:v1-6-0
+ lagoon.image: amazeeiolagoon/harbor-portal:v1-8-1
harbor-redis:
image: ${IMAGE_REPO:-lagoon}/harbor-redis
hostname: harbor-redis
@@ -685,7 +689,7 @@ services:
labels:
lagoon.type: custom
lagoon.template: services/harbor-redis/harbor-redis.yml
- lagoon.image: amazeeiolagoon/harbor-redis:v1-6-0
+ lagoon.image: amazeeiolagoon/harbor-redis:v1-8-1
harbor-trivy:
image: ${IMAGE_REPO:-lagoon}/harbor-trivy
hostname: harbor-trivy
@@ -717,7 +721,7 @@ services:
lagoon.type: custom
lagoon.template: services/harbor-trivy/harbor-trivy.yml
lagoon.name: harbor-trivy
- lagoon.image: amazeeiolagoon/harbor-trivy:v1-6-0
+ lagoon.image: amazeeiolagoon/harbor-trivy:v1-8-1
harborregistry:
image: ${IMAGE_REPO:-lagoon}/harborregistry
hostname: harborregistry
@@ -739,7 +743,7 @@ services:
lagoon.type: custom
lagoon.template: services/harborregistry/harborregistry.yml
lagoon.name: harborregistry
- lagoon.image: amazeeiolagoon/harborregistry:v1-6-0
+ lagoon.image: amazeeiolagoon/harborregistry:v1-8-1
harborregistryctl:
image: ${IMAGE_REPO:-lagoon}/harborregistryctl
hostname: harborregistryctl
@@ -754,4 +758,10 @@ services:
lagoon.type: custom
lagoon.template: services/harborregistryctl/harborregistry.yml
lagoon.name: harborregistry
- lagoon.image: amazeeiolagoon/harborregistryctl:v1-6-0
+ lagoon.image: amazeeiolagoon/harborregistryctl:v1-8-1
+ api-redis:
+ image: ${IMAGE_REPO:-lagoon}/api-redis
+ labels:
+ lagoon.type: custom
+ lagoon.template: services/api-redis/.lagoon.app.yml
+ lagoon.image: amazeeiolagoon/api-redis:v1-8-1
diff --git a/docs/administering_lagoon/install_k8s.md b/docs/administering_lagoon/install_k8s.md
new file mode 100644
index 0000000000..24ea63f34e
--- /dev/null
+++ b/docs/administering_lagoon/install_k8s.md
@@ -0,0 +1,125 @@
+# Install local Kubernetes cluster for Lagoon
+
+Let's see how to install a local lightweight k8s cluster using
+k3s by Rancher: [rancher/k3s](https://github.com/rancher/k3s)
+
+!!!hint
+ In order to have the best experience we recommend the following:
+ Linux or Mac OSX
+ 32 GB+ RAM total
+ 12 GB+ RAM allocated to Docker
+ 6+ cores allocated to Docker
+ SSD disk with 25GB+ free
+
+## Installation checklist
+1. Make sure you have a clean state checking the following (use `-n` option for dry-run):
+    1. Make sure no lagoon containers are running by running `make kill`.
+    2. Make sure to clean any old lagoon containers and volumes by running `make down`.
+ 3. Now your `build` dir should be empty and `docker ps` should show no containers running.
+2. Make sure to allow `172.17.0.1:5000` as insecure registry, check the [docker docs](https://docs.docker.com/registry/insecure/) for more information.
+ 1. Edit `insecure-registries` key in your `/etc/docker/daemon.json` and add `"insecure-registries":["172.17.0.1:5000"]` then restart docker service with `systemctl restart docker`.
+3. Using `sysctl vm.max_map_count` check the value of `vm.max_map_count` is at least `262144` or set it using `sysctl -w vm.max_map_count=262144`. We need to increase this value to avoid error [`max virtual memory areas is too low`](https://stackoverflow.com/questions/51445846/elasticsearch-max-virtual-memory-areas-vm-max-map-count-65530-is-too-low-inc/51448773#51448773) on `logs-db` Elasticsearch service.
+
+## Create a local k8s cluster
+1. Now you can create a local k3s Kubernetes cluster running `make k3d` and see the following notable outputs:
+ *
+ ```
+ INFO[0000] Creating cluster [k3s-lagoon]
+ INFO[0000] Creating server using docker.io/rancher/k3s:v1.17.0-k3s.1...
+ INFO[0008] SUCCESS: created cluster [k3s-lagoon]
+ ...
+ The push refers to repository [localhost:5000/lagoon/docker-host]
+ ...
+ The push refers to repository [localhost:5000/lagoon/kubectl-build-deploy-dind]
+ ...
+ Release "k8up" does not exist. Installing it now.
+ NAME: k8up
+ LAST DEPLOYED: Thu May 7 10:45:46 2020
+ NAMESPACE: k8up
+ STATUS: deployed
+ REVISION: 1
+ TEST SUITE: None
+ namespace/dbaas-operator created
+ "dbaas-operator" has been added to your repositories
+ Release "dbaas-operator" does not exist. Installing it now.
+ NAME: dbaas-operator
+ LAST DEPLOYED: Thu May 7 10:45:47 2020
+ NAMESPACE: dbaas-operator
+ STATUS: deployed
+ REVISION: 1
+ TEST SUITE: None
+ Release "mariadbprovider" does not exist. Installing it now.
+ coalesce.go:165: warning: skipped value for providers: Not a table.
+ NAME: mariadbprovider
+ LAST DEPLOYED: Thu May 7 10:45:48 2020
+ NAMESPACE: dbaas-operator
+ STATUS: deployed
+ REVISION: 1
+ TEST SUITE: None
+ namespace/lagoon created
+ Release "lagoon-remote" does not exist. Installing it now.
+ NAME: lagoon-remote
+ LAST DEPLOYED: Thu May 7 10:45:48 2020
+ NAMESPACE: lagoon
+ STATUS: deployed
+ REVISION: 1
+ TEST SUITE: None
+
+ ```
+2. At the end of the script, using `docker ps` you should see an output like the following:
+ *
+ ```
+ CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES
+ 0d61e8ba168e rancher/k3s:v1.17.0-k3s.1 "/bin/k3s server --h…" 28 minutes ago Up 28 minutes 0.0.0.0:16643->16643/tcp, 0.0.0.0:18080->80/tcp, 0.0.0.0:18443->443/tcp k3d-k3s-lagoon-server
+ a7960981caaa lagoon/local-registry "/entrypoint.sh /etc…" 30 minutes ago Up 30 minutes 0.0.0.0:5000->5000/tcp lagoon_local-registry_1
+
+ ```
+3. `make k3d-kubeconfig` will print the `KUBECONFIG` env var you need to start using the cluster.
+ 1. Execute `export KUBECONFIG="$(./local-dev/k3d get-kubeconfig --name=$(cat k3d))"` inside the terminal.
+ 2. Now you should be able to use the cluster via an already installed `kubectl` or making a symbolic link to `/usr/local/bin/kubectl -> /your/path/amazee/lagoon/local-dev/kubectl`
+ 3. If you prefer to use something more visual you could install [k9s](https://k9scli.io/topics/install/) cli tool.
+ 4. Here the complete list of pods you should see with `kubectl get pod -A`
+ ```
+ NAMESPACE NAME
+ kube-system local-path-provisioner
+ kube-system metrics-server
+ k8up k8up-operator
+ dbaas-operator kube-rbac-proxy,manager
+ kube-system coredns
+ lagoon docker-host
+ kube-system helm
+ kube-system nginx-ingress-default-backend
+ kube-system lb-port-80,lb-port-443
+ kube-system nginx-ingress-controller
+ ```
+ 5. Here the complete list of deployed helm [releases](https://helm.sh/docs/helm/helm_list/) you should see with `local-dev/helm/helm ls --all-namespaces`.
+ ```
+ NAME NAMESPACE
+ dbaas-operator dbaas-operator
+ k8up k8up
+ lagoon-remote lagoon
+ mariadbprovider dbaas-operator
+ nginx kube-system
+ ```
+
+## Deploy Lagoon on Kubernetes
+1. TODO
+
+## Configure Installed Lagoon
+
+We have a fully running Kubernetes cluster. Now it's time to configure the first project inside of it. Follow the examples in [GraphQL API](graphql_api.md).
+
+## Clean up
+
+Clean up k3s cluster with `make k3d/stop`.
+
+
+## Troubleshooting
+
+⚠️ **Unable to connect to the server: x509: certificate signed by unknown authority**
+
+Rebuild the cluster via
+```
+make k3d/stop
+make k3d
+```
diff --git a/docs/developing_lagoon/code_of_conduct.md b/docs/code_of_conduct.md
similarity index 100%
rename from docs/developing_lagoon/code_of_conduct.md
rename to docs/code_of_conduct.md
diff --git a/docs/developing_lagoon/contributing.md b/docs/contributing.md
similarity index 100%
rename from docs/developing_lagoon/contributing.md
rename to docs/contributing.md
diff --git a/docs/using_lagoon/active_standby.md b/docs/using_lagoon/active_standby.md
index ea479a0566..e31567308c 100644
--- a/docs/using_lagoon/active_standby.md
+++ b/docs/using_lagoon/active_standby.md
@@ -3,10 +3,10 @@
Lagoon supports Active/Standby (also known as blue/green) deployments.
## Configuration
-To change an existing project to support active/standby you'll need to configure some project settings in the Lagoon API
+To change an existing project to support active/standby you'll need to configure some project settings in the Lagoon API.
-`productionEnviromment` should be set to the branch name of the current environment that is active
-`standbyProductionEnvironment` should be set to the branch name of the current environment that is in standby
+* `productionEnvironment` should be set to the branch name of the current environment that is active.
+* `standbyProductionEnvironment` should be set to the branch name of the environment that will be in standby.
```
mutation updateProject {
@@ -23,6 +23,7 @@ mutation updateProject {
}
}
```
+
### `.lagoon.yml` - `production_routes`
To configure a project for active/standby in the `.lagoon.yml` file, you'll need to configure the `production_routes` section with any routes you want to attach to the `active` environment, and any routes to the `standby` environment. During an Active/Standby switch, these routes will migrate between the two environments.
@@ -52,10 +53,14 @@ production_routes:
> Note: Any routes that are under the section `environments..routes` will not be moved as part of active/standby, these routes will always be attached to the environment as defined. Ensure that if you do need a specific route to be migrated during an active/standby switch, that you remove them from the `environments` section and place them under the `production_routes` section specific to if it should be an `active` or `standby` route.
-## Triggering a switch event
+## Triggering the active/standby switch
+### via the UI
+To trigger the switching of environment routes, you can visit the standby environment in the Lagoon UI and click on the button labeled `Switch Active/Standby environments`. You will be prompted to confirm your action.
-To trigger an event to switch the environments, you can run the following graphQL mutation, this will inform lagoon to begin the process.
+Once confirmed, it will take you to the tasks page where you can view the progress of the switch.
+### via the API
+The following graphQL mutation can be executed which will start the process of switching the environment routes.
```
mutation ActiveStandby {
switchActiveStandby(
@@ -94,7 +99,9 @@ By default, projects will be created with the following aliases that will be ava
* `lagoon-production`
* `lagoon-standby`
-The `lagoon-production` alias will resolve point to whichever site is defined as `productionEnvironment`, where `lagoon-standby` will always resolve to the site that is defined as `standbyProductionEnvironment`
+The `lagoon-production` alias will resolve to whichever environment is currently in the API as `productionEnvironment`, where `lagoon-standby` will always resolve to the environment that is defined as `standbyProductionEnvironment`.
+
+> As the active/standby switch updates these as required, `lagoon-production` will always be the `active` environment.
These alias are configurable by updating the project, but be aware that changing them may require you to update any scripts that rely on them.
@@ -112,4 +119,39 @@ mutation updateProject {
standbyAlias
}
}
+```
+
+## Notes
+
+When the active/standby trigger has been executed, the `productionEnvironment` and `standbyProductionEnvironment` will switch within the Lagoon API. Both environments are still classed as `production` environment types. We use the `productionEnvironment` to determine which one is labelled as `active`. For more information on the differences between environment types, read the [documentation for `environment types`](environment_types.md#environment-types)
+
+```
+query projectByName {
+ projectByName(name:"drupal-example"){
+ productionEnvironment
+ standbyProductionEnvironment
+ }
+}
+```
+Before switching environments
+```
+{
+ "data": {
+ "projectByName": {
+ "productionEnvironment": "production-brancha",
+ "standbyProductionEnvironment": "production-branchb"
+ }
+ }
+}
+```
+After switching environments
+```
+{
+ "data": {
+ "projectByName": {
+ "productionEnvironment": "production-branchb",
+ "standbyProductionEnvironment": "production-brancha"
+ }
+ }
+}
```
\ No newline at end of file
diff --git a/docs/using_lagoon/configure_webhooks.md b/docs/using_lagoon/configure_webhooks.md
index 0f3dfe440e..27f6c5b2ef 100644
--- a/docs/using_lagoon/configure_webhooks.md
+++ b/docs/using_lagoon/configure_webhooks.md
@@ -7,7 +7,7 @@ Your Lagoon administrator will also give you the route to the webhook-handler. Y
- [Bitbucket](#bitbucket)
!!!hint
- If you are an amazee.io customer, the route to the webhook-handler is: [`https://hooks.lagoon.amazeeio.cloud`](https://hooks.lagoon.amazeeio.cloud).
+    If you are an amazee.io customer, the route to the webhook-handler is: [`https://webhook.amazeeio.cloud`](https://webhook.amazeeio.cloud).
!!!warning
Managing the following settings will require you to have a high level of access to these repositories, which will be controlled by your organization. If you cannot access these settings, please contact your systems administrator or the appropriate person within your organization .
diff --git a/docs/using_lagoon/docker_images/mariadb.md b/docs/using_lagoon/docker_images/mariadb.md
index 00fc6edcea..2129f3bc2a 100644
--- a/docs/using_lagoon/docker_images/mariadb.md
+++ b/docs/using_lagoon/docker_images/mariadb.md
@@ -42,6 +42,8 @@ Environment variables defined in MariaDB base image:
| `MARIADB_LOG_SLOW` | empty | Variable to control the save of slow queries. |
| `MARIADB_LOG_QUERIES` | empty | Variable to control the save of ALL queries. |
| `BACKUPS_DIR` | /var/lib/mysql/backup | Default path for databases backups. |
+| `MARIADB_DATA_DIR` | /var/lib/mysql | Path of the mariadb data dir, be careful, changing this can cause data loss! |
+| `MARIADB_COPY_DATA_DIR_SOURCE` | unset | Path which the entrypoint script of mariadb will use to copy into the defined `MARIADB_DATA_DIR`, this can be used for prepopulating the MariaDB with a database. The script expects actual MariaDB data files and not an SQL file! Plus it only copies data if the destination does not already have a mysql datadir in it. |
If the `LAGOON_ENVIRONMENT_TYPE` variable is set to `production`, performances are set accordingly by using `MARIADB_INNODB_BUFFER_POOL_SIZE=1024` and `MARIADB_INNODB_LOG_FILE_SIZE=256`.
diff --git a/docs/using_lagoon/docker_images/nginx.md b/docs/using_lagoon/docker_images/nginx.md
index 24472d9a8b..3f107510b9 100644
--- a/docs/using_lagoon/docker_images/nginx.md
+++ b/docs/using_lagoon/docker_images/nginx.md
@@ -60,3 +60,4 @@ Environment variables are meant to contain common information for the `Nginx` co
| `BASIC_AUTH` | `restricted` | By not setting `BASIC_AUTH` this will instruct Lagoon to automatically enable basic authentication if `BASIC_AUTH_USERNAME` and `BASIC_AUTH_PASSWORD` are set. To disable basic authentication even if `BASIC_AUTH_USERNAME` and `BASIC_AUTH_PASSWORD` are set, set `BASIC_AUTH` to `off`. |
| `BASIC_AUTH_USERNAME` | \(not set\) | Username for basic authentication |
| `BASIC_AUTH_PASSWORD` | \(not set\) | Password for basic authentication \(unencrypted\) |
+| `FAST_HEALTH_CHECK` | \(not set\) | If set to `true` this will redirect GET requests from certain user agents (StatusCake, Pingdom, Site25x7, Uptime, nagios) to the lightweight Lagoon service healthcheck. |
diff --git a/docs/using_lagoon/docker_images/php-fpm.md b/docs/using_lagoon/docker_images/php-fpm.md
index 4d72a41d19..2e4466ddb7 100644
--- a/docs/using_lagoon/docker_images/php-fpm.md
+++ b/docs/using_lagoon/docker_images/php-fpm.md
@@ -88,6 +88,7 @@ Environment variables are meant to contain common information for the PHP contai
| :--- | :--- | :--- |
| `NEWRELIC_ENABLED` | `false` | Enable NewRelic performance monitoring, needs `NEWRELIC_LICENSE` be configured. |
| `NEWRELIC_LICENSE` | \(not set\) | NewRelic license to be used, Important: `NEWRELIC_ENABLED` needs to be set to`true` in order for NewRelic to be enabled. |
+| `NEWRELIC_BROWSER_MONITORING_ENABLED` | `true` | This enables auto-insertion of the JavaScript fragments for NewRelic browser monitoring, Important: `NEWRELIC_ENABLED` needs to be set to`true` in order for NewRelic to be enabled. |
| `PHP_APC_ENABLED` | `1` | Can be set to 0 to disable APC. [See php.net](http://php.net/manual/en/apc.configuration.php#ini.apc.enabled). |
| `PHP_APC_SHM_SIZE` | `32m` | The size of each shared memory segment given. [See php.net](http://php.net/manual/en/apc.configuration.php#ini.apc.shm-size). |
| `PHP_DISPLAY_ERRORS` | `Off` | This determines whether errors should be printed to the screen as part of the output or if they should be hidden from the user. [See php.net](http://php.net/display-errors). |
@@ -104,4 +105,3 @@ Environment variables are meant to contain common information for the PHP contai
| `PHP_MAX_INPUT_VARS` | `2000` | How many input variables will be accepted. [See php.net](http://php.net/manual/en/info.configuration.php#ini.max-input-vars). |
| `PHP_MEMORY_LIMIT` | `400M` | Maximum amount of memory a script may consume. [See php.net](http://php.net/memory-limit). |
| `XDEBUG_ENABLE` | \(not set\) | Used to enable `xdebug` extension. [See php.net](http://php.net/manual/en/apc.configuration.php#ini.apc.enabled). |
-
diff --git a/docs/using_lagoon/docker_images/redis.md b/docs/using_lagoon/docker_images/redis.md
index 635ef54e02..ee89cf3896 100644
--- a/docs/using_lagoon/docker_images/redis.md
+++ b/docs/using_lagoon/docker_images/redis.md
@@ -25,7 +25,7 @@ Environment variables defined in Redis base image. See also [https://raw.githubu
| Environment Variable | Default | Description |
| :--- | :--- | :--- |
-| `LOGLEVEL` | notice | Define the level of logs |
| `DATABASES` | -1 | Default number of databases created at startup |
+| `LOGLEVEL` | notice | Define the level of logs |
| `MAXMEMORY` | 100mb | Maximum amount of memory |
-
+| `REDIS_PASSWORD` | disabled | Enables [authentication feature](https://redis.io/topics/security#authentication-feature) |
diff --git a/docs/using_lagoon/docker_images/solr.md b/docs/using_lagoon/docker_images/solr.md
index 5ed29c1b06..55b8fb2883 100644
--- a/docs/using_lagoon/docker_images/solr.md
+++ b/docs/using_lagoon/docker_images/solr.md
@@ -25,4 +25,5 @@ Environment variables defined in `Solr` base image.
| Environment Variable | Default | Description |
| :--- | :--- | :--- |
| `SOLR_JAVA_MEM` | 512M | Default Java HEAP size \(ie. `SOLR_JAVA_MEM="-Xms10g -Xmx10g"` \). |
-
+| `SOLR_DATA_DIR` | /var/solr | Path of the solr data dir, be careful, changing this can cause data loss! |
+| `SOLR_COPY_DATA_DIR_SOURCE` | unset | Path which the entrypoint script of solr will use to copy into the defined `SOLR_DATA_DIR`, this can be used for prepopulating the Solr with a core. The script expects actual Solr data files! Plus it only copies data if the destination does not already have a solr core in it. |
\ No newline at end of file
diff --git a/docs/using_lagoon/drupal/lagoonize.md b/docs/using_lagoon/drupal/lagoonize.md
index 8be7a68f45..90826dc72b 100644
--- a/docs/using_lagoon/drupal/lagoonize.md
+++ b/docs/using_lagoon/drupal/lagoonize.md
@@ -4,14 +4,13 @@
In order for Drupal to work with Lagoon, we need to teach Drupal about Lagoon and Lagoon about Drupal. This happens by copying specific YAML and PHP Files into your Git repository.
-You find [these Files in our GitHub repository](https://github.com/amazeeio/lagoon/tree/master/docs/using_lagoon/drupal); the easiest way is to [download these files as a ZIP file](https://minhaskamal.github.io/DownGit/#/home?url=https://github.com/amazeeio/lagoon/tree/master/docs/using_lagoon/drupal) and copy them into your Git repository. For each Drupal version and database type you will find an individual folder. A short overview of what they are:
+You find [these Files in our GitHub repository](https://github.com/amazeeio/lagoon/tree/master/docs/using_lagoon/drupal); the easiest way is to [download these files as a ZIP file](https://downgit.github.io/#/home?url=https://github.com/amazeeio/lagoon/tree/master/docs/using_lagoon/drupal) and copy them into your Git repository. For each Drupal version and database type you will find an individual folder. A short overview of what they are:
- `.lagoon.yml` - The main file that will be used by Lagoon to understand what should be deployed and many more things. This file has some sensible Drupal defaults, if you would like to edit or modify, please check the specific [Documentation for .lagoon.yml](../lagoon_yml.md)
- `docker-compose.yml`, `.dockerignore`, and `*.dockerfile` (or `Dockerfile`) - These files are used to run your local Drupal development environment, they tell Docker which services to start and how to build them. They contain sensible defaults and many commented lines. iWe hope that it's well-commented enough to be self-describing. If you would like to find out more, see [Documentation for docker-compose.yml](../docker-compose_yml.md)
- `sites/default/*` - These .php and .yml files teach Drupal how to communicate with Lagoon containers both locally and in production. It also provides an easy system for specific overrides in development and production environments. Unlike other Drupal hosting systems, Lagoon never ever injects Drupal settings files into your Drupal. Therefore you can edit them however you like. Like all other files, they contain sensible defaults and some commented parts.
- `drush/aliases.drushrc.php` - These files are specific to Drush and tell Drush how to talk to the Lagoon GraphQL API in order to learn about all Site Aliases there are.
- `drush/drushrc.php` - Some sensible defaults for Drush commands.
-- Add `patches` directory if you choose [drupal8-composer-mariadb](../drupal/services/mariadb.md).
### Update your `.gitignore` Settings
diff --git a/docs/using_lagoon/drupal/services/solr.md b/docs/using_lagoon/drupal/services/solr.md
index e865fc1c4c..f06216f5d2 100644
--- a/docs/using_lagoon/drupal/services/solr.md
+++ b/docs/using_lagoon/drupal/services/solr.md
@@ -1,10 +1,14 @@
# Solr
## Standard use
- For Solr 5.5 and 6.6 we ship the default schema files provided by [search_api_solr](https://www.drupal.org/project/search_api_solr) version 8.x-1.2. Add the Solr version you would like to use in your docker-compose.yml file, following [our example](https://github.com/amazeeio/drupal-example/blob/master/docker-compose.yml#L103-L111).
+We provide you with the default schema files provided by [search_api_solr](https://www.drupal.org/project/search_api_solr) version 8.x-1.2. This works for Solr 5.5 and 6.6
+
+Specify the Solr version you would like to use in your docker-compose.yml file, following [our example](https://github.com/amazeeio/drupal-example/blob/master/docker-compose.yml#L103-L111).
## Custom schema
-To implement schema customizations for Solr in your project, look to how Lagoon [creates our standard images](https://github.com/amazeeio/lagoon/blob/master/images/solr-drupal/Dockerfile).
+If you use a different version of the search_api_solr module, you may need to add your own custom schema. The module allows you to easily download a config.zip file containing what you need.
+
+Also if for any other reason you would like to implement schema customizations for Solr in your project, look to how Lagoon [creates our standard images](https://github.com/amazeeio/lagoon/blob/master/images/solr-drupal/Dockerfile).
* In the `solr` section of your docker-compose file replace `image: amazeeio/solr:6.6` with:
diff --git a/docs/using_lagoon/harbor/using_harbor.md b/docs/using_lagoon/harbor/using_harbor.md
index a6a74d1c50..653f43e0bb 100644
--- a/docs/using_lagoon/harbor/using_harbor.md
+++ b/docs/using_lagoon/harbor/using_harbor.md
@@ -4,7 +4,7 @@
If you are running Lagoon locally, you can access that UI at [localhost:8084](https://localhost:8084/). The username is `admin` and the password is `admin`
-**Note:** If you are hosting a site with amazee.io, we do not allow customer access to the Harbor UI within amazee.io's Lagoon.
+**Note:** If you are hosting a site with amazee.io, we do not allow customer access to the Harbor UI within amazee.io's Lagoon.
Once logged in, the first screen is a list of all repositories your user has access to. Each "repository" in Harbor correlates to a project in Lagoon.
@@ -17,3 +17,31 @@ Within each Harbor repository, you'll see a list of container images from all en
From here, you can drill down into an individual container in order to see its details, including an overview of its security scan results.

+
+
+## How Harbor interacts with the Problems Database
+
+Lagoon can respond to Harbor webhook scan events and, if the result set matches a Project environment, will use the results and generate entries in the Problems system.
+
+By default, Lagoon tries to parse out the incoming Harbor repo to match the pattern `PROJECT/ENVIRONMENT/SERVICE` - so if we had a project FOO, with environments PROD and DEV, and each of these environments had services `nodejs` and `mariadb` - an incoming Harbor repo name of `FOO/DEV/mariadb` will tell us precisely which service this image scan corresponds to.
+
+In the case where the Harbor repo name does not correspond to this schema, we need some way of mapping scans to the right Project/environment/service. To allow this, we introduce an object called a Harbor Scan Matcher. This is essentially a regex that is meant to match against incoming Harbor repo names that associates the scan with existing projects and environments (or a range of them). We make use of named capture groups in our regexes to match Project, Environment, and Service names. If the name of an environment, project, or service is not present in your regex, you can assign a default name for each of these that will be used instead. This is best illustrated by an example.
+
+Below we create a Harbor Scan Matcher that matches an incoming repo name for the FOO project - let's assume that the incoming repo name from Harbor does not contain anything corresponding to the environment name, so we'll by default assume that this is going to go to an environment named PROD. If the Harbor repo name is something like `MY_ENTERPRISE_PROJECT-` we could match it by creating the following Harbor Scan Matcher:
+
+```graphql
+mutation addProblemHarborScan {
+ addProblemHarborScanMatch(input: {name: "EnterpriseProjectMatcher", description:"Matches incoming Harbor Scans for FOO",
+ defaultLagoonProject: "FOO"
+ defaultLagoonService: "nodejs"
+ regex: "^MY_ENTERPRISE_PROJECT-(?.+)$"
+ }) {
+ id
+ name
+ description
+ regex
+ }
+}
+```
+
+This will now match any incoming string of the form `MY_ENTERPRISE_PROJECT-` - assign it to the FOO project and attach any vulnerabilities found to the `nodejs` service. This is an ADMIN ONLY function since the Harbor Scan Matchers need to be set across the entire Lagoon instance. If non-admin users could set these then it would be possible to reroute Harbor scans from one project to another.
\ No newline at end of file
diff --git a/docs/using_lagoon/lagoon_yml.md b/docs/using_lagoon/lagoon_yml.md
index 06ef55ee23..8c91ec1cc4 100644
--- a/docs/using_lagoon/lagoon_yml.md
+++ b/docs/using_lagoon/lagoon_yml.md
@@ -126,6 +126,17 @@ Note: If you would like to temporarily disable pre/post-rollout tasks during a d
This allows for the disabling of the automatically created routes \(NOT the custom routes per environment, see below for them\) all together.
+### `routes.autogenerate.allowPullrequests`
+
+This allows pull requests to get autogenerated routes when route autogeneration is disabled.
+
+```
+routes:
+ autogenerate:
+ enabled: false
+ allowPullrequests: true
+```
+
### `routes.autogenerate.insecure`
This allows you to define the behavior of the automatic creates routes \(NOT the custom routes per environment, see below for more\). The following options are allowed:
@@ -140,10 +151,10 @@ Environment names match your deployed branches or pull requests. This allows for
#### `environments.[name].monitoring_urls`
-At the end of a deploy, Lagoon will check this field for any URLs which you have specified to add to the API for the purpose of monitoring. The default value for this field is the first route for a project. It is useful for adding specific paths of a project to the API, for consumption by a monitoring service.
+!!!danger
+ This feature will be removed in an upcoming release of Lagoon. Please use the newer [`monitoring-path` method](lagoon_yml.md#monitoring-a-specific-path) on your specific route.
-!!!hint
- Please note, Lagoon does not provide any direct integration to a monitoring service, this just adds the URLs to the API. On amazee.io, we take the `monitoring_urls` and add them to our StatusCake account.
+At the end of a deploy, Lagoon will check this field for any URLs which you have specified to add to the API for the purpose of monitoring. The default value for this field is the first route for a project. It is useful for adding specific paths of a project to the API, for consumption by a monitoring service.
#### `environments.[name].routes`
@@ -176,6 +187,14 @@ In the `"www.example.com"` example repeated below, we see two more options \(als
hsts: max-age=31536000
```
+#### Monitoring a specific path
+When [UptimeRobot](https://uptimerobot.com/) is configured for your cluster (OpenShift or Kubernetes), Lagoon will inject annotations to each route/ingress for use by the `stakater/IngressControllerMonitor`. The default action is to monitor the homepage of the route. If you have a specific route to be monitored, this can be overridden by adding a `monitoring-path` to your route specification. A common use is to set up a path for monitoring which bypasses caching to give a more real-time monitoring of your site.
+
+```
+ - "www.example.com":
+ monitoring-path: "/bypass-cache"
+```
+
#### Ingress annotations (Redirects)
!!!hint
@@ -264,6 +283,18 @@ environments:
mariadb: statefulset
```
+### `environments.[name].autogenerateRoutes`
+
+This allows any environment to get autogenerated routes when route autogeneration is disabled.
+
+```
+routes:
+ autogenerate:
+ enabled: false
+environments:
+ develop:
+ autogenerateRoutes: true
+```
#### Cron jobs - `environments.[name].cronjobs`
diff --git a/helpers/check_acme_routes.sh b/helpers/check_acme_routes.sh
new file mode 100755
index 0000000000..c4dc864235
--- /dev/null
+++ b/helpers/check_acme_routes.sh
@@ -0,0 +1,301 @@
+#!/bin/bash
+
+# Description: script to check routes with exposer pods.
+# In case of no DNS record or mis-configuration, script will update the route
+# by disabling the tls-acme, removing other acme related annotations and add
+# an internal one for filtering purposes
+
+set -eu -o pipefail
+
+# Set DEBUG variable to true, to start bash in debug mode
+DEBUG="${DEBUG:-"false"}"
+if [ "$DEBUG" = "true" ]; then
+ set -x
+fi
+
+# Some variables
+
+# Cluster full hostname and API hostname
+CLUSTER_HOSTNAME="${CLUSTER_HOSTNAME:-""}"
+CLUSTER_API_HOSTNAME="${CLUSTER_API_HOSTNAME:-"$CLUSTER_HOSTNAME"}"
+
+# Default command
+COMMAND=${1:-"help"}
+
+# Set DRYRUN variable to true to run in dry-run mode
+DRYRUN="${DRYRUN:-"false"}"
+
+
+# Set a REGEX variable to filter the execution of the script
+REGEX=${REGEX:-".*"}
+
+# Set NOTIFYONLY to true if you want to send customers a notification
+# explaining why Lagoon is not able to issue Let's Encrypt certificates for
+# some routes defined in customer's .lagoon.yml file.
+# If set to true, no action other than notification is taken (ie: no annotation or deletion)
+NOTIFYONLY=${NOTIFYONLY:-"false"}
+
+# Help function
+function usage() {
+ echo -e "The available commands are:
+ - help (get this help)
+ - getpendingroutes (get a list of routes with acme \"orderStatus\" in Pending
+ - getdisabledroutes (get a list of routes with \"administratively-disabled\" annotation
+ - getbrokenroutes (get a list of all possible broken routes)
+ - updateroutes (update broken routes)
+
+ By default, script doesn't set any default cluster to run routes' checks. Please set CLUSTER_HOSTNAME and CLUSTER_API_HOSTNAME variables.
+ If you want to change the API endpoint, set CLUSTER_API_HOSTNAME variable.
+ If you want to change the cluster's hostname, set CLUSTER_HOSTNAME variable.
+ If you want to filter the execution of the script only for certain projects, set the REGEX variable.
+ If you want to test against a specific IP, set the CLUSTER_IPS array.
+
+ Examples:
+ CLUSTER_HOSTNAME=\"ch.amazee.io\" CLUSTER_API_HOSTNAME=\"ch.amazeeio.cloud\" ./check_acme_routes.sh getpendingroutes (Returns a list of all routes witl TLS in Pending status for the defined cluster)
+ REGEX=\"drupal-example\" ./check_acme_routes.sh getpendingroutes (Returns a list of all routes for all projects matchiing the regex \`drupal-example\` with TLS in Pending status)
+ REGEX=\"drupal-example-master\" DRYRUN=true ./check_acme_routes.sh updateroutes (Will run in DRYRUN mode to check and update all broken routes in \`drupal-example-master\` project)"
+
+}
+
+# Function that performs mandatory variable and dependency checks
+function initial_checks() {
+ # By default script doesn't set CLUSTER_HOSTNAME and CLUSTER_API_HOSTNAME. At least CLUSTER_HOSTNAME must be set
+ if [ -z "$CLUSTER_HOSTNAME" ]; then
+ echo "Please set CLUSTER_HOSTNAME variable"
+ usage
+ exit 1
+ fi
+
+  # Script depends on `lagoon-cli`. Check if it is installed
+ if [[ ! $(command -v lagoon) ]]; then
+ echo "Please install \`lagoon-cli\` from https://github.com/amazeeio/lagoon-cli because the script relys on it"
+ exit 1
+ fi
+}
+
+# function to get a list of all "administratively-disabled" routes
+function get_all_disabled_routes() {
+ echo -e "List of routes administratively disabled\n"
+ oc get route --all-namespaces -o=jsonpath="{range .items[?(@.metadata.annotations.amazee\.io/administratively-disabled)]}{.metadata.namespace}{'\t'}{.metadata.name}{'\n'}{end}"
+ exit 0
+}
+
+# Function to check if you are running the script on the right cluster and if you're logged in correctly
+function check_cluster_api() {
+ # Check on which cluster you're going to run commands
+ if oc whoami --show-server | grep -q -v "$CLUSTER_API_HOSTNAME"; then
+ echo "Please connect to the right cluster"
+ exit 1
+ fi
+
+ # Check if you're logged in correctly
+ if [ $(oc status|grep -q "Unauthorized";echo $?) -eq 0 ]; then
+ echo "Please login into the cluster"
+ exit 1
+ fi
+}
+
+# Function to get a list of all routes with acme.openshift.io/status.provisioningStatus.orderStatus=pending
+function get_pending_routes() {
+ for namespace in $(oc get projects --no-headers=true |awk '{print $1}'|sort -u|grep -E "$REGEX")
+ do
+ IFS=$';'
+    # For each route in a namespace with `tls-acme` set to true, check whether the `orderStatus` is pending
+ for routelist in $(oc get route -n "$namespace" -o=jsonpath="{range .items[?(@.metadata.annotations.kubernetes\.io/tls-acme=='true')]}{.metadata.name}{'\n'}{.metadata.annotations.acme\.openshift\.io/status}{';'}{end}"|sed "s/^[[:space:]]*//")
+ do
+ PENDING_ROUTE_NAME=$(echo "$routelist"|sed -n 1p)
+ if echo "$routelist"|sed -n 4p | grep -q pending; then
+ STATUS="Pending"
+ echo "Route $PENDING_ROUTE_NAME in $namespace is in $STATUS status"
+ fi
+
+ done
+ unset IFS
+ done
+}
+
+# Function for creating an array with all routes that might be updated
+function create_routes_array() {
+  # Get the list of namespaces with broken routes, according to REGEX
+ for namespace in $(oc get routes --all-namespaces|grep exposer|awk '{print $1}'|sort -u|grep -E "$REGEX")
+ do
+ # Raw JSON Openshift project output
+ PROJECTJSON="$(oc get project "$namespace" -o json)"
+
+ # Gather project name based on a label or an annotation
+ if [ $(echo $PROJECTJSON |grep -q 'lagoon.sh/project'; echo $?) -eq 0 ]; then
+ PROJECTNAME=$(echo "${PROJECTJSON}" | grep 'lagoon.sh/project' | awk -F'"' '{print $4}')
+ else
+ PROJECTNAME=$(echo "${PROJECTJSON}" |grep display-name|awk -F'[][]' '{print $2}'|tr "_" "-")
+ fi
+
+ # Get the list of broken unique routes for each namespace
+ for routelist in $(oc get -n "$namespace" route|grep exposer|awk -vNAMESPACE="$namespace" -vPROJECTNAME="$PROJECTNAME" '{print $1";"$2";"NAMESPACE";"PROJECTNAME}'|sort -u -k2 -t ";")
+ do
+ # Put the list into an array
+ ROUTES_ARRAY+=("$routelist")
+ done
+ done
+
+ # Create a sorted array of unique route to check
+ ROUTES_ARRAY_SORTED=($(sort -u -k 2 -t ";"<<<"${ROUTES_ARRAY[*]}"))
+}
+
+# Function to check the routes, update them and delete the exposer's routes
+function check_routes() {
+
+ # Cluster array of IPs
+ CLUSTER_IPS=($(dig +short "$CLUSTER_HOSTNAME"))
+ for i in "${ROUTES_ARRAY_SORTED[@]}"
+ do
+    # Transform the item into an array
+ route=($(echo "$i" | tr ";" "\n"))
+
+ # Gather some useful variables
+ ROUTE_NAME=${route[0]}
+ ROUTE_HOSTNAME=${route[1]}
+ ROUTE_NAMESPACE=${route[2]}
+ ROUTE_PROJECTNAME=${route[3]}
+
+ # Get route DNS record(s)
+ if [[ $(dig +short "$ROUTE_HOSTNAME" &> /dev/null; echo $?) -ne 0 ]]; then
+ ROUTE_HOSTNAME_IP="null"
+ else
+ ROUTE_HOSTNAME_IP=$(dig +short "$ROUTE_HOSTNAME")
+ fi
+
+ # Check if the route matches the Cluster's IP(s)
+ if echo "$ROUTE_HOSTNAME_IP" | grep -E -q -v "${CLUSTER_IPS[*]}"; then
+
+ # If IP is empty, then no DNS record set
+ if [ -z "$ROUTE_HOSTNAME_IP" ]; then
+ DNS_ERROR="No A or CNAME record set"
+ else
+ DNS_ERROR="$ROUTE_HOSTNAME in $ROUTE_NAMESPACE has no DNS record poiting to ${CLUSTER_IPS[*]} and going to disable tls-acme"
+ fi
+
+ # Print the error on stdout
+ echo "$DNS_ERROR"
+
+ if [[ "$NOTIFYONLY" = "true" ]]; then
+ notify_customer "$ROUTE_PROJECTNAME"
+ else
+ # Call the update function to update the route
+ update_annotation "$ROUTE_HOSTNAME" "$ROUTE_NAMESPACE"
+ notify_customer "$ROUTE_PROJECTNAME"
+
+ # Now once the main route is updated, it's time to get rid of exposers' routes
+ for j in $(oc get -n "$ROUTE_NAMESPACE" route|grep exposer|grep -E '(^|\s)'"$ROUTE_HOSTNAME"'($|\s)'|awk '{print $1";"$2}')
+ do
+ ocroute=($(echo "$j" | tr ";" "\n"))
+ OCROUTE_NAME=${ocroute[0]}
+ if [[ $DRYRUN = true ]]; then
+ echo -e "DRYRUN oc delete -n $ROUTE_NAMESPACE route $OCROUTE_NAME"
+ else
+ echo -e "\nDelete route $OCROUTE_NAME"
+ oc delete -n "$ROUTE_NAMESPACE" route "$OCROUTE_NAME"
+ fi
+ done
+ fi
+ fi
+ echo -e "\n"
+
+
+ done
+}
+
+# Function to update route's annotations (ie: update tls-acme, remove tls-acme-awaiting-* and set a new one for internal purposes)
+function update_annotation() {
+ echo "Update route's annotations"
+ OCOPTIONS="--overwrite"
+ if [[ "$DRYRUN" = "true" ]]; then
+ OCOPTIONS="--dry-run --overwrite"
+ fi
+
+ # Annotate the route
+ oc annotate -n "$2" $OCOPTIONS route "$1" acme.openshift.io/status- kubernetes.io/tls-acme-awaiting-authorization-owner- kubernetes.io/tls-acme-awaiting-authorization-at-url- kubernetes.io/tls-acme="false" amazee.io/administratively-disabled="$(date +%s)"
+}
+
+
+# Function to notify customer about the misconfiguration of their routes
+function notify_customer() {
+
+ # Get Slack|Rocketchat channel and webhook
+ if [ $(TEST=$(lagoon list slack -p "$1" --no-header|awk '{print $3";"$4}'); echo $?) -eq 0 ]; then
+ NOTIFICATION="slack"
+ elif [ $(TEST=$(lagoon list rocketchat -p "$1" --no-header|awk '{print $3";"$4}'); echo $?) -eq 0 ]; then
+ NOTIFICATION="rocketchat"
+ else
+ echo "No notification set"
+ return 0
+ fi
+
+ MESSAGE="Your $ROUTE_HOSTNAME route is configured in the \`.lagoon.yml\` file to issue an TLS certificate from Lets Encrypt. Unfortunately Lagoon is unable to issue a certificate as $DNS_ERROR.\nTo be issued correctly, the DNS records for $ROUTE_HOSTNAME should point to $CLUSTER_HOSTNAME with an CNAME record (preferred) or to ${CLUSTER_IPS[*]} via an A record (also possible but not preferred).\nIf you don't need the SSL certificate or you are using a CDN that provides you with an TLS certificate, please update your .lagoon.yml file by setting the tls-acme parameter to false for $ROUTE_HOSTNAME, as described here: https://lagoon.readthedocs.io/en/latest/using_lagoon/lagoon_yml/#ssl-configuration-tls-acme.\nWe have now administratively disabled the issuing of Lets Encrypt certificate for $ROUTE_HOSTNAME in order to protect the cluster, this will be reset during the next deployment, therefore we suggest to resolve this issue as soon as possible. Feel free to reach out to us for further information.\nThanks you.\namazee.io team"
+
+ NOTIFICATION_DATA=($(lagoon list $NOTIFICATION -p "$1" --no-header|awk '{print $3";"$4}'))
+ for notification in ${NOTIFICATION_DATA[@]}
+ do
+ CHANNEL=$(echo "$notification"|cut -f1 -d ";")
+ WEBHOOK=$(echo "$notification"|cut -f2 -d ";")
+
+ # json Payload
+ PAYLOAD="\"channel\": \"$CHANNEL\", \"text\": \"${MESSAGE}\""
+
+ echo -e "Sending notification into ${CHANNEL}"
+
+ # Execute curl to send message into the channel
+ if [[ $DRYRUN = true ]]; then
+ echo "DRYRUN Sending notification on \"$NOTIFICATION\" curl -X POST -H 'Content-type: application/json' --data '{'"$PAYLOAD"'}' "$WEBHOOK""
+ else
+ curl -X POST -H 'Content-type: application/json' --data '{'"${PAYLOAD}"'}' ${WEBHOOK}
+ fi
+ done
+}
+
+# Main function
+function main() {
+
+ COMMAND="$1"
+
+ # Check first the cluster you're connected to
+ echo -e "You're running the script on $CLUSTER_HOSTNAME\nDRYRUN mode is set to \"$DRYRUN\""
+ check_cluster_api
+
+ case "$COMMAND" in
+ help)
+ usage
+ ;;
+ getpendingroutes)
+ get_pending_routes
+ ;;
+ getdisabledroutes)
+ get_all_disabled_routes
+ ;;
+ getbrokenroutes)
+ echo -e "\nCreating a list of possible broken routes"
+ create_routes_array
+ echo -e "ROUTE_NAMESPACE;ROUTE_NAME;ROUTE_HOSTNAME"|column -t -s ";"
+ for i in "${ROUTES_ARRAY_SORTED[@]}"
+ do
+      # Transform the item into an array
+ route=($(echo "$i" | tr ";" "\n"))
+ # Gather some useful variables
+ ROUTE_NAME=${route[0]}
+ ROUTE_HOSTNAME=${route[1]}
+ ROUTE_NAMESPACE=${route[2]}
+ echo -e "$ROUTE_NAMESPACE;$ROUTE_NAME;$ROUTE_HOSTNAME"|column -t -s ";"
+ done
+ ;;
+ updateroutes)
+ echo -e "Checking routes\n"
+ create_routes_array
+ check_routes
+ ;;
+ *)
+ usage
+ ;;
+ esac
+}
+
+initial_checks "$COMMAND"
+main "$COMMAND"
diff --git a/helpers/label-namespaces.sh b/helpers/label-namespaces.sh
new file mode 100755
index 0000000000..e9f1f93c8e
--- /dev/null
+++ b/helpers/label-namespaces.sh
@@ -0,0 +1,39 @@
+#!/usr/bin/env bash
+
+##
+# Label all namespaces with lagoon info
+#
+# Old environments weren't labelled the way that Lagoon expects. This script
+# can be run against a cluster to add the missing labels.
+
+set -euo pipefail
+#set -x
+
+# Loop through all oc projects.
+while read -r project ; do
+
+ # Check if lagoon-env configmap exists.
+ if oc get configmap -n "$project" lagoon-env >/dev/null 2>&1; then
+
+ echo "################################################"
+ echo "Annotating project: $project..."
+ echo "################################################"
+
+ LAGOON_PROJECT=$(oc get configmaps -n "$project" lagoon-env -o yaml | awk '/LAGOON_PROJECT:/ { print $2 }')
+ LAGOON_ENVIRONMENT_TYPE=$(oc get configmaps -n "$project" lagoon-env -o yaml | awk '/LAGOON_ENVIRONMENT_TYPE:/ { print $2 }')
+ LAGOON_GIT_SAFE_BRANCH=$(oc get configmaps -n "$project" lagoon-env -o yaml | awk '/LAGOON_GIT_SAFE_BRANCH:/ { print $2 }')
+ MARIADB_DATABASE=$(oc get configmaps -n "$project" lagoon-env -o yaml | awk '/MARIADB_DATABASE:/ { print $2 }')
+ MARIADB_USERNAME=$(oc get configmaps -n "$project" lagoon-env -o yaml | awk '/MARIADB_USERNAME:/ { print $2 }')
+
+ oc label namespace "$project" "lagoon.sh/project=$LAGOON_PROJECT" --overwrite
+ oc label namespace "$project" "lagoon.sh/environmentType=$LAGOON_ENVIRONMENT_TYPE" --overwrite
+ oc label namespace "$project" "lagoon.sh/environment=$LAGOON_GIT_SAFE_BRANCH" --overwrite
+ oc label namespace "$project" "lagoon.sh/mariadb-schema=$MARIADB_DATABASE" --overwrite
+ oc label namespace "$project" "lagoon.sh/mariadb-username=$MARIADB_USERNAME" --overwrite
+ else
+
+ echo "No lagoon-env configmap found for $project"
+
+ fi
+
+done < <(oc get ns -l '!lagoon.sh/project' | sed '1d' | awk '{print $1}')
diff --git a/helpers/shared-to-shared-migrate.sh b/helpers/shared-to-shared-migrate.sh
index a3878c909a..f734e3d663 100755
--- a/helpers/shared-to-shared-migrate.sh
+++ b/helpers/shared-to-shared-migrate.sh
@@ -4,7 +4,7 @@
# What this script is for
# =======================
# This script will migrate a database user, access, database and contents from
-# a source cluster to a destination cluster.
+# an existing cluster to a destination cluster.
#
# At the moment, this is geared towards the Ansible Service Broker, but likely
# can be modified in the future to work with the DBaaS operator.
@@ -18,13 +18,13 @@
# ============
# * You are logged into OpenShift CLI and have access to the NAMESPACE you want
# to migrate.
-# * You have a `.my.cnf` file for the source and desintation database clusters.
-# * If your database clusters are not directly accessible, then you have
-# created SSH tunnels to expose them on a local port.
+# * You have a `.my.cnf` file for the destination database cluster.
+# * If your destination database cluster is not directly accessible, then you
+# have created SSH tunnels to expose them on a local port.
#
# How to get your existing ASB root credentials
# =============================================
-# oc -n openshift-ansible-service-broker get secret/lagoon-dbaas-db-credentials -o JSON | jq '.data'
+# oc -n openshift-ansible-service-broker get secret/lagoon-dbaas-db-credentials -o json | jq '.data | map_values(@base64d)'
#
# How to create a `.my.cnf` file
# ==============================
@@ -39,22 +39,33 @@
# =======================================================================
# ssh -L 33007:shared-cluster.cluster-banana.ap-southeast-2.rds.amazonaws.com:3306 jumpbox.aws.amazee.io
#
-# Example commands
-# ================
+# Example command 1
+# =================
# ./helpers/shared-to-shared-migrate.sh \
-# --source shared-cluster.cluster-banana.ap-southeast-2.rds.amazonaws.com \
# --destination shared-cluster.cluster-apple.ap-southeast-2.rds.amazonaws.com \
# --replica shared-cluster.cluster-r0-apple.ap-southeast-2.rds.amazonaws.com \
# --namespace NAMESPACE \
# --dry-run
#
+# Example command 2
+# =================
+# namespaces="
+# foo-example-com-production
+# bar-example-com-production
+# baz-example-com-production
+# quux-example-com-production
+# "
+# for namespace in $namespaces; do
+# ./helpers/shared-to-shared-migrate.sh \
+# --dry-run \
+# --namespace "$namespace" \
+# --destination shared-mysql-production-1-cluster.cluster-plum.ap-southeast-2.rds.amazonaws.com \
+# --replica shared-mysql-production-1-cluster.cluster-ro-plum.ap-southeast-2.rds.amazonaws.com
+# done
+#
set -euo pipefail
-# Reset in case getopts has been used previously in the shell.
-OPTIND=1
-
# Initialize our own variables:
-SOURCE_CLUSTER=""
DESTINATION_CLUSTER=""
REPLICA_CLUSTER=""
NAMESPACE=""
@@ -63,31 +74,39 @@ TIMESTAMP=$(date +%s)
# Colours.
shw_grey () {
- echo $(tput bold)$(tput setaf 0) $@ $(tput sgr 0)
+ tput bold
+ tput setaf 0
+ echo "$@"
+ tput sgr0
}
shw_norm () {
- echo $(tput bold)$(tput setaf 9) $@ $(tput sgr 0)
+ tput bold
+ tput setaf 9
+ echo "$@"
+ tput sgr0
}
shw_info () {
- echo $(tput bold)$(tput setaf 4) $@ $(tput sgr 0)
+ tput bold
+ tput setaf 4
+ echo "$@"
+ tput sgr0
}
shw_warn () {
- echo $(tput bold)$(tput setaf 2) $@ $(tput sgr 0)
+ tput bold
+ tput setaf 2
+ echo "$@"
+ tput sgr0
}
shw_err () {
- echo $(tput bold)$(tput setaf 1) $@ $(tput sgr 0)
+ tput bold
+ tput setaf 1
+ echo "$@"
+ tput sgr0
}
# Parse input arguments.
while [[ $# -gt 0 ]] ; do
- key="$1"
-
- case $key in
- -s|--source)
- SOURCE_CLUSTER="$2"
- shift # past argument
- shift # past value
- ;;
+ case $1 in
-d|--destination)
DESTINATION_CLUSTER="$2"
shift # past argument
@@ -107,53 +126,51 @@ while [[ $# -gt 0 ]] ; do
DRY_RUN="TRUE"
shift # past argument
;;
+ *)
+ echo "Invalid Argument: $1"
+ exit 3
+ ;;
esac
done
shw_grey "================================================"
-shw_grey " SOURCE_CLUSTER=$SOURCE_CLUSTER"
+shw_grey " START_TIMESTAMP='$(date +%Y-%m-%dT%H:%M:%S%z)'"
+shw_grey "================================================"
shw_grey " DESTINATION_CLUSTER=$DESTINATION_CLUSTER"
shw_grey " REPLICA_CLUSTER=$REPLICA_CLUSTER"
shw_grey " NAMESPACE=$NAMESPACE"
shw_grey "================================================"
-for util in oc jq mysql shyaml; do
+for util in oc jq mysql; do
if ! command -v ${util} > /dev/null; then
shw_err "Please install ${util}"
exit 1
fi
done
-CONF_FILE=${HOME}/.my.cnf-${SOURCE_CLUSTER}
-if [ ! -f "$CONF_FILE" ]; then
- shw_err "ERROR: please create $CONF_FILE so I can know how to connect to ${SOURCE_CLUSTER}"
- exit 2
-fi
-
CONF_FILE=${HOME}/.my.cnf-${DESTINATION_CLUSTER}
if [ ! -f "$CONF_FILE" ]; then
shw_err "ERROR: please create $CONF_FILE so I can know how to connect to ${DESTINATION_CLUSTER}"
exit 2
fi
-if [ ! -z "${DRY_RUN}" ] ; then
+if [ "$DRY_RUN" ] ; then
shw_warn "Dry run is enabled, so no network service changes will take place."
fi
# Load the DBaaS credentials for the project
-SECRETS=/tmp/${NAMESPACE}-migration.yaml
-oc -n ${NAMESPACE} get secret mariadb-servicebroker-credentials -o yaml > $SECRETS
+SECRETS=$(oc -n "$NAMESPACE" get secret mariadb-servicebroker-credentials -o json)
-DB_NETWORK_SERVICE=$(cat $SECRETS | shyaml get-value data.DB_HOST | base64 -D)
-if cat ${SECRETS} | grep DB_READREPLICA_HOSTS > /dev/null ; then
- DB_READREPLICA_HOSTS=$(cat $SECRETS | shyaml get-value data.DB_READREPLICA_HOSTS | base64 -D)
+DB_NETWORK_SERVICE=$(echo "$SECRETS" | jq -er '.data.DB_HOST | @base64d')
+if echo "$SECRETS" | grep -q DB_READREPLICA_HOSTS ; then
+ DB_READREPLICA_HOSTS=$(echo "$SECRETS" | jq -er '.data.DB_READREPLICA_HOSTS | @base64d')
else
DB_READREPLICA_HOSTS=""
fi
-DB_USER=$(cat $SECRETS | shyaml get-value data.DB_USER | base64 -D)
-DB_PASSWORD=$(cat $SECRETS | shyaml get-value data.DB_PASSWORD | base64 -D)
-DB_NAME=$(cat $SECRETS | shyaml get-value data.DB_NAME | base64 -D)
-DB_PORT=$(cat $SECRETS | shyaml get-value data.DB_PORT | base64 -D)
+DB_USER=$(echo "$SECRETS" | jq -er '.data.DB_USER | @base64d')
+DB_PASSWORD=$(echo "$SECRETS" | jq -er '.data.DB_PASSWORD | @base64d')
+DB_NAME=$(echo "$SECRETS" | jq -er '.data.DB_NAME | @base64d')
+DB_PORT=$(echo "$SECRETS" | jq -er '.data.DB_PORT | @base64d')
shw_grey "================================================"
shw_grey " DB_NETWORK_SERVICE=$DB_NETWORK_SERVICE"
@@ -161,10 +178,11 @@ shw_grey " DB_READREPLICA_HOSTS=$DB_READREPLICA_HOSTS"
shw_grey " DB_USER=$DB_USER"
shw_grey " DB_PASSWORD=$DB_PASSWORD"
shw_grey " DB_NAME=$DB_NAME"
+shw_grey " DB_PORT=$DB_PORT"
shw_grey "================================================"
# Ensure there is a database in the destination.
-shw_info "> Setting up the MySQL bits"
+shw_info "> Preparing Database, User, and permissions on destination"
shw_info "================================================"
CONF_FILE=${HOME}/.my.cnf-${DESTINATION_CLUSTER}
mysql --defaults-file="$CONF_FILE" -se "CREATE DATABASE IF NOT EXISTS \`${DB_NAME}\`;"
@@ -178,43 +196,49 @@ shw_info "================================================"
mysql --defaults-file="$CONF_FILE" -e "SELECT * FROM mysql.db WHERE Db = '${DB_NAME}'\G;"
# Dump the database inside the CLI pod.
-POD=$(oc -n ${NAMESPACE} get pods -o json --show-all=false -l service=cli | jq -r '.items[].metadata.name')
-shw_info "> Dumping database ${DB_NAME} on pod ${POD} on host ${DB_NETWORK_SERVICE}"
+POD=$(oc -n "$NAMESPACE" get pods -o json --field-selector=status.phase=Running -l service=cli | jq -r '.items[0].metadata.name // empty')
+if [ -z "$POD" ]; then
+ shw_warn "No running cli pod in namespace $NAMESPACE"
+ shw_warn "Scaling up 1 cli DeploymentConfig pod"
+ oc -n "$NAMESPACE" scale dc cli --replicas=1 --timeout=2m
+ sleep 32 # hope for timely scheduling
+ POD=$(oc -n "$NAMESPACE" get pods -o json --field-selector=status.phase=Running -l service=cli | jq -er '.items[0].metadata.name')
+fi
+shw_info "> Dumping database $DB_NAME on pod $POD on host $DB_NETWORK_SERVICE"
shw_info "================================================"
-oc -n ${NAMESPACE} exec ${POD} -- bash -c "time mysqldump -h ${DB_NETWORK_SERVICE} -u ${DB_USER} -p${DB_PASSWORD} ${DB_NAME} > /tmp/migration.sql"
-oc -n ${NAMESPACE} exec ${POD} -- ls -lath /tmp/migration.sql || exit 1
-oc -n ${NAMESPACE} exec ${POD} -- head -n 5 /tmp/migration.sql
-oc -n ${NAMESPACE} exec ${POD} -- tail -n 5 /tmp/migration.sql || exit 1
+oc -n "$NAMESPACE" exec "$POD" -- bash -c "time mysqldump -h '$DB_NETWORK_SERVICE' -u '$DB_USER' -p'$DB_PASSWORD' '$DB_NAME' > /tmp/migration.sql"
+oc -n "$NAMESPACE" exec "$POD" -- ls -lh /tmp/migration.sql
+oc -n "$NAMESPACE" exec "$POD" -- head -n 5 /tmp/migration.sql
+oc -n "$NAMESPACE" exec "$POD" -- tail -n 5 /tmp/migration.sql
shw_norm "> Dump is done"
shw_norm "================================================"
# Import to new database.
shw_info "> Importing the dump into ${DESTINATION_CLUSTER}"
shw_info "================================================"
-oc -n ${NAMESPACE} exec ${POD} -- bash -c "time mysql -h ${DESTINATION_CLUSTER} -u ${DB_USER} -p${DB_PASSWORD} ${DB_NAME} < /tmp/migration.sql"
-oc -n ${NAMESPACE} exec ${POD} -- bash -c "rm /tmp/migration.sql"
+oc -n "$NAMESPACE" exec "$POD" -- bash -c "time mysql -h '$DESTINATION_CLUSTER' -u '$DB_USER' -p'$DB_PASSWORD' '$DB_NAME' < /tmp/migration.sql"
+oc -n "$NAMESPACE" exec "$POD" -- rm /tmp/migration.sql
shw_norm "> Import is done"
shw_norm "================================================"
# Alter the network service(s).
-shw_info "> Altering the Network Service ${DB_NETWORK_SERVICE} to point at ${DESTINATION_CLUSTER}"
+shw_info "> Altering the Network Service $DB_NETWORK_SERVICE to point at $DESTINATION_CLUSTER"
shw_info "================================================"
-oc -n ${NAMESPACE} get svc/${DB_NETWORK_SERVICE} -o yaml > /tmp/${NAMESPACE}-svc.yaml
-if [ -z "${DRY_RUN}" ] ; then
- oc -n ${NAMESPACE} patch svc/${DB_NETWORK_SERVICE} -p "{\"spec\":{\"externalName\": \"${DESTINATION_CLUSTER}\"}}"
-else
+ORIGINAL_DB_HOST=$(oc -n "$NAMESPACE" get "svc/$DB_NETWORK_SERVICE" -o json --export | tee "/tmp/$NAMESPACE-svc.json" | jq -er '.spec.externalName')
+if [ "$DRY_RUN" ] ; then
echo "**DRY RUN**"
+else
+ oc -n "$NAMESPACE" patch "svc/$DB_NETWORK_SERVICE" -p "{\"spec\":{\"externalName\": \"$DESTINATION_CLUSTER\"}}"
fi
-if [ ! -z "${DB_READREPLICA_HOSTS}" ]; then
- shw_info "> Altering the Network Service ${DB_READREPLICA_HOSTS} to point at ${REPLICA_CLUSTER}"
+if [ "$DB_READREPLICA_HOSTS" ]; then
+ shw_info "> Altering the Network Service $DB_READREPLICA_HOSTS to point at $REPLICA_CLUSTER"
shw_info "================================================"
- oc -n ${NAMESPACE} get svc/${DB_READREPLICA_HOSTS} -o yaml > /tmp/${NAMESPACE}-svc-replica.yaml
- ORIGINAL_DB_READREPLICA_HOSTS=$(cat /tmp/${NAMESPACE}-svc-replica.yaml | shyaml get-value spec.externalName)
- if [ -z "${DRY_RUN}" ] ; then
- oc -n ${NAMESPACE} patch svc/${DB_READREPLICA_HOSTS} -p "{\"spec\":{\"externalName\": \"${REPLICA_CLUSTER}\"}}"
- else
+ ORIGINAL_DB_READREPLICA_HOSTS=$(oc -n "$NAMESPACE" get "svc/$DB_READREPLICA_HOSTS" -o json --export | tee "/tmp/$NAMESPACE-svc-replica.json" | jq -er '.spec.externalName')
+ if [ "$DRY_RUN" ] ; then
echo "**DRY RUN**"
+ else
+ oc -n "$NAMESPACE" patch "svc/$DB_READREPLICA_HOSTS" -p "{\"spec\":{\"externalName\": \"$REPLICA_CLUSTER\"}}"
fi
fi
@@ -225,30 +249,33 @@ sleep 1
# Verify the correct RDS cluster.
shw_info "> Output the RDS cluster that Drush is connecting to"
shw_info "================================================"
-oc -n ${NAMESPACE} exec ${POD} -- bash -c "drush sqlq 'SELECT @@aurora_server_id;'"
+oc -n "$NAMESPACE" exec "$POD" -- bash -c "drush sqlq 'SELECT @@aurora_server_id;'"
# Drush status.
shw_info "> Drush status"
shw_info "================================================"
-oc -n ${NAMESPACE} exec ${POD} -- bash -c "drush status"
+oc -n "$NAMESPACE" exec "$POD" -- bash -c "drush status"
# Get routes, and ensure a cache bust works.
-ROUTE=$(oc -n ${NAMESPACE} get routes -o json | jq --raw-output '.items[0].spec.host')
+ROUTE=$(oc -n "$NAMESPACE" get routes -o json | jq -er '.items[0].spec.host')
shw_info "> Testing the route https://${ROUTE}/?${TIMESTAMP}"
shw_info "================================================"
curl -skLIXGET "https://${ROUTE}/?${TIMESTAMP}" \
-A "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/79.0.3945.130 Safari/537.36" \
- --cookie "NO_CACHE=1" | grep -E "HTTP|Cache|Location|LAGOON" || TRUE
+ --cookie "NO_CACHE=1" | grep -E "HTTP|Cache|Location|LAGOON" || true
shw_grey "================================================"
shw_grey ""
shw_grey "In order to rollback this change, edit the Network Service(s) like so:"
shw_grey ""
-shw_grey "oc -n ${NAMESPACE} patch svc/${DB_NETWORK_SERVICE} -p \"{\\\"spec\\\":{\\\"externalName': \\\"${SOURCE_CLUSTER}\\\"}}\""
-if [ ! -z "${DB_READREPLICA_HOSTS}" ]; then
- shw_grey "oc -n ${NAMESPACE} patch svc/${DB_READREPLICA_HOSTS} -p \"{\\\"spec\\\":{\\\"externalName': \\\"${ORIGINAL_DB_READREPLICA_HOSTS}\\\"}}\""
+shw_grey "oc -n $NAMESPACE patch svc/$DB_NETWORK_SERVICE -p '{\"spec\":{\"externalName\": \"$ORIGINAL_DB_HOST\"}}'"
+if [ "$DB_READREPLICA_HOSTS" ]; then
+ shw_grey "oc -n $NAMESPACE patch svc/$DB_READREPLICA_HOSTS -p '{\"spec\":{\"externalName\": \"$ORIGINAL_DB_READREPLICA_HOSTS\"}}'"
fi
echo ""
-shw_norm "Done"
+shw_grey "================================================"
+shw_grey " END_TIMESTAMP='$(date +%Y-%m-%dT%H:%M:%S%z)'"
+shw_grey "================================================"
+shw_norm "Done in $SECONDS seconds"
exit 0
diff --git a/helpers/update-versions.yml b/helpers/update-versions.yml
new file mode 100644
index 0000000000..33d5180709
--- /dev/null
+++ b/helpers/update-versions.yml
@@ -0,0 +1,58 @@
+# Lagoon Version Update Helper
+#
+# Helper to update Version inside Dockerfiles
+# Update versions below in `vars` and execute locally
+#
+# ansible-playbook helpers/update-versions.yml
+- name: update versions
+ hosts: 127.0.0.1
+ connection: local
+ vars:
+ # Newrelic - https://docs.newrelic.com/docs/release-notes/agent-release-notes/php-release-notes/
+ NEWRELIC_VERSION: '9.11.0.267'
+ # Composer - https://getcomposer.org/download/
+ COMPOSER_VERSION: '1.10.8'
+ COMPOSER_HASH_SHA256: '4c40737f5d5f36d04f8b2df37171c6a1ff520efcadcb8626cc7c30bd4c5178e5'
+ # Drupal Console Launcher - https://github.com/hechoendrupal/drupal-console-launcher/releases
+ DRUPAL_CONSOLE_LAUNCHER_VERSION: 1.9.4
+ DRUPAL_CONSOLE_LAUNCHER_SHA: b7759279668caf915b8e9f3352e88f18e4f20659
+ # Drush - https://github.com/drush-ops/drush/releases
+ DRUSH_VERSION: 8.3.5
+ # Drush Launcher Version - https://github.com/drush-ops/drush-launcher/releases
+ DRUSH_LAUNCHER_VERSION: 0.6.0
+ tasks:
+ - name: update NEWRELIC_VERSION
+ lineinfile:
+ path: "{{ lookup('env', 'PWD') }}/images/php/fpm/Dockerfile"
+ regexp: 'ENV NEWRELIC_VERSION='
+ line: 'ENV NEWRELIC_VERSION={{ NEWRELIC_VERSION }}'
+ - name: update COMPOSER_VERSION
+ lineinfile:
+ path: "{{ lookup('env', 'PWD') }}/images/php/cli/Dockerfile"
+ regexp: 'ENV COMPOSER_VERSION='
+ line: 'ENV COMPOSER_VERSION={{ COMPOSER_VERSION }} \'
+ - name: update COMPOSER_HASH_SHA256
+ lineinfile:
+ path: "{{ lookup('env', 'PWD') }}/images/php/cli/Dockerfile"
+ regexp: 'COMPOSER_HASH_SHA256='
+ line: ' COMPOSER_HASH_SHA256={{ COMPOSER_HASH_SHA256 }}'
+ - name: update DRUPAL_CONSOLE_LAUNCHER_VERSION
+ lineinfile:
+ path: "{{ lookup('env', 'PWD') }}/images/php/cli-drupal/Dockerfile"
+ regexp: 'ENV DRUPAL_CONSOLE_LAUNCHER_VERSION='
+ line: 'ENV DRUPAL_CONSOLE_LAUNCHER_VERSION={{ DRUPAL_CONSOLE_LAUNCHER_VERSION }} \'
+ - name: update DRUPAL_CONSOLE_LAUNCHER_SHA
+ lineinfile:
+ path: "{{ lookup('env', 'PWD') }}/images/php/cli-drupal/Dockerfile"
+ regexp: 'DRUPAL_CONSOLE_LAUNCHER_SHA='
+ line: ' DRUPAL_CONSOLE_LAUNCHER_SHA={{ DRUPAL_CONSOLE_LAUNCHER_SHA }} \'
+ - name: update DRUSH_VERSION
+ lineinfile:
+ path: "{{ lookup('env', 'PWD') }}/images/php/cli-drupal/Dockerfile"
+ regexp: 'DRUSH_VERSION='
+ line: ' DRUSH_VERSION={{ DRUSH_VERSION }} \'
+ - name: update DRUSH_LAUNCHER_VERSION
+ lineinfile:
+ path: "{{ lookup('env', 'PWD') }}/images/php/cli-drupal/Dockerfile"
+ regexp: 'DRUSH_LAUNCHER_VERSION='
+ line: ' DRUSH_LAUNCHER_VERSION={{ DRUSH_LAUNCHER_VERSION }} \'
diff --git a/images/kubectl-build-deploy-dind/build-deploy-docker-compose.sh b/images/kubectl-build-deploy-dind/build-deploy-docker-compose.sh
index 587e502a9f..c57454f145 100755
--- a/images/kubectl-build-deploy-dind/build-deploy-docker-compose.sh
+++ b/images/kubectl-build-deploy-dind/build-deploy-docker-compose.sh
@@ -183,8 +183,8 @@ if [[ ( "$BUILD_TYPE" == "pullrequest" || "$BUILD_TYPE" == "branch" ) && ! $TH
LAGOON_POSTROLLOUT_DISABLED=($(echo $LAGOON_PROJECT_VARIABLES | jq -r '.[] | select(.name == "LAGOON_POSTROLLOUT_DISABLED") | "\(.value)"'))
fi
if [ ! -z "$LAGOON_ENVIRONMENT_VARIABLES" ]; then
- LAGOON_PREROLLOUT_DISABLED=($(echo $LAGOON_PROJECT_VARIABLES | jq -r '.[] | select(.name == "LAGOON_PREROLLOUT_DISABLED") | "\(.value)"'))
- LAGOON_POSTROLLOUT_DISABLED=($(echo $LAGOON_PROJECT_VARIABLES | jq -r '.[] | select(.name == "LAGOON_POSTROLLOUT_DISABLED") | "\(.value)"'))
+ LAGOON_PREROLLOUT_DISABLED=($(echo $LAGOON_ENVIRONMENT_VARIABLES | jq -r '.[] | select(.name == "LAGOON_PREROLLOUT_DISABLED") | "\(.value)"'))
+ LAGOON_POSTROLLOUT_DISABLED=($(echo $LAGOON_ENVIRONMENT_VARIABLES | jq -r '.[] | select(.name == "LAGOON_POSTROLLOUT_DISABLED") | "\(.value)"'))
fi
set -x
@@ -240,6 +240,13 @@ if [[ ( "$BUILD_TYPE" == "pullrequest" || "$BUILD_TYPE" == "branch" ) && ! $TH
do
DOCKERFILE=$(cat $DOCKER_COMPOSE_YAML | shyaml get-value services.$IMAGE_NAME.build.dockerfile false)
+
+ # allow to overwrite build dockerfile for this environment and service
+ ENVIRONMENT_DOCKERFILE_OVERRIDE=$(cat .lagoon.yml | shyaml get-value environments.${BRANCH//./\\.}.overrides.$IMAGE_NAME.build.dockerfile false)
+ if [ ! $ENVIRONMENT_DOCKERFILE_OVERRIDE == "false" ]; then
+ DOCKERFILE=$ENVIRONMENT_DOCKERFILE_OVERRIDE
+ fi
+
if [ $DOCKERFILE == "false" ]; then
# No Dockerfile defined, assuming to download the Image directly
@@ -250,6 +257,13 @@ if [[ ( "$BUILD_TYPE" == "pullrequest" || "$BUILD_TYPE" == "branch" ) && ! $TH
# allow to overwrite image that we pull
OVERRIDE_IMAGE=$(cat $DOCKER_COMPOSE_YAML | shyaml get-value services.$IMAGE_NAME.labels.lagoon\\.image false)
+
+ # allow to overwrite image that we pull for this environment and service
+ ENVIRONMENT_IMAGE_OVERRIDE=$(cat .lagoon.yml | shyaml get-value environments.${BRANCH//./\\.}.overrides.$IMAGE_NAME.image false)
+ if [ ! $ENVIRONMENT_IMAGE_OVERRIDE == "false" ]; then
+ OVERRIDE_IMAGE=$ENVIRONMENT_IMAGE_OVERRIDE
+ fi
+
if [ ! $OVERRIDE_IMAGE == "false" ]; then
# expand environment variables from ${OVERRIDE_IMAGE}
PULL_IMAGE=$(echo "${OVERRIDE_IMAGE}" | envsubst)
@@ -269,6 +283,13 @@ if [[ ( "$BUILD_TYPE" == "pullrequest" || "$BUILD_TYPE" == "branch" ) && ! $TH
TEMPORARY_IMAGE_NAME="${NAMESPACE}-${IMAGE_NAME}"
BUILD_CONTEXT=$(cat $DOCKER_COMPOSE_YAML | shyaml get-value services.$IMAGE_NAME.build.context .)
+
+ # allow to overwrite build context for this environment and service
+ ENVIRONMENT_BUILD_CONTEXT_OVERRIDE=$(cat .lagoon.yml | shyaml get-value environments.${BRANCH//./\\.}.overrides.$IMAGE_NAME.build.context false)
+ if [ ! $ENVIRONMENT_BUILD_CONTEXT_OVERRIDE == "false" ]; then
+ BUILD_CONTEXT=$ENVIRONMENT_BUILD_CONTEXT_OVERRIDE
+ fi
+
if [ ! -f $BUILD_CONTEXT/$DOCKERFILE ]; then
echo "defined Dockerfile $DOCKERFILE for service $IMAGE_NAME not found"; exit 1;
fi
@@ -348,6 +369,15 @@ else
fi
ROUTES_AUTOGENERATE_ENABLED=$(cat .lagoon.yml | shyaml get-value routes.autogenerate.enabled true)
+ROUTES_AUTOGENERATE_ALLOW_PRS=$(cat .lagoon.yml | shyaml get-value routes.autogenerate.allowPullrequests $ROUTES_AUTOGENERATE_ENABLED)
+if [[ "$TYPE" == "pullrequest" && "$ROUTES_AUTOGENERATE_ALLOW_PRS" == "true" ]]; then
+ ROUTES_AUTOGENERATE_ENABLED=true
+fi
+## fail silently if the key autogenerateRoutes doesn't exist and default to whatever ROUTES_AUTOGENERATE_ENABLED is set to
+ROUTES_AUTOGENERATE_BRANCH=$(cat .lagoon.yml | shyaml -q get-value environments.${BRANCH//./\\.}.autogenerateRoutes $ROUTES_AUTOGENERATE_ENABLED)
+if [[ "$ROUTES_AUTOGENERATE_BRANCH" =~ [Tt]rue ]]; then
+ ROUTES_AUTOGENERATE_ENABLED=true
+fi
touch /kubectl-build-deploy/values.yaml
@@ -424,11 +454,6 @@ do
touch /kubectl-build-deploy/${SERVICE_NAME}-values.yaml
- SERVICE_TYPE_OVERRIDE=$(cat .lagoon.yml | shyaml get-value environments.${BRANCH//./\\.}.types.$SERVICE_NAME false)
- if [ ! $SERVICE_TYPE_OVERRIDE == "false" ]; then
- SERVICE_TYPE=$SERVICE_TYPE_OVERRIDE
- fi
-
HELM_SERVICE_TEMPLATE="templates/service.yaml"
if [ -f /kubectl-build-deploy/helmcharts/${SERVICE_TYPE}/$HELM_SERVICE_TEMPLATE ]; then
cat /kubectl-build-deploy/values.yaml
@@ -464,6 +489,128 @@ TEMPLATE_PARAMETERS=()
### CUSTOM ROUTES FROM .lagoon.yml
##############################################
+
+ROUTES_SERVICE_COUNTER=0
+# we need to check for production routes for active/standby if they are defined, as these will get migrated between environments as required
+if [ "${ENVIRONMENT_TYPE}" == "production" ]; then
+ if [ "${BRANCH//./\\.}" == "${ACTIVE_ENVIRONMENT}" ]; then
+ if [ -n "$(cat .lagoon.yml | shyaml keys production_routes.active.routes.$ROUTES_SERVICE_COUNTER 2> /dev/null)" ]; then
+ while [ -n "$(cat .lagoon.yml | shyaml keys production_routes.active.routes.$ROUTES_SERVICE_COUNTER 2> /dev/null)" ]; do
+ ROUTES_SERVICE=$(cat .lagoon.yml | shyaml keys production_routes.active.routes.$ROUTES_SERVICE_COUNTER)
+
+ ROUTE_DOMAIN_COUNTER=0
+ while [ -n "$(cat .lagoon.yml | shyaml get-value production_routes.active.routes.$ROUTES_SERVICE_COUNTER.$ROUTES_SERVICE.$ROUTE_DOMAIN_COUNTER 2> /dev/null)" ]; do
+ # Routes can either be a key (when they have additional settings) or just a value
+ if cat .lagoon.yml | shyaml keys production_routes.active.routes.$ROUTES_SERVICE_COUNTER.$ROUTES_SERVICE.$ROUTE_DOMAIN_COUNTER &> /dev/null; then
+ ROUTE_DOMAIN=$(cat .lagoon.yml | shyaml keys production_routes.active.routes.$ROUTES_SERVICE_COUNTER.$ROUTES_SERVICE.$ROUTE_DOMAIN_COUNTER)
+ # Route Domains include dots, which need to be escaped via `\.` in order to use them within shyaml
+ ROUTE_DOMAIN_ESCAPED=$(cat .lagoon.yml | shyaml keys production_routes.active.routes.$ROUTES_SERVICE_COUNTER.$ROUTES_SERVICE.$ROUTE_DOMAIN_COUNTER | sed 's/\./\\./g')
+ ROUTE_TLS_ACME=$(cat .lagoon.yml | shyaml get-value production_routes.active.routes.$ROUTES_SERVICE_COUNTER.$ROUTES_SERVICE.$ROUTE_DOMAIN_COUNTER.$ROUTE_DOMAIN_ESCAPED.tls-acme true)
+ ROUTE_MIGRATE=$(cat .lagoon.yml | shyaml get-value production_routes.active.routes.$ROUTES_SERVICE_COUNTER.$ROUTES_SERVICE.$ROUTE_DOMAIN_COUNTER.$ROUTE_DOMAIN_ESCAPED.migrate true)
+ ROUTE_INSECURE=$(cat .lagoon.yml | shyaml get-value production_routes.active.routes.$ROUTES_SERVICE_COUNTER.$ROUTES_SERVICE.$ROUTE_DOMAIN_COUNTER.$ROUTE_DOMAIN_ESCAPED.insecure Redirect)
+ ROUTE_HSTS=$(cat .lagoon.yml | shyaml get-value production_routes.active.routes.$ROUTES_SERVICE_COUNTER.$ROUTES_SERVICE.$ROUTE_DOMAIN_COUNTER.$ROUTE_DOMAIN_ESCAPED.hsts null)
+ else
+ # Only a value given, assuming some defaults
+ ROUTE_DOMAIN=$(cat .lagoon.yml | shyaml get-value production_routes.active.routes.$ROUTES_SERVICE_COUNTER.$ROUTES_SERVICE.$ROUTE_DOMAIN_COUNTER)
+ ROUTE_TLS_ACME=true
+ ROUTE_MIGRATE=true
+ ROUTE_INSECURE=Redirect
+ ROUTE_HSTS=null
+ fi
+
+ touch /kubectl-build-deploy/${ROUTE_DOMAIN}-values.yaml
+ echo "$ROUTE_ANNOTATIONS" | yq p - annotations > /kubectl-build-deploy/${ROUTE_DOMAIN}-values.yaml
+
+ # The very first found route is set as MAIN_CUSTOM_ROUTE
+ if [ -z "${MAIN_CUSTOM_ROUTE+x}" ]; then
+ MAIN_CUSTOM_ROUTE=$ROUTE_DOMAIN
+ fi
+
+ ROUTE_SERVICE=$ROUTES_SERVICE
+
+ cat /kubectl-build-deploy/${ROUTE_DOMAIN}-values.yaml
+
+ helm template ${ROUTE_DOMAIN} \
+ /kubectl-build-deploy/helmcharts/custom-ingress \
+ --set host="${ROUTE_DOMAIN}" \
+ --set service="${ROUTE_SERVICE}" \
+ --set tls_acme="${ROUTE_TLS_ACME}" \
+ --set insecure="${ROUTE_INSECURE}" \
+ --set hsts="${ROUTE_HSTS}" \
+ --set routeMigrate="${ROUTE_MIGRATE}" \
+ -f /kubectl-build-deploy/values.yaml -f /kubectl-build-deploy/${ROUTE_DOMAIN}-values.yaml "${HELM_ARGUMENTS[@]}" > $YAML_FOLDER/${ROUTE_DOMAIN}.yaml
+
+ let ROUTE_DOMAIN_COUNTER=ROUTE_DOMAIN_COUNTER+1
+ done
+
+ let ROUTES_SERVICE_COUNTER=ROUTES_SERVICE_COUNTER+1
+ done
+ fi
+ fi
+ if [ "${BRANCH//./\\.}" == "${STANDBY_ENVIRONMENT}" ]; then
+ if [ -n "$(cat .lagoon.yml | shyaml keys production_routes.standby.routes.$ROUTES_SERVICE_COUNTER 2> /dev/null)" ]; then
+ while [ -n "$(cat .lagoon.yml | shyaml keys production_routes.standby.routes.$ROUTES_SERVICE_COUNTER 2> /dev/null)" ]; do
+ ROUTES_SERVICE=$(cat .lagoon.yml | shyaml keys production_routes.standby.routes.$ROUTES_SERVICE_COUNTER)
+
+ ROUTE_DOMAIN_COUNTER=0
+ while [ -n "$(cat .lagoon.yml | shyaml get-value production_routes.standby.routes.$ROUTES_SERVICE_COUNTER.$ROUTES_SERVICE.$ROUTE_DOMAIN_COUNTER 2> /dev/null)" ]; do
+ # Routes can either be a key (when they have additional settings) or just a value
+ if cat .lagoon.yml | shyaml keys production_routes.standby.routes.$ROUTES_SERVICE_COUNTER.$ROUTES_SERVICE.$ROUTE_DOMAIN_COUNTER &> /dev/null; then
+ ROUTE_DOMAIN=$(cat .lagoon.yml | shyaml keys production_routes.standby.routes.$ROUTES_SERVICE_COUNTER.$ROUTES_SERVICE.$ROUTE_DOMAIN_COUNTER)
+ # Route Domains include dots, which need to be escaped via `\.` in order to use them within shyaml
+ ROUTE_DOMAIN_ESCAPED=$(cat .lagoon.yml | shyaml keys production_routes.standby.routes.$ROUTES_SERVICE_COUNTER.$ROUTES_SERVICE.$ROUTE_DOMAIN_COUNTER | sed 's/\./\\./g')
+ ROUTE_TLS_ACME=$(cat .lagoon.yml | shyaml get-value production_routes.standby.routes.$ROUTES_SERVICE_COUNTER.$ROUTES_SERVICE.$ROUTE_DOMAIN_COUNTER.$ROUTE_DOMAIN_ESCAPED.tls-acme true)
+ ROUTE_MIGRATE=$(cat .lagoon.yml | shyaml get-value production_routes.standby.routes.$ROUTES_SERVICE_COUNTER.$ROUTES_SERVICE.$ROUTE_DOMAIN_COUNTER.$ROUTE_DOMAIN_ESCAPED.migrate true)
+ ROUTE_INSECURE=$(cat .lagoon.yml | shyaml get-value production_routes.standby.routes.$ROUTES_SERVICE_COUNTER.$ROUTES_SERVICE.$ROUTE_DOMAIN_COUNTER.$ROUTE_DOMAIN_ESCAPED.insecure Redirect)
+ ROUTE_HSTS=$(cat .lagoon.yml | shyaml get-value production_routes.standby.routes.$ROUTES_SERVICE_COUNTER.$ROUTES_SERVICE.$ROUTE_DOMAIN_COUNTER.$ROUTE_DOMAIN_ESCAPED.hsts null)
+ else
+ # Only a value given, assuming some defaults
+ ROUTE_DOMAIN=$(cat .lagoon.yml | shyaml get-value production_routes.standby.routes.$ROUTES_SERVICE_COUNTER.$ROUTES_SERVICE.$ROUTE_DOMAIN_COUNTER)
+ ROUTE_TLS_ACME=true
+ ROUTE_MIGRATE=true
+ ROUTE_INSECURE=Redirect
+ ROUTE_HSTS=null
+ fi
+
+ touch /kubectl-build-deploy/${ROUTE_DOMAIN}-values.yaml
+ echo "$ROUTE_ANNOTATIONS" | yq p - annotations > /kubectl-build-deploy/${ROUTE_DOMAIN}-values.yaml
+
+ # The very first found route is set as MAIN_CUSTOM_ROUTE
+ if [ -z "${MAIN_CUSTOM_ROUTE+x}" ]; then
+ MAIN_CUSTOM_ROUTE=$ROUTE_DOMAIN
+ fi
+
+ ROUTE_SERVICE=$ROUTES_SERVICE
+
+ cat /kubectl-build-deploy/${ROUTE_DOMAIN}-values.yaml
+
+ helm template ${ROUTE_DOMAIN} \
+ /kubectl-build-deploy/helmcharts/custom-ingress \
+ --set host="${ROUTE_DOMAIN}" \
+ --set service="${ROUTE_SERVICE}" \
+ --set tls_acme="${ROUTE_TLS_ACME}" \
+ --set insecure="${ROUTE_INSECURE}" \
+ --set hsts="${ROUTE_HSTS}" \
+ --set routeMigrate="${ROUTE_MIGRATE}" \
+ -f /kubectl-build-deploy/values.yaml -f /kubectl-build-deploy/${ROUTE_DOMAIN}-values.yaml "${HELM_ARGUMENTS[@]}" > $YAML_FOLDER/${ROUTE_DOMAIN}.yaml
+
+ let ROUTE_DOMAIN_COUNTER=ROUTE_DOMAIN_COUNTER+1
+ done
+
+ let ROUTES_SERVICE_COUNTER=ROUTES_SERVICE_COUNTER+1
+ done
+ fi
+ fi
+fi
+
+# set some monitoring defaults
+if [ "${ENVIRONMENT_TYPE}" == "production" ]; then
+ MONITORING_ENABLED="true"
+else
+ MONITORING_ENABLED="false"
+
+fi
+
# Two while loops as we have multiple services that want routes and each service has multiple routes
ROUTES_SERVICE_COUNTER=0
if [ -n "$(cat .lagoon.yml | shyaml keys ${PROJECT}.environments.${BRANCH//./\\.}.routes.$ROUTES_SERVICE_COUNTER 2> /dev/null)" ]; then
@@ -478,13 +625,16 @@ if [ -n "$(cat .lagoon.yml | shyaml keys ${PROJECT}.environments.${BRANCH//./\\.
# Route Domains include dots, which need to be esacped via `\.` in order to use them within shyaml
ROUTE_DOMAIN_ESCAPED=$(cat .lagoon.yml | shyaml keys ${PROJECT}.environments.${BRANCH//./\\.}.routes.$ROUTES_SERVICE_COUNTER.$ROUTES_SERVICE.$ROUTE_DOMAIN_COUNTER | sed 's/\./\\./g')
ROUTE_TLS_ACME=$(cat .lagoon.yml | shyaml get-value ${PROJECT}.environments.${BRANCH//./\\.}.routes.$ROUTES_SERVICE_COUNTER.$ROUTES_SERVICE.$ROUTE_DOMAIN_COUNTER.$ROUTE_DOMAIN_ESCAPED.tls-acme true)
+ ROUTE_MIGRATE=$(cat .lagoon.yml | shyaml get-value ${PROJECT}.environments.${BRANCH//./\\.}.routes.$ROUTES_SERVICE_COUNTER.$ROUTES_SERVICE.$ROUTE_DOMAIN_COUNTER.$ROUTE_DOMAIN_ESCAPED.migrate false)
ROUTE_INSECURE=$(cat .lagoon.yml | shyaml get-value ${PROJECT}.environments.${BRANCH//./\\.}.routes.$ROUTES_SERVICE_COUNTER.$ROUTES_SERVICE.$ROUTE_DOMAIN_COUNTER.$ROUTE_DOMAIN_ESCAPED.insecure Redirect)
ROUTE_HSTS=$(cat .lagoon.yml | shyaml get-value ${PROJECT}.environments.${BRANCH//./\\.}.routes.$ROUTES_SERVICE_COUNTER.$ROUTES_SERVICE.$ROUTE_DOMAIN_COUNTER.$ROUTE_DOMAIN_ESCAPED.hsts null)
+ MONITORING_PATH=$(cat .lagoon.yml | shyaml get-value ${PROJECT}.environments.${BRANCH//./\\.}.routes.$ROUTES_SERVICE_COUNTER.$ROUTES_SERVICE.$ROUTE_DOMAIN_COUNTER.$ROUTE_DOMAIN_ESCAPED.monitoring-path "")
ROUTE_ANNOTATIONS=$(cat .lagoon.yml | shyaml get-value ${PROJECT}.environments.${BRANCH//./\\.}.routes.$ROUTES_SERVICE_COUNTER.$ROUTES_SERVICE.$ROUTE_DOMAIN_COUNTER.$ROUTE_DOMAIN_ESCAPED.annotations {})
else
# Only a value given, assuming some defaults
ROUTE_DOMAIN=$(cat .lagoon.yml | shyaml get-value ${PROJECT}.environments.${BRANCH//./\\.}.routes.$ROUTES_SERVICE_COUNTER.$ROUTES_SERVICE.$ROUTE_DOMAIN_COUNTER)
ROUTE_TLS_ACME=true
+ ROUTE_MIGRATE=false
ROUTE_INSECURE=Redirect
ROUTE_HSTS=null
ROUTE_ANNOTATIONS="{}"
@@ -509,6 +659,11 @@ if [ -n "$(cat .lagoon.yml | shyaml keys ${PROJECT}.environments.${BRANCH//./\\.
--set tls_acme="${ROUTE_TLS_ACME}" \
--set insecure="${ROUTE_INSECURE}" \
--set hsts="${ROUTE_HSTS}" \
+ --set routeMigrate="${ROUTE_MIGRATE}" \
+ --set ingressmonitorcontroller.enabled="${MONITORING_ENABLED}" \
+ --set ingressmonitorcontroller.path="${MONITORING_PATH}" \
+ --set ingressmonitorcontroller.alertContacts="${MONITORING_ALERTCONTACT}" \
+ --set ingressmonitorcontroller.statuspageId="${MONITORING_STATUSPAGEID}" \
-f /kubectl-build-deploy/values.yaml -f /kubectl-build-deploy/${ROUTE_DOMAIN}-values.yaml "${HELM_ARGUMENTS[@]}" > $YAML_FOLDER/${ROUTE_DOMAIN}.yaml
let ROUTE_DOMAIN_COUNTER=ROUTE_DOMAIN_COUNTER+1
@@ -528,13 +683,16 @@ else
# Route Domains include dots, which need to be esacped via `\.` in order to use them within shyaml
ROUTE_DOMAIN_ESCAPED=$(cat .lagoon.yml | shyaml keys environments.${BRANCH//./\\.}.routes.$ROUTES_SERVICE_COUNTER.$ROUTES_SERVICE.$ROUTE_DOMAIN_COUNTER | sed 's/\./\\./g')
ROUTE_TLS_ACME=$(cat .lagoon.yml | shyaml get-value environments.${BRANCH//./\\.}.routes.$ROUTES_SERVICE_COUNTER.$ROUTES_SERVICE.$ROUTE_DOMAIN_COUNTER.$ROUTE_DOMAIN_ESCAPED.tls-acme true)
+ ROUTE_MIGRATE=$(cat .lagoon.yml | shyaml get-value environments.${BRANCH//./\\.}.routes.$ROUTES_SERVICE_COUNTER.$ROUTES_SERVICE.$ROUTE_DOMAIN_COUNTER.$ROUTE_DOMAIN_ESCAPED.migrate false)
ROUTE_INSECURE=$(cat .lagoon.yml | shyaml get-value environments.${BRANCH//./\\.}.routes.$ROUTES_SERVICE_COUNTER.$ROUTES_SERVICE.$ROUTE_DOMAIN_COUNTER.$ROUTE_DOMAIN_ESCAPED.insecure Redirect)
ROUTE_HSTS=$(cat .lagoon.yml | shyaml get-value environments.${BRANCH//./\\.}.routes.$ROUTES_SERVICE_COUNTER.$ROUTES_SERVICE.$ROUTE_DOMAIN_COUNTER.$ROUTE_DOMAIN_ESCAPED.hsts null)
+ MONITORING_PATH=$(cat .lagoon.yml | shyaml get-value environments.${BRANCH//./\\.}.routes.$ROUTES_SERVICE_COUNTER.$ROUTES_SERVICE.$ROUTE_DOMAIN_COUNTER.$ROUTE_DOMAIN_ESCAPED.monitoring-path "")
ROUTE_ANNOTATIONS=$(cat .lagoon.yml | shyaml get-value environments.${BRANCH//./\\.}.routes.$ROUTES_SERVICE_COUNTER.$ROUTES_SERVICE.$ROUTE_DOMAIN_COUNTER.$ROUTE_DOMAIN_ESCAPED.annotations {})
else
# Only a value given, assuming some defaults
ROUTE_DOMAIN=$(cat .lagoon.yml | shyaml get-value environments.${BRANCH//./\\.}.routes.$ROUTES_SERVICE_COUNTER.$ROUTES_SERVICE.$ROUTE_DOMAIN_COUNTER)
ROUTE_TLS_ACME=true
+ ROUTE_MIGRATE=false
ROUTE_INSECURE=Redirect
ROUTE_HSTS=null
ROUTE_ANNOTATIONS="{}"
@@ -559,6 +717,11 @@ else
--set tls_acme="${ROUTE_TLS_ACME}" \
--set insecure="${ROUTE_INSECURE}" \
--set hsts="${ROUTE_HSTS}" \
+ --set routeMigrate="${ROUTE_MIGRATE}" \
+ --set ingressmonitorcontroller.enabled="${MONITORING_ENABLED}" \
+ --set ingressmonitorcontroller.path="${MONITORING_PATH}" \
+ --set ingressmonitorcontroller.alertContacts="${MONITORING_ALERTCONTACT}" \
+ --set ingressmonitorcontroller.statuspageId="${MONITORING_STATUSPAGEID}" \
-f /kubectl-build-deploy/values.yaml -f /kubectl-build-deploy/${ROUTE_DOMAIN}-values.yaml "${HELM_ARGUMENTS[@]}" > $YAML_FOLDER/${ROUTE_DOMAIN}.yaml
let ROUTE_DOMAIN_COUNTER=ROUTE_DOMAIN_COUNTER+1
@@ -605,20 +768,6 @@ if [ "$(ls -A $YAML_FOLDER/)" ]; then
kubectl apply --insecure-skip-tls-verify -n ${NAMESPACE} -f $YAML_FOLDER/
fi
-##############################################
-### CUSTOM MONITORING_URLS FROM .lagoon.yml
-##############################################
-URL_COUNTER=0
-while [ -n "$(cat .lagoon.yml | shyaml get-value environments.${BRANCH//./\\.}.monitoring_urls.$URL_COUNTER 2> /dev/null)" ]; do
- MONITORING_URL="$(cat .lagoon.yml | shyaml get-value environments.${BRANCH//./\\.}.monitoring_urls.$URL_COUNTER)"
- if [[ $URL_COUNTER > 0 ]]; then
- MONITORING_URLS="${MONITORING_URLS}, ${MONITORING_URL}"
- else
- MONITORING_URLS="${MONITORING_URL}"
- fi
- let URL_COUNTER=URL_COUNTER+1
-done
-
##############################################
### PROJECT WIDE ENV VARIABLES
##############################################
@@ -639,25 +788,25 @@ fi
# Load all routes with correct schema and comma separated
ROUTES=$(kubectl -n ${NAMESPACE} get ingress --sort-by='{.metadata.name}' -l "acme.openshift.io/exposer!=true" -o=go-template --template='{{range $indexItems, $ingress := .items}}{{if $indexItems}},{{end}}{{$tls := .spec.tls}}{{range $indexRule, $rule := .spec.rules}}{{if $indexRule}},{{end}}{{if $tls}}https://{{else}}http://{{end}}{{.host}}{{end}}{{end}}')
+# Active / Standby routes
+ACTIVE_ROUTES=""
+STANDBY_ROUTES=""
+if [ ! -z "${STANDBY_ENVIRONMENT}" ]; then
+ACTIVE_ROUTES=$(kubectl -n ${NAMESPACE} get ingress --sort-by='{.metadata.name}' -l "dioscuri.amazee.io/migrate=true" -o=go-template --template='{{range $indexItems, $ingress := .items}}{{if $indexItems}},{{end}}{{$tls := .spec.tls}}{{range $indexRule, $rule := .spec.rules}}{{if $indexRule}},{{end}}{{if $tls}}https://{{else}}http://{{end}}{{.host}}{{end}}{{end}}')
+STANDBY_ROUTES=$(kubectl -n ${NAMESPACE} get ingress --sort-by='{.metadata.name}' -l "dioscuri.amazee.io/migrate=true" -o=go-template --template='{{range $indexItems, $ingress := .items}}{{if $indexItems}},{{end}}{{$tls := .spec.tls}}{{range $indexRule, $rule := .spec.rules}}{{if $indexRule}},{{end}}{{if $tls}}https://{{else}}http://{{end}}{{.host}}{{end}}{{end}}')
+fi
+
# Get list of autogenerated routes
AUTOGENERATED_ROUTES=$(kubectl -n ${NAMESPACE} get ingress --sort-by='{.metadata.name}' -l "lagoon.sh/autogenerated=true" -o=go-template --template='{{range $indexItems, $ingress := .items}}{{if $indexItems}},{{end}}{{$tls := .spec.tls}}{{range $indexRule, $rule := .spec.rules}}{{if $indexRule}},{{end}}{{if $tls}}https://{{else}}http://{{end}}{{.host}}{{end}}{{end}}')
-# If no MONITORING_URLS were specified, fall back to the ROUTE of the project
-if [ -z "$MONITORING_URLS"]; then
- echo "No monitoring_urls provided, using ROUTE"
- MONITORING_URLS="${ROUTE}"
-fi
-
yq write -i /kubectl-build-deploy/values.yaml 'route' "$ROUTE"
yq write -i /kubectl-build-deploy/values.yaml 'routes' "$ROUTES"
yq write -i /kubectl-build-deploy/values.yaml 'autogeneratedRoutes' "$AUTOGENERATED_ROUTES"
-yq write -i /kubectl-build-deploy/values.yaml 'monitoringUrls' "$MONITORING_URLS"
echo -e "\
LAGOON_ROUTE=${ROUTE}\n\
LAGOON_ROUTES=${ROUTES}\n\
LAGOON_AUTOGENERATED_ROUTES=${AUTOGENERATED_ROUTES}\n\
-LAGOON_MONITORING_URLS=${MONITORING_URLS}\n\
" >> /kubectl-build-deploy/values.env
# Generate a Config Map with project wide env variables
@@ -776,10 +925,13 @@ elif [ "$BUILD_TYPE" == "pullrequest" ] || [ "$BUILD_TYPE" == "branch" ]; then
parallel --retries 4 < /kubectl-build-deploy/lagoon/push
fi
+
+
# load the image hashes for just pushed Images
for IMAGE_NAME in "${!IMAGES_BUILD[@]}"
do
- IMAGE_HASHES[${IMAGE_NAME}]=$(docker inspect ${REGISTRY}/${PROJECT}/${ENVIRONMENT}/${IMAGE_NAME}:${IMAGE_TAG:-latest} --format '{{index .RepoDigests 0}}')
+ JQ_QUERY=(jq -r ".[]|select(test(\"${REGISTRY}/${PROJECT}/${ENVIRONMENT}/${IMAGE_NAME}\"))")
+ IMAGE_HASHES[${IMAGE_NAME}]=$(docker inspect ${REGISTRY}/${PROJECT}/${ENVIRONMENT}/${IMAGE_NAME}:${IMAGE_TAG:-latest} --format '{{json .RepoDigests}}' | "${JQ_QUERY[@]}")
done
# elif [ "$BUILD_TYPE" == "promote" ]; then
@@ -1046,4 +1198,4 @@ if [ "${LAGOON_POSTROLLOUT_DISABLED}" != "true" ]; then
done
else
echo "post-rollout tasks are currently disabled LAGOON_POSTROLLOUT_DISABLED is set to true"
-fi
\ No newline at end of file
+fi
diff --git a/images/kubectl-build-deploy-dind/build-deploy.sh b/images/kubectl-build-deploy-dind/build-deploy.sh
index 4bd8c2b414..ff7e1a557a 100755
--- a/images/kubectl-build-deploy-dind/build-deploy.sh
+++ b/images/kubectl-build-deploy-dind/build-deploy.sh
@@ -106,15 +106,15 @@ do
if [ $PRIVATE_CONTAINER_REGISTRY_URL != "false" ]; then
echo "Attempting to log in to $PRIVATE_CONTAINER_REGISTRY_URL with user $PRIVATE_CONTAINER_REGISTRY_USERNAME - $PRIVATE_CONTAINER_REGISTRY_PASSWORD"
docker login --username $PRIVATE_CONTAINER_REGISTRY_USERNAME --password $PRIVATE_REGISTRY_CREDENTIAL $PRIVATE_CONTAINER_REGISTRY_URL
- kubectl create secret docker-registry "lagoon-private-registry-${PRIVATE_REGISTRY_COUNTER}-secret" --docker-server=$PRIVATE_CONTAINER_REGISTRY_URL --docker-username=PRIVATE_CONTAINER_REGISTRY_USERNAME --docker-password=$PRIVATE_REGISTRY_REGISTRY_PASSWORD --dry-run -o yaml | kubectl apply -f -
+ kubectl create secret docker-registry "lagoon-private-registry-${PRIVATE_REGISTRY_COUNTER}-secret" --docker-server=$PRIVATE_CONTAINER_REGISTRY_URL --docker-username=$PRIVATE_CONTAINER_REGISTRY_USERNAME --docker-password=$PRIVATE_REGISTRY_CREDENTIAL --dry-run -o yaml | kubectl apply -f -
REGISTRY_SECRETS+=("lagoon-private-registry-${PRIVATE_REGISTRY_COUNTER}-secret")
- let PRIVATE_REGISTRY_COUNTER++
+ let ++PRIVATE_REGISTRY_COUNTER
else
echo "Attempting to log in to docker hub with user $PRIVATE_CONTAINER_REGISTRY_USERNAME - $PRIVATE_CONTAINER_REGISTRY_PASSWORD"
docker login --username $PRIVATE_CONTAINER_REGISTRY_USERNAME --password $PRIVATE_REGISTRY_CREDENTIAL
- kubectl create secret docker-registry "lagoon-private-registry-${PRIVATE_REGISTRY_COUNTER}-secret" --docker-server="https://index.docker.io/v1/" --docker-username=PRIVATE_CONTAINER_REGISTRY_USERNAME --docker-password=$PRIVATE_REGISTRY_REGISTRY_PASSWORD --dry-run -o yaml | kubectl apply -f -
+ kubectl create secret docker-registry "lagoon-private-registry-${PRIVATE_REGISTRY_COUNTER}-secret" --docker-server="https://index.docker.io/v1/" --docker-username=$PRIVATE_CONTAINER_REGISTRY_USERNAME --docker-password=$PRIVATE_REGISTRY_CREDENTIAL --dry-run -o yaml | kubectl apply -f -
REGISTRY_SECRETS+=("lagoon-private-registry-${PRIVATE_REGISTRY_COUNTER}-secret")
- let PRIVATE_REGISTRY_COUNTER++
+ let ++PRIVATE_REGISTRY_COUNTER
fi
fi
done
diff --git a/images/kubectl-build-deploy-dind/helmcharts/cli-persistent/templates/_helpers.tpl b/images/kubectl-build-deploy-dind/helmcharts/cli-persistent/templates/_helpers.tpl
index d1cc78cae5..c05589bb6b 100644
--- a/images/kubectl-build-deploy-dind/helmcharts/cli-persistent/templates/_helpers.tpl
+++ b/images/kubectl-build-deploy-dind/helmcharts/cli-persistent/templates/_helpers.tpl
@@ -73,4 +73,18 @@ lagoon.sh/prNumber: {{ .Values.prNumber | quote }}
lagoon.sh/prHeadBranch: {{ .Values.prHeadBranch | quote }}
lagoon.sh/prBaseBranch: {{ .Values.prBaseBranch | quote }}
{{- end }}
-{{- end -}}
\ No newline at end of file
+{{- end -}}
+
+{{/*
+Generate name for twig storage emptyDir
+*/}}
+{{- define "cli-persistent.twig-storage.name" -}}
+{{- printf "%s-twig" .Values.persistentStorage.name }}
+{{- end -}}
+
+{{/*
+Generate path for twig storage emptyDir
+*/}}
+{{- define "cli-persistent.twig-storage.path" -}}
+{{- printf "%s/php/twig" .Values.persistentStorage.path }}
+{{- end -}}
diff --git a/images/kubectl-build-deploy-dind/helmcharts/cli-persistent/templates/deployment.yaml b/images/kubectl-build-deploy-dind/helmcharts/cli-persistent/templates/deployment.yaml
index 557e3fb05e..4faf4e1241 100644
--- a/images/kubectl-build-deploy-dind/helmcharts/cli-persistent/templates/deployment.yaml
+++ b/images/kubectl-build-deploy-dind/helmcharts/cli-persistent/templates/deployment.yaml
@@ -31,6 +31,8 @@ spec:
- name: {{ .Values.persistentStorage.name }}
persistentVolumeClaim:
claimName: {{ .Values.persistentStorage.name }}
+ - name: {{ include "cli-persistent.twig-storage.name" . | quote }}
+ emptyDir: {}
priorityClassName: {{ include "cli-persistent.lagoonPriority" . }}
enableServiceLinks: false
securityContext:
@@ -59,6 +61,8 @@ spec:
readOnly: true
- name: {{ .Values.persistentStorage.name }}
mountPath: {{ .Values.persistentStorage.path | quote }}
+ - name: {{ include "cli-persistent.twig-storage.name" . | quote }}
+ mountPath: {{ include "cli-persistent.twig-storage.path" . | quote }}
resources:
{{- toYaml .Values.resources | nindent 12 }}
{{- with .Values.nodeSelector }}
diff --git a/images/kubectl-build-deploy-dind/helmcharts/custom-ingress/templates/ingress.yaml b/images/kubectl-build-deploy-dind/helmcharts/custom-ingress/templates/ingress.yaml
index 06489fb824..9504bc6258 100644
--- a/images/kubectl-build-deploy-dind/helmcharts/custom-ingress/templates/ingress.yaml
+++ b/images/kubectl-build-deploy-dind/helmcharts/custom-ingress/templates/ingress.yaml
@@ -7,6 +7,7 @@ kind: Ingress
metadata:
name: {{ include "custom-ingress.fullname" . }}
labels:
+ dioscuri.amazee.io/migrate: {{ .Values.routeMigrate | quote }}
lagoon.sh/autogenerated: "false"
{{- include "custom-ingress.labels" . | nindent 4 }}
annotations:
@@ -21,6 +22,15 @@ metadata:
nginx.ingress.kubernetes.io/ssl-redirect: "true"
ingress.kubernetes.io/ssl-redirect: "true"
{{- end }}
+ monitor.stakater.com/enabled: "{{ .Values.ingressmonitorcontroller.enabled }}"
+ uptimerobot.monitor.stakater.com/interval: "{{ .Values.ingressmonitorcontroller.interval }}"
+ uptimerobot.monitor.stakater.com/alert-contacts: "{{ .Values.ingressmonitorcontroller.alertContacts }}"
+ {{- if .Values.ingressmonitorcontroller.path }}
+ monitor.stakater.com/overridePath: "{{ .Values.ingressmonitorcontroller.path }}"
+ {{- end }}
+ {{- if .Values.ingressmonitorcontroller.statuspageId }}
+ uptimerobot.monitor.stakater.com/status-pages: "{{ .Values.ingressmonitorcontroller.statuspageId }}"
+ {{- end }}
# HSTS Handling
{{- if .Values.hsts}}
# haproxy.router.openshift.io/hsts_header: {{ .Values.route_hsts }}
@@ -42,5 +52,3 @@ spec:
- backend:
serviceName: {{ .Values.service }}
servicePort: http
-
-
diff --git a/images/kubectl-build-deploy-dind/helmcharts/custom-ingress/values.yaml b/images/kubectl-build-deploy-dind/helmcharts/custom-ingress/values.yaml
index 1190022f2d..59eec1e5ee 100644
--- a/images/kubectl-build-deploy-dind/helmcharts/custom-ingress/values.yaml
+++ b/images/kubectl-build-deploy-dind/helmcharts/custom-ingress/values.yaml
@@ -7,4 +7,10 @@ hsts: 'null'
tls_acme: true
insecure: Allow
service: ''
-annotations: {}
\ No newline at end of file
+annotations: {}
+routeMigrate: false
+
+ingressmonitorcontroller:
+ enabled: 'false'
+ interval: '60'
+ alertContacts: 'unconfigured'
diff --git a/images/kubectl-build-deploy-dind/helmcharts/mariadb-single/values.yaml b/images/kubectl-build-deploy-dind/helmcharts/mariadb-single/values.yaml
index 799d97a64f..3ce2edbb09 100644
--- a/images/kubectl-build-deploy-dind/helmcharts/mariadb-single/values.yaml
+++ b/images/kubectl-build-deploy-dind/helmcharts/mariadb-single/values.yaml
@@ -7,7 +7,6 @@ replicaCount: 1
image: ""
environmentType: production
persistentStorage:
- # name: "mariadb" #don't use this, use the fullname and append -data to it in pvc.yaml so that if one or more are deployed there is no clash
size: 5Gi
path: "/var/lib/mysql"
diff --git a/images/kubectl-build-deploy-dind/helmcharts/nginx-php-persistent/templates/_helpers.tpl b/images/kubectl-build-deploy-dind/helmcharts/nginx-php-persistent/templates/_helpers.tpl
index d3eec8be18..98ba6e299d 100644
--- a/images/kubectl-build-deploy-dind/helmcharts/nginx-php-persistent/templates/_helpers.tpl
+++ b/images/kubectl-build-deploy-dind/helmcharts/nginx-php-persistent/templates/_helpers.tpl
@@ -28,6 +28,14 @@ Create full hostname for autogenerated hosts
{{- printf "%s.%s" .Release.Name .Values.routesAutogenerateSuffix | trimSuffix "-" -}}
{{- end -}}
+{{/*
+Generate name of Persistent Storage
+Uses the Release Name (Lagoon Service Name) unless it's overwritten via .Values.persistentStorage.name
+*/}}
+{{- define "nginx-php-persistent.persistentStorageName" -}}
+{{- default .Release.Name .Values.persistentStorage.name | trunc 63 | trimSuffix "-" -}}
+{{- end -}}
+
{{/*
Common labels
*/}}
@@ -79,4 +87,18 @@ lagoon.sh/prNumber: {{ .Values.prNumber | quote }}
lagoon.sh/prHeadBranch: {{ .Values.prHeadBranch | quote }}
lagoon.sh/prBaseBranch: {{ .Values.prBaseBranch | quote }}
{{- end }}
-{{- end -}}
\ No newline at end of file
+{{- end -}}
+
+{{/*
+Generate name for twig storage emptyDir
+*/}}
+{{- define "nginx-php-persistent.twig-storage.name" -}}
+{{- printf "%s-twig" (include "nginx-php-persistent.persistentStorageName" .) }}
+{{- end -}}
+
+{{/*
+Generate path for twig storage emptyDir
+*/}}
+{{- define "nginx-php-persistent.twig-storage.path" -}}
+{{- printf "%s/php/twig" .Values.persistentStorage.path }}
+{{- end -}}
diff --git a/images/kubectl-build-deploy-dind/helmcharts/nginx-php-persistent/templates/deployment.yaml b/images/kubectl-build-deploy-dind/helmcharts/nginx-php-persistent/templates/deployment.yaml
index 37ad0dd03d..975c6fac64 100644
--- a/images/kubectl-build-deploy-dind/helmcharts/nginx-php-persistent/templates/deployment.yaml
+++ b/images/kubectl-build-deploy-dind/helmcharts/nginx-php-persistent/templates/deployment.yaml
@@ -24,9 +24,11 @@ spec:
{{- toYaml . | nindent 8 }}
{{- end }}
volumes:
- - name: {{ .Values.persistentStorage.name }}
+ - name: {{ include "nginx-php-persistent.persistentStorageName" . }}
persistentVolumeClaim:
- claimName: {{ .Values.persistentStorage.name }}
+ claimName: {{ include "nginx-php-persistent.persistentStorageName" . }}
+ - name: {{ include "nginx-php-persistent.twig-storage.name" . | quote }}
+ emptyDir: {}
priorityClassName: {{ include "nginx-php-persistent.lagoonPriority" . }}
enableServiceLinks: false
securityContext:
@@ -65,7 +67,7 @@ spec:
- configMapRef:
name: lagoon-env
volumeMounts:
- - name: {{ .Values.persistentStorage.name }}
+ - name: {{ include "nginx-php-persistent.persistentStorageName" . }}
mountPath: {{ .Values.persistentStorage.path | quote }}
resources:
{{- toYaml .Values.resources.nginx | nindent 12 }}
@@ -97,8 +99,10 @@ spec:
- name: NGINX_FASTCGI_PASS
value: '127.0.0.1'
volumeMounts:
- - name: {{ .Values.persistentStorage.name }}
+ - name: {{ include "nginx-php-persistent.persistentStorageName" . }}
mountPath: {{ .Values.persistentStorage.path | quote }}
+ - name: {{ include "nginx-php-persistent.twig-storage.name" . | quote }}
+ mountPath: {{ include "nginx-php-persistent.twig-storage.path" . | quote }}
resources:
{{- toYaml .Values.resources.php | nindent 12 }}
{{- with .Values.nodeSelector }}
@@ -112,4 +116,4 @@ spec:
{{- with .Values.tolerations }}
tolerations:
{{- toYaml . | nindent 8 }}
- {{- end }}
\ No newline at end of file
+ {{- end }}
diff --git a/images/kubectl-build-deploy-dind/helmcharts/nginx-php-persistent/templates/pvc.yaml b/images/kubectl-build-deploy-dind/helmcharts/nginx-php-persistent/templates/pvc.yaml
index 311e16f9e8..286c758d02 100644
--- a/images/kubectl-build-deploy-dind/helmcharts/nginx-php-persistent/templates/pvc.yaml
+++ b/images/kubectl-build-deploy-dind/helmcharts/nginx-php-persistent/templates/pvc.yaml
@@ -1,7 +1,7 @@
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
- name: {{ .Values.persistentStorage.name | quote }}
+ name: {{ include "nginx-php-persistent.persistentStorageName" . }}
labels:
{{- include "nginx-php-persistent.labels" . | nindent 4 }}
annotations:
diff --git a/images/kubectl-build-deploy-dind/helmcharts/node-persistent/templates/_helpers.tpl b/images/kubectl-build-deploy-dind/helmcharts/node-persistent/templates/_helpers.tpl
index 7b1917e7d3..e90cd35afd 100644
--- a/images/kubectl-build-deploy-dind/helmcharts/node-persistent/templates/_helpers.tpl
+++ b/images/kubectl-build-deploy-dind/helmcharts/node-persistent/templates/_helpers.tpl
@@ -28,6 +28,14 @@ Create full hostname for autogenerated hosts
{{- printf "%s.%s" .Release.Name .Values.routesAutogenerateSuffix | trimSuffix "-" -}}
{{- end -}}
+{{/*
+Generate name of Persistent Storage
+Uses the Release Name (Lagoon Service Name) unless it's overwritten via .Values.persistentStorage.name
+*/}}
+{{- define "node-persistent.persistentStorageName" -}}
+{{- default .Release.Name .Values.persistentStorage.name | trunc 63 | trimSuffix "-" -}}
+{{- end -}}
+
{{/*
Common labels
*/}}
diff --git a/images/kubectl-build-deploy-dind/helmcharts/node-persistent/templates/deployment.yaml b/images/kubectl-build-deploy-dind/helmcharts/node-persistent/templates/deployment.yaml
index 8dc14d5aa1..48fc911bf5 100644
--- a/images/kubectl-build-deploy-dind/helmcharts/node-persistent/templates/deployment.yaml
+++ b/images/kubectl-build-deploy-dind/helmcharts/node-persistent/templates/deployment.yaml
@@ -24,9 +24,9 @@ spec:
{{- toYaml . | nindent 8 }}
{{- end }}
volumes:
- - name: {{ .Values.persistentStorage.name }}
+ - name: {{ include "node-persistent.persistentStorageName" . }}
persistentVolumeClaim:
- claimName: {{ .Values.persistentStorage.name }}
+ claimName: {{ include "node-persistent.persistentStorageName" . }}
priorityClassName: {{ include "node-persistent.lagoonPriority" . }}
enableServiceLinks: false
securityContext:
@@ -61,7 +61,7 @@ spec:
- configMapRef:
name: lagoon-env
volumeMounts:
- - name: {{ .Values.persistentStorage.name }}
+ - name: {{ include "node-persistent.persistentStorageName" . }}
mountPath: {{ .Values.persistentStorage.path | quote }}
resources:
{{- toYaml .Values.resources | nindent 12 }}
diff --git a/images/kubectl-build-deploy-dind/helmcharts/node-persistent/templates/pvc.yaml b/images/kubectl-build-deploy-dind/helmcharts/node-persistent/templates/pvc.yaml
index 0352448414..97adc712a7 100644
--- a/images/kubectl-build-deploy-dind/helmcharts/node-persistent/templates/pvc.yaml
+++ b/images/kubectl-build-deploy-dind/helmcharts/node-persistent/templates/pvc.yaml
@@ -1,7 +1,7 @@
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
- name: {{ .Values.persistentStorage.name | quote }}
+ name: {{ include "node-persistent.persistentStorageName" . }}
labels:
{{- include "node-persistent.labels" . | nindent 4 }}
annotations:
diff --git a/images/kubectl-build-deploy-dind/helmcharts/redis-persistent/templates/_helpers.tpl b/images/kubectl-build-deploy-dind/helmcharts/redis-persistent/templates/_helpers.tpl
index cabff1e753..fdaf4b55cf 100644
--- a/images/kubectl-build-deploy-dind/helmcharts/redis-persistent/templates/_helpers.tpl
+++ b/images/kubectl-build-deploy-dind/helmcharts/redis-persistent/templates/_helpers.tpl
@@ -28,6 +28,14 @@ Create full hostname for autogenerated hosts
{{- printf "%s.%s" .Release.Name .Values.routesAutogenerateSuffix | trimSuffix "-" -}}
{{- end -}}
+{{/*
+Generate name of Persistent Storage
+Uses the Release Name (Lagoon Service Name) unless it's overwritten via .Values.persistentStorage.name
+*/}}
+{{- define "redis-persistent.persistentStorageName" -}}
+{{- default .Release.Name .Values.persistentStorage.name | trunc 63 | trimSuffix "-" -}}
+{{- end -}}
+
{{/*
Common labels
*/}}
diff --git a/images/kubectl-build-deploy-dind/helmcharts/redis-persistent/templates/deployment.yaml b/images/kubectl-build-deploy-dind/helmcharts/redis-persistent/templates/deployment.yaml
index 16f677f02c..df5e291d7b 100644
--- a/images/kubectl-build-deploy-dind/helmcharts/redis-persistent/templates/deployment.yaml
+++ b/images/kubectl-build-deploy-dind/helmcharts/redis-persistent/templates/deployment.yaml
@@ -26,9 +26,9 @@ spec:
{{- toYaml . | nindent 8 }}
{{- end }}
volumes:
- - name: {{ .Values.persistentStorage.name }}
+ - name: {{ include "redis-persistent.persistentStorageName" . }}
persistentVolumeClaim:
- claimName: {{ .Values.persistentStorage.name }}
+ claimName: {{ include "redis-persistent.persistentStorageName" . }}
priorityClassName: {{ include "redis-persistent.lagoonPriority" . }}
enableServiceLinks: false
securityContext:
@@ -64,7 +64,7 @@ spec:
- configMapRef:
name: lagoon-env
volumeMounts:
- - name: {{ .Values.persistentStorage.name }}
+ - name: {{ include "redis-persistent.persistentStorageName" . }}
mountPath: {{ .Values.persistentStorage.path | quote }}
resources:
{{- toYaml .Values.resources | nindent 12 }}
diff --git a/images/kubectl-build-deploy-dind/helmcharts/redis-persistent/templates/pvc.yaml b/images/kubectl-build-deploy-dind/helmcharts/redis-persistent/templates/pvc.yaml
index c22ee60730..a2e665a74f 100644
--- a/images/kubectl-build-deploy-dind/helmcharts/redis-persistent/templates/pvc.yaml
+++ b/images/kubectl-build-deploy-dind/helmcharts/redis-persistent/templates/pvc.yaml
@@ -1,7 +1,7 @@
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
- name: {{ .Values.persistentStorage.name | quote }}
+ name: {{ include "redis-persistent.persistentStorageName" . }}
labels:
{{- include "redis-persistent.labels" . | nindent 4 }}
annotations:
diff --git a/images/kubectl-build-deploy-dind/helmcharts/redis-persistent/values.yaml b/images/kubectl-build-deploy-dind/helmcharts/redis-persistent/values.yaml
index ef13f0a93b..b69515e9b4 100644
--- a/images/kubectl-build-deploy-dind/helmcharts/redis-persistent/values.yaml
+++ b/images/kubectl-build-deploy-dind/helmcharts/redis-persistent/values.yaml
@@ -9,7 +9,6 @@ image: ""
environmentType: production
persistentStorage:
- name: "redis"
size: 5Gi
path: "/data"
diff --git a/images/kubectl-build-deploy-dind/helmcharts/solr/templates/_helpers.tpl b/images/kubectl-build-deploy-dind/helmcharts/solr/templates/_helpers.tpl
index 5dccb65c6c..469558e333 100644
--- a/images/kubectl-build-deploy-dind/helmcharts/solr/templates/_helpers.tpl
+++ b/images/kubectl-build-deploy-dind/helmcharts/solr/templates/_helpers.tpl
@@ -28,6 +28,14 @@ Create full hostname for autogenerated hosts
{{- printf "%s.%s" .Release.Name .Values.routesAutogenerateSuffix | trimSuffix "-" -}}
{{- end -}}
+{{/*
+Generate name of Persistent Storage
+Uses the Release Name (Lagoon Service Name) unless it's overwritten via .Values.persistentStorage.name
+*/}}
+{{- define "solr.persistentStorageName" -}}
+{{- default .Release.Name .Values.persistentStorage.name | trunc 63 | trimSuffix "-" -}}
+{{- end -}}
+
{{/*
Common labels
*/}}
diff --git a/images/kubectl-build-deploy-dind/helmcharts/solr/templates/deployment.yaml b/images/kubectl-build-deploy-dind/helmcharts/solr/templates/deployment.yaml
index d08626aa01..4c89c90633 100644
--- a/images/kubectl-build-deploy-dind/helmcharts/solr/templates/deployment.yaml
+++ b/images/kubectl-build-deploy-dind/helmcharts/solr/templates/deployment.yaml
@@ -26,9 +26,9 @@ spec:
{{- toYaml . | nindent 8 }}
{{- end }}
volumes:
- - name: {{ .Values.persistentStorage.name }}
+ - name: {{ include "solr.persistentStorageName" . }}
persistentVolumeClaim:
- claimName: {{ .Values.persistentStorage.name }}
+ claimName: {{ include "solr.persistentStorageName" . }}
priorityClassName: {{ include "solr.lagoonPriority" . }}
enableServiceLinks: false
securityContext:
@@ -60,7 +60,7 @@ spec:
- name: CRONJOBS
value: {{ .Values.inPodCronjobs | quote }}
volumeMounts:
- - name: {{ .Values.persistentStorage.name }}
+ - name: {{ include "solr.persistentStorageName" . }}
mountPath: {{ .Values.persistentStorage.path | quote }}
resources:
{{- toYaml .Values.resources | nindent 12 }}
diff --git a/images/kubectl-build-deploy-dind/helmcharts/solr/templates/pvc.yaml b/images/kubectl-build-deploy-dind/helmcharts/solr/templates/pvc.yaml
index 95037b14f5..5c9a313b1c 100644
--- a/images/kubectl-build-deploy-dind/helmcharts/solr/templates/pvc.yaml
+++ b/images/kubectl-build-deploy-dind/helmcharts/solr/templates/pvc.yaml
@@ -1,7 +1,7 @@
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
- name: {{ .Values.persistentStorage.name | quote }}
+ name: {{ include "solr.persistentStorageName" . }}
labels:
{{- include "solr.labels" . | nindent 4 }}
annotations:
diff --git a/images/kubectl-build-deploy-dind/helmcharts/solr/values.yaml b/images/kubectl-build-deploy-dind/helmcharts/solr/values.yaml
index 00c2ee294e..825122a659 100644
--- a/images/kubectl-build-deploy-dind/helmcharts/solr/values.yaml
+++ b/images/kubectl-build-deploy-dind/helmcharts/solr/values.yaml
@@ -9,7 +9,6 @@ image: ""
environmentType: production
persistentStorage:
size: 5Gi
- name: solr
path: '/var/solr'
imagePullPolicy: Always
diff --git a/images/kubectl-build-deploy-dind/helmcharts/varnish-persistent/templates/_helpers.tpl b/images/kubectl-build-deploy-dind/helmcharts/varnish-persistent/templates/_helpers.tpl
index 4f9b22221c..38b8033a93 100644
--- a/images/kubectl-build-deploy-dind/helmcharts/varnish-persistent/templates/_helpers.tpl
+++ b/images/kubectl-build-deploy-dind/helmcharts/varnish-persistent/templates/_helpers.tpl
@@ -28,6 +28,14 @@ Create full hostname for autogenerated hosts
{{- printf "%s.%s" .Release.Name .Values.routesAutogenerateSuffix | trimSuffix "-" -}}
{{- end -}}
+{{/*
+Generate name of Persistent Storage
+Uses the Release Name (Lagoon Service Name) unless it's overwritten via .Values.persistentStorage.name
+*/}}
+{{- define "varnish-persistent.persistentStorageName" -}}
+{{- default .Release.Name .Values.persistentStorage.name | trunc 63 | trimSuffix "-" -}}
+{{- end -}}
+
{{/*
Common labels
*/}}
diff --git a/images/kubectl-build-deploy-dind/helmcharts/varnish-persistent/templates/deployment.yaml b/images/kubectl-build-deploy-dind/helmcharts/varnish-persistent/templates/deployment.yaml
index b93c1aed0a..ce5974dc14 100644
--- a/images/kubectl-build-deploy-dind/helmcharts/varnish-persistent/templates/deployment.yaml
+++ b/images/kubectl-build-deploy-dind/helmcharts/varnish-persistent/templates/deployment.yaml
@@ -26,9 +26,9 @@ spec:
{{- toYaml . | nindent 8 }}
{{- end }}
volumes:
- - name: {{ .Values.persistentStorage.name }}
+ - name: {{ include "varnish-persistent.persistentStorageName" . }}
persistentVolumeClaim:
- claimName: {{ .Values.persistentStorage.name }}
+ claimName: {{ include "varnish-persistent.persistentStorageName" . }}
priorityClassName: {{ include "varnish-persistent.lagoonPriority" . }}
enableServiceLinks: false
securityContext:
@@ -53,6 +53,9 @@ spec:
- name: http
containerPort: 8080
protocol: TCP
+ - name: controlport
+ containerPort: 6082
+ protocol: TCP
livenessProbe:
tcpSocket:
port: http
@@ -60,7 +63,7 @@ spec:
tcpSocket:
port: http
volumeMounts:
- - name: {{ .Values.persistentStorage.name }}
+ - name: {{ include "varnish-persistent.persistentStorageName" . }}
mountPath: {{ .Values.persistentStorage.path | quote }}
resources:
{{- toYaml .Values.resources | nindent 12 }}
diff --git a/images/kubectl-build-deploy-dind/helmcharts/varnish-persistent/templates/pvc.yaml b/images/kubectl-build-deploy-dind/helmcharts/varnish-persistent/templates/pvc.yaml
index bd7532fa15..9e956adfa5 100644
--- a/images/kubectl-build-deploy-dind/helmcharts/varnish-persistent/templates/pvc.yaml
+++ b/images/kubectl-build-deploy-dind/helmcharts/varnish-persistent/templates/pvc.yaml
@@ -1,7 +1,7 @@
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
- name: {{ .Values.persistentStorage.name | quote }}
+ name: {{ include "varnish-persistent.persistentStorageName" . }}
labels:
{{- include "varnish-persistent.labels" . | nindent 4 }}
annotations:
diff --git a/images/kubectl-build-deploy-dind/helmcharts/varnish-persistent/templates/service.yaml b/images/kubectl-build-deploy-dind/helmcharts/varnish-persistent/templates/service.yaml
index e8902fe154..623ddf32e5 100644
--- a/images/kubectl-build-deploy-dind/helmcharts/varnish-persistent/templates/service.yaml
+++ b/images/kubectl-build-deploy-dind/helmcharts/varnish-persistent/templates/service.yaml
@@ -13,5 +13,9 @@ spec:
targetPort: http
protocol: TCP
name: http
+ - port: 6082
+ targetPort: controlport
+ protocol: TCP
+ name: controlport
selector:
{{- include "varnish-persistent.selectorLabels" . | nindent 4 }}
diff --git a/images/kubectl-build-deploy-dind/helmcharts/varnish-persistent/values.yaml b/images/kubectl-build-deploy-dind/helmcharts/varnish-persistent/values.yaml
index 124dcfc89b..ebe5dea5cc 100644
--- a/images/kubectl-build-deploy-dind/helmcharts/varnish-persistent/values.yaml
+++ b/images/kubectl-build-deploy-dind/helmcharts/varnish-persistent/values.yaml
@@ -9,7 +9,6 @@ image: ""
environmentType: production
persistentStorage:
- name: "varnish"
size: 5Gi
path: "/var/cache/varnish"
diff --git a/images/kubectl-build-deploy-dind/helmcharts/varnish/templates/deployment.yaml b/images/kubectl-build-deploy-dind/helmcharts/varnish/templates/deployment.yaml
index 7ebcabdda1..dd5feb812c 100644
--- a/images/kubectl-build-deploy-dind/helmcharts/varnish/templates/deployment.yaml
+++ b/images/kubectl-build-deploy-dind/helmcharts/varnish/templates/deployment.yaml
@@ -45,6 +45,9 @@ spec:
- name: http
containerPort: 8080
protocol: TCP
+ - name: controlport
+ containerPort: 6082
+ protocol: TCP
livenessProbe:
tcpSocket:
port: http
diff --git a/images/kubectl-build-deploy-dind/helmcharts/varnish/templates/service.yaml b/images/kubectl-build-deploy-dind/helmcharts/varnish/templates/service.yaml
index 17d9aa366e..79d3f6bc95 100644
--- a/images/kubectl-build-deploy-dind/helmcharts/varnish/templates/service.yaml
+++ b/images/kubectl-build-deploy-dind/helmcharts/varnish/templates/service.yaml
@@ -13,5 +13,9 @@ spec:
targetPort: http
protocol: TCP
name: http
+ - port: 6082
+ targetPort: controlport
+ protocol: TCP
+ name: controlport
selector:
{{- include "varnish.selectorLabels" . | nindent 4 }}
diff --git a/images/mariadb/Dockerfile b/images/mariadb/Dockerfile
index c3bf44e43d..bb400b8b70 100644
--- a/images/mariadb/Dockerfile
+++ b/images/mariadb/Dockerfile
@@ -53,10 +53,9 @@ COPY entrypoints/ /lagoon/entrypoints/
COPY mysql-backup.sh /lagoon/
COPY my.cnf /etc/mysql/my.cnf
-RUN for i in /var/run/mysqld /var/lib/mysql /etc/mysql/conf.d /docker-entrypoint-initdb.d/ "${BACKUPS_DIR}"; \
+RUN for i in /var/run/mysqld /var/lib/mysql /etc/mysql/conf.d /docker-entrypoint-initdb.d/ "${BACKUPS_DIR}" /home; \
do mkdir -p $i; chown mysql $i; /bin/fix-permissions $i; \
- done && \
- ln -s /var/lib/mysql/.my.cnf /home/.my.cnf
+ done
COPY root/usr/share/container-scripts/mysql/readiness-probe.sh /usr/share/container-scripts/mysql/readiness-probe.sh
RUN /bin/fix-permissions /usr/share/container-scripts/mysql/ \
diff --git a/images/mariadb/entrypoints/9999-mariadb-init.bash b/images/mariadb/entrypoints/9999-mariadb-init.bash
index 1540ff34bf..72dc40ad22 100755
--- a/images/mariadb/entrypoints/9999-mariadb-init.bash
+++ b/images/mariadb/entrypoints/9999-mariadb-init.bash
@@ -23,7 +23,23 @@ for arg; do
esac
done
+# check if MARIADB_COPY_DATA_DIR_SOURCE is set; if so, we're copying the contents of the given folder into the data dir folder
+# this allows prefilling the datadir with a provided datadir (either added in a Dockerfile build, or mounted into the running container).
+# This is different from just setting $MARIADB_DATA_DIR to the source folder, as only /var/lib/mysql is a persistent folder, so setting
+# $MARIADB_DATA_DIR to another folder will make mariadb not store the datadir across container restarts, while with this copy system
+# the data will be prefilled and persistent across container restarts.
+if [ -n "$MARIADB_COPY_DATA_DIR_SOURCE" ]; then
+ if [ -d ${MARIADB_DATA_DIR:-/var/lib/mysql}/mysql ]; then
+ echo "MARIADB_COPY_DATA_DIR_SOURCE is set, but MySQL directory already present in '${MARIADB_DATA_DIR:-/var/lib/mysql}/mysql' skipping copying"
+ else
+ echo "MARIADB_COPY_DATA_DIR_SOURCE is set, copying datadir contents from '$MARIADB_COPY_DATA_DIR_SOURCE' to '${MARIADB_DATA_DIR:-/var/lib/mysql}'"
+ CUR_DIR=${PWD}
+ cd ${MARIADB_COPY_DATA_DIR_SOURCE}/; tar cf - . | (cd ${MARIADB_DATA_DIR:-/var/lib/mysql}; tar xvf -)
+ cd $CUR_DIR
+ fi
+fi
+ln -sf ${MARIADB_DATA_DIR:-/var/lib/mysql}/.my.cnf /home/.my.cnf
if [ "$1" = 'mysqld' -a -z "$wantHelp" ]; then
if [ ! -d "/run/mysqld" ]; then
@@ -31,7 +47,7 @@ if [ "$1" = 'mysqld' -a -z "$wantHelp" ]; then
chown -R mysql:mysql /run/mysqld
fi
- if [ -d /var/lib/mysql/mysql ]; then
+ if [ -d ${MARIADB_DATA_DIR:-/var/lib/mysql}/mysql ]; then
echo "MySQL directory already present, skipping creation"
echo "starting mysql for mysql upgrade."
@@ -56,7 +72,7 @@ if [ "$1" = 'mysqld' -a -z "$wantHelp" ]; then
else
echo "MySQL data directory not found, creating initial DBs"
- mysql_install_db --skip-name-resolve --skip-test-db --auth-root-authentication-method=normal --datadir=/var/lib/mysql --basedir=/usr
+ mysql_install_db --skip-name-resolve --skip-test-db --auth-root-authentication-method=normal --datadir=${MARIADB_DATA_DIR:-/var/lib/mysql} --basedir=/usr
echo "starting mysql for initdb.d import."
/usr/bin/mysqld --skip-networking --wsrep_on=OFF &
@@ -107,11 +123,11 @@ EOF
cat $tfile | mysql -v -u root
rm -v -f $tfile
- echo "[client]" >> /var/lib/mysql/.my.cnf
- echo "user=root" >> /var/lib/mysql/.my.cnf
- echo "password=${MARIADB_ROOT_PASSWORD}" >> /var/lib/mysql/.my.cnf
- echo "[mysql]" >> /var/lib/mysql/.my.cnf
- echo "database=${MARIADB_DATABASE}" >> /var/lib/mysql/.my.cnf
+ echo "[client]" >> ${MARIADB_DATA_DIR:-/var/lib/mysql}/.my.cnf
+ echo "user=root" >> ${MARIADB_DATA_DIR:-/var/lib/mysql}/.my.cnf
+ echo "password=${MARIADB_ROOT_PASSWORD}" >> ${MARIADB_DATA_DIR:-/var/lib/mysql}/.my.cnf
+ echo "[mysql]" >> ${MARIADB_DATA_DIR:-/var/lib/mysql}/.my.cnf
+ echo "database=${MARIADB_DATABASE}" >> ${MARIADB_DATA_DIR:-/var/lib/mysql}/.my.cnf
for f in `ls /docker-entrypoint-initdb.d/*`; do
case "$f" in
@@ -129,6 +145,6 @@ EOF
fi
-echo "done, now starting daemon"
+ echo "done, now starting daemon"
fi
diff --git a/images/mariadb/my.cnf b/images/mariadb/my.cnf
index d7714558e0..2a727bb2d0 100644
--- a/images/mariadb/my.cnf
+++ b/images/mariadb/my.cnf
@@ -9,6 +9,7 @@ socket = /run/mysqld/mysqld.sock
[mysqld]
port = 3306
socket = /run/mysqld/mysqld.sock
+datadir = ${MARIADB_DATA_DIR:-/var/lib/mysql}
character_set_server = ${MARIADB_CHARSET:-utf8mb4}
collation_server = ${MARIADB_COLLATION:-utf8mb4_bin}
expire_logs_days = 10
diff --git a/images/mariadb/mysql-backup.sh b/images/mariadb/mysql-backup.sh
index bf66958096..bea76622ef 100755
--- a/images/mariadb/mysql-backup.sh
+++ b/images/mariadb/mysql-backup.sh
@@ -21,7 +21,7 @@
set -eu -o pipefail
# directory to put the backup files
-BACKUP_DIR=/var/lib/mysql/backup
+BACKUP_DIR=${MARIADB_DATA_DIR:-/var/lib/mysql}/backup
# MYSQL Parameters
MARIADB_USER=${MARIADB_USER:-lagoon}
diff --git a/images/mariadb/root/usr/share/container-scripts/mysql/readiness-probe.sh b/images/mariadb/root/usr/share/container-scripts/mysql/readiness-probe.sh
index d8c6d827c3..368be4374b 100755
--- a/images/mariadb/root/usr/share/container-scripts/mysql/readiness-probe.sh
+++ b/images/mariadb/root/usr/share/container-scripts/mysql/readiness-probe.sh
@@ -3,7 +3,7 @@
# openshift-mariadb: mysqld readinessProbe
#
-mysql --defaults-file=/var/lib/mysql/.my.cnf -e"SHOW DATABASES;"
+mysql --defaults-file=${MARIADB_DATA_DIR:-/var/lib/mysql}/.my.cnf -e"SHOW DATABASES;"
if [ $? -ne 0 ]; then
exit 1
diff --git a/images/nginx/Dockerfile b/images/nginx/Dockerfile
index ceb5c8c1b2..311bf2625f 100644
--- a/images/nginx/Dockerfile
+++ b/images/nginx/Dockerfile
@@ -36,6 +36,7 @@ COPY fastcgi.conf /etc/nginx/fastcgi_params
COPY helpers/ /etc/nginx/helpers/
COPY static-files.conf /etc/nginx/conf.d/app.conf
COPY redirects-map.conf /etc/nginx/redirects-map.conf
+COPY healthcheck/healthz.locations healthcheck/healthz.locations.php.disable /etc/nginx/conf.d/
RUN mkdir -p /app \
&& rm -f /etc/nginx/conf.d/default.conf \
diff --git a/images/nginx/docker-entrypoint b/images/nginx/docker-entrypoint
index 602171d227..2f8458ec4a 100755
--- a/images/nginx/docker-entrypoint
+++ b/images/nginx/docker-entrypoint
@@ -15,8 +15,20 @@ ep /etc/nginx/*
# Find all folders within /etc/nginx/conf.d/
find /etc/nginx/conf.d/ -type d | while read DIR; do
# envplate if found folder is not empty
- if find $DIR -mindepth 1 | read; then
+ if find $DIR -mindepth 1 | read; then
ep $DIR/*;
fi
done
ep /etc/nginx/helpers/*
+
+# If PHP is enabled, we override the Luascript /healthz check
+echo "Setting up Healthz routing"
+if [ ! -z "$NGINX_FASTCGI_PASS" ]; then
+ echo "Healthz routing - using PHP"
+ cp /etc/nginx/conf.d/healthz.locations.php.disable /etc/nginx/conf.d/healthz.locations
+fi
+
+if [ "$FAST_HEALTH_CHECK" == "true" ]; then
+ echo "FAST HEALTH CHECK ENABLED"
+ cp /etc/nginx/helpers/90_healthz_fast_check.conf.disabled /etc/nginx/helpers/90_health_fast_check.conf
+fi
\ No newline at end of file
diff --git a/images/nginx/healthcheck/README.md b/images/nginx/healthcheck/README.md
new file mode 100644
index 0000000000..43751e2e11
--- /dev/null
+++ b/images/nginx/healthcheck/README.md
@@ -0,0 +1,10 @@
+# Healthcheck
+
+In this directory you'll find two files
+
+- healthz.locations.php.disable
+- healthz.locations
+
+Both are designed to expose a `/.lagoonhealthz` location from the nginx service. The difference being that the `.php.disable` file is used to point to the [healthz-php](https://github.com/amazeeio/healthz-php) application _if_ there is a PHP service attached to this application.
+
+The logic for determining which of the two files is enabled is contained in this image's `docker-entrypoint` file - there we check for the existence of the env var `NGINX_FASTCGI_PASS`, which indicates (or should indicate) the presence of a PHP-fpm service.
\ No newline at end of file
diff --git a/images/nginx/healthcheck/healthz.locations b/images/nginx/healthcheck/healthz.locations
new file mode 100644
index 0000000000..95cf2ed753
--- /dev/null
+++ b/images/nginx/healthcheck/healthz.locations
@@ -0,0 +1,8 @@
+location /.lagoonhealthz {
+ content_by_lua_block {
+ ngx.status = ngx.HTTP_OK;
+ ngx.header.content_type = 'application/json';
+ ngx.say('{"check_nginx":"pass"}');
+ ngx.exit(ngx.OK);
+ }
+}
diff --git a/images/nginx/healthcheck/healthz.locations.php.disable b/images/nginx/healthcheck/healthz.locations.php.disable
new file mode 100644
index 0000000000..dd6be8e7ea
--- /dev/null
+++ b/images/nginx/healthcheck/healthz.locations.php.disable
@@ -0,0 +1,10 @@
+location /.lagoonhealthz {
+ rewrite ^/.lagoonhealthz(/.*)?$ /.lagoonhealthz/index.php;
+
+ location ~* \.php(/|$) {
+ include /etc/nginx/fastcgi.conf;
+ fastcgi_param SCRIPT_NAME /index.php;
+ fastcgi_param SCRIPT_FILENAME /healthz-php/index.php;
+ fastcgi_pass ${NGINX_FASTCGI_PASS:-php}:9000;
+ }
+}
diff --git a/images/nginx/helpers/90_healthz.conf b/images/nginx/helpers/90_healthz.conf
new file mode 100644
index 0000000000..33356eda06
--- /dev/null
+++ b/images/nginx/helpers/90_healthz.conf
@@ -0,0 +1 @@
+include /etc/nginx/conf.d/healthz.locations;
diff --git a/images/nginx/helpers/90_healthz_fast_check.conf.disabled b/images/nginx/helpers/90_healthz_fast_check.conf.disabled
new file mode 100644
index 0000000000..78cb43761e
--- /dev/null
+++ b/images/nginx/helpers/90_healthz_fast_check.conf.disabled
@@ -0,0 +1,13 @@
+set $fhcc none;
+
+if ( $http_user_agent ~* "StatusCake|Pingdom|Site25x7|Uptime|nagios" ) {
+ set $fhcc "A";
+}
+
+if ( $request_method = 'GET' ) {
+ set $fhcc "$fhcc G";
+}
+
+if ( $fhcc = 'A G' ) {
+  rewrite ^ /.lagoonhealthz last;
+}
\ No newline at end of file
diff --git a/images/oc-build-deploy-dind/build-deploy-docker-compose.sh b/images/oc-build-deploy-dind/build-deploy-docker-compose.sh
index 3144102fbe..fadb00cda6 100755
--- a/images/oc-build-deploy-dind/build-deploy-docker-compose.sh
+++ b/images/oc-build-deploy-dind/build-deploy-docker-compose.sh
@@ -91,7 +91,7 @@ do
if oc --insecure-skip-tls-verify -n ${OPENSHIFT_PROJECT} get service "$SERVICE_NAME" &> /dev/null; then
SERVICE_TYPE="mariadb-single"
# check if an existing mariadb service instance already exists
- elif oc -insecure-skip-tls-verify -n ${OPENSHIFT_PROJECT} get serviceinstance "$SERVICE_NAME" &> /dev/null; then
+ elif oc --insecure-skip-tls-verify -n ${OPENSHIFT_PROJECT} get serviceinstance "$SERVICE_NAME" &> /dev/null; then
SERVICE_TYPE="mariadb-shared"
# check if we can use the dbaas operator
elif oc --insecure-skip-tls-verify -n ${OPENSHIFT_PROJECT} get mariadbconsumer.v1.mariadb.amazee.io &> /dev/null; then
@@ -272,8 +272,8 @@ if [[ ( "$TYPE" == "pullrequest" || "$TYPE" == "branch" ) && ! $THIS_IS_TUG ==
LAGOON_POSTROLLOUT_DISABLED=($(echo $LAGOON_PROJECT_VARIABLES | jq -r '.[] | select(.name == "LAGOON_POSTROLLOUT_DISABLED") | "\(.value)"'))
fi
if [ ! -z "$LAGOON_ENVIRONMENT_VARIABLES" ]; then
- LAGOON_PREROLLOUT_DISABLED=($(echo $LAGOON_PROJECT_VARIABLES | jq -r '.[] | select(.name == "LAGOON_PREROLLOUT_DISABLED") | "\(.value)"'))
- LAGOON_POSTROLLOUT_DISABLED=($(echo $LAGOON_PROJECT_VARIABLES | jq -r '.[] | select(.name == "LAGOON_POSTROLLOUT_DISABLED") | "\(.value)"'))
+ LAGOON_PREROLLOUT_DISABLED=($(echo $LAGOON_ENVIRONMENT_VARIABLES | jq -r '.[] | select(.name == "LAGOON_PREROLLOUT_DISABLED") | "\(.value)"'))
+ LAGOON_POSTROLLOUT_DISABLED=($(echo $LAGOON_ENVIRONMENT_VARIABLES | jq -r '.[] | select(.name == "LAGOON_POSTROLLOUT_DISABLED") | "\(.value)"'))
fi
set -x
@@ -429,7 +429,15 @@ else
fi
ROUTES_AUTOGENERATE_ENABLED=$(cat .lagoon.yml | shyaml get-value routes.autogenerate.enabled true)
-
+ROUTES_AUTOGENERATE_ALLOW_PRS=$(cat .lagoon.yml | shyaml get-value routes.autogenerate.allowPullrequests $ROUTES_AUTOGENERATE_ENABLED)
+if [[ "$TYPE" == "pullrequest" && "$ROUTES_AUTOGENERATE_ALLOW_PRS" == "true" ]]; then
+ ROUTES_AUTOGENERATE_ENABLED=true
+fi
+## fail silently if the key autogenerateRoutes doesn't exist and default to whatever ROUTES_AUTOGENERATE_ENABLED is set to
+ROUTES_AUTOGENERATE_BRANCH=$(cat .lagoon.yml | shyaml -q get-value environments.${BRANCH//./\\.}.autogenerateRoutes $ROUTES_AUTOGENERATE_ENABLED)
+if [[ "$ROUTES_AUTOGENERATE_BRANCH" =~ [Tt]rue ]]; then
+ ROUTES_AUTOGENERATE_ENABLED=true
+fi
for SERVICE_TYPES_ENTRY in "${SERVICE_TYPES[@]}"
do
@@ -505,7 +513,15 @@ TEMPLATE_PARAMETERS=()
### CUSTOM ROUTES FROM .lagoon.yml
##############################################
+if [ "${ENVIRONMENT_TYPE}" == "production" ]; then
+ MONITORING_ENABLED="true"
+else
+ MONITORING_ENABLED="false"
+fi
+MONITORING_INTERVAL=60
+
ROUTES_SERVICE_COUNTER=0
+
# we need to check for production routes for active/standby if they are defined, as these will get migrated between environments as required
if [ "${ENVIRONMENT_TYPE}" == "production" ]; then
if [ "${BRANCH//./\\.}" == "${ACTIVE_ENVIRONMENT}" ]; then
@@ -524,6 +540,7 @@ if [ "${ENVIRONMENT_TYPE}" == "production" ]; then
ROUTE_MIGRATE=$(cat .lagoon.yml | shyaml get-value production_routes.active.routes.$ROUTES_SERVICE_COUNTER.$ROUTES_SERVICE.$ROUTE_DOMAIN_COUNTER.$ROUTE_DOMAIN_ESCAPED.migrate true)
ROUTE_INSECURE=$(cat .lagoon.yml | shyaml get-value production_routes.active.routes.$ROUTES_SERVICE_COUNTER.$ROUTES_SERVICE.$ROUTE_DOMAIN_COUNTER.$ROUTE_DOMAIN_ESCAPED.insecure Redirect)
ROUTE_HSTS=$(cat .lagoon.yml | shyaml get-value production_routes.active.routes.$ROUTES_SERVICE_COUNTER.$ROUTES_SERVICE.$ROUTE_DOMAIN_COUNTER.$ROUTE_DOMAIN_ESCAPED.hsts null)
+ MONITORING_PATH=$(cat .lagoon.yml | shyaml get-value production_routes.active.routes.$ROUTES_SERVICE_COUNTER.$ROUTES_SERVICE.$ROUTE_DOMAIN_COUNTER.$ROUTE_DOMAIN_ESCAPED.monitoring-path "")
else
# Only a value given, assuming some defaults
ROUTE_DOMAIN=$(cat .lagoon.yml | shyaml get-value production_routes.active.routes.$ROUTES_SERVICE_COUNTER.$ROUTES_SERVICE.$ROUTE_DOMAIN_COUNTER)
@@ -565,6 +582,7 @@ if [ "${ENVIRONMENT_TYPE}" == "production" ]; then
ROUTE_MIGRATE=$(cat .lagoon.yml | shyaml get-value production_routes.standby.routes.$ROUTES_SERVICE_COUNTER.$ROUTES_SERVICE.$ROUTE_DOMAIN_COUNTER.$ROUTE_DOMAIN_ESCAPED.migrate true)
ROUTE_INSECURE=$(cat .lagoon.yml | shyaml get-value production_routes.standby.routes.$ROUTES_SERVICE_COUNTER.$ROUTES_SERVICE.$ROUTE_DOMAIN_COUNTER.$ROUTE_DOMAIN_ESCAPED.insecure Redirect)
ROUTE_HSTS=$(cat .lagoon.yml | shyaml get-value production_routes.standby.routes.$ROUTES_SERVICE_COUNTER.$ROUTES_SERVICE.$ROUTE_DOMAIN_COUNTER.$ROUTE_DOMAIN_ESCAPED.hsts null)
+ MONITORING_PATH=$(cat .lagoon.yml | shyaml get-value production_routes.standby.routes.$ROUTES_SERVICE_COUNTER.$ROUTES_SERVICE.$ROUTE_DOMAIN_COUNTER.$ROUTE_DOMAIN_ESCAPED.monitoring-path "")
else
# Only a value given, assuming some defaults
ROUTE_DOMAIN=$(cat .lagoon.yml | shyaml get-value production_routes.standby.routes.$ROUTES_SERVICE_COUNTER.$ROUTES_SERVICE.$ROUTE_DOMAIN_COUNTER)
@@ -609,6 +627,7 @@ if [ -n "$(cat .lagoon.yml | shyaml keys ${PROJECT}.environments.${BRANCH//./\\.
ROUTE_MIGRATE=$(cat .lagoon.yml | shyaml get-value ${PROJECT}.environments.${BRANCH//./\\.}.routes.$ROUTES_SERVICE_COUNTER.$ROUTES_SERVICE.$ROUTE_DOMAIN_COUNTER.$ROUTE_DOMAIN_ESCAPED.migrate false)
ROUTE_INSECURE=$(cat .lagoon.yml | shyaml get-value ${PROJECT}.environments.${BRANCH//./\\.}.routes.$ROUTES_SERVICE_COUNTER.$ROUTES_SERVICE.$ROUTE_DOMAIN_COUNTER.$ROUTE_DOMAIN_ESCAPED.insecure Redirect)
ROUTE_HSTS=$(cat .lagoon.yml | shyaml get-value ${PROJECT}.environments.${BRANCH//./\\.}.routes.$ROUTES_SERVICE_COUNTER.$ROUTES_SERVICE.$ROUTE_DOMAIN_COUNTER.$ROUTE_DOMAIN_ESCAPED.hsts null)
+ MONITORING_PATH=$(cat .lagoon.yml | shyaml get-value ${PROJECT}.environments.${BRANCH//./\\.}.routes.$ROUTES_SERVICE_COUNTER.$ROUTES_SERVICE.$ROUTE_DOMAIN_COUNTER.$ROUTE_DOMAIN_ESCAPED.monitoring-path "")
else
# Only a value given, assuming some defaults
ROUTE_DOMAIN=$(cat .lagoon.yml | shyaml get-value ${PROJECT}.environments.${BRANCH//./\\.}.routes.$ROUTES_SERVICE_COUNTER.$ROUTES_SERVICE.$ROUTE_DOMAIN_COUNTER)
@@ -647,6 +666,7 @@ else
ROUTE_MIGRATE=$(cat .lagoon.yml | shyaml get-value environments.${BRANCH//./\\.}.routes.$ROUTES_SERVICE_COUNTER.$ROUTES_SERVICE.$ROUTE_DOMAIN_COUNTER.$ROUTE_DOMAIN_ESCAPED.migrate false)
ROUTE_INSECURE=$(cat .lagoon.yml | shyaml get-value environments.${BRANCH//./\\.}.routes.$ROUTES_SERVICE_COUNTER.$ROUTES_SERVICE.$ROUTE_DOMAIN_COUNTER.$ROUTE_DOMAIN_ESCAPED.insecure Redirect)
ROUTE_HSTS=$(cat .lagoon.yml | shyaml get-value environments.${BRANCH//./\\.}.routes.$ROUTES_SERVICE_COUNTER.$ROUTES_SERVICE.$ROUTE_DOMAIN_COUNTER.$ROUTE_DOMAIN_ESCAPED.hsts null)
+ MONITORING_PATH=$(cat .lagoon.yml | shyaml get-value environments.${BRANCH//./\\.}.routes.$ROUTES_SERVICE_COUNTER.$ROUTES_SERVICE.$ROUTE_DOMAIN_COUNTER.$ROUTE_DOMAIN_ESCAPED.monitoring-path "")
else
# Only a value given, assuming some defaults
ROUTE_DOMAIN=$(cat .lagoon.yml | shyaml get-value environments.${BRANCH//./\\.}.routes.$ROUTES_SERVICE_COUNTER.$ROUTES_SERVICE.$ROUTE_DOMAIN_COUNTER)
@@ -707,8 +727,10 @@ fi
##############################################
### CUSTOM MONITORING_URLS FROM .lagoon.yml
##############################################
+# @DEPRECATED - to be removed with Lagoon 2.0
URL_COUNTER=0
while [ -n "$(cat .lagoon.yml | shyaml get-value environments.${BRANCH//./\\.}.monitoring_urls.$URL_COUNTER 2> /dev/null)" ]; do
+ echo "DEPRECATION WARNING: 'monitoring_urls' is being moved to a per-route 'monitoring-path', please update your route"
MONITORING_URL="$(cat .lagoon.yml | shyaml get-value environments.${BRANCH//./\\.}.monitoring_urls.$URL_COUNTER)"
if [[ $URL_COUNTER > 0 ]]; then
MONITORING_URLS="${MONITORING_URLS}, ${MONITORING_URL}"
@@ -741,10 +763,8 @@ ROUTES=$(oc --insecure-skip-tls-verify -n ${OPENSHIFT_PROJECT} get routes -l "ac
# Active / Standby routes
ACTIVE_ROUTES=""
STANDBY_ROUTES=""
-if [ "${BRANCH//./\\.}" == "${ACTIVE_ENVIRONMENT}" ]; then
+if [ ! -z "${STANDBY_ENVIRONMENT}" ]; then
ACTIVE_ROUTES=$(oc --insecure-skip-tls-verify -n ${OPENSHIFT_PROJECT} get routes -l "dioscuri.amazee.io/migrate=true" -o=go-template --template='{{range $index, $route := .items}}{{if $index}},{{end}}{{if $route.spec.tls.termination}}https://{{else}}http://{{end}}{{$route.spec.host}}{{end}}')
-fi
-if [ "${BRANCH//./\\.}" == "${STANDBY_ENVIRONMENT}" ]; then
STANDBY_ROUTES=$(oc --insecure-skip-tls-verify -n ${OPENSHIFT_PROJECT} get routes -l "dioscuri.amazee.io/migrate=true" -o=go-template --template='{{range $index, $route := .items}}{{if $index}},{{end}}{{if $route.spec.tls.termination}}https://{{else}}http://{{end}}{{$route.spec.host}}{{end}}')
fi
diff --git a/images/oc-build-deploy-dind/openshift-templates/cli-persistent/deployment.yml b/images/oc-build-deploy-dind/openshift-templates/cli-persistent/deployment.yml
index 8312fbc933..dfe504fa80 100644
--- a/images/oc-build-deploy-dind/openshift-templates/cli-persistent/deployment.yml
+++ b/images/oc-build-deploy-dind/openshift-templates/cli-persistent/deployment.yml
@@ -103,6 +103,8 @@ objects:
secret:
defaultMode: 420
secretName: lagoon-sshkey
+ - name: ${PERSISTENT_STORAGE_NAME}-twig
+ emptyDir: {}
priorityClassName: lagoon-priority-${ENVIRONMENT_TYPE}
containers:
- image: ${SERVICE_IMAGE}
@@ -125,6 +127,8 @@ objects:
- mountPath: /var/run/secrets/lagoon/sshkey/
name: lagoon-sshkey
readOnly: true
+ - name: ${PERSISTENT_STORAGE_NAME}-twig
+ mountPath: ${PERSISTENT_STORAGE_PATH}/php/twig
resources:
requests:
cpu: 10m
diff --git a/images/oc-build-deploy-dind/openshift-templates/nginx-php-persistent/deployment.yml b/images/oc-build-deploy-dind/openshift-templates/nginx-php-persistent/deployment.yml
index 82da081d48..0f089522e3 100644
--- a/images/oc-build-deploy-dind/openshift-templates/nginx-php-persistent/deployment.yml
+++ b/images/oc-build-deploy-dind/openshift-templates/nginx-php-persistent/deployment.yml
@@ -103,6 +103,8 @@ objects:
- name: ${SERVICE_NAME}
persistentVolumeClaim:
claimName: ${SERVICE_NAME}
+ - name: ${SERVICE_NAME}-twig
+ emptyDir: {}
priorityClassName: lagoon-priority-${ENVIRONMENT_TYPE}
containers:
- image: ${NGINX_SERVICE_IMAGE}
@@ -170,6 +172,8 @@ objects:
volumeMounts:
- name: ${SERVICE_NAME}
mountPath: ${PERSISTENT_STORAGE_PATH}
+ - name: ${SERVICE_NAME}-twig
+ mountPath: ${PERSISTENT_STORAGE_PATH}/php/twig
resources:
requests:
cpu: 10m
diff --git a/images/oc-build-deploy-dind/openshift-templates/nginx-php-redis-persistent/deployment.yml b/images/oc-build-deploy-dind/openshift-templates/nginx-php-redis-persistent/deployment.yml
index 7e300f35c0..d81fbff00e 100644
--- a/images/oc-build-deploy-dind/openshift-templates/nginx-php-redis-persistent/deployment.yml
+++ b/images/oc-build-deploy-dind/openshift-templates/nginx-php-redis-persistent/deployment.yml
@@ -109,6 +109,8 @@ objects:
- name: ${SERVICE_NAME}
persistentVolumeClaim:
claimName: ${SERVICE_NAME}
+ - name: ${SERVICE_NAME}-twig
+ emptyDir: {}
priorityClassName: lagoon-priority-${ENVIRONMENT_TYPE}
containers:
- image: ${NGINX_SERVICE_IMAGE}
@@ -180,6 +182,8 @@ objects:
volumeMounts:
- name: ${SERVICE_NAME}
mountPath: ${PERSISTENT_STORAGE_PATH}
+ - name: ${SERVICE_NAME}-twig
+ mountPath: ${PERSISTENT_STORAGE_PATH}/php/twig
resources:
requests:
cpu: 10m
diff --git a/images/oc-build-deploy-dind/openshift-templates/rabbitmq/deployment.yml b/images/oc-build-deploy-dind/openshift-templates/rabbitmq/deployment.yml
index 20d2c87446..58e8ea4b4e 100644
--- a/images/oc-build-deploy-dind/openshift-templates/rabbitmq/deployment.yml
+++ b/images/oc-build-deploy-dind/openshift-templates/rabbitmq/deployment.yml
@@ -68,8 +68,8 @@ objects:
service: ${SERVICE_NAME}
branch: ${SAFE_BRANCH}
project: ${SAFE_PROJECT}
- annotations:
- lagoon.sh/configMapSha: ${CONFIG_MAP_SHA}
+ annotations:
+ lagoon.sh/configMapSha: ${CONFIG_MAP_SHA}
spec:
priorityClassName: lagoon-priority-${ENVIRONMENT_TYPE}
containers:
diff --git a/images/oc-build-deploy-dind/openshift-templates/route.yml b/images/oc-build-deploy-dind/openshift-templates/route.yml
index 2f3c1e04d6..1f8ecca4ea 100644
--- a/images/oc-build-deploy-dind/openshift-templates/route.yml
+++ b/images/oc-build-deploy-dind/openshift-templates/route.yml
@@ -40,6 +40,21 @@ parameters:
- name: ROUTE_MIGRATE
description: Setting to determine if this route should be migratable for active/standby purposes
required: true
+ - name: MONITORING_ENABLED
+ description: Default to monitoring disabled, only enabled on production routes
+ value: "false"
+ - name: MONITORING_INTERVAL
+ description: Frequency of checks by monitoring
+ value: ""
+ - name: MONITOR_ALERTCONTACTS
+ description: Alertcontacts to associate to this monitor
+ value: ""
+ - name: MONITORING_PATH
+ description: Path for monitoring of this route
+ value: ""
+ - name: MONITORING_STATUSPAGEID
+ description: Uptime Robot status page ID
+ value: ""
objects:
- apiVersion: v1
kind: Route
@@ -48,6 +63,11 @@ objects:
haproxy.router.openshift.io/disable_cookies: 'true'
haproxy.router.openshift.io/hsts_header: '${ROUTE_HSTS}'
kubernetes.io/tls-acme: '${ROUTE_TLS_ACME}'
+ monitor.stakater.com/enabled: '${MONITORING_ENABLED}'
+ uptimerobot.monitor.stakater.com/interval: '${MONITORING_INTERVAL}'
+ uptimerobot.monitor.stakater.com/alert-contacts: '${MONITOR_ALERTCONTACTS}'
+ monitor.stakater.com/overridePath: '${MONITORING_PATH}'
+ uptimerobot.monitor.stakater.com/status-pages: '${MONITORING_STATUSPAGEID}'
creationTimestamp: null
labels:
branch: ${SAFE_BRANCH}
diff --git a/images/oc-build-deploy-dind/scripts/exec-openshift-create-route.sh b/images/oc-build-deploy-dind/scripts/exec-openshift-create-route.sh
index 013045e6a3..0d68a1cc44 100644
--- a/images/oc-build-deploy-dind/scripts/exec-openshift-create-route.sh
+++ b/images/oc-build-deploy-dind/scripts/exec-openshift-create-route.sh
@@ -2,7 +2,7 @@
# TODO: find out why we are using the if/else and if it's still needed for kubernetes
if oc --insecure-skip-tls-verify -n ${OPENSHIFT_PROJECT} get route "$ROUTE_DOMAIN" &> /dev/null; then
- oc --insecure-skip-tls-verify -n ${OPENSHIFT_PROJECT} patch route "$ROUTE_DOMAIN" -p "{\"metadata\":{\"labels\":{\"dioscuri.amazee.io/migrate\": \"${ROUTE_MIGRATE}\"},\"annotations\":{\"kubernetes.io/tls-acme\":\"${ROUTE_TLS_ACME}\",\"haproxy.router.openshift.io/hsts_header\":\"${ROUTE_HSTS}\"}},\"spec\":{\"to\":{\"name\":\"${ROUTE_SERVICE}\"},\"tls\":{\"insecureEdgeTerminationPolicy\":\"${ROUTE_INSECURE}\"}}}"
+  oc --insecure-skip-tls-verify -n ${OPENSHIFT_PROJECT} patch route "$ROUTE_DOMAIN" -p "{\"metadata\":{\"labels\":{\"dioscuri.amazee.io/migrate\":\"${ROUTE_MIGRATE}\"},\"annotations\":{\"haproxy.router.openshift.io/disable_cookies\":\"true\",\"kubernetes.io/tls-acme\":\"${ROUTE_TLS_ACME}\",\"haproxy.router.openshift.io/hsts_header\":\"${ROUTE_HSTS}\",\"monitor.stakater.com/enabled\":\"${MONITORING_ENABLED}\",\"uptimerobot.monitor.stakater.com/interval\":\"${MONITORING_INTERVAL}\",\"uptimerobot.monitor.stakater.com/alert-contacts\":\"${MONITOR_ALERTCONTACTS}\",\"monitor.stakater.com/overridePath\":\"${MONITORING_PATH}\",\"uptimerobot.monitor.stakater.com/status-pages\":\"${MONITORING_STATUSPAGEID}\"}},\"spec\":{\"to\":{\"name\":\"${ROUTE_SERVICE}\"},\"tls\":{\"insecureEdgeTerminationPolicy\":\"${ROUTE_INSECURE}\"}}}"
else
oc process --local -o yaml --insecure-skip-tls-verify \
-n ${OPENSHIFT_PROJECT} \
@@ -19,5 +19,10 @@ else
-p ROUTE_INSECURE="${ROUTE_INSECURE}" \
-p ROUTE_HSTS="${ROUTE_HSTS}" \
-p ROUTE_MIGRATE="${ROUTE_MIGRATE}" \
+ -p MONITORING_ENABLED="${MONITORING_ENABLED}" \
+ -p MONITOR_ALERTCONTACTS="${MONITOR_ALERTCONTACTS}" \
+ -p MONITORING_PATH="${MONITORING_PATH}" \
+ -p MONITORING_INTERVAL="${MONITORING_INTERVAL}" \
+ -p MONITORING_STATUSPAGEID="${MONITORING_STATUSPAGEID}" \
| outputToYaml
fi
diff --git a/images/php/cli-drupal/Dockerfile b/images/php/cli-drupal/Dockerfile
index bb284c4ea3..bf2a8a4fa2 100644
--- a/images/php/cli-drupal/Dockerfile
+++ b/images/php/cli-drupal/Dockerfile
@@ -9,7 +9,7 @@ ENV LAGOON=cli-drupal
# Defining Versions - https://github.com/hechoendrupal/drupal-console-launcher/releases
ENV DRUPAL_CONSOLE_LAUNCHER_VERSION=1.9.4 \
DRUPAL_CONSOLE_LAUNCHER_SHA=b7759279668caf915b8e9f3352e88f18e4f20659 \
- DRUSH_VERSION=8.3.1 \
+ DRUSH_VERSION=8.3.5 \
DRUSH_LAUNCHER_VERSION=0.6.0 \
DRUSH_LAUNCHER_FALLBACK=/opt/drush8/vendor/bin/drush
diff --git a/images/php/cli/Dockerfile b/images/php/cli/Dockerfile
index 419b38be4b..07254ebb97 100644
--- a/images/php/cli/Dockerfile
+++ b/images/php/cli/Dockerfile
@@ -8,8 +8,8 @@ ENV LAGOON=cli
# Defining Versions - Composer
# @see https://getcomposer.org/download/
-ENV COMPOSER_VERSION=1.10.1 \
- COMPOSER_HASH_SHA256=1831f266580f221189dc04d4b58d7fc50c934ffc3a0eca89ecb4a53aa44867e2
+ENV COMPOSER_VERSION=1.10.8 \
+ COMPOSER_HASH_SHA256=4c40737f5d5f36d04f8b2df37171c6a1ff520efcadcb8626cc7c30bd4c5178e5
RUN apk add --no-cache git \
unzip \
diff --git a/images/php/fpm/Dockerfile b/images/php/fpm/Dockerfile
index 6eb59880bd..c5462d2b7d 100644
--- a/images/php/fpm/Dockerfile
+++ b/images/php/fpm/Dockerfile
@@ -3,6 +3,11 @@ ARG PHP_IMAGE_VERSION
ARG ALPINE_VERSION
ARG IMAGE_REPO
FROM ${IMAGE_REPO:-lagoon}/commons as commons
+
+FROM composer:latest as healthcheckbuilder
+
+RUN composer create-project --no-dev amazeeio/healthz-php /healthz-php v0.0.3
+
FROM php:${PHP_IMAGE_VERSION}-fpm-alpine${ALPINE_VERSION}
LABEL maintainer="amazee.io"
@@ -17,6 +22,10 @@ COPY --from=commons /bin/fix-permissions /bin/ep /bin/docker-sleep /bin/
COPY --from=commons /sbin/tini /sbin/
COPY --from=commons /home /home
+# Copy healthcheck files
+
+COPY --from=healthcheckbuilder /healthz-php /healthz-php
+
RUN chmod g+w /etc/passwd \
&& mkdir -p /home
@@ -39,12 +48,9 @@ COPY ssmtp.conf /etc/ssmtp/ssmtp.conf
# New Relic PHP Agent.
# @see https://docs.newrelic.com/docs/release-notes/agent-release-notes/php-release-notes/
# @see https://docs.newrelic.com/docs/agents/php-agent/getting-started/php-agent-compatibility-requirements
-ENV NEWRELIC_VERSION=9.10.1.263
+ENV NEWRELIC_VERSION=9.11.0.267
-# Pin curl to Version 7.69.1-r0 as the current shipped one 7.67.0 has a bug, see
-# https://github.com/curl/curl/issues/4624
-# TODO: Remove as soon as Alpine 3.11 is shipped with a version higher than 7.67.0
-RUN apk add --no-cache curl=7.69.1-r0 libcurl=7.69.1-r0 --repository http://dl-cdn.alpinelinux.org/alpine/edge/main/
+RUN apk add --no-cache curl --repository http://dl-cdn.alpinelinux.org/alpine/edge/main/
RUN apk add --no-cache fcgi \
ssmtp \
@@ -64,6 +70,8 @@ RUN apk add --no-cache fcgi \
# for webp
libwebp-dev \
postgresql-dev \
+ # for yaml
+ yaml-dev \
# for imagemagick
imagemagick \
imagemagick-libs \
@@ -71,6 +79,7 @@ RUN apk add --no-cache fcgi \
&& apk add --no-cache --virtual .phpize-deps $PHPIZE_DEPS \
&& yes '' | pecl install -f apcu \
&& yes '' | pecl install -f xdebug \
+ && yes '' | pecl install -f yaml \
&& yes '' | pecl install -f redis-4.3.0 \
&& yes '' | pecl install -f imagick \
&& docker-php-ext-enable apcu redis xdebug imagick \
@@ -86,12 +95,14 @@ RUN apk add --no-cache fcgi \
&& sed -i '1s/^/;Intentionally disabled. Enable via setting env variable XDEBUG_ENABLE to true\n;/' /usr/local/etc/php/conf.d/docker-php-ext-xdebug.ini \
&& rm -rf /var/cache/apk/* /tmp/pear/ \
&& apk del .phpize-deps \
+ && echo "extension=yaml.so" > /usr/local/etc/php/conf.d/yaml.ini \
&& mkdir -p /tmp/newrelic && cd /tmp/newrelic \
&& wget https://download.newrelic.com/php_agent/archive/${NEWRELIC_VERSION}/newrelic-php5-${NEWRELIC_VERSION}-linux-musl.tar.gz \
&& gzip -dc newrelic-php5-${NEWRELIC_VERSION}-linux-musl.tar.gz | tar --strip-components=1 -xf - \
&& NR_INSTALL_USE_CP_NOT_LN=1 NR_INSTALL_SILENT=1 ./newrelic-install install \
&& sed -i -e "s/newrelic.appname = .*/newrelic.appname = \"\${LAGOON_PROJECT:-noproject}-\${LAGOON_GIT_SAFE_BRANCH:-nobranch}\"/" /usr/local/etc/php/conf.d/newrelic.ini \
&& sed -i -e "s/;newrelic.enabled = .*/newrelic.enabled = \${NEWRELIC_ENABLED:-false}/" /usr/local/etc/php/conf.d/newrelic.ini \
+ && sed -i -e "s/;newrelic.browser_monitoring.auto_instrument = .*/newrelic.browser_monitoring.auto_instrument = \${NEWRELIC_BROWSER_MONITORING_ENABLED:-true}/" /usr/local/etc/php/conf.d/newrelic.ini \
&& sed -i -e "s/newrelic.license = .*/newrelic.license = \"\${NEWRELIC_LICENSE:-}\"/" /usr/local/etc/php/conf.d/newrelic.ini \
&& sed -i -e "s/;newrelic.loglevel = .*/newrelic.loglevel = \"\${NEWRELIC_LOG_LEVEL:-warning}\"/" /usr/local/etc/php/conf.d/newrelic.ini \
&& sed -i -e "s/;newrelic.daemon.loglevel = .*/newrelic.daemon.loglevel = \"\${NEWRELIC_DAEMON_LOG_LEVEL:-warning}\"/" /usr/local/etc/php/conf.d/newrelic.ini \
diff --git a/images/php/fpm/entrypoints/60-php-xdebug.sh b/images/php/fpm/entrypoints/60-php-xdebug.sh
index 5bd0959a0e..1d05a274f1 100755
--- a/images/php/fpm/entrypoints/60-php-xdebug.sh
+++ b/images/php/fpm/entrypoints/60-php-xdebug.sh
@@ -3,7 +3,7 @@
# Tries to find the Dockerhost
get_dockerhost() {
# https://docs.docker.com/docker-for-mac/networking/#known-limitations-use-cases-and-workarounds
- if busybox timeout 1 busybox nslookup host.docker.internal &> /dev/null; then
+ if busybox timeout 1 busybox nslookup -query=A host.docker.internal &> /dev/null; then
echo "host.docker.internal"
return
fi
diff --git a/images/redis/conf/redis.conf b/images/redis/conf/redis.conf
index 1c82d74c42..06425ea1c6 100644
--- a/images/redis/conf/redis.conf
+++ b/images/redis/conf/redis.conf
@@ -11,4 +11,6 @@ maxmemory-policy allkeys-lru
protected-mode no
bind 0.0.0.0
+${REQUIREPASS_CONF:-}
+
include /etc/redis/${FLAVOR:-ephemeral}.conf
diff --git a/images/redis/docker-entrypoint b/images/redis/docker-entrypoint
index 93bcd95616..fafbb758ef 100755
--- a/images/redis/docker-entrypoint
+++ b/images/redis/docker-entrypoint
@@ -1,5 +1,13 @@
#!/bin/sh
+if [[ -n "${REDIS_PASSWORD}" ]]; then
+ export REQUIREPASS_CONF="# Enable basic/simple authentication
+# Warning: since Redis is pretty fast an outside user can try up to
+# 150k passwords per second against a good box. This means that you should
+# use a very strong password otherwise it will be very easy to break.
+requirepass ${REDIS_PASSWORD}"
+fi
+
ep /etc/redis/*
exec "$@"
diff --git a/images/solr/20-solr-datadir.sh b/images/solr/20-solr-datadir.sh
index c9e9e955aa..d7c1f486cc 100755
--- a/images/solr/20-solr-datadir.sh
+++ b/images/solr/20-solr-datadir.sh
@@ -1,6 +1,27 @@
#!/bin/sh
set -eo pipefail
+# check if SOLR_COPY_DATA_DIR_SOURCE is set, if yes we're coping the contents of the given folder into the data dir folder
+# this allows to prefill the datadir with a provided datadir (either added in a Dockerfile build, or mounted into the running container).
+# This is different than just setting $SOLR_DATA_DIR to the source folder, as only /var/solr is a persistent folder, so setting
+# $SOLR_DATA_DIR to another folder will make solr to not store the datadir across container restarts, while with this copy system
+# the data will be prefilled and persistent across container restarts.
+if [ -n "$SOLR_COPY_DATA_DIR_SOURCE" ]; then
+  echo "SOLR_COPY_DATA_DIR_SOURCE is set, start copying from source location"
+ for solrcorepath in $(ls -d $SOLR_COPY_DATA_DIR_SOURCE/*/ | grep -v lost+found) ; do
+ corename=$(basename $solrcorepath)
+ if [ -d ${SOLR_DATA_DIR:-/var/solr}/$corename ]; then
+ echo "core $corename already present in destination, skipping copying"
+ else
+ echo "copying datadir contents from '$SOLR_COPY_DATA_DIR_SOURCE/$corename to '${SOLR_DATA_DIR:-/var/solr}/$corename'"
+ CUR_DIR=${PWD}
+ mkdir ${SOLR_DATA_DIR:-/var/solr}/$corename
+ cd $SOLR_COPY_DATA_DIR_SOURCE/$corename; tar cf - . | (cd ${SOLR_DATA_DIR:-/var/solr}/$corename; tar xvf -)
+ cd $CUR_DIR
+ fi
+ done
+fi
+
# Previously the Solr Config and Solr Data Dir was both kept in the persistent volume:
# - Solr data: /opt/solr/server/solr/mycores/${corename}/data
# - Solr config: /opt/solr/server/solr/mycores/${corename}/config
@@ -41,9 +62,9 @@ if [ ! -n "$(ls /opt/solr/server/solr/mycores)" ]; then
printf "\n\n"
fi
-if [ -n "$(ls /var/solr)" ]; then
+if [ -n "$(ls ${SOLR_DATA_DIR:-/var/solr})" ]; then
# Iterate through all existing solr cores
- for solrcorepath in $(ls -d /var/solr/*/ | grep -v lost+found) ; do
+ for solrcorepath in $(ls -d ${SOLR_DATA_DIR:-/var/solr}/*/ | grep -v lost+found) ; do
corename=$(basename $solrcorepath)
if [ -d ${solrcorepath}data ]; then
echo "${solrcorepath} has it's data in deprecated location ${solrcorepath}data, moving to ${solrcorepath}."
@@ -72,17 +93,19 @@ fi
function fixConfig {
fail=0
- if cat $1/solrconfig.xml | grep dataDir | grep -qv '/var/solr/${solr.core.name}'; then
+ if cat $1/solrconfig.xml | grep dataDir | grep -qv "${SOLR_DATA_DIR:-/var/solr}/\${solr.core.name}"; then
echo "Found old non lagoon compatible dataDir config in solrconfig.xml:"
cat $1/solrconfig.xml | grep dataDir
+ SOLR_DATA_DIR=${SOLR_DATA_DIR:-/var/solr}
+    SOLR_DATA_DIR_ESCAPED=${SOLR_DATA_DIR//\//\\/} # escaping the forward slashes with backslashes
if [ -w $1/ ]; then
- sed -ibak 's/.*/\/var\/solr\/${solr.core.name}<\/dataDir>/' $1/solrconfig.xml
+ sed -ibak "s/.*/$SOLR_DATA_DIR_ESCAPED\/\${solr.core.name}<\/dataDir>/" $1/solrconfig.xml
echo "automagically updated to compatible config: "
- echo ' /var/solr/${solr.core.name}'
+ echo " ${SOLR_DATA_DIR:-/var/solr}/\${solr.core.name}"
echo "Please update your solrconfig.xml to make this persistent."
else
echo "but no write permission to automagically change to compatible config: "
- echo ' /var/solr/${solr.core.name}'
+ echo " ${SOLR_DATA_DIR:-/var/solr}/\${solr.core.name}"
echo "Please update your solrconfig.xml and commit again."
fail=1
fi
diff --git a/images/varnish-drupal/drupal.vcl b/images/varnish-drupal/drupal.vcl
index b94c7b4551..02d5e2692d 100644
--- a/images/varnish-drupal/drupal.vcl
+++ b/images/varnish-drupal/drupal.vcl
@@ -14,10 +14,10 @@ backend default {
# Allow purging from localhost
# @TODO allow from openshift network
acl purge {
- "127.0.0.1";
- "10.0.0.0"/8;
- "172.16.0.0"/12;
- "192.168.0.0"/16;
+ "127.0.0.1";
+ "10.0.0.0"/8;
+ "172.16.0.0"/12;
+ "192.168.0.0"/16;
}
sub vcl_init {
@@ -31,52 +31,55 @@ sub vcl_init {
# This configuration is optimized for Drupal hosting:
# Respond to incoming requests.
sub vcl_recv {
- if (req.url ~ "^/varnish_status$") {
+ if (req.url ~ "^/varnish_status$") {
return (synth(200,"OK"));
}
# set the backend, which should be used:
set req.backend_hint = www_dir.backend("${VARNISH_BACKEND_HOST:-nginx}");
# Always set the forward ip.
- if (req.restarts == 0) {
- if (req.http.x-forwarded-for) {
- set req.http.X-Forwarded-For = req.http.X-Forwarded-For + ", " + client.ip;
- } else {
- set req.http.X-Forwarded-For = client.ip;
- }
- }
-
-
+ if (req.restarts == 0) {
+ if (req.http.x-forwarded-for) {
+ set req.http.X-Forwarded-For = req.http.X-Forwarded-For + ", " + client.ip;
+ }
+ else {
+ set req.http.X-Forwarded-For = client.ip;
+ }
+ }
if (req.http.X-LAGOON-VARNISH ) {
- ## Pass all Requests which are handled via an upstream Varnish
+ # Pass all Requests which are handled via an upstream Varnish
set req.http.X-LAGOON-VARNISH = "${HOSTNAME}-${LAGOON_GIT_BRANCH:-undef}-${LAGOON_PROJECT}, " + req.http.X-LAGOON-VARNISH;
set req.http.X-LAGOON-VARNISH-BYPASS = "true";
- } else if (req.http.Fastly-FF) {
- ## Pass all Requests which are handled via Fastly
+ }
+ else if (req.http.Fastly-FF) {
+ # Pass all Requests which are handled via Fastly
set req.http.X-LAGOON-VARNISH = "${HOSTNAME}-${LAGOON_GIT_BRANCH:-undef}-${LAGOON_PROJECT}, fastly";
set req.http.X-LAGOON-VARNISH-BYPASS = "true";
set req.http.X-Forwarded-For = req.http.Fastly-Client-IP;
- } else if (req.http.CF-RAY) {
- ## Pass all Requests which are handled via CloudFlare
+ }
+ else if (req.http.CF-RAY) {
+ # Pass all Requests which are handled via CloudFlare
set req.http.X-LAGOON-VARNISH = "${HOSTNAME}-${LAGOON_GIT_BRANCH:-undef}-${LAGOON_PROJECT}, cloudflare";
set req.http.X-LAGOON-VARNISH-BYPASS = "true";
set req.http.X-Forwarded-For = req.http.CF-Connecting-IP;
- } else if (req.http.X-Pull) {
- ## Pass all Requests which are handled via KeyCDN
+ }
+ else if (req.http.X-Pull) {
+ # Pass all Requests which are handled via KeyCDN
set req.http.X-LAGOON-VARNISH = "${HOSTNAME}-${LAGOON_GIT_BRANCH:-undef}-${LAGOON_PROJECT}, keycdn";
set req.http.X-LAGOON-VARNISH-BYPASS = "true";
- } else {
- ## We set a header to let a Varnish Chain know that it already has been varnishcached
+ }
+ else {
+ # We set a header to let a Varnish chain know that it already has been varnishcached
set req.http.X-LAGOON-VARNISH = "${HOSTNAME}-${LAGOON_GIT_BRANCH:-undef}-${LAGOON_PROJECT}";
- ## Allow to bypass based on env variable `VARNISH_BYPASS`
+ # Allow to bypass based on env variable `VARNISH_BYPASS`
set req.http.X-LAGOON-VARNISH-BYPASS = "${VARNISH_BYPASS:-false}";
}
# Websockets are piped
if (req.http.Upgrade ~ "(?i)websocket") {
- return (pipe);
+ return (pipe);
}
if (req.http.X-LAGOON-VARNISH-BYPASS == "true" || req.http.X-LAGOON-VARNISH-BYPASS == "TRUE") {
@@ -98,42 +101,43 @@ sub vcl_recv {
# Bypass a cache hit (the request is still sent to the backend)
if (req.method == "REFRESH") {
- if (!client.ip ~ purge) { return (synth(405, "Not allowed")); }
- set req.method = "GET";
- set req.hash_always_miss = true;
+ if (!client.ip ~ purge) {
+ return (synth(405, "Not allowed"));
+ }
+ set req.method = "GET";
+ set req.hash_always_miss = true;
}
# Only allow BAN requests from IP addresses in the 'purge' ACL.
if (req.method == "BAN" || req.method == "URIBAN" || req.method == "PURGE") {
- # Only allow BAN from defined ACL
- if (!client.ip ~ purge) {
- return (synth(403, "Your IP is not allowed."));
- }
-
- # Only allows BAN if the Host Header has the style of with "${SERVICE_NAME:-varnish}:8080" or "${SERVICE_NAME:-varnish}".
- # Such a request is only possible from within the Docker network, as a request from external goes trough the Kubernetes Router and for that needs a proper Host Header
- if (!req.http.host ~ "^${SERVICE_NAME:-varnish}(:\d+)?$") {
- return (synth(403, "Only allowed from within own network."));
- }
+ # Only allow BAN from defined ACL
+ if (!client.ip ~ purge) {
+ return (synth(403, "Your IP is not allowed."));
+ }
- if (req.method == "BAN") {
- # Logic for the ban, using the Cache-Tags header.
- if (req.http.Cache-Tags) {
- ban("obj.http.Cache-Tags ~ " + req.http.Cache-Tags);
- # Throw a synthetic page so the request won't go to the backend.
- return (synth(200, "Ban added."));
- }
- else {
- return (synth(403, "Cache-Tags header missing."));
- }
- }
+ # Only allows BAN if the Host Header has the style of "${SERVICE_NAME:-varnish}:8080" or "${SERVICE_NAME:-varnish}".
+ # Such a request is only possible from within the Docker network, as a request from outside goes through the Kubernetes Router and for that needs a proper Host Header
+ if (!req.http.host ~ "^${SERVICE_NAME:-varnish}(:\d+)?$") {
+ return (synth(403, "Only allowed from within own network."));
+ }
- if (req.method == "URIBAN" || req.method == "PURGE") {
- ban("req.url ~ " + req.url);
+ if (req.method == "BAN") {
+ # Logic for the ban, using the Cache-Tags header.
+ if (req.http.Cache-Tags) {
+ ban("obj.http.Cache-Tags ~ " + req.http.Cache-Tags);
# Throw a synthetic page so the request won't go to the backend.
return (synth(200, "Ban added."));
}
+ else {
+ return (synth(403, "Cache-Tags header missing."));
+ }
+ }
+ if (req.method == "URIBAN" || req.method == "PURGE") {
+ ban("req.url ~ " + req.url);
+ # Throw a synthetic page so the request won't go to the backend.
+ return (synth(200, "Ban added."));
+ }
}
# Non-RFC2616 or CONNECT which is weird, we pipe that
@@ -147,23 +151,27 @@ sub vcl_recv {
return (pipe);
}
- # We only try to cache GET and HEAD, other things are passed
+ # Large binary files are passed.
+ if (req.url ~ "\.(msi|exe|dmg|zip|tgz|gz|pkg)$") {
+ return(pass);
+ }
+
+ # We only try to cache GET and HEAD, other things are passed.
if (req.method != "GET" && req.method != "HEAD") {
return (pass);
}
- # Any requests with Basic Auth are passed
- if (req.http.Authorization || req.http.Authenticate)
- {
+ # Any requests with Basic Authentication are passed.
+ if (req.http.Authorization || req.http.Authenticate) {
return (pass);
}
- ## Pass requests which are from blackfire
+ # Blackfire requests are passed.
if (req.http.X-Blackfire-Query) {
return (pass);
}
- # Some URLs should never be cached
+ # Some URLs should never be cached.
if (req.url ~ "^/status\.php$" ||
req.url ~ "^/update\.php$" ||
req.url ~ "^/admin([/?]|$).*$" ||
@@ -176,9 +184,8 @@ sub vcl_recv {
return (pass);
}
- # Plupload likes to get piped
- if (req.url ~ "^.*/plupload-handle-uploads.*$"
- ) {
+ # Plupload likes to get piped.
+ if (req.url ~ "^.*/plupload-handle-uploads.*$") {
return (pipe);
}
@@ -259,31 +266,32 @@ sub vcl_pipe {
}
sub vcl_hit {
- if (obj.ttl >= 0s) {
- # normal hit
- return (deliver);
+ if (obj.ttl >= 0s) {
+ # normal hit
+ return (deliver);
+ }
+ # We have no fresh fish. Let's look at the stale ones.
+ if (std.healthy(req.backend_hint)) {
+ # Backend is healthy. If the object is not older than 30secs, deliver it to the client
+ # and automatically create a separate backend request to warm the cache for this request.
+ if (obj.ttl + 30s > 0s) {
+ set req.http.grace = "normal(limited)";
+ return (deliver);
+ } else {
+ # No candidate for grace. Fetch a fresh object.
+ return (miss);
}
- # We have no fresh fish. Lets look at the stale ones.
- if (std.healthy(req.backend_hint)) {
- # Backend is healthy. If the object is not older then 30secs, deliver it to the client
- # and automatically create a separate backend request to warm the cache for this request.
- if (obj.ttl + 30s > 0s) {
- set req.http.grace = "normal(limited)";
- return (deliver);
- } else {
- # No candidate for grace. Fetch a fresh object.
- return(miss);
- }
+ }
+ else {
+ # backend is sick - use full grace
+ if (obj.ttl + obj.grace > 0s) {
+ set req.http.grace = "full";
+ return (deliver);
} else {
- # backend is sick - use full grace
- if (obj.ttl + obj.grace > 0s) {
- set req.http.grace = "full";
- return (deliver);
- } else {
- # no graced object.
- return (miss);
- }
+ # no graced object.
+ return (miss);
}
+ }
}
sub vcl_backend_response {
@@ -295,19 +303,18 @@ sub vcl_backend_response {
set beresp.http.X-Host = bereq.http.host;
# If the backend sends a X-LAGOON-VARNISH-BACKEND-BYPASS header we directly deliver
- if(beresp.http.X-LAGOON-VARNISH-BACKEND-BYPASS == "TRUE") {
+ if (beresp.http.X-LAGOON-VARNISH-BACKEND-BYPASS == "TRUE") {
return (deliver);
}
# Cache 404 and 403 for 10 seconds
- if(beresp.status == 404 || beresp.status == 403) {
+ if (beresp.status == 404 || beresp.status == 403) {
set beresp.ttl = 10s;
return (deliver);
}
# Don't allow static files to set cookies.
if (bereq.url ~ "(?i)\.(css|js|jpg|jpeg|gif|ico|png|tiff|tif|img|tga|wmf|swf|html|htm|woff|woff2|mp4|ttf|eot|svg)(\?.*)?$") {
- # beresp == Back-end response from the web server.
unset beresp.http.set-cookie;
unset beresp.http.Cache-Control;
@@ -321,6 +328,14 @@ sub vcl_backend_response {
set beresp.http.Cache-Control = "public, max-age=${VARNISH_ASSETS_TTL:-2628001}";
set beresp.http.Expires = "" + (now + beresp.ttl);
}
+
+ # Files larger than 10 MB get streamed.
+ if (beresp.http.Content-Length ~ "[0-9]{8,}") {
+ set beresp.do_stream = true;
+ set beresp.uncacheable = true;
+ set beresp.ttl = 120s;
+ }
+
# Disable buffering only for BigPipe responses
if (beresp.http.Surrogate-Control ~ "BigPipe/1.0") {
set beresp.do_stream = true;
@@ -359,18 +374,19 @@ sub vcl_deliver {
}
sub vcl_hash {
- hash_data(req.url);
- if (req.http.host) {
- hash_data(req.http.host);
- } else {
- hash_data(server.ip);
- }
- if (req.http.X-Forwarded-Proto) {
- hash_data(req.http.X-Forwarded-Proto);
- }
- if (req.http.HTTPS) {
- hash_data(req.http.HTTPS);
- }
+ hash_data(req.url);
+ if (req.http.host) {
+ hash_data(req.http.host);
+ }
+ else {
+ hash_data(server.ip);
+ }
+ if (req.http.X-Forwarded-Proto) {
+ hash_data(req.http.X-Forwarded-Proto);
+ }
+ if (req.http.HTTPS) {
+ hash_data(req.http.HTTPS);
+ }
return (lookup);
}
@@ -387,20 +403,20 @@ sub vcl_synth {
# Create our synthetic response
synthetic("");
return(deliver);
-}
+ }
return (deliver);
}
sub vcl_backend_error {
- # Restart the request, when we have a backend server error, to try another backend.
- # Restart max twice.
- if (bereq.retries < 2) {
- return(retry);
- }
+ # Restart the request, when we have a backend server error, to try another backend.
+ # Restart max twice.
+ if (bereq.retries < 2) {
+ return(retry);
+ }
- set beresp.http.Content-Type = "text/html; charset=utf-8";
- set beresp.http.Retry-After = "5";
- synthetic( {"
+ set beresp.http.Content-Type = "text/html; charset=utf-8";
+ set beresp.http.Retry-After = "5";
+ synthetic({"
@@ -443,6 +459,6 @@ sub vcl_backend_error {