Preparing the infrastructure
Requirements
- GitLab;
- Kubernetes to run the GitLab Kubernetes Executor.
Installing GitLab Runner
Follow the official instructions to install and register GitLab Runner. If you are going to install your GitLab Runner in Kubernetes, install it into the gitlab-ci namespace.
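For example, here is a minimal sketch of installing the runner into the gitlab-ci namespace with the official gitlab-runner Helm chart (the GitLab URL and the token are placeholders you must replace with your own):
helm repo add gitlab https://charts.gitlab.io
helm upgrade --install gitlab-runner gitlab/gitlab-runner \
  --namespace gitlab-ci --create-namespace \
  --set gitlabUrl=https://gitlab.example.com/ \
  --set runnerToken=<runner authentication token>
Note that when the runner is managed by this chart, the [[runners]] settings shown below are supplied through the chart's runners.config value rather than edited on disk.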
Setting up the environment for building with Buildah
(For Ubuntu 23.10 and later) run on the GitLab Runner host:
{ echo "kernel.apparmor_restrict_unprivileged_userns = 0" && echo "kernel.apparmor_restrict_unprivileged_unconfined = 0";} | sudo tee -a /etc/sysctl.d/20-apparmor-donotrestrict.conf && sudo sysctl -p /etc/sysctl.d/20-apparmor-donotrestrict.conf
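To make sure the settings took effect, read them back (both values should be 0):
sysctl kernel.apparmor_restrict_unprivileged_userns kernel.apparmor_restrict_unprivileged_unconfined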
Basic GitLab Runner configuration (without caching)
Add the following to the GitLab Runner configuration file config.toml:
[[runners]]
environment = ["FF_USE_ADVANCED_POD_SPEC_CONFIGURATION=true"]
[runners.kubernetes]
namespace = "gitlab-ci"
[runners.kubernetes.pod_annotations]
"container.apparmor.security.beta.kubernetes.io/build" = "unconfined"
[runners.kubernetes.pod_security_context]
run_as_non_root = true
run_as_user = 1000
run_as_group = 1000
fs_group = 1000
[[runners.kubernetes.volumes.empty_dir]]
name = "gitlab-ci-kubernetes-executor-werf-cache"
mount_path = "/home/build/.werf"
[[runners.kubernetes.volumes.empty_dir]]
name = "gitlab-ci-kubernetes-executor-builds-cache"
mount_path = "/builds"
[[runners.kubernetes.volumes.empty_dir]]
name = "gitlab-ci-kubernetes-executor-helper-home"
mount_path = "/home/helper"
[[runners.kubernetes.pod_spec]]
name = "fix helper HOME"
patch = '''
containers:
- name: helper
env:
- name: HOME
value: /home/helper
'''
patch_type = "strategic"
Basic GitLab Runner configuration (with caching in Persistent Volumes)
Add the following to the GitLab Runner configuration file config.toml:
[[runners]]
environment = ["FF_USE_ADVANCED_POD_SPEC_CONFIGURATION=true"]
[runners.kubernetes]
namespace = "gitlab-ci"
[runners.kubernetes.pod_annotations]
"container.apparmor.security.beta.kubernetes.io/build" = "unconfined"
[runners.kubernetes.pod_security_context]
run_as_non_root = true
run_as_user = 1000
run_as_group = 1000
fs_group = 1000
[[runners.kubernetes.volumes.pvc]]
name = "gitlab-ci-kubernetes-executor-werf-cache"
mount_path = "/home/build/.werf"
[[runners.kubernetes.volumes.pvc]]
name = "gitlab-ci-kubernetes-executor-builds-cache"
mount_path = "/builds"
[[runners.kubernetes.volumes.pvc]]
name = "gitlab-ci-kubernetes-executor-helper-home"
mount_path = "/home/helper"
[[runners.kubernetes.pod_spec]]
name = "fix helper HOME"
patch = '''
containers:
- name: helper
env:
- name: HOME
value: /home/helper
'''
patch_type = "strategic"
[[runners.kubernetes.pod_spec]]
name = "fix volumes permissions"
patch = '''
initContainers:
- name: fix-volumes-permissions
image: alpine
command:
- sh
- -ec
- |
chown :$(id -g) /home/build/.werf /builds /home/helper
chmod g+rwx /home/build/.werf /builds /home/helper
securityContext:
runAsUser: 0
runAsNonRoot: false
volumeMounts:
- mountPath: /home/build/.werf
name: gitlab-ci-kubernetes-executor-werf-cache
- mountPath: /builds
name: gitlab-ci-kubernetes-executor-builds-cache
- mountPath: /home/helper
name: gitlab-ci-kubernetes-executor-helper-home
'''
patch_type = "strategic"
Create the PVCs:
kubectl create -f - <<EOF
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: gitlab-ci-kubernetes-executor-werf-cache
namespace: gitlab-ci
spec:
accessModes:
- ReadWriteMany
resources:
requests:
storage: 10Gi
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: gitlab-ci-kubernetes-executor-builds-cache
namespace: gitlab-ci
spec:
accessModes:
- ReadWriteMany
resources:
requests:
storage: 30Gi
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: gitlab-ci-kubernetes-executor-helper-home
namespace: gitlab-ci
spec:
accessModes:
- ReadWriteMany
resources:
requests:
storage: 30Gi
EOF
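Check that the claims are bound (the storage class in your cluster must support the ReadWriteMany access mode):
kubectl -n gitlab-ci get pvc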
Configure access to Kubernetes from GitLab Executor Pods
werf will run in GitLab Executor Pods. Most likely, you will be deploying with werf to the same cluster in which the GitLab Executor Pods run. If so, you need to configure a dedicated ServiceAccount and ClusterRoleBinding.
Create the ServiceAccount and ClusterRoleBinding:
kubectl create -f - <<EOF
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: gitlab-ci-kubernetes-executor
namespace: gitlab-ci
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: gitlab-ci-kubernetes-executor
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: cluster-admin
subjects:
- kind: ServiceAccount
name: gitlab-ci-kubernetes-executor
namespace: gitlab-ci
EOF
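You can verify that the binding works by impersonating the ServiceAccount, for example:
kubectl auth can-i create deployments \
  --as=system:serviceaccount:gitlab-ci:gitlab-ci-kubernetes-executor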
For better security, consider using a ClusterRole/Role with more restricted permissions instead of the cluster-admin ClusterRole above.
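As a rough sketch of such a restriction, the same ServiceAccount could instead be bound to the built-in edit ClusterRole only within the namespaces werf deploys to (the myproject-production namespace below is hypothetical, and the namespaces would then have to exist beforehand):
kubectl create -f - <<EOF
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  name: gitlab-ci-kubernetes-executor
  namespace: myproject-production
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: edit
subjects:
- kind: ServiceAccount
  name: gitlab-ci-kubernetes-executor
  namespace: gitlab-ci
EOF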
Now add the following to the GitLab Runner configuration file config.toml:
[[runners]]
[runners.kubernetes]
service_account = "gitlab-ci-kubernetes-executor"
Allow the use of FUSE (for Kubernetes Nodes with a Linux kernel older than 5.13)
If the Kubernetes Nodes on which you are going to run the Kubernetes Executor Pods have a Linux kernel older than 5.13, you need to allow the use of FUSE:
kubectl create -f - <<EOF
---
apiVersion: apps/v1
kind: DaemonSet
metadata:
name: fuse-device-plugin
namespace: kube-system
spec:
selector:
matchLabels:
name: fuse-device-plugin
template:
metadata:
labels:
name: fuse-device-plugin
spec:
hostNetwork: true
containers:
- image: soolaugust/fuse-device-plugin:v1.0
name: fuse-device-plugin-ctr
securityContext:
allowPrivilegeEscalation: false
capabilities:
drop: ["ALL"]
volumeMounts:
- name: device-plugin
mountPath: /var/lib/kubelet/device-plugins
volumes:
- name: device-plugin
hostPath:
path: /var/lib/kubelet/device-plugins
---
apiVersion: v1
kind: LimitRange
metadata:
name: enable-fuse
namespace: gitlab-ci
spec:
limits:
- type: "Container"
default:
github.com/fuse: 1
EOF
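Once the DaemonSet Pods are running, each node should expose a github.com/fuse resource; one way to check this (the node name is a placeholder):
kubectl describe node <node-name> | grep github.com/fuse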
Configuring Kubernetes for multi-platform builds (optional)
This step is only required when building images for platforms other than the platform of the system running werf.
Enable emulators on your Kubernetes Nodes using qemu-user-static:
kubectl create -f - <<EOF
---
apiVersion: apps/v1
kind: DaemonSet
metadata:
name: qemu-user-static
namespace: gitlab-ci
labels:
app: qemu-user-static
spec:
selector:
matchLabels:
name: qemu-user-static
template:
metadata:
labels:
name: qemu-user-static
spec:
initContainers:
- name: qemu-user-static
image: multiarch/qemu-user-static
args: ["--reset", "-p", "yes"]
securityContext:
privileged: true
containers:
- name: pause
image: gcr.io/google_containers/pause
resources:
limits:
cpu: 50m
memory: 50Mi
requests:
cpu: 50m
memory: 50Mi
EOF
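With the emulators registered, a job can build images for several platforms at once by passing the --platform option to werf (the platform list below is an example):
werf build --platform=linux/amd64,linux/arm64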
Container registry configuration
Enable the garbage collector of your container registry.
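For example, for the registry bundled with an Omnibus GitLab installation, garbage collection can be triggered like this (third-party registries have their own mechanisms):
sudo gitlab-ctl registry-garbage-collect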
Project setup
Setting up the GitLab project
- Enable the requirement of a successful pipeline for merge requests.
- Enable the option to automatically cancel redundant pipelines.
- Create and save an access token for cleaning up unneeded images from the container registry, with the following configuration:
  - Token name: werf-images-cleanup;
  - Role: developer;
  - Scopes: api.
- Add the following variables to the project variables:
  - werf version:
    - Key: WERF_VERSION;
    - Value: 2 stable;
  - access token for cleaning up unneeded images:
    - Key: WERF_IMAGES_CLEANUP_PASSWORD;
    - Value: <saved "werf-images-cleanup" access token>;
    - Protect variable: yes;
    - Mask variable: yes.
- Add a scheduled pipeline that runs nightly to clean up unneeded images in the container registry, specifying the main/master branch as the Target branch.
Configuring the project's CI/CD
This is how a repository that uses werf for building and deployment might look:
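The layout below is reconstructed from the file contents that follow; treat the exact paths as a plausible arrangement rather than a requirement:
.
├── .gitignore
├── .gitlab-ci.yml
├── .helm/
│   ├── Chart.yaml
│   ├── Chart.lock
│   ├── charts/
│   │   ├── app/            # reusable chart for app1 and app2
│   │   ├── database/
│   │   ├── e2e-long/
│   │   ├── e2e-short/
│   │   ├── performance/
│   │   └── shared/
│   ├── templates/NOTES.txt
│   └── values.yaml
├── Taskfile.yaml
├── sources.Dockerfile
├── src/
│   ├── app1/               # Taskfile.yaml, app.Dockerfile, main.sh
│   └── app2/               # same layout as app1
├── werf-giterminism.yaml
└── werf.yaml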
.gitignore:
/.werf-deploy-report.json
/.werf-build-report.json
/.werf_secret_key
.gitlab-ci.yml:
stages:
- build
- test
- review
- qa
- staging-shared
- staging-apps
- staging-smoke
- prod-shared
- prod-apps
- prod-smoke
- cleanup
variables:
ENABLE_ALL_IMAGES: "false"
ENABLE_APPS_IMAGES: "false"
ENABLE_IMAGE_SOURCES: "false"
WERF_REQUIRE_BUILT_IMAGES: "true"
workflow:
rules:
- if: $CI_COMMIT_BRANCH && $CI_OPEN_MERGE_REQUESTS && $CI_PIPELINE_SOURCE == "push"
when: never
- if: !reference [.rules, if, mr]
- if: $CI_COMMIT_BRANCH && $CI_OPEN_MERGE_REQUESTS
when: never
- if: !reference [.rules, if, tag]
- if: !reference [.rules, if, main]
default:
image:
name: "registry.werf.io/werf/werf:2-stable"
pull_policy: always
tags:
- "<GitLab Runner tag>"
.scripts:
setup_werf: |
source "$(werf ci-env gitlab --as-file)"
.rules:
if:
main: $CI_COMMIT_BRANCH == $CI_DEFAULT_BRANCH
main_schedule: $CI_COMMIT_BRANCH == $CI_DEFAULT_BRANCH && $CI_PIPELINE_SOURCE == "schedule"
main_no_schedule: $CI_COMMIT_BRANCH == $CI_DEFAULT_BRANCH && $CI_PIPELINE_SOURCE != "schedule"
mr: $CI_PIPELINE_SOURCE == "merge_request_event"
    mr_no_schedule: $CI_PIPELINE_SOURCE == "merge_request_event" && $CI_PIPELINE_SOURCE != "schedule"
main_or_mr: $CI_COMMIT_BRANCH == $CI_DEFAULT_BRANCH || $CI_PIPELINE_SOURCE == "merge_request_event"
main_or_mr_no_schedule: ($CI_COMMIT_BRANCH == $CI_DEFAULT_BRANCH || $CI_PIPELINE_SOURCE == "merge_request_event") && $CI_PIPELINE_SOURCE != "schedule"
tag: $CI_COMMIT_TAG
schedule: $CI_PIPELINE_SOURCE == "schedule"
no_schedule: $CI_PIPELINE_SOURCE != "schedule"
.artifacts:
deploy_report:
paths:
- .werf-deploy-report.json
expire_in: 1 year
when: always
.dismiss:
script:
- werf dismiss --with-namespace --use-deploy-report
environment:
action: stop
interruptible: false
timeout: null
.build:
stage: build
variables:
ENABLE_ALL_IMAGES: "true"
before_script:
- !reference [.scripts, setup_werf]
timeout: 30m
rules:
- if: !reference [.rules, if, main_or_mr_no_schedule]
images:apps:build:
extends:
- .build
script:
- werf build app1 app2
image:sources:build:
extends:
- .build
script:
- werf build sources
.test:simple:
stage: test
needs:
- image:sources:build
variables:
ENABLE_IMAGE_SOURCES: "true"
before_script:
- !reference [.scripts, setup_werf]
.test:simple:app1:
extends:
- .test:simple
rules:
- if: !reference [.rules, if, main_or_mr_no_schedule]
changes:
- "*"
- ".helm/**/**"
- "src/app1/**/**"
.test:simple:app2:
extends:
- .test:simple
rules:
- if: !reference [.rules, if, main_or_mr_no_schedule]
changes:
- "*"
- ".helm/**/**"
- "src/app2/**/**"
app1:lint:
extends:
- .test:simple:app1
script:
- werf kube-run sources -- go-task -d src/app1 lint
timeout: 5m
app1:unit:
extends:
- .test:simple:app1
script:
- werf kube-run sources -- go-task -d src/app1 test:unit
coverage: '/Code coverage is \d+\.\d+/'
timeout: 15m
app1:integration:
extends:
- .test:simple:app1
script:
- werf kube-run sources -- go-task -d src/app1 test:integration
coverage: '/Code coverage is \d+\.\d+/'
timeout: 30m
app2:lint:
extends:
- .test:simple:app2
script:
- werf kube-run sources -- go-task -d src/app2 lint
timeout: 5m
app2:unit:
extends:
- .test:simple:app2
script:
- werf kube-run sources -- go-task -d src/app2 test:unit
coverage: '/Code coverage is \d+\.\d+/'
timeout: 15m
app2:integration:
extends:
- .test:simple:app2
script:
- werf kube-run sources -- go-task -d src/app2 test:integration
coverage: '/Code coverage is \d+\.\d+/'
timeout: 30m
.test:complex:
stage: test
needs:
- images:apps:build
- image:sources:build
variables:
ENABLE_ALL_IMAGES: "true"
WERF_SET_TAGS_RELEASE: "tags.release=true"
before_script:
- !reference [.scripts, setup_werf]
script:
- werf converge --save-deploy-report
resource_group: ${CI_ENVIRONMENT_SLUG}
artifacts: !reference [.artifacts, deploy_report]
e2e:short:
extends:
- .test:complex
variables:
WERF_SET_TAGS_E2E_SHORT: "tags.e2e-short=true"
environment:
name: e2e-short/${CI_PIPELINE_IID}
on_stop: e2e:short:remove
interruptible: true
rules:
- if: !reference [.rules, if, mr_no_schedule]
e2e:short:remove:
extends:
- e2e:short
- .dismiss
needs:
- e2e:short
rules:
- if: !reference [.rules, if, mr_no_schedule]
when: always
allow_failure: true
e2e:long:
extends:
- .test:complex
variables:
WERF_SET_TAGS_E2E_LONG: "tags.e2e-long=true"
environment:
name: e2e-long/${CI_PIPELINE_IID}
on_stop: e2e:long:remove
rules:
- if: !reference [.rules, if, main_no_schedule]
e2e:long:remove:
extends:
- e2e:long
- .dismiss
needs:
- e2e:long
rules:
- if: !reference [.rules, if, main_no_schedule]
when: always
allow_failure: true
performance:
extends:
- .test:complex
variables:
WERF_SET_TAGS_PERFORMANCE: "tags.performance=true"
environment:
name: performance/${CI_PIPELINE_IID}
on_stop: performance:remove
resource_group: performance
rules:
- if: !reference [.rules, if, main_no_schedule]
performance:remove:
extends:
- performance
- .dismiss
needs:
- performance
rules:
- if: !reference [.rules, if, main_no_schedule]
when: always
allow_failure: true
review:
stage: review
needs:
- images:apps:build
- image:sources:build
variables:
ENABLE_ALL_IMAGES: "true"
WERF_SET_TAGS_RELEASE: "tags.release=true"
before_script:
- !reference [.scripts, setup_werf]
script:
- werf converge --save-deploy-report
environment:
name: review/${CI_MERGE_REQUEST_IID}
on_stop: review:remove
auto_stop_in: 3 days
timeout: 30m
interruptible: true
resource_group: ${CI_ENVIRONMENT_SLUG}
artifacts: !reference [.artifacts, deploy_report]
rules:
- if: !reference [.rules, if, mr_no_schedule]
review:remove:
extends:
- review
- .dismiss
needs:
- review
rules:
- if: !reference [.rules, if, mr_no_schedule]
when: manual
allow_failure: true
.qa:
stage: qa
needs:
- job: app1:lint
optional: true
- job: app2:lint
optional: true
- job: app1:unit
optional: true
- job: app2:unit
optional: true
- job: app1:integration
optional: true
- job: app2:integration
optional: true
- job: e2e:long
artifacts: false
- job: performance
artifacts: false
variables:
ENABLE_ALL_IMAGES: "true"
WERF_SET_TAGS_RELEASE: "tags.release=true"
before_script:
- !reference [.scripts, setup_werf]
script:
- werf converge --save-deploy-report
environment:
auto_stop_in: 3 days
timeout: 30m
resource_group: ${CI_ENVIRONMENT_SLUG}
artifacts: !reference [.artifacts, deploy_report]
rules:
- if: !reference [.rules, if, main_no_schedule]
when: manual
qa:eu:
extends:
- .qa
environment:
name: qa-eu/${CI_PIPELINE_IID}
on_stop: qa:eu:remove
qa:eu:remove:
extends:
- qa:eu
- .dismiss
needs:
- qa:eu
rules:
- if: !reference [.rules, if, main_no_schedule]
when: manual
allow_failure: true
qa:us:
extends:
- .qa
environment:
name: qa-us/${CI_PIPELINE_IID}
on_stop: qa:us:remove
qa:us:remove:
extends:
- qa:us
- .dismiss
needs:
- qa:us
rules:
- if: !reference [.rules, if, main_no_schedule]
when: manual
allow_failure: true
.staging:shared:
stage: staging-shared
needs:
- job: app1:lint
optional: true
- job: app2:lint
optional: true
- job: app1:unit
optional: true
- job: app2:unit
optional: true
- job: app1:integration
optional: true
- job: app2:integration
optional: true
- job: e2e:long
artifacts: false
- job: performance
artifacts: false
variables:
ENABLE_ALL_IMAGES: "true"
WERF_SET_TAGS_SHARED: "tags.shared=true"
before_script:
- !reference [.scripts, setup_werf]
script:
- werf converge
resource_group: ${CI_ENVIRONMENT_SLUG}
rules:
- if: !reference [.rules, if, main_no_schedule]
when: manual
staging:eu:shared:
extends:
- .staging:shared
environment:
name: staging-eu/shared
staging:us:shared:
extends:
- .staging:shared
environment:
name: staging-us/shared
.staging:app:
stage: staging-apps
variables:
ENABLE_ALL_IMAGES: "true"
before_script:
- !reference [.scripts, setup_werf]
script:
- werf converge
.staging:app:app1:
extends:
- .staging:app
rules:
- if: !reference [.rules, if, main_no_schedule]
when: manual
changes:
- "*"
- ".helm/**/**"
- "src/app1/**/**"
.staging:app:app2:
extends:
- .staging:app
resource_group: ${CI_ENVIRONMENT_SLUG}
rules:
- if: !reference [.rules, if, main_no_schedule]
when: manual
changes:
- "*"
- ".helm/**/**"
- "src/app2/**/**"
staging:eu:app1:
extends:
- .staging:app:app1
needs:
- staging:eu:shared
variables:
WERF_SET_TAGS_APP1: "tags.app1=true"
environment:
name: staging-eu/app1
staging:eu:app2:
extends:
- .staging:app:app2
needs:
- staging:eu:shared
variables:
WERF_SET_TAGS_APP2: "tags.app2=true"
environment:
name: staging-eu/app2
staging:us:app1:
extends:
- .staging:app:app1
needs:
- staging:us:shared
variables:
WERF_SET_TAGS_APP1: "tags.app1=true"
environment:
name: staging-us/app1
staging:us:app2:
extends:
- .staging:app:app2
needs:
- staging:us:shared
variables:
WERF_SET_TAGS_APP2: "tags.app2=true"
environment:
name: staging-us/app2
.prod:shared:
stage: prod-shared
variables:
ENABLE_ALL_IMAGES: "true"
WERF_SET_TAGS_SHARED: "tags.shared=true"
before_script:
- !reference [.scripts, setup_werf]
script:
- werf converge
resource_group: ${CI_ENVIRONMENT_SLUG}
rules:
- if: !reference [.rules, if, main_no_schedule]
when: manual
prod:eu:shared:
extends:
- .prod:shared
needs:
- staging:eu:smoke
environment:
name: prod-eu/shared
prod:us:shared:
extends:
- .prod:shared
needs:
- staging:us:smoke
environment:
name: prod-us/shared
.prod:app:
stage: prod-apps
variables:
ENABLE_ALL_IMAGES: "true"
before_script:
- !reference [.scripts, setup_werf]
script:
- werf converge
resource_group: ${CI_ENVIRONMENT_SLUG}
.prod:app:app1:
extends:
- .prod:app
rules:
- if: !reference [.rules, if, main_no_schedule]
when: manual
changes:
- "*"
- ".helm/**/**"
- "src/app1/**/**"
.prod:app:app2:
extends:
- .prod:app
rules:
- if: !reference [.rules, if, main_no_schedule]
when: manual
changes:
- "*"
- ".helm/**/**"
- "src/app2/**/**"
prod:eu:app1:
extends:
- .prod:app:app1
needs:
- prod:eu:shared
variables:
WERF_SET_TAGS_APP1: "tags.app1=true"
environment:
name: prod-eu/app1
prod:eu:app2:
extends:
- .prod:app:app2
needs:
- prod:eu:shared
variables:
WERF_SET_TAGS_APP2: "tags.app2=true"
environment:
name: prod-eu/app2
prod:us:app1:
extends:
- .prod:app:app1
needs:
- prod:us:shared
variables:
WERF_SET_TAGS_APP1: "tags.app1=true"
environment:
name: prod-us/app1
prod:us:app2:
extends:
- .prod:app:app2
needs:
- prod:us:shared
variables:
WERF_SET_TAGS_APP2: "tags.app2=true"
environment:
name: prod-us/app2
.smoke:
variables:
ENABLE_IMAGE_SOURCES: "true"
before_script:
- !reference [.scripts, setup_werf]
script:
- werf kube-run sources -- go-task test:smoke
timeout: 20m
rules:
- if: !reference [.rules, if, main_no_schedule]
when: manual
staging:eu:smoke:
extends:
- .smoke
stage: staging-smoke
needs:
- staging:eu:shared
staging:us:smoke:
extends:
- .smoke
stage: staging-smoke
needs:
- staging:us:shared
prod:eu:smoke:
extends:
- .smoke
stage: prod-smoke
needs:
- prod:eu:shared
prod:us:smoke:
extends:
- .smoke
stage: prod-smoke
needs:
- prod:us:shared
images:cleanup:
stage: cleanup
variables:
ENABLE_ALL_IMAGES: "true"
before_script:
- !reference [.scripts, setup_werf]
script:
- werf cr login -u nobody -p "${WERF_IMAGES_CLEANUP_PASSWORD:?}" "${WERF_REPO:?}"
- werf cleanup
resource_group: cleanup-images
rules:
- if: !reference [.rules, if, main_schedule]
.helm/Chart.yaml:
dependencies:
- name: shared
version: 1.0.0
tags:
- shared
- apps
- release
- all
export-values: &export-values
- parent: werf
child: werf
- name: app
alias: app1
version: 1.0.0
tags:
- app1
- apps
- release
- all
export-values: *export-values
- name: app
alias: app2
version: 1.0.0
tags:
- app2
- apps
- release
- all
export-values: *export-values
- name: database
version: 1.0.0
condition: database.enabled
tags:
- database
- infra
- release
- all
export-values: *export-values
- name: e2e-short
version: 1.0.0
tags:
- e2e-short
- tests
- all
export-values: *export-values
- name: e2e-long
version: 1.0.0
tags:
- e2e-long
- tests
- all
export-values: *export-values
- name: performance
version: 1.0.0
tags:
- performance
- tests
- all
export-values: *export-values
.helm/charts/app/Chart.yaml:
apiVersion: v2
name: app
version: 1.0.0
.helm/charts/app/templates/deployment.yaml:
apiVersion: apps/v1
kind: Deployment
metadata:
name: {{ $.Chart.Name }}
spec:
selector:
matchLabels:
app: {{ $.Chart.Name }}
template:
metadata:
labels:
app: {{ $.Chart.Name }}
spec:
imagePullSecrets:
- name: registrysecret
containers:
- name: {{ $.Chart.Name }}
image: {{ index $.Values.werf.image $.Chart.Name }}
ports:
- containerPort: 80
.helm/charts/app/templates/ingress.yaml:
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: {{ $.Chart.Name }}
spec:
ingressClassName: nginx
rules:
- host: "{{ $.Release.Name }}.{{ $.Chart.Name }}.example.org"
http:
paths:
- backend:
service:
name: {{ $.Chart.Name }}
port:
number: 80
path: /
pathType: Prefix
.helm/charts/app/templates/job-init-db.yaml:
{{ if $.Release.IsInstall }}
apiVersion: batch/v1
kind: Job
metadata:
name: {{ $.Chart.Name }}-init-db-{{ $.Release.Revision }}
annotations:
werf.io/weight: "-20"
spec:
template:
spec:
imagePullSecrets:
- name: registrysecret
restartPolicy: Never
containers:
- name: init-db
image: {{ $.Values.werf.image.sources }}
command: ["go-task", "-d", "src/{{ $.Chart.Name }}", "db:init"]
{{ end }}
.helm/charts/app/templates/job-migrate-db.yaml:
apiVersion: batch/v1
kind: Job
metadata:
name: {{ $.Chart.Name }}-migrate-db-{{ $.Release.Revision }}
annotations:
werf.io/weight: "-10"
spec:
template:
spec:
imagePullSecrets:
- name: registrysecret
restartPolicy: Never
containers:
- name: migrate-db
image: {{ $.Values.werf.image.sources }}
command: ["go-task", "-d", "src/{{ $.Chart.Name }}", "db:migrate"]
apiVersion: v1
kind: Service
metadata:
name: {{ $.Chart.Name }}
spec:
ports:
- port: 80
selector:
app: {{ $.Chart.Name }}
.helm/charts/database/Chart.yaml:
apiVersion: v2
name: database
version: 1.0.0
.helm/charts/database/templates/statefulset.yaml:
apiVersion: apps/v1
kind: StatefulSet
metadata:
name: db
annotations:
werf.io/weight: "-30"
spec:
serviceName: db
selector:
matchLabels:
app: db
template:
metadata:
labels:
app: db
spec:
containers:
- name: db
image: alpine:3.17
command: ["tail", "-f", /dev/null]
apiVersion: v2
name: e2e-long
version: 1.0.0
.helm/charts/e2e-long/templates/job.yaml:
apiVersion: batch/v1
kind: Job
metadata:
name: test-e2e-long-{{ $.Release.Revision }}
annotations:
werf.io/weight: "10"
spec:
template:
spec:
imagePullSecrets:
- name: registrysecret
restartPolicy: Never
containers:
- name: test-e2e-long
image: {{ $.Values.werf.image.sources }}
command: ["go-task", "test:e2e:long"]
apiVersion: v2
name: e2e-short
version: 1.0.0
.helm/charts/e2e-short/templates/job.yaml:
apiVersion: batch/v1
kind: Job
metadata:
name: test-e2e-short-{{ $.Release.Revision }}
annotations:
werf.io/weight: "10"
spec:
template:
spec:
imagePullSecrets:
- name: registrysecret
restartPolicy: Never
containers:
- name: test-e2e-short
image: {{ $.Values.werf.image.sources }}
command: ["go-task", "test:e2e:short"]
apiVersion: v2
name: performance
version: 1.0.0
.helm/charts/performance/templates/job.yaml:
apiVersion: batch/v1
kind: Job
metadata:
name: test-performance-{{ $.Release.Revision }}
annotations:
werf.io/weight: "10"
spec:
template:
spec:
imagePullSecrets:
- name: registrysecret
restartPolicy: Never
containers:
- name: test-performance
image: {{ $.Values.werf.image.sources }}
command: ["go-task", "test:performance"]
apiVersion: v2
name: shared
version: 1.0.0
.helm/charts/shared/templates/ingress.yaml:
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: main
spec:
ingressClassName: nginx
rules:
- host: "{{ $.Release.Name }}.example.org"
http:
paths:
- backend:
service:
name: app1
port:
number: 80
path: /app1
pathType: Prefix
- backend:
service:
name: app2
port:
number: 80
path: /app2
pathType: Prefix
.helm/Chart.lock:
dependencies:
- name: shared
repository: ""
version: 1.0.0
- name: app1
repository: ""
version: 1.0.0
- name: app2
repository: ""
version: 1.0.0
- name: database
repository: ""
version: 1.0.0
- name: e2e-short
repository: ""
version: 1.0.0
- name: e2e-long
repository: ""
version: 1.0.0
- name: performance
repository: ""
version: 1.0.0
digest: sha256:efce1b655fce8c8bf7f1739dc34a98fed3dad7943cef39bd6829aeaee34c03e9
generated: "2023-02-08T12:17:50.931015996+03:00"
.helm/templates/NOTES.txt:
{{ $chartsTagNames := dict }}
{{- range $.Chart.Dependencies }}
{{- if .Alias }}
{{- $_ := set $chartsTagNames .Alias .Tags }}
{{- else }}
{{- $_ := set $chartsTagNames .Name .Tags }}
{{- end }}
{{- end }}
{{- range $chartsTagNames.shared }}
{{- if dig . false $.Values.tags }}
Main application: https://{{ $.Release.Name }}.example.org
{{- break }}
{{- end }}
{{- end }}
{{- range $chartsTagNames.app1 }}
{{- if dig . false $.Values.tags }}
Application 1: https://{{ $.Release.Name }}.app1.example.org
{{- break }}
{{- end }}
{{- end }}
{{- range $chartsTagNames.app2 }}
{{- if dig . false $.Values.tags }}
Application 2: https://{{ $.Release.Name }}.app2.example.org
{{- break }}
{{- end }}
{{- end }}
.helm/values.yaml:
tags:
all: false
Taskfile.yaml:
version: "3"
tasks:
test:e2e:short:
cmds:
# Here be your short E2E tests:
- echo Short E2E tests completed.
test:e2e:long:
cmds:
# Here be your long E2E tests:
- echo Long E2E tests completed.
test:performance:
cmds:
# Here be your performance tests:
- echo Performance tests completed.
test:smoke:
cmds:
# Here be your smoke tests:
- echo Smoke tests completed.
sources.Dockerfile:
FROM alpine:3.17
WORKDIR /src
RUN apk add go-task
COPY . .
src/app1/Taskfile.yaml:
version: "3"
tasks:
build:
cmds:
# Here be building your app:
- cp main.sh app.sh
run:
cmds:
# Here be running your app:
- ./app.sh
lint:
cmds:
# Here be your linter:
- echo Lint completed.
test:unit:
cmds:
# Here be your unit tests:
- echo Unit tests completed. Code coverage is 12.34%
test:integration:
cmds:
# Here be your integration tests:
- echo Integration tests completed. Code coverage is 43.21%
db:init:
cmds:
# Here be your DB initialization:
- echo DB initialized.
db:migrate:
cmds:
# Here be your DB migrations:
- echo DB migrated.
src/app1/app.Dockerfile:
FROM alpine:3.17 as builder
WORKDIR /app
RUN apk add go-task
COPY . .
RUN go-task build
FROM alpine:3.17
WORKDIR /app
RUN apk add nmap-ncat go-task
COPY --from=builder /app/app.sh /app/Taskfile.yaml ./
CMD ["go-task", "run"]
#!/bin/sh
while true; do
printf "HTTP/1.1 200 OK\n\nHello world.\n" | ncat -lp 80
done
src/app2/Taskfile.yaml:
version: "3"
tasks:
build:
cmds:
# Here be building your app:
- cp main.sh app.sh
run:
cmds:
# Here be running your app:
- ./app.sh
lint:
cmds:
# Here be your linter:
- echo Lint completed.
test:unit:
cmds:
# Here be your unit tests:
- echo Unit tests completed. Code coverage is 12.34%
test:integration:
cmds:
# Here be your integration tests:
- echo Integration tests completed. Code coverage is 43.21%
db:init:
cmds:
# Here be your DB initialization:
- echo DB initialized.
db:migrate:
cmds:
# Here be your DB migrations:
- echo DB migrated.
src/app2/app.Dockerfile:
FROM alpine:3.17 as builder
WORKDIR /app
RUN apk add go-task
COPY . .
RUN go-task build
FROM alpine:3.17
WORKDIR /app
RUN apk add nmap-ncat go-task
COPY --from=builder /app/app.sh /app/Taskfile.yaml ./
CMD ["go-task", "run"]
#!/bin/sh
while true; do
printf "HTTP/1.1 200 OK\n\nHello world.\n" | ncat -lp 80
done
werf-giterminism.yaml:
giterminismConfigVersion: 1
config:
goTemplateRendering:
allowEnvVariables:
- ENABLE_ALL_IMAGES
- ENABLE_APPS_IMAGES
- ENABLE_IMAGE_SOURCES
werf.yaml:
configVersion: 1
project: myproject
{{- if or (env "ENABLE_ALL_IMAGES" | eq "true") (env "ENABLE_APPS_IMAGES" | eq "true") }}
---
image: app1
context: src/app1
dockerfile: ./app.Dockerfile
---
image: app2
context: src/app2
dockerfile: ./app.Dockerfile
{{- end }}
{{- if or (env "ENABLE_ALL_IMAGES" | eq "true") (env "ENABLE_IMAGE_SOURCES" | eq "true") }}
---
image: sources
dockerfile: ./sources.Dockerfile
{{- end }}
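Because the image definitions in werf.yaml are guarded by the environment variables allow-listed in werf-giterminism.yaml, you can preview which images a given job would build, for example:
ENABLE_APPS_IMAGES=true werf config render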
Additionally:
- Add container registry authorization options for werf cleanup by following the instructions.