diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index fa2c3ec..11e8da7 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -1,4 +1,4 @@ -workflow: +workflow: rules: - if: $CI_PIPELINE_SOURCE == "merge_request_event" - if: $CI_COMMIT_BRANCH == $CI_DEFAULT_BRANCH @@ -11,8 +11,8 @@ lint-markdown: stage: build image: node:lts script: - - npm install -g markdownlint-cli - - markdownlint "**/*.md" + - npm install -g markdownlint-cli2 + - markdownlint-cli2 "**/*.md" lint-yaml: stage: build diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index dac8fe0..61f0979 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -64,11 +64,3 @@ Check Markdown files: ```bash docker run --rm -v "$(pwd)":/workdir davidanson/markdownlint-cli2 "**/*.md" ``` - -Reproduce locally GitLab jobs: - -```bash -mkdir -p .gitlab/runner/local -docker run --rm --name gitlab-runner -v /var/run/docker.sock:/var/run/docker.sock -v $PWD/.gitlab/runner/local/config:/etc/gitlab-runner -v $PWD:$PWD --workdir $PWD gitlab/gitlab-runner exec docker lint-markdown -docker run --rm --name gitlab-runner -v /var/run/docker.sock:/var/run/docker.sock -v $PWD/.gitlab/runner/local/config:/etc/gitlab-runner -v $PWD:$PWD --workdir $PWD gitlab/gitlab-runner exec docker lint-yaml -``` diff --git a/docs/fundamentals/standards/glossary.md b/docs/fundamentals/standards/glossary.md index 7266601..800ff7d 100644 --- a/docs/fundamentals/standards/glossary.md +++ b/docs/fundamentals/standards/glossary.md @@ -1,4 +1,4 @@ -# Glossary +# Glossary ## IT diff --git a/docs/guides/learning-paths/code-pipelines/bamboo.md b/docs/guides/learning-paths/code-pipelines/bamboo.md new file mode 100644 index 0000000..b8082d1 --- /dev/null +++ b/docs/guides/learning-paths/code-pipelines/bamboo.md @@ -0,0 +1,100 @@ +# Bamboo Data Center + +## Introduction + +> Bamboo Data Center is a continuous delivery pipeline that offers resilience, reliability, and scalability for teams of any size. 
+ +[atlassian.com/software/bamboo](https://www.atlassian.com/software/bamboo) + +## Setup + +### Build the custom image + +Create `.bamboo/server/Dockerfile` file: + +```Dockerfile +# starts from the base image provided by Atlassian: https://hub.docker.com/r/atlassian/bamboo-server (7.2.2 based on Ubuntu 20.04.1 LTS, codename focal) +FROM atlassian/bamboo-server:7.2.2 + +# switches to root user for admin commands +USER root + +# installs system requirements +RUN apt-get update +RUN apt-get install -y apt-transport-https \ + ca-certificates \ + wget \ + curl \ + gnupg-agent \ + software-properties-common +RUN curl -fsSL https://download.docker.com/linux/ubuntu/gpg | apt-key add - +RUN add-apt-repository \ + "deb [arch=amd64] https://download.docker.com/linux/ubuntu \ + $(lsb_release -cs) \ + stable" +RUN wget https://packages.microsoft.com/config/ubuntu/20.04/packages-microsoft-prod.deb -O packages-microsoft-prod.deb \ + && dpkg -i packages-microsoft-prod.deb +RUN apt-get update + +# installs Docker: https://docs.docker.com/engine/install/ubuntu/ +RUN apt-get install -y docker-ce \ + docker-ce-cli \ + containerd.io +RUN usermod -a -G docker bamboo + +# installs .NET SDK LTS: https://docs.microsoft.com/en-us/dotnet/core/install/linux-ubuntu +RUN apt-get install -y dotnet-sdk-3.1 +RUN apt-get install -y dotnet-sdk-5.0 + +# switches back to bamboo user +USER bamboo + +# updates image entrypoint with commands that can only be ran when the container starts +RUN echo "chown root:docker /var/run/docker.sock" >> /entrypoint.sh +``` + +```bash +# creates a new image +docker build . 
-t devprofr/bamboo-server -f .bamboo/server/Dockerfile --no-cache +``` + +### Run the custom image + +```bash +docker volume create --name bambooVolume + +# for Linux +docker run -v /var/run/docker.sock:/var/run/docker.sock -v bambooVolume:/var/atlassian/application-data/bamboo --name="bamboo" --init -d -p 54663:54663 -p 8085:8085 devprofr/bamboo-server + +# for Windows +docker run -v //var/run/docker.sock:/var/run/docker.sock -v bambooVolume:/var/atlassian/application-data/bamboo --name="bamboo" --init -d -p 54663:54663 -p 8085:8085 devprofr/bamboo-server +``` + +### Configuration + +Open [localhost:8085](http://localhost:8085/) + +#### Set server capabilities + +_Limitation 2021-02-28_: Unfortunately it is not possible to automate it through an API call + +You have to manually go to this page ["Bamboo administration > Server capabilities"](http://localhost:8085/admin/agent/configureSharedLocalCapabilities.action) and set the server capabilities (if not present), it must be done only once. + +Category | Executable / Label | Path | Bamboo key +-----------|--------------------|-------------------|-------------------------------- +Executable | dotnet | `/usr/bin/dotnet` | `system.builder.command.dotnet` +Docker | Docker | `/usr/bin/docker` | `system.docker.executable` + +### Troubleshooting + +```bash +docker exec -it bamboo sh +docker exec -u 0 -it bamboo bash +``` + +### Clean-up + +```bash +docker stop bamboo +docker rm bamboo +``` diff --git a/docs/guides/learning-paths/code-pipelines/bitbucket-cloud.md b/docs/guides/learning-paths/code-pipelines/bitbucket-cloud.md new file mode 100644 index 0000000..aeda487 --- /dev/null +++ b/docs/guides/learning-paths/code-pipelines/bitbucket-cloud.md @@ -0,0 +1,44 @@ +# Bitbucket (Cloud) + +## Introduction + +> Bitbucket is more than just Git code management. +> Bitbucket gives teams one place to plan projects, collaborate on code, test, and deploy. 
+ +[bitbucket.org/product](https://bitbucket.org/product/) + +## Setup + +Create a file `bitbucket-pipelines.yml`: + +```yml +image: mcr.microsoft.com/dotnet/sdk:5.0 + +pipelines: + default: + - parallel: + - step: + name: Build and Test + caches: + - dotnetcore + script: + - REPORTS_PATH=./test-reports/build_${BITBUCKET_BUILD_NUMBER} + - cd samples/dotnet + - dotnet restore + - dotnet build --no-restore --configuration Release + - dotnet test --no-build --configuration Release --test-adapter-path:. --logger:"junit;LogFilePath=$REPORTS_PATH/junit.xml" + - step: + name: Lint the code + caches: + - dotnetcore + script: + - export SOLUTION_NAME=Devpro.CIToolsDemo + - export REPORTS_PATH=linter-reports + - cd samples/dotnet + - dotnet new tool-manifest + - dotnet tool install JetBrains.ReSharper.GlobalTools --version 2021.1.0-eap02 + - dotnet tool restore + - dotnet jb inspectcode ${SOLUTION_NAME}.sln --output="${REPORTS_PATH}/jb-${BITBUCKET_BUILD_NUMBER}.xml" + artifacts: + - linter-reports/** +``` diff --git a/docs/guides/learning-paths/code-pipelines/code-pipelines.md b/docs/guides/learning-paths/code-pipelines/code-pipelines.md new file mode 100644 index 0000000..6bf27fa --- /dev/null +++ b/docs/guides/learning-paths/code-pipelines/code-pipelines.md @@ -0,0 +1,15 @@ +# Code pipelines + +Let's explore different tools to implement CI pipelines! 
+ +## Platforms + +Platform | Typology +--------------------------------------|------------------------ +Azure DevOps | Cloud +[Bamboo Data Center](bamboo.md) | Self-hosted (container) +[Bitbucket Cloud](bitbucket-cloud.md) | Cloud +[Concourse](concourse.md) | Self-hosted (container) +GitHub | Cloud +GitLab | Cloud +[TeamCity Professional](teamcity.md) | Self-hosted (container) diff --git a/docs/guides/learning-paths/code-pipelines/concourse.md b/docs/guides/learning-paths/code-pipelines/concourse.md new file mode 100644 index 0000000..c9bfb39 --- /dev/null +++ b/docs/guides/learning-paths/code-pipelines/concourse.md @@ -0,0 +1,62 @@ +# Concourse samples + +Comprehensive samples to quickly get up to speed with [Concourse](../../../organizations/communities/concourse/concourse.md). + +## Requirements + +* Have an account to a running Concourse instance + * For the first time, you can use the local containers with `docker compose -f samples/concourse/compose.yml up -d` + * You can also deploy it in a Kubernetes cluster with [Helm chart](https://github.com/devpro/helm-charts/tree/feature/concourse/charts/concourse) + * Ultimately, you can run it on a [VM](https://github.com/devpro/information-technology-guide/blob/main/docs/communities/concourse/ubuntu-install.md) + +* Have `fly` executable on the machine running the command lines (careful with the version that needs to match the one from Concourse instance) + * Grab it from the [releases GitHub page](https://github.com/concourse/concourse/releases) or from the running Concourse web page + +## Samples + +### Login + +```bash +fly --target localhost login --concourse-url http://localhost:8080/ +``` + +### Pipelines + +* Hello world + +Login on localhost: + +```bash +fly --target localhost set-pipeline --pipeline helloworld --config samples/concourse/tasks/basic/01_helloworld.yml + +# enables the pipeline and run it (can also be done from http://localhost:8080/teams/main/pipelines/helloworld, click on play symbol then 
on + symbol) +fly -t localhost unpause-pipeline -p helloworld +fly -t localhost trigger-job -j helloworld/job +``` + +* .NET + +```bash +fly --target localhost set-pipeline --pipeline aspnetcore --config samples/concourse/tasks/dotnet/01_aspnetcore.yml + +fly -t localhost unpause-pipeline -p aspnetcore + +fly -t localhost trigger-job -j aspnetcore/build-webapp + +fly --target localhost set-pipeline --pipeline dotnetglobaltool --config pipelines/dotnetcore/02_globaltool.yml --var mdbatlas-publickey=xxxx --var mdbatlas-privatekey=yyyy --var almops-org=xxxx --var almops-user=yyyy --var almops-token=zzz + +fly -t localhost unpause-pipeline -p dotnetglobaltool + +fly -t localhost trigger-job -j dotnetglobaltool/mongodb-atlas -w +fly -t localhost trigger-job -j dotnetglobaltool/azure-devops -w +``` + +### Tasks + +* Hello world + +```bash +fly --target localhost login --concourse-url http://localhost:8080/ + +fly -t localhost execute --config samples/concourse/tasks/basic/helloworld.yml +``` diff --git a/docs/guides/learning-paths/code-pipelines/teamcity.md b/docs/guides/learning-paths/code-pipelines/teamcity.md new file mode 100644 index 0000000..e58b29c --- /dev/null +++ b/docs/guides/learning-paths/code-pipelines/teamcity.md @@ -0,0 +1,57 @@ +# TeamCity + +In this tutorial we'll set up an instance of TeamCity, running on Docker, and configure a CI pipeline on the code samples available in this repository. 
+ +## Setup + +### Create local folders + +```bash +# creates local folders to store TeamCity run files +md data logs agent +md agent/conf +``` + +### Run TeamCity server in a container + +```bash +# starts the container +docker run -it --name teamcity-server -v $PWD/data:/data/teamcity_server/datadir -v $PWD/logs:/opt/teamcity/logs -p 8111:8111 jetbrains/teamcity-server + +# if stopped, starts again the container +docker start teamcity-server +``` + +### Run TeamCity agent in a container + +#### Use the Docker image + +```bash +docker run -it --name teamcity-agent -e SERVER_URL="http://teamcity-server:8111" -v $PWD/agent/conf:/data/teamcity_agent/conf --link teamcity-server jetbrains/teamcity-agent +``` + +→ [hub.docker.com](https://hub.docker.com/r/jetbrains/teamcity-agent/) + +#### Use a custom image + +The default image may not contain all the needed tools for the pipeline to run. + +TODO + +### Clean-up + +```bash +docker rm teamcity-server teamcity-agent +``` + +## Configuration + +### First TeamCity configuration + +* Open [localhost:8111](http://localhost:8111) +* Log-in with empty username and the token shown in the container log file +* Go to the "Administration" section and click on "Users" to create a new user account (make sure to give the super administrative privilege) +* Authorize the agent [localhost:8111/agents](http://localhost:8111/agents.html?tab=unauthorizedAgents) +* Go to the "Projects" section and create a new project (use `https://github.com/devpro/ci-pipeline-samples` as repository URL) +* Use "Auto-detected Build Steps" to have TeamCity review what is needed (you can select everything except the NuGet and msbuild step) +* Review steps and step names and start a new build diff --git a/docs/guides/workstations/ubuntu/ubuntu.md b/docs/guides/workstations/ubuntu/ubuntu.md index aa7b2ee..f7370c9 100644 --- a/docs/guides/workstations/ubuntu/ubuntu.md +++ b/docs/guides/workstations/ubuntu/ubuntu.md @@ -1,4 +1,4 @@ -# Ubuntu +# Ubuntu The 
following instructions target **Ubuntu 24.04**. For previous instructions: [Ubuntu 20.04](archive/ubuntu-20_04.md). @@ -10,6 +10,7 @@ For previous instructions: [Ubuntu 20.04](archive/ubuntu-20_04.md). ```bash sudo apt update sudo apt -y upgrade +sudo apt autoremove ``` ## Packages diff --git a/docs/guides/workstations/windows/windows.md b/docs/guides/workstations/windows/windows.md index f2ddc56..bb78d5e 100644 --- a/docs/guides/workstations/windows/windows.md +++ b/docs/guides/workstations/windows/windows.md @@ -1,4 +1,4 @@ -# Windows +# Windows The following instructions target **Windows 11**. For previous versions: [Windows 10](archive/windows-10.md). @@ -29,8 +29,7 @@ For previous versions: [Windows 10](archive/windows-10.md). ### Utilities -- KeePass - - Plugins: [KeeTheme](https://github.com/xatupal/KeeTheme) (dark theme) +- Password manager: 1Password, or KeePass (with [KeeTheme](https://github.com/xatupal/KeeTheme)) for example - WinDirStat ```batch @@ -48,9 +47,9 @@ For previous versions: [Windows 10](archive/windows-10.md). 1. [Visual Studio Code](../../../organizations/companies/microsoft/vscode.md) 2. Git 3. Notepad++ -4. [MongoDB Compass](https://www.mongodb.com/try/download/compass) -5. [Visual Studio 2022](../../../organizations/companies/microsoft/vs2022.md) or [Rider](https://www.jetbrains.com/rider/) -6. [WebStorm](https://www.jetbrains.com/webstorm/) +4. [MongoDB Compass](../../../organizations/companies/mongodb/compass.md) +5. [Rider](../../../organizations/companies/jetbrains/rider.md) or [Visual Studio 2026](../../../organizations/companies/microsoft/vs2026.md) +6. 
[WebStorm](../../../organizations/companies/jetbrains/webstorm.md) ### Office diff --git a/docs/organizations/companies/armis/armis.md b/docs/organizations/companies/armis/armis.md index b74e8a1..11263d1 100644 --- a/docs/organizations/companies/armis/armis.md +++ b/docs/organizations/companies/armis/armis.md @@ -1,3 +1,3 @@ -# Armis +# Armis -[armis.com](https://www.armis.com/) +🌐 [armis.com](https://www.armis.com/) diff --git a/docs/organizations/companies/armorcode/armorcode.md b/docs/organizations/companies/armorcode/armorcode.md index fa45461..63a74cd 100644 --- a/docs/organizations/companies/armorcode/armorcode.md +++ b/docs/organizations/companies/armorcode/armorcode.md @@ -1,3 +1,3 @@ -# ArmorCode +# ArmorCode -[armorcode.com](https://www.armorcode.com/) +🌐 [armorcode.com](https://www.armorcode.com/) diff --git a/docs/organizations/companies/checkpoint/checkpoint.md b/docs/organizations/companies/checkpoint/checkpoint.md index 4d9228b..3614fb9 100644 --- a/docs/organizations/companies/checkpoint/checkpoint.md +++ b/docs/organizations/companies/checkpoint/checkpoint.md @@ -1,6 +1,6 @@ -# Check Point +# Check Point -[checkpoint.com](https://www.checkpoint.com/) +🌐 [checkpoint.com](https://www.checkpoint.com/) ## Check Point CloudGuard diff --git a/docs/organizations/companies/crowdstrike/crowdstrike.md b/docs/organizations/companies/crowdstrike/crowdstrike.md index 1a7b5b7..3979f38 100644 --- a/docs/organizations/companies/crowdstrike/crowdstrike.md +++ b/docs/organizations/companies/crowdstrike/crowdstrike.md @@ -1,3 +1,3 @@ -# CrowdStrike +# CrowdStrike -[crowdstrike.com](https://www.crowdstrike.com/) +🌐 [crowdstrike.com](https://www.crowdstrike.com/) diff --git a/docs/organizations/companies/cyscale/cyscale.md b/docs/organizations/companies/cyscale/cyscale.md index c7386f6..cbd7ea6 100644 --- a/docs/organizations/companies/cyscale/cyscale.md +++ b/docs/organizations/companies/cyscale/cyscale.md @@ -1,3 +1,3 @@ # Cyscale -[cyscale.com](https://cyscale.com/) 
+🌐 [cyscale.com](https://cyscale.com/) diff --git a/docs/organizations/companies/data-theorem/data-theorem.md b/docs/organizations/companies/data-theorem/data-theorem.md index 05116ec..2af6c5b 100644 --- a/docs/organizations/companies/data-theorem/data-theorem.md +++ b/docs/organizations/companies/data-theorem/data-theorem.md @@ -1,6 +1,6 @@ # Data Theorem -[datatheorem.com](https://www.datatheorem.com/) +🌐 [datatheorem.com](https://www.datatheorem.com/) ## Cloud Secure diff --git a/docs/organizations/companies/enalean/teleap.md b/docs/organizations/companies/enalean/tuleap.md similarity index 100% rename from docs/organizations/companies/enalean/teleap.md rename to docs/organizations/companies/enalean/tuleap.md diff --git a/docs/organizations/companies/fortinet/fortinet.md b/docs/organizations/companies/fortinet/fortinet.md index 12c0d5e..6664aae 100644 --- a/docs/organizations/companies/fortinet/fortinet.md +++ b/docs/organizations/companies/fortinet/fortinet.md @@ -1,6 +1,6 @@ -# Fortinet +# Fortinet -[fortinet.com](https://www.fortinet.com/) +🌐 [fortinet.com](https://www.fortinet.com/) ## Lacework FortiCNAPP diff --git a/docs/organizations/companies/gitlab/runner-container.md b/docs/organizations/companies/gitlab/runner-container.md index e6a727f..7c168fb 100644 --- a/docs/organizations/companies/gitlab/runner-container.md +++ b/docs/organizations/companies/gitlab/runner-container.md @@ -1,4 +1,4 @@ -# GitLab Runner executed in a container +# GitLab Runner executed in a container ## Use cases @@ -13,10 +13,18 @@ Use Docker image: mkdir -p .gitlab/runner/local # displays help on a command -docker run --rm --name gitlab-runner -v /var/run/docker.sock:/var/run/docker.sock -v $PWD/.gitlab/runner/local/config:/etc/gitlab-runner -v $PWD:$PWD --workdir $PWD gitlab/gitlab-runner exec help +docker run --rm --name gitlab-runner \ + -v /var/run/docker.sock:/var/run/docker.sock \ + -v $PWD/.gitlab/runner/local/config:/etc/gitlab-runner \ + -v $PWD:$PWD --workdir $PWD \ + 
gitlab/gitlab-runner exec help # executes shell on "build" job -docker run --rm --name gitlab-runner -v /var/run/docker.sock:/var/run/docker.sock -v $PWD/.gitlab/runner/local/config:/etc/gitlab-runner -v $PWD:$PWD --workdir $PWD gitlab/gitlab-runner exec shell build +docker run --rm --name gitlab-runner \ + -v /var/run/docker.sock:/var/run/docker.sock \ + -v $PWD/.gitlab/runner/local/config:/etc/gitlab-runner \ + -v $PWD:$PWD --workdir $PWD \ + gitlab/gitlab-runner exec shell build ``` Warning: Includes are not supported unfortunately ([Issue #2797](https://gitlab.com/gitlab-org/gitlab-runner/-/issues/2797), alternative with [firecow/gitlab-ci-local](https://github.com/firecow/gitlab-ci-local)) diff --git a/docs/organizations/companies/google/google-cloud/google-cloud.md b/docs/organizations/companies/google/google-cloud/google-cloud.md index 881f0bc..d53b05c 100644 --- a/docs/organizations/companies/google/google-cloud/google-cloud.md +++ b/docs/organizations/companies/google/google-cloud/google-cloud.md @@ -2,8 +2,43 @@ > Google Cloud offers over 150 products and services for cloud computing, data analytics, AI and machine learning, security, and more. -🌐 [cloud.google.com](https://cloud.google.com/), [console](https://console.cloud.google.com/) +🌐 [cloud.google.com](https://cloud.google.com/) + +Launched in April, 2008. ## Content * [CLI](gcloud-cli.md) + +### Quick start + +Go to [console.cloud.google.com](https://console.cloud.google.com/) to reach the web console (portal) and start your experience with GCP. 
+ +Build your learning path from official resources: + +* [Cloud Architecture Center](https://cloud.google.com/architecture) +* [Google Cloud Skills Boost](https://www.cloudskillsboost.google/) + +Keep up-to-date with [Google Cloud Podcast](https://cloud.google.com/podcasts/gcp-podcast) + +### Concepts + +* [Marketplace](https://console.cloud.google.com/marketplace) lets you quickly deploy software on Google Cloud +* [Virtual Private Cloud (VPC)](https://cloud.google.com/vpc) is a global virtual network that spans all regions. Single VPC for an entire organization, isolated within projects. Increase IP space with no downtime. + +## Products + +Navigate the product offering with [interactive cheat sheet](https://googlecloudcheatsheet.withgoogle.com/) and [Google Cloud solutions page](https://cloud.google.com/solutions). + +### Application Life Management (ALM) + +* [Artifact Registry](https://cloud.google.com/artifact-registry) + * [Artifact Analysis](https://cloud.google.com/artifact-analysis/docs) +* [Cloud Deploy](https://cloud.google.com/deploy) + * [Skaffold](skaffold.md) + +## Google Kubernetes Engine (GKE) + +> The most scalable and fully automated Kubernetes service. + +[cloud.google.com](https://cloud.google.com/kubernetes-engine), [docs](https://cloud.google.com/kubernetes-engine/docs) diff --git a/docs/organizations/companies/google/google-cloud/skaffold.md b/docs/organizations/companies/google/google-cloud/skaffold.md new file mode 100644 index 0000000..66c3f53 --- /dev/null +++ b/docs/organizations/companies/google/google-cloud/skaffold.md @@ -0,0 +1,5 @@ +# Skaffold + +> Skaffold handles the workflow for building, pushing and deploying your application, allowing you to focus on what matters most: writing code. 
+ +[skaffold.dev](https://skaffold.dev/), [code](https://github.com/GoogleContainerTools/skaffold), [docs](https://skaffold.dev/docs/), [cloud.google.com](https://cloud.google.com/skaffold) diff --git a/docs/organizations/companies/ibm/ibm.md b/docs/organizations/companies/ibm/ibm.md index ef72396..8778add 100644 --- a/docs/organizations/companies/ibm/ibm.md +++ b/docs/organizations/companies/ibm/ibm.md @@ -1,6 +1,6 @@ # IBM -[ibm.com](https://ibm.com) +🌐 [ibm.com](https://ibm.com) ## IBM Cloud Security and Compliance Center diff --git a/docs/organizations/companies/infomaniak/infomaniak.md b/docs/organizations/companies/infomaniak/infomaniak.md new file mode 100644 index 0000000..e69de29 diff --git a/docs/organizations/companies/jetbrains/rider.md b/docs/organizations/companies/jetbrains/rider.md new file mode 100644 index 0000000..654fff8 --- /dev/null +++ b/docs/organizations/companies/jetbrains/rider.md @@ -0,0 +1,3 @@ +# Rider + +[jetbrains.com/rider](https://www.jetbrains.com/rider/) diff --git a/docs/organizations/companies/jetbrains/webstorm.md b/docs/organizations/companies/jetbrains/webstorm.md new file mode 100644 index 0000000..6d4dca2 --- /dev/null +++ b/docs/organizations/companies/jetbrains/webstorm.md @@ -0,0 +1,3 @@ +# WebStorm + +[jetbrains.com/webstorm](https://www.jetbrains.com/webstorm/) diff --git a/docs/organizations/companies/octopus-deploy/codefresh.md b/docs/organizations/companies/octopus-deploy/codefresh.md index 16b9128..9aeddc3 100644 --- a/docs/organizations/companies/octopus-deploy/codefresh.md +++ b/docs/organizations/companies/octopus-deploy/codefresh.md @@ -1,5 +1,5 @@ -# Codefresh +# Codefresh -[codefresh.io](https://codefresh.io/) +🌐 [codefresh.io](https://codefresh.io/) [How we replaced the default K8s scheduler to optimize our Continuous Integration builds](https://codefresh.io/blog/custom-k8s-scheduler-continuous-integration/) - July 7, 2025 diff --git a/docs/organizations/companies/octopus-deploy/octopus-deploy.md 
b/docs/organizations/companies/octopus-deploy/octopus-deploy.md index e69de29..1f51dfc 100644 --- a/docs/organizations/companies/octopus-deploy/octopus-deploy.md +++ b/docs/organizations/companies/octopus-deploy/octopus-deploy.md @@ -0,0 +1 @@ +# Octopus Deploy diff --git a/docs/organizations/companies/orca-security/orca-security.md b/docs/organizations/companies/orca-security/orca-security.md index d82e566..0dd8c8b 100644 --- a/docs/organizations/companies/orca-security/orca-security.md +++ b/docs/organizations/companies/orca-security/orca-security.md @@ -1,3 +1,3 @@ -# Orca Security +# Orca Security -[orca.security](https://orca.security/) +🌐 [orca.security](https://orca.security/) diff --git a/docs/organizations/companies/paloaltonetworks/paloaltonetworks.md b/docs/organizations/companies/paloaltonetworks/paloaltonetworks.md index 73e27a2..f178238 100644 --- a/docs/organizations/companies/paloaltonetworks/paloaltonetworks.md +++ b/docs/organizations/companies/paloaltonetworks/paloaltonetworks.md @@ -1,6 +1,6 @@ -# Palo Alto Networks +# Palo Alto Networks -[paloaltonetworks.com](https://www.paloaltonetworks.com/) +🌐 [paloaltonetworks.com](https://www.paloaltonetworks.com/) ## Prisma Cloud diff --git a/docs/organizations/companies/qualys/qualys.md b/docs/organizations/companies/qualys/qualys.md index 6d0e247..0f92325 100644 --- a/docs/organizations/companies/qualys/qualys.md +++ b/docs/organizations/companies/qualys/qualys.md @@ -1,3 +1,3 @@ # Qualys -[qualys.com](https://www.qualys.com/) +🌐 [qualys.com](https://www.qualys.com/) diff --git a/docs/organizations/companies/rapid7/rapid7.md b/docs/organizations/companies/rapid7/rapid7.md index 6f4f5c0..5c0f96f 100644 --- a/docs/organizations/companies/rapid7/rapid7.md +++ b/docs/organizations/companies/rapid7/rapid7.md @@ -1,6 +1,6 @@ -# Rapid7 +# Rapid7 -[rapid7.com](https://www.rapid7.com/) +🌐 [rapid7.com](https://www.rapid7.com/) ## InsightCloudSec diff --git a/docs/organizations/companies/rapidfort/kimia.md 
b/docs/organizations/companies/rapidfort/kimia.md index b2bd892..70266a7 100644 --- a/docs/organizations/companies/rapidfort/kimia.md +++ b/docs/organizations/companies/rapidfort/kimia.md @@ -1,5 +1,5 @@ -# Kimia +# Kimia > Kimia is a Kubernetes-native, OCI-compliant container image builder designed for secure, daemonless builds in cloud environments. -[code](https://github.com/rapidfort/kimia) +🌐 [code](https://github.com/rapidfort/kimia) diff --git a/docs/organizations/companies/sentinelone/sentinelone.md b/docs/organizations/companies/sentinelone/sentinelone.md index e7c471d..6071781 100644 --- a/docs/organizations/companies/sentinelone/sentinelone.md +++ b/docs/organizations/companies/sentinelone/sentinelone.md @@ -1,6 +1,6 @@ -# SentinelOne +# SentinelOne -[sentinelone.com](https://www.sentinelone.com/) +🌐 [sentinelone.com](https://www.sentinelone.com/) ## Singularity Cloud Security diff --git a/docs/organizations/companies/snyk/snyk.md b/docs/organizations/companies/snyk/snyk.md index 17f3533..9fdbe00 100644 --- a/docs/organizations/companies/snyk/snyk.md +++ b/docs/organizations/companies/snyk/snyk.md @@ -1,3 +1,3 @@ -# Snyk +# Snyk -[snyk.io](https://snyk.io) +🌐 [snyk.io](https://snyk.io) diff --git a/docs/organizations/companies/suse/alp.md b/docs/organizations/companies/suse/alp.md deleted file mode 100644 index c0a41b4..0000000 --- a/docs/organizations/companies/suse/alp.md +++ /dev/null @@ -1,3 +0,0 @@ -# SUSE’s Adaptable Linux Platform (ALP) - -🌐 [docs](https://documentation.suse.com/#alp) diff --git a/docs/organizations/companies/suse/archive/epinio-samples.md b/docs/organizations/companies/suse/archive/epinio-samples.md new file mode 100644 index 0000000..3245e2d --- /dev/null +++ b/docs/organizations/companies/suse/archive/epinio-samples.md @@ -0,0 +1,48 @@ +# Epinio samples + +Samples of usage of [Epinio](epinio.md) to deploy workload with a command. 
+ +## Angular + +Run the following command to deploy the Angular application: + +```bash +epinio push --name ngsample --path ngsample --env BP_WEB_SERVER=nginx --env BP_WEB_SERVER_ROOT=dist/ngsample --env BP_NODE_RUN_SCRIPTS=build --env BP_WEB_SERVER_ENABLE_PUSH_STATE=true +# NODE_ENV=development +``` + +References: + +- [paketo-buildpacks/samples/web-servers/angular-nginx-sample](https://github.com/paketo-buildpacks/samples/tree/main/web-servers/angular-nginx-sample) +- [Paketo Buildpacks > Build and Serve a Frontend Framework App](https://paketo.io/docs/howto/web-servers/#build-and-serve-a-frontend-framework-app) + +## .NET + +Run the following command to deploy an ASP.NET application: + +```bash +epinio push --name aspnetapisample --path src/WebApi --env ASPNETCORE_ENVIRONMENT=Development +``` + +For Paketo Buildpacks to be able to create artifacts that run, the file `Procfile` must be added (see [Override the Start Process Set by the Buildpack](https://paketo.io/docs/howto/dotnet-core/#override-the-start-process-set-by-the-buildpack)). 
For example `samples/dotnet/src/WebApi/Procfile`: + +```Procfile +web: dotnet EpinioDotnetSamples.WebApi.dll +``` + +## React + +Run the following command to deploy the application: + +```bash +epinio push --name reactsample --path sample-app --env BP_WEB_SERVER=nginx --env BP_WEB_SERVER_ROOT=build --env BP_NODE_RUN_SCRIPTS=build --env BP_WEB_SERVER_ENABLE_PUSH_STATE=true +``` + +References: + +- [paketo-buildpacks/samples/web-servers/react-frontend-sample](https://github.com/paketo-buildpacks/samples/tree/main/web-servers/react-frontend-sample) +- [Paketo Buildpacks > Build and Serve a Frontend Framework App](https://paketo.io/docs/howto/web-servers/#build-and-serve-a-frontend-framework-app) +- [react.dev](https://react.dev/) +- [create-react-app.dev](https://create-react-app.dev/) diff --git a/docs/organizations/companies/suse/archive/epinio.md b/docs/organizations/companies/suse/archive/epinio.md new file mode 100644 index 0000000..8d576e4 --- /dev/null +++ b/docs/organizations/companies/suse/archive/epinio.md @@ -0,0 +1,94 @@ +# Epinio + +> Tame your developer workflow to go from Code to URL in one push. + +[epinio.io](https://epinio.io/), [docs](https://docs.epinio.io/), [code](https://github.com/epinio/epinio) + +## Getting started + +### Introduction + +Epinio addresses the wish to hide all the complexity of building and deploying code. This subject was addressed before by Cloud Foundry, which got deprecated with the rise of Kubernetes. + +Epinio is a Cloud Native, free, open-source, simple solution that runs in a Kubernetes cluster. Its development is led by SUSE. + +### Quick start + +!!! tip + + Use short name for the application name (less than 22) and do not use `-` to avoid Ingress issue (see [application name restrictions](#application-name-restrictions)). 
+ +* Make sure you have a Kubernetes cluster to work on and you are connected (by executing `kubectl config current-context` in a terminal for example) +* Follow [Epinio official quickstart](https://docs.epinio.io/tutorials/quickstart) + +### Presentations + +* Youtube videos + * [SUSECON - Customize your Developer Experience with Epinio](https://www.youtube.com/watch?v=cr4vWO9J7tk) - October 8, 2022 + * [SUSE Projects - Epinio demo: Wordpress on Digital Ocean](https://www.youtube.com/watch?v=OdPF0qH5Pf8&t=296s) - July 6, 2021 + * [Kubesimplify - Let's Learn Epinio](https://www.youtube.com/watch?v=ietNQSQXhAc) - June 23, 2021 + * [SUSECON - Epinio Demo: Building a PaaS on Kubernetes using off-the-shelf components](https://www.youtube.com/watch?v=HKXtAgh3ILw) - November 10, 2021 + +## Going further + +### Application examples + +!!! tip + + Delete local files before pushing the application if done from a developer workstation (see [local files restriction](#local-files-restriction)). + +Language/Framework | Path | Details +---------------------|-------------------------------------------------------------------------------------------------|------------------------------------------------------ +Angular (TypeScript) | [devpro/epinio-samples](https://github.com/devpro/epinio-samples/tree/main/samples/angular) | Angular Web App (Single Page Application) +.NET (C#) | [devpro/epinio-samples](https://github.com/devpro/epinio-samples/tree/main/samples/dotnet) | ASP.NET 7 Web API (REST), ASP.NET Web App (Razor) +Go | [epinio/golang-sample-app](https://github.com/epinio/epinio/tree/main/assets/golang-sample-app) | - +Java | [spring-projects/spring-petclinic](https://github.com/spring-projects/spring-petclinic) | Spring +JavaScript | [ellisonleao/clumsy-bird](https://github.com/ellisonleao/clumsy-bird) | melonJS game engine +PHP | [epinio/sample-app](https://github.com/epinio/epinio/tree/main/assets/sample-app) | phpinfo +Python | 
[mageran/minio-epinio](https://github.com/mageran/minio-epinio/tree/main/samples/photo-album) | Flask and Boto3 for the backend and Svelte for the UI +React | [devpro/epinio-samples](https://github.com/devpro/epinio-samples/tree/main/samples/react) | React Web App (Single Page Application) +Ruby on Rails | [epinio/example-rails](https://github.com/epinio/example-rails) | - +Wordpress | [epinio/example-wordpress](https://github.com/epinio/example-wordpress) | CMS written in PHP and using a MySQL database + +### CLI + +#### CLI cheat sheet + +Command | Action +-----------------------------------------------|---------------------------------------------------- +`epinio app delete sample` | Delete an application +`epinio login -u admin 'https://'` | Log in Epinio server +`epinio push --name sample --path sample-app` | Create or update an application from a local folder +`epinio settings show` | Display Epinio server information + +#### CLI examples + +```bash +# creates an application from a remote git repository on a branch +epinio push --name dotnetapisample --git-provider github --git https://github.com/devpro/epinio-samples,feature/init-solution + +# creates an application from a remote git repository +epinio push --name clumsybird --git-provider github --git https://github.com/ellisonleao/clumsy-bird +``` + +### Helm chart + +* [devpro/helm-charts](https://github.com/devpro/helm-charts/blob/main/charts/epinio/README.md) encapsulates Epinio Helm chart and provides concrete example of installation and use + +### Local cluster with K3D + +* [mesquitamv/epinio-deploy](https://github.com/mesquitamv/epinio-deploy) + +### Limitations + +#### Application name restrictions + +* **Max length**: There is an issue with Ingress if the application name is too long +(reproduced with raspnetwebrazorsample which generated a service name like raspnetwebrazorsample-40251af7269c59d923bfa391bc241c7a320d332c, of 63 characters). 
+* **Special characters**: Avoid special characters like `-` + +### Local files restriction + +Currently, there is no way to ignore local folders and files when pushing an application. This is a serious issue while working on a codebase using NPM packages because node_modules can easily be heavy. + +This limitation is tracked by [Issue #2589](https://github.com/epinio/epinio/issues/2589). diff --git a/docs/organizations/companies/suse/archive/hobbyfarm.md b/docs/organizations/companies/suse/archive/hobbyfarm.md new file mode 100644 index 0000000..f6fa6ad --- /dev/null +++ b/docs/organizations/companies/suse/archive/hobbyfarm.md @@ -0,0 +1,45 @@ +# HobbyFarm + +> Hobbyfarm is an interactive coding platform that runs in the browser + +🌐 [hobbyfarm.github.io](https://hobbyfarm.github.io/hobbyfarm/), +[docs](https://hobbyfarm.github.io/), +[organization](https://github.com/hobbyfarm), +[issues](https://github.com/hobbyfarm/hobbyfarm/issues) + +## Software design + +### Technologies + +* Front-end: Angular +* Back-end: Go +* Documentation: Markdown +* Website: Hugo + +### Code repositories + +Name | Content +------------------------------------------------------------------------|----------------------------------------------------------------- +[admin-ui](https://github.com/hobbyfarm/admin-ui) | HobbyFarm administration UI (web application) +[ec2-operator](https://github.com/hobbyfarm/ec2-operator) | Amazon EC2 operator for HobbyFarm +[gargantua](https://github.com/hobbyfarm/gargantua) | HobbyFarm back-end (monolith application) +[hfcli](https://github.com/hobbyfarm/hfcli) | HobbyFarm Command Line Interface (CLI) +[hobbyfarm](https://github.com/hobbyfarm/hobbyfarm) | HobbyFarm Helm chart +[hobbyfarm.github.io](https://github.com/hobbyfarm/hobbyfarm.github.io) | HobbyFarm documentation/website +[ui](https://github.com/hobbyfarm/ui) | HobbyFarm UI + +### Local setup + +* Start [Gargantua](https://github.com/hobbyfarm/gargantua/blob/master/CONTRIBUTING.md) +* Start Admin 
UI +* TODO + +## Operations + +### Installation + +* [Documentation](https://hobbyfarm.github.io/docs/setup/installation/) + +## References + +* [Amazon EC2](https://aws.amazon.com/ec2/) diff --git a/docs/organizations/companies/suse/archive/hypper.md b/docs/organizations/companies/suse/archive/hypper.md new file mode 100644 index 0000000..d1868af --- /dev/null +++ b/docs/organizations/companies/suse/archive/hypper.md @@ -0,0 +1,5 @@ +# Hypper + +> Hypper makes it easy to install and manage cluster level applications while leveraging Helm and charts + +[hypper.io](https://hypper.io/), [code](https://github.com/rancher-sandbox/hypper) diff --git a/docs/organizations/companies/suse/archive/s3gw.md b/docs/organizations/companies/suse/archive/s3gw.md new file mode 100644 index 0000000..0259e1f --- /dev/null +++ b/docs/organizations/companies/suse/archive/s3gw.md @@ -0,0 +1,5 @@ +# s3gw + +> The s3gw project is a lightweight, open source S3 service for small deployments, and easy to deploy in a cloud native environment such as Kubernetes + +[s3gw.tech](https://s3gw.tech/), [code](https://github.com/s3gw-tech/s3gw) diff --git a/docs/organizations/companies/suse/elemental.md b/docs/organizations/companies/suse/elemental.md new file mode 100644 index 0000000..df6f941 --- /dev/null +++ b/docs/organizations/companies/suse/elemental.md @@ -0,0 +1,13 @@ +# Elemental + +> Elemental is a software stack enabling a centralized, full cloud-native OS management with Kubernetes. 
+ +[docs](https://elemental.docs.rancher.com/), [code](https://github.com/rancher/elemental) + +## Design + +* [Architecture](https://elemental.docs.rancher.com/architecture/) + +## Presentations + +* [Global Online Meetup: Elemental](https://www.youtube.com/watch?v=-uenjgsxI5U) - July 19, 2023 diff --git a/docs/organizations/companies/suse/fleet.md b/docs/organizations/companies/suse/fleet.md new file mode 100644 index 0000000..456f4b1 --- /dev/null +++ b/docs/organizations/companies/suse/fleet.md @@ -0,0 +1,113 @@ +# Fleet + +> Fleet is a container management and deployment engine designed to offer users more control on the local cluster and constant monitoring through GitOps + +[fleet.rancher.io](https://fleet.rancher.io/), [code](https://github.com/rancher/fleet) + +## Quick start + +### Getting to know Fleet + +* Youtube videos + * [GitOps The Planet (E4) - GitOps at Edge](https://www.youtube.com/watch?v=OPbgvBSAO9U) - January 4, 2023 + * [Using Fleet to Manage Clusters at Scale](https://www.youtube.com/watch?v=8gXbxt3AjdE&t=723s) - November 21, 2020 + +### Install Fleet objects in your Kubernetes cluster + +```bash +# sets version +FLEET_VERSION=0.3.9 + +# (optional) views objects that will be created +helm template fleet-crd https://github.com/rancher/fleet/releases/download/v${FLEET_VERSION}/fleet-crd-${FLEET_VERSION}.tgz > temp/fleet-crd.yaml +helm template -n cattle-fleet-system fleet https://github.com/rancher/fleet/releases/download/v${FLEET_VERSION}/fleet-${FLEET_VERSION}.tgz > temp/fleet.yaml + +# installs +helm -n cattle-fleet-system install --create-namespace --wait fleet-crd https://github.com/rancher/fleet/releases/download/v${FLEET_VERSION}/fleet-crd-${FLEET_VERSION}.tgz +helm -n cattle-fleet-system install --create-namespace --wait fleet https://github.com/rancher/fleet/releases/download/v${FLEET_VERSION}/fleet-${FLEET_VERSION}.tgz + +# makes sure all containers are running fine +kubectl get all -n cattle-fleet-system +``` + +### Use examples 
repo (or your own) to deploy your first applications + +```bash +# creates sample namespace +kubectl create namespace fleet-sample +# creates or updates the sample gitrepo +kubectl apply -f samples/gitrepo-guestbook-sample.yaml -n fleet-sample +# views fleet action +kubectl get fleet -n fleet-sample +``` + +## Configuration + +* [Git Repository Contents](https://fleet.rancher.io/gitrepo-content) + * [`fleet.yaml`](https://fleet.rancher.io/ref-fleet-yaml) + +## Samples + +* [Fleet examples](https://github.com/rancher/fleet-examples) +* [Martin Weiss](https://github.com/Martin-Weiss/rancher-fleet) +* [SUSE Exchange Paris 2023](https://github.com/devpro/helm-charts/tree/main/samples/suse-exchange-paris-2023) + +## Alternatives + +Name | Model +-----------|------ +**ArgoCD** | Push +**Fleet** | Pull +**Flux** | Pull + +## Limitations + +### Helm chart dependencies + +* tgz files in charts folder (coming from `helm dependency update`) must be added to git to be picked up by Fleet (see [issue #250](https://github.com/rancher/fleet/issues/250)) + +### Helm chart with objects on multiple namespaces + +* You may encounter the issue `Error while running post render on files: invalid cluster scoped object [name=* kind=PodSecurityPolicy apiVersion=policy/v1beta1] found, consider using "defaultNamespace", not "namespace" in fleet.yaml` + +### Modified GitRepos + +* [Generating Diffs to Ignore Modified GitRepos](https://fleet.rancher.io/bundle-diffs) + +## Troubleshoot + +### Fleet Kubernetes objects + +Kind | Name +------------------------------------------|------------------------- +bundles.fleet.cattle.io | CustomResourceDefinition +bundledeployments.fleet.cattle.io | CustomResourceDefinition +bundlenamespacemappings.fleet.cattle.io | CustomResourceDefinition +clustergroups.fleet.cattle.io | CustomResourceDefinition +clusters.fleet.cattle.io | CustomResourceDefinition +clusterregistrationtokens.fleet.cattle.io | CustomResourceDefinition +gitrepos.fleet.cattle.io | 
CustomResourceDefinition +clusterregistrations.fleet.cattle.io | CustomResourceDefinition +gitreporestrictions.fleet.cattle.io | CustomResourceDefinition +contents.fleet.cattle.io | CustomResourceDefinition +imagescans.fleet.cattle.io | CustomResourceDefinition +gitjobs.gitjob.cattle.io | CustomResourceDefinition +gitjob | ServiceAccount +fleet-controller | ServiceAccount +fleet-controller-bootstrap | ServiceAccount +fleet-controller | ConfigMap +gitjob | ClusterRole +fleet-controller | ClusterRole +fleet-controller-bootstrap | ClusterRole +gitjob-binding | ClusterRoleBinding +fleet-controller | ClusterRoleBinding +fleet-controller-bootstrap | ClusterRoleBinding +fleet-controller | Role +fleet-controller | RoleBinding +gitjob | Service +gitjob | Deployment +fleet-controller | Deployment + +### Git token expired + +* If you manage the GitRepo from Rancher UI and create new git credential, save twice the UI diff --git a/docs/organizations/companies/suse/harvester.md b/docs/organizations/companies/suse/harvester.md new file mode 100644 index 0000000..79de491 --- /dev/null +++ b/docs/organizations/companies/suse/harvester.md @@ -0,0 +1,9 @@ +# Harvester + +> The open source hyperconverged infrastructure (HCI) solution for a cloud native world + +[harvesterhci.io](https://harvesterhci.io/), [docs](https://docs.harvesterhci.io/), [docs](https://github.com/harvester/harvester) + +## Commercial offer + +* [Support Matrix](https://www.suse.com/suse-harvester/support-matrix/all-supported-versions/harvester-v1-7-x/) diff --git a/docs/organizations/companies/suse/k3s.md b/docs/organizations/companies/suse/k3s.md index 2ca9299..a6f6d35 100644 --- a/docs/organizations/companies/suse/k3s.md +++ b/docs/organizations/companies/suse/k3s.md @@ -1,25 +1,28 @@ -# K3s +# K3s -> K3s is a CNCF sandbox project that delivers a lightweight yet powerful certified Kubernetes distribution. 
When used with SUSE Rancher, K3s is ideal for running production workloads across resource-restrained, remote locations or on IoT devices. +> K3s is a CNCF sandbox project that delivers a lightweight yet powerful certified Kubernetes distribution -→ [k3s.io](https://k3s.io/), [docs](https://rancher.com/docs/k3s/latest/en/), [GitHub](https://github.com/k3s-io/k3s), +[k3s.io](https://k3s.io/), [docs](https://rancher.com/docs/k3s/latest/en/), [code](https://github.com/k3s-io/k3s), [suse.com/products/k3s](https://www.suse.com/products/k3s/) -## General idea +## Architecture ![How K3s works](https://k3s.io/img/how-it-works-k3s-revised.svg) -## Quick start - -### Install +Single binary: -### Run in a container with k3d +* [containerd](https://containerd.io/) +* [flannel](https://github.com/flannel-io/flannel) +* [CoreDNS](https://coredns.io/) +* iptables +* [SQLite](https://www.sqlite.org/) +* [klipper-lb](https://github.com/k3s-io/klipper-lb) +* [Helm](https://helm.sh/) +* [traefik](https://traefik.io/) Ingress Controller -> k3d is a lightweight wrapper to run K3s (Rancher Lab's minimal Kubernetes distribution) in Docker. 
- -→ [k3d.io](https://k3d.io/) ([GitHub](https://github.com/k3d-io/k3d)) +## Quick start -#### Create a cluster with k3d +[Quick-Start Guide](https://rancher.com/docs/k3s/latest/en/quick-start/) ```bash # runs installation script @@ -35,6 +38,15 @@ kubectl get nodes k3d cluster delete firstcluster ``` -### Run in a Linux system +## Knowledge + +* [Advanced Options and Configuration](https://rancher.com/docs/k3s/latest/en/advanced/) + * Auto-deploying manifests + +## Cluster API + +* [zawachte/cluster-api-k3s](https://github.com/zawachte/cluster-api-k3s) + +## Infrastructure automation (IaC) -* [Quick-Start Guide](https://rancher.com/docs/k3s/latest/en/quick-start/) +* [rlex/ansible-role-k3s](https://github.com/rlex/ansible-role-k3s) diff --git a/docs/organizations/companies/suse/kubewarden.md b/docs/organizations/companies/suse/kubewarden.md new file mode 100644 index 0000000..b5e8d5d --- /dev/null +++ b/docs/organizations/companies/suse/kubewarden.md @@ -0,0 +1,9 @@ +# Kubewarden + +> Kubewarden is a policy engine for Kubernetes. Its mission is to simplify the adoption of policy-as-code. 
+ +[kubewarden.io](https://www.kubewarden.io/), [code](https://github.com/kubewarden) + +## Presentations + +* [Global Online Meetup](https://www.crowdcast.io/e/gomu_rancher_kubewarden_01182023/1) - February 15th, 2023 diff --git a/docs/organizations/companies/suse/longhorn.md b/docs/organizations/companies/suse/longhorn.md index 1653664..3f75cbb 100644 --- a/docs/organizations/companies/suse/longhorn.md +++ b/docs/organizations/companies/suse/longhorn.md @@ -1,4 +1,4 @@ -# Longhorn +# Longhorn > Cloud native distributed block storage for Kubernetes @@ -9,3 +9,14 @@ ### 1.6.0 [Blog annoucement](https://www.suse.com/c/rancher_blog/announcing-longhorn-1-6-0/) - February 8, 2024 + +## Features + +### Backup & restore + +* [CSI Snapshot Support](https://longhorn.io/docs/1.5.1/snapshots-and-backups/csi-snapshot-support/) + * [Container Storage Interface (CSI)](https://github.com/container-storage-interface/spec) + +## General literature + +* [Self Hosted Kubernetes - Solving the Storage Problem](https://refaktory.net/blog/posts/self-hosted-kubernetes-solving-the-storage-problem) by Refaktory - 2022-08-09 diff --git a/docs/organizations/companies/suse/neuvector.md b/docs/organizations/companies/suse/neuvector.md deleted file mode 100644 index 2d2f8e7..0000000 --- a/docs/organizations/companies/suse/neuvector.md +++ /dev/null @@ -1,34 +0,0 @@ -# NeuVector - -## Scanner in CI pipelines - -### GitLab - -Updates from [plugin](https://gitlab.com/neuvector/gitlab-plugin) (MR are not looked at...): - -* Scan a private registry - -```yaml -# GitLab Project > Settings > CI/CD > Variables > CONTAINER_REGISTRY_USER & IMAGE_REGISTRY_PASSWORD - -include: - - remote: 'https://gitlab.com/neuvector/gitlab-plugin/-/raw/master/scan.yml' - -stages: - - scan - -neuvector_scan: - stage: scan - variables: - image_registry_url: "https://registry-1.docker.io" - image_registry_user: $CONTAINER_REGISTRY_USER - image_registry_password: $IMAGE_REGISTRY_PASSWORD - image_repo: "library/alpine" - 
image_tag: "3.6" - nv_registry_user: $CONTAINER_REGISTRY_USER - nv_registry_password: $IMAGE_REGISTRY_PASSWORD - scan_layers: "false" - high_vul_to_fail: 5 - medium_vul_to_fail: 9 - vul_names_to_fail: "CVE-2020-1971, CVE-2020-1972" -``` diff --git a/docs/organizations/companies/suse/neuvector/architecture-overview.png b/docs/organizations/companies/suse/neuvector/architecture-overview.png new file mode 100644 index 0000000..b02f811 Binary files /dev/null and b/docs/organizations/companies/suse/neuvector/architecture-overview.png differ diff --git a/docs/organizations/companies/suse/neuvector/architecture.md b/docs/organizations/companies/suse/neuvector/architecture.md new file mode 100644 index 0000000..eac92f5 --- /dev/null +++ b/docs/organizations/companies/suse/neuvector/architecture.md @@ -0,0 +1,22 @@ +# NeuVector Architecture + +## Overview + +![architecture overview](architecture-overview.png) + +Ref. [Basics > Overview](https://docs.neuvector.com/basics/overview) + +## Kubernetes objects + +* Controller + * Manage policies REST API + +* Scanner + +* Web UI + * Manager User Interface + * CLI tool + +* Enforcer (DaemonSet) + * Enforce Security Policies + * Inspect Network Traffic diff --git a/docs/organizations/companies/suse/neuvector/neuvector.md b/docs/organizations/companies/suse/neuvector/neuvector.md new file mode 100644 index 0000000..a7f34bb --- /dev/null +++ b/docs/organizations/companies/suse/neuvector/neuvector.md @@ -0,0 +1,183 @@ +# NeuVector + +> NeuVector delivers Full Lifecycle Container Security with the only cloud-native, Kubernetes security platform providing end-to-end vulnerability management, automated CI/CD pipeline security, +> and complete run-time security including the industry’s only container firewall to protect your infrastructure from zero days and insider threats. 
+ +[neuvector.com](https://neuvector.com/), [docs](https://docs.neuvector.com/), [code](https://github.com/neuvector/neuvector) + +## Content + +* [Architecture](architecture.md) + +## Features + +* Automated _Behavioral-Based_ Zero-Trust modes + * Discover + * Monitor + * Protect + +* Continuously watches every packet + * Layer 3 + * Layer 4 + * Layer 7 + +* Network traffic to the source of truth + +* Security-as-Code for replicating Zero-Trust Segmentation + * [neuvector.com/videos](https://neuvector.com/videos/the-neuvector-minute-security-policy-as-code/) + +* Protect data with Data Loss Prevention (DLP) + * [How to Protect Sensitive Data in Containers with Container DLP](https://blog.neuvector.com/article/protect-sensitive-data-with-container-dlp) + +* Service Mesh integration + * [How to Secure Containers in a Service Mesh such as Istio and Linkerd2](https://neuvector.com/videos/secure-containers-in-service-mesh-istio/) + +* Automation + * [REST API and Automation](https://open-docs.neuvector.com/automation/automation) + +## Trainings + +* NeuVector Rodeo + +* [NeuVector Youtube channel](https://www.youtube.com/channel/UCpAoVOdUS0i7T92cszeRVoQ) + * [NeuVector - 101 (Fall 2021)](https://www.youtube.com/watch?v=9ihaBr_QGzQ) + * [NeuVector Minute - Installing NeuVector on Rancher](https://www.youtube.com/watch?v=cc8nA7nxuDc) - Nov 12, 2020 + * [Zero-Trust Security for Kubernetes and Container Workloads](https://www.youtube.com/watch?v=SzNbJ7W3Mik) - March 29, 2022 + +## Versions + +### v5.3.0 + +* [Enhancing Network Security and Automation](https://www.suse.com/c/neuvector-releases-v-5-3-0-enhancing-network-security-and-automation/) + +### v5 + +* New scanning targets +* Zero-drift process and file protection +* Split policy mode +* Web app firewall rule detection +* CRD updates +* Enhanced Rancher Integration +* Automated promotion of group nodes + +## Installation + +### Rancher App + +* In Rancher, from your cluster, go to Apps > Charts and look for NeuVector 
and click on Install +* In Step 2 > Edit Options + * In Container Runtime, make sure you select the right runtime (containerd for instance with AKS) + * In Ingress Configuration, check the Manager Ingress Status box, fill Manager Ingress Host (neuvector.demo for example) +* In Step 2 > Edit YAML, edit the content to add ingressClassName + + ```yaml + manager: + ingress: + annotations: + nginx.ingress.kubernetes.io/backend-protocol: HTTPS + enabled: true + host: neuvector.demo + ingressClassName: nginx + path: / + secretName: null + tls: false + ``` + +* Click on Install and review the overall installation process +* Once installed correctly (all pods running fine), go to Service Discovery > Ingresses + * In `cattle-neuvector-system` namespace, click on the target link + * Log in with admin/admin and immediately update the password + +## Q&A + +Q: Is it possible to export reports and scans in PDF and automate sending them via email, for example? + +A: Yes, this could be done by leveraging the API + +Q: Is it possible to customize the login UI? + +A: The name can be changed but no other customization for the moment (a feature request has been created to cover this part) + +Q: Must NeuVector be installed into each working cluster or is it possible to have one central NeuVector cluster and route to it from each downstream cluster? + +A: The components such as the scanner, enforcer, etc. must be installed in each cluster but you can federate clusters together so there's a single UI to manage multiple clusters + +Q: Can we "ignore/silence" a vulnerability so it doesn't show in the reporting? + +A: You can "accept" vulnerabilities that negate them coming up in reports/alerts, reports can also filter out vulnerabilities with (for example) a low CVE score, No fix, etc. +So you could generate a list of all known CVE's in your environment, filter by no fix and then bulk accept those + +Q: How much of a performance overhead is the enforcer? 
+ +A: This is documented in the [FAQ](https://neuvector.com/wp-content/uploads/2019/05/NeuVector-Customer-FAQ.pdf) at point 2 + +## Known issues + +* Timeout while on the web interface + * Refresh the page and authenticate again + +## Articles + +* [Enhancing Kubernetes Application Security with NeuVector](https://www.infracloud.io/blogs/secure-container-images-using-neuvector/) - September 15th, 2023 +* [FAQ](https://neuvector.com/wp-content/uploads/2019/05/NeuVector-Customer-FAQ.pdf) + +## Alternatives + +* [tetragon](https://tetragon.io/) ([playground](https://labs.iximiuz.com/playgrounds/tetragon)) + +## Recipes + +### How to scan control plane nodes + +By default, only worker nodes are scanned. You can change this by adapting the tolerations of the enforcer when installing NeuVector. +The default can be seen in [values.yaml](https://github.com/neuvector/neuvector-helm/blob/master/charts/core/values.yaml) (enforcer / tolerations). +Depending on the Kubernetes distribution, the taints may be different on non-worker nodes. 
+ +To tolerate all possible taints, a config would be: + +```yaml +enforcer: + tolerations: + - operator: "Exists" +``` + +## Integrations + +### Harbor + +* [Installing and Configuring NeuVector Harbor Registry Adapter](https://github.com/rancher/barn/blob/main/Walkthroughs/NeuVector/Harbor%20Adapter%20Configuration/README.md) + +## Scanning + +### GitLab + +[Scan for Vulnerabilities during Gitlab Build Pipeline](https://docs.neuvector.com/scanning/build/gitlab) + +Updates from [plugin](https://gitlab.com/neuvector/gitlab-plugin) (MR are not looked at...): + +* Scan a private registry + +```yaml +# GitLab Project > Settings > CI/CD > Variables > CONTAINER_REGISTRY_USER & IMAGE_REGISTRY_PASSWORD + +include: + - remote: 'https://gitlab.com/neuvector/gitlab-plugin/-/raw/master/scan.yml' + +stages: + - scan + +neuvector_scan: + stage: scan + variables: + image_registry_url: "https://registry-1.docker.io" + image_registry_user: $CONTAINER_REGISTRY_USER + image_registry_password: $IMAGE_REGISTRY_PASSWORD + image_repo: "library/alpine" + image_tag: "3.6" + nv_registry_user: $CONTAINER_REGISTRY_USER + nv_registry_password: $IMAGE_REGISTRY_PASSWORD + scan_layers: "false" + high_vul_to_fail: 5 + medium_vul_to_fail: 9 + vul_names_to_fail: "CVE-2020-1971, CVE-2020-1972" +``` diff --git a/docs/organizations/companies/suse/opensuse.md b/docs/organizations/companies/suse/opensuse.md index cc62b28..df8844b 100644 --- a/docs/organizations/companies/suse/opensuse.md +++ b/docs/organizations/companies/suse/opensuse.md @@ -1,5 +1,8 @@ -# openSUSE +# openSUSE -[opensuse.org](https://www.opensuse.org/) +> The openSUSE distribution is a stable, easy to use and complete multi-purpose distribution. It is aimed towards users and developers working on the desktop or server. +(...) It comes with more than 1,000 open source applications. openSUSE Tumbleweed is the rolling release, providing the latest upstream software releases, yet only those packages that pass testing. 
+ +→ [opensuse.org](https://www.opensuse.org/), [wiki](https://en.opensuse.org/Main_Page) [build.opensuse.org/project](https://build.opensuse.org/project) diff --git a/docs/organizations/companies/suse/products.md b/docs/organizations/companies/suse/products.md new file mode 100644 index 0000000..fa63676 --- /dev/null +++ b/docs/organizations/companies/suse/products.md @@ -0,0 +1,49 @@ +# Products + +All these projects are open-source and managed by SUSE. + +## Cloud Native components + +### GitOps + +- [Fleet](fleet.md) + +### Kubernetes cluster management + +- [Rancher](rancher/rancher.md) + +### Kubernetes distributions + +- [K3s](k3s.md) +- [RKE1](rke.md) +- [RKE2](rke2.md) + +### Operating system management + +- [Elemental](elemental.md) + +### Policies + +- [Kubewarden](kubewarden.md) + +### Security + +- [NeuVector](neuvector/neuvector.md) + +### Storage + +- [Longhorn](longhorn.md) + +### Virtualization + +- [Harvester](harvester.md) + +### Workstation + +- [Rancher Desktop](rancher-desktop.md) + +## Linux distributions + +- [openSUSE](opensuse.md) +- [SLE Micro](sle-micro.md) +- [SLES](sles.md) diff --git a/docs/organizations/companies/suse/rancher-desktop.md b/docs/organizations/companies/suse/rancher-desktop.md index 5379b01..3e6900c 100644 --- a/docs/organizations/companies/suse/rancher-desktop.md +++ b/docs/organizations/companies/suse/rancher-desktop.md @@ -1,4 +1,4 @@ -# Rancher Desktop +# Rancher Desktop > An open-source desktop application for Mac, Windows and Linux. Rancher Desktop runs Kubernetes and container management on your desktop. @@ -6,4 +6,4 @@ You can choose the version of Kubernetes you want to run. You can build, push, pull, and run container images using either containerd or Moby (dockerd). The container images you build can be run by Kubernetes immediately without the need for a registry. 
-→ [rancherdesktop.io](https://rancherdesktop.io/), [code](https://github.com/rancher-sandbox/rancher-desktop) +[rancherdesktop.io](https://rancherdesktop.io/), [code](https://github.com/rancher-sandbox/rancher-desktop) diff --git a/docs/organizations/companies/suse/rancher.md b/docs/organizations/companies/suse/rancher.md deleted file mode 100644 index d700df5..0000000 --- a/docs/organizations/companies/suse/rancher.md +++ /dev/null @@ -1,40 +0,0 @@ -# Rancher - -> Rancher is a container management platform built for organizations that deploy containers in production. Rancher makes it easy to run Kubernetes everywhere, meet IT requirements, and empower DevOps teams. - -→ [rancher.com](https://rancher.com/), [docs](https://rancher.com/docs/rancher/v2.6/en/) - -## Presentation - -![Rancher platform](https://rancher.com/docs/img/rancher/platform.png) - -## Quick start - -* [Get Started with SUSE Rancher in 2 Easy Steps](https://www.suse.com/products/suse-rancher/get-started/) -(see also [Installing Rancher on a Single Node Using Docker](https://rancher.com/docs/rancher/v2.6/en/installation/other-installation-methods/single-node-docker/)) - -```bash -# creates Rancher container -sudo docker run --privileged -d --restart=unless-stopped -p 80:80 -p 443:443 rancher/rancher - -# manual: open http://localhost and follow instructions to login, at the end download the kubeconfig file - -# sets kubectl to the Kubernetes cluster and displays the single node -export KUBECONFIG=local.yaml -kubectl get nodes -``` - -## Operations - -* [Rancher Upgrade Checklist](https://www.suse.com/support/kb/doc/?id=000020061) - -## Learning - -* [Rancher Manager 2.8 for Rancher Prime Operations](https://www.suse.com/training/course/ran201v2.8) -* [Rancher Manager 2.7 for Rancher Prime Deployment](https://www.suse.com/training/course/ran211v2.7) - -## Tips - -Q. Is it possible to change the rancher/shell to another image? - -A. Yes, it’s a setting in global settings. 
It can also be set with the environment variable on rancher CATTLE_SHELL_IMAGE=my/customshell:tag diff --git a/docs/organizations/companies/suse/rancher/3ds-outscale.md b/docs/organizations/companies/suse/rancher/3ds-outscale.md new file mode 100644 index 0000000..f4a6826 --- /dev/null +++ b/docs/organizations/companies/suse/rancher/3ds-outscale.md @@ -0,0 +1,180 @@ +# 3DS OUTSCALE + +> Founded in 2010 and a strategic partner of Dassault Systèmes, 3DS OUTSCALE is at the forefront of Cloud Computing infrastructure services (IaaS). + +:octicons-cross-reference-24: **Links:** [:material-web: outscale.com](https://outscale.com/), [:simple-github: organization](https://github.com/outscale) + +## Getting started + +A good starting point is the [User Guide](https://docs.outscale.com/userguide). + +### Account + +An _Account Key_ and a _Secret Key_ are needed to authenticate and manage the resources on OUTSCALE Cloud. + +### Management + +* [API](https://docs.outscale.com/api) ([reference](https://docs.outscale.com/en/userguide/OUTSCALE-APIs-Reference.html), [code](https://github.com/outscale/osc-api)) +* [OSC CLI](https://docs.outscale.com/en/userguide/Installing-and-Configuring-OSC-CLI.html) ([code](https://github.com/outscale/osc-cli)) +* [AWS CLI](https://docs.outscale.com/en/userguide/Installing-and-Configuring-AWS-CLI.html) +* [Cockpit (web UI)](https://cockpit.outscale.com/) ([v2 (beta)](https://new.cockpit.outscale.com/), [docs](https://docs.outscale.com/en/userguide/About-Cockpit.html)) +* [Visual Studio Extension](https://marketplace.visualstudio.com/items?itemName=outscale.osc-viewer) ([code](https://github.com/outscale/vscode-osc-viewer)) + +## Recipes + +### Create a Kubernetes cluster from OSC VMs and install Rancher on it + +* Terraform: use [Rancher Quickstart](https://github.com/rancher/quickstart/blob/master/rancher/outscale/README.md) + +### Connect to a OSC VM + +* From OUTSCALE Web UI + * In "Compute" > "VMs" (Instances in cockpit V1), click on 
"Create" + * In "Security", add a rule to authorize SSH (port 22) from "My IP" + * Copy the public IP address, download the rsa file and log in with `ssh -i ~/.ssh/outscale_xxx.rsa -l outscale` + +### Create a Kubernetes Rancher on OSC VMs from Rancher + +* From Rancher UI + * In "Cluster Management" > "Drivers" > "Node Drivers" + * Select "Outscale" ([definition](https://github.com/rancher/rancher/blob/release/v2.7/pkg/data/management/machinedriver_data.go#L140)) and click on "Activate" + * In "Cluster Management" > "Clusters" + * In "Create" form, select "RKE2/K3s", click on "outscale" + +### RKE2 creation from Rancher UI + +* Open Rancher + * In "Cluster Management", "Drivers", "Node Drivers", enable "Outscale" + * In "Cluster Management", "Cloud Credentials", click on "Create", select "Outscale", submit and fill in the information + * In "Cluster Management", "Clusters", click on "Create", select "outscale" + * Specify the "supportOmi" (check [Official OMIs Reference](https://docs.outscale.com/en/userguide/Official-OMIs-Reference.html)) + * Set "tinav5.c3r4p1" as "instanceType" (check [Instance Types](https://docs.outscale.com/en/userguide/Instance-Types.html)) + +### RKE2 troubleshooting + +* Open Rancher + * In "Cluster Management", "Clusters", click on the cluster, in the Machine Pool line click on the menu and select "Download SSH Key" + +* Open [new.cockpit.outscale.com](https://new.cockpit.outscale.com/) + * In "Compute", "VMs", in the VM line, copy the "Public IP" value + +* Open a terminal + +```bash +# makes sure ssh files have the right permission +chmod 600 /path/to/ssh +# connects to the VM +ssh -i /path/to/ssh/id_rsa -l outscale +# example: ssh -i /mnt/c/Users/SomeUser/workspace/temp/osc-dummy01-pool1-xxxxxx-yyyy/id_rsa 1.2.3.4 -l outscale +``` + +* Investigate potential issues + +```bash +journalctl -xefu rke2-server +systemctl status rke2-server +journalctl -u rancher-system-agent.service -f + +# installs kubectl +curl -LO 
"https://dl.k8s.io/release/$(curl -L -s https://dl.k8s.io/release/stable.txt)/bin/linux/amd64/kubectl" +sudo install -o root -g root -m 0755 kubectl /usr/local/bin/kubectl + +sudo cp /etc/rancher/rke2/rke2.yaml . +sudo chown outscale:outscale rke2.yaml +export KUBECONFIG=/home/outscale/rke2.yaml +kubectl get pods --all-namespaces +``` + +## Resources + +### Network + +* Virtual Private Clouds +* External IPs +* Flexible Network Interfaces +* Load Balancing Unit +* VPN Connections +* DirectLink +* OUTSCALE Public IPs +* OUTSCALE NTP Servers + +Ref. [docs](https://docs.outscale.com/en/userguide/Network-and-Security.html) + +### Storage + +* [Block Storage Unit](https://docs.outscale.com/en/userguide/Block-Storage-Unit-BSU.html) +* [OUTSCALE Object Storage](https://docs.outscale.com/en/userguide/OUTSCALE-Object-Storage-OOS.html) + +### Compute + +* [Flexible Compute Unit](https://docs.outscale.com/en/userguide/Flexible-Compute-Unit-FCU.html) +* [OUTSCALE Machine Images](https://docs.outscale.com/en/userguide/OUTSCALE-Machine-Images-OMIs.html) +* [Flexible GPUs](https://docs.outscale.com/en/userguide/Flexible-GPUs-fGPUs.html) + +## Open-source projects + +### Kubernetes + +Name | Links +------------------------------------------------------------------------------------|----------------------------------------------------------------------------------------------------- +[BSU CSI driver](https://github.com/outscale/osc-bsu-csi-driver) | - +[Cloud Controller Manager](https://github.com/outscale/cloud-provider-osc) | - +[Cluster API Provider](https://github.com/outscale/cluster-api-provider-outscale) | [Book](https://cluster-api-outscale.oos-website.eu-west-2.outscale.com/) +[Docker machine plugin](https://github.com/outscale/docker-machine-driver-outscale) | [Technical guide](https://docs.outscale.com/en/userguide/Using-DockerMachine-with-3DS-OUTSCALE.html) +[RKE provisioning example](https://github.com/outscale/osc-k8s-rke-cluster) | - +[Terraform 
provider](https://github.com/outscale/terraform-provider-outscale) | [Docs](https://registry.terraform.io/providers/outscale/outscale/latest/docs) +[Rancher Driver UI](https://github.com/outscale/rancher-ui-driver-outscale) | - + +#### Cloud Controller Manager + +* Install from Helm ([code](https://github.com/outscale/cloud-provider-osc/tree/OSC-MIGRATION/deploy/k8s-osc-ccm), [ArtifactHub](https://artifacthub.io/packages/helm/osc-cloud-controller-manager/osc-cloud-controller-manager)) + +```bash +helm install my-osc-cloud-controller-manager oci://registry-1.docker.io/outscalehelm/osc-cloud-controller-manager +``` + +* Use annotations ([examples](https://github.com/outscale/cloud-provider-osc/tree/OSC-MIGRATION/examples)) + +```yaml +# Service example +apiVersion: v1 +metadata: + annotations: + service.beta.kubernetes.io/osc-load-balancer-name-length: "20" + service.beta.kubernetes.io/osc-load-balancer-name: "simple-lb-test" +``` + +#### Container Storage Interface + +TODO + +### Cloud + +* [Frieza](https://github.com/outscale/frieza) + +## Closed-source software + +### Tina OS + +* [TINA OS Cloud Orchestrator](https://en.outscale.com/pourquoi-outscale/tina-os-cloud-orchestrator/) + +## Glossary + +Name | Meaning +-----|---------------------------- +AK | Account Key +BSU | Block Storage Unit +CAPI | Cluster API +CCM | Cloud Controller Manager +CSI | Container Storage Interface +EIP | External IP +FCU | Flexible Compute Unit +fGPU | Flexible GPU +FNI | Flexible Network Interface +GPU | Graphics Processing Units +LBU | Load Balancing Unit +OMI | OUTSCALE Machine Image +OOS | OUTSCALE Object Storage +OSC | OUTSCALE Cloud +SK | Secret Key +VPC | Virtual Private Clouds diff --git a/docs/organizations/companies/suse/rancher/applications.md b/docs/organizations/companies/suse/rancher/applications.md new file mode 100644 index 0000000..4f91f39 --- /dev/null +++ b/docs/organizations/companies/suse/rancher/applications.md @@ -0,0 +1,35 @@ +# Rancher Applications + +Available 
in Rancher, in the menu for a Cluster, with the "Apps" link. + +## Chart repositories + +### Indexes + +TODO: check & update with Rancher 2.8 + +```txt +https://charts.rancher.io/index.yaml +https://partner-charts.rancher.io/index.yaml +https://rke2-charts.rancher.io/index.yaml +https://rancher.github.io/elemental-operator/index.yaml +https://raw.githubusercontent.com/rancher/ui-plugin-charts/main/index.yaml +https://raw.githubusercontent.com/rancher/partner-extensions/gh-pages/index.yaml +``` + +### Official charts + +* Helm chart codebase for Rancher 2.8: [rancher/charts](https://github.com/rancher/charts/tree/release-v2.8/charts) + +* Helm chart repository for Rancher Prime: [charts.rancher.com/server-charts/prime](https://charts.rancher.com/server-charts/prime) + +* Helm CLI + +```bash +helm repo add rancher-charts https://charts.rancher.io +helm upgrade --install --namespace xxxx yyyy rancher-charts/yyyy --create-namespace +``` + +### Partner charts + +* Repository for Partner charts: [rancher/partner-charts](https://github.com/rancher/partner-charts/tree/main-source/charts) diff --git a/docs/organizations/companies/suse/rancher/architecture.md b/docs/organizations/companies/suse/rancher/architecture.md new file mode 100644 index 0000000..5f28270 --- /dev/null +++ b/docs/organizations/companies/suse/rancher/architecture.md @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/docs/organizations/companies/suse/rancher/authentication.md b/docs/organizations/companies/suse/rancher/authentication.md new file mode 100644 index 0000000..a6e6fd5 --- /dev/null +++ b/docs/organizations/companies/suse/rancher/authentication.md @@ -0,0 +1,49 @@ +# Authentication + +## Key features + +* [Users and Groups](https://docs.ranchermanager.rancher.io/how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/authentication-config/manage-users-and-groups) +* [Role-Based Access Control 
(RBAC)](https://docs.ranchermanager.rancher.io/pages-for-subheaders/manage-role-based-access-control-rbac) +* [Cluster and Project Roles](https://docs.ranchermanager.rancher.io/how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/manage-role-based-access-control-rbac/cluster-and-project-roles) + +## Providers + +### Keycloak + +See [Get started with Keycloak on Kubernetes](https://www.keycloak.org/getting-started/getting-started-kube) +and [Configure Keycloak (OIDC)](https://docs.ranchermanager.rancher.io/how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/authentication-config/configure-keycloak-oidc) + +* Open the Keycloak admin console + * Create a Realm + + > A realm in Keycloak is the equivalent of a tenant. It allows creating isolated groups of applications and users. By default there is a single realm in Keycloak called master. This is dedicated to manage Keycloak and should not be used for your own applications. + + * Create a User and set Credentials (password) + * (Optional) Validate login on Account Console with the newly created User (`https://mydomain/realms/demo/account/#/`) + + * Create a Client + * **Add Client**, Client ID to "rancher", Client Protocol to "openid-connect" + * In **Client details > Settings**, set Base URL to `https://rancherurl/`, Valid Redirect URIs to `https://rancherurl/verify-auth`, Access Type to confidential, activate Authorization Enabled and click on Save (so Credentials tab can be displayed) + * In **Client details > Mappers**, create the 3 Mappers (Groups Mapper, Client Audience, Group Path) + * In **Client details > Credentials**, copy client secret + + * Realm Settings > General tab, click OpenID Endpoint Configuration and copy the values for issuer and authorization_endpoint from the JSON output + + ```json + { + "issuer":"https://keycloak.mydomain/realms/demo", + 
"authorization_endpoint":"https://keycloak.mydomain/realms/demo/protocol/openid-connect/auth" + // ... + } + ``` + +* Open Rancher + * Configure a Keycloak OIDC account form, change Endpoints to Specify (advanced) and override the Issuer and Auth Endpoint values + +## Integrations + +### NeuVector + +* [horantj/rancher-nv-rbac](https://github.com/horantj/rancher-nv-rbac) + * [Roles](https://vimeo.com/790515566) + * [membership](https://vimeo.com/790248342) diff --git a/docs/organizations/companies/suse/rancher/automation.md b/docs/organizations/companies/suse/rancher/automation.md new file mode 100644 index 0000000..fec6f66 --- /dev/null +++ b/docs/organizations/companies/suse/rancher/automation.md @@ -0,0 +1,20 @@ +# Automation with Rancher + +## Terraform + +* [Rancher2 Provider](https://registry.terraform.io/providers/rancher/rancher2/latest/docs) ([code](https://github.com/rancher/terraform-provider-rancher2)) + +## Usecases + +### Kubernetes management cluster with Rancher + +![Cluster creation diagram](cluster-provisioning-logic.svg) + +### RKE1 cluster from Rancher + +* [RKE Templates and Infrastructure > Terraform](https://ranchermanager.docs.rancher.com/how-to-guides/new-user-guides/authentication-permissions-and-global-configuration/about-rke1-templates/infrastructure#terraform) + +## Code examples + +* [Devpro Terraform projects](https://github.com/devpro/terraform-projects) +* [Rancher quickstarts](https://github.com/rancher/quickstart) diff --git a/docs/organizations/companies/suse/rancher/cluster-provisioning-logic.svg b/docs/organizations/companies/suse/rancher/cluster-provisioning-logic.svg new file mode 100644 index 0000000..3d0bafe --- /dev/null +++ b/docs/organizations/companies/suse/rancher/cluster-provisioning-logic.svg @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/docs/organizations/companies/suse/rancher/extensions.md b/docs/organizations/companies/suse/rancher/extensions.md new file mode 100644 index 0000000..edfc96a --- /dev/null 
+++ b/docs/organizations/companies/suse/rancher/extensions.md @@ -0,0 +1,35 @@ +# Rancher extensions + +💡 Introduced in Rancher 2.7.0 + +> Extensions allow users, developers, partners, and customers to extend and enhance the Rancher UI. In addition, users can make changes and create enhancements to their UI functionality independent of Rancher releases. +Extensions will enable users to build on top of Rancher to better tailor it to their respective environments. + +→ [ranchermanager.docs.rancher.com](https://ranchermanager.docs.rancher.com/integrations-in-rancher/rancher-extensions), [rancher.github.io/dashboard](https://rancher.github.io/dashboard/extensions/home) + +## Quick start + +* Enable Extensions in Rancher + * From the Rancher UI, from the left menu, under Configuration, click on "Extensions" + * If "Extension support is not enabled" is displayed, then click on "Enable" + * If no extensions is displayed in "Available" you may have to refresh the page + * By default, two extensions will be available: "Elemental" and "Kubewarden" + +## Available extensions + +* [Rancher UI plugins](https://github.com/rancher/ui-plugin-charts) + * [Elemental](https://github.com/rancher/elemental-ui) ([Helm chart](https://github.com/rancher/elemental-ui/tree/main/charts/elemental)) + * [Kubewarden](https://github.com/kubewarden/ui) ([Helm chart](https://github.com/kubewarden/ui/tree/main/charts/kubewarden)) +* [Rancher Partner Extensions](https://github.com/rancher/partner-extensions) (installation simplified in Rancher UI, see [PR #9260](https://github.com/rancher/dashboard/pull/9260)) + +## Samples + +* [Rancher UI Plugin examples](https://github.com/rancher/ui-plugin-examples) +* [Devpro samples](https://github.com/devpro/rancher-extensions-samples) + +## Creating an extension + +* [Getting Started](https://rancher.github.io/dashboard/extensions/extensions-getting-started) +* Node driver extension + * 
[example](https://github.com/rancher/ui-plugin-examples/tree/main/pkg/node-driver) + * [docs](https://rancher.github.io/dashboard/extensions/usecases/node-driver/machine-config) diff --git a/docs/organizations/companies/suse/rancher/gettingstarted.md b/docs/organizations/companies/suse/rancher/gettingstarted.md new file mode 100644 index 0000000..96b5d69 --- /dev/null +++ b/docs/organizations/companies/suse/rancher/gettingstarted.md @@ -0,0 +1,46 @@ +# Getting started with Rancher + +## Quick start + +### Run Rancher with Docker + +To have a quick look at Rancher UI, follow the procedure given at [rancher.com/quick-start](https://www.rancher.com/quick-start#getstarted-1). +More details on [Installing Rancher on a Single Node Using Docker](https://ranchermanager.docs.rancher.com/pages-for-subheaders/rancher-on-a-single-node-with-docker) documentation. + +* Open a terminal and use Docker CLI to run Rancher container + +```bash +# starts Rancher container +docker run --name local_rancher --privileged -d --restart=unless-stopped -p 3001:443 rancher/rancher +# waits few seconds to make sure Rancher is running +curl -k -L https://localhost:3001/dashboard +# gets the generated password +docker logs local_rancher 2>&1 | grep "Bootstrap Password:" +# prints the processes running in Rancher container (k3d, containerd, coredns, rancher in particular) +docker exec -it local_rancher ps -ef +``` + +* Open local [Rancher dashboard](https://localhost:3001/dashboard) + * Ignore the invalid certificate warning + * Login the password copied in the previous steps + * Set the password that you want, agree with terms and conditions and click on Continue + * You are now on Rancher home page! 
Here you see the list of Kubernetes clusters managed by Rancher + * Click on the `local`, which is the one running Rancher + * Explore the UI with on the left the menu to access all Kubernetes resources, on the top specific actions and the user menu + * Open a Kubectl shell and inspect the running containers + + ```bash + # inspects the running container + kubectl get pods -A + # displays the applications installed by Helm (fleet and webhooks) + helm list --all-namespaces + ``` + +* Use the terminal to clean resources + +```bash +# stops Rancher container +docker stop local_rancher +# delete Rancher container +docker rm local_rancher +``` diff --git a/docs/organizations/companies/suse/rancher/installation.md b/docs/organizations/companies/suse/rancher/installation.md new file mode 100644 index 0000000..758b92a --- /dev/null +++ b/docs/organizations/companies/suse/rancher/installation.md @@ -0,0 +1,22 @@ +# Installation of Rancher + +## General procedures + +* [Install/Upgrade Rancher on a Kubernetes Cluster](https://docs.ranchermanager.rancher.io/pages-for-subheaders/install-upgrade-on-a-kubernetes-cluster) +* [Rancher Upgrade Checklist](https://www.suse.com/support/kb/doc/?id=000020061) +* [Rancher Upgrade FAQ](https://www.suse.com/support/kb/doc/?id=000020727) + +## Providers + +* [AKS](microsoft-azure.md) +* [Outscale](3ds-outscale.md) +* [Nutanix](nutanix.md) +* [vSphere](wmware-vsphere.md) + +## Known issues + +* If `cert-manager` is used, make sure `cert-manager` CRDs are applied before installing it with Helm (see [Install CustomResourceDefinitions](https://cert-manager.io/docs/installation/helm/#3-install-customresourcedefinitions)) + +## Tutorials + +* [pitch7900/Rancher-3-nodes-install](https://github.com/pitch7900/Rancher-3-nodes-install) (in French) diff --git a/docs/organizations/companies/suse/rancher/microsoft-azure.md b/docs/organizations/companies/suse/rancher/microsoft-azure.md new file mode 100644 index 0000000..7ce42aa --- /dev/null +++ 
b/docs/organizations/companies/suse/rancher/microsoft-azure.md @@ -0,0 +1,97 @@ +# Microsoft Azure + +## Getting started + +### Ressources + +Name | Type +-----------------------------------|------------------------------ +**AKS** (Azure Kubernetes Service) | Kubernetes cluster management + +### Best practices + +* [Azure resource naming convention](https://learn.microsoft.com/en-us/azure/cloud-adoption-framework/ready/azure-best-practices/resource-naming) + +![Diagram of the components of an Azure resource name](https://learn.microsoft.com/en-us/azure/cloud-adoption-framework/_images/ready/resource-naming.png) + +### Tips + +* When creating an Azure VM from Azure, the default Linux Admin username is `azureuser` + +## Usecases + +### Install Rancher on AKS + +* [Installing Rancher on AKS](https://docs.ranchermanager.rancher.io/getting-started/installation-and-upgrade/install-upgrade-on-a-kubernetes-cluster/rancher-on-aks) + * `samples/scripts/aks-rancher-installation.sh` + +### Provision AKS from Rancher + +#### Cloud credentials + +In order to authenticate and authorize actions against Azure, you need to create an Azure Active Directory (AD) application. It can be done through the web UI (portal) or the command line. + +* [Use the portal](https://docs.microsoft.com/en-us/azure/active-directory/develop/howto-create-service-principal-portal). + * In `Azure Active Directory` > `App registrations`, register a new application and save the value of Tenant ID, client ID, client secret + * In `Subscription` > `IAM`, assign a Contributor role to the application and save the Subscription ID value + +Then, in Rancher add the newly created credentials. 
+ +* Create a new Azure Cloud Credential in Rancher + +#### AKS Kubernetes cluster creation from Rancher + +* In `Cluster Management`, click on `Create` (it will the provider selection page) + * Under `Provision new nodes and create a cluster using RKE2/K3s` (make sure `RKE2/K3s` is checked), click on `Azure` (it will open the `Cluster: Create Azure` form) + * Under `Machine Pools`, click on `Show Advanced` and make sure to override all Azure ressource names + * Under `Cluster Configuration` > `Basics`, select "Azure" in `Cloud Provider` list and fill `Cloud Provider Config` field (see [Setting up the Azure Cloud Provider](https://rancher.com/docs/rancher/v2.6/en/cluster-provisioning/rke-clusters/cloud-providers/azure/)) + + ```json + { + "cloud":"AzurePublicCloud", + "tenantId": "", + "aadClientId": "", + "aadClientSecret": "", + "subscriptionId": "", + "resourceGroup": "", + "location": "", + "subnetName": "", + "securityGroupName": "", + "securityGroupResourceGroup": "", + "vnetName": "", + "vnetResourceGroup": "", + "primaryAvailabilitySetName": "", + "routeTableResourceGroup": "", + "cloudProviderBackoff": false, + "useManagedIdentityExtension": false, + "useInstanceMetadata": true + } + ``` + + * Under `Cluster Configuration` > `Advanced` > `Additional Controller Manager Args`, click `Add` and add the flag `--configure-cloud-routes=false` (see [Rancher issue #34367](https://github.com/rancher/rancher/issues/34367)) + +### Use Azure VM as Kubernetes nodes + +#### Ingress Controller in a Kubernetes cluster with Azure VM nodes + +```bash +# installs with helm (see https://kubernetes.github.io/ingress-nginx/deploy/) +helm upgrade --install ingress-nginx ingress-nginx \ + --repo https://kubernetes.github.io/ingress-nginx \ + --namespace ingress-nginx --create-namespace + +# checks NGINX Ingress Controller service (if EXTERNAL-IP gets stuck at then you need to look at the Cloud Provider configuration) +kubectl --namespace ingress-nginx get services -o wide 
ingress-nginx-controller +``` + +## Known issues + +### AKS cluster on a service principal with a new secret + +Once the secret used by the Cloud Credentials is revoked, we have to create another one, which may prevent the AKS cluster from starting. In this case run the following command: + +```bash +az aks update-credentials --resource-group rg-xxxx --name aks-xxxx --reset-service-principal --service-principal "xxxx" --client-secret "xxxx" +``` + +See [AKS startup error “Token refresh failed with invalid client secret error”](https://blog-bertrand-thomas.devpro.fr/2023/08/22/aks-startup-error-token-refresh-failed-with-invalid-client-secret-error/) for more information. diff --git a/docs/organizations/companies/suse/rancher/migrations.md b/docs/organizations/companies/suse/rancher/migrations.md new file mode 100644 index 0000000..3e3240b --- /dev/null +++ b/docs/organizations/companies/suse/rancher/migrations.md @@ -0,0 +1,7 @@ +# Rancher migrations + +## Swarm to Kubernetes clusters managed by Rancher + +* Design Kubernetes solution + * Migrate docker-compose files to Kubernetes files + * Use tools like [Kompose](https://kompose.io/) ([code](https://github.com/kubernetes/kompose)) or [Move2Kube](https://move2kube.konveyor.io/) ([code](https://github.com/konveyor/move2kube), [tutorial](https://move2kube.konveyor.io/tutorials/migrating-from-docker-compose-to-kubernetes)) diff --git a/docs/organizations/companies/suse/rancher/nutanix.md b/docs/organizations/companies/suse/rancher/nutanix.md new file mode 100644 index 0000000..ea5e8c5 --- /dev/null +++ b/docs/organizations/companies/suse/rancher/nutanix.md @@ -0,0 +1,28 @@ +# Nutanix + +## Getting started + +### Rancher & RKE on Nutanix + +* [How to deploy RKE1 clusters from Rancher on Nutanix](https://www.nutanix.dev/2023/06/09/how-to-deploy-a-fleet-of-rancher-rke1-clusters-on-nutanix-the-complete-guide-part-1/) - June 9, 2023 + +## Open-Source Software + +### Kubernetes + +Name | Installation | Documentation 
+--------------------------------------------------------------------------------------------|---------------------------------------------------------------------------------------|----------------------------------------------------------------------------- +[Cloud Controller Manager](https://github.com/nutanix-cloud-native/cloud-provider-nutanix) | | - +[Cluster API Provider](https://github.com/nutanix-cloud-native/cluster-api-provider-nutanix) | | [Getting Started](https://opendocs.nutanix.com/capx/latest/getting_started/) +CSI Snapshot Controller | [Helm chart](https://github.com/nutanix/helm/tree/master/charts/nutanix-csi-snapshot) | - +CSI Storage Driver | [Helm chart](https://github.com/nutanix/helm/tree/master/charts/nutanix-csi-storage) | - +[Docker machine](https://github.com/nutanix/docker-machine) | | - +[NDB Service Operator](https://github.com/nutanix-cloud-native/ndb-operator) | [Helm chart](https://github.com/nutanix/helm/tree/master/charts/ndb-operator) | - +[Rancher UI Driver](https://github.com/nutanix/rancher-ui-driver) | | - + +## Glossary + +Abbreviation | Meaning +-------------|----------------------------------------------------------------------------------- +**CSI** | [Container Storage Interface](https://github.com/container-storage-interface/spec) +**NDB** | Nutanix Database diff --git a/docs/organizations/companies/suse/rancher/observability.md b/docs/organizations/companies/suse/rancher/observability.md new file mode 100644 index 0000000..37246fb --- /dev/null +++ b/docs/organizations/companies/suse/rancher/observability.md @@ -0,0 +1,13 @@ +# Observability + +## Guides + +* [Monitoring](https://ranchermanager.docs.rancher.com/pages-for-subheaders/monitoring-alerting-guides) +* [Monitoring V2](https://ranchermanager.docs.rancher.com/pages-for-subheaders/monitoring-v2-configuration-guides) +* [Monitoring and Alerting](https://ranchermanager.docs.rancher.com/pages-for-subheaders/monitoring-and-alerting) +* 
[Logging](https://ranchermanager.docs.rancher.com/pages-for-subheaders/logging) + * [Logging Operator](https://kube-logging.dev/) ([github/kube-logging](https://github.com/kube-logging)) + +## Prometheus exporters + +* [David-VTUK/prometheus-rancher-exporter](https://github.com/David-VTUK/prometheus-rancher-exporter) diff --git a/docs/organizations/companies/suse/rancher/operations.md b/docs/organizations/companies/suse/rancher/operations.md new file mode 100644 index 0000000..8a2f1f1 --- /dev/null +++ b/docs/organizations/companies/suse/rancher/operations.md @@ -0,0 +1,8 @@ +# Rancher operations + +## Backup strategy + +* Rancher data + * [Backups and Disaster Recovery](https://ranchermanager.docs.rancher.com/pages-for-subheaders/backup-restore-and-disaster-recovery) +* Downstream clusters data + * [Kasten K10 by Veeam](https://www.suse.com/c/kasten-k10-by-veeam-and-suse-rancher-enterprise-k8s-data-protection/) diff --git a/docs/organizations/companies/suse/rancher/prime.md b/docs/organizations/companies/suse/rancher/prime.md new file mode 100644 index 0000000..1a9fa30 --- /dev/null +++ b/docs/organizations/companies/suse/rancher/prime.md @@ -0,0 +1,3 @@ +# Prime + +[Migrating to the Prime Registry](https://scc.suse.com/rancher-docs/rancherprime/latest/en/suse-rancher-prime/migrating-to-the-prime-registry.html) diff --git a/docs/organizations/companies/suse/rancher/provisioning.md b/docs/organizations/companies/suse/rancher/provisioning.md new file mode 100644 index 0000000..ec9b60d --- /dev/null +++ b/docs/organizations/companies/suse/rancher/provisioning.md @@ -0,0 +1,74 @@ +# Resource provisioning + +## General features + +* [Launching Kubernetes with Rancher](https://docs.ranchermanager.rancher.io/pages-for-subheaders/launch-kubernetes-with-rancher) +* [Cluster Management Resources](https://rancher.github.io/dashboard/code-base-works/cluster-management-resources) + +## Provisioning logic + +💡 Rancher v2.6.0 introduces V2 cluster provisioning that leverages 
[CAPI](https://cluster-api.sigs.k8s.io/) resources including Clusters and MachineDeployments to define and manage the desired state of downstream RKE2 and K3s clusters + +### Drivers + +From Rancher UI, drivers can be viewed and managed from **Cluster Management > Drivers page**. + +#### Cluster drivers + +Operators are used for built-in cluster drivers, for example [AKS operator](https://github.com/rancher/aks-operator), [EKS operator](https://github.com/rancher/eks-operator), [GKE operator](https://github.com/rancher/gke-operator). + +See also: [Kontainer Engine Example Driver](https://github.com/rancher-plugins/kontainer-engine-driver-example) + +#### Node drivers + +Drivers are [docker-machine](https://github.com/docker/machine) implementations for each provider, whether for RKE or RKE2/K3s. It is an API to create and delete VMs. + +Built-in node drivers are defined in [rancher/machine](https://github.com/rancher/machine/tree/master/drivers). + +Additional node drivers are added in [rancher/rancher](https://github.com/rancher/rancher/blob/release/v2.7/pkg/data/management/machinedriver_data.go#L74). + +See also: [UI DevKit > Machine Drivers](https://rancher.github.io/dashboard/code-base-works/machine-drivers) + +### Templates (V1 provisioning) + +In V1 provisioning, clusters have nodes, nodePools, and nodeTemplates. + +#### Node Templates + +Node templates make it possible to reuse machine configs for RKE1 provisioning. + +### Templates (V2 provisioning) + +In V2 provisioning, clusters have [Machines](https://cluster-api.sigs.k8s.io/user/concepts.html#machine) which are instances of MachineTemplates and have a specific configuration for each infrastructure provider. + +#### MachineTemplates + +#### Cluster templates + +> Cluster templates encompass both Kubernetes configuration and node pool configuration, allowing a single template to contain all the information Rancher needs to provision new nodes in a cloud provider +> and install Kubernetes on those nodes. 
(ref. [How-to Guides > New User Guides > Manage Clusters](https://ranchermanager.docs.rancher.com/how-to-guides/new-user-guides/manage-clusters/manage-cluster-templates)) + +Currently (August 2023) examples are only provided for RKE2. + +See also: [devpro/helm-charts](https://github.com/devpro/helm-charts/tree/main/charts/rancher-cluster-templates), [Kubernetes Master Class: Creating RKE2 Cluster Templates](https://youtu.be/xXtOP7CHbSA). + +### Kubernetes distribution specifics + +#### RKE + +Rancher uses custom CRDs to create clusters and custom controllers that will be used with docker-machine drivers. + +Node templates are used to ease cluster creation. + +RKE1 cluster templates exist but don't allow node pool configuration or RBAC. + +#### RKE2/K3s + +RKE2/K3s cluster creation uses [Provisioning V2](https://github.com/rancher/rancher/tree/release/v2.7/pkg/controllers/provisioningv2) + +Rancher uses the Cluster API controllers and CRDs internally. But it wraps its own Cluster and other CRDs around it to make it "easier to use" and maybe add additional features necessary for Rancher. +Rancher then bundles its own RKE2 Cluster API provider, which uses the same docker machine drivers to create and delete VMs. + +When a Cluster (provisioning.cattle.io/v1) is created, various CAPI objects are generated: RKECluster, RKEControlPlane, Cluster, RKEBootstrapTemplate, MachineDeployment and infra specific kinds like Amazonec2MachineTemplate. + +Currently (January 2023) it is not easily possible to use other Cluster API providers with Rancher. 
diff --git a/docs/organizations/companies/suse/rancher-api.md b/docs/organizations/companies/suse/rancher/rancher-api.md similarity index 100% rename from docs/organizations/companies/suse/rancher-api.md rename to docs/organizations/companies/suse/rancher/rancher-api.md diff --git a/docs/organizations/companies/suse/rancher-docker-howto.md b/docs/organizations/companies/suse/rancher/rancher-docker-howto.md similarity index 100% rename from docs/organizations/companies/suse/rancher-docker-howto.md rename to docs/organizations/companies/suse/rancher/rancher-docker-howto.md diff --git a/docs/organizations/companies/suse/rancher/rancher.md b/docs/organizations/companies/suse/rancher/rancher.md new file mode 100644 index 0000000..cc779f8 --- /dev/null +++ b/docs/organizations/companies/suse/rancher/rancher.md @@ -0,0 +1,80 @@ +# Rancher + +> Rancher is a container management platform built for organizations that deploy containers in production. +> Rancher makes it easy to run Kubernetes everywhere, meet IT requirements, and empower DevOps teams. 
+ +[rancher.com](https://rancher.com/), [code](https://github.com/rancher/rancher), [docs](https://ranchermanager.docs.rancher.com) + +## Presentation + +![Rancher platform](https://rancher.com/docs/img/rancher/platform.png) + +[ranchermanager.docs.rancher.com](https://ranchermanager.docs.rancher.com/reference-guides/rancher-manager-architecture) + +## Content + +* [Applications](applications.md) +* [Architecture](architecture.md) +* [Authentication](authentication.md) +* [Automation](automation.md) +* [Extensions](extensions.md) +* [Getting started](gettingstarted.md) +* [Installation](installation.md) +* [Migrations](migrations.md) +* [Operations](operations.md) +* [Provisioning](provisioning.md) +* [Training](training.md) + +## Releases + +Version | Date | Links +--------------------------------------------------------------------|------------|-------------------------------------------------------------------- +[**2.7.6**](https://github.com/rancher/rancher/releases/tag/v2.7.6) | 2023-07-31 | [Announcement](https://forums.rancher.com/t/rancher-release-v2-7-6/41410) +[**2.7.0**](https://github.com/rancher/rancher/releases/tag/v2.7.0) | 2022-11-16 | [Announcement](https://forums.rancher.com/t/rancher-release-v2-7-0/39478) +[**2.6.0**](https://github.com/rancher/rancher/releases/tag/v2.6.0) | 2021-08-31 | [Announcement](https://forums.rancher.com/t/rancher-release-v2-6-0/21048) + +[See more](versions.md) + +## Goodies + +* [Rancher Best Practices](https://www.suse.com/support/kb/doc/?id=000020105) +* [Rancher Brand Guidelines & Resources](https://www.rancher.com/brand-guidelines) +* [Rancher Forums](https://forums.rancher.com/) +* [Rancher UI DevKit](https://rancher.github.io/dashboard/) +* [Support Matrix for Rancher](https://www.suse.com/suse-rancher/support-matrix/all-supported-versions/rancher-v2-7-5/) +* Non-official: [Rancher Barn](https://github.com/rancher/barn) (recipes) + +## Alternatives + +* [Kubermatic](https://github.com/kubermatic/kubermatic) + +## 
Quick start + +* [Get Started with SUSE Rancher in 2 Easy Steps](https://www.suse.com/products/suse-rancher/get-started/) +(see also [Installing Rancher on a Single Node Using Docker](https://rancher.com/docs/rancher/v2.6/en/installation/other-installation-methods/single-node-docker/)) + +```bash +# creates Rancher container +sudo docker run --privileged -d --restart=unless-stopped -p 80:80 -p 443:443 rancher/rancher + +# manual: open http://localhost and follow instructions to login, at the end download the kubeconfig file + +# sets kubectl to the Kubernetes cluster and displays the single node +export KUBECONFIG=local.yaml +kubectl get nodes +``` + +## Operations + +* [Rancher Upgrade Checklist](https://www.suse.com/support/kb/doc/?id=000020061) + +## Learning + +* [Rancher Manager 2.8 for Rancher Prime Operations](https://www.suse.com/training/course/ran201v2.8) +* [Rancher Manager 2.7 for Rancher Prime Deployment](https://www.suse.com/training/course/ran211v2.7) + +## Tips + +Q. Is it possible to change the rancher/shell to another image? + +A. Yes, it’s a setting in global settings. 
It can also be set with the environment variable on rancher CATTLE_SHELL_IMAGE=my/customshell:tag diff --git a/docs/organizations/companies/suse/rancher/training.md b/docs/organizations/companies/suse/rancher/training.md new file mode 100644 index 0000000..aafc54e --- /dev/null +++ b/docs/organizations/companies/suse/rancher/training.md @@ -0,0 +1,5 @@ +# Rancher training + +## Online self-learning + +* [A Cloud Guru - Introduction to Rancher](https://learn.acloud.guru/course/introduction-to-rancher/dashboard) diff --git a/docs/organizations/companies/suse/rancher/versions.md b/docs/organizations/companies/suse/rancher/versions.md new file mode 100644 index 0000000..ed6c40a --- /dev/null +++ b/docs/organizations/companies/suse/rancher/versions.md @@ -0,0 +1,114 @@ +# Rancher versions + +## 2.8 + +### 2.8.1 + +Release date | 2024-12-31 +-------------|-------------------------------------------------------------------------------- +Release note | [github.com](https://github.com/rancher/rancher/releases/tag/v2.8.1) +Annoucement | [forums.rancher.com](https://forums.rancher.com/t/rancher-release-v2-8-1/42468) + +### 2.8.0 + +Release date | 2023-12-06 +-------------|-------------------------------------------------------------------------------- +Release note | [github.com](https://github.com/rancher/rancher/releases/tag/v2.8.0) +Annoucement | [forums.rancher.com](https://forums.rancher.com/t/rancher-release-v2-8-0/42098) + +Articles: + +* [Building a Custom Read-only Global Role with the Rancher Kubernetes API](https://www.suse.com/c/rancher_blog/building-a-custom-read-only-global-role-with-the-rancher-kubernetes-api/) - January 9, 2024 + +## 2.7 + +### 2.7.9 + +Release date | 2023-11-02 +-------------|-------------------------------------------------------------------------------- +Release note | [github.com](https://github.com/rancher/rancher/releases/tag/v2.7.9) +Annoucement | [forums.rancher.com](https://forums.rancher.com/t/rancher-release-v2-7-9/41872) + +### 
2.7.6 + +Release date | 2023-07-31 +-------------|-------------------------------------------------------------------------------- +Release note | [github.com](https://github.com/rancher/rancher/releases/tag/v2.7.6) +Announcement | [forums.rancher.com](https://forums.rancher.com/t/rancher-release-v2-7-6/41410) + +### 2.7.5 + +!!! warning + + Skip upgrading to 2.7.5 and immediately upgrade to 2.7.6. + +Release date | 2023-06-29 +-------------|-------------------------------------------------------------------------------- +Release note | [github.com](https://github.com/rancher/rancher/releases/tag/v2.7.5) +Announcement | [forums.rancher.com](https://forums.rancher.com/t/rancher-release-v2-7-5/40993) +Enhancements | Kubernetes v1.26 support + +### 2.7.4 (security release) + +Release date | 2023-06-01 +-------------|-------------------------------------------------------------------------------- +Release note | [github.com](https://github.com/rancher/rancher/releases/tag/v2.7.4) +Announcement | [forums.rancher.com](https://forums.rancher.com/t/rancher-release-v2-7-4/40836) + +### 2.7.3 (security release) + +Release date | 2023-04-24 +-------------|-------------------------------------------------------------------------------- +Release note | [github.com](https://github.com/rancher/rancher/releases/tag/v2.7.3) +Announcement | [forums.rancher.com](https://forums.rancher.com/t/rancher-release-v2-7-3/40521) + +### 2.7.2 + +Release date | 2023-04-12 +-------------|---------------------------------------------------------------------------------------------------------------------------------------------- +Release note | [github.com](https://github.com/rancher/rancher/releases/tag/v2.7.2) +Announcement | [forums.rancher.com](https://forums.rancher.com/t/rancher-release-v2-7-2/40445) +New features | Rancher webhook installed in downstream clusters, Pod Security Admissions and Pod Security Standards as replacement for Pod Security Policies +Enhancements | Kubernetes v1.25 
Support, RBAC for Fleet users ([Issue](https://github.com/rancher/dashboard/issues/7315)) + +### 2.7.1 (security release) + +Release date | 2023-01-25 +-------------|-------------------------------------------------------------------------------- +Release note | [github.com](https://github.com/rancher/rancher/releases/tag/v2.7.1) +Announcement | [forums.rancher.com](https://forums.rancher.com/t/rancher-release-v2-7-1/39886) + +### 2.7.0 + +Release date | 2022-11-16 +-------------|-------------------------------------------------------------------------------- +Release note | [github.com](https://github.com/rancher/rancher/releases/tag/v2.7.0) +Announcement | [forums.rancher.com](https://forums.rancher.com/t/rancher-release-v2-7-0/39478) +New features | Rancher Extensions + +## 2.6 + +### 2.6.9 (bug fixes) + +Release date | 2022-10-18 +-------------|-------------------------------------------------------------------------------- +Release note | [github.com](https://github.com/rancher/rancher/releases/tag/v2.6.9) +Announcement | [forums.rancher.com](https://forums.rancher.com/t/rancher-release-v2-6-9/39243) + +### 2.6.0 + +Release date | 2021-08-31 +-------------|--------------------------------------------------------------------------------------------------------- +Release note | [github.com](https://github.com/rancher/rancher/releases/tag/v2.6.0) +Announcement | [forums.rancher.com](https://forums.rancher.com/t/rancher-release-v2-6-0/21048) +New features | Support for Keycloak with OIDC +Enhancements | Redesigned Rancher User Experience (UI), Improvements for Hosted Kubernetes Clusters (AKS, EKS, and GKE) + +## 2.5 + +### 2.5.0 + +Release date | 2020-10-06 +-------------|-------------------------------------------------------------------------------- +Release note | [github.com](https://github.com/rancher/rancher/releases/tag/v2.5.0) +Announcement | [forums.rancher.com](https://forums.rancher.com/t/rancher-release-v2-5-0/18510) diff --git 
a/docs/organizations/companies/suse/rancher/wmware-vsphere.md b/docs/organizations/companies/suse/rancher/wmware-vsphere.md new file mode 100644 index 0000000..cd918e3 --- /dev/null +++ b/docs/organizations/companies/suse/rancher/wmware-vsphere.md @@ -0,0 +1,29 @@ +# VMware vSphere + +:octicons-light-bulb-16: **Info:** [vSphere](https://www.vmware.com/products/vsphere.html) is VMware's "Enterprise Workload Platform". +It is a virtualization platform, that includes vCenter Configuration Manager, vCenter Application Discovery Manager and vMotion. + +## Usecases + +### Install Rancher in vSphere + +* [Installing Rancher in a vSphere Environment](https://rancher.com/docs/rancher/v2.6/en/best-practices/rancher-server/rancher-in-vsphere/) + +#### Create VM template in vSphere + +* [Create a VMware vSphere template for Ubuntu Server 18.04](https://learn.microsoft.com/en-us/azure/cloud-adoption-framework/manage/hybrid/server/best-practices/vmware-ubuntu-template) +* Download Ubuntu ova from [cloud-images.ubuntu.com](https://cloud-images.ubuntu.com/focal/current/) +* Download Ubunu iso from [releases.ubuntu.com](https://releases.ubuntu.com/focal/) + +### Provision Kubernetes cluster on vSphere VMs from Rancher + +* [Creating a vSphere Cluster](https://rancher.com/docs/rancher/v2.6/en/cluster-provisioning/rke-clusters/node-pools/vsphere/) +* [Rancher, vSphere Network Protocol Profiles and static IP addresses for k8s nodes](https://www.virtualthoughts.co.uk/2020/03/29/rancher-vsphere-network-protocol-profiles-and-static-ip-addresses-for-k8s-nodes/) - March 29, 2020 +* [Using cloud-init for VM templating on vSphere](https://blah.cloud/infrastructure/using-cloud-init-for-vm-templating-on-vsphere/) - June 9, 2019 +* [frank-at-suse/vsphere_HA_autoscale_cluster](https://github.com/frank-at-suse/vsphere_HA_autoscale_cluster) + +## References + +### Storage + +* [Compatibility Matrices for vSphere Container Storage 
Plug-in](https://docs.vmware.com/en/VMware-vSphere-Container-Storage-Plug-in/2.0/vmware-vsphere-csp-getting-started/GUID-D4AAD99E-9128-40CE-B89C-AD451DA8379D.html) diff --git a/docs/organizations/companies/suse/rke.md b/docs/organizations/companies/suse/rke.md index f75ce56..8e6f319 100644 --- a/docs/organizations/companies/suse/rke.md +++ b/docs/organizations/companies/suse/rke.md @@ -1,9 +1,16 @@ -# Rancher Kubernetes Engine (RKE) - -> RKE is a CNCF-certified Kubernetes distribution that solves common installation complexities of Kubernetes by removing most host dependencies, presenting a stable path for deployment, upgrades & rollbacks. - -→ [suse.com/products/rancher-kubernetes-engine](https://www.suse.com/products/rancher-kubernetes-engine/), [docs](https://rancher.com/docs/rke/latest/en/) - -## Quick start - -* [RKE Kubernetes Installation](https://rancher.com/docs/rke/latest/en/installation/) +# Rancher Kubernetes Engine (RKE) + +> RKE is a CNCF-certified Kubernetes distribution that solves common installation complexities of Kubernetes by removing most host dependencies, presenting a stable path for deployment, upgrades & rollbacks. + +[suse.com/products](https://www.suse.com/products/rancher-kubernetes-engine/), [docs](https://rke.docs.rancher.com/) + +## Quick start + +* [RKE Kubernetes Installation](https://rancher.com/docs/rke/latest/en/installation/) + +## Components + +* cri-dockerd +* etcd +* NGINX Ingress Controller +* Canal diff --git a/docs/organizations/companies/suse/rke2.md b/docs/organizations/companies/suse/rke2.md index f73d889..f913065 100644 --- a/docs/organizations/companies/suse/rke2.md +++ b/docs/organizations/companies/suse/rke2.md @@ -1,6 +1,29 @@ -# RKE2 +# RKE2 -Disable CoreDNS autoscaler +> RKE2 (Rancher Kubernetes Engine v2), also known as RKE Government, is Rancher's next-generation Kubernetes distribution. It is a fully conformant Kubernetes distribution that focuses on security and compliance within the U.S. 
Federal Government sector. + +[docs](https://docs.rke2.io/) + +## Architecture + +![Architecture Overview](https://docs.rke2.io/assets/images/overview-06f8a098e271952bfe5db78b3a0e9b25.png) + +[ADRs](https://github.com/rancher/rke2/blob/master/docs/adrs/README.md) + +## Components + +- containerd +- etcd +- NGINX Ingress Controller +- Canal + +## Features + +- [Cluster Autoscaler for Rancher with RKE2](https://github.com/kubernetes/autoscaler/tree/master/cluster-autoscaler/cloudprovider/rancher) + +## Recipes + +### Disable CoreDNS autoscaler ```yaml apiVersion: helm.cattle.io/v1 @@ -13,3 +36,76 @@ spec: autoscaler: enabled: false ``` + +### Migration from RKE1 to RKE2 + +- [Issue #562](https://github.com/rancher/rke2/issues/562) +- [rancher/migration-agent](https://github.com/rancher/migration-agent) + +### Windows Clusters support + +- [Launching Kubernetes on Windows Clusters](https://docs.ranchermanager.rancher.io/pages-for-subheaders/use-windows-clusters) +- [Create a Windows HostProcess Pod](https://kubernetes.io/docs/tasks/configure-pod-container/create-hostprocess-pod/) + +## Installation + +### Ansible + +- [rancherfederal/rke2-ansible](https://github.com/rancherfederal/rke2-ansible) + +### Azure VM + +- Review VM specifications (example) + - Size: Standard_D2s_v3 (2 vcpus, 8 GiB memory) + - Operating System: Linux (Ubuntu 20.04) + - Location: West Europe + +- Create the VMs + - From the web UI + - With Azure CLI: [quick start](https://docs.microsoft.com/en-us/azure/virtual-machines/windows/quick-create-cli), [tutorial](https://docs.microsoft.com/en-us/azure/virtual-machines/linux/tutorial-manage-vm), `samples/scripts/az-vm.sh` + - With an IaC tool, such as Azure Resource Manager or Terraform + +### RKE2 quickstart + +- Connect with SSH: `ssh @` + +- Follow the [Quick Start](https://docs.rke2.io/install/quickstart/), see the `samples/scripts/az-rke2.sh` + +- Once created and running fine, import the cluster in Rancher, execute the command line to register 
the cluster and make sure the agent is running fine and the cluster can be seen in Rancher + +### Cluster API + +- [Cluster API Provider RKE2](https://github.com/rancher-sandbox/cluster-api-provider-rke2) + +### Helm chart installed + +NAME | NAMESPACE | CHART | APP VERSION +--------------------|-------------|----------------------------------------------|------------ +rke2-canal | kube-system | rke2-canal-v3.22.2-build2022050902 | v3.22.2 +rke2-coredns | kube-system | rke2-coredns-1.19.400 | 1.9.3 +rke2-ingress-nginx | kube-system | rke2-ingress-nginx-4.1.004 | 1.2.0 +rke2-metrics-server | kube-system | rke2-metrics-server-2.11.100-build2021111904 | 0.5.0 + +### Processes running on a server node + +- /usr/local/bin/rke2 server +- containerd -c /var/lib/rancher/rke2/agent/etc/containerd/config.toml -a /run/k3s/containerd/containerd.sock --state /run/k3s/containerd --root /var/lib/rancher/rke2/agent/containerd +- kubelet +- /var/lib/rancher/rke2/data/v1.23.9-rke2r1-eef53a0d1ec2/bin/containerd-shim-runc-v2 -namespace k8s.io -id container_id -address /run/k3s/containerd/containerd.sock +- kube-proxy --cluster-cidr=10.42.0.0/16 --conntrack-max-per-core=0 --conntrack-tcp-timeout-close-wait=0s --conntrack-tcp-timeout-established=0s --healthz-bind-address=127.0.0.1 --hostname-override=vm-bthomas-rke2server --kubeconfig=/var/lib/rancher/rke2/agent/kubeproxy.kubeconfig --proxy-mode=iptables +- kube-scheduler --permit-port-sharing=true --authentication-kubeconfig=/var/lib/rancher/rke2/server/cred/scheduler.kubeconfig --authorization-kubeconfig=/var/lib/rancher/rke2/server/cred/scheduler.kubeconfig --bind-address=127.0.0.1 --kubeconfig=/var/lib/rancher/rke2/server/cred/scheduler.kubeconfig --profiling=false --secure-port=10259 +- kube-apiserver +- etcd --config-file=/var/lib/rancher/rke2/server/db/etcd/config +- cloud-controller-manager +- kube-controller-manager +- /cluster-proportional-autoscaler +- /nginx-ingress-controller + +### Processes running on a worker node + +- 
/usr/local/bin/rke2 agent +- containerd -c /var/lib/rancher/rke2/agent/etc/containerd/config.toml -a /run/k3s/containerd/containerd.sock --state /run/k3s/containerd --root /var/lib/rancher/rke2/agent/containerd +- kubelet +- /var/lib/rancher/rke2/data/v1.23.9-rke2r1-eef53a0d1ec2/bin/containerd-shim-runc-v2 -namespace k8s.io -id container_id -address /run/k3s/containerd/containerd.sock +- kube-proxy --cluster-cidr=10.42.0.0/16 --conntrack-max-per-core=0 --conntrack-tcp-timeout-close-wait=0s --conntrack-tcp-timeout-established=0s --healthz-bind-address=127.0.0.1 --hostname-override=vm-bthomas-rke2worker1 --kubeconfig=/var/lib/rancher/rke2/agent/kubeproxy.kubeconfig --proxy-mode=iptables +- /nginx-ingress-controller --election-id=ingress-controller-leader --controller-class=k8s.io/ingress-nginx --ingress-class=nginx --configmap=kube-system/rke2-ingress-nginx-controller --validating-webhook=:8443 --validating-webhook-certificate=/usr/local/certificates/cert --validating-webhook-key=/usr/local/certificates/key --watch-ingress-without-class=true diff --git a/docs/organizations/companies/suse/sles.md b/docs/organizations/companies/suse/sles.md index 8a4c3df..4bcc737 100644 --- a/docs/organizations/companies/suse/sles.md +++ b/docs/organizations/companies/suse/sles.md @@ -1,4 +1,4 @@ -# SUSE Linux Enterprise Server (SLES) +# SUSE Linux Enterprise Server (SLES) ## SUSE Linux Enterprise Desktop @@ -17,3 +17,98 @@ > SUSE Linux Enterprise Server (SLES) is an adaptable and easy-to-manage linux server platform that allows developers and administrators to deploy business-critical workloads on-premises, in the cloud and at the edge. 
→ [suse.com/products/server](https://www.suse.com/products/server/) + +## Cheat sheet + +### Quickstart + +* Basic commands + +```bash +# displays system version +cat /etc/os-release + +# gets network information +ip a + +# displays date +date + +# updates date +su -c 'date -s "11 DEC 2022 21:30:00"' + +# shuts down the system +su -c 'shutdown now' +``` + +* Text editors + +```bash +# installs vi +zypper install vi +``` + +* Time Synchronization with NTP (Network Time Protocol) ([docs](https://documentation.suse.com/sles/15-SP3/html/SLES-all/cha-ntp.html)) + +```bash +# installs chrony +zypper install chrony + +# edits configuration +vi /etc/chrony.conf + +# starts and enables chrony service +systemctl start chronyd.service +systemctl enable chronyd.service +``` + +* Hostname + +```bash +# updates hostname +hostnamectl set-hostname + +# displays hostname (after logout/login) +hostname +``` + +* Static IP Address + +```bash +# edits network interface configuration +vi /etc/sysconfig/network/ifcfg-eth0 + +# restarts network service +systemctl restart network + +# displays routes configuration +more /etc/sysconfig/network/routes + +# displays name resolution configuration +more /etc/resolv.conf +``` + +* Enable SSH + +```bash +# switches to root account +su + +# installs OpenSSH +zypper install openssh + +# starts sshd service and checks status +systemctl start sshd + +# checks sshd service status +systemctl status sshd + +# enables sshd service +systemctl enable sshd + +# adds sshd service in firewall +firewall-cmd --permanent --add-service=ssh + +# reloads firewall +firewall-cmd --reload +``` diff --git a/docs/organizations/companies/suse/suse-glossary.md b/docs/organizations/companies/suse/suse-glossary.md new file mode 100644 index 0000000..d7516fd --- /dev/null +++ b/docs/organizations/companies/suse/suse-glossary.md @@ -0,0 +1,5 @@ +# Glossary + +Acronym | Meaning +--------|------------------------- +ALP | Adaptable Linux Platform diff --git 
a/docs/organizations/companies/suse/suse.md b/docs/organizations/companies/suse/suse.md index b5f435b..6fef8de 100644 --- a/docs/organizations/companies/suse/suse.md +++ b/docs/organizations/companies/suse/suse.md @@ -1,4 +1,4 @@ -# SUSE +# SUSE 🌐 [suse.com](https://www.suse.com/) @@ -29,9 +29,9 @@ * Harvester * [K3s](k3s.md) * [Longhorn](longhorn.md) -* [NeuVector](neuvector.md) +* [NeuVector](neuvector/neuvector.md) * [openSUSE](opensuse.md) -* [Rancher](rancher.md) +* [Rancher](rancher/rancher.md) * [Rancher Desktop](rancher-desktop.md) * [RKE](rke.md) @@ -52,3 +52,8 @@ ## Resources * [PINT (The Public Cloud Information Tracker)](https://pint.suse.com/) + +## Goodies + +* [Rancher Academy](https://www.rancher.academy/) +* [SUSE Technical Reference Documentation](https://documentation.suse.com/trd-supported.html) diff --git a/docs/organizations/companies/wiz/glossary.md b/docs/organizations/companies/wiz/glossary.md deleted file mode 100644 index 4973780..0000000 --- a/docs/organizations/companies/wiz/glossary.md +++ /dev/null @@ -1,3 +0,0 @@ -# Glossary - -CNAPP (cloud-native application protection platform) diff --git a/docs/organizations/companies/wiz/wiz.md b/docs/organizations/companies/wiz/wiz.md index ec4a840..abc5ed1 100644 --- a/docs/organizations/companies/wiz/wiz.md +++ b/docs/organizations/companies/wiz/wiz.md @@ -1,8 +1,8 @@ -# Wiz +# Wiz -[wiz.io](https://www.wiz.io/) +🌐 [wiz.io](https://www.wiz.io/) -[YouTube channel](https://www.youtube.com/@wizsecurity) +[YouTube](https://www.youtube.com/@wizsecurity) ## History diff --git a/samples/azure-cli/aks-rancher-installation.sh b/samples/azure-cli/aks-rancher-installation.sh new file mode 100644 index 0000000..0377afb --- /dev/null +++ b/samples/azure-cli/aks-rancher-installation.sh @@ -0,0 +1,108 @@ +#!/bin/bash + +# sets parameters +AZURE_LOCATION=westeurope +CERT_MANAGER_VERSION=v1.10.0 +KUBERNETES_VERSION=v1.23.12 +NODE_COUNT=2 +RESOURCE_PREFIX=bthomas-kubemgmt01 +SUBSCRIPTION_ID=*********** 
+VM_SIZE=Standard_D2s_v3 + +# MANUAL: authenticates and sets account +az login +az account set --subscription $SUBSCRIPTION_ID + +# creates resource group +az group create --name rg-${RESOURCE_PREFIX} --location ${AZURE_LOCATION} + +# create AKS resource (Kubernetes cluster managed by Azure) +az aks create \ + --resource-group rg-${RESOURCE_PREFIX} \ + --name aks-${RESOURCE_PREFIX} \ + --kubernetes-version ${KUBERNETES_VERSION} \ + --node-count ${NODE_COUNT} \ + --node-vm-size ${VM_SIZE} + +# adds cluster credentials to local kubectl config +az aks get-credentials --resource-group rg-${RESOURCE_PREFIX} --name aks-${RESOURCE_PREFIX} +chmod 600 ~/.kube/config + +# makes sure Helm repo chart has been added (https://github.com/devpro/helm-charts/blob/main/README.md) +helm repo add devpro https://devpro.github.io/helm-charts +helm repo update + +# installs NGINX Ingress Controller +helm upgrade --install ingress-nginx devpro/ingress-nginx --namespace ingress-nginx --create-namespace +# MANUAL: waits until External IP is set +kubectl get service ingress-nginx-controller --namespace=ingress-nginx +# saves NGINX Ingress service public IP +NGINX_PUBLIC_IP=`kubectl get service -n ingress-nginx ingress-nginx-controller --output jsonpath='{.status.loadBalancer.ingress[0].ip}'` + +# installs cert-manager +kubectl apply -f https://github.com/cert-manager/cert-manager/releases/download/${CERT_MANAGER_VERSION}/cert-manager.crds.yaml +helm upgrade --install cert-manager devpro/cert-manager --namespace cert-manager --create-namespace +# MANUAL: makes sure all 3 pods are running fine (cert-manager, cert-manager-cainjector, cert-manager-webhook) +kubectl get pods,clusterissuer --namespace cert-manager + +# installs Let's Encrypt cluster issuers +helm upgrade --install letsencrypt devpro/letsencrypt \ + --set registration.emailAddress=mypersonal@email.address \ + --namespace cert-manager +# MANUAL: makes sure there are 3 cluster issuers (letsencrypt-prod, letsencrypt-staging, 
selfsigned-cluster-issuer) +kubectl get clusterissuer + +# installs Rancher with Helm +kubectl create namespace cattle-system +helm upgrade --install rancher devpro/rancher \ + --set rancher.hostname=rancher.${NGINX_PUBLIC_IP}.sslip.io \ + --namespace cattle-system +# MANUAL: with Rancher < 2.6.7, edit the rancher ingress object to add "ingressClassName: nginx" under "spec" +# checks everything is ok +kubectl get svc,deploy,pod,ingress,pv,certificate -n cattle-system +# MANUAL: retrieves generated password +kubectl get secret --namespace cattle-system bootstrap-secret -o go-template='{{ .data.bootstrapPassword|base64decode}}{{ "\n" }}' +# makes sure Rancher UI is available +curl https://rancher.${NGINX_PUBLIC_IP}.sslip.io +# MANUAL: edits Public IP address Azure resource (Configuration) to set a DNS name label, for example rancher-demo(.westeurope.cloudapp.azure.com), update the server-url in Rancher > Global Settings +kubectl edit ingress rancher -n cattle-system +# apiVersion: networking.k8s.io/v1 +# kind: Ingress +# metadata: +# spec: +# ingressClassName: nginx +# rules: +# - host: rancher-demo.westeurope.cloudapp.azure.com +# http: +# paths: +# - backend: +# service: +# name: rancher +# port: +# number: 80 +# pathType: ImplementationSpecific +# tls: +# - hosts: +# - rancher-demo.westeurope.cloudapp.azure.com +# secretName: tls-rancher-ingress +# MANUAL: opens https://rancher.${NGINX_PUBLIC_IP}.sslip.io + +# MANUAL: adds GitRepo +# apiVersion: fleet.cattle.io/v1alpha1 +# kind: GitRepo +# spec: +# branch: release/demo +# clientSecretName: gitrepo-auth-rrxgw +# insecureSkipTLSVerify: false +# paths: +# - fleet/ingress-nginx +# - fleet/cert-manager +# - fleet/sealed-secrets +# repo: https://github.com/devpro/kubernetes-demo-definitions +# targets: [] + +# OPTIONAL: removes Rancher +helm uninstall rancher --namespace cattle-system + +# cleans up +az group delete --name rg-${RESOURCE_PREFIX} --yes --no-wait diff --git a/samples/azure-cli/az-rke2.sh 
b/samples/azure-cli/az-rke2.sh new file mode 100644 index 0000000..2fc67c9 --- /dev/null +++ b/samples/azure-cli/az-rke2.sh @@ -0,0 +1,49 @@ +#!/bin/bash + +# checks if NetworkManager is enabled (if it is follow https://docs.rke2.io/known_issues/#networkmanager) +sudo systemctl status NetworkManager +# checks if networkd is enabled +sudo systemctl status systemd-networkd.service +# makes sure firewalld is disabled +sudo systemctl status firewalld +# makes sure AppArmor is loaded +sudo aa-status + +# installs RKE2 on the server node +curl -sfL https://get.rke2.io | sudo sh - +# enables the rke2-server service +sudo systemctl enable rke2-server.service +# starts the service +sudo systemctl start rke2-server.service +# OPTIONAL: follows the logs +sudo journalctl -u rke2-server -f +# OPTIONAL: checks Kubernetes locally +sudo cp /etc/rancher/rke2/rke2.yaml . +sudo chown bthomas:bthomas rke2.yaml +/var/lib/rancher/rke2/bin/kubectl get nodes --kubeconfig rke2.yaml +# retrieves the node token +sudo cat /var/lib/rancher/rke2/server/node-token +# OPTIONAL: cleanup +/usr/local/bin/rke2-killall.sh +/usr/local/bin/rke2-uninstall.sh + +# installs RKE2 on the worker node +curl -sfL https://get.rke2.io | INSTALL_RKE2_TYPE="agent" sudo sh - +# enables the rke2-agent service +sudo systemctl enable rke2-agent.service +# configures the rke2-agent service +sudo mkdir -p /etc/rancher/rke2/ +sudo cat << \EOF | sudo tee /etc/rancher/rke2/config.yaml +server: https://<server-ip>:9345 +token: <node-token> +EOF +# starts the service +sudo systemctl start rke2-agent.service +# OPTIONAL: follows the logs +sudo journalctl -u rke2-agent -f + +# the cluster is now operational and can be accessed from a remote site (just edit the local ~/.kube/config file) +kubectl get nodes + +# for self-signed certificates add insecure flag or edit kube/config file to comment certificate-authority-data line and add "insecure-skip-tls-verify: true" line under clusters/cluster +kubectl --insecure-skip-tls-verify get nodes diff --git 
a/samples/azure-cli/az-vm.sh b/samples/azure-cli/az-vm.sh new file mode 100644 index 0000000..7adebf5 --- /dev/null +++ b/samples/azure-cli/az-vm.sh @@ -0,0 +1,50 @@ +#!/bin/bash + +# sets parameters +AZ_LOCATION=westeurope +RG_NAME=rg-bthomas-rke2220825 +VM_NAME=vm-bthomas-rke2worker1 +VM_IMAGE="Canonical:UbuntuServer:20.04-LTS:latest" +VM_SIZE=Standard_D2_v2 +VM_ADMINUSER=azureuser +VNET_NAME=vnet-bthomas-rke220220825 + +# MANUAL: authenticates or double check account +az login +az account show + +# OPTIONAL: creates a resource group +az group create --name ${RG_NAME} --location ${AZ_LOCATION} + +# OPTIONAL: creates a virtual network +az network vnet create \ + --resource-group ${RG_NAME} \ + --name ${VNET_NAME} \ + --address-prefix 10.0.0.0/16 + +# OPTIONAL: views an incomplete list of VM images +az vm image list + +# creates a new virtual machine (see https://docs.microsoft.com/en-us/cli/azure/vm?view=azure-cli-latest#az-vm-create) +az vm create --resource-group ${RG_NAME} --name ${VM_NAME} \ + --image ${VM_IMAGE} --size ${VM_SIZE} \ + --vnet-name ${VNET_NAME} --subnet default \ + --storage-sku StandardSSD_LRS --data-disk-delete-option Delete \ + --public-ip-sku Standard --admin-username ${VM_ADMINUSER} + +# MANUAL: copy the public IP (output as publicIpAddress) + +# OPTIONAL: views the created VM +az vm show --name ${VM_NAME} --resource-group ${RG_NAME} + +# updates the network interface +az vm update -n ${VM_NAME} -g ${RG_NAME} --set networkProfile.networkInterfaces[0].deleteOption=Delete + +# OPTIONAL: enables auto-shutdown +az vm auto-shutdown -g ${RG_NAME} -n ${VM_NAME} --time 2030 --email "bertrand.thomas@suse.com" + +# OPTIONAL: open ports +az vm open-port -g ${RG_NAME} -n ${VM_NAME} --name RKE2_ports --port 6443,9345 --priority 100 + +# OPTIONAL: connects to the VM +ssh ${VM_ADMINUSER}@${VM_PUBLICIPADDRESS} diff --git a/samples/concourse/compose.yml new file mode 100644 index 0000000..8e3beed --- /dev/null +++ 
b/samples/concourse/compose.yml @@ -0,0 +1,27 @@ +# ref. https://concourse-ci.org/docker-compose.yml +version: '3' + +services: + concourse-db: + image: postgres + environment: + POSTGRES_DB: concourse + POSTGRES_PASSWORD: concourse_pass + POSTGRES_USER: concourse_user + PGDATA: /database + + concourse: + image: concourse/concourse + command: quickstart + privileged: true + depends_on: [concourse-db] + ports: ["8080:8080"] + environment: + CONCOURSE_POSTGRES_HOST: concourse-db + CONCOURSE_POSTGRES_USER: concourse_user + CONCOURSE_POSTGRES_PASSWORD: concourse_pass + CONCOURSE_POSTGRES_DATABASE: concourse + CONCOURSE_EXTERNAL_URL: http://localhost:8080 + CONCOURSE_ADD_LOCAL_USER: test:test + CONCOURSE_MAIN_TEAM_LOCAL_USER: test + CONCOURSE_WORKER_BAGGAGECLAIM_DRIVER: overlay diff --git a/samples/concourse/pipelines/basic/01_helloworld.yml b/samples/concourse/pipelines/basic/01_helloworld.yml new file mode 100644 index 0000000..a0c6892 --- /dev/null +++ b/samples/concourse/pipelines/basic/01_helloworld.yml @@ -0,0 +1,13 @@ +jobs: + - name: job + public: true + plan: + - task: simple-task + config: + platform: linux + image_resource: + type: registry-image + source: {repository: busybox} + run: + path: echo + args: ["Hello, world!"] diff --git a/samples/concourse/pipelines/dotnet/01_aspnetcore.yml b/samples/concourse/pipelines/dotnet/01_aspnetcore.yml new file mode 100644 index 0000000..16bd339 --- /dev/null +++ b/samples/concourse/pipelines/dotnet/01_aspnetcore.yml @@ -0,0 +1,30 @@ +resources: + - name: git-repository + type: git + source: + uri: https://github.com/devpro/cf-dotnet-samples.git + branch: master +jobs: + - name: build-webapp + plan: + - get: git-repository + trigger: true + - task: run-tests + privileged: true + config: + platform: linux + inputs: + - name: git-repository + image_resource: + type: docker-image + source: + repository: microsoft/dotnet + tag: 2.2-sdk + run: + path: sh + args: + - -exc + - | + ls -alrt + cd 
./git-repository/dotnetcore/2.2/webapp/ + dotnet build diff --git a/samples/concourse/pipelines/dotnet/02_globaltool.yml b/samples/concourse/pipelines/dotnet/02_globaltool.yml new file mode 100644 index 0000000..5f6103d --- /dev/null +++ b/samples/concourse/pipelines/dotnet/02_globaltool.yml @@ -0,0 +1,46 @@ +jobs: + - name: mongodb-atlas + plan: + - task: run-mdbatlas + privileged: true + config: + platform: linux + image_resource: + type: docker-image + source: + repository: mcr.microsoft.com/dotnet/core/sdk + tag: 3.1 + run: + path: sh + args: + - -exc + - | + export PATH="$PATH:/root/.dotnet/tools" + dotnet tool install --global mdbatlas + mdbatlas list orgs + params: + mdbatlas__PublicKey: ((mdbatlas-publickey)) + mdbatlas__PrivateKey: ((mdbatlas-privatekey)) + - name: azure-devops + plan: + - task: run-almops + privileged: true + config: + platform: linux + image_resource: + type: docker-image + source: + repository: mcr.microsoft.com/dotnet/core/sdk + tag: 3.1 + run: + path: sh + args: + - -exc + - | + export PATH="$PATH:/root/.dotnet/tools" + dotnet tool install --global almops + almops list projects + params: + almops__BaseUrl: ((almops-org)) + almops__Username: ((almops-user)) + almops__Token: ((almops-token)) diff --git a/samples/concourse/tasks/basic/helloworld.yml b/samples/concourse/tasks/basic/helloworld.yml new file mode 100644 index 0000000..50a85dc --- /dev/null +++ b/samples/concourse/tasks/basic/helloworld.yml @@ -0,0 +1,7 @@ +platform: linux +image_resource: + type: registry-image + source: {repository: busybox} +run: + path: echo + args: ["Hello, world!"] diff --git a/samples/kubernetes/manifests/aks-helloworld-applications.yaml b/samples/kubernetes/manifests/aks-helloworld-applications.yaml new file mode 100644 index 0000000..e4a9a13 --- /dev/null +++ b/samples/kubernetes/manifests/aks-helloworld-applications.yaml @@ -0,0 +1,136 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: aks-helloworld-one +spec: + replicas: 1 + selector: + 
matchLabels: + app: aks-helloworld-one + template: + metadata: + labels: + app: aks-helloworld-one + spec: + containers: + - name: aks-helloworld-one + image: mcr.microsoft.com/azuredocs/aks-helloworld:v1 + ports: + - containerPort: 80 + resources: + requests: + memory: "64Mi" + cpu: "250m" + limits: + memory: "128Mi" + cpu: "500m" + env: + - name: TITLE + value: "Welcome to Azure Kubernetes Service (AKS)" +--- +apiVersion: v1 +kind: Service +metadata: + name: aks-helloworld-one +spec: + type: ClusterIP + ports: + - port: 80 + selector: + app: aks-helloworld-one +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: aks-helloworld-two +spec: + replicas: 1 + selector: + matchLabels: + app: aks-helloworld-two + template: + metadata: + labels: + app: aks-helloworld-two + spec: + containers: + - name: aks-helloworld-two + image: mcr.microsoft.com/azuredocs/aks-helloworld:v1 + ports: + - containerPort: 80 + resources: + requests: + memory: "64Mi" + cpu: "250m" + limits: + memory: "128Mi" + cpu: "500m" + env: + - name: TITLE + value: "AKS Ingress Demo" +--- +apiVersion: v1 +kind: Service +metadata: + name: aks-helloworld-two +spec: + type: ClusterIP + ports: + - port: 80 + selector: + app: aks-helloworld-two +--- +apiVersion: networking.k8s.io/v1 +kind: Ingress +metadata: + name: hello-world-ingress + annotations: + nginx.ingress.kubernetes.io/ssl-redirect: "false" + nginx.ingress.kubernetes.io/use-regex: "true" + nginx.ingress.kubernetes.io/rewrite-target: /$2 +spec: + ingressClassName: nginx + rules: + - http: + paths: + - path: /hello-world-one(/|$)(.*) + pathType: Prefix + backend: + service: + name: aks-helloworld-one + port: + number: 80 + - path: /hello-world-two(/|$)(.*) + pathType: Prefix + backend: + service: + name: aks-helloworld-two + port: + number: 80 + - path: /(.*) + pathType: Prefix + backend: + service: + name: aks-helloworld-one + port: + number: 80 +--- +apiVersion: networking.k8s.io/v1 +kind: Ingress +metadata: + name: 
hello-world-ingress-static + annotations: + nginx.ingress.kubernetes.io/ssl-redirect: "false" + nginx.ingress.kubernetes.io/rewrite-target: /static/$2 +spec: + ingressClassName: nginx + rules: + - http: + paths: + - path: /static(/|$)(.*) + pathType: Prefix + backend: + service: + name: aks-helloworld-one + port: + number: 80 diff --git a/samples/kubernetes/manifests/gitrepo-guestbook-sample.yaml b/samples/kubernetes/manifests/gitrepo-guestbook-sample.yaml new file mode 100644 index 0000000..44be453 --- /dev/null +++ b/samples/kubernetes/manifests/gitrepo-guestbook-sample.yaml @@ -0,0 +1,8 @@ +apiVersion: fleet.cattle.io/v1alpha1 +kind: GitRepo +metadata: + name: guestbook-sample +spec: + repo: "https://github.com/rancher/fleet-examples" + paths: + - simple