diff --git a/Dockerfile b/Dockerfile
index 02782d0c..b15d94d8 100644
--- a/Dockerfile
+++ b/Dockerfile
@@ -4,7 +4,7 @@
 # BUILD: docker build --rm -t puckel/docker-airflow .
 # SOURCE: https://github.com/puckel/docker-airflow
 
-FROM python:3.7-slim-buster
+FROM python:3.8-slim-buster
 LABEL maintainer="Puckel_"
 
 # Never prompt the user for choices on installation/configuration of packages
@@ -12,7 +12,7 @@ ENV DEBIAN_FRONTEND noninteractive
 ENV TERM linux
 
 # Airflow
-ARG AIRFLOW_VERSION=1.10.9
+ARG AIRFLOW_VERSION=2.0.0
 ARG AIRFLOW_USER_HOME=/usr/local/airflow
 ARG AIRFLOW_DEPS=""
 ARG PYTHON_DEPS=""
diff --git a/README.md b/README.md
index 922e51a7..ce8106c7 100644
--- a/README.md
+++ b/README.md
@@ -10,27 +10,28 @@ This repository contains **Dockerfile** of [apache-airflow](https://github.com/a
 
-## Informations
+## Information
 
-* Based on Python (3.7-slim-buster) official Image [python:3.7-slim-buster](https://hub.docker.com/_/python/) and uses the official [Postgres](https://hub.docker.com/_/postgres/) as backend and [Redis](https://hub.docker.com/_/redis/) as queue
+* Based on Python (3.8-slim-buster) official Image [python:3.8-slim-buster](https://hub.docker.com/_/python/) and uses the official [Postgres](https://hub.docker.com/_/postgres/) as backend and [Redis](https://hub.docker.com/_/redis/) as queue
 * Install [Docker](https://www.docker.com/)
 * Install [Docker Compose](https://docs.docker.com/compose/install/)
 * Following the Airflow release from [Python Package Index](https://pypi.python.org/pypi/apache-airflow)
 
 ## Installation
 
-Pull the image from the Docker repository.
+There is no public `puckel/docker-airflow:2.0.0` image yet, so you have to build it yourself. After cloning this repository, run:
+
+    docker build -t puckel/docker-airflow:2.0.0 .
 
-    docker pull puckel/docker-airflow
 
 ## Build
 
 Optionally install [Extra Airflow Packages](https://airflow.incubator.apache.org/installation.html#extra-package) and/or python dependencies at build time :
 
-    docker build --rm --build-arg AIRFLOW_DEPS="datadog,dask" -t puckel/docker-airflow .
-    docker build --rm --build-arg PYTHON_DEPS="flask_oauthlib>=0.9" -t puckel/docker-airflow .
+    docker build --rm --build-arg AIRFLOW_DEPS="datadog,dask" -t puckel/docker-airflow:2.0.0 .
+    docker build --rm --build-arg PYTHON_DEPS="flask_oauthlib>=0.9" -t puckel/docker-airflow:2.0.0 .
 
 or combined
 
-    docker build --rm --build-arg AIRFLOW_DEPS="datadog,dask" --build-arg PYTHON_DEPS="flask_oauthlib>=0.9" -t puckel/docker-airflow .
+    docker build --rm --build-arg AIRFLOW_DEPS="datadog,dask" --build-arg PYTHON_DEPS="flask_oauthlib>=0.9" -t puckel/docker-airflow:2.0.0 .
 
-Don't forget to update the airflow images in the docker-compose files to puckel/docker-airflow:latest.
+Don't forget to update the airflow images in the docker-compose files to puckel/docker-airflow:2.0.0.
 
@@ -38,7 +39,7 @@ Don't forget to update the airflow images in the docker-compose files to puckel/
 
 By default, docker-airflow runs Airflow with **SequentialExecutor** :
 
-    docker run -d -p 8080:8080 puckel/docker-airflow webserver
+    docker run -d -p 8080:8080 puckel/docker-airflow:2.0.0 webserver
 
 If you want to run another executor, use the other docker-compose.yml files provided in this repository.
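For example — a sketch using the compose files that ship in this repository (service definitions as in docker-compose-LocalExecutor.yml and docker-compose-CeleryExecutor.yml):

    # LocalExecutor stack (Postgres + webserver)
    docker-compose -f docker-compose-LocalExecutor.yml up -d

    # CeleryExecutor stack (Postgres + Redis + webserver, flower, scheduler, worker)
    docker-compose -f docker-compose-CeleryExecutor.yml up -d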
@@ -54,7 +55,7 @@ NB : If you want to have DAGs example loaded (default=False), you've to set the
-`LOAD_EX=n`
+`LOAD_EX=y`
 
-    docker run -d -p 8080:8080 -e LOAD_EX=y puckel/docker-airflow
+    docker run -d -p 8080:8080 -e LOAD_EX=y puckel/docker-airflow:2.0.0
 
 If you want to use Ad hoc query, make sure you've configured connections:
-Go to Admin -> Connections and Edit "postgres_default" set this values (equivalent to values in airflow.cfg/docker-compose*.yml) :
+Go to Admin -> Connections and edit "postgres_default" with these values (equivalent to the values in airflow.cfg/docker-compose*.yml):
@@ -65,7 +66,7 @@ Go to Admin -> Connections and Edit "postgres_default" set this values (equivale
 
-For encrypted connection passwords (in Local or Celery Executor), you must have the same fernet_key. By default docker-airflow generates the fernet_key at startup, you have to set an environment variable in the docker-compose (ie: docker-compose-LocalExecutor.yml) file to set the same key accross containers. To generate a fernet_key :
+For encrypted connection passwords (in Local or Celery Executor), you must have the same fernet_key. By default docker-airflow generates the fernet_key at startup, so you have to set an environment variable in the docker-compose file (e.g. docker-compose-LocalExecutor.yml) to use the same key across containers. To generate a fernet_key :
 
-    docker run puckel/docker-airflow python -c "from cryptography.fernet import Fernet; FERNET_KEY = Fernet.generate_key().decode(); print(FERNET_KEY)"
+    docker run puckel/docker-airflow:2.0.0 python -c "from cryptography.fernet import Fernet; FERNET_KEY = Fernet.generate_key().decode(); print(FERNET_KEY)"
 
 ## Configuring Airflow
 
@@ -98,6 +99,9 @@ In order to incorporate plugins into your docker container
 - Airflow: [localhost:8080](http://localhost:8080/)
 - Flower: [localhost:5555](http://localhost:5555/)
 
+To log in to the Airflow webserver, the default credentials are:
+- username: airflow
+- password: airflow
 
 ## Scale the number of workers
 
@@ -111,7 +115,7 @@ This can be used to scale to a multi node setup using docker swarm.
 
-If you want to run other airflow sub-commands, such as `list_dags` or `clear` you can do so like this:
+If you want to run other airflow sub-commands, such as `dags list` or `tasks clear`, you can do so like this:
 
-    docker run --rm -ti puckel/docker-airflow airflow list_dags
+    docker run --rm -ti puckel/docker-airflow:2.0.0 airflow dags list
 
 or with your docker-compose set up like this:
 
@@ -119,8 +123,8 @@ or with your docker-compose set up like this:
 
 You can also use this to run a bash shell or any other command in the same environment that airflow would be run in:
 
-    docker run --rm -ti puckel/docker-airflow bash
-    docker run --rm -ti puckel/docker-airflow ipython
+    docker run --rm -ti puckel/docker-airflow:2.0.0 bash
+    docker run --rm -ti puckel/docker-airflow:2.0.0 ipython
 
 # Simplified SQL database configuration using PostgreSQL
 
diff --git a/config/airflow.cfg b/config/airflow.cfg
index 9e4d5229..a7dfed82 100644
--- a/config/airflow.cfg
+++ b/config/airflow.cfg
@@ -3,79 +3,40 @@
 # subfolder in a code repository. This path must be absolute.
 dags_folder = /usr/local/airflow/dags
 
-# The folder where airflow should store its log files
-# This path must be absolute
-base_log_folder = /usr/local/airflow/logs
-
-# Airflow can store logs remotely in AWS S3, Google Cloud Storage or Elastic Search.
-# Set this to True if you want to enable remote logging.
-remote_logging = False
-
-# Users must supply an Airflow connection id that provides access to the storage
-# location.
-remote_log_conn_id =
-remote_base_log_folder =
-encrypt_s3_logs = False
-
-# Logging level
-logging_level = INFO
-
-# Logging level for Flask-appbuilder UI
-fab_logging_level = WARN
-
-# Logging class
-# Specify the class that will specify the logging configuration
-# This class has to be on the python classpath
-# Example: logging_config_class = my.path.default_local_settings.LOGGING_CONFIG
-logging_config_class =
-
-# Flag to enable/disable Colored logs in Console
-# Colour the logs when the controlling terminal is a TTY.
-colored_console_log = True - -# Log format for when Colored logs is enabled -colored_log_format = [%%(blue)s%%(asctime)s%%(reset)s] {{%%(blue)s%%(filename)s:%%(reset)s%%(lineno)d}} %%(log_color)s%%(levelname)s%%(reset)s - %%(log_color)s%%(message)s%%(reset)s -colored_formatter_class = airflow.utils.log.colored_log.CustomTTYColoredFormatter - -# Format of Log line -log_format = [%%(asctime)s] {{%%(filename)s:%%(lineno)d}} %%(levelname)s - %%(message)s -simple_log_format = %%(asctime)s %%(levelname)s - %%(message)s - -# Log filename format -log_filename_template = {{ ti.dag_id }}/{{ ti.task_id }}/{{ ts }}/{{ try_number }}.log -log_processor_filename_template = {{ filename }}.log -dag_processor_manager_log_location = /usr/local/airflow/logs/dag_processor_manager/dag_processor_manager.log - -# Name of handler to read task instance logs. -# Default to use task handler. -task_log_reader = task - # Hostname by providing a path to a callable, which will resolve the hostname. -# The format is "package:function". +# The format is "package.function". # -# For example, default value "socket:getfqdn" means that result from getfqdn() of "socket" +# For example, default value "socket.getfqdn" means that result from getfqdn() of "socket" # package will be used as hostname. # # No argument should be required in the function specified. -# If using IP address as hostname is preferred, use value ``airflow.utils.net:get_host_ip_address`` -hostname_callable = socket:getfqdn +# If using IP address as hostname is preferred, use value ``airflow.utils.net.get_host_ip_address`` +hostname_callable = socket.getfqdn # Default timezone in case supplied date times are naive # can be utc (default), system, or any IANA timezone string (e.g. Europe/Amsterdam) default_timezone = utc # The executor class that airflow should use. Choices include -# SequentialExecutor, LocalExecutor, CeleryExecutor, DaskExecutor, KubernetesExecutor -executor = SequentialExecutor +# ``SequentialExecutor``, ``LocalExecutor``, ``CeleryExecutor``, ``DaskExecutor``, +# ``KubernetesExecutor``, ``CeleryKubernetesExecutor`` or the +# full import path to the class when using a custom executor. +executor = SequentialExecutor # The SqlAlchemy connection string to the metadata database. # SqlAlchemy supports many different database engine, more information # their website -# sql_alchemy_conn = sqlite:////tmp/airflow.db +sql_alchemy_conn = sqlite:////usr/local/airflow/airflow.db # The encoding for the databases sql_engine_encoding = utf-8 +# Collation for ``dag_id``, ``task_id``, ``key`` columns in case they have different encoding. +# This is particularly useful in case of mysql with utf8mb4 encoding because +# primary keys for XCom table has too big size and ``sql_engine_collation_for_ids`` should +# be set to ``utf8mb3_general_ci``. +# sql_engine_collation_for_ids = + # If SqlAlchemy should pool database connections. sql_alchemy_pool_enabled = True @@ -90,8 +51,8 @@ sql_alchemy_pool_size = 5 # It follows then that the total number of simultaneous connections the pool will allow # is pool_size + max_overflow, # and the total number of "sleeping" connections the pool will allow is pool_size. -# max_overflow can be set to -1 to indicate no overflow limit; -# no limit will be placed on the total number of concurrent connections. Defaults to 10. +# max_overflow can be set to ``-1`` to indicate no overflow limit; +# no limit will be placed on the total number of concurrent connections. Defaults to ``10``. 
sql_alchemy_max_overflow = 10 # The SqlAlchemy pool recycle is the number of seconds a connection @@ -110,12 +71,19 @@ sql_alchemy_pool_pre_ping = True # SqlAlchemy supports databases with the concept of multiple schemas. sql_alchemy_schema = +# Import path for connect args in SqlAlchemy. Defaults to an empty dict. +# This is useful when you want to configure db engine args that SqlAlchemy won't parse +# in connection string. +# See https://docs.sqlalchemy.org/en/13/core/engines.html#sqlalchemy.create_engine.params.connect_args +# sql_alchemy_connect_args = + # The amount of parallelism as a setting to the executor. This defines # the max number of task instances that should run simultaneously # on this airflow installation parallelism = 32 # The number of task instances allowed to run concurrently by the scheduler +# in one DAG. Can be overridden by ``concurrency`` on DAG level. dag_concurrency = 16 # Are DAGs paused by default at creation @@ -124,27 +92,46 @@ dags_are_paused_at_creation = True # The maximum number of active DAG runs per DAG max_active_runs_per_dag = 16 -# Whether to load the examples that ship with Airflow. It's good to -# get started, but you probably want to set this to False in a production +# Whether to load the DAG examples that ship with Airflow. It's good to +# get started, but you probably want to set this to ``False`` in a production # environment load_examples = True -# Where your Airflow plugins are stored +# Whether to load the default connections that ship with Airflow. It's good to +# get started, but you probably want to set this to ``False`` in a production +# environment +load_default_connections = True + +# Path to the folder containing Airflow plugins plugins_folder = /usr/local/airflow/plugins +# Should tasks be executed via forking of the parent process ("False", +# the speedier option) or by spawning a new python process ("True" slow, +# but means plugin changes picked up by tasks straight away) +execute_tasks_new_python_interpreter = False + # Secret key to save connection passwords in the db -fernet_key = $FERNET_KEY +fernet_key = y_xj2l7hU5QKHGn-9o_T9-tIu-pUN1wvXom1ZanIN1w= # Whether to disable pickling dags -donot_pickle = False +donot_pickle = True # How long before timing out a python file import -dagbag_import_timeout = 30 +dagbag_import_timeout = 30.0 + +# Should a traceback be shown in the UI for dagbag import errors, +# instead of just the exception message +dagbag_import_error_tracebacks = True + +# If tracebacks are shown, how many entries from the traceback should be shown +dagbag_import_error_traceback_depth = 2 # How long before timing out a DagFileProcessor, which processes a dag file dag_file_processor_timeout = 50 -# The class to use for running task instances in a subprocess +# The class to use for running task instances in a subprocess. +# Choices include StandardTaskRunner, CgroupTaskRunner or the full import path to the class +# when using a custom task runner. task_runner = StandardTaskRunner # If set, tasks without a ``run_as_user`` argument will be run with this user @@ -154,17 +141,13 @@ default_impersonation = # What security module to use (for example kerberos) security = -# If set to False enables some unsecure features like Charts and Ad Hoc Queries. -# In 2.0 will default to True. -secure_mode = False - # Turn unit test mode on (overwrites many configuration options with test # values at runtime) unit_test_mode = False # Whether to enable pickling for xcom (note that this is insecure and allows for -# RCE exploits). 
This will be deprecated in Airflow 2.0 (be forced to False). -enable_xcom_pickling = True +# RCE exploits). +enable_xcom_pickling = False # When a task is killed forcefully, this is the amount of time in seconds that # it has to cleanup after it is sent a SIGTERM, before it is SIGKILLED @@ -173,10 +156,7 @@ killed_task_cleanup_time = 60 # Whether to override params with dag_run.conf. If you pass some key-value pairs # through ``airflow dags backfill -c`` or # ``airflow dags trigger -c``, the key-value pairs will override the existing ones in params. -dag_run_conf_overrides_params = False - -# Worker initialisation check to validate Metadata Database connection -worker_precheck = False +dag_run_conf_overrides_params = True # When discovering DAGs, ignore any files that don't contain the strings ``DAG`` and ``airflow``. dag_discovery_safe_mode = True @@ -184,17 +164,166 @@ dag_discovery_safe_mode = True # The number of retries each task is going to have by default. Can be overridden at dag or task level. default_task_retries = 0 -# Whether to serialises DAGs and persist them in DB. -# If set to True, Webserver reads from DB instead of parsing DAG files -# More details: https://airflow.apache.org/docs/stable/dag-serialization.html -store_serialized_dags = False - # Updating serialized DAG can not be faster than a minimum interval to reduce database write rate. min_serialized_dag_update_interval = 30 +# Fetching serialized DAG can not be faster than a minimum interval to reduce database +# read rate. This config controls when your DAGs are updated in the Webserver +min_serialized_dag_fetch_interval = 10 + +# Whether to persist DAG files code in DB. +# If set to True, Webserver reads file contents from DB instead of +# trying to access files in a DAG folder. +# Example: store_dag_code = False +# store_dag_code = + +# Maximum number of Rendered Task Instance Fields (Template Fields) per task to store +# in the Database. +# All the template_fields for each of Task Instance are stored in the Database. +# Keeping this number small may cause an error when you try to view ``Rendered`` tab in +# TaskInstance view for older tasks. +max_num_rendered_ti_fields_per_task = 30 + # On each dagrun check against defined SLAs check_slas = True +# Path to custom XCom class that will be used to store and resolve operators results +# Example: xcom_backend = path.to.CustomXCom +xcom_backend = airflow.models.xcom.BaseXCom + +# By default Airflow plugins are lazily-loaded (only loaded when required). Set it to ``False``, +# if you want to load plugins whenever 'airflow' is invoked via cli or loaded from module. +lazy_load_plugins = True + +# By default Airflow providers are lazily-discovered (discovery and imports happen only when required). +# Set it to False, if you want to discover providers whenever 'airflow' is invoked via cli or +# loaded from module. +lazy_discover_providers = True + +# Number of times the code should be retried in case of DB Operational Errors. +# Not all transactions will be retried as it can cause undesired state. +# Currently it is only used in ``DagFileProcessor.process_file`` to retry ``dagbag.sync_to_db``. +max_db_retries = 3 + +[logging] +# The folder where airflow should store its log files +# This path must be absolute +base_log_folder = /usr/local/airflow/logs + +# Airflow can store logs remotely in AWS S3, Google Cloud Storage or Elastic Search. +# Set this to True if you want to enable remote logging. 
+remote_logging = False
+
+# Users must supply an Airflow connection id that provides access to the storage
+# location.
+remote_log_conn_id =
+
+# Path to Google Credential JSON file. If omitted, authorization based on `the Application Default
+# Credentials
+# <https://cloud.google.com/docs/authentication/production#finding_credentials_automatically>`__ will
+# be used.
+google_key_path =
+
+# Storage bucket URL for remote logging
+# S3 buckets should start with "s3://"
+# Cloudwatch log groups should start with "cloudwatch://"
+# GCS buckets should start with "gs://"
+# WASB buckets should start with "wasb" just to help Airflow select correct handler
+# Stackdriver logs should start with "stackdriver://"
+remote_base_log_folder =
+
+# Use server-side encryption for logs stored in S3
+encrypt_s3_logs = False
+
+# Logging level
+logging_level = INFO
+
+# Logging level for Flask-appbuilder UI
+fab_logging_level = WARN
+
+# Logging class
+# Specify the class that will specify the logging configuration
+# This class has to be on the python classpath
+# Example: logging_config_class = my.path.default_local_settings.LOGGING_CONFIG
+logging_config_class =
+
+# Flag to enable/disable Colored logs in Console
+# Colour the logs when the controlling terminal is a TTY.
+colored_console_log = True
+
+# Log format for when Colored logs is enabled
+colored_log_format = [%%(blue)s%%(asctime)s%%(reset)s] {%%(blue)s%%(filename)s:%%(reset)s%%(lineno)d} %%(log_color)s%%(levelname)s%%(reset)s - %%(log_color)s%%(message)s%%(reset)s
+colored_formatter_class = airflow.utils.log.colored_log.CustomTTYColoredFormatter
+
+# Format of Log line
+log_format = [%%(asctime)s] {%%(filename)s:%%(lineno)d} %%(levelname)s - %%(message)s
+simple_log_format = %%(asctime)s %%(levelname)s - %%(message)s
+
+# Specify prefix pattern like mentioned below with stream handler TaskHandlerWithCustomFormatter
+# Example: task_log_prefix_template = {ti.dag_id}-{ti.task_id}-{execution_date}-{try_number}
+task_log_prefix_template =
+
+# Formatting for how airflow generates file names/paths for each task run.
+log_filename_template = {{ ti.dag_id }}/{{ ti.task_id }}/{{ ts }}/{{ try_number }}.log
+
+# Formatting for how airflow generates file names for log
+log_processor_filename_template = {{ filename }}.log
+
+# full path of dag_processor_manager logfile
+dag_processor_manager_log_location = /usr/local/airflow/logs/dag_processor_manager/dag_processor_manager.log
+
+# Name of handler to read task instance logs.
+# Defaults to use ``task`` handler.
+task_log_reader = task
+
+# A comma-separated list of third-party logger names that will be configured to print messages to
+# consoles.
+# Example: extra_loggers = connexion,sqlalchemy
+extra_loggers =
+
+[metrics]
+
+# StatsD (https://github.com/etsy/statsd) integration settings.
+# Enables sending metrics to StatsD.
+statsd_on = False
+statsd_host = localhost
+statsd_port = 8125
+statsd_prefix = airflow
+
+# If you want to avoid sending all the available metrics to StatsD,
+# you can configure an allow list of prefixes (comma separated) to send only the metrics that
+# start with the elements of the list (e.g: "scheduler,executor,dagrun")
+statsd_allow_list =
+
+# A function that validates the statsd stat name, applies changes to the stat name if necessary and
+# returns the transformed stat name.
+#
+# The function should have the following signature:
+# def func_name(stat_name: str) -> str:
+stat_name_handler =
+
+# To enable datadog integration to send airflow metrics.
+statsd_datadog_enabled = False
+
+# List of datadog tags attached to all metrics (e.g: key1:value1,key2:value2)
+statsd_datadog_tags =
+
+# If you want to utilise your own custom Statsd client, set the relevant
+# module path below.
+# Note: The module path must exist on your PYTHONPATH for Airflow to pick it up
+# statsd_custom_client_path =
+
+[secrets]
+# Full class name of secrets backend to enable (will precede env vars and metastore in search path)
+# Example: backend = airflow.providers.amazon.aws.secrets.systems_manager.SystemsManagerParameterStoreBackend
+backend =
+
+# The backend_kwargs param is loaded into a dictionary and passed to __init__ of secrets backend class.
+# See documentation for the secrets backend you are using. JSON is expected.
+# Example for AWS Systems Manager ParameterStore:
+# ``{"connections_prefix": "/airflow/connections", "profile_name": "default"}``
+backend_kwargs =
+
 [cli]
 # In what way should the cli access the API. The LocalClient will use the
 # database directly, while the json_client will use the api running on the
@@ -207,13 +336,47 @@ api_client = airflow.api.client.local_client
 endpoint_url = http://localhost:8080
 
 [debug]
-# Used only with DebugExecutor. If set to True DAG will fail with first
+# Used only with ``DebugExecutor``. If set to ``True`` DAG will fail with first
 # failed task. Helpful for debugging purposes.
 fail_fast = False
 
 [api]
-# How to authenticate users of the API
-auth_backend = airflow.api.auth.backend.default
+# Enables the deprecated experimental API. Please note that these APIs do not have access control.
+# The authenticated user has full access.
+#
+# .. warning::
+#
+#   This `Experimental REST API <https://airflow.readthedocs.io/en/latest/rest-api-ref.html>`__ is
+#   deprecated since version 2.0. Please consider using
+#   `the Stable REST API <https://airflow.readthedocs.io/en/latest/stable-rest-api-ref.html>`__.
+#   For more information on migration, see
+#   `UPDATING.md <https://github.com/apache/airflow/blob/master/UPDATING.md>`_
+enable_experimental_api = False
+
+# How to authenticate users of the API. See
+# https://airflow.apache.org/docs/stable/security.html for possible values.
+# ("airflow.api.auth.backend.default" allows all requests for historic reasons)
+auth_backend = airflow.api.auth.backend.deny_all
+
+# Used to set the maximum page limit for API requests
+maximum_page_limit = 100
+
+# Used to set the default page limit when limit is zero. A default limit
+# of 100 is set on OpenApi spec. However, this particular default limit
+# only works when limit is set equal to zero(0) from API requests.
+# If no limit is supplied, the OpenApi spec default is used.
+fallback_page_limit = 100
+
+# The intended audience for JWT token credentials used for authorization. This value must match on the client and server sides. If empty, audience will not be tested.
+# Example: google_oauth2_audience = project-id-random-value.apps.googleusercontent.com
+google_oauth2_audience =
+
+# Path to Google Cloud Service Account key file (JSON). If omitted, authorization based on
+# `the Application Default Credentials
+# <https://cloud.google.com/docs/authentication/production#finding_credentials_automatically>`__ will
+# be used.
+# Example: google_key_path = /files/service-account-json
+google_key_path =
 
 [lineage]
 # what lineage backend to use
@@ -235,16 +398,30 @@ default_ram = 512
 default_disk = 512
 default_gpus = 0
 
+# Whether to allow passing additional/unused arguments (args, kwargs) to the BaseOperator operator.
+# If set to False, an exception will be thrown, otherwise only the console message will be displayed.
+allow_illegal_arguments = False + [hive] # Default mapreduce queue for HiveOperator tasks default_hive_mapred_queue = +# Template for mapred_job_name in HiveOperator, supports the following named parameters +# hostname, dag_id, task_id, execution_date +# mapred_job_name_template = + [webserver] # The base url of your website as airflow cannot guess what domain or # cname you are using. This is used in automated emails that # airflow sends to point links to the right web server base_url = http://localhost:8080 +# Default timezone to display all dates in the UI, can be UTC, system, or +# any IANA timezone string (e.g. Europe/Amsterdam). If left empty the +# default value of core/default_timezone will be used +# Example: default_ui_timezone = America/New_York +default_ui_timezone = UTC + # The ip specified when starting the web server web_server_host = 0.0.0.0 @@ -273,9 +450,13 @@ worker_refresh_batch_size = 1 # Number of seconds to wait before refreshing a batch of workers. worker_refresh_interval = 30 +# If set to True, Airflow will track files in plugins_folder directory. When it detects changes, +# then reload the gunicorn. +reload_on_plugin_change = False + # Secret key used to run your flask app # It should be as random as possible -secret_key = temporary_key +secret_key = 5b5GOkm5PddhiGyFsoEZFw== # Number of workers to run the Gunicorn web server workers = 4 @@ -290,8 +471,13 @@ access_logfile = - # Log files for the gunicorn webserver. '-' means log to stderr. error_logfile = - +# Access log format for gunicorn webserver. +# default format is %%(h)s %%(l)s %%(u)s %%(t)s "%%(r)s" %%(s)s %%(b)s "%%(f)s" "%%(a)s" +# documentation - https://docs.gunicorn.org/en/stable/settings.html#access-log-format +access_logformat = + # Expose the configuration file in the web server -expose_config = True +expose_config = False # Expose hostname in the web server expose_hostname = True @@ -299,26 +485,11 @@ expose_hostname = True # Expose stacktrace in the web server expose_stacktrace = True -# Set to true to turn on authentication: -# https://airflow.apache.org/security.html#web-authentication -authenticate = False - -# Filter the list of dags by owner name (requires authentication to be enabled) -filter_by_owner = False - -# Filtering mode. Choices include user (default) and ldapgroup. -# Ldap group filtering requires using the ldap backend -# -# Note that the ldap server needs the "memberOf" overlay to be set up -# in order to user the ldapgroup mode. -owner_mode = user - -# Default DAG view. Valid values are: -# tree, graph, duration, gantt, landing_times +# Default DAG view. Valid values are: ``tree``, ``graph``, ``duration``, ``gantt``, ``landing_times`` dag_default_view = tree -# "Default DAG orientation. Valid values are:" -# LR (Left->Right), TB (Top->Bottom), RL (Right->Left), BT (Bottom->Top) +# Default DAG orientation. 
Valid values are: +# ``LR`` (Left->Right), ``TB`` (Top->Bottom), ``RL`` (Right->Left), ``BT`` (Bottom->Top) dag_orientation = LR # Puts the webserver in demonstration mode; blurs the names of Operators for @@ -345,11 +516,8 @@ hide_paused_dags_by_default = False # Consistent page size across all listing views in the UI page_size = 100 -# Use FAB-based webserver with RBAC feature -rbac = False - # Define the color of navigation bar -navbar_color = #007A87 +navbar_color = #fff # Default dagrun to show in UI default_dag_run_display_number = 25 @@ -377,7 +545,7 @@ proxy_fix_x_prefix = 1 cookie_secure = False # Set samesite policy on session cookie -cookie_samesite = +cookie_samesite = Lax # Default setting for wrap toggle on DAG code and TI log views. default_wrap = False @@ -392,20 +560,30 @@ x_frame_enabled = True # Unique ID of your account in the analytics tool # analytics_id = +# 'Recent Tasks' stats will show for old DagRuns if set +show_recent_stats_for_completed_runs = True + # Update FAB permissions and sync security manager roles # on webserver startup update_fab_perms = True -# Minutes of non-activity before logged out from UI -# 0 means never get forcibly logged out -force_log_out_after = 0 - -# The UI cookie lifetime in days -session_lifetime_days = 30 +# The UI cookie lifetime in minutes. User will be logged out from UI after +# ``session_lifetime_minutes`` of non-activity +session_lifetime_minutes = 43200 [email] + +# Configuration email backend and whether to +# send email alerts on retry or failure +# Email backend to use email_backend = airflow.utils.email.send_email_smtp +# Whether email alerts should be sent when a task is retried +default_email_on_retry = True + +# Whether email alerts should be sent when a task failed +default_email_on_failure = True + [smtp] # If you want airflow to send emails on retries, failure, and you want to use @@ -420,12 +598,29 @@ smtp_ssl = False # smtp_password = smtp_port = 25 smtp_mail_from = airflow@example.com +smtp_timeout = 30 +smtp_retry_limit = 5 [sentry] -# Sentry (https://docs.sentry.io) integration +# Sentry (https://docs.sentry.io) integration. Here you can supply +# additional configuration options based on the Python platform. See: +# https://docs.sentry.io/error-reporting/configuration/?platform=python. +# Unsupported options: ``integrations``, ``in_app_include``, ``in_app_exclude``, +# ``ignore_errors``, ``before_breadcrumb``, ``before_send``, ``transport``. +# Enable error reporting to Sentry +sentry_on = false sentry_dsn = +[celery_kubernetes_executor] + +# This section only applies if you are using the ``CeleryKubernetesExecutor`` in +# ``[core]`` section above +# Define when to send a task to ``KubernetesExecutor`` when using ``CeleryKubernetesExecutor``. +# When the queue of a task is ``kubernetes_queue``, the task is executed via ``KubernetesExecutor``, +# otherwise via ``CeleryExecutor`` +kubernetes_queue = kubernetes + [celery] # This section only applies if you are using the CeleryExecutor in @@ -437,7 +632,7 @@ celery_app_name = airflow.executors.celery_executor # ``airflow celery worker`` command. 
This defines the number of task instances that # a worker will take, so size up your workers based on the resources on # your worker box and the nature of your tasks -worker_concurrency = 16 +worker_concurrency = 8 # The maximum and minimum concurrency that will be used when starting workers with the # ``airflow celery worker`` command (always keep minimum processes, but grow @@ -446,7 +641,17 @@ worker_concurrency = 16 # If autoscale option is available, worker_concurrency will be ignored. # http://docs.celeryproject.org/en/latest/reference/celery.bin.worker.html#cmdoption-celery-worker-autoscale # Example: worker_autoscale = 16,12 -worker_autoscale = 16,12 +# worker_autoscale = + +# Used to increase the number of tasks that a worker prefetches which can improve performance. +# The number of processes multiplied by worker_prefetch_multiplier is the number of tasks +# that are prefetched by a worker. A value greater than 1 can result in tasks being unnecessarily +# blocked if there are multiple workers and one worker prefetches tasks that sit behind long +# running tasks while another worker has unutilized processes that are unable to process the already +# claimed blocked tasks. +# https://docs.celeryproject.org/en/stable/userguide/optimizing.html#prefetch-limits +# Example: worker_prefetch_multiplier = 1 +# worker_prefetch_multiplier = # When you start an airflow worker, airflow starts a tiny web server # subprocess to serve the workers local log files to the airflow main @@ -455,11 +660,14 @@ worker_autoscale = 16,12 # visible from the main web server to connect into the workers. worker_log_server_port = 8793 +# Umask that will be used when starting workers with the ``airflow celery worker`` +# in daemon mode. This control the file-creation mode mask which determines the initial +# value of file permission bits for newly created files. +worker_umask = 0o077 + # The Celery broker URL. Celery supports RabbitMQ, Redis and experimentally -# a sqlalchemy database. Refer to the Celery documentation for more -# information. -# http://docs.celeryproject.org/en/latest/userguide/configuration.html#broker-settings -broker_url = redis://redis:6379/1 +# a sqlalchemy database. Refer to the Celery documentation for more information. +broker_url = redis://redis:6379/0 # The Celery result_backend. When a job finishes, it needs to update the # metadata of the job. Therefore it will post a message on a message bus, @@ -467,10 +675,10 @@ broker_url = redis://redis:6379/1 # This status is used by the scheduler to update the state of the task # The use of a database is highly recommended # http://docs.celeryproject.org/en/latest/userguide/configuration.html#task-result-backend-settings -result_backend = db+postgresql://airflow:airflow@postgres/airflow +result_backend = db+postgresql://postgres:airflow@postgres/airflow # Celery Flower is a sweet UI for Celery. Airflow has a shortcut to start -# it ``airflow flower``. This defines the IP that Celery Flower runs on +# it ``airflow celery flower``. This defines the IP that Celery Flower runs on flower_host = 0.0.0.0 # The root URL for Flower @@ -494,15 +702,13 @@ sync_parallelism = 0 # Import path for celery configuration options celery_config_options = airflow.config_templates.default_celery.DEFAULT_CELERY_CONFIG - -# In case of using SSL ssl_active = False ssl_key = ssl_cert = ssl_cacert = # Celery Pool implementation. -# Choices include: prefork (default), eventlet, gevent or solo. 
+# Choices include: ``prefork`` (default), ``eventlet``, ``gevent`` or ``solo``.
 # See:
 # https://docs.celeryproject.org/en/latest/userguide/workers.html#concurrency
 # https://docs.celeryproject.org/en/latest/userguide/concurrency/eventlet.html
@@ -510,7 +716,23 @@ pool = prefork
 
 # The number of seconds to wait before timing out ``send_task_to_executor`` or
 # ``fetch_celery_task_state`` operations.
-operation_timeout = 2
+operation_timeout = 1.0
+
+# Celery task will report its status as 'started' when the task is executed by a worker.
+# This is used in Airflow to keep track of the running tasks and if a Scheduler is restarted
+# or run in HA mode, it can adopt the orphan tasks launched by previous SchedulerJob.
+task_track_started = True
+
+# Time in seconds after which Adopted tasks are cleared by CeleryExecutor. This is helpful to clear
+# stalled tasks.
+task_adoption_timeout = 600
+
+# The Maximum number of retries for publishing task messages to the broker when failing
+# due to ``AirflowTaskTimeout`` error before giving up and marking Task as failed.
+task_publish_max_retries = 3
+
+# Worker initialisation check to validate Metadata Database connection
+worker_precheck = False
 
 [celery_broker_transport_options]
 
@@ -545,15 +767,15 @@ tls_key =
 # listen (in seconds).
 job_heartbeat_sec = 5
 
+# How often (in seconds) to check and tidy up 'running' TaskInstances
+# that no longer have a matching DagRun
+clean_tis_without_dagrun_interval = 15.0
+
 # The scheduler constantly tries to trigger new tasks (look at the
 # scheduler section in the docs for more information). This defines
 # how often the scheduler should run (in seconds).
 scheduler_heartbeat_sec = 5
 
-# After how much time should the scheduler terminate in seconds
-# -1 indicates to run continuously (see also num_runs)
-run_duration = -1
-
 # The number of times to try to schedule each DAG file
 # -1 indicates unlimited number
 num_runs = -1
@@ -570,10 +792,16 @@ dag_dir_list_interval = 300
 
 # How often should stats be printed to the logs. Setting to 0 will disable printing stats
 print_stats_interval = 30
 
+# How often (in seconds) should pool usage stats be sent to statsd (if statsd_on is enabled)
+pool_metrics_interval = 5.0
+
 # If the last scheduler heartbeat happened more than scheduler_health_check_threshold
 # ago (in seconds), scheduler is considered unhealthy.
 # This is used by the health check in the "/health" endpoint
 scheduler_health_check_threshold = 30
+
+# How often (in seconds) should the scheduler check for orphaned tasks and SchedulerJobs
+orphaned_tasks_check_interval = 300.0
 child_process_log_directory = /usr/local/airflow/logs/scheduler
 
 # Local task jobs periodically heartbeat to the DB. If the job has
@@ -581,10 +809,10 @@ child_process_log_directory = /usr/local/airflow/logs/scheduler
 # associated task instance as failed and will re-schedule the task.
 scheduler_zombie_task_threshold = 300
 
-# Turn off scheduler catchup by setting this to False.
+# Turn off scheduler catchup by setting this to ``False``.
 # Default behavior is unchanged and
 # Command Line Backfills still work, but the scheduler
-# will not do scheduler catchup if this is False,
+# will not do scheduler catchup if this is ``False``,
 # however it can be set on a per DAG basis in the
 # DAG definition (catchup)
 catchup_by_default = True
@@ -599,21 +827,32 @@ catchup_by_default = True
 # Set this to 0 for no limit (not advised)
 max_tis_per_query = 512
 
-# Statsd (https://github.com/etsy/statsd) integration settings
-statsd_on = False
-statsd_host = localhost
-statsd_port = 8125
-statsd_prefix = airflow
+# Should the scheduler issue ``SELECT ... FOR UPDATE`` in relevant queries.
+# If this is set to False then you should not run more than a single
+# scheduler at once
+use_row_level_locking = True
 
-# If you want to avoid send all the available metrics to StatsD,
-# you can configure an allow list of prefixes to send only the metrics that
-# start with the elements of the list (e.g: scheduler,executor,dagrun)
-statsd_allow_list =
+# Max number of DAGs to create DagRuns for per scheduler loop
+#
+# Default: 10
+# max_dagruns_to_create_per_loop =
+
+# How many DagRuns should a scheduler examine (and lock) when scheduling
+# and queuing tasks.
+#
+# Default: 20
+# max_dagruns_per_loop_to_schedule =
+
+# Should the Task supervisor process perform a "mini scheduler" to attempt to schedule more tasks of the
+# same DAG. Leaving this on will mean tasks in the same DAG execute quicker, but might starve out other
+# dags in some circumstances
+#
+# Default: True
+# schedule_after_task_execution =
 
-# The scheduler can run multiple threads in parallel to schedule dags.
-# This defines how many threads will run.
-max_threads = 2
-authenticate = False
+# The scheduler can run multiple processes in parallel to parse dags.
+# This defines how many processes will run.
+parsing_processes = 2
 
 # Turn off scheduler use of cron intervals by setting this to False.
 # DAGs submitted manually in the web UI or with trigger_dag will still run.
@@ -623,70 +862,6 @@ use_job_schedule = True
 
 # Only has effect if schedule_interval is set to None in DAG
 allow_trigger_in_future = False
 
-[ldap]
-# set this to ldaps://<your.ldap.server>:<port>
-uri =
-user_filter = objectClass=*
-user_name_attr = uid
-group_member_attr = memberOf
-superuser_filter =
-data_profiler_filter =
-bind_user = cn=Manager,dc=example,dc=com
-bind_password = insecure
-basedn = dc=example,dc=com
-cacert = /etc/ca/ldap_ca.crt
-search_scope = LEVEL
-
-# This setting allows the use of LDAP servers that either return a
-# broken schema, or do not return a schema.
-ignore_malformed_schema = False
-
-[mesos]
-# Mesos master address which MesosExecutor will connect to.
-master = localhost:5050
-
-# The framework name which Airflow scheduler will register itself as on mesos
-framework_name = Airflow
-
-# Number of cpu cores required for running one task instance using
-# 'airflow run <dag_id> <task_id> <execution_date> --local -p <pickle_id>'
-# command on a mesos slave
-task_cpu = 1
-
-# Memory in MB required for running one task instance using
-# 'airflow run <dag_id> <task_id> <execution_date> --local -p <pickle_id>'
-# command on a mesos slave
-task_memory = 256
-
-# Enable framework checkpointing for mesos
-# See http://mesos.apache.org/documentation/latest/slave-recovery/
-checkpoint = False
-
-# Failover timeout in milliseconds.
-# When checkpointing is enabled and this option is set, Mesos waits
-# until the configured timeout for
-# the MesosExecutor framework to re-register after a failover. Mesos
-# shuts down running tasks if the
-# MesosExecutor framework fails to re-register within this timeframe.
-# Example: failover_timeout = 604800 -# failover_timeout = - -# Enable framework authentication for mesos -# See http://mesos.apache.org/documentation/latest/configuration/ -authenticate = False - -# Mesos credentials, if authentication is enabled -# Example: default_principal = admin -# default_principal = -# Example: default_secret = admin -# default_secret = - -# Optional Docker Image to run on slave before running the command -# This image should be accessible from mesos slave i.e mesos slave -# should be able to pull this docker image before executing the command. -# Example: docker_image_slave = puckel/docker-airflow -# docker_image_slave = - [kerberos] ccache = /tmp/airflow_krb5_ccache @@ -703,12 +878,15 @@ api_rev = v3 # UI to hide sensitive variable fields when set to True hide_sensitive_variable_fields = True +# A comma-separated list of sensitive keywords to look for in variables names. +sensitive_variable_fields = + [elasticsearch] # Elasticsearch host host = # Format of the log_id, which is used to query for a given tasks logs -log_id_template = {{dag_id}}-{{task_id}}-{{execution_date}}-{{try_number}} +log_id_template = {dag_id}-{task_id}-{execution_date}-{try_number} # Used to mark the end of a log stream for a task end_of_log_mark = end_of_log @@ -732,175 +910,35 @@ use_ssl = False verify_certs = True [kubernetes] -# The repository, tag and imagePullPolicy of the Kubernetes Image for the Worker to Run -worker_container_repository = -worker_container_tag = -worker_container_image_pull_policy = IfNotPresent +# Path to the YAML pod file. If set, all other kubernetes-related fields are ignored. +pod_template_file = -# If True (default), worker pods will be deleted upon termination -delete_worker_pods = True +# The repository of the Kubernetes Image for the Worker to Run +worker_container_repository = -# Number of Kubernetes Worker Pod creation calls per scheduler loop -worker_pods_creation_batch_size = 1 +# The tag of the Kubernetes Image for the Worker to Run +worker_container_tag = # The Kubernetes namespace where airflow workers should be created. Defaults to ``default`` namespace = default -# The name of the Kubernetes ConfigMap containing the Airflow Configuration (this file) -# Example: airflow_configmap = airflow-configmap -airflow_configmap = +# If True, all worker pods will be deleted upon termination +delete_worker_pods = True -# The name of the Kubernetes ConfigMap containing ``airflow_local_settings.py`` file. -# -# For example: -# -# ``airflow_local_settings_configmap = "airflow-configmap"`` if you have the following ConfigMap. -# -# ``airflow-configmap.yaml``: -# -# .. code-block:: yaml -# -# --- -# apiVersion: v1 -# kind: ConfigMap -# metadata: -# name: airflow-configmap -# data: -# airflow_local_settings.py: | -# def pod_mutation_hook(pod): -# ... -# airflow.cfg: | -# ... 
-# Example: airflow_local_settings_configmap = airflow-configmap -airflow_local_settings_configmap = - -# For docker image already contains DAGs, this is set to ``True``, and the worker will -# search for dags in dags_folder, -# otherwise use git sync or dags volume claim to mount DAGs -dags_in_image = False - -# For either git sync or volume mounted DAGs, the worker will look in this subpath for DAGs -dags_volume_subpath = - -# For DAGs mounted via a volume claim (mutually exclusive with git-sync and host path) -dags_volume_claim = - -# For volume mounted logs, the worker will look in this subpath for logs -logs_volume_subpath = - -# A shared volume claim for the logs -logs_volume_claim = - -# For DAGs mounted via a hostPath volume (mutually exclusive with volume claim and git-sync) -# Useful in local environment, discouraged in production -dags_volume_host = - -# A hostPath volume for the logs -# Useful in local environment, discouraged in production -logs_volume_host = - -# A list of configMapsRefs to envFrom. If more than one configMap is -# specified, provide a comma separated list: configmap_a,configmap_b -env_from_configmap_ref = - -# A list of secretRefs to envFrom. If more than one secret is -# specified, provide a comma separated list: secret_a,secret_b -env_from_secret_ref = - -# Git credentials and repository for DAGs mounted via Git (mutually exclusive with volume claim) -git_repo = -git_branch = -git_subpath = - -# The specific rev or hash the git_sync init container will checkout -# This becomes GIT_SYNC_REV environment variable in the git_sync init container for worker pods -git_sync_rev = - -# Use git_user and git_password for user authentication or git_ssh_key_secret_name -# and git_ssh_key_secret_key for SSH authentication -git_user = -git_password = -git_sync_root = /git -git_sync_dest = repo - -# Mount point of the volume if git-sync is being used. -# i.e. /usr/local/airflow/dags -git_dags_folder_mount_point = - -# To get Git-sync SSH authentication set up follow this format -# -# ``airflow-secrets.yaml``: -# -# .. code-block:: yaml -# -# --- -# apiVersion: v1 -# kind: Secret -# metadata: -# name: airflow-secrets -# data: -# # key needs to be gitSshKey -# gitSshKey: -# Example: git_ssh_key_secret_name = airflow-secrets -git_ssh_key_secret_name = - -# To get Git-sync SSH authentication set up follow this format -# -# ``airflow-configmap.yaml``: -# -# .. code-block:: yaml -# -# --- -# apiVersion: v1 -# kind: ConfigMap -# metadata: -# name: airflow-configmap -# data: -# known_hosts: | -# github.com ssh-rsa <...> -# airflow.cfg: | -# ... -# Example: git_ssh_known_hosts_configmap_name = airflow-configmap -git_ssh_known_hosts_configmap_name = - -# To give the git_sync init container credentials via a secret, create a secret -# with two fields: GIT_SYNC_USERNAME and GIT_SYNC_PASSWORD (example below) and -# add ``git_sync_credentials_secret = `` to your airflow config under the -# ``kubernetes`` section -# -# Secret Example: -# -# .. code-block:: yaml -# -# --- -# apiVersion: v1 -# kind: Secret -# metadata: -# name: git-credentials -# data: -# GIT_SYNC_USERNAME: -# GIT_SYNC_PASSWORD: -git_sync_credentials_secret = - -# For cloning DAGs from git repositories into volumes: https://github.com/kubernetes/git-sync -git_sync_container_repository = k8s.gcr.io/git-sync -git_sync_container_tag = v3.1.1 -git_sync_init_container_name = git-sync-clone -git_sync_run_as_user = 65533 - -# The name of the Kubernetes service account to be associated with airflow workers, if any. 
-# Service accounts are required for workers that require access to secrets or cluster resources. -# See the Kubernetes RBAC documentation for more: -# https://kubernetes.io/docs/admin/authorization/rbac/ -worker_service_account_name = - -# Any image pull secrets to be given to worker pods, If more than one secret is -# required, provide a comma separated list: secret_a,secret_b -image_pull_secrets = - -# GCP Service Account Keys to be provided to tasks run on Kubernetes Executors -# Should be supplied in the format: key-name-1:key-path-1,key-name-2:key-path-2 -gcp_service_account_keys = +# If False (and delete_worker_pods is True), +# failed worker pods will not be deleted so users can investigate them. +delete_worker_pods_on_failure = False + +# Number of Kubernetes Worker Pod creation calls per scheduler loop. +# Note that the current default of "1" will only launch a single pod +# per-heartbeat. It is HIGHLY recommended that users increase this +# number to match the tolerance of their kubernetes cluster for +# better performance. +worker_pods_creation_batch_size = 1 + +# Allows users to launch pods in multiple namespaces. +# Will require creating a cluster-role for the scheduler +multi_namespace_mode = False # Use the service account kubernetes gives to pods to connect to kubernetes cluster. # It's intended for clients that expect to be running inside a pod running on kubernetes. @@ -910,81 +948,53 @@ in_cluster = True # When running with in_cluster=False change the default cluster_context or config_file # options to Kubernetes client. Leave blank these to use default behaviour like ``kubectl`` has. # cluster_context = -# config_file = -# Affinity configuration as a single line formatted JSON object. -# See the affinity model for top-level key names (e.g. ``nodeAffinity``, etc.): -# https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.12/#affinity-v1-core -affinity = - -# A list of toleration objects as a single line formatted JSON array -# See: -# https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.12/#toleration-v1-core -tolerations = +# Path to the kubernetes configfile to be used when ``in_cluster`` is set to False +# config_file = # Keyword parameters to pass while calling a kubernetes client core_v1_api methods # from Kubernetes Executor provided as a single line formatted JSON dictionary string. # List of supported params are similar for all core_v1_apis, hence a single config -# variable for all apis. -# See: -# https://raw.githubusercontent.com/kubernetes-client/python/master/kubernetes/client/apis/core_v1_api.py -# Note that if no _request_timeout is specified, the kubernetes client will wait indefinitely -# for kubernetes api responses, which will cause the scheduler to hang. -# The timeout is specified as [connect timeout, read timeout] -kube_client_request_args = {{"_request_timeout" : [60,60] }} - -# Specifies the uid to run the first process of the worker pods containers as -run_as_user = - -# Specifies a gid to associate with all containers in the worker pods -# if using a git_ssh_key_secret_name use an fs_group -# that allows for the key to be read, e.g. 65533 -fs_group = - -[kubernetes_node_selectors] - -# The Key-value pairs to be given to worker pods. -# The worker pods will be scheduled to the nodes of the specified key-value pairs. -# Should be supplied in the format: key = value - -[kubernetes_annotations] - -# The Key-value annotations pairs to be given to worker pods. 
-# Should be supplied in the format: key = value - -[kubernetes_environment_variables] - -# The scheduler sets the following environment variables into your workers. You may define as -# many environment variables as needed and the kubernetes launcher will set them in the launched workers. -# Environment variables in this section are defined as follows -# `` = `` -# -# For example if you wanted to set an environment variable with value `prod` and key -# ``ENVIRONMENT`` you would follow the following format: -# ENVIRONMENT = prod -# -# Additionally you may override worker airflow settings with the ``AIRFLOW__
__`` -# formatting as supported by airflow normally. - -[kubernetes_secrets] - -# The scheduler mounts the following secrets into your workers as they are launched by the -# scheduler. You may define as many secrets as needed and the kubernetes launcher will parse the -# defined secrets and mount them as secret environment variables in the launched workers. -# Secrets in this section are defined as follows -# `` = =`` -# -# For example if you wanted to mount a kubernetes secret key named ``postgres_password`` from the -# kubernetes secret object ``airflow-secret`` as the environment variable ``POSTGRES_PASSWORD`` into -# your workers you would follow the following format: -# ``POSTGRES_PASSWORD = airflow-secret=postgres_credentials`` -# -# Additionally you may override worker airflow settings with the ``AIRFLOW__
__`` -# formatting as supported by airflow normally. - -[kubernetes_labels] - -# The Key-value pairs to be given to worker pods. -# The worker pods will be given these static labels, as well as some additional dynamic labels -# to identify the task. -# Should be supplied in the format: ``key = value`` +# variable for all apis. See: +# https://raw.githubusercontent.com/kubernetes-client/python/41f11a09995efcd0142e25946adc7591431bfb2f/kubernetes/client/api/core_v1_api.py +kube_client_request_args = + +# Optional keyword arguments to pass to the ``delete_namespaced_pod`` kubernetes client +# ``core_v1_api`` method when using the Kubernetes Executor. +# This should be an object and can contain any of the options listed in the ``v1DeleteOptions`` +# class defined here: +# https://github.com/kubernetes-client/python/blob/41f11a09995efcd0142e25946adc7591431bfb2f/kubernetes/client/models/v1_delete_options.py#L19 +# Example: delete_option_kwargs = {"grace_period_seconds": 10} +delete_option_kwargs = + +# Enables TCP keepalive mechanism. This prevents Kubernetes API requests to hang indefinitely +# when idle connection is time-outed on services like cloud load balancers or firewalls. +enable_tcp_keepalive = False + +# When the `enable_tcp_keepalive` option is enabled, TCP probes a connection that has +# been idle for `tcp_keep_idle` seconds. +tcp_keep_idle = 120 + +# When the `enable_tcp_keepalive` option is enabled, if Kubernetes API does not respond +# to a keepalive probe, TCP retransmits the probe after `tcp_keep_intvl` seconds. +tcp_keep_intvl = 30 + +# When the `enable_tcp_keepalive` option is enabled, if Kubernetes API does not respond +# to a keepalive probe, TCP retransmits the probe `tcp_keep_cnt number` of times before +# a connection is considered to be broken. +tcp_keep_cnt = 6 + +[smart_sensor] +# When `use_smart_sensor` is True, Airflow redirects multiple qualified sensor tasks to +# smart sensor task. +use_smart_sensor = False + +# `shard_code_upper_limit` is the upper limit of `shard_code` value. The `shard_code` is generated +# by `hashcode % shard_code_upper_limit`. +shard_code_upper_limit = 10000 + +# The number of running smart sensor processes for each service. +shards = 5 + +# comma separated sensor classes support in smart_sensor. 
+sensors_enabled = NamedHivePartitionSensor
diff --git a/docker-compose-CeleryExecutor.yml b/docker-compose-CeleryExecutor.yml
index de4f5dac..af6ba37d 100644
--- a/docker-compose-CeleryExecutor.yml
+++ b/docker-compose-CeleryExecutor.yml
@@ -1,4 +1,4 @@
-version: '2.1'
+version: '2.2'
 services:
     redis:
         image: 'redis:5.0.5'
@@ -16,7 +16,7 @@ services:
             # - ./pgdata:/var/lib/postgresql/data/pgdata
 
     webserver:
-        image: puckel/docker-airflow:1.10.9
+        image: puckel/docker-airflow:2.0.0
         restart: always
         depends_on:
             - postgres
@@ -43,7 +43,7 @@
             retries: 3
 
     flower:
-        image: puckel/docker-airflow:1.10.9
+        image: puckel/docker-airflow:2.0.0
         restart: always
         depends_on:
            - redis
@@ -52,10 +52,10 @@
            # - REDIS_PASSWORD=redispass
        ports:
            - "5555:5555"
-        command: flower
+        command: celery flower
 
    scheduler:
-        image: puckel/docker-airflow:1.10.9
+        image: puckel/docker-airflow:2.0.0
        restart: always
        depends_on:
            - webserver
@@ -74,7 +74,7 @@
        command: scheduler
 
    worker:
-        image: puckel/docker-airflow:1.10.9
+        image: puckel/docker-airflow:2.0.0
        restart: always
        depends_on:
            - scheduler
@@ -89,4 +89,4 @@ services:
            # - POSTGRES_PASSWORD=airflow
            # - POSTGRES_DB=airflow
            # - REDIS_PASSWORD=redispass
-        command: worker
+        command: celery worker
diff --git a/docker-compose-LocalExecutor.yml b/docker-compose-LocalExecutor.yml
index 26e9e92e..33311c00 100644
--- a/docker-compose-LocalExecutor.yml
+++ b/docker-compose-LocalExecutor.yml
@@ -1,4 +1,4 @@
-version: '3.7'
+version: '3.8'
 services:
     postgres:
         image: postgres:9.6
@@ -12,7 +12,7 @@ services:
                 max-file: "3"
 
     webserver:
-        image: puckel/docker-airflow:1.10.9
+        image: puckel/docker-airflow:2.0.0
         restart: always
         depends_on:
             - postgres
diff --git a/script/entrypoint.sh b/script/entrypoint.sh
index 166f4837..862c9d55 100755
--- a/script/entrypoint.sh
+++ b/script/entrypoint.sh
@@ -109,19 +109,16 @@ fi
 
 case "$1" in
   webserver)
-    airflow initdb
+    airflow db init
+    airflow users create --username airflow --password airflow --firstname Peter --lastname Parker --role Admin --email spiderman@superhero.org
     if [ "$AIRFLOW__CORE__EXECUTOR" = "LocalExecutor" ] || [ "$AIRFLOW__CORE__EXECUTOR" = "SequentialExecutor" ]; then
       # With the "Local" and "Sequential" executors it should all run in one container.
      airflow scheduler &
    fi
    exec airflow webserver
    ;;
-  worker|scheduler)
-    # Give the webserver time to run initdb.
+  celery|scheduler)
+    # Give the webserver time to run db init. In Airflow 2.0 the worker and
+    # flower are both invoked as "celery <subcommand>", so one branch covers both.
     sleep 10
     exec airflow "$@"
     ;;
-  flower)
-    sleep 10
-    exec airflow "$@"
-    ;;
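As a quick smoke test of the upgrade — a sketch, assuming the image is built from the repository root and port 8080 is free:

    # Build the 2.0.0 image and confirm the Airflow version baked into it
    docker build --rm -t puckel/docker-airflow:2.0.0 .
    docker run --rm puckel/docker-airflow:2.0.0 airflow version

    # Bring up the LocalExecutor stack and probe the webserver health endpoint
    docker-compose -f docker-compose-LocalExecutor.yml up -d
    curl http://localhost:8080/health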