Compare commits

..

No commits in common. "main" and "simple_site" have entirely different histories.

128 changed files with 16 additions and 4017 deletions

View File

@@ -1,713 +0,0 @@
[defaults]
# (boolean) By default, Ansible will issue a warning when a warning is received from a task action (module or action plugin).
# These warnings can be silenced by adjusting this setting to False.
;action_warnings=True
# (list) Accept a list of cowsay templates that are 'safe' to use, set to an empty list if you want to enable all installed templates.
;cowsay_enabled_stencils=bud-frogs, bunny, cheese, daemon, default, dragon, elephant-in-snake, elephant, eyes, hellokitty, kitty, luke-koala, meow, milk, moofasa, moose, ren, sheep, small, stegosaurus, stimpy, supermilker, three-eyes, turkey, turtle, tux, udder, vader-koala, vader, www
# (string) Specify a custom cowsay path or swap in your cowsay implementation of choice.
;cowpath=
# (string) This allows you to choose a specific cowsay stencil for the banners or use 'random' to cycle through them.
;cow_selection=default
# (boolean) This option forces color mode even when running without a TTY or the "nocolor" setting is True.
force_color=True
# (path) The default root path for Ansible config files on the controller.
;home=~/.ansible
# (boolean) This setting allows suppressing colorizing output, which is used to give a better indication of failure and status information.
;nocolor=False
# (boolean) If you have cowsay installed but want to avoid the 'cows' (why????), use this.
;nocows=False
# (boolean) Sets the default value for the any_errors_fatal keyword, if True, Task failures will be considered fatal errors.
;any_errors_fatal=False
# (path) The password file to use for the become plugin. ``--become-password-file``.
# If executable, it will be run and the resulting stdout will be used as the password.
;become_password_file=
# (pathspec) Colon-separated paths in which Ansible will search for Become Plugins.
;become_plugins=/Users/fabioisti/.ansible/plugins/become:/usr/share/ansible/plugins/become
# (string) Chooses which cache plugin to use, the default 'memory' is ephemeral.
fact_caching=memory
# (string) Defines connection or path information for the cache plugin.
;fact_caching_connection=
# (string) Prefix to use for cache plugin files/tables.
;fact_caching_prefix=ansible_facts
# (integer) Expiration timeout for the cache plugin data.
fact_caching_timeout=86400
# (list) List of enabled callbacks, not all callbacks need enabling, but many of those shipped with Ansible do as we don't want them activated by default.
;callbacks_enabled=
# (string) When a collection is loaded that does not support the running Ansible version (with the collection metadata key `requires_ansible`).
;collections_on_ansible_version_mismatch=warning
# (pathspec) Colon-separated paths in which Ansible will search for collections content. Collections must be in nested *subdirectories*, not directly in these directories. For example, if ``COLLECTIONS_PATHS`` includes ``'{{ ANSIBLE_HOME ~ "/collections" }}'``, and you want to add ``my.collection`` to that directory, it must be saved as ``'{{ ANSIBLE_HOME ~ "/collections/ansible_collections/my/collection" }}'``.
collections_path=/Users/fabioisti/.ansible/collections:/usr/share/ansible/collections
# (boolean) A boolean to enable or disable scanning the sys.path for installed collections.
;collections_scan_sys_path=True
# (path) The password file to use for the connection plugin. ``--connection-password-file``.
;connection_password_file=
# (pathspec) Colon-separated paths in which Ansible will search for Action Plugins.
;action_plugins=/Users/fabioisti/.ansible/plugins/action:/usr/share/ansible/plugins/action
# (boolean) When enabled, this option allows lookup plugins (whether used in variables as ``{{lookup('foo')}}`` or as a loop as with_foo) to return data that is not marked 'unsafe'.
# By default, such data is marked as unsafe to prevent the templating engine from evaluating any jinja2 templating language, as this could represent a security risk. This option is provided to allow for backward compatibility, however, users should first consider adding allow_unsafe=True to any lookups that may be expected to contain data that may be run through the templating engine late.
;allow_unsafe_lookups=False
# (boolean) This controls whether an Ansible playbook should prompt for a login password. If using SSH keys for authentication, you probably do not need to change this setting.
;ask_pass=False
# (boolean) This controls whether an Ansible playbook should prompt for a vault password.
ask_vault_pass=True
# (pathspec) Colon-separated paths in which Ansible will search for Cache Plugins.
;cache_plugins=/Users/fabioisti/.ansible/plugins/cache:/usr/share/ansible/plugins/cache
# (pathspec) Colon-separated paths in which Ansible will search for Callback Plugins.
;callback_plugins=/Users/fabioisti/.ansible/plugins/callback:/usr/share/ansible/plugins/callback
# (pathspec) Colon-separated paths in which Ansible will search for Cliconf Plugins.
;cliconf_plugins=/Users/fabioisti/.ansible/plugins/cliconf:/usr/share/ansible/plugins/cliconf
# (pathspec) Colon-separated paths in which Ansible will search for Connection Plugins.
;connection_plugins=/Users/fabioisti/.ansible/plugins/connection:/usr/share/ansible/plugins/connection
# (boolean) Toggles debug output in Ansible. This is *very* verbose and can hinder multiprocessing. Debug output can also include secret information despite no_log settings being enabled, which means debug mode should not be used in production.
;debug=False
# (string) This indicates the command to use to spawn a shell under, which is required for Ansible's execution needs on a target. Users may need to change this in rare instances when shell usage is constrained, but in most cases, it may be left as is.
;executable=/bin/sh
# (pathspec) Colon-separated paths in which Ansible will search for Jinja2 Filter Plugins.
;filter_plugins=/Users/fabioisti/.ansible/plugins/filter:/usr/share/ansible/plugins/filter
# (boolean) This option controls if notified handlers run on a host even if a failure occurs on that host.
# When false, the handlers will not run if a failure has occurred on a host.
# This can also be set per play or on the command line. See Handlers and Failure for more details.
;force_handlers=False
# (integer) Maximum number of forks Ansible will use to execute tasks on target hosts.
;forks=5
# (string) This setting controls the default policy of fact gathering (facts discovered about remote systems).
# This option can be useful for those wishing to save fact gathering time. Both 'smart' and 'explicit' will use the cache plugin.
;gathering=implicit
# (string) This setting controls how duplicate definitions of dictionary variables (aka hash, map, associative array) are handled in Ansible.
# This does not affect variables whose values are scalars (integers, strings) or arrays.
# **WARNING**, changing this setting is not recommended as this is fragile and makes your content (plays, roles, collections) nonportable, leading to continual confusion and misuse. Don't change this setting unless you think you have an absolute need for it.
# We recommend avoiding reusing variable names and relying on the ``combine`` filter and ``vars`` and ``varnames`` lookups to create merged versions of the individual variables. In our experience, this is rarely needed and is a sign that too much complexity has been introduced into the data structures and plays.
# For some uses you can also look into custom vars_plugins to merge on input, even substituting the default ``host_group_vars`` that is in charge of parsing the ``host_vars/`` and ``group_vars/`` directories. Most users of this setting are only interested in inventory scope, but the setting itself affects all sources and makes debugging even harder.
# All playbooks and roles in the official examples repos assume the default for this setting.
# Changing the setting to ``merge`` applies across variable sources, but many sources will internally still overwrite the variables. For example ``include_vars`` will dedupe variables internally before updating Ansible, with 'last defined' overwriting previous definitions in same file.
# The Ansible project recommends you **avoid ``merge`` for new projects.**
# It is the intention of the Ansible developers to eventually deprecate and remove this setting, but it is being kept as some users do heavily rely on it. New projects should **avoid 'merge'**.
;hash_behaviour=replace
# (pathlist) Comma-separated list of Ansible inventory sources
;inventory=/etc/ansible/hosts
# (pathspec) Colon-separated paths in which Ansible will search for HttpApi Plugins.
;httpapi_plugins=/Users/fabioisti/.ansible/plugins/httpapi:/usr/share/ansible/plugins/httpapi
# (float) This sets the interval (in seconds) of Ansible internal processes polling each other. Lower values improve performance with large playbooks at the expense of extra CPU load. Higher values are more suitable for Ansible usage in automation scenarios when UI responsiveness is not required but CPU usage might be a concern.
# The default corresponds to the value hardcoded in Ansible <= 2.1
;internal_poll_interval=0.001
# (pathspec) Colon-separated paths in which Ansible will search for Inventory Plugins.
;inventory_plugins=/Users/fabioisti/.ansible/plugins/inventory:/usr/share/ansible/plugins/inventory
# (string) This is a developer-specific feature that allows enabling additional Jinja2 extensions.
# See the Jinja2 documentation for details. If you do not know what these do, you probably don't need to change this setting :)
;jinja2_extensions=[]
# (boolean) This option preserves variable types during template operations.
;jinja2_native=False
# (boolean) Enables/disables the cleaning up of the temporary files Ansible used to execute the tasks on the remote.
# If this option is enabled it will disable ``ANSIBLE_PIPELINING``.
;keep_remote_files=False
# (boolean) Controls whether callback plugins are loaded when running /usr/bin/ansible. This may be used to log activity from the command line, send notifications, and so on. Callback plugins are always loaded for ``ansible-playbook``.
;bin_ansible_callbacks=False
# (tmppath) Temporary directory for Ansible to use on the controller.
;local_tmp=/Users/fabioisti/.ansible/tmp
# (list) List of logger names to filter out of the log file.
;log_filter=
# (path) File to which Ansible will log on the controller.
# When not set the logging is disabled.
;log_path=
# (pathspec) Colon-separated paths in which Ansible will search for Lookup Plugins.
;lookup_plugins=/Users/fabioisti/.ansible/plugins/lookup:/usr/share/ansible/plugins/lookup
# (string) Sets the macro for the 'ansible_managed' variable available for :ref:`ansible_collections.ansible.builtin.template_module` and :ref:`ansible_collections.ansible.windows.win_template_module`. This is only relevant to those two modules.
;ansible_managed=Ansible managed
# (string) This sets the default arguments to pass to the ``ansible`` adhoc binary if no ``-a`` is specified.
;module_args=
# (string) Compression scheme to use when transferring Python modules to the target.
;module_compression=ZIP_DEFLATED
# (string) Module to use with the ``ansible`` AdHoc command, if none is specified via ``-m``.
;module_name=command
# (pathspec) Colon-separated paths in which Ansible will search for Modules.
;library=/Users/fabioisti/.ansible/plugins/modules:/usr/share/ansible/plugins/modules
# (pathspec) Colon-separated paths in which Ansible will search for Module utils files, which are shared by modules.
;module_utils=/Users/fabioisti/.ansible/plugins/module_utils:/usr/share/ansible/plugins/module_utils
# (pathspec) Colon-separated paths in which Ansible will search for Netconf Plugins.
;netconf_plugins=/Users/fabioisti/.ansible/plugins/netconf:/usr/share/ansible/plugins/netconf
# (boolean) Toggle Ansible's display and logging of task details, mainly used to avoid security disclosures.
;no_log=False
# (boolean) Toggle Ansible logging to syslog on the target when it executes tasks. On Windows hosts, this will prevent newer-style PowerShell modules from writing to the event log.
;no_target_syslog=False
# (raw) What templating should return as a 'null' value. When not set it will let Jinja2 decide.
;null_representation=
# (integer) For asynchronous tasks in Ansible (covered in Asynchronous Actions and Polling), this is how often to check back on the status of those tasks when an explicit poll interval is not supplied. The default is a reasonably moderate 15 seconds which is a tradeoff between checking in frequently and providing a quick turnaround when something may have completed.
;poll_interval=15
# (path) Option for connections using a certificate or key file to authenticate, rather than an agent or passwords, you can set the default value here to avoid re-specifying ``--private-key`` with every invocation.
;private_key_file=
# (boolean) By default, imported roles publish their variables to the play and other roles, this setting can avoid that.
# This was introduced as a way to reset role variables to default values if a role is used more than once in a playbook.
# Starting in version '2.17' M(ansible.builtin.include_roles) and M(ansible.builtin.import_roles) can individually override this via the C(public) parameter.
# Included roles only make their variables public at execution, unlike imported roles which happen at playbook compile time.
;private_role_vars=False
# (integer) Port to use in remote connections, when blank it will use the connection plugin default.
;remote_port=
# (string) Sets the login user for the target machines
# When blank it uses the connection plugin's default, normally the user currently executing Ansible.
;remote_user=
# (pathspec) Colon-separated paths in which Ansible will search for Roles.
;roles_path=/Users/fabioisti/.ansible/roles:/usr/share/ansible/roles:/etc/ansible/roles
# (string) Set the main callback used to display Ansible output. You can only have one at a time.
# You can have many other callbacks, but just one can be in charge of stdout.
# See :ref:`callback_plugins` for a list of available options.
;stdout_callback=default
# (string) Set the default strategy used for plays.
;strategy=linear
# (pathspec) Colon-separated paths in which Ansible will search for Strategy Plugins.
;strategy_plugins=/Users/fabioisti/.ansible/plugins/strategy:/usr/share/ansible/plugins/strategy
# (boolean) Toggle the use of "su" for tasks.
;su=False
# (string) Syslog facility to use when Ansible logs to the remote target.
;syslog_facility=LOG_USER
# (pathspec) Colon-separated paths in which Ansible will search for Terminal Plugins.
;terminal_plugins=/Users/fabioisti/.ansible/plugins/terminal:/usr/share/ansible/plugins/terminal
# (pathspec) Colon-separated paths in which Ansible will search for Jinja2 Test Plugins.
;test_plugins=/Users/fabioisti/.ansible/plugins/test:/usr/share/ansible/plugins/test
# (integer) This is the default timeout for connection plugins to use.
;timeout=10
# (string) Can be any connection plugin available to your ansible installation.
# There is also a (DEPRECATED) special 'smart' option, that will toggle between 'ssh' and 'paramiko' depending on controller OS and ssh versions.
;transport=ssh
# (boolean) When True, this causes ansible templating to fail steps that reference variable names that are likely typoed.
# Otherwise, any '{{ template_expression }}' that contains undefined variables will be rendered in a template or ansible action line exactly as written.
;error_on_undefined_vars=True
# (pathspec) Colon-separated paths in which Ansible will search for Vars Plugins.
;vars_plugins=/Users/fabioisti/.ansible/plugins/vars:/usr/share/ansible/plugins/vars
# (string) The vault_id to use for encrypting by default. If multiple vault_ids are provided, this specifies which to use for encryption. The ``--encrypt-vault-id`` CLI option overrides the configured value.
;vault_encrypt_identity=
# (string) The label to use for the default vault id label in cases where a vault id label is not provided.
;vault_identity=default
# (list) A list of vault-ids to use by default. Equivalent to multiple ``--vault-id`` args. Vault-ids are tried in order.
;vault_identity_list=
# (string) If true, decrypting vaults with a vault id will only try the password from the matching vault-id.
;vault_id_match=False
# (path) The vault password file to use. Equivalent to ``--vault-password-file`` or ``--vault-id``.
# If executable, it will be run and the resulting stdout will be used as the password.
;vault_password_file=
# (integer) Sets the default verbosity, equivalent to the number of ``-v`` passed in the command line.
;verbosity=0
# (boolean) Toggle to control the showing of deprecation warnings
;deprecation_warnings=True
# (boolean) Toggle to control showing warnings related to running devel.
;devel_warning=True
# (boolean) Normally ``ansible-playbook`` will print a header for each task that is run. These headers will contain the name: field from the task if you specified one. If you didn't then ``ansible-playbook`` uses the task's action to help you tell which task is presently running. Sometimes you run many of the same action and so you want more information about the task to differentiate it from others of the same action. If you set this variable to True in the config then ``ansible-playbook`` will also include the task's arguments in the header.
# This setting defaults to False because there is a chance that you have sensitive values in your parameters and you do not want those to be printed.
# If you set this to True you should be sure that you have secured your environment's stdout (no one can shoulder surf your screen and you aren't saving stdout to an insecure file) or made sure that all of your playbooks explicitly added the ``no_log: True`` parameter to tasks that have sensitive values :ref:`keep_secret_data` for more information.
;display_args_to_stdout=False
# (boolean) Toggle to control displaying skipped task/host entries in a task in the default callback.
;display_skipped_hosts=True
# (string) Root docsite URL used to generate docs URLs in warning/error text; must be an absolute URL with a valid scheme and trailing slash.
;docsite_root_url=https://docs.ansible.com/ansible-core/
# (pathspec) Colon-separated paths in which Ansible will search for Documentation Fragments Plugins.
;doc_fragment_plugins=/Users/fabioisti/.ansible/plugins/doc_fragments:/usr/share/ansible/plugins/doc_fragments
# (string) By default, Ansible will issue a warning when a duplicate dict key is encountered in YAML.
# These warnings can be silenced by adjusting this setting to False.
;duplicate_dict_key=warn
# (string) for the cases in which Ansible needs to return a file within an editor, this chooses the application to use.
;editor=vi
# (boolean) Whether or not to enable the task debugger, this previously was done as a strategy plugin.
# Now all strategy plugins can inherit this behavior. The debugger defaults to activating when
# a task is failed on unreachable. Use the debugger keyword for more flexibility.
;enable_task_debugger=False
# (boolean) Toggle to allow missing handlers to become a warning instead of an error when notifying.
;error_on_missing_handler=True
# (list) Which modules to run during a play's fact gathering stage, using the default of 'smart' will try to figure it out based on connection type.
# If adding your own modules but you still want to use the default Ansible facts, you will want to include 'setup' or corresponding network module to the list (if you add 'smart', Ansible will also figure it out).
# This does not affect explicit calls to the 'setup' module, but does always affect the 'gather_facts' action (implicit or explicit).
facts_modules=smart
# (boolean) Set this to "False" if you want to avoid host key checking by the underlying connection plugin Ansible uses to connect to the host.
# Please read the documentation of the specific connection plugin used for details.
host_key_checking=False
# (boolean) Facts are available inside the `ansible_facts` variable, this setting also pushes them as their own vars in the main namespace.
# Unlike inside the `ansible_facts` dictionary where the prefix `ansible_` is removed from fact names, these will have the exact names that are returned by the module.
;inject_facts_as_vars=True
# (string) Path to the Python interpreter to be used for module execution on remote targets, or an automatic discovery mode. Supported discovery modes are ``auto`` (the default), ``auto_silent``, ``auto_legacy``, and ``auto_legacy_silent``. All discovery modes employ a lookup table to use the included system Python (on distributions known to include one), falling back to a fixed ordered list of well-known Python interpreter locations if a platform-specific default is not available. The fallback behavior will issue a warning that the interpreter should be set explicitly (since interpreters installed later may change which one is used). This warning behavior can be disabled by setting ``auto_silent`` or ``auto_legacy_silent``. The value of ``auto_legacy`` provides all the same behavior, but for backward-compatibility with older Ansible releases that always defaulted to ``/usr/bin/python``, will use that interpreter if present.
interpreter_python=auto_silent
# (boolean) If 'false', invalid attributes for a task will result in warnings instead of errors.
;invalid_task_attribute_failed=True
# (boolean) By default, Ansible will issue a warning when there are no hosts in the inventory.
# These warnings can be silenced by adjusting this setting to False.
;localhost_warning=True
# (int) This will set log verbosity if higher than the normal display verbosity, otherwise it will match that.
;log_verbosity=
# (int) Maximum size of files to be considered for diff display.
;max_diff_size=104448
# (list) List of extensions to ignore when looking for modules to load.
# This is for rejecting script and binary module fallback extensions.
;module_ignore_exts=.pyc, .pyo, .swp, .bak, ~, .rpm, .md, .txt, .rst, .yaml, .yml, .ini
# (bool) Enables whether module responses are evaluated for containing non-UTF-8 data.
# Disabling this may result in unexpected behavior.
# Only ansible-core should evaluate this configuration.
;module_strict_utf8_response=True
# (list) TODO: write it
;network_group_modules=eos, nxos, ios, iosxr, junos, enos, ce, vyos, sros, dellos9, dellos10, dellos6, asa, aruba, aireos, bigip, ironware, onyx, netconf, exos, voss, slxos
# (boolean) Previously Ansible would only clear some of the plugin loading caches when loading new roles, this led to some behaviors in which a plugin loaded in previous plays would be unexpectedly 'sticky'. This setting allows the user to return to that behavior.
;old_plugin_cache_clear=False
# (string) for the cases in which Ansible needs to return output in a pageable fashion, this chooses the application to use.
;pager=less
# (path) A number of non-playbook CLIs have a ``--playbook-dir`` argument; this sets the default value for it.
;playbook_dir=
# (string) This sets which playbook dirs will be used as a root to process vars plugins, which includes finding host_vars/group_vars.
;playbook_vars_root=top
# (path) A path to configuration for filtering which plugins installed on the system are allowed to be used.
# See :ref:`plugin_filtering_config` for details of the filter file's format.
# The default is /etc/ansible/plugin_filters.yml
;plugin_filters_cfg=
# (string) Attempts to set RLIMIT_NOFILE soft limit to the specified value when executing Python modules (can speed up subprocess usage on Python 2.x. See https://bugs.python.org/issue11284). The value will be limited by the existing hard limit. Default value of 0 does not attempt to adjust existing system-defined limits.
;python_module_rlimit_nofile=0
# (bool) This controls whether a failed Ansible playbook should create a .retry file.
;retry_files_enabled=False
# (path) This sets the path in which Ansible will save .retry files when a playbook fails and retry files are enabled.
# This file will be overwritten after each run with the list of failed hosts from all plays.
;retry_files_save_path=
# (str) This setting can be used to optimize vars_plugin usage depending on the user's inventory size and play selection.
;run_vars_plugins=demand
# (bool) This adds the custom stats set via the set_stats plugin to the default output.
;show_custom_stats=False
# (string) Action to take when a module parameter value is converted to a string (this does not affect variables). For string parameters, values such as '1.00', "['a', 'b',]", and 'yes', 'y', etc. will be converted by the YAML parser unless fully quoted.
# Valid options are 'error', 'warn', and 'ignore'.
# Since 2.8, this option defaults to 'warn' but will change to 'error' in 2.12.
;string_conversion_action=warn
# (boolean) Allows disabling of warnings related to potential issues on the system running Ansible itself (not on the managed hosts).
# These may include warnings about third-party packages or other conditions that should be resolved if possible.
;system_warnings=True
# (string) A string to insert into target logging for tracking purposes
;target_log_info=
# (boolean) This option defines whether the task debugger will be invoked on a failed task when ignore_errors=True is specified.
# True specifies that the debugger will honor ignore_errors, and False will not honor ignore_errors.
;task_debugger_ignore_errors=True
# (integer) Set the maximum time (in seconds) for a task action to execute in.
# Timeout runs independently from templating or looping. It applies per each attempt of executing the task's action and remains unchanged by the total time spent on a task.
# When the action execution exceeds the timeout, Ansible interrupts the process. This is registered as a failure due to outside circumstances, not a task failure, to receive appropriate response and recovery process.
# If set to 0 (the default) there is no timeout.
;task_timeout=0
# (string) Make ansible transform invalid characters in group names supplied by inventory sources.
;force_valid_group_names=never
# (boolean) Toggles the use of persistence for connections.
;use_persistent_connections=False
# (bool) A toggle to disable validating a collection's 'metadata' entry for a module_defaults action group. Metadata containing unexpected fields or value types will produce a warning when this is True.
;validate_action_group_metadata=True
# (list) Accept list for variable plugins that require it.
;vars_plugins_enabled=host_group_vars
# (list) Allows to change the group variable precedence merge order.
;precedence=all_inventory, groups_inventory, all_plugins_inventory, all_plugins_play, groups_plugins_inventory, groups_plugins_play
# (string) The salt to use for the vault encryption. If it is not provided, a random salt will be used.
;vault_encrypt_salt=
# (bool) Force 'verbose' option to use stderr instead of stdout
;verbose_to_stderr=False
# (integer) For asynchronous tasks in Ansible (covered in Asynchronous Actions and Polling), this is how long, in seconds, to wait for the task spawned by Ansible to connect back to the named pipe used on Windows systems. The default is 5 seconds. This can be too low on slower systems, or systems under heavy load.
# This is not the total time an async command can run for, but is a separate timeout to wait for an async command to start. The task will only start to be timed against its async_timeout once it has connected to the pipe, so the overall maximum duration the task can take will be extended by the amount specified here.
;win_async_startup_timeout=5
# (list) Check all of these extensions when looking for 'variable' files which should be YAML or JSON or vaulted versions of these.
# This affects vars_files, include_vars, inventory and vars plugins among others.
;yaml_valid_extensions=.yml, .yaml, .json
[privilege_escalation]
# (boolean) Display an agnostic become prompt instead of displaying a prompt containing the command line supplied become method.
;agnostic_become_prompt=True
# (boolean) When ``False`` (default), Ansible will skip using become if the remote user is the same as the become user, as this is normally a redundant operation. In other words root sudo to root.
# If ``True``, this forces Ansible to use the become plugin anyways as there are cases in which this is needed.
;become_allow_same_user=False
# (boolean) Toggles the use of privilege escalation, allowing you to 'become' another user after login.
;become=False
# (boolean) Toggle to prompt for privilege escalation password.
;become_ask_pass=False
# (string) executable to use for privilege escalation, otherwise Ansible will depend on PATH.
;become_exe=
# (string) Flags to pass to the privilege escalation executable.
;become_flags=
# (string) Privilege escalation method to use when `become` is enabled.
;become_method=sudo
# (string) The user your login/remote user 'becomes' when using privilege escalation, most systems will use 'root' when no user is specified.
;become_user=root
[persistent_connection]
# (path) Specify where to look for the ansible-connection script. This location will be checked before searching $PATH.
# If null, ansible will start with the same directory as the ansible script.
;ansible_connection_path=
# (int) This controls the amount of time to wait for a response from a remote device before timing out a persistent connection.
;command_timeout=30
# (integer) This controls the retry timeout for persistent connection to connect to the local domain socket.
;connect_retry_timeout=15
# (integer) This controls how long the persistent connection will remain idle before it is destroyed.
;connect_timeout=30
# (path) Path to the socket to be used by the connection persistence system.
;control_path_dir=/Users/fabioisti/.ansible/pc
[connection]
# (boolean) This is a global option, each connection plugin can override either by having more specific options or not supporting pipelining at all.
# Pipelining, if supported by the connection plugin, reduces the number of network operations required to execute a module on the remote server, by executing many Ansible modules without actual file transfer.
# It can result in a very significant performance improvement when enabled.
# However this conflicts with privilege escalation (become). For example, when using 'sudo:' operations you must first disable 'requiretty' in /etc/sudoers on all managed hosts, which is why it is disabled by default.
# This setting will be disabled if ``ANSIBLE_KEEP_REMOTE_FILES`` is enabled.
;pipelining=False
[colors]
# (string) Defines the color to use on 'Changed' task status.
;changed=yellow
# (string) Defines the default color to use for ansible-console.
;console_prompt=white
# (string) Defines the color to use when emitting debug messages.
;debug=dark gray
# (string) Defines the color to use when emitting deprecation messages.
;deprecate=purple
# (string) Defines the color to use when showing added lines in diffs.
;diff_add=green
# (string) Defines the color to use when showing diffs.
;diff_lines=cyan
# (string) Defines the color to use when showing removed lines in diffs.
;diff_remove=red
# (string) Defines the color to use when emitting a constant in the ansible-doc output.
;doc_constant=dark gray
# (string) Defines the color to use when emitting a deprecated value in the ansible-doc output.
;doc_deprecated=magenta
# (string) Defines the color to use when emitting a link in the ansible-doc output.
;doc_link=cyan
# (string) Defines the color to use when emitting a module name in the ansible-doc output.
;doc_module=yellow
# (string) Defines the color to use when emitting a plugin name in the ansible-doc output.
;doc_plugin=yellow
# (string) Defines the color to use when emitting cross-reference in the ansible-doc output.
;doc_reference=magenta
# (string) Defines the color to use when emitting error messages.
;error=red
# (string) Defines the color to use for highlighting.
;highlight=white
# (string) Defines the color to use when showing 'Included' task status.
;included=cyan
# (string) Defines the color to use when showing 'OK' task status.
;ok=green
# (string) Defines the color to use when showing 'Skipped' task status.
;skip=cyan
# (string) Defines the color to use on 'Unreachable' status.
;unreachable=bright red
# (string) Defines the color to use when emitting verbose messages. In other words, those that show with '-v's.
;verbose=blue
# (string) Defines the color to use when emitting warning messages.
;warn=bright purple
[selinux]
# (boolean) This setting causes libvirt to connect to LXC containers by passing ``--noseclabel`` parameter to ``virsh`` command. This is necessary when running on systems which do not have SELinux.
;libvirt_lxc_noseclabel=False
# (list) Some filesystems do not support safe operations and/or return inconsistent errors, this setting makes Ansible 'tolerate' those in the list without causing fatal errors.
# Data corruption may occur and writes are not always verified when a filesystem is in the list.
;special_context_filesystems=fuse, nfs, vboxsf, ramfs, 9p, vfat
[diff]
# (bool) Configuration toggle to tell modules to show differences when in 'changed' status, equivalent to ``--diff``.
;always=False
# (integer) Number of lines of context to show when displaying the differences between files.
;context=3
[galaxy]
# (path) The directory that stores cached responses from a Galaxy server.
# This is only used by the ``ansible-galaxy collection install`` and ``download`` commands.
# Cache files inside this dir will be ignored if they are world writable.
;cache_dir=/Users/fabioisti/.ansible/galaxy_cache
# (bool) whether ``ansible-galaxy collection install`` should warn about ``--collections-path`` missing from configured :ref:`collections_paths`.
;collections_path_warning=True
# (path) Collection skeleton directory to use as a template for the ``init`` action in ``ansible-galaxy collection``, same as ``--collection-skeleton``.
;collection_skeleton=
# (list) patterns of files to ignore inside a Galaxy collection skeleton directory.
;collection_skeleton_ignore=^.git$, ^.*/.git_keep$
# (bool) Disable GPG signature verification during collection installation.
;disable_gpg_verify=False
# (bool) Some steps in ``ansible-galaxy`` display a progress wheel which can cause issues on certain displays or when outputting the stdout to a file.
# This config option controls whether the display wheel is shown or not.
# The default is to show the display wheel if stdout has a tty.
;display_progress=
# (path) Configure the keyring used for GPG signature verification during collection installation and verification.
;gpg_keyring=
# (boolean) If set to yes, ansible-galaxy will not validate TLS certificates. This can be useful for testing against a server with a self-signed certificate.
;ignore_certs=
# (list) A list of GPG status codes to ignore during GPG signature verification. See L(https://github.com/gpg/gnupg/blob/master/doc/DETAILS#general-status-codes) for status code descriptions.
# If fewer signatures successfully verify the collection than `GALAXY_REQUIRED_VALID_SIGNATURE_COUNT`, signature verification will fail even if all error codes are ignored.
;ignore_signature_status_codes=
# (str) The number of signatures that must be successful during GPG signature verification while installing or verifying collections.
# This should be a positive integer or all to indicate all signatures must successfully validate the collection.
# Prepend + to the value to fail if no valid signatures are found for the collection.
;required_valid_signature_count=1
# (path) Role skeleton directory to use as a template for the ``init`` action in ``ansible-galaxy``/``ansible-galaxy role``, same as ``--role-skeleton``.
;role_skeleton=
# (list) patterns of files to ignore inside a Galaxy role or collection skeleton directory.
;role_skeleton_ignore=^.git$, ^.*/.git_keep$
# (string) URL to prepend when roles don't specify the full URI, assume they are referencing this server as the source.
;server=https://galaxy.ansible.com
# (list) A list of Galaxy servers to use when installing a collection.
# The value corresponds to the config ini header ``[galaxy_server.{{item}}]`` which defines the server details.
# See :ref:`galaxy_server_config` for more details on how to define a Galaxy server.
# The order of servers in this list is used as the order in which a collection is resolved.
# Setting this config option will ignore the :ref:`galaxy_server` config option.
;server_list=
# (int) The default timeout for Galaxy API calls. Galaxy servers that don't configure a specific timeout will fall back to this value.
;server_timeout=60
# (path) Local path to galaxy access token file
;token_path=/Users/fabioisti/.ansible/galaxy_token
[inventory]
# (string) This setting changes the behaviour of mismatched host patterns, it allows you to force a fatal error, a warning or just ignore it.
;host_pattern_mismatch=warning
# (boolean) If 'true', it is a fatal error when any given inventory source cannot be successfully parsed by any available inventory plugin; otherwise, this situation only attracts a warning.
;any_unparsed_is_failed=False
# (bool) Toggle to turn on inventory caching.
# This setting has been moved to the individual inventory plugins as a plugin option :ref:`inventory_plugins`.
# The existing configuration settings are still accepted with the inventory plugin adding additional options from inventory configuration.
# This message will be removed in 2.16.
;cache=False
# (string) The plugin for caching inventory.
# This setting has been moved to the individual inventory plugins as a plugin option :ref:`inventory_plugins`.
# The existing configuration settings are still accepted with the inventory plugin adding additional options from inventory and fact cache configuration.
# This message will be removed in 2.16.
;cache_plugin=
# (string) The inventory cache connection.
# This setting has been moved to the individual inventory plugins as a plugin option :ref:`inventory_plugins`.
# The existing configuration settings are still accepted with the inventory plugin adding additional options from inventory and fact cache configuration.
# This message will be removed in 2.16.
;cache_connection=
# (string) The table prefix for the cache plugin.
# This setting has been moved to the individual inventory plugins as a plugin option :ref:`inventory_plugins`.
# The existing configuration settings are still accepted with the inventory plugin adding additional options from inventory and fact cache configuration.
# This message will be removed in 2.16.
;cache_prefix=ansible_inventory_
# (string) Expiration timeout for the inventory cache plugin data.
# This setting has been moved to the individual inventory plugins as a plugin option :ref:`inventory_plugins`.
# The existing configuration settings are still accepted with the inventory plugin adding additional options from inventory and fact cache configuration.
# This message will be removed in 2.16.
;cache_timeout=3600
# (list) List of enabled inventory plugins, it also determines the order in which they are used.
;enable_plugins=host_list, script, auto, yaml, ini, toml
# (bool) Controls if ansible-inventory will accurately reflect Ansible's view into inventory or is optimized for exporting.
;export=False
# (list) List of extensions to ignore when using a directory as an inventory source.
;ignore_extensions=.pyc, .pyo, .swp, .bak, ~, .rpm, .md, .txt, .rst, .orig, .ini, .cfg, .retry
# (list) List of patterns to ignore when using a directory as an inventory source.
;ignore_patterns=
# (bool) If 'true' it is a fatal error if every single potential inventory source fails to parse, otherwise, this situation will only attract a warning.
;unparsed_is_failed=False
# (boolean) By default, Ansible will issue a warning when no inventory was loaded and notes that it will use an implicit localhost-only inventory.
# These warnings can be silenced by adjusting this setting to False.
;inventory_unparsed_warning=True
[netconf_connection]
# (string) This variable is used to enable bastion/jump host with netconf connection. If set to True the bastion/jump host ssh settings should be present in ~/.ssh/config file, alternatively it can be set to custom ssh configuration file path to read the bastion/jump host settings.
;ssh_config=
[paramiko_connection]
# (boolean) TODO: write it
;host_key_auto_add=False
# (boolean) TODO: write it
;look_for_keys=True
[jinja2]
# (list) This list of filters avoids 'type conversion' when templating variables.
# Useful when you want to avoid conversion into lists or dictionaries for JSON strings, for example.
;dont_type_filters=string, to_json, to_nice_json, to_yaml, to_nice_yaml, ppretty, json
[tags]
# (list) default list of tags to run in your plays, Skip Tags has precedence.
;run=
# (list) default list of tags to skip in your plays, has precedence over Run Tags
;skip=

View File

@ -1,13 +0,0 @@
externals:
children:
rup_tests:
hosts:
liquid:
ansible_host: 146.48.108.15
nextrup_copy_test:
ansible_host: 146.48.108.16
misc_tests:
hosts:
bigbrain:
ansible_host: 146.48.108.14

View File

@ -1,3 +0,0 @@
ansible_user: ansible
#ansible_password: "{{ ansible_crypted_password }}"
ansible_python_interpreter: /usr/bin/python3

View File

@ -1,9 +0,0 @@
$ANSIBLE_VAULT;1.1;AES256
36323030396532326432653332633031386532393564653565623231343037376432626337353666
3136663234656635613337653036356366363431346263630a313034633838663263663662653639
64333761373334633465363632646366656430626362623630343830383735663830303462623630
6465323036343965640a393536313637623930613431643962613237363733653163613366643837
64616462626165396632353365666334363035393864386534363831643631646530663739323538
61646635343264393737336666653330383863623362663166306632653939376463653362363431
33366334306330643266303730653863633363303964316361626665363262343833323063343932
31313938626338326431

View File

@ -1,117 +0,0 @@
bind_allow_query:
- "any"
bind_listen:
ipv4:
- port: 53
addresses:
- "127.0.0.1"
- "146.48.108.51"
- port: 5353
addresses:
- "127.0.1.1"
bind_zones:
- name: 'sifi.isti.cnr.it'
# default: primary [primary, secondary, forward]
# type: primary
# create_forward_zones: true
# Skip creation of reverse zones
# create_reverse_zones: false
    # for type: secondary
primaries:
- 146.48.108.51
networks:
- '146.48.108'
#ipv6_networks:
# - '2001:db9::/48'
name_servers:
- ns1.sifi.isti.cnr.it.
# hostmaster_email: admin
#
#allow_updates:
# - "10.0.1.2"
# - 'key "external-dns"'
#allow_transfers:
# - 'key "external-dns"'
hosts:
- name: ns1
ip: 146.48.108.51
- name: bigbrain
ip: 146.48.108.14
- name: wireguarder
ip: 146.48.108.13
#ipv6: '2001:db9::1'
#mail_servers:
# - name: mail001
# preference: 10
bind_logging:
enable: true
channels:
- channel: general
file: "data/general.log"
versions: 3
size: 10M
print_time: true # true | false
print_category: true
print_severity: true
severity: dynamic # critical | error | warning | notice | info | debug [level] | dynamic
- channel: query
file: "data/query.log"
versions: 5
size: 10M
print_time: "" # true | false
severity: info #
- channel: dnssec
file: "data/dnssec.log"
versions: 5
size: 10M
print_time: "" # true | false
severity: info #
- channel: notify
file: "data/notify.log"
versions: 5
size: 10M
print_time: "" # true | false
severity: info #
- channel: transfers
file: "data/transfers.log"
versions: 5
size: 10M
print_time: "" # true | false
severity: info #
- channel: slog
syslog: security # kern | user | mail | daemon | auth | syslog | lpr |
# news | uucp | cron | authpriv | ftp |
# local0 | local1 | local2 | local3 |
# local4 | local5 | local6 | local7
# file: "data/transfers.log"
#versions: 5
#size: 10M
print_time: "" # true | false
severity: info #
categories:
"xfer-out":
- transfers
- slog
"xfer-in":
- transfers
- slog
notify:
- notify
"lame-servers":
- general
config:
- general
default:
- general
security:
- general
- slog
dnssec:
- dnssec
queries:
- query

View File

@ -1,79 +0,0 @@
-----BEGIN CERTIFICATE-----
MIIELzCCAxegAwIBAgIBADANBgkqhkiG9w0BAQsFADCBijELMAkGA1UEBhMCSVQx
EDAOBgNVBAgMB1R1c2NhbnkxDTALBgNVBAcMBFBpc2ExDTALBgNVBAoMBElTVEkx
DTALBgNVBAsMBFNJRkkxKjAoBgkqhkiG9w0BCQEWG2ZhYmlvLnNpbmliYWxkaUBp
c3RpLmNuci5pdDEQMA4GA1UEAwwHcm9vdC1jYTAeFw0yNjA0MjAxMjU3MjVaFw0y
ODA3MjMxMjU3MjVaMIGKMQswCQYDVQQGEwJJVDEQMA4GA1UECAwHVHVzY2FueTEN
MAsGA1UEBwwEUGlzYTENMAsGA1UECgwESVNUSTENMAsGA1UECwwEU0lGSTEqMCgG
CSqGSIb3DQEJARYbZmFiaW8uc2luaWJhbGRpQGlzdGkuY25yLml0MRAwDgYDVQQD
DAdyb290LWNhMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAq1A22Q0X
nJAwlbbkFr5/L6THhkquWakWs8/AJx5iIYZEXI7BkxU2R1qtUdfMp36ifwb4nmVZ
6WCzl9WzYqZqSZN79dtzENT5Y+Kwy9cGCHcEK6jZ//5w+Uqlad3wwnQq3UubN4m6
cmolg8pY6xqVjOK2AptrEIGc557JX3kujFci2n0Db3yzDtOJh7cTV7d/duCgX8el
zZBGLB47HXsVpy2cb70iyqC/CWGgCuYmXDNujzrhabboi8HA88IbqnY4jx5T1d0f
R7IuWXX+fG0D8fEiL/wqTNFk+rAGfTAyx3JPGtDhfHn+sXeUirh8n694sMU5WRWW
jd3b64/JaDdXBwIDAQABo4GdMIGaMDcGCWCGSAGG+EIBDQQqFihPUE5zZW5zZSBH
ZW5lcmF0ZWQgQ2VydGlmaWNhdGUgQXV0aG9yaXR5MB0GA1UdDgQWBBShuiplNRfk
tfYS+JhEaZlrc2zWaTAfBgNVHSMEGDAWgBShuiplNRfktfYS+JhEaZlrc2zWaTAP
BgNVHRMBAf8EBTADAQH/MA4GA1UdDwEB/wQEAwIBhjANBgkqhkiG9w0BAQsFAAOC
AQEAWV2VrUz8Gl2QjZNNKaovWpAboJXzqlhLyQncRm5Pb5iZ9IbEUVhb68L65QYm
POFeetUyef1OgPqZ1cr8+ihiqTb6IXZqOhtOTZWBiyD+RX8UmvBN86uX7jkbvbQL
AteTdm9K2n0DKhjjk12D3FK+6WUO2NiwfMBL8EDzt9vzf3SxTRgPCc9A4Wud35Y1
MErGUfrGoq3QzQtNevfQ3+qopLF+tCbNdfKpXEFRPfDbzEIlzIPfc8uRKq5XueW9
RVFUgoXJ0bJlcvncyGEBCjrPYUCld/i2oKvE+50qEkCWgci3cEDev6/p5W7dDiA2
BKjq45LlfNj/1ZBQDE8U2QLIBA==
-----END CERTIFICATE-----
-----BEGIN CERTIFICATE-----
MIIEKDCCAxCgAwIBAgIBAjANBgkqhkiG9w0BAQsFADCBijELMAkGA1UEBhMCSVQx
EDAOBgNVBAgMB1R1c2NhbnkxDTALBgNVBAcMBFBpc2ExDTALBgNVBAoMBElTVEkx
DTALBgNVBAsMBFNJRkkxKjAoBgkqhkiG9w0BCQEWG2ZhYmlvLnNpbmliYWxkaUBp
c3RpLmNuci5pdDEQMA4GA1UEAwwHcm9vdC1jYTAeFw0yNjA0MjAxMzAwMjhaFw0y
ODA3MjMxMzAwMjhaMIGDMQswCQYDVQQGEwJJVDEQMA4GA1UECAwHVHVzY2FueTEN
MAsGA1UEBwwEUGlzYTENMAsGA1UECgwESVNUSTEqMCgGCSqGSIb3DQEJARYbZmFi
aW8uc2luaWJhbGRpQGlzdGkuY25yLml0MRgwFgYDVQQDDA9pbnRlcm1lZGlhdGUt
Y2EwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDJssRmE4uoIBTp7j7L
XStVzuO4vuBwTWVlQy+5CJVG7Yt4tkKZ1pkkn3xBbdpSbHxleDmUfP7eKXUe6cWo
Jv1aCQ4DbZMGOseo6OXQ3fBIjbp+f9pYtEEQkUCFz6PV3CwFnzFIjjKxjsPN6gXE
ZtAe/zo9zAc/fqySFVxYgBvBYz8UhMJ7VzU+sna84ojbYSleF3CzPKrN6dmWj0uq
o6o7EWLxUPVEnNlSpYfWp9SO1Hcouu9Fj15BSVUZFZLdsxI7S9UnraqFwXxf0eBl
/0zm97DSkOwdj2BmXaeGvrOZmfwln7vO5HRUZq1/VFcu81hUgr6H9zVTwRJbrbdO
42y1AgMBAAGjgZ0wgZowNwYJYIZIAYb4QgENBCoWKE9QTnNlbnNlIEdlbmVyYXRl
ZCBDZXJ0aWZpY2F0ZSBBdXRob3JpdHkwHQYDVR0OBBYEFCAPopFmSzDWL0TM+aS9
Oxr/Df2QMB8GA1UdIwQYMBaAFKG6KmU1F+S19hL4mERpmWtzbNZpMA8GA1UdEwEB
/wQFMAMBAf8wDgYDVR0PAQH/BAQDAgGGMA0GCSqGSIb3DQEBCwUAA4IBAQCJId+d
X6IbDzguLM3nSBGwvTSVtvNHXAnZQxqXW7DQCF12i8rvGXndMgZ2JwxA8p3Ljcyf
eZoxBKDp1ftehtWxipIguX0DSC8R3SwsFBr7yBbmpMHDGlGqWtQnDpv6bSDRtCAp
f13B+6AVx8XtT6MNJuOAGue/4kzwi/xkWWMJVNXoKFSw6qOH5IhOiJnYWpasx7LK
nJ/O8Q8fKIVp/Ganmc4NdCArM9dHipt8HXAiqYNW02RSLOrCp6E7pQRLB3R8TZoj
NXvDjwKXb3CXwZRLbytm+egu+Oml6Bdb9wC7y4QHLV6JBIKvMMI/6aOhgLeFVI7v
K9idaANxrsZPFQ7i
-----END CERTIFICATE-----
-----BEGIN CERTIFICATE-----
MIIExzCCA6+gAwIBAgIBATANBgkqhkiG9w0BAQsFADCBgzELMAkGA1UEBhMCSVQx
EDAOBgNVBAgMB1R1c2NhbnkxDTALBgNVBAcMBFBpc2ExDTALBgNVBAoMBElTVEkx
KjAoBgkqhkiG9w0BCQEWG2ZhYmlvLnNpbmliYWxkaUBpc3RpLmNuci5pdDEYMBYG
A1UEAwwPaW50ZXJtZWRpYXRlLWNhMB4XDTI2MDQyNDA5MjA1OVoXDTI3MDUyNjA5
MjA1OVowaTELMAkGA1UEBhMCSVQxEDAOBgNVBAgMB1R1c2NhbnkxDTALBgNVBAcM
BFBpc2ExDTALBgNVBAoMBElTVEkxKjAoBgkqhkiG9w0BCQEWG2ZhYmlvLnNpbmli
YWxkaUBpc3RpLmNuci5pdDCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEB
AK74AA1JuHvsT60jCWp+Rp2inBBdlzWlXIS7eAjEmFWr3TApbUZ9W0HPgQ+WuUsA
9I/iiQedGHlaaCjeYGH/kTPkWhpZpCJ3rB+cIcWUlU5UPg+U1E3mwNEFEkJxJ8iB
SN1Fpt+RZemhnZJpZqSKRiQku3XNq56WBfnR0oQ63CJmPsH3+1WJsU5PxHvymcNN
ci3ISvU9rSKtziX61L08Yt20NMd6/HTcORpZZBNS8vSa/2Yk5BMBgrZUXk7/lS0+
hkzgt0omCTU9q7hYXg29Ihdp1YKLOjO+4aM/9POliBn+sIYyBcbY9Y5lqQ0KdsAP
3VofycDNJFJ9JhrANFlqYP8CAwEAAaOCAV0wggFZMAkGA1UdEwQCMAAwEQYJYIZI
AYb4QgEBBAQDAgZAMDQGCWCGSAGG+EIBDQQnFiVPUE5zZW5zZSBHZW5lcmF0ZWQg
U2VydmVyIENlcnRpZmljYXRlMB0GA1UdDgQWBBSHLdstzVl5xCb+XT5sj39TGhUS
ADCBtwYDVR0jBIGvMIGsgBQgD6KRZksw1i9EzPmkvTsa/w39kKGBkKSBjTCBijEL
MAkGA1UEBhMCSVQxEDAOBgNVBAgMB1R1c2NhbnkxDTALBgNVBAcMBFBpc2ExDTAL
BgNVBAoMBElTVEkxDTALBgNVBAsMBFNJRkkxKjAoBgkqhkiG9w0BCQEWG2ZhYmlv
LnNpbmliYWxkaUBpc3RpLmNuci5pdDEQMA4GA1UEAwwHcm9vdC1jYYIBAjAdBgNV
HSUEFjAUBggrBgEFBQcDAQYIKwYBBQUIAgIwCwYDVR0PBAQDAgWgMA0GCSqGSIb3
DQEBCwUAA4IBAQCsbDfFqTr2+p5cpV7KxAyIqQtT6fo0f0rvJeAglJ38rWSne4Sn
LDfTQmx/bKSf79E/TuoxGoTjsL9TceqPoDbt8TXgxPALBbON2XAah7JFAotAB6dG
kOMbmBiKOghDMPMDriU+zQAFQ/OtjuhzHD0GpciRKyVgC14iDBmeyEgSOEBqH4sp
lxKCJDNjWC2THv8dqLlaE4QlRNcprEiUNJhbxNg39A+PjYKHp5O5epfdMAVpzqC6
wgDww95xKM9xG4YZzpmoUn8sziJ2XTWWiLj9HHSaGcHx3H/QPpSiXM802tEs3gHr
rZI3EjNgrdhHxS7HZuAixXtTDeK4bfuk9n4L
-----END CERTIFICATE-----

View File

@ -1,12 +0,0 @@
---
wg_interface: wg0
wg_port: 51820
#wg_server_public_interface: eth0
wg_server_address: 192.168.99.1/32
#wg_server_private_key: "{{ wg_server_private_key }}"
wg_peers:
- name: fabio_test
publicKey: "dzODOKndtafZSf2GqvClFdxrpwyNJnZ/AsZkNl+ovEE="
allowedIP: "192.168.99.4/32"

View File

@ -1,2 +0,0 @@
---
ansible_user: clouseau

View File

@ -1,8 +0,0 @@
$ANSIBLE_VAULT;1.1;AES256
32653362346631356539383863653761626165383165653839343965636363623637396138346163
3239313335343263343265633966353230323663373664640a346234373733303139303139613962
36316132363366376436353936653930326235303937623762636438313138636238613136303930
3866303033613561630a666534326237363162656461336632623966653132656361333565356136
37666536323434323934353362383363643561306136656339663666643764626565666264373761
64306334323632326665386431323965653034393162373331313339653736346136313736626565
663230663364393634306438616133623665

View File

@ -1,37 +0,0 @@
main_lab_hosts:
children:
lan:
hosts:
edge.home.arpa:
inspector.home.arpa:
god.home.arpa:
swarm:
children:
enabling:
hosts:
enabling[1:3].home.arpa:
swarm_master: true
swarm_drain: false
vars:
swarm_name: enabling
swarm1:
hosts:
swarm1m[1:3].home.arpa:
swarm_master: true
swarm1w[1:4].home.arpa:
vars:
swarm_name: the_swarm
cluster:
hosts:
worker[1:3].home.arpa:
cl1m1.home.arpa:
front:
hosts:
main_lab_edge:
ansible_host: 146.48.108.5
operators:
children:
lan:
enabling:

View File

@ -1,11 +0,0 @@
production_hosts:
children:
public_access:
hosts:
#cloud.reterup.it:
#ansible_host: 146.48.108.17
backups:
ansible_host: 146.48.108.12
ftp_server:
hosts:
backups:

View File

@ -1,6 +0,0 @@
prox1_lab_hosts:
children:
front:
hosts:
prox1_lab_edge:
ansible_host: 146.48.108.11

View File

@ -1,23 +0,0 @@
---
# SIFI
sifi:
children:
opn:
hosts:
sifi_opnsense.sifi.isti.cnr.it:
# ns1.sifi.isti.cnr.it:
# ansible_host: 146.48.108.51 #[WAN public ip]
# ansible_host: 10.20.30.111
wireguard_server:
hosts:
wireguarder.sifi.isti.cnr.it:
# ansible_host: 146.48.108.13
nameserver:
hosts:
ns1.sifi.isti.cnr.it:
ansible_host: 146.48.108.51
# dns1.internal.sifi.isti.cnr.it:
# ansible_host: 10.11.12.11
workers:
hosts:
worker1.internal.sifi.isti.cnr.it:

View File

@ -1,60 +0,0 @@
- hosts: all
become: yes
#debugger: on_failed
tasks:
- name: Add the ansible group
group:
name: ansible
gid: 1100
state: present
- name: Add the ansible user as a system user
user:
name: ansible
uid: 1100
group: ansible
# Directly generate hash
# https://www.lisenet.com/2019/ansible-generate-crypted-passwords-for-the-user-module/
password: "{{ ansible_crypted_password | password_hash('sha512') }}"
shell: /bin/bash
# Uncomment to prevent password reset
update_password: on_create
system: yes
home: /srv/ansible
state: present
- name: Set ansible user as sudoer
copy:
content: "ansible ALL = (ALL) NOPASSWD:ALL"
dest: /etc/sudoers.d/ansible
owner: root
group: root
mode: 0440
- name: Init cache directory
ansible.builtin.file:
path: /var/cache/ansible
owner: ansible
group: ansible
state: directory
mode: u=rwx,g=rw,o=r
- name: Init etc directory
ansible.builtin.file:
path: /etc/ansible
owner: ansible
group: ansible
state: directory
mode: u=rwx,g=rw,o=r
# Inserts public keys of allowed externals users to log in as ansible
# e.g. fabio
- name: Create the .ssh directory
file: path=/srv/ansible/.ssh owner=ansible group=ansible mode=0700 state=directory
- name: Add the mandatory ssh keys to the ansible user
template: src=library/templates/ansible_auth_keys.j2 dest=/srv/ansible/.ssh/authorized_keys owner=ansible group=ansible mode=0600

View File

@ -1,68 +0,0 @@
---
# play 1 - local
# check config info from inventory
# play 2
# gather status
# play 3
# bootstrap
# play 4
# join
- name: Tests
hosts: swarm
connection: local
gather_facts: no
vars:
swarm_list: {}
tasks:
- debug: var=hostvars
- name: Gather Swarm info
set_fact:
swarm_list: "{{swarm_list + hostvars[hostname]['swarm_name']}}"
- debug: var=swarm_list
# # show all the hosts in the inventory
# - debug:
# msg: "{{ item }}"
# loop: "{{ groups['all'] }}"
  # - name: print something
# debug:
# msg: "{{ item }}"
# with_items: "{{ play_hosts }}"
# #run_once: true
# debug:
# msg:
# # - "{{ item }}"
# - "{{hostvars[item]['swarm_master']}}"
# #loop: "{{ hosts.items() | list }}"
# - name: Create swarm_<SWARM>_<MASTER|WORKER>_<OPERATIONAL|BOOTSTRAP> groups
# add_host:
# hostname: "{{ item }}"
# groups: "swarm_{{item}}manager_operational"
# with_items: "{{ play_hosts }}"
# run_once: true
# - name: Display
# command: echo "swarm_{{item}}manager_operational"

View File

@ -1,5 +0,0 @@
---
- name: Install FTP Server
hosts: ftp_server
roles:
- robgmills.pure-ftpd

View File

@ -1 +0,0 @@
ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAQEArNhKFcJ6T08sn7kTTLf+rO9HEvgOvqfhv5HQ2sRf2tFYfjfCb0zHKnMkgW+sy5gMU10Lyx1r7juXCvqRC955uIM97m1B1Xc6sVqASVKuGPhCKfhxEaMAyBcWFdE+HYbCOPYVN+JMrcwWfbblwiZTtK1OCqaEUvDDI7cFeU68noXwggEp46T48eqMUdi541D9Y+BVx9HYAo6OCQz0+6eXwxJL+tpRcAAXIMMWv362CYHoOgIU45R7xVSMLY1k/HLrcEAblwxEaSpduCH5cWUXZE/56IyxpvP44BxZkVhNdqJLmg4hxBQWhoMNYiTZxbLay3W2TwBCM111cAtUx4M/jQ== fabio@pc-fabio

View File

@ -1,19 +0,0 @@
---
- name: Configure Nameserver
hosts: nameserver
collections:
- bodsch.dns
tasks:
- name: Import role Bind
ansible.builtin.import_role:
name: bind
- name: Start a service
become: True
ansible.builtin.systemd:
name: named
state: restarted

View File

@ -1,17 +0,0 @@
---
- name: Install Nextcloud AIO Docker
hosts: all
become: true
vars:
pip_install_packages:
- name: docker
docker_version: "=5:28.2.2-1~ubuntu.24.04~noble"
docker_users:
- fabio
- ansible
roles:
- geerlingguy.pip
- geerlingguy.docker
# - nextcloud_aio

View File

@ -1,7 +0,0 @@
---
- import_playbook: ping_all.yaml
- name: Basic check nodes
hosts: all
roles:
- common

View File

@ -1,28 +0,0 @@
---
# Uses oxlorg.opnsense
# Check documentation @ https://ansible-opnsense.oxl.app/usage/2_basic.html#prerequisites
- name: Configure OPNSense
hosts: opn
connection: local #executes on controller
gather_facts: false
collections:
- oxlorg.opnsense
module_defaults:
oxlorg.opnsense.alias:
api_credential_file: '/Users/fabioisti/Keys/ns1.sifi.isti.cnr.it_fabio_apikey.txt'
firewall: "{{ ansible_host}}"
ssl_verify: true
ssl_ca_file: '/Users/fabioisti/git/SSE-LAB/ansible/inventories/group_vars/sifi/SIFI_CA.pem'
tasks:
    - name: Check libs
script: /Users/fabioisti/test_httpx.py
args:
executable: python3
- name: Test
oxlorg.opnsense.alias:
name: 'ANSIBLE_TEST1'
content: ['1.1.1.1']

View File

@ -1,10 +0,0 @@
---
- hosts: all
gather_facts: False
connection: local
tasks:
- name: ping
shell: ping -c 1 -W 2 {{ ansible_host }}

View File

@ -1,24 +0,0 @@
# requirements.yml
---
roles:
# - name: bodsch.dns.bind
# version:
# - name: nginx
# src: git@github.com:myorg/ansible-role-nginx.git
# scm: git
# version: v2.0.0
collections:
- name: bodsch.dns
source: https://github.com/bodsch/ansible-collection-dns.git
type: git
version: 1.4.1
# - name: community.postgresql
# version: "3.2.0"
# - name: ansible.posix
# version: "1.5.4"
# - name: myorg.infrastructure
# source: https://hub.internal.com/api/galaxy/
# version: "1.0.0"

View File

@ -1,2 +0,0 @@
---
config-flavor: none

View File

@ -1,23 +0,0 @@
---
## Register output of whoami
- name: Who am I
ansible.builtin.command: whoami
register: _my_whoiam_var
## Displays variable as to stdout
- name: Debug
ansible.builtin.debug:
var: _my_whoiam_var.stdout
## Check if can write on tmp
- name: Check write operation
ansible.builtin.copy:
content: "Hello world"
dest: /tmp/{{ _my_whoiam_var.stdout}}.hello-world.txt
## Cleans up
- name: Clean up
ansible.builtin.file:
path: /tmp/{{ _my_whoiam_var.stdout}}.hello-world.txt
state: absent

View File

@ -1,5 +0,0 @@
---
- name: Check access to Internet
wait_for:
host: 1.1.1.1
timeout: 5

View File

@ -1,3 +0,0 @@
---
- import_tasks: basic_checks.yaml
- import_tasks: connectivity.yml

View File

@ -1,4 +0,0 @@
---
# configure as master if flag
# identify swarm by node variable
become_user: docker

View File

@ -1,40 +0,0 @@
Docker Role
=========
Tasks :
- Installs dependencies and docker env [tasks/install.yml](tasks/install.yml)
- Creates user docker (group docker) for docker operations [tasks/docker_user.yml](tasks/docker_user.yml)
Requirements
------------
Uses **become**
Role Variables
--------------
A description of the settable variables for this role should go here, including any variables that are in defaults/main.yml, vars/main.yml, and any variables that can/should be set via parameters to the role. Any variables that are read from other roles and/or the global scope (ie. hostvars, group vars, etc.) should be mentioned here as well.
Dependencies
------------
A list of other roles hosted on Galaxy should go here, plus any details in regards to parameters that may need to be set for other roles, or variables that are used from other roles.
Example Playbook
----------------
Including an example of how to use your role (for instance, with variables passed in as parameters) is always nice for users too:
- hosts: servers
roles:
- { role: username.rolename, x: 42 }
License
-------
BSD
Author Information
------------------
An optional section for the role authors to include contact information, or a website (HTML is not allowed).

View File

@ -1,3 +0,0 @@
#SPDX-License-Identifier: MIT-0
---
# defaults file for docker

View File

@ -1,3 +0,0 @@
#SPDX-License-Identifier: MIT-0
---
# handlers file for docker

View File

@ -1,35 +0,0 @@
#SPDX-License-Identifier: MIT-0
galaxy_info:
author: your name
description: your role description
company: your company (optional)
# If the issue tracker for your role is not on github, uncomment the
# next line and provide a value
# issue_tracker_url: http://example.com/issue/tracker
# Choose a valid license ID from https://spdx.org - some suggested licenses:
# - BSD-3-Clause (default)
# - MIT
# - GPL-2.0-or-later
# - GPL-3.0-only
# - Apache-2.0
# - CC-BY-4.0
license: license (GPL-2.0-or-later, MIT, etc)
min_ansible_version: 2.1
# If this a Container Enabled role, provide the minimum Ansible Container version.
# min_ansible_container_version:
galaxy_tags: []
# List tags for your role here, one per line. A tag is a keyword that describes
# and categorizes the role. Users find roles by searching for tags. Be sure to
# remove the '[]' above, if you add tags to this list.
#
# NOTE: A tag is limited to a single word comprised of alphanumeric characters.
# Maximum 20 tags per role.
dependencies: []
# List your role dependencies here, one per line. Be sure to remove the '[]' above,
# if you add dependencies to this list.

View File

@ -1,24 +0,0 @@
---
- name: Add the docker group
group:
name: docker
gid: 1101
state: present
- name: Add the docker user
user:
name: docker
uid: 1101
group: docker
# Directly generate hash
# https://www.lisenet.com/2019/ansible-generate-crypted-passwords-for-the-user-module/
password: "{{ docker_crypted_password | password_hash('sha512') }}"
shell: /bin/bash
system: yes
home: /srv/docker
state: present
- name: Reset ssh connection to allow user changes to affect ansible user
ansible.builtin.meta:
reset_connection

View File

@ -1,41 +0,0 @@
---
- name: Install aptitude
apt:
name: aptitude
state: latest
update_cache: true
- name: Install required system packages
apt:
pkg:
- apt-transport-https
- ca-certificates
- curl
- software-properties-common
- python3-pip
- virtualenv
- python3-setuptools
state: latest
update_cache: true
- name: Add Docker GPG apt Key
apt_key:
url: https://download.docker.com/linux/ubuntu/gpg
state: present
- name: Add Docker Repository
apt_repository:
repo: deb https://download.docker.com/linux/ubuntu focal stable
state: present
- name: Update apt and install docker-ce
apt:
name: docker-ce
state: latest
update_cache: true
- name: Install Docker Module for Python
pip:
break_system_packages: true
name: docker

View File

@ -1,17 +0,0 @@
#SPDX-License-Identifier: MIT-0
---
# tasks file for docker
- include_tasks: install.yml
args:
apply:
become: yes
- include_tasks: docker_user.yml
args:
apply:
become: yes
# - include_tasks: swarm.yml
# args:
# apply:
# become: yes
# become_user: docker

View File

@ -1,3 +0,0 @@
#SPDX-License-Identifier: MIT-0
localhost

View File

@ -1,5 +0,0 @@
#SPDX-License-Identifier: MIT-0
---
- hosts: localhost
roles:
- docker

View File

@ -1,3 +0,0 @@
#SPDX-License-Identifier: MIT-0
---
# vars file for docker

View File

@ -1,4 +0,0 @@
skip_list:
- 'yaml'
- 'risky-shell-pipe'
- 'role-name'

View File

@ -1,4 +0,0 @@
# These are supported funding model platforms
---
github: geerlingguy
patreon: geerlingguy

View File

@ -1,71 +0,0 @@
---
name: CI
'on':
pull_request:
push:
branches:
- master
schedule:
- cron: "0 7 * * 0"
defaults:
run:
working-directory: 'geerlingguy.docker'
jobs:
lint:
name: Lint
runs-on: ubuntu-latest
steps:
- name: Check out the codebase.
uses: actions/checkout@v4
with:
path: 'geerlingguy.docker'
- name: Set up Python 3.
uses: actions/setup-python@v5
with:
python-version: '3.x'
- name: Install test dependencies.
run: pip3 install yamllint
- name: Lint code.
run: |
yamllint .
molecule:
name: Molecule
runs-on: ubuntu-latest
strategy:
matrix:
distro:
- rockylinux9
- ubuntu2404
- ubuntu2204
- debian12
- debian11
- fedora40
- opensuseleap15
steps:
- name: Check out the codebase.
uses: actions/checkout@v4
with:
path: 'geerlingguy.docker'
- name: Set up Python 3.
uses: actions/setup-python@v5
with:
python-version: '3.x'
- name: Install test dependencies.
run: pip3 install ansible molecule molecule-plugins[docker] docker
- name: Run Molecule tests.
run: molecule test
env:
PY_COLORS: '1'
ANSIBLE_FORCE_COLOR: '1'
MOLECULE_DISTRO: ${{ matrix.distro }}

View File

@ -1,40 +0,0 @@
---
# This workflow requires a GALAXY_API_KEY secret present in the GitHub
# repository or organization.
#
# See: https://github.com/marketplace/actions/publish-ansible-role-to-galaxy
# See: https://github.com/ansible/galaxy/issues/46
name: Release
'on':
push:
tags:
- '*'
defaults:
run:
working-directory: 'geerlingguy.docker'
jobs:
release:
name: Release
runs-on: ubuntu-latest
steps:
- name: Check out the codebase.
uses: actions/checkout@v4
with:
path: 'geerlingguy.docker'
- name: Set up Python 3.
uses: actions/setup-python@v5
with:
python-version: '3.x'
- name: Install Ansible.
run: pip3 install ansible-core
- name: Trigger a new import on Galaxy.
run: >-
ansible-galaxy role import --api-key ${{ secrets.GALAXY_API_KEY }}
$(echo ${{ github.repository }} | cut -d/ -f1) $(echo ${{ github.repository }} | cut -d/ -f2)

View File

@ -1,34 +0,0 @@
---
name: Close inactive issues
'on':
schedule:
- cron: "55 6 * * 1" # semi-random time
jobs:
close-issues:
runs-on: ubuntu-latest
permissions:
issues: write
pull-requests: write
steps:
- uses: actions/stale@v8
with:
days-before-stale: 120
days-before-close: 60
exempt-issue-labels: bug,pinned,security,planned
exempt-pr-labels: bug,pinned,security,planned
stale-issue-label: "stale"
stale-pr-label: "stale"
stale-issue-message: |
This issue has been marked 'stale' due to lack of recent activity. If there is no further activity, the issue will be closed in another 30 days. Thank you for your contribution!
Please read [this blog post](https://www.jeffgeerling.com/blog/2020/enabling-stale-issue-bot-on-my-github-repositories) to see the reasons why I mark issues as stale.
close-issue-message: |
This issue has been closed due to inactivity. If you feel this is in error, please reopen the issue or file a new issue with the relevant details.
stale-pr-message: |
This pr has been marked 'stale' due to lack of recent activity. If there is no further activity, the issue will be closed in another 30 days. Thank you for your contribution!
Please read [this blog post](https://www.jeffgeerling.com/blog/2020/enabling-stale-issue-bot-on-my-github-repositories) to see the reasons why I mark issues as stale.
close-pr-message: |
This pr has been closed due to inactivity. If you feel this is in error, please reopen the issue or file a new issue with the relevant details.
repo-token: ${{ secrets.GITHUB_TOKEN }}

View File

@ -1,5 +0,0 @@
*.retry
*/__pycache__
*.pyc
.cache

View File

@ -1,10 +0,0 @@
---
extends: default
rules:
line-length:
max: 200
level: warning
ignore: |
.github/workflows/stale.yml

View File

@ -1,20 +0,0 @@
The MIT License (MIT)
Copyright (c) 2017 Jeff Geerling
Permission is hereby granted, free of charge, to any person obtaining a copy of
this software and associated documentation files (the "Software"), to deal in
the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
the Software, and to permit persons to whom the Software is furnished to do so,
subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

View File

@ -1,173 +0,0 @@
# Ansible Role: Docker
[![CI](https://github.com/geerlingguy/ansible-role-docker/actions/workflows/ci.yml/badge.svg)](https://github.com/geerlingguy/ansible-role-docker/actions/workflows/ci.yml)
An Ansible Role that installs [Docker](https://www.docker.com) on Linux.
## Requirements
None.
## Role Variables
Available variables are listed below, along with default values (see `defaults/main.yml`):
```yaml
# Edition can be one of: 'ce' (Community Edition) or 'ee' (Enterprise Edition).
docker_edition: 'ce'
docker_packages:
- "docker-{{ docker_edition }}"
- "docker-{{ docker_edition }}-cli"
- "docker-{{ docker_edition }}-rootless-extras"
docker_packages_state: present
```
The `docker_edition` should be either `ce` (Community Edition) or `ee` (Enterprise Edition).
You can also specify a specific version of Docker to install using the distribution-specific format:
Red Hat/CentOS: `docker-{{ docker_edition }}-<VERSION>` (Note: you have to add this to all packages);
Debian/Ubuntu: `docker-{{ docker_edition }}=<VERSION>` (Note: you have to add this to all packages).
You can control whether the package is installed, uninstalled, or at the latest version by setting `docker_packages_state` to `present`, `absent`, or `latest`, respectively. Note that the Docker daemon will be automatically restarted if the Docker package is updated. This is a side effect of flushing all handlers (running any of the handlers that have been notified by this and any other role up to this point in the play).
```yaml
docker_obsolete_packages:
- docker
- docker.io
- docker-engine
- docker-doc
- docker-compose
- docker-compose-v2
- podman-docker
- containerd
- runc
```
`docker_obsolete_packages` for different os-family:
- [`RedHat.yml`](./vars/RedHat.yml)
- [`Debian.yml`](./vars/Debian.yml)
- [`Suse.yml`](./vars/Suse.yml)
A list of packages to be uninstalled prior to running this role. See [Docker's installation instructions](https://docs.docker.com/engine/install/debian/#uninstall-old-versions) for an up-to-date list of old packages that should be removed.
```yaml
docker_service_manage: true
docker_service_state: started
docker_service_enabled: true
docker_service_start_command: ""
docker_restart_handler_state: restarted
```
Variables to control the state of the `docker` service, and whether it should start on boot. If you're installing Docker inside a Docker container without systemd or sysvinit, you should set `docker_service_manage` to `false`.
```yaml
docker_install_compose_plugin: true
docker_compose_package: docker-compose-plugin
docker_compose_package_state: present
```
Docker Compose Plugin installation options. These differ from the below in that docker-compose is installed as a docker plugin (and used with `docker compose`) instead of a standalone binary.
```yaml
docker_install_compose: false
docker_compose_version: "v2.32.1"
docker_compose_arch: "{{ ansible_facts.architecture }}"
docker_compose_url: "https://github.com/docker/compose/releases/download/{{ docker_compose_version }}/docker-compose-linux-{{ docker_compose_arch }}"
docker_compose_path: /usr/local/bin/docker-compose
```
Docker Compose installation options.
```yaml
docker_add_repo: true
```
Controls whether this role will add the official Docker repository. Set to `false` if you want to use the default docker packages for your system or manage the package repository on your own.
```yaml
docker_repo_url: https://download.docker.com/linux
```
The main Docker repo URL, common between Debian and RHEL systems.
```yaml
docker_apt_release_channel: stable
docker_apt_gpg_key: "{{ docker_repo_url }}/{{ ansible_facts.distribution | lower }}/gpg"
docker_apt_filename: "docker"
```
(Used only for Debian/Ubuntu.) You can switch the channel to `nightly` if you want to use the Nightly release.
You can change `docker_apt_gpg_key` to a different url if you are behind a firewall or provide a trustworthy mirror.
`docker_apt_filename` controls the name of the source list file created in `sources.list.d`. If you are upgrading from an older (<7.0.0) version of this role, you should change this to the name of the existing file (e.g. `download_docker_com_linux_debian` on Debian) to avoid conflicting lists.
```yaml
docker_yum_repo_url: "{{ docker_repo_url }}/{{ 'fedora' if ansible_facts.distribution == 'Fedora' else 'rhel' if ansible_facts.distribution == 'RedHat' else 'centos' }}/docker-{{ docker_edition }}.repo"
docker_yum_repo_enable_nightly: '0'
docker_yum_repo_enable_test: '0'
docker_yum_gpg_key: "{{ docker_repo_url }}/{{ 'fedora' if ansible_facts.distribution == 'Fedora' else 'rhel' if ansible_facts.distribution == 'RedHat' else 'centos' }}/gpg"
```
(Used only for RedHat/CentOS.) You can enable the Nightly or Test repo by setting the respective vars to `1`.
You can change `docker_yum_gpg_key` to a different url if you are behind a firewall or provide a trustworthy mirror.
Usually in combination with changing `docker_yum_repo_url` as well.
```yaml
docker_users:
- user1
- user2
```
A list of system users to be added to the `docker` group (so they can use Docker on the server).
```yaml
docker_daemon_options:
storage-driver: "overlay2"
log-opts:
max-size: "100m"
```
Custom `dockerd` options can be configured through this dictionary representing the json file `/etc/docker/daemon.json`.
## Use with Ansible (and `docker` Python library)
Many users of this role wish to also use Ansible to then _build_ Docker images and manage Docker containers on the server where Docker is installed. In this case, you can easily add in the `docker` Python library using the `geerlingguy.pip` role:
```yaml
- hosts: all
vars:
pip_install_packages:
- name: docker
roles:
- geerlingguy.pip
- geerlingguy.docker
```
## Dependencies
None.
## Example Playbook
```yaml
- hosts: all
roles:
- geerlingguy.docker
```
## License
MIT / BSD
## Sponsors
* [We Manage](https://we-manage.de): Helping start-ups and grown-ups scaling their infrastructure in a sustainable way.
The above sponsor(s) are supporting Jeff Geerling on [GitHub Sponsors](https://github.com/sponsors/geerlingguy). You can sponsor Jeff's work too, to help him continue improving these Ansible open source projects!
## Author Information
This role was created in 2017 by [Jeff Geerling](https://www.jeffgeerling.com/), author of [Ansible for DevOps](https://www.ansiblefordevops.com/).

View File

@ -1,66 +0,0 @@
---
# Edition can be one of: 'ce' (Community Edition) or 'ee' (Enterprise Edition).
docker_edition: 'ce'
docker_version: ''
docker_packages:
- "docker-{{ docker_edition }}{{ docker_version }}"
- "docker-{{ docker_edition }}-cli{{ docker_version }}"
- "docker-{{ docker_edition }}-rootless-extras{{ docker_version }}"
- "containerd.io"
- docker-buildx-plugin
docker_packages_state: present
docker_obsolete_packages:
- docker
- docker.io
- docker-engine
- docker-doc
- docker-compose
- docker-compose-v2
- podman-docker
- containerd
- runc
# Service options.
docker_service_manage: true
docker_service_state: started
docker_service_enabled: true
docker_service_start_command: ""
docker_restart_handler_state: restarted
# Docker Compose Plugin options.
docker_install_compose_plugin: true
docker_compose_package: docker-compose-plugin
docker_compose_package_state: present
# Docker Compose options.
docker_install_compose: false
docker_compose_version: "v2.32.1"
docker_compose_arch: "{{ ansible_facts.architecture }}"
docker_compose_url: "https://github.com/docker/compose/releases/download/{{ docker_compose_version }}/docker-compose-linux-{{ docker_compose_arch }}"
docker_compose_path: /usr/local/bin/docker-compose
# Enable repo setup
docker_add_repo: true
# Docker repo URL.
docker_repo_url: https://download.docker.com/linux
# Used only for Debian/Ubuntu/Pop!_OS/Linux Mint. Switch 'stable' to 'nightly' if needed.
docker_apt_release_channel: stable
# docker_apt_ansible_distribution is a workaround for Ubuntu variants which can't be identified as such by Ansible,
# and is only necessary until Docker officially supports them.
docker_apt_ansible_distribution: "{{ 'ubuntu' if ansible_facts.distribution in ['Pop!_OS', 'Linux Mint'] else ansible_facts.distribution }}"
docker_apt_gpg_key: "{{ docker_repo_url }}/{{ docker_apt_ansible_distribution | lower }}/gpg"
docker_apt_filename: "docker"
# Used only for RedHat/CentOS/Fedora.
docker_yum_repo_url: "{{ docker_repo_url }}/{{ 'fedora' if ansible_facts.distribution == 'Fedora' else 'rhel' if ansible_facts.distribution == 'RedHat' else 'centos' }}/docker-{{ docker_edition }}.repo"
docker_yum_repo_enable_nightly: '0'
docker_yum_repo_enable_test: '0'
docker_yum_gpg_key: "{{ docker_repo_url }}/{{ 'fedora' if ansible_facts.distribution == 'Fedora' else 'rhel' if ansible_facts.distribution == 'RedHat' else 'centos' }}/gpg"
# A list of users who will be added to the docker group.
docker_users: []
# Docker daemon options as a dict
docker_daemon_options: {}

View File

@ -1,11 +0,0 @@
---
# Restart (or reload, per docker_restart_handler_state) the Docker daemon.
# Skipped entirely when the role is told not to manage the service.
- name: restart docker
  ansible.builtin.service:
    name: docker
    state: "{{ docker_restart_handler_state }}"
  # In check mode the service may not exist yet (package never installed),
  # so a failure here is tolerated.
  ignore_errors: "{{ ansible_check_mode }}"
  when: docker_service_manage | bool

# Refresh the apt cache after repository definitions change, so the next
# package task sees the Docker repo's packages.
- name: apt update
  ansible.builtin.apt:
    update_cache: true

View File

@ -1,2 +0,0 @@
install_date: Thu Nov 13 15:43:34 2025
version: 7.8.0

View File

@ -1,46 +0,0 @@
---
# Ansible Galaxy role metadata.
dependencies: []

galaxy_info:
  role_name: docker
  author: geerlingguy
  description: Docker for Linux.
  company: "Midwestern Mac, LLC"
  license: "license (BSD, MIT)"
  min_ansible_version: 2.15.1
  platforms:
    - name: Fedora
      versions:
        - all
    - name: Debian
      versions:
        - buster
        - bullseye
        - bookworm
        - trixie
    - name: Ubuntu
      versions:
        - bionic
        - focal
        - jammy
        - noble
    - name: Alpine
      # Fixed: this key was previously the singular 'version', which the
      # Galaxy schema ignores — every other platform uses 'versions'.
      versions:
        - all
    - name: ArchLinux
      versions:
        - all
    - name: SLES
      versions:
        - all
    - name: openSUSE
      versions:
        - all
  galaxy_tags:
    - web
    - system
    - containers
    - docker
    - orchestration
    - compose
    - server
View File

@ -1,24 +0,0 @@
---
# Molecule converge playbook: prepares the container and applies the role.
- name: Converge
  hosts: all
  # become: true

  pre_tasks:
    # Native block-YAML module args instead of the legacy 'key=value'
    # string form; 'true' instead of the truthy 'yes'.
    - name: Update apt cache.
      apt:
        update_cache: true
        cache_valid_time: 600
      when: ansible_facts.os_family == 'Debian'

    # Without this, tasks can race systemd unit setup in fresh containers.
    - name: Wait for systemd to complete initialization.  # noqa 303
      command: systemctl is-system-running
      register: systemctl_status
      until: >
        'running' in systemctl_status.stdout or
        'degraded' in systemctl_status.stdout
      retries: 30
      delay: 5
      when: ansible_facts.service_mgr == 'systemd'
      changed_when: false
      # rc 0 = running, rc 1 = degraded (acceptable in containers).
      failed_when: systemctl_status.rc > 1

  roles:
    - role: geerlingguy.docker

View File

@ -1,21 +0,0 @@
---
role_name_check: 1
dependency:
name: galaxy
options:
ignore-errors: true
driver:
name: docker
platforms:
- name: instance
image: "geerlingguy/docker-${MOLECULE_DISTRO:-rockylinux9}-ansible:latest"
command: ${MOLECULE_DOCKER_COMMAND:-""}
volumes:
- /sys/fs/cgroup:/sys/fs/cgroup:rw
cgroupns_mode: host
privileged: true
pre_build_image: true
provisioner:
name: ansible
playbooks:
converge: ${MOLECULE_PLAYBOOK:-converge.yml}

View File

@ -1,51 +0,0 @@
---
# Molecule verify playbook: smoke-tests the installed Docker engine.
- name: Verify Docker Role
  hosts: all

  tasks:
    - name: Verify Docker binary is available
      command: docker version
      register: docker_version_result
      changed_when: false
      failed_when: docker_version_result.rc != 0

    - name: Show Docker version details
      debug:
        msg: >
          Docker Version Output:
          {{ docker_version_result.stdout_lines | join('\n') }}

    - name: Verify Docker service is running
      command: systemctl is-active docker
      register: docker_service_status
      when: ansible_facts.service_mgr == 'systemd'
      changed_when: false
      # Use the Jinja 'trim' filter rather than the Python .strip() method.
      failed_when: (docker_service_status.stdout | trim) != "active"

    - name: Display Docker service status
      debug:
        msg: "Docker service is {{ docker_service_status.stdout | trim }}"
      when: ansible_facts.service_mgr == 'systemd'

    - name: Pull the 'hello-world' image
      command: docker pull hello-world
      register: docker_pull_result
      changed_when: true
      failed_when: docker_pull_result.rc != 0

    - name: Show result of pulling the 'hello-world' image
      debug:
        msg: >
          Pulling 'hello-world' completed with output:
          {{ docker_pull_result.stdout_lines | join('\n') }}

    - name: Run a test container (hello-world)
      command: docker run --rm hello-world
      register: docker_run_result
      changed_when: true
      failed_when: docker_run_result.rc != 0

    - name: Display test container output
      debug:
        msg: >
          Running 'hello-world' container completed with output:
          {{ docker_run_result.stdout_lines | join('\n') }}

View File

@ -1,31 +0,0 @@
---
# Install the standalone docker-compose binary (only when
# docker_install_compose is enabled), upgrading if an older copy exists.
- name: Check current docker-compose version.
  command: "{{ docker_compose_path }} --version"
  register: docker_compose_vsn
  check_mode: false
  changed_when: false
  failed_when: false

- name: Extract installed docker-compose version number.
  set_fact:
    docker_compose_current_version: "{{ docker_compose_vsn.stdout | regex_search('(\\d+(\\.\\d+)+)') }}"
  when: >
    docker_compose_vsn.stdout is defined
    and (docker_compose_vsn.stdout | length > 0)

- name: Delete existing docker-compose version if it's different.
  file:
    path: "{{ docker_compose_path }}"
    state: absent
  when: >
    docker_compose_current_version is defined
    and (docker_compose_version | regex_replace('v', '')) not in docker_compose_current_version

- name: Install Docker Compose (if configured).
  get_url:
    url: "{{ docker_compose_url }}"
    dest: "{{ docker_compose_path }}"
    # Quoted: an unquoted 0755 is the integer 493 in YAML 1.1 octal.
    mode: "0755"
  when: >
    (docker_compose_current_version is not defined)
    or (docker_compose_current_version | length == 0)
    or (docker_compose_current_version is version((docker_compose_version | regex_replace('v', '')), '<'))

View File

@ -1,10 +0,0 @@
---
# Add each configured user to the 'docker' group so they can use the
# Docker socket without sudo.
- name: Ensure docker users are added to the docker group.
  user:
    name: "{{ item }}"
    groups: docker
    append: true  # keep each user's existing supplementary groups
  loop: "{{ docker_users }}"

# New group membership only applies to fresh login sessions; dropping the
# SSH connection forces Ansible to reconnect on the next task.
- name: Reset ssh connection to apply user changes.
  meta: reset_connection

View File

@ -1,122 +0,0 @@
---
# Main task flow: load per-OS vars, run per-OS setup, install packages,
# configure the daemon, and manage the service and docker group members.
- name: Load OS-specific vars.
  include_vars: "{{ lookup('first_found', params) }}"
  vars:
    params:
      files:
        - '{{ ansible_facts.distribution }}.yml'
        - '{{ ansible_facts.os_family }}.yml'
        - main.yml
      paths:
        - 'vars'

- include_tasks: setup-RedHat.yml
  when: ansible_facts.os_family == 'RedHat'

- include_tasks: setup-Suse.yml
  when: ansible_facts.os_family == 'Suse'

- include_tasks: setup-Debian.yml
  when: ansible_facts.os_family == 'Debian'

# allow_downgrade only exists in the package module on Ansible >= 2.12 and
# only for RedHat/Debian package managers — hence the paired tasks below.
- name: Install Docker packages.
  package:
    name: "{{ docker_packages }}"
    state: "{{ docker_packages_state }}"
  notify: restart docker
  ignore_errors: "{{ ansible_check_mode }}"
  when: "ansible_version.full is version_compare('2.12', '<') or ansible_facts.os_family not in ['RedHat', 'Debian']"

- name: Install Docker packages (with downgrade option).
  package:
    name: "{{ docker_packages }}"
    state: "{{ docker_packages_state }}"
    allow_downgrade: true
  notify: restart docker
  ignore_errors: "{{ ansible_check_mode }}"
  when: "ansible_version.full is version_compare('2.12', '>=') and ansible_facts.os_family in ['RedHat', 'Debian']"

- name: Install docker-compose plugin.
  package:
    name: "{{ docker_compose_package }}"
    state: "{{ docker_compose_package_state }}"
  notify: restart docker
  ignore_errors: "{{ ansible_check_mode }}"
  when: "docker_install_compose_plugin | bool and (ansible_version.full is version_compare('2.12', '<') or ansible_facts.os_family not in ['RedHat', 'Debian', 'Suse'])"

- name: Install docker-compose-plugin (with downgrade option).
  package:
    name: "{{ docker_compose_package }}"
    state: "{{ docker_compose_package_state }}"
    allow_downgrade: true
  notify: restart docker
  ignore_errors: "{{ ansible_check_mode }}"
  when: "docker_install_compose_plugin | bool and ansible_version.full is version_compare('2.12', '>=') and ansible_facts.os_family in ['RedHat', 'Debian']"

- name: Ensure /etc/docker/ directory exists.
  file:
    path: /etc/docker
    state: directory
    # Quoted: an unquoted 0755 is the integer 493 in YAML 1.1 octal.
    mode: "0755"
  when: docker_daemon_options.keys() | length > 0

- name: Configure Docker daemon options.
  copy:
    content: "{{ docker_daemon_options | to_nice_json }}"
    dest: /etc/docker/daemon.json
    mode: "0644"
  when: docker_daemon_options.keys() | length > 0
  notify: restart docker

- name: Replace Docker service ExecStart command if configured.
  when: docker_service_start_command != ""
  notify: restart docker
  block:
    - name: Get Docker service status
      ansible.builtin.systemd_service:
        name: docker
      register: docker_service_status

    - name: Patch docker.service
      ansible.builtin.replace:
        path: "{{ docker_service_status.status['FragmentPath'] }}"
        regexp: "^ExecStart=.*$"
        replace: "ExecStart={{ docker_service_start_command }}"
      register: docker_service_patch

    - name: Reload systemd services
      service:
        daemon_reload: true
      when: docker_service_patch is changed

- name: Ensure Docker is started and enabled at boot.
  service:
    name: docker
    state: "{{ docker_service_state }}"
    enabled: "{{ docker_service_enabled }}"
  ignore_errors: "{{ ansible_check_mode }}"
  when: docker_service_manage | bool

- name: Ensure handlers are notified now to avoid firewall conflicts.
  meta: flush_handlers

- include_tasks: docker-compose.yml
  when: docker_install_compose | bool

- name: Get docker group info using getent.
  getent:
    database: group
    key: docker
    split: ':'
  when: docker_users | length > 0

- name: Check if there are any users to add to the docker group.
  set_fact:
    at_least_one_user_to_modify: true
  when:
    - docker_users | length > 0
    - item not in ansible_facts.getent_group["docker"][2]
  with_items: "{{ docker_users }}"

- include_tasks: docker-users.yml
  when: at_least_one_user_to_modify is defined

View File

@ -1,42 +0,0 @@
---
- name: Ensure apt key is not present in trusted.gpg.d
ansible.builtin.file:
path: /etc/apt/trusted.gpg.d/docker.asc
state: absent
- name: Ensure old apt source list is not present in /etc/apt/sources.list.d
ansible.builtin.file:
path: "/etc/apt/sources.list.d/download_docker_com_linux_{{ docker_apt_ansible_distribution | lower }}.list"
state: absent
# See https://docs.docker.com/engine/install/debian/#uninstall-old-versions
- name: Ensure old versions of Docker are not installed.
ansible.builtin.package:
name: "{{ docker_obsolete_packages }}"
state: absent
- name: Ensure legacy repo file is not present.
ansible.builtin.file:
path: "/etc/apt/sources.list.d/docker.list"
state: absent
- name: Ensure dependencies are installed.
ansible.builtin.apt:
name:
- ca-certificates
- python3-debian
state: present
- name: Add or remove Docker repository.
ansible.builtin.deb822_repository:
name: "{{ docker_apt_filename }}"
types: deb
uris: "{{ docker_repo_url }}/{{ docker_apt_ansible_distribution | lower }}"
suites: "{{ ansible_facts.distribution_release }}"
components: "{{ docker_apt_release_channel }}"
signed_by: "{{ docker_apt_gpg_key }}"
state: "{{ 'present' if docker_add_repo | bool else 'absent' }}"
notify: apt update
- name: Ensure handlers are notified immediately to update the apt cache.
ansible.builtin.meta: flush_handlers

View File

@ -1,58 +0,0 @@
---
# RedHat-family setup: remove conflicting packages, configure the Docker
# yum repo (stable/nightly/test channels), and handle RHEL 8 containerd.
- name: Ensure old versions of Docker are not installed.
  package:
    name: "{{ docker_obsolete_packages }}"
    state: absent

- name: Add Docker GPG key.
  rpm_key:
    key: "{{ docker_yum_gpg_key }}"
    state: present
  when: docker_add_repo | bool

- name: Add Docker repository.
  get_url:
    url: "{{ docker_yum_repo_url }}"
    dest: '/etc/yum.repos.d/docker-{{ docker_edition }}.repo'
    owner: root
    group: root
    # Quoted: an unquoted 0644 is the integer 420 in YAML 1.1 octal.
    mode: "0644"
  when: docker_add_repo | bool

- name: Configure Docker Nightly repo.
  ini_file:
    dest: '/etc/yum.repos.d/docker-{{ docker_edition }}.repo'
    section: 'docker-{{ docker_edition }}-nightly'
    option: enabled
    value: '{{ docker_yum_repo_enable_nightly }}'
    mode: "0644"
    no_extra_spaces: true
  when: docker_add_repo | bool

- name: Configure Docker Test repo.
  ini_file:
    dest: '/etc/yum.repos.d/docker-{{ docker_edition }}.repo'
    section: 'docker-{{ docker_edition }}-test'
    option: enabled
    value: '{{ docker_yum_repo_enable_test }}'
    mode: "0644"
    no_extra_spaces: true
  when: docker_add_repo | bool

- name: Configure containerd on RHEL 8.
  block:
    - name: Ensure runc is not installed.
      package:
        name: runc
        state: absent

    - name: Ensure container-selinux is installed.
      package:
        name: container-selinux
        state: present

    - name: Ensure containerd.io is installed.
      package:
        name: containerd.io
        state: present
  when: ansible_facts.distribution_major_version | int == 8

View File

@ -1,39 +0,0 @@
---
# SUSE-family setup. Canonical booleans (true/false) replace the truthy
# 'yes'/'no' forms flagged by yamllint.
# Remove old or conflicting Docker packages.
- name: Ensure old versions of Docker are not installed
  package:
    name: "{{ docker_obsolete_packages }}"
    state: absent
  # NOTE(review): check_mode: false forces this removal to run even in
  # --check runs, and changed_when: false hides real removals — presumably
  # intentional for Molecule idempotence; confirm before relying on it.
  check_mode: false
  changed_when: false

# Add Docker repository (openSUSE / SLES).
- name: Add Docker repository
  zypper_repository:
    name: "docker-ce"
    repo: "{{ docker_zypper_repo_url }}"
    state: present
    auto_import_keys: true
  when: docker_add_repo | bool

# Refresh zypper repositories only if the repo was added.
- name: Refresh zypper repositories
  command: zypper --non-interactive refresh
  when: docker_add_repo | bool
  register: zypper_refresh
  changed_when: false  # idempotent for Molecule

# Install Docker packages.
- name: Ensure Docker packages are installed
  ansible.legacy.zypper:
    name: "{{ docker_packages }}"
    state: present
  changed_when: false  # idempotent for Molecule

# Ensure Docker is started and enabled at boot.
- name: Ensure Docker is started and enabled at boot
  systemd:
    name: docker
    state: started
    enabled: true
  changed_when: false  # idempotent for Molecule

View File

@ -1,3 +0,0 @@
---
docker_packages: "docker"
docker_compose_package: docker-cli-compose

View File

@ -1,3 +0,0 @@
---
docker_packages: "docker"
docker_compose_package: docker-compose

View File

@ -1,14 +0,0 @@
---
# Used only for Debian/Ubuntu (Debian OS-Family)
# https://docs.docker.com/engine/install/debian/#uninstall-old-versions
docker_obsolete_packages:
- docker
- docker.io
- docker-engine
- docker-doc
- docker-compose
- docker-compose-v2
- podman-docker
- containerd
- runc

View File

@ -1,14 +0,0 @@
---
# Used only for Fedora/Rocky (RedHat OS-Family)
# https://docs.docker.com/engine/install/fedora/#uninstall-old-versions
# https://docs.docker.com/engine/install/centos/#uninstall-old-versions
docker_obsolete_packages:
- docker
- docker-client
- docker-client-latest
- docker-common
- docker-latest
- docker-latest-logrotate
- docker-logrotate
- docker-engine

View File

@ -1,41 +0,0 @@
---
# Used only for openSUSE / SLES (SUSE OS-Family)
# https://en.opensuse.org/Docker
# https://docs.docker.com/engine/install/binaries/
# Packages to remove if present (old or conflicting Docker packages)
docker_obsolete_packages:
- docker-engine
- docker.io
- docker-ce
- docker-ce-cli
- docker-buildx-plugin
- docker-ce-rootless-extras
- containerd.io
- runc
# Packages to install on openSUSE / SLES
# Use 'runc' from repo, not 'docker-runc' (avoids conflicts on Leap 15.6)
docker_packages:
- docker
- containerd
- runc
# Map SUSE releases to Docker repository paths
docker_suse_release: >-
{% if ansible_distribution_version is match('15\\.6') %}
openSUSE_Leap_15.6
{% elif ansible_distribution_version is match('15\\.5') %}
openSUSE_Leap_15.5
{% elif ansible_distribution_version is match('15\\.4') %}
openSUSE_Leap_15.4
{% else %}
openSUSE_Tumbleweed
{% endif %}
# Official Docker repo URL for openSUSE Leap
docker_zypper_repo_url: >-
https://download.opensuse.org/repositories/Virtualization:/containers/{{ docker_suse_release | trim }}/
# Control whether to add the Docker repository
docker_add_repo: true

View File

@ -1,2 +0,0 @@
---
# Empty file

View File

@ -1,3 +0,0 @@
skip_list:
- 'yaml'
- 'role-name'

View File

@ -1,4 +0,0 @@
# These are supported funding model platforms
---
github: geerlingguy
patreon: geerlingguy

View File

@ -1,70 +0,0 @@
---
name: CI
'on':
pull_request:
push:
branches:
- master
schedule:
- cron: "0 4 * * 5"
defaults:
run:
working-directory: 'geerlingguy.pip'
jobs:
lint:
name: Lint
runs-on: ubuntu-latest
steps:
- name: Check out the codebase.
uses: actions/checkout@v3
with:
path: 'geerlingguy.pip'
- name: Set up Python 3.
uses: actions/setup-python@v4
with:
python-version: '3.x'
- name: Install test dependencies.
run: pip3 install yamllint
- name: Lint code.
run: |
yamllint .
molecule:
name: Molecule
runs-on: ubuntu-latest
strategy:
matrix:
distro:
- rockylinux9
- fedora39
- ubuntu2204
- ubuntu2004
- debian12
- debian11
steps:
- name: Check out the codebase.
uses: actions/checkout@v3
with:
path: 'geerlingguy.pip'
- name: Set up Python 3.
uses: actions/setup-python@v4
with:
python-version: '3.x'
- name: Install test dependencies.
run: pip3 install ansible molecule molecule-plugins[docker] docker
- name: Run Molecule tests.
run: molecule test
env:
PY_COLORS: '1'
ANSIBLE_FORCE_COLOR: '1'
MOLECULE_DISTRO: ${{ matrix.distro }}

View File

@ -1,40 +0,0 @@
---
# This workflow requires a GALAXY_API_KEY secret present in the GitHub
# repository or organization.
#
# See: https://github.com/marketplace/actions/publish-ansible-role-to-galaxy
# See: https://github.com/ansible/galaxy/issues/46
name: Release
'on':
push:
tags:
- '*'
defaults:
run:
working-directory: 'geerlingguy.pip'
jobs:
release:
name: Release
runs-on: ubuntu-latest
steps:
- name: Check out the codebase.
uses: actions/checkout@v4
with:
path: 'geerlingguy.pip'
- name: Set up Python 3.
uses: actions/setup-python@v5
with:
python-version: '3.x'
- name: Install Ansible.
run: pip3 install ansible-core
- name: Trigger a new import on Galaxy.
run: >-
ansible-galaxy role import --api-key ${{ secrets.GALAXY_API_KEY }}
$(echo ${{ github.repository }} | cut -d/ -f1) $(echo ${{ github.repository }} | cut -d/ -f2)

View File

@ -1,34 +0,0 @@
---
# Automatically mark and close inactive issues/PRs.
# Requires no secrets beyond the default GITHUB_TOKEN.
name: Close inactive issues
'on':
  schedule:
    - cron: "55 21 * * 5"  # semi-random time

jobs:
  close-issues:
    runs-on: ubuntu-latest
    permissions:
      issues: write
      pull-requests: write
    steps:
      - uses: actions/stale@v8
        with:
          days-before-stale: 120
          days-before-close: 60
          exempt-issue-labels: bug,pinned,security,planned
          exempt-pr-labels: bug,pinned,security,planned
          stale-issue-label: "stale"
          stale-pr-label: "stale"
          # NOTE: the day count in the messages below must match
          # days-before-close above (60) — it previously said 30.
          stale-issue-message: |
            This issue has been marked 'stale' due to lack of recent activity. If there is no further activity, the issue will be closed in another 60 days. Thank you for your contribution!
            Please read [this blog post](https://www.jeffgeerling.com/blog/2020/enabling-stale-issue-bot-on-my-github-repositories) to see the reasons why I mark issues as stale.
          close-issue-message: |
            This issue has been closed due to inactivity. If you feel this is in error, please reopen the issue or file a new issue with the relevant details.
          stale-pr-message: |
            This PR has been marked 'stale' due to lack of recent activity. If there is no further activity, the PR will be closed in another 60 days. Thank you for your contribution!
            Please read [this blog post](https://www.jeffgeerling.com/blog/2020/enabling-stale-issue-bot-on-my-github-repositories) to see the reasons why I mark issues as stale.
          close-pr-message: |
            This PR has been closed due to inactivity. If you feel this is in error, please reopen the PR or file a new issue with the relevant details.
          repo-token: ${{ secrets.GITHUB_TOKEN }}

View File

@ -1,5 +0,0 @@
*.retry
*/__pycache__
*.pyc
.cache

View File

@ -1,10 +0,0 @@
---
extends: default
rules:
line-length:
max: 120
level: warning
ignore: |
.github/workflows/stale.yml

View File

@ -1,20 +0,0 @@
The MIT License (MIT)
Copyright (c) 2017 Jeff Geerling
Permission is hereby granted, free of charge, to any person obtaining a copy of
this software and associated documentation files (the "Software"), to deal in
the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
the Software, and to permit persons to whom the Software is furnished to do so,
subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

View File

@ -1,80 +0,0 @@
# Ansible Role: Pip (for Python)
[![CI](https://github.com/geerlingguy/ansible-role-pip/actions/workflows/ci.yml/badge.svg)](https://github.com/geerlingguy/ansible-role-pip/actions/workflows/ci.yml)
An Ansible Role that installs [Pip](https://pip.pypa.io) on Linux.
## Requirements
On RedHat/CentOS, you may need to have EPEL installed before running this role. You can use the `geerlingguy.repo-epel` role if you need a simple way to ensure it's installed.
## Role Variables
Available variables are listed below, along with default values (see `defaults/main.yml`):
pip_package: python3-pip
The name of the package to install to get `pip` on the system. For older systems that don't have Python 3 available, you can set this to `python-pip`.
pip_executable: pip3
The role will try to autodetect the pip executable based on the `pip_package` (e.g. `pip` for Python 2 and `pip3` for Python 3). You can also override this explicitly, e.g. `pip_executable: pip3.6`.
pip_install_packages: []
A list of packages to install with pip. Examples below:
pip_install_packages:
# Specify names and versions.
- name: docker
version: "1.2.3"
- name: awscli
version: "1.11.91"
# Or specify bare packages to get the latest release.
- docker
- awscli
# Or uninstall a package.
- name: docker
state: absent
# Or update a package to the latest version.
- name: docker
state: latest
# Or force a reinstall.
- name: docker
state: forcereinstall
# Or install a package in a particular virtualenv.
- name: docker
virtualenv: /my_app/venv
# Or pass through any extra arguments.
- name: my_special_package_from_my_special_repo
extra_args: --extra-index-url https://my-domain/pypi/pypi-master/simple
## Dependencies
None.
## Example Playbook
- hosts: all
vars:
pip_install_packages:
- name: docker
- name: awscli
roles:
- geerlingguy.pip
## License
MIT / BSD
## Author Information
This role was created in 2017 by [Jeff Geerling](https://www.jeffgeerling.com/), author of [Ansible for DevOps](https://www.ansiblefordevops.com/).

View File

@ -1,6 +0,0 @@
---
# For Python 3, use python3-pip.
pip_package: python3-pip
pip_executable: "{{ 'pip3' if pip_package.startswith('python3') else 'pip' }}"
pip_install_packages: []

View File

@ -1,2 +0,0 @@
install_date: Thu Nov 13 15:51:53 2025
version: 3.1.1

View File

@ -1,28 +0,0 @@
---
dependencies: []
galaxy_info:
role_name: pip
author: geerlingguy
description: Pip (Python package manager) for Linux.
issue_tracker_url: https://github.com/geerlingguy/ansible-role-pip/issues
company: "Midwestern Mac, LLC"
license: "MIT"
min_ansible_version: 2.10
platforms:
- name: Fedora
versions:
- all
- name: Debian
versions:
- all
- name: Ubuntu
versions:
- all
galaxy_tags:
- system
- server
- packaging
- python
- pip
- tools

View File

@ -1,28 +0,0 @@
---
# Molecule converge playbook: applies the pip role with test packages.
- name: Converge
  hosts: all
  # become: true
  vars:
    pip_install_packages:
      # Test installing a specific version of a package.
      - name: ipaddress
        version: "1.0.18"
      # Test installing a package by name.
      - colorama

  pre_tasks:
    # Block-style module args instead of the legacy key=value shorthand.
    - name: Update apt cache.
      apt:
        update_cache: true
        cache_valid_time: 600
      when: ansible_facts.os_family == 'Debian'

    # Older distros ship pip only as the Python 2 'python-pip' package.
    - name: Set package name for older OSes.
      set_fact:
        pip_package: python-pip
      when: >
        (ansible_facts.os_family == 'RedHat') and (ansible_facts.distribution_major_version | int < 8)
        or (ansible_facts.distribution == 'Debian') and (ansible_facts.distribution_major_version | int < 10)
        or (ansible_facts.distribution == 'Ubuntu') and (ansible_facts.distribution_major_version | int < 18)

  roles:
    - role: geerlingguy.pip

View File

@ -1,21 +0,0 @@
---
# Molecule scenario configuration: Docker-driven test instance.
role_name_check: 1
dependency:
  name: galaxy
  options:
    ignore-errors: true
driver:
  name: docker
platforms:
  - name: instance
    # Target distro is selectable via MOLECULE_DISTRO (default: Rocky Linux 9).
    image: "geerlingguy/docker-${MOLECULE_DISTRO:-rockylinux9}-ansible:latest"
    command: ${MOLECULE_DOCKER_COMMAND:-""}
    volumes:
      - /sys/fs/cgroup:/sys/fs/cgroup:rw
    cgroupns_mode: host
    privileged: true
    pre_build_image: true
provisioner:
  name: ansible
  playbooks:
    converge: ${MOLECULE_PLAYBOOK:-converge.yml}

View File

@ -1,20 +0,0 @@
---
- name: Ensure Pip is installed.
  package:
    name: "{{ pip_package }}"
    state: present

# Deletes the PEP 668 marker file — presumably to permit system-wide pip
# installs on newer distros; NOTE(review): confirm this is intended, as it
# disables the distro's protection against breaking system Python packages.
- name: Remove EXTERNALLY-MANAGED
  ansible.builtin.file:
    path: /usr/lib/python3.{{ ansible_facts.python.version.minor }}/EXTERNALLY-MANAGED
    state: absent

# Each list item may be a bare name or a dict; absent dict keys fall back to
# the module defaults via omit. A package targeting a virtualenv uses that
# venv's pip instead of the global executable.
- name: Ensure pip_install_packages are installed.
  pip:
    name: "{{ item.name | default(item) }}"
    version: "{{ item.version | default(omit) }}"
    virtualenv: "{{ item.virtualenv | default(omit) }}"
    state: "{{ item.state | default(omit) }}"
    extra_args: "{{ item.extra_args | default(omit) }}"
    executable: "{{ item.virtualenv | default(false) | ternary(omit, pip_executable) }}"
  loop: "{{ pip_install_packages }}"

View File

@ -1,2 +0,0 @@
# This role requires the local 'docker' role to have been applied first.
dependencies:
- role: docker

View File

@ -1,2 +0,0 @@
---
# Entry point: all work is delegated to the Nextcloud AIO task file.
- import_tasks: nextcloud_docker_aio.yaml

View File

@ -1,18 +0,0 @@
---
# Placeholder: volume creation is not implemented yet.
- name: Create volumes
  debug:
    msg:
      - "TODO!!!"

# Clone into the 'docker' user's home so the compose task below finds it
# at ~/SSE-Lab (the original relative dest depended on the connection cwd).
- name: Download compose file
  become: true
  become_user: docker
  ansible.builtin.git:
    repo: "https://gitea-s2i2s.isti.cnr.it/sinibaldi/SSE-Lab"
    dest: ~/SSE-Lab

# docker_compose_v2's project_src must be the project *directory*; the compose
# file itself is named via 'files' (the original passed the file as project_src).
- name: create and start docker compose services
  become: true
  become_user: docker
  community.docker.docker_compose_v2:
    project_src: ~/SSE-Lab/dockerized/nextcloud-aio
    files:
      - compose.yaml

View File

@ -1,3 +0,0 @@
# JetBrains IDE metadata.
.idea/
*.iml
# Vagrant runtime state.
.vagrant/

View File

@ -1,38 +0,0 @@
---
# Legacy Travis CI configuration (Python 2.7-era container infra).
sudo: required
language: python
python: "2.7"

env:
  - SITE=test.yml

before_install:
  - sudo apt-get update -qq
  - sudo apt-get install -y curl

install:
  # Install Ansible.
  - pip install ansible
  # Add ansible.cfg to pick up roles path.
  #- "{ echo '[defaults]'; echo 'roles_path = ../'; } >> ansible.cfg"

script:
  # Check the role/playbook's syntax.
  - "ansible-playbook -i tests/inventory tests/$SITE --syntax-check"
  # Run the role/playbook with ansible-playbook.
  - "ansible-playbook -i tests/inventory tests/$SITE --connection=local --sudo"
  # Run the role/playbook again, checking to make sure it's idempotent.
  - >
    ansible-playbook -i tests/inventory tests/$SITE --connection=local --sudo
    | grep -q 'changed=2.*failed=0'
    && (echo 'Idempotence test: pass' && exit 0)
    || (echo 'Idempotence test: fail' && exit 1)
  # TODO - get the test working.
  # Request a file via FTP, to make sure pure-ftpd is running and responds.

notifications:
  webhooks: https://galaxy.ansible.com/api/v1/notifications/

View File

@ -1,106 +0,0 @@
# Ansible Role: Pure-FTPd
[![Build Status](https://travis-ci.org/robgmills/ansible-pure-ftpd.svg?branch=master)](https://travis-ci.org/robgmills/ansible-pure-ftpd)
Installs Pure-FTPd on Debian/Ubuntu Linux.
This role installs and configures the latest version of Pure-FTPd via apt (on Debian-based systems). You will likely need to do extra setup work after this role has installed Pure-FTPd.
## Requirements
None.
## Role Variables
Available variables are listed below, along with default values (see `defaults/main.yml`):
pure_ftpd_root: "/var/ftp"
A directory path at which to scope the FTP server access.
pure_ftpd_user: "ftp"
The system-level user that the FTP daemon performs operations under. This user is setup without login permissions (can't ssh into system) but owns all of the files uploaded via the FTP server.
pure_ftpd_group: "ftp-sys-group"
The system-level group that the FTP daemon performs operations under. This is the group assigned to all files uploaded via the FTP server.
pure_ftpd_vusers:
- name: "ftp"
password: "FTPisSoC00l?"
dir: "/var/ftp" # optional
A list of user definitions for virtual FTP users. If left empty, defaults to a single user with the username `ftp` and the default password from `defaults/main.yml`. `name` and `password` are required fields. `dir` is optional and defaults to the value of `pure_ftpd_root`.
Since the array of `pure_ftpd_vusers` needs to contain secret credentials, it is recommended to create an [Ansible Vault][vault]-encrypted variable file to include that contains your users and overrides the role default vars.
pure_ftpd_tls: true
Turns on/off support for FTP TLS encryption. It is strongly recommended that this remain `true`.
pure_ftpd_allow_insecure: false
When TLS encryption is enabled, the default is to not allow non-encrypted, insecure connections. Setting this value to `true` will allow both secure and insecure connections. Requires that `pure_ftpd_tls` be `true`.
pure_ftpd_pem: ""
The contents of the PEM certificate to use for FTP TLS encryption. It is recommended to create an [Ansible Vault][vault]-encrypted variable file to include that contains your PEM certificate.
If no `pure_ftpd_pem` is provided, a PEM certificate is generated using `openssl`.
pure_ftpd_openssl_config: {}
The `openssl_config` vars object controls the generation of an openssl PEM certificate + key combination.
The sub-properties of the `openssl_config` object are as follows:
days: "365"
The number of days for which the certificate is valid.
size: "2048"
The size of the certificate key. The larger, the more secure.
country: ""
state: ""
locality: ""
org: ""
unit: ""
common: ""
email: ""
The values of the certificate subject information.
## Dependencies
None.
## Example Playbook
- hosts: server
roles:
- role: robgmills.pure-ftpd
## Try It!
From the root of the project:
vagrant up
...then...
ansible-playbook -i inventory -b -u vagrant -k playbook.yml
...then use your favorite FTPS client to connect to `ftps://ftp:FTPisSoC00l?@192.168.50.2`
## License
MIT / BSD
## Author Information
This role was created in 2016 by [Rob Mills](https://robgmills.com/).
[vault]: http://docs.ansible.com/ansible/playbooks_vault.html

View File

@ -1,4 +0,0 @@
# Minimal Vagrant config: a single Debian Jessie box for testing the role.
Vagrant.configure(2) do |config|
  config.vm.box = "debian/jessie64"
  # Static IP — matches the test inventory and playbook host pattern.
  config.vm.network "private_network", ip: "192.168.50.2"
end

View File

@ -1,2 +0,0 @@
[defaults]
# Resolve roles from the parent directory so the role under test is found.
roles_path = ../

View File

@ -1,23 +0,0 @@
---
# Used only for Debian/Ubuntu installation, as the -t option for apt.
pureftpd_default_release: ""

# System account and group the FTP daemon operates under.
pure_ftpd_user: "ftp"
pure_ftpd_group: "ftp"
# Directory that FTP server access is scoped to.
pure_ftpd_root: "/var/ftp"

# Virtual FTP users. NOTE(review): plaintext default password — override from
# an Ansible Vault-encrypted vars file in real deployments.
pure_ftpd_vusers:
  - name: ftp
    dir: "{{ pure_ftpd_root }}"
    password: "FTPisSoC00l?"

# TLS on by default; insecure (plain FTP) connections refused.
pure_ftpd_tls: true
pure_ftpd_allow_insecure: false

# Subject/key parameters for the self-signed PEM generated when no
# pure_ftpd_pem is supplied.
pure_ftpd_openssl_config:
  days: "365"
  size: "2048"
  country: ""
  state: ""
  locality: ""
  org: ""
  unit: ""
  common: ""
  email: ""

View File

@ -1 +0,0 @@
192.168.50.2

View File

@ -1,2 +0,0 @@
# Ansible Galaxy install receipt (auto-generated when the role was installed).
install_date: Thu Sep 25 13:49:03 2025
version: 1.0.1

View File

@ -1,18 +0,0 @@
---
# Galaxy metadata for the pure-ftpd role.
galaxy_info:
  author: Rob Mills
  description: Installs and configures a Pure-FTPd server
  company: RGM
  license: MIT
  min_ansible_version: 2.0
  platforms:
    - name: Debian
      versions:
        - all
  galaxy_tags:
    - ftp
    - server
    - ftps
    - tls
    - openssl

dependencies: []

View File

@ -1,5 +0,0 @@
---
# Apply the pure-ftpd role to the Vagrant test box.
- hosts: 192.168.50.2
  become: true
  roles:
    - ansible-pure-ftpd

View File

@ -1,38 +0,0 @@
---
# Variable setup.
- name: Include OS-specific variables.
  include_vars: "{{ ansible_os_family }}.yml"

# Setup/install tasks.
- include_tasks: setup-Debian.yml
  when: ansible_os_family == 'Debian'

# Pure-FTPd setup — block-style module args instead of legacy key=value strings.
- name: Create the FTP system group '{{ pure_ftpd_group }}'
  group:
    name: "{{ pure_ftpd_group }}"
    state: present

# No home/shell: this account owns uploaded files but cannot log in.
- name: Create the FTP system user '{{ pure_ftpd_user }}'
  user:
    name: "{{ pure_ftpd_user }}"
    group: "{{ pure_ftpd_group }}"
    home: /dev/null
    shell: /sbin/nologin
    state: present

- name: Create FTP server root directory '{{ pure_ftpd_root }}'
  file:
    dest: "{{ pure_ftpd_root }}"
    mode: "0755"
    state: directory
    owner: "{{ pure_ftpd_user }}"
    group: "{{ pure_ftpd_group }}"

# Create and/or install SSL certificate.
- include_tasks: tls.yml
  when: pure_ftpd_tls

# Create the virtual FTP users and set their passwords (one include per user).
- include_tasks: virtual-user.yml
  with_items: "{{ pure_ftpd_vusers }}"

- name: Link virtual FTP user database to the correct location
  file:
    src: "{{ __ftp_user_db }}"
    dest: "{{ __ftp_user_db_sym }}"
    state: link
  register: r_linkdb

# Restart only when the database symlink actually changed.
- name: Restart pure-ftpd
  service:
    name: pure-ftpd
    state: restarted
  when: r_linkdb.changed

# - Ensure that FTP service is running
- name: Ensure pure-ftpd is started and enabled to start at boot.
  service:
    name: pure-ftpd
    state: started
    enabled: true

View File

@ -1,15 +0,0 @@
---
- name: Update apt cache.
  apt:
    update_cache: true
    cache_valid_time: 86400

# Single apt transaction instead of looping the module per package — the
# per-item with_items loop over package modules is a deprecated pattern.
- name: Ensure requisite apt packages are installed.
  apt:
    name:
      - pure-ftpd
      # NOTE(review): python-pip exists only on older Debian releases —
      # confirm the target distro still ships it.
      - python-pip
      - openssl
    state: present
    default_release: "{{ pureftpd_default_release }}"

# pexpect is required by the 'expect' module used in tls.yml and virtual-user.yml.
- name: Install pexpect
  pip:
    name: "pexpect"
    state: present

View File

@ -1,41 +0,0 @@
---
# assumes that `enable_tls: true`
- name: Define TLS support level.
no_log: true
set_fact:
tls_level: 2
when: not pure_ftpd_allow_insecure
- name: Define TLS support level.
no_log: true
set_fact:
tls_level: 1
when: pure_ftpd_allow_insecure
- name: Set TLS config level ({{ tls_level | default(2) }})
copy: content={{ tls_level | default(2) }} dest={{ __ftp_conf_root }}/TLS owner=root group=root
- name: Install configured TLS PEM for pure-ftpd
no_log: true
copy: content="{{ pure_ftpd_pem }}" dest=/etc/ssl/private/pure-ftpd.pem owner=root group=root
when: pure_ftpd_pem is defined
- name: Check if pure-pw centificate file exists
stat: path=/etc/ssl/private/pure-ftpd.pem
register: r_ftppem
- name: Generate TLS PEM for pure-ftpd
expect:
command: openssl req -x509 -nodes -days {{ pure_ftpd_openssl_config.days }} -newkey rsa:{{ pure_ftpd_openssl_config.size }} -keyout /etc/ssl/private/pure-ftpd.pem -out /etc/ssl/private/pure-ftpd.pem
responses:
(?i)country name: "{{ pure_ftpd_openssl_config.country }}"
(?i)state or province name: "{{ pure_ftpd_openssl_config.state }}"
(?i)locality name: "{{ pure_ftpd_openssl_config.locality }}"
(?i)organization name: "{{ pure_ftpd_openssl_config.org }}"
(?i)organizational unit name: "{{ pure_ftpd_openssl_config.unit }}"
(?i)common name: "{{ pure_ftpd_openssl_config.common }}"
(?i)email address: "{{ pure_ftpd_openssl_config.email }}"
when: pure_ftpd_pem is not defined and not r_ftppem.stat.exists
- name: Restrict permissions on PEM
file: state=file path=/etc/ssl/private/pure-ftpd.pem mode=0600 owner=root group=root

View File

@ -1,34 +0,0 @@
---
- name: Create directory {{ item.dir | default( pure_ftpd_root ) }} for virtual FTP user {{ item.name }}
file: dest={{ item.dir | default( pure_ftpd_root ) }} mode=0755 state=directory owner={{ pure_ftpd_user }} group={{ pure_ftpd_group}}
- name: Check if pure-pw passwords file exists
stat: path={{ __ftp_passwd }}
register: r_passwd
- name: Check if virtual FTP user {{ item.name }} exists
command: pure-pw show {{ item.name }}
register: r_userexists
when: r_passwd.stat.exists
changed_when: "r_userexists.rc != 0"
ignore_errors: true
- name: Create virtual FTP user {{ item.name }}
expect:
command: pure-pw useradd {{ item.name }} -u {{ pure_ftpd_user }} -g {{ pure_ftpd_group }} -d {{ item.dir | default( pure_ftpd_root ) }} -m
responses:
(?i)password: "{{ item.password }}"
(?i)enter it again: "{{ item.password }}"
when: (r_userexists|failed) or (not r_passwd.stat.exists)
- name: Update virtual FTP user {{ item.name }}
command: pure-pw usermod {{ item.name }} -u {{ pure_ftpd_user }} -g {{ pure_ftpd_group }} -d {{ item.dir | default( pure_ftpd_root ) }} -m
when: r_userexists|success
- name: Update virtual FTP user {{ item.name }} password
expect:
command: pure-pw passwd {{ item.name }}
responses:
(?i)password: "{{ item.password }}"
(?i)enter it again: "{{ item.password }}"
when: r_userexists|success

Some files were not shown because too many files have changed in this diff Show More