diff --git a/ansible/playbooks/collections/ansible_collections/bodsch.core-2.10.1.info/GALAXY.yml b/ansible/playbooks/collections/ansible_collections/bodsch.core-2.10.1.info/GALAXY.yml
new file mode 100644
index 0000000..9fe2da2
--- /dev/null
+++ b/ansible/playbooks/collections/ansible_collections/bodsch.core-2.10.1.info/GALAXY.yml
@@ -0,0 +1,8 @@
+download_url: https://galaxy.ansible.com/api/v3/plugin/ansible/content/published/collections/artifacts/bodsch-core-2.10.1.tar.gz
+format_version: 1.0.0
+name: core
+namespace: bodsch
+server: https://galaxy.ansible.com/api/
+signatures: []
+version: 2.10.1
+version_url: /api/v3/plugin/ansible/content/published/collections/index/bodsch/core/versions/2.10.1/
diff --git a/ansible/playbooks/collections/ansible_collections/bodsch.dns-1.4.0.info/GALAXY.yml b/ansible/playbooks/collections/ansible_collections/bodsch.dns-1.4.0.info/GALAXY.yml
new file mode 100644
index 0000000..fa66dbd
--- /dev/null
+++ b/ansible/playbooks/collections/ansible_collections/bodsch.dns-1.4.0.info/GALAXY.yml
@@ -0,0 +1,8 @@
+download_url: https://galaxy.ansible.com/api/v3/plugin/ansible/content/published/collections/artifacts/bodsch-dns-1.4.0.tar.gz
+format_version: 1.0.0
+name: dns
+namespace: bodsch
+server: https://galaxy.ansible.com/api/
+signatures: []
+version: 1.4.0
+version_url: /api/v3/plugin/ansible/content/published/collections/index/bodsch/dns/versions/1.4.0/
diff --git a/ansible/playbooks/collections/ansible_collections/bodsch.systemd-1.4.0.info/GALAXY.yml b/ansible/playbooks/collections/ansible_collections/bodsch.systemd-1.4.0.info/GALAXY.yml
new file mode 100644
index 0000000..67f2321
--- /dev/null
+++ b/ansible/playbooks/collections/ansible_collections/bodsch.systemd-1.4.0.info/GALAXY.yml
@@ -0,0 +1,8 @@
+download_url: https://galaxy.ansible.com/api/v3/plugin/ansible/content/published/collections/artifacts/bodsch-systemd-1.4.0.tar.gz
+format_version: 1.0.0
+name: systemd
+namespace: bodsch
+server: https://galaxy.ansible.com/api/
+signatures: []
+version: 1.4.0
+version_url: /api/v3/plugin/ansible/content/published/collections/index/bodsch/systemd/versions/1.4.0/
diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/core/CONTRIBUTING.md b/ansible/playbooks/collections/ansible_collections/bodsch/core/CONTRIBUTING.md
new file mode 100644
index 0000000..e3cd4cc
--- /dev/null
+++ b/ansible/playbooks/collections/ansible_collections/bodsch/core/CONTRIBUTING.md
@@ -0,0 +1,31 @@
+Contributing
+============
+If you want to contribute to a project and make it better, your help is very welcome.
+Contributing is also a great way to learn more about social coding on Github, new technologies and
+their ecosystems, and how to make constructive, helpful bug reports, feature requests and the
+noblest of all contributions: a good, clean pull request.
+
+### How to make a clean pull request
+
+Look for a project's contribution instructions. If there are any, follow them.
+
+- Create a personal fork of the project on Github.
+- Clone the fork on your local machine. Your remote repo on Github is called `origin`.
+- Add the original repository as a remote called `upstream`.
+- If you created your fork a while ago, be sure to pull upstream changes into your local repository.
+- Create a new branch to work on! Branch from `develop` if it exists, else from `master`.
+- Implement/fix your feature, comment your code.
+- Follow the code style of the project, including indentation.
+- If the project has tests, run them!
+- Write or adapt tests as needed.
+- Add or change the documentation as needed.
+- Squash your commits into a single commit. Create a new branch if necessary.
+- Push your branch to your fork on Github, the remote `origin`.
+- From your fork, open a pull request against the correct branch. Target the project's `develop` branch if there is one, else go for `master`!
+- If the maintainer requests further changes, just push them to your branch. The PR will be updated automatically.
+- Once the pull request is approved and merged, you can pull the changes from `upstream` to your local repo and delete
+  your extra branch(es).
+
+And last but not least: Always write your commit messages in the present tense.
+Your commit message should describe what the commit, when applied, does to the
+code – not what you did to the code.
diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/core/FILES.json b/ansible/playbooks/collections/ansible_collections/bodsch/core/FILES.json
new file mode 100644
index 0000000..f6c34a6
--- /dev/null
+++ b/ansible/playbooks/collections/ansible_collections/bodsch/core/FILES.json
@@ -0,0 +1,3134 @@
+{ + "files": [ + { + "name": ".", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "CONTRIBUTING.md", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "af957267f930f931b72fd2dc1d4127af66afe9aea4459075bc1c6b0051109df9", + "format": 1 + }, + { + "name": "LICENSE", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "c71d239df91726fc519c6eb72d318ec65820627232b2f796219e87dcf35d0ab4", + "format": 1 + }, + { + "name": "meta", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "meta/runtime.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "125570eb1111331438b33d1119184c1beb27ee6bb4660f4e2b2eb3f8154bf667", + "format": 1 + }, + { + "name": "plugins", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "plugins/README.md", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "9b73ae4774fdb779be02da3be3113ff68bd68eed23a81379314dd7e0800f22ae", + "format": 1 + }, + { + "name": "plugins/filter", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "plugins/filter/clean_dictionary.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "f48d273a1aebbb4d47aa23e8efe78451741d1ac8fe1d96508e8b52f5c082569b", + "format": 1 + }, + { + "name": "plugins/filter/mount_fstypes.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "b7fedf08bc3975bb26ba34c773e1ed03a8bd06a651de9e779949cfb135907fa6", + "format": 1 + }, + { + "name": "plugins/filter/parse_checksum.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "05bc85df07eb1bf5c832f2d766b85316a9f2f99e3f0d14155b5d177423254c74", + "format": 1 + }, + { + "name": "plugins/filter/syslog_ng.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "756190661af24b81ce7565f5de8b7065dc4e3680d9204837ba8b2f98ed8cb546", + "format": 1 + }, + { + "name": "plugins/filter/verify.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "09335ed83617990eb6245aa7138f3988b648f255522c4016155d707e505e1d0e", + "format": 1 + }, + { + "name": "plugins/filter/linked_version.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "2ebf14a189018766bcf24989ee73d89d34384d1da9ad6e2440df1a664ab27c81", + "format": 1 + }, + { + "name": "plugins/filter/fail2ban_jails.py", + "ftype": 
"file", + "chksum_type": "sha256", + "chksum_sha256": "a5e85ce0e01aa0c0635b6f94e2211e5c14d217e19fbfafd1a8e83d046dd95b8f", + "format": 1 + }, + { + "name": "plugins/filter/openvpn_persistent_pool.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "a5c48074d26553ddfb5320d2c698ccbbfe5e0f32b8c3e5f87fbac3c333d3e057", + "format": 1 + }, + { + "name": "plugins/filter/python.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "a5c1e2819ac677c932be378d6ad6d959fd2ca1c6c091448d1d1c41102c4bf7f9", + "format": 1 + }, + { + "name": "plugins/filter/union_by.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "d3a8cce4538f1bcc2660e1ba708c598dfcca99656f644afa65a8bf3c61a17090", + "format": 1 + }, + { + "name": "plugins/filter/remove_empty_values.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", + "format": 1 + }, + { + "name": "plugins/filter/dns_lookup.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", + "format": 1 + }, + { + "name": "plugins/filter/merge_jails.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", + "format": 1 + }, + { + "name": "plugins/filter/linked_version.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", + "format": 1 + }, + { + "name": "plugins/filter/fstypes.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", + "format": 1 + }, + { + "name": "plugins/filter/persistent_pool.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", + "format": 1 + }, + { + "name": "plugins/filter/clients_type.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "a6841467bc2ab1e0156fb18f5c1881cd42f31ec8fb1cd683f7d85fc9c9956313", + "format": 1 + }, + { + "name": "plugins/filter/openvpn_clients.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", + "format": 1 + }, + { + "name": "plugins/filter/parse_checksum.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", + "format": 1 + }, + { + "name": "plugins/filter/python_extra_args.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", + "format": 1 + }, + { + "name": "plugins/filter/merge_lists.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", + "format": 1 + }, + { + "name": "plugins/filter/sshd_values.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", + "format": 1 + }, + { + "name": "plugins/filter/support_tls.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", + "format": 1 + }, + { + "name": "plugins/filter/tls_directory.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", + 
"format": 1 + }, + { + "name": "plugins/filter/get_service.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", + "format": 1 + }, + { + "name": "plugins/filter/log_directories.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", + "format": 1 + }, + { + "name": "plugins/filter/syslog_network_definition.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", + "format": 1 + }, + { + "name": "plugins/filter/verify_syslog_options.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", + "format": 1 + }, + { + "name": "plugins/filter/type.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", + "format": 1 + }, + { + "name": "plugins/filter/config_bool.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", + "format": 1 + }, + { + "name": "plugins/filter/string_to_list.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", + "format": 1 + }, + { + "name": "plugins/filter/union_by.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", + "format": 1 + }, + { + "name": "plugins/filter/compare_list.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", + "format": 1 + }, + { + "name": "plugins/filter/upgrade.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", + "format": 1 + }, + { + "name": "plugins/filter/dns.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "f1c56b7cb0d8fa58e3f496a592fc8798e2ccf843f91c859f201cb451be33f5b9", + "format": 1 + }, + { + "name": "plugins/filter/openvpn.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "3e79f31c8bdfa61c18634614411f15cfb358d929cd7c81202e8dd24a74ca1bf8", + "format": 1 + }, + { + "name": "plugins/filter/sshd.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "3632e78e4aeb1d06bd6ac3d6197d55ea74e103e835db1c709b07d0f78bdb9696", + "format": 1 + }, + { + "name": "plugins/filter/support_tls.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "bd3b6755fc65d749b92a0108cd15429048cbddf395bc3c688ce4ca1f14bf5a08", + "format": 1 + }, + { + "name": "plugins/filter/types.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "aa5e06d9ac743c89a15b37b8a56585cda11254151aa43f3422730d8838a418fa", + "format": 1 + }, + { + "name": "plugins/filter/inventory.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "c01dae724db04ac096485f0609edff384669e879b3519e986fad75f3fb9ac407", + "format": 1 + }, + { + "name": "plugins/lookup", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "plugins/lookup/file_glob.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "dec9f6b1d46666c79c246ee27844b297d5bf783ae7b0c623711018d7585dac26", + "format": 1 + }, + { + "name": "plugins/lookup/rbw.py", 
+ "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "ee806f22a00609023ee951d60be0e2429418b541f7733bec420863eb91808f7f", + "format": 1 + }, + { + "name": "plugins/module_utils", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "plugins/module_utils/cache", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "plugins/module_utils/cache/cache_valid.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "09a8133fbe451c22713ad5aac2888dc76f2e9f7ecb6f9c5c069e99bfec690052", + "format": 1 + }, + { + "name": "plugins/module_utils/template", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "plugins/module_utils/template/template.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "8fe93e74ef5df662d222fb19d2fdf89d99a1b747c53814e0e4b0b5dd8234eb75", + "format": 1 + }, + { + "name": "plugins/module_utils/checksum.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "5ae967139872aa408b4bdb25feaf2f3a27b4b86a0e44ba37df0768b37d944d7e", + "format": 1 + }, + { + "name": "plugins/module_utils/diff.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "30e164e79288ba93094db8d473d111d86ee2194b6d6aa841f21639be88779947", + "format": 1 + }, + { + "name": "plugins/module_utils/directory.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "e96895b0580728ba517019a1da3c72f8223cdd8ce16bff4bb1ac3cc831d49bda", + "format": 1 + }, + { + "name": "plugins/module_utils/dns_lookup.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "5ea9ed577f2c1657a75c21738d9c47a69292f175206988230f048b666fcd0a9b", + "format": 1 + }, + { + "name": "plugins/module_utils/easyrsa.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "8c11ff37687846060ad91ff5bb87e8d8b0ef0a1969838f0063ef060d1e9c76f9", + "format": 1 + }, + { + "name": "plugins/module_utils/file.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "c3ff51045a8edf1c5c0f8d01617bc3c5cb280bea49eb05b95021789f2cbee181", + "format": 1 + }, + { + "name": "plugins/module_utils/lists.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "384d1d8170dc59911af287104334efd49d369a381b70e9989914968fa7339aeb", + "format": 1 + }, + { + "name": "plugins/module_utils/module_results.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "26ef7ea8c75b17c7c81707fa2c3dc216318c26eab934f5c556cfb90d44345137", + "format": 1 + }, + { + "name": "plugins/module_utils/validate.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "1f6fbdef2c33fa385621318ca90e207ef602f1495c8dc00a23a2bd575eb2ab09", + "format": 1 + }, + { + "name": "plugins/module_utils/crypto_utils.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "720052c0e3d6297800619e5cecb76c98876a1773ec340ab617e6314eb12a86ef", + "format": 1 + }, + { + "name": "plugins/module_utils/passlib_bcrypt5_compat.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "30dc8740bd4fd216d6c0f7b3fbd3567de3cb2fdac75b035a665a6993aad1f39c", + "format": 1 + }, + { + "name": "plugins/module_utils/deb822_repo.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "c1b70372e3fd5ec33f2445018bc7ccfe5dda2c7478c0a0895215f73490e4f9a2", + "format": 1 + }, + { + "name": "plugins/module_utils/versioned_deployment.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": 
"f419b651e8d2fd0f95b980c423ae079991f3808b109fe2ac7ffc05b8dcc53f72", + "format": 1 + }, + { + "name": "plugins/modules", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "plugins/modules/check_mode.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "10a2daa00bc24a0343afbc90f8b438b9d276baf2af05da8a9d31b0c72e23b815", + "format": 1 + }, + { + "name": "plugins/modules/easyrsa.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "af0fbdaaa76b18404760bcbf64d6805a372df62925e017a73136136270d89a87", + "format": 1 + }, + { + "name": "plugins/modules/facts.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "b39fb46c28220d423710bc277015c5d6520ccb5fea2514ae31a70b8a61821a58", + "format": 1 + }, + { + "name": "plugins/modules/journalctl.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "94fd6be964c8423e7164a01ed577f9d7392e265729d902b3eb3af9b75f216653", + "format": 1 + }, + { + "name": "plugins/modules/openvpn_client_certificate.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "0f1f33c54265306a1f6f27cf66485c45bc72fe37fb1f5dd74eb55ac93647d053", + "format": 1 + }, + { + "name": "plugins/modules/openvpn_crl.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "36868248a03444277bc7710f944033315cc27f0ce610f5a3f485aa5ebef90549", + "format": 1 + }, + { + "name": "plugins/modules/openvpn_ovpn.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "0a329fcb4477a339891b1e4b9e54d72a8d8f0ba6ed6e36491dd6afeb85a87e96", + "format": 1 + }, + { + "name": "plugins/modules/openvpn_version.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "ee885f175f1cc477b97e1fe8405e94c2dcb1f7df08413d27ba2f86e64f46576c", + "format": 1 + }, + { + "name": "plugins/modules/package_version.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "2c563dc7745b464500e9b9d75701771c2402d2cef321dab71714eafd5d38e236", + "format": 1 + }, + { + "name": "plugins/modules/pip_requirements.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "4bc669791ee9e2a3e0d69f03c4769f2c13a60efc9e3734d5ac5990394ada78b5", + "format": 1 + }, + { + "name": "plugins/modules/remove_ansible_backups.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "f70e2af949773c05a31ae6642c660b6ff40b7a8f17a27f8bbc244f762ca173d6", + "format": 1 + }, + { + "name": "plugins/modules/snakeoil_openssl.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "53fd1cdad185913f2307badacae35348590d4f3354967166c5a54017b73abfc5", + "format": 1 + }, + { + "name": "plugins/modules/sync_directory.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "567f5ad1b19b36c45afe9518e70fabf4d39409e6e956016e4fdb2e36d71674ae", + "format": 1 + }, + { + "name": "plugins/modules/syslog_cmd.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "c56b95b8c268700194f6f019a13a608645051643de922dad5dc48b1150ddac34", + "format": 1 + }, + { + "name": "plugins/modules/apt_sources.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "65f6f2ff5df063fe2c43c8c3d114c31544dba87b151d897b08a13d888e537249", + "format": 1 + }, + { + "name": "plugins/modules/aur.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "9f4ad096a62935e1505d09b026033ba687af87d62a0720a742698bcfa78491b8", + "format": 1 + }, + { + "name": "plugins/modules/mysql_schema.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": 
"167cca5cbe99e3158fe18207b7b32186a3e20a5d31329ef754f1903e082147a6", + "format": 1 + }, + { + "name": "plugins/modules/openvpn.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "1d68578d4719b7f6bddb9cbcbd6211abbf9731143830f3ed74031ce65fc3e20d", + "format": 1 + }, + { + "name": "plugins/modules/deploy_and_activate.SAVE", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "76150e63ec42c8156a2badbdde4bea802b76c6df5f9cd547ea4218ad3ea74af5", + "format": 1 + }, + { + "name": "plugins/modules/deploy_and_activate_remote.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "27de088020e66a6a8f1ede6bcd6623ac7b39c321b24fe07edc949dfa4e6ac7c4", + "format": 1 + }, + { + "name": "plugins/action", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "plugins/action/activate_version.save", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "5076b619df12e25ed645857d9a4476e0394267c7fe44a49e829dfe7140b8c90e", + "format": 1 + }, + { + "name": "plugins/action/deploy_and_activate.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "038b8aaa4d50f14d2cbbd7e1b8236b9c1ca7011c4be5f0181b3f6ead5555ff66", + "format": 1 + }, + { + "name": "roles", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/ca", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/fail2ban", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/fail2ban/.ansible-lint", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "3f79cdace5e0ac1512d6b3e35517aa678069b483ffb80ca63e5b51718be5bc31", + "format": 1 + }, + { + "name": "roles/fail2ban/.editorconfig", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "ed43c7886023384f96a2b69d1288065a2cdc651bb9ffc9d7f2bbda6550f2abd6", + "format": 1 + }, + { + "name": "roles/fail2ban/.flake8", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "2fe45659318525388b0574a4104028589a94f17e5f6a1cd796efca0bb1a4f510", + "format": 1 + }, + { + "name": "roles/fail2ban/.gitignore", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "88b2262e97da40d844c28af01fe4153605745a11e29348301bf82f9a1a5881dc", + "format": 1 + }, + { + "name": "roles/fail2ban/.yamllint", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "6a8f34854e6109024a678dbdc16a082b21e0134067fcfab10072b547991d8339", + "format": 1 + }, + { + "name": "roles/fail2ban/CONTRIBUTING.md", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "af957267f930f931b72fd2dc1d4127af66afe9aea4459075bc1c6b0051109df9", + "format": 1 + }, + { + "name": "roles/fail2ban/LICENSE", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "c71d239df91726fc519c6eb72d318ec65820627232b2f796219e87dcf35d0ab4", + "format": 1 + }, + { + "name": "roles/fail2ban/Makefile", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "f80fec55dc046e6af709666d62fc5fd51f45afdf2e8efe978365fa5d0a069dbd", + "format": 1 + }, + { + "name": "roles/fail2ban/README.md", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "0d8376a990d85ec183134a13b0b573c995c78640a6e1f0dd0383999bb9de2d4f", + "format": 1 + }, + { + "name": "roles/fail2ban/defaults", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/fail2ban/defaults/main.yaml", + "ftype": "file", + "chksum_type": "sha256", + 
"chksum_sha256": "4f57d6863cf61bc95c27ae35f6a520c3abd2f166e2a25b478e10a410c791744a", + "format": 1 + }, + { + "name": "roles/fail2ban/handlers", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/fail2ban/handlers/main.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "72fc487909352a1819a6ffdc64190d1bcd9c0dcdee2ace9a7bdbe886d6964c9f", + "format": 1 + }, + { + "name": "roles/fail2ban/hooks", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/fail2ban/hooks/converge", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "1ddaaac121ea494491ac3419f0be9725405644c2b31781030d236ebc3d930664", + "format": 1 + }, + { + "name": "roles/fail2ban/hooks/destroy", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "79c679a67141ef9cbfb56c8c7b948ca3df010c56d0c7af6c46535cde58bac8bb", + "format": 1 + }, + { + "name": "roles/fail2ban/hooks/lint", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "fd048a322d0d05e245101fdae1a26a3f6a23c13bbe4f1fc455448672eb59a0a7", + "format": 1 + }, + { + "name": "roles/fail2ban/hooks/molecule.rc", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "7fbafd0a44904bdb4b3b880ef8d2776279fd50416e825c23e43110a71e96bf90", + "format": 1 + }, + { + "name": "roles/fail2ban/hooks/test", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "6137f41add5be2e1a9caa9abd1c1a483c8ce365370fd64eb85dee201a7074403", + "format": 1 + }, + { + "name": "roles/fail2ban/hooks/tox.sh", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "c32820da1cc4358e7fa4731358bfa38fcbb221154bd45412aa2d9202f60c8419", + "format": 1 + }, + { + "name": "roles/fail2ban/hooks/verify", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "d3ffa461db1e1516600a5de841029880521aff844c746e5dedf4bebfccdbf39b", + "format": 1 + }, + { + "name": "roles/fail2ban/meta", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/fail2ban/meta/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "554fd168bccb9865efea9fb128866551de7c23c5c8d9a04947ae89d319c6ef97", + "format": 1 + }, + { + "name": "roles/fail2ban/tasks", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/fail2ban/tasks/configure.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "a8d648314155ead817eb72e4c133cb7f61c961d91df018e746a33d986df4d209", + "format": 1 + }, + { + "name": "roles/fail2ban/tasks/install.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "5c358bd8214f81825d25ce85ea9cb1a31aa189b3edbd1ec80009aa84c0fa0e7f", + "format": 1 + }, + { + "name": "roles/fail2ban/tasks/main.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "3080a1d664c6a4aa692bb6b9c9dd3d303fda68da096288dbddad4103324c1dec", + "format": 1 + }, + { + "name": "roles/fail2ban/tasks/service.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "09ca35e371aed54dd642c05ac3b0c5fa8316a49491f864ab555be76034b622c3", + "format": 1 + }, + { + "name": "roles/fail2ban/tasks/prepare.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "4116a03c0d1242ae0c83c675be07b2adabc7f4cab02c9b5f8fb0aa1ad6f5c65a", + "format": 1 + }, + { + "name": "roles/fail2ban/templates", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": 
"roles/fail2ban/templates/etc", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/fail2ban/templates/etc/fail2ban", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/fail2ban/templates/etc/fail2ban/actions.conf.j2", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "7c9b8f5922385f55a0c7b2408b3585ddc4d04fd0d269f88dc9632a0aec09b72e", + "format": 1 + }, + { + "name": "roles/fail2ban/templates/etc/fail2ban/fail2ban.conf.j2", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "c1882f35dd974ba7fd67337e793032dbcf158223edda2159aefc53e9dfaf5c91", + "format": 1 + }, + { + "name": "roles/fail2ban/templates/etc/fail2ban/filters.conf.j2", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "dde2a2e4f8c84111ba8a32d71bf77f52b68357f46eb18bb165b6610c23c1ef02", + "format": 1 + }, + { + "name": "roles/fail2ban/templates/etc/fail2ban/jail.conf.j2", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "e21f6f5d688ee01ccc39e4310d521f106162ccf4b899d0f4027c6c186a3d70bc", + "format": 1 + }, + { + "name": "roles/fail2ban/templates/etc/fail2ban/jail.local.j2", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "c5b269da514a4f6318812d08c9f5c1d23ea9731b3f1d0351d7ea9955a945898e", + "format": 1 + }, + { + "name": "roles/fail2ban/templates/etc/fail2ban/paths.conf.j2", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "b51e689e702bdd192ee568e0497f6de201f44e32e7318df0e913f9c428e07f66", + "format": 1 + }, + { + "name": "roles/fail2ban/vars", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/fail2ban/vars/archlinux-openrc.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "3e13e2aa1854d50cea60e109ee0b7a5dd89992fbb787bbe800c2ad4560cd0c72", + "format": 1 + }, + { + "name": "roles/fail2ban/vars/archlinux.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "4a78c8e8b0b020cfcb7b5c53feac651b82daf8030b030a2eab29db5fb3f236c6", + "format": 1 + }, + { + "name": "roles/fail2ban/vars/artixlinux-openrc.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "3e13e2aa1854d50cea60e109ee0b7a5dd89992fbb787bbe800c2ad4560cd0c72", + "format": 1 + }, + { + "name": "roles/fail2ban/vars/debian.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "036e7b129da4a51db7f3b6ac80edb7e3b5625e5226e613c7668f5ba2f900e608", + "format": 1 + }, + { + "name": "roles/fail2ban/vars/default.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "88941473c57349985ad00d0d0f72a3c53ec4dabb48a97e289cdda7e6210034ba", + "format": 1 + }, + { + "name": "roles/fail2ban/vars/main.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "1bf37d978f9062d73ddaf5d24e25c02b4bae672474944018a5c700c06b8d8e66", + "format": 1 + }, + { + "name": "roles/logrotate", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/logrotate/.ansible-lint", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "3f79cdace5e0ac1512d6b3e35517aa678069b483ffb80ca63e5b51718be5bc31", + "format": 1 + }, + { + "name": "roles/logrotate/.editorconfig", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "ed43c7886023384f96a2b69d1288065a2cdc651bb9ffc9d7f2bbda6550f2abd6", + "format": 1 + }, + { + "name": "roles/logrotate/.flake8", + "ftype": "file", + "chksum_type": "sha256", + 
"chksum_sha256": "2fe45659318525388b0574a4104028589a94f17e5f6a1cd796efca0bb1a4f510", + "format": 1 + }, + { + "name": "roles/logrotate/.gitignore", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "88b2262e97da40d844c28af01fe4153605745a11e29348301bf82f9a1a5881dc", + "format": 1 + }, + { + "name": "roles/logrotate/.yamllint", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "6a8f34854e6109024a678dbdc16a082b21e0134067fcfab10072b547991d8339", + "format": 1 + }, + { + "name": "roles/logrotate/CONTRIBUTING.md", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "af957267f930f931b72fd2dc1d4127af66afe9aea4459075bc1c6b0051109df9", + "format": 1 + }, + { + "name": "roles/logrotate/LICENSE", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "c318b6f42ca9cd8f48502e05161a95a92b154ff5c0647209b208ee9ff9657a04", + "format": 1 + }, + { + "name": "roles/logrotate/Makefile", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "f80fec55dc046e6af709666d62fc5fd51f45afdf2e8efe978365fa5d0a069dbd", + "format": 1 + }, + { + "name": "roles/logrotate/README.md", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "3793dd269609aa1f344a950271fae6686f62ce7e0584b8b61b110972b2916469", + "format": 1 + }, + { + "name": "roles/logrotate/defaults", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/logrotate/defaults/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "e9d846f794af4bf769529697f4a6a7c1567ab29c52412715c03fd7c5b5ad8a96", + "format": 1 + }, + { + "name": "roles/logrotate/hooks", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/logrotate/hooks/converge", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "1ddaaac121ea494491ac3419f0be9725405644c2b31781030d236ebc3d930664", + "format": 1 + }, + { + "name": "roles/logrotate/hooks/destroy", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "79c679a67141ef9cbfb56c8c7b948ca3df010c56d0c7af6c46535cde58bac8bb", + "format": 1 + }, + { + "name": "roles/logrotate/hooks/lint", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "fd048a322d0d05e245101fdae1a26a3f6a23c13bbe4f1fc455448672eb59a0a7", + "format": 1 + }, + { + "name": "roles/logrotate/hooks/molecule.rc", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "7fbafd0a44904bdb4b3b880ef8d2776279fd50416e825c23e43110a71e96bf90", + "format": 1 + }, + { + "name": "roles/logrotate/hooks/test", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "6137f41add5be2e1a9caa9abd1c1a483c8ce365370fd64eb85dee201a7074403", + "format": 1 + }, + { + "name": "roles/logrotate/hooks/tox.sh", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "c32820da1cc4358e7fa4731358bfa38fcbb221154bd45412aa2d9202f60c8419", + "format": 1 + }, + { + "name": "roles/logrotate/hooks/verify", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "d3ffa461db1e1516600a5de841029880521aff844c746e5dedf4bebfccdbf39b", + "format": 1 + }, + { + "name": "roles/logrotate/meta", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/logrotate/meta/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "bba6c3d3d1e0cee37cfd5e29a24eceb1e20f5aef3efa2c33bbaf2ed62c5096db", + "format": 1 + }, + { + "name": "roles/logrotate/tasks", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + 
"format": 1 + }, + { + "name": "roles/logrotate/tasks/cron.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "88fdd312207db167c50087b7e65e04bec0893d73945d18344ea28c8532789f5a", + "format": 1 + }, + { + "name": "roles/logrotate/tasks/install.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "c3d0fd1126c35b30279279fae055d9a04966e1082021da70bef12ddf9139fc86", + "format": 1 + }, + { + "name": "roles/logrotate/tasks/configure.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "5b5e8b5343798fcdca5281f2274f93cb5a8188b48cc7788c987d4012260b5745", + "format": 1 + }, + { + "name": "roles/logrotate/tasks/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "61fd220d6a4ae3a3cb4fbb24d3f1f7431394afa6171a6bd9b0f756e89fa31704", + "format": 1 + }, + { + "name": "roles/logrotate/tasks/prepare.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "144e1381858db7e2636d8698b249684097a154c87f486b543020bfdf91c8681c", + "format": 1 + }, + { + "name": "roles/logrotate/tasks/systemd.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "ab94e7c760cb5d38af7edff952070595e62018f1fc6969f9e511e759194c4ddd", + "format": 1 + }, + { + "name": "roles/logrotate/templates", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/logrotate/templates/cron_logrotate.j2", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "d02c0b1ab65f623d37e8cc8b89ff0ce3dcb31a69eff12763fa23913d38d0b091", + "format": 1 + }, + { + "name": "roles/logrotate/templates/logrotate.conf.j2", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "54d0419fb614bbf4fad4d754993062e03f89c87e29b8c7fbbafe4a9dde900875", + "format": 1 + }, + { + "name": "roles/logrotate/templates/logrotate.d.j2", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "0923d8fd33a08ee1c37755dc60a3635fee919b2c89322edb4a03c746aa03e152", + "format": 1 + }, + { + "name": "roles/logrotate/vars", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/logrotate/vars/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "dacc49162b0d3af2bbf483e205697ccdb262a8683d82646d006b5f54c2dc600c", + "format": 1 + }, + { + "name": "roles/mount", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/mount/.ansible-lint", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "3f79cdace5e0ac1512d6b3e35517aa678069b483ffb80ca63e5b51718be5bc31", + "format": 1 + }, + { + "name": "roles/mount/.editorconfig", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "ed43c7886023384f96a2b69d1288065a2cdc651bb9ffc9d7f2bbda6550f2abd6", + "format": 1 + }, + { + "name": "roles/mount/.flake8", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "2fe45659318525388b0574a4104028589a94f17e5f6a1cd796efca0bb1a4f510", + "format": 1 + }, + { + "name": "roles/mount/.gitignore", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "88b2262e97da40d844c28af01fe4153605745a11e29348301bf82f9a1a5881dc", + "format": 1 + }, + { + "name": "roles/mount/.yamllint", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "6a8f34854e6109024a678dbdc16a082b21e0134067fcfab10072b547991d8339", + "format": 1 + }, + { + "name": "roles/mount/CONTRIBUTING.md", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": 
"af957267f930f931b72fd2dc1d4127af66afe9aea4459075bc1c6b0051109df9", + "format": 1 + }, + { + "name": "roles/mount/LICENSE", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "c318b6f42ca9cd8f48502e05161a95a92b154ff5c0647209b208ee9ff9657a04", + "format": 1 + }, + { + "name": "roles/mount/Makefile", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "f80fec55dc046e6af709666d62fc5fd51f45afdf2e8efe978365fa5d0a069dbd", + "format": 1 + }, + { + "name": "roles/mount/README.md", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "ac4bffba4a184b2e665d23441b6401af3e1ff5e5f0f72fee995ee4a63f071b0a", + "format": 1 + }, + { + "name": "roles/mount/defaults", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/mount/defaults/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "5265171adb9941f5a543c3523930aa26748f3b51b883fa5202d9beedb03c0673", + "format": 1 + }, + { + "name": "roles/mount/hooks", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/mount/hooks/converge", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "1ddaaac121ea494491ac3419f0be9725405644c2b31781030d236ebc3d930664", + "format": 1 + }, + { + "name": "roles/mount/hooks/destroy", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "79c679a67141ef9cbfb56c8c7b948ca3df010c56d0c7af6c46535cde58bac8bb", + "format": 1 + }, + { + "name": "roles/mount/hooks/lint", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "fd048a322d0d05e245101fdae1a26a3f6a23c13bbe4f1fc455448672eb59a0a7", + "format": 1 + }, + { + "name": "roles/mount/hooks/molecule.rc", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "7fbafd0a44904bdb4b3b880ef8d2776279fd50416e825c23e43110a71e96bf90", + "format": 1 + }, + { + "name": "roles/mount/hooks/test", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "6137f41add5be2e1a9caa9abd1c1a483c8ce365370fd64eb85dee201a7074403", + "format": 1 + }, + { + "name": "roles/mount/hooks/tox.sh", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "c32820da1cc4358e7fa4731358bfa38fcbb221154bd45412aa2d9202f60c8419", + "format": 1 + }, + { + "name": "roles/mount/hooks/verify", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "d3ffa461db1e1516600a5de841029880521aff844c746e5dedf4bebfccdbf39b", + "format": 1 + }, + { + "name": "roles/mount/meta", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/mount/meta/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "cf8e52baed6a38a0b5a725fa4c59f43132aac99089a3ac8d8d5817675c0da0d9", + "format": 1 + }, + { + "name": "roles/mount/tasks", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/mount/tasks/installation.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "769103e6dee48e3b0feb272f511302282dacfe0a872366b4fadc019f5a8dd9db", + "format": 1 + }, + { + "name": "roles/mount/tasks/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "72667a253abeb063bcdc398636c37ae96aa2409d2fdc3141cbeecddad3856d1c", + "format": 1 + }, + { + "name": "roles/mount/tasks/configure.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "085ee139658d940615a69d9886f04e4424fe0b28b1a562a59ea5b4ecdd7bb8dc", + "format": 1 + }, + { + "name": "roles/mount/tasks/prepare.yaml", + "ftype": 
"file", + "chksum_type": "sha256", + "chksum_sha256": "347201a179a7b66c17148bf4406fd518cb8840b716d30aea89a501e327f8922b", + "format": 1 + }, + { + "name": "roles/mount/templates", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/mount/templates/credentials.j2", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "997c6bc298efe0ce0626b204ffaf382f2e68f4c41d1c6ee924beb2aca2727abc", + "format": 1 + }, + { + "name": "roles/mount/vars", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/mount/vars/archlinux.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "431df0a4a6bd9d57e0b0abae8f8ddf6fe6296fb5b51dd7406bd614e458d36e4b", + "format": 1 + }, + { + "name": "roles/mount/vars/artixlinux.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "431df0a4a6bd9d57e0b0abae8f8ddf6fe6296fb5b51dd7406bd614e458d36e4b", + "format": 1 + }, + { + "name": "roles/mount/vars/debian.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "18ef913dd5249540ab1df43e962c58fd80be03d83077704aa586d32bb1ad2f85", + "format": 1 + }, + { + "name": "roles/mount/vars/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "746257b23b8fc324064a71b7f26bc1379bfd08f1637a8971e3b4a845599750a2", + "format": 1 + }, + { + "name": "roles/mount/vars/redhat.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "dff9a5642d5ce1623b68559a3a70652db2a367d5555466a2ed6cba015993a83c", + "format": 1 + }, + { + "name": "roles/openvpn", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/openvpn/.ansible-lint", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "66a18ca0a3d12f377d3a8cc184e8aa429d9941137fb66328fdeb28cf7b9eba13", + "format": 1 + }, + { + "name": "roles/openvpn/.editorconfig", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "ed43c7886023384f96a2b69d1288065a2cdc651bb9ffc9d7f2bbda6550f2abd6", + "format": 1 + }, + { + "name": "roles/openvpn/.flake8", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "2fe45659318525388b0574a4104028589a94f17e5f6a1cd796efca0bb1a4f510", + "format": 1 + }, + { + "name": "roles/openvpn/.gitignore", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "88b2262e97da40d844c28af01fe4153605745a11e29348301bf82f9a1a5881dc", + "format": 1 + }, + { + "name": "roles/openvpn/.yamllint", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "6a8f34854e6109024a678dbdc16a082b21e0134067fcfab10072b547991d8339", + "format": 1 + }, + { + "name": "roles/openvpn/CONTRIBUTING.md", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "af957267f930f931b72fd2dc1d4127af66afe9aea4459075bc1c6b0051109df9", + "format": 1 + }, + { + "name": "roles/openvpn/LICENSE", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "c318b6f42ca9cd8f48502e05161a95a92b154ff5c0647209b208ee9ff9657a04", + "format": 1 + }, + { + "name": "roles/openvpn/Makefile", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "f80fec55dc046e6af709666d62fc5fd51f45afdf2e8efe978365fa5d0a069dbd", + "format": 1 + }, + { + "name": "roles/openvpn/defaults", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/openvpn/defaults/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": 
"10532299eb76101c3a51d2d6e445e263102fa1405c4c2cf7b7829a98ccb91090", + "format": 1 + }, + { + "name": "roles/openvpn/files", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/openvpn/files/down.sh", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "39debebcd8c899f20e6d355cbc8eaab46e28b83a9f6c33a94c065688a4f3d2c7", + "format": 1 + }, + { + "name": "roles/openvpn/files/up.sh", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "d887ee065261affd849227fa27e092cf66549d824a698f302312d15f787dd840", + "format": 1 + }, + { + "name": "roles/openvpn/handlers", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/openvpn/handlers/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "eb18b92e87111cacddcfb8f519e2a77cac4316666637917e2994c55393c3a13b", + "format": 1 + }, + { + "name": "roles/openvpn/hooks", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/openvpn/hooks/converge", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "1ddaaac121ea494491ac3419f0be9725405644c2b31781030d236ebc3d930664", + "format": 1 + }, + { + "name": "roles/openvpn/hooks/destroy", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "79c679a67141ef9cbfb56c8c7b948ca3df010c56d0c7af6c46535cde58bac8bb", + "format": 1 + }, + { + "name": "roles/openvpn/hooks/lint", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "fd048a322d0d05e245101fdae1a26a3f6a23c13bbe4f1fc455448672eb59a0a7", + "format": 1 + }, + { + "name": "roles/openvpn/hooks/molecule.rc", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "7fbafd0a44904bdb4b3b880ef8d2776279fd50416e825c23e43110a71e96bf90", + "format": 1 + }, + { + "name": "roles/openvpn/hooks/test", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "6137f41add5be2e1a9caa9abd1c1a483c8ce365370fd64eb85dee201a7074403", + "format": 1 + }, + { + "name": "roles/openvpn/hooks/tox.sh", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "c32820da1cc4358e7fa4731358bfa38fcbb221154bd45412aa2d9202f60c8419", + "format": 1 + }, + { + "name": "roles/openvpn/hooks/verify", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "d3ffa461db1e1516600a5de841029880521aff844c746e5dedf4bebfccdbf39b", + "format": 1 + }, + { + "name": "roles/openvpn/meta", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/openvpn/meta/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "1609a86aa7dc8aab644ae47f7574c2ff92b557d00ebfe13b090c44bb5920203e", + "format": 1 + }, + { + "name": "roles/openvpn/tasks", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/openvpn/tasks/configure.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "044550b0f81d115fb81e984f042defde717e4e2e2f11500f3a611db70c7558e5", + "format": 1 + }, + { + "name": "roles/openvpn/tasks/configure", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/openvpn/tasks/configure/client.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "8151e7843364d54eb9a49731ebef1f77ffcdca4f83437d61e20abb1e9a83be44", + "format": 1 + }, + { + "name": "roles/openvpn/tasks/configure/roadrunners.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": 
"c56c7f544bc6355670145384625e8ae36bf954a89d74e8131e87d06139d34c48", + "format": 1 + }, + { + "name": "roles/openvpn/tasks/configure/server.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "7ba3c555eaac483b81dfeb814d84de99630e9aa780aa9517da65b162310d62b8", + "format": 1 + }, + { + "name": "roles/openvpn/tasks/configure/static_client_instances.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "db71cc7e488ccb0b95e7d8fd33442e6afb713a71cc0f5d5fffc5969537477956", + "format": 1 + }, + { + "name": "roles/openvpn/tasks/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "966976135b2dfd58d8675132e516138c76cbc29a43229a1876cb3880663568e9", + "format": 1 + }, + { + "name": "roles/openvpn/tasks/install.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "d0f5442474a05f32a1156460645220283422a47e437fc4dc6491727d56f70063", + "format": 1 + }, + { + "name": "roles/openvpn/tasks/prepare.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "fdaf88a844e6103bea9cf347c229339ed19c4392ab738d4d147051dd39c09cb9", + "format": 1 + }, + { + "name": "roles/openvpn/tasks/service.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "91aeccd6c6584c1458fca9a27f3f843a3582a6cd3a9f71d104d8603527f9acf9", + "format": 1 + }, + { + "name": "roles/openvpn/templates", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/openvpn/templates/easy-rsa", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/openvpn/templates/easy-rsa/openssl-easyrsa.cnf.j2", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "223fb4ce6833a09357bfa07993be422905289d4742f8d14fa5a95c8c5441f451", + "format": 1 + }, + { + "name": "roles/openvpn/templates/easy-rsa/vars.j2", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "92fe605e8661515751cbb9f9d7820de4a1daf299f1aa83512cde0c4e016e262f", + "format": 1 + }, + { + "name": "roles/openvpn/templates/helper", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/openvpn/templates/helper/copy-config-to-sudo-user-home.sh.j2", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "b918a1d402780824a8d0b7e61f7cb031adbfd6179886f8c6631bcb475de2baca", + "format": 1 + }, + { + "name": "roles/openvpn/templates/helper/create-vpn-user.sh.j2", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "b282861055ab3a2c03afcf938ff440d556e2d964d8ff7e9639d0d9528bd6d838", + "format": 1 + }, + { + "name": "roles/openvpn/templates/helper/revoke-vpn-user.sh.j2", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "2db9d159436ad3c1f8a709389ca3af69d13fc0d8043da1e971a6e9663f2a077e", + "format": 1 + }, + { + "name": "roles/openvpn/templates/init", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/openvpn/templates/init/systemd", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/openvpn/templates/init/systemd/openvpn-server@.service", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "56388b6c9785fb7a9103ee90c208ade590d725e596a89f3f4d65552e95d52e06", + "format": 1 + }, + { + "name": "roles/openvpn/templates/init/systemd/override.conf.j2", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "885392d2b75f3fe4a89b3d7dcbd80807e2b09a6cd7ff55792de8544fc1adc211", + "format": 1 + 
}, + { + "name": "roles/openvpn/templates/openvpn", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/openvpn/templates/openvpn/client_users", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/openvpn/templates/openvpn/client_users/client.ovpn.template.j2", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "1ce16e164d77f5d1b68536a51850e2e4a513a51cadee11067884f10a94f2110b", + "format": 1 + }, + { + "name": "roles/openvpn/templates/openvpn/clients", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/openvpn/templates/openvpn/clients/client.conf.j2", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "17ef178b133d2ebc0b021d51cecacae609f1b11ba6d877bbe86cc660ec89c09d", + "format": 1 + }, + { + "name": "roles/openvpn/templates/openvpn/server", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/openvpn/templates/openvpn/server/ipp.txt.j2", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "21c84b4f93b8a0cf0e5c8d899533d326ca04e35dc000e00f82a07b0d1d519fd5", + "format": 1 + }, + { + "name": "roles/openvpn/templates/openvpn/server/server.conf.j2", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "b0ee916e8b8a44c2d814bc159a55ee419fd9bf71b0063387914e0d9b3cc287d8", + "format": 1 + }, + { + "name": "roles/openvpn/templates/openvpn/server/static-client.j2", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "096c2149811ed07684af8fc5bc372bf5075dbf925f32bb8cb4e8bb1a43c29334", + "format": 1 + }, + { + "name": "roles/openvpn/vars", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/openvpn/vars/archlinux-openrc.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "d1ae50e0bfd92e7f24061bc72f0dff4f8898314db8b96cd604864d1bb2b2d670", + "format": 1 + }, + { + "name": "roles/openvpn/vars/archlinux.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "b0ac2ddf257b3624a4408c3b8e80b0eaa2aa592a011ac7e5bbf21ba755494331", + "format": 1 + }, + { + "name": "roles/openvpn/vars/artixlinux.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "d1ae50e0bfd92e7f24061bc72f0dff4f8898314db8b96cd604864d1bb2b2d670", + "format": 1 + }, + { + "name": "roles/openvpn/vars/debian.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "04bd49b832e3cac0ae9738e2c7f645facb20f2e54fa127d6ec891ffe30c6a2c5", + "format": 1 + }, + { + "name": "roles/openvpn/vars/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "fbfa5b722613df57a8eda310b6a9bef632647f6830c63bfcac99e80b0a7cfe0a", + "format": 1 + }, + { + "name": "roles/openvpn/README.md", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "08a6fd8f0cae0675afa058280265e13d0a7e7a51c931f6ea2e47eef200f3658a", + "format": 1 + }, + { + "name": "roles/pacman", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/pacman/.ansible-lint", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "3f79cdace5e0ac1512d6b3e35517aa678069b483ffb80ca63e5b51718be5bc31", + "format": 1 + }, + { + "name": "roles/pacman/.editorconfig", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "ed43c7886023384f96a2b69d1288065a2cdc651bb9ffc9d7f2bbda6550f2abd6", + "format": 1 + }, + { + "name": 
"roles/pacman/.flake8", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "2fe45659318525388b0574a4104028589a94f17e5f6a1cd796efca0bb1a4f510", + "format": 1 + }, + { + "name": "roles/pacman/.gitignore", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "88b2262e97da40d844c28af01fe4153605745a11e29348301bf82f9a1a5881dc", + "format": 1 + }, + { + "name": "roles/pacman/.yamllint", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "6a8f34854e6109024a678dbdc16a082b21e0134067fcfab10072b547991d8339", + "format": 1 + }, + { + "name": "roles/pacman/CONTRIBUTING.md", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "af957267f930f931b72fd2dc1d4127af66afe9aea4459075bc1c6b0051109df9", + "format": 1 + }, + { + "name": "roles/pacman/LICENSE", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "c318b6f42ca9cd8f48502e05161a95a92b154ff5c0647209b208ee9ff9657a04", + "format": 1 + }, + { + "name": "roles/pacman/Makefile", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "f80fec55dc046e6af709666d62fc5fd51f45afdf2e8efe978365fa5d0a069dbd", + "format": 1 + }, + { + "name": "roles/pacman/README.md", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "d7337fb2a2803ac33ecc0963085a1faba76360407399f6d553a9e65d629ccd39", + "format": 1 + }, + { + "name": "roles/pacman/defaults", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/pacman/defaults/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "6591a514c49dd215da23934ddcca6f6aaac73627fa71e358f65cf6662a42291c", + "format": 1 + }, + { + "name": "roles/pacman/handlers", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/pacman/handlers/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "206d69c07e61e442c86e2663c875dc87302783cec950adc16df7e580de4e705a", + "format": 1 + }, + { + "name": "roles/pacman/hooks", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/pacman/hooks/converge", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "1ddaaac121ea494491ac3419f0be9725405644c2b31781030d236ebc3d930664", + "format": 1 + }, + { + "name": "roles/pacman/hooks/destroy", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "79c679a67141ef9cbfb56c8c7b948ca3df010c56d0c7af6c46535cde58bac8bb", + "format": 1 + }, + { + "name": "roles/pacman/hooks/lint", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "fd048a322d0d05e245101fdae1a26a3f6a23c13bbe4f1fc455448672eb59a0a7", + "format": 1 + }, + { + "name": "roles/pacman/hooks/molecule.rc", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "7fbafd0a44904bdb4b3b880ef8d2776279fd50416e825c23e43110a71e96bf90", + "format": 1 + }, + { + "name": "roles/pacman/hooks/test", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "6137f41add5be2e1a9caa9abd1c1a483c8ce365370fd64eb85dee201a7074403", + "format": 1 + }, + { + "name": "roles/pacman/hooks/tox.sh", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "c32820da1cc4358e7fa4731358bfa38fcbb221154bd45412aa2d9202f60c8419", + "format": 1 + }, + { + "name": "roles/pacman/hooks/verify", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "d3ffa461db1e1516600a5de841029880521aff844c746e5dedf4bebfccdbf39b", + "format": 1 + }, + { + "name": "roles/pacman/meta", + "ftype": "dir", + "chksum_type": null, + 
"chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/pacman/meta/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "d1f74fbd2bb1a32ffa57e8ba1d7b5726517a10537049cbf38ad5eb8a253f880a", + "format": 1 + }, + { + "name": "roles/pacman/tasks", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/pacman/tasks/configure.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "8308fc17a2ab2928fe973ac38f80f5d1bf224dbfaa675a53bf32727f81c97884", + "format": 1 + }, + { + "name": "roles/pacman/tasks/configure", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/pacman/tasks/configure/hooks.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "cf9840f25ecd3f5010bcd28a71bcee3dda570ab2bac65679d8c21435b913929e", + "format": 1 + }, + { + "name": "roles/pacman/tasks/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "ac977b894e8c72301722a2d8ef5437bcd210179c7c3b8a3cd3b2257744a820bb", + "format": 1 + }, + { + "name": "roles/pacman/tasks/prepare.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "ae53dc640b2a7533e7786dba06938153b82e2d4ca3a9e2effb82e69aeed76c89", + "format": 1 + }, + { + "name": "roles/pacman/templates", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/pacman/templates/pacman.conf.j2", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "d9b98c3f7e3cfb913e7731cf9b58ebd2bd922d4f91efa4dc6bff2bb833688cbc", + "format": 1 + }, + { + "name": "roles/pacman/templates/pacman.d", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/pacman/templates/pacman.d/hooks", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/pacman/templates/pacman.d/hooks/hook.j2", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "9ed22e04586497f130c5c51bbab772445959fd91f767527ebb986b7a4ba74917", + "format": 1 + }, + { + "name": "roles/pacman/templates/pacman.d/hooks/linux-modules-post.hook.j2", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "6c17369202cef588fabb43fcabfb7ade89df4738acb4d0c16ed86bb682d13f5b", + "format": 1 + }, + { + "name": "roles/pacman/templates/pacman.d/hooks/linux-modules-pre.hook.j2", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "059101d41dcc8d491a1eb35db95eed98120d70ec21a85e8c9b154bbcafad3b09", + "format": 1 + }, + { + "name": "roles/pacman/templates/pacman.d/mirrorlist.j2", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "140e15336609f28dd1882106356dfdd06168a5358c30be173a45d5bef5b9a9a7", + "format": 1 + }, + { + "name": "roles/pacman/vars", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/pacman/vars/archlinux.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "a9d9cd35f2bf59d37ab01962eb134b54dbc4499d97bc8a30fb8df6eaf170484e", + "format": 1 + }, + { + "name": "roles/pacman/vars/artixlinux.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "7287f3f45ec57021b75aefda93cee9b9e779f8a414a12f815e1e8965759276fb", + "format": 1 + }, + { + "name": "roles/pacman/vars/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "f763eefc35c06c56f190a0598bc237a979f27a1376e33d00c1beeec54842362f", + "format": 1 + }, + { + "name": "roles/sshd", + 
"ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/sshd/.ansible-lint", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "3f79cdace5e0ac1512d6b3e35517aa678069b483ffb80ca63e5b51718be5bc31", + "format": 1 + }, + { + "name": "roles/sshd/.editorconfig", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "ed43c7886023384f96a2b69d1288065a2cdc651bb9ffc9d7f2bbda6550f2abd6", + "format": 1 + }, + { + "name": "roles/sshd/.flake8", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "2fe45659318525388b0574a4104028589a94f17e5f6a1cd796efca0bb1a4f510", + "format": 1 + }, + { + "name": "roles/sshd/.gitignore", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "88b2262e97da40d844c28af01fe4153605745a11e29348301bf82f9a1a5881dc", + "format": 1 + }, + { + "name": "roles/sshd/.yamllint", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "6a8f34854e6109024a678dbdc16a082b21e0134067fcfab10072b547991d8339", + "format": 1 + }, + { + "name": "roles/sshd/CONTRIBUTING.md", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "af957267f930f931b72fd2dc1d4127af66afe9aea4459075bc1c6b0051109df9", + "format": 1 + }, + { + "name": "roles/sshd/LICENSE", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "4c3f6f9f8a0920ee764edb6f78e368e60823c8f1c1b816ccf1a22c87af422d4d", + "format": 1 + }, + { + "name": "roles/sshd/Makefile", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "22a0d6abe4d830d86b79c2ef366996400d6b634b9e11d0834c1bb0a9ce1bd12d", + "format": 1 + }, + { + "name": "roles/sshd/README.md", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "db6fb2c41603843f18723fa2afacadd94daef1b8754483c3480e124034a3c132", + "format": 1 + }, + { + "name": "roles/sshd/defaults", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/sshd/defaults/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "6f90e588fb6e924508557cfb99c0817dd5daf1e6e4f70753103dde7cf1a0e8e9", + "format": 1 + }, + { + "name": "roles/sshd/handlers", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/sshd/handlers/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "980ef6378fbf8d406f6e8982958365907cf2dc32dbd517c42b31fd354a0e4c26", + "format": 1 + }, + { + "name": "roles/sshd/hooks", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/sshd/hooks/converge", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "1ddaaac121ea494491ac3419f0be9725405644c2b31781030d236ebc3d930664", + "format": 1 + }, + { + "name": "roles/sshd/hooks/destroy", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "79c679a67141ef9cbfb56c8c7b948ca3df010c56d0c7af6c46535cde58bac8bb", + "format": 1 + }, + { + "name": "roles/sshd/hooks/lint", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "fd048a322d0d05e245101fdae1a26a3f6a23c13bbe4f1fc455448672eb59a0a7", + "format": 1 + }, + { + "name": "roles/sshd/hooks/molecule.rc", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "7fbafd0a44904bdb4b3b880ef8d2776279fd50416e825c23e43110a71e96bf90", + "format": 1 + }, + { + "name": "roles/sshd/hooks/test", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "6137f41add5be2e1a9caa9abd1c1a483c8ce365370fd64eb85dee201a7074403", + "format": 1 + }, + { + "name": 
"roles/sshd/hooks/tox.sh", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "c32820da1cc4358e7fa4731358bfa38fcbb221154bd45412aa2d9202f60c8419", + "format": 1 + }, + { + "name": "roles/sshd/hooks/verify", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "d3ffa461db1e1516600a5de841029880521aff844c746e5dedf4bebfccdbf39b", + "format": 1 + }, + { + "name": "roles/sshd/meta", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/sshd/meta/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "9bc83adc243543844250f36a2bc37cb390ee528ad2cb4a103985bba8935e68e4", + "format": 1 + }, + { + "name": "roles/sshd/tasks", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/sshd/tasks/configure.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "50068c3e1a394dc3230ebeee6a44d2f81f497e4bd38566dd1f14fec26c10eb84", + "format": 1 + }, + { + "name": "roles/sshd/tasks/install.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "1ff4b3d0163d808bbbf7ef3a726122894c15297983821f993e5059b1069f2fc0", + "format": 1 + }, + { + "name": "roles/sshd/tasks/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "68fdaa86d6c51c631842432fa898bce854d43ef00f5e1a87156d09c17177e93d", + "format": 1 + }, + { + "name": "roles/sshd/tasks/service.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "ed0411fd0a005a00afce8d5961c21573f7d33622c0410a432ed097684d830e26", + "format": 1 + }, + { + "name": "roles/sshd/tasks/prepare.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "9a574676d528f7b77556e3bd1337845207da2204bdff212da153008376dd8e8f", + "format": 1 + }, + { + "name": "roles/sshd/templates", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/sshd/templates/sshd", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/sshd/templates/sshd/ssh_config.j2", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "34796c52ac38e2b2b307b0f5dce25c7b1771045fcda8ac8f85080446f195ed0b", + "format": 1 + }, + { + "name": "roles/sshd/templates/sshd/sshd_config.d", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/sshd/templates/sshd/sshd_config.d/match_users.conf.j2", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "591edea6ed83c9dd4ae7763803fd031a77eee043f3c85ae83bd12ddd4d39233b", + "format": 1 + }, + { + "name": "roles/sshd/templates/sshd/sshd_config.d/subsystem.conf.j2", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "ffa2171ae5f896ee9f34766580074b4d928548095e74497d1c1a164894841cae", + "format": 1 + }, + { + "name": "roles/sshd/templates/sshd/sshd_config.j2", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "1186ba6d2bbad9d7afc97dc6953fae805a1c388baa463e4cb68911e30a1b6af1", + "format": 1 + }, + { + "name": "roles/sshd/vars", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/sshd/vars/archlinux-openrc.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "cc47c1d5d5849903d6b34e46a091d0b2ced27330e480c2da0e030c91b16b2841", + "format": 1 + }, + { + "name": "roles/sshd/vars/archlinux.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": 
"1e4cc01143e4eb6a2decc4a513d2f049b88ddd90a9ace5cc9c7182a0d8903588", + "format": 1 + }, + { + "name": "roles/sshd/vars/artixlinux.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "cc47c1d5d5849903d6b34e46a091d0b2ced27330e480c2da0e030c91b16b2841", + "format": 1 + }, + { + "name": "roles/sshd/vars/debian.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "144e1381858db7e2636d8698b249684097a154c87f486b543020bfdf91c8681c", + "format": 1 + }, + { + "name": "roles/sshd/vars/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "0d24451cb642529f804b2675db8241cbb1deb4f84495167063a5edcd6d45fece", + "format": 1 + }, + { + "name": "roles/sshd/vars/ubuntu-24.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "f283d16b924cee0f19b1f9731f4d5a2283ed949b01e0172667754117585ad049", + "format": 1 + }, + { + "name": "roles/sysctl", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/sysctl/.ansible-lint", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "3f79cdace5e0ac1512d6b3e35517aa678069b483ffb80ca63e5b51718be5bc31", + "format": 1 + }, + { + "name": "roles/sysctl/.editorconfig", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "ed43c7886023384f96a2b69d1288065a2cdc651bb9ffc9d7f2bbda6550f2abd6", + "format": 1 + }, + { + "name": "roles/sysctl/.flake8", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "2fe45659318525388b0574a4104028589a94f17e5f6a1cd796efca0bb1a4f510", + "format": 1 + }, + { + "name": "roles/sysctl/.gitignore", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "88b2262e97da40d844c28af01fe4153605745a11e29348301bf82f9a1a5881dc", + "format": 1 + }, + { + "name": "roles/sysctl/.yamllint", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "6a8f34854e6109024a678dbdc16a082b21e0134067fcfab10072b547991d8339", + "format": 1 + }, + { + "name": "roles/sysctl/CONTRIBUTING.md", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "af957267f930f931b72fd2dc1d4127af66afe9aea4459075bc1c6b0051109df9", + "format": 1 + }, + { + "name": "roles/sysctl/LICENSE", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "c318b6f42ca9cd8f48502e05161a95a92b154ff5c0647209b208ee9ff9657a04", + "format": 1 + }, + { + "name": "roles/sysctl/Makefile", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "f80fec55dc046e6af709666d62fc5fd51f45afdf2e8efe978365fa5d0a069dbd", + "format": 1 + }, + { + "name": "roles/sysctl/README.md", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "0f308b769d982de21968987077b13580694e3466fb751d64ebb1ac5a5f13fc68", + "format": 1 + }, + { + "name": "roles/sysctl/defaults", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/sysctl/defaults/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "d51c49e3856dd4558fddee689900cb1adc6695942b03d3ec8424d9ab49e02fe0", + "format": 1 + }, + { + "name": "roles/sysctl/handlers", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/sysctl/handlers/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "c648fb7afe6dc7f25aad81e8c2bae8d2da2adfb468037ca805cc3d5c31ede387", + "format": 1 + }, + { + "name": "roles/sysctl/meta", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/sysctl/meta/main.yml", + "ftype": 
"file", + "chksum_type": "sha256", + "chksum_sha256": "a87d6314894f03ce23da799e034a93af3e8db3268fa4b7311b57b1a2d1770d71", + "format": 1 + }, + { + "name": "roles/sysctl/tasks", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/sysctl/tasks/configure", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/sysctl/tasks/configure/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "08513f913cc2ccee3e56c83ee4ad71dd724eedf7961796f81d11c8ca2120e475", + "format": 1 + }, + { + "name": "roles/sysctl/tasks/configure/sysctl.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "51ba5fc9a33af005df7a484f5ac19d025600517265fd12a8e18de6eaaccd1354", + "format": 1 + }, + { + "name": "roles/sysctl/tasks/installation.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "eb9c8c16e6c4434adee8dccec66345cbc8d89b5de67011effbe3395ab901e0a0", + "format": 1 + }, + { + "name": "roles/sysctl/tasks/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "554f0f2918110419eb841988f96543464aca1de2fffef7f215a7f1d3fb328cfd", + "format": 1 + }, + { + "name": "roles/sysctl/tasks/prepare.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "551b300e0151611f7d030276b0c357cac9ad7035aa2c5ce5c0a55c96926c1e8e", + "format": 1 + }, + { + "name": "roles/sysctl/templates", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/sysctl/templates/sysctl.conf.j2", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "3ff24cb5fa7b904481eed9ad4f9edd19cdf010fa6f1587439343f9b8af3c7fef", + "format": 1 + }, + { + "name": "roles/sysctl/templates/etc", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/sysctl/templates/etc/sysctl.conf.j2", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "4a9a932ee8f994d3200ae96341c6329aa826c587a976b44f0a853989a80af5d7", + "format": 1 + }, + { + "name": "roles/sysctl/vars", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/sysctl/vars/archlinux.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "144e1381858db7e2636d8698b249684097a154c87f486b543020bfdf91c8681c", + "format": 1 + }, + { + "name": "roles/sysctl/vars/artixlinux.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "144e1381858db7e2636d8698b249684097a154c87f486b543020bfdf91c8681c", + "format": 1 + }, + { + "name": "roles/sysctl/vars/debian.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "144e1381858db7e2636d8698b249684097a154c87f486b543020bfdf91c8681c", + "format": 1 + }, + { + "name": "roles/sysctl/vars/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "adbff666def010fe4ddce80795772a0b24749632a1c2acfe0317354a30def949", + "format": 1 + }, + { + "name": "roles/syslog_ng", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/syslog_ng/.ansible-lint", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "3f79cdace5e0ac1512d6b3e35517aa678069b483ffb80ca63e5b51718be5bc31", + "format": 1 + }, + { + "name": "roles/syslog_ng/.editorconfig", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "ed43c7886023384f96a2b69d1288065a2cdc651bb9ffc9d7f2bbda6550f2abd6", + "format": 1 + }, + { + "name": 
"roles/syslog_ng/.flake8", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "2fe45659318525388b0574a4104028589a94f17e5f6a1cd796efca0bb1a4f510", + "format": 1 + }, + { + "name": "roles/syslog_ng/.gitignore", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "88b2262e97da40d844c28af01fe4153605745a11e29348301bf82f9a1a5881dc", + "format": 1 + }, + { + "name": "roles/syslog_ng/.yamllint", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "6a8f34854e6109024a678dbdc16a082b21e0134067fcfab10072b547991d8339", + "format": 1 + }, + { + "name": "roles/syslog_ng/CONTRIBUTING.md", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "af957267f930f931b72fd2dc1d4127af66afe9aea4459075bc1c6b0051109df9", + "format": 1 + }, + { + "name": "roles/syslog_ng/LICENSE", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "4c3f6f9f8a0920ee764edb6f78e368e60823c8f1c1b816ccf1a22c87af422d4d", + "format": 1 + }, + { + "name": "roles/syslog_ng/Makefile", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "f80fec55dc046e6af709666d62fc5fd51f45afdf2e8efe978365fa5d0a069dbd", + "format": 1 + }, + { + "name": "roles/syslog_ng/README.md", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "481917b7bfa4e74bf45b2ceb24c47c5df1727779fb80f74d4bad5c5ffc4b1687", + "format": 1 + }, + { + "name": "roles/syslog_ng/defaults", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/syslog_ng/defaults/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "79d7ce4f7e9b3798b645fbe5daae6781a90a155d3afa271df7ce19897ab2d517", + "format": 1 + }, + { + "name": "roles/syslog_ng/handlers", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/syslog_ng/handlers/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "29c8b9c20484b88164cdd4419b5bc84aebe99f5c3bae9eabb9fa87305d6dbb07", + "format": 1 + }, + { + "name": "roles/syslog_ng/hooks", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/syslog_ng/hooks/converge", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "1ddaaac121ea494491ac3419f0be9725405644c2b31781030d236ebc3d930664", + "format": 1 + }, + { + "name": "roles/syslog_ng/hooks/destroy", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "79c679a67141ef9cbfb56c8c7b948ca3df010c56d0c7af6c46535cde58bac8bb", + "format": 1 + }, + { + "name": "roles/syslog_ng/hooks/lint", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "fd048a322d0d05e245101fdae1a26a3f6a23c13bbe4f1fc455448672eb59a0a7", + "format": 1 + }, + { + "name": "roles/syslog_ng/hooks/molecule.rc", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "7fbafd0a44904bdb4b3b880ef8d2776279fd50416e825c23e43110a71e96bf90", + "format": 1 + }, + { + "name": "roles/syslog_ng/hooks/test", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "6137f41add5be2e1a9caa9abd1c1a483c8ce365370fd64eb85dee201a7074403", + "format": 1 + }, + { + "name": "roles/syslog_ng/hooks/tox.sh", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "c32820da1cc4358e7fa4731358bfa38fcbb221154bd45412aa2d9202f60c8419", + "format": 1 + }, + { + "name": "roles/syslog_ng/hooks/verify", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "d3ffa461db1e1516600a5de841029880521aff844c746e5dedf4bebfccdbf39b", + "format": 1 + }, + { + "name": 
"roles/syslog_ng/meta", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/syslog_ng/meta/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "a8281fdb96c569b7d377803e1c3b68588fc476e43c261f663ce63e9ff2f94a26", + "format": 1 + }, + { + "name": "roles/syslog_ng/tasks", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/syslog_ng/tasks/configure.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "9aabd0f0c6ddd115de84328cf71d5b95a08e9e17a1497c848d51248158293483", + "format": 1 + }, + { + "name": "roles/syslog_ng/tasks/configure_journald.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "9f6ed54721e30f8f00716a59df1bdfe2e510d536dc7dc7082016dfd98d582a6c", + "format": 1 + }, + { + "name": "roles/syslog_ng/tasks/install.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "7e32b2d586ffe898b3e2716654dd3d3f10c03966eb81d626d2179191b6c9d81b", + "format": 1 + }, + { + "name": "roles/syslog_ng/tasks/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "004b2560f2945e7275e8741acff58e80c7d0df0893041ba7c99ed88abd61494c", + "format": 1 + }, + { + "name": "roles/syslog_ng/tasks/prepare.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "cd9b5ad19160ebf0a2aacdd3e5e8d7e2af122b514a0fe98e4a559a810a25ff31", + "format": 1 + }, + { + "name": "roles/syslog_ng/tasks/service.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "8ca9aa14236b90ea7a048e53795d1bcc28580123171ace70ecde6f465b07f146", + "format": 1 + }, + { + "name": "roles/syslog_ng/templates", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/syslog_ng/templates/conf.d", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/syslog_ng/templates/conf.d/destinations.conf.j2", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "4da66d642142c539c7eeb6abb9e1fdb66fe4d7f1b625887ee8ed3d7b77b19c0b", + "format": 1 + }, + { + "name": "roles/syslog_ng/templates/conf.d/filters.conf.j2", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "60db52ffb6ba06fd8f9cf2dfb2c3c4c08a11501f9be3314d54227dc1890a3479", + "format": 1 + }, + { + "name": "roles/syslog_ng/templates/conf.d/logs.conf.j2", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "ec8632f3af71dcbd4337b3632adfaaa3e8159aaff1d8286d3f6260f4edf0e699", + "format": 1 + }, + { + "name": "roles/syslog_ng/templates/conf.d/sources.conf.j2", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "dc4e777be4c6f50f39a8dd27cc64daed069fd7cc96f3f61354bd0182c59a659e", + "format": 1 + }, + { + "name": "roles/syslog_ng/templates/journald.conf.j2", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "978f97ff8e7f8c0d94e7983f91a641ef85b3308cc3c3c4cca34f31dd965d0db6", + "format": 1 + }, + { + "name": "roles/syslog_ng/templates/syslog-ng.conf.j2", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "7e2cbd72cee378d926b2c899ef2312d0e02309d71e2b71551e3e1cc6c7d467b7", + "format": 1 + }, + { + "name": "roles/syslog_ng/vars", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/syslog_ng/vars/archlinux-openrc.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "54fe45c1d9d7a19a40bb062d2e045547c42a2eb1134164236e4c4850d12d1a03", + "format": 1 + 
}, + { + "name": "roles/syslog_ng/vars/archlinux.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "4851a656712b16ee4ec720bf8c0e4a4c6bd7b03ad39321589482866e00fe1c5b", + "format": 1 + }, + { + "name": "roles/syslog_ng/vars/artixlinux.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "54fe45c1d9d7a19a40bb062d2e045547c42a2eb1134164236e4c4850d12d1a03", + "format": 1 + }, + { + "name": "roles/syslog_ng/vars/debian.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "7efae1329d6e26f34ab6e1fe4a1dfad7daf9659c2bd460a8d4b35cfd8c51e42e", + "format": 1 + }, + { + "name": "roles/syslog_ng/vars/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "cc1eb340d7bda578955d9f39bfe1aabfaaa0a83b122c833738f5aa8d45413999", + "format": 1 + }, + { + "name": "roles/bash_alias", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/bash_alias/default", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/bash_alias/default/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", + "format": 1 + }, + { + "name": "test-requirements.txt", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "13ea3ee6460c8dd30325c3f297b3db597332b848ac0d146fb3d408927fd47563", + "format": 1 + }, + { + "name": "tests", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/test_lookup.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "042d34a1b7e2fdd25c66739b7f55b09c7acca13d4667c73b809b856c277a18ae", + "format": 1 + }, + { + "name": "README.md", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "b2245bcabfc44d528e01ae8a2629f60132557923edca72a422e073fa95840242", + "format": 1 + }, + { + "name": "tox.ini", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "c94cefed134293a6a727beaaa4c235883ddc585c0c08deadd6043e4f9b80fc28", + "format": 1 + } + ], + "format": 1 +} \ No newline at end of file diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/core/LICENSE b/ansible/playbooks/collections/ansible_collections/bodsch/core/LICENSE new file mode 100644 index 0000000..261eeb9 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/core/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. 
+ + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/core/MANIFEST.json b/ansible/playbooks/collections/ansible_collections/bodsch/core/MANIFEST.json
new file mode 100644
index 0000000..18d5b81
--- /dev/null
+++ b/ansible/playbooks/collections/ansible_collections/bodsch/core/MANIFEST.json
@@ -0,0 +1,42 @@
+{
+  "collection_info": {
+    "namespace": "bodsch",
+    "name": "core",
+    "version": "2.10.1",
+    "authors": [
+      "Bodo Schulz "
+    ],
+    "readme": "README.md",
+    "tags": [
+      "pki",
+      "vpn",
+      "openvpn",
+      "easyrsa",
+      "certificate",
+      "security",
+      "automation"
+    ],
+    "description": "collection of core modules for my ansible roles",
+    "license": [
+      "Apache-2.0"
+    ],
+    "license_file": null,
+    "dependencies": {
+      "ansible.utils": "*",
+      "ansible.posix": "*",
+      "community.general": ">=10.5"
+    },
+    "repository": "https://github.com/bodsch/ansible-collection-core",
+    "documentation": "https://github.com/bodsch/ansible-collection-core/README.md",
+    "homepage": "https://github.com/bodsch/ansible-collection-core",
+    "issues": "https://github.com/bodsch/ansible-collection-core/issues"
+  },
+  "file_manifest_file": {
+    "name": "FILES.json",
+    "ftype": "file",
+    "chksum_type": "sha256",
+    "chksum_sha256": "bf021256b84411724fe68c0611287919125acc1d3ea4ebc1fdef0fad58e10ced",
+    "format": 1
+  },
+  "format": 1
+}
\ No newline at end of file
diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/core/README.md b/ansible/playbooks/collections/ansible_collections/bodsch/core/README.md
new file mode 100644
index 0000000..dd214fb
--- /dev/null
+++ b/ansible/playbooks/collections/ansible_collections/bodsch/core/README.md
@@ -0,0 +1,420 @@
+# Ansible Collection - bodsch.core
+
+Documentation for the collection.
+
+This collection aims to offer a set of Ansible modules and helper functions.
+
+## Supported operating systems
+
+Tested on:
+
+* ArchLinux
+* Debian based
+  - Debian 10 / 11 / 12 / 13
+  - Ubuntu 20.04 / 22.04 / 24.04
+
+> **RedHat-based systems are no longer officially supported! They may still work, but there is no guarantee.**
+
+
+## Requirements & Dependencies
+
+- `dnspython`
+- `dirsync`
+- `netaddr`
+
+```bash
+pip install dnspython
+pip install dirsync
+pip install netaddr
+```
+
+## Included content
+
+
+### Roles
+
+| Role | Build State | Description |
+|:---------------------------------------------------------------------------| :---------: | :---- |
+| [bodsch.core.pacman](./roles/pacman/README.md) | [![GitHub Workflow Status](https://img.shields.io/github/actions/workflow/status/bodsch/ansible-collection-core/pacman.yml?branch=main)][pacman] | Ansible role to configure pacman. |
+| [bodsch.core.fail2ban](./roles/fail2ban/README.md) | [![GitHub Workflow Status](https://img.shields.io/github/actions/workflow/status/bodsch/ansible-collection-core/fail2ban.yml?branch=main)][fail2ban] | Installs and configures fail2ban. |
+| [bodsch.core.syslog_ng](./roles/syslog_ng/README.md) | [![GitHub Workflow Status](https://img.shields.io/github/actions/workflow/status/bodsch/ansible-collection-core/syslog_ng.yml?branch=main)][syslog_ng] | Installs and configures a classic syslog-ng service for processing log files away from journald. |
+| [bodsch.core.logrotate](./roles/logrotate/README.md) | [![GitHub Workflow Status](https://img.shields.io/github/actions/workflow/status/bodsch/ansible-collection-core/logrotate.yml?branch=main)][logrotate] | Installs logrotate and provides an easy way to set up additional logrotate scripts. |
+| [bodsch.core.mount](./roles/mount/README.md) | [![GitHub Workflow Status](https://img.shields.io/github/actions/workflow/status/bodsch/ansible-collection-core/mount.yml?branch=main)][mount] | Manage generic mountpoints. |
+| [bodsch.core.openvpn](./roles/openvpn/README.md) | [![GitHub Workflow Status](https://img.shields.io/github/actions/workflow/status/bodsch/ansible-collection-core/openvpn.yml?branch=main)][openvpn] | Ansible role to install and configure an openvpn server. |
+| [bodsch.core.sysctl](./roles/sysctl/README.md) | [![GitHub Workflow Status](https://img.shields.io/github/actions/workflow/status/bodsch/ansible-collection-core/sysctl.yml?branch=main)][sysctl] | Ansible role to configure sysctl. |
+| [bodsch.core.sshd](./roles/sshd/README.md) | [![GitHub Workflow Status](https://img.shields.io/github/actions/workflow/status/bodsch/ansible-collection-core/sshd.yml?branch=main)][sshd] | Ansible role to configure sshd. |
+
+[pacman]: https://github.com/bodsch/ansible-collection-core/actions/workflows/pacman.yml
+[fail2ban]: https://github.com/bodsch/ansible-collection-core/actions/workflows/fail2ban.yml
+[snakeoil]: https://github.com/bodsch/ansible-collection-core/actions/workflows/snakeoil.yml
+[syslog_ng]: https://github.com/bodsch/ansible-collection-core/actions/workflows/syslog_ng.yml
+[logrotate]: https://github.com/bodsch/ansible-collection-core/actions/workflows/logrotate.yml
+[mount]: https://github.com/bodsch/ansible-collection-core/actions/workflows/mount.yml
+[openvpn]: https://github.com/bodsch/ansible-collection-core/actions/workflows/openvpn.yml
+[sysctl]: https://github.com/bodsch/ansible-collection-core/actions/workflows/sysctl.yml
+[sshd]: https://github.com/bodsch/ansible-collection-core/actions/workflows/sshd.yml
+
+### Modules
+
+| Name | Description |
+|:--------------------------|:----|
+| [bodsch.core.aur](./plugins/modules/aur.py) | Installs packages for ArchLinux via AUR. |
+| [bodsch.core.check_mode](./plugins/modules/check_mode.py) | Replacement for `ansible_check_mode`. |
+| [bodsch.core.facts](./plugins/modules/facts.py) | Creates a facts file for ansible. |
+| [bodsch.core.remove_ansible_backups](./plugins/modules/remove_ansible_backups.py) | Remove older backup files created by ansible. |
+| [bodsch.core.package_version](./plugins/modules/package_version.py) | Attempts to determine the version of a package to be installed or already installed. |
+| [bodsch.core.sync_directory](./plugins/modules/sync_directory.py) | Synchronises directories, similar to rsync. |
+| [bodsch.core.easyrsa](./plugins/modules/easyrsa.py) | Manage a Public Key Infrastructure (PKI) using EasyRSA. |
+| [bodsch.core.openvpn_client_certificate](./plugins/modules/openvpn_client_certificate.py) | Manage OpenVPN client certificates using EasyRSA. |
+| [bodsch.core.openvpn_crl](./plugins/modules/openvpn_crl.py) | Check the status of, and renew, the certificate revocation list (CRL). |
+| [bodsch.core.openvpn_ovpn](./plugins/modules/openvpn_ovpn.py) | Create (or force the recreation of) client `.ovpn` files. |
+| [bodsch.core.openvpn](./plugins/modules/openvpn.py) | Generate tls-auth keys and create Easy-RSA client users with inline `.ovpn` files. |
+| [bodsch.core.openvpn_version](./plugins/modules/openvpn_version.py) | Determine the installed OpenVPN version. |
+| [bodsch.core.pip_requirements](./plugins/modules/pip_requirements.py) | This module creates a requirements file to install Python modules via pip. |
+| [bodsch.core.syslog_cmd](./plugins/modules/syslog_cmd.py) | Run syslog-ng with arbitrary command-line parameters. |
+| [bodsch.core.apt_sources](./plugins/modules/apt_sources.py) | Manage APT deb822 (.sources) repositories with repo-specific keyrings. |
+
+
+### Module utils
+
+| Name | Description |
+|:--------------------------|:----|
+| [bodsch.core.passlib_bcrypt5_compat](./plugins/module_utils/passlib_bcrypt5_compat.py) | Compatibility helpers for using `passlib` 1.7.4 with `bcrypt` 5.x |
+
+
+### Actions
+
+| Name | Description |
+|:--------------------------|:----|
+| [bodsch.core.deploy_and_activate](./plugins/action/deploy_and_activate.py) | Controller-side orchestration for deploying versioned binaries and activating them via symlinks. |
+
+
+## Installing this collection
+
+You can install the `bodsch.core` collection with the Ansible Galaxy CLI:
+
+```bash
+#> ansible-galaxy collection install bodsch.core
+```
+
+To install directly from GitHub:
+
+```bash
+#> ansible-galaxy collection install git@github.com:bodsch/ansible-collection-core.git
+```
+
+
+You can also include it in a `requirements.yml` file and install it with `ansible-galaxy collection install -r requirements.yml`, using the format:
+
+```yaml
+---
+collections:
+  - name: bodsch.core
+    # version: ">=2.8.x"
+```
+
+The Python module dependencies are not installed by `ansible-galaxy`. They can
+be manually installed using pip:
+
+```bash
+pip install -r requirements.txt
+```
+
+## Using this collection
+
+
+You can either call modules by their Fully Qualified Collection Name (FQCN), such as `bodsch.core.remove_ansible_backups`,
+or you can call modules by their short name if you list the `bodsch.core` collection in the playbook's `collections` keyword.
+
+
+## Examples
+
+### `bodsch.core.aur`
+
+```yaml
+- name: install collabora package via aur
+  become: true
+  become_user: aur_builder
+  bodsch.core.aur:
+    state: present
+    name: collabora-online-server
+    repository: "{{ collabora_arch.source_repository }}"
+  async: 3200
+  poll: 10
+  register: _collabora_installed
+```
+
+### `bodsch.core.check_mode`
+
+```yaml
+- name: detect ansible check_mode
+  bodsch.core.check_mode:
+  register: _check_mode
+
+- name: define check_mode
+  ansible.builtin.set_fact:
+    check_mode: '{{ _check_mode.check_mode }}'
+```
+
+### `bodsch.core.deploy_and_activate`
+
+```yaml
+- name: deploy and activate logstream_exporter version {{ logstream_exporter_version }}
+  bodsch.core.deploy_and_activate:
+    src_dir: "{{ logstream_exporter_local_tmp_directory }}"
+    install_dir: "{{ logstream_exporter_install_path }}"
+    link_dir: "/usr/bin"
+    remote_src: false  # "{{ 'true' if logstream_exporter_direct_download else 'false' }}"
+    owner: "{{ logstream_exporter_system_user }}"
+    group: "{{ logstream_exporter_system_group }}"
+    mode: "0755"
+    items:
+      - name: "{{ logstream_exporter_release.binary }}"
+        capability: "cap_net_raw+ep"
+  notify:
+    - restart logstream exporter
+```
+
+### `bodsch.core.easyrsa`
+
+```yaml
+- name: initialize easy-rsa - (this is going to take a long time)
+  bodsch.core.easyrsa:
+    pki_dir: '{{ openvpn_easyrsa.directory }}/pki'
+    req_cn_ca: "{{ openvpn_certificate.req_cn_ca }}"
+    req_cn_server: '{{ openvpn_certificate.req_cn_server }}'
+    ca_keysize: 4096
+    dh_keysize: "{{ openvpn_diffie_hellman_keysize }}"
+    working_dir: '{{ openvpn_easyrsa.directory }}'
+    force: true
+  register: _easyrsa_result
+```
+
+### `bodsch.core.facts`
+
+```yaml
+- name: create custom facts
+  bodsch.core.facts:
+    state: present
+    name: icinga2
+    facts:
+      version: "2.10"
+      salt: fgmklsdfnjyxnvjksdfbkuser
+      user: icinga2
+```
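+
+The facts written this way end up as local facts (presumably below `/etc/ansible/facts.d`, the standard local-facts directory) and can be read back via `ansible_local`. A minimal consumption sketch, assuming the example above has run; the `icinga2` key matches the `name` argument:
+
+```yaml
+- name: re-read local facts
+  ansible.builtin.setup:
+    filter: ansible_local
+
+- name: use the custom fact
+  ansible.builtin.debug:
+    msg: "icinga2 version: {{ ansible_local.icinga2.version }}"
+```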
version: "2.10" + salt: fgmklsdfnjyxnvjksdfbkuser + user: icinga2 +``` + +### `bodsch.core.openvpn_client_certificate` + +```yaml +- name: create or revoke client certificate + bodsch.core.openvpn_client_certificate: + clients: + - name: molecule + state: present + roadrunner: false + static_ip: 10.8.3.100 + remote: server + port: 1194 + proto: udp + device: tun + ping: 20 + ping_restart: 45 + cert: molecule.crt + key: molecule.key + tls_auth: + enabled: true + - name: roadrunner_one + state: present + roadrunner: true + static_ip: 10.8.3.10 + remote: server + port: 1194 + proto: udp + device: tun + ping: 20 + ping_restart: 45 + cert: roadrunner_one.crt + key: roadrunner_one.key + tls_auth: + enabled: true + working_dir: /etc/easy-rsa +``` + +### `bodsch.core.openvpn_crl` + +```yaml +- name: Check CRL status and include revoked certificates + bodsch.core.openvpn_crl: + state: status + pki_dir: /etc/easy-rsa/pki + list_revoked_certificates: true + +- name: Warn if CRL expires within 14 days + bodsch.core.openvpn_crl: + state: status + pki_dir: /etc/easy-rsa/pki + warn_for_expire: true + expire_in_days: 14 + register: crl_status + +- name: Regenerate (renew) CRL using Easy-RSA + bodsch.core.openvpn_crl: + state: renew + pki_dir: /etc/easy-rsa/pki + working_dir: /etc/easy-rsa + register: crl_renew +``` + +### `bodsch.core.openvpn_ovpn` + +```yaml +- name: Force recreation of an existing .ovpn file + bodsch.core.openvpn_ovpn: + state: present + username: carol + destination_directory: /etc/openvpn/clients + force: true +``` + +### `bodsch.core.openvpn_version` + +```yaml +- name: Print parsed version + ansible.builtin.debug: + msg: "OpenVPN version: {{ openvpn.version }}" +``` + +### `bodsch.core.openvpn` + +```yaml +- name: Generate tls-auth key (ta.key) + bodsch.core.openvpn: + state: genkey + secret: /etc/openvpn/ta.key + +- name: Generate tls-auth key only if marker does not exist + bodsch.core.openvpn: + state: genkey + secret: /etc/openvpn/ta.key + creates: /var/lib/openvpn/ta.key.created + +- name: Force regeneration by removing marker first + bodsch.core.openvpn: + state: genkey + secret: /etc/openvpn/ta.key + creates: /var/lib/openvpn/ta.key.created + force: true + +- name: Create Easy-RSA client and write inline .ovpn + bodsch.core.openvpn: + state: create_user + secret: /dev/null # required by module interface, not used here + username: alice + destination_directory: /etc/openvpn/clients + chdir: /etc/easy-rsa + +- name: Create user only if marker does not exist + bodsch.core.openvpn: + state: create_user + secret: /dev/null + username: bob + destination_directory: /etc/openvpn/clients + chdir: /etc/easy-rsa + creates: /var/lib/openvpn/clients/bob.created +``` + +### `bodsch.core.package_version` + +```yaml +- name: get version of available package + bodsch.core.package_version: + package_name: nano + register: package_version +``` + +### `bodsch.core.pip_requirements` + +```yaml +- name: create pip requirements file + bodsch.core.pip_requirements: + name: docker + state: present + requirements: + - name: docker + compare_direction: "==" + version: 6.0.0 + + - name: setuptools + version: 39.1.0 + + - name: requests + versions: + - ">= 2.28.0" + - "< 2.30.0" + - "!~ 1.1.0" + register: pip_requirements +``` + +### `bodsch.core.remove_ansible_backups` + +```yaml +--- +- name: remove older ansible backup files + bodsch.core.remove_ansible_backups: + path: /etc + holds: 4 +``` + +### `bodsch.core.sync_directory` + +```yaml +- name: syncronize config for first run + 
+
+### `bodsch.core.sync_directory`
+
+```yaml
+- name: synchronize config for first run
+  bodsch.core.sync_directory:
+    source_directory: "{{ nextcloud_install_base_directory }}/nextcloud/{{ nextcloud_version }}/config_DIST"
+    destination_directory: "{{ nextcloud_install_base_directory }}/nextcloud/config"
+    arguments:
+      verbose: true
+      purge: false
+```
+
+### `bodsch.core.syslog_cmd`
+
+```yaml
+- name: detect config version
+  bodsch.core.syslog_cmd:
+    parameters:
+      - --version
+  when:
+    - not running_in_check_mode
+  register: _syslog_config_version
+
+- name: validate syslog-ng config
+  bodsch.core.syslog_cmd:
+    parameters:
+      - --syntax-only
+  check_mode: true
+```
+
+
+
+## Contribution
+
+Please read [Contribution](CONTRIBUTING.md)
+
+## Development, Branches (Git Tags)
+
+The `master` branch is my *working horse*; it includes the "latest, hot shit" and can be completely broken!
+
+If you want to use something stable, please use a [Tagged Version](https://github.com/bodsch/ansible-collection-core/tags)!
+
+
+## Author
+
+- Bodo Schulz
+
+## License
+
+[Apache](LICENSE)
+
+**FREE SOFTWARE, HELL YEAH!**
diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/core/meta/runtime.yml b/ansible/playbooks/collections/ansible_collections/bodsch/core/meta/runtime.yml
new file mode 100644
index 0000000..429e81d
--- /dev/null
+++ b/ansible/playbooks/collections/ansible_collections/bodsch/core/meta/runtime.yml
@@ -0,0 +1,27 @@
+---
+
+requires_ansible: '>=2.12'
+
+platforms:
+  - name: ArchLinux
+  - name: Debian
+    versions:
+      - bullseye
+      - bookworm
+      - trixie
+  - name: Ubuntu
+    versions:
+      # 20.04
+      - focal
+      # 22.04
+      - jammy
+      # 24.04
+      - noble
+      # 26.04
+      # - resolute
+
+python_versions:
+  - "3.10"
+  - "3.11"
+  - "3.12"
+  - "3.13"
diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/core/plugins/README.md b/ansible/playbooks/collections/ansible_collections/bodsch/core/plugins/README.md
new file mode 100644
index 0000000..d1e0715
--- /dev/null
+++ b/ansible/playbooks/collections/ansible_collections/bodsch/core/plugins/README.md
@@ -0,0 +1,162 @@
+# Collections Plugins Directory
+
+## modules
+
+### remove_ansible_backups
+
+```shell
+ansible-doc --type module bodsch.core.remove_ansible_backups
+> BODSCH.CORE.REMOVE_ANSIBLE_BACKUPS (./collections/ansible_collections/bodsch/core/plugins/modules/remove_ansible_backups.py)
+
+        Remove older backup files created by ansible
+```
+
+### package_version
+
+```shell
+ansible-doc --type module bodsch.core.package_version
+> BODSCH.CORE.PACKAGE_VERSION (./collections/ansible_collections/bodsch/core/plugins/modules/package_version.py)
+
+        Attempts to determine the version of a package to be installed or already installed. Supports apt, pacman, dnf (or yum) as
+        package manager.
+```
+
+### aur
+
+```shell
+ansible-doc --type module bodsch.core.aur
+> BODSCH.CORE.AUR (./collections/ansible_collections/bodsch/core/plugins/modules/aur.py)
+
+        This module manages packages for ArchLinux on a target with aur (like [ansible.builtin.yum], [ansible.builtin.apt], ...).
+```
+
+### journalctl
+
+```shell
+ansible-doc --type module bodsch.core.journalctl
+> BODSCH.CORE.JOURNALCTL (./collections/ansible_collections/bodsch/core/plugins/modules/journalctl.py)
+
+        Query the systemd journal with a very limited number of possible parameters. In certain cases there are errors that are not
+        clearly traceable but are logged in the journal. This module is intended to be a tool for error analysis.
+``` + +### facts + +```shell + +> BODSCH.CORE.FACTS (./collections/ansible_collections/bodsch/core/plugins/modules/facts.py) + + Write Ansible Facts +``` + +## module_utils + +### `checksum` + +```python +from ansible_collections.bodsch.core.plugins.module_utils.checksum import Checksum + +c = Checksum() + +print(c.checksum("fooo")) +print(c.checksum_from_file("/etc/fstab")) + +# ??? +c.compare("aaa", "bbb") +c.save("test-check", "aaa") +c.load("test-check") +``` + +### `file` + +```python +from ansible_collections.bodsch.core.plugins.module_utils.file import remove_file, create_link +``` + +- `create_link(source, destination, force=False)` +- `remove_file(file_name)` + +### `directory` + +```python +from ansible_collections.bodsch.core.plugins.module_utils.directory import create_directory +``` + +- `create_directory(directory)` +- `permstr_to_octal(modestr, umask)` +- `current_state(directory)` +- `fix_ownership(directory, force_owner=None, force_group=None, force_mode=False)` + + +### `cache` + +```python +from ansible_collections.bodsch.core.plugins.module_utils.cache.cache_valid import cache_valid +``` + +- `cache_valid(module, cache_file_name, cache_minutes=60, cache_file_remove=True)` + +### `template` + +## lookup + +### `file_glob` + +## filter + +### `types` + +- `type()` +- `config_bool(data, true_as="yes", false_as="no")` + +### `verify` + +- `compare_list(data_list, compare_to_list)` +- `upgrade(install_path, bin_path)` + +### `dns` + +- `dns_lookup(timeout=3, extern_resolver=[])` + +### `python` + +- `python_extra_args(python_version=ansible_python.version, extra_args=[], break_system_packages=True)` + +### `union_by` + +- `union(docker_defaults_python_packages, union_by='name')` + +### - `parse_checksum` + +- `parse_checksum('nginx-prometheus-exporter', ansible_facts.system, system_architecture)` + +## misc + +This directory can be used to ship various plugins inside an Ansible collection. Each plugin is placed in a folder that +is named after the type of plugin it is in. It can also include the `module_utils` and `modules` directory that +would contain module utils and modules respectively. + +Here is an example directory of the majority of plugins currently supported by Ansible: + +``` +└── plugins + ├── action + ├── become + ├── cache + ├── callback + ├── cliconf + ├── connection + ├── filter + ├── httpapi + ├── inventory + ├── lookup + ├── module_utils + ├── modules + ├── netconf + ├── shell + ├── strategy + ├── terminal + ├── test + └── vars +``` + +A full list of plugin types can be found at [Working With Plugins](https://docs.ansible.com/ansible-core/2.14/plugins/plugins.html). diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/core/plugins/action/activate_version.save b/ansible/playbooks/collections/ansible_collections/bodsch/core/plugins/action/activate_version.save new file mode 100644 index 0000000..4396eba --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/core/plugins/action/activate_version.save @@ -0,0 +1,430 @@ +""" +binary_deploy.py (action plugin) + +Controller-aware wrapper that supports: +- remote_src=true: src_dir is on the remote host -> use activate_version_remote to do everything in one remote call. +- remote_src=false: src_dir is on the controller -> verify local files, create install_dir remotely, transfer files, + then let activate_version_remote enforce caps and symlinks. + +This collapses the common "stat + fail + stat + stat + copy + file + capabilities + link" pattern into one task. 
+""" + +from __future__ import annotations + +import os +from typing import Any, Dict, List + +from ansible.errors import AnsibleError +from ansible.plugins.action import ActionBase +from ansible.utils.display import Display + + +display = Display() + + +class ActionModule(ActionBase): + """Deploy binaries to install_dir and activate them via symlinks.""" + + TRANSFERS_FILES = True + + def _get_items(self, args: Dict[str, Any]) -> List[Dict[str, Any]]: + """ """ + display.v(f"ActionModule::_get_items(args: {args})") + + items = args.get("items") or [] + if not isinstance(items, list) or not items: + raise AnsibleError("binary_deploy: 'items' must be a non-empty list") + return items + + def _local_item_path(self, src_dir: str, item: Dict[str, Any]) -> Tuple[str, str]: + """ + Returns (local_src_path, dest_filename). + dest_filename is always item['name']. + local source filename is item.get('src') or item['name']. + """ + display.v(f"ActionModule::_local_item_path(src_dir: {src_dir}, item: {item})") + + name = str(item["name"]) + src_name = str(item.get("src") or name) + return os.path.join(src_dir, src_name), name + + def _ensure_local_files_exist(self, src_dir: str, items: List[Dict[str, Any]]) -> None: + """ """ + display.v(f"ActionModule::_ensure_local_files_exist(src_dir: {src_dir}, items: {items})") + + for it in items: + local_src, _ = self._local_item_path(src_dir, it) + if not os.path.isfile(local_src): + raise AnsibleError(f"binary_deploy: missing extracted binary on controller: {local_src}") + + def _probe_remote( + self, + *, + tmp: Optional[str], + task_vars: Dict[str, Any], + module_args: Dict[str, Any], + ) -> Dict[str, Any]: + """ """ + display.v(f"ActionModule::_probe_remote(tmp: {tmp}, task_vars, module_args: {module_args})") + + return self._execute_module( + module_name="bodsch.core.activate_version_remote", + module_args=module_args, + task_vars=task_vars, + tmp=tmp, + ) + + def _remote_copy_from_controller( + self, + *, + tmp: Optional[str], + task_vars: Dict[str, Any], + src_dir: str, + install_dir: str, + items: List[Dict[str, Any]], + mode: str, + owner: Optional[str], + group: Optional[str], + ) -> bool: + """ + Transfer controller-local binaries to remote install_dir using ansible.builtin.copy. + Returns True if any file changed. 
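+        owner/group are only forwarded to ansible.builtin.copy when they are
+        set, so existing ownership is left untouched otherwise.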
+ """ + display.v(f"ActionModule::_remote_copy_from_controller(tmp: {tmp}, task_vars, src_dir: {src_dir}, install_dir: {install_dir}, items: {items}, owner: {owner}, group: {group}, mode: {mode})") + + changed_any = False + + for it in items: + local_src, dest_name = self._local_item_path(src_dir, it) + dest_path = os.path.join(install_dir, dest_name) + + module_args: Dict[str, Any] = { + "src": local_src, + "dest": dest_path, + "remote_src": False, + "mode": mode, + } + if owner: + module_args["owner"] = owner + if group: + module_args["group"] = group + + res = self._execute_module( + module_name="ansible.builtin.copy", + module_args=module_args, + task_vars=task_vars, + tmp=tmp, + ) + changed_any = changed_any or bool(res.get("changed", False)) + + return changed_any + + def run(self, tmp: str | None = None, task_vars: Dict[str, Any] | None = None) -> Dict[str, Any]: + """ """ + display.v(f"ActionModule::run(tmp: {tmp}, task_vars)") + + if task_vars is None: + task_vars = {} + + result: Dict[str, Any] = super().run(tmp, task_vars) + args = self._task.args.copy() + + remote_src = bool(args.get("remote_src", False)) + install_dir = str(args["install_dir"]) + link_dir = str(args.get("link_dir", "/usr/bin")) + src_dir = args.get("src_dir") + mode = str(args.get("mode", "0755")) + owner = args.get("owner") + group = args.get("group") + cleanup_on_failure = bool(args.get("cleanup_on_failure", True)) + activation_name = args.get("activation_name") + + items = self._get_items(args) + + display.v(f" - remote_src : {remote_src}") + display.v(f" - install_dir : {install_dir}") + display.v(f" - src_dir : {src_dir}") + display.v(f" - link_dir : {link_dir}") + display.v(f" - owner : {owner}") + display.v(f" - group : {group}") + display.v(f" - cleanup_on_failure : {cleanup_on_failure}") + display.v(f" - activation_name : {activation_name}") + + # --- Probe --- + probe_args: Dict[str, Any] = { + "install_dir": install_dir, + "link_dir": link_dir, + "items": items, + "activation_name": activation_name, + "owner": owner, + "group": group, + "mode": mode, + "cleanup_on_failure": cleanup_on_failure, + "check_only": True, + "copy": remote_src, + } + + display.v(f" - probe_args : {probe_args}") + + # IMPORTANT: when remote_src=True (copy=True), src_dir must be passed and must be remote path + if remote_src: + if not src_dir: + raise AnsibleError("binary_deploy: 'src_dir' is required when remote_src=true (remote path)") + probe_args["src_dir"] = str(src_dir) + + probe = self._probe_remote(tmp=tmp, task_vars=task_vars, module_args=probe_args) + + display.v(f" - probe : {probe}") + + # Check mode: never change + if bool(task_vars.get("ansible_check_mode", False)): + probe["changed"] = False + return probe + + if not probe.get("needs_update", False): + probe["changed"] = False + return probe + + # --- Apply --- + try: + # Ensure install_dir exists on remote + self._execute_module( + module_name="ansible.builtin.file", + module_args={"path": install_dir, "state": "directory"}, + task_vars=task_vars, + tmp=tmp, + ) + + if remote_src: + # Remote -> Remote copy + perms/caps/links in one remote call + apply_args = dict(probe_args) + apply_args["check_only"] = False + apply_args["copy"] = True + apply_args["src_dir"] = str(src_dir) + + return self._probe_remote(tmp=tmp, task_vars=task_vars, module_args=apply_args) + + # Controller -> Remote transfer + if not src_dir: + raise AnsibleError("binary_deploy: 'src_dir' is required when remote_src=false (controller path)") + src_dir = str(src_dir) + + 
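+            # Fail fast on the controller before any transfer; the copy step
+            # below ORs the per-file change flags into the final 'changed' result.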
self._ensure_local_files_exist(src_dir, items) + + copied_any = self._remote_copy_from_controller( + tmp=tmp, + task_vars=task_vars, + src_dir=src_dir, + install_dir=install_dir, + items=items, + mode=mode, + owner=owner, + group=group, + ) + + # Enforce perms/caps/links in one remote call (no remote copy) + apply_args = { + "install_dir": install_dir, + "link_dir": link_dir, + "items": items, + "activation_name": activation_name, + "owner": owner, + "group": group, + "mode": mode, + "cleanup_on_failure": cleanup_on_failure, + "check_only": False, + "copy": False, + } + applied = self._probe_remote(tmp=tmp, task_vars=task_vars, module_args=apply_args) + applied["changed"] = bool(applied.get("changed", False)) or copied_any + return applied + + except Exception: + if cleanup_on_failure: + try: + self._execute_module( + module_name="ansible.builtin.file", + module_args={"path": install_dir, "state": "absent"}, + task_vars=task_vars, + tmp=tmp, + ) + except Exception: + pass + raise + + + # -- FRIEDHOF --- + + def run_OLD( + self, tmp: str | None = None, task_vars: Dict[str, Any] | None = None + ) -> Dict[str, Any]: + """ """ + display.v(f"ActionModule::run(tmp: {tmp}, task_vars: {task_vars})") + + if task_vars is None: + task_vars = {} + + result: Dict[str, Any] = super().run(tmp, task_vars) + args = self._task.args.copy() + + remote_src = bool(args.pop("remote_src", False)) + install_dir = str(args["install_dir"]) + items: List[Dict[str, Any]] = args.get("items") or [] + if not items: + raise AnsibleError("binary_deploy: items must not be empty") + + src_dir = args.get("src_dir") + link_dir = args.get("link_dir", "/usr/bin") + owner = args.get("owner") + group = args.get("group") + mode = args.get("mode", "0755") + cleanup_on_failure = bool(args.get("cleanup_on_failure", True)) + activation_name = args.get("activation_name") + + display.v(f" - remote_src : {remote_src}") + display.v(f" - install_dir : {install_dir}") + display.v(f" - src_dir : {src_dir}") + display.v(f" - link_dir : {link_dir}") + display.v(f" - owner : {owner}") + display.v(f" - group : {group}") + display.v(f" - cleanup_on_failure : {cleanup_on_failure}") + display.v(f" - activation_name : {activation_name}") + + # 1) Check-only probe (remote): decide whether we need to do anything. + probe_args: Dict[str, Any] = { + "install_dir": install_dir, + "link_dir": link_dir, + "items": items, + "activation_name": activation_name, + "owner": owner, + "group": group, + "mode": mode, + "cleanup_on_failure": cleanup_on_failure, + "check_only": True, + "copy": remote_src, + } + + display.v(f" - probe_args : {probe_args}") + + if remote_src: + if not src_dir: + raise AnsibleError( + "binary_deploy: src_dir is required when remote_src=true" + ) + probe_args["src_dir"] = src_dir + + probe = self._execute_module( + module_name="bodsch.core.activate_version_remote", + module_args=probe_args, + task_vars=task_vars, + tmp=tmp, + ) + + # In check mode: return probe result as-is (no changes). 
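+        # Outside check mode, the apply step below only runs when the probe
+        # reported needs_update.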
+ if bool(task_vars.get("ansible_check_mode", False)): + probe["changed"] = False + return probe + + if not probe.get("needs_update", False): + probe["changed"] = False + return probe + + # 2) Apply + try: + if remote_src: + apply_args = dict(probe_args) + apply_args["check_only"] = False + apply_args["copy"] = True + apply_args["src_dir"] = src_dir + + applied = self._execute_module( + module_name="bodsch.core.activate_version_remote", + module_args=apply_args, + task_vars=task_vars, + tmp=tmp, + ) + return applied + + # Controller-local source: verify local files exist first. + if not src_dir: + raise AnsibleError( + "binary_deploy: src_dir is required when remote_src=false" + ) + + for it in items: + name = str(it["name"]) + src_name = str(it.get("src") or name) + local_path = os.path.join(src_dir, src_name) + if not os.path.isfile(local_path): + raise AnsibleError( + f"binary_deploy: missing extracted binary on controller: {local_path}" + ) + + # Ensure install_dir exists remotely + dir_res = self._execute_module( + module_name="ansible.builtin.file", + module_args={"path": install_dir, "state": "directory"}, + task_vars=task_vars, + tmp=tmp, + ) + + # Transfer binaries controller -> remote + copied_any = False + for it in items: + name = str(it["name"]) + src_name = str(it.get("src") or name) + + copy_res = self._execute_module( + module_name="ansible.builtin.copy", + module_args={ + "src": os.path.join(src_dir, src_name), + "dest": os.path.join(install_dir, name), + "mode": mode, + "owner": owner, + "group": group, + "remote_src": False, + }, + task_vars=task_vars, + tmp=tmp, + ) + copied_any = copied_any or bool(copy_res.get("changed", False)) + + # Enforce caps + symlinks (no remote copy; files already in install_dir) + apply_args = { + "install_dir": install_dir, + "link_dir": link_dir, + "items": items, + "activation_name": activation_name, + "owner": owner, + "group": group, + "mode": mode, + "cleanup_on_failure": cleanup_on_failure, + "check_only": False, + "copy": False, + } + applied = self._execute_module( + module_name="bodsch.core.activate_version_remote", + module_args=apply_args, + task_vars=task_vars, + tmp=tmp, + ) + + applied["changed"] = ( + bool(applied.get("changed", False)) + or bool(dir_res.get("changed", False)) + or copied_any + ) + return applied + + except Exception as exc: + if cleanup_on_failure: + try: + self._execute_module( + module_name="ansible.builtin.file", + module_args={"path": install_dir, "state": "absent"}, + task_vars=task_vars, + tmp=tmp, + ) + except Exception: + pass + raise diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/core/plugins/action/deploy_and_activate.py b/ansible/playbooks/collections/ansible_collections/bodsch/core/plugins/action/deploy_and_activate.py new file mode 100644 index 0000000..7bc11aa --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/core/plugins/action/deploy_and_activate.py @@ -0,0 +1,513 @@ +""" +deploy_and_activate.py (action plugin) + +Controller-side orchestration for deploying versioned binaries and activating them via symlinks. + +This action plugin wraps a remote worker module (bodsch.core.deploy_and_activate_remote) +and provides two operational modes: + +1) remote_src=False (controller-local source): + - Validate that extracted binaries exist on the Ansible controller in src_dir. + - Stage these files onto the remote host via ActionBase._transfer_file(). + - Invoke the remote worker module to copy into install_dir and enforce perms/caps/symlinks. 
+ +2) remote_src=True (remote-local source): + - Assume binaries already exist on the remote host in src_dir. + - Invoke the remote worker module to copy into install_dir and enforce perms/caps/symlinks. + +Implementation note: +- Do not call ansible.builtin.copy via _execute_module() to transfer controller-local files. + That bypasses the copy action logic and will not perform controller->remote transfer reliably. +""" + +from __future__ import annotations + +import os +from dataclasses import dataclass +from typing import ( + Any, + Dict, + List, + Mapping, + Optional, + Sequence, + Set, + Tuple, + TypedDict, + cast, +) + +from ansible.errors import AnsibleError +from ansible.plugins.action import ActionBase +from ansible.utils.display import Display + +display = Display() + +REMOTE_WORKER_MODULE = "bodsch.core.deploy_and_activate_remote" + +DOCUMENTATION = r""" +--- +module: deploy_and_activate +short_description: Deploy binaries into a versioned directory and activate them via symlinks (action plugin) +description: + - Controller-side action plugin that orchestrates a remote worker module. + - Supports controller-local sources (C(remote_src=false)) via controller->remote staging. + - Supports remote-local sources (C(remote_src=true)) where binaries already exist on the target host. +options: + install_dir: + description: + - Versioned installation directory on the target host. + type: path + required: true + src_dir: + description: + - Directory containing extracted binaries. + - For C(remote_src=false) this path is on the controller. + - For C(remote_src=true) this path is on the target host. + type: path + required: true + remote_src: + description: + - If true, C(src_dir) is on the remote host (remote->remote copy). + - If false, C(src_dir) is on the controller (controller->remote staging). + type: bool + default: false + link_dir: + description: + - Directory where activation symlinks are created on the target host. + type: path + default: /usr/bin + items: + description: + - List of binaries to deploy. + - Each item supports C(name) (required), optional C(src), optional C(link_name), optional C(capability). + type: list + elements: dict + required: true + activation_name: + description: + - Item name or link_name used to determine "activated" status (worker module feature). + type: str + required: false + owner: + description: + - Owner name or uid for deployed binaries. + type: str + required: false + group: + description: + - Group name or gid for deployed binaries. + type: str + required: false + mode: + description: + - File mode for deployed binaries (octal string). + type: str + default: "0755" + cleanup_on_failure: + description: + - Remove install_dir if an exception occurs during apply. + type: bool + default: true +author: + - "Bodsch Core Collection" +notes: + - This is an action plugin. It delegates actual deployment work to C(bodsch.core.deploy_and_activate_remote). 
+""" + +EXAMPLES = r""" +- name: Deploy from controller cache (remote_src=false) + bodsch.core.deploy_and_activate: + remote_src: false + src_dir: "/home/bodsch/.cache/ansible/logstream_exporter/1.0.0" + install_dir: "/usr/local/opt/logstream_exporter/1.0.0" + link_dir: "/usr/bin" + owner: "logstream-exporter" + group: "logstream-exporter" + mode: "0755" + items: + - name: "logstream-exporter" + capability: "cap_net_raw+ep" + +- name: Deploy from remote extracted directory (remote_src=true) + bodsch.core.deploy_and_activate: + remote_src: true + src_dir: "/var/cache/ansible/logstream_exporter/1.0.0" + install_dir: "/usr/local/opt/logstream_exporter/1.0.0" + items: + - name: "logstream-exporter" +""" + +RETURN = r""" +changed: + description: Whether anything changed (as reported by the remote worker module). + type: bool +activated: + description: Whether the activation symlink points into install_dir (worker module result). + type: bool +needs_update: + description: Whether changes would be required (in probe/check mode output). + type: bool +plan: + description: Per-item plan (in probe/check mode output). + type: dict +details: + description: Per-item change details (in apply output). + type: dict +""" + + +class ItemSpec(TypedDict, total=False): + """User-facing item specification passed to the remote worker module.""" + + name: str + src: str + link_name: str + capability: str + + +@dataclass(frozen=True) +class _LocalItem: + """Normalized local item for controller-side existence checks and staging.""" + + name: str + src_rel: str + local_src: str + + +class ActionModule(ActionBase): + """Deploy binaries to install_dir and activate them via symlinks.""" + + TRANSFERS_FILES = True + + def _get_items(self, args: Mapping[str, Any]) -> List[ItemSpec]: + """Validate and normalize the 'items' argument.""" + display.vv(f"ActionModule::_get_items(args: {dict(args)})") + + raw_items = args.get("items") or [] + if not isinstance(raw_items, list) or not raw_items: + raise AnsibleError("deploy_and_activate: 'items' must be a non-empty list") + + out: List[ItemSpec] = [] + for idx, it in enumerate(raw_items): + if not isinstance(it, dict): + raise AnsibleError(f"deploy_and_activate: items[{idx}] must be a dict") + if "name" not in it: + raise AnsibleError( + f"deploy_and_activate: items[{idx}] missing required key 'name'" + ) + + name = str(it["name"]).strip() + if not name: + raise AnsibleError( + f"deploy_and_activate: items[{idx}].name must not be empty" + ) + + normalized: ItemSpec = cast(ItemSpec, dict(it)) + normalized["name"] = name + out.append(normalized) + + return out + + def _normalize_local_items( + self, controller_src_dir: str, items: Sequence[ItemSpec] + ) -> List[_LocalItem]: + """Build controller-local absolute paths for each item.""" + display.vv( + f"ActionModule::_normalize_local_items(controller_src_dir: {controller_src_dir}, items: {list(items)})" + ) + + out: List[_LocalItem] = [] + for it in items: + name = str(it["name"]) + src_rel = str(it.get("src") or name) + local_src = os.path.join(controller_src_dir, src_rel) + out.append(_LocalItem(name=name, src_rel=src_rel, local_src=local_src)) + return out + + def _ensure_local_files_exist( + self, controller_src_dir: str, items: Sequence[ItemSpec] + ) -> None: + """Fail early if any controller-local binary is missing.""" + display.vv( + f"ActionModule::_ensure_local_files_exist(controller_src_dir: {controller_src_dir}, items: {list(items)})" + ) + + for it in self._normalize_local_items(controller_src_dir, items): + 
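+            # Each item was resolved by _normalize_local_items() to an absolute
+            # controller-side path (local_src); a missing file aborts the task here.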
display.vv(f"= local_src: {it.local_src}, src_rel: {it.src_rel}") + if not os.path.isfile(it.local_src): + raise AnsibleError( + f"deploy_and_activate: missing extracted binary on controller: {it.local_src}" + ) + + def _probe_remote( + self, + *, + tmp: Optional[str], + task_vars: Mapping[str, Any], + module_args: Dict[str, Any], + ) -> Dict[str, Any]: + """Execute the remote worker module and return its result.""" + display.vv( + f"ActionModule::_probe_remote(tmp: {tmp}, task_vars, module_args: {module_args})" + ) + + remote = self._execute_module( + module_name=REMOTE_WORKER_MODULE, + module_args=module_args, + task_vars=dict(task_vars), + tmp=tmp, + ) + display.vv(f"= result: {remote}") + return remote + + def _ensure_remote_dir( + self, + *, + tmp: Optional[str], + task_vars: Mapping[str, Any], + path: str, + mode: str = "0700", + ) -> None: + """Ensure a directory exists on the remote host.""" + display.vv( + f"ActionModule::_ensure_remote_dir(tmp: {tmp}, task_vars, path: {path}, mode: {mode})" + ) + + self._execute_module( + module_name="ansible.builtin.file", + module_args={"path": path, "state": "directory", "mode": mode}, + task_vars=dict(task_vars), + tmp=tmp, + ) + + def _create_remote_temp_dir( + self, *, tmp: Optional[str], task_vars: Mapping[str, Any] + ) -> str: + """ + Create a remote temporary directory. + + This avoids using ActionBase._make_tmp_path(), which is not available in all Ansible versions. + """ + display.vv(f"ActionModule::_create_remote_temp_dir(tmp: {tmp}, task_vars)") + + res = self._execute_module( + module_name="ansible.builtin.tempfile", + module_args={"state": "directory", "prefix": "deploy-and-activate-"}, + task_vars=dict(task_vars), + tmp=tmp, + ) + path = res.get("path") + if not path: + raise AnsibleError( + "deploy_and_activate: failed to create remote temporary directory" + ) + return str(path) + + def _stage_files_to_remote( + self, + *, + tmp: Optional[str], + task_vars: Mapping[str, Any], + controller_src_dir: str, + items: Sequence[ItemSpec], + ) -> Tuple[str, bool]: + """ + Stage controller-local files onto the remote host via ActionBase._transfer_file(). + + Returns: + Tuple(remote_stage_dir, created_by_us) + """ + normalized = self._normalize_local_items(controller_src_dir, items) + + if tmp: + remote_stage_dir = tmp + created_by_us = False + else: + remote_stage_dir = self._create_remote_temp_dir( + tmp=tmp, task_vars=task_vars + ) + created_by_us = True + + display.vv( + f"ActionModule::_stage_files_to_remote(remote_stage_dir: {remote_stage_dir}, created_by_us: {created_by_us})" + ) + + self._ensure_remote_dir( + tmp=tmp, task_vars=task_vars, path=remote_stage_dir, mode="0700" + ) + + # Create required subdirectories on remote if src_rel contains paths. + needed_dirs: Set[str] = set() + for it in normalized: + rel_dir = os.path.dirname(it.src_rel) + if rel_dir and rel_dir not in (".", "/"): + needed_dirs.add(os.path.join(remote_stage_dir, rel_dir)) + + for d in sorted(needed_dirs): + self._ensure_remote_dir(tmp=tmp, task_vars=task_vars, path=d, mode="0700") + + # Transfer files. + for it in normalized: + remote_dst = os.path.join(remote_stage_dir, it.src_rel) + display.vv(f"ActionModule::_transfer_file({it.local_src} -> {remote_dst})") + self._transfer_file(it.local_src, remote_dst) + + return remote_stage_dir, created_by_us + + def run( + self, tmp: str | None = None, task_vars: Dict[str, Any] | None = None + ) -> Dict[str, Any]: + """ + Action plugin entrypoint. 
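+
+        Flow (as implemented below): run the remote worker once in check-only
+        mode as a probe; return early in check mode or when nothing needs
+        updating; otherwise stage controller-local sources (remote_src=false)
+        and re-invoke the worker to apply.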
+ + Args: + tmp: Remote tmp directory (may be None depending on Ansible execution path). + task_vars: Task variables. + + Returns: + Result dict compatible with Ansible task output. + """ + display.vv(f"ActionModule::run(tmp: {tmp}, task_vars)") + + if task_vars is None: + task_vars = {} + + display.vv(f" - task_vars : {task_vars}") + + _ = super().run(tmp, task_vars) + args: Dict[str, Any] = self._task.args.copy() + + remote_src = bool(args.get("remote_src", False)) + install_dir = str(args["install_dir"]) + link_dir = str(args.get("link_dir", "/usr/bin")) + src_dir = args.get("src_dir") + mode = str(args.get("mode", "0755")) + owner = args.get("owner") + group = args.get("group") + cleanup_on_failure = bool(args.get("cleanup_on_failure", True)) + activation_name = args.get("activation_name") + + items = self._get_items(args) + + display.vv(f" - args : {args}") + + display.vv(f" - remote_src : {remote_src}") + display.vv(f" - install_dir : {install_dir}") + display.vv(f" - src_dir : {src_dir}") + display.vv(f" - link_dir : {link_dir}") + display.vv(f" - owner : {owner}") + display.vv(f" - group : {group}") + display.vv(f" - cleanup_on_failure : {cleanup_on_failure}") + display.vv(f" - activation_name : {activation_name}") + + # --- Probe (remote) --- + probe_args: Dict[str, Any] = { + "install_dir": install_dir, + "link_dir": link_dir, + "items": list(items), + "activation_name": activation_name, + "owner": owner, + "group": group, + "mode": mode, + "cleanup_on_failure": cleanup_on_failure, + "check_only": True, + "copy": remote_src, + } + + if remote_src: + if not src_dir: + raise AnsibleError( + "deploy_and_activate: 'src_dir' is required when remote_src=true (remote path)" + ) + probe_args["src_dir"] = str(src_dir) + + display.vv(f" - probe_args : {probe_args}") + probe = self._probe_remote(tmp=tmp, task_vars=task_vars, module_args=probe_args) + + # Check mode: never change. + if bool(task_vars.get("ansible_check_mode", False)): + probe["changed"] = False + return probe + + # Early exit if nothing to do. 
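+        # 'needs_update' is computed by the remote worker's probe run.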
+        if not probe.get("needs_update", False):
+            probe["changed"] = False
+            return probe
+
+        # --- Apply ---
+        stage_dir: Optional[str] = None
+        stage_created_by_us = False
+
+        try:
+            self._ensure_remote_dir(
+                tmp=tmp, task_vars=task_vars, path=install_dir, mode="0755"
+            )
+
+            if remote_src:
+                apply_args = dict(probe_args)
+                apply_args["check_only"] = False
+                apply_args["copy"] = True
+                apply_args["src_dir"] = str(src_dir)
+                return self._probe_remote(
+                    tmp=tmp, task_vars=task_vars, module_args=apply_args
+                )
+
+            # Controller -> Remote staging -> Remote apply(copy=True)
+            if not src_dir:
+                raise AnsibleError(
+                    "deploy_and_activate: 'src_dir' is required when remote_src=false (controller path)"
+                )
+
+            controller_src_dir = str(src_dir)
+            self._ensure_local_files_exist(controller_src_dir, items)
+
+            stage_dir, stage_created_by_us = self._stage_files_to_remote(
+                tmp=tmp,
+                task_vars=task_vars,
+                controller_src_dir=controller_src_dir,
+                items=items,
+            )
+
+            apply_args = {
+                "install_dir": install_dir,
+                "link_dir": link_dir,
+                "items": list(items),
+                "activation_name": activation_name,
+                "owner": owner,
+                "group": group,
+                "mode": mode,
+                "cleanup_on_failure": cleanup_on_failure,
+                "check_only": False,
+                "copy": True,
+                "src_dir": stage_dir,
+            }
+            return self._probe_remote(
+                tmp=tmp, task_vars=task_vars, module_args=apply_args
+            )
+
+        except Exception:
+            if cleanup_on_failure:
+                try:
+                    self._execute_module(
+                        module_name="ansible.builtin.file",
+                        module_args={"path": install_dir, "state": "absent"},
+                        task_vars=dict(task_vars),
+                        tmp=tmp,
+                    )
+                except Exception:
+                    pass
+            raise
+
+        finally:
+            # Best-effort cleanup of the remote staging dir only if we created it.
+            if stage_dir and stage_created_by_us:
+                try:
+                    self._execute_module(
+                        module_name="ansible.builtin.file",
+                        module_args={"path": stage_dir, "state": "absent"},
+                        task_vars=dict(task_vars),
+                        tmp=tmp,
+                    )
+                except Exception:
+                    pass
diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/core/plugins/filter/clean_dictionary.py b/ansible/playbooks/collections/ansible_collections/bodsch/core/plugins/filter/clean_dictionary.py
new file mode 100644
index 0000000..e486668
--- /dev/null
+++ b/ansible/playbooks/collections/ansible_collections/bodsch/core/plugins/filter/clean_dictionary.py
@@ -0,0 +1,60 @@
+# python 3 headers, required if submitting to Ansible
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+from ansible.utils.display import Display
+
+display = Display()
+
+"""
+This function walks the structure recursively (dictionary or list) and removes every entry
+that is None, an empty string, an empty dictionary or an empty list.
+
+For dictionaries, every key-value pair is checked and only kept if the value is not empty.
+For lists, only non-empty elements are taken over into the result.
+
+A helper function `is_empty` decides whether a value counts as "empty".
+
+The function explicitly treats boolean values (True and False) as non-empty, so they are preserved.
+is_empty also contains a check that ensures the number 0 is not considered empty:
+a value of 0 is kept.
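+
+Example (filter usage; a minimal sketch):
+
+    {{ {'a': 1, 'b': '', 'c': None, 'd': False, 'e': 0} | bodsch.core.remove_empty_values }}
+    # -> {'a': 1, 'd': False, 'e': 0}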
+"""
+
+
+class FilterModule(object):
+    """ """
+
+    def filters(self):
+        return {
+            "remove_empty_values": self.remove_empty_values,
+        }
+
+    def remove_empty_values(self, data):
+        """ """
+        display.vv(f"bodsch.core.remove_empty_values(self, {data})")
+
+        def is_empty(value):
+            """Check whether the value is empty (booleans are ignored)."""
+            if isinstance(value, bool):
+                return False  # boolean values are preserved
+            if value == 0:
+                return False  # the number 0 is preserved
+
+            return value in [None, "", {}, [], False]
+
+        if isinstance(data, dict):
+            # iterate over all key-value pairs
+            return {
+                key: self.remove_empty_values(value)
+                for key, value in data.items()
+                if not is_empty(value)
+            }
+        elif isinstance(data, list):
+            # drop empty lists and empty elements
+            return [
+                self.remove_empty_values(item) for item in data if not is_empty(item)
+            ]
+        else:
+            # return all other types unchanged (including booleans)
+            return data
diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/core/plugins/filter/clients_type.yml b/ansible/playbooks/collections/ansible_collections/bodsch/core/plugins/filter/clients_type.yml
new file mode 100644
index 0000000..a3ea20b
--- /dev/null
+++ b/ansible/playbooks/collections/ansible_collections/bodsch/core/plugins/filter/clients_type.yml
@@ -0,0 +1,21 @@
+DOCUMENTATION = """
+  name: clients_type
+  author: Bodo Schulz
+  version_added: "1.0.4"
+
+  short_description: TBD
+
+  description:
+    - TBD
+
+  options: {}
+
+"""
+
+EXAMPLES = """
+
+"""
+
+RETURN = """
+
+"""
diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/core/plugins/filter/compare_list.yml b/ansible/playbooks/collections/ansible_collections/bodsch/core/plugins/filter/compare_list.yml
new file mode 100644
index 0000000..e69de29
diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/core/plugins/filter/config_bool.yml b/ansible/playbooks/collections/ansible_collections/bodsch/core/plugins/filter/config_bool.yml
new file mode 100644
index 0000000..e69de29
diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/core/plugins/filter/dns.py b/ansible/playbooks/collections/ansible_collections/bodsch/core/plugins/filter/dns.py
new file mode 100644
index 0000000..556de0a
--- /dev/null
+++ b/ansible/playbooks/collections/ansible_collections/bodsch/core/plugins/filter/dns.py
@@ -0,0 +1,34 @@
+#!/usr/bin/python3
+# -*- coding: utf-8 -*-
+
+# (c) 2020-2023, Bodo Schulz
+# Apache-2.0 (see LICENSE or https://opensource.org/license/apache-2-0)
+# SPDX-License-Identifier: Apache-2.0
+
+from __future__ import absolute_import, print_function
+
+from ansible.utils.display import Display
+from ansible_collections.bodsch.core.plugins.module_utils.dns_lookup import dns_lookup
+
+__metaclass__ = type
+display = Display()
+
+
+class FilterModule(object):
+    def filters(self):
+        return {"dns_lookup": self.lookup}
+
+    def lookup(self, dns_name, timeout=3, dns_resolvers=["9.9.9.9"]):
+        """
+        use a simple DNS lookup, return results in a dictionary
+
+        similar to
+        {'addrs': [], 'error': True, 'error_msg': 'No such domain instance', 'name': 'instance'}
+        """
+        display.vv(f"bodsch.core.dns_lookup({dns_name}, {timeout}, {dns_resolvers})")
+
+        result = dns_lookup(dns_name, timeout, dns_resolvers)
+
+        display.vv(f"= return : {result}")
+
+        return result
diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/core/plugins/filter/dns_lookup.yml
b/ansible/playbooks/collections/ansible_collections/bodsch/core/plugins/filter/dns_lookup.yml new file mode 100644 index 0000000..e69de29 diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/core/plugins/filter/fail2ban_jails.py b/ansible/playbooks/collections/ansible_collections/bodsch/core/plugins/filter/fail2ban_jails.py new file mode 100644 index 0000000..bf800ef --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/core/plugins/filter/fail2ban_jails.py @@ -0,0 +1,74 @@ +# python 3 headers, required if submitting to Ansible +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +from ansible.utils.display import Display + +display = Display() + + +class FilterModule(object): + """ + Ansible file jinja2 tests + """ + + def filters(self): + return { + "merge_jails": self.merge_jails, + } + + def __merge_two_dicts(self, x, y): + z = x.copy() # start with x's keys and values + z.update(y) # modifies z with y's keys and values & returns None + return z + + def __search(self, d, name): + res = None + for sub in d: + if sub["name"] == name: + res = sub + break + + return res + + def __sort_list(self, _list, _filter): + return sorted(_list, key=lambda k: k.get(_filter)) + + def merge_jails(self, defaults, data): + """ """ + count_defaults = len(defaults) + count_data = len(data) + + # display.v("defaults: ({type}) {len} - {data} entries".format(data=defaults, type=type(defaults), len=count_defaults)) + # display.vv(json.dumps(data, indent=2, sort_keys=False)) + # display.v("data : ({type}) {len} - {data} entries".format(data=data, type=type(data), len=count_data)) + + result = [] + + # short way + if count_defaults == 0: + return self.__sort_list(data, "name") + + if count_data == 0: + return self.__sort_list(defaults, "name") + + # our new list from users input + for d in data: + _name = d["name"] + # search the name in the default map + _defaults_name = self.__search(defaults, _name) + # when not found, put these on the new result list + if not _defaults_name: + result.append(_defaults_name) + else: + # when found, remove these entry from the defaults list, its obsolete + for i in range(len(defaults)): + if defaults[i]["name"] == _name: + del defaults[i] + break + + # add both lists and sort + result = self.__sort_list(data + defaults, "name") + + return result diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/core/plugins/filter/fstypes.yml b/ansible/playbooks/collections/ansible_collections/bodsch/core/plugins/filter/fstypes.yml new file mode 100644 index 0000000..e69de29 diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/core/plugins/filter/get_service.yml b/ansible/playbooks/collections/ansible_collections/bodsch/core/plugins/filter/get_service.yml new file mode 100644 index 0000000..e69de29 diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/core/plugins/filter/inventory.py b/ansible/playbooks/collections/ansible_collections/bodsch/core/plugins/filter/inventory.py new file mode 100644 index 0000000..94b42dc --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/core/plugins/filter/inventory.py @@ -0,0 +1,88 @@ +# python 3 headers, required if submitting to Ansible +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +from typing import Any, Dict, Iterable, Optional, Tuple + +from ansible.errors import AnsibleFilterError +from ansible.utils.display import Display + +display = Display() + +""" 
+Ansible filter plugin: host_id + +Resolves a stable host identifier across Ansible versions and fact-injection styles. + +Resolution order: +1) ansible_facts['host'] (if present) +2) ansible_facts['hostname'] (standard setup fact) +3) inventory_hostname (always available as magic var) + +Usage: + {{ (ansible_facts | default({})) | host_id(inventory_hostname) }} +""" + + +class FilterModule: + """ """ + + def filters(self): + return { + "hostname": self.hostname, + } + + def hostname( + self, + facts: Optional[Dict[str, Any]] = None, + inventory_hostname: Optional[str] = None, + prefer: Optional[Iterable[str]] = None, + default: str = "", + ) -> str: + """ + Return a host identifier string using a preference list over facts. + + Args: + facts: Typically 'ansible_facts' (may be undefined/None). + inventory_hostname: Magic var 'inventory_hostname' as last-resort fallback. + prefer: Iterable of fact keys to try in order (default: ('host', 'hostname')). + default: Returned if nothing else is available. + + Returns: + Resolved host identifier as string. + """ + display.vv( + f"bodsch.core.hostname(self, facts, inventory_hostname: '{inventory_hostname}', prefer: '{prefer}', default: '{default}')" + ) + + facts_dict = self._as_dict(facts) + keys: Tuple[str, ...] = ( + tuple(prefer) if prefer is not None else ("host", "hostname") + ) + + for key in keys: + val = facts_dict.get(key) + if val not in (None, ""): + display.vv(f"= result: {str(val)}") + return str(val) + + if inventory_hostname not in (None, ""): + display.vv(f"= result: {str(inventory_hostname)}") + return str(inventory_hostname) + + display.vv(f"= result: {str(default)}") + + return str(default) + + def _as_dict(self, value: Any) -> Dict[str, Any]: + """ """ + if value is None: + return {} + + if isinstance(value, dict): + return value + + raise AnsibleFilterError( + f"hostname expects a dict-like ansible_facts, got: {type(value)!r}" + ) diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/core/plugins/filter/linked_version.py b/ansible/playbooks/collections/ansible_collections/bodsch/core/plugins/filter/linked_version.py new file mode 100644 index 0000000..56094b9 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/core/plugins/filter/linked_version.py @@ -0,0 +1,69 @@ +#!/usr/bin/python3 +# -*- coding: utf-8 -*- + +# (c) 2020-2023, Bodo Schulz +# Apache-2.0 (see LICENSE or https://opensource.org/license/apache-2-0) +# SPDX-License-Identifier: Apache-2.0 + +from __future__ import absolute_import, print_function + +__metaclass__ = type + +import os + +from ansible.utils.display import Display + +display = Display() + + +class FilterModule(object): + def filters(self): + return { + "linked_version": self.linked_version, + } + + def linked_version(self, data: dict, install_path: str, version: str): + """ + check for linked version in `install_path` + + `data` are dictionary: + { + 'exists': True, + ''path': '/usr/bin/influxd', ..., + 'islnk': True, ..., + 'lnk_source': '/opt/influxd/2.8.0/influxd', + 'lnk_target': '/opt/influxd/2.8.0/influxd', ... + } + `install_path`are string and NOT the filename! 
+ /opt/influxd/2.8.0 + + result: TRUE, when destination is a link and the base path equal with install path + otherwise FALSE + """ + display.vv( + f"bodsch.core.linked_version(self, data: {data}, install_path: {install_path}, version: {version})" + ) + + _is_activated = False + + _destination_exists = data.get("exists", False) + + display.vvv(f" - destination exists : {_destination_exists}") + + if _destination_exists: + _destination_islink = data.get("islnk", False) + _destination_lnk_source = data.get("lnk_source", None) + _destination_path = data.get("path", None) + + if _destination_lnk_source: + _destination_path = os.path.dirname(_destination_lnk_source) + + display.vvv(f" - is link : {_destination_islink}") + display.vvv(f" - link src : {_destination_lnk_source}") + display.vvv(f" - base path : {_destination_path}") + + _is_activated = install_path == _destination_path + + display.vv(f"= is activated: {_is_activated}") + + return _is_activated diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/core/plugins/filter/linked_version.yml b/ansible/playbooks/collections/ansible_collections/bodsch/core/plugins/filter/linked_version.yml new file mode 100644 index 0000000..e69de29 diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/core/plugins/filter/log_directories.yml b/ansible/playbooks/collections/ansible_collections/bodsch/core/plugins/filter/log_directories.yml new file mode 100644 index 0000000..e69de29 diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/core/plugins/filter/merge_jails.yml b/ansible/playbooks/collections/ansible_collections/bodsch/core/plugins/filter/merge_jails.yml new file mode 100644 index 0000000..e69de29 diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/core/plugins/filter/merge_lists.yml b/ansible/playbooks/collections/ansible_collections/bodsch/core/plugins/filter/merge_lists.yml new file mode 100644 index 0000000..e69de29 diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/core/plugins/filter/mount_fstypes.py b/ansible/playbooks/collections/ansible_collections/bodsch/core/plugins/filter/mount_fstypes.py new file mode 100644 index 0000000..59bb620 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/core/plugins/filter/mount_fstypes.py @@ -0,0 +1,30 @@ +# python 3 headers, required if submitting to Ansible + +from __future__ import absolute_import, print_function + +__metaclass__ = type + +from ansible.utils.display import Display + +display = Display() + + +class FilterModule(object): + """ + Ansible file jinja2 tests + """ + + def filters(self): + return {"fstypes": self.fstypes} + + def fstypes(self, data): + """ """ + result = [] + + display.vv(f"bodsch.core.fstypes({data}") + + result = [d["fstype"] for d in data] + + display.v("result {} {}".format(result, type(result))) + + return result diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/core/plugins/filter/openvpn.py b/ansible/playbooks/collections/ansible_collections/bodsch/core/plugins/filter/openvpn.py new file mode 100644 index 0000000..eb2d77c --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/core/plugins/filter/openvpn.py @@ -0,0 +1,29 @@ +# python 3 headers, required if submitting to Ansible +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +from ansible.utils.display import Display + +display = Display() + + +class FilterModule(object): + """ """ + + def filters(self): + return { + 
"openvpn_clients": self.openvpn_clients, + } + + def openvpn_clients(self, data, hostvars): + """ + combined_list: "{{ combined_list | default([]) + hostvars[item].openvpn_mobile_clients }}" + """ + display.vv(f"bodsch.core.openvpn_clients({data}, {hostvars})") + + client = hostvars.get("openvpn_mobile_clients", None) + if client and isinstance(client, list): + data += client + + return data diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/core/plugins/filter/openvpn_clients.yml b/ansible/playbooks/collections/ansible_collections/bodsch/core/plugins/filter/openvpn_clients.yml new file mode 100644 index 0000000..e69de29 diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/core/plugins/filter/openvpn_persistent_pool.py b/ansible/playbooks/collections/ansible_collections/bodsch/core/plugins/filter/openvpn_persistent_pool.py new file mode 100644 index 0000000..b3034ca --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/core/plugins/filter/openvpn_persistent_pool.py @@ -0,0 +1,55 @@ +# python 3 headers, required if submitting to Ansible +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +from ansible.utils.display import Display + +display = Display() + + +class FilterModule(object): + """ + Ansible file jinja2 tests + """ + + def filters(self): + return { + "persistent_pool": self.persistent_pool, + "clients_type": self.clients_type, + } + + def persistent_pool(self, data): + """ + Get the type of a variable + """ + result = [] + + for i in data: + name = i.get("name") + if i.get("static_ip", None) is not None: + d = dict( + name=name, + state=i.get("state", "present"), + static_ip=i.get("static_ip"), + ) + result.append(d) + + display.v(f" = result : {result}") + return result + + def clients_type(self, data, type="static"): + """ """ + result = [] + + for d in data: + roadrunner = d.get("roadrunner", False) + + if type == "static" and not roadrunner: + result.append(d) + + if type == "roadrunner" and roadrunner: + result.append(d) + + display.v(f" = result : {result}") + return result diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/core/plugins/filter/parse_checksum.py b/ansible/playbooks/collections/ansible_collections/bodsch/core/plugins/filter/parse_checksum.py new file mode 100644 index 0000000..8015d29 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/core/plugins/filter/parse_checksum.py @@ -0,0 +1,59 @@ +#!/usr/bin/python3 +# -*- coding: utf-8 -*- + +# (c) 2020-2023, Bodo Schulz +# Apache-2.0 (see LICENSE or https://opensource.org/license/apache-2-0) +# SPDX-License-Identifier: Apache-2.0 + +from __future__ import absolute_import, print_function + +__metaclass__ = type + +import re + +from ansible.utils.display import Display + +display = Display() + + +class FilterModule(object): + def filters(self): + return { + "parse_checksum": self.parse_checksum, + } + + def parse_checksum(self, data, application, os, arch, file_extension="tar.gz"): + """ + parse version string + """ + display.vv( + f"bodsch.core.parse_checksum(self, data, {application}, {os}, {arch})" + ) + + checksum = None + os = os.lower() + display.vvv(f" data: {data}") + display.vvv(f" os: {os}") + display.vvv(f" arch: {arch}") + display.vvv(f" file_extension: {file_extension}") + + if isinstance(data, list): + # 206cf787c01921574ca171220bb9b48b043c3ad6e744017030fed586eb48e04b alertmanager-0.25.0.linux-amd64.tar.gz + # 
(?P<checksum>[a-zA-Z0-9]+).*alertmanager[-_].*linux-amd64\.tar\.gz$
+            checksum = [
+                x
+                for x in data
+                if re.search(
+                    rf"(?P<checksum>[a-zA-Z0-9]+).*{application}[-_].*{os}[-_]{arch}\.{file_extension}",
+                    x,
+                )
+            ][0]
+
+            display.vvv(f"  found checksum: {checksum}")
+
+        if isinstance(checksum, str):
+            checksum = checksum.split(" ")[0]
+
+        display.vv(f"= checksum: {checksum}")
+
+        return checksum
diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/core/plugins/filter/parse_checksum.yml b/ansible/playbooks/collections/ansible_collections/bodsch/core/plugins/filter/parse_checksum.yml
new file mode 100644
index 0000000..e69de29
diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/core/plugins/filter/persistent_pool.yml b/ansible/playbooks/collections/ansible_collections/bodsch/core/plugins/filter/persistent_pool.yml
new file mode 100644
index 0000000..e69de29
diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/core/plugins/filter/python.py b/ansible/playbooks/collections/ansible_collections/bodsch/core/plugins/filter/python.py
new file mode 100644
index 0000000..9a0a12a
--- /dev/null
+++ b/ansible/playbooks/collections/ansible_collections/bodsch/core/plugins/filter/python.py
@@ -0,0 +1,44 @@
+#!/usr/bin/python3
+# -*- coding: utf-8 -*-
+
+# (c) 2020-2023, Bodo Schulz
+# Apache-2.0 (see LICENSE or https://opensource.org/license/apache-2-0)
+# SPDX-License-Identifier: Apache-2.0
+
+from __future__ import absolute_import, print_function
+
+from ansible.utils.display import Display
+
+__metaclass__ = type
+display = Display()
+
+
+class FilterModule(object):
+    def filters(self):
+        return {"python_extra_args": self.python_extra_args}
+
+    def python_extra_args(
+        self, data, python_version, extra_args=[], break_system_packages=True
+    ):
+        """
+        add extra args for python pip installation
+        """
+        result = list(set(extra_args))
+
+        python_version_major = python_version.get("major", None)
+        python_version_minor = python_version.get("minor", None)
+
+        if (
+            int(python_version_major) == 3
+            and int(python_version_minor) >= 11
+            and break_system_packages
+        ):
+            result.append("--break-system-packages")
+
+        # deduplicate
+        result = list(set(result))
+
+        result = " ".join(result)
+
+        display.vv(f"= {result}")
+        return result
diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/core/plugins/filter/python_extra_args.yml b/ansible/playbooks/collections/ansible_collections/bodsch/core/plugins/filter/python_extra_args.yml
new file mode 100644
index 0000000..e69de29
diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/core/plugins/filter/remove_empty_values.yml b/ansible/playbooks/collections/ansible_collections/bodsch/core/plugins/filter/remove_empty_values.yml
new file mode 100644
index 0000000..e69de29
diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/core/plugins/filter/sshd.py b/ansible/playbooks/collections/ansible_collections/bodsch/core/plugins/filter/sshd.py
new file mode 100644
index 0000000..c2faf56
--- /dev/null
+++ b/ansible/playbooks/collections/ansible_collections/bodsch/core/plugins/filter/sshd.py
@@ -0,0 +1,185 @@
+# python 3 headers, required if submitting to Ansible
+from __future__ import absolute_import, division, print_function
+
+import json
+
+from ansible.utils.display import Display
+
+__metaclass__ = type
+
+display = Display()
+
+
+class FilterModule(object):
+    """ """
+
+    def filters(self):
+        return {
+            "merge_lists": self.merge_lists,
+            "sshd_values": self.sshd_values,
+        }
+
+    def 
merge_lists(self, defaults, data): + """ """ + count_defaults = len(defaults) + count_data = len(data) + + display.vv( + "defaults: ({type}) {len} - {data} entries".format( + data=defaults, type=type(defaults), len=count_defaults + ) + ) + display.vv(json.dumps(data, indent=2, sort_keys=False)) + display.vv( + "data : ({type}) {len} - {data} entries".format( + data=data, type=type(data), len=count_data + ) + ) + + result = [] + + # short way + if count_defaults == 0: + return data + + if count_data == 0: + return defaults + + # our new list from users input + for d in data: + _name = d["host"] + # search the name in the default map + _defaults_name = self.__search(defaults, _name) + # display.vv(f" _defaults_name : {_defaults_name}") + # when not found, put these on the new result list + if not _defaults_name: + result.append(_defaults_name) + else: + # when found, remove these entry from the defaults list, its obsolete + for i in range(len(defaults)): + if defaults[i]["host"] == _name: + del defaults[i] + break + + # add both lists and sort + result = data + defaults + + display.vv(f"= result: {result}") + + return result + + def sshd_values(self, data): + """ + Ersetzt die Keys in einer YAML-Struktur basierend auf einer gegebenen Key-Map. + + :param data: Ansible Datenkonstrukt + :return: Ansible Datenkonstrukt mit den ersetzten Keys. + """ + display.vv(f"bodsch.core.sshd_values({data})") + + # Hilfsfunktion zur Rekursion + def replace_keys(obj): + """ + :param key_map: Dictionary, das alte Keys mit neuen Keys mappt. + """ + key_map = { + "port": "Port", + "address_family": "AddressFamily", + "listen_address": "ListenAddress", + "host_keys": "HostKey", + "rekey_limit": "RekeyLimit", + "syslog_facility": "SyslogFacility", + "log_level": "LogLevel", + "log_verbose": "LogVerbose", + "login_grace_time": "LoginGraceTime", + "permit_root_login": "PermitRootLogin", + "strict_modes": "StrictModes", + "max_auth_tries": "MaxAuthTries", + "max_sessions": "MaxSessions", + "pubkey_authentication": "PubkeyAuthentication", + "authorized_keys_file": "AuthorizedKeysFile", + "authorized_principals_file": "AuthorizedPrincipalsFile", + "authorized_keys_command": "AuthorizedKeysCommand", + "authorized_keys_command_user": "AuthorizedKeysCommandUser", + "hostbased_authentication": "HostbasedAuthentication", + "hostbased_accepted_algorithms": "HostbasedAcceptedAlgorithms", + "host_certificate": "HostCertificate", + "host_key": "HostKey", + "host_key_agent": "HostKeyAgent", + "host_key_algorithms": "HostKeyAlgorithms", + "ignore_user_known_hosts": "IgnoreUserKnownHosts", + "ignore_rhosts": "IgnoreRhosts", + "password_authentication": "PasswordAuthentication", + "permit_empty_passwords": "PermitEmptyPasswords", + "challenge_response_authentication": "ChallengeResponseAuthentication", + "kerberos_authentication": "KerberosAuthentication", + "kerberos_or_local_passwd": "KerberosOrLocalPasswd", + "kerberos_ticket_cleanup": "KerberosTicketCleanup", + "kerberos_get_afs_token": "KerberosGetAFSToken", + "kex_algorithms": "KexAlgorithms", + "gss_api_authentication": "GSSAPIAuthentication", + "gss_api_cleanup_credentials": "GSSAPICleanupCredentials", + "gss_api_strict_acceptor_check": "GSSAPIStrictAcceptorCheck", + "gss_api_key_exchange": "GSSAPIKeyExchange", + "use_pam": "UsePAM", + "allow_agent_forwarding": "AllowAgentForwarding", + "allow_tcp_forwarding": "AllowTcpForwarding", + "gateway_ports": "GatewayPorts", + "x11_forwarding": "X11Forwarding", + "x11_display_offset": "X11DisplayOffset", + "x11_use_localhost": 
"X11UseLocalhost", + "permit_tty": "PermitTTY", + "print_motd": "PrintMotd", + "print_last_log": "PrintLastLog", + "tcp_keep_alive": "TCPKeepAlive", + "permituser_environment": "PermitUserEnvironment", + "compression": "Compression", + "client_alive_interval": "ClientAliveInterval", + "client_alive_count_max": "ClientAliveCountMax", + "ciphers": "Ciphers", + "deny_groups": "DenyGroups", + "deny_users": "DenyUsers", + "macs": "MACs", + "use_dns": "UseDNS", + "pid_file": "PidFile", + "max_startups": "MaxStartups", + "permit_tunnel": "PermitTunnel", + "chroot_directory": "ChrootDirectory", + "version_addendum": "VersionAddendum", + "banner": "Banner", + "accept_env": "AcceptEnv", + "subsystem": "Subsystem", + "match_users": "Match", + # ssh_config + "hash_known_hosts": "HashKnownHosts", + "send_env": "SendEnv", + # "": "", + } + + if isinstance(obj, dict): + # Ersetze die Keys und rufe rekursiv für die Werte auf + return {key_map.get(k, k): replace_keys(v) for k, v in obj.items()} + elif isinstance(obj, list): + # Falls es eine Liste ist, rekursiv die Elemente bearbeiten + return [replace_keys(item) for item in obj] + else: + return obj + + # Ersetze die Keys im geladenen YAML + result = replace_keys(data) + + display.v(f"= result: {result}") + + return result + + def __sort_list(self, _list, _filter): + return sorted(_list, key=lambda k: k.get(_filter)) + + def __search(self, d, name): + res = None + for sub in d: + if sub["host"] == name: + res = sub + break + + return res diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/core/plugins/filter/sshd_values.yml b/ansible/playbooks/collections/ansible_collections/bodsch/core/plugins/filter/sshd_values.yml new file mode 100644 index 0000000..e69de29 diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/core/plugins/filter/string_to_list.yml b/ansible/playbooks/collections/ansible_collections/bodsch/core/plugins/filter/string_to_list.yml new file mode 100644 index 0000000..e69de29 diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/core/plugins/filter/support_tls.py b/ansible/playbooks/collections/ansible_collections/bodsch/core/plugins/filter/support_tls.py new file mode 100644 index 0000000..da42e3c --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/core/plugins/filter/support_tls.py @@ -0,0 +1,79 @@ +#!/usr/bin/python3 +# -*- coding: utf-8 -*- + +# (c) 2022-2024, Bodo Schulz + +from __future__ import absolute_import, print_function + +__metaclass__ = type + +import os + +from ansible.utils.display import Display + +display = Display() + + +class FilterModule(object): + """ """ + + def filters(self): + return { + "support_tls": self.support_tls, + "tls_directory": self.tls_directory, + } + + def support_tls(self, data): + """ + collabora_config: + ssl: + enabled: true + cert_file: /etc/coolwsd/cert.pem + key_file: /etc/coolwsd/key.pem + ca_file: /etc/coolwsd/ca-chain.cert.pem + storage: + ssl: + enabled: "" + cert_file: /etc/coolwsd/cert.pem + key_file: /etc/coolwsd/key.pem + ca_file: /etc/coolwsd/ca-chain.cert.pem + """ + display.vv(f"bodsch.core.support_tls({data})") + + ssl_data = data.get("ssl", {}) + + ssl_enabled = ssl_data.get("enabled", None) + ssl_ca = ssl_data.get("ca_file", None) + ssl_cert = ssl_data.get("cert_file", None) + ssl_key = ssl_data.get("key_file", None) + + if ssl_enabled and ssl_ca and ssl_cert and ssl_key: + return True + else: + return False + + def tls_directory(self, data): + """ """ + display.vv(f"bodsch.core.tls_directory({data})") + + 
directory = []
+
+        ssl_data = data.get("ssl", {})
+
+        ssl_ca = ssl_data.get("ca_file", None)
+        ssl_cert = ssl_data.get("cert_file", None)
+        ssl_key = ssl_data.get("key_file", None)
+
+        # collect the parent directory of every configured TLS file;
+        # identical parents collapse into a single entry below
+        if ssl_ca and ssl_cert and ssl_key:
+            directory.append(os.path.dirname(ssl_ca))
+            directory.append(os.path.dirname(ssl_cert))
+            directory.append(os.path.dirname(ssl_key))
+
+        directory = list(set(directory))
+
+        # default to None so that missing or differing directories do not
+        # raise an UnboundLocalError below
+        result = None
+
+        if len(directory) == 1:
+            result = directory[0]
+
+        display.vv(f" = {result}")
+
+        return result
diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/core/plugins/filter/support_tls.yml b/ansible/playbooks/collections/ansible_collections/bodsch/core/plugins/filter/support_tls.yml
new file mode 100644
index 0000000..e69de29
diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/core/plugins/filter/syslog_network_definition.yml b/ansible/playbooks/collections/ansible_collections/bodsch/core/plugins/filter/syslog_network_definition.yml
new file mode 100644
index 0000000..e69de29
diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/core/plugins/filter/syslog_ng.py b/ansible/playbooks/collections/ansible_collections/bodsch/core/plugins/filter/syslog_ng.py
new file mode 100644
index 0000000..d3af3b8
--- /dev/null
+++ b/ansible/playbooks/collections/ansible_collections/bodsch/core/plugins/filter/syslog_ng.py
@@ -0,0 +1,138 @@
+# python 3 headers, required if submitting to Ansible
+from __future__ import absolute_import, division, print_function
+
+import os
+import re
+
+from ansible.plugins.test.core import version_compare
+from ansible.utils.display import Display
+
+__metaclass__ = type
+
+display = Display()
+
+
+class FilterModule(object):
+    """ """
+
+    def filters(self):
+        return {
+            "get_service": self.get_service,
+            "log_directories": self.log_directories,
+            "syslog_network_definition": self.syslog_network_definition,
+            "verify_syslog_options": self.verify_syslog_options,
+        }
+
+    def get_service(self, data, search_for):
+        """ """
+        display.vv(f"bodsch.core.get_service(self, {data}, {search_for})")
+
+        name = None
+        regex_list_compiled = re.compile(f"^{search_for}.*")
+
+        match = {k: v for k, v in data.items() if re.match(regex_list_compiled, k)}
+
+        # display.vv(f"found: {match} {type(match)} {len(match)}")
+
+        if isinstance(match, dict) and len(match) > 0:
+            values = list(match.values())[0]
+            name = values.get("name", search_for).replace(".service", "")
+
+        # display.vv(f"= result {name}")
+        return name
+
+    def log_directories(self, data, base_directory):
+        """
+        return a list of directories
+        """
+        display.vv(f"bodsch.core.log_directories(self, {data}, {base_directory})")
+
+        log_dirs = []
+        log_files = sorted(
+            [v.get("file_name") for k, v in data.items() if v.get("file_name")]
+        )
+        unique = list(dict.fromkeys(log_files))
+        for d in unique:
+            if "/$" in d:
+                clean_dir_name = d.split("/$")[0]
+                log_dirs.append(clean_dir_name)
+
+        unique_dirs = list(dict.fromkeys(log_dirs))
+
+        log_dirs = []
+
+        for file_name in unique_dirs:
+            full_file_name = os.path.join(base_directory, file_name)
+            log_dirs.append(full_file_name)
+
+        # display.v(f"= result {log_dirs}")
+        return log_dirs
+
+    def validate_syslog_destination(self, data):
+        """ """
+        pass
+
+    def syslog_network_definition(self, data, conf_type="source"):
+        """ """
+        display.vv(f"bodsch.core.syslog_network_definition({data}, {conf_type})")
+
+        def as_boolean(value):
+            return "yes" if value else "no"
+
+        def as_string(value):
+            return f'"{value}"'
+
+        def as_list(value):
+            return ", ".join(value)
+
+        # render every option as a syslog-ng "key(value)" fragment;
+        # 'ip' is special-cased depending on conf_type (source vs. destination)
res = {} + if isinstance(data, dict): + + for key, value in data.items(): + if key == "ip": + if conf_type == "source": + res = dict(ip=f"({value})") + else: + res = dict(ip=f'"{value}"') + else: + if isinstance(value, bool): + value = f"({as_boolean(value)})" + elif isinstance(value, str): + value = f"({as_string(value)})" + elif isinstance(value, int): + value = f"({value})" + elif isinstance(value, list): + value = f"({as_list(value)})" + elif isinstance(value, dict): + value = self.syslog_network_definition(value, conf_type) + + res.update({key: value}) + + if isinstance(data, str): + res = data + + # display.v(f"= res {res}") + return res + + def verify_syslog_options(self, data, version): + """ """ + display.vv(f"bodsch.core.verify_syslog_options({data}, {version})") + + if version_compare(str(version), "4.1", ">="): + if data.get("stats_freq") is not None: + stats_freq = data.pop("stats_freq") + """ + obsoleted keyword, please update your configuration; keyword='stats_freq' + change='Use the stats() block. E.g. stats(freq(1)); + """ + # sicherstellen, dass 'stats' ein dict ist + if not isinstance(data.get("stats"), dict): + data["stats"] = {} + + data["stats"]["freq"] = stats_freq + + if version_compare(str(version), "4.1", "<"): + data.pop("stats", None) # kein KeyError + + # display.v(f"= result {data}") + return data diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/core/plugins/filter/tls_directory.yml b/ansible/playbooks/collections/ansible_collections/bodsch/core/plugins/filter/tls_directory.yml new file mode 100644 index 0000000..e69de29 diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/core/plugins/filter/type.yml b/ansible/playbooks/collections/ansible_collections/bodsch/core/plugins/filter/type.yml new file mode 100644 index 0000000..e69de29 diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/core/plugins/filter/types.py b/ansible/playbooks/collections/ansible_collections/bodsch/core/plugins/filter/types.py new file mode 100644 index 0000000..0332283 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/core/plugins/filter/types.py @@ -0,0 +1,121 @@ +#!/usr/bin/python3 +# -*- coding: utf-8 -*- + +# (c) 2020-2023, Bodo Schulz +# Apache-2.0 (see LICENSE or https://opensource.org/license/apache-2-0) +# SPDX-License-Identifier: Apache-2.0 + +from __future__ import absolute_import, print_function + +__metaclass__ = type + +# filter_plugins/var_type.py +from collections.abc import Mapping, Sequence +from collections.abc import Set as ABCSet + +from ansible.utils.display import Display + +# optional: we vermeiden harte Abhängigkeit von Ansible, behandeln aber deren Wrapper als str +_STR_WRAPPERS = {"AnsibleUnsafeText", "AnsibleUnicode", "AnsibleVaultEncryptedUnicode"} + +display = Display() + + +class FilterModule(object): + def filters(self): + return { + "type": self.var_type, + "config_bool": self.config_bool_as_string, + "string_to_list": self.string_to_list, + } + + def var_type(self, value): + """ + Liefert kanonische Python-Typnamen: str, int, float, bool, list, tuple, set, dict, NoneType. + Fällt bei fremden/Wrapper-Typen auf die jeweilige ABC-Kategorie zurück. + """ + # None + if value is None: + return "NoneType" + + t = type(value) + + # String-ähnliche Wrapper (z.B. 
AnsibleUnsafeText)
+        if isinstance(value, str) or t.__name__ in _STR_WRAPPERS:
+            return "string"
+
+        # bytes
+        if isinstance(value, bytes):
+            return "bytes"
+        if isinstance(value, bytearray):
+            return "bytearray"
+
+        # bool before int (bool is a subclass of int)
+        if isinstance(value, bool):
+            return "bool"
+
+        # basic types
+        if isinstance(value, int):
+            return "int"
+        if isinstance(value, float):
+            return "float"
+
+        # concrete built-in containers first
+        if isinstance(value, list):
+            return "list"
+        if isinstance(value, tuple):
+            return "tuple"
+        if isinstance(value, set):
+            return "set"
+        if isinstance(value, dict):
+            return "dict"
+
+        # ABC fallbacks for wrappers (e.g. _AnsibleLazyTemplateList, AnsibleMapping ...)
+        if isinstance(value, Mapping):
+            return "dict"
+        if isinstance(value, ABCSet):
+            return "set"
+        if isinstance(value, Sequence) and not isinstance(
+            value, (str, bytes, bytearray)
+        ):
+            # treat unknown sequence-like wrappers as a list
+            return "list"
+
+        # last resort: the concrete class name
+        return t.__name__
+
+    def config_bool_as_string(self, data, true_as="yes", false_as="no"):
+        """
+        Return a configuration string for a boolean-ish value.
+
+        Booleans map to 'true_as'/'false_as', None maps to 'false_as',
+        anything else is passed through unchanged.
+        """
+        # display.vv(f"bodsch.core.config_bool({data}, {type(data)}, {true_as}, {false_as})")
+
+        # The previous implementation tested 'type(data) is None', which is
+        # never true, so None leaked through unchanged.
+        if data is None:
+            return false_as
+
+        if isinstance(data, bool):
+            return true_as if data else false_as
+
+        return data
+
+    def string_to_list(self, data):
+        """
+        Wrap a string or integer in a single-element list; lists pass through.
+        """
+        display.vv(f"bodsch.core.string_to_list({data})")
+
+        result = []
+        if isinstance(data, str):
+            result.append(data)
+        elif isinstance(data, int):
+            result.append(str(data))
+        elif isinstance(data, list):
+            result = data
+
+        display.vv(f"= result: {result}")
+
+        return result
diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/core/plugins/filter/union_by.py b/ansible/playbooks/collections/ansible_collections/bodsch/core/plugins/filter/union_by.py
new file mode 100644
index 0000000..98a6522
--- /dev/null
+++ b/ansible/playbooks/collections/ansible_collections/bodsch/core/plugins/filter/union_by.py
@@ -0,0 +1,47 @@
+#!/usr/bin/python3
+# -*- coding: utf-8 -*-
+
+# (c) 2020-2023, Bodo Schulz
+# Apache-2.0 (see LICENSE or https://opensource.org/license/apache-2-0)
+# SPDX-License-Identifier: Apache-2.0
+
+from __future__ import absolute_import, print_function
+
+__metaclass__ = type
+
+from ansible.utils.display import Display
+
+display = Display()
+
+
+class FilterModule(object):
+    def filters(self):
+        return {
+            "union_by": self.union,
+        }
+
+    def union(self, data, defaults, union_by):
+        """
+        Merge two lists of dictionaries, keyed by 'union_by'.
+
+        Entries from 'data' always win; defaults whose key does not occur in
+        'data' are appended. An empty 'data' returns the defaults unchanged.
+        """
+        result = []
+
+        if len(data) == 0:
+            result = defaults
+        else:
+            for i in data:
+                display.vv(f"  - {i}")
+                # entries from 'data' always take precedence
+                result.append(i)
+
+            # append defaults that are not overridden by an entry in 'data'
+            # (the old branch 'result.append(found[0])' raised an IndexError,
+            # because it was only reached when 'found' was empty)
+            data_keys = [i.get(union_by, None) for i in data]
+            for d in defaults:
+                if d.get(union_by, None) not in data_keys:
+                    result.append(d)
+
+        display.vv(f"= {result}")
+        return result
diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/core/plugins/filter/union_by.yml b/ansible/playbooks/collections/ansible_collections/bodsch/core/plugins/filter/union_by.yml
new file mode 100644
index 0000000..e69de29
diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/core/plugins/filter/upgrade.yml b/ansible/playbooks/collections/ansible_collections/bodsch/core/plugins/filter/upgrade.yml
new file mode 100644
index 0000000..e69de29
diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/core/plugins/filter/verify.py b/ansible/playbooks/collections/ansible_collections/bodsch/core/plugins/filter/verify.py
new file mode 100644
index 0000000..8501ac8
--- /dev/null
+++ b/ansible/playbooks/collections/ansible_collections/bodsch/core/plugins/filter/verify.py
@@ -0,0 +1,67 @@
+#!/usr/bin/python3
+# -*- coding: utf-8 -*-
+
+# (c) 2020-2023, Bodo Schulz
+# Apache-2.0 (see LICENSE or https://opensource.org/license/apache-2-0)
+# SPDX-License-Identifier: Apache-2.0
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+from ansible.utils.display import Display
+
+display = Display()
+
+
+class FilterModule(object):
+    def filters(self):
+        return {
+            "compare_list": self.compare_list,
+            "upgrade": self.upgrade,
+        }
+
+    def compare_list(self, data_list, compare_to_list):
+        """
+        Return the items of 'data_list' that are also present in 'compare_to_list'.
+        """
+        display.vv(f"bodsch.core.compare_list({data_list}, {compare_to_list})")
+
+        result = []
+
+        for i in data_list:
+            if i in compare_to_list:
+                result.append(i)
+
+        display.vv(f"return : {result}")
+        return result
+
+    def upgrade(self, install_path, bin_path):
+        """
+        Decide whether an upgrade step is required, based on the Ansible
+        'stat' results for the installation path and the binary path.
+ """ + display.vv(f"bodsch.core.upgrade({install_path}, {bin_path})") + + directory = None + link_to_bin = None + + install_path_stats = install_path.get("stat", None) + bin_path_stats = bin_path.get("stat", None) + install_path_exists = install_path_stats.get("exists", False) + bin_path_exists = bin_path_stats.get("exists", False) + + if install_path_exists: + directory = install_path_stats.get("isdir", False) + + if bin_path_exists: + link_to_bin = bin_path_stats.get("islnk", False) + + if bin_path_exists and not link_to_bin: + result = True + elif install_path_exists and directory: + result = False + else: + result = False + + display.vv(f"return : {result}") + return result diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/core/plugins/filter/verify_syslog_options.yml b/ansible/playbooks/collections/ansible_collections/bodsch/core/plugins/filter/verify_syslog_options.yml new file mode 100644 index 0000000..e69de29 diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/core/plugins/lookup/file_glob.py b/ansible/playbooks/collections/ansible_collections/bodsch/core/plugins/lookup/file_glob.py new file mode 100644 index 0000000..9652eda --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/core/plugins/lookup/file_glob.py @@ -0,0 +1,222 @@ +#!/usr/bin/python3 +# -*- coding: utf-8 -*- + +# (c) 2012, Michael DeHaan +# (c) 2017 Ansible Project +# (c) 2022-2023, Bodo Schulz +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# "MODIFED WITH https://github.com/philfry/ansible/blob/37c616dc76d9ebc3cbf0285a22e55f0e4db4185e/lib/ansible/plugins/lookup/fileglob.py" + +from __future__ import absolute_import, division, print_function + +import os +import re +from typing import Any, Dict, List, Optional + +# from ansible.utils.listify import listify_lookup_plugin_terms as listify +from ansible.plugins.lookup import LookupBase +from ansible.utils.display import Display + +__metaclass__ = type + +DOCUMENTATION = """ + name: fileglob + author: Bodo Schulz + version_added: "1.0.4" + short_description: list files matching a pattern + description: + - Find all files in a directory tree that match a pattern (recursively). + options: + _terms: + required: False + description: File extension on which a comparison is to take place. + type: str + search_path: + required: False + description: A list of additional directories to be searched. + type: list + default: [] + version_added: "1.0.4" + notes: + - Patterns are only supported on files, not directory/paths. + - Matching is against local system files on the Ansible controller. + To iterate a list of files on a remote node, use the M(ansible.builtin.find) module. + - Returns a string list of paths joined by commas, or an empty list if no files match. For a 'true list' pass C(wantlist=True) to the lookup. +""" + +EXAMPLES = """ +- name: Display paths of all .tpl files + ansible.builtin.debug: + msg: "{{ lookup('bodsch.core.file_glob', '.tpl') }}" + +- name: Show paths of all .tpl files, extended by further directories + ansible.builtin.debug: + msg: "{{ lookup('bodsch.core.file_glob', '.tpl') }}" + vars: + search_path: + - ".." + - "../.." 
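+
+# Hypothetical variant: the plugin also honours an optional 'search_regex'
+# variable; only file names matching the expression are returned.
+- name: Display only versioned .tpl files
+  ansible.builtin.debug:
+    msg: "{{ lookup('bodsch.core.file_glob', '.tpl') }}"
+  vars:
+    search_regex: "^v[0-9].*"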
+ +- name: Copy each file over that matches the given pattern + ansible.builtin.copy: + src: "{{ item }}" + dest: "/etc/fooapp/" + owner: "root" + mode: 0600 + with_file_glob: + - "*.tmpl" + +- name: Copy each template over that matches the given pattern + ansible.builtin.copy: + src: "{{ item }}" + dest: "/etc/alertmanager/templates/" + owner: "root" + mode: 0640 + with_file_glob: + - ".tmpl" + vars: + search_path: + - ".." + - "../.." +""" + +RETURN = """ + _list: + description: + - list of files + type: list + elements: path +""" + +display = Display() + + +class LookupModule(LookupBase): + """ + Ansible lookup plugin that finds files matching an extension in role + or playbook search paths. + + The plugin: + * Resolves search locations based on Ansible's search paths and optional + user-specified paths. + * Recursively walks the "templates" and "files" directories. + * Returns a flat list of matching file paths. + """ + + def __init__(self, basedir: Optional[str] = None, **kwargs: Any) -> None: + """ + Initialize the lookup module. + + The base directory is stored for potential use by Ansible's lookup base + mechanisms. + + Args: + basedir: Optional base directory for lookups, usually supplied by Ansible. + **kwargs: Additional keyword arguments passed from Ansible. + + Returns: + None + """ + self.basedir = basedir + + def run( + self, + terms: List[str], + variables: Optional[Dict[str, Any]] = None, + **kwargs: Any, + ) -> List[str]: + """ + Execute the fileglob lookup. + + For each term (interpreted as a file extension), this method searches + recursively under all derived search paths and returns a flattened list + of matching file paths. + + Args: + terms: A list of file extensions or patterns (e.g. ['.tpl']). + variables: The Ansible variable context, used to determine + - ansible_search_path + - role_path + - search_path (custom additional paths) + - search_regex (optional filename regex filter) + **kwargs: Additional lookup options, passed through to set_options(). + + Returns: + list[str]: A list containing the full paths of all files matching + the provided extensions within the resolved search directories. + """ + display.vv(f"run({terms}, variables, {kwargs})") + self.set_options(direct=kwargs) + + paths: List[str] = [] + ansible_search_path = variables.get("ansible_search_path", None) + role_path = variables.get("role_path") + lookup_search_path = variables.get("search_path", None) + lookup_search_regex = variables.get("search_regex", None) + + if ansible_search_path: + paths = ansible_search_path + else: + paths.append(self.get_basedir(variables)) + + if lookup_search_path: + if isinstance(lookup_search_path, list): + for p in lookup_search_path: + paths.append(os.path.join(role_path, p)) + + search_path = ["templates", "files"] + + ret: List[str] = [] + found_files: List[List[str]] = [] + + for term in terms: + """ """ + for p in paths: + for sp in search_path: + path = os.path.join(p, sp) + display.vv(f" - lookup in directory: {path}") + r = self._find_recursive( + folder=path, extension=term, search_regex=lookup_search_regex + ) + # display.vv(f" found: {r}") + if len(r) > 0: + found_files.append(r) + + ret = self._flatten(found_files) + + return ret + + def _find_recursive( + self, + folder: str, + extension: str, + search_regex: Optional[str] = None, + ) -> List[str]: + """ + Recursively search for files in the given folder that match an extension + and an optional regular expression. + + Args: + folder: The root directory to walk recursively. 
+ extension: The file extension to match (e.g. ".tpl"). + search_regex: Optional regular expression string. If provided, only + filenames matching this regex are included. + + Returns: + list[str]: A list containing the full paths of matching files found + under the given folder. If no files match, an empty list is returned. + """ + # display.vv(f"_find_recursive({folder}, {extension}, {search_regex})") + matches: List[str] = [] + + for root, dirnames, filenames in os.walk(folder): + for filename in filenames: + if filename.endswith(extension): + if search_regex: + reg = re.compile(search_regex) + if reg.match(filename): + matches.append(os.path.join(root, filename)) + else: + matches.append(os.path.join(root, filename)) + + return matches diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/core/plugins/lookup/rbw.py b/ansible/playbooks/collections/ansible_collections/bodsch/core/plugins/lookup/rbw.py new file mode 100644 index 0000000..70a4280 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/core/plugins/lookup/rbw.py @@ -0,0 +1,463 @@ +from __future__ import absolute_import, division, print_function + +""" +Ansible lookup plugin to read secrets from Vaultwarden using the rbw CLI. + +This module provides the `LookupModule` class, which integrates the `rbw` +command line client into Ansible as a lookup plugin. It supports optional +index-based lookups, JSON parsing of secrets, and on-disk caching for both +the rbw index and retrieved secrets. +""" + +import hashlib +import json +import os +import subprocess +import time +from pathlib import Path +from typing import Any, Dict, List, Optional + +from ansible.errors import AnsibleError +from ansible.plugins.lookup import LookupBase +from ansible.utils.display import Display + +display = Display() + +DOCUMENTATION = """ +lookup: rbw +author: + - Bodo 'bodsch' (@bodsch) +version_added: "1.0.0" +short_description: Read secrets from Vaultwarden via the rbw CLI +description: + - This lookup plugin retrieves entries from Vaultwarden using the 'rbw' CLI client. + - It supports selecting specific fields, optional JSON parsing, and structured error handling. + - Supports index-based lookups for disambiguation by name/folder/user. +options: + _terms: + description: + - The Vault entry to retrieve, specified by path, name, or UUID. + required: true + field: + description: + - Optional field within the entry to return (e.g., username, password). + required: false + type: str + parse_json: + description: + - If set to true, the returned value will be parsed as JSON. + required: false + type: bool + default: false + strict_json: + description: + - If true and parse_json is enabled, invalid JSON will raise an error. + - If false, invalid JSON will return an empty dictionary. + required: false + type: bool + default: false + use_index: + description: + - If true, the index will be used to map name/folder/user to a unique id. 
+ required: false + type: bool + default: false +""" + +EXAMPLES = """ +- name: Read a password from Vault by UUID + debug: + msg: "{{ lookup('bodsch.core.rbw', '0123-uuid-4567', field='password') }}" + +- name: Read a password using index + debug: + msg: "{{ lookup('bodsch.core.rbw', + {'name': 'expresszuschnitt.de', 'folder': '.immowelt.de', 'user': 'immo@boone-schulz.de'}, + field='password', + use_index=True) }}" + +- name: Multi-fetch + set_fact: + multi: "{{ lookup('bodsch.core.rbw', + [{'name': 'foo', 'folder': '', 'user': ''}, 'some-uuid'], + field='username', + use_index=True) }}" +""" + +RETURN = """ +_raw: + description: + - The raw value from the Vault entry, either as a string or dictionary (if parse_json is true). + type: raw +""" + + +class LookupModule(LookupBase): + """ + Ansible lookup module for retrieving secrets from Vaultwarden via the rbw CLI. + + The plugin supports: + * Lookup by UUID or by a combination of name, folder, and user. + * Optional index-based resolution to derive a stable entry ID. + * On-disk caching of both the rbw index and individual lookups. + * Optional JSON parsing of retrieved secret values. + + Attributes: + CACHE_TTL (int): Time-to-live for cache entries in seconds. + cache_directory (str): Base directory path for index and value caches. + """ + + CACHE_TTL = 300 # 5 Minuten + cache_directory = f"{Path.home()}/.cache/ansible/lookup/rbw" + + def __init__(self, *args: Any, **kwargs: Any) -> None: + """ + Initialize the lookup module and ensure the cache directory exists. + + Args: + *args: Positional arguments passed through to the parent class. + **kwargs: Keyword arguments passed through to the parent class. + + Returns: + None + """ + super(LookupModule, self).__init__(*args, **kwargs) + if not os.path.exists(self.cache_directory): + os.makedirs(self.cache_directory, exist_ok=True) + + def run(self, terms, variables=None, **kwargs) -> List[Any]: + """ + Execute the lookup and return the requested values. + + This method is called by Ansible when the lookup plugin is used. It + resolves each term into an rbw entry ID (optionally using the index), + retrieves and caches the value, and optionally parses the value as JSON. + + Args: + terms: A list of lookup terms. Each term can be either: + * A string representing an entry ID or name. + * A dict with keys "name", "folder", and "user" for index-based lookup. + variables: Ansible variables (unused, but part of the standard interface). + **kwargs: Additional keyword arguments: + * field (str): Optional field within the entry to return. + * parse_json (bool): Whether to parse the result as JSON. + * strict_json (bool): If True, invalid JSON raises an error. + * use_index (bool): If True, resolve name/folder/user via rbw index. + + Returns: + list: A list of values corresponding to the supplied terms. Each element + is either: + * A string (raw secret) when parse_json is False. + * A dict (parsed JSON) when parse_json is True. + + Raises: + AnsibleError: If input terms are invalid, the index lookup fails, + the rbw command fails, or JSON parsing fails in strict mode. 
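+
+        Example (illustrative; the entry name and field are hypothetical)::
+
+            {{ lookup('bodsch.core.rbw', 'mail.example.com', field='password') }}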
+ """ + display.v(f"run(terms={terms}, kwargs={kwargs})") + + if not terms or not isinstance(terms, list) or not terms[0]: + raise AnsibleError("At least one Vault entry must be specified.") + + field = kwargs.get("field", "").strip() + parse_json = kwargs.get("parse_json", False) + strict_json = kwargs.get("strict_json", False) + use_index = kwargs.get("use_index", False) + + index_data: Optional[Dict[str, Any]] = None + if use_index: + index_data = self._read_index() + if index_data is None: + index_data = self._fetch_index() + display.v(f"Index has {len(index_data['entries'])} entries") + + results: List[Any] = [] + + for term in terms: + if isinstance(term, dict): + name = term.get("name", "").strip() + folder = term.get("folder", "").strip() + user = term.get("user", "").strip() + raw_entry = f"{name}|{folder}|{user}" + else: + name = term.strip() + folder = "" + user = "" + raw_entry = name + + if not name: + continue + + entry_id = name # fallback: use directly + + if index_data: + matches = [ + e + for e in index_data["entries"] + if e["name"] == name + and (not folder or e["folder"] == folder) + and (not user or e["user"] == user) + ] + + if not matches: + raise AnsibleError( + f"No matching entry found in index for: {raw_entry}" + ) + + if len(matches) > 1: + raise AnsibleError( + f"Multiple matches found in index for: {raw_entry}" + ) + + entry_id = matches[0]["id"] + display.v(f"Resolved {raw_entry} → id={entry_id}") + + cache_key = self._cache_key(entry_id, field) + cached = self._read_cache(cache_key) + + if cached is not None: + value = cached + display.v(f"Cache HIT for {entry_id}") + else: + value = self._fetch_rbw(entry_id, field) + self._write_cache(cache_key, value) + display.v(f"Cache MISS for {entry_id} — fetched with rbw") + + if parse_json: + try: + results.append(json.loads(value)) + except json.decoder.JSONDecodeError as e: + if strict_json: + raise AnsibleError( + f"JSON parsing failed for entry '{entry_id}': {e}" + ) + else: + display.v( + f"Warning: Content of '{entry_id}' is not valid JSON." + ) + results.append({}) + except Exception as e: + raise AnsibleError(f"Unexpected error parsing '{entry_id}': {e}") + else: + results.append(value) + + return results + + def _fetch_rbw(self, entry_id: str, field: str) -> str: + """ + Call the rbw CLI to retrieve a specific entry or entry field. + + Args: + entry_id: The rbw entry identifier (UUID or resolved ID from index). + field: Optional field name to retrieve (e.g. "username", "password"). + If empty, the default value for the entry is returned. + + Returns: + str: The trimmed stdout of the rbw command, representing the secret value. + + Raises: + AnsibleError: If the rbw command exits with a non-zero status. + """ + cmd = ["rbw", "get"] + if field: + cmd.extend(["--field", field]) + cmd.append(entry_id) + + try: + result = subprocess.run( + cmd, + check=True, + stdout=subprocess.PIPE, + stderr=subprocess.PIPE, + text=True, + ) + return result.stdout.strip() + except subprocess.CalledProcessError as e: + err_msg = e.stderr.strip() or e.stdout.strip() + raise AnsibleError(f"Error retrieving Vault entry '{entry_id}': {err_msg}") + + def _fetch_index(self) -> Dict[str, Any]: + """ + Fetch the rbw index and persist it in the local cache. + + The index contains a list of entries, each with id, user, name, and folder. + It is stored on disk together with a timestamp and used for subsequent + lookups until it expires. 
+ + Returns: + dict: A dictionary with: + * "timestamp" (float): Unix timestamp when the index was fetched. + * "entries" (list[dict]): List of index entries. + + Raises: + AnsibleError: If the rbw index command fails. + """ + cmd = ["rbw", "list", "--fields", "id,user,name,folder"] + + try: + result = subprocess.run( + cmd, + check=True, + stdout=subprocess.PIPE, + stderr=subprocess.PIPE, + text=True, + ) + lines = [ + line.strip() for line in result.stdout.splitlines() if line.strip() + ] + + headers = ["id", "user", "name", "folder"] + + entries: List[Dict[str, str]] = [] + for line in lines: + parts = line.split("\t") + if len(parts) < len(headers): + parts += [""] * (len(headers) - len(parts)) + entry = dict(zip(headers, parts)) + entries.append(entry) + + index_payload: Dict[str, Any] = { + "timestamp": time.time(), + "entries": entries, + } + + self._write_index(index_payload) + return index_payload + + except subprocess.CalledProcessError as e: + err_msg = e.stderr.strip() or e.stdout.strip() + raise AnsibleError(f"Error retrieving rbw index: {err_msg}") + + def _index_path(self) -> str: + """ + Compute the absolute file path of the index cache. + + Returns: + str: The full path to the index cache file. + """ + return os.path.join(self.cache_directory, "index.json") + + def _read_index(self) -> Optional[Dict[str, Any]]: + """ + Read the rbw index from the cache if it exists and is still valid. + + The index is considered valid if its age is less than or equal to + CACHE_TTL. If the index is expired or cannot be read, it is removed. + + Returns: + dict | None: The cached index payload if available and not expired, + otherwise None. + + """ + path = self._index_path() + if not os.path.exists(path): + return None + + try: + with open(path, "r", encoding="utf-8") as f: + payload = json.load(f) + age = time.time() - payload["timestamp"] + if age <= self.CACHE_TTL: + return payload + else: + os.remove(path) + return None + except Exception as e: + display.v(f"Index cache read error: {e}") + return None + + def _write_index(self, index_payload: Dict[str, Any]) -> None: + """ + Persist the rbw index payload to disk. + + Args: + index_payload: The payload containing the index data and timestamp. + + Returns: + None + """ + path = self._index_path() + try: + with open(path, "w", encoding="utf-8") as f: + json.dump(index_payload, f) + except Exception as e: + display.v(f"Index cache write error: {e}") + + def _cache_key(self, entry_id: str, field: str) -> str: + """ + Create a deterministic cache key for a given entry and field. + + Args: + entry_id: The rbw entry identifier. + field: The requested field name. May be an empty string. + + Returns: + str: A SHA-256 hash hex digest representing the cache key. + """ + raw_key = f"{entry_id}|{field}".encode("utf-8") + return hashlib.sha256(raw_key).hexdigest() + + def _cache_path(self, key: str) -> str: + """ + Compute the absolute file path for a given cache key. + + Args: + key: The cache key as returned by `_cache_key`. + + Returns: + str: The full path to the cache file for the given key. + """ + return os.path.join(self.cache_directory, key + ".json") + + def _read_cache(self, key: str) -> Optional[str]: + """ + Read a cached value for the given key if present and not expired. + + The cache entry is considered valid if its age is less than or equal to + CACHE_TTL. If the entry is expired or cannot be read, it is removed. + + Args: + key: The cache key as returned by `_cache_key`. 
+ + Returns: + str | None: The cached value if present and not expired, + otherwise None. + """ + path = self._cache_path(key) + if not os.path.exists(path): + return None + + try: + with open(path, "r", encoding="utf-8") as f: + payload = json.load(f) + age = time.time() - payload["timestamp"] + if age <= self.CACHE_TTL: + return payload["value"] + else: + os.remove(path) + return None + except Exception as e: + display.v(f"Cache read error for key {key}: {e}") + return None + + def _write_cache(self, key: str, value: str) -> None: + """ + Write a value to the cache using the given key. + + Args: + key: The cache key as returned by `_cache_key`. + value: The value to be cached, typically the raw secret string. + + Returns: + None + """ + path = self._cache_path(key) + payload = { + "timestamp": time.time(), + "value": value, + } + try: + with open(path, "w", encoding="utf-8") as f: + json.dump(payload, f) + except Exception as e: + display.v(f"Cache write error for key {key}: {e}") diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/core/plugins/module_utils/cache/cache_valid.py b/ansible/playbooks/collections/ansible_collections/bodsch/core/plugins/module_utils/cache/cache_valid.py new file mode 100644 index 0000000..b4ec612 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/core/plugins/module_utils/cache/cache_valid.py @@ -0,0 +1,113 @@ +#!/usr/bin/python3 +# -*- coding: utf-8 -*- + +# (c) 2020-2025, Bodo Schulz +# Apache-2.0 (see LICENSE or https://opensource.org/license/apache-2-0) +# SPDX-License-Identifier: Apache-2.0 + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +import datetime +import os +from pathlib import Path +from typing import Any + + +def cache_valid_old( + module, cache_file_name, cache_minutes=60, cache_file_remove=True +) -> bool: + """ + read local file and check the creation time against local time + + returns 'False' when cache are out of sync + """ + out_of_cache = False + + if os.path.isfile(cache_file_name): + module.debug(msg=f"read cache file '{cache_file_name}'") + now = datetime.datetime.now() + creation_time = datetime.datetime.fromtimestamp( + os.path.getctime(cache_file_name) + ) + diff = now - creation_time + # define the difference from now to the creation time in minutes + cached_time = diff.total_seconds() / 60 + out_of_cache = cached_time > cache_minutes + + module.debug(msg=f" - now {now}") + module.debug(msg=f" - creation_time {creation_time}") + module.debug(msg=f" - cached since {cached_time}") + module.debug(msg=f" - out of cache {out_of_cache}") + + if out_of_cache and cache_file_remove: + os.remove(cache_file_name) + else: + out_of_cache = True + + module.debug(msg="cache is {0}valid".format("not " if out_of_cache else "")) + + return out_of_cache + + +def cache_valid( + module: Any, + cache_file_name: str, + cache_minutes: int = 60, + cache_file_remove: bool = True, +) -> bool: + """ + Prüft, ob eine Cache-Datei älter als `cache_minutes` ist oder gar nicht existiert. + + Gibt True zurück, wenn der Cache abgelaufen ist (oder nicht existiert) und + ggf. gelöscht wurde (wenn cache_file_remove=True). Sonst False. + + :param module: Ansible-Modulobjekt, um Debug-Logs zu schreiben. + :param cache_file_name: Pfad zur Cache-Datei (String). + :param cache_minutes: Maximales Alter in Minuten, danach gilt der Cache als ungültig. + :param cache_file_remove: Ob abgelaufene Cache-Datei gelöscht werden soll. 
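+
+    Example (the module object is whatever AnsibleModule instance is at hand)::
+
+        if cache_valid(module, "/var/cache/ansible/foo.json", cache_minutes=30):
+            ...  # cache missing or expired: rebuild it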
+ """ + path = Path(cache_file_name) + + # Existiert die Datei nicht? → Cache gilt sofort als ungültig + if not path.is_file(): + module.debug(msg=f"Cache-Datei '{cache_file_name}' existiert nicht → ungültig") + return True + + try: + # Verwende mtime (Zeitpunkt der letzten Inhaltsänderung) statt ctime, + # denn ctime kann sich auch durch Metadaten-Änderungen verschieben. + modification_time = datetime.datetime.fromtimestamp(path.stat().st_mtime) + except OSError as e: + module.debug( + msg=f"Fehler beim Lesen der Modifikationszeit von '{cache_file_name}': {e} → Cache ungültig" + ) + return True + + now = datetime.datetime.now() + diff_minutes = (now - modification_time).total_seconds() / 60 + is_expired = diff_minutes > cache_minutes + + module.debug( + msg=f"Cache-Datei '{cache_file_name}' gefunden. Letzte Änderung: {modification_time.isoformat()}" + ) + module.debug(msg=f" → Jetzt: {now.isoformat()}") + module.debug( + msg=f" → Alter: {diff_minutes:.2f} Minuten (Limit: {cache_minutes} Minuten)" + ) + module.debug(msg=f" → Abgelaufen: {is_expired}") + + # Wenn abgelaufen und löschen erwünscht, versuche die Datei zu entfernen + if is_expired and cache_file_remove: + try: + path.unlink() + module.debug( + msg=f" → Alte Cache-Datei '{cache_file_name}' wurde gelöscht." + ) + except OSError as e: + module.debug( + msg=f" → Fehler beim Löschen der Cache-Datei '{cache_file_name}': {e}" + ) + + return is_expired diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/core/plugins/module_utils/checksum.py b/ansible/playbooks/collections/ansible_collections/bodsch/core/plugins/module_utils/checksum.py new file mode 100644 index 0000000..45a3e4d --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/core/plugins/module_utils/checksum.py @@ -0,0 +1,240 @@ +#!/usr/bin/python3 +# -*- coding: utf-8 -*- + +# (c) 2020-2023, Bodo Schulz +# Apache-2.0 (see LICENSE or https://opensource.org/license/apache-2-0) +# SPDX-License-Identifier: Apache-2.0 + +from __future__ import absolute_import, division, print_function + +import hashlib +import json +import os +import time +from typing import Any, Optional, Tuple + +ChecksumValidationResult = Tuple[bool, str, Optional[str]] +ChecksumValidationFromFileResult = Tuple[bool, Optional[str], str] + + +class Checksum: + """ + Helper class for calculating and validating checksums. + + This class is typically used in an Ansible-module context and keeps a reference + to the calling module for optional logging. + + Attributes: + module: An Ansible-like module object. Currently only stored for potential logging. + """ + + def __init__(self, module: Any) -> None: + """ + Initialize the checksum helper. + + Args: + module: An Ansible-like module instance. + + Returns: + None + """ + self.module = module + + def checksum(self, plaintext: Any, algorithm: str = "sha256") -> str: + """ + Compute a checksum for arbitrary input data. + + The input is normalized via :meth:`_harmonize_data` and then hashed with + the requested algorithm. + + Args: + plaintext: Data to hash. Commonly a string, dict, or list. + algorithm: Hashlib algorithm name (e.g. "md5", "sha256", "sha512"). + Defaults to "sha256". + + Returns: + str: Hex digest of the computed checksum. + + Raises: + ValueError: If the hash algorithm is not supported by hashlib. + AttributeError: If the normalized value does not support ``encode("utf-8")``. 
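+
+        Example::
+
+            Checksum(module).checksum({"b": 2, "a": 1})
+            # dicts are JSON-serialized with sorted keys, so key order does
+            # not influence the digest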
+        """
+        _data = self._harmonize_data(plaintext)
+        checksum = hashlib.new(algorithm)
+        checksum.update(_data.encode("utf-8"))
+
+        return checksum.hexdigest()
+
+    def validate(
+        self, checksum_file: str, data: Any = None
+    ) -> ChecksumValidationResult:
+        """
+        Validate (and optionally reset) a checksum file against given data.
+
+        Behavior:
+          - If ``data`` is ``None`` and ``checksum_file`` exists, the checksum file is removed.
+          - If ``checksum_file`` exists, its first line is treated as the previous checksum.
+          - A new checksum is computed from ``data`` and compared to the previous one.
+
+        Args:
+            checksum_file: Path to the checksum file holding a single checksum line.
+            data: Input data to hash and compare. Can be string/dict/list or another type
+                supported by :meth:`_harmonize_data`. If ``None``, the checksum file may be removed.
+
+        Returns:
+            tuple[bool, str, Optional[str]]: (changed, checksum, old_checksum)
+                changed: True if the checksum differs from the stored value (or no stored value exists).
+                checksum: Newly computed checksum hex digest.
+                old_checksum: Previously stored checksum (first line), or ``None`` if not available.
+
+        Raises:
+            ValueError: If the hash algorithm used internally is unsupported.
+            AttributeError: If the normalized data does not support ``encode("utf-8")``.
+        """
+        # self.module.log(msg=f"  - checksum_file '{checksum_file}'")
+        old_checksum: Optional[str] = None
+
+        # NOTE: the previous condition 'not isinstance(data, str) or not
+        # isinstance(data, dict)' was always true; test the intended case.
+        if not isinstance(data, (str, dict)):
+            # self.module.log(msg=f"  - {type(data)} {len(data)}")
+            if data is None and os.path.exists(checksum_file):
+                os.remove(checksum_file)
+
+        if os.path.exists(checksum_file):
+            with open(checksum_file, "r") as f:
+                old_checksum = f.readlines()[0].strip()
+
+        _data = self._harmonize_data(data)
+        checksum = self.checksum(_data)
+        changed = old_checksum != checksum
+
+        return (changed, checksum, old_checksum)
+
+    def validate_from_file(
+        self, checksum_file: str, data_file: str
+    ) -> ChecksumValidationFromFileResult:
+        """
+        Validate a checksum file against the contents of another file.
+
+        Behavior:
+          - If ``data_file`` does not exist but ``checksum_file`` exists, the checksum file is removed.
+          - If ``checksum_file`` exists, its first line is treated as the previous checksum.
+          - A checksum is computed from ``data_file`` and compared to the previous one.
+
+        Args:
+            checksum_file: Path to the checksum file holding a single checksum line.
+            data_file: Path to the file whose contents should be hashed.
+
+        Returns:
+            tuple[bool, Optional[str], str]: (changed, checksum_from_file, old_checksum)
+                changed: True if the checksum differs from the stored value.
+                checksum_from_file: Hex digest checksum of ``data_file`` contents, or ``None`` if
+                    ``data_file`` is not a file.
+                old_checksum: Previously stored checksum (first line), or empty string if not available.
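+
+        Example (paths are hypothetical)::
+
+            changed, new_sum, old_sum = Checksum(module).validate_from_file(
+                "/etc/foo/foo.conf.sha256", "/etc/foo/foo.conf"
+            )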
+ """ + # self.module.log(msg=f" - checksum_file '{checksum_file}'") + old_checksum = "" + + if not os.path.exists(data_file) and os.path.exists(checksum_file): + """ + remove checksum_file, when data_file are removed + """ + os.remove(checksum_file) + + if os.path.exists(checksum_file): + with open(checksum_file, "r", encoding="utf-8") as f: + old_checksum = f.readlines()[0].strip() + + checksum_from_file = self.checksum_from_file(data_file) + changed = not (old_checksum == checksum_from_file) + + return (changed, checksum_from_file, old_checksum) + + def checksum_from_file( + self, + path: str, + read_chunksize: int = 65536, + algorithm: str = "sha256", + ) -> Optional[str]: + """ + Compute checksum of a file's contents. + + The file is read in chunks to avoid loading the full file into memory. + A small ``time.sleep(0)`` is performed per chunk (noop in most cases). + + Args: + path: Path to the file. + read_chunksize: Maximum number of bytes read at once. Defaults to 65536 (64 KiB). + algorithm: Hash algorithm name to use. Defaults to "sha256". + + Returns: + Optional[str]: Hex digest string of the checksum if ``path`` is a file, + otherwise ``None``. + + Raises: + ValueError: If the hash algorithm is not supported by hashlib. + OSError: If the file cannot be opened/read. + """ + if os.path.isfile(path): + checksum = hashlib.new(algorithm) # Raises appropriate exceptions. + with open(path, "rb") as f: + for chunk in iter(lambda: f.read(read_chunksize), b""): + checksum.update(chunk) + # Release greenthread, if greenthreads are not used it is a noop. + time.sleep(0) + + return checksum.hexdigest() + + return None + + def write_checksum(self, checksum_file: str, checksum: Any) -> None: + """ + Write a checksum value to disk (single line with trailing newline). + + Args: + checksum_file: Destination path for the checksum file. + checksum: Checksum value to write. Only written if it is truthy and its string + representation is not empty. + + Returns: + None + + Raises: + OSError: If the file cannot be opened/written. + """ + if checksum and len(str(checksum)) != 0: + with open(checksum_file, "w", encoding="utf-8") as f: + f.write(checksum + "\n") + + def _harmonize_data(self, data: Any) -> Any: + """ + Normalize data into a stable representation for hashing. + + Rules: + - dict: JSON serialized with sorted keys + - list: Concatenation of stringified elements + - str: returned as-is + - other: returns ``data.copy()`` + + Args: + data: Input data. + + Returns: + Any: Normalized representation. For typical input types (dict/list/str) this + is a string. For other types, the return value depends on ``data.copy()``. + + Raises: + AttributeError: If ``data`` is not dict/list/str and does not implement ``copy()``. + TypeError: If JSON serialization fails for dictionaries. 
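+
+        Example::
+
+            _harmonize_data({"b": 2, "a": 1})  # -> '{"a": 1, "b": 2}'
+            _harmonize_data(["a", 1, True])    # -> 'a1True'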
+ """ + # self.module.log(msg=f" - type before: '{type(data)}'") + if isinstance(data, dict): + _data = json.dumps(data, sort_keys=True) + elif isinstance(data, list): + _data = "".join(str(x) for x in data) + elif isinstance(data, str): + _data = data + else: + _data = data.copy() + + # self.module.log(msg=f" - type after : '{type(_data)}'") + return _data diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/core/plugins/module_utils/crypto_utils.py b/ansible/playbooks/collections/ansible_collections/bodsch/core/plugins/module_utils/crypto_utils.py new file mode 100644 index 0000000..f3783a9 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/core/plugins/module_utils/crypto_utils.py @@ -0,0 +1,646 @@ +# -*- coding: utf-8 -*- + +from __future__ import annotations + +from dataclasses import dataclass +from datetime import datetime, timedelta, timezone +from typing import Any, Dict, List, Optional, Union + +try: + from cryptography import x509 + from cryptography.hazmat.backends import default_backend + from cryptography.hazmat.primitives import hashes + from cryptography.x509.oid import ExtensionOID +except ImportError as exc: # pragma: no cover + raise RuntimeError( + "The 'cryptography' Python library is required to use crypto_utils" + ) from exc + + +class OpenSSLObjectError(Exception): + """ + Einfacher Fehler-Typ, um Parsing-/Krypto-Probleme konsistent zu signalisieren. + """ + + pass + + +# ====================================================================== +# Hilfsfunktionen für Zeitverarbeitung +# ====================================================================== + +_ASN1_TIME_FORMAT = "%Y%m%d%H%M%SZ" + + +def _to_utc_naive(dt: datetime) -> datetime: + """ + Konvertiert ein datetime-Objekt nach UTC und entfernt tzinfo. + Naive Datumswerte werden als UTC interpretiert. + """ + if dt.tzinfo is None: + return dt.replace(tzinfo=None) + return dt.astimezone(timezone.utc).replace(tzinfo=None) + + +def _format_asn1_time(dt: Optional[datetime]) -> Optional[str]: + """ + datetime -> ASN.1 TIME (YYYYMMDDHHMMSSZ) oder None. + """ + if dt is None: + return None + dt_utc_naive = _to_utc_naive(dt) + return dt_utc_naive.strftime(_ASN1_TIME_FORMAT) + + +def _parse_asn1_time(value: str, input_name: str) -> datetime: + """ + ASN.1 TIME (YYYYMMDDHHMMSSZ) -> datetime (naiv, UTC). + """ + try: + dt = datetime.strptime(value, _ASN1_TIME_FORMAT) + except ValueError as exc: + raise OpenSSLObjectError( + f"{input_name!r} is not a valid ASN.1 TIME value: {value!r}" + ) from exc + return dt + + +def _parse_relative_spec(spec: str, input_name: str) -> timedelta: + """ + Parsen des relativen Formats (z.B. +32w1d2h3m4s) in ein timedelta. 
+ + Unterstützte Einheiten: + - w: Wochen + - d: Tage + - h: Stunden + - m: Minuten + - s: Sekunden + """ + weeks = days = hours = minutes = seconds = 0 + pos = 0 + length = len(spec) + + while pos < length: + start = pos + while pos < length and spec[pos].isdigit(): + pos += 1 + if start == pos: + raise OpenSSLObjectError( + f"Invalid relative time spec in {input_name!r}: {spec!r}" + ) + number = int(spec[start:pos]) + + if pos >= length: + raise OpenSSLObjectError( + f"Missing time unit in relative time spec for {input_name!r}: {spec!r}" + ) + + unit = spec[pos] + pos += 1 + + if unit == "w": + weeks += number + elif unit == "d": + days += number + elif unit == "h": + hours += number + elif unit == "m": + minutes += number + elif unit == "s": + seconds += number + else: + raise OpenSSLObjectError( + f"Unknown time unit {unit!r} in relative time spec for {input_name!r}: {spec!r}" + ) + + return timedelta( + weeks=weeks, + days=days, + hours=hours, + minutes=minutes, + seconds=seconds, + ) + + +def get_relative_time_option( + value: Optional[str], + input_name: str, + with_timezone: bool = False, + now: Optional[datetime] = None, +) -> Optional[datetime]: + """ + Grob kompatibel zu community.crypto._time.get_relative_time_option. + + Unterstützte Werte: + - None / "" / "none" => None + - ASN.1 TIME: "YYYYMMDDHHMMSSZ" + - relative Zeiten: "[+-]timespec" mit w/d/h/m/s (z.B. "+32w1d2h") + - "always" / "forever" + + Hinweis: + - with_timezone=True gibt tz-aware UTC-datetime zurück. + - with_timezone=False (Default) gibt naives datetime zurück. + + Rückgabe: + - datetime (UTC, tz-aware oder naiv) oder None. + """ + if value is None: + return None + + value = str(value).strip() + if not value or value.lower() == "none": + return None + + # Sonderfälle: always / forever + if value.lower() == "always": + dt = datetime(1970, 1, 1, 0, 0, 1, tzinfo=timezone.utc) + return dt if with_timezone else dt.replace(tzinfo=None) + + if value.lower() == "forever": + dt = datetime(9999, 12, 31, 23, 59, 59, tzinfo=timezone.utc) + return dt if with_timezone else dt.replace(tzinfo=None) + + # Relative Zeitangaben + if value[0] in "+-": + sign = 1 if value[0] == "+" else -1 + spec = value[1:] + delta = _parse_relative_spec(spec, input_name) + + if now is None: + # wir rechnen intern in UTC + now = datetime.utcnow().replace(tzinfo=timezone.utc) + + dt = now + sign * delta + + return dt if with_timezone else dt.replace(tzinfo=None) + + # Absolute Zeit – zuerst ASN.1 TIME probieren + try: + dt = _parse_asn1_time(value, input_name) + + # _parse_asn1_time gibt naiv (UTC) zurück + if with_timezone: + return dt.replace(tzinfo=timezone.utc) + return dt + except OpenSSLObjectError: + # als Fallback ein paar ISO-Formate unterstützen + pass + + # einfache ISO-Formate + # ISO-Formate: YYYY-MM-DD, YYYY-MM-DDTHH:MM:SS, YYYY-MM-DD HH:MM:SS + iso_formats = [ + "%Y-%m-%d", + "%Y-%m-%dT%H:%M:%S", + "%Y-%m-%d %H:%M:%S", + ] + for fmt in iso_formats: + try: + dt = datetime.strptime(value, fmt) + # interpretieren als UTC + dt = dt.replace(tzinfo=timezone.utc) + return dt if with_timezone else dt.replace(tzinfo=None) + except ValueError: + continue + + # Wenn alles scheitert, Fehler werfen + raise OpenSSLObjectError(f"Invalid time format for {input_name!r}: {value!r}") + + +# ====================================================================== +# CRL-Parsing (Ersatz für community.crypto.module_backends.crl_info.get_crl_info) +# ====================================================================== + + +@dataclass +class 
RevokedCertificateInfo: + serial_number: int + revocation_date: Optional[str] + reason: Optional[str] = None + reason_critical: Optional[bool] = None + invalidity_date: Optional[str] = None + invalidity_date_critical: Optional[bool] = None + issuer: Optional[List[str]] = None + issuer_critical: Optional[bool] = None + + +def _load_crl_from_bytes(data: bytes) -> (x509.CertificateRevocationList, str): + """ + Lädt eine CRL aus PEM- oder DER-Daten und gibt (crl_obj, format) zurück. + + format: "pem" oder "der" + """ + if not isinstance(data, (bytes, bytearray)): + raise OpenSSLObjectError("CRL data must be bytes") + + # Einfache Heuristik: BEGIN-Header => PEM + try: + if b"-----BEGIN" in data: + crl = x509.load_pem_x509_crl(data, default_backend()) + return crl, "pem" + else: + crl = x509.load_der_x509_crl(data, default_backend()) + return crl, "der" + + except Exception as exc: + raise OpenSSLObjectError(f"Failed to parse CRL data: {exc}") from exc + + +def get_crl_info( + module, + data: bytes, + list_revoked_certificates: bool = True, +) -> Dict[str, Any]: + """ + CRL-Informationen ähnlich zu community.crypto.module_backends.crl_info.get_crl_info. + + Gibt ein Dict zurück mit u.a.: + - format: "pem" | "der" + - digest: Signaturalgorithmus (z.B. "sha256") + - last_update: ASN.1 TIME (UTC) + - next_update: ASN.1 TIME (UTC) oder None + - revoked_certificates: Liste von Dicts (wenn list_revoked_certificates=True) + """ + crl, crl_format = _load_crl_from_bytes(data) + + # Signaturalgorithmus + try: + digest = crl.signature_hash_algorithm.name + except Exception: + digest = None + + # Zeitstempel + # cryptography hat je nach Version last_update(_utc)/next_update(_utc) + last_update_raw = getattr( + crl, + "last_update", + getattr(crl, "last_update_utc", None), + ) + next_update_raw = getattr( + crl, + "next_update", + getattr(crl, "next_update_utc", None), + ) + + last_update_asn1 = _format_asn1_time(last_update_raw) if last_update_raw else None + next_update_asn1 = _format_asn1_time(next_update_raw) if next_update_raw else None + + # Issuer als einfaches Dict (nicht 1:1 wie community.crypto, aber nützlich) + issuer = {} + try: + for attr in crl.issuer: + # attr.oid._name ist intern, aber meist "commonName", "organizationName", ... + key = getattr(attr.oid, "_name", attr.oid.dotted_string) + issuer[key] = attr.value + except Exception: + issuer = {} + + result: Dict[str, Any] = { + "format": crl_format, + "digest": digest, + "issuer": issuer, + "last_update": last_update_asn1, + "next_update": next_update_asn1, + } + + # Liste der widerrufenen Zertifikate + if list_revoked_certificates: + revoked_list: List[Dict[str, Any]] = [] + for r in crl: + info = RevokedCertificateInfo( + serial_number=r.serial_number, + revocation_date=_format_asn1_time(r.revocation_date), + ) + + # Extensions auswerten (Reason, InvalidityDate, CertificateIssuer) + for ext in r.extensions: + try: + if ext.oid == ExtensionOID.CRL_REASON: + # ext.value.reason.name ist Enum-Name (z.B. 
"KEY_COMPROMISE") + info.reason = ext.value.reason.name.lower() + info.reason_critical = ext.critical + elif ext.oid == ExtensionOID.INVALIDITY_DATE: + info.invalidity_date = _format_asn1_time(ext.value) + info.invalidity_date_critical = ext.critical + elif ext.oid == ExtensionOID.CERTIFICATE_ISSUER: + # Liste von GeneralNames in Strings umwandeln + info.issuer = [str(g) for g in ext.value] + info.issuer_critical = ext.critical + except Exception: + # Fehler in einzelnen Extensions ignorieren, CRL trotzdem weiter auswerten + continue + + revoked_list.append(info.__dict__) + + result["revoked_certificates"] = revoked_list + + return result + + +# ====================================================================== +# Zertifikats-Parsing (Ersatz für CertificateInfoRetrieval) +# ====================================================================== + + +def _split_pem_certificates(data: bytes) -> List[bytes]: + """ + Splittet ein PEM-Blob mit mehreren CERTIFICATE-Objekten in einzelne PEM-Blöcke. + """ + begin = b"-----BEGIN CERTIFICATE-----" + end = b"-----END CERTIFICATE-----" + + parts: List[bytes] = [] + while True: + start = data.find(begin) + if start == -1: + break + stop = data.find(end, start) + if stop == -1: + break + stop = stop + len(end) + block = data[start:stop] + parts.append(block) + data = data[stop:] + return parts + + +def _load_certificates(content: Union[bytes, bytearray, str]) -> List[x509.Certificate]: + """ + Lädt ein oder mehrere X.509-Zertifikate aus PEM oder DER. + """ + if isinstance(content, str): + content_bytes = content.encode("utf-8") + elif isinstance(content, (bytes, bytearray)): + content_bytes = bytes(content) + else: + raise OpenSSLObjectError("Certificate content must be bytes or str") + + certs: List[x509.Certificate] = [] + + try: + if b"-----BEGIN CERTIFICATE-----" in content_bytes: + for block in _split_pem_certificates(content_bytes): + certs.append(x509.load_pem_x509_certificate(block, default_backend())) + else: + certs.append( + x509.load_der_x509_certificate(content_bytes, default_backend()) + ) + except Exception as exc: + raise OpenSSLObjectError(f"Failed to parse certificate(s): {exc}") from exc + + if not certs: + raise OpenSSLObjectError("No certificate found in content") + + return certs + + +def _name_to_dict_and_ordered(name: x509.Name) -> (Dict[str, str], List[List[str]]): + """ + Konvertiert ein x509.Name in + - dict: {oid_name: value} + - ordered: [[oid_name, value], ...] + Letzte Wiederholung gewinnt im Dict (wie x509_certificate_info). + """ + result: Dict[str, str] = {} + ordered: List[List[str]] = [] + + for rdn in name.rdns: + for attr in rdn: + key = getattr(attr.oid, "_name", attr.oid.dotted_string) + value = attr.value + result[key] = value + ordered.append([key, value]) + + return result, ordered + + +def _get_subject_alt_name( + cert: x509.Certificate, +) -> (Optional[List[str]], Optional[bool]): + """ + Liest subjectAltName und gibt (liste, critical) zurück. + Liste-Elemente sind Strings wie "DNS:example.com", "IP:1.2.3.4". + """ + try: + ext = cert.extensions.get_extension_for_class(x509.SubjectAlternativeName) + except x509.ExtensionNotFound: + return None, None + + values: List[str] = [] + for gn in ext.value: + # cryptography gibt sinnvolle __str__()-Repräsentationen + values.append(str(gn)) + + return values, ext.critical + + +def _compute_fingerprints(cert: x509.Certificate) -> Dict[str, str]: + """ + Fingerprints des gesamten Zertifikats, für gängige Hashes. + Hex mit ":" getrennt (wie community.crypto). 
+ """ + algorithms = [ + ("sha1", hashes.SHA1()), + ("sha224", hashes.SHA224()), + ("sha256", hashes.SHA256()), + ("sha384", hashes.SHA384()), + ("sha512", hashes.SHA512()), + ] + result: Dict[str, str] = {} + + for name, algo in algorithms: + try: + fp_bytes = cert.fingerprint(algo) + except Exception: + continue + result[name] = ":".join(f"{b:02x}" for b in fp_bytes) + + return result + + +class CertificateInfoRetrieval: + """ + Ersatz für community.crypto CertificateInfoRetrieval. + + Nutzung: + cert_info = CertificateInfoRetrieval( + module=module, + content=data, + valid_at=module.params.get("valid_at"), + ) + info = cert_info.get_info(prefer_one_fingerprint=False) + + Wichtige Keys im Rückgabewert: + - not_before (ASN.1 TIME) + - not_after (ASN.1 TIME) + - expired (bool) + - subject, subject_ordered + - issuer, issuer_ordered + - subject_alt_name + - fingerprints + - valid_at + """ + + def __init__( + self, + module=None, + content: Union[bytes, bytearray, str] = None, + valid_at: Optional[Dict[str, str]] = None, + ) -> None: + self.module = module + if content is None: + raise OpenSSLObjectError("CertificateInfoRetrieval requires 'content'") + self._certs: List[x509.Certificate] = _load_certificates(content) + self._valid_at_specs: Dict[str, str] = valid_at or {} + + def _get_primary_cert(self) -> x509.Certificate: + """ + Für deine Nutzung reicht das erste Zertifikat (Leaf). + """ + return self._certs[0] + + def _compute_valid_at( + self, + not_before_raw: Optional[datetime], + not_after_raw: Optional[datetime], + ) -> Dict[str, bool]: + """ + Erzeugt das valid_at-Dict basierend auf self._valid_at_specs. + Semantik: gültig, wenn + not_before <= t <= not_after + (alle Zeiten in UTC). + """ + result: Dict[str, bool] = {} + if not self._valid_at_specs: + return result + + # Grenzen in UTC-aware umwandeln + nb_utc: Optional[datetime] = None + na_utc: Optional[datetime] = None + + if not_before_raw is not None: + # _to_utc_naive gibt naive UTC; hier machen wir tz-aware + nb_utc = _to_utc_naive(not_before_raw).replace(tzinfo=timezone.utc) + if not_after_raw is not None: + na_utc = _to_utc_naive(not_after_raw).replace(tzinfo=timezone.utc) + + for name, spec in self._valid_at_specs.items(): + try: + point = get_relative_time_option( + value=spec, + input_name=f"valid_at[{name}]", + with_timezone=True, + ) + except OpenSSLObjectError: + # ungültige Zeitangabe → False + result[name] = False + continue + + if point is None: + # None interpretieren wir als "kein Check" + result[name] = False + continue + + is_valid = True + if nb_utc is not None and point < nb_utc: + is_valid = False + if na_utc is not None and point > na_utc: + is_valid = False + + result[name] = is_valid + + return result + + def get_info(self, prefer_one_fingerprint: bool = False) -> Dict[str, Any]: + """ + Liefert ein Info-Dict. + + prefer_one_fingerprint: + - False (Default): 'fingerprints' enthält mehrere Hashes. + - True: zusätzlich 'fingerprint' / 'public_key_fingerprint' mit bevorzugtem Algo + (sha256, Fallback sha1). 
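+
+        Example (illustrative; 'pem_bytes' is a hypothetical PEM blob)::
+
+            info = CertificateInfoRetrieval(module=None, content=pem_bytes).get_info()
+            info["expired"], info["not_after"]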
+ """ + cert = self._get_primary_cert() + + # Zeit + not_before_raw = getattr( + cert, + "not_valid_before", + getattr(cert, "not_valid_before_utc", None), + ) + not_after_raw = getattr( + cert, + "not_valid_after", + getattr(cert, "not_valid_after_utc", None), + ) + + not_before_asn1 = _format_asn1_time(not_before_raw) if not_before_raw else None + not_after_asn1 = _format_asn1_time(not_after_raw) if not_after_raw else None + + now_utc_naive = datetime.utcnow() + expired = False + if not_after_raw is not None: + expired = now_utc_naive > _to_utc_naive(not_after_raw) + + # Subject / Issuer + subject, subject_ordered = _name_to_dict_and_ordered(cert.subject) + issuer, issuer_ordered = _name_to_dict_and_ordered(cert.issuer) + + # SAN + subject_alt_name, subject_alt_name_critical = _get_subject_alt_name(cert) + + # Fingerprints + fingerprints = _compute_fingerprints(cert) + + # Optional: Public-Key-Fingerprints, wenn du sie brauchst + public_key_fingerprints: Dict[str, str] = {} + try: + pk = cert.public_key() + der = pk.public_bytes( + encoding=x509.Encoding.DER, # type: ignore[attr-defined] + format=x509.PublicFormat.SubjectPublicKeyInfo, # type: ignore[attr-defined] + ) + # kleines Re-Mapping, um _compute_fingerprints wiederzuverwenden + pk_cert = x509.load_der_x509_certificate(der, default_backend()) + public_key_fingerprints = _compute_fingerprints(pk_cert) + except Exception: + public_key_fingerprints = {} + + # valid_at + valid_at = self._compute_valid_at(not_before_raw, not_after_raw) + + info: Dict[str, Any] = { + "not_before": not_before_asn1, + "not_after": not_after_asn1, + "expired": expired, + "subject": subject, + "subject_ordered": subject_ordered, + "issuer": issuer, + "issuer_ordered": issuer_ordered, + "subject_alt_name": subject_alt_name, + "subject_alt_name_critical": subject_alt_name_critical, + "fingerprints": fingerprints, + "public_key_fingerprints": public_key_fingerprints, + "valid_at": valid_at, + } + + # prefer_one_fingerprint: wähle "bevorzugten" Algo (sha256, sonst sha1) + if prefer_one_fingerprint: + + def _pick_fp(src: Dict[str, str]) -> Optional[str]: + if not src: + return None + for algo in ("sha256", "sha1", "sha512"): + if algo in src: + return src[algo] + # Fallback: irgend einen nehmen + return next(iter(src.values())) + + fp = _pick_fp(fingerprints) + if fp is not None: + info["fingerprint"] = fp + + pk_fp = _pick_fp(public_key_fingerprints) + if pk_fp is not None: + info["public_key_fingerprint"] = pk_fp + + return info diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/core/plugins/module_utils/deb822_repo.py b/ansible/playbooks/collections/ansible_collections/bodsch/core/plugins/module_utils/deb822_repo.py new file mode 100644 index 0000000..eab3ba8 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/core/plugins/module_utils/deb822_repo.py @@ -0,0 +1,1148 @@ +#!/usr/bin/python3 +# -*- coding: utf-8 -*- + +# (c) 2020-2025, Bodo Schulz +# Apache-2.0 (see LICENSE or https://opensource.org/license/apache-2-0) +# SPDX-License-Identifier: Apache-2.0 + +from __future__ import annotations + +import os +import re +import tempfile +from dataclasses import dataclass +from hashlib import sha256 +from typing import Any, Dict, List, Mapping, Optional, Sequence, Tuple + +from ansible.module_utils.urls import fetch_url + +_FILENAME_RE = re.compile(r"^[A-Za-z0-9_.-]+$") + + +@dataclass(frozen=True) +class KeyResult: + """ + Result of key management operations. 
diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/core/plugins/module_utils/deb822_repo.py b/ansible/playbooks/collections/ansible_collections/bodsch/core/plugins/module_utils/deb822_repo.py
new file mode 100644
index 0000000..eab3ba8
--- /dev/null
+++ b/ansible/playbooks/collections/ansible_collections/bodsch/core/plugins/module_utils/deb822_repo.py
@@ -0,0 +1,1148 @@
+#!/usr/bin/python3
+# -*- coding: utf-8 -*-
+
+# (c) 2020-2025, Bodo Schulz
+# Apache-2.0 (see LICENSE or https://opensource.org/license/apache-2-0)
+# SPDX-License-Identifier: Apache-2.0
+
+from __future__ import annotations
+
+import os
+import re
+import tempfile
+from dataclasses import dataclass
+from hashlib import sha256
+from typing import Any, Dict, List, Mapping, Optional, Sequence, Tuple
+
+from ansible.module_utils.urls import fetch_url
+
+_FILENAME_RE = re.compile(r"^[A-Za-z0-9_.-]+$")
+
+
+@dataclass(frozen=True)
+class KeyResult:
+    """
+    Result of key management operations.
+
+    Attributes:
+        changed: Whether the operation modified (or would modify in check mode) any managed key material.
+        key_path: Path to the keyring file that should be referenced by ``Signed-By`` (if applicable).
+        deb_path: Path to a cached keyring ``.deb`` file when ``method=deb`` is used.
+        package_name: Name of the keyring package (when ``method=deb`` is used and the name could be determined).
+        package_version: Package version from the downloaded ``.deb`` (when ``method=deb`` is used).
+        messages: Human-readable messages describing performed actions.
+    """
+
+    changed: bool
+    key_path: Optional[str]
+    deb_path: Optional[str]
+    package_name: Optional[str]
+    package_version: Optional[str]
+    messages: Tuple[str, ...] = ()
+
+
+@dataclass(frozen=True)
+class RepoResult:
+    """
+    Result of repository file management.
+
+    Attributes:
+        changed: Whether the repository file was created/updated (or would be updated in check mode).
+        repo_path: Absolute path of the managed ``.sources`` file.
+        rendered: Rendered deb822 file content (newline terminated).
+    """
+
+    changed: bool
+    repo_path: str
+    rendered: str
+
+
+@dataclass(frozen=True)
+class Deb822RepoSpec:
+    """
+    Minimal deb822 stanza representation for APT ``.sources`` files.
+
+    This data structure models the fields written to a deb822 repository file. Multi-value
+    fields are rendered as space-separated lists.
+
+    Validation rules:
+      - ``types``, ``uris`` and ``suites`` must be non-empty.
+      - If any suite ends with ``/`` (path-style suite), ``components`` must be empty.
+      - Otherwise at least one component is required.
+
+    The rendered output is stable and always ends with a newline.
+    """
+
+    types: Sequence[str]
+    uris: Sequence[str]
+    suites: Sequence[str]
+    components: Sequence[str]
+    architectures: Sequence[str]
+    enabled: bool
+    signed_by: Optional[str]
+
+    def validate(self) -> None:
+        """
+        Validate the spec for deb822 output.
+
+        Raises:
+            ValueError: If required fields are missing or if suites/components violate deb822 rules.
+        """
+        if not self.types:
+            raise ValueError("'types' must not be empty.")
+        if not self.uris:
+            raise ValueError("'uris' must not be empty.")
+        if not self.suites:
+            raise ValueError("'suites' must not be empty.")
+
+        # Per Ansible's deb822_repository docs: if suite is a path (ends with '/'),
+        # components must be omitted; otherwise at least one component is required.
+        any_path_suite = any(s.endswith("/") for s in self.suites)
+        if any_path_suite and self.components:
+            raise ValueError(
+                "When any suite ends with '/', 'components' must be empty."
+            )
+        if (not any_path_suite) and (not self.components):
+            raise ValueError(
+                "When suite is not a path, at least one 'component' is required."
+            )
+
+    def render(self) -> str:
+        """
+        Render the spec as a deb822 formatted stanza.
+
+        Returns:
+            The rendered ``.sources`` content as UTF-8 text, always newline terminated.
+
+        Raises:
+            ValueError: If the spec is invalid (see :meth:`validate`).
+        """
+        self.validate()
+
+        lines: List[str] = []
+        lines.append(f"Types: {' '.join(self.types)}")
+        lines.append(f"URIs: {' '.join(self.uris)}")
+        lines.append(f"Suites: {' '.join(self.suites)}")
+
+        if self.components:
+            lines.append(f"Components: {' '.join(self.components)}")
+        if self.architectures:
+            lines.append(f"Architectures: {' '.join(self.architectures)}")
+
+        # Enabled accepts yes/no (deb822); keep output stable.
+        lines.append(f"Enabled: {'yes' if self.enabled else 'no'}")
+
+        if self.signed_by:
+            lines.append(f"Signed-By: {self.signed_by}")
+
+        return "\n".join(lines) + "\n"
+
+
+class Deb822RepoManager:
+    """
+    High-level helper for managing deb822 APT repositories and key material.
+
+    Responsibilities:
+      - Ensure repository signing keys using one of the supported methods:
+        * ``none``: do not manage keys
+        * ``download``: download a key file (optional checksum), optionally dearmor and validate it
+        * ``deb``: download and install a keyring ``.deb`` package
+      - Ensure/remove a deb822 ``.sources`` file with a stable rendering.
+      - Optionally run ``apt-get update`` after changes.
+
+    Error handling:
+      - All unrecoverable errors are reported via the provided Ansible module's ``fail_json``.
+    """
+
+    def __init__(self, module: Any) -> None:
+        """
+        Create a new manager bound to an Ansible module instance.
+
+        Args:
+            module: Ansible module object providing ``run_command()``, ``fail_json()``, ``atomic_move()``
+                and (optionally) ``tmpdir``.
+        """
+        self._m = module
+        self._m.log("Deb822RepoManager::__init__()")
+
+    # -------------------------
+    # Public orchestration API
+    # -------------------------
+
+    def ensure_key(self, *, key_cfg: Mapping[str, Any], check_mode: bool) -> KeyResult:
+        """
+        Ensure key material for the repository according to ``key_cfg``.
+
+        Args:
+            key_cfg: Key configuration mapping. Expected keys depend on the chosen method.
+            check_mode: If True, perform a dry-run and report whether changes would occur.
+
+        Returns:
+            A :class:`KeyResult` describing what was changed and where the key material is located.
+
+        Raises:
+            The module will call ``fail_json`` if the configuration is invalid or required tooling fails.
+        """
+        self._m.log(
+            f"Deb822RepoManager::ensure_key(key_cfg: {key_cfg}, check_mode: {check_mode})"
+        )
+
+        method = (key_cfg.get("method") or "none").lower()
+        if method == "none":
+            return KeyResult(
+                changed=False,
+                key_path=None,
+                deb_path=None,
+                package_name=None,
+                package_version=None,
+            )
+
+        if method == "download":
+            return self._ensure_key_download(key_cfg=key_cfg, check_mode=check_mode)
+
+        if method == "deb":
+            return self._ensure_key_deb(key_cfg=key_cfg, check_mode=check_mode)
+
+        self._m.fail_json(
+            msg=f"Unsupported key.method={method!r}. Use one of: none, download, deb."
+        )
+
+    def ensure_repo_file(
+        self,
+        *,
+        repo_path: str,
+        spec: Deb822RepoSpec,
+        mode: int,
+        check_mode: bool,
+    ) -> RepoResult:
+        """
+        Ensure the deb822 repository file exists with the desired content.
+
+        Args:
+            repo_path: Destination path of the ``.sources`` file.
+            spec: Repository spec to render.
+            mode: File permissions mode (octal int, e.g. ``0o644``).
+            check_mode: If True, do not write, only report whether content would change.
+
+        Returns:
+            A :class:`RepoResult` including the rendered content.
+        """
+        self._m.log(
+            f"Deb822RepoManager::ensure_repo_file(repo_path: {repo_path}, spec: {spec}, mode: {mode}, check_mode: {check_mode})"
+        )
+
+        rendered = spec.render()
+        changed = self._ensure_file_contents(
+            dest=repo_path,
+            data=rendered.encode("utf-8"),
+            mode=mode,
+            check_mode=check_mode,
+        )
+        return RepoResult(changed=changed, repo_path=repo_path, rendered=rendered)
+
+    def remove_file(self, *, path: str, check_mode: bool) -> bool:
+        """
+        Remove a file if it exists.
+
+        Args:
+            path: Path to remove.
+            check_mode: If True, do not remove but report that a change would occur.
+ + Returns: + True if the file was removed (or would be removed in check mode), otherwise False. + """ + self._m.log( + f"Deb822RepoManager::remove_file(path: {path}, check_mode: {check_mode})" + ) + + if not os.path.exists(path): + return False + if check_mode: + return True + try: + os.remove(path) + return True + except OSError as exc: + self._m.fail_json(msg=f"Failed to remove {path}: {exc!s}") + + def apt_update(self, *, check_mode: bool) -> Tuple[bool, str]: + """ + Run ``apt-get update`` non-interactively. + + Args: + check_mode: If True, do not execute and return a message indicating a dry-run. + + Returns: + Tuple ``(changed, output)`` where ``changed`` is True when the command would run/ran. + """ + self._m.log(f"Deb822RepoManager::apt_update(check_mode: {check_mode})") + + if check_mode: + return True, "check_mode: would run apt-get update" + rc, out, err = self._m.run_command( + ["apt-get", "update"], + check_rc=False, + environ_update={"DEBIAN_FRONTEND": "noninteractive"}, + ) + if rc != 0: + self._m.fail_json(msg=f"apt-get update failed (rc={rc}): {err or out}") + return True, out.strip() + + def remove_key( + self, + *, + key_cfg: Mapping[str, Any], + signed_by: Optional[str] = None, + check_mode: bool, + ) -> KeyResult: + """ + Remove key material managed by this module. + + Behavior depends on ``key_cfg.method``: + + - ``download``: removes the key file at ``key_cfg.dest`` (or ``signed_by`` as fallback). + - ``deb``: removes the keyring package if installed and optionally removes the cached ``.deb`` file. + + Args: + key_cfg: Key configuration mapping. + signed_by: Explicit ``Signed-By`` path used by the repo (optional). + check_mode: If True, do not change the system but report what would be removed. + + Returns: + A :class:`KeyResult` with ``changed=True`` if anything was removed (or would be removed). 
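+
+        Example (illustrative; ``mgr`` is a Deb822RepoManager instance):
+
+            result = mgr.remove_key(
+                key_cfg={"method": "download", "dest": "/etc/apt/keyrings/example.gpg"},
+                check_mode=module.check_mode,
+            )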
+ """ + self._m.log( + f"Deb822RepoManager::remove_key(key_cfg: {key_cfg}, signed_by: {signed_by}, check_mode: {check_mode})" + ) + + method = (key_cfg.get("method") or "none").lower() + + if method == "none": + return KeyResult( + changed=False, + key_path=None, + deb_path=None, + package_name=None, + package_version=None, + ) + + if method == "download": + dest = key_cfg.get("dest") or signed_by + if not dest: + self._m.fail_json( + msg="key.method=download requires key.dest (or signed_by to remove)" + ) + + removed = self.remove_file(path=str(dest), check_mode=check_mode) + return KeyResult( + changed=removed, + key_path=str(dest), + deb_path=None, + package_name=None, + package_version=None, + messages=( + (f"{'would remove' if check_mode else 'removed'} key file: {dest}",) + if removed + else ("key file already absent",) + ), + ) + + if method == "deb": + deb_path = ( + key_cfg.get("deb_cache_path") or "/var/cache/apt/repo-keyring.deb" + ) + explicit_pkg = key_cfg.get("package_name") + keyring_path = key_cfg.get("deb_keyring_path") or signed_by + + pkg_name: Optional[str] = None + pkg_ver: Optional[str] = None + messages: List[str] = [] + + if explicit_pkg: + pkg_name = str(explicit_pkg) + messages.append(f"using explicit package_name={pkg_name}") + elif deb_path and os.path.exists(deb_path): + pkg_name, pkg_ver = self._dpkg_deb_fields(deb_path) + messages.append(f"determined package from deb: {pkg_name}") + elif keyring_path: + pkg_name = self._dpkg_owns_path(str(keyring_path)) + if pkg_name: + messages.append( + f"determined package from keyring owner: {pkg_name}" + ) + + changed = False + + # Remove package if installed + if pkg_name: + installed_ver = self._dpkg_query_version(pkg_name) + if installed_ver: + if check_mode: + changed = True + messages.append( + f"check_mode: would remove package {pkg_name} (installed {installed_ver})" + ) + else: + self._apt_remove_package(pkg_name) + changed = True + messages.append( + f"removed package {pkg_name} (was {installed_ver})" + ) + else: + messages.append(f"package {pkg_name} not installed") + else: + messages.append( + "could not determine package name to remove (set key.package_name, or provide deb_cache_path, or signed_by/deb_keyring_path)" + ) + + # Remove cached deb file (best-effort) + if deb_path and os.path.exists(deb_path): + if check_mode: + changed = True + messages.append(f"check_mode: would remove cached deb: {deb_path}") + else: + try: + os.remove(deb_path) + changed = True + messages.append(f"removed cached deb: {deb_path}") + except OSError as exc: + self._m.fail_json( + msg=f"Failed to remove cached deb {deb_path}: {exc!s}" + ) + + return KeyResult( + changed=changed, + key_path=str(keyring_path) if keyring_path else None, + deb_path=str(deb_path) if deb_path else None, + package_name=pkg_name, + package_version=pkg_ver, + messages=tuple(messages), + ) + + self._m.fail_json( + msg=f"Unsupported key.method={method!r}. Use one of: none, download, deb." + ) + + def _dpkg_owns_path(self, path: str) -> Optional[str]: + """ + Determine which dpkg package owns a given file path. + + Args: + path: Absolute file path. + + Returns: + The owning package name (including optional architecture suffix) or None if not owned. 
+ """ + self._m.log(f"Deb822RepoManager::_dpkg_owns_path(path: {path})") + + rc, out, _ = self._m.run_command(["dpkg-query", "-S", path], check_rc=False) + if rc != 0: + return None + + # Format examples: + # debsuryorg-archive-keyring:amd64: /usr/share/keyrings/debsuryorg-archive-keyring.gpg + # somepkg: /etc/apt/keyrings/some.gpg + for line in (ln.strip() for ln in out.splitlines() if ln.strip()): + pkg_part = line.split(": ", 1)[0].strip() # keeps optional ":amd64" + if pkg_part: + return pkg_part + return None + + def _apt_remove_package(self, pkg_name: str) -> None: + """ + Remove a package using ``apt-get`` in non-interactive mode. + + Args: + pkg_name: Package name to remove. + + Raises: + The module will call ``fail_json`` if removal fails. + """ + self._m.log(f"Deb822RepoManager::_apt_remove_package(pkg_name: {pkg_name})") + + rc, out, err = self._m.run_command( + ["apt-get", "-y", "remove", pkg_name], + check_rc=False, + environ_update={"DEBIAN_FRONTEND": "noninteractive"}, + ) + if rc != 0: + self._m.fail_json( + msg=f"Failed to remove package {pkg_name} (rc={rc}): {err or out}" + ) + + # ------------------------- + # Key: download method + # ------------------------- + + def _ensure_key_download( + self, *, key_cfg: Mapping[str, Any], check_mode: bool + ) -> KeyResult: + """ + Ensure a repo-specific keyring file by downloading it. + + This method: + 1) downloads a key file to a temporary location (optional checksum verification) + 2) optionally dearmors ASCII keys into a binary keyring via ``gpg --dearmor`` + 3) optionally validates the key via ``gpg --show-keys`` + 4) atomically installs/updates the destination file and enforces file permissions + + Args: + key_cfg: Key configuration (expects ``url`` and ``dest``). + check_mode: If True, do not write any files. + + Returns: + A :class:`KeyResult`. 
+ """ + self._m.log( + f"Deb822RepoManager::_ensure_key_download(key_cfg: {key_cfg}, check_mode: {check_mode})" + ) + + url = key_cfg.get("url") + if not url: + self._m.fail_json(msg="key.method=download requires key.url") + + dest = key_cfg.get("dest") + if not dest: + self._m.fail_json(msg="key.method=download requires key.dest") + + file_mode = self._parse_mode(key_cfg.get("mode", "0644")) + dearmor = bool(key_cfg.get("dearmor", True)) + validate = bool(key_cfg.get("validate", True)) + checksum = key_cfg.get("checksum") + + # Download to temp, compute sha256, optional verify checksum + tmp_raw, raw_sha = self._download_to_temp(url=url, checksum=checksum) + + # Decide ASCII armored or binary + is_ascii = self._looks_like_ascii_armored(tmp_raw) + + # Optionally dearmor + tmp_final = tmp_raw + messages: List[str] = [] + if is_ascii and dearmor: + tmp_final = self._temp_path(suffix=".gpg") + self._run_gpg_dearmor(src=tmp_raw, dst=tmp_final) + messages.append("dearmored ASCII key to binary keyring") + + # Optional validation (requires gpg) + if validate: + self._run_gpg_show_keys(path=tmp_final) + messages.append("validated key via gpg --show-keys") + + # If destination exists and content is identical, keep unchanged + final_sha = self._sha256_file(tmp_final) + if os.path.exists(dest) and self._sha256_file(dest) == final_sha: + self._safe_unlink(tmp_raw) + if tmp_final != tmp_raw: + self._safe_unlink(tmp_final) + self._ensure_mode(dest, file_mode, check_mode=check_mode) + return KeyResult( + changed=False, + key_path=dest, + deb_path=None, + package_name=None, + package_version=None, + messages=tuple(messages + ["key unchanged"]), + ) + + if check_mode: + self._safe_unlink(tmp_raw) + if tmp_final != tmp_raw: + self._safe_unlink(tmp_final) + return KeyResult( + changed=True, + key_path=dest, + deb_path=None, + package_name=None, + package_version=None, + messages=tuple(messages + ["check_mode: would write key"]), + ) + + # Write/update dest atomically + self._atomic_move(tmp_final, dest, mode=file_mode) + + # Cleanup raw temp if different + if tmp_final != tmp_raw: + self._safe_unlink(tmp_raw) + + return KeyResult( + changed=True, + key_path=dest, + deb_path=None, + package_name=None, + package_version=None, + messages=tuple(messages + ["key updated"]), + ) + + # ------------------------- + # Key: deb method + # ------------------------- + + def _ensure_key_deb( + self, *, key_cfg: Mapping[str, Any], check_mode: bool + ) -> KeyResult: + """ + Ensure repo key material by installing a keyring ``.deb`` package. + + This method ensures the keyring ``.deb`` is downloaded to ``deb_cache_path`` (idempotent by + content hash), extracts its package name/version, and installs it if required. + + Args: + key_cfg: Key configuration (expects ``url``; optionally ``deb_cache_path`` and ``deb_keyring_path``). + check_mode: If True, do not install packages or write files. + + Returns: + A :class:`KeyResult` describing downloads/installs and detected keyring path. 
+ """ + self._m.log( + f"Deb822RepoManager::_ensure_key_deb(key_cfg: {key_cfg}, check_mode: {check_mode})" + ) + + url = key_cfg.get("url") + if not url: + self._m.fail_json(msg="key.method=deb requires key.url") + + deb_path = key_cfg.get("deb_cache_path") or "/var/cache/apt/repo-keyring.deb" + file_mode = self._parse_mode(key_cfg.get("mode", "0644")) + checksum = key_cfg.get("checksum") + + # Ensure deb file on disk (idempotent by hash compare) + deb_changed = self._ensure_downloaded_file( + url=url, + dest=deb_path, + mode=file_mode, + checksum=checksum, + check_mode=check_mode, + ) + + # Extract package metadata from deb + pkg_name, pkg_ver = self._dpkg_deb_fields(deb_path) + + installed_ver = self._dpkg_query_version(pkg_name) + needs_install = (installed_ver is None) or (installed_ver != pkg_ver) + + keyring_path = key_cfg.get("deb_keyring_path") + if not keyring_path: + keyring_path = self._find_keyring_path_in_deb(deb_path) + + msgs: List[str] = [] + if deb_changed: + msgs.append("downloaded/updated keyring .deb") + if installed_ver is None: + msgs.append("keyring package not installed") + else: + msgs.append(f"installed version: {installed_ver}") + + if needs_install: + if check_mode: + return KeyResult( + changed=True, + key_path=keyring_path, + deb_path=deb_path, + package_name=pkg_name, + package_version=pkg_ver, + messages=tuple( + msgs + [f"check_mode: would install {pkg_name}={pkg_ver}"] + ), + ) + + rc, out, err = self._m.run_command( + ["apt-get", "-y", "install", deb_path], + check_rc=False, + environ_update={"DEBIAN_FRONTEND": "noninteractive"}, + ) + if rc != 0: + self._m.fail_json( + msg=f"Failed to install keyring deb (rc={rc}): {err or out}" + ) + msgs.append(f"installed {pkg_name}={pkg_ver}") + + return KeyResult( + changed=True, + key_path=keyring_path, + deb_path=deb_path, + package_name=pkg_name, + package_version=pkg_ver, + messages=tuple(msgs), + ) + + # No install needed (same version) + return KeyResult( + changed=bool(deb_changed), + key_path=keyring_path, + deb_path=deb_path, + package_name=pkg_name, + package_version=pkg_ver, + messages=tuple(msgs + ["install not required"]), + ) + + # ------------------------- + # Helpers: download / files + # ------------------------- + + def validate_filename(self, filename: str) -> None: + # deb822 repo files must end with .sources, name restrictions are conventional; + # keep it strict to avoid weird paths. :contentReference[oaicite:4]{index=4} + """ + Validate a repository filename for use under ``sources.list.d``. + + Args: + filename: Filename to validate. + + Raises: + ValueError: If the filename contains invalid characters or does not end with ``.sources``. + """ + self._m.log(f"Deb822RepoManager::validate_filename(filename: {filename})") + + if not _FILENAME_RE.match(filename): + raise ValueError( + "filename may only contain letters, digits, underscore, hyphen, and period" + ) + if not filename.endswith(".sources"): + raise ValueError("filename must end with .sources") + + def _ensure_downloaded_file( + self, + *, + url: str, + dest: str, + mode: int, + checksum: Optional[str], + check_mode: bool, + ) -> bool: + """ + Download a URL to ``dest`` if content differs. + + The destination file is updated only when the downloaded content hash differs from the + existing file, providing stable idempotency. + + Args: + url: Source URL. + dest: Destination file path. + mode: File permissions mode (octal int). + checksum: Optional expected SHA256 checksum for integrity. + check_mode: If True, do not write any files. 
+ + Returns: + True if the destination would change/changed, otherwise False. + """ + self._m.log( + f"Deb822RepoManager::_ensure_downloaded_file(url: {url}, dest: {dest}, mode: {mode}, checksum: {checksum}, check_mode: {check_mode})" + ) + + tmp, _ = self._download_to_temp(url=url, checksum=checksum) + # Compare hash to dest + tmp_sha = self._sha256_file(tmp) + if os.path.exists(dest) and self._sha256_file(dest) == tmp_sha: + self._safe_unlink(tmp) + self._ensure_mode(dest, mode, check_mode=check_mode) + return False + + if check_mode: + self._safe_unlink(tmp) + return True + + self._atomic_move(tmp, dest, mode=mode) + return True + + def _download_to_temp( + self, *, url: str, checksum: Optional[str] + ) -> Tuple[str, str]: + """ + Download a URL to a temporary file and compute its SHA256. + + Args: + url: Source URL. + checksum: Optional expected SHA256 checksum; mismatches cause ``fail_json``. + + Returns: + Tuple ``(tmp_path, sha256_hex)``. + """ + self._m.log( + f"Deb822RepoManager::_download_to_temp(url: {url}, checksum: {checksum})" + ) + + tmp = self._temp_path() + resp, info = fetch_url(self._m, url, method="GET") + status = int(info.get("status", 0)) + if status < 200 or status >= 300: + self._safe_unlink(tmp) + self._m.fail_json( + msg=f"Failed to download {url} (HTTP {status}): {info.get('msg')}" + ) + + h = sha256() + try: + with open(tmp, "wb") as f: + while True: + chunk = resp.read(1024 * 256) + if not chunk: + break + f.write(chunk) + h.update(chunk) + finally: + try: + resp.close() + except Exception: + pass + + digest = h.hexdigest() + if checksum and checksum.lower() != digest.lower(): + self._safe_unlink(tmp) + self._m.fail_json( + msg=f"Checksum mismatch for {url}: expected {checksum}, got {digest}" + ) + + return tmp, digest + + def _ensure_file_contents( + self, *, dest: str, data: bytes, mode: int, check_mode: bool + ) -> bool: + """ + Ensure that ``dest`` contains exactly ``data``. + + Args: + dest: Destination file path. + data: Desired file content. + mode: File permissions mode (octal int). + check_mode: If True, do not write any files. + + Returns: + True if the file would change/changed, otherwise False. + """ + self._m.log( + f"Deb822RepoManager::_ensure_file_contents(dest: {dest}, data, mode: {mode}, check_mode: {check_mode})" + ) + + current = None + if os.path.exists(dest): + try: + with open(dest, "rb") as f: + current = f.read() + except OSError as exc: + self._m.fail_json(msg=f"Failed to read {dest}: {exc!s}") + + if current == data: + self._ensure_mode(dest, mode, check_mode=check_mode) + return False + + if check_mode: + return True + + tmp = self._temp_path() + try: + with open(tmp, "wb") as f: + f.write(data) + except OSError as exc: + self._safe_unlink(tmp) + self._m.fail_json(msg=f"Failed to write temp file for {dest}: {exc!s}") + + self._atomic_move(tmp, dest, mode=mode) + + return True + + def _atomic_move(self, src: str, dest: str, *, mode: int) -> None: + """ + Atomically move ``src`` to ``dest`` and apply file permissions. + + Args: + src: Temporary source file path. + dest: Destination file path. + mode: File permissions mode (octal int). + + Raises: + The module will call ``fail_json`` on failure. 
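+
+        Example (illustrative):
+
+            tmp = self._temp_path(suffix=".sources")
+            self._atomic_move(tmp, "/etc/apt/sources.list.d/example.sources", mode=0o644)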
+ """ + self._m.log( + f"Deb822RepoManager::_atomic_move(src: {src}, dest: {dest}, mode: {mode})" + ) + + dest_dir = os.path.dirname(dest) + if dest_dir and not os.path.isdir(dest_dir): + os.makedirs(dest_dir, exist_ok=True) + + # Use Ansible's atomic_move for correctness across FS boundaries when possible + try: + self._m.atomic_move(src, dest) + except Exception as exc: + self._safe_unlink(src) + self._m.fail_json(msg=f"atomic_move failed for {dest}: {exc!s}") + + self._ensure_mode(dest, mode, check_mode=False) + + def _ensure_mode(self, path: str, mode: int, *, check_mode: bool) -> None: + """ + Ensure a file has the desired permission bits. + + Args: + path: File path. + mode: File permissions mode (octal int). + check_mode: If True, do not chmod, only evaluate. + """ + self._m.log( + f"Deb822RepoManager::_ensure_mode(path: {path}, mode: {mode}, check_mode: {check_mode})" + ) + + if not os.path.exists(path): + return + try: + st = os.stat(path) + except OSError: + return + if (st.st_mode & 0o777) == mode: + return + if check_mode: + return + try: + os.chmod(path, mode) + except OSError as exc: + self._m.fail_json(msg=f"Failed to chmod {path} to {oct(mode)}: {exc!s}") + + def _sha256_file(self, path: str) -> str: + """ + Compute the SHA256 hash of a file. + + Args: + path: File path. + + Returns: + SHA256 digest as a lowercase hex string. + """ + h = sha256() + with open(path, "rb") as f: + while True: + chunk = f.read(1024 * 256) + if not chunk: + break + h.update(chunk) + return h.hexdigest() + + def _temp_path(self, *, suffix: str = "") -> str: + """ + Create a temporary file path for intermediate downloads/writes. + + Args: + suffix: Optional file suffix. + + Returns: + Path to a newly created temporary file (empty file is created). + """ + tmpdir = getattr(self._m, "tmpdir", None) or None + fd, p = tempfile.mkstemp(prefix="ansible-deb822-", suffix=suffix, dir=tmpdir) + os.close(fd) + return p + + def _safe_unlink(self, path: str) -> None: + """ + Best-effort removal of a file path. + + Errors are ignored to simplify cleanup paths. + """ + try: + os.remove(path) + except OSError: + pass + + # ------------------------- + # Helpers: gpg + # ------------------------- + + def _looks_like_ascii_armored(self, path: str) -> bool: + """ + Heuristically detect whether a file is an ASCII-armored PGP key. + + Args: + path: File path. + + Returns: + True if the file begins with a typical ASCII armored PGP public key header. + """ + try: + with open(path, "rb") as f: + head = f.read(128) + except OSError: + return False + return b"-----BEGIN PGP PUBLIC KEY BLOCK-----" in head + + def _run_gpg_dearmor(self, *, src: str, dst: str) -> None: + """ + Convert an ASCII-armored key to a binary keyring using GnuPG. + + Args: + src: Source key file path. + dst: Destination keyring file path. + + Raises: + The module will call ``fail_json`` if gpg fails. + """ + rc, out, err = self._m.run_command( + ["gpg", "--dearmor", "--yes", "--output", dst, src], + check_rc=False, + ) + if rc != 0: + self._safe_unlink(dst) + self._m.fail_json(msg=f"gpg --dearmor failed (rc={rc}): {err or out}") + + def _run_gpg_show_keys(self, *, path: str) -> None: + """ + Validate that a key file contains at least one public key. + + Args: + path: Key file path. + + Raises: + The module will call ``fail_json`` if gpg fails or the output does not contain a public key. 
+ """ + rc, out, err = self._m.run_command( + ["gpg", "--show-keys", "--with-colons", path], + check_rc=False, + ) + if rc != 0: + self._m.fail_json(msg=f"gpg --show-keys failed (rc={rc}): {err or out}") + if "pub" not in out: + self._m.fail_json( + msg="Downloaded key does not look like a public key (no 'pub' record in gpg output)." + ) + + # ------------------------- + # Helpers: dpkg + # ------------------------- + + def _dpkg_deb_fields(self, deb_path: str) -> Tuple[str, str]: + """ + Read package metadata (Package and Version) from a ``.deb`` file. + + Args: + deb_path: Path to the ``.deb`` file. + + Returns: + Tuple ``(package_name, package_version)``. + + Raises: + The module will call ``fail_json`` on unexpected output. + """ + self._m.log(f"Deb822RepoManager::_dpkg_deb_fields(deb_path: {deb_path})") + + _cmd: List[str] + _cmd = ["dpkg-deb", "--field", deb_path, "Package", "Version"] + + rc, out, err = self._m.run_command( + _cmd, + check_rc=False, + ) + + if rc != 0: + self._m.fail_json( + msg=f"dpkg-deb --field failed for {deb_path}: {err or out}" + ) + + lines = [ln.strip() for ln in out.splitlines() if ln.strip()] + + if len(lines) < 2: + self._m.fail_json(msg=f"Unexpected dpkg-deb output for {deb_path}: {out!r}") + + data: Dict = {} + + for match in re.finditer(r"^(\w+):\s+(.+)$", out, re.MULTILINE): + key, value = match.groups() + data[key.lower()] = value + + return (data.get("package", ""), data.get("version", None)) + + def _dpkg_query_version(self, pkg_name: str) -> Optional[str]: + """ + Query the installed version of a dpkg package. + + Args: + pkg_name: Package name. + + Returns: + The installed version string, or None if the package is not installed. + """ + self._m.log(f"Deb822RepoManager::_dpkg_query_version(pkg_name: {pkg_name})") + + _cmd: List[str] + _cmd = ["dpkg-query", "--show", "-f=${Version}", pkg_name] + + rc, out, err = self._m.run_command( + _cmd, + check_rc=False, + ) + + if rc != 0: + return None + + ver = out.strip() + return ver or None + + def _find_keyring_path_in_deb(self, deb_path: str) -> str: + # Find typical keyring locations inside the deb + """ + Determine a likely keyring file path contained in a ``.deb`` package. + + The method scans the package file list for ``.gpg`` files under typical keyring directories + and chooses a stable, preferred candidate. + + Args: + deb_path: Path to the downloaded ``.deb`` file. + + Returns: + The selected keyring path inside the filesystem (leading ``/``). + + Raises: + The module will call ``fail_json`` if no suitable keyring path can be determined. + """ + rc, out, err = self._m.run_command(["dpkg-deb", "-c", deb_path], check_rc=False) + if rc != 0: + self._m.fail_json(msg=f"dpkg-deb -c failed for {deb_path}: {err or out}") + + candidates: List[str] = [] + for line in out.splitlines(): + parts = line.split() + if not parts: + continue + p = parts[-1] + if p.endswith(".gpg") and ( + p.startswith("./usr/share/keyrings/") + or p.startswith("./etc/apt/keyrings/") + ): + candidates.append(p.lstrip(".")) + if not candidates: + self._m.fail_json( + msg=( + "Could not determine keyring path from deb contents. " + "Set key.deb_keyring_path explicitly." + ) + ) + # Prefer /usr/share/keyrings + candidates.sort( + key=lambda x: (0 if x.startswith("/usr/share/keyrings/") else 1, x) + ) + return candidates[0] + + # ------------------------- + # Helpers: misc + # ------------------------- + + def _parse_mode(self, mode_str: Any) -> int: + """ + Parse a file mode from an int or an octal string. 
+
+        Args:
+            mode_str: Mode value, e.g. ``"0644"`` or ``0o644``.
+
+        Returns:
+            Mode as an integer suitable for ``os.chmod``.
+
+        Raises:
+            The module will call ``fail_json`` for invalid values.
+        """
+        if isinstance(mode_str, int):
+            return mode_str
+        s = str(mode_str).strip()
+        if not s:
+            return 0o644
+        try:
+            return int(s, 8)
+        except ValueError:
+            self._m.fail_json(msg=f"Invalid file mode: {mode_str!r}")
diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/core/plugins/module_utils/diff.py b/ansible/playbooks/collections/ansible_collections/bodsch/core/plugins/module_utils/diff.py
new file mode 100644
index 0000000..912b8a3
--- /dev/null
+++ b/ansible/playbooks/collections/ansible_collections/bodsch/core/plugins/module_utils/diff.py
@@ -0,0 +1,284 @@
+#!/usr/bin/env python3
+# -*- coding: utf-8 -*-
+
+import difflib
+import itertools
+import json
+import textwrap
+import typing
+from pathlib import Path
+
+
+class SideBySide:
+    """
+    Renders side-by-side comparisons of two text versions,
+    including line numbers for changed lines.
+    """
+
+    def __init__(
+        self,
+        module,
+        left: typing.Union[str, dict, typing.List[str]],
+        right: typing.Union[str, dict, typing.List[str]],
+    ):
+        """
+        :param module: Object providing a .log(...) method for debugging
+        :param left:   Original text (dict, string, or list of lines)
+        :param right:  New text (dict, string, or list of lines)
+        """
+        self.module = module
+        self.default_separator = " | "
+        self.left = self._normalize_input(left)
+        self.right = self._normalize_input(right)
+
+    @staticmethod
+    def _normalize_input(
+        data: typing.Union[str, dict, typing.List[str]],
+    ) -> typing.List[str]:
+        """
+        Converts dict → JSON string, string → list of lines (splitlines);
+        a list is returned unchanged (as a copy).
+        """
+        if isinstance(data, dict):
+            data = json.dumps(data, indent=2)
+        if isinstance(data, str):
+            return data.splitlines()
+        if isinstance(data, list):
+            return data.copy()
+        raise TypeError(f"Expected dict, str or List[str], not {type(data)}")
+
+    @staticmethod
+    def _wrap_and_flatten(lines: typing.List[str], width: int) -> typing.List[str]:
+        """
+        Wraps each line to at most `width` characters and flattens the nested lists.
+        Empty lines are preserved as [""].
+        """
+        wrapper = textwrap.TextWrapper(
+            width=width,
+            break_long_words=False,
+            replace_whitespace=False,
+        )
+        flat: typing.List[str] = []
+        for line in lines:
+            wrapped = wrapper.wrap(line)
+            if not wrapped:
+                # wrapper.wrap("") returns []; keep [""] instead
+                flat.append("")
+            else:
+                flat.extend(wrapped)
+        return flat
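+
+    # Usage sketch (illustrative; any object exposing a ``log`` callable can
+    # stand in for the Ansible module):
+    #
+    #     sbs = SideBySide(module, left=old_text, right=new_text)
+    #     module.log(sbs.diff(width=120, left_title="Original", right_title="Update"))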
+
+    def side_by_side(
+        self,
+        left: typing.List[str],
+        right: typing.List[str],
+        width: int = 78,
+        as_string: bool = False,
+        separator: typing.Optional[str] = None,
+        left_title: typing.Optional[str] = None,
+        right_title: typing.Optional[str] = None,
+    ) -> typing.Union[str, typing.List[str]]:
+        """
+        Returns lines rendered side by side:
+            [left text][padding][separator][right text]
+
+        :param left:        List of lines (already numbered/prepared)
+        :param right:       List of lines (already numbered/prepared)
+        :param width:       Maximum total width (including the separator)
+        :param as_string:   True → return a single string joined with "\n"
+        :param separator:   String separating left and right (default " | ")
+        :param left_title:  Title at the very top left (optional)
+        :param right_title: Title at the very top right (optional)
+        :return: Either List[str] or a single string
+        """
+        sep = separator or self.default_separator
+        # Compute how many characters remain per side:
+        side_width = (width - len(sep) - (1 - width % 2)) // 2
+
+        # Wrap/flatten both sides
+        left_wrapped = self._wrap_and_flatten(left, side_width)
+        right_wrapped = self._wrap_and_flatten(right, side_width)
+
+        # Build pairs, padding missing lines with empty strings
+        pairs = list(itertools.zip_longest(left_wrapped, right_wrapped, fillvalue=""))
+
+        # If titles are given, prepend them (including an underline)
+        if left_title or right_title:
+            lt = left_title or ""
+            rt = right_title or ""
+            underline = "-" * side_width
+            header = [(lt, rt), (underline, underline)]
+            pairs = header + pairs
+
+        # Assemble each output line
+        lines: typing.List[str] = []
+        for l_line, r_line in pairs:
+            l_text = l_line or ""
+            r_text = r_line or ""
+            pad = " " * max(0, side_width - len(l_text))
+            lines.append(f"{l_text}{pad}{sep}{r_text}")
+
+        return "\n".join(lines) if as_string else lines
+
+    def better_diff(
+        self,
+        left: typing.Union[str, typing.List[str]],
+        right: typing.Union[str, typing.List[str]],
+        width: int = 78,
+        as_string: bool = True,
+        separator: typing.Optional[str] = None,
+        left_title: typing.Optional[str] = None,
+        right_title: typing.Optional[str] = None,
+    ) -> typing.Union[str, typing.List[str]]:
+        """
+        Returns a side-by-side diff that marks identical/removed/added lines
+        and additionally shows the line numbers of both inputs.
+
+        Prefix semantics:
+            "  " → line present in both inputs
+            "- " → line only in the left input
+            "+ " → line only in the right input
+            "? " → ignored entirely
+
+        Each output row has the form:
+            <lineno>: <text> | <lineno>: <text>
+        or, when a line is missing on one side:
+            <lineno>: <text> | -
+            +                | <lineno>: <text>
+
+        :param left:        Original text as a string or list of lines
+        :param right:       Comparison text as a string or list of lines
+        :param width:       Total width including the separator
+        :param as_string:   True to get back a single string
+        :param separator:   Separator (default: " | ")
+        :param left_title:  Title on the left (optional)
+        :param right_title: Title on the right (optional)
+        :return: Side-by-side list or a single string
+        """
+        # 1) Normalize the inputs
+        l_lines = left.splitlines() if isinstance(left, str) else left.copy()
+        r_lines = right.splitlines() if isinstance(right, str) else right.copy()
+
+        # 2) Compute the diff
+        differ = difflib.Differ()
+        diffed = list(differ.compare(l_lines, r_lines))
+
+        # 3) Line number counters
+        left_lineno = 1
+        right_lineno = 1
+
+        left_side: typing.List[str] = []
+        right_side: typing.List[str] = []
+
+        # 4) Walk through all diff entries
+        for entry in diffed:
+            code = entry[:2]  # "  ", "- ", "+ " or "? "
" + content = entry[2:] # Der eigentliche Text + + if code == " ": + # Zeile existiert in beiden Dateien + # Linke Seite: " : " + # Rechte Seite: " : " + left_side.append(f"{left_lineno:>4}: {content}") + right_side.append(f"{right_lineno:>4}: {content}") + left_lineno += 1 + right_lineno += 1 + + elif code == "- ": + # Nur in der linken Datei + left_side.append(f"{left_lineno:>4}: {content}") + # Rechts ein Platzhalter "-" ohne Nummer + right_side.append(" -") + left_lineno += 1 + + elif code == "+ ": + # Nur in der rechten Datei + # Links wird ein "+" angezeigt, ohne LNr + left_side.append(" +") + right_side.append(f"{right_lineno:>4}: {content}") + right_lineno += 1 + + # "? " ignorieren wir komplett + + # 5) Nun übergeben wir die nummerierten Zeilen an side_by_side() + return self.side_by_side( + left=left_side, + right=right_side, + width=width, + as_string=as_string, + separator=separator, + left_title=left_title, + right_title=right_title, + ) + + def diff( + self, + width: int = 78, + as_string: bool = True, + separator: typing.Optional[str] = None, + left_title: typing.Optional[str] = None, + right_title: typing.Optional[str] = None, + ) -> typing.Union[str, typing.List[str]]: + """ + Führt better_diff() für die in __init__ geladenen left/right‐Strings aus. + + :param width: Gesamtbreite inkl. Separator + :param as_string: True, um einen einzelnen String zurückzubekommen + :param separator: Trenner (Standard: " | ") + :param left_title: Überschrift links (optional) + :param right_title: Überschrift rechts (optional) + + :return: Side-by-Side-Liste oder einzelner String + """ + return self.better_diff( + left=self.left, + right=self.right, + width=width, + as_string=as_string, + separator=separator, + left_title=left_title, + right_title=right_title, + ) + + def diff_between_files( + self, + file_1: typing.Union[str, Path], + file_2: typing.Union[str, Path], + ) -> typing.Union[str, typing.List[str]]: + """ + Liest zwei Dateien ein und liefert ihren Side-by-Side‐Diff (mit Zeilennummern). + + :param file_1: Pfad zur ersten Datei + :param file_2: Pfad zur zweiten Datei + :return: Liste der formatierten Zeilen oder einziger String (as_string=True) + """ + f1 = Path(file_1) + f2 = Path(file_2) + + self.module.log(f"diff_between_files({f1}, {f2})") + + if not f1.is_file() or not f2.is_file(): + self.module.log(f" Eine oder beide Dateien existieren nicht: {f1}, {f2}") + # Hier geben wir für den Fall „Datei fehlt“ einfach einen leeren String zurück. + return "" + + # Dateien in Listen von Zeilen einlesen (ohne trailing "\n") + old_lines = f1.read_text(encoding="utf-8").splitlines() + new_lines = f2.read_text(encoding="utf-8").splitlines() + + self.module.log(f" Gelesen: {len(old_lines)} Zeilen aus {f1}") + self.module.log(f" Gelesen: {len(new_lines)} Zeilen aus {f2}") + + diffed = self.better_diff( + left=old_lines, + right=new_lines, + width=140, + as_string=True, + separator=self.default_separator, + left_title=" Original", + right_title=" Update", + ) + + # Nur einen Auszug fürs Logging (z.B. 
diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/core/plugins/module_utils/directory.py b/ansible/playbooks/collections/ansible_collections/bodsch/core/plugins/module_utils/directory.py
new file mode 100644
index 0000000..f8b7d6d
--- /dev/null
+++ b/ansible/playbooks/collections/ansible_collections/bodsch/core/plugins/module_utils/directory.py
@@ -0,0 +1,221 @@
+#!/usr/bin/python3
+# -*- coding: utf-8 -*-
+
+# (c) 2020-2023, Bodo Schulz
+# Apache-2.0 (see LICENSE or https://opensource.org/license/apache-2-0)
+# SPDX-License-Identifier: Apache-2.0
+
+import grp
+import os
+import pwd
+
+from ansible_collections.bodsch.core.plugins.module_utils.lists import find_in_list
+
+
+def create_directory(directory, owner=None, group=None, mode=None):
+    """
+    Create a directory (including parents) and optionally apply mode and ownership.
+    """
+    try:
+        os.makedirs(directory, exist_ok=True)
+    except FileExistsError:
+        pass
+
+    if mode is not None:
+        os.chmod(directory, int(mode, base=8))
+
+    if owner is not None:
+        try:
+            owner = pwd.getpwnam(owner).pw_uid
+        except KeyError:
+            owner = int(owner)
+    else:
+        owner = 0
+
+    if group is not None:
+        try:
+            group = grp.getgrnam(group).gr_gid
+        except KeyError:
+            group = int(group)
+    else:
+        group = 0
+
+    if os.path.isdir(directory) and owner and group:
+        os.chown(directory, int(owner), int(group))
+
+    if os.path.isdir(directory):
+        return True
+    else:
+        return False
+
+
+def create_directory_tree(directory_tree, current_state):
+    """
+    Create a tree of directories, optionally forcing owner, group and mode per entry.
+    """
+    for entry in directory_tree:
+        source = entry.get("source")
+        source_handling = entry.get("source_handling", {})
+        force_create = source_handling.get("create", None)
+        force_owner = source_handling.get("owner", None)
+        force_group = source_handling.get("group", None)
+        force_mode = source_handling.get("mode", None)
+
+        curr = find_in_list(current_state, source)
+
+        current_owner = curr[source].get("owner")
+        current_group = curr[source].get("group")
+
+        # create directory
+        if force_create is not None and not force_create:
+            pass
+        else:
+            try:
+                os.makedirs(source, exist_ok=True)
+            except FileExistsError:
+                pass
+
+        # change mode
+        if os.path.isdir(source) and force_mode is not None:
+            if isinstance(force_mode, int):
+                mode = int(str(force_mode), base=8)
+            if isinstance(force_mode, str):
+                mode = int(force_mode, base=8)
+
+            os.chmod(source, mode)
+
+        # change ownership
+        if force_owner is not None or force_group is not None:
+            if os.path.isdir(source):
+                if force_owner is not None:
+                    try:
+                        force_owner = pwd.getpwnam(str(force_owner)).pw_uid
+                    except KeyError:
+                        force_owner = int(force_owner)
+                elif current_owner is not None:
+                    force_owner = current_owner
+                else:
+                    force_owner = 0
+
+                if force_group is not None:
+                    try:
+                        force_group = grp.getgrnam(str(force_group)).gr_gid
+                    except KeyError:
+                        force_group = int(force_group)
+                elif current_group is not None:
+                    force_group = current_group
+                else:
+                    force_group = 0
+
+                os.chown(source, int(force_owner), int(force_group))
+
+
+def permstr_to_octal(modestr, umask):
+    """
+    Convert a Unix permission string (rw-r--r--) into a mode (0644)
+    """
+    revstr = modestr[::-1]
+    mode = 0
+    for j in range(0, 3):
+        for i in range(0, 3):
+            if revstr[i + 3 * j] in ["r", "w", "x", "s", "t"]:
+                mode += 2 ** (i + 3 * j)
+
+    return mode & ~umask
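+
+# Worked example (illustrative):
+#
+#     permstr_to_octal("rw-r--r--", 0)      -> 0o644 (420)
+#     permstr_to_octal("rwxr-xr-x", 0o022)  -> 0o755 (the umask clears no set bits here)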
+
+
+def current_state(directory):
+    """
+    Return the current (owner, group, mode) of a directory, or None values if unknown.
+    """
+    current_owner = None
+    current_group = None
+    current_mode = None
+
+    if os.path.isdir(directory):
+        _state = os.stat(directory)
+        try:
+            current_owner = pwd.getpwuid(_state.st_uid).pw_uid
+        except KeyError:
+            pass
+
+        try:
+            current_group = grp.getgrgid(_state.st_gid).gr_gid
+        except KeyError:
+            pass
+
+        current_mode = oct(_state.st_mode)[-4:]
+
+    return current_owner, current_group, current_mode
+
+
+def fix_ownership(directory, force_owner=None, force_group=None, force_mode=None):
+    """
+    Align owner, group and mode of a directory with the forced values.
+    Returns (changed, error_msg).
+    """
+    changed = False
+    error_msg = None
+
+    if os.path.isdir(directory):
+        current_owner, current_group, current_mode = current_state(directory)
+
+        # change mode
+        if force_mode is not None and force_mode != current_mode:
+            mode = None
+            try:
+                if isinstance(force_mode, int):
+                    mode = int(str(force_mode), base=8)
+                elif isinstance(force_mode, str):
+                    mode = int(force_mode, base=8)
+            except ValueError as e:
+                error_msg = f" - ERROR '{e}'"
+                print(error_msg)
+
+            if mode is not None:
+                os.chmod(directory, mode)
+
+        # change ownership
+        # NOTE: the or/and mix needs explicit parentheses; otherwise 'and' binds
+        # tighter and a forced owner alone would always trigger a chown.
+        if (
+            force_owner is not None or force_group is not None
+        ) and (force_owner != current_owner or force_group != current_group):
+            if force_owner is not None:
+                try:
+                    force_owner = pwd.getpwnam(str(force_owner)).pw_uid
+                except KeyError:
+                    force_owner = int(force_owner)
+            elif current_owner is not None:
+                force_owner = current_owner
+            else:
+                force_owner = 0
+
+            if force_group is not None:
+                try:
+                    force_group = grp.getgrnam(str(force_group)).gr_gid
+                except KeyError:
+                    force_group = int(force_group)
+            elif current_group is not None:
+                force_group = current_group
+            else:
+                force_group = 0
+
+            os.chown(directory, int(force_owner), int(force_group))
+
+        _owner, _group, _mode = current_state(directory)
+
+        if (
+            (current_owner != _owner)
+            or (current_group != _group)
+            or (current_mode != _mode)
+        ):
+            changed = True
+
+    return changed, error_msg
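A short usage sketch for the directory helpers. Illustrative only: the paths are examples and the calls need privileges to chown/chmod the target.

```python
# Illustrative only - requires privileges on the target path.
from ansible_collections.bodsch.core.plugins.module_utils.directory import (
    create_directory,
    fix_ownership,
)

create_directory("/var/lib/example", owner="root", group="root", mode="0750")
changed, error_msg = fix_ownership(
    "/var/lib/example", force_owner="root", force_group="root", force_mode="0755"
)
print(changed, error_msg)
```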
+ "error_msg": "Timed out while resolving", + "name": dns_name, + } + except dns.resolver.NameError as e: + result = { + "addrs": [], + "error": True, + "error_msg": repr(e), + "name": dns_name, + } + except dns.exception.DNSException as e: + result = { + "addrs": [], + "error": True, + "error_msg": f"Unhandled exception ({repr(e)})", + "name": dns_name, + } + + return result diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/core/plugins/module_utils/easyrsa.py b/ansible/playbooks/collections/ansible_collections/bodsch/core/plugins/module_utils/easyrsa.py new file mode 100644 index 0000000..f103ab6 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/core/plugins/module_utils/easyrsa.py @@ -0,0 +1,493 @@ +#!/usr/bin/python3 +# -*- coding: utf-8 -*- + +# (c) 2020-2023, Bodo Schulz +# Apache-2.0 (see LICENSE or https://opensource.org/license/apache-2-0) +# SPDX-License-Identifier: Apache-2.0 + +from __future__ import absolute_import, division, print_function + +import os +from typing import Any, List, Sequence, Tuple, Union + +EasyRSAResult = Tuple[int, bool, Union[str, List[str]]] +ExecResult = Tuple[int, str, str] + + +class EasyRSA: + """ + Thin wrapper around the `easyrsa` CLI to manage a simple PKI lifecycle. + + The class is designed to be used from an Ansible context (``module``), + relying on the module to provide: + - ``module.params`` for runtime parameters (e.g. ``force``) + - ``module.log(...)`` for logging + - ``module.get_bin_path("easyrsa", required=True)`` to locate the binary + - ``module.run_command([...])`` to execute commands + + Attributes: + module: Ansible module-like object providing logging and command execution. + state: Internal state placeholder (currently unused). + force: Whether to force actions (read from ``module.params['force']``). + pki_dir: Path to the PKI directory (commonly ``/etc/easy-rsa/pki``). + req_cn_ca: Common name (CN) used when building the CA. + req_cn_server: Common name (CN) used for server requests/certificates. + ca_keysize: RSA key size for CA key generation. + dh_keysize: DH parameter size for DH generation. + working_dir: Working directory context (currently not used for chdir). + easyrsa: Resolved path to the ``easyrsa`` executable. + easyrsa_directory: Base directory used by some file existence checks + (defaults to ``/etc/easy-rsa``). + """ + + def __init__( + self, + module: Any, + force: bool = False, + pki_dir: str = "", + req_cn_ca: str = "", + req_cn_server: str = "", + ca_keysize: int = 4086, + dh_keysize: int = 2048, + working_dir: str = "", + ) -> None: + """ + Create an EasyRSA helper instance. + + Args: + module: Ansible module-like object used for logging and running commands. + force: Optional force flag (note: the effective value is read from + ``module.params.get("force", False)``). + pki_dir: Path to PKI directory (e.g. ``/etc/easy-rsa/pki``). + req_cn_ca: CA request common name (CN) used for ``build-ca``. + req_cn_server: Server common name (CN) used for ``gen-req`` and ``sign-req``. + ca_keysize: RSA key size for the CA. + dh_keysize: DH parameter size. + working_dir: Intended working directory for running commands (not applied). 
+
+        Returns:
+            None
+        """
+        self.module = module
+
+        self.module.log(
+            "EasyRSA::__init__("
+            f"force={force}, pki_dir={pki_dir}, "
+            f"req_cn_ca={req_cn_ca}, req_cn_server={req_cn_server}, "
+            f"ca_keysize={ca_keysize}, dh_keysize={dh_keysize}, "
+            f"working_dir={working_dir}"
+            ")"
+        )
+
+        self.state = ""
+
+        self.force = module.params.get("force", False)
+        self.pki_dir = pki_dir
+        self.req_cn_ca = req_cn_ca
+        self.req_cn_server = req_cn_server
+        self.ca_keysize = ca_keysize
+        self.dh_keysize = dh_keysize
+        self.working_dir = working_dir
+
+        self.easyrsa = module.get_bin_path("easyrsa", True)
+
+        self.easyrsa_directory = "/etc/easy-rsa"
+
+    # ----------------------------------------------------------------------------------------------
+    # Public API - create
+    def create_pki(self) -> Tuple[int, bool, str]:
+        """
+        Initialize the PKI directory via ``easyrsa init-pki``.
+
+        The method performs an idempotency check using :meth:`validate_pki` and
+        returns unchanged when the PKI directory already exists.
+
+        Returns:
+            tuple[int, bool, str]: (rc, changed, message)
+                rc: 0 on success, non-zero on failure.
+                changed: True if the PKI was created, False if it already existed.
+                message: Human-readable status message.
+        """
+        self.module.log(msg="EasyRsa::create_pki()")
+
+        if self.validate_pki():
+            return (0, False, "PKI already created")
+
+        args: List[str] = []
+        args.append(self.easyrsa)
+        args.append("init-pki")
+
+        rc, out, err = self._exec(args)
+
+        if self.validate_pki():
+            return (0, True, "The PKI was successfully created.")
+        else:
+            return (1, True, "An error occurred while creating the PKI.")
+
+    def build_ca(self) -> EasyRSAResult:
+        """
+        Build a new certificate authority (CA) via ``easyrsa build-ca nopass``.
+
+        Performs an idempotency check using :meth:`validate_ca`. When the CA does not
+        exist, this runs Easy-RSA in batch mode and checks for the existence of:
+          - ``<easyrsa_directory>/pki/ca.crt``
+          - ``<easyrsa_directory>/pki/private/ca.key``
+
+        Returns:
+            tuple[int, bool, Union[str, list[str]]]: (rc, changed, output)
+                rc: 0 on success; 3 if expected files were not created; otherwise
+                    the underlying command return code.
+                changed: False if the CA already existed; True if a build was attempted.
+                output: Combined stdout/stderr lines (list[str]) or a success message (str).
+        """
+        if self.validate_ca():
+            return (0, False, "CA already created")
+
+        args: List[str] = []
+        args.append(self.easyrsa)
+        args.append("--batch")
+        # args.append(f"--pki-dir={self._pki_dir}")
+        args.append(f"--req-cn={self.req_cn_ca}")
+
+        if self.ca_keysize:
+            args.append(f"--keysize={self.ca_keysize}")
+        args.append("build-ca")
+        args.append("nopass")
+
+        rc, out, err = self._exec(args)
+        _output: Union[str, List[str]] = self.result_values(out, err)
+
+        ca_crt_file = os.path.join(self.easyrsa_directory, "pki", "ca.crt")
+        ca_key_file = os.path.join(self.easyrsa_directory, "pki", "private", "ca.key")
+
+        if os.path.exists(ca_crt_file) and os.path.exists(ca_key_file):
+            rc = 0
+            _output = "ca.crt and ca.key were successfully created."
+        else:
+            rc = 3
+
+        return (rc, True, _output)
+
+    def gen_crl(self) -> EasyRSAResult:
+        """
+        Generate a certificate revocation list (CRL) via ``easyrsa gen-crl``.
+
+        Performs an idempotency check using :meth:`validate_crl` and checks for
+        ``<easyrsa_directory>/pki/crl.pem`` after execution.
+
+        Returns:
+            tuple[int, bool, Union[str, list[str]]]: (rc, changed, output)
+                rc: 0 on success; 3 if expected file was not created; otherwise
+                    the underlying command return code.
+                changed: False if CRL already existed; True if generation was attempted.
+                output: Combined stdout/stderr lines (list[str]) or a success message (str).
+        """
+        self.module.log("EasyRSA::gen_crl()")
+
+        if self.validate_crl():
+            return (0, False, "CRL already created")
+
+        args: List[str] = []
+        args.append(self.easyrsa)
+        # args.append(f"--pki-dir={self._pki_dir}")
+        args.append("gen-crl")
+
+        rc, out, err = self._exec(args)
+
+        # self.module.log(f"  rc : {rc}")
+        # self.module.log(f"  out: {out}")
+        # self.module.log(f"  err: {err}")
+
+        _output: Union[str, List[str]] = self.result_values(out, err)
+
+        crl_pem_file = os.path.join(self.easyrsa_directory, "pki", "crl.pem")
+
+        if os.path.exists(crl_pem_file):
+            rc = 0
+            _output = "crl.pem was successfully created."
+        else:
+            rc = 3
+
+        return (rc, True, _output)
+
+    def gen_req(self) -> EasyRSAResult:
+        """
+        Generate a private key and certificate signing request (CSR) via
+        ``easyrsa gen-req <CN> nopass``.
+
+        Performs an idempotency check using :meth:`validate_req` and checks for:
+          - ``<easyrsa_directory>/pki/reqs/<CN>.req`` after execution.
+
+        Returns:
+            tuple[int, bool, Union[str, list[str]]]: (rc, changed, output)
+                rc: 0 on success; 3 if expected file was not created; otherwise
+                    the underlying command return code.
+                changed: False if request already existed; True if generation was attempted.
+                output: Combined stdout/stderr lines (list[str]) or a success message (str).
+        """
+        if self.validate_req():
+            return (0, False, "keypair and request already created")
+
+        args: List[str] = []
+        args.append(self.easyrsa)
+        args.append("--batch")
+        # args.append(f"--pki-dir={self._pki_dir}")
+        if self.req_cn_ca:
+            args.append(f"--req-cn={self.req_cn_ca}")
+        args.append("gen-req")
+        args.append(self.req_cn_server)
+        args.append("nopass")
+
+        rc, out, err = self._exec(args)
+        _output: Union[str, List[str]] = self.result_values(out, err)
+
+        req_file = os.path.join(
+            self.easyrsa_directory, "pki", "reqs", f"{self.req_cn_server}.req"
+        )
+
+        if os.path.exists(req_file):
+            rc = 0
+            _output = f"{self.req_cn_server}.req was successfully created."
+        else:
+            rc = 3
+
+        return (rc, True, _output)
+
+    def sign_req(self) -> EasyRSAResult:
+        """
+        Sign the server request and generate a certificate via
+        ``easyrsa sign-req server <CN>``.
+
+        Performs an idempotency check using :meth:`validate_sign` and checks for:
+          - ``<easyrsa_directory>/pki/issued/<CN>.crt`` after execution.
+
+        Returns:
+            tuple[int, bool, Union[str, list[str]]]: (rc, changed, output)
+                rc: 0 on success; 3 if expected file was not created; otherwise
+                    the underlying command return code.
+                changed: False if the certificate already existed; True if signing was attempted.
+                output: Combined stdout/stderr lines (list[str]) or a success message (str).
+        """
+        if self.validate_sign():
+            return (0, False, "certificate already signed")
+
+        args: List[str] = []
+        args.append(self.easyrsa)
+        args.append("--batch")
+        # args.append(f"--pki-dir={self._pki_dir}")
+        args.append("sign-req")
+        args.append("server")
+        args.append(self.req_cn_server)
+
+        rc, out, err = self._exec(args)
+        _output: Union[str, List[str]] = self.result_values(out, err)
+
+        crt_file = os.path.join(
+            self.easyrsa_directory, "pki", "issued", f"{self.req_cn_server}.crt"
+        )
+
+        if os.path.exists(crt_file):
+            rc = 0
+            _output = f"{self.req_cn_server}.crt was successfully created."
+        else:
+            rc = 3
+
+        return (rc, True, _output)
+
+    def gen_dh(self) -> EasyRSAResult:
+        """
+        Generate Diffie-Hellman parameters via ``easyrsa gen-dh``.
+
+        Performs an idempotency check using :meth:`validate_dh` and checks for:
+        - ``<easyrsa_directory>/pki/dh.pem`` after execution.
+
+        Returns:
+            tuple[int, bool, Union[str, list[str]]]: (rc, changed, output)
+                rc: 0 on success; 3 if expected file was not created; otherwise
+                    the underlying command return code.
+                changed: False if DH params already existed; True if generation was attempted.
+                output: Combined stdout/stderr lines (list[str]) or a success message (str).
+        """
+        if self.validate_dh():
+            return (0, False, "DH already created")
+
+        args: List[str] = []
+        args.append(self.easyrsa)
+        # args.append(f"--pki-dir={self._pki_dir}")
+        if self.dh_keysize:
+            args.append(f"--keysize={self.dh_keysize}")
+        args.append("gen-dh")
+
+        rc, out, err = self._exec(args)
+        _output: Union[str, List[str]] = self.result_values(out, err)
+
+        dh_pem_file = os.path.join(self.easyrsa_directory, "pki", "dh.pem")
+
+        if os.path.exists(dh_pem_file):
+            rc = 0
+            _output = "dh.pem was successfully created."
+        else:
+            rc = 3
+
+        return (rc, True, _output)
+
+    # ----------------------------------------------------------------------------------------------
+    # PRIVATE API - validate
+    def validate_pki(self) -> bool:
+        """
+        Check whether the PKI directory exists.
+
+        Returns:
+            bool: True if ``self.pki_dir`` exists on disk, otherwise False.
+        """
+        self.module.log(msg="EasyRSA::validate_pki()")
+
+        return os.path.exists(self.pki_dir)
+
+    def validate_ca(self) -> bool:
+        """
+        Check whether the CA certificate and key exist.
+
+        Expected files (relative to ``self.pki_dir``):
+        - ``ca.crt``
+        - ``private/ca.key``
+
+        Returns:
+            bool: True if both CA files exist, otherwise False.
+        """
+        self.module.log(msg="EasyRSA::validate_ca()")
+
+        ca_crt_file = os.path.join(self.pki_dir, "ca.crt")
+        ca_key_file = os.path.join(self.pki_dir, "private", "ca.key")
+
+        return os.path.exists(ca_crt_file) and os.path.exists(ca_key_file)
+
+    def validate_crl(self) -> bool:
+        """
+        Check whether the CRL exists.
+
+        Expected file (relative to ``self.pki_dir``):
+        - ``crl.pem``
+
+        Returns:
+            bool: True if the CRL exists, otherwise False.
+        """
+        self.module.log(msg="EasyRSA::validate_crl()")
+
+        crl_pem_file = os.path.join(self.pki_dir, "crl.pem")
+
+        return os.path.exists(crl_pem_file)
+
+    def validate_dh(self) -> bool:
+        """
+        Check whether the DH parameters file exists.
+
+        Expected file (relative to ``self.pki_dir``):
+        - ``dh.pem``
+
+        Returns:
+            bool: True if DH params exist, otherwise False.
+        """
+        self.module.log(msg="EasyRSA::validate_dh()")
+
+        dh_pem_file = os.path.join(self.pki_dir, "dh.pem")
+
+        return os.path.exists(dh_pem_file)
+
+    def validate_req(self) -> bool:
+        """
+        Check whether the server request (CSR) exists.
+
+        Expected file (relative to ``self.pki_dir``):
+        - ``reqs/<req_cn_server>.req``
+
+        Returns:
+            bool: True if the CSR exists, otherwise False.
+        """
+        self.module.log(msg="EasyRSA::validate_req()")
+
+        req_file = os.path.join(self.pki_dir, "reqs", f"{self.req_cn_server}.req")
+
+        return os.path.exists(req_file)
+
+    def validate_sign(self) -> bool:
+        """
+        Check whether the signed server certificate exists.
+
+        Expected file (relative to ``self.pki_dir``):
+        - ``issued/<req_cn_server>.crt``
+
+        Returns:
+            bool: True if the certificate exists, otherwise False.
+ """ + self.module.log(msg="EasyRsa::validate__sign()") + + crt_file = os.path.join(self.pki_dir, "issued", f"{self.req_cn_server}.crt") + + if os.path.exists(crt_file): + return True + else: + return False + + # ---------------------------------------------------------------------------------------------- + + def _exec(self, commands: Sequence[str], check_rc: bool = False) -> ExecResult: + """ + Execute a command via the underlying Ansible module. + + Args: + commands: Command and arguments as a sequence of strings. + check_rc: Passed through to ``module.run_command``; when True, the + module may raise/fail on non-zero return codes depending on its behavior. + + Returns: + tuple[int, str, str]: (rc, stdout, stderr) + rc: Process return code. + stdout: Captured standard output. + stderr: Captured standard error. + """ + self.module.log(msg=f"_exec(commands={commands}, check_rc={check_rc}") + + rc, out, err = self.module.run_command(commands, check_rc=check_rc) + + if int(rc) != 0: + self.module.log(msg=f" rc : '{rc}'") + self.module.log(msg=f" out: '{out}'") + self.module.log(msg=f" err: '{err}'") + + return rc, out, err + + def result_values(self, out: str, err: str) -> List[str]: + """ + Merge stdout and stderr into a single list of output lines. + + Args: + out: Raw stdout string. + err: Raw stderr string. + + Returns: + list[str]: Concatenated list of lines (stdout lines first, then stderr lines). + """ + _out = out.splitlines() + _err = err.splitlines() + _output: List[str] = [] + _output += _out + _output += _err + # self.module.log(msg=f"= output: {_output}") + return _output diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/core/plugins/module_utils/file.py b/ansible/playbooks/collections/ansible_collections/bodsch/core/plugins/module_utils/file.py new file mode 100644 index 0000000..be44499 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/core/plugins/module_utils/file.py @@ -0,0 +1,35 @@ +#!/usr/bin/python3 +# -*- coding: utf-8 -*- + +# (c) 2020-2023, Bodo Schulz +# Apache-2.0 (see LICENSE or https://opensource.org/license/apache-2-0) +# SPDX-License-Identifier: Apache-2.0 + +import os + + +def create_link(source, destination, force=False): + """ + create a link .. + """ + if force: + os.remove(destination) + os.symlink(source, destination) + else: + os.symlink(source, destination) + + +def remove_file(file_name): + """ """ + if os.path.exists(file_name): + os.remove(file_name) + return True + + return False + + +def chmod(file_name, mode): + """ """ + if os.path.exists(file_name): + if mode is not None: + os.chmod(file_name, int(mode, base=8)) diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/core/plugins/module_utils/lists.py b/ansible/playbooks/collections/ansible_collections/bodsch/core/plugins/module_utils/lists.py new file mode 100644 index 0000000..1792a50 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/core/plugins/module_utils/lists.py @@ -0,0 +1,36 @@ +#!/usr/bin/python3 +# -*- coding: utf-8 -*- + +# (c) 2020-2023, Bodo Schulz +# Apache-2.0 (see LICENSE or https://opensource.org/license/apache-2-0) +# SPDX-License-Identifier: Apache-2.0 + + +def find_in_list(list, value): + """ """ + for entry in list: + for k, v in entry.items(): + if k == value: + return entry + + return None + + +def compare_two_lists(list1: list, list2: list, debug=False): + """ + Compare two lists and logs the difference. + :param list1: first list. + :param list2: second list. 
+    :return: tuple ``(changed, diff, debug_msg)`` - whether the lists differ,
+        the entries of ``list2`` that are missing from ``list1``, and optional
+        debug messages.
+    """
+    debug_msg = []
+
+    diff = [x for x in list2 if x not in list1]
+
+    changed = len(diff) > 0
+    if debug and changed:
+        debug_msg.append(f"There are {len(diff)} differences:")
+        debug_msg.append(f"  {diff[:5]}")
+
+    return changed, diff, debug_msg
diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/core/plugins/module_utils/module_results.py b/ansible/playbooks/collections/ansible_collections/bodsch/core/plugins/module_utils/module_results.py
new file mode 100644
index 0000000..9ca0e47
--- /dev/null
+++ b/ansible/playbooks/collections/ansible_collections/bodsch/core/plugins/module_utils/module_results.py
@@ -0,0 +1,87 @@
+#!/usr/bin/python3
+# -*- coding: utf-8 -*-
+
+# (c) 2020-2023, Bodo Schulz
+# Apache-2.0 (see LICENSE or https://opensource.org/license/apache-2-0)
+# SPDX-License-Identifier: Apache-2.0
+
+from __future__ import annotations
+
+from typing import Any, Dict, Iterable, Tuple
+
+ResultEntry = Dict[str, Dict[str, Any]]
+ResultState = Iterable[ResultEntry]
+
+ResultsReturn = Tuple[
+    bool,  # has_state
+    bool,  # has_changed
+    bool,  # has_failed
+    Dict[str, Dict[str, Any]],  # state
+    Dict[str, Dict[str, Any]],  # changed
+    Dict[str, Dict[str, Any]],  # failed
+]
+
+
+def results(module: Any, result_state: ResultState) -> ResultsReturn:
+    """
+    Aggregate per-item module results into combined state/changed/failed maps.
+
+    The function expects an iterable of dictionaries, where each dictionary maps
+    an item identifier (e.g. a container name) to a dict containing optional keys
+    like ``state``, ``changed``, and ``failed``.
+
+    Example input:
+        [
+            {"busybox-1": {"state": "container.env written", "changed": True}},
+            {"hello-world-1": {"state": "hello-world-1.properties written"}},
+            {"nginx-1": {"failed": True, "msg": "..."}}
+        ]
+
+    Args:
+        module: An Ansible-like module object. Currently unused (kept for API symmetry
+            and optional debugging/logging).
+        result_state: Iterable of per-item result dictionaries as described above.
+
+    Returns:
+        tuple[bool, bool, bool, dict[str, dict[str, Any]], dict[str, dict[str, Any]], dict[str, dict[str, Any]]]:
+            (has_state, has_changed, has_failed, state, changed, failed)
+
+            has_state:
+                True if at least one item dict contains a truthy ``"state"`` key.
+            has_changed:
+                True if at least one item dict contains a truthy ``"changed"`` key.
+            has_failed:
+                True if at least one item dict contains a truthy ``"failed"`` key.
+            state:
+                Mapping of item_id -> item_dict for all items with a truthy ``"state"``.
+            changed:
+                Mapping of item_id -> item_dict for all items with a truthy ``"changed"``.
+            failed:
+                Mapping of item_id -> item_dict for all items with a truthy ``"failed"``.
+
+    Notes:
+        If the same item_id appears multiple times in ``result_state``, later entries
+        overwrite earlier ones during the merge step.
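+
+    Example (illustrative doctest; ``module`` is unused here, so ``None`` works):
+
+        >>> per_item = [
+        ...     {"busybox-1": {"state": "written", "changed": True}},
+        ...     {"nginx-1": {"failed": True, "msg": "boom"}},
+        ... ]
+        >>> has_state, has_changed, has_failed, s, c, f = results(None, per_item)
+        >>> (has_state, has_changed, has_failed)
+        (True, True, True)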
+ """ + + # module.log(msg=f"{result_state}") + + combined_d: Dict[str, Dict[str, Any]] = { + key: value for d in result_state for key, value in d.items() + } + + state: Dict[str, Dict[str, Any]] = { + k: v for k, v in combined_d.items() if isinstance(v, dict) and v.get("state") + } + changed: Dict[str, Dict[str, Any]] = { + k: v for k, v in combined_d.items() if isinstance(v, dict) and v.get("changed") + } + failed: Dict[str, Dict[str, Any]] = { + k: v for k, v in combined_d.items() if isinstance(v, dict) and v.get("failed") + } + + has_state = len(state) > 0 + has_changed = len(changed) > 0 + has_failed = len(failed) > 0 + + return (has_state, has_changed, has_failed, state, changed, failed) diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/core/plugins/module_utils/passlib_bcrypt5_compat.py b/ansible/playbooks/collections/ansible_collections/bodsch/core/plugins/module_utils/passlib_bcrypt5_compat.py new file mode 100644 index 0000000..6945d04 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/core/plugins/module_utils/passlib_bcrypt5_compat.py @@ -0,0 +1,210 @@ +from __future__ import annotations + +""" +Compatibility helpers for using passlib 1.7.4 with bcrypt 5.x. + +Background +---------- +passlib 1.7.4 performs a bcrypt backend self-test during import that uses a test +secret longer than 72 bytes. bcrypt 5.x raises a ValueError for inputs longer +than 72 bytes instead of silently truncating. This can abort imports of +passlib.apache (and other passlib components) even before user code runs. + +This module applies a targeted runtime patch: +- Patch bcrypt.hashpw/checkpw to truncate inputs to 72 bytes (bcrypt's effective + input limit) so that passlib's self-tests do not crash. +- Patch passlib.handlers.bcrypt.detect_wrap_bug() to handle the ValueError and + proceed with the wraparound test. + +The patch restores passlib importability on systems that ship passlib 1.7.4 +together with bcrypt 5.x. +""" + +import importlib.metadata +from importlib.metadata import PackageNotFoundError + + +def _major_version(dist_name: str) -> int | None: + """ + Return the major version number of an installed distribution. + + The value is derived from ``importlib.metadata.version(dist_name)`` and then + parsed as the leading numeric component. + + Args: + dist_name: The distribution name as used by importlib metadata + (e.g. "passlib", "bcrypt"). + + Returns: + The major version as an integer, or ``None`` if the distribution is not + installed or the version string cannot be interpreted. + """ + try: + v = importlib.metadata.version(dist_name) + except PackageNotFoundError: + return None + + # Extract the first dot-separated segment and keep digits only + # (works for typical versions like "5.0.1", "5rc1", "5.post1", etc.). + head = v.split(".", 1)[0] + try: + return int("".join(ch for ch in head if ch.isdigit()) or head) + except ValueError: + return None + + +def apply_passlib_bcrypt5_compat(module) -> None: + """ + Apply runtime patches to make passlib 1.7.4 work with bcrypt 5.x. + + What this does + ------------- + 1) Patches ``bcrypt.hashpw`` and ``bcrypt.checkpw`` to truncate any password + input longer than 72 bytes to 72 bytes. This prevents bcrypt 5.x from + raising ``ValueError`` when passlib runs its internal self-tests during + import. The patch is applied only once per Python process. 
+ + 2) Patches ``passlib.handlers.bcrypt.detect_wrap_bug`` to tolerate the + bcrypt 5.x ``ValueError`` during the wraparound self-test and continue + the test using a 72-byte truncated secret. + + Preconditions + ------------- + This function is a no-op unless: + - passlib is installed and its major version is 1, and + - bcrypt is installed and its major version is >= 5. + + Logging + ------- + The function uses ``module.log(...)`` for diagnostic messages. The passed + ``module`` is expected to be an AnsibleModule (or a compatible object). + + Important + --------- + This patch does not remove bcrypt's effective 72-byte input limit. bcrypt + inherently only considers the first 72 bytes of a password. The patch + merely restores the historical "truncate silently" behavior in bcrypt 5.x + so that older passlib versions keep working. + + Args: + module: An object providing ``log(str)``. Typically an instance of + ``ansible.module_utils.basic.AnsibleModule``. + + Returns: + None. The patch is applied in-place to the imported modules. + """ + module.log("apply_passlib_bcrypt5_compat()") + + passlib_major = _major_version("passlib") + bcrypt_major = _major_version("bcrypt") + + module.log(f" - passlib_major {passlib_major}") + module.log(f" - bcrypt_major {bcrypt_major}") + + if passlib_major is None or bcrypt_major is None: + return + if bcrypt_major < 5: + return + + # --- Patch 1: bcrypt itself (so passlib self-tests don't crash) --- + import bcrypt as _bcrypt # bcrypt package + + if not getattr(_bcrypt, "_passlib_compat_applied", False): + _orig_hashpw = _bcrypt.hashpw + _orig_checkpw = _bcrypt.checkpw + + def hashpw(secret: bytes, salt: bytes) -> bytes: + """ + Wrapper around bcrypt.hashpw that truncates secrets to 72 bytes. + + Args: + secret: Password bytes to hash. + salt: bcrypt salt/config blob. + + Returns: + The bcrypt hash as bytes. + """ + if isinstance(secret, bytearray): + secret = bytes(secret) + if len(secret) > 72: + secret = secret[:72] + return _orig_hashpw(secret, salt) + + def checkpw(secret: bytes, hashed: bytes) -> bool: + """ + Wrapper around bcrypt.checkpw that truncates secrets to 72 bytes. + + Args: + secret: Password bytes to verify. + hashed: Existing bcrypt hash. + + Returns: + True if the password matches, otherwise False. + """ + if isinstance(secret, bytearray): + secret = bytes(secret) + if len(secret) > 72: + secret = secret[:72] + return _orig_checkpw(secret, hashed) + + _bcrypt.hashpw = hashpw # type: ignore[assignment] + _bcrypt.checkpw = checkpw # type: ignore[assignment] + _bcrypt._passlib_compat_applied = True + + module.log(" - patched bcrypt.hashpw/checkpw for >72 truncation") + + # --- Patch 2: passlib detect_wrap_bug() (handle bcrypt>=5 behavior) --- + import passlib.handlers.bcrypt as pl_bcrypt # noqa: WPS433 (runtime patch) + + if getattr(pl_bcrypt, "_bcrypt5_compat_applied", False): + return + + def detect_wrap_bug_patched(ident: str) -> bool: + """ + Replacement for passlib.handlers.bcrypt.detect_wrap_bug(). + + passlib's original implementation performs a detection routine to test + for a historical bcrypt "wraparound" bug. The routine uses a test secret + longer than 72 bytes. With bcrypt 5.x, this can raise ``ValueError``. + This patched version catches that error, truncates the secret to 72 + bytes, and completes the verification checks. + + Args: + ident: The bcrypt identifier prefix (e.g. "$2a$", "$2b$", etc.) + as provided by passlib. + + Returns: + True if the backend appears to exhibit the wraparound bug, + otherwise False. 
+
+        Raises:
+            RuntimeError: If the backend fails the expected self-test checks.
+        """
+        secret = (b"0123456789" * 26)[:255]
+
+        bug_hash = (
+            ident.encode("ascii")
+            + b"04$R1lJ2gkNaoPGdafE.H.16.nVyh2niHsGJhayOHLMiXlI45o8/DU.6"
+        )
+        try:
+            if pl_bcrypt.bcrypt.verify(secret, bug_hash):
+                return True
+        except ValueError:
+            # bcrypt>=5 may raise an explicit ValueError for secrets longer than 72 bytes
+            secret = secret[:72]
+
+        correct_hash = (
+            ident.encode("ascii")
+            + b"04$R1lJ2gkNaoPGdafE.H.16.1MKHPvmKwryeulRe225LKProWYwt9Oi"
+        )
+        if not pl_bcrypt.bcrypt.verify(secret, correct_hash):
+            raise RuntimeError(
+                f"bcrypt backend failed wraparound self-test for ident={ident!r}"
+            )
+
+        return False
+
+    pl_bcrypt.detect_wrap_bug = detect_wrap_bug_patched  # type: ignore[assignment]
+    pl_bcrypt._bcrypt5_compat_applied = True
+
+    module.log("  - patched passlib.handlers.bcrypt.detect_wrap_bug")
diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/core/plugins/module_utils/template/template.py b/ansible/playbooks/collections/ansible_collections/bodsch/core/plugins/module_utils/template/template.py
new file mode 100644
index 0000000..1865e3d
--- /dev/null
+++ b/ansible/playbooks/collections/ansible_collections/bodsch/core/plugins/module_utils/template/template.py
@@ -0,0 +1,67 @@
+#!/usr/bin/python3
+# -*- coding: utf-8 -*-
+
+# (c) 2020-2023, Bodo Schulz
+# Apache-2.0 (see LICENSE or https://opensource.org/license/apache-2-0)
+# SPDX-License-Identifier: Apache-2.0
+
+import json
+
+from jinja2 import Template
+
+# from ansible_collections.bodsch.core.plugins.module_utils.checksum import Checksum
+
+
+class TemplateHandler:
+    """Render Jinja2 templates for module-generated files."""
+
+    def __init__(self, module):
+        self.module = module
+
+    def write_template(self, file_name, template, data):
+        """
+        Render ``template`` with ``data`` (exposed as ``item``) and write the
+        result to ``file_name``.
+
+        Dictionaries are round-tripped through JSON to sort their keys;
+        lists are joined with ':' before rendering.
+        """
+        if isinstance(data, dict):
+            # sort data
+            data = json.dumps(data, sort_keys=True)
+            if isinstance(data, str):
+                data = json.loads(data)
+
+        if isinstance(data, list):
+            data = ":".join(data)
+
+        tm = Template(template, trim_blocks=True, lstrip_blocks=True)
+        d = tm.render(item=data)
+
+        with open(file_name, "w") as f:
+            f.write(d)
+
+    def write_when_changed(self, tmp_file, data_file, **kwargs):
+        """Not yet implemented; intended to write only when the content changed."""
+        self.module.log(f"write_when_changed(self, {tmp_file}, {data_file}, {kwargs})")
+
+        # checksum = Checksum(self.module)
+
+        return None
+
+
+# OBSOLETE, BUT STILL SUPPORTED FOR COMPATIBILITY REASONS
+def write_template(file_name, template, data):
+    """
+    Module-level variant of :meth:`TemplateHandler.write_template`.
+    """
+    if isinstance(data, dict):
+        # sort data
+        data = json.dumps(data, sort_keys=True)
+        if isinstance(data, str):
+            data = json.loads(data)
+
+    if isinstance(data, list):
+        data = ":".join(data)
+
+    tm = Template(template, trim_blocks=True, lstrip_blocks=True)
+    d = tm.render(item=data)
+
+    with open(file_name, "w") as f:
+        f.write(d)
diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/core/plugins/module_utils/validate.py b/ansible/playbooks/collections/ansible_collections/bodsch/core/plugins/module_utils/validate.py
new file mode 100644
index 0000000..e5ea4c5
--- /dev/null
+++ b/ansible/playbooks/collections/ansible_collections/bodsch/core/plugins/module_utils/validate.py
@@ -0,0 +1,22 @@
+#!/usr/bin/python3
+# -*- coding: utf-8 -*-
+
+# (c) 2020-2023, Bodo Schulz
+# Apache-2.0 (see LICENSE or https://opensource.org/license/apache-2-0)
+# SPDX-License-Identifier: Apache-2.0
+
+
+def validate(value, default=None):
+    """
+    Return ``value`` when it is a non-empty str/list/dict, an int or a bool;
+    otherwise return ``default``.
+    """
+    if value:
+        if isinstance(value, str) or isinstance(value, list) or isinstance(value, dict):
+            if len(value) > 0:
+                return value
+
+        if isinstance(value, int):
+            return int(value)
+
+        if isinstance(value, bool):
+            return bool(value)
+
+    return default
diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/core/plugins/module_utils/versioned_deployment.py b/ansible/playbooks/collections/ansible_collections/bodsch/core/plugins/module_utils/versioned_deployment.py
new file mode 100644
index 0000000..11b5a1e
--- /dev/null
+++ b/ansible/playbooks/collections/ansible_collections/bodsch/core/plugins/module_utils/versioned_deployment.py
@@ -0,0 +1,741 @@
+"""
+versioned_deployment.py
+
+Idempotent deployment helper for versioned binaries with activation symlinks.
+
+This module is intended to be used from Ansible modules (and optionally an action plugin)
+to deploy one or multiple binaries into a versioned installation directory and activate
+them via symlinks (e.g. /usr/bin/<name> -> <install_dir>/<name>).
+
+Key features:
+- Optional copy from a remote staging directory (remote -> remote) with atomic replacement.
+- Permission and ownership enforcement (mode/owner/group).
+- Optional Linux file capabilities via getcap/setcap with normalized, idempotent comparison.
+- Activation detection based on symlink target.
+
+Public API:
+- BinaryDeploy.run(): reads AnsibleModule params and returns module JSON via exit_json/fail_json.
+"""
+
+from __future__ import annotations
+
+import grp
+import hashlib
+import os
+import pwd
+import re
+import shutil
+import tempfile
+from dataclasses import dataclass
+from typing import Any, Dict, List, Optional, Tuple
+
+from ansible.module_utils.basic import AnsibleModule
+
+_CHUNK_SIZE = 1024 * 1024
+_CAP_ENTRY_RE = re.compile(r"^(cap_[a-z0-9_]+)([=+])([a-z]+)$", re.IGNORECASE)
+
+
+@dataclass(frozen=True)
+class BinaryItem:
+    """A single deployable binary with optional activation name and capability."""
+
+    name: str
+    src: str
+    link_name: str
+    capability: Optional[str]
+
+
+class _PathOps:
+    """Filesystem helper methods used by the deployment logic."""
+
+    @staticmethod
+    def sha256_file(path: str) -> str:
+        """
+        Calculate the SHA-256 checksum of a file.
+
+        Args:
+            path: Path to the file.
+
+        Returns:
+            Hex-encoded SHA-256 digest.
+        """
+        h = hashlib.sha256()
+        with open(path, "rb") as f:
+            for chunk in iter(lambda: f.read(_CHUNK_SIZE), b""):
+                h.update(chunk)
+        return h.hexdigest()
+
+    @staticmethod
+    def files_equal(src: str, dst: str) -> bool:
+        """
+        Compare two files for equality by size and SHA-256 checksum.
+
+        This is used to decide whether a copy is required.
+
+        Args:
+            src: Source file path.
+            dst: Destination file path.
+
+        Returns:
+            True if both files exist and are byte-identical, otherwise False.
+        """
+        if os.path.abspath(src) == os.path.abspath(dst):
+            return True
+        try:
+            if os.path.samefile(src, dst):
+                return True
+        except FileNotFoundError:
+            return False
+        except OSError:
+            # samefile may fail on some filesystems; fall back to hashing
+            pass
+
+        try:
+            s1 = os.stat(src)
+            s2 = os.stat(dst)
+        except FileNotFoundError:
+            return False
+
+        if s1.st_size != s2.st_size:
+            return False
+
+        # Hashing is the expensive path; size match is a cheap early filter.
+        return _PathOps.sha256_file(src) == _PathOps.sha256_file(dst)
+
+    @staticmethod
+    def ensure_dir(path: str) -> bool:
+        """
+        Ensure a directory exists.
+
+        Args:
+            path: Directory path.
+
+        Returns:
+            True if the directory was created, otherwise False.
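+
+        Example (illustrative doctest):
+
+            >>> import os, tempfile
+            >>> d = os.path.join(tempfile.mkdtemp(), "sub")
+            >>> _PathOps.ensure_dir(d)    # created
+            True
+            >>> _PathOps.ensure_dir(d)    # already present
+            False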
+ """ + if os.path.isdir(path): + return False + os.makedirs(path, exist_ok=True) + return True + + @staticmethod + def safe_rmtree(path: str) -> None: + """ + Remove a directory tree with a minimal safety guard. + + Args: + path: Directory to remove. + + Raises: + ValueError: If the path is empty or points to '/'. + """ + if not path or os.path.abspath(path) in ("/",): + raise ValueError(f"Refusing to remove unsafe path: {path}") + shutil.rmtree(path) + + @staticmethod + def is_symlink_to(link_path: str, target_path: str) -> bool: + """ + Check whether link_path is a symlink pointing to target_path. + + Args: + link_path: Symlink location. + target_path: Expected symlink target. + + Returns: + True if link_path is a symlink to target_path, otherwise False. + """ + try: + if not os.path.islink(link_path): + return False + current = os.readlink(link_path) + except OSError: + return False + + # Normalize relative symlinks to absolute for comparison. + if not os.path.isabs(current): + current = os.path.abspath(os.path.join(os.path.dirname(link_path), current)) + + return os.path.abspath(current) == os.path.abspath(target_path) + + @staticmethod + def ensure_symlink(link_path: str, target_path: str) -> bool: + """ + Ensure link_path is a symlink to target_path. + + Args: + link_path: Symlink location. + target_path: Symlink target. + + Returns: + True if the symlink was created/updated, otherwise False. + """ + if _PathOps.is_symlink_to(link_path, target_path): + return False + + # Replace existing file/link. + try: + os.lstat(link_path) + os.unlink(link_path) + except FileNotFoundError: + pass + + os.symlink(target_path, link_path) + return True + + @staticmethod + def atomic_copy(src: str, dst: str) -> None: + """ + Copy a file to dst atomically (write to temp file and rename). + + Args: + src: Source file path. + dst: Destination file path. + """ + dst_dir = os.path.dirname(dst) + _PathOps.ensure_dir(dst_dir) + + fd, tmp_path = tempfile.mkstemp(prefix=".ansible-binary-", dir=dst_dir) + os.close(fd) + try: + shutil.copyfile(src, tmp_path) + os.replace(tmp_path, dst) + finally: + try: + os.unlink(tmp_path) + except FileNotFoundError: + pass + + +class _Identity: + """User/group resolution helpers.""" + + @staticmethod + def resolve_uid(owner: Optional[str]) -> Optional[int]: + """ + Resolve a user name or uid string to a numeric uid. + + Args: + owner: User name or numeric uid as string. + + Returns: + Numeric uid, or None if owner is None. + """ + if owner is None: + return None + if owner.isdigit(): + return int(owner) + return pwd.getpwnam(owner).pw_uid + + @staticmethod + def resolve_gid(group: Optional[str]) -> Optional[int]: + """ + Resolve a group name or gid string to a numeric gid. + + Args: + group: Group name or numeric gid as string. + + Returns: + Numeric gid, or None if group is None. + """ + if group is None: + return None + if group.isdigit(): + return int(group) + return grp.getgrnam(group).gr_gid + + +@dataclass(frozen=True) +class _CapsValue: + """Normalized representation of Linux file capabilities.""" + + value: str + + @staticmethod + def normalize(raw: str) -> "_CapsValue": + """ + Normalize capability strings so that setcap-style and getcap-style + representations compare equal. 
+ + Examples: + - "cap_net_raw+ep" -> "cap_net_raw=ep" + - "cap_net_raw=pe" -> "cap_net_raw=ep" + - "cap_a+e, cap_b=ip" -> "cap_a=e,cap_b=ip" + """ + s = (raw or "").strip() + if not s: + return _CapsValue("") + + entries: List[str] = [] + for part in s.split(","): + p = part.strip() + if not p: + continue + + # Remove internal whitespace. + p = " ".join(p.split()) + + m = _CAP_ENTRY_RE.match(p) + if not m: + # Unknown format: keep as-is (but trimmed). + entries.append(p) + continue + + cap_name, _, flags = m.group(1), m.group(2), m.group(3) + flags_norm = "".join(sorted(flags)) + # Canonical operator is '=' (getcap output style). + entries.append(f"{cap_name}={flags_norm}") + + entries.sort() + return _CapsValue(",".join(entries)) + + +class _Caps: + """ + Linux file capabilities helper with idempotent detection via getcap/setcap. + + The helper normalizes both desired and current values to avoid false positives, + e.g. comparing 'cap_net_raw+ep' (setcap style) and 'cap_net_raw=ep' (getcap style). + """ + + def __init__(self, module: AnsibleModule) -> None: + self._module = module + + def _parse_getcap_output(self, path: str, out: str) -> _CapsValue: + """ + Parse getcap output for a single path. + + Supported formats: + - "/path cap_net_raw=ep" + - "/path = cap_net_raw=ep" + - "/path cap_net_raw+ep" (rare, but normalize handles it) + """ + text = (out or "").strip() + if not text: + return _CapsValue("") + + for line in text.splitlines(): + line = line.strip() + if not line: + continue + + # Example lines: + # /usr/bin/ping = cap_net_raw+ep + # /usr/bin/ping cap_net_raw=ep + if line.startswith(path): + _path_len = len(path) + rest = line[_path_len:].strip() + + # Strip optional leading '=' or split form. + if rest.startswith("="): + rest = rest[1:].strip() + + tokens = rest.split() + if tokens and tokens[0] == "=": + rest = " ".join(tokens[1:]).strip() + + return _CapsValue.normalize(rest) + + # Fallback: if getcap returned a single line but path formatting differs. + first = text.splitlines()[0].strip() + tokens = first.split() + if len(tokens) >= 2: + if tokens[1] == "=" and len(tokens) >= 3: + return _CapsValue.normalize(" ".join(tokens[2:])) + return _CapsValue.normalize(" ".join(tokens[1:])) + + return _CapsValue("") + + def get_current(self, path: str) -> Optional[_CapsValue]: + """ + Get the current capability set for a file. + + Returns: + - _CapsValue("") for no capabilities + - _CapsValue("cap_xxx=ep") for set capabilities + - None if getcap is missing (cannot do idempotent checks) + """ + rc, out, err = self._module.run_command(["getcap", path]) + if rc == 127: + return None + if rc != 0: + msg = (err or "").strip() + # No capabilities can be signaled via non-zero return with empty output. + if msg and "No such file" in msg: + self._module.fail_json(msg=f"getcap failed: {msg}", path=path) + return _CapsValue("") + return self._parse_getcap_output(path, out) + + def ensure(self, path: str, desired: str) -> bool: + """ + Ensure the desired capability is present on 'path'. + + Args: + path: File path. + desired: Capability string (setcap/getcap style), e.g. "cap_net_raw+ep". + + Returns: + True if a change was applied, otherwise False. + + Raises: + AnsibleModule.fail_json on errors or if getcap is missing. + """ + desired_norm = _CapsValue.normalize(desired) + current = self.get_current(path) + + if current is None: + self._module.fail_json( + msg="getcap is required for idempotent capability management", + hint="Install libcap tools (e.g. 
Debian/Ubuntu: 'libcap2-bin')", + path=path, + desired=desired_norm.value, + ) + + if current.value == desired_norm.value: + return False + + # setcap accepts both '+ep' and '=ep', but we pass canonical '=...'. + rc, out, err = self._module.run_command(["setcap", desired_norm.value, path]) + if rc != 0: + msg = (err or out or "").strip() or "setcap failed" + self._module.fail_json(msg=msg, path=path, capability=desired_norm.value) + + verified = self.get_current(path) + if verified is None or verified.value != desired_norm.value: + self._module.fail_json( + msg="capability verification failed after setcap", + path=path, + desired=desired_norm.value, + current=(verified.value if verified else None), + ) + + return True + + +class BinaryDeploy: + """ + Deployment engine used by Ansible modules. + + The instance consumes module parameters, plans whether an update is necessary, + and then applies changes idempotently: + - copy (optional) + - permissions and ownership + - capabilities (optional) + - activation symlink + """ + + def __init__(self, module: AnsibleModule) -> None: + self._module = module + self._module.log("BinaryDeploy::__init__()") + self._caps = _Caps(module) + + @staticmethod + def _parse_mode(mode: Any) -> int: + """ + Parse a file mode parameter into an int. + + Args: + mode: Octal mode as string (e.g. "0755") or int. + + Returns: + Parsed mode as int. + """ + if isinstance(mode, int): + return mode + s = str(mode).strip() + return int(s, 8) + + def _resolve_uid_gid( + self, owner: Optional[str], group: Optional[str] + ) -> Tuple[Optional[int], Optional[int]]: + """ + Resolve owner/group into numeric uid/gid. + + Raises: + ValueError: If the user or group does not exist. + """ + try: + return _Identity.resolve_uid(owner), _Identity.resolve_gid(group) + except KeyError as exc: + raise ValueError(str(exc)) from exc + + def _parse_items(self, raw: List[Dict[str, Any]]) -> List[BinaryItem]: + """ + Parse module 'items' parameter into BinaryItem objects. + + Each raw item supports: + - name (required) + - src (optional, defaults to name) + - link_name (optional, defaults to name) + - capability (optional) + """ + self._module.log(f"BinaryDeploy::_parse_items(raw: {raw})") + + items: List[BinaryItem] = [] + for it in raw: + name = str(it["name"]) + src = str(it.get("src") or name) + link_name = str(it.get("link_name") or name) + cap = it.get("capability") + items.append( + BinaryItem( + name=name, + src=src, + link_name=link_name, + capability=str(cap) if cap else None, + ) + ) + return items + + def _plan( + self, + *, + install_dir: str, + link_dir: str, + src_dir: Optional[str], + do_copy: bool, + items: List[BinaryItem], + activation_name: str, + owner: Optional[str], + group: Optional[str], + mode: int, + ) -> Tuple[bool, bool, Dict[str, Dict[str, bool]]]: + """ + Build an idempotent plan for all items. 
+ + Returns: + Tuple of: + - activated: whether the activation symlink points into install_dir + - needs_update: whether any operation would be required + - per_item_plan: dict(item.name -> {copy, perms, cap, link}) + """ + self._module.log( + "BinaryDeploy::_plan(" + f"install_dir: {install_dir}, link_dir: {link_dir}, src_dir: {src_dir}, " + f"do_copy: {do_copy}, items: {items}, activation_name: {activation_name}, " + f"owner: {owner}, group: {group}, mode: {mode})" + ) + + activation = next( + ( + i + for i in items + if i.name == activation_name or i.link_name == activation_name + ), + items[0], + ) + activation_target = os.path.join(install_dir, activation.name) + activation_link = os.path.join(link_dir, activation.link_name) + activated = os.path.isfile(activation_target) and _PathOps.is_symlink_to( + activation_link, activation_target + ) + + try: + uid, gid = self._resolve_uid_gid(owner, group) + except ValueError as exc: + self._module.fail_json(msg=str(exc)) + + needs_update = False + per_item: Dict[str, Dict[str, bool]] = {} + + for item in items: + dst = os.path.join(install_dir, item.name) + lnk = os.path.join(link_dir, item.link_name) + src = os.path.join(src_dir, item.src) if (do_copy and src_dir) else None + + item_plan: Dict[str, bool] = { + "copy": False, + "perms": False, + "cap": False, + "link": False, + } + + if do_copy: + if src is None: + self._module.fail_json( + msg="src_dir is required when copy=true", item=item.name + ) + if not os.path.isfile(src): + self._module.fail_json( + msg="source binary missing on remote host", + src=src, + item=item.name, + ) + if not os.path.exists(dst) or not _PathOps.files_equal(src, dst): + item_plan["copy"] = True + + # perms/ownership (if file missing, perms will be set later) + try: + st = os.stat(dst) + if (st.st_mode & 0o7777) != mode: + item_plan["perms"] = True + if uid is not None and st.st_uid != uid: + item_plan["perms"] = True + if gid is not None and st.st_gid != gid: + item_plan["perms"] = True + except FileNotFoundError: + item_plan["perms"] = True + + if item.capability: + desired_norm = _CapsValue.normalize(item.capability) + + if not os.path.exists(dst): + item_plan["cap"] = True + else: + current = self._caps.get_current(dst) + if current is None: + # getcap missing -> cannot validate, apply will fail in ensure(). + item_plan["cap"] = True + elif current.value != desired_norm.value: + item_plan["cap"] = True + + if not _PathOps.is_symlink_to(lnk, dst): + item_plan["link"] = True + + if any(item_plan.values()): + needs_update = True + per_item[item.name] = item_plan + + return activated, needs_update, per_item + + def run(self) -> None: + """ + Execute the deployment based on module parameters. + + Module parameters (expected): + install_dir (str), link_dir (str), src_dir (optional str), copy (bool), + items (list[dict]), activation_name (optional str), + owner (optional str), group (optional str), mode (str), + cleanup_on_failure (bool), check_only (bool). 
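+
+        Example (illustrative parameter set; all paths and names are hypothetical):
+
+            install_dir: /opt/example/1.2.3
+            link_dir: /usr/local/bin
+            src_dir: /var/tmp/staging
+            copy: true
+            mode: "0755"
+            items:
+              - name: example
+                capability: cap_net_bind_service=ep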
+ """ + self._module.log("BinaryDeploy::run()") + + p = self._module.params + + install_dir: str = p["install_dir"] + link_dir: str = p["link_dir"] + src_dir: Optional[str] = p.get("src_dir") + do_copy: bool = bool(p["copy"]) + cleanup_on_failure: bool = bool(p["cleanup_on_failure"]) + activation_name: str = str(p.get("activation_name") or "") + + owner: Optional[str] = p.get("owner") + group: Optional[str] = p.get("group") + mode_int = self._parse_mode(p["mode"]) + + items = self._parse_items(p["items"]) + if not items: + self._module.fail_json(msg="items must not be empty") + + if not activation_name: + activation_name = items[0].name + + check_only: bool = bool(p["check_only"]) or bool(self._module.check_mode) + + activated, needs_update, plan = self._plan( + install_dir=install_dir, + link_dir=link_dir, + src_dir=src_dir, + do_copy=do_copy, + items=items, + activation_name=activation_name, + owner=owner, + group=group, + mode=mode_int, + ) + + if check_only: + self._module.exit_json( + changed=False, activated=activated, needs_update=needs_update, plan=plan + ) + + changed = False + details: Dict[str, Dict[str, bool]] = {} + + try: + if _PathOps.ensure_dir(install_dir): + changed = True + + uid, gid = self._resolve_uid_gid(owner, group) + + for item in items: + src = os.path.join(src_dir, item.src) if (do_copy and src_dir) else None + dst = os.path.join(install_dir, item.name) + lnk = os.path.join(link_dir, item.link_name) + + item_changed: Dict[str, bool] = { + "copied": False, + "perms": False, + "cap": False, + "link": False, + } + + if do_copy: + if src is None: + self._module.fail_json( + msg="src_dir is required when copy=true", item=item.name + ) + if not os.path.exists(dst) or not _PathOps.files_equal(src, dst): + _PathOps.atomic_copy(src, dst) + item_changed["copied"] = True + changed = True + + if not os.path.exists(dst): + self._module.fail_json( + msg="destination binary missing in install_dir", + dst=dst, + hint="In controller-local mode this indicates the transfer/copy stage did not create the file.", + item=item.name, + ) + + st = os.stat(dst) + + if (st.st_mode & 0o7777) != mode_int: + os.chmod(dst, mode_int) + item_changed["perms"] = True + changed = True + + if uid is not None or gid is not None: + new_uid = uid if uid is not None else st.st_uid + new_gid = gid if gid is not None else st.st_gid + if new_uid != st.st_uid or new_gid != st.st_gid: + os.chown(dst, new_uid, new_gid) + item_changed["perms"] = True + changed = True + + if item.capability: + if self._caps.ensure(dst, item.capability): + item_changed["cap"] = True + changed = True + + if _PathOps.ensure_symlink(lnk, dst): + item_changed["link"] = True + changed = True + + details[item.name] = item_changed + + except Exception as exc: + if cleanup_on_failure: + try: + _PathOps.safe_rmtree(install_dir) + except Exception: + pass + self._module.fail_json(msg=str(exc), exception=repr(exc)) + + activation = next( + ( + i + for i in items + if i.name == activation_name or i.link_name == activation_name + ), + items[0], + ) + activation_target = os.path.join(install_dir, activation.name) + activation_link = os.path.join(link_dir, activation.link_name) + activated = os.path.isfile(activation_target) and _PathOps.is_symlink_to( + activation_link, activation_target + ) + + self._module.exit_json( + changed=changed, activated=activated, needs_update=False, details=details + ) diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/core/plugins/modules/apt_sources.py 
b/ansible/playbooks/collections/ansible_collections/bodsch/core/plugins/modules/apt_sources.py new file mode 100644 index 0000000..1a3f868 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/core/plugins/modules/apt_sources.py @@ -0,0 +1,463 @@ +#!/usr/bin/python3 +# -*- coding: utf-8 -*- + +# (c) 2020-2025, Bodo Schulz +# Apache-2.0 (see LICENSE or https://opensource.org/license/apache-2-0) +# SPDX-License-Identifier: Apache-2.0 + +from __future__ import absolute_import, division, print_function + +from typing import Any, Dict, List, Mapping, Optional, Protocol, Sequence, Tuple + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.bodsch.core.plugins.module_utils.deb822_repo import ( + Deb822RepoManager, + Deb822RepoSpec, +) + +# --------------------------------------------------------------------------------------- + +DOCUMENTATION = r""" +module: apt_sources +version_added: '2.9.0' +author: "Bodo Schulz (@bodsch) " + +short_description: Manage APT deb822 (.sources) repositories with repo-specific keyrings. +description: + - Creates/removes deb822 formatted APT repository files in /etc/apt/sources.list.d. + - Supports importing repo-specific signing keys either via downloading a key file (with optional dearmor/validation) + or by installing a keyring .deb package (e.g. Sury keyring). + - Optionally runs apt-get update when changes occur. +options: + name: + description: Logical name of the repository (used for defaults like filename). + type: str + required: true + state: + description: Whether the repository should be present or absent. + type: str + choices: [present, absent] + default: present + dest: + description: Full path of the .sources file. If omitted, computed from filename/name. + type: str + filename: + description: Filename under /etc/apt/sources.list.d/ (must end with .sources). + type: str + types: + description: Repository types (deb, deb-src). + type: list + elements: str + default: ["deb"] + uris: + description: Base URIs of the repository. + type: list + elements: str + required: true + suites: + description: Suites / distributions (e.g. bookworm). If suite ends with '/', Components must be omitted. + type: list + elements: str + required: true + components: + description: Components (e.g. main, contrib). Required unless suite is a path ending in '/'. + type: list + elements: str + default: [] + architectures: + description: Restrict repository to architectures (e.g. amd64). + type: list + elements: str + default: [] + enabled: + description: Whether the source is enabled (Enabled: yes/no). + type: bool + default: true + signed_by: + description: Absolute path to a keyring file used as Signed-By. If omitted and key.method is download/deb, derived from key config. + type: str + key: + description: Key import configuration. + type: dict + suboptions: + method: + description: How to manage keys. + type: str + choices: [none, download, deb] + default: none + url: + description: URL to download the key (download) or keyring .deb (deb). + type: str + dest: + description: Destination keyring path for method=download. + type: str + checksum: + description: Optional SHA256 checksum of downloaded content (raw download). Enables strict idempotence and integrity checks. + type: str + dearmor: + description: If true and downloaded key is ASCII armored, dearmor via gpg to a binary keyring. + type: bool + default: true + validate: + description: If true, validate the final key file via gpg --show-keys. 
+ type: bool + default: true + mode: + description: File mode for key files / deb cache files. + type: str + default: "0644" + deb_cache_path: + description: Destination path for downloaded .deb when method=deb. + type: str + deb_keyring_path: + description: Explicit keyring path provided by that .deb (if auto-detection is not possible). + type: str + update_cache: + description: Run apt-get update if repo/key changed. + type: bool + default: false +""" + +EXAMPLES = r""" +- name: Add Sury repo via keyring deb package (Debian) + bodsch.core.apt_sources: + name: debsuryorg + uris: ["https://packages.sury.org/php/"] + suites: ["{{ ansible_facts.distribution_release }}"] + components: ["main"] + key: + method: deb + url: "https://packages.sury.org/debsuryorg-archive-keyring.deb" + deb_cache_path: "/var/cache/apt/debsuryorg-archive-keyring.deb" + # optional if auto-detect fails: + # deb_keyring_path: "/usr/share/keyrings/debsuryorg-archive-keyring.gpg" + update_cache: true + become: true + +- name: Add CZ.NIC repo via key download (bookworm) + bodsch.core.apt_sources: + name: cznic-labs-knot-resolver + uris: ["https://pkg.labs.nic.cz/knot-resolver"] + suites: ["bookworm"] + components: ["main"] + key: + method: download + url: "https://pkg.labs.nic.cz/gpg" + dest: "/usr/share/keyrings/cznic-labs-pkg.gpg" + dearmor: true + validate: true + update_cache: true + become: true +""" + +RETURN = r""" +repo_path: + description: Path to the managed .sources file. + returned: always + type: str +key_path: + description: Path to the keyring file used as Signed-By (if managed/derived). + returned: when key method used or signed_by provided + type: str +changed: + description: Whether any change was made. + returned: always + type: bool +messages: + description: Informational messages about performed actions. + returned: always + type: list + elements: str +""" + +# --------------------------------------------------------------------------------------- + + +class AnsibleModuleLike(Protocol): + """Minimal typing surface for the Ansible module used by this helper.""" + + params: Mapping[str, Any] + + def get_bin_path(self, arg: str, required: bool = False) -> Optional[str]: + """ + Return the absolute path to an executable. + + Args: + arg: Program name to look up in PATH. + required: If True, the module typically fails when the binary is not found. + + Returns: + Absolute path to the executable, or None if not found and not required. + """ + ... + + def run_command( + self, args: Sequence[str], check_rc: bool = True + ) -> Tuple[int, str, str]: + """ + Execute a command on the target host. + + Args: + args: Argument vector (already split). + check_rc: If True, non-zero return codes should be treated as errors. + + Returns: + Tuple ``(rc, stdout, stderr)``. + """ + ... + + def log(self, msg: str = "", **kwargs: Any) -> None: + """ + Write a log/debug message via the Ansible module. + + Args: + msg: Message text. + **kwargs: Additional structured log fields (module dependent). + """ + ... + + +class AptSources: + """ + Manage APT deb822 (.sources) repositories with repo-specific keyrings. + + This class is the orchestration layer used by the module entrypoint. 
It delegates the + actual file/key handling to :class:`Deb822RepoManager` and is responsible for: + - computing the target .sources path + - ensuring/removing repository key material (method=download or method=deb) + - ensuring/removing the repository file + - optionally running ``apt-get update`` when changes occur + """ + + module = None + + def __init__(self, module: AnsibleModuleLike): + """ + Initialize the handler and snapshot module parameters. + + Args: + module: An AnsibleModule-like object providing ``params``, logging and command execution. + """ + self.module = module + + self.module.log("AptSources::__init__()") + + self.name = module.params.get("name") + self.state = module.params.get("state") + self.destination = module.params.get("dest") + self.filename = module.params.get("filename") + self.types = module.params.get("types") + self.uris = module.params.get("uris") + self.suites = module.params.get("suites") + self.components = module.params.get("components") + self.architectures = module.params.get("architectures") + self.enabled = module.params.get("enabled") + self.update_cache = module.params.get("update_cache") + self.signed_by = module.params.get("signed_by") + self.keys = module.params.get("key") + + self.option_method = self.keys.get("method") + self.option_url = self.keys.get("url") + self.option_dest = self.keys.get("dest") + self.option_checksum = self.keys.get("checksum") + self.option_dearmor = self.keys.get("dearmor") + self.option_validate = self.keys.get("validate") + self.option_mode = self.keys.get("mode") + self.option_deb_cache_path = self.keys.get("deb_cache_path") + self.option_deb_keyring_path = self.keys.get("deb_keyring_path") + + def run(self) -> Dict[str, Any]: + """ + Apply the requested repository state. + + For ``state=present`` the method ensures the signing key (if configured) and then writes the + deb822 repository file. For ``state=absent`` it removes the repository file and any managed + key material. + + Returns: + A result dictionary intended for ``module.exit_json()``, containing: + + - ``changed``: Whether any managed resource changed. + - ``repo_path``: Path to the managed ``.sources`` file. + - ``key_path``: Path to the keyring file used for ``Signed-By`` (if any). + - ``messages``: Informational messages describing performed actions. + + Note: + When ``state=absent`` this method exits the module early via ``module.exit_json()``. 
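+
+        Example (illustrative return value for ``state=present``; paths are hypothetical):
+
+            {
+                "changed": True,
+                "repo_path": "/etc/apt/sources.list.d/example.sources",
+                "key_path": "/usr/share/keyrings/example.gpg",
+                "messages": ["updated repo file: /etc/apt/sources.list.d/example.sources"],
+            }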
+ """ + self.module.log("AptSources::run()") + + # self.module.log(f" - update_cache: {self.update_cache}") + + mng = Deb822RepoManager(self.module) + + repo_path = self._ensure_sources_path( + mng, self.name, self.destination, self.filename + ) + + changed = False + messages: List[str] = [] + + if self.state == "absent": + key_cfg: Dict[str, Any] = self.keys or {"method": "none"} + + if mng.remove_file(path=repo_path, check_mode=bool(self.module.check_mode)): + changed = True + messages.append(f"removed repo file: {repo_path}") + + # remove managed key material as well + key_res = mng.remove_key( + key_cfg=key_cfg, + signed_by=self.signed_by, + check_mode=bool(self.module.check_mode), + ) + if key_res.messages: + messages.extend(list(key_res.messages)) + if key_res.changed: + changed = True + + self.module.exit_json( + changed=changed, + repo_path=repo_path, + key_path=(self.signed_by or key_res.key_path), + messages=messages, + ) + + # present + key_cfg: Dict[str, Any] = self.keys or {"method": "none"} + key_res = mng.ensure_key(key_cfg=key_cfg, check_mode=self.module.check_mode) + + # self.module.log(f" - key_res : {key_res}") + + if key_res.messages: + messages.extend(list(key_res.messages)) + + if key_res.changed: + changed = True + + signed_by: Optional[str] = self.signed_by or key_res.key_path + + spec = Deb822RepoSpec( + types=self.types, + uris=self.uris, + suites=self.suites, + components=self.components, + architectures=self.architectures, + enabled=self.enabled, + signed_by=signed_by, + ) + + repo_mode = 0o644 + repo_res = mng.ensure_repo_file( + repo_path=repo_path, + spec=spec, + mode=repo_mode, + check_mode=self.module.check_mode, + ) + + # self.module.log(f" - repo_res : {repo_res}") + + if repo_res.changed: + changed = True + messages.append(f"updated repo file: {repo_path}") + + # -------------------------------------------------------------------------- + # Optionally update cache only if something changed + if self.update_cache and (key_res.changed or repo_res.changed): + _, out = mng.apt_update(check_mode=self.module.check_mode) + messages.append("apt-get update executed") + if out: + # keep it short to avoid noisy output + messages.append("apt-get update: ok") + + return dict( + changed=changed, + repo_path=repo_path, + key_path=signed_by, + messages=messages, + ) + + def _ensure_sources_path( + self, + manager: Deb822RepoManager, + name: str, + dest: Optional[str], + filename: Optional[str], + ) -> str: + """ + Determine the destination path of the ``.sources`` file. + + If ``dest`` is provided it is returned unchanged. Otherwise a filename is derived from + ``filename`` or ``name``, validated, and placed under ``/etc/apt/sources.list.d/``. + + Args: + manager: Repo manager used for validation. + name: Logical repository name. + dest: Explicit destination path (optional). + filename: Filename (optional, must end in ``.sources``). + + Returns: + The absolute path of the repository file to manage. + """ + + if dest: + return dest + + fn = filename or f"{name}.sources" + # validate filename rules and suffix + manager.validate_filename(fn) + return f"/etc/apt/sources.list.d/{fn}" + + +def main() -> None: + """ + Entrypoint for the Ansible module. + + Parses module arguments, executes the handler and returns the result via ``exit_json``. 
+ """ + args = dict( + name=dict(type="str", required=True), + state=dict(type="str", choices=["present", "absent"], default="present"), + dest=dict(type="str", required=False), + filename=dict(type="str", required=False), + types=dict(type="list", elements="str", default=["deb"]), + uris=dict(type="list", elements="str", required=True), + suites=dict(type="list", elements="str", required=True), + components=dict(type="list", elements="str", default=[]), + architectures=dict(type="list", elements="str", default=[]), + enabled=dict(type="bool", default=True), + signed_by=dict(type="str", required=False), + key=dict( + type="dict", + required=False, + options=dict( + method=dict( + type="str", choices=["none", "download", "deb"], default="none" + ), + url=dict(type="str", required=False), + dest=dict(type="str", required=False), + checksum=dict(type="str", required=False), + dearmor=dict(type="bool", default=True), + validate=dict(type="bool", default=True), + mode=dict(type="str", default="0644"), + deb_cache_path=dict(type="str", required=False), + deb_keyring_path=dict(type="str", required=False), + ), + ), + update_cache=dict(type="bool", default=False), + ) + module = AnsibleModule( + argument_spec=args, + supports_check_mode=False, + ) + + handler = AptSources(module) + result = handler.run() + + module.log(msg=f"= result: {result}") + + module.exit_json(**result) + + +if __name__ == "__main__": + main() diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/core/plugins/modules/aur.py b/ansible/playbooks/collections/ansible_collections/bodsch/core/plugins/modules/aur.py new file mode 100644 index 0000000..090ee4c --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/core/plugins/modules/aur.py @@ -0,0 +1,959 @@ +#!/usr/bin/python3 +# -*- coding: utf-8 -*- + +# (c) 2020-2023, Bodo Schulz +# Apache-2.0 (see LICENSE or https://opensource.org/license/apache-2-0) +# SPDX-License-Identifier: Apache-2.0 + +from __future__ import absolute_import, print_function + +import json +import os +import re +import tarfile +import urllib.parse +from contextlib import contextmanager +from pathlib import Path +from typing import ( + Any, + Dict, + Iterator, + List, + Mapping, + Optional, + Protocol, + Sequence, + Tuple, +) + +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.urls import open_url + +__metaclass__ = type + +# --------------------------------------------------------------------------------------- + +DOCUMENTATION = r""" +--- +module: aur +short_description: Install or remove Arch Linux packages from the AUR +version_added: "0.9.0" +author: + - Bodo Schulz (@bodsch) + +description: + - Installs packages from the Arch User Repository (AUR) by building them with C(makepkg). + - Recommended: install from a Git repository URL (cloned into C($HOME/), then updated via C(git pull)). + - Fallback: if C(repository) is omitted, the module queries the AUR RPC API and downloads/extracts the source tarball to build it. + - Ensures idempotency by comparing the currently installed package version with the upstream version (prefers C(.SRCINFO), + - falls back to parsing C(PKGBUILD)); pkgrel-only updates trigger a rebuild. + +options: + state: + description: + - Whether the package should be installed or removed. + type: str + default: present + choices: [present, absent] + + name: + description: + - Package name to manage (pacman package name / AUR package name). 
+ type: str + required: true + + repository: + description: + - Git repository URL that contains the PKGBUILD (usually under U(https://aur.archlinux.org)). + - If omitted, the module uses the AUR RPC API to download the source tarball. + type: str + required: false + + extra_args: + description: + - Additional arguments passed to C(makepkg) (for example C(--skippgpcheck), C(--nocheck)). + type: list + elements: str + required: false + version_added: "2.2.4" + +notes: + - Check mode is not supported. + - The module is expected to run as a non-root build user (e.g. via C(become_user: aur_builder)). + - The build user must be able to install packages non-interactively (makepkg/pacman), and to remove + - packages this module uses C(sudo pacman -R...) when C(state=absent). + - Network access to AUR is required for repository cloning/pulling or tarball download. + +requirements: + - pacman + - git (when C(repository) is used) + - makepkg (base-devel) + - sudo (for C(state=absent) removal path) +""" + +EXAMPLES = r""" +- name: Install package via AUR repository (recommended) + become: true + become_user: aur_builder + bodsch.core.aur: + state: present + name: icinga2 + repository: https://aur.archlinux.org/icinga2.git + +- name: Install package via AUR repository with makepkg extra arguments + become: true + become_user: aur_builder + bodsch.core.aur: + state: present + name: php-pear + repository: https://aur.archlinux.org/php-pear.git + extra_args: + - --skippgpcheck + +- name: Install package via AUR tarball download (repository omitted) + become: true + become_user: aur_builder + bodsch.core.aur: + state: present + name: yay + +- name: Remove package + become: true + bodsch.core.aur: + state: absent + name: yay +""" + +RETURN = r""" +changed: + description: + - Whether the module made changes. + - C(true) when a package was installed/rebuilt/removed, otherwise C(false). + returned: always + type: bool + +failed: + description: + - Indicates whether the module failed. + returned: always + type: bool + +msg: + description: + - Human readable status or error message. + - For idempotent runs, typically reports that the version is already installed. + returned: always + type: str + sample: + - "Package yay successfully installed." + - "Package yay successfully removed." + - "Version 1.2.3-1 is already installed." +""" + +# --------------------------------------------------------------------------------------- + + +class AnsibleModuleLike(Protocol): + """Minimal typing surface for the Ansible module used by this helper.""" + + params: Mapping[str, Any] + + def get_bin_path(self, arg: str, required: bool = False) -> Optional[str]: + """ + Return the absolute path to an executable. + + Args: + arg: Program name to look up in PATH. + required: If True, the module typically fails when the binary is not found. + + Returns: + Absolute path to the executable, or None if not found and not required. + """ + ... + + def run_command( + self, args: Sequence[str], check_rc: bool = True + ) -> Tuple[int, str, str]: + """ + Execute a command on the target host. + + Args: + args: Argument vector (already split). + check_rc: If True, non-zero return codes should be treated as errors. + + Returns: + Tuple ``(rc, stdout, stderr)``. + """ + ... + + def log(self, msg: str = "", **kwargs: Any) -> None: + """ + Write a log/debug message via the Ansible module. + + Args: + msg: Message text. + **kwargs: Additional structured log fields (module dependent). + """ + ... 
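+
+# Illustrative note (hypothetical values): `pacman -Q yay` prints "yay 12.3.5-1".
+# _PACMAN_Q_RE below captures the package name and the full version; the
+# comparable version key used for idempotency checks strips the pkgrel suffix
+# via full_version.rsplit("-", 1)[0], yielding "12.3.5".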
+
+
+_PACMAN_Q_RE = re.compile(r"^(?P<name>\S+)\s+(?P<ver>\S+)\s*$", re.MULTILINE)
+_PKGBUILD_PKGVER_RE = re.compile(r"^pkgver=(?P<version>.*)\s*$", re.MULTILINE)
+_PKGBUILD_EPOCH_RE = re.compile(r"^epoch=(?P<epoch>.*)\s*$", re.MULTILINE)
+_SRCINFO_PKGVER_RE = re.compile(r"^\s*pkgver\s*=\s*(?P<version>.*)\s*$", re.MULTILINE)
+_SRCINFO_EPOCH_RE = re.compile(r"^\s*epoch\s*=\s*(?P<epoch>.*)\s*$", re.MULTILINE)
+_PKGBUILD_PKGREL_RE = re.compile(r"^pkgrel=(?P<pkgrel>.*)\s*$", re.MULTILINE)
+_SRCINFO_PKGREL_RE = re.compile(r"^\s*pkgrel\s*=\s*(?P<pkgrel>.*)\s*$", re.MULTILINE)
+
+
+class Aur:
+    """
+    Implements AUR package installation/removal.
+
+    Notes:
+      - The module is expected to run as a non-root user that is allowed to build packages
+        via makepkg (e.g. a dedicated 'aur_builder' user).
+      - Repository-based installation is recommended. The tarball-based installation path
+        exists as a fallback when no repository URL is provided.
+    """
+
+    module = None
+
+    def __init__(self, module: AnsibleModuleLike):
+        """
+        Initialize helper state from Ansible module parameters.
+        """
+        self.module = module
+        self.module.log("Aur::__init__()")
+
+        self.state: str = module.params.get("state")
+        self.name: str = module.params.get("name")
+        self.repository: Optional[str] = module.params.get("repository")
+        self.extra_args: Optional[List[str]] = module.params.get("extra_args")
+
+        # Cached state for idempotency decisions during this module run.
+        self._installed_version: Optional[str] = None
+        self._installed_version_full: Optional[str] = None
+
+        self.pacman_binary: Optional[str] = self.module.get_bin_path("pacman", True)
+        self.git_binary: Optional[str] = self.module.get_bin_path("git", True)
+
+    def run(self) -> Dict[str, Any]:
+        """
+        Execute the requested state transition.
+
+        Returns:
+            A result dictionary consumable by Ansible's exit_json().
+        """
+        self.module.log("Aur::run()")
+
+        installed, installed_version = self.package_installed(self.name)
+
+        # Store installed version for use by other code paths (e.g. AUR tarball installs).
+        self._installed_version = installed_version
+        self._installed_version_full = (
+            self._package_installed_full_version(self.name) if installed else None
+        )
+
+        if self._installed_version_full:
+            self.module.log(
+                msg=f" {self.name} full version: {self._installed_version_full}"
+            )
+
+        self.module.log(
+            msg=f" {self.name} is installed: {installed} / version: {installed_version}"
+        )
+
+        if installed and self.state == "absent":
+            sudo_binary = self.module.get_bin_path("sudo", True)
+
+            args: List[str] = [
+                sudo_binary,
+                self.pacman_binary or "pacman",
+                "--remove",
+                "--cascade",
+                "--recursive",
+                "--noconfirm",
+                self.name,
+            ]
+
+            rc, _, err = self._exec(args)
+
+            if rc == 0:
+                return dict(
+                    changed=True, msg=f"Package {self.name} successfully removed."
+                )
+            return dict(
+                failed=True,
+                changed=False,
+                msg=f"An error occurred while removing the package {self.name}: {err}",
+            )
+
+        if self.state == "present":
+            if self.repository:
+                rc, out, err, changed = self.install_from_repository(installed_version)
+
+                if rc == 99:
+                    msg = out
+                    rc = 0
+                else:
+                    msg = f"Package {self.name} successfully installed."
+            else:
+                rc, out, err, changed = self.install_from_aur()
+                msg = (
+                    out
+                    if rc == 0 and out
+                    else f"Package {self.name} successfully installed."
+                )
+
+            if rc == 0:
+                return dict(failed=False, changed=changed, msg=msg)
+            return dict(failed=True, msg=err)
+
+        return dict(
+            failed=False,
+            changed=False,
+            msg="It's all right. Keep moving! There is nothing to see!",
+        )
+
+    def package_installed(self, package: str) -> Tuple[bool, Optional[str]]:
+        """
+        Determine whether a package is installed and return its version key (epoch+pkgver, without pkgrel).
+
+        Args:
+            package: Pacman package name to check.
+
+        Returns:
+            Tuple (installed, version_string)
+            - installed: True if pacman reports the package is installed.
+            - version_string: comparable version key "<epoch>:<pkgver>" without pkgrel
+              (epoch optional) or None if not installed.
+        """
+        self.module.log(f"Aur::package_installed(package: {package})")
+
+        args: List[str] = [
+            self.pacman_binary or "pacman",
+            "--query",
+            package,
+        ]
+
+        rc, out, _ = self._exec(args, check=False)
+
+        version_string: Optional[str] = None
+        if out:
+            m = _PACMAN_Q_RE.search(out)
+            if m and m.group("name") == package:
+                full_version = m.group("ver")
+                # pacman prints "<epoch>:<pkgver>-<pkgrel>" (epoch optional).
+                version_string = (
+                    full_version.rsplit("-", 1)[0]
+                    if "-" in full_version
+                    else full_version
+                )
+
+        return (rc == 0, version_string)
+
+    def _package_installed_full_version(self, package: str) -> Optional[str]:
+        """
+        Return the full pacman version string for an installed package.
+
+        The returned string includes both epoch and pkgrel if present, matching the output
+        format of "pacman -Q":
+        - "<epoch>:<pkgver>-<pkgrel>" (epoch optional)
+
+        Args:
+            package: Pacman package name to check.
+
+        Returns:
+            The full version string or None if the package is not installed.
+        """
+        self.module.log(f"Aur::_package_installed_full_version(package: {package})")
+
+        args: List[str] = [
+            self.pacman_binary or "pacman",
+            "--query",
+            package,
+        ]
+
+        rc, out, _ = self._exec(args, check=False)
+        if rc != 0 or not out:
+            return None
+
+        m = _PACMAN_Q_RE.search(out)
+        if m and m.group("name") == package:
+            return m.group("ver")
+
+        return None
+
+    def run_makepkg(self, directory: str) -> Tuple[int, str, str]:
+        """
+        Run makepkg to build and install a package.
+
+        Args:
+            directory: Directory containing the PKGBUILD.
+
+        Returns:
+            Tuple (rc, out, err) from the makepkg execution.
+        """
+        self.module.log(f"Aur::run_makepkg(directory: {directory})")
+        self.module.log(f" current dir : {os.getcwd()}")
+
+        if not os.path.exists(directory):
+            return (1, "", f"Directory '{directory}' does not exist.")
+
+        makepkg_binary = self.module.get_bin_path("makepkg", required=True) or "makepkg"
+
+        args: List[str] = [
+            makepkg_binary,
+            "--syncdeps",
+            "--install",
+            "--noconfirm",
+            "--needed",
+            "--clean",
+        ]
+
+        if self.extra_args:
+            args += self.extra_args
+
+        with self._pushd(directory):
+            rc, out, err = self._exec(args, check=False)
+
+        return (rc, out, err)
+
+    def install_from_aur(self) -> Tuple[int, str, str, bool]:
+        """
+        Install a package by downloading its source tarball from AUR.
+ + Returns: + Tuple (rc, out, err, changed) + """ + self.module.log("Aur::install_from_aur()") + + import tempfile + + try: + rpc = self._aur_rpc_info(self.name) + except Exception as exc: + return (1, "", f"Failed to query AUR RPC API: {exc}", False) + + if rpc.get("resultcount") != 1: + return (1, "", f"Package '{self.name}' not found on AUR.", False) + + result = rpc["results"][0] + url_path = result.get("URLPath") + if not url_path: + return (1, "", f"AUR did not return a source URL for '{self.name}'.", False) + + tar_url = f"https://aur.archlinux.org/{url_path}" + self.module.log(f" tarball url {tar_url}") + + try: + f = open_url(tar_url) + except Exception as exc: + return (1, "", f"Failed to download AUR tarball: {exc}", False) + + try: + with tempfile.TemporaryDirectory() as tmpdir: + with tarfile.open(mode="r|*", fileobj=f) as tar: + self._safe_extract_stream(tar, tmpdir) + + build_dir = self._find_pkgbuild_dir(tmpdir) + if not build_dir: + return ( + 1, + "", + "Unable to locate PKGBUILD in extracted source tree.", + False, + ) + + upstream_version = self._read_upstream_version_key(build_dir) + upstream_full_version = self._read_upstream_full_version(build_dir) + + # Prefer comparing full versions (epoch:pkgver-pkgrel). This ensures pkgrel-only + # bumps trigger a rebuild, matching pacman's notion of a distinct package version. + if self._installed_version_full and upstream_full_version: + if self._installed_version_full == upstream_full_version: + return ( + 0, + f"Version {self._installed_version_full} is already installed.", + "", + False, + ) + elif self._installed_version and upstream_version: + if self._installed_version == upstream_version: + return ( + 0, + f"Version {self._installed_version} is already installed.", + "", + False, + ) + + rc, out, err = self.run_makepkg(build_dir) + except Exception as exc: + return (1, "", f"Failed to extract/build AUR source: {exc}", False) + + return (rc, out, err, rc == 0) + + def install_from_repository( + self, installed_version: Optional[str] + ) -> Tuple[int, str, str, bool]: + """ + Install a package from a Git repository (recommended). + + Args: + installed_version: Currently installed version key ':' without pkgrel (epoch optional) or None. + + Returns: + Tuple (rc, out, err, changed) + + Special return code: + - rc == 99 indicates "already installed / no change" (kept for backward compatibility). + """ + self.module.log( + f"Aur::install_from_repository(installed_version: {installed_version})" + ) + + base_dir = str(Path.home()) + repo_dir = os.path.join(base_dir, self.name) + + with self._pushd(base_dir): + if not os.path.exists(repo_dir): + rc, out, _err = self.git_clone(repository=self.repository or "") + if rc != 0: + return (rc, out, "Unable to run 'git clone'.", False) + + with self._pushd(repo_dir): + if os.path.exists(".git"): + rc, out, _err = self.git_pull() + if rc != 0: + return (rc, out, "Unable to run 'git pull'.", False) + + with self._pushd(repo_dir): + pkgbuild_file = "PKGBUILD" + if not os.path.exists(pkgbuild_file): + return (1, "", "Unable to find PKGBUILD.", False) + + upstream_version = self._read_upstream_version_key(os.getcwd()) + upstream_full_version = self._read_upstream_full_version(os.getcwd()) + + # Prefer comparing full versions (epoch:pkgver-pkgrel). This ensures pkgrel-only bumps + # trigger a rebuild even if pkgver stayed constant. 
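+            # Illustrative outcomes with hypothetical version strings:
+            #   installed "1:2.14.6-1", upstream "1:2.14.6-2"  -> rebuild (pkgrel bump)
+            #   installed "1:2.14.6-1", upstream "1:2.14.6-1"  -> rc 99, "already installed"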
+ if self._installed_version_full and upstream_full_version: + if self._installed_version_full == upstream_full_version: + return ( + 99, + f"Version {self._installed_version_full} is already installed.", + "", + False, + ) + elif installed_version and upstream_version: + if installed_version == upstream_version: + return ( + 99, + f"Version {installed_version} is already installed.", + "", + False, + ) + + self.module.log( + msg=f"upstream version: {upstream_full_version or upstream_version}" + ) + + rc, out, err = self.run_makepkg(repo_dir) + + return (rc, out, err, rc == 0) + + def git_clone(self, repository: str) -> Tuple[int, str, str]: + """ + Clone the repository into a local directory named after the package. + + Returns: + Tuple (rc, out, err) + """ + self.module.log(f"Aur::git_clone(repository: {repository})") + + if not self.git_binary: + return (1, "", "git not found") + + args: List[str] = [ + self.git_binary, + "clone", + repository, + self.name, + ] + + rc, out, err = self._exec(args) + return (rc, out, err) + + def git_pull(self) -> Tuple[int, str, str]: + """ + Update an existing Git repository. + + Returns: + Tuple (rc, out, err) + """ + self.module.log("Aur::git_pull()") + + if not self.git_binary: + return (1, "", "git not found") + + args: List[str] = [ + self.git_binary, + "pull", + ] + + rc, out, err = self._exec(args) + return (rc, out, err) + + def _exec(self, cmd: Sequence[str], check: bool = False) -> Tuple[int, str, str]: + """ + Execute a command via Ansible's run_command(). + + Args: + cmd: Argument vector (already split). + check: If True, fail the module on non-zero return code. + + Returns: + Tuple (rc, out, err) + """ + self.module.log(f"Aur::_exec(cmd: {cmd}, check: {check})") + + rc, out, err = self.module.run_command(list(cmd), check_rc=check) + + if rc != 0: + self.module.log(f" rc : '{rc}'") + self.module.log(f" out: '{out}'") + self.module.log(f" err: '{err}'") + + return (rc, out, err) + + # ------------------------------------------------------------------------- + # Internal helpers + # ------------------------------------------------------------------------- + + @contextmanager + def _pushd(self, directory: str) -> Iterator[None]: + """ + Temporarily change the current working directory. + + This avoids leaking state across module runs and improves correctness of + commands like makepkg, git clone, and git pull. + """ + self.module.log(f"Aur::_pushd(directory: {directory})") + + prev = os.getcwd() + os.chdir(directory) + try: + yield + finally: + os.chdir(prev) + + def _aur_rpc_info(self, package: str) -> Dict[str, Any]: + """ + Query the AUR RPC API for a package. + + Returns: + Parsed JSON dictionary. + """ + self.module.log(f"Aur::_aur_rpc_info(package: {package})") + + url = "https://aur.archlinux.org/rpc/?v=5&type=info&arg=" + urllib.parse.quote( + package + ) + self.module.log(f" rpc url {url}") + + resp = open_url(url) + return json.loads(resp.read().decode("utf-8")) + + def _safe_extract_stream(self, tar: tarfile.TarFile, target_dir: str) -> None: + """ + Safely extract a tar stream into target_dir. + + This prevents path traversal attacks by validating each member's target path + before extraction. 
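+
+        For example, a member named "../../etc/passwd" would resolve outside of
+        target_dir and is rejected with a ValueError instead of being extracted.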
+ """ + self.module.log( + f"Aur::_safe_extract_stream(tar: {tar}, target_dir: {target_dir})" + ) + + target_real = os.path.realpath(target_dir) + for member in tar: + member_path = os.path.realpath(os.path.join(target_dir, member.name)) + if ( + not member_path.startswith(target_real + os.sep) + and member_path != target_real + ): + raise ValueError(f"Blocked tar path traversal attempt: {member.name}") + tar.extract(member, target_dir) + + def _find_pkgbuild_dir(self, root_dir: str) -> Optional[str]: + """ + Locate the directory that contains the PKGBUILD file inside root_dir. + """ + self.module.log(f"Aur::_find_pkgbuild_dir(root_dir: {root_dir})") + + for dirpath, _, filenames in os.walk(root_dir): + if "PKGBUILD" in filenames: + return dirpath + return None + + def _read_pkgbuild_pkgver(self, pkgbuild_path: str) -> str: + """ + Read pkgver from a PKGBUILD file. + + Note: + This is a best-effort parse of 'pkgver='. It does not execute PKGBUILD code. + """ + self.module.log(f"Aur::_read_pkgbuild_pkgver(pkgbuild_path: {pkgbuild_path})") + + try: + with open(pkgbuild_path, "r", encoding="utf-8") as f: + data = f.read() + except OSError as exc: + self.module.log(msg=f"Unable to read PKGBUILD: {exc}") + return "" + + m = _PKGBUILD_PKGVER_RE.search(data) + return self._sanitize_scalar(m.group("version")) if m else "" + + def _read_pkgbuild_pkgrel(self, pkgbuild_path: str) -> str: + """ + Read pkgrel from a PKGBUILD file. + + Note: + This is a best-effort parse of 'pkgrel='. It does not execute PKGBUILD code. + """ + self.module.log(f"Aur::_read_pkgbuild_pkgrel(pkgbuild_path: {pkgbuild_path})") + + try: + with open(pkgbuild_path, "r", encoding="utf-8") as f: + data = f.read() + except OSError as exc: + self.module.log(msg=f"Unable to read PKGBUILD: {exc}") + return "" + + m = _PKGBUILD_PKGREL_RE.search(data) + return self._sanitize_scalar(m.group("pkgrel")) if m else "" + + def _read_pkgbuild_full_version(self, pkgbuild_path: str) -> str: + """ + Read epoch/pkgver/pkgrel from PKGBUILD and return a comparable full version string. + + The returned format matches pacman's version string without architecture: + - ":-" (epoch optional) + """ + self.module.log( + f"Aur::_read_pkgbuild_full_version(pkgbuild_path: {pkgbuild_path})" + ) + + pkgver = self._read_pkgbuild_pkgver(pkgbuild_path) + pkgrel = self._read_pkgbuild_pkgrel(pkgbuild_path) + epoch = self._read_pkgbuild_epoch(pkgbuild_path) + + return self._make_full_version(pkgver=pkgver, pkgrel=pkgrel, epoch=epoch) + + def _read_srcinfo_full_version(self, srcinfo_path: str) -> str: + """ + Read epoch/pkgver/pkgrel from a .SRCINFO file. + """ + self.module.log( + f"Aur::_read_srcinfo_full_version(srcinfo_path: {srcinfo_path})" + ) + + try: + with open(srcinfo_path, "r", encoding="utf-8") as f: + data = f.read() + except OSError: + return "" + + pkgver_m = _SRCINFO_PKGVER_RE.search(data) + pkgrel_m = _SRCINFO_PKGREL_RE.search(data) + epoch_m = _SRCINFO_EPOCH_RE.search(data) + + pkgver = self._sanitize_scalar(pkgver_m.group("version")) if pkgver_m else "" + pkgrel = self._sanitize_scalar(pkgrel_m.group("pkgrel")) if pkgrel_m else "" + epoch = self._sanitize_scalar(epoch_m.group("epoch")) if epoch_m else None + + return self._make_full_version(pkgver=pkgver, pkgrel=pkgrel, epoch=epoch) + + def _read_upstream_full_version(self, directory: str) -> str: + """ + Determine the upstream full version for idempotency decisions. + + The function prefers .SRCINFO (static metadata) and falls back to PKGBUILD parsing. 
+        If pkgrel cannot be determined, the function may return an epoch/pkgver-only key.
+        """
+        self.module.log(f"Aur::_read_upstream_full_version(directory: {directory})")
+
+        srcinfo_path = os.path.join(directory, ".SRCINFO")
+        if os.path.exists(srcinfo_path):
+            v = self._read_srcinfo_full_version(srcinfo_path)
+            if v:
+                return v
+
+        pkgbuild_path = os.path.join(directory, "PKGBUILD")
+        if os.path.exists(pkgbuild_path):
+            v = self._read_pkgbuild_full_version(pkgbuild_path)
+            if v:
+                return v
+
+        return ""
+
+    def _read_pkgbuild_version_key(self, pkgbuild_path: str) -> str:
+        """
+        Read epoch/pkgver from PKGBUILD and return a comparable version key.
+        """
+        self.module.log(
+            f"Aur::_read_pkgbuild_version_key(pkgbuild_path: {pkgbuild_path})"
+        )
+
+        pkgver = self._read_pkgbuild_pkgver(pkgbuild_path)
+        epoch = self._read_pkgbuild_epoch(pkgbuild_path)
+
+        return self._make_version_key(pkgver=pkgver, epoch=epoch)
+
+    def _read_srcinfo_version_key(self, srcinfo_path: str) -> str:
+        """
+        Read epoch/pkgver from a .SRCINFO file.
+        """
+        self.module.log(f"Aur::_read_srcinfo_version_key(srcinfo_path: {srcinfo_path})")
+
+        try:
+            with open(srcinfo_path, "r", encoding="utf-8") as f:
+                data = f.read()
+        except OSError:
+            return ""
+
+        pkgver_m = _SRCINFO_PKGVER_RE.search(data)
+        epoch_m = _SRCINFO_EPOCH_RE.search(data)
+
+        pkgver = self._sanitize_scalar(pkgver_m.group("version")) if pkgver_m else ""
+        epoch = self._sanitize_scalar(epoch_m.group("epoch")) if epoch_m else None
+
+        return self._make_version_key(pkgver=pkgver, epoch=epoch)
+
+    def _read_pkgbuild_epoch(self, pkgbuild_path: str) -> Optional[str]:
+        """
+        Read epoch from a PKGBUILD file.
+        """
+        self.module.log(f"Aur::_read_pkgbuild_epoch(pkgbuild_path: {pkgbuild_path})")
+
+        try:
+            with open(pkgbuild_path, "r", encoding="utf-8") as f:
+                data = f.read()
+        except OSError as exc:
+            self.module.log(msg=f"Unable to read PKGBUILD: {exc}")
+            return None
+
+        m = _PKGBUILD_EPOCH_RE.search(data)
+
+        return self._sanitize_scalar(m.group("epoch")) if m else None
+
+    def _read_upstream_version_key(self, directory: str) -> str:
+        """
+        Determine the upstream package version key for idempotency decisions.
+
+        The function prefers .SRCINFO (static metadata) and falls back to PKGBUILD
+        parsing if .SRCINFO is missing.
+        """
+        self.module.log(f"Aur::_read_upstream_version_key(directory: {directory})")
+
+        srcinfo_path = os.path.join(directory, ".SRCINFO")
+        if os.path.exists(srcinfo_path):
+            v = self._read_srcinfo_version_key(srcinfo_path)
+            if v:
+                return v
+
+        pkgbuild_path = os.path.join(directory, "PKGBUILD")
+        if os.path.exists(pkgbuild_path):
+            return self._read_pkgbuild_version_key(pkgbuild_path)
+
+        return ""
+
+    def _sanitize_scalar(self, value: str) -> str:
+        """
+        Sanitize a scalar value extracted from PKGBUILD/.SRCINFO.
+
+        This removes surrounding quotes and trims whitespace. It is intentionally conservative
+        and does not attempt to evaluate shell expansions or PKGBUILD functions.
+        """
+        self.module.log(f"Aur::_sanitize_scalar(value: {value})")
+
+        v = value.strip()
+        if (v.startswith('"') and v.endswith('"')) or (
+            v.startswith("'") and v.endswith("'")
+        ):
+            v = v[1:-1].strip()
+
+        return v
+
+    def _make_version_key(self, pkgver: str, epoch: Optional[str]) -> str:
+        """
+        Build a comparable version key.
+
+        Pacman formats versions as: '<epoch>:<pkgver>-<pkgrel>' (epoch optional).
+        This module compares '<epoch>:<pkgver>' (without pkgrel).
+        """
+        self.module.log(f"Aur::_make_version_key(pkgver: {pkgver}, epoch: {epoch})")
+
+        pv = pkgver.strip()
+        ep = self._sanitize_scalar(epoch) if epoch is not None else ""
+        if ep and ep != "0":
+            return f"{ep}:{pv}" if pv else f"{ep}:"
+
+        return pv
+
+    def _make_full_version(self, pkgver: str, pkgrel: str, epoch: Optional[str]) -> str:
+        """
+        Build a comparable full version string.
+
+        The returned format matches pacman's version string:
+        - "<epoch>:<pkgver>-<pkgrel>" (epoch optional)
+
+        If pkgrel is empty, the function falls back to an epoch/pkgver-only key.
+        """
+        self.module.log(
+            f"Aur::_make_full_version(pkgver: {pkgver}, pkgrel: {pkgrel}, epoch: {epoch})"
+        )
+
+        pv = pkgver.strip()
+        pr = pkgrel.strip()
+        ep = self._sanitize_scalar(epoch) if epoch is not None else ""
+
+        base = f"{ep}:{pv}" if ep and ep != "0" else pv
+        if not pr:
+            return base
+
+        return f"{base}-{pr}" if base else ""
+
+
+# ===========================================
+# Module execution.
+# ===========================================
+
+
+def main() -> None:
+    """
+    Entrypoint for the Ansible module.
+    """
+    args = dict(
+        state=dict(default="present", choices=["present", "absent"]),
+        repository=dict(type="str", required=False),
+        name=dict(type="str", required=True),
+        extra_args=dict(type="list", elements="str", required=False),
+    )
+    module = AnsibleModule(
+        argument_spec=args,
+        supports_check_mode=False,
+    )
+
+    aur = Aur(module)
+    result = aur.run()
+
+    module.log(msg=f"= result: {result}")
+
+    module.exit_json(**result)
+
+
+# import module snippets
+if __name__ == "__main__":
+    main()
diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/core/plugins/modules/check_mode.py b/ansible/playbooks/collections/ansible_collections/bodsch/core/plugins/modules/check_mode.py
new file mode 100644
index 0000000..be3ff69
--- /dev/null
+++ b/ansible/playbooks/collections/ansible_collections/bodsch/core/plugins/modules/check_mode.py
@@ -0,0 +1,85 @@
+#!/usr/bin/python3
+# -*- coding: utf-8 -*-
+
+# (c) 2024, Bodo Schulz
+
+from __future__ import absolute_import, division, print_function
+
+from ansible.module_utils.basic import AnsibleModule
+
+# ---------------------------------------------------------------------------------------
+
+DOCUMENTATION = r"""
+---
+module: check_mode
+version_added: 2.5.0
+author: "Bodo Schulz (@bodsch) "
+
+short_description: Replacement for ansible_check_mode.
+
+description:
+  - Replacement for ansible_check_mode.
+  - The magic variable `ansible_check_mode` was not defined with the correct value in some cases.
+
+options:
+"""
+
+EXAMPLES = r"""
+- name: detect ansible check_mode
+  bodsch.core.check_mode:
+  register: _check_mode
+
+- name: define check_mode
+  ansible.builtin.set_fact:
+    check_mode: '{{ _check_mode.check_mode }}'
+"""
+
+RETURN = r"""
+check_mode:
+  description:
+    - Status for check_mode.
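+    - C(true) if the task runs in check mode, otherwise C(false).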
+ type: bool +""" + +# --------------------------------------------------------------------------------------- + + +class CheckMode(object): + """ """ + + module = None + + def __init__(self, module): + """ """ + self.module = module + + def run(self): + """ """ + result = dict(failed=False, changed=False, check_mode=False) + + if self.module.check_mode: + result = dict(failed=False, changed=False, check_mode=True) + + return result + + +def main(): + + args = dict() + + module = AnsibleModule( + argument_spec=args, + supports_check_mode=True, + ) + + o = CheckMode(module) + result = o.run() + + module.log(msg=f"= result: {result}") + + module.exit_json(**result) + + +# import module snippets +if __name__ == "__main__": + main() diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/core/plugins/modules/deploy_and_activate.SAVE b/ansible/playbooks/collections/ansible_collections/bodsch/core/plugins/modules/deploy_and_activate.SAVE new file mode 100644 index 0000000..b8130d1 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/core/plugins/modules/deploy_and_activate.SAVE @@ -0,0 +1,155 @@ +""" +deploy_and_activate.py + +Deploy versioned binaries and activate them via symlinks. + +Note: +- When you want to deploy binaries that exist on the controller (remote_src=false scenario), + use the action plugin of the same name (this collection provides it). +- This module itself can only copy from a remote src_dir to install_dir (remote -> remote). +""" + +from __future__ import annotations + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.bodsch.core.plugins.module_utils.versioned_deployment import ( + BinaryDeploy, +) + +DOCUMENTATION = r""" +--- +module: deploy_and_activate +short_description: Deploy versioned binaries and activate them via symlinks +description: + - Ensures binaries are present in a versioned install directory and activates them via symlinks. + - Supports idempotent remote copy, permissions/ownership, Linux file capabilities, and symlink activation. + - For controller-local sources, use the action plugin (same task name) shipped by this collection. +options: + install_dir: + description: + - Versioned installation directory (e.g. C(/opt/app/1.2.3)). + type: path + required: true + link_dir: + description: + - Directory where activation symlinks are created (e.g. C(/usr/bin)). + type: path + default: /usr/bin + src_dir: + description: + - Remote directory containing extracted binaries (required when C(copy=true)). + type: path + required: false + copy: + description: + - If true, copy from C(src_dir) to C(install_dir) on the remote host (remote -> remote). + - If false, assume binaries already exist in C(install_dir) and only enforce perms/caps/links. + type: bool + default: true + items: + description: + - List of binaries to deploy. + - Each item supports C(name), optional C(src), optional C(link_name), optional C(capability). + type: list + elements: dict + required: true + activation_name: + description: + - Item name or link_name used to determine "activated" status. Defaults to the first item. + type: str + required: false + owner: + description: + - Owner name or uid for deployed binaries. + type: str + required: false + group: + description: + - Group name or gid for deployed binaries. + type: str + required: false + mode: + description: + - File mode (octal string). + type: str + default: "0755" + cleanup_on_failure: + description: + - Remove install_dir if an error occurs during apply. 
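+      - This avoids leaving a half-populated versioned install directory behind after a failed run.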
+ type: bool + default: true + check_only: + description: + - If true, do not change anything; return whether an update would be needed. + type: bool + default: false +author: + - "Bodsch Core Collection" +""" + +EXAMPLES = r""" +- name: Deploy logstream_exporter (remote -> remote copy) + bodsch.core.binary_deploy: + src_dir: "/tmp/logstream_exporter" + install_dir: "/opt/logstream_exporter/1.2.3" + link_dir: "/usr/bin" + copy: true + owner: "logstream" + group: "logstream" + mode: "0755" + items: + - name: "logstream_exporter" + capability: "cap_net_raw+ep" + +- name: Only enforce symlinks/caps when files already exist in install_dir + bodsch.core.binary_deploy: + install_dir: "/opt/alertmanager/0.27.0" + link_dir: "/usr/bin" + copy: false + items: + - name: "alertmanager" + - name: "amtool" +""" + +RETURN = r""" +changed: + description: Whether anything changed. + type: bool +activated: + description: Whether the activation symlink points to the binary in install_dir. + type: bool +needs_update: + description: In check_only/check_mode, indicates whether changes would be applied. + type: bool +plan: + description: In check_only/check_mode, per-item flags for copy/perms/cap/link. + type: dict +results: + description: In apply mode, per-item change information. + type: dict +""" + + +def main() -> None: + module = AnsibleModule( + argument_spec={ + "install_dir": {"type": "path", "required": True}, + "link_dir": {"type": "path", "default": "/usr/bin"}, + "src_dir": {"type": "path", "required": False}, + "copy": {"type": "bool", "default": True}, + "items": {"type": "list", "elements": "dict", "required": True}, + "activation_name": {"type": "str", "required": False}, + "owner": {"type": "str", "required": False}, + "group": {"type": "str", "required": False}, + "mode": {"type": "str", "default": "0755"}, + "cleanup_on_failure": {"type": "bool", "default": True}, + "check_only": {"type": "bool", "default": False}, + }, + supports_check_mode=True, + ) + + BinaryDeploy(module).run() + + +if __name__ == "__main__": + main() diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/core/plugins/modules/deploy_and_activate_remote.py b/ansible/playbooks/collections/ansible_collections/bodsch/core/plugins/modules/deploy_and_activate_remote.py new file mode 100644 index 0000000..e303d1e --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/core/plugins/modules/deploy_and_activate_remote.py @@ -0,0 +1,38 @@ +""" +binary_deploy_remote.py + +Remote worker module for the binary_deploy action plugin. +This module expects that src_dir (when copy=true) is available on the remote host. 
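+
+For sources that only exist on the controller, use the accompanying action plugin
+from this collection; it is responsible for making src_dir available on the remote
+host before this module runs.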
+""" + +from __future__ import annotations + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.bodsch.core.plugins.module_utils.versioned_deployment import ( + BinaryDeploy, +) + + +def main() -> None: + module = AnsibleModule( + argument_spec={ + "install_dir": {"type": "path", "required": True}, + "link_dir": {"type": "path", "default": "/usr/bin"}, + "src_dir": {"type": "path", "required": False}, + "copy": {"type": "bool", "default": True}, + "items": {"type": "list", "elements": "dict", "required": True}, + "activation_name": {"type": "str", "required": False}, + "owner": {"type": "str", "required": False}, + "group": {"type": "str", "required": False}, + "mode": {"type": "str", "default": "0755"}, + "cleanup_on_failure": {"type": "bool", "default": True}, + "check_only": {"type": "bool", "default": False}, + }, + supports_check_mode=True, + ) + + BinaryDeploy(module).run() + + +if __name__ == "__main__": + main() diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/core/plugins/modules/easyrsa.py b/ansible/playbooks/collections/ansible_collections/bodsch/core/plugins/modules/easyrsa.py new file mode 100644 index 0000000..ac4018f --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/core/plugins/modules/easyrsa.py @@ -0,0 +1,253 @@ +#!/usr/bin/python3 +# -*- coding: utf-8 -*- + +# (c) 2022, Bodo Schulz + +from __future__ import absolute_import, division, print_function + +import os +import shutil + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.bodsch.core.plugins.module_utils.easyrsa import EasyRSA +from ansible_collections.bodsch.core.plugins.module_utils.module_results import results + +# --------------------------------------------------------------------------------------- + +DOCUMENTATION = r""" +--- +module: easyrsa +version_added: 1.1.3 +author: "Bodo Schulz (@bodsch) " + +short_description: Manage a Public Key Infrastructure (PKI) using EasyRSA. + +description: + - This module allows management of a PKI environment using EasyRSA. + - It supports initialization of a PKI directory, creation of a Certificate Authority (CA), + generation of certificate signing requests (CSR), signing of certificates, generation of + a certificate revocation list (CRL), and generation of Diffie-Hellman (DH) parameters. + - It is useful for automating the setup of secure communication infrastructure. + + +options: + pki_dir: + description: + - Path to the PKI directory where certificates and keys will be stored. + required: false + type: str + + force: + description: + - If set to true, the existing PKI directory will be deleted and recreated. + required: false + type: bool + default: false + + req_cn_ca: + description: + - Common Name (CN) to be used for the CA certificate. + required: false + type: str + + req_cn_server: + description: + - Common Name (CN) to be used for the server certificate request. + required: false + type: str + + ca_keysize: + description: + - Key size (in bits) for the CA certificate. + required: false + type: int + + dh_keysize: + description: + - Key size (in bits) for the Diffie-Hellman parameters. + required: false + type: int + + working_dir: + description: + - Directory in which to execute the EasyRSA commands. + - If not set, commands will be executed in the current working directory. 
+ required: false + type: str + +""" + +EXAMPLES = r""" +- name: initialize easy-rsa - (this is going to take a long time) + bodsch.core.easyrsa: + pki_dir: '{{ openvpn_easyrsa.directory }}/pki' + req_cn_ca: "{{ openvpn_certificate.req_cn_ca }}" + req_cn_server: '{{ openvpn_certificate.req_cn_server }}' + ca_keysize: 4096 + dh_keysize: "{{ openvpn_diffie_hellman_keysize }}" + working_dir: '{{ openvpn_easyrsa.directory }}' + force: true + register: _easyrsa_result +""" + +RETURN = r""" +changed: + description: Indicates whether any changes were made during module execution. + type: bool + returned: always + +failed: + description: Indicates whether the module failed. + type: bool + returned: always + +state: + description: A detailed list of results from each EasyRSA operation. + type: list + elements: dict + returned: always + sample: + - init-pki: + failed: false + changed: true + msg: The PKI was successfully created. + - build-ca: + failed: false + changed: true + msg: ca.crt and ca.key were successfully created. + - gen-crl: + failed: false + changed: true + msg: crl.pem was successfully created. + - gen-req: + failed: false + changed: true + msg: server.req was successfully created. + - sign-req: + failed: false + changed: true + msg: server.crt was successfully created. + - gen-dh: + failed: false + changed: true + msg: dh.pem was successfully created. +""" + +# --------------------------------------------------------------------------------------- + + +class EasyRsa(object): + """ """ + + module = None + + def __init__(self, module): + """ """ + self.module = module + + self.state = "" + + self.force = module.params.get("force", False) + self.pki_dir = module.params.get("pki_dir", None) + self.req_cn_ca = module.params.get("req_cn_ca", None) + self.req_cn_server = module.params.get("req_cn_server", None) + self.ca_keysize = module.params.get("ca_keysize", None) + self.dh_keysize = module.params.get("dh_keysize", None) + self.working_dir = module.params.get("working_dir", None) + + self.easyrsa = module.get_bin_path("easyrsa", True) + + def run(self): + """ + runner + """ + result_state = [] + + if self.working_dir: + os.chdir(self.working_dir) + + # self.module.log(msg=f"-> pwd : {os.getcwd()}") + + if self.force: + # self.module.log(msg="force mode ...") + # self.module.log(msg=f"remove {self.pki_dir}") + + if os.path.isdir(self.pki_dir): + shutil.rmtree(self.pki_dir) + + ersa = EasyRSA( + module=self.module, + force=self.force, + pki_dir=self.pki_dir, + req_cn_ca=self.req_cn_ca, + req_cn_server=self.req_cn_server, + ca_keysize=self.ca_keysize, + dh_keysize=self.dh_keysize, + working_dir=self.working_dir, + ) + + steps = [ + ("init-pki", ersa.create_pki), + ("build-ca", ersa.build_ca), + ("gen-crl", ersa.gen_crl), + ("gen-req", ersa.gen_req), + ("sign-req", ersa.sign_req), + ("gen-dh", ersa.gen_dh), + ] + + for step_name, step_func in steps: + self.module.log(msg=f" - {step_name}") + rc, changed, msg = step_func() + + result_state.append( + {step_name: {"failed": rc != 0, "changed": changed, "msg": msg}} + ) + if rc != 0: + break + + _state, _changed, _failed, state, changed, failed = results( + self.module, result_state + ) + + result = dict(changed=_changed, failed=failed, state=result_state) + + return result + + def list_files(self, startpath): + for root, dirs, files in os.walk(startpath): + level = root.replace(startpath, "").count(os.sep) + indent = " " * 4 * (level) + self.module.log(msg=f"{indent}{os.path.basename(root)}/") + subindent = " " * 4 * (level + 1) + for f in 
files: + self.module.log(msg=f"{subindent}{f}") + + +def main(): + + args = dict( + pki_dir=dict(required=False, type="str"), + force=dict(required=False, default=False, type="bool"), + req_cn_ca=dict(required=False), + req_cn_server=dict(required=False), + ca_keysize=dict(required=False, type="int"), + dh_keysize=dict(required=False, type="int"), + working_dir=dict(required=False), + ) + + module = AnsibleModule( + argument_spec=args, + supports_check_mode=False, + ) + + e = EasyRsa(module) + result = e.run() + + module.log(msg=f"= result: {result}") + + module.exit_json(**result) + + +# import module snippets +if __name__ == "__main__": + main() diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/core/plugins/modules/facts.py b/ansible/playbooks/collections/ansible_collections/bodsch/core/plugins/modules/facts.py new file mode 100644 index 0000000..248bffb --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/core/plugins/modules/facts.py @@ -0,0 +1,250 @@ +#!/usr/bin/python3 +# -*- coding: utf-8 -*- + +# (c) 2020-2023, Bodo Schulz +# Apache-2.0 (see LICENSE or https://opensource.org/license/apache-2-0) +# SPDX-License-Identifier: Apache-2.0 + +from __future__ import absolute_import, division, print_function + +import json +import os + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.bodsch.core.plugins.module_utils.checksum import Checksum +from ansible_collections.bodsch.core.plugins.module_utils.directory import ( + create_directory, +) +from ansible_collections.bodsch.core.plugins.module_utils.file import chmod, remove_file + +# --------------------------------------------------------------------------------------- + +DOCUMENTATION = """ +module: facts +version_added: 1.0.10 +author: "Bodo Schulz (@bodsch) " + +short_description: Write Ansible Facts + +description: + - Write Ansible Facts + +options: + state: + description: + - Whether to create (C(present)), or remove (C(absent)) a fact. + required: false + name: + description: + - The name of the fact. + type: str + required: true + facts: + description: + - A dictionary with information to be written in the facts. + type: dict + required: true +""" + +EXAMPLES = """ +- name: create custom facts + bodsch.core.facts: + state: present + name: icinga2 + facts: + version: "2.10" + salt: fgmklsdfnjyxnvjksdfbkuser + user: icinga2 +""" + +RETURN = """ +msg: + description: Module information + type: str +""" + +# --------------------------------------------------------------------------------------- + +TPL_FACT = """#!/usr/bin/env bash +# generated by ansible +cat < +# Apache-2.0 (see LICENSE or https://opensource.org/license/apache-2-0) +# SPDX-License-Identifier: Apache-2.0 + +from __future__ import absolute_import, division, print_function + +from ansible.module_utils.basic import AnsibleModule + +# --------------------------------------------------------------------------------------- + +DOCUMENTATION = """ +module: journalctl +version_added: 1.0.6 +author: "Bodo Schulz (@bodsch) " + +short_description: Query the systemd journal with a very limited number of possible parameters. + +description: + - Query the systemd journal with a very limited number of possible parameters. + - In certain cases there are errors that are not clearly traceable but are logged in the journal. + - This module is intended to be a tool for error analysis. 
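+  - The journal output is returned unmodified in C(stdout) and C(stderr) for further processing.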
+ +options: + identifier: + description: + - Show entries with the specified syslog identifier + type: str + required: false + unit: + description: + - Show logs from the specified unit + type: str + required: false + lines: + description: + - Number of journal entries to show + type: int + required: false + reverse: + description: + - Show the newest entries first + type: bool + required: false + arguments: + description: + - A list of custom attributes + type: list + required: false +""" + +EXAMPLES = """ +- name: chrony entries from journalctl + journalctl: + identifier: chrony + lines: 50 + register: journalctl + when: + - ansible_facts.service_mgr == 'systemd' + +- name: journalctl entries from this module + journalctl: + identifier: ansible-journalctl + lines: 250 + register: journalctl + when: + - ansible_facts.service_mgr == 'systemd' +""" + +RETURN = """ +rc: + description: + - Return Value + type: int +cmd: + description: + - journalctl with the called parameters + type: string +stdout: + description: + - The output as a list on stdout + type: list +stderr: + description: + - The output as a list on stderr + type: list +""" + +# --------------------------------------------------------------------------------------- + + +class JournalCtl(object): + """ """ + + module = None + + def __init__(self, module): + """ """ + self.module = module + + self._journalctl = module.get_bin_path("journalctl", True) + + self.unit = module.params.get("unit") + self.identifier = module.params.get("identifier") + self.lines = module.params.get("lines") + self.reverse = module.params.get("reverse") + self.arguments = module.params.get("arguments") + + # module.log(msg="----------------------------") + # module.log(msg=f" journalctl : {self._journalctl}") + # module.log(msg=f" unit : {self.unit}") + # module.log(msg=f" identifier : {self.identifier}") + # module.log(msg=f" lines : {self.lines}") + # module.log(msg=f" reverse : {self.reverse}") + # module.log(msg=f" arguments : {self.arguments}") + # module.log(msg="----------------------------") + + def run(self): + """ + runner + """ + result = dict( + rc=1, + failed=True, + changed=False, + ) + + result = self.journalctl_lines() + + return result + + def journalctl_lines(self): + """ + journalctl --help + journalctl [OPTIONS...] [MATCHES...] + + Query the journal. + """ + args = [] + args.append(self._journalctl) + + if self.unit: + args.append("--unit") + args.append(self.unit) + + if self.identifier: + args.append("--identifier") + args.append(self.identifier) + + if self.lines: + args.append("--lines") + args.append(str(self.lines)) + + if self.reverse: + args.append("--reverse") + + if len(self.arguments) > 0: + for arg in self.arguments: + args.append(arg) + + # self.module.log(msg=f" - args {args}") + + rc, out, err = self._exec(args) + + return dict( + rc=rc, + cmd=" ".join(args), + stdout=out, + stderr=err, + ) + + def _exec(self, args): + """ """ + rc, out, err = self.module.run_command(args, check_rc=False) + + if rc != 0: + self.module.log(msg=f" rc : '{rc}'") + self.module.log(msg=f" out: '{out}'") + self.module.log(msg=f" err: '{err}'") + + return rc, out, err + + +# =========================================== +# Module execution. 
+#
+
+
+def main():
+    """ """
+    args = dict(
+        identifier=dict(required=False, type="str"),
+        unit=dict(required=False, type="str"),
+        lines=dict(required=False, type="int"),
+        reverse=dict(required=False, default=False, type="bool"),
+        arguments=dict(required=False, default=[], type="list", elements="str"),
+    )
+
+    module = AnsibleModule(
+        argument_spec=args,
+        supports_check_mode=False,
+    )
+
+    k = JournalCtl(module)
+    result = k.run()
+
+    module.log(msg=f"= result: {result}")
+
+    module.exit_json(**result)
+
+
+# import module snippets
+if __name__ == "__main__":
+    main()
diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/core/plugins/modules/mysql_schema.py b/ansible/playbooks/collections/ansible_collections/bodsch/core/plugins/modules/mysql_schema.py
new file mode 100644
index 0000000..62e20e1
--- /dev/null
+++ b/ansible/playbooks/collections/ansible_collections/bodsch/core/plugins/modules/mysql_schema.py
@@ -0,0 +1,293 @@
+#!/usr/bin/python3
+# -*- coding: utf-8 -*-
+
+# (c) 2020-2023, Bodo Schulz
+# Apache-2.0 (see LICENSE or https://opensource.org/license/apache-2-0)
+# SPDX-License-Identifier: Apache-2.0
+
+from __future__ import absolute_import, division, print_function
+
+import os
+import warnings
+
+try:
+    from ansible.module_utils.common.text.converters import to_native
+except ImportError:  # pragma: no cover
+    from ansible.module_utils._text import to_native
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.mysql import mysql_driver, mysql_driver_fail_msg
+from ansible.module_utils.six.moves import configparser
+
+# ---------------------------------------------------------------------------------------
+
+DOCUMENTATION = r"""
+module: mysql_schema
+version_added: '1.0.15'
+author: "Bodo Schulz (@bodsch) "
+
+short_description: Check whether the named schema exists in a MySQL database.
+
+description:
+  - Checks whether the named schema exists in a MySQL (or compatible) database.
+
+options:
+  login_user:
+    description:
+      - user name to login into database.
+    type: str
+    required: false
+
+  login_password:
+    description:
+      - password for user name to login into database.
+    type: str
+    required: false
+
+  login_host:
+    description:
+      - database hostname
+    type: str
+    default: 127.0.0.1
+    required: false
+
+  login_port:
+    description:
+      - database port
+    type: int
+    default: 3306
+    required: false
+
+  login_unix_socket:
+    description:
+      - database socket
+    type: str
+    required: false
+
+  database_config_file:
+    description:
+      - optional config file with credentials
+    type: str
+    required: false
+
+  table_schema:
+    description:
+      - database schema to check
+    type: str
+    required: true
+
+  table_name:
+    description:
+      - optional table name
+    type: str
+    required: false
+"""
+
+EXAMPLES = r"""
+- name: ensure table_schema is present
+  bodsch.core.mysql_schema:
+    login_host: '::1'
+    login_user: root
+    login_password: password
+    table_schema: icingaweb2
+
+- name: ensure table_schema is created
+  bodsch.core.mysql_schema:
+    login_host: database
+    login_user: root
+    login_password: root
+    table_schema: icingadb
+  register: mysql_icingawebdb_schema
+"""
+
+RETURN = r"""
+exists:
+  description:
+    - whether the named schema is present
+  type: bool
+changed:
+  description:
+    - always C(false); the module only reads state and changes nothing.
+  type: bool
+failed:
+  description:
+    - whether the module failed, for example due to a connection or query error.
+  type: bool
+"""
+
+# ---------------------------------------------------------------------------------------
+
+
+class MysqlSchema(object):
+    """ """
+
+    module = None
+
+    def __init__(self, module):
+        """ """
+        self.module = module
+
+        self.login_user = module.params.get("login_user")
+        self.login_password = module.params.get("login_password")
+        self.login_host = module.params.get("login_host")
+        self.login_port = module.params.get("login_port")
+        self.login_unix_socket = module.params.get("login_unix_socket")
+        self.database_config_file = module.params.get("database_config_file")
+        self.table_schema = module.params.get("table_schema")
+        self.table_name = module.params.get("table_name")
+
+        self.db_connect_timeout = 30
+
+    def run(self):
+        """ """
+        if mysql_driver is None:
+            self.module.fail_json(msg=mysql_driver_fail_msg)
+        else:
+            warnings.filterwarnings("error", category=mysql_driver.Warning)
+
+        state, error, error_message = self._information_schema()
+
+        if error:
+            res = dict(failed=True, changed=False, msg=error_message)
+        else:
+            res = dict(failed=False, changed=False, exists=state)
+
+        return res
+
+    def _information_schema(self):
+        """
+        get information about the schema
+
+        return:
+            state: bool (exists or not)
+            error: bool (error or not)
+            error_message: string error message
+        """
+        cursor, conn, error, message = self.__mysql_connect()
+
+        if error:
+            return None, error, message
+
+        query = f"SELECT TABLE_SCHEMA, TABLE_NAME FROM information_schema.tables where TABLE_SCHEMA = '{self.table_schema}'"
+
+        try:
+            cursor.execute(query)
+
+        except mysql_driver.ProgrammingError as e:
+            errcode, message = e.args
+
+            message = f"Cannot execute SQL '{query}' : {to_native(e)}"
+            self.module.log(msg=f"ERROR: {message}")
+
+            return False, True, message
+
+        records = cursor.fetchall()
+        cursor.close()
+        conn.close()
+        exists = len(records)
+
+        if self.table_name is not None:
+            table_names = []
+            for e in records:
+                table_names.append(e[1])
+
+            if self.table_name in table_names:
+                self.module.log(
+                    msg=f" - table name {self.table_name} exists in table schema"
+                )
+
+                return True, False, None
+
+        else:
+            self.module.log(msg=" - table schema exists")
+
+            if int(exists) >= 4:
+                return True, False, None
+
+        return False, False, None
+
+    def __mysql_connect(self):
+        """ """
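+        # Connection kwargs are assembled in precedence order: an optional MySQL
+        # config file first, then either a unix socket or host/port, and finally
+        # explicit login_user/login_password, which override the config file.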
+ config = {} + + config_file = self.database_config_file + + if config_file and os.path.exists(config_file): + config["read_default_file"] = config_file + + # TODO + # cp = self.__parse_from_mysql_config_file(config_file) + + if self.login_unix_socket: + config["unix_socket"] = self.login_unix_socket + else: + config["host"] = self.login_host + config["port"] = self.login_port + + # If login_user or login_password are given, they should override the + # config file + if self.login_user is not None: + config["user"] = self.login_user + if self.login_password is not None: + config["passwd"] = self.login_password + + if mysql_driver is None: + self.module.fail_json(msg=mysql_driver_fail_msg) + + try: + db_connection = mysql_driver.connect(**config) + + except Exception as e: + message = "unable to connect to database. " + message += "check login_host, login_user and login_password are correct " + message += f"or {config_file} has the credentials. " + message += f"Exception message: {to_native(e)}" + + self.module.log(msg=message) + + return (None, None, True, message) + + return db_connection.cursor(), db_connection, False, "successful connected" + + def __parse_from_mysql_config_file(self, cnf): + cp = configparser.ConfigParser() + cp.read(cnf) + return cp + + +# --------------------------------------------------------------------------------------- +# Module execution. +# + + +def main(): + + args = dict( + login_user=dict(type="str"), + login_password=dict(type="str", no_log=True), + login_host=dict(type="str", default="127.0.0.1"), + login_port=dict(type="int", default=3306), + login_unix_socket=dict(type="str"), + database_config_file=dict(required=False, type="path"), + table_schema=dict(required=True, type="str"), + table_name=dict(required=False, type="str"), + ) + + module = AnsibleModule( + argument_spec=args, + supports_check_mode=False, + ) + + schema = MysqlSchema(module) + result = schema.run() + + module.log(msg=f"= result : '{result}'") + + module.exit_json(**result) + + +# import module snippets +if __name__ == "__main__": + main() diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/core/plugins/modules/openvpn.py b/ansible/playbooks/collections/ansible_collections/bodsch/core/plugins/modules/openvpn.py new file mode 100644 index 0000000..eb3b329 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/core/plugins/modules/openvpn.py @@ -0,0 +1,431 @@ +#!/usr/bin/python3 +# -*- coding: utf-8 -*- + +# (c) 2022, Bodo Schulz + +from __future__ import absolute_import, division, print_function + +import os +import sys + +from ansible.module_utils import distro +from ansible.module_utils.basic import AnsibleModule + +# --------------------------------------------------------------------------------------- + +DOCUMENTATION = r""" +--- +module: openvpn +short_description: Generate OpenVPN tls-auth key or create an Easy-RSA client and inline .ovpn configuration +version_added: "1.1.3" +author: + - Bodo Schulz (@bodsch) + +description: + - Generates an OpenVPN static key (tls-auth / ta.key) using C(openvpn --genkey). + - Creates an Easy-RSA client certificate (C(build-client-full nopass)) and renders an inline client configuration + from C(/etc/openvpn/client.ovpn.template) into C(/.ovpn). + - Supports a marker file via C(creates) to make the operation idempotent. + +options: + state: + description: + - Operation mode. + - C(genkey) generates a static key file using the OpenVPN binary. 
+ - C(create_user) creates an Easy-RSA client (key/cert) and generates an inline C(.ovpn) file using a template. + type: str + default: genkey + choices: + - genkey + - create_user + + secret: + description: + - Destination path for the generated OpenVPN static key when C(state=genkey). + - Required by the module interface even if C(state=create_user). + type: str + required: true + + username: + description: + - Client username to create when C(state=create_user). + type: str + required: false + + destination_directory: + description: + - Target directory where the generated client configuration C(.ovpn) is written when C(state=create_user). + type: str + required: false + + chdir: + description: + - Change into this directory before executing commands and accessing the PKI structure. + - For C(state=create_user), paths are expected relative to this working directory (e.g. C(pki/private), C(pki/issued), C(pki/reqs)). + type: path + required: false + + creates: + description: + - If this path exists, the module returns early with no changes. + - With C(state=genkey), the early-return message is C(tls-auth key already created). + - With C(force=true) and C(creates) set, the marker file is removed before the check. + type: path + required: false + + force: + description: + - If enabled and C(creates) is set, removes the marker file before checking C(creates). + type: bool + default: false + + easyrsa_directory: + description: + - Reserved for future use (currently not used by the module implementation). + type: str + required: false + +notes: + - Check mode is not supported. + - For Ubuntu 20.04, the module uses the legacy C(--genkey --secret ) variant; other systems use C(--genkey secret ). + +requirements: + - C(openvpn) binary available on the target for C(state=genkey). + - C(easyrsa) binary and a working Easy-RSA PKI for C(state=create_user). + - Python Jinja2 installed on the target for C(state=create_user) (template rendering). +""" + +EXAMPLES = r""" +- name: Generate tls-auth key (ta.key) + bodsch.core.openvpn: + state: genkey + secret: /etc/openvpn/ta.key + +- name: Generate tls-auth key only if marker does not exist + bodsch.core.openvpn: + state: genkey + secret: /etc/openvpn/ta.key + creates: /var/lib/openvpn/ta.key.created + +- name: Force regeneration by removing marker first + bodsch.core.openvpn: + state: genkey + secret: /etc/openvpn/ta.key + creates: /var/lib/openvpn/ta.key.created + force: true + +- name: Create Easy-RSA client and write inline .ovpn + bodsch.core.openvpn: + state: create_user + secret: /dev/null # required by module interface, not used here + username: alice + destination_directory: /etc/openvpn/clients + chdir: /etc/easy-rsa + +- name: Create user only if marker does not exist + bodsch.core.openvpn: + state: create_user + secret: /dev/null + username: bob + destination_directory: /etc/openvpn/clients + chdir: /etc/easy-rsa + creates: /var/lib/openvpn/clients/bob.created +""" + +RETURN = r""" +changed: + description: + - Whether the module changed anything. + returned: always + type: bool + +failed: + description: + - Indicates failure. + returned: always + type: bool + +result: + description: + - For C(state=genkey), contains stdout from the OpenVPN command. + - For C(state=create_user), contains a status message (or an Easy-RSA output-derived message). + returned: sometimes + type: str + sample: + - "OpenVPN 2.x.x ...\n..." 
# command output example + - "ovpn file successful written as /etc/openvpn/clients/alice.ovpn" + - "can not find key or certfile for user alice" + +message: + description: + - Status message returned by some early-exit paths (e.g. existing request file or marker file). + returned: sometimes + type: str + sample: + - "tls-auth key already created" + - "nothing to do." + - "cert req for user alice exists" +""" + +# --------------------------------------------------------------------------------------- + + +class OpenVPN(object): + """ + Main Class to implement the Icinga2 API Client + """ + + module = None + + def __init__(self, module): + """ + Initialize all needed Variables + """ + self.module = module + + self.state = module.params.get("state") + self.force = module.params.get("force", False) + self._secret = module.params.get("secret", None) + self._username = module.params.get("username", None) + + self._chdir = module.params.get("chdir", None) + self._creates = module.params.get("creates", None) + self._destination_directory = module.params.get("destination_directory", None) + + self._openvpn = module.get_bin_path("openvpn", True) + self._easyrsa = module.get_bin_path("easyrsa", True) + + self.distribution, self.version, self.codename = distro.linux_distribution( + full_distribution_name=False + ) + + def run(self): + """ + runner + """ + result = dict(failed=False, changed=False, ansible_module_results="none") + + if self._chdir: + os.chdir(self._chdir) + + if self.force and self._creates: + self.module.log(msg="force mode ...") + if os.path.exists(self._creates): + self.module.log(msg="remove {}".format(self._creates)) + os.remove(self._creates) + + if self._creates: + if os.path.exists(self._creates): + message = "nothing to do." + if self.state == "genkey": + message = "tls-auth key already created" + + return dict(changed=False, message=message) + + args = [] + + if self.state == "genkey": + args.append(self._openvpn) + args.append("--genkey") + if self.distribution.lower() == "ubuntu" and self.version == "20.04": + # OpenVPN 2.5.5 + # ubuntu 20.04 wants `--secret` + args.append("--secret") + else: + # WARNING: Using --genkey --secret filename is DEPRECATED. Use --genkey secret filename instead. 
+ args.append("secret") + args.append(self._secret) + + if self.state == "create_user": + return self.__create_vpn_user() + # args.append(self._easyrsa) + # args.append("--batch") + # args.append("build-client-full") + # args.append(self._username) + # args.append("nopass") + + rc, out = self._exec(args) + + result["result"] = "{}".format(out.rstrip()) + + if rc == 0: + force_mode = "0600" + if isinstance(force_mode, str): + mode = int(force_mode, base=8) + + os.chmod(self._secret, mode) + + result["changed"] = True + else: + result["failed"] = True + + return result + + def __create_vpn_user(self): + """ """ + result = dict(failed=True, changed=False, ansible_module_results="none") + message = "init function" + + cert_exists = self.__vpn_user_req() + + if cert_exists: + return dict( + failed=False, + changed=False, + message="cert req for user {} exists".format(self._username), + ) + + args = [] + + # rc = 0 + args.append(self._easyrsa) + args.append("--batch") + args.append("build-client-full") + args.append(self._username) + args.append("nopass") + + rc, out = self._exec(args) + + result["result"] = "{}".format(out.rstrip()) + + if rc == 0: + """ """ + # read key file + key_file = os.path.join("pki", "private", "{}.key".format(self._username)) + cert_file = os.path.join("pki", "issued", "{}.crt".format(self._username)) + + self.module.log(msg=" key_file : '{}'".format(key_file)) + self.module.log(msg=" cert_file: '{}'".format(cert_file)) + + if os.path.exists(key_file) and os.path.exists(cert_file): + """ """ + with open(key_file, "r") as k_file: + k_data = k_file.read().rstrip("\n") + + cert = self.extract_certs_as_strings(cert_file)[0].rstrip("\n") + + # take openvpn client template and fill + from jinja2 import Template + + tpl = "/etc/openvpn/client.ovpn.template" + + with open(tpl) as file_: + tm = Template(file_.read()) + # self.module.log(msg=json.dumps(data, sort_keys=True)) + + d = tm.render(key=k_data, cert=cert) + + destination = os.path.join( + self._destination_directory, "{}.ovpn".format(self._username) + ) + + with open(destination, "w") as fp: + fp.write(d) + + force_mode = "0600" + if isinstance(force_mode, str): + mode = int(force_mode, base=8) + + os.chmod(destination, mode) + + result["failed"] = False + result["changed"] = True + message = "ovpn file successful written as {}".format(destination) + + else: + result["failed"] = True + message = "can not find key or certfile for user {}".format( + self._username + ) + + result["result"] = message + + return result + + def extract_certs_as_strings(self, cert_file): + certs = [] + with open(cert_file) as whole_cert: + cert_started = False + content = "" + for line in whole_cert: + if "-----BEGIN CERTIFICATE-----" in line: + if not cert_started: + content += line + cert_started = True + else: + print("Error, start cert found but already started") + sys.exit(1) + elif "-----END CERTIFICATE-----" in line: + if cert_started: + content += line + certs.append(content) + content = "" + cert_started = False + else: + print("Error, cert end found without start") + sys.exit(1) + elif cert_started: + content += line + + if cert_started: + print("The file is corrupted") + sys.exit(1) + + return certs + + def __vpn_user_req(self): + """ """ + req_file = os.path.join("pki", "reqs", "{}.req".format(self._username)) + + if os.path.exists(req_file): + return True + + return False + + def _exec(self, commands): + """ + execute shell program + """ + rc, out, err = self.module.run_command(commands, check_rc=True) + + if int(rc) != 0: + 
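+            # Note: run_command() above is called with check_rc=True, which
+            # already fails the module on a non-zero exit code, so this branch
+            # is mostly defensive logging.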
self.module.log(msg=f" rc : '{rc}'") + self.module.log(msg=f" out: '{out}'") + self.module.log(msg=f" err: '{err}'") + + return rc, out + + +# =========================================== +# Module execution. +# + + +def main(): + + args = dict( + state=dict(default="genkey", choices=["genkey", "create_user"]), + force=dict(required=False, default=False, type="bool"), + secret=dict(required=True, type="str"), + username=dict(required=False, type="str"), + easyrsa_directory=dict(required=False, type="str"), + destination_directory=dict(required=False, type="str"), + chdir=dict(required=False), + creates=dict(required=False), + ) + module = AnsibleModule( + argument_spec=args, + supports_check_mode=False, + ) + + o = OpenVPN(module) + result = o.run() + + module.log(msg="= result: {}".format(result)) + + module.exit_json(**result) + + +# import module snippets +if __name__ == "__main__": + main() diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/core/plugins/modules/openvpn_client_certificate.py b/ansible/playbooks/collections/ansible_collections/bodsch/core/plugins/modules/openvpn_client_certificate.py new file mode 100644 index 0000000..7ee4119 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/core/plugins/modules/openvpn_client_certificate.py @@ -0,0 +1,469 @@ +#!/usr/bin/python3 +# -*- coding: utf-8 -*- + +# (c) 2022-2025, Bodo Schulz + +from __future__ import absolute_import, division, print_function + +import os +import shutil +from pathlib import Path + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.bodsch.core.plugins.module_utils.checksum import Checksum +from ansible_collections.bodsch.core.plugins.module_utils.directory import ( + create_directory, +) +from ansible_collections.bodsch.core.plugins.module_utils.module_results import results + +# --------------------------------------------------------------------------------------- + +DOCUMENTATION = r""" +--- +module: openvpn_client_certificate +short_description: Manage OpenVPN client certificates using EasyRSA. +version_added: "1.1.3" +author: "Bodo Schulz (@bodsch) " + +description: + - This module manages OpenVPN client certificates using EasyRSA. + - It supports the creation and revocation of client certificates. + - Certificates are tracked via checksums to detect changes. + - Ideal for automated PKI workflows with OpenVPN infrastructure. + +options: + clients: + description: + - A list of client definitions, each representing a certificate to be created or revoked. + required: true + type: list + elements: dict + suboptions: + name: + description: + - Name of the OpenVPN client. + required: true + type: str + state: + description: + - Whether the certificate should exist or be revoked. + required: false + default: present + choices: [present, absent] + type: str + roadrunner: + description: + - Optional boolean flag, can be used for specific logic in a role. + required: false + type: bool + static_ip: + description: + - Static IP address for the client (optional, used in templating). + required: false + type: str + remote: + description: + - Remote server address or hostname. + required: false + type: str + port: + description: + - Port used by OpenVPN for this client. + required: false + type: int + proto: + description: + - Protocol to use (typically UDP or TCP). + required: false + type: str + device: + description: + - Network device (usually tun). + required: false + type: str + ping: + description: + - Ping interval for the client. 
+ required: false + type: int + ping_restart: + description: + - Time before restarting connection after ping timeout. + required: false + type: int + cert: + description: + - Certificate file name for the client. + required: false + type: str + key: + description: + - Key file name for the client. + required: false + type: str + tls_auth: + description: + - TLS authentication settings. + required: false + type: dict + suboptions: + enabled: + description: + - Whether TLS auth is enabled. + type: bool + required: false + + force: + description: + - If true, the client certificate will be re-created even if it already exists. + required: false + type: bool + default: false + + working_dir: + description: + - Path to the EasyRSA working directory. + - All EasyRSA commands will be executed within this directory. + required: false + type: str +""" + +EXAMPLES = r""" +- name: create or revoke client certificate + bodsch.core.openvpn_client_certificate: + clients: + - name: molecule + state: present + roadrunner: false + static_ip: 10.8.3.100 + remote: server + port: 1194 + proto: udp + device: tun + ping: 20 + ping_restart: 45 + cert: molecule.crt + key: molecule.key + tls_auth: + enabled: true + - name: roadrunner_one + state: present + roadrunner: true + static_ip: 10.8.3.10 + remote: server + port: 1194 + proto: udp + device: tun + ping: 20 + ping_restart: 45 + cert: roadrunner_one.crt + key: roadrunner_one.key + tls_auth: + enabled: true + working_dir: /etc/easy-rsa + when: + - openvpn_client_list | default([]) | count > 0 +""" + +RETURN = r""" +changed: + description: Indicates whether any changes were made during module execution. + type: bool + returned: always +failed: + description: Indicates whether the module failed. + type: bool + returned: always +state: + description: List of results per client certificate operation. + type: list + elements: dict + returned: always + sample: + - molecule: + failed: false + changed: true + message: The client certificate has been successfully created. + - roadrunner_one: + failed: false + changed: false + message: The client certificate has already been created. 
+"""
+
+# ---------------------------------------------------------------------------------------
+
+
+class OpenVPNClientCertificate(object):
+    """ """
+
+    def __init__(self, module):
+        """ """
+        self.module = module
+
+        self.module.log("OpenVPNClientCertificate::__init__(module)")
+
+        self.state = module.params.get("state")
+        self.clients = module.params.get("clients", None)
+        self.force = module.params.get("force", False)
+        self.working_dir = module.params.get("working_dir", None)
+
+        self.bin_openvpn = module.get_bin_path("openvpn", True)
+        self.bin_easyrsa = module.get_bin_path("easyrsa", True)
+
+    def run(self):
+        """
+        runner
+        """
+        self.module.log("OpenVPNClientCertificate::run()")
+
+        result_state = []
+
+        self.checksum = Checksum(self.module)
+
+        if self.working_dir:
+            os.chdir(self.working_dir)
+
+        for client in self.clients:
+
+            self.module.log(f" - client: {client}")
+
+            res = {}
+            username = client.get("name")
+            state = client.get("state", "present")
+
+            self.checksum_directory = f"{Path.home()}/.ansible/cache/openvpn/{username}"
+
+            if state == "absent":
+                res[username] = self.revoke_vpn_user(username=username)
+            if state == "present":
+                if self.force:
+                    if os.path.isdir(self.checksum_directory):
+                        shutil.rmtree(self.checksum_directory)
+
+                res[username] = self.create_vpn_user(username=username)
+
+            result_state.append(res)
+
+        _state, _changed, _failed, state, changed, failed = results(
+            self.module, result_state
+        )
+
+        result = dict(changed=_changed, failed=failed, state=result_state)
+
+        return result
+
+    def create_vpn_user(self, username: str):
+        """ """
+        self.module.log(msg=f"OpenVPNClientCertificate::create_vpn_user({username})")
+
+        self.req_file = os.path.join("pki", "reqs", f"{username}.req")
+        self.key_file = os.path.join("pki", "private", f"{username}.key")
+        self.crt_file = os.path.join("pki", "issued", f"{username}.crt")
+
+        self.req_checksum_file = os.path.join(self.checksum_directory, "req.sha256")
+        self.key_checksum_file = os.path.join(self.checksum_directory, "key.sha256")
+        self.crt_checksum_file = os.path.join(self.checksum_directory, "crt.sha256")
+
+        if not self.vpn_user_req(username=username):
+            """ """
+            create_directory(self.checksum_directory)
+
+            args = []
+
+            # rc = 0
+            args.append(self.bin_easyrsa)
+            args.append("--batch")
+            args.append("build-client-full")
+            args.append(username)
+            args.append("nopass")
+
+            self.module.log(msg=f"args: {args}")
+
+            rc, out, err = self._exec(args)
+
+            if rc != 0:
+                """ """
+                return dict(failed=True, changed=False, message=f"{out.rstrip()}")
+            else:
+                self.write_checksum(
+                    file_name=self.req_file, checksum_file=self.req_checksum_file
+                )
+                self.write_checksum(
+                    file_name=self.key_file, checksum_file=self.key_checksum_file
+                )
+                self.write_checksum(
+                    file_name=self.crt_file, checksum_file=self.crt_checksum_file
+                )
+
+                return dict(
+                    failed=False,
+                    changed=True,
+                    message="The client certificate has been successfully created.",
+                )
+        else:
+            valid, msg = self.validate_checksums()
+
+            if valid:
+                return dict(
+                    failed=False,
+                    changed=False,
+                    message="The client certificate has already been created.",
+                )
+            else:
+                return dict(failed=True, changed=False, message=msg)
+
+    def revoke_vpn_user(self, username: str):
+        """ """
+        self.module.log(msg=f"OpenVPNClientCertificate::revoke_vpn_user({username})")
+
+        if not self.vpn_user_req(username=username):
+            return dict(
+                failed=False,
+                changed=False,
+                message=f"There is no certificate request for the user {username}.",
+            )
+
+        args = []
+
+        # rc = 0
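+        # The command assembled below is effectively:
+        #   easyrsa --batch revoke <username>
+        # A revocation only takes effect once the CRL has been regenerated
+        # (see the gen-crl step below).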
+        args.append(self.bin_easyrsa)
+        args.append("--batch")
+        args.append("revoke")
+        args.append(username)
+
+        rc, out, err = self._exec(args)
+
+        if rc == 0:
+            # remove cached checksums
+            if os.path.isdir(self.checksum_directory):
+                shutil.rmtree(self.checksum_directory)
+
+            # recreate CRL
+            args = []
+            args.append(self.bin_easyrsa)
+            args.append("gen-crl")
+
+            self._exec(args)
+
+            return dict(
+                changed=True,
+                failed=False,
+                message=f"The certificate for the user {username} has been revoked successfully.",
+            )
+
+        return dict(
+            changed=False,
+            failed=True,
+            message=f"{out.rstrip()}",
+        )
+
+    def vpn_user_req(self, username: str):
+        """ """
+        self.module.log(msg=f"OpenVPNClientCertificate::vpn_user_req({username})")
+
+        req_file = os.path.join("pki", "reqs", f"{username}.req")
+
+        if os.path.exists(req_file):
+            return True
+
+        return False
+
+    def validate_checksums(self):
+        """ """
+        self.module.log(msg="OpenVPNClientCertificate::validate_checksums()")
+        msg = ""
+
+        req_changed, req_msg = self.validate(self.req_checksum_file, self.req_file)
+        key_changed, key_msg = self.validate(self.key_checksum_file, self.key_file)
+        crt_changed, crt_msg = self.validate(self.crt_checksum_file, self.crt_file)
+
+        if req_changed or key_changed or crt_changed:
+            _msg = []
+
+            if req_changed:
+                _msg.append(req_msg)
+            if key_changed:
+                _msg.append(key_msg)
+            if crt_changed:
+                _msg.append(crt_msg)
+
+            msg = ", ".join(_msg)
+            valid = False
+        else:
+            valid = True
+            msg = "All files are valid."
+
+        return valid, msg
+
+    def validate(self, checksum_file: str, file_name: str):
+        """ """
+        self.module.log(
+            msg=f"OpenVPNClientCertificate::validate({checksum_file}, {file_name})"
+        )
+        changed = False
+        msg = ""
+
+        checksum = None
+        old_checksum = None
+
+        changed, checksum, old_checksum = self.checksum.validate_from_file(
+            checksum_file, file_name
+        )
+
+        if os.path.exists(file_name) and not os.path.exists(checksum_file):
+            self.write_checksum(file_name=file_name, checksum_file=checksum_file)
+            changed = False
+
+        if changed:
+            msg = f"{checksum_file} has changed"
+
+        return (changed, msg)
+
+    def write_checksum(self, file_name: str, checksum_file: str):
+        """ """
+        self.module.log(
+            msg=f"OpenVPNClientCertificate::write_checksum({file_name}, {checksum_file})"
+        )
+
+        checksum = self.checksum.checksum_from_file(file_name)
+        self.checksum.write_checksum(checksum_file, checksum)
+
+    def _exec(self, commands, check_rc=False):
+        """
+        execute shell program
+        """
+        self.module.log(
+            msg=f"OpenVPNClientCertificate::_exec(commands={commands}, check_rc={check_rc})"
+        )
+        rc, out, err = self.module.run_command(commands, check_rc=check_rc)
+        return rc, out, err
+
+    def result_values(self, out: str, err: str) -> list:
+        """ """
+        _out = out.splitlines()
+        _err = err.splitlines()
+        _output = []
+        _output += _out
+        _output += _err
+        # self.module.log(msg=f"= output: {_output}")
+        return _output
+
+
+def main():
+    """ """
+    args = dict(
+        clients=dict(required=True, type="list"),
+        force=dict(required=False, default=False, type="bool"),
+        working_dir=dict(required=False),
+    )
+
+    module = AnsibleModule(
+        argument_spec=args,
+        supports_check_mode=False,
+    )
+
+    o = OpenVPNClientCertificate(module)
+    result = o.run()
+
+    module.log(msg=f"= result: {result}")
+
+    module.exit_json(**result)
+
+
+# import module snippets
+if __name__ == "__main__":
+    main()
diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/core/plugins/modules/openvpn_crl.py b/ansible/playbooks/collections/ansible_collections/bodsch/core/plugins/modules/openvpn_crl.py
new file mode 100644
index 0000000..2ee220f
--- /dev/null
+++ b/ansible/playbooks/collections/ansible_collections/bodsch/core/plugins/modules/openvpn_crl.py
@@ -0,0 +1,403 @@
+#!/usr/bin/python3
+# -*- coding: utf-8 -*-
+
+# (c) 2022-2025, Bodo Schulz
+
+from __future__ import absolute_import, division, print_function
+
+import os
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.common.text.converters import to_native
+from ansible_collections.bodsch.core.plugins.module_utils.crypto_utils import (
+    OpenSSLObjectError,
+    get_crl_info,
+    get_relative_time_option,
+)
+from ansible_collections.bodsch.core.plugins.module_utils.easyrsa import EasyRSA
+
+# ---------------------------------------------------------------------------------------
+
+DOCUMENTATION = r"""
+---
+module: openvpn_crl
+short_description: Manage and inspect an OpenVPN Certificate Revocation List (CRL) generated by Easy-RSA
+version_added: "1.1.3"
+author:
+  - Bodo Schulz (@bodsch)
+
+description:
+  - Reads and parses an existing CRL file (C(crl.pem)) from an Easy-RSA PKI directory.
+  - Optionally returns the list of revoked certificates contained in the CRL.
+  - Can regenerate the CRL via Easy-RSA (removes the existing C(crl.pem) and runs C(gen-crl)).
+
+options:
+  state:
+    description:
+      - Operation mode.
+      - C(status) parses the CRL and returns metadata (last/next update) and optionally revoked certificates.
+      - C(renew) regenerates the CRL using Easy-RSA.
+    type: str
+    default: status
+    choices:
+      - status
+      - renew
+
+  pki_dir:
+    description:
+      - Path to the Easy-RSA PKI directory.
+      - The module expects the CRL at C(<pki_dir>/crl.pem).
+    type: str
+    default: /etc/easy-rsa/pki
+
+  working_dir:
+    description:
+      - Working directory used before running Easy-RSA commands.
+      - Useful when Easy-RSA expects to be executed from a specific directory.
+    type: path
+    required: false
+
+  list_revoked_certificates:
+    description:
+      - If enabled, include the parsed list of revoked certificates from the CRL in the result.
+    type: bool
+    default: false
+
+  warn_for_expire:
+    description:
+      - If enabled, calculates whether the CRL is near expiry based on C(next_update) and C(expire_in_days).
+      - Adds C(expired) and (when expired) C(warn=true) to the result.
+    type: bool
+    default: true
+
+  expire_in_days:
+    description:
+      - Threshold in days used to determine whether the CRL is considered "near expiry".
+      - If C(next_update - now <= expire_in_days), the module returns C(expired=true).
+    type: int
+    default: 10
+
+  force:
+    description:
+      - Reserved for future use.
+    type: bool
+    default: false
+
+notes:
+  - Check mode is not supported.
+
+requirements:
+  - Easy-RSA must be available for C(state=renew).
+  - OpenSSL libraries/tools as required by the collection crypto utilities.
+""" + +EXAMPLES = r""" +- name: Check CRL status (parse crl.pem) + bodsch.core.openvpn_crl: + state: status + pki_dir: /etc/easy-rsa/pki + +- name: Check CRL status and include revoked certificates + bodsch.core.openvpn_crl: + state: status + pki_dir: /etc/easy-rsa/pki + list_revoked_certificates: true + +- name: Warn if CRL expires within 14 days + bodsch.core.openvpn_crl: + state: status + pki_dir: /etc/easy-rsa/pki + warn_for_expire: true + expire_in_days: 14 + register: crl_status + +- name: Regenerate (renew) CRL using Easy-RSA + bodsch.core.openvpn_crl: + state: renew + pki_dir: /etc/easy-rsa/pki + working_dir: /etc/easy-rsa + register: crl_renew + +- name: Show renew output + ansible.builtin.debug: + var: crl_renew +""" + +RETURN = r""" +last_update: + description: + - CRL last update time. + returned: when state=status and the CRL can be parsed + type: dict + contains: + raw: + description: Raw value as extracted from the CRL. + returned: always + type: str + parsed: + description: Parsed/converted representation (implementation-specific). + returned: always + type: raw + +next_update: + description: + - CRL next update time. + returned: when state=status and the CRL can be parsed + type: dict + contains: + raw: + description: Raw value as extracted from the CRL. + returned: always + type: str + parsed: + description: Parsed/converted representation (implementation-specific). + returned: always + type: raw + +expired: + description: + - Indicates whether the CRL will expire within C(expire_in_days). + returned: when state=status and warn_for_expire=true + type: bool + +warn: + description: + - Convenience flag set to C(true) when C(expired=true). + returned: when state=status, warn_for_expire=true and expired=true + type: bool + +revoked_certificates: + description: + - List of revoked certificates parsed from the CRL. + - The element schema depends on the underlying parser. + returned: when state=status and list_revoked_certificates=true + type: list + elements: dict + +changed: + description: + - Whether the module changed anything. + - Only relevant for C(state=renew). + returned: when state=renew + type: bool + +msg: + description: + - Human-readable result message (primarily from Easy-RSA on renew, or from error paths). + returned: on state=renew or on failure + type: str + +failed: + description: + - Indicates failure. 
+  returned: always
+  type: bool
+"""
+
+# ---------------------------------------------------------------------------------------
+
+
+class OpenVPNCrl(object):
+    """ """
+
+    module = None
+
+    def __init__(self, module):
+        """ """
+        self.module = module
+        self.state = module.params.get("state")
+        self.pki_dir = module.params.get("pki_dir")
+        self.working_dir = module.params.get("working_dir", None)
+        self.list_revoked_certificates = module.params.get("list_revoked_certificates")
+        self.warn_for_expire = module.params.get("warn_for_expire")
+        self.expire_in_days = module.params.get("expire_in_days")
+
+        self.crl_file = f"{self.pki_dir}/crl.pem"
+
+    def run(self):
+        """ """
+        if self.state == "status":
+            result = self.test_crl()
+        if self.state == "renew":
+            result = self.renew_crl()
+
+        return result
+
+    def test_crl(self):
+        """ """
+        data = None
+
+        if os.path.isfile(self.crl_file):
+            try:
+                with open(self.crl_file, "rb") as f:
+                    data = f.read()
+            except (IOError, OSError) as e:
+                msg = f"Error while reading CRL file from disk: {e}"
+                self.module.log(msg)
+                # self.module.fail_json(msg)
+
+                return dict(failed=True, msg=msg)
+        if not data:
+            return dict(
+                failed=True,
+                msg=f"The CRL file {self.crl_file} does not exist or is empty.",
+            )
+
+        try:
+            crl_info = get_crl_info(
+                self.module,
+                data,
+                list_revoked_certificates=self.list_revoked_certificates,
+            )
+
+            self.last_update = get_relative_time_option(
+                crl_info.get("last_update"), "last_update"
+            )
+            self.next_update = get_relative_time_option(
+                crl_info.get("next_update"), "next_update"
+            )
+            self.revoked_certificates = crl_info.get("revoked_certificates", [])
+
+            result = dict(
+                failed=False,
+                last_update=dict(
+                    raw=crl_info.get("last_update"), parsed=self.last_update
+                ),
+                next_update=dict(
+                    raw=crl_info.get("next_update"), parsed=self.next_update
+                ),
+            )
+
+            if self.warn_for_expire:
+                expired = self.expired(self.next_update)
+
+                result.update({"expired": expired})
+
+                if expired:
+                    result.update({"warn": True})
+
+            if self.list_revoked_certificates:
+                result.update({"revoked_certificates": self.revoked_certificates})
+
+            return result
+
+        except OpenSSLObjectError as e:
+            msg = f"Error while decoding CRL file: {to_native(e)}"
+            self.module.log(msg)
+            # self.module.fail_json(msg)
+            return dict(failed=True, msg=msg)
+
+    def renew_crl(self):
+        """
+        rm '{{ openvpn_easyrsa.directory }}/pki/crl.pem'
+        """
+        if self.working_dir:
+            os.chdir(self.working_dir)
+
+        if os.path.isfile(self.crl_file):
+            os.remove(self.crl_file)
+
+        ersa = EasyRSA(module=self.module, force=True, working_dir=self.working_dir)
+
+        rc, changed, msg = ersa.gen_crl()
+
+        if rc == 0:
+            return dict(failed=False, changed=changed, msg=msg)
+        else:
+            return dict(failed=True, changed=changed, msg=msg)
+
+    def expired(self, next_update):
+        """ """
+        from datetime import datetime
+
+        result = False
+        now = datetime.now()
+
+        time_diff = next_update - now
+        time_diff_in_days = time_diff.days
+        # self.module.log(f" - {time_diff_in_days} vs.
{self.expire_in_days}") + + if time_diff_in_days <= self.expire_in_days: + result = True + + return result + + +def main(): + + args = dict( + state=dict(default="status", choices=["status", "renew"]), + pki_dir=dict(required=False, type="str", default="/etc/easy-rsa/pki"), + working_dir=dict(required=False), + list_revoked_certificates=dict(required=False, type="bool", default=False), + warn_for_expire=dict(required=False, type="bool", default=True), + expire_in_days=dict(required=False, type="int", default=10), + force=dict(required=False, type="bool", default=False), + ) + + module = AnsibleModule( + argument_spec=args, + supports_check_mode=False, + ) + + o = OpenVPNCrl(module) + result = o.run() + + module.log(msg=f"= result: {result}") + + module.exit_json(**result) + + +# import module snippets +if __name__ == "__main__": + main() + + +""" + +# openssl crl -text -noout -in /etc/easy-rsa/pki/crl.pem +Certificate Revocation List (CRL): + Version 2 (0x1) + Signature Algorithm: sha512WithRSAEncryption + Issuer: CN = Open VPN + Last Update: Apr 24 03:51:14 2023 GMT + Next Update: Apr 23 03:51:14 2024 GMT + CRL extensions: + X509v3 Authority Key Identifier: + keyid:35:D2:22:90:B6:82:4A:DC:64:03:6B:17:8B:B0:07:E2:52:E0:60:1E + DirName:/CN=Open VPN + serial:64:41:E6:07:CC:D0:8E:1F:24:A8:F9:91:FC:34:0F:4B:47:06:D1:74 +No Revoked Certificates. + Signature Algorithm: sha512WithRSAEncryption + Signature Value: + d4:f4:fc:06:fa:ed:8b:cd:f4:eb:95:fb:88:c1:e8:ff:30:eb: + e7:2a:ea:fa:8f:60:a6:07:81:a6:1a:aa:72:4b:68:7d:46:3a: + 7c:a2:3c:df:a3:7b:7b:85:0e:ba:1b:ba:06:13:04:74:0a:9c: + 27:60:ec:09:df:1a:3d:b6:3b:71:d1:20:4a:55:dc:47:e7:60: + b5:65:82:09:ff:5d:e3:e2:4d:15:55:4f:1f:48:e8:c5:72:9b: + a8:61:fd:17:8e:c4:4e:98:59:fa:58:6d:b1:a8:3a:b1:87:79: + 76:c2:9d:4a:3c:a3:54:e1:14:10:02:96:e9:fe:bf:6f:ab:2d: + 56:35:6e:94:9d:b1:aa:4f:d6:3c:b0:9a:29:8a:17:c6:7d:18: + 0c:15:fa:30:a9:c8:a5:22:63:79:cd:31:a0:1f:d0:38:be:93: + c2:0f:be:73:97:2b:79:58:db:b9:bb:ec:aa:a9:f2:ac:cc:bb: + 4e:66:15:23:ae:1e:2b:86:40:79:4c:14:eb:58:e0:71:d7:3e: + c8:93:11:e5:7a:e5:26:7a:94:c1:57:4b:75:ca:cb:92:c2:ca: + 87:a3:b8:16:7b:3d:53:13:23:70:04:c3:35:c7:41:29:06:9d: + 32:63:96:90:3d:4f:82:7a:23:08:9d:d7:85:d9:ad:9d:09:d2: + e9:52:39:72:af:0d:4b:74:a2:39:c5:5c:80:4d:88:db:74:ae: + 87:a7:d3:cf:f3:0f:ae:44:94:bd:f8:21:c7:64:c7:bb:aa:46: + 68:ba:fb:42:37:ef:41:6f:0e:cb:c0:e9:c6:83:fb:15:8f:f0: + a4:d4:2b:34:40:b0:89:b1:f7:d0:ce:c8:2c:3e:7d:7c:e4:37: + c4:98:56:30:a2:42:89:36:fe:a8:3c:15:ec:fe:37:c7:a8:ba: + 78:39:70:54:c9:fc:6a:7f:05:5c:89:f3:4b:0f:c1:fe:1a:93: + 68:63:70:7b:ed:cb:82:85:3f:a2:8e:bc:d5:b7:21:b2:dc:2a: + e9:79:a3:8f:a8:ad:9e:d4:f0:5a:13:18:2f:ea:bc:00:cf:e4: + 76:fb:fa:f4:cb:c3:b6:d4:d9:d4:b7:f1:eb:16:10:e9:69:93: + 64:fa:d3:f6:1b:9b:2f:7a:fb:6b:99:8d:7a:07:51:62:ed:fa: + 38:51:2a:e7:70:e9:a2:83:be:cf:a4:8d:5d:35:b6:49:7a:56: + 17:2a:a7:88:7d:6c:43:69:f3:67:f7:ce:69:97:5c:b8:ad:90: + 4e:9b:ab:cf:6c:52:a8:3e:54:09:61:8f:f3:7b:98:b3:a8:1f: + 75:6e:94:a1:c1:89:b8:f7:df:5c:7a:b7:13:47:c0:b1:42:03: + c5:18:2a:77:6a:50:c9:8f + +https://serverfault.com/questions/979826/how-to-verify-certificate-revocation-lists-against-multiple-certification-path +""" diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/core/plugins/modules/openvpn_ovpn.py b/ansible/playbooks/collections/ansible_collections/bodsch/core/plugins/modules/openvpn_ovpn.py new file mode 100644 index 0000000..6d0cf87 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/core/plugins/modules/openvpn_ovpn.py @@ -0,0 +1,389 
@@
+#!/usr/bin/python3
+# -*- coding: utf-8 -*-
+
+# (c) 2022, Bodo Schulz
+
+from __future__ import absolute_import, division, print_function
+
+import hashlib
+import os
+import sys
+
+from ansible.module_utils.basic import AnsibleModule
+
+# ---------------------------------------------------------------------------------------
+
+DOCUMENTATION = r"""
+---
+module: openvpn_ovpn
+short_description: Create or remove an inline OpenVPN client configuration (.ovpn) from Easy-RSA client credentials
+version_added: "1.1.3"
+author:
+  - Bodo Schulz (@bodsch)
+
+description:
+  - Creates an inline OpenVPN client configuration file (C(.ovpn)) containing embedded client key and certificate.
+  - The client key and certificate are read from an Easy-RSA PKI structure (C(pki/private/<username>.key) and C(pki/issued/<username>.crt)).
+  - Uses a Jinja2 template file (C(/etc/openvpn/client.ovpn.template)) to render the final config.
+  - Writes a SHA256 checksum sidecar file (C(.<username>.ovpn.sha256)) to support basic change detection.
+  - Can remove both the generated C(.ovpn) and checksum file.
+
+options:
+  state:
+    description:
+      - Whether the OVPN configuration should be present or absent.
+    type: str
+    default: present
+    choices:
+      - present
+      - absent
+
+  force:
+    description:
+      - If enabled, removes existing destination files before (re)creating the configuration.
+      - This also removes the checksum file.
+    type: bool
+    default: false
+
+  username:
+    description:
+      - Client name/user to build the configuration for.
+      - Used to locate Easy-RSA key/certificate and to name the output files.
+    type: str
+    required: true
+
+  destination_directory:
+    description:
+      - Directory where the generated C(.ovpn) and checksum file are written.
+      - The directory must exist.
+    type: str
+    required: true
+
+  chdir:
+    description:
+      - Change into this directory before processing.
+      - Useful if Easy-RSA PKI paths are relative to a working directory.
+    type: path
+    required: false
+
+  creates:
+    description:
+      - If this path exists, the module returns early with no changes.
+      - When C(state=present) and C(creates) exists, the message will indicate the configuration is already created.
+    type: path
+    required: false
+
+notes:
+  - Check mode is not supported.
+  - The template path is currently fixed to C(/etc/openvpn/client.ovpn.template).
+  - The module expects an Easy-RSA PKI layout under the (optional) C(chdir) working directory.
+  - File permissions for the generated C(.ovpn) are set to C(0600).
+
+requirements:
+  - Python Jinja2 must be available on the target node for C(state=present).
+"""
+
+EXAMPLES = r"""
+- name: Create an inline client configuration for user 'alice'
+  bodsch.core.openvpn_ovpn:
+    state: present
+    username: alice
+    destination_directory: /etc/openvpn/clients
+
+- name: Create config with PKI relative to a working directory
+  bodsch.core.openvpn_ovpn:
+    state: present
+    username: bob
+    destination_directory: /etc/openvpn/clients
+    chdir: /etc/easy-rsa
+
+- name: Force recreation of an existing .ovpn file
+  bodsch.core.openvpn_ovpn:
+    state: present
+    username: carol
+    destination_directory: /etc/openvpn/clients
+    force: true
+
+- name: Skip if a marker file already exists
+  bodsch.core.openvpn_ovpn:
+    state: present
+    username: dave
+    destination_directory: /etc/openvpn/clients
+    creates: /var/lib/openvpn/clients/dave.created
+
+- name: Remove client configuration and checksum file
+  bodsch.core.openvpn_ovpn:
+    state: absent
+    username: alice
+    destination_directory: /etc/openvpn/clients
+"""
+
+RETURN = r"""
+changed:
+  description:
+    - Whether the module changed anything.
+  returned: always
+  type: bool
+
+failed:
+  description:
+    - Indicates failure.
+  returned: always
+  type: bool
+
+message:
+  description:
+    - Human readable status message.
+  returned: always
+  type: str
+  sample:
+    - "ovpn file /etc/openvpn/clients/alice.ovpn exists."
+    - "ovpn file successful written as /etc/openvpn/clients/alice.ovpn."
+    - "ovpn file /etc/openvpn/clients/alice.ovpn successful removed."
+    - "can not find key or certfile for user alice."
+    - "user req already created"
+"""
+
+# ---------------------------------------------------------------------------------------
+
+
+class OpenVPNOvpn(object):
+    """
+    Implements the C(bodsch.core.openvpn_ovpn) module: creates or removes
+    inline OpenVPN client configurations.
+    """
+
+    module = None
+
+    def __init__(self, module):
+        """
+        Initialize all needed variables
+        """
+        self.module = module
+
+        self.state = module.params.get("state")
+        self.force = module.params.get("force", False)
+        self._username = module.params.get("username", None)
+        self._destination_directory = module.params.get("destination_directory", None)
+
+        self._chdir = module.params.get("chdir", None)
+        self._creates = module.params.get("creates", None)
+
+        self._openvpn = module.get_bin_path("openvpn", True)
+        self._easyrsa = module.get_bin_path("easyrsa", True)
+
+        self.key_file = os.path.join("pki", "private", f"{self._username}.key")
+        self.crt_file = os.path.join("pki", "issued", f"{self._username}.crt")
+        self.dst_file = os.path.join(
+            self._destination_directory, f"{self._username}.ovpn"
+        )
+
+        self.dst_checksum_file = os.path.join(
+            self._destination_directory, f".{self._username}.ovpn.sha256"
+        )
+
+    def run(self):
+        """
+        runner
+        """
+        result = dict(failed=False, changed=False, ansible_module_results="none")
+
+        if self._chdir:
+            os.chdir(self._chdir)
+
+        self.__validate_checksums()  # NOTE: the return value is currently unused
+
+        if self.force:
+            self.module.log(msg="force mode ...")
+            if os.path.exists(self.dst_file):
+                self.module.log(msg=f"remove {self.dst_file}")
+                os.remove(self.dst_file)
+                if os.path.exists(self.dst_checksum_file):
+                    os.remove(self.dst_checksum_file)
+
+        if self._creates:
+            if os.path.exists(self._creates):
+                message = "nothing to do."
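+                # `creates` acts as an idempotency marker: when the given path
+                # exists, the module exits early without touching any files.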
+ if self.state == "present": + message = "user req already created" + + return dict(changed=False, message=message) + + if self.state == "present": + return self.__create_ovpn_config() + if self.state == "absent": + return self.__remove_ovpn_config() + + return result + + def __create_ovpn_config(self): + """ """ + if os.path.exists(self.dst_file): + return dict( + failed=False, + changed=False, + message=f"ovpn file {self.dst_file} exists.", + ) + + if os.path.exists(self.key_file) and os.path.exists(self.crt_file): + """ """ + from jinja2 import Template + + with open(self.key_file, "r") as k_file: + k_data = k_file.read().rstrip("\n") + + cert = self.__extract_certs_as_strings(self.crt_file)[0].rstrip("\n") + + tpl = "/etc/openvpn/client.ovpn.template" + + with open(tpl) as file_: + tm = Template(file_.read()) + + d = tm.render(key=k_data, cert=cert) + + with open(self.dst_file, "w") as fp: + fp.write(d) + + self.__create_checksum_file(self.dst_file, self.dst_checksum_file) + + force_mode = "0600" + if isinstance(force_mode, str): + mode = int(force_mode, base=8) + + os.chmod(self.dst_file, mode) + + return dict( + failed=False, + changed=True, + message=f"ovpn file successful written as {self.dst_file}.", + ) + + else: + return dict( + failed=True, + changed=False, + message=f"can not find key or certfile for user {self._username}.", + ) + + def __remove_ovpn_config(self): + """ """ + if os.path.exists(self.dst_file): + os.remove(self.dst_file) + + if os.path.exists(self.dst_checksum_file): + os.remove(self.dst_checksum_file) + + if self._creates and os.path.exists(self._creates): + os.remove(self._creates) + + return dict( + failed=False, + changed=True, + message=f"ovpn file {self.dst_file} successful removed.", + ) + + def __extract_certs_as_strings(self, cert_file): + """ """ + certs = [] + with open(cert_file) as whole_cert: + cert_started = False + content = "" + for line in whole_cert: + if "-----BEGIN CERTIFICATE-----" in line: + if not cert_started: + content += line + cert_started = True + else: + print("Error, start cert found but already started") + sys.exit(1) + elif "-----END CERTIFICATE-----" in line: + if cert_started: + content += line + certs.append(content) + content = "" + cert_started = False + else: + print("Error, cert end found without start") + sys.exit(1) + elif cert_started: + content += line + + if cert_started: + print("The file is corrupted") + sys.exit(1) + + return certs + + def __validate_checksums(self): + """ """ + dst_checksum = None + dst_old_checksum = None + + if os.path.exists(self.dst_file): + with open(self.dst_file, "r") as d: + dst_data = d.read().rstrip("\n") + dst_checksum = self.__checksum(dst_data) + + if os.path.exists(self.dst_checksum_file): + with open(self.dst_checksum_file, "r") as f: + dst_old_checksum = f.readlines()[0] + else: + if dst_checksum is not None: + dst_old_checksum = self.__create_checksum_file( + self.dst_file, self.dst_checksum_file + ) + + if dst_checksum is None or dst_old_checksum is None: + valid = False + else: + valid = dst_checksum == dst_old_checksum + + return valid + + def __create_checksum_file(self, filename, checksumfile): + """ """ + if os.path.exists(filename): + with open(filename, "r") as d: + _data = d.read().rstrip("\n") + _checksum = self.__checksum(_data) + + with open(checksumfile, "w") as f: + f.write(_checksum) + + return _checksum + + def __checksum(self, plaintext): + """ """ + _bytes = plaintext.encode("utf-8") + _hash = hashlib.sha256(_bytes) + return _hash.hexdigest() + + +# 
=========================================== +# Module execution. +# + + +def main(): + """ """ + module = AnsibleModule( + argument_spec=dict( + state=dict(default="present", choices=["present", "absent"]), + force=dict(required=False, default=False, type="bool"), + username=dict(required=True, type="str"), + destination_directory=dict(required=True, type="str"), + chdir=dict(required=False), + creates=dict(required=False), + ), + supports_check_mode=False, + ) + + o = OpenVPNOvpn(module) + result = o.run() + + module.log(msg=f"= result: {result}") + + module.exit_json(**result) + + +# import module snippets +if __name__ == "__main__": + main() diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/core/plugins/modules/openvpn_version.py b/ansible/playbooks/collections/ansible_collections/bodsch/core/plugins/modules/openvpn_version.py new file mode 100644 index 0000000..b48aa02 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/core/plugins/modules/openvpn_version.py @@ -0,0 +1,164 @@ +#!/usr/bin/python3 +# -*- coding: utf-8 -*- + +# (c) 2023, Bodo Schulz + +from __future__ import absolute_import, division, print_function + +import re + +from ansible.module_utils.basic import AnsibleModule + +# --------------------------------------------------------------------------------------- + +DOCUMENTATION = r""" +--- +module: openvpn_version +short_description: Read the installed OpenVPN version +version_added: "1.1.3" +author: + - Bodo Schulz (@bodsch) + +description: + - Executes C(openvpn --version) on the target host. + - Parses the semantic version (C(X.Y.Z)) from the output. + - Returns the full stdout and stdout_lines for troubleshooting. + +options: {} + +notes: + - Check mode is not supported. + - The module fails if the C(openvpn) binary cannot be found on the target host. + +requirements: + - OpenVPN installed on the target host. +""" + +EXAMPLES = r""" +- name: Get OpenVPN version + bodsch.core.openvpn_version: + register: openvpn + +- name: Print parsed version + ansible.builtin.debug: + msg: "OpenVPN version: {{ openvpn.version }}" + +- name: Print raw stdout for troubleshooting + ansible.builtin.debug: + var: openvpn.stdout_lines +""" + +RETURN = r""" +version: + description: + - Parsed OpenVPN version (C(X.Y.Z)) if found, otherwise C(unknown). + returned: always + type: str + sample: "2.6.8" + +stdout: + description: + - Raw stdout from C(openvpn --version). + returned: always + type: str + +stdout_lines: + description: + - Stdout split into lines. + returned: always + type: list + elements: str + +failed: + description: + - Indicates whether parsing the version failed. 
+  returned: always
+  type: bool
+"""
+
+# ---------------------------------------------------------------------------------------
+
+
+class OpenVPN(object):
+    """
+    Reads the installed OpenVPN version.
+    """
+
+    module = None
+
+    def __init__(self, module):
+        """ """
+        self.module = module
+
+        self._openvpn = module.get_bin_path("openvpn", True)
+
+    def run(self):
+        """
+        runner
+        """
+        _failed = True
+        _version = "unknown"
+        _stdout = ""
+        _stdout_lines = []
+
+        args = []
+
+        args.append(self._openvpn)
+        args.append("--version")
+
+        rc, out = self._exec(args)
+
+        if "OpenVPN" in out:
+            pattern = re.compile(
+                r"OpenVPN (?P<version>[0-9]+\.[0-9]+\.[0-9]+).*", re.MULTILINE
+            )
+            found = re.search(pattern, out.rstrip())
+
+            if found:
+                _version = found.group("version")
+                _failed = False
+            else:
+                _failed = True
+
+        _stdout = f"{out.rstrip()}"
+        _stdout_lines = _stdout.split("\n")
+
+        return dict(
+            stdout=_stdout, stdout_lines=_stdout_lines, failed=_failed, version=_version
+        )
+
+    def _exec(self, commands):
+        """ """
+        rc, out, err = self.module.run_command(commands, check_rc=False)
+
+        if int(rc) != 0:
+            self.module.log(msg=f" rc : '{rc}'")
+            self.module.log(msg=f" out: '{out}'")
+            self.module.log(msg=f" err: '{err}'")
+
+        return rc, out
+
+
+# ===========================================
+# Module execution.
+#
+
+
+def main():
+
+    args = dict()
+
+    module = AnsibleModule(
+        argument_spec=args,
+        supports_check_mode=False,
+    )
+
+    o = OpenVPN(module)
+    result = o.run()
+
+    module.log(msg="= result: {}".format(result))
+
+    module.exit_json(**result)
+
+
+# import module snippets
+if __name__ == "__main__":
+    main()
diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/core/plugins/modules/package_version.py b/ansible/playbooks/collections/ansible_collections/bodsch/core/plugins/modules/package_version.py
new file mode 100644
index 0000000..74c33c5
--- /dev/null
+++ b/ansible/playbooks/collections/ansible_collections/bodsch/core/plugins/modules/package_version.py
@@ -0,0 +1,407 @@
+#!/usr/bin/python3
+# -*- coding: utf-8 -*-
+
+# (c) 2020-2023, Bodo Schulz
+# Apache-2.0 (see LICENSE or https://opensource.org/license/apache-2-0)
+# SPDX-License-Identifier: Apache-2.0
+
+from __future__ import print_function
+
+import re
+
+from ansible.module_utils import distro
+from ansible.module_utils.basic import AnsibleModule
+
+__metaclass__ = type
+
+# ---------------------------------------------------------------------------------------
+
+DOCUMENTATION = r"""
+---
+module: package_version
+version_added: 0.9.0
+author: "Bodo Schulz (@bodsch)"
+
+short_description: Attempts to determine the version of a package to be installed or already installed.
+
+description:
+  - Attempts to determine the version of a package to be installed or already installed.
+  - Supports apt, pacman, and dnf (or yum) as package managers.
+
+options:
+  state:
+    description:
+      - The status of a package.
+      - Defines whether the version of an already installed (C(installed)) package or the
+        version of a package available for installation (C(available)) is output.
+    default: available
+    required: false
+  repository:
+    description:
+      - Name of the repository in which the search is being conducted.
+      - This is only necessary for RedHat-based distributions.
+    type: str
+    default: ""
+    required: false
+  package_name:
+    description:
+      - Package name which is searched for in the system or via the package manager.
+ type: str + required: true +""" + +EXAMPLES = r""" +- name: get version of available package + bodsch.core.package_version: + package_name: nano + register: package_version + +- name: get version of available mariadb-server + bodsch.core.package_version: + state: available + package_name: mariadb-server + register: package_version + +- name: get version of installed php-fpm + bodsch.core.package_version: + package_name: php-fpm + state: installed + register: package_version + +- name: detect available mariadb version for RedHat based + bodsch.core.package_version: + state: available + package_name: mariadb-server + repository: MariaDB + register: package_version + when: + - ansible_facts.os_family | lower == 'redhat' + - mariadb_use_external_repo +""" + +RETURN = r""" +full_version: + description: + - Version String + type: string +platform_version: + description: + - Version String with major and minor Part (e.g. 8.1) + type: string +major_version: + description: + - major Version (e.g. 8) + type: string +version_string_compressed: + description: + - Compressed variant of (C(platform_version)) (e.g. 81). + - Only needed for RedHat-based distributions. + type: string +""" + +# --------------------------------------------------------------------------------------- + + +class PackageVersion(object): + """ """ + + def __init__(self, module): + + self.module = module + + self.state = module.params.get("state") + self.package_name = module.params.get("package_name") + self.package_version = module.params.get("package_version") + self.repository = module.params.get("repository") + + self.distribution = distro.id() + self.version = distro.version() + self.codename = distro.codename() + + self.module.log( + msg=f" - pkg : {self.distribution} - {self.version} - {self.codename}" + ) + + def run(self): + """ """ + version = "" + error = True + msg = f"unknown or unsupported distribution: '{self.distribution}'" + + if self.distribution.lower() in ["debian", "ubuntu"]: + error, version, msg = self._search_apt() + + if self.distribution.lower() in ["arch", "artix"]: + error, version, msg = self._search_pacman() + + if self.distribution.lower() in [ + "centos", + "oracle", + "redhat", + "fedora", + "rocky", + "almalinux", + ]: + error, version, msg = self._search_yum() + + if error: + return dict(failed=True, available_versions=version, msg=msg) + + if version is not None: + major_version = None + minor_version = None + platform_version = None + + version_splitted = version.split(".") + + # self.module.log(msg=f" - version_splitted : {version_splitted}") + major_version = version_splitted[0] + + if len(version_splitted) > 1: + minor_version = version_splitted[1] + + if minor_version: + platform_version = ".".join([major_version, minor_version]) + else: + platform_version = major_version + + version = dict( + full_version=version, + platform_version=platform_version, + major_version=major_version, + version_string_compressed=version.replace(".", ""), + ) + + result = dict(failed=error, available=version, msg=msg) + + return result + + def _search_apt(self): + """ + support apt + """ + import apt + + pkg = None + + cache = apt.cache.Cache() + + # try: + # cache.update() + # except SystemError as error: + # self.module.log(msg=f"error : {error}") + # raise FetchFailedException(error) + # if not res and raise_on_error: + # self.module.log(msg="FetchFailedException()") + # raise FetchFailedException() + # else: + # cache.open() + + try: + cache.update() + cache.open() + except SystemError as error: + 
            self.module.log(msg=f"error : {error}")
+            return False, None, f"apt cache update failed: {error}"
+        except Exception as error:
+            self.module.log(msg=f"error : {error}")
+
+        try:
+            pkg = cache[self.package_name]
+            version_string = None
+
+            # debian:10 / buster:
+            #   [php-fpm=2:7.3+69]
+            # ubuntu:20.04 / focal
+            #   [php-fpm=2:7.4+75]
+            # debian:9  : 1:10.4.20+maria~stretch'
+            # debian 10 : 1:10.4.20+maria~buster
+            #
+        except KeyError as error:
+            self.module.log(msg=f"error : {error}")
+            return False, None, f"package {self.package_name} was not found in the apt cache"
+
+        if pkg:
+            # self.module.log(msg=f" - pkg       : {pkg} ({type(pkg)})")
+            # self.module.log(msg=f" - installed : {pkg.is_installed}")
+            # self.module.log(msg=f" - shortname : {pkg.shortname}")
+            # self.module.log(msg=f" - versions  : {pkg.versions}")
+            # self.module.log(msg=f" - versions  : {pkg.versions[0]}")
+
+            pkg_version = pkg.versions[0]
+            version = pkg_version.version
+
+            if version[1] == ":":
+                pattern = re.compile(r"(?<=\:)(?P<version>.*?)(?=[-+])")
+            else:
+                pattern = re.compile(r"(?P<version>.*?)(?=[-+])")
+
+            result = re.search(pattern, version)
+            version_string = result.group("version")
+
+            # self.module.log(msg=f" - version_string : {version_string}")
+            return False, version_string, ""
+
+        return False, None, f"package {self.package_name} not found"
+
+    def _search_yum(self):
+        """
+        support dnf and - as fallback - yum
+        """
+        package_mgr = self.module.get_bin_path("dnf", False)
+
+        if not package_mgr:
+            package_mgr = self.module.get_bin_path("yum", True)
+
+        if not package_mgr:
+            return True, "", "no valid package manager (yum or dnf) found"
+
+        package_version = self.package_version
+
+        if package_version:
+            package_version = package_version.replace(".", "")
+
+        args = []
+        args.append(package_mgr)
+
+        args.append("info")
+        args.append(self.package_name)
+
+        if self.repository:
+            args.append("--disablerepo")
+            args.append("*")
+            args.append("--enablerepo")
+            args.append(self.repository)
+
+        rc, out, err = self.module.run_command(args, check_rc=False)
+
+        version = ""
+
+        if rc == 0:
+            versions = []
+
+            pattern = re.compile(r".*Version.*: (?P<version>.*)", re.MULTILINE)
+            # pattern = re.compile(
+            #     r"^{0}[0-9+].*\.x86_64.*(?P<version>[0-9]+\.[0-9]+)\..*@(?P<repo>.*)".format(self.package_name),
+            #     re.MULTILINE
+            # )
+
+            for line in out.splitlines():
+                self.module.log(msg=f" line : {line}")
+                for match in re.finditer(pattern, line):
+                    versions.append(match.group("version"))
+
+            self.module.log(msg=f"versions : '{versions}'")
+
+            if len(versions) == 0:
+                msg = "nothing found"
+                error = True
+
+            if len(versions) == 1:
+                msg = ""
+                error = False
+                version = versions[0]
+
+            if len(versions) > 1:
+                msg = "more than one result found! Choose one of them!"
+                error = True
+                version = ", ".join(versions)
+        else:
+            msg = f"package {self.package_name} not found"
+            error = False
+            version = None
+
+        return error, version, msg
+
+    def _search_pacman(self):
+        """
+        pacman support
+        pacman --noconfirm --sync --search php7 | grep -E "^(extra|world)\\/php7 (.*)\\[installed\\]" | cut -d' ' -f2
+        """
+        pacman_bin = self.module.get_bin_path("pacman", True)
+
+        version = None
+        args = []
+        args.append(pacman_bin)
+
+        if self.state == "installed":
+            args.append("--query")
+        else:
+            args.append("--noconfirm")
+            args.append("--sync")
+
+        args.append("--search")
+        args.append(self.package_name)
+
+        rc, out, err = self._pacman(args)
+
+        if rc == 0:
+            pattern = re.compile(
+                # r'^(?P<repository>core|extra|community|world|local)\/{}[0-9\s]*(?P<version>\d\.\d).*-.*'.format(self.package_name),
+                r"^(?P<repository>core|extra|community|world|local)\/{} (?P<version>\d+(\.\d+){{0,2}}(\.\*)?)-.*".format(
+                    self.package_name
+                ),
+                re.MULTILINE,
+            )
+
+            result = re.search(pattern, out)
+
+            if result:
+                msg = ""
+                error = False
+                version = result.group("version")
+            else:
+                msg = f"package {self.package_name} not found"
+                error = False
+                version = None
+
+        else:
+            msg = f"package {self.package_name} not found"
+            error = False
+            version = None
+
+        return error, version, msg
+
+    def _pacman(self, cmd):
+        """
+        support pacman
+        """
+        rc, out, err = self.module.run_command(cmd, check_rc=False)
+
+        if rc != 0:
+            self.module.log(msg=f" rc : '{rc}'")
+            self.module.log(msg=f" out: '{out}'")
+            self.module.log(msg=f" err: '{err}'")
+
+        return rc, out, err
+
+
+# ---------------------------------------------------------------------------------------
+# Module execution.
+#
+
+
+def main():
+
+    args = dict(
+        state=dict(
+            choices=[
+                "installed",
+                "available",
+            ],
+            default="available",
+        ),
+        package_name=dict(required=True, type="str"),
+        package_version=dict(required=False, default=""),
+        repository=dict(required=False, default=""),
+    )
+
+    module = AnsibleModule(
+        argument_spec=args,
+        supports_check_mode=False,
+    )
+
+    result = PackageVersion(module).run()
+
+    module.log(msg=f"= result : '{result}'")
+
+    module.exit_json(**result)
+
+
+# import module snippets
+if __name__ == "__main__":
+    main()
diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/core/plugins/modules/pip_requirements.py b/ansible/playbooks/collections/ansible_collections/bodsch/core/plugins/modules/pip_requirements.py
new file mode 100644
index 0000000..a54d8e6
--- /dev/null
+++ b/ansible/playbooks/collections/ansible_collections/bodsch/core/plugins/modules/pip_requirements.py
@@ -0,0 +1,268 @@
+#!/usr/bin/python3
+# -*- coding: utf-8 -*-
+
+# (c) 2020-2023, Bodo Schulz
+# Apache-2.0 (see LICENSE or https://opensource.org/license/apache-2-0)
+# SPDX-License-Identifier: Apache-2.0
+
+from __future__ import absolute_import, print_function
+
+import os
+import os.path
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.bodsch.core.plugins.module_utils.checksum import Checksum
+from ansible_collections.bodsch.core.plugins.module_utils.directory import (
+    create_directory,
+)
+from ansible_collections.bodsch.core.plugins.module_utils.template.template import (
+    write_template,
+)
+
+__metaclass__ = type
+
+# ---------------------------------------------------------------------------------------
+
+
+DOCUMENTATION = """
+module: pip_requirements
+version_added: 1.0.16
+author: "Bodo Schulz (@bodsch)"
+
+short_description: This module creates a requirements file to install Python modules via pip.
+
+description:
+  - This module creates a requirements file to install Python modules via pip.
+ +options: + state: + description: + - Whether to install (C(present)), or remove (C(absent)) a package. + required: true + name: + description: + - Name of the running module. + - Needed to create the requirements file and the checksum + type: str + required: true + requirements: + description: + - A list with all python modules to install. + type: list + required: true +""" + +EXAMPLES = """ +- name: create pip requirements file + bodsch.core.pip_requirements: + name: docker + state: present + requirements: + - name: docker + compare_direction: "==" + version: 6.0.0 + + - name: setuptools + version: 39.1.0 + + - name: requests + versions: + - ">= 2.28.0" + - "< 2.30.0" + - "!~ 1.1.0" + register: pip_requirements +""" + +RETURN = """ +pip_present: + description: + - true if `pip` or `pip3` binary found + type: bool +requirements_file: + description: + - the created requirements file + type: str +""" + +# --------------------------------------------------------------------------------------- + +TPL_REQUIREMENTS = """# generated by ansible + +# # It is possible to specify requirements as plain names. +# pytest +# pytest-cov +# beautifulsoup4 +# +# # The syntax supported here is the same as that of requirement specifiers. +# docopt == 0.6.1 +# requests [security] >= 2.8.1, == 2.8.* ; python_version < "2.7" + +{% for k in item.split(':') %} +{{ k }} +{% endfor %} + +""" + + +class PipRequirements: + """ + Main Class + """ + + module = None + + def __init__(self, module): + """ """ + self.module = module + + self.state = module.params.get("state") + self.name = module.params.get("name") + self.requirements = module.params.get("requirements") + + self.cache_directory = "/var/cache/ansible/pip_requirements" + self.requirements_file_name = os.path.join( + self.cache_directory, f"{self.name}.txt" + ) + self.checksum_file_name = os.path.join( + self.cache_directory, f"{self.name}.checksum" + ) + + def run(self): + """ """ + # self.module.log(msg=f"{self.name}:") + # self.module.log(msg=f" {self.requirements}") + + create_directory(self.cache_directory) + + _changed = False + _msg = "There are no changes." + + checksum = None + + if self.state == "absent": + if os.path.exists(self.cache_directory): + os.remove(self.requirements_file_name) + os.remove(self.checksum_file_name) + _changed = True + _msg = "The pip requirements have been successfully removed." 
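+                # note: this only removes the cached requirements/checksum
+                # files; it does not uninstall any Python packages.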
+ + return dict(changed=_changed, msg=_msg) + + checksum = Checksum(self.module) + + changed, new_checksum, old_checksum = checksum.validate( + self.checksum_file_name, self.requirements + ) + + # self.module.log(f" changed : {changed}") + # self.module.log(f" new_checksum : {new_checksum}") + # self.module.log(f" old_checksum : {old_checksum}") + + self.pip_binary = self.module.get_bin_path("pip3", False) + + if not self.pip_binary: + self.pip_binary = self.module.get_bin_path("pip", False) + + if not self.pip_binary: + pip_present = False + else: + pip_present = True + + if not changed: + return dict( + changed=False, + requirements_file=self.requirements_file_name, + pip=dict(present=pip_present, bin_path=self.pip_binary), + ) + + req = self.pip_requirements(self.requirements) + + write_template(self.requirements_file_name, TPL_REQUIREMENTS, req) + + checksum.write_checksum(self.checksum_file_name, new_checksum) + + return dict( + changed=True, + requirements_file=self.requirements_file_name, + pip=dict(present=pip_present, bin_path=self.pip_binary), + ) + + def pip_requirements(self, data): + """ """ + result = [] + + valid_compare = [">=", "<=", ">", "<", "==", "!=", "~="] + + if isinstance(data, list): + for entry in data: + name = entry.get("name") + compare_direction = entry.get("compare_direction", None) + version = entry.get("version", None) + versions = entry.get("versions", []) + url = entry.get("url", None) + + if isinstance(version, str): + if compare_direction and compare_direction in valid_compare: + version = f"{compare_direction} {version}" + else: + version = f"== {version}" + + result.append(f"{name} {version}") + + elif isinstance(versions, list) and len(versions) > 0: + valid_versions = [ + x for x in versions if x.startswith(tuple(valid_compare)) + ] + versions = ", ".join(valid_versions) + result.append(f"{name} {versions}") + + elif isinstance(url, str): + result.append(f"{name} @ {url}") + + else: + result.append(name) + + return result + + +# =========================================== +# Module execution. 
+#
+
+
+def main():
+
+    args = dict(
+        state=dict(
+            choices=[
+                "present",
+                "absent",
+            ],
+            default="present",
+        ),
+        name=dict(
+            type="str",
+            required=True,
+        ),
+        requirements=dict(
+            type="list",
+            required=True,
+        ),
+    )
+
+    module = AnsibleModule(
+        argument_spec=args,
+        supports_check_mode=False,
+    )
+
+    obj = PipRequirements(module)
+    result = obj.run()
+
+    module.log(msg=f"= result: {result}")
+
+    module.exit_json(**result)
+
+
+# import module snippets
+if __name__ == "__main__":
+    main()
diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/core/plugins/modules/remove_ansible_backups.py b/ansible/playbooks/collections/ansible_collections/bodsch/core/plugins/modules/remove_ansible_backups.py
new file mode 100644
index 0000000..70ef711
--- /dev/null
+++ b/ansible/playbooks/collections/ansible_collections/bodsch/core/plugins/modules/remove_ansible_backups.py
@@ -0,0 +1,226 @@
+#!/usr/bin/python3
+# -*- coding: utf-8 -*-
+
+# (c) 2020-2023, Bodo Schulz
+# Apache-2.0 (see LICENSE or https://opensource.org/license/apache-2-0)
+# SPDX-License-Identifier: Apache-2.0
+
+from __future__ import absolute_import, division, print_function
+
+import os
+import re
+
+from ansible.module_utils.basic import AnsibleModule
+
+__metaclass__ = type
+
+# ---------------------------------------------------------------------------------------
+
+DOCUMENTATION = """
+module: remove_ansible_backups
+version_added: 0.9.0
+author: "Bodo Schulz (@bodsch)"
+
+short_description: Remove older backup files created by ansible
+
+description:
+  - Remove older backup files created by ansible
+
+options:
+  path:
+    description:
+      - Path in which to search for backup files
+    type: str
+    required: true
+  hold:
+    description:
+      - How many backup files should be retained
+    type: int
+    default: 2
+    required: false
+"""
+
+EXAMPLES = """
+- name: remove older ansible backup files
+  bodsch.core.remove_ansible_backups:
+    path: /etc
+    hold: 4
+"""
+
+RETURN = """
+removed:
+  returned: on success
+  description: >
+    Removed backup files, grouped by the original file they belong to.
+  type: dict
+"""
+
+# ---------------------------------------------------------------------------------------
+
+
+class RemoveAnsibleBackups(object):
+    """
+    Main Class
+    """
+
+    module = None
+
+    def __init__(self, module):
+        """
+        Initialize all needed Variables
+        """
+        self.module = module
+
+        self.verbose = module.params.get("verbose")
+        self.path = module.params.get("path")
+        self.hold = module.params.get("hold")
+
+    def run(self):
+        """
+        runner
+        """
+        _failed = False
+        _changed = False
+        _msg = "no backups found"
+
+        backups = self.find_backup_files()
+        removed = self.remove_backups(backups)
+
+        if len(removed) > 0:
+            _changed = True
+            _msg = removed
+
+        return dict(failed=_failed, changed=_changed, removed=_msg)
+
+    def find_backup_files(self):
+        """ """
+        _files = []
+        backup_files = []
+        backups = dict()
+
+        if os.path.isdir(self.path):
+            """ """
+            os.chdir(self.path)
+
+            # file_pattern = re.compile(r"
+            #     (?P<file_name>.*)\.(.*)\.(?P<year>\d{4})-(?P<month>.{2})-
+            #     (?P<day>\d+)@(?P<hour>\d+):(?P<minute>\d+):(?P<second>\d{2})~", re.MULTILINE)
+
+            file_pattern = re.compile(
+                r"""
+                    (?P<file_name>.*)\.   # everything before the first dot (the file name)
+                    (.*)\.                # whatever follows the first dot (e.g. an extension)
+                    (?P<year>\d{4})-      # year (4 digits)
+                    (?P<month>.{2})-      # month (2 characters; \d{2} would arguably be stricter)
+                    (?P<day>\d+)@         # day, then @
+                    (?P<hour>\d+):        # hour
+                    (?P<minute>\d+):      # minute
+                    (?P<second>\d{2})~    # second, then a tilde
+                """,
+                re.VERBOSE | re.MULTILINE,
+            )
+
+            # self.module.log(msg=f"search files in {self.path}")
+
+            # recursive file list
+            for root, dirnames, filenames in os.walk(self.path):
+                for filename in filenames:
+                    _files.append(os.path.join(root, filename))
+
+            # filter file list with regex
+            backup_files = list(filter(file_pattern.match, _files))
+            backup_files.sort()
+
+            for f in backup_files:
+                """ """
+                file_name = os.path.basename(f)
+                path_name = os.path.dirname(f)
+
+                name = re.search(file_pattern, file_name)
+
+                if name:
+                    n = name.group("file_name")
+                    _idx = os.path.join(path_name, n)
+
+                    # group the backups by the original file they belong to;
+                    # create the bucket on first sight, then append
+                    if _idx not in backups:
+                        backups[_idx] = []
+
+                    backups[_idx].append(f)
+
+            return backups
+
+        else:
+            # return an empty dict so that remove_backups() can iterate safely
+            return dict()
+
+    def remove_backups(self, backups):
+        """ """
+        _backups = dict()
+
+        for k, v in backups.items():
+            backup_count = len(v)
+
+            self.module.log(msg=f"  - file: {k} has {backup_count} backup(s)")
+
+            if backup_count > self.hold:
+                """ """
+                _backups[k] = []
+
+                # bck_hold = v[self.hold:]
+                bck_to_remove = v[: -self.hold]
+                # self.module.log(msg=f"  - hold backups  : {bck_hold}")
+                # self.module.log(msg=f"  - remove backups: {bck_to_remove}")
+
+                for bck in bck_to_remove:
+                    if os.path.isfile(bck):
+                        if self.module.check_mode:
+                            self.module.log(msg=f"CHECK MODE - remove {bck}")
+                        else:
+                            self.module.log(msg=f"  - remove {bck}")
+
+                        if not self.module.check_mode:
+                            os.remove(bck)
+
+                        _backups[k].append(bck)
+
+        return _backups
+
+
+# ===========================================
+# Module execution.
+#
+
+
+def main():
+
+    args = dict(
+        verbose=dict(
+            type="bool",
+            required=False,
+        ),
+        path=dict(
+            type="path",
+            required=True,
+        ),
+        hold=dict(type="int", required=False, default=2),
+    )
+
+    module = AnsibleModule(
+        argument_spec=args,
+        supports_check_mode=True,
+    )
+
+    postfix = RemoveAnsibleBackups(module)
+    result = postfix.run()
+
+    module.log(msg=f"= result: {result}")
+
+    module.exit_json(**result)
+
+
+# import module snippets
+if __name__ == "__main__":
+    main()
diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/core/plugins/modules/snakeoil_openssl.py b/ansible/playbooks/collections/ansible_collections/bodsch/core/plugins/modules/snakeoil_openssl.py
new file mode 100644
index 0000000..005f0f7
--- /dev/null
+++ b/ansible/playbooks/collections/ansible_collections/bodsch/core/plugins/modules/snakeoil_openssl.py
@@ -0,0 +1,229 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2021-2023, Bodo Schulz
+# Apache-2.0 (see LICENSE or https://opensource.org/license/apache-2-0/)
+# SPDX-License-Identifier: Apache-2.0
+
+from __future__ import absolute_import, print_function
+
+import os
+import re
+
+from ansible.module_utils.basic import AnsibleModule
+
+__metaclass__ = type
+
+ANSIBLE_METADATA = {
+    "metadata_version": "0.1",
+    "status": ["preview"],
+    "supported_by": "community",
+}
+
+
+class SnakeoilOpenssl(object):
+    """
+    Main Class
+    """
+
+    module = None
+
+    def __init__(self, module):
+        """
+        Initialize all needed Variables
+        """
+        self.module = module
+
+        self._openssl = module.get_bin_path("openssl", True)
+        self.state = module.params.get("state")
+        self.directory = module.params.get("directory")
+        self.domain = module.params.get("domain")
+        self.dhparam = module.params.get("dhparam")
+        self.cert_life_time = module.params.get("cert_life_time")
+        self.openssl_config = module.params.get("openssl_config")
+
+    def run(self):
+        """ """
+        result = dict(failed=True, changed=False, msg="failed")
+
+        # base_directory = os.path.join(self.directory, self.domain)
+        #
+        # if not os.path.isdir(base_directory):
+        #     return dict(
+        #         failed=True,
+        #         changed=False,
+        #         msg=f"missing directory {base_directory}"
+        #     )
+        #
+        # os.chdir(base_directory)
+        _ssl_args = []
+
+        csr_file = os.path.join(self.directory, self.domain, f"{self.domain}.csr")
+        crt_file = os.path.join(self.directory, self.domain, f"{self.domain}.crt")
+        pem_file = os.path.join(self.directory, self.domain, f"{self.domain}.pem")
+        key_file = os.path.join(self.directory, self.domain, f"{self.domain}.key")
+        dh_file = os.path.join(self.directory, self.domain, "dh.pem")
+
+        if self.state == "csr":
+            # builds roughly:
+            #   openssl req -new -sha512 -nodes -out <csr> -newkey rsa:4096 \
+            #       -keyout <key> -config <openssl_config>
+            _ssl_args.append(self._openssl)
+            _ssl_args.append("req")
+            _ssl_args.append("-new")
+            _ssl_args.append("-sha512")
+            _ssl_args.append("-nodes")
+            _ssl_args.append("-out")
+            _ssl_args.append(csr_file)
+            _ssl_args.append("-newkey")
+            _ssl_args.append("rsa:4096")
+            _ssl_args.append("-keyout")
+            _ssl_args.append(key_file)
+            _ssl_args.append("-config")
+            _ssl_args.append(self.openssl_config)
+
+            error, msg = self._base_directory()
+
+            if error:
+                return dict(failed=True, changed=False, msg=msg)
+
+            rc, out, err = self._exec(_ssl_args)
+
+            result = dict(failed=False, changed=True, msg="success")
+
+        if self.state == "crt":
+            # builds roughly:
+            #   openssl x509 -req -in <csr> -out <crt> -signkey <key> \
+            #       -extfile <openssl_config> -extensions req_ext -days <cert_life_time>
+            _ssl_args.append(self._openssl)
+            _ssl_args.append("x509")
+            _ssl_args.append("-req")
+            _ssl_args.append("-in")
+            _ssl_args.append(csr_file)
+            _ssl_args.append("-out")
+            _ssl_args.append(crt_file)
+            _ssl_args.append("-signkey")
+            _ssl_args.append(key_file)
+            _ssl_args.append("-extfile")
+            _ssl_args.append(self.openssl_config)
+            _ssl_args.append("-extensions")
+            _ssl_args.append("req_ext")
+            _ssl_args.append("-days")
+            _ssl_args.append(str(self.cert_life_time))
+
+            error, msg = self._base_directory()
+
+            if error:
+                return dict(failed=True, changed=False, msg=msg)
+
+            rc, out, err = self._exec(_ssl_args)
+
+            # cat {{ domain }}.crt {{ domain }}.key >> {{ domain }}.pem
+            if rc == 0:
+                filenames = [crt_file, key_file]
+                with open(pem_file, "w") as outfile:
+                    for fname in filenames:
+                        with open(fname) as infile:
+                            outfile.write(infile.read())
+
+                result = dict(failed=False, changed=True, msg="success")
+
+        if self.state == "dhparam":
+            # builds roughly: openssl dhparam -5 -out dh.pem <dhparam>
+            _ssl_args.append(self._openssl)
+            _ssl_args.append("dhparam")
+            _ssl_args.append("-5")
+            _ssl_args.append("-out")
+            _ssl_args.append(dh_file)
+            _ssl_args.append(str(self.dhparam))
+
+            error, msg = self._base_directory()
+
+            if error:
+                return dict(failed=True, changed=False, msg=msg)
+
+            rc, out, err = self._exec(_ssl_args)
+
+            result = dict(failed=False, changed=True, msg="success")
+
+        if self.state == "dhparam_size":
+            # builds roughly: openssl dhparam -in dh.pem -text
+            _ssl_args.append(self._openssl)
+            _ssl_args.append("dhparam")
+            _ssl_args.append("-in")
+            _ssl_args.append(dh_file)
+            _ssl_args.append("-text")
+
+            error, msg = self._base_directory()
+
+            if error:
+                return dict(failed=False, changed=False, size=int(0))
+
+            rc, out, err = self._exec(_ssl_args)
+
+            if rc == 0:
+                """ """
+                output_string = 0
+                pattern = re.compile(r".*DH Parameters: \((?P<size>\d+) bit\).*")
+
+                result = re.search(pattern, out)
+                if result:
+                    output_string = result.group("size")
+
+                result = dict(failed=False, changed=False, size=int(output_string))
+
+        return result
+
+    def _base_directory(self):
+        """ """
+        error = False
+        msg = ""
+
+        base_directory = os.path.join(self.directory, self.domain)
+
+        if os.path.isdir(base_directory):
+            os.chdir(base_directory)
+        else:
+            error = True
+            msg = f"missing directory {base_directory}"
+
+        return (error, msg)
+
+    def _exec(self, args):
+        """ """
+        self.module.log(msg="args: {}".format(args))
+
+        rc, out, err = self.module.run_command(args, check_rc=True)
+        self.module.log(msg="  rc : '{}'".format(rc))
+        if rc != 0:
+            self.module.log(msg="  out: '{}'".format(str(out)))
+            self.module.log(msg="  err: '{}'".format(err))
+
+        return rc, out, err
+
+
+# ===========================================
+# Module execution.
+#
+
+
+def main():
+    """ """
+    args = dict(
+        state=dict(required=True, choices=["crt", "csr", "dhparam", "dhparam_size"]),
+        directory=dict(required=True, type="path"),
+        domain=dict(required=True, type="path"),
+        dhparam=dict(default=2048, type="int"),
+        cert_life_time=dict(default=10, type="int"),
+        openssl_config=dict(required=False, type="str"),
+        # openssl_params=dict(required=True, type="path"),
+    )
+
+    module = AnsibleModule(
+        argument_spec=args,
+        supports_check_mode=False,
+    )
+
+    openssl = SnakeoilOpenssl(module)
+    result = openssl.run()
+
+    module.log(msg=f"= result : '{result}'")
+
+    module.exit_json(**result)
+
+
+# import module snippets
+if __name__ == "__main__":
+    main()
diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/core/plugins/modules/sync_directory.py b/ansible/playbooks/collections/ansible_collections/bodsch/core/plugins/modules/sync_directory.py
new file mode 100644
index 0000000..4caed3c
--- /dev/null
+++ b/ansible/playbooks/collections/ansible_collections/bodsch/core/plugins/modules/sync_directory.py
@@ -0,0 +1,274 @@
+#!/usr/bin/python3
+# -*- coding: utf-8 -*-
+
+# (c) 2020-2023, Bodo Schulz
+# Apache-2.0 (see LICENSE or https://opensource.org/license/apache-2-0)
+# SPDX-License-Identifier: Apache-2.0
+
+from __future__ import absolute_import, division, print_function
+
+import collections
+import logging
+import os
+import re
+
+import dirsync
+from ansible.module_utils.basic import AnsibleModule
+
+# ---------------------------------------------------------------------------------------
+
+DOCUMENTATION = r"""
+---
+module: sync_directory
+version_added: 1.1.3
+author: "Bodo Schulz (@bodsch)"
+
+short_description: Synchronises directories similar to rsync.
+
+description:
+  - Synchronises directories similar to rsync.
+
+requirements:
+  - dirsync
+
+options:
+  source_directory:
+    description:
+      - The source directory.
+    type: str
+    required: true
+  destination_directory:
+    description:
+      - The destination directory.
+    type: str
+    required: true
+  arguments:
+    description:
+      - A dictionary with custom arguments.
+    type: dict
+    required: false
+  include_pattern:
+    description:
+      - A list of regex patterns to include.
+    type: list
+    required: false
+  exclude_pattern:
+    description:
+      - A list of regex patterns to exclude.
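+    # note (descriptive only): run() below joins all patterns with '|' and wraps
+    # them as '.*(<p1>|<p2>).*', so an entry like '\.json$' effectively matches
+    # anywhere in a file's path.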
+    type: list
+    required: false
+"""
+
+EXAMPLES = r"""
+- name: sync /opt/server/data to /opt/data
+  bodsch.core.sync_directory:
+    source_directory: /opt/server/data
+    destination_directory: /opt/data
+    arguments:
+      verbose: true
+      purge: false
+"""
+
+RETURN = r"""
+changed:
+  description:
+    - changed or not
+  type: bool
+msg:
+  description:
+    - status information
+  type: str
+"""
+
+# ---------------------------------------------------------------------------------------
+
+
+class TailLogHandler(logging.Handler):
+    """ collects formatted log records in a bounded queue """
+
+    def __init__(self, log_queue):
+        logging.Handler.__init__(self)
+        self.log_queue = log_queue
+
+    def emit(self, record):
+        self.log_queue.append(self.format(record))
+
+
+class TailLogger(object):
+    """ keeps only the last maxlen log lines emitted by dirsync """
+
+    def __init__(self, maxlen):
+        self._log_queue = collections.deque(maxlen=maxlen)
+        self._log_handler = TailLogHandler(self._log_queue)
+
+    def contents(self):
+        return "\n".join(self._log_queue)
+
+    @property
+    def log_handler(self):
+        return self._log_handler
+
+
+class Sync(object):
+    """ """
+
+    def __init__(self, module):
+        """ """
+        self.module = module
+
+        self.source_directory = module.params.get("source_directory")
+        self.destination_directory = module.params.get("destination_directory")
+
+        self.arguments = module.params.get("arguments")
+
+        self.include_pattern = module.params.get("include_pattern")
+        self.exclude_pattern = module.params.get("exclude_pattern")
+
+    def run(self):
+        """ """
+        _failed = False
+        _changed = False
+        _msg = "The directories are in sync."
+
+        include_pattern = None
+        exclude_pattern = None
+
+        tail = TailLogger(2)
+
+        logger = logging.getLogger("dirsync")
+        formatter = logging.Formatter("%(message)s")
+
+        log_handler = tail.log_handler
+        log_handler.setFormatter(formatter)
+        logger.addHandler(log_handler)
+        logger.setLevel(logging.DEBUG)
+
+        if self.include_pattern and len(self.include_pattern) > 0:
+            include_pattern = "|".join(self.include_pattern)
+            include_pattern = f".*({include_pattern}).*"
+
+        if self.exclude_pattern and len(self.exclude_pattern) > 0:
+            exclude_pattern = "|".join(self.exclude_pattern)
+            exclude_pattern = f".*({exclude_pattern}).*"
+
+        # self.module.log(msg=f"include_pattern: {include_pattern}")
+        # include_pattern = ('^.*\\.json$',)
+
+        if not os.path.isdir(self.source_directory):
+            return dict(failed=True, msg="The source directory does not exist.")
+
+        if not os.path.isdir(self.destination_directory):
+            return dict(failed=True, msg="The destination directory does not exist.")
+
+        if self.arguments and isinstance(self.arguments, dict):
+            _create = self.arguments.get("create", False)
+            _verbose = self.arguments.get("verbose", False)
+            _purge = self.arguments.get("purge", False)
+
+            args = dict(
+                create=_create,
+                verbose=_verbose,
+                purge=_purge,
+            )
+
+            args.update({"logger": logger})
+
+        else:
+            # use real booleans here; the strings "False" would be truthy
+            # and e.g. purge would then delete files unexpectedly
+            args = {
+                "create": False,
+                "verbose": False,
+                "purge": False,
+                "logger": logger,
+            }
+
+        if include_pattern:
+            args.update(
+                {
+                    "include": include_pattern,
+                }
+            )
+        if exclude_pattern:
+            args.update(
+                {
+                    "exclude": exclude_pattern,
+                }
+            )
+
+        args.update({"force": True})
+
+        self.module.log(msg=f"args: {args}")
+
+        dirsync.sync(self.source_directory, self.destination_directory, "sync", **args)
+
+        log_contents = tail.contents()
+
+        self.module.log(msg=f"log_contents: {log_contents}")
+
+        if len(log_contents) > 0:
+            if "directories were created" in log_contents:
+                pattern = re.compile(
+                    r"(?P<directories>\d+).*directories were created.$"
+                )
+            else:
+                pattern = re.compile(
+                    r"(?P<directories>\d+).*directories parsed, (?P<files_copied>\d+) files copied"
+                )
+
+            re_result = re.search(pattern, log_contents)
+
+            if re_result:
+
+                directories = None
+                files_copied = None
+
+                try:
+                    directories = re_result.group("directories")
+                except Exception:
+                    pass
+
+                try:
+                    files_copied = re_result.group("files_copied")
+                except Exception:
+                    pass
+
+                # self.module.log(msg=f"directories : {directories}")
+                # self.module.log(msg=f"files_copied: {files_copied}")
+
+                if files_copied:
+                    if int(files_copied) == 0:
+                        _changed = False
+                        _msg = "The directories are in sync."
+                    elif int(files_copied) > 0:
+                        _changed = True
+                        _msg = "The directories were synchronised successfully."
+                elif directories:
+                    if int(directories) > 0:
+                        _changed = True
+                        _msg = "The directories were synchronised successfully."
+
+        result = dict(changed=_changed, failed=_failed, msg=_msg)
+
+        return result
+
+
+def main():
+    """ """
+    args = dict(
+        source_directory=dict(required=True, type="str"),
+        destination_directory=dict(required=True, type="str"),
+        arguments=dict(required=False, type="dict"),
+        include_pattern=dict(required=False, type="list"),
+        exclude_pattern=dict(required=False, type="list"),
+    )
+
+    module = AnsibleModule(
+        argument_spec=args,
+        supports_check_mode=True,
+    )
+
+    p = Sync(module)
+    result = p.run()
+
+    module.log(msg=f"= result: {result}")
+    module.exit_json(**result)
+
+
+if __name__ == "__main__":
+    main()
diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/core/plugins/modules/syslog_cmd.py b/ansible/playbooks/collections/ansible_collections/bodsch/core/plugins/modules/syslog_cmd.py
new file mode 100644
index 0000000..13f8694
--- /dev/null
+++ b/ansible/playbooks/collections/ansible_collections/bodsch/core/plugins/modules/syslog_cmd.py
@@ -0,0 +1,257 @@
+#!/usr/bin/python3
+# -*- coding: utf-8 -*-
+
+# (c) 2020-2022, Bodo Schulz
+# BSD 2-clause (see LICENSE or https://opensource.org/licenses/BSD-2-Clause)
+
+from __future__ import absolute_import, print_function
+
+import re
+
+from ansible.module_utils.basic import AnsibleModule
+
+# ---------------------------------------------------------------------------------------
+
+DOCUMENTATION = r"""
+---
+module: syslog_cmd
+version_added: "1.1.3"
+short_description: Run syslog-ng with arbitrary command-line parameters
+author:
+  - "Bodo Schulz (@bodsch)"
+
+description:
+  - Executes the C(syslog-ng) binary with the given list of parameters.
+  - Typical use cases are configuration syntax validation and querying the installed version.
+
+requirements:
+  - syslog-ng
+
+options:
+  parameters:
+    description:
+      - List of command-line parameters to pass to C(syslog-ng).
+      - Each list item may contain a single parameter or a parameter with a value.
+      - Items containing spaces are split into multiple arguments before execution.
+    type: list
+    elements: str
+    required: true
+
+notes:
+  - The module supports check mode.
+  - When used with C(--version) or C(--syntax-only) in check mode, no external command is executed and simulated results are returned.
+"""
+
+EXAMPLES = r"""
+- name: Validate syslog-ng configuration
+  bodsch.core.syslog_cmd:
+    parameters:
+      - --syntax-only
+  check_mode: true
+  when:
+    - not ansible_check_mode
+
+- name: Detect syslog-ng config version
+  bodsch.core.syslog_cmd:
+    parameters:
+      - --version
+  register: _syslog_config_version
+
+- name: Run syslog-ng with custom parameters
+  bodsch.core.syslog_cmd:
+    parameters:
+      - --control
+      - show-config
+  register: _syslog_custom_cmd
+"""
+
+RETURN = r"""
+rc:
+  description:
+    - Return code from the C(syslog-ng) command.
+  returned: always
+  type: int
+
+failed:
+  description:
+    - Indicates if the module execution failed.
+  returned: always
+  type: bool
+
+args:
+  description:
+    - Full command list used to invoke C(syslog-ng).
+    - Will be C(None) when running in check mode.
+  returned: when a supported command is executed or simulated
+  type: list
+
+version:
+  description:
+    - Detected C(syslog-ng) version string (for example C(3.38)).
+  returned: when C(--version) is present in I(parameters)
+  type: str
+
+msg:
+  description:
+    - Human readable message, for example C("syntax okay") for successful syntax checks or an error description.
+  returned: when available
+  type: str
+
+stdout:
+  description:
+    - Standard output from the C(syslog-ng) command.
+    - In check mode with C(--syntax-only), contains a simulated message.
+  returned: when C(--syntax-only) is used or on error
+  type: str
+
+stderr:
+  description:
+    - Standard error from the C(syslog-ng) command.
+  returned: when C(--syntax-only) is used and the command fails
+  type: str
+
+ansible_module_results:
+  description:
+    - Internal result marker, set to C("failed") when no supported action was executed.
+  returned: when no supported parameters were processed
+  type: str
+"""
+
+
+# ---------------------------------------------------------------------------------------
+
+
+class SyslogNgCmd(object):
+    module = None
+
+    def __init__(self, module):
+        """
+        Initialize all needed Variables
+        """
+        self.module = module
+
+        self._syslog_ng_bin = module.get_bin_path("syslog-ng", False)
+        self.parameters = module.params.get("parameters")
+
+    def run(self):
+        """ """
+        result = dict(failed=True, ansible_module_results="failed")
+
+        parameter_list = self._flatten_parameter()
+
+        self.module.debug(f"-> {parameter_list}")
+
+        if self.module.check_mode:
+            self.module.debug("In check mode.")
+            if "--version" in parameter_list:
+                return dict(rc=0, failed=False, args=None, version="1")
+            if "--syntax-only" in parameter_list:
+                return dict(
+                    rc=0,
+                    failed=False,
+                    args=None,
+                    stdout="In check mode.",
+                    stderr="",
+                )
+
+        if not self._syslog_ng_bin:
+            return dict(rc=1, failed=True, msg="no installed syslog-ng found")
+
+        args = []
+        args.append(self._syslog_ng_bin)
+
+        if len(parameter_list) > 0:
+            for arg in parameter_list:
+                args.append(arg)
+
+        self.module.log(msg=f"  - args {args}")
+
+        rc, out, err = self._exec(args)
+
+        if "--version" in parameter_list:
+            """
+            get version
+            """
+            pattern = re.compile(
+                r".*Installer-Version: (?P<version>\d\.\d+)\.", re.MULTILINE
+            )
+            version = re.search(pattern, out)
+            # guard against missing matches instead of failing with an AttributeError
+            version = version.group("version") if version else None
+
+            self.module.log(msg=f"  version: '{version}'")
+
+            if rc == 0:
+                return dict(rc=0, failed=False, args=args, version=version)
+
+        if "--syntax-only" in parameter_list:
+            """
+            check syntax
+            """
+            # self.module.log(msg=f"  rc : '{rc}'")
+            # self.module.log(msg=f"  out: '{out}'")
+            # self.module.log(msg=f"  err: '{err}'")
+
+            if rc == 0:
+                return dict(rc=rc, failed=False, args=args, msg="syntax okay")
+            else:
+                return dict(
+                    rc=rc,
+                    failed=True,
+                    args=args,
+                    stdout=out,
+                    stderr=err,
+                )
+
+        return result
+
+    def _exec(self, args):
+        """ """
+        rc, out, err = self.module.run_command(args, check_rc=True)
+        # self.module.log(msg="  rc : '{}'".format(rc))
+        # self.module.log(msg="  out: '{}' ({})".format(out, type(out)))
+        # self.module.log(msg="  err: '{}'".format(err))
+        return rc, out, err
+
+    def _flatten_parameter(self):
+        """
+        split and flatten parameter list
+
+        input:  ['--validate', '--log-level debug']
+        output: ['--validate',
'--log-level', 'debug'] + """ + parameters = [] + + for _parameter in self.parameters: + if " " in _parameter: + _list = _parameter.split(" ") + for _element in _list: + parameters.append(_element) + else: + parameters.append(_parameter) + + return parameters + + +# =========================================== +# Module execution. +# + + +def main(): + + module = AnsibleModule( + argument_spec=dict( + parameters=dict(required=True, type="list"), + ), + supports_check_mode=True, + ) + + c = SyslogNgCmd(module) + result = c.run() + + module.exit_json(**result) + + +# import module snippets +if __name__ == "__main__": + main() diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/bash_alias/default/main.yml b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/bash_alias/default/main.yml new file mode 100644 index 0000000..e69de29 diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/fail2ban/.ansible-lint b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/fail2ban/.ansible-lint new file mode 100644 index 0000000..48763f0 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/fail2ban/.ansible-lint @@ -0,0 +1,5 @@ +--- + +skip_list: + - name[casing] + - name[template] diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/fail2ban/.editorconfig b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/fail2ban/.editorconfig new file mode 100644 index 0000000..898cdbd --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/fail2ban/.editorconfig @@ -0,0 +1,23 @@ +# EditorConfig helps developers define and maintain consistent +# coding styles between different editors and IDEs +# https://editorconfig.org/ + + +root = true + +[*] +indent_style = space +indent_size = 2 + +end_of_line = lf +charset = utf-8 +trim_trailing_whitespace = true +insert_final_newline = true +max_line_length = 100 + +[*.py] +indent_style = space +indent_size = 4 + +[*.md] +trim_trailing_whitespace = false diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/fail2ban/.flake8 b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/fail2ban/.flake8 new file mode 100644 index 0000000..1962f7e --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/fail2ban/.flake8 @@ -0,0 +1,19 @@ +[flake8] + +# E221 multiple spaces before operator +# E251 unexpected spaces around keyword / parameter equals + +ignore = E221,E251 + +exclude = + # No need to traverse our git directory + .git, + # There's no value in checking cache directories + __pycache__, + .tox + +# E203: https://github.com/python/black/issues/315 +# ignore = D,E741,W503,W504,H,E501,E203 + +max-line-length = 195 + diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/fail2ban/.gitignore b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/fail2ban/.gitignore new file mode 100644 index 0000000..3d35e6a --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/fail2ban/.gitignore @@ -0,0 +1,6 @@ +.tox +.galaxy_install_info +*kate-swp +__pycache__ +.cache +.directory diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/fail2ban/.yamllint b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/fail2ban/.yamllint new file mode 100644 index 0000000..20fd7aa --- /dev/null +++ 
b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/fail2ban/.yamllint @@ -0,0 +1,40 @@ +--- +# Based on ansible-lint config +extends: default + +rules: + braces: + max-spaces-inside: 1 + level: error + brackets: + max-spaces-inside: 1 + level: error + colons: + max-spaces-after: -1 + level: error + commas: + max-spaces-after: -1 + level: error + comments: disable + comments-indentation: disable + document-start: disable + empty-lines: + max: 3 + level: error + hyphens: + level: error + indentation: + spaces: 2 + key-duplicates: enable + line-length: + max: 195 + level: warning + new-line-at-end-of-file: disable + new-lines: + type: unix + trailing-spaces: disable + truthy: disable + +ignore: | + molecule/ + .github diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/fail2ban/CONTRIBUTING.md b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/fail2ban/CONTRIBUTING.md new file mode 100644 index 0000000..e3cd4cc --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/fail2ban/CONTRIBUTING.md @@ -0,0 +1,31 @@ +Contributing +============ +If you want to contribute to a project and make it better, your help is very welcome. +Contributing is also a great way to learn more about social coding on Github, new technologies and +and their ecosystems and how to make constructive, helpful bug reports, feature requests and the +noblest of all contributions: a good, clean pull request. + +### How to make a clean pull request + +Look for a project's contribution instructions. If there are any, follow them. + +- Create a personal fork of the project on Github. +- Clone the fork on your local machine. Your remote repo on Github is called `origin`. +- Add the original repository as a remote called `upstream`. +- If you created your fork a while ago be sure to pull upstream changes into your local repository. +- Create a new branch to work on! Branch from `develop` if it exists, else from `master`. +- Implement/fix your feature, comment your code. +- Follow the code style of the project, including indentation. +- If the project has tests run them! +- Write or adapt tests as needed. +- Add or change the documentation as needed. +- Squash your commits into a single commit. Create a new branch if necessary. +- Push your branch to your fork on Github, the remote `origin`. +- From your fork open a pull request in the correct branch. Target the project's `develop` branch if there is one, else go for `master`! +- If the maintainer requests further changes just push them to your branch. The PR will be updated automatically. +- Once the pull request is approved and merged you can pull the changes from `upstream` to your local repo and delete + your extra branch(es). + +And last but not least: Always write your commit messages in the present tense. +Your commit message should describe what the commit, when applied, does to the +code – not what you did to the code. diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/fail2ban/LICENSE b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/fail2ban/LICENSE new file mode 100644 index 0000000..261eeb9 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/fail2ban/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. 
+ + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/fail2ban/Makefile b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/fail2ban/Makefile
new file mode 100644
index 0000000..bfaab7c
--- /dev/null
+++ b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/fail2ban/Makefile
@@ -0,0 +1,22 @@
+#
+export TOX_SCENARIO  ?= default
+export TOX_ANSIBLE   ?= ansible_8.5
+
+.PHONY: converge destroy verify test lint
+
+default: converge
+
+converge:
+	@hooks/converge
+
+destroy:
+	@hooks/destroy
+
+verify:
+	@hooks/verify
+
+test:
+	@hooks/test
+
+lint:
+	@hooks/lint
diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/fail2ban/README.md b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/fail2ban/README.md
new file mode 100644
index 0000000..9d68c95
--- /dev/null
+++ b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/fail2ban/README.md
@@ -0,0 +1,91 @@
+
+# Ansible Role:  `bodsch.core.fail2ban`
+
+An Ansible role that installs and configures fail2ban on Debian/Ubuntu, ArchLinux and ArtixLinux (maybe also on other `openrc`-based systems).
+
+## Role Variables
+
+Available variables are listed below, along with default values (see `defaults/main.yaml`):
+
+`fail2ban_ignoreips`
+
+can be an IP address, a CIDR mask or a DNS host.
+
+`fail2ban_conf`
+
+`fail2ban_jail`
+
+`fail2ban_path_definitions`
+
+`fail2ban_jails`
+
+
+## Example Playbook
+
+see the [molecule test](molecule/default/converge.yml) and its [configuration](molecule/default/group_vars/all/vars.yml)
+
+```yaml
+fail2ban_ignoreips:
+  - 127.0.0.1/8
+  - 192.168.0.0/24
+
+fail2ban_conf:
+  default:
+    loglevel: INFO
+    logtarget: "/var/log/fail2ban.log"
+    syslogsocket: auto
+    socket: /run/fail2ban/fail2ban.sock
+    pidfile: /run/fail2ban/fail2ban.pid
+    dbfile: /var/lib/fail2ban/fail2ban.sqlite3
+    dbpurgeage: 1d
+    dbmaxmatches: 10
+  definition: {}
+  thread:
+    stacksize: 0
+
+fail2ban_jail:
+  default:
+    ignoreips: "{{ fail2ban_ignoreips }}"
+    bantime: 600
+    maxretry: 3
+    findtime: 3200
+    backend: auto
+    usedns: warn
+    logencoding: auto
+    jails_enabled: false
+  actions:
+    destemail: root@localhost
+    sender: root@localhost
+    mta: sendmail
+    protocol: tcp
+    chain: INPUT
+    banaction: iptables-multiport
+
+fail2ban_jails:
+  - name: ssh
+    enabled: true
+    port: ssh
+    filter: sshd
+    logpath: /var/log/authlog.log
+    findtime: 3200
+    bantime: 86400
+    maxretry: 2
+  - name: ssh-breakin
+    enabled: true
+    port: ssh
+    filter: sshd-break-in
+    logpath: /var/log/authlog.log
+    maxretry: 2
+  - name: ssh-ddos
+    enabled: true
+    port: ssh
+    filter: sshd-ddos
+    logpath: /var/log/authlog.log
+    maxretry: 2
+```
+
+## Author
+
+- Bodo Schulz
diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/fail2ban/defaults/main.yaml b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/fail2ban/defaults/main.yaml
new file mode 100644
index 0000000..2cf7a7b
--- /dev/null
+++ b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/fail2ban/defaults/main.yaml
@@ -0,0 +1,55 @@
+---
+
+# "ignoreip" can be an IP address, a CIDR mask or a DNS host
+fail2ban_ignoreips:
+  - 127.0.0.1/8
+
+fail2ban_conf: {}
+
+fail2ban_jail: {}
+
+fail2ban_path_definitions:
+  # ARTIX - ArchLinux based, but without systemd
+  artixlinux:
+    includes:
+      before: paths-common.conf
+      after: paths-overrides.local
+    defaults:
+      syslog_mail: /var/log/mail.log
+      # control the `mail.warn` setting, see `/etc/rsyslog.d/50-default.conf` (if commented, `mail.*` wins).
+      # syslog_mail_warn = /var/log/mail.warn
+      syslog_mail_warn: '%(syslog_mail)s'
+      syslog_user: /var/log/user.log
+      syslog_daemon: /var/log/daemon.log
+      auth_log: /var/log/auth.log
+  # ARCH
+  archlinux:
+    includes:
+      before: paths-common.conf
+      after: paths-overrides.local
+    defaults:
+      apache_error_log: /var/log/httpd/*error_log
+      apache_access_log: /var/log/httpd/*access_log
+      exim_main_log: /var/log/exim/main.log
+      mysql_log:
+        - /var/log/mariadb/mariadb.log
+        - /var/log/mysqld.log
+      roundcube_errors_log: /var/log/roundcubemail/errors
+      # These services will log to the journal via syslog, so use the journal by
+      # default.
+      syslog_backend: systemd
+      sshd_backend: systemd
+      dropbear_backend: systemd
+      proftpd_backend: systemd
+      pureftpd_backend: systemd
+      wuftpd_backend: systemd
+      postfix_backend: systemd
+      dovecot_backend: systemd
+
+fail2ban_jails: []
+
+fail2ban_actions: []
+
+fail2ban_filters: []
+
+...
diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/fail2ban/handlers/main.yaml b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/fail2ban/handlers/main.yaml
new file mode 100644
index 0000000..2de8feb
--- /dev/null
+++ b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/fail2ban/handlers/main.yaml
@@ -0,0 +1,9 @@
+---
+
+- name: restart fail2ban
+  ansible.builtin.service:
+    name: fail2ban
+    state: restarted
+  listen: "restart fail2ban"
+
+...
diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/fail2ban/hooks/converge b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/fail2ban/hooks/converge
new file mode 100755
index 0000000..0c50932
--- /dev/null
+++ b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/fail2ban/hooks/converge
@@ -0,0 +1,3 @@
+#!/usr/bin/env bash
+
+hooks/tox.sh "converge"
diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/fail2ban/hooks/destroy b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/fail2ban/hooks/destroy
new file mode 100755
index 0000000..b4a3f8d
--- /dev/null
+++ b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/fail2ban/hooks/destroy
@@ -0,0 +1,3 @@
+#!/usr/bin/env bash
+
+hooks/tox.sh "destroy"
diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/fail2ban/hooks/lint b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/fail2ban/hooks/lint
new file mode 100755
index 0000000..ef226a0
--- /dev/null
+++ b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/fail2ban/hooks/lint
@@ -0,0 +1,3 @@
+#!/usr/bin/env bash
+
+hooks/tox.sh "lint"
diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/fail2ban/hooks/molecule.rc b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/fail2ban/hooks/molecule.rc
new file mode 100644
index 0000000..78c8621
--- /dev/null
+++ b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/fail2ban/hooks/molecule.rc
@@ -0,0 +1,74 @@
+
+TOX_ARGS=
+
+if [ -n "${TOX_SCENARIO}" ]
+then
+  TOX_ARGS="--scenario-name ${TOX_SCENARIO}"
+fi
+
+TOX_OPTS="-e ${TOX_ANSIBLE}"
+
+vercomp() {
+
+  [[ $1 == $2 ]] && return 0
+  v1=$(echo "$1" | sed -e 's|-|.|g')
+  v2=$(echo "$2" | sed -e 's|-|.|g')
+
+  local IFS=.
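+  # vercomp compares two dotted version strings field by field and returns
+  # 0 when equal, 1 when $1 > $2 and 2 when $1 < $2,
+  # e.g. "vercomp 2.10 2.9" returns 1.
+  # note: v1/v2 above normalise dashes to dots, but the loop below still reads
+  # $1/$2 directly, so pre-release suffixes such as "1.2.3-1" are not normalised.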
+ local i ver1=($1) ver2=($2) + # fill empty fields in ver1 with zeros + for ((i=${#ver1[@]}; i<${#ver2[@]}; i++)) + do + ver1[i]=0 + done + + for ((i=0; i<${#ver1[@]}; i++)) + do + if [[ -z ${ver2[i]} ]] + then + # fill empty fields in ver2 with zeros + ver2[i]=0 + fi + if ((10#${ver1[i]} > 10#${ver2[i]})) + then + return 1 + fi + if ((10#${ver1[i]} < 10#${ver2[i]})) + then + return 2 + fi + done + return 0 +} + +install_collection() { + local collection="${1}" + + echo "Install the required collection '${collection}'" + ansible-galaxy collection install ${collection} +} + +remove_collection() { + + local collection="${1}" + + namespace="$(echo "${collection}" | cut -d '.' -f1)" + name="$(echo "${collection}" | cut -d '.' -f2)" + + collection="${HOME}/.ansible/collections/ansible_collections/${namespace}/${name}" + + rm \ + --recursive \ + --force \ + "${collection}" +} + +publish() { + + TOKEN="${HOME}/.ansible/galaxy_token" + + if [ -e "${TOKEN}" ] + then + ansible-galaxy import --token=$(cat "${TOKEN}") bodsch # "???" + fi +} diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/fail2ban/hooks/test b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/fail2ban/hooks/test new file mode 100755 index 0000000..2869139 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/fail2ban/hooks/test @@ -0,0 +1,3 @@ +#!/usr/bin/env bash + +hooks/tox.sh "test" diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/fail2ban/hooks/tox.sh b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/fail2ban/hooks/tox.sh new file mode 100755 index 0000000..c93de29 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/fail2ban/hooks/tox.sh @@ -0,0 +1,52 @@ +#!/usr/bin/env bash + +. hooks/molecule.rc + +TOX_TEST="${1}" + +if [ -f "./collections.yml" ] +then + for collection in $(grep -v "#" collections.yml | grep "^ - name: " | awk -F ': ' '{print $2}') + do + collections_installed="$(ansible-galaxy collection list | grep ${collection} 2> /dev/null)" + + if [ -z "${collections_installed}" ] + then + install_collection ${collection} + else + collection_version=$(echo "${collections_installed}" | awk -F ' ' '{print $2}') + version="$(grep -v "#" collections.yml | grep -A1 "^ - name: ${collection}" | grep "version: " 2> /dev/null | awk -F ': ' '{print $2}' | sed -e 's|=||' -e 's|>||' -e 's|"||g')" + + echo "The required collection '${collection}' is installed in version ${collection_version}." + + if [ ! -z "${version}" ] + then + + vercomp "${version}" "${collection_version}" + + case $? 
in + 0) op='=' ;; + 1) op='>' ;; + 2) op='<' ;; + esac + + if [[ $op = "=" ]] || [[ $op = ">" ]] + then + # echo "FAIL: Expected '$3', Actual '$op', Arg1 '$1', Arg2 '$2'" + echo "re-install for version ${version}" + + remove_collection ${collection} + install_collection ${collection} + else + : + # echo "Pass: '$1 $op $2'" + fi + else + : + fi + fi + done + echo "" +fi + +tox ${TOX_OPTS} -- molecule ${TOX_TEST} ${TOX_ARGS} diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/fail2ban/hooks/verify b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/fail2ban/hooks/verify new file mode 100755 index 0000000..5f436af --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/fail2ban/hooks/verify @@ -0,0 +1,3 @@ +#!/usr/bin/env bash + +hooks/tox.sh "verify" diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/fail2ban/meta/main.yml b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/fail2ban/meta/main.yml new file mode 100644 index 0000000..7cbb0e4 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/fail2ban/meta/main.yml @@ -0,0 +1,31 @@ +--- + +galaxy_info: + role_name: fail2ban + + author: Bodo Schulz + description: install and configure fail2ban on various systems + + license: Apache 2.0 + min_ansible_version: "2.9" + + platforms: + - name: ArchLinux + - name: Ubuntu + versions: + # 20.04 + - focal + - name: Debian + versions: + # 10 + - buster + # 11 + - bullseye + - bookworm + + galaxy_tags: + - system + - networking + - security + +dependencies: [] diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/fail2ban/tasks/configure.yaml b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/fail2ban/tasks/configure.yaml new file mode 100644 index 0000000..aa4a3d0 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/fail2ban/tasks/configure.yaml @@ -0,0 +1,85 @@ +--- + +- name: merge fail2ban configuration between defaults and custom + ansible.builtin.set_fact: + fail2ban_jails: "{{ fail2ban_defaults_jails | bodsch.core.merge_jails(fail2ban_jails) }}" + fail2ban_filters: "{{ fail2ban_defaults_filters | bodsch.core.merge_jails(fail2ban_filters) }}" + fail2ban_actions: "{{ fail2ban_defaults_actions | bodsch.core.merge_jails(fail2ban_actions) }}" + fail2ban_conf: "{{ fail2ban_defaults_conf | combine(fail2ban_conf, recursive=True) }}" + fail2ban_jail: "{{ fail2ban_defaults_jail | combine(fail2ban_jail, recursive=True) }}" + +- name: update configuration file - /etc/fail2ban/fail2ban.conf + ansible.builtin.template: + src: etc/fail2ban/fail2ban.conf.j2 + dest: /etc/fail2ban/fail2ban.conf + owner: root + group: root + mode: "0644" + backup: true + notify: + - restart fail2ban + +- name: create path configs + ansible.builtin.template: + src: etc/fail2ban/paths.conf.j2 + dest: /etc/fail2ban/paths-{{ item.key }}.conf + owner: root + group: root + mode: "0644" + loop: + "{{ fail2ban_path_definitions | dict2items }}" + loop_control: + label: "{{ item.key }}" + when: + - fail2ban_path_definitions is defined + +- name: update configuration file - /etc/fail2ban/jail.conf + ansible.builtin.template: + src: etc/fail2ban/jail.conf.j2 + dest: /etc/fail2ban/jail.conf + owner: root + group: root + mode: "0644" + backup: true + notify: + - restart fail2ban + +- name: update configuration file - /etc/fail2ban/jail.local + ansible.builtin.template: + src: etc/fail2ban/jail.local.j2 + 
dest: /etc/fail2ban/jail.local + owner: root + group: root + mode: "0644" + notify: + - restart fail2ban + +- name: create filter configs + ansible.builtin.template: + src: etc/fail2ban/filters.conf.j2 + dest: /etc/fail2ban/filter.d/{{ item.name }}.conf + owner: root + group: root + mode: "0644" + loop: + "{{ fail2ban_filters }}" + loop_control: + label: "{{ item.name }}" + when: + - fail2ban_filters is defined + +- name: create action configs + ansible.builtin.template: + src: etc/fail2ban/actions.conf.j2 + dest: /etc/fail2ban/action.d/{{ item.name }}.conf + owner: root + group: root + mode: "0644" + loop: + "{{ fail2ban_actions }}" + loop_control: + label: "{{ item.name }}" + when: + - fail2ban_actions is defined + +... diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/fail2ban/tasks/install.yaml b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/fail2ban/tasks/install.yaml new file mode 100644 index 0000000..e9e0291 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/fail2ban/tasks/install.yaml @@ -0,0 +1,8 @@ +--- + +- name: install + ansible.builtin.package: + name: "{{ fail2ban_packages }}" + state: present + +... diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/fail2ban/tasks/main.yaml b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/fail2ban/tasks/main.yaml new file mode 100644 index 0000000..ba199e4 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/fail2ban/tasks/main.yaml @@ -0,0 +1,24 @@ +--- + +- name: prepare + ansible.builtin.include_tasks: prepare.yaml + tags: + - fail2ban_prepare + - fail2ban_configure + +- name: install + ansible.builtin.include_tasks: install.yaml + tags: + - fail2ban_install + +- name: configure + ansible.builtin.include_tasks: configure.yaml + tags: + - fail2ban_configure + +- name: service + ansible.builtin.include_tasks: service.yaml + tags: + - fail2ban_service + +... diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/fail2ban/tasks/prepare.yaml b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/fail2ban/tasks/prepare.yaml new file mode 100644 index 0000000..904a24b --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/fail2ban/tasks/prepare.yaml @@ -0,0 +1,29 @@ +--- + +- name: include OS specific configuration + ansible.builtin.include_vars: "{{ lookup('first_found', params) }}" + vars: + params: + paths: + - "vars" + files: + # eg. debian-10 / ubuntu-20 / centos-8 / oraclelinux-8 + - "{{ ansible_facts.distribution | lower }}-{{ ansible_facts.distribution_major_version }}.yaml" + # eg. archlinux-systemd / archlinux-openrc + - "{{ ansible_facts.distribution | lower }}-{{ ansible_facts.service_mgr | lower }}.yaml" + # eg. artixlinux-systemd / artixlinux-openrc + - "{{ ansible_facts.distribution | lower | replace(' ', '') }}-{{ ansible_facts.service_mgr | lower }}.yaml" + # eg. debian / ubuntu / centos / oraclelinux + - "{{ ansible_facts.distribution | lower }}.yaml" + # eg. artixlinux + - "{{ ansible_facts.distribution | lower | replace(' ', '') }}.yaml" + # eg. redhat / debian / archlinux + - "{{ ansible_facts.os_family | lower }}.yaml" + - default.yaml + skip: true + +- name: update package cache + ansible.builtin.package: + update_cache: true + +... 
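+# Illustration (assumed example host, not part of the role): on a Debian 11
+# system running systemd, the first_found lookup above probes, in order:
+#   vars/debian-11.yaml
+#   vars/debian-systemd.yaml   (twice, via the space-stripped variant)
+#   vars/debian.yaml           (three times: distribution, stripped, os_family)
+#   vars/default.yaml
+# and loads the first file that exists; skip=true avoids a hard failure when
+# none of them is present.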
diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/fail2ban/tasks/service.yaml b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/fail2ban/tasks/service.yaml new file mode 100644 index 0000000..e5183e8 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/fail2ban/tasks/service.yaml @@ -0,0 +1,9 @@ +--- + +- name: start and enable service + ansible.builtin.service: + name: fail2ban + state: started + enabled: true + +... diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/fail2ban/templates/etc/fail2ban/actions.conf.j2 b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/fail2ban/templates/etc/fail2ban/actions.conf.j2 new file mode 100644 index 0000000..8e0cd06 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/fail2ban/templates/etc/fail2ban/actions.conf.j2 @@ -0,0 +1,57 @@ +#jinja2: trim_blocks: True, lstrip_blocks: True +# {{ ansible_managed }} +{% if item.author is defined and + item.author | string | length > 0 %} + +# Author: {{ item.author }} +{% endif %} +{% if item.description is defined and + item.description | string | length > 0 %} + + {% for line in item.description.split('\n') %} +# {{ line }} + {% endfor %} +{% endif %} +{% if item.includes is defined %} + + {% set _attr = ['before', 'after'] %} +[INCLUDES] + {% for k, v in item.includes.items() %} + {% if k in _attr %} + {% if v | bodsch.core.type == 'list' %} +{{ "%-15s" | format(k,) }} = {{ v | join('\n') | indent(18, False) }} + {% else %} +{{ "%-15s" | format(k,) }} = {{ v }} + {% endif %} + {% endif %} + + {% endfor %} +{% endif %} +{% if item.definition is defined %} + +[Definition] + {% for k, v in item.definition.items() %} + {% if v | bodsch.core.type == 'list' %} +{{ "%-15s" | format(k,) }} = {{ v | join('\n') | indent(18, False) }} + {% else %} +{{ "%-15s" | format(k,) }} = {{ v }} + {% endif %} + + {% endfor %} +{% endif %} +{% if item.init is defined %} + +[Init] + {% for k, v in item.init.items() %} +{{ "%-15s" | format(k,) }} = {{ v }} + {% endfor %} + +{% endif %} +{% if item.init_ipv6 is defined %} + +[Init?family=inet6] + {% for k, v in item.init_ipv6.items() %} +{{ "%-15s" | format(k,) }} = {{ v }} + {% endfor %} + +{% endif %} diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/fail2ban/templates/etc/fail2ban/fail2ban.conf.j2 b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/fail2ban/templates/etc/fail2ban/fail2ban.conf.j2 new file mode 100644 index 0000000..8d150b5 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/fail2ban/templates/etc/fail2ban/fail2ban.conf.j2 @@ -0,0 +1,79 @@ +#jinja2: trim_blocks: True, lstrip_blocks: True +# {{ ansible_managed }} +{% set _loglevel_attr = ['CRITICAL','ERROR','WARNING','NOTICE','INFO','DEBUG'] %} + +[DEFAULT] + +# Option: loglevel +# Notes.: Set the log level output. +# CRITICAL +# ERROR +# WARNING +# NOTICE +# INFO +# DEBUG +# Values: [ LEVEL ] Default: INFO +# +loglevel = {{ fail2ban_conf.default.loglevel if fail2ban_conf.default.loglevel in _loglevel_attr else 'INFO' }} + +# Option: logtarget +# Notes.: Set the log target. This could be a file, SYSLOG, STDERR or STDOUT. +# Only one log target can be specified. +# If you change logtarget from the default value and you are +# using logrotate -- also adjust or disable rotation in the +# corresponding configuration file +# (e.g. 
/etc/logrotate.d/fail2ban on Debian systems) +# Values: [ STDOUT | STDERR | SYSLOG | SYSOUT | FILE ] Default: STDERR +# +logtarget = {{ fail2ban_conf.default.logtarget | default('STDERR') }} + +# Option: syslogsocket +# Notes: Set the syslog socket file. Only used when logtarget is SYSLOG +# auto uses platform.system() to determine predefined paths +# Values: [ auto | FILE ] Default: auto +syslogsocket = {{ fail2ban_conf.default.syslogsocket | default('auto') }} + +# Option: socket +# Notes.: Set the socket file. This is used to communicate with the daemon. Do +# not remove this file when Fail2ban runs. It will not be possible to +# communicate with the server afterwards. +# Values: [ FILE ] Default: /var/run/fail2ban/fail2ban.sock +# +socket = {{ fail2ban_conf.default.socket | default('/run/fail2ban/fail2ban.sock') }} + +# Option: pidfile +# Notes.: Set the PID file. This is used to store the process ID of the +# fail2ban server. +# Values: [ FILE ] Default: /var/run/fail2ban/fail2ban.pid +# +pidfile = {{ fail2ban_conf.default.pidfile | default('/run/fail2ban/fail2ban.pid') }} + +# Options: dbfile +# Notes.: Set the file for the fail2ban persistent data to be stored. +# A value of ":memory:" means database is only stored in memory +# and data is lost when fail2ban is stopped. +# A value of "None" disables the database. +# Values: [ None :memory: FILE ] Default: /var/lib/fail2ban/fail2ban.sqlite3 +dbfile = {{ fail2ban_conf.default.dbfile | default('/var/lib/fail2ban/fail2ban.sqlite3') }} + +# Options: dbpurgeage +# Notes.: Sets age at which bans should be purged from the database +# Values: [ SECONDS ] Default: 86400 (24 hours) +dbpurgeage = {{ fail2ban_conf.default.dbpurgeage | default('86400') }} + +# Options: dbmaxmatches +# Notes.: Number of matches stored in database per ticket (resolvable via +# tags <ipmatches>/<ipjailmatches> in actions) +# Values: [ INT ] Default: 10 +dbmaxmatches = {{ fail2ban_conf.default.dbmaxmatches | default('10') | int }} + +[Definition] + + +[Thread] + +# Options: stacksize +# Notes.: Specifies the stack size (in KiB) to be used for subsequently created threads, +# and must be 0 or a positive integer value of at least 32.
+# Values: [ SIZE ] Default: 0 (use platform or configured default) +#stacksize = 0 diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/fail2ban/templates/etc/fail2ban/filters.conf.j2 b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/fail2ban/templates/etc/fail2ban/filters.conf.j2 new file mode 100644 index 0000000..cb91584 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/fail2ban/templates/etc/fail2ban/filters.conf.j2 @@ -0,0 +1,64 @@ +#jinja2: trim_blocks: True, lstrip_blocks: True +# {{ ansible_managed }} +{% if item.author is defined and + item.author | string | length > 0 %} + +# Author: {{ item.author }} +{% endif %} +{% if item.description is defined and + item.description | string | length > 0 %} + + {% for line in item.description.split('\n') %} +# {{ line }} + {% endfor %} +{% endif %} +{% if item.includes is defined %} + + {% set _attr = ['before', 'after'] %} +[INCLUDES] + {% for k, v in item.includes.items() %} + {% if k in _attr %} + {% if v | bodsch.core.type == 'list' %} +{{ "%-15s" | format(k,) }} = {{ v | join('\n') | indent(18, False) }} + {% else %} +{{ "%-15s" | format(k,) }} = {{ v }} + {% endif %} + {% endif %} + + {% endfor %} +{% endif %} +{% if item.default is defined %} + +[DEFAULT] + {% if item.default.daemon is defined %} +_daemon = {{ item.default.daemon }} + {% endif %} +{% endif %} +{% if item.definition is defined %} + +[Definition] + {% if item.definition.daemon is defined %} +_daemon = {{ item.definition.daemon }} + {% set _ = item.definition.pop('daemon') %} + {% endif %} + {% if item.definition.port is defined %} +_port = {{ item.definition.port }} + {% set _ = item.definition.pop('port') %} + {% endif %} + {% for k, v in item.definition.items() %} + {% if v | bodsch.core.type == 'list' %} +{{ "%-15s" | format(k,) }} = {{ v | join('\n') | indent(18, False) }} + {% else %} +{{ "%-15s" | format(k,) }} = {{ v }} + {% endif %} + + {% endfor %} +{% endif %} +{% if item.init is defined %} + +[Init] + {% for k, v in item.init.items() %} +{{ "%-15s" | format(k,) }} = {{ v }} + {% endfor %} + +{% endif %} diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/fail2ban/templates/etc/fail2ban/jail.conf.j2 b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/fail2ban/templates/etc/fail2ban/jail.conf.j2 new file mode 100644 index 0000000..1c6ea9a --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/fail2ban/templates/etc/fail2ban/jail.conf.j2 @@ -0,0 +1,88 @@ +#jinja2: trim_blocks: True, lstrip_blocks: True +# {{ ansible_managed }} + +{% set _backend_attr = ['pyinotify','gamin','polling','systemd','auto'] %} + +[INCLUDES] + +before = {{ fail2ban_jail.includes.before }} +# after = {{ fail2ban_jail.includes.after }} + +[DEFAULT] +ignorself = true +ignoreip = {{ fail2ban_ignoreips | join(' ') }} +ignorecommand = + +bantime = {{ fail2ban_jail.default.bantime }} +findtime = {{ fail2ban_jail.default.findtime }} +maxretry = {{ fail2ban_jail.default.maxretry }} +backend = {{ fail2ban_jail.default.backend if fail2ban_jail.default.backend in _backend_attr else 'auto' }} +usedns = {{ fail2ban_jail.default.usedns }} +logencoding = {{ fail2ban_jail.default.logencoding }} +enabled = {{ fail2ban_jail.default.jails_enabled | bool | bodsch.core.config_bool(true_as='true', false_as='false') }} +mode = normal +filter = %(__name__)s[mode=%(mode)s] + +# 
------------------------------------------------------------------------------------------------- +# ACTIONS +# + +destemail = {{ fail2ban_jail.actions.destemail }} +sender = {{ fail2ban_jail.actions.sender }} +mta = {{ fail2ban_jail.actions.mta }} +protocol = {{ fail2ban_jail.actions.protocol }} +chain = {{ fail2ban_jail.actions.chain }} +port = 0:65535 +fail2ban_agent = Fail2Ban/%(fail2ban_version)s + +# Action shortcuts. To be used to define action parameter + +banaction = {{ fail2ban_jail.actions.banaction }} +banaction_allports = iptables-allports + +# The simplest action to take: ban only +action_ = %(banaction)s[name=%(__name__)s, bantime="%(bantime)s", port="%(port)s", protocol="%(protocol)s", chain="%(chain)s"] + +# ban & send an e-mail with whois report to the destemail. +action_mw = %(banaction)s[name=%(__name__)s, bantime="%(bantime)s", port="%(port)s", protocol="%(protocol)s", chain="%(chain)s"] + %(mta)s-whois[name=%(__name__)s, sender="%(sender)s", dest="%(destemail)s", protocol="%(protocol)s", chain="%(chain)s"] + +# ban & send an e-mail with whois report and relevant log lines +# to the destemail. +action_mwl = %(banaction)s[name=%(__name__)s, bantime="%(bantime)s", port="%(port)s", protocol="%(protocol)s", chain="%(chain)s"] + %(mta)s-whois-lines[name=%(__name__)s, sender="%(sender)s", dest="%(destemail)s", logpath=%(logpath)s, chain="%(chain)s"] + +# See the IMPORTANT note in action.d/xarf-login-attack for when to use this action +# +# ban & send a xarf e-mail to abuse contact of IP address and include relevant log lines +# to the destemail. +action_xarf = %(banaction)s[name=%(__name__)s, bantime="%(bantime)s", port="%(port)s", protocol="%(protocol)s", chain="%(chain)s"] + xarf-login-attack[service=%(__name__)s, sender="%(sender)s", logpath=%(logpath)s, port="%(port)s"] + +# ban IP on CloudFlare & send an e-mail with whois report and relevant log lines +# to the destemail. +action_cf_mwl = cloudflare[cfuser="%(cfemail)s", cftoken="%(cfapikey)s"] + %(mta)s-whois-lines[name=%(__name__)s, sender="%(sender)s", dest="%(destemail)s", logpath=%(logpath)s, chain="%(chain)s"] + +# Report block via blocklist.de fail2ban reporting service API +action_blocklist_de = blocklist_de[email="%(sender)s", service=%(filter)s, apikey="%(blocklist_de_apikey)s", agent="%(fail2ban_agent)s"] + +# Report ban via badips.com, and use as blacklist +action_badips = badips.py[category="%(__name__)s", banaction="%(banaction)s", agent="%(fail2ban_agent)s"] + +# Report ban via badips.com (uses action.d/badips.conf for reporting only) +action_badips_report = badips[category="%(__name__)s", agent="%(fail2ban_agent)s"] + +# Report ban via abuseipdb.com. +# +# See action.d/abuseipdb.conf for usage example and details. + +action_abuseipdb = abuseipdb + +action = %(action_)s + +# ------------------------------------------------------------------------------------------------- +# JAILS +# + +# in /etc/fail2ban/jail.local. 
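Every value in this template comes from fail2ban_jail, which configure.yaml (earlier in this diff) builds by merging fail2ban_defaults_jail with the user-supplied fail2ban_jail via combine(recursive=True). A sketch of a hypothetical group_vars override; only the listed keys change, everything else keeps its default:

```yaml
# hypothetical group_vars entry -- values are examples, not role defaults
fail2ban_jail:
  default:
    bantime: 3600      # ban for an hour instead of the default 600 seconds
    maxretry: 5
  actions:
    destemail: security@example.com   # assumed address
    banaction: iptables-allports      # an action shipped with fail2ban
```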
diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/fail2ban/templates/etc/fail2ban/jail.local.j2 b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/fail2ban/templates/etc/fail2ban/jail.local.j2 new file mode 100644 index 0000000..25d061d --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/fail2ban/templates/etc/fail2ban/jail.local.j2 @@ -0,0 +1,17 @@ +#jinja2: trim_blocks: True, lstrip_blocks: True +# {{ ansible_managed }} + +{% for service in fail2ban_jails %} +[{{ service.name }}] +enabled = {{ service.enabled | default(true) | bool | bodsch.core.config_bool(true_as='true', false_as='false') }} + {% for option, value in service.items() %} + {% if option not in ['name', 'enabled'] %} + {% if value | bodsch.core.type == "list" %} +{{ "%-15s" | format(option,) }} = {{ value | join(' ') }} + {% elif value | bodsch.core.type in ["str", "int"] %} +{{ "%-15s" | format(option,) }} = {{ value }} + {% endif %} + {% endif %} + {% endfor %} + +{% endfor %} diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/fail2ban/templates/etc/fail2ban/paths.conf.j2 b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/fail2ban/templates/etc/fail2ban/paths.conf.j2 new file mode 100644 index 0000000..d2caf87 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/fail2ban/templates/etc/fail2ban/paths.conf.j2 @@ -0,0 +1,14 @@ +#jinja2: trim_blocks: True, lstrip_blocks: True +# {{ ansible_managed }} + +{% for section, settings in item.value.items() %} +[{{ section | upper }}] + {% for k, v in settings.items() %} + {% if v | bodsch.core.type in ["str", "int"] %} +{{ "%-15s" | format(k,) }} = {{ v }} + {% elif v | bodsch.core.type == "list" %} +{{ "%-15s" | format(k,) }} = {{ v | join(' ') }} + {% endif %} + {% endfor %} + +{% endfor %} diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/fail2ban/vars/archlinux-openrc.yaml b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/fail2ban/vars/archlinux-openrc.yaml new file mode 100644 index 0000000..ee323f5 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/fail2ban/vars/archlinux-openrc.yaml @@ -0,0 +1,8 @@ +--- + +fail2ban_packages: + - fail2ban-openrc + +fail2ban_before_conf: paths-artix.conf + +... diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/fail2ban/vars/archlinux.yaml b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/fail2ban/vars/archlinux.yaml new file mode 100644 index 0000000..0a82c1c --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/fail2ban/vars/archlinux.yaml @@ -0,0 +1,8 @@ +--- + +fail2ban_packages: + - fail2ban + +fail2ban_before_conf: paths-arch.conf + +... 
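configure.yaml renders paths.conf.j2 once per key of fail2ban_path_definitions, into /etc/fail2ban/paths-<key>.conf. The variable has no default in the role, so the layout below is an assumption derived from the template's section/settings loop:

```yaml
# hypothetical fail2ban_path_definitions -- structure inferred from
# paths.conf.j2; the "myapp" key and the log paths are examples only
fail2ban_path_definitions:
  myapp:
    default:
      myapp_error_log: /var/log/myapp/error.log
      myapp_access_log:
        - /var/log/myapp/access.log
        - /var/log/myapp/vhosts/*.log
```

Rendered, this becomes /etc/fail2ban/paths-myapp.conf with a [DEFAULT] section and one key = value line per setting (list values joined with spaces).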
diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/fail2ban/vars/artixlinux-openrc.yaml b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/fail2ban/vars/artixlinux-openrc.yaml new file mode 120000 index 0000000..a753e28 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/fail2ban/vars/artixlinux-openrc.yaml @@ -0,0 +1 @@ +archlinux-openrc.yaml \ No newline at end of file diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/fail2ban/vars/debian.yaml b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/fail2ban/vars/debian.yaml new file mode 100644 index 0000000..e765b3f --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/fail2ban/vars/debian.yaml @@ -0,0 +1,8 @@ +--- + +fail2ban_packages: + - fail2ban + +fail2ban_before_conf: paths-debian.conf + +... diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/fail2ban/vars/default.yaml b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/fail2ban/vars/default.yaml new file mode 100644 index 0000000..40606cf --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/fail2ban/vars/default.yaml @@ -0,0 +1,7 @@ +--- + +fail2ban_packages: [] + +fail2ban_before_conf: "" + +... diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/fail2ban/vars/main.yaml b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/fail2ban/vars/main.yaml new file mode 100644 index 0000000..77d0802 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/fail2ban/vars/main.yaml @@ -0,0 +1,165 @@ +--- + +fail2ban_defaults_conf: + default: + # 'CRITICAL','ERROR','WARNING','NOTICE','INFO','DEBUG' + loglevel: INFO + # Set the log target. This could be a file, SYSLOG, STDERR or STDOUT. + logtarget: "/var/log/fail2ban.log" + # Set the syslog socket file. Only used when logtarget is SYSLOG + syslogsocket: auto + # Set the socket file. This is used to communicate with the daemon. + socket: /run/fail2ban/fail2ban.sock + # Set the PID file. This is used to store the process ID of the fail2ban server. + pidfile: /run/fail2ban/fail2ban.pid + # Set the file for the fail2ban persistent data to be stored. + dbfile: /var/lib/fail2ban/fail2ban.sqlite3 + # Sets age at which bans should be purged from the database + dbpurgeage: 86400 + # Number of matches stored in database per ticket + dbmaxmatches: 10 + definition: {} + thread: + stacksize: 0 + +fail2ban_defaults_jail: + includes: + before: "{{ fail2ban_before_conf }}" + after: '' + default: + ignoreips: "{{ fail2ban_ignoreips }}" + # "bantime" is the number of seconds that a host is banned. + bantime: 600 + # "maxretry" is the number of failures before a host gets banned. + maxretry: 3 + # A host is banned if it has generated "maxretry" during the last "findtime" seconds. + findtime: 3200 + # pyinotify: requires pyinotify (a file alteration monitor) to be installed. + # If pyinotify is not installed, Fail2ban will use auto. + # gamin: requires Gamin (a file alteration monitor) to be installed. + # If Gamin is not installed, Fail2ban will use auto. + # polling: uses a polling algorithm which does not require external libraries. + # systemd: uses systemd python library to access the systemd journal. + # Specifying "logpath" is not valid for this backend. 
+ # See "journalmatch" in the jails associated filter config + # auto: will try to use the following backends, in order: + # pyinotify, gamin, polling. + backend: auto + + # yes: if a hostname is encountered, a reverse DNS lookup will be performed. + # warn: if a hostname is encountered, a reverse DNS lookup will be performed, + # but it will be logged as a warning. + # no: if a hostname is encountered, will not be used for banning, + # but it will be logged as info. + usedns: warn + + # "logencoding" specifies the encoding of the log files handled by the jail + # This is used to decode the lines from the log file. + # Typical examples: "ascii", "utf-8" + # + # auto: will use the system locale setting + logencoding: auto + + # "enabled" enables the jails. + # By default all jails are disabled, and it should stay this way. + # Enable only relevant to your setup jails in your .local or jail.d/*.conf + # + # true: jail will be enabled and log files will get monitored for changes + # false: jail is not enabled + jails_enabled: false + actions: + destemail: root@localhost + sender: root@localhost + mta: sendmail + protocol: tcp + chain: INPUT + # Default banning action (e.g. iptables, iptables-new, iptables-multiport, shorewall, etc) + banaction: iptables-multiport + +fail2ban_defaults_jails: + - name: ssh + enabled: false + port: ssh + filter: sshd + logpath: '%(auth_log)s' + - name: ssh-unknown-user + enabled: false + mode: ddos + filter: sshd + logpath: '%(auth_log)s' + - name: ssh-breakin + enabled: false + port: ssh + filter: sshd-break-in + logpath: '%(auth_log)s' + - name: ssh-ddos + enabled: false + port: ssh + filter: sshd-ddos + logpath: '%(auth_log)s' + +fail2ban_defaults_actions: [] + +fail2ban_defaults_filters: + - name: sshd-break-in + author: bodsch + definition: + failregex: + - 'sshd\[\S*\]: Address (.*) POSSIBLE BREAK-IN ATTEMPT!' + ignoreregex: '' + + - name: sshd-unknown-user + author: bodsch + description: | + The regex should math against + + # Invalid user from port 54520 + # Connection closed by invalid user port 54520 [preauth] + includes: + before: common.conf + definition: + daemon: sshd + failregex: + - '^[iI]nvalid user .*? (?:from )?' + - '^Connection closed by invalid user .*??' + ignoreregex: '' + + - name: sshd-ddos + author: bodsch + description: | + The regex here also relates to a exploit: + + http://www.securityfocus.com/bid/17958/exploit + The example code here shows the pushing of the exploit straight after + reading the server version. This is where the client version string normally + pushed. As such the server will read this unparsible information as + "Did not receive identification string". + includes: + before: common.conf + definition: + daemon: sshd + failregex: + - '^%(__prefix_line)sDid not receive identification string from \s*$' + ignoreregex: '' + + - name: nginx-botsearch + author: Frantisek Sumsal + description: | + Fail2Ban filter to match web requests for selected URLs that don't exist + + DEV Notes: + Based on apache-botsearch filter + includes: + before: botsearch-common.conf + definition: + failregex: + - '^ \- \S+ \[\] \"(GET|POST|HEAD) \/ \S+\" 404 .+$' + - '^ \[error\] \d+#\d+: \*\d+ (\S+ )?\"\S+\" (failed|is not found) \(2\: No such file or directory\), client\: \, server\: \S*\, request: \"(GET|POST|HEAD) \/ \S+\"\, .*?$' + ignoreregex: '' + datepattern: + - '{^LN-BEG}%%ExY(?P<_sep>[-/.])%%m(?P=_sep)%%d[T ]%%H:%%M:%%S(?:[.,]%%f)?(?:\s*%%z)?' + - '^[^\[]*\[({DATE})' + - '{^LN-BEG}' + + +... 
diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/logrotate/.ansible-lint b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/logrotate/.ansible-lint new file mode 100644 index 0000000..48763f0 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/logrotate/.ansible-lint @@ -0,0 +1,5 @@ +--- + +skip_list: + - name[casing] + - name[template] diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/logrotate/.editorconfig b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/logrotate/.editorconfig new file mode 100644 index 0000000..898cdbd --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/logrotate/.editorconfig @@ -0,0 +1,23 @@ +# EditorConfig helps developers define and maintain consistent +# coding styles between different editors and IDEs +# https://editorconfig.org/ + + +root = true + +[*] +indent_style = space +indent_size = 2 + +end_of_line = lf +charset = utf-8 +trim_trailing_whitespace = true +insert_final_newline = true +max_line_length = 100 + +[*.py] +indent_style = space +indent_size = 4 + +[*.md] +trim_trailing_whitespace = false diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/logrotate/.flake8 b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/logrotate/.flake8 new file mode 100644 index 0000000..1962f7e --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/logrotate/.flake8 @@ -0,0 +1,19 @@ +[flake8] + +# E221 multiple spaces before operator +# E251 unexpected spaces around keyword / parameter equals + +ignore = E221,E251 + +exclude = + # No need to traverse our git directory + .git, + # There's no value in checking cache directories + __pycache__, + .tox + +# E203: https://github.com/python/black/issues/315 +# ignore = D,E741,W503,W504,H,E501,E203 + +max-line-length = 195 + diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/logrotate/.gitignore b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/logrotate/.gitignore new file mode 100644 index 0000000..3d35e6a --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/logrotate/.gitignore @@ -0,0 +1,6 @@ +.tox +.galaxy_install_info +*kate-swp +__pycache__ +.cache +.directory diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/logrotate/.yamllint b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/logrotate/.yamllint new file mode 100644 index 0000000..20fd7aa --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/logrotate/.yamllint @@ -0,0 +1,40 @@ +--- +# Based on ansible-lint config +extends: default + +rules: + braces: + max-spaces-inside: 1 + level: error + brackets: + max-spaces-inside: 1 + level: error + colons: + max-spaces-after: -1 + level: error + commas: + max-spaces-after: -1 + level: error + comments: disable + comments-indentation: disable + document-start: disable + empty-lines: + max: 3 + level: error + hyphens: + level: error + indentation: + spaces: 2 + key-duplicates: enable + line-length: + max: 195 + level: warning + new-line-at-end-of-file: disable + new-lines: + type: unix + trailing-spaces: disable + truthy: disable + +ignore: | + molecule/ + .github diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/logrotate/CONTRIBUTING.md 
b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/logrotate/CONTRIBUTING.md new file mode 100644 index 0000000..e3cd4cc --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/logrotate/CONTRIBUTING.md @@ -0,0 +1,31 @@ +Contributing +============ +If you want to contribute to a project and make it better, your help is very welcome. +Contributing is also a great way to learn more about social coding on Github, new technologies and +their ecosystems and how to make constructive, helpful bug reports, feature requests and the +noblest of all contributions: a good, clean pull request. + +### How to make a clean pull request + +Look for a project's contribution instructions. If there are any, follow them. + +- Create a personal fork of the project on Github. +- Clone the fork on your local machine. Your remote repo on Github is called `origin`. +- Add the original repository as a remote called `upstream`. +- If you created your fork a while ago, be sure to pull upstream changes into your local repository. +- Create a new branch to work on! Branch from `develop` if it exists, else from `master`. +- Implement/fix your feature, comment your code. +- Follow the code style of the project, including indentation. +- If the project has tests, run them! +- Write or adapt tests as needed. +- Add or change the documentation as needed. +- Squash your commits into a single commit. Create a new branch if necessary. +- Push your branch to your fork on Github, the remote `origin`. +- From your fork open a pull request in the correct branch. Target the project's `develop` branch if there is one, else go for `master`! +- If the maintainer requests further changes just push them to your branch. The PR will be updated automatically. +- Once the pull request is approved and merged you can pull the changes from `upstream` to your local repo and delete + your extra branch(es). + +And last but not least: Always write your commit messages in the present tense. +Your commit message should describe what the commit, when applied, does to the +code – not what you did to the code. diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/logrotate/LICENSE b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/logrotate/LICENSE new file mode 100644 index 0000000..8c8472f --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/logrotate/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. 
+ + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright 2020-2021 Bodo Schulz + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/logrotate/Makefile b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/logrotate/Makefile new file mode 100644 index 0000000..bfaab7c --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/logrotate/Makefile @@ -0,0 +1,22 @@ +# +export TOX_SCENARIO ?= default +export TOX_ANSIBLE ?= ansible_8.5 + +.PHONY: converge destroy verify test lint + +default: converge + +converge: + @hooks/converge + +destroy: + @hooks/destroy + +verify: + @hooks/verify + +test: + @hooks/test + +lint: + @hooks/lint diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/logrotate/README.md b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/logrotate/README.md new file mode 100644 index 0000000..3ce3c4b --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/logrotate/README.md @@ -0,0 +1,80 @@ + +# Ansible Role: `bodsch.core.logrotate` + +Installs logrotate and provides an easy way to set up additional logrotate scripts by +specifying a list of directives. + +## usage + +```yaml +logrotate_global: + rotate_log: weekly + rotate_size: '' + su_user: '' + su_group: '' + rotate: 2 + create: true + dateext: true + compress: true + tabooext: [] + archive_directory: '' + +logrotate_conf_dir: "/etc/logrotate.d" + +logrotate_scripts: {} + +logroate_disable_systemd: true +``` + +### **logrotate_scripts**: A dictionary of logrotate scripts and the directives to use for the rotation. + +* `state` - create (`present`) or remove (`absent`) configuration. default: `present` +* `path` - Path to point logrotate to for the log rotation +* `paths` - A list of paths to point logrotate to for the log rotation. +* `options` - List of directives for logrotate; see the logrotate man page for specifics +* `scripts` - Dict of scripts for logrotate (see Example below) + +```yaml +logrotate_scripts: + audit: + path: /var/log/audit/audit.log + description: | + rotate all audit logs + options: + - weekly + - rotate 4 + - missingok + - notifempty + - delaycompress + scripts: + prerotate: systemctl stop auditd.service > /dev/null + postrotate: systemctl start auditd.service > /dev/null + foo: failed +``` + +```yaml +logrotate_scripts: + nginx: + paths: + - /var/log/nginx/*/*.log + - /var/log/nginx/*.log + options: + - weekly + - rotate 2 + - missingok + - notifempty + - compress + - sharedscripts + - create 0644 http log + - su root http + scripts: + postrotate: test ! -r /run/nginx.pid || kill -USR1 $(cat /run/nginx.pid) +``` + +## Example Playbook + +see the [molecule test](molecule/default/converge.yml) and [configuration](molecule/default/group_vars/all/vars.yml) + +## Author + +- Bodo Schulz diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/logrotate/defaults/main.yml b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/logrotate/defaults/main.yml new file mode 100644 index 0000000..24c0f40 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/logrotate/defaults/main.yml @@ -0,0 +1,85 @@ +--- + +logrotate_global: + # frequency + # - hourly + # Log files are rotated every hour. + # Note that usually logrotate is configured to be run by cron daily. + # You have to change this configuration and run logrotate hourly to be able + # to really rotate logs hourly. + # - daily + # Log files are rotated every day. 
+ # - weekly + # Log files are rotated once each weekday, + # or if the date is advanced by at least 7 days since the last rotation (while ignoring the exact time). + # The weekday interpretation is as follows: + # 0 means Sunday, + # 1 means Monday, + # ..., + # 6 means Saturday; + # the special value 7 means every 7 days, irrespective of the weekday. + # Defaults to 0 if the weekday argument is omitted. + # - monthly + # Log files are rotated the first time logrotate is run in a month (this is normally on the first day of the month). + # - yearly + # Log files are rotated if the current year is not the same as the last rotation. + # + rotate_log: weekly + # restrict maximum size of log files + # Log files are rotated only if they grow bigger than size bytes. + # If size is followed by k, the size is assumed to be in kilobytes. + # If M is used, the size is in megabytes, and if G is used, the size is in gigabytes. + # So size 100, size 100k, size 100M and size 100G are all valid. + # This option is mutually exclusive with the time interval options, + # and it causes log files to be rotated without regard for the last rotation time, + # if specified after the time criteria (the last specified option takes precedence). + rotate_size: '' # 20M + # Rotate log files set under this user and group instead of using default user/group (usually root) + # su_user specifies the user used for rotation and + # su_group specifies the group used for rotation + su_user: '' + su_group: '' + # Log files are rotated count times before being removed or mailed to the address specified in a mail directive. + # If count is 0, old versions are removed rather than rotated. + # If count is -1, old logs are not removed at all, except they are affected by maxage + # (use with caution, may waste performance and disk space). + rotate: 2 + # create new (empty) log files after rotating old ones + create: true + # use date as a suffix of the rotated file + dateext: true + # if you want your log files compressed + compress: true + # taboo extension list + # At startup, the taboo extension list contains: + # ,v, .cfsaved, .disabled, .dpkg-bak, .dpkg-del, .dpkg-dist, + # .dpkg-new, .dpkg-old, .rhn-cfg-tmp-*, .rpmnew, .rpmorig, + # .rpmsave, .swp, .ucf-dist, .ucf-new, .ucf-old, ~ + # for Arch-based distributions, you can add here: + # .pacorig, .pacnew, .pacsave + tabooext: [] + # Logs are moved into this directory for rotation + # e.g. /var/log/archive + archive_directory: '' + +logrotate_conf_dir: "/etc/logrotate.d" + +logrotate_scripts: {} +# audit: +# path: /var/log/audit/audit.log +# description: | +# rotate all audit logs +# options: +# - weekly +# - rotate 4 +# - missingok +# - notifempty +# - delaycompress +# scripts: +# prerotate: systemctl stop auditd.service > /dev/null +# postrotate: systemctl start auditd.service > /dev/null +# foo: failed + +logroate_disable_systemd: true + +... 
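Since configure.yaml (later in this diff) combines logrotate_global recursively with the role defaults, a playbook only needs to set the keys it wants to change. A minimal sketch (values are examples, not recommendations):

```yaml
# hypothetical group_vars override -- only these keys deviate from the
# defaults above; per the comment on rotate_size, a size criterion
# specified after the time criteria takes precedence
logrotate_global:
  rotate_log: daily
  rotate_size: 50M
  rotate: 14
  archive_directory: /var/log/archive
```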
diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/logrotate/hooks/converge b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/logrotate/hooks/converge new file mode 100755 index 0000000..0c50932 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/logrotate/hooks/converge @@ -0,0 +1,3 @@ +#!/usr/bin/env bash + +hooks/tox.sh "converge" diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/logrotate/hooks/destroy b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/logrotate/hooks/destroy new file mode 100755 index 0000000..b4a3f8d --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/logrotate/hooks/destroy @@ -0,0 +1,3 @@ +#!/usr/bin/env bash + +hooks/tox.sh "destroy" diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/logrotate/hooks/lint b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/logrotate/hooks/lint new file mode 100755 index 0000000..ef226a0 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/logrotate/hooks/lint @@ -0,0 +1,3 @@ +#!/usr/bin/env bash + +hooks/tox.sh "lint" diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/logrotate/hooks/molecule.rc b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/logrotate/hooks/molecule.rc new file mode 100644 index 0000000..78c8621 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/logrotate/hooks/molecule.rc @@ -0,0 +1,74 @@ + +TOX_ARGS= + +if [ -n "${TOX_SCENARIO}" ] +then + TOX_ARGS="--scenario-name ${TOX_SCENARIO}" +fi + +TOX_OPTS="-e ${TOX_ANSIBLE}" + +vercomp() { + + [[ $1 == $2 ]] && return 0 + v1=$(echo "$1" | sed -e 's|-|.|g') + v2=$(echo "$2" | sed -e 's|-|.|g') + + local IFS=. + # use the normalized values, not the raw arguments + local i ver1=($v1) ver2=($v2) + # fill empty fields in ver1 with zeros + for ((i=${#ver1[@]}; i<${#ver2[@]}; i++)) + do + ver1[i]=0 + done + + for ((i=0; i<${#ver1[@]}; i++)) + do + if [[ -z ${ver2[i]} ]] + then + # fill empty fields in ver2 with zeros + ver2[i]=0 + fi + if ((10#${ver1[i]} > 10#${ver2[i]})) + then + return 1 + fi + if ((10#${ver1[i]} < 10#${ver2[i]})) + then + return 2 + fi + done + return 0 +} + +install_collection() { + local collection="${1}" + + echo "Install the required collection '${collection}'" + ansible-galaxy collection install ${collection} +} + +remove_collection() { + + local collection="${1}" + + namespace="$(echo "${collection}" | cut -d '.' -f1)" + name="$(echo "${collection}" | cut -d '.' -f2)" + + collection="${HOME}/.ansible/collections/ansible_collections/${namespace}/${name}" + + rm \ + --recursive \ + --force \ + "${collection}" +} + +publish() { + + TOKEN="${HOME}/.ansible/galaxy_token" + + if [ -e "${TOKEN}" ] + then + ansible-galaxy import --token=$(cat "${TOKEN}") bodsch # "???" 
+ fi +} diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/logrotate/hooks/test b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/logrotate/hooks/test new file mode 100755 index 0000000..2869139 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/logrotate/hooks/test @@ -0,0 +1,3 @@ +#!/usr/bin/env bash + +hooks/tox.sh "test" diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/logrotate/hooks/tox.sh b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/logrotate/hooks/tox.sh new file mode 100755 index 0000000..c93de29 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/logrotate/hooks/tox.sh @@ -0,0 +1,52 @@ +#!/usr/bin/env bash + +. hooks/molecule.rc + +TOX_TEST="${1}" + +if [ -f "./collections.yml" ] +then + for collection in $(grep -v "#" collections.yml | grep "^ - name: " | awk -F ': ' '{print $2}') + do + collections_installed="$(ansible-galaxy collection list | grep ${collection} 2> /dev/null)" + + if [ -z "${collections_installed}" ] + then + install_collection ${collection} + else + collection_version=$(echo "${collections_installed}" | awk -F ' ' '{print $2}') + version="$(grep -v "#" collections.yml | grep -A1 "^ - name: ${collection}" | grep "version: " 2> /dev/null | awk -F ': ' '{print $2}' | sed -e 's|=||' -e 's|>||' -e 's|"||g')" + + echo "The required collection '${collection}' is installed in version ${collection_version}." + + if [ ! -z "${version}" ] + then + + vercomp "${version}" "${collection_version}" + + case $? in + 0) op='=' ;; + 1) op='>' ;; + 2) op='<' ;; + esac + + if [[ $op = "=" ]] || [[ $op = ">" ]] + then + # echo "FAIL: Expected '$3', Actual '$op', Arg1 '$1', Arg2 '$2'" + echo "re-install for version ${version}" + + remove_collection ${collection} + install_collection ${collection} + else + : + # echo "Pass: '$1 $op $2'" + fi + else + : + fi + fi + done + echo "" +fi + +tox ${TOX_OPTS} -- molecule ${TOX_TEST} ${TOX_ARGS} diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/logrotate/hooks/verify b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/logrotate/hooks/verify new file mode 100755 index 0000000..5f436af --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/logrotate/hooks/verify @@ -0,0 +1,3 @@ +#!/usr/bin/env bash + +hooks/tox.sh "verify" diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/logrotate/meta/main.yml b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/logrotate/meta/main.yml new file mode 100644 index 0000000..7e1b71f --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/logrotate/meta/main.yml @@ -0,0 +1,34 @@ +# Standards: 1.2 +--- + +galaxy_info: + role_name: logrotate + + author: Bodo Schulz + description: Role to configure logrotate scripts + + license: Apache + min_ansible_version: "2.9" + + platforms: + - name: ArchLinux + - name: Debian + versions: + # 10 + - buster + # 11 + - bullseye + - bookworm + - name: Ubuntu + versions: + # 20.04 + - focal + + galaxy_tags: + - system + - logfile + - rotate + +dependencies: [] + +... 
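hooks/tox.sh above drives the collection check from a collections.yml next to the role; that file is not part of this diff, so the sketch below is inferred from the script's grep patterns (a two-space-indented `- name:` line, optionally followed by a `version:` line):

```yaml
# hypothetical collections.yml -- layout inferred from hooks/tox.sh;
# the script only looks at "  - name: " lines and an optional
# "version: " line directly after each of them
---
collections:
  - name: bodsch.core
    version: 2.10.1
  - name: bodsch.systemd
```

With a version pinned, the hook reinstalls the collection whenever the required version is equal to or newer than the installed one; without a pin it only checks that the collection is present.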
diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/logrotate/tasks/configure.yaml b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/logrotate/tasks/configure.yaml new file mode 100644 index 0000000..443c322 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/logrotate/tasks/configure.yaml @@ -0,0 +1,60 @@ +--- + +- name: merge logrotate global configuration between defaults and custom + ansible.builtin.set_fact: + logrotate_global: "{{ logrotate_defaults_global | combine(logrotate_global, recursive=True) }}" + +- name: create logrotate.conf + become: true + ansible.builtin.template: + src: logrotate.conf.j2 + dest: /etc/logrotate.conf + mode: "0644" + +- name: create directory {{ logrotate_conf_dir }} + become: true + ansible.builtin.file: + path: "{{ logrotate_conf_dir }}" + state: directory + mode: "0755" + +- name: create directory {{ logrotate_global.archive_directory }} + become: true + ansible.builtin.file: + path: "{{ logrotate_global.archive_directory }}" + state: directory + mode: "0755" + when: + - logrotate_global.archive_directory is defined + - logrotate_global.archive_directory | length > 0 + +- name: create logrotate.d configs + become: true + ansible.builtin.template: + src: logrotate.d.j2 + dest: "{{ logrotate_conf_dir }}/{{ item.key }}" + mode: "0644" + loop: + "{{ logrotate_scripts | dict2items }}" + loop_control: + label: "{{ item.key }}" + when: + - logrotate_scripts is defined + - logrotate_scripts | length > 0 + - item.value.state | default('present') == 'present' + +- name: remove logrotate.d configs + become: true + ansible.builtin.file: + dest: "{{ logrotate_conf_dir }}/{{ item.key }}" + state: absent + loop: + "{{ logrotate_scripts | dict2items }}" + loop_control: + label: "{{ item.key }}" + when: + - logrotate_scripts is defined + - logrotate_scripts | length > 0 + - item.value.state | default('present') == 'absent' + +... diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/logrotate/tasks/cron.yaml b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/logrotate/tasks/cron.yaml new file mode 100644 index 0000000..77fb0db --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/logrotate/tasks/cron.yaml @@ -0,0 +1,15 @@ +--- + +- name: ensure that /etc/cron.daily is present + ansible.builtin.file: + state: directory + path: /etc/cron.daily + mode: "0755" + +- name: write cron.daily + ansible.builtin.template: + src: cron_logrotate.j2 + dest: /etc/cron.daily/logrotate + mode: "0755" + +... diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/logrotate/tasks/install.yaml b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/logrotate/tasks/install.yaml new file mode 100644 index 0000000..783d673 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/logrotate/tasks/install.yaml @@ -0,0 +1,9 @@ +--- + +- name: install logrotate + become: true + ansible.builtin.package: + name: logrotate + state: present + +... 
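configure.yaml above routes each logrotate_scripts entry by its state: entries with state present (the default) are rendered through logrotate.d.j2, and entries with state absent are removed again. Dropping a previously managed config is therefore just:

```yaml
# hypothetical example -- removes /etc/logrotate.d/old-app
# if this role previously created it
logrotate_scripts:
  old-app:
    state: absent
```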
diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/logrotate/tasks/main.yml b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/logrotate/tasks/main.yml new file mode 100644 index 0000000..db12731 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/logrotate/tasks/main.yml @@ -0,0 +1,31 @@ +--- + +- name: prepare + ansible.builtin.include_tasks: prepare.yaml + tags: + - logrotate_prepare + - logrotate_configure + +- name: install + ansible.builtin.include_tasks: install.yaml + tags: + - logrotate_install + +- name: configure + ansible.builtin.include_tasks: configure.yaml + tags: + - logrotate_configure + +- name: handle systemd + ansible.builtin.include_tasks: systemd.yaml + when: + - ansible_facts.service_mgr | lower == "systemd" + tags: + - logrotate_systemd + +- name: create cron job + ansible.builtin.include_tasks: cron.yaml + tags: + - logrotate_cron + +... diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/logrotate/tasks/prepare.yaml b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/logrotate/tasks/prepare.yaml new file mode 100644 index 0000000..c81cf5b --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/logrotate/tasks/prepare.yaml @@ -0,0 +1,3 @@ +--- + +... diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/logrotate/tasks/systemd.yaml b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/logrotate/tasks/systemd.yaml new file mode 100644 index 0000000..4903388 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/logrotate/tasks/systemd.yaml @@ -0,0 +1,22 @@ +--- + +- name: systemd service handling + when: + - ansible_facts.service_mgr | lower == "systemd" + - logroate_disable_systemd + block: + - name: set systemd states + ansible.builtin.set_fact: + systemd_state: "{{ 'stopped' if logroate_disable_systemd else omit }}" + systemd_enabled: "{{ 'false' if logroate_disable_systemd else 'true' }}" + + - name: ensure systemd timer unit is stopped and disabled + ansible.builtin.systemd: + name: logrotate.timer + state: "{{ systemd_state }}" + enabled: "{{ systemd_enabled }}" + failed_when: false + when: + - ansible_facts.distribution_major_version | int != 7 + +... diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/logrotate/templates/cron_logrotate.j2 b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/logrotate/templates/cron_logrotate.j2 new file mode 100644 index 0000000..9efa9a9 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/logrotate/templates/cron_logrotate.j2 @@ -0,0 +1,30 @@ +#jinja2: trim_blocks: True, lstrip_blocks: True +#!/usr/bin/env bash +# {{ ansible_managed }} + +{% if not logroate_disable_systemd %} +# skip in favour of systemd timer +if [ -d /run/systemd/system ]; then + exit 0 +fi +{% endif %} + +LOGROTATE=$(command -v logrotate) + +if [ -z "${LOGROTATE}" ]; then + exit 1 +fi + +# this cronjob persists removals (but not purges) +if [ ! -x ${LOGROTATE} ]; then + exit 0 +fi + +${LOGROTATE} /etc/logrotate.conf +EXITVALUE=$?
+if [ $EXITVALUE != 0 ] +then + /usr/bin/logger -t logrotate "ALERT exited abnormally with [$EXITVALUE]" +fi + +exit $EXITVALUE diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/logrotate/templates/logrotate.conf.j2 b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/logrotate/templates/logrotate.conf.j2 new file mode 100644 index 0000000..1fdaed5 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/logrotate/templates/logrotate.conf.j2 @@ -0,0 +1,67 @@ +#jinja2: trim_blocks: True, lstrip_blocks: True +# {{ ansible_managed }} +{% set rotate_log_attr = ['daily', 'weekly', 'monthly', 'yearly'] %} + +# see "man logrotate" for details +# rotate log files weekly +{% if logrotate_global.rotate_log is defined and + logrotate_global.rotate_log in rotate_log_attr %} +{{ logrotate_global.rotate_log }} +{% else %} +weekly +{% endif %} + +{% if logrotate_global.su_user is defined and + logrotate_global.su_user | string | length != 0 and + logrotate_global.su_group is defined and + logrotate_global.su_group | string | length != 0 %} +# use the syslog group by default, since this is the owning group +# of /var/log/syslog. +su {{ logrotate_global.su_user }} {{ logrotate_global.su_group }} +{% endif %} + +# keep 4 weeks worth of backlogs +rotate {{ logrotate_global.rotate | default('2') }} + +{% if logrotate_global.rotate_size is defined and + logrotate_global.rotate_size | string | length != 0 %} +# restrict maximum size of log files +size {{ logrotate_global.rotate_size | default('20M') }} +{% endif %} + +{% if logrotate_global.archive_directory is defined and + logrotate_global.archive_directory | string | length > 0 %} +# Logs are moved into directory for rotation +olddir {{ logrotate_global.archive_directory }} +{% endif %} + +{% if logrotate_global.create is defined and + logrotate_global.create | string | length > 0 and + logrotate_global.create | bool == true -%} +# create new (empty) log files after rotating old ones +create +{% endif %} + +{% if logrotate_global.dateext is defined and + logrotate_global.dateext | string | length > 0 and + logrotate_global.dateext | bool == true -%} +# use date as a suffix of the rotated file +dateext +{% endif %} + +{% if logrotate_global.compress is defined and + logrotate_global.compress | string | length > 0 and + logrotate_global.compress | bool == true -%} +# uncomment this if you want your log files compressed +compress +{% endif %} + +{% if logrotate_global.tabooext is defined and + logrotate_global.tabooext | bodsch.core.type == 'list' and + logrotate_global.tabooext | count >= 1 %} +tabooext + {{ logrotate_global.tabooext | join(' ') }} +{# .pacorig .pacnew .pacsave #} +{% endif %} + +# packages drop log rotation information into this directory +include /etc/logrotate.d diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/logrotate/templates/logrotate.d.j2 b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/logrotate/templates/logrotate.d.j2 new file mode 100644 index 0000000..76b21ec --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/logrotate/templates/logrotate.d.j2 @@ -0,0 +1,35 @@ +#jinja2: trim_blocks: True, lstrip_blocks: True +# {{ ansible_managed }} +{% set scripts_attr = ['postrotate', 'prerotate'] %} + +{% if item.value.description is defined %} +# {{ item.value.description }} +{% endif %} + +{% if item.value.path is defined or item.value.paths is defined %} + {% if 
item.value.path is defined %} +{{ item.value.path }} + {% elif item.value.paths %} + {% for path in item.value.paths -%} +{{ path }} + {% endfor %} + {% endif %} +{ + {% if item.value.options is defined %} +{# all logrotate options #} + {% for option in item.value.options %} + {{ option }} + {% endfor %} + {% endif %} + {% if item.value.scripts is defined %} + {# pre- or postrotate scripts #} + {% for name, script in item.value.scripts.items() %} + {% if name in scripts_attr %} + {{ name }} + {{ script }} + endscript + {% endif %} + {% endfor %} + {% endif %} +} +{% endif %} diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/logrotate/vars/main.yml b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/logrotate/vars/main.yml new file mode 100644 index 0000000..fd67eab --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/logrotate/vars/main.yml @@ -0,0 +1,15 @@ +--- + +logrotate_defaults_global: + rotate_log: weekly + rotate_size: '' + su_user: '' + su_group: '' + rotate: 0 + create: true + dateext: true + compress: true + tabooext: [] + archive_directory: '' + +... diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/mount/.ansible-lint b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/mount/.ansible-lint new file mode 100644 index 0000000..48763f0 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/mount/.ansible-lint @@ -0,0 +1,5 @@ +--- + +skip_list: + - name[casing] + - name[template] diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/mount/.editorconfig b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/mount/.editorconfig new file mode 100644 index 0000000..898cdbd --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/mount/.editorconfig @@ -0,0 +1,23 @@ +# EditorConfig helps developers define and maintain consistent +# coding styles between different editors and IDEs +# https://editorconfig.org/ + + +root = true + +[*] +indent_style = space +indent_size = 2 + +end_of_line = lf +charset = utf-8 +trim_trailing_whitespace = true +insert_final_newline = true +max_line_length = 100 + +[*.py] +indent_style = space +indent_size = 4 + +[*.md] +trim_trailing_whitespace = false diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/mount/.flake8 b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/mount/.flake8 new file mode 100644 index 0000000..1962f7e --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/mount/.flake8 @@ -0,0 +1,19 @@ +[flake8] + +# E221 multiple spaces before operator +# E251 unexpected spaces around keyword / parameter equals + +ignore = E221,E251 + +exclude = + # No need to traverse our git directory + .git, + # There's no value in checking cache directories + __pycache__, + .tox + +# E203: https://github.com/python/black/issues/315 +# ignore = D,E741,W503,W504,H,E501,E203 + +max-line-length = 195 + diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/mount/.gitignore b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/mount/.gitignore new file mode 100644 index 0000000..3d35e6a --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/mount/.gitignore @@ -0,0 +1,6 @@ +.tox +.galaxy_install_info +*kate-swp +__pycache__ +.cache +.directory diff --git 
a/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/mount/.yamllint b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/mount/.yamllint
new file mode 100644
index 0000000..20fd7aa
--- /dev/null
+++ b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/mount/.yamllint
@@ -0,0 +1,40 @@
+---
+# Based on ansible-lint config
+extends: default
+
+rules:
+  braces:
+    max-spaces-inside: 1
+    level: error
+  brackets:
+    max-spaces-inside: 1
+    level: error
+  colons:
+    max-spaces-after: -1
+    level: error
+  commas:
+    max-spaces-after: -1
+    level: error
+  comments: disable
+  comments-indentation: disable
+  document-start: disable
+  empty-lines:
+    max: 3
+    level: error
+  hyphens:
+    level: error
+  indentation:
+    spaces: 2
+  key-duplicates: enable
+  line-length:
+    max: 195
+    level: warning
+  new-line-at-end-of-file: disable
+  new-lines:
+    type: unix
+  trailing-spaces: disable
+  truthy: disable
+
+ignore: |
+  molecule/
+  .github
diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/mount/CONTRIBUTING.md b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/mount/CONTRIBUTING.md
new file mode 100644
index 0000000..e3cd4cc
--- /dev/null
+++ b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/mount/CONTRIBUTING.md
@@ -0,0 +1,31 @@
+Contributing
+============
+If you want to contribute to a project and make it better, your help is very welcome.
+Contributing is also a great way to learn more about social coding on Github, new technologies
+and their ecosystems, and how to make constructive, helpful bug reports, feature requests and the
+noblest of all contributions: a good, clean pull request.
+
+### How to make a clean pull request
+
+Look for a project's contribution instructions. If there are any, follow them.
+
+- Create a personal fork of the project on Github.
+- Clone the fork on your local machine. Your remote repo on Github is called `origin`.
+- Add the original repository as a remote called `upstream`.
+- If you created your fork a while ago, be sure to pull upstream changes into your local repository.
+- Create a new branch to work on! Branch from `develop` if it exists, else from `master`.
+- Implement/fix your feature, comment your code.
+- Follow the code style of the project, including indentation.
+- If the project has tests, run them!
+- Write or adapt tests as needed.
+- Add or change the documentation as needed.
+- Squash your commits into a single commit. Create a new branch if necessary.
+- Push your branch to your fork on Github, the remote `origin`.
+- From your fork open a pull request in the correct branch. Target the project's `develop` branch if there is one, else go for `master`!
+- If the maintainer requests further changes, just push them to your branch. The PR will be updated automatically.
+- Once the pull request is approved and merged, you can pull the changes from `upstream` to your local repo and delete
+  your extra branch(es).
+
+And last but not least: Always write your commit messages in the present tense.
+Your commit message should describe what the commit, when applied, does to the
+code – not what you did to the code.
diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/mount/LICENSE b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/mount/LICENSE new file mode 100644 index 0000000..8c8472f --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/mount/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." 
+ + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. 
+ + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. 
We also recommend that a
+   file or class name and description of purpose be included on the
+   same "printed page" as the copyright notice for easier
+   identification within third-party archives.
+
+   Copyright 2020-2021 Bodo Schulz
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/mount/Makefile b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/mount/Makefile
new file mode 100644
index 0000000..bfaab7c
--- /dev/null
+++ b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/mount/Makefile
@@ -0,0 +1,22 @@
+#
+export TOX_SCENARIO ?= default
+export TOX_ANSIBLE ?= ansible_8.5
+
+.PHONY: converge destroy verify test lint
+
+default: converge
+
+converge:
+	@hooks/converge
+
+destroy:
+	@hooks/destroy
+
+verify:
+	@hooks/verify
+
+test:
+	@hooks/test
+
+lint:
+	@hooks/lint
diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/mount/README.md b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/mount/README.md
new file mode 100644
index 0000000..ef32d63
--- /dev/null
+++ b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/mount/README.md
@@ -0,0 +1,79 @@
+# Ansible Role: `bodsch.core.mount`
+
+Manage generic mountpoints.
+
+
+## Role Variables
+
+```yaml
+mount_fstab: /etc/fstab
+
+mount_devices: []
+
+mount_smb_share: []
+```
+
+With `mount_fstab` you can specify the fstab file to be generated.
+This is especially helpful for tests :)
+
+
+### `mount_smb_share`
+
+Creates a credentials (password) file for SMB shares.
+
+```yaml
+mount_smb_share:
+  - username: "bar"
+    password: "foo"
+    domain: "WORKSPACE"
+    passwordfile: "/tmp/.bar.smbcredentials"
+```
+
+
+### `mount_devices`
+
+| option | required | default | description |
+| :--- | :---: | :--- | :--- |
+| `source` | **yes** | `-` | device, remote share or pseudo filesystem to mount |
+| `mountpoint` | **yes** | `-` | directory the source is mounted on |
+| `fstype` | **yes** | `-` | filesystem type |
+| `opts` | **no** | `defaults` | mount options |
+| `state` | **no** | `present` | `present` or `absent` |
+| `dump` | **no** | `0` | fifth fstab field (dump) |
+| `passno` | **no** | `0` | sixth fstab field (fsck order) |
+| `fstab` | **no** | `mount_fstab` | fstab file the entry is written to |
+
+## example configuration
+
+```yaml
+mount_fstab: /tmp/molecule_fstab
+
+mount_smb_share:
+  - username: "bar"
+    password: "foo"
+    passwordfile: "/tmp/zorg.pass"
+  - username: "foo"
+    password: "bar"
+    passwordfile: "/tmp/foo.pass"
+
+mount_devices:
+
+  - source: tmpfs
+    mountpoint: /tmp
+    fstype: tmpfs
+    opts: auto,rw,noatime,size=250M,nr_inodes=800k
+    state: present
+
+  - source: nfs.example.org:/data
+    mountpoint: /mnt/remote
+    fstype: nfs
+    opts: vers=4,noauto,users,soft,intr,rsize=8192,wsize=8192
+```
+
+
+
+---
+
+## Author
+
+- Bodo Schulz
diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/mount/defaults/main.yml b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/mount/defaults/main.yml
new file mode 100644
index 0000000..1dfd1e4
--- /dev/null
+++ b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/mount/defaults/main.yml
@@ -0,0 +1,39 @@
+---
+
+mount_fstab: /etc/fstab
+
+mount_devices: []
+# - source: /dev/mapper/root          # NO default
+#   mountpoint: /                     # NO default
+#   fstype: ext4                      # NO default
+#   opts: noatime,errors=remount-ro   # default: "defaults"
+#   state: present                    # default: "present"
+#   dump: 0                           # default: "0"
+#   passno: 1                         # default: "0"
+#   fstab: /etc/fstab                 # default: "/etc/fstab"
+#
+# - source: nfs.example.org:/data
+#   mountpoint: /mnt/remote
+#   fstype: nfs
+#   opts: vers=4,noauto,users,soft,intr,rsize=8192,wsize=8192
+#
+# - source: nfs.example.org:/read-only
+#   mountpoint: /mnt/readonly
+#   opts: defaults,ro
+#   fstype: nfs4
+#
+# - source: tmpfs
+#   mountpoint: /tmp
+#   fstype: tmpfs
+#   opts: auto,rw,noatime,size=250M,nr_inodes=800k
+#   state: present
+
+mount_smb_share: []
+# - username: "bar"
+#   password: "foo"
+#   passwordfile: "/tmp/zorg.pass"
+# - username: "foo"
+#   password: "bar"
+#   passwordfile: "/tmp/foo.pass"
+
+...
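Since the table above leaves the field semantics implicit: a `mount_devices` entry maps one-to-one onto the classic six-column fstab layout. As a sketch, the tmpfs entry from the example would end up in the generated file roughly as the comment shows:

```yaml
mount_devices:
  - source: tmpfs
    mountpoint: /tmp
    fstype: tmpfs
    opts: auto,rw,noatime,size=250M,nr_inodes=800k
    state: present
# resulting entry in the file named by mount_fstab:
# tmpfs  /tmp  tmpfs  auto,rw,noatime,size=250M,nr_inodes=800k  0  0
```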
diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/mount/hooks/converge b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/mount/hooks/converge new file mode 100755 index 0000000..0c50932 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/mount/hooks/converge @@ -0,0 +1,3 @@ +#!/usr/bin/env bash + +hooks/tox.sh "converge" diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/mount/hooks/destroy b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/mount/hooks/destroy new file mode 100755 index 0000000..b4a3f8d --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/mount/hooks/destroy @@ -0,0 +1,3 @@ +#!/usr/bin/env bash + +hooks/tox.sh "destroy" diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/mount/hooks/lint b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/mount/hooks/lint new file mode 100755 index 0000000..ef226a0 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/mount/hooks/lint @@ -0,0 +1,3 @@ +#!/usr/bin/env bash + +hooks/tox.sh "lint" diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/mount/hooks/molecule.rc b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/mount/hooks/molecule.rc new file mode 100644 index 0000000..78c8621 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/mount/hooks/molecule.rc @@ -0,0 +1,74 @@ + +TOX_ARGS= + +if [ -n "${TOX_SCENARIO}" ] +then + TOX_ARGS="--scenario-name ${TOX_SCENARIO}" +fi + +TOX_OPTS="-e ${TOX_ANSIBLE}" + +vercomp() { + + [[ $1 == $2 ]] && return 0 + v1=$(echo "$1" | sed -e 's|-|.|g') + v2=$(echo "$2" | sed -e 's|-|.|g') + + local IFS=. + local i ver1=($1) ver2=($2) + # fill empty fields in ver1 with zeros + for ((i=${#ver1[@]}; i<${#ver2[@]}; i++)) + do + ver1[i]=0 + done + + for ((i=0; i<${#ver1[@]}; i++)) + do + if [[ -z ${ver2[i]} ]] + then + # fill empty fields in ver2 with zeros + ver2[i]=0 + fi + if ((10#${ver1[i]} > 10#${ver2[i]})) + then + return 1 + fi + if ((10#${ver1[i]} < 10#${ver2[i]})) + then + return 2 + fi + done + return 0 +} + +install_collection() { + local collection="${1}" + + echo "Install the required collection '${collection}'" + ansible-galaxy collection install ${collection} +} + +remove_collection() { + + local collection="${1}" + + namespace="$(echo "${collection}" | cut -d '.' -f1)" + name="$(echo "${collection}" | cut -d '.' -f2)" + + collection="${HOME}/.ansible/collections/ansible_collections/${namespace}/${name}" + + rm \ + --recursive \ + --force \ + "${collection}" +} + +publish() { + + TOKEN="${HOME}/.ansible/galaxy_token" + + if [ -e "${TOKEN}" ] + then + ansible-galaxy import --token=$(cat "${TOKEN}") bodsch # "???" 
+ fi +} diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/mount/hooks/test b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/mount/hooks/test new file mode 100755 index 0000000..2869139 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/mount/hooks/test @@ -0,0 +1,3 @@ +#!/usr/bin/env bash + +hooks/tox.sh "test" diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/mount/hooks/tox.sh b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/mount/hooks/tox.sh new file mode 100755 index 0000000..c93de29 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/mount/hooks/tox.sh @@ -0,0 +1,52 @@ +#!/usr/bin/env bash + +. hooks/molecule.rc + +TOX_TEST="${1}" + +if [ -f "./collections.yml" ] +then + for collection in $(grep -v "#" collections.yml | grep "^ - name: " | awk -F ': ' '{print $2}') + do + collections_installed="$(ansible-galaxy collection list | grep ${collection} 2> /dev/null)" + + if [ -z "${collections_installed}" ] + then + install_collection ${collection} + else + collection_version=$(echo "${collections_installed}" | awk -F ' ' '{print $2}') + version="$(grep -v "#" collections.yml | grep -A1 "^ - name: ${collection}" | grep "version: " 2> /dev/null | awk -F ': ' '{print $2}' | sed -e 's|=||' -e 's|>||' -e 's|"||g')" + + echo "The required collection '${collection}' is installed in version ${collection_version}." + + if [ ! -z "${version}" ] + then + + vercomp "${version}" "${collection_version}" + + case $? in + 0) op='=' ;; + 1) op='>' ;; + 2) op='<' ;; + esac + + if [[ $op = "=" ]] || [[ $op = ">" ]] + then + # echo "FAIL: Expected '$3', Actual '$op', Arg1 '$1', Arg2 '$2'" + echo "re-install for version ${version}" + + remove_collection ${collection} + install_collection ${collection} + else + : + # echo "Pass: '$1 $op $2'" + fi + else + : + fi + fi + done + echo "" +fi + +tox ${TOX_OPTS} -- molecule ${TOX_TEST} ${TOX_ARGS} diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/mount/hooks/verify b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/mount/hooks/verify new file mode 100755 index 0000000..5f436af --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/mount/hooks/verify @@ -0,0 +1,3 @@ +#!/usr/bin/env bash + +hooks/tox.sh "verify" diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/mount/meta/main.yml b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/mount/meta/main.yml new file mode 100644 index 0000000..c216b2b --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/mount/meta/main.yml @@ -0,0 +1,36 @@ +--- + +galaxy_info: + role_name: mount + + author: Bodo Schulz + description: manage generic mountpoints + + license: Apache + min_ansible_version: "2.9" + + platforms: + - name: ArchLinux + - name: Debian + versions: + # 10 + - buster + # 11 + - bullseye + - bookworm + - name: Ubuntu + versions: + # 20.04 + - focal + + galaxy_tags: + - system + - mount + - share + - nfs + - cifs + - smb + +dependencies: [] + +... 
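`hooks/tox.sh` above expects an optional `collections.yml` next to the role and greps it for `- name:` / `version:` pairs; a version mismatch triggers a remove and re-install of the collection. A minimal sketch of a file matching that parsing — the `collections:` top-level key and the concrete versions are assumptions, not taken from this repository:

```yaml
---
collections:
  - name: bodsch.core
    version: ">=2.9.0"
  - name: ansible.posix
```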
diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/mount/tasks/configure.yml b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/mount/tasks/configure.yml new file mode 100644 index 0000000..bb3abe3 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/mount/tasks/configure.yml @@ -0,0 +1,44 @@ +--- + +- name: create smb credentials file + ansible.builtin.template: + src: credentials.j2 + dest: "{{ item.passwordfile }}" + owner: root + group: root + mode: "0640" + backup: true + when: + - not running_in_check_mode + - mount_smb_share | default([]) | count > 0 + no_log: true + loop: + "{{ mount_smb_share | default([]) }}" + loop_control: + label: "{{ item.passwordfile }}" + +# Do not create mountpoint using file, the mount module will create it +# automatically. This avoids problems where the module tries to change +# permissions on an existing directory + +- name: mount devices if available + ansible.posix.mount: + src: '{{ item.source }}' + name: '{{ item.mountpoint }}' + fstype: '{{ item.fstype }}' + opts: '{{ item.opts | default("defaults") }}' + state: '{{ item.state | default("present") }}' + dump: '{{ item.dump | default("0") }}' + passno: '{{ item.passno | default("0") }}' + fstab: '{{ item.fstab | default(mount_fstab | default("/etc/fstab")) }}' + with_items: '{{ mount_devices }}' + when: + - not running_in_check_mode + - mount_devices | default([]) | count > 0 + loop_control: + label: 'mountpoint: {{ item.mountpoint }} - from: {{ item.source }}' + register: _mount_result + changed_when: true + failed_when: false + +... diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/mount/tasks/installation.yml b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/mount/tasks/installation.yml new file mode 100644 index 0000000..800e25d --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/mount/tasks/installation.yml @@ -0,0 +1,17 @@ +--- + +- name: install cifs required packages + ansible.builtin.package: + name: "{{ smb_packages }}" + state: present + when: + - mount_fstypes | regex_search("cifs") + +- name: install nfs required packages + ansible.builtin.package: + name: "{{ nfs_packages }}" + state: present + when: + - mount_fstypes | regex_search("nfs*") + +... diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/mount/tasks/main.yml b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/mount/tasks/main.yml new file mode 100644 index 0000000..bc0f395 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/mount/tasks/main.yml @@ -0,0 +1,11 @@ +--- + +- name: prepare + ansible.builtin.include_tasks: prepare.yaml + +#- include_tasks: installation.yml + +- name: configure + ansible.builtin.include_tasks: configure.yml + +... 
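To illustrate the first task above: with the `mount_smb_share` entry from the README (username `bar`, domain `WORKSPACE`), the `credentials.j2` template shown further down would render the password file roughly as in the comments — the values are the README's example data, not real credentials:

```yaml
mount_smb_share:
  - username: "bar"
    password: "foo"
    domain: "WORKSPACE"
    passwordfile: "/tmp/.bar.smbcredentials"
# rendered /tmp/.bar.smbcredentials (mode 0640, preceded by an "Ansible managed" header comment):
#   username = bar
#   password = foo
#   domain = WORKSPACE
```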
diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/mount/tasks/prepare.yaml b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/mount/tasks/prepare.yaml new file mode 100644 index 0000000..5a42d97 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/mount/tasks/prepare.yaml @@ -0,0 +1,35 @@ +--- + +- name: include OS specific configuration ({{ ansible_facts.distribution }} ({{ ansible_facts.os_family }}) {{ ansible_facts.distribution_major_version }}) + ansible.builtin.include_vars: "{{ lookup('first_found', params) }}" + vars: + params: + paths: + - "vars" + files: + # eg. debian-10 / ubuntu-20.04 / centos-8 / oraclelinux-8 + - "{{ ansible_facts.distribution | lower }}-{{ ansible_facts.distribution_major_version }}.yml" + # eg. archlinux-systemd / archlinux-openrc + - "{{ ansible_facts.distribution | lower }}-{{ ansible_facts.service_mgr | lower }}.yml" + # eg. debian / ubuntu / centos / oraclelinux + - "{{ ansible_facts.distribution | lower }}.yml" + # eg. redhat / debian / archlinux + - "{{ ansible_facts.os_family | lower }}.yml" + # artixlinux + - "{{ ansible_facts.os_family | lower | replace(' ', '') }}.yml" + - default.yml + skip: true + +- name: detect ansible check_mode + bodsch.core.check_mode: + register: _check_mode + +- name: define running_in_check_mode + ansible.builtin.set_fact: + running_in_check_mode: '{{ _check_mode.check_mode }}' + +- name: detect fstypes + ansible.builtin.set_fact: + mount_fstypes: "{{ mount_devices | bodsch.core.fstypes }}" + +... diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/mount/templates/credentials.j2 b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/mount/templates/credentials.j2 new file mode 100644 index 0000000..809842b --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/mount/templates/credentials.j2 @@ -0,0 +1,6 @@ +{{ ansible_managed | comment }} +username = {{ item.username }} +password = {{ item.password }} +{% if item.domain is defined and item.domain | length > 0 %} +domain = {{ item.domain }} +{% endif %} diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/mount/vars/archlinux.yml b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/mount/vars/archlinux.yml new file mode 100644 index 0000000..5a09b6e --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/mount/vars/archlinux.yml @@ -0,0 +1,10 @@ +--- + +nfs_packages: + - nfs-utils + +smb_packages: + - smbclient + - cifs-utils + +... diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/mount/vars/artixlinux.yml b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/mount/vars/artixlinux.yml new file mode 100644 index 0000000..5a09b6e --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/mount/vars/artixlinux.yml @@ -0,0 +1,10 @@ +--- + +nfs_packages: + - nfs-utils + +smb_packages: + - smbclient + - cifs-utils + +... diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/mount/vars/debian.yml b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/mount/vars/debian.yml new file mode 100644 index 0000000..e9e3c4f --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/mount/vars/debian.yml @@ -0,0 +1,10 @@ +--- + +nfs_packages: + - nfs-common + +smb_packages: + - smbclient + - cifs-utils + +... 
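As a worked example of the `first_found` lookup above: the candidate files are probed in the listed order and the first one that exists under `vars/` wins. A sketch, assuming a Debian 12 host running systemd — since the role ships no version- or service-manager-specific Debian file, `vars/debian.yml` is loaded:

```yaml
# probe order (distribution=Debian, os_family=Debian, service_mgr=systemd):
#   vars/debian-12.yml       -> not shipped, skipped
#   vars/debian-systemd.yml  -> not shipped, skipped
#   vars/debian.yml          -> exists, loaded (nfs-common, smbclient, cifs-utils)
#   vars/default.yml         -> not reached
```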
diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/mount/vars/main.yml b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/mount/vars/main.yml new file mode 100644 index 0000000..e703f55 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/mount/vars/main.yml @@ -0,0 +1,7 @@ +--- + +nfs_packages: [] + +smb_packages: [] + +... diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/mount/vars/redhat.yml b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/mount/vars/redhat.yml new file mode 100644 index 0000000..e8f852d --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/mount/vars/redhat.yml @@ -0,0 +1,11 @@ +--- + +nfs_packages: + - nfs-utils + +smb_packages: + - samba-common + - samba-client + - cifs-utils + +... diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/openvpn/.ansible-lint b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/openvpn/.ansible-lint new file mode 100644 index 0000000..2184c68 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/openvpn/.ansible-lint @@ -0,0 +1,7 @@ +--- + +skip_list: + - name[casing] + - name[template] + - ignore-errors # Use failed_when and specify error conditions instead of using ignore_errors. + - args[module] # value of state must be one of: present, absent, got: {{ client.state }} diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/openvpn/.editorconfig b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/openvpn/.editorconfig new file mode 100644 index 0000000..898cdbd --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/openvpn/.editorconfig @@ -0,0 +1,23 @@ +# EditorConfig helps developers define and maintain consistent +# coding styles between different editors and IDEs +# https://editorconfig.org/ + + +root = true + +[*] +indent_style = space +indent_size = 2 + +end_of_line = lf +charset = utf-8 +trim_trailing_whitespace = true +insert_final_newline = true +max_line_length = 100 + +[*.py] +indent_style = space +indent_size = 4 + +[*.md] +trim_trailing_whitespace = false diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/openvpn/.flake8 b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/openvpn/.flake8 new file mode 100644 index 0000000..1962f7e --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/openvpn/.flake8 @@ -0,0 +1,19 @@ +[flake8] + +# E221 multiple spaces before operator +# E251 unexpected spaces around keyword / parameter equals + +ignore = E221,E251 + +exclude = + # No need to traverse our git directory + .git, + # There's no value in checking cache directories + __pycache__, + .tox + +# E203: https://github.com/python/black/issues/315 +# ignore = D,E741,W503,W504,H,E501,E203 + +max-line-length = 195 + diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/openvpn/.gitignore b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/openvpn/.gitignore new file mode 100644 index 0000000..3d35e6a --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/openvpn/.gitignore @@ -0,0 +1,6 @@ +.tox +.galaxy_install_info +*kate-swp +__pycache__ +.cache +.directory diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/openvpn/.yamllint 
b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/openvpn/.yamllint
new file mode 100644
index 0000000..20fd7aa
--- /dev/null
+++ b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/openvpn/.yamllint
@@ -0,0 +1,40 @@
+---
+# Based on ansible-lint config
+extends: default
+
+rules:
+  braces:
+    max-spaces-inside: 1
+    level: error
+  brackets:
+    max-spaces-inside: 1
+    level: error
+  colons:
+    max-spaces-after: -1
+    level: error
+  commas:
+    max-spaces-after: -1
+    level: error
+  comments: disable
+  comments-indentation: disable
+  document-start: disable
+  empty-lines:
+    max: 3
+    level: error
+  hyphens:
+    level: error
+  indentation:
+    spaces: 2
+  key-duplicates: enable
+  line-length:
+    max: 195
+    level: warning
+  new-line-at-end-of-file: disable
+  new-lines:
+    type: unix
+  trailing-spaces: disable
+  truthy: disable
+
+ignore: |
+  molecule/
+  .github
diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/openvpn/CONTRIBUTING.md b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/openvpn/CONTRIBUTING.md
new file mode 100644
index 0000000..e3cd4cc
--- /dev/null
+++ b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/openvpn/CONTRIBUTING.md
@@ -0,0 +1,31 @@
+Contributing
+============
+If you want to contribute to a project and make it better, your help is very welcome.
+Contributing is also a great way to learn more about social coding on Github, new technologies
+and their ecosystems, and how to make constructive, helpful bug reports, feature requests and the
+noblest of all contributions: a good, clean pull request.
+
+### How to make a clean pull request
+
+Look for a project's contribution instructions. If there are any, follow them.
+
+- Create a personal fork of the project on Github.
+- Clone the fork on your local machine. Your remote repo on Github is called `origin`.
+- Add the original repository as a remote called `upstream`.
+- If you created your fork a while ago, be sure to pull upstream changes into your local repository.
+- Create a new branch to work on! Branch from `develop` if it exists, else from `master`.
+- Implement/fix your feature, comment your code.
+- Follow the code style of the project, including indentation.
+- If the project has tests, run them!
+- Write or adapt tests as needed.
+- Add or change the documentation as needed.
+- Squash your commits into a single commit. Create a new branch if necessary.
+- Push your branch to your fork on Github, the remote `origin`.
+- From your fork open a pull request in the correct branch. Target the project's `develop` branch if there is one, else go for `master`!
+- If the maintainer requests further changes, just push them to your branch. The PR will be updated automatically.
+- Once the pull request is approved and merged, you can pull the changes from `upstream` to your local repo and delete
+  your extra branch(es).
+
+And last but not least: Always write your commit messages in the present tense.
+Your commit message should describe what the commit, when applied, does to the
+code – not what you did to the code.
diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/openvpn/LICENSE b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/openvpn/LICENSE new file mode 100644 index 0000000..8c8472f --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/openvpn/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." 
+ + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. 
+ + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. 
We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright 2020-2021 Bodo Schulz + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/openvpn/Makefile b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/openvpn/Makefile new file mode 100644 index 0000000..bfaab7c --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/openvpn/Makefile @@ -0,0 +1,22 @@ +# +export TOX_SCENARIO ?= default +export TOX_ANSIBLE ?= ansible_8.5 + +.PHONY: converge destroy verify test lint + +default: converge + +converge: + @hooks/converge + +destroy: + @hooks/destroy + +verify: + @hooks/verify + +test: + @hooks/test + +lint: + @hooks/lint diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/openvpn/README.md b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/openvpn/README.md new file mode 100755 index 0000000..079635e --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/openvpn/README.md @@ -0,0 +1,396 @@ +# Ansible Role: `bodsch.core.openvpn` + +Ansible role to install and configure openvpn server. + +## Requirements & Dependencies + +The ipv4 filter requires python's `netaddr` be installed on the ansible controller. + + +## configuration + +```yaml +openvpn_directory: /etc/openvpn + +openvpn_diffie_hellman_keysize: 2048 + +openvpn_mtu: 1500 +openvpn_mssfix: 1360 + +openvpn_keepalive: + interval: 10 + timeout: 120 + +# server or client +openvpn_type: "" + +openvpn_service: + state: started + enabled: true + +openvpn_systemd: {} + +openvpn_logging: {} + +openvpn_easyrsa: {} + +openvpn_certificate: {} + +openvpn_server: {} + +openvpn_persistent_pool: [] + +openvpn_mobile_clients: [] + +openvpn_config_save_dir: "" + +openvpn_subnet: + ip: 10.8.3.0 + mask: 255.255.255.0 + +openvpn_iptables: + enabled: false + +openvpn_push: + routes: [] + route_gateway: "" + dhcp_options: + domains: [] + dns: [] + sndbuf: 393216 + rcvbuf: 393216 +``` + +### `openvpn_systemd` + +If OpenVPN has a dependency on another service (e.g. on sshd), then it should be possible +here to force the service into a corresponding dependency. +(Assuming that I have understood the systemd documentation correctly!) + +```yaml +openvpn_systemd: + requires_services: + - sshd.service +``` + +### `openvpn_logging` + +`verbose` Set the appropriate level of log file verbosity. + +- `0` is silent, except for fatal errors +- `4` is reasonable for general usage +- `5` and `6` can help to debug connection problems +- `9` is extremely verbose + +`mute` Silence repeating messages. +At most 20 sequential messages of the same message category will be output to the log. 
+
+
+**example**
+```yaml
+openvpn_logging:
+  directory: /var/log/openvpn
+  file: openvpn.log
+  status: status.log
+  verbosity: 3
+  mute: 10
+  append: true
+```
+
+### `openvpn_easyrsa`
+
+The best way to create a PKI for OpenVPN is to separate your CA duty from each server & client.
+The CA should ideally be kept in a secure environment (whatever that means to you.)
+**Loss/theft of the CA key destroys the security of the entire PKI.**
+
+The CRL file is valid for 180 days (default).
+Once these 180 days have expired, the VPN clients will no longer establish a connection to the server!
+For this reason I have integrated `crl_warn`.
+If `expired` is configured with `true`, each time the role is run, it checks whether the CRL file is still valid.
+If the remaining validity is less than `expire_in_days` days, the CRL file is automatically renewed.
+
+**example**
+```yaml
+openvpn_easyrsa:
+  directory: /etc/easy-rsa
+  openssl_config: ""
+  key_size: 4096
+  ca_expire: 3650
+  cert_expire: 3650
+  crl_days: 180
+  crl_warn:
+    expired: true
+    expire_in_days: 20
+  x509_dn_mode: cn_only
+  # Choices for crypto alg are: (each in lower-case)
+  # * rsa
+  # * ec
+  # * ed
+  crypto_mode: ec
+  rsa_curve: secp384r1
+  # sha256, sha224, sha384, sha512
+  digest: sha512
+```
+
+### `openvpn_certificate`
+
+**example**
+```yaml
+openvpn_certificate:
+  req_country: DE
+  req_province: Hamburg
+  req_city: Hamburg
+  req_org: ACME Inc.
+  req_email: openvpn@acme.inc
+  req_ou: Special Forces
+  req_cn_ca: 'Open VPN'
+  req_cn_server: '{{ ansible_facts.fqdn }}'
+```
+
+### `openvpn_server`
+
+`user` / `group` It's a good idea to reduce the OpenVPN daemon's privileges after initialization.
+
+`tls_auth` For extra security beyond that provided by SSL/TLS, create an
+"HMAC firewall" to help block DoS attacks and UDP port flooding.
+
+
+**example**
+```yaml
+openvpn_server:
+  # network interface connected to internal net
+  interface: eth0
+  # external IP of VPN server (EIP)
+  external_ip: ''  # '{{ ansible_default_ipv4.address }}'
+  # Which local IP address should OpenVPN
+  # listen on? (optional)
+  listen_ip: ''
+  # valid: 'udp' or 'tcp'
+  proto: udp
+  # Which TCP/UDP port should OpenVPN listen on?
+  port: 1194
+  # valid: 'tun' or 'tap'
+  # "tun" will create a routed IP tunnel,
+  # "tap" will create an ethernet tunnel.
+  device: tun
+  max_clients: 10
+  tls_auth:
+    enabled: true
+  cipher: AES-256-GCM
+  user: nobody
+  group: nogroup
+```
+
+### `openvpn_mobile_clients`
+
+The generated OVPN files for mobile clients are stored on the VPN server under `/root/vpn-configs`.
+
+You can also transfer them to the Ansible controller.
+To do this, `openvpn_config_save_dir` must be configured accordingly.
+
+`tls_auth` is recommended when it is activated in `openvpn_server`!
+
+| variable       | default       | description |
+| :---           | :---          | :---        |
+| `name`         | `-`           | client name; also used as the certificate/key base name |
+| `state`        | `present`     | `present` or `absent` |
+| `roadrunner`   | `false`       | when `true`, a ready-to-use `.ovpn` bundle is generated on the server |
+| `remote`       | `-`           | the OpenVPN server the client connects to |
+| `port`         | `1194`        | server port |
+| `proto`        | `udp`         | `udp` or `tcp` |
+| `device`       | `tun`         | `tun` or `tap` |
+| `ping`         | `20`          | ping interval in seconds |
+| `ping_restart` | `45`          | seconds without a ping before the connection is restarted |
+| `cert`         | `${name}.crt` | client certificate file |
+| `key`          | `${name}.key` | client key file |
+
+**example**
+```yaml
+openvpn_mobile_clients:
+  - name: molecule_static
+    state: present
+    remote: server
+    port: 1194
+    proto: udp
+    device: tun
+    ping: 20
+    ping_restart: 45
+    cert: molecule_static.crt
+    key: molecule_static.key
+
+  - name: roadrunner_one
+    state: present
+    roadrunner: true
+    remote: server
+    port: 1194
+    proto: udp
+    device: tun
+    ping: 20
+    ping_restart: 45
+    cert: roadrunner_one.crt
+    key: roadrunner_one.key
+```
+
+#### `openvpn_persistent_pool`
+
+Clients listed here are assigned a persistent, static VPN IP address.
+
+**example**
+```yaml
+openvpn_persistent_pool:
+  - name: molecule_mobile
+    state: present
+    static_ip: 10.8.3.10
+```
+
+
+### `openvpn_subnet`
+
+Configure server mode and supply a VPN subnet for OpenVPN to draw client addresses from.
+The server will take the first address for itself (10.8.0.1 in the case of 10.8.0.0/24); the rest will be made available to clients.
+Each client will be able to reach the server on that address.
+
+Use distinct subnets for every VPN server, if client IPs are persisted!
+(`ifconfig-pool-persist` in openvpn `server.conf`)
+
+**example**
+```yaml
+openvpn_subnet:
+  ip: 10.8.3.0
+  netmask: 255.255.255.0
+```
+
+### `openvpn_push`
+
+Configuration options that can be pushed to the VPN client.
+
+```yaml
+openvpn_push:
+  routes: []
+  dhcp_options:
+    domains: []
+    dns: []
+  sndbuf: 393216
+  rcvbuf: 393216
+```
+
+#### `routes`
+
+Push routes to the client to allow it to reach other private subnets behind the server.
+Remember that these private subnets will also need to know to route the OpenVPN client address pool
+(10.8.0.0/255.255.255.0) back to the OpenVPN server.
+
+List of routes which are propagated to the client. Try to keep these nets small!
+
+**example**
+```yaml
+openvpn_push:
+  routes:
+    - net: 10.8.3.0
+      netmask: 255.255.255.0
+```
+
+#### `dhcp_options.dns`
+
+**example**
+```yaml
+openvpn_push:
+  dhcp_options:
+    dns:
+      - 10.15.0.2
+      - 10.15.0.5
+```
+
+#### `dhcp_options.domains`
+
+**example**
+```yaml
+openvpn_push:
+  dhcp_options:
+    domains:
+      - matrix.vpn
+      - customer.vpn
+```
+
+### `openvpn_iptables`
+
+**example**
+```yaml
+openvpn_iptables:
+  enabled: false
+```
+
+### example configuration for an OpenVPN server with 2 clients
+
+
+#### example configuration for the server
+
+```yaml
+
+openvpn_type: server
+
+openvpn_persistent_pool:
+  - name: client1.example.com
+    state: present
+    static_ip: 172.25.0.10
+  - name: client2.example.com
+    state: present
+    static_ip: 172.25.0.11
+
+openvpn_subnet:
+  ip: 172.25.0.0
+  netmask: 255.255.255.0
+
+openvpn_push:
+  routes:
+    - net: 172.25.0.0
+      netmask: 255.255.255.0
+```
+
+#### example configuration for static client 1
+
+```yaml
+openvpn_type: client
+
+openvpn_mobile_clients:
+  - name: client1.example.com
+    remote: vpn.example.com
+    port: 1194
+    proto: udp
+    device: tun
+    ping: 20
+    ping_restart: 45
+    cert: client1.example.com.crt
+    key: client1.example.com.key
+    tls_auth:
+      enabled: true
+```
+
+
+#### example configuration for roadrunner client
+
+```yaml
+openvpn_type: server
+
+openvpn_mobile_clients:
+  - name: client2.example.com
+    remote: vpn.example.com
+    roadrunner: true
+    port: 1194
+    proto: udp
+    device: tun
+    ping: 20
+    ping_restart: 45
+    cert: client2.example.com.crt
+    key: client2.example.com.key
+    tls_auth:
+      enabled: true
+```
+
+---
+
+## Author
+
+- Bodo Schulz
diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/openvpn/defaults/main.yml b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/openvpn/defaults/main.yml
new file mode 100644
index 0000000..343f8bc
--- /dev/null
+++ b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/openvpn/defaults/main.yml
@@ -0,0 +1,119 @@
+---
+
+openvpn_directory: /etc/openvpn
+
+openvpn_diffie_hellman_keysize: 2048
+
+openvpn_mtu: 1500
+openvpn_mssfix: 1360
+
+openvpn_keepalive:
+  interval: 10
+  timeout: 120
+
+# server or client
+openvpn_type: ""
+
+openvpn_service:
+  state: started
+  enabled: true
+
+openvpn_systemd: {}
+# requires_services: []
+
+openvpn_logging: {}
+# directory: /var/log/openvpn
+# file: openvpn.log
+# status: status.log
+# verbosity: 3
+# mute: 10
+# append: true
+
+openvpn_easyrsa: {}
+# directory: /etc/easy-rsa
+# openssl_config: ""
+# key_size: 4096
+# ca_expire: 3650
+# cert_expire: 3650
+# crl_days: 180
+# crl_warn:
+#   expired: true
+#   expire_in_days: 20
+# x509_dn_mode: cn_only
+# # Choices for crypto alg are: (each in lower-case)
+# # * rsa
+# # * ec
+# # * ed
+# crypto_mode: ec
+# rsa_curve: secp384r1
+# # sha256, sha224, sha384, sha512
+# digest: sha512
+
+openvpn_certificate: {}
+# req_country: DE
+# req_province: Hamburg
+# req_city: Hamburg
+# req_org: ACME Inc.
+# req_email: openvpn@acme.inc
+# req_ou: Special Forces
+# req_cn_ca: 'Open VPN'
+# req_cn_server: '{{ ansible_facts.fqdn }}'
+
+openvpn_server: {}
+# name: server
+# # external IP of VPN server (EIP)
+# external_ip: ''  # '{{ ansible_default_ipv4.address }}'
+# # Which local IP address should OpenVPN
+# # listen on? (optional)
+# listen_ip: ''
+# # valid: 'udp' or 'tcp'
+# proto: udp
+# # Which TCP/UDP port should OpenVPN listen on?
+# port: 1194 +# # valid: 'tun' or 'tap' +# # "tun" will create a routed IP tunnel, +# # "tap" will create an ethernet tunnel. +# device: tun +# max_clients: 10 + +openvpn_persistent_pool: [] +# - name: darillium.matrix.lan +# state: absent +# static_ip: 10.8.3.10 + +openvpn_mobile_clients: [] +# server_name: +# remote: "" +# port: 1194 +# proto: udp +# device: tun +# ping: 20 +# ping_restart: 45 +# tls_auth: +# enabled: false + +openvpn_config_save_dir: "~/openvpn-configs" + +# Use distinct subnets for every VPN server, if client IPs are +# persisted! (ifconfig-pool-persist in openvpn server.conf) +openvpn_subnet: {} +# ip: 10.8.3.0 +# netmask: 255.255.255.0 + +openvpn_iptables: + enabled: false + +openvpn_push: + # List of routes which are propagated to client. + # Try to keep these nets small! + routes: [] + # - net: 172.25.220.0 + # netmask: 255.255.255.0 + # gateway: ... (optional) + dhcp_options: + domains: [] + dns: [] + # sndbuf: 393216 + # rcvbuf: 393216 + +... diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/openvpn/files/down.sh b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/openvpn/files/down.sh new file mode 100644 index 0000000..1c70db0 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/openvpn/files/down.sh @@ -0,0 +1,33 @@ +#!/bin/sh +# Copyright (c) 2006-2007 Gentoo Foundation +# Distributed under the terms of the GNU General Public License v2 +# Contributed by Roy Marples (uberlord@gentoo.org) + +# If we have a service specific script, run this now +if [ -x /etc/openvpn/"${SVCNAME}"-down.sh ] ; then + /etc/openvpn/"${SVCNAME}"-down.sh "$@" +fi + +# Restore resolv.conf to how it was +if [ "${PEER_DNS}" != "no" ]; then + if [ -x /sbin/resolvconf ] ; then + /sbin/resolvconf -d "${dev}" + elif [ -e /etc/resolv.conf-"${dev}".sv ] ; then + # Important that we copy instead of move incase resolv.conf is + # a symlink and not an actual file + cp /etc/resolv.conf-"${dev}".sv /etc/resolv.conf + rm -f /etc/resolv.conf-"${dev}".sv + fi +fi + +if [ -n "${SVCNAME}" ]; then + # Re-enter the init script to start any dependant services + if /etc/init.d/"${SVCNAME}" --quiet status ; then + export IN_BACKGROUND=true + /etc/init.d/"${SVCNAME}" --quiet stop + fi +fi + +exit 0 + +# vim: ts=4 : diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/openvpn/files/up.sh b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/openvpn/files/up.sh new file mode 100644 index 0000000..6ce82d6 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/openvpn/files/up.sh @@ -0,0 +1,100 @@ +#!/bin/sh +# Copyright (c) 2006-2007 Gentoo Foundation +# Distributed under the terms of the GNU General Public License v2 +# Contributed by Roy Marples (uberlord@gentoo.org) + +# Setup our resolv.conf +# Vitally important that we use the domain entry in resolv.conf so we +# can setup the nameservers are for the domain ONLY in resolvconf if +# we're using a decent dns cache/forwarder like dnsmasq and NOT nscd/libc. +# nscd/libc users will get the VPN nameservers before their other ones +# and will use the first one that responds - maybe the LAN ones? 
+# non resolvconf users just the the VPN resolv.conf + +# FIXME:- if we have >1 domain, then we have to use search :/ +# We need to add a flag to resolvconf to say +# "these nameservers should only be used for the listed search domains +# if other global nameservers are present on other interfaces" +# This however, will break compatibility with Debians resolvconf +# A possible workaround would be to just list multiple domain lines +# and try and let resolvconf handle it + +min_route() { + local n=1 + local m + local r + + eval m="\$route_metric_$n" + while [ -n "${m}" ]; do + if [ -z "$r" ] || [ "$r" -gt "$m" ]; then + r="$m" + fi + n="$(($n+1))" + eval m="\$route_metric_$n" + done + + echo "$r" +} + +if [ "${PEER_DNS}" != "no" ]; then + NS= + DOMAIN= + SEARCH= + i=1 + while true ; do + eval opt=\$foreign_option_${i} + [ -z "${opt}" ] && break + if [ "${opt}" != "${opt#dhcp-option DOMAIN *}" ] ; then + if [ -z "${DOMAIN}" ] ; then + DOMAIN="${opt#dhcp-option DOMAIN *}" + else + SEARCH="${SEARCH}${SEARCH:+ }${opt#dhcp-option DOMAIN *}" + fi + elif [ "${opt}" != "${opt#dhcp-option DNS *}" ] ; then + NS="${NS}nameserver ${opt#dhcp-option DNS *}\n" + fi + i=$((${i} + 1)) + done + + if [ -n "${NS}" ] ; then + DNS="# Generated by openvpn for interface ${dev}\n" + if [ -n "${SEARCH}" ] ; then + DNS="${DNS}search ${DOMAIN} ${SEARCH}\n" + elif [ -n "${DOMAIN}" ]; then + DNS="${DNS}domain ${DOMAIN}\n" + fi + DNS="${DNS}${NS}" + if [ -x /sbin/resolvconf ] ; then + metric="$(min_route)" + printf "${DNS}" | /sbin/resolvconf -a "${dev}" ${metric:+-m ${metric}} + else + # Preserve the existing resolv.conf + if [ -e /etc/resolv.conf ] ; then + cp /etc/resolv.conf /etc/resolv.conf-"${dev}".sv + fi + printf "${DNS}" > /etc/resolv.conf + chmod 644 /etc/resolv.conf + fi + fi +fi + +# Below section is Gentoo specific +# Quick summary - our init scripts are re-entrant and set the SVCNAME env var +# as we could have >1 openvpn service + +if [ -n "${SVCNAME}" ]; then + # If we have a service specific script, run this now + if [ -x /etc/openvpn/"${SVCNAME}"-up.sh ] ; then + /etc/openvpn/"${SVCNAME}"-up.sh "$@" + fi + + # Re-enter the init script to start any dependant services + if ! /etc/init.d/"${SVCNAME}" --quiet status ; then + export IN_BACKGROUND=true + /etc/init.d/${SVCNAME} --quiet start + fi +fi + +exit 0 + +# vim: ts=4 : diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/openvpn/handlers/main.yml b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/openvpn/handlers/main.yml new file mode 100644 index 0000000..3332ce1 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/openvpn/handlers/main.yml @@ -0,0 +1,25 @@ +--- + +- name: systemctl daemon-reload + become: true + ansible.builtin.systemd: + daemon_reload: true + force: true + when: + - ansible_facts.service_mgr | lower == "systemd" + +- name: restart openvpn-server + ansible.builtin.service: + name: "{{ openvpn_service_name }}" + state: restarted + ignore_errors: "{{ 'true' if ansible_facts.service_mgr | lower == 'openrc' else 'false' }}" + failed_when: false + +- name: restart openvpn-client + ansible.builtin.service: + name: "{{ openvpn_service_name }}" + state: restarted + ignore_errors: "{{ 'true' if ansible_facts.service_mgr | lower == 'openrc' else 'false' }}" + failed_when: false + +... 
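The handlers above restart whatever unit `openvpn_service_name` resolves to; they are notified by the configuration tasks further down. As a usage sketch (not part of the role itself), a minimal play that applies the role so these handlers can fire; the group name and variable values are illustrative assumptions:

```yaml
# Hedged sketch: a minimal play applying this role as a server.
# 'vpn_servers' is an assumed inventory group, not defined by the role.
- name: provision an OpenVPN server
  hosts: vpn_servers
  become: true
  vars:
    openvpn_type: server
  roles:
    - role: bodsch.core.openvpn
```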
diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/openvpn/hooks/converge b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/openvpn/hooks/converge
new file mode 100755
index 0000000..0c50932
--- /dev/null
+++ b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/openvpn/hooks/converge
@@ -0,0 +1,3 @@
+#!/usr/bin/env bash
+
+hooks/tox.sh "converge"
diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/openvpn/hooks/destroy b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/openvpn/hooks/destroy
new file mode 100755
index 0000000..b4a3f8d
--- /dev/null
+++ b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/openvpn/hooks/destroy
@@ -0,0 +1,3 @@
+#!/usr/bin/env bash
+
+hooks/tox.sh "destroy"
diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/openvpn/hooks/lint b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/openvpn/hooks/lint
new file mode 100755
index 0000000..ef226a0
--- /dev/null
+++ b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/openvpn/hooks/lint
@@ -0,0 +1,3 @@
+#!/usr/bin/env bash
+
+hooks/tox.sh "lint"
diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/openvpn/hooks/molecule.rc b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/openvpn/hooks/molecule.rc
new file mode 100644
index 0000000..78c8621
--- /dev/null
+++ b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/openvpn/hooks/molecule.rc
@@ -0,0 +1,74 @@
+
+TOX_ARGS=
+
+if [ -n "${TOX_SCENARIO}" ]
+then
+    TOX_ARGS="--scenario-name ${TOX_SCENARIO}"
+fi
+
+TOX_OPTS="-e ${TOX_ANSIBLE}"
+
+vercomp() {
+
+    [[ $1 == $2 ]] && return 0
+    v1=$(echo "$1" | sed -e 's|-|.|g')
+    v2=$(echo "$2" | sed -e 's|-|.|g')
+
+    local IFS=.
+    # split the normalised copies from above (not the raw arguments)
+    local i ver1=($v1) ver2=($v2)
+    # fill empty fields in ver1 with zeros
+    for ((i=${#ver1[@]}; i<${#ver2[@]}; i++))
+    do
+        ver1[i]=0
+    done
+
+    for ((i=0; i<${#ver1[@]}; i++))
+    do
+        if [[ -z ${ver2[i]} ]]
+        then
+            # fill empty fields in ver2 with zeros
+            ver2[i]=0
+        fi
+        if ((10#${ver1[i]} > 10#${ver2[i]}))
+        then
+            return 1
+        fi
+        if ((10#${ver1[i]} < 10#${ver2[i]}))
+        then
+            return 2
+        fi
+    done
+    return 0
+}
+
+install_collection() {
+    local collection="${1}"
+
+    echo "Install the required collection '${collection}'"
+    ansible-galaxy collection install ${collection}
+}
+
+remove_collection() {
+
+    local collection="${1}"
+
+    namespace="$(echo "${collection}" | cut -d '.' -f1)"
+    name="$(echo "${collection}" | cut -d '.' -f2)"
+
+    collection="${HOME}/.ansible/collections/ansible_collections/${namespace}/${name}"
+
+    rm \
+        --recursive \
+        --force \
+        "${collection}"
+}
+
+publish() {
+
+    TOKEN="${HOME}/.ansible/galaxy_token"
+
+    if [ -e "${TOKEN}" ]
+    then
+        ansible-galaxy import --token=$(cat "${TOKEN}") bodsch # "???"
+ fi +} diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/openvpn/hooks/test b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/openvpn/hooks/test new file mode 100755 index 0000000..2869139 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/openvpn/hooks/test @@ -0,0 +1,3 @@ +#!/usr/bin/env bash + +hooks/tox.sh "test" diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/openvpn/hooks/tox.sh b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/openvpn/hooks/tox.sh new file mode 100755 index 0000000..c93de29 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/openvpn/hooks/tox.sh @@ -0,0 +1,52 @@ +#!/usr/bin/env bash + +. hooks/molecule.rc + +TOX_TEST="${1}" + +if [ -f "./collections.yml" ] +then + for collection in $(grep -v "#" collections.yml | grep "^ - name: " | awk -F ': ' '{print $2}') + do + collections_installed="$(ansible-galaxy collection list | grep ${collection} 2> /dev/null)" + + if [ -z "${collections_installed}" ] + then + install_collection ${collection} + else + collection_version=$(echo "${collections_installed}" | awk -F ' ' '{print $2}') + version="$(grep -v "#" collections.yml | grep -A1 "^ - name: ${collection}" | grep "version: " 2> /dev/null | awk -F ': ' '{print $2}' | sed -e 's|=||' -e 's|>||' -e 's|"||g')" + + echo "The required collection '${collection}' is installed in version ${collection_version}." + + if [ ! -z "${version}" ] + then + + vercomp "${version}" "${collection_version}" + + case $? in + 0) op='=' ;; + 1) op='>' ;; + 2) op='<' ;; + esac + + if [[ $op = "=" ]] || [[ $op = ">" ]] + then + # echo "FAIL: Expected '$3', Actual '$op', Arg1 '$1', Arg2 '$2'" + echo "re-install for version ${version}" + + remove_collection ${collection} + install_collection ${collection} + else + : + # echo "Pass: '$1 $op $2'" + fi + else + : + fi + fi + done + echo "" +fi + +tox ${TOX_OPTS} -- molecule ${TOX_TEST} ${TOX_ARGS} diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/openvpn/hooks/verify b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/openvpn/hooks/verify new file mode 100755 index 0000000..5f436af --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/openvpn/hooks/verify @@ -0,0 +1,3 @@ +#!/usr/bin/env bash + +hooks/tox.sh "verify" diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/openvpn/meta/main.yml b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/openvpn/meta/main.yml new file mode 100755 index 0000000..ed83ba7 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/openvpn/meta/main.yml @@ -0,0 +1,34 @@ +--- + +galaxy_info: + role_name: openvpn + + author: Bodo Schulz + description: OpenVPN server role + + license: Apache + min_ansible_version: "2.9" + + platforms: + - name: ArchLinux + - name: Debian + versions: + # 11 + - bullseye + # 12 + - bookworm + - name: Ubuntu + versions: + # 20.04 LTS (Focal Fossa) + - focal + # 22.04 LTS (Jammy Jellyfish) + - jammy + + galaxy_tags: + - development + - system + - openvpn + +dependencies: [] + +... 
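The role ships inside the `bodsch.core` collection, so the collection has to be installed before the platform metadata above matters. A hedged sketch of a Galaxy requirements file for that; the file location is a common convention, not something this diff mandates:

```yaml
# collections/requirements.yml -- illustrative sketch.
# Install with: ansible-galaxy collection install -r collections/requirements.yml
collections:
  - name: bodsch.core
```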
diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/openvpn/tasks/configure.yml b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/openvpn/tasks/configure.yml new file mode 100644 index 0000000..5edaab0 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/openvpn/tasks/configure.yml @@ -0,0 +1,55 @@ +--- + +- name: create openvpn log directory + ansible.builtin.file: + state: directory + path: '{{ openvpn_logging.directory }}' + owner: "{{ openvpn_owner }}" + group: "{{ openvpn_group }}" + mode: "0775" + tags: + - setup + - openvpn + +- name: create client key directory + ansible.builtin.file: + state: directory + path: '{{ openvpn_directory }}/keys' + owner: "{{ openvpn_owner }}" + group: "{{ openvpn_group }}" + mode: "0755" + tags: + - setup + - openvpn + +- name: create a combined client list + become: false + delegate_to: localhost + run_once: true + ansible.builtin.set_fact: + openvpn_client_list: "{{ openvpn_client_list | default([]) | bodsch.core.openvpn_clients(hostvars[item]) }}" + loop: "{{ ansible_play_hosts }}" + loop_control: + loop_var: item + no_log: true + +- name: configure openvpn server + ansible.builtin.include_tasks: configure/server.yml + when: + - openvpn_type == "server" + +- name: configure openvpn client + ansible.builtin.include_tasks: configure/client.yml + when: + - openvpn_mobile_clients is defined + - openvpn_mobile_clients | count > 0 + +# - name: change rights for created files +# ansible.builtin.file: +# state: directory +# path: "{{ openvpn_directory }}/keys" +# owner: "{{ openvpn_owner }}" +# group: "{{ openvpn_group }}" +# recurse: true + +... diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/openvpn/tasks/configure/client.yml b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/openvpn/tasks/configure/client.yml new file mode 100644 index 0000000..3d368c7 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/openvpn/tasks/configure/client.yml @@ -0,0 +1,68 @@ +--- + +- name: create temporary transfer directory on ansible controller + become: false + delegate_to: localhost + ansible.builtin.file: + path: "{{ openvpn_local_tmp_directory }}" + state: directory + mode: "0700" + +- name: create target directory for generated client certificates + ansible.builtin.file: + state: directory + path: /root/vpn-configs + mode: "0700" + +- name: get CA certificate + become: false + delegate_to: localhost + ansible.builtin.slurp: + src: '{{ openvpn_local_tmp_directory }}/ca.crt' + register: openvpn_ca_cert + no_log: true + +- name: get TA key + become: false + delegate_to: localhost + ansible.builtin.slurp: + src: '{{ openvpn_local_tmp_directory }}/ta.key' + register: openvpn_ta_key + no_log: true + +- name: create openvpn client configuration template + ansible.builtin.template: + src: openvpn/client_users/client.ovpn.template.j2 + dest: '{{ openvpn_directory }}/client.ovpn.template' + mode: "0600" + owner: root + group: root + register: _changed_template + +- name: define static and roadrunner clients + ansible.builtin.set_fact: + static_clients: "{{ openvpn_mobile_clients | bodsch.core.clients_type('static') }}" + roadrunner_clients: "{{ openvpn_mobile_clients | bodsch.core.clients_type('roadrunner') }}" + +- name: create openvpn client configs + ansible.builtin.include_tasks: configure/static_client_instances.yml + loop: "{{ static_clients }}" + loop_control: + index_var: index + loop_var: client + 
label: "client: {{ client.name }}" + when: + - client is defined + +- name: create openvpn client configs for roadrunners + ansible.builtin.include_tasks: configure/roadrunners.yml + loop: "{{ roadrunner_clients }}" + loop_control: + index_var: index + loop_var: client + label: "client: {{ client.name }}" + when: + - client is defined + +... + diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/openvpn/tasks/configure/roadrunners.yml b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/openvpn/tasks/configure/roadrunners.yml new file mode 100644 index 0000000..eba559c --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/openvpn/tasks/configure/roadrunners.yml @@ -0,0 +1,33 @@ +--- + +- name: undefine openvpn service variables + ansible.builtin.set_fact: + openvpn_client_name: + +- name: create client configuration + bodsch.core.openvpn_ovpn: + state: "{{ client.state }}" + username: "{{ client.name }}" + destination_directory: /root/vpn-configs + force: "{{ _changed_template.changed }}" + args: + chdir: '{{ openvpn_easyrsa.directory }}' + loop_control: + label: "{{ client.name }}, state: {{ client.state }}" + +- name: copy openvpn client configuration to ansible controller + become: true + ansible.builtin.fetch: + src: "/root/vpn-configs/{{ client.name }}.ovpn" + dest: "{{ openvpn_config_save_dir }}/{{ client.name }}.ovpn" + mode: "0600" + flat: true + validate_checksum: false + loop_control: + label: "{{ item.name }}.ovpn" + when: + - client.state == "present" + - openvpn_config_save_dir is defined + - openvpn_config_save_dir | string | length > 0 + +... diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/openvpn/tasks/configure/server.yml b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/openvpn/tasks/configure/server.yml new file mode 100644 index 0000000..9683445 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/openvpn/tasks/configure/server.yml @@ -0,0 +1,271 @@ +--- + +- name: create openvpn server directory + ansible.builtin.file: + state: directory + path: '{{ openvpn_directory }}/server' + owner: "{{ openvpn_owner }}" + group: "{{ openvpn_group }}" + mode: "0755" + tags: + - openvpn + - openvpn_setup + +- name: create server key directory + ansible.builtin.file: + state: directory + path: '{{ openvpn_directory }}/keys/server' + owner: "{{ openvpn_owner }}" + group: "{{ openvpn_group }}" + mode: "0755" + tags: + - openvpn + - openvpn_setup + +# ------------------------------------------------------------------------------------------------ + +- name: create easy-rsa configuration file + ansible.builtin.template: + src: easy-rsa/vars.j2 + dest: '{{ openvpn_easyrsa.directory }}/vars' + mode: "0644" + owner: root + group: root + backup: true + +- name: create openssl-easyrsa.cnf + ansible.builtin.file: + state: link + src: "{{ openvpn_easyrsa.directory }}/{{ openvpn_easyrsa.openssl_config }}" + dest: "{{ openvpn_easyrsa.directory }}/pki/{{ openvpn_easyrsa.openssl_config }}" + mode: "0644" + owner: root + group: root + when: + - ansible_facts.distribution | lower == "ubuntu" + +- name: initialize easy-rsa - (this is going to take a long time) + bodsch.core.easyrsa: + pki_dir: '{{ openvpn_easyrsa.directory }}/pki' + req_cn_ca: "{{ openvpn_certificate.req_cn_ca }}" + req_cn_server: '{{ openvpn_certificate.req_cn_server }}' + ca_keysize: 4096 + dh_keysize: "{{ openvpn_diffie_hellman_keysize }}" + working_dir: '{{ 
openvpn_easyrsa.directory }}' + # force: true + register: _easyrsa_result + +- name: validate crl + bodsch.core.openvpn_crl: + warn_for_expire: "{{ openvpn_easyrsa.crl_warn.expired }}" + expire_in_days: "{{ openvpn_easyrsa.crl_warn.expire_in_days }}" + register: openvpn_crl + +- name: renew CRL + when: + - openvpn_crl.expired | default('false') | bool + bodsch.core.openvpn_crl: + state: renew + force: true + working_dir: '{{ openvpn_easyrsa.directory }}/pki' + +# ------------------------------------------------------------------------------------------------ + +- name: copy CA certificate to openvpn server directory + ansible.builtin.copy: + src: '{{ openvpn_easyrsa.directory }}/pki/ca.crt' + dest: '{{ openvpn_directory }}/keys/server/' + owner: "{{ openvpn_owner }}" + group: "{{ openvpn_group }}" + remote_src: true + mode: "0644" + +- name: copy server certificate to openvpn server directory + ansible.builtin.copy: + src: '{{ openvpn_easyrsa.directory }}/pki/issued/{{ openvpn_certificate.req_cn_server }}.crt' + dest: '{{ openvpn_directory }}/keys/server/' + owner: "{{ openvpn_owner }}" + group: "{{ openvpn_group }}" + remote_src: true + mode: "0644" + +- name: copy server key to openvpn server directory + ansible.builtin.copy: + src: '{{ openvpn_easyrsa.directory }}/pki/private/{{ openvpn_certificate.req_cn_server }}.key' + dest: '{{ openvpn_directory }}/keys/server/' + owner: "{{ openvpn_owner }}" + group: "{{ openvpn_group }}" + remote_src: true + mode: "0600" + +- name: copy DH parameter file to openvpn server directory + ansible.builtin.copy: + src: '{{ openvpn_easyrsa.directory }}/pki/dh.pem' + dest: '{{ openvpn_directory }}/keys/server/dh{{ openvpn_diffie_hellman_keysize }}.pem' + owner: "{{ openvpn_owner }}" + group: "{{ openvpn_group }}" + remote_src: true + mode: "0644" + +- name: generate a tls-auth key + bodsch.core.openvpn: + state: genkey + secret: "{{ openvpn_directory }}/keys/server/ta.key" + args: + creates: '{{ openvpn_directory }}/keys/server/ta.key' + +- name: change rights for pki + ansible.builtin.file: + state: directory + path: "{{ openvpn_easyrsa.directory }}/pki" + owner: "{{ openvpn_owner }}" + group: "{{ openvpn_group }}" + mode: "0770" + # recurse: true + +- name: change rights for crl.pem + ansible.builtin.file: + # state: file + path: "{{ openvpn_easyrsa.directory }}/pki/crl.pem" + owner: "{{ openvpn_owner }}" + group: "{{ openvpn_group }}" + mode: "0640" + +- name: fetch CA certificate to ansible controller + ansible.builtin.fetch: + src: "{{ openvpn_easyrsa.directory }}/pki/ca.crt" + dest: "{{ openvpn_local_tmp_directory }}/" + mode: "0600" + flat: true + no_log: true + +- name: fetch TA key to ansible controller + ansible.builtin.fetch: + src: "{{ openvpn_directory }}/keys/server/ta.key" + dest: "{{ openvpn_local_tmp_directory }}/" + mode: "0600" + flat: true + no_log: true + +# ------------------------------------------------------------------------------------------------ + +- name: create or revoke client certificate + delegate_to: "{{ openvpn_server.name }}" + bodsch.core.openvpn_client_certificate: + clients: "{{ openvpn_client_list }}" + working_dir: '{{ openvpn_easyrsa.directory }}' + when: + - openvpn_client_list | default([]) | count > 0 + +# ------------------------------------------------------------------------------------------------ + +- name: iptables integration + when: + - openvpn_iptables is defined + - openvpn_iptables.enabled is defined + - openvpn_iptables.enabled + block: + - name: add iptables rule for OpenVPN (masquerading)" + 
ansible.builtin.iptables: + table: nat + chain: POSTROUTING + source: '{{ openvpn_subnet.ip }}/{{ openvpn_subnet.netmask }}' + out_interface: '{{ openvpn_server.interface }}' + jump: MASQUERADE + register: add_rule + tags: + - setup + - networking + - openvpn + when: + - openvpn_subnet.ip is defined + - openvpn_subnet.netmask is defined + - openvpn_server.interface is defined + + # - name: save iptables rules + # shell: iptables-save > /etc/sysconfig/iptables + # when: add_rule is changed + # tags: + # - setup + # - networking + # - openvpn + + - name: enable ip forwarding (sysctl) + ansible.posix.sysctl: + name: net.ipv4.ip_forward + value: '1' + state: present + sysctl_file: /etc/sysctl.conf + tags: + - setup + - networking + - openvpn + +# ------------------------------------------------------------------------------------------------ + +- name: create openvpn configuration file (server.conf) + ansible.builtin.template: + src: openvpn/server/server.conf.j2 + dest: "{{ openvpn_directory }}/server/server.conf" + mode: "0644" + owner: "{{ openvpn_owner }}" + group: "{{ openvpn_group }}" + backup: true + notify: + - restart openvpn-server + +# - name: define static client IPs +# ansible.builtin.template: +# src: openvpn/server/ipp.txt.j2 +# dest: /etc/openvpn/ipp.txt +# mode: "0644" +# when: +# - openvpn_persistent_pool is defined +# - openvpn_persistent_pool | count > 0 + +- name: remove old static client IPs + ansible.builtin.file: + state: absent + path: "/etc/openvpn/client/{{ item.name }}" + loop: + "{{ openvpn_persistent_pool }}" + loop_control: + label: "{{ item.name | default('') }}" + when: + - openvpn_persistent_pool is defined + - openvpn_persistent_pool | count > 0 + - item.state | default('present') == 'absent' + +- name: create static client IPs + ansible.builtin.template: + src: openvpn/server/static-client.j2 + dest: "/etc/openvpn/client/{{ item.name }}" + mode: "0644" + loop: + "{{ openvpn_persistent_pool }}" + loop_control: + label: "{{ item.name | default('') }}" + when: + - openvpn_persistent_pool is defined + - openvpn_persistent_pool | count > 0 + - item.state | default('present') == 'present' + +- name: create link for openrc init + ansible.builtin.file: + src: "{{ openvpn_directory }}/server/server.conf" + dest: "{{ openvpn_directory }}/openvpn.conf" + owner: "{{ openvpn_owner }}" + group: "{{ openvpn_group }}" + state: link + force: true + when: + - ansible_facts.service_mgr | lower == "openrc" + - ansible_facts.distribution | lower == "archlinux" or + ansible_facts.os_family | lower | replace(' ', '') | lower == "artixlinux" + notify: + - restart openvpn-server + +- name: flush handlers + ansible.builtin.meta: flush_handlers + +... 
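The iptables block in `configure/server.yml` above only fires when `openvpn_iptables.enabled` is set and the subnet and interface variables are all defined. A hedged example of role variables that would activate the MASQUERADE rule and IP forwarding; the interface name and subnet values are illustrative:

```yaml
# Illustrative vars enabling the optional NAT/forwarding block above.
openvpn_iptables:
  enabled: true
openvpn_server:
  interface: eth0  # interface facing the internal network (assumption)
openvpn_subnet:
  ip: 10.8.3.0
  netmask: 255.255.255.0
```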
diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/openvpn/tasks/configure/static_client_instances.yml b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/openvpn/tasks/configure/static_client_instances.yml new file mode 100644 index 0000000..7d58fe6 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/openvpn/tasks/configure/static_client_instances.yml @@ -0,0 +1,191 @@ +--- + +- name: define client name to '{{ client.name }}' + ansible.builtin.set_fact: + openvpn_client_name: "{{ client.name }}" + +- name: systemd + when: + - ansible_facts.service_mgr | lower == "systemd" + block: + - name: redefine service name + ansible.builtin.set_fact: + openvpn_service_name: "openvpn-client@{{ openvpn_client_name }}" + + - name: support overrides for systemd + when: + - openvpn_systemd is defined + - openvpn_systemd.requires_services is defined + - openvpn_systemd.requires_services | count > 0 + block: + - name: ensure openvpn-client@{{ openvpn_client_name }}.service.d is present + ansible.builtin.file: + dest: /etc/systemd/system/openvpn-client@{{ openvpn_client_name }}.service.d + state: directory + mode: "0755" + + - name: create overwrite.conf for systemd + ansible.builtin.template: + src: init/systemd/override.conf.j2 + dest: "/etc/systemd/system/openvpn-client@{{ openvpn_client_name }}.service.d/override.conf" + mode: "0444" + notify: + - systemctl daemon-reload + - restart openvpn-client + +- name: openrc + when: + - ansible_facts.service_mgr | lower == "openrc" + block: + - name: redefine service name + ansible.builtin.set_fact: + openvpn_service_name: "openvpn.{{ openvpn_client_name }}" + +- name: create client key directory + ansible.builtin.file: + state: directory + path: '{{ openvpn_directory }}/keys/{{ openvpn_client_name }}/' + mode: "0755" + tags: + - setup + - openvpn + +- name: detect client certificate file + ansible.builtin.stat: + path: "{{ openvpn_directory }}/keys/{{ openvpn_client_name }}/{{ openvpn_client_name }}.crt" + checksum_algorithm: sha256 + register: _client_checksum + +- name: detect server certificate file + delegate_to: "{{ client.remote }}" + ansible.builtin.stat: + path: "{{ openvpn_easyrsa.directory }}/pki/issued/{{ openvpn_client_name }}.crt" + checksum_algorithm: sha256 + register: _server_checksum + +- name: compare checksums + ansible.builtin.set_fact: + certificate_equal: "{{ + _client_checksum.stat.checksum | default('xx_client') == _server_checksum.stat.checksum | default('xx_server') + }}" + +- name: export client certificate from openvpn server + delegate_to: "{{ client.remote }}" + when: + - not _client_checksum.stat.exists or not certificate_equal + block: + - name: create export directory + ansible.builtin.file: + state: directory + path: /root/vpn-configs + mode: "0700" + + - name: create export directory + ansible.builtin.file: + state: directory + path: /tmp/openvpn-export + mode: "0700" + + - name: mount tmpfs to export + ansible.posix.mount: + name: /tmp/openvpn-export + src: tmpfs + fstype: tmpfs + opts: nodev,nosuid,size=1M + state: mounted + + - name: create export directory for client + ansible.builtin.file: + state: directory + path: "/tmp/openvpn-export/{{ openvpn_client_name }}" + mode: "0700" + + - name: copy files for export + ansible.builtin.copy: + remote_src: true + src: "{{ file }}" + dest: "/tmp/openvpn-export/{{ openvpn_client_name }}/" + mode: "0600" + loop: + - "{{ openvpn_directory }}/keys/server/ta.key" + - "{{ openvpn_easyrsa.directory 
}}/pki/private/{{ openvpn_client_name }}.key" + - "{{ openvpn_easyrsa.directory }}/pki/issued/{{ openvpn_client_name }}.crt" + - "{{ openvpn_easyrsa.directory }}/pki/ca.crt" + loop_control: + loop_var: file + + - name: create archive with certificate files + delegate_to: "{{ client.remote }}" + community.general.archive: + format: gz + path: "/tmp/openvpn-export/{{ openvpn_client_name }}" + dest: "/root/vpn-configs/{{ openvpn_client_name }}.tar.gz" + mode: "0600" + + - name: umount export tmpfs + ansible.posix.mount: + name: /tmp/openvpn-export + src: tmpfs + state: unmounted + + - name: copy created certificates from openvpn server + delegate_to: "{{ client.remote }}" + ansible.builtin.fetch: + src: "/root/vpn-configs/{{ openvpn_client_name }}.tar.gz" + dest: "{{ openvpn_local_tmp_directory }}/{{ openvpn_client_name }}.tar.gz" + flat: true + +- name: detect certificate archive on ansible controller + delegate_to: localhost + ansible.builtin.stat: + path: "{{ openvpn_local_tmp_directory }}/{{ openvpn_client_name }}.tar.gz" + register: _certificate_archive + +- name: unarchive certificates + ansible.builtin.unarchive: + src: "{{ openvpn_local_tmp_directory }}/{{ openvpn_client_name }}.tar.gz" + dest: "{{ openvpn_directory }}/keys/" + owner: "{{ openvpn_owner }}" + group: "{{ openvpn_group }}" + when: + - _certificate_archive.stat.exists + - not certificate_equal + +- name: fix rights for openvpn key file + ansible.builtin.file: + path: "{{ openvpn_directory }}/keys/{{ openvpn_client_name }}/{{ openvpn_client_name }}.key" + mode: "0600" + +- name: create openvpn client config + ansible.builtin.template: + src: openvpn/clients/client.conf.j2 + dest: "{{ openvpn_directory }}/client/{{ openvpn_client_name }}.conf" + mode: "0640" + notify: + - restart openvpn-client + +- name: openrc + when: + - ansible_facts.service_mgr | lower == "openrc" + - ansible_facts.distribution | lower == "archlinux" or + ansible_facts.os_family | lower | replace(' ', '') | lower == "artixlinux" + notify: + - restart openvpn-server + block: + - name: create link for openrc init + ansible.builtin.file: + src: "/etc/init.d/openvpn" + dest: "/etc/init.d/openvpn.{{ openvpn_client_name }}" + state: link + force: true + + - name: create link for openvpn config + ansible.builtin.file: + src: "{{ openvpn_directory }}/client/{{ openvpn_client_name }}.conf" + dest: "{{ openvpn_directory }}/{{ openvpn_client_name }}.conf" + owner: "{{ openvpn_owner }}" + group: "{{ openvpn_group }}" + state: link + force: true + +... 
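`static_client_instances.yml` above delegates the certificate export to `client.remote`, so that value must be the inventory name of the OpenVPN server host. A hedged sketch of `host_vars` for a managed static client; `vpn01` and the client name are illustrative:

```yaml
# Illustrative host_vars for a managed static client.
openvpn_type: client
openvpn_mobile_clients:
  - name: client1.example.com
    state: present
    remote: vpn01  # inventory hostname of the OpenVPN server (assumption)
    port: 1194
    proto: udp
    device: tun
    cert: client1.example.com.crt
    key: client1.example.com.key
```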
diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/openvpn/tasks/install.yml b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/openvpn/tasks/install.yml
new file mode 100644
index 0000000..997fb98
--- /dev/null
+++ b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/openvpn/tasks/install.yml
@@ -0,0 +1,87 @@
+---
+
+- name: install package
+  ansible.builtin.package:
+    name: "{{ openvpn_packages }}"
+    state: present
+  tags:
+    - openvpn
+    - openvpn_install
+
+- name: get openvpn version
+  bodsch.core.openvpn_version:
+  register: openvpn_version
+
+- name: create custom fact file
+  bodsch.core.facts:
+    name: openvpn
+    facts:
+      version: "{{ openvpn_version.version }}"
+
+- name: gathering facts now
+  ansible.builtin.setup:
+
+- name: handle easy-rsa
+  when:
+    - ansible_facts.os_family | lower == "debian"
+  block:
+    - name: link easy-rsa into /etc
+      ansible.builtin.file:
+        state: link
+        src: /usr/share/easy-rsa
+        dest: "{{ openvpn_easyrsa.directory }}"
+
+    - name: link easyrsa binary into /bin
+      ansible.builtin.file:
+        state: link
+        src: "{{ openvpn_easyrsa.directory }}/easyrsa"
+        dest: /bin/easyrsa
+
+    - name: create pki directory structure
+      when:
+        - ansible_facts.distribution | lower == "ubuntu"
+      block:
+        - name: create pki directory
+          ansible.builtin.file:
+            state: directory
+            path: "{{ openvpn_easyrsa.directory }}/pki"
+            mode: "0770"
+
+        - name: create pki directory structure
+          ansible.builtin.file:
+            state: directory
+            path: "{{ openvpn_easyrsa.directory }}/pki/{{ item }}"
+            mode: "0700"
+          loop:
+            - certs_by_serial
+            - issued
+            - private
+            - reqs
+            - revoked
+
+        - name: create symlink for x509-types
+          ansible.builtin.file:
+            state: link
+            src: "{{ openvpn_easyrsa.directory }}/x509-types"
+            dest: /usr/bin/x509-types
+          when:
+            - ansible_facts.distribution_major_version == "20"
+
+- name: openrc
+  when:
+    - ansible_facts.service_mgr | lower == "openrc"
+    - not openvpn_type == "server"
+  block:
+    - name: copy up.sh to system
+      ansible.builtin.copy:
+        src: up.sh
+        dest: /etc/openvpn/
+        mode: "0750"
+
+    - name: copy down.sh to system
+      ansible.builtin.copy:
+        src: down.sh
+        dest: /etc/openvpn/
+        mode: "0750"
+
+...
diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/openvpn/tasks/main.yml b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/openvpn/tasks/main.yml
new file mode 100644
index 0000000..e6c6b41
--- /dev/null
+++ b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/openvpn/tasks/main.yml
@@ -0,0 +1,15 @@
+---
+
+- name: prepare
+  ansible.builtin.include_tasks: prepare.yml
+
+- name: install
+  ansible.builtin.include_tasks: install.yml
+
+- name: configure
+  ansible.builtin.include_tasks: configure.yml
+
+- name: service
+  ansible.builtin.include_tasks: service.yml
+
+...
diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/openvpn/tasks/prepare.yml b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/openvpn/tasks/prepare.yml
new file mode 100644
index 0000000..8a61ce8
--- /dev/null
+++ b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/openvpn/tasks/prepare.yml
@@ -0,0 +1,46 @@
+---
+
+- name: include OS specific configuration
+  ansible.builtin.include_vars: "{{ lookup('first_found', params) }}"
+  vars:
+    params:
+      paths:
+        - "vars"
+      files:
+        # eg. 
debian-10 / ubuntu-20 / centos-8 / oraclelinux-8 + - "{{ ansible_facts.distribution | lower }}-{{ ansible_facts.distribution_major_version }}.yml" + # eg. archlinux-systemd / archlinux-openrc + - "{{ ansible_facts.distribution | lower }}-{{ ansible_facts.service_mgr | lower }}.yml" + # eg. debian / ubuntu / centos / oraclelinux + - "{{ ansible_facts.distribution | lower }}.yml" + # eg. redhat / debian + - "{{ ansible_facts.os_family | lower }}.yml" + # artixlinux + - "{{ ansible_facts.os_family | lower | replace(' ', '') }}.yml" + - default.yml + skip: true + +- name: detect docker environment + ansible.builtin.set_fact: + is_docker_guest: "{{ + ansible_facts.virtualization_role | default('host') == 'guest' and + ansible_facts.virtualization_type | default('none') == 'docker' }}" + +- name: install dependencies + ansible.builtin.package: + name: "{{ openvpn_dependencies }}" + state: present + +- name: gathering facts now + ansible.builtin.setup: + +- name: merge openvpn configuration between defaults and custom + ansible.builtin.set_fact: + openvpn_service: "{{ openvpn_defaults_service | combine(openvpn_service, recursive=True) }}" + openvpn_logging: "{{ openvpn_defaults_logging | combine(openvpn_logging, recursive=True) }}" + openvpn_easyrsa: "{{ openvpn_defaults_easyrsa | combine(openvpn_easyrsa, recursive=True) }}" + openvpn_certificate: "{{ openvpn_defaults_certificate | combine(openvpn_certificate, recursive=True) }}" + openvpn_server: "{{ openvpn_defaults_server | combine(openvpn_server, recursive=True) }}" + openvpn_push: "{{ openvpn_defaults_push | combine(openvpn_push, recursive=True) }}" + +... diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/openvpn/tasks/service.yml b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/openvpn/tasks/service.yml new file mode 100644 index 0000000..b8bd940 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/openvpn/tasks/service.yml @@ -0,0 +1,17 @@ +--- + +- name: start and enable service + ansible.builtin.service: + name: "{{ openvpn_service_name }}" + state: "{{ openvpn_service.state | default('started') }}" + enabled: "{{ openvpn_service.enabled | default(true) | bool }}" + ignore_errors: "{{ 'true' if ansible_facts.service_mgr | lower == 'openrc' else 'false' }}" + failed_when: false + tags: + - openvpn + - start-enable-service + when: + - openvpn_type == "server" or + (openvpn_client_name is defined and openvpn_client_name | string | length > 0) + +... 
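`service.yml` above takes its state from `openvpn_service`, so start-up behaviour can be overridden per host. A small, hedged example: keep the unit enabled but leave it stopped during provisioning (values are illustrative):

```yaml
# Illustrative override for openvpn_service.
openvpn_service:
  state: stopped
  enabled: true
```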
diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/openvpn/templates/easy-rsa/openssl-easyrsa.cnf.j2 b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/openvpn/templates/easy-rsa/openssl-easyrsa.cnf.j2 new file mode 100644 index 0000000..e3f0812 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/openvpn/templates/easy-rsa/openssl-easyrsa.cnf.j2 @@ -0,0 +1,142 @@ +#jinja2: trim_blocks: True, lstrip_blocks: True +# {{ ansible_managed }} + +# For use with Easy-RSA 3.1 and OpenSSL or LibreSSL + +RANDFILE = $ENV::EASYRSA_PKI/.rnd + +#################################################################### +[ ca ] +default_ca = CA_default # The default ca section + +#################################################################### +[ CA_default ] + +dir = $ENV::EASYRSA_PKI # Where everything is kept +certs = $dir # Where the issued certs are kept +crl_dir = $dir # Where the issued crl are kept +database = $dir/index.txt # database index file. +new_certs_dir = $dir/certs_by_serial # default place for new certs. + +certificate = $dir/ca.crt # The CA certificate +serial = $dir/serial # The current serial number +crl = $dir/crl.pem # The current CRL +private_key = $dir/private/ca.key # The private key +RANDFILE = $dir/.rand # private random number file + +x509_extensions = basic_exts # The extentions to add to the cert + +# This allows a V2 CRL. Ancient browsers don't like it, but anything Easy-RSA +# is designed for will. In return, we get the Issuer attached to CRLs. +crl_extensions = crl_ext + +default_days = $ENV::EASYRSA_CERT_EXPIRE # how long to certify for +default_crl_days = $ENV::EASYRSA_CRL_DAYS # how long before next CRL +default_md = $ENV::EASYRSA_DIGEST # use public key default MD +preserve = no # keep passed DN ordering + +# This allows to renew certificates which have not been revoked +unique_subject = no + +# A few difference way of specifying how similar the request should look +# For type CA, the listed attributes must be the same, and the optional +# and supplied fields are just that :-) +policy = policy_anything + +# For the 'anything' policy, which defines allowed DN fields +[ policy_anything ] +countryName = optional +stateOrProvinceName = optional +localityName = optional +organizationName = optional +organizationalUnitName = optional +commonName = supplied +name = optional +emailAddress = optional + +#################################################################### +# Easy-RSA request handling +# We key off $DN_MODE to determine how to format the DN +[ req ] +default_bits = $ENV::EASYRSA_KEY_SIZE +default_keyfile = privkey.pem +default_md = $ENV::EASYRSA_DIGEST +distinguished_name = $ENV::EASYRSA_DN +x509_extensions = easyrsa_ca # The extentions to add to the self signed cert + +# A placeholder to handle the $EXTRA_EXTS feature: +#%EXTRA_EXTS% # Do NOT remove or change this line as $EXTRA_EXTS support requires it + +#################################################################### +# Easy-RSA DN (Subject) handling + +# Easy-RSA DN for cn_only support: +[ cn_only ] +commonName = Common Name (eg: your user, host, or server name) +commonName_max = 64 +commonName_default = $ENV::EASYRSA_REQ_CN + +# Easy-RSA DN for org support: +[ org ] +countryName = Country Name (2 letter code) +countryName_default = $ENV::EASYRSA_REQ_COUNTRY +countryName_min = 2 +countryName_max = 2 + +stateOrProvinceName = State or Province Name (full name) +stateOrProvinceName_default = $ENV::EASYRSA_REQ_PROVINCE + 
+localityName = Locality Name (eg, city) +localityName_default = $ENV::EASYRSA_REQ_CITY + +0.organizationName = Organization Name (eg, company) +0.organizationName_default = $ENV::EASYRSA_REQ_ORG + +organizationalUnitName = Organizational Unit Name (eg, section) +organizationalUnitName_default = $ENV::EASYRSA_REQ_OU + +commonName = Common Name (eg: your user, host, or server name) +commonName_max = 64 +commonName_default = $ENV::EASYRSA_REQ_CN + +emailAddress = Email Address +emailAddress_default = $ENV::EASYRSA_REQ_EMAIL +emailAddress_max = 64 + +#################################################################### +# Easy-RSA cert extension handling + +# This section is effectively unused as the main script sets extensions +# dynamically. This core section is left to support the odd usecase where +# a user calls openssl directly. +[ basic_exts ] +basicConstraints = CA:FALSE +subjectKeyIdentifier = hash +authorityKeyIdentifier = keyid,issuer:always + +# The Easy-RSA CA extensions +[ easyrsa_ca ] + +# PKIX recommendations: + +subjectKeyIdentifier = hash +authorityKeyIdentifier = keyid:always,issuer:always + +# This could be marked critical, but it's nice to support reading by any +# broken clients who attempt to do so. +basicConstraints = CA:true + +# Limit key usage to CA tasks. If you really want to use the generated pair as +# a self-signed cert, comment this out. +keyUsage = cRLSign, keyCertSign + +# nsCertType omitted by default. Let's try to let the deprecated stuff die. +# nsCertType = sslCA + +# CRL extensions. +[ crl_ext ] + +# Only issuerAltName and authorityKeyIdentifier make any sense in a CRL. + +# issuerAltName=issuer:copy +authorityKeyIdentifier = keyid:always,issuer:always diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/openvpn/templates/easy-rsa/vars.j2 b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/openvpn/templates/easy-rsa/vars.j2 new file mode 100644 index 0000000..00ef13f --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/openvpn/templates/easy-rsa/vars.j2 @@ -0,0 +1,173 @@ +#jinja2: trim_blocks: True, lstrip_blocks: True +# {{ ansible_managed }} +# Easy-RSA 3 parameter settings + + +# A little housekeeping: DON'T EDIT THIS SECTION +# +# Easy-RSA 3.x doesn't source into the environment directly. +# Complain if a user tries to do this: +if [ -z "$EASYRSA_CALLER" ] +then + echo "You appear to be sourcing an Easy-RSA 'vars' file." >&2 + echo "This is no longer necessary and is disallowed. See the section called" >&2 + echo "'How to use this file' near the top comments for more details." >&2 + return 1 +fi + +# DO YOUR EDITS BELOW THIS POINT + +# This variable should point to the top level of the easy-rsa tree. By default, +# this is taken to be the directory you are currently in. + +#set_var EASYRSA "$PWD" +set_var EASYRSA "{{ openvpn_easyrsa.directory }}" + +set_var EASYRSA_OPENSSL "openssl" + + +# Edit this variable to point to your soon-to-be-created key directory. +# +# WARNING: init-pki will do a rm -rf on this directory so make sure you define +# it correctly! (Interactive mode will prompt before acting.) + +set_var EASYRSA_PKI "$EASYRSA/pki" + +# Define X509 DN mode. +# This is used to adjust what elements are included in the Subject field as the DN +# (this is the "Distinguished Name.") +# Note that in cn_only mode the Organizational fields further below aren't used. 
+# +# Choices are: +# cn_only - use just a CN value +# org - use the "traditional" Country/Province/City/Org/OU/email/CN format + +set_var EASYRSA_DN "{{ openvpn_easyrsa.x509_dn_mode }}" + +# Organizational fields (used with 'org' mode and ignored in 'cn_only' mode.) +# These are the default values for fields which will be placed in the +# certificate. Don't leave any of these fields blank, although interactively +# you may omit any specific field by typing the "." symbol (not valid for +# email.) + +set_var EASYRSA_REQ_COUNTRY "{{ openvpn_certificate.req_country }}" +set_var EASYRSA_REQ_PROVINCE "{{ openvpn_certificate.req_province }}" +set_var EASYRSA_REQ_CITY "{{ openvpn_certificate.req_city }}" +set_var EASYRSA_REQ_ORG "{{ openvpn_certificate.req_org }}" +set_var EASYRSA_REQ_EMAIL "{{ openvpn_certificate.req_email }}" +set_var EASYRSA_REQ_OU "{{ openvpn_certificate.req_ou }}" + +# Choose a size in bits for your keypairs. The recommended value is 2048. Using +# 2048-bit keys is considered more than sufficient for many years into the +# future. Larger keysizes will slow down TLS negotiation and make key/DH param +# generation take much longer. Values up to 4096 should be accepted by most +# software. Only used when the crypto alg is rsa (see below.) + +set_var EASYRSA_KEY_SIZE {{ openvpn_easyrsa.key_size }} + +# The default crypto mode is rsa; ec can enable elliptic curve support. +# Note that not all software supports ECC, so use care when enabling it. +# Choices for crypto alg are: (each in lower-case) +# * rsa +# * ec +# * ed +{% if openvpn_easyrsa.crypto_mode in ["rsa","ec","ed"] %} +set_var EASYRSA_ALGO "{{ openvpn_easyrsa.crypto_mode }}" +{% endif %} +# Define the named curve, used in ec mode only: +{% if openvpn_easyrsa.crypto_mode == "ec" %} +set_var EASYRSA_CURVE "{{ openvpn_easyrsa.rsa_curve }}" +{% endif %} + +# In how many days should the root CA key expire? + +set_var EASYRSA_CA_EXPIRE {{ openvpn_easyrsa.ca_expire }} + +# In how many days should certificates expire? + +set_var EASYRSA_CERT_EXPIRE {{ openvpn_easyrsa.cert_expire }} + +# How many days until the next CRL publish date? Note that the CRL can still be +# parsed after this timeframe passes. It is only used for an expected next +# publication date. + +set_var EASYRSA_CRL_DAYS {{ openvpn_easyrsa.crl_days }} + +# Support deprecated "Netscape" extensions? (choices "yes" or "no".) The default +# is "no" to discourage use of deprecated extensions. If you require this +# feature to use with --ns-cert-type, set this to "yes" here. This support +# should be replaced with the more modern --remote-cert-tls feature. If you do +# not use --ns-cert-type in your configs, it is safe (and recommended) to leave +# this defined to "no". When set to "yes", server-signed certs get the +# nsCertType=server attribute, and also get any NS_COMMENT defined below in the +# nsComment field. + +set_var EASYRSA_NS_SUPPORT "no" + +# When NS_SUPPORT is set to "yes", this field is added as the nsComment field. +# Set this blank to omit it. With NS_SUPPORT set to "no" this field is ignored. + +#set_var EASYRSA_NS_COMMENT "Easy-RSA Generated Certificate" + +# A temp file used to stage cert extensions during signing. The default should +# be fine for most users; however, some users might want an alternative under a +# RAM-based FS, such as /dev/shm or /tmp on some systems. + +#set_var EASYRSA_TEMP_FILE "$EASYRSA_PKI/extensions.temp" + +# !! +# NOTE: ADVANCED OPTIONS BELOW THIS POINT +# PLAY WITH THEM AT YOUR OWN RISK +# !! 
+ +# Broken shell command aliases: If you have a largely broken shell that is +# missing any of these POSIX-required commands used by Easy-RSA, you will need +# to define an alias to the proper path for the command. The symptom will be +# some form of a "command not found" error from your shell. This means your +# shell is BROKEN, but you can hack around it here if you really need. These +# shown values are not defaults: it is up to you to know what you are doing if +# you touch these. +# +#alias awk="/alt/bin/awk" +#alias cat="/alt/bin/cat" + +# X509 extensions directory: +# If you want to customize the X509 extensions used, set the directory to look +# for extensions here. Each cert type you sign must have a matching filename, +# and an optional file named "COMMON" is included first when present. Note that +# when undefined here, default behaviour is to look in $EASYRSA_PKI first, then +# fallback to $EASYRSA for the "x509-types" dir. You may override this +# detection with an explicit dir here. +# +#set_var EASYRSA_EXT_DIR "$EASYRSA/x509-types" + +# OpenSSL config file: +# If you need to use a specific openssl config file, you can reference it here. +# Normally this file is auto-detected from a file named openssl-easyrsa.cnf from the +# EASYRSA_PKI or EASYRSA dir (in that order.) NOTE that this file is Easy-RSA +# specific and you cannot just use a standard config file, so this is an +# advanced feature. + +set_var EASYRSA_SSL_CONF "$EASYRSA/{{ openvpn_easyrsa.openssl_config }}" + +# Default CN: +# This is best left alone. Interactively you will set this manually, and BATCH +# callers are expected to set this themselves. + +#set_var EASYRSA_REQ_CN "ChangeMe" + +# Cryptographic digest to use. +# Do not change this default unless you understand the security implications. +# Valid choices include: md5, sha1, sha256, sha224, sha384, sha512 +{% set rsa_digest = "sha512" %} +{% if openvpn_easyrsa.digest in ["sha256", "sha224", "sha384", "sha512"] %} + {% set rsa_digest = openvpn_easyrsa.digest %} +{% endif %} +set_var EASYRSA_DIGEST "{{ rsa_digest }}" + +# Batch mode. Leave this disabled unless you intend to call Easy-RSA explicitly +# in batch mode without any user input, confirmation on dangerous operations, +# or most output. Setting this to any non-blank string enables batch mode. + +#set_var EASYRSA_BATCH "" + diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/openvpn/templates/helper/copy-config-to-sudo-user-home.sh.j2 b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/openvpn/templates/helper/copy-config-to-sudo-user-home.sh.j2 new file mode 100755 index 0000000..16b9b8a --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/openvpn/templates/helper/copy-config-to-sudo-user-home.sh.j2 @@ -0,0 +1,51 @@ +#!/usr/bin/env bash +set -e +# +# ./copy-vpn-config-to-sudo-user-home.sh ( | ) [ ] +# +# copies openvpn config file to sudo user home directory +# and change owner and group +# so that the sudo user can easily scp the file. +# destination file is set to 0600. +# + +if [[ -z "$SUDO_USER" ]] +then + echo "environment variable SUDO_USER not set - only sudo'ed shells supported" >&2 + exit 2 +fi + +if [[ -z "$1" ]] +then + echo "No config name given - please specify one as firstname_lastname or complete file name!" 
+    exit 1
+fi
+name="$1"
+
+vpn_config_dir="/root/vpn-configs"
+
+# create-vpn-user.sh stores configs in this directory as <name>.ovpn
+alt_src="${vpn_config_dir}/${name}.ovpn"
+if [[ -n "${name##*/*}" && -f "$alt_src" ]]
+then
+    echo "openvpn config file found in $vpn_config_dir: $alt_src"
+    src="$alt_src"
+else
+    src="$name"
+    if [[ ! -f "$src" ]]; then
+        echo "file $src not found" >&2
+        exit 3
+    fi
+fi
+
+if [[ -n "$2" ]]
+then
+    dest_dir="$2"
+else
+    dest_dir="$( getent passwd "$SUDO_USER" | cut -d: -f6 )"
+fi
+
+group="$( id -gn "$SUDO_USER" )"
+
+echo "copy to $dest_dir"
+install -m 0600 -o "$SUDO_USER" -g "$group" "$src" "$dest_dir/."
+
diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/openvpn/templates/helper/create-vpn-user.sh.j2 b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/openvpn/templates/helper/create-vpn-user.sh.j2 new file mode 100755 index 0000000..145c14d --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/openvpn/templates/helper/create-vpn-user.sh.j2 @@ -0,0 +1,50 @@
+#!/usr/bin/env bash
+set -e
+#
+# create-vpn-user.sh <firstname_lastname> [ <dest-dir> ]
+#
+# creates certificate/key for OpenVPN (request + sign) and an openvpn client
+# config for the specified <firstname_lastname>
+#
+# the openvpn client config will be created in /root/vpn-configs
+# or in <dest-dir> (if specified as second argument)
+#
+# <dest-dir> will be created, if it does not exist.
+#
+# the certificate name will be <firstname_lastname>
+#
+# no questions will be asked (no confirmation etc.)
+#
+
+if [[ -z "$1" ]]; then
+    echo "No user name given. Please specify one as firstname_lastname!" >&2
+    exit 1
+fi
+user="$1"
+
+dest_dir="/root/vpn-configs"
+if [[ -n "$2" ]]
+then
+    dest_dir="$2"
+fi
+if [[ ! -d "$dest_dir" ]]
+then
+    install -d -m 0700 "$dest_dir"
+fi
+
+name="${user}"
+
+pushd {{ openvpn_easyrsa.directory }}
+
+#{{ openvpn_easyrsa.directory }}/easyrsa --batch --req-cn="${name}" gen-req "${name}" nopass
+#{{ openvpn_easyrsa.directory }}/easyrsa --batch sign-req client "${name}"
+easyrsa --batch build-client-full "${name}" nopass
+
+# openvpn client configuration from template
+umask 077
+KEY="$( cat {{ openvpn_easyrsa.directory }}/pki/private/"${name}.key" )"
+CERT="$( cat {{ openvpn_easyrsa.directory }}/pki/issued/"${name}.crt" | perl -n -e 'print if /^-----BEGIN CERTIFICATE-----/ .. eof' )"
+export CERT KEY
+envsubst '$CERT $KEY' < /etc/openvpn/client.ovpn.template > "${dest_dir}/${name}.ovpn"
+
+popd
diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/openvpn/templates/helper/revoke-vpn-user.sh.j2 b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/openvpn/templates/helper/revoke-vpn-user.sh.j2 new file mode 100755 index 0000000..ebe3bed --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/openvpn/templates/helper/revoke-vpn-user.sh.j2 @@ -0,0 +1,32 @@
+#!/usr/bin/env bash
+set -e
+
+if [[ -z "$1" ]]
+then
+    echo "No user name given. Please specify one as firstname_lastname!" >&2
+    echo >&2
+    echo "To force immediate use of the new CRL and reset existing connections, restart the openvpn server." >&2
+    echo "Otherwise the CRL is re-read any time a new client connects or an existing client renegotiates the SSL/TLS connection." >&2
+    echo >&2
+    exit 1
+fi
+user="$1"
+
+name="${user}"
+
+pushd {{ openvpn_easyrsa.directory }}
+
+easyrsa --batch revoke "${name}"
+
+# recreate CRL
+easyrsa gen-crl
+
+chmod a+rX {{ openvpn_easyrsa.directory }}/pki {{ openvpn_easyrsa.directory }}/pki/crl.pem
+
+echo "don't forget to restart your openvpn server!"
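+
+# lifecycle sketch for the three helper scripts in this role (the user name
+# "john_doe" is an assumption):
+#   sudo ./create-vpn-user.sh john_doe                 # issue cert/key + client config
+#   sudo ./copy-config-to-sudo-user-home.sh john_doe   # hand the .ovpn to the admin user
+#   sudo ./revoke-vpn-user.sh john_doe                 # revoke and regenerate the CRL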
+
+# restart openvpn server to immediately re-read new CRL
+#systemctl restart openvpn-server@server
+
+popd
+
diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/openvpn/templates/init/systemd/openvpn-server@.service b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/openvpn/templates/init/systemd/openvpn-server@.service new file mode 100644 index 0000000..699edbe --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/openvpn/templates/init/systemd/openvpn-server@.service @@ -0,0 +1,66 @@
+
+# arch
+# openvpn --version
+# OpenVPN 2.6.4 [git:makepkg/b4f749f14a8edc75+] x86_64-pc-linux-gnu [SSL (OpenSSL)] [LZO] [LZ4] [EPOLL] [PKCS11] [MH/PKTINFO] [AEAD] [DCO] built on May 11 2023
+# library versions: OpenSSL 3.0.8 7 Feb 2023, LZO 2.10
+
+[Unit]
+Description=OpenVPN service for %I
+After=syslog.target network-online.target
+Wants=network-online.target
+Documentation=man:openvpn(8)
+Documentation=https://community.openvpn.net/openvpn/wiki/Openvpn24ManPage
+Documentation=https://community.openvpn.net/openvpn/wiki/HOWTO
+
+[Service]
+Type=notify
+PrivateTmp=true
+WorkingDirectory=/etc/openvpn/server
+ExecStart=/usr/bin/openvpn --status %t/openvpn-server/status-%i.log --status-version 2 --suppress-timestamps --config %i.conf
+User=openvpn
+Group=network
+AmbientCapabilities=CAP_IPC_LOCK CAP_NET_ADMIN CAP_NET_BIND_SERVICE CAP_NET_RAW CAP_SETGID CAP_SETUID CAP_SETPCAP CAP_SYS_CHROOT CAP_DAC_OVERRIDE CAP_AUDIT_WRITE
+CapabilityBoundingSet=CAP_IPC_LOCK CAP_NET_ADMIN CAP_NET_BIND_SERVICE CAP_NET_RAW CAP_SETGID CAP_SETUID CAP_SETPCAP CAP_SYS_CHROOT CAP_DAC_OVERRIDE CAP_AUDIT_WRITE
+LimitNPROC=10
+DeviceAllow=/dev/null rw
+DeviceAllow=/dev/net/tun rw
+ProtectSystem=true
+ProtectHome=true
+KillMode=process
+RestartSec=5s
+Restart=on-failure
+
+[Install]
+WantedBy=multi-user.target
+
+# debian variant, kept commented for reference:
+# openvpn --version
+# OpenVPN 2.5.1 x86_64-pc-linux-gnu [SSL (OpenSSL)] [LZO] [LZ4] [EPOLL] [PKCS11] [MH/PKTINFO] [AEAD] built on May 14 2021
+# library versions: OpenSSL 1.1.1n 15 Mar 2022, LZO 2.10
+# Originally developed by James Yonan
+#
+# [Unit]
+# Description=OpenVPN service for %I
+# After=network-online.target
+# Wants=network-online.target
+# Documentation=man:openvpn(8)
+# Documentation=https://community.openvpn.net/openvpn/wiki/Openvpn24ManPage
+# Documentation=https://community.openvpn.net/openvpn/wiki/HOWTO
+#
+# [Service]
+# Type=notify
+# PrivateTmp=true
+# WorkingDirectory=/etc/openvpn/server
+# ExecStart=/usr/sbin/openvpn --status %t/openvpn-server/status-%i.log --status-version 2 --suppress-timestamps --config %i.conf
+# CapabilityBoundingSet=CAP_IPC_LOCK CAP_NET_ADMIN CAP_NET_BIND_SERVICE CAP_NET_RAW CAP_SETGID CAP_SETUID CAP_SYS_CHROOT CAP_DAC_OVERRIDE CAP_AUDIT_WRITE
+# LimitNPROC=10
+# DeviceAllow=/dev/null rw
+# DeviceAllow=/dev/net/tun rw
+# ProtectSystem=true
+# ProtectHome=true
+# KillMode=process
+# RestartSec=5s
+# Restart=on-failure
+#
+# [Install]
+# WantedBy=multi-user.target
diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/openvpn/templates/init/systemd/override.conf.j2 b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/openvpn/templates/init/systemd/override.conf.j2 new file mode 100644 index 0000000..7030034 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/openvpn/templates/init/systemd/override.conf.j2 @@ -0,0 +1,9 @@
+[Unit]
+
+After =
+Wants =
+Requires =
+
+After = network-online.target {{ openvpn_systemd.requires_services | join(' ') }}
+Wants = network-online.target {{ openvpn_systemd.requires_services | join(' ') }}
+Requires = {{ openvpn_systemd.requires_services | join(' ') }}
diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/openvpn/templates/openvpn/client_users/client.ovpn.template.j2 b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/openvpn/templates/openvpn/client_users/client.ovpn.template.j2 new file mode 100644 index 0000000..f22a2a4 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/openvpn/templates/openvpn/client_users/client.ovpn.template.j2 @@ -0,0 +1,51 @@
+#jinja2: trim_blocks: True, lstrip_blocks: True
+# {{ ansible_managed }}
+{% set external_ip = ansible_facts.default_ipv4.address %}
+{% if openvpn_server.external_ip is defined and
+      openvpn_server.external_ip | string | length > 0 %}
+  {% set external_ip = openvpn_server.external_ip %}
+{% endif %}
+{% set _valid_attr = ['udp','tcp'] %}
+{% set _proto = openvpn_server.proto if openvpn_server.proto in _valid_attr else 'udp' %}
+
+client
+
+remote {{ external_ip }} {{ openvpn_server.port }} {{ _proto }}
+
+proto tcp-client
+dev {{ openvpn_server.device }}
+
+remote-cert-tls server
+key-direction 1
+keepalive 10 120
+
+tun-mtu {{ openvpn_mtu | default('1500') }}
+mssfix 1360
+persist-key
+persist-tun
+
+cipher {{ openvpn_server.cipher }}
+
+float
+resolv-retry infinite
+nobind
+
+verb {{ openvpn_logging.verbosity }}
+mute {{ openvpn_logging.mute }}
+
+<ca>
+{{ openvpn_ca_cert.content | b64decode }}
+</ca>
+
+{# $CERT and $KEY stay literal here; create-vpn-user.sh fills them in via envsubst #}
+<cert>
+$CERT
+</cert>
+
+<key>
+$KEY
+</key>
+
+<tls-auth>
+{{ openvpn_ta_key.content | b64decode | regex_replace('(?m)^#.*?\n', '') }}
+</tls-auth>
+
diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/openvpn/templates/openvpn/clients/client.conf.j2 b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/openvpn/templates/openvpn/clients/client.conf.j2 new file mode 100644 index 0000000..150f07d --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/openvpn/templates/openvpn/clients/client.conf.j2 @@ -0,0 +1,45 @@
+#jinja2: trim_blocks: True, lstrip_blocks: True
+# {{ ansible_managed }}
+
+client
+
+remote {{ client.remote }}
+port {{ client.port }}
+proto {{ client.proto }}
+dev {{ client.device }}
+
+ca {{ openvpn_directory }}/keys/{{ openvpn_client_name }}/ca.crt
+cert {{ openvpn_directory }}/keys/{{ openvpn_client_name }}/{{ client.cert }}
+key {{ openvpn_directory }}/keys/{{ openvpn_client_name }}/{{ client.key }}
+auth-nocache
+
+{% if client.tls_auth.enabled is defined and
+      client.tls_auth.enabled %}
+tls-client
+tls-auth {{ openvpn_directory }}/keys/{{ openvpn_client_name }}/ta.key 1
+{% else %}
+remote-cert-tls server
+{% endif %}
+
+ping 20
+ping-restart 45
+ping-timer-rem
+
+tun-mtu {{ openvpn_mtu | default('1500') }}
+persist-tun
+persist-key
+
+cipher {{ openvpn_server.cipher }}
+
+{% if openvpn_logging.append | default('false') | bool %}
+log-append {{ openvpn_logging.directory }}/{{ openvpn_logging.file }}
+{% else %}
+log {{ openvpn_logging.directory }}/{{ openvpn_logging.file }}
+{% endif %}
+{% if openvpn_logging.status is defined and
+      openvpn_logging.status | string | length > 0 %}
+status {{ openvpn_logging.directory }}/{{ openvpn_logging.status }}
+{% endif %}
+
+verb {{ openvpn_logging.verbosity }}
+mute {{ openvpn_logging.mute }}
diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/openvpn/templates/openvpn/server/ipp.txt.j2 b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/openvpn/templates/openvpn/server/ipp.txt.j2 new file mode 100644 index 0000000..3421c21 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/openvpn/templates/openvpn/server/ipp.txt.j2 @@ -0,0 +1,11 @@
+#jinja2: trim_blocks: True, lstrip_blocks: True
+# {{ ansible_managed }}
+{% for opp in openvpn_persistent_pool | default([]) %}
+  {% if opp.state is defined and opp.state == "present" and
+        opp.static_ip is defined and
+        opp.static_ip | string | length > 0 and
+        (opp.static_ip | ansible.utils.ipv4 or
+         opp.static_ip | ansible.utils.ipv6) %}
+{{ opp.name }},{{ opp.static_ip }}
+  {% endif %}
+{% endfor %}
diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/openvpn/templates/openvpn/server/server.conf.j2 b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/openvpn/templates/openvpn/server/server.conf.j2 new file mode 100644 index 0000000..339d425 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/openvpn/templates/openvpn/server/server.conf.j2 @@ -0,0 +1,142 @@
+#jinja2: trim_blocks: True, lstrip_blocks: True
+# {{ ansible_managed }}
+# Which local IP address should OpenVPN
+# listen on? (optional)
+{% if openvpn_server.listen_ip is defined and
+      openvpn_server.listen_ip | string | length > 0 %}
+local {{ openvpn_server.listen_ip }}
+{% endif %}
+
+port {{ openvpn_server.port }}
+{% set _valid_attr = ['udp','tcp'] %}
+{% set _proto = openvpn_server.proto if openvpn_server.proto in _valid_attr else 'udp' %}
+proto {{ _proto }}
+{% set _valid_attr = ['tun','tap'] %}
+{% set _device = openvpn_server.device if openvpn_server.device in _valid_attr else 'tun' %}
+dev {{ _device }}
+
+ca {{ openvpn_directory }}/keys/server/ca.crt
+cert {{ openvpn_directory }}/keys/server/{{ openvpn_certificate.req_cn_server }}.crt
+key {{ openvpn_directory }}/keys/server/{{ openvpn_certificate.req_cn_server }}.key
+dh {{ openvpn_directory }}/keys/server/dh{{ openvpn_diffie_hellman_keysize }}.pem
+{% if openvpn_server.tls_auth.enabled is defined and
+      openvpn_server.tls_auth.enabled %}
+tls-server
+tls-auth {{ openvpn_directory }}/keys/server/ta.key 0 # This file is secret
+{% endif %}
+
+topology subnet
+
+{% if openvpn_subnet is defined and
+      openvpn_subnet.ip is defined and
+      openvpn_subnet.ip | string | length > 0 and
+      openvpn_subnet.netmask is defined and
+      openvpn_subnet.netmask | string | length > 0 %}
+server {{ openvpn_subnet.ip }} {{ openvpn_subnet.netmask }}
+
+{% if openvpn_persistent_pool is defined and
+      openvpn_persistent_pool | count > 0 %}
+# ifconfig-pool-persist {{ openvpn_directory }}/ipp.txt 30
+client-config-dir {{ openvpn_directory }}/client
+{% endif %}
+
+{% else %}
+# NO SERVER NETWORK HAS BEEN DEFINED!
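+{# a minimal "openvpn_subnet" definition as consumed above -- values assumed:
+   openvpn_subnet:
+     ip: 10.8.0.0
+     netmask: 255.255.255.0
+#}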
+{% endif %}
+{% if openvpn_push is defined and
+      openvpn_push | bodsch.core.type == "dict" and
+      openvpn_push | count > 0 %}
+  {% set _push_route_gateway = openvpn_push.route_gateway | default('') %}
+  {% set _push_routes = openvpn_push.routes | default([]) %}
+  {% set _push_dns = openvpn_push.dhcp_options.dns | default([]) %}
+  {% set _push_domains = openvpn_push.dhcp_options.domains | default([]) %}
+  {% set _push_sndbuf = openvpn_push.sndbuf | default('') %}
+  {% set _push_rcvbuf = openvpn_push.rcvbuf | default('') %}
+
+  {% if _push_routes | count > 0 %}
+    {% for e in _push_routes %}
+push "route {{ e.net }} {{ e.netmask }} {{ e.gateway | default('') }}"
+    {% endfor %}
+  {% endif %}
+  {% if _push_route_gateway | string | length > 0 %}
+push "redirect-gateway {{ _push_route_gateway }}"
+  {% endif %}
+  {% if _push_dns | count > 0 %}
+    {% for e in _push_dns %}
+push "dhcp-option DNS {{ e }}"
+    {% endfor %}
+  {% endif %}
+  {% if _push_domains | count > 0 %}
+    {% for e in _push_domains %}
+push "dhcp-option DOMAIN {{ e }}"
+    {% endfor %}
+  {% endif %}
+  {% if _push_sndbuf | string | length > 0 %}
+push "sndbuf {{ _push_sndbuf }}"
+  {% endif %}
+  {% if _push_rcvbuf | string | length > 0 %}
+push "rcvbuf {{ _push_rcvbuf }}"
+  {% endif %}
+{% endif %}
+
+## sndbuf 393216
+## rcvbuf 393216
+
+{% if openvpn_mtu is defined and
+      openvpn_mtu | string | length > 0 %}
+tun-mtu {{ openvpn_mtu }}
+{% endif %}
+{% if openvpn_mssfix is defined and
+      openvpn_mssfix | string | length > 0 %}
+mssfix {{ openvpn_mssfix }}
+{% endif %}
+persist-key
+persist-tun
+
+{% if openvpn_keepalive is defined and
+      openvpn_keepalive.interval is defined and
+      openvpn_keepalive.interval | string | length > 0 and
+      openvpn_keepalive.timeout is defined and
+      openvpn_keepalive.timeout | string | length > 0 %}
+keepalive {{ openvpn_keepalive.interval }} {{ openvpn_keepalive.timeout }}
+{% endif %}
+cipher {{ openvpn_server.cipher }}
+
+{#
+# Enable compression on the VPN link and push the
+# option to the client (v2.4+ only, for earlier
+# versions see below)
+compress lz4-v2
+push "compress lz4-v2"
+allow-compression yes
+#}
+max-clients {{ openvpn_server.max_clients }}
+
+{% if openvpn_server.user is defined and
+      openvpn_server.user | string | length > 0 and
+      openvpn_server.group is defined and
+      openvpn_server.group | string | length > 0 and
+      openvpn_version.version is version('2.6.0', '<') %}
+user {{ openvpn_server.user | default('nobody') }}
+group {{ openvpn_server.group | default('nobody') }}
+
+{% endif %}
+{% if openvpn_logging.append | default('false') | bool %}
+log-append {{ openvpn_logging.directory }}/{{ openvpn_logging.file }}
+{% else %}
+log {{ openvpn_logging.directory }}/{{ openvpn_logging.file }}
+{% endif %}
+{% if openvpn_logging.status is defined and
+      openvpn_logging.status | string | length > 0 %}
+status {{ openvpn_logging.directory }}/{{ openvpn_logging.status }}
+{% endif %}
+
+verb {{ openvpn_logging.verbosity }}
+mute {{ openvpn_logging.mute }}
+
+{% if _proto == 'udp' %}
+explicit-exit-notify 1
+{% endif %}
+crl-verify {{ openvpn_easyrsa.directory }}/pki/crl.pem
diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/openvpn/templates/openvpn/server/static-client.j2 b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/openvpn/templates/openvpn/server/static-client.j2 new file mode 100644 index 0000000..1a934ba --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/openvpn/templates/openvpn/server/static-client.j2 @@ -0,0 +1 @@
+ifconfig-push {{
item.static_ip }} {{ item.netmask | default('255.255.255.0') }} diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/openvpn/vars/archlinux-openrc.yml b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/openvpn/vars/archlinux-openrc.yml new file mode 100644 index 0000000..761d7d3 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/openvpn/vars/archlinux-openrc.yml @@ -0,0 +1,42 @@ +--- + +openvpn_dependencies: + - easy-rsa + - procps-ng + - rsync + - sshpass + - python-jinja + +openvpn_packages: + - openvpn + - openvpn-openrc + +openvpn_owner: openvpn +openvpn_group: network + +openvpn_service_name: openvpn + +# openvpn_defaults_server: +# # network interface connected to internal net +# interface: eth0 +# # external IP of VPN server (EIP) +# external_ip: '' # {{ ansible_default_ipv4.address }}' +# # Which local IP address should OpenVPN +# # listen on? (optional) +# listen_ip: '' +# # valid: 'udp' or 'tcp' +# proto: udp +# # Which TCP/UDP port should OpenVPN listen on? +# port: 1194 +# # valid: 'tun' or 'tap' +# # "tun" will create a routed IP tunnel +# # "tap" will create an ethernet tunnel +# device: tun +# max_clients: 10 +# tls_auth: +# enabled: true +# cipher: AES-256-GCM +# user: nobody +# group: nobody + +... diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/openvpn/vars/archlinux.yml b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/openvpn/vars/archlinux.yml new file mode 100644 index 0000000..4718bc4 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/openvpn/vars/archlinux.yml @@ -0,0 +1,39 @@ +--- + +openvpn_dependencies: + - easy-rsa + - procps-ng + - rsync + - sshpass + - python-jinja + +# openvpn_packages: +# - openvpn + +openvpn_owner: openvpn +openvpn_group: network + +# openvpn_defaults_server: +# # network interface connected to internal net +# interface: eth0 +# # external IP of VPN server (EIP) +# external_ip: '' # {{ ansible_default_ipv4.address }}' +# # Which local IP address should OpenVPN +# # listen on? (optional) +# listen_ip: '' +# # valid: 'udp' or 'tcp' +# proto: udp +# # Which TCP/UDP port should OpenVPN listen on? +# port: 1194 +# # valid: 'tun' or 'tap' +# # "tun" will create a routed IP tunnel +# # "tap" will create an ethernet tunnel +# device: tun +# max_clients: 10 +# tls_auth: +# enabled: true +# cipher: AES-256-GCM +# user: nobody +# group: nobody + +... diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/openvpn/vars/artixlinux.yml b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/openvpn/vars/artixlinux.yml new file mode 100644 index 0000000..761d7d3 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/openvpn/vars/artixlinux.yml @@ -0,0 +1,42 @@ +--- + +openvpn_dependencies: + - easy-rsa + - procps-ng + - rsync + - sshpass + - python-jinja + +openvpn_packages: + - openvpn + - openvpn-openrc + +openvpn_owner: openvpn +openvpn_group: network + +openvpn_service_name: openvpn + +# openvpn_defaults_server: +# # network interface connected to internal net +# interface: eth0 +# # external IP of VPN server (EIP) +# external_ip: '' # {{ ansible_default_ipv4.address }}' +# # Which local IP address should OpenVPN +# # listen on? (optional) +# listen_ip: '' +# # valid: 'udp' or 'tcp' +# proto: udp +# # Which TCP/UDP port should OpenVPN listen on? 
+# port: 1194 +# # valid: 'tun' or 'tap' +# # "tun" will create a routed IP tunnel +# # "tap" will create an ethernet tunnel +# device: tun +# max_clients: 10 +# tls_auth: +# enabled: true +# cipher: AES-256-GCM +# user: nobody +# group: nobody + +... diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/openvpn/vars/debian.yml b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/openvpn/vars/debian.yml new file mode 100644 index 0000000..4edda47 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/openvpn/vars/debian.yml @@ -0,0 +1,11 @@ +--- + +openvpn_dependencies: + - easy-rsa + - procps + - rsync + - sshpass + - iptables + - iproute2 + +... diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/openvpn/vars/main.yml b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/openvpn/vars/main.yml new file mode 100644 index 0000000..dcb72a4 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/openvpn/vars/main.yml @@ -0,0 +1,95 @@ +--- + +openvpn_local_tmp_directory: "{{ + lookup('env', 'CUSTOM_LOCAL_TMP_DIRECTORY') | + default(lookup('env', 'HOME') ~ '/.cache/ansible/openvpn', true) }}" + +openvpn_dependencies: + - easy-rsa + - procps + - rsync + - sshpass + - iptables + +openvpn_packages: + - openvpn + +openvpn_owner: root +openvpn_group: root + +openvpn_service_name: openvpn-server@server + +openvpn_defaults_service: + state: started + enabled: true + +openvpn_defaults_logging: + directory: /var/log/openvpn + file: openvpn.log + status: "" + verbosity: 3 + mute: 10 + append: true + +openvpn_defaults_easyrsa: + directory: /etc/easy-rsa + openssl_config: "openssl-easyrsa.cnf" + key_size: 4096 + ca_expire: 3650 + cert_expire: 3650 + crl_days: 180 + crl_warn: + expired: true + expire_in_days: 20 + x509_dn_mode: cn_only + # Choices for crypto alg are: (each in lower-case) + # * rsa + # * ec + # * ed + crypto_mode: ec + rsa_curve: secp384r1 + digest: sha512 + +openvpn_defaults_certificate: + req_country: DE + req_province: Hamburg + req_city: Hamburg + req_org: ACME Inc. + req_email: openvpn@acme.inc + req_ou: Special Forces + req_cn_ca: 'Open VPN' + req_cn_server: '{{ ansible_facts.fqdn }}' + +openvpn_defaults_server: + # network interface connected to internal net + interface: eth0 + # external IP of VPN server (EIP) + external_ip: '' # {{ ansible_default_ipv4.address }}' + # Which local IP address should OpenVPN + # listen on? (optional) + listen_ip: '' + # valid: 'udp' or 'tcp' + proto: udp + # Which TCP/UDP port should OpenVPN listen on? + port: 1194 + # valid: 'tun' or 'tap' + # "tun" will create a routed IP tunnel + # "tap" will create an ethernet tunnel + device: tun + max_clients: 10 + tls_auth: + enabled: true + cipher: AES-256-GCM + user: nobody + group: nogroup + +openvpn_defaults_push: + routes: [] + route_gateway: "" + dhcp_options: + domains: [] + dns: [] + sndbuf: 393216 + rcvbuf: 393216 + +... 
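A minimal play consuming the defaults above might look as follows; the role's FQCN, the host group and the override values are assumptions, and the role is expected to merge `openvpn_server` into `openvpn_defaults_server` the same way the pacman role below combines its dictionaries:

```yaml
- hosts: vpn_servers
  become: true
  roles:
    - role: bodsch.core.openvpn
      vars:
        openvpn_server:
          external_ip: 203.0.113.10   # assumed; the empty default falls back to ansible_default_ipv4
          proto: udp
          port: 1194
```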
diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/pacman/.ansible-lint b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/pacman/.ansible-lint new file mode 100644 index 0000000..48763f0 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/pacman/.ansible-lint @@ -0,0 +1,5 @@ +--- + +skip_list: + - name[casing] + - name[template] diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/pacman/.editorconfig b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/pacman/.editorconfig new file mode 100644 index 0000000..898cdbd --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/pacman/.editorconfig @@ -0,0 +1,23 @@ +# EditorConfig helps developers define and maintain consistent +# coding styles between different editors and IDEs +# https://editorconfig.org/ + + +root = true + +[*] +indent_style = space +indent_size = 2 + +end_of_line = lf +charset = utf-8 +trim_trailing_whitespace = true +insert_final_newline = true +max_line_length = 100 + +[*.py] +indent_style = space +indent_size = 4 + +[*.md] +trim_trailing_whitespace = false diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/pacman/.flake8 b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/pacman/.flake8 new file mode 100644 index 0000000..1962f7e --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/pacman/.flake8 @@ -0,0 +1,19 @@ +[flake8] + +# E221 multiple spaces before operator +# E251 unexpected spaces around keyword / parameter equals + +ignore = E221,E251 + +exclude = + # No need to traverse our git directory + .git, + # There's no value in checking cache directories + __pycache__, + .tox + +# E203: https://github.com/python/black/issues/315 +# ignore = D,E741,W503,W504,H,E501,E203 + +max-line-length = 195 + diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/pacman/.gitignore b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/pacman/.gitignore new file mode 100644 index 0000000..3d35e6a --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/pacman/.gitignore @@ -0,0 +1,6 @@ +.tox +.galaxy_install_info +*kate-swp +__pycache__ +.cache +.directory diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/pacman/.yamllint b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/pacman/.yamllint new file mode 100644 index 0000000..20fd7aa --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/pacman/.yamllint @@ -0,0 +1,40 @@ +--- +# Based on ansible-lint config +extends: default + +rules: + braces: + max-spaces-inside: 1 + level: error + brackets: + max-spaces-inside: 1 + level: error + colons: + max-spaces-after: -1 + level: error + commas: + max-spaces-after: -1 + level: error + comments: disable + comments-indentation: disable + document-start: disable + empty-lines: + max: 3 + level: error + hyphens: + level: error + indentation: + spaces: 2 + key-duplicates: enable + line-length: + max: 195 + level: warning + new-line-at-end-of-file: disable + new-lines: + type: unix + trailing-spaces: disable + truthy: disable + +ignore: | + molecule/ + .github diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/pacman/CONTRIBUTING.md b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/pacman/CONTRIBUTING.md new 
file mode 100644 index 0000000..e3cd4cc --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/pacman/CONTRIBUTING.md @@ -0,0 +1,31 @@ +Contributing +============ +If you want to contribute to a project and make it better, your help is very welcome. +Contributing is also a great way to learn more about social coding on Github, new technologies and +and their ecosystems and how to make constructive, helpful bug reports, feature requests and the +noblest of all contributions: a good, clean pull request. + +### How to make a clean pull request + +Look for a project's contribution instructions. If there are any, follow them. + +- Create a personal fork of the project on Github. +- Clone the fork on your local machine. Your remote repo on Github is called `origin`. +- Add the original repository as a remote called `upstream`. +- If you created your fork a while ago be sure to pull upstream changes into your local repository. +- Create a new branch to work on! Branch from `develop` if it exists, else from `master`. +- Implement/fix your feature, comment your code. +- Follow the code style of the project, including indentation. +- If the project has tests run them! +- Write or adapt tests as needed. +- Add or change the documentation as needed. +- Squash your commits into a single commit. Create a new branch if necessary. +- Push your branch to your fork on Github, the remote `origin`. +- From your fork open a pull request in the correct branch. Target the project's `develop` branch if there is one, else go for `master`! +- If the maintainer requests further changes just push them to your branch. The PR will be updated automatically. +- Once the pull request is approved and merged you can pull the changes from `upstream` to your local repo and delete + your extra branch(es). + +And last but not least: Always write your commit messages in the present tense. +Your commit message should describe what the commit, when applied, does to the +code – not what you did to the code. diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/pacman/LICENSE b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/pacman/LICENSE new file mode 100644 index 0000000..8c8472f --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/pacman/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. 
+ + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright 2020-2021 Bodo Schulz + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/pacman/Makefile b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/pacman/Makefile new file mode 100644 index 0000000..bfaab7c --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/pacman/Makefile @@ -0,0 +1,22 @@
+#
+export TOX_SCENARIO ?= default
+export TOX_ANSIBLE ?= ansible_8.5
+
+.PHONY: converge destroy verify test lint
+
+default: converge
+
+converge:
+	@hooks/converge
+
+destroy:
+	@hooks/destroy
+
+verify:
+	@hooks/verify
+
+test:
+	@hooks/test
+
+lint:
+	@hooks/lint
diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/pacman/README.md b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/pacman/README.md new file mode 100644 index 0000000..bdd9375 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/pacman/README.md @@ -0,0 +1,156 @@
+
+# Ansible Role: `bodsch.core.pacman`
+
+Ansible role to configure pacman.
+
+## Supported operating systems
+
+* ArchLinux
+* ArtixLinux
+
+## Configuration
+
+There are pre-defined values for [Arch Linux](vars/archlinux.yml) and [Artix Linux](vars/artixlinux.yml).
+
+```yaml
+pacman_config: {}
+
+pacman_options: {}
+
+pacman_repositories: {}
+
+pacman_mirrors: {}
+
+pacman_custom_mirrors: []
+```
+
+### `pacman_config`
+
+[documentation](https://archlinux.org/pacman/pacman.conf.5.html#_options)
+
+```yaml
+pacman_config:
+  root_dir: "/"
+  db_path: /var/lib/pacman/
+  cache_dir: /var/cache/pacman/pkg/
+  log_file: /var/log/pacman.log
+  gpg_dir: /etc/pacman.d/gnupg/
+  hook_dir: /etc/pacman.d/hooks/
+  hold_pkg:
+    - pacman
+    - glibc
+  xfer_command: /usr/bin/curl -s -L -C - -f -o %o %u
+  clean_method: KeepInstalled
+  architecture: auto
+  ignore_pkg: []
+  ignore_group: []
+  no_upgrade: []
+  no_extract: []
+
+  use_syslog: true
+  color: true
+  no_progress_bar: true
+  check_space: true
+  verbose_pkg_lists: false
+  parallel_downloads: 5
+
+  sig_level:
+    - Required
+    - DatabaseOptional
+  local_file_sig_level:
+    - Optional
+  remote_file_sig_level:
+    - Required
+```
+
+### `pacman_options`
+
+```yaml
+pacman_options:
+  no_extract:
+    - "usr/share/help/* !usr/share/help/en*"
+    - "usr/share/gtk-doc/html/* usr/share/doc/*"
+    - "usr/share/locale/* usr/share/X11/locale/* usr/share/i18n/*"
+    - "!*locale*/en*/* !usr/share/i18n/charmaps/UTF-8.gz !usr/share/*locale*/locale.*"
+    - "!usr/share/*locales/en_??
!usr/share/*locales/i18n* !usr/share/*locales/iso*" + - "!usr/share/*locales/trans*" + - "usr/share/man/* usr/share/info/*" + - "usr/share/vim/vim*/lang/*" +``` + +### `pacman_repositories` + +[documentation](https://archlinux.org/pacman/pacman.conf.5.html#_repository_sections) + +[Package and Database Signature Checking](https://archlinux.org/pacman/pacman.conf.5.html#_package_and_database_signature_checking_a_id_sc_a) + +[Artix Support](https://wiki.artixlinux.org/Main/Repositories) + +[cleaning old repositories](https://archlinux.org/news/cleaning-up-old-repositories/) + + +```yaml +pacman_repositories: + custom: + enabled: false + sig_level: + - Optional + - TrustAll + server: file:///home/custompkgs + usage: + - All + + core: + enabled: true + include: /etc/pacman.d/mirrorlist + + extra: + enabled: true + include: /etc/pacman.d/mirrorlist + + community-testing: + enabled: false + include: /etc/pacman.d/mirrorlist + + community: + enabled: false + include: /etc/pacman.d/mirrorlist +``` + +### `pacman_mirrors` + + +```yaml +pacman_mirrors: + "Default mirrors": + enabled: true + servers: + - https://geo.mirror.pkgbuild.com/$repo/os/$arch + - https://mirror.rackspace.com/archlinux/$repo/os/$arch + - https://mirror.leaseweb.net/archlinux/$repo/os/$arch + + "Europe - Germany": + enabled: true + servers: + - https://mirror.netcologne.de/artix-linux/$repo/os/$arch + - http://mirrors.redcorelinux.org/artixlinux/$repo/os/$arch + - https://mirror.pascalpuffke.de/artix-linux/$repo/os/$arch + - https://ftp.uni-bayreuth.de/linux/artix-linux/$repo/os/$arch +``` + +### `pacman_custom_mirrors` + +```yaml +pacman_custom_mirrors: + - file: /etc/pacman.d/mirrorlist-arch + "ARCH MIRRORS": + enabled: false + servers: + - http://mirror.i3d.net/pub/archlinux/$repo/os/$arch +``` + +--- + +## Author and License + +- Bodo Schulz diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/pacman/defaults/main.yml b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/pacman/defaults/main.yml new file mode 100644 index 0000000..025a02a --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/pacman/defaults/main.yml @@ -0,0 +1,13 @@ +--- + +pacman_config: {} + +pacman_options: {} + +pacman_repositories: {} + +pacman_mirrors: {} + +pacman_custom_mirrors: [] + +... diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/pacman/handlers/main.yml b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/pacman/handlers/main.yml new file mode 100644 index 0000000..689a249 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/pacman/handlers/main.yml @@ -0,0 +1,7 @@ +--- + +- name: update package cache + ansible.builtin.package: + update_cache: true + +... 
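Because the configure task below combines these empty defaults recursively with the distribution presets, a host override only needs the keys it actually changes. A sketch with assumed values:

```yaml
pacman_config:
  parallel_downloads: 10
  ignore_pkg:
    - linux   # hold the kernel back on this host (assumed example)
```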
diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/pacman/hooks/converge b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/pacman/hooks/converge new file mode 100755 index 0000000..0c50932 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/pacman/hooks/converge @@ -0,0 +1,3 @@
+#!/usr/bin/env bash
+
+hooks/tox.sh "converge"
diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/pacman/hooks/destroy b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/pacman/hooks/destroy new file mode 100755 index 0000000..b4a3f8d --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/pacman/hooks/destroy @@ -0,0 +1,3 @@
+#!/usr/bin/env bash
+
+hooks/tox.sh "destroy"
diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/pacman/hooks/lint b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/pacman/hooks/lint new file mode 100755 index 0000000..ef226a0 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/pacman/hooks/lint @@ -0,0 +1,3 @@
+#!/usr/bin/env bash
+
+hooks/tox.sh "lint"
diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/pacman/hooks/molecule.rc b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/pacman/hooks/molecule.rc new file mode 100644 index 0000000..78c8621 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/pacman/hooks/molecule.rc @@ -0,0 +1,74 @@
+
+TOX_ARGS=
+
+if [ -n "${TOX_SCENARIO}" ]
+then
+    TOX_ARGS="--scenario-name ${TOX_SCENARIO}"
+fi
+
+TOX_OPTS="-e ${TOX_ANSIBLE}"
+
+vercomp() {
+
+    [[ $1 == $2 ]] && return 0
+    v1=$(echo "$1" | sed -e 's|-|.|g')
+    v2=$(echo "$2" | sed -e 's|-|.|g')
+
+    local IFS=.
+    local i ver1=($v1) ver2=($v2)
+    # fill empty fields in ver1 with zeros
+    for ((i=${#ver1[@]}; i<${#ver2[@]}; i++))
+    do
+        ver1[i]=0
+    done
+
+    for ((i=0; i<${#ver1[@]}; i++))
+    do
+        if [[ -z ${ver2[i]} ]]
+        then
+            # fill empty fields in ver2 with zeros
+            ver2[i]=0
+        fi
+        if ((10#${ver1[i]} > 10#${ver2[i]}))
+        then
+            return 1
+        fi
+        if ((10#${ver1[i]} < 10#${ver2[i]}))
+        then
+            return 2
+        fi
+    done
+    return 0
+}
+
+install_collection() {
+    local collection="${1}"
+
+    echo "Install the required collection '${collection}'"
+    ansible-galaxy collection install ${collection}
+}
+
+remove_collection() {
+
+    local collection="${1}"
+
+    namespace="$(echo "${collection}" | cut -d '.' -f1)"
+    name="$(echo "${collection}" | cut -d '.' -f2)"
+
+    collection="${HOME}/.ansible/collections/ansible_collections/${namespace}/${name}"
+
+    rm \
+        --recursive \
+        --force \
+        "${collection}"
+}
+
+publish() {
+
+    TOKEN="${HOME}/.ansible/galaxy_token"
+
+    if [ -e "${TOKEN}" ]
+    then
+        ansible-galaxy import --token=$(cat "${TOKEN}") bodsch # "???"
+ fi +} diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/pacman/hooks/test b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/pacman/hooks/test new file mode 100755 index 0000000..2869139 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/pacman/hooks/test @@ -0,0 +1,3 @@ +#!/usr/bin/env bash + +hooks/tox.sh "test" diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/pacman/hooks/tox.sh b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/pacman/hooks/tox.sh new file mode 100755 index 0000000..c93de29 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/pacman/hooks/tox.sh @@ -0,0 +1,52 @@ +#!/usr/bin/env bash + +. hooks/molecule.rc + +TOX_TEST="${1}" + +if [ -f "./collections.yml" ] +then + for collection in $(grep -v "#" collections.yml | grep "^ - name: " | awk -F ': ' '{print $2}') + do + collections_installed="$(ansible-galaxy collection list | grep ${collection} 2> /dev/null)" + + if [ -z "${collections_installed}" ] + then + install_collection ${collection} + else + collection_version=$(echo "${collections_installed}" | awk -F ' ' '{print $2}') + version="$(grep -v "#" collections.yml | grep -A1 "^ - name: ${collection}" | grep "version: " 2> /dev/null | awk -F ': ' '{print $2}' | sed -e 's|=||' -e 's|>||' -e 's|"||g')" + + echo "The required collection '${collection}' is installed in version ${collection_version}." + + if [ ! -z "${version}" ] + then + + vercomp "${version}" "${collection_version}" + + case $? in + 0) op='=' ;; + 1) op='>' ;; + 2) op='<' ;; + esac + + if [[ $op = "=" ]] || [[ $op = ">" ]] + then + # echo "FAIL: Expected '$3', Actual '$op', Arg1 '$1', Arg2 '$2'" + echo "re-install for version ${version}" + + remove_collection ${collection} + install_collection ${collection} + else + : + # echo "Pass: '$1 $op $2'" + fi + else + : + fi + fi + done + echo "" +fi + +tox ${TOX_OPTS} -- molecule ${TOX_TEST} ${TOX_ARGS} diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/pacman/hooks/verify b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/pacman/hooks/verify new file mode 100755 index 0000000..5f436af --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/pacman/hooks/verify @@ -0,0 +1,3 @@ +#!/usr/bin/env bash + +hooks/tox.sh "verify" diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/pacman/meta/main.yml b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/pacman/meta/main.yml new file mode 100644 index 0000000..7261708 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/pacman/meta/main.yml @@ -0,0 +1,20 @@ +--- + +galaxy_info: + role_name: pacman + + author: Bodo Schulz + description: ansible role to configure pacman for Arch- and/or ArtixLinux + + license: Apache + min_ansible_version: "2.9" + + platforms: + - name: ArchLinux + + galaxy_tags: + - pacman + +dependencies: [] + +... 
diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/pacman/tasks/configure.yml b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/pacman/tasks/configure.yml new file mode 100644 index 0000000..d76b29a --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/pacman/tasks/configure.yml @@ -0,0 +1,51 @@
+---
+
+- name: merge pacman configuration between defaults and custom
+  ansible.builtin.set_fact:
+    pacman_config: "{{ pacman_defaults_config | combine(pacman_config, recursive=True) }}"
+    pacman_repositories: "{{ pacman_defaults_repositories | combine(pacman_repositories, recursive=True) }}"
+    pacman_mirrors: "{{ pacman_defaults_mirrors | combine(pacman_mirrors, recursive=True) }}"
+    pacman_options: "{{ pacman_defaults_options | combine(pacman_options, recursive=True) }}"
+
+- name: create default pacman mirrors
+  ansible.builtin.template:
+    src: pacman.d/mirrorlist.j2
+    dest: /etc/pacman.d/mirrorlist
+    backup: true
+    mode: "0644"
+  when:
+    - pacman_mirrors | default({}) | count > 0
+  notify:
+    - update package cache
+
+- name: create custom pacman mirrors
+  ansible.builtin.template:
+    src: pacman.d/mirrorlist.j2
+    dest: "{{ item.file }}"
+    backup: true
+    mode: "0644"
+  loop:
+    "{{ pacman_custom_mirrors }}"
+  loop_control:
+    label: "{{ item.file }}"
+  when:
+    - pacman_custom_mirrors | default([]) | count > 0
+  notify:
+    - update package cache
+
+- name: create pacman configuration
+  ansible.builtin.template:
+    src: pacman.conf.j2
+    dest: /etc/pacman.conf
+    backup: true
+    mode: "0644"
+  notify:
+    - update package cache
+
+- name: create pacman hooks
+  ansible.builtin.include_tasks: configure/hooks.yml
+  when:
+    - pacman_hooks is defined
+    - pacman_hooks | default([]) | count > 0
+
+...
diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/pacman/tasks/configure/hooks.yml b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/pacman/tasks/configure/hooks.yml new file mode 100644 index 0000000..1e94d4d --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/pacman/tasks/configure/hooks.yml @@ -0,0 +1,24 @@
+---
+
+- name: create necessary directories
+  ansible.builtin.file:
+    path: "/etc/pacman.d/hooks"
+    state: directory
+    owner: root
+    group: root
+    mode: "0755"
+
+- name: create custom pacman hooks
+  ansible.builtin.template:
+    src: pacman.d/hooks/hook.j2
+    dest: "/etc/pacman.d/hooks/{{ item.name }}.hook"
+    backup: true
+    mode: "0644"
+  loop:
+    "{{ pacman_hooks }}"
+  loop_control:
+    label: "/etc/pacman.d/hooks/{{ item.name }}.hook"
+  when:
+    - item.state | default('present') == "present"
+
+...
diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/pacman/tasks/main.yml b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/pacman/tasks/main.yml new file mode 100644 index 0000000..26380f5 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/pacman/tasks/main.yml @@ -0,0 +1,9 @@
+---
+
+- name: prepare
+  ansible.builtin.include_tasks: prepare.yml
+
+- name: configure
+  ansible.builtin.include_tasks: configure.yml
+
+...
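The hooks task above renders each `pacman_hooks` entry through the hook.j2 template shown further down, which title-cases the keys and singularizes plural list keys into repeated directives. One plausible entry (name and values are assumptions, not role defaults):

```yaml
pacman_hooks:
  - name: systemd-reload
    state: present
    trigger:
      operations:        # rendered as one "Operation = ..." line per element
        - Install
        - Upgrade
      type: Package
      targets:
        - systemd
    action:
      description: Reload the systemd daemon
      when: PostTransaction
      exec: /usr/bin/systemctl daemon-reload
```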
diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/pacman/tasks/prepare.yml b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/pacman/tasks/prepare.yml new file mode 100644 index 0000000..829b279 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/pacman/tasks/prepare.yml @@ -0,0 +1,28 @@ +--- + +- name: check, for a valid system + ansible.builtin.assert: + msg: "this role works only with ArchLinux or Artix Linux" + quiet: true + that: + - ansible_facts.distribution | lower in [ "archlinux", "artix linux" ] + +- name: include OS specific configuration ({{ ansible_facts.distribution }} ({{ ansible_facts.os_family }}) {{ ansible_facts.distribution_major_version }}) + ansible.builtin.include_vars: "{{ lookup('first_found', params) }}" + vars: + params: + paths: + - "vars" + files: + # eg. archlinux-systemd / archlinux-openrc + - "{{ ansible_facts.distribution | lower }}-{{ ansible_facts.service_mgr | lower }}.yml" + # eg. debian / ubuntu / centos / oraclelinux + - "{{ ansible_facts.distribution | lower }}.yml" + # eg. artixlinux + - "{{ ansible_facts.distribution | lower | replace(' ', '') }}.yml" + # eg. redhat / debian / archlinux + - "{{ ansible_facts.os_family | lower }}.yml" + - default.yaml + skip: true + +... diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/pacman/templates/pacman.conf.j2 b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/pacman/templates/pacman.conf.j2 new file mode 100644 index 0000000..51c354f --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/pacman/templates/pacman.conf.j2 @@ -0,0 +1,128 @@ +#jinja2: trim_blocks: True, lstrip_blocks: True +# {{ ansible_managed }} +# +# /etc/pacman.conf +# +# See the pacman.conf(5) manpage for option and repository directives + +[options] +RootDir = {{ pacman_config.root_dir }} +DBPath = {{ pacman_config.db_path }} +CacheDir = {{ pacman_config.cache_dir }} +LogFile = {{ pacman_config.log_file }} +GPGDir = {{ pacman_config.gpg_dir }} +HookDir = {{ pacman_config.hook_dir }} +HoldPkg = {{ pacman_config.hold_pkg | default([]) | join(' ') }} +{% if pacman_config.xfer_command is defined and + pacman_config.xfer_command | string | length > 0 %} +XferCommand = {{ pacman_config.xfer_command }} +{% endif %} +CleanMethod = {{ pacman_config.clean_method }} +Architecture = {{ pacman_config.architecture }} + +# Pacman won't upgrade packages listed in IgnorePkg and members of IgnoreGroup +{% if pacman_config.ignore_pkg | default([]) | count > 0 %} +IgnorePkg = {{ pacman_config.ignore_pkg | join(' ') }} +{% endif %} +{% if pacman_config.ignore_group | default([]) | count > 0 %} +IgnoreGroup = {{ pacman_config.ignore_group | join(' ') }} +{% endif %} +{% if pacman_config.no_upgrade | default([]) | count > 0 %} +NoUpgrade = {{ pacman_config.no_upgrade | join(' ') }} +{% endif %} +{% if pacman_config.no_extract | default([]) | count > 0 %} +NoExtract = {{ pacman_config.no_extract | join(' ') }} +{% endif %} + +{% if pacman_config.use_syslog | default('true') | bool %} +UseSyslog +{% endif %} +{% if pacman_config.color | default('true') | bool %} +Color +{% endif %} +{% if pacman_config.no_progress_bar | default('true') | bool %} +NoProgressBar +{% endif %} +{% if pacman_config.check_space | default('true') | bool %} +CheckSpace +{% endif %} +{% if pacman_config.verbose_pkg_lists | default('true') | bool %} +VerbosePkgLists +{% endif %} +{% if pacman_config.parallel_downloads | 
default('5') | string | length > 0 %} +ParallelDownloads = {{ pacman_config.parallel_downloads }} +{% endif %} +{% if pacman_config.download_user | default('alpm') | string | length > 0 %} +DownloadUser = {{ pacman_config.download_user }} +{% endif %} +{% if pacman_config.disable_sandbox | default('true') | bool %} +# No kernel landlock in containerd +DisableSandbox +{% endif %} + +SigLevel = {{ pacman_config.sig_level | default([]) | join(' ') }} +LocalFileSigLevel = {{ pacman_config.local_file_sig_level | default([]) | join(' ') }} +{% if pacman_config.remote_file_sig_level | default([]) | count > 0 %} +RemoteFileSigLevel = {{ pacman_config.remote_file_sig_level | join(' ') }} +{% endif %} + +{% if pacman_repositories is defined %} + {% for repo, values in pacman_repositories.items() %} + {% if not values.enabled | default('true') %} +# [{{ repo }}] + {% for k, v in values.items() %} + {% if k == "sig_level" %} + {% if v | bodsch.core.type == "list" %} + {% set _v = v | join(' ') %} + {% elif v | bodsch.core.type == "str" %} + {% set _v = v %} + {% endif %} +# SigLevel = {{ _v }} + {% endif %} + {% if k == "server" %} +# Server = {{ v }} + {% endif %} + {% if k == "include" %} +# Include = {{ v }} + {% endif %} + {% if k == "usage" %} +# Usage = {{ v | join(' ') }} + {% endif %} + {% endfor %} + + {% else %} +[{{ repo }}] + {% for k, v in values.items() %} + {% if k == "sig_level" %} + {% if v | bodsch.core.type == "list" %} + {% set _v = v | join(' ') %} + {% elif v | bodsch.core.type == "str" %} + {% set _v = v %} + {% endif %} +SigLevel = {{ _v }} + {% endif %} + {% if k == "server" %} +Server = {{ v }} + {% endif %} + {% if k == "include" %} +Include = {{ v }} + {% endif %} + {% if k == "usage" %} +Usage = {{ v | join(' ') }} + {% endif %} + {% endfor %} + + {% endif %} + {% endfor %} +{% endif %} + +{% if pacman_options is defined %} +[options] + {% for option, values in pacman_options.items() %} + {% if option == "no_extract" %} + {% for v in values %} +NoExtract = {{ v }} + {% endfor %} + {% endif %} + {% endfor %} +{% endif %} diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/pacman/templates/pacman.d/hooks/hook.j2 b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/pacman/templates/pacman.d/hooks/hook.j2 new file mode 100644 index 0000000..94631e0 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/pacman/templates/pacman.d/hooks/hook.j2 @@ -0,0 +1,41 @@ +#jinja2: trim_blocks: True, lstrip_blocks: True +# {{ ansible_managed }} + +{% set _ = item.pop('name') %} +{% if item.get('state') %} +{% set _ = item.pop('state') %} +{% endif %} +{% for key, values in item.items() %} +[{{ key | title }}] + {% for k, v in values.items() %} + {% if v | bodsch.core.type == 'list' %} + {% if k.endswith("s") %} + {% set k = k[:-1] %} + {% endif %} + {% for i in v %} +{{ k | title }} = {{ i }} + {% endfor %} + {% else %} +{{ k | title }} = {{ v }} + {% endif %} + {% endfor %} + +{% endfor %} + +{# +https://man.archlinux.org/man/alpm-hooks.5 + + +[Trigger] (Required, Repeatable) +Operation = Install|Upgrade|Remove (Required, Repeatable) +Type = Path|Package (Required) +Target = (Required, Repeatable) + +[Action] (Required) +Description = ... 
(Optional) +When = PreTransaction|PostTransaction (Required) +Exec = (Required) +Depends = (Optional) +AbortOnFail (Optional, PreTransaction only) +NeedsTargets (Optional) +#} diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/pacman/templates/pacman.d/hooks/linux-modules-post.hook.j2 b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/pacman/templates/pacman.d/hooks/linux-modules-post.hook.j2 new file mode 100644 index 0000000..0a91d90 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/pacman/templates/pacman.d/hooks/linux-modules-post.hook.j2 @@ -0,0 +1,11 @@ +[Trigger] +Operation = Upgrade +Type = Package +Target = linux + +[Action] +Description = Restore Linux kernel modules +When = PostTransaction +Depends = coreutils +Depends = rsync +Exec = /bin/sh -xc 'KVER="${KVER:-$(uname -r)}"; if test -e "/lib/modules/backup/${KVER}"; then rsync -AHXal --ignore-existing "/lib/modules/backup/${KVER}" /lib/modules/; fi; rm -rf /lib/modules/backup' diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/pacman/templates/pacman.d/hooks/linux-modules-pre.hook.j2 b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/pacman/templates/pacman.d/hooks/linux-modules-pre.hook.j2 new file mode 100644 index 0000000..f42b0d4 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/pacman/templates/pacman.d/hooks/linux-modules-pre.hook.j2 @@ -0,0 +1,10 @@ +[Trigger] +Operation = Upgrade +Type = Package +Target = linux + +[Action] +Description = Save Linux kernel modules +When = PreTransaction +Depends = rsync +Exec = /bin/sh -c 'KVER="${KVER:-$(uname -r)}"; if test -e "/lib/modules/${KVER}"; then rsync -AHXal --delete-after "/lib/modules/${KVER}" /lib/modules/backup/; fi' diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/pacman/templates/pacman.d/mirrorlist.j2 b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/pacman/templates/pacman.d/mirrorlist.j2 new file mode 100644 index 0000000..0290a4c --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/pacman/templates/pacman.d/mirrorlist.j2 @@ -0,0 +1,22 @@ +#jinja2: trim_blocks: True, lstrip_blocks: True +# {{ ansible_managed }} +# +## repository mirrorlist + +{% if item | default([]) | count > 0 %} + {% set _ = item.pop('file') %} + {% set pacman_mirrors = item %} +{% endif %} + +{% if pacman_mirrors is defined %} + {% for location, values in pacman_mirrors.items() %} +## {{ location }} + {% set _enabled = values.enabled | default('true') | bool %} + {% for m in values.servers | default([]) %} + {% if not _enabled | default('true') %} +# {% endif %} +Server = {{ m }} + {% endfor %} + + {% endfor %} +{% endif %} diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/pacman/vars/archlinux.yml b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/pacman/vars/archlinux.yml new file mode 100644 index 0000000..377b1b7 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/pacman/vars/archlinux.yml @@ -0,0 +1,66 @@ +--- + +pacman_defaults_repositories: + custom: + enabled: false + sig_level: + - Optional + - TrustAll + server: file:///home/custompkgs + usage: + - All + + core: + enabled: true + include: /etc/pacman.d/mirrorlist + + extra: + enabled: true + include: /etc/pacman.d/mirrorlist + + community-testing: + enabled: false + include: 
/etc/pacman.d/mirrorlist + + community: + enabled: false + include: /etc/pacman.d/mirrorlist + +pacman_defaults_mirrors: {} + # "Default mirrors": + # enabled: true + # servers: + # - https://geo.mirror.pkgbuild.com/$repo/os/$arch + # - https://mirror.rackspace.com/archlinux/$repo/os/$arch + # - https://mirror.leaseweb.net/archlinux/$repo/os/$arch + + # "Europe - Czech Republic": + # enabled: false + # servers: + # - https://ftp.sh.cvut.cz/artix-linux/$repo/os/$arch + # + # "Europe - Germany": + # enabled: true + # servers: + # - https://mirror.netcologne.de/artix-linux/$repo/os/$arch + # - http://mirrors.redcorelinux.org/artixlinux/$repo/os/$arch + # - https://mirror.pascalpuffke.de/artix-linux/$repo/os/$arch + # - https://ftp.uni-bayreuth.de/linux/artix-linux/$repo/os/$arch + # - https://ftp.halifax.rwth-aachen.de/artixlinux/$repo/os/$arch + # - https://mirror.linux.pizza/artix-linux/$repo/os/$arch + # - https://artix.unixpeople.org/repos/$repo/os/$arch + # - https://mirror1.artixlinux.org/repos/$repo/os/$arch + # - https://eu-mirror.artixlinux.org/repos/$repo/os/$arch + # + # "Europe - Austria": + # enabled: false + # servers: + # - http://mirror.alwyzon.net/archlinux/$repo/os/$arch + # - https://mirror.alwyzon.net/archlinux/$repo/os/$arch + # - http://mirror.digitalnova.at/archlinux/$repo/os/$arch + # - http://mirror.easyname.at/archlinux/$repo/os/$arch + # - https://at.arch.mirror.kescher.at/$repo/os/$arch + # - http://mirror.reisenbauer.ee/archlinux/$repo/os/$arch + # - https://mirror.reisenbauer.ee/archlinux/$repo/os/$arch + +... diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/pacman/vars/artixlinux.yml b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/pacman/vars/artixlinux.yml new file mode 100644 index 0000000..64e00a2 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/pacman/vars/artixlinux.yml @@ -0,0 +1,81 @@ +--- + +pacman_defaults_repositories: + custom: + enabled: false + sig_level: + - Optional + - TrustAll + server: file:///home/custompkgs + usage: + - All + + system: + enabled: true + include: /etc/pacman.d/mirrorlist + + world: + enabled: true + include: /etc/pacman.d/mirrorlist + + galaxy: + enabled: true + include: /etc/pacman.d/mirrorlist + + # testing + gremlins: + enabled: false + include: /etc/pacman.d/mirrorlist + + galaxy-gremlins: + enabled: false + include: /etc/pacman.d/mirrorlist + + universe: + enabled: false + include: /etc/pacman.d/mirrorlist-universe + + # staging + goblins: + enabled: false + include: /etc/pacman.d/mirrorlist + + goblins-gremlins: + enabled: false + include: /etc/pacman.d/mirrorlist + + # ARCH Linux + core: + enabled: false + include: /etc/pacman.d/mirrorlist-arch + + extra: + enabled: false + include: /etc/pacman.d/mirrorlist-arch + + community-testing: + enabled: false + include: /etc/pacman.d/mirrorlist-arch + + community: + enabled: false + include: /etc/pacman.d/mirrorlist-arch + +pacman_defaults_mirrors: + "Default mirrors": + enabled: true + servers: + - https://mirrors.dotsrc.org/artix-linux/repos/$repo/os/$arch + - http://ftp.ntua.gr/pub/linux/artix-linux/$repo/os/$arch + - https://artix.wheaton.edu/repos/$repo/os/$arch + - https://mirror.clarkson.edu/artix-linux/repos/$repo/os/$arch + - https://ftp.cc.uoc.gr/mirrors/linux/artixlinux/$repo/os/$arch + - https://artix.unixpeople.org/repos/$repo/os/$arch + - https://artix.rw-net.de/repos/$repo/os/$arch + - https://mirrors.tuna.tsinghua.edu.cn/artixlinux/$repo/os/$arch + - 
http://www.nylxs.com/mirror/repos/$repo/os/$arch + - https://ftp.sh.cvut.cz/artix-linux/$repo/os/$arch + - https://mirrors.nettek.us/artix-linux/$repo/os/$arch + - http://mirror1.artixlinux.org/repos/$repo/os/$arch + +... diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/pacman/vars/main.yml b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/pacman/vars/main.yml new file mode 100644 index 0000000..1c49d08 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/pacman/vars/main.yml @@ -0,0 +1,61 @@ +--- + +# pacman-contrib + +pacman_defaults_config: + root_dir: "/" + db_path: /var/lib/pacman/ + cache_dir: /var/cache/pacman/pkg/ + log_file: /var/log/pacman.log + gpg_dir: /etc/pacman.d/gnupg/ + hook_dir: /etc/pacman.d/hooks/ + hold_pkg: + - pacman + - glibc + xfer_command: "" # /usr/bin/curl -s -L -C - -f -o %o %u + # XferCommand = /usr/bin/wget --passive-ftp -c -O %o %u + clean_method: KeepInstalled + architecture: auto + # Pacman won't upgrade packages listed in IgnorePkg and members of IgnoreGroup + ignore_pkg: [] + ignore_group: [] + no_upgrade: [] + no_extract: [] + + # Misc options + use_syslog: true + color: true + no_progress_bar: true + check_space: true + verbose_pkg_lists: false + parallel_downloads: 5 + download_user: alpm + disable_sandbox: false + + # By default, pacman accepts packages signed by keys that its local keyring + # trusts (see pacman-key and its man page), as well as unsigned packages. + # https://archlinux.org/pacman/pacman.conf.5.html#SC + sig_level: + - Required + - DatabaseOptional + local_file_sig_level: + - Optional + remote_file_sig_level: [] + # - Required + +pacman_defaults_options: + no_extract: [] + # - "usr/share/help/* !usr/share/help/en*" + # - "usr/share/gtk-doc/html/* usr/share/doc/*" + # - "usr/share/locale/* usr/share/X11/locale/* usr/share/i18n/*" + # - "!*locale*/en*/* !usr/share/i18n/charmaps/UTF-8.gz !usr/share/*locale*/locale.*" + # - "!usr/share/*locales/en_?? !usr/share/*locales/i18n* !usr/share/*locales/iso*" + # - "!usr/share/*locales/trans*" + # - "usr/share/man/* usr/share/info/*" + # - "usr/share/vim/vim*/lang/*" + +pacman_defaults_repositories: {} + +pacman_defaults_mirrors: {} + +... 
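Since `configure.yml` merges these defaults with the user-supplied dictionaries via `combine(..., recursive=True)`, overriding a single key is enough; every other default stays intact. A sketch with illustrative values:

```yaml
# Hypothetical host_vars override — merged on top of pacman_defaults_config.
pacman_config:
  parallel_downloads: 10      # overrides the default of 5
  ignore_pkg:
    - linux                   # pacman will not upgrade the kernel package
  verbose_pkg_lists: true

pacman_repositories:
  universe:                   # flips only 'enabled' on the Artix defaults above
    enabled: true
```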
diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/sshd/.ansible-lint b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/sshd/.ansible-lint new file mode 100644 index 0000000..48763f0 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/sshd/.ansible-lint @@ -0,0 +1,5 @@ +--- + +skip_list: + - name[casing] + - name[template] diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/sshd/.editorconfig b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/sshd/.editorconfig new file mode 100644 index 0000000..898cdbd --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/sshd/.editorconfig @@ -0,0 +1,23 @@ +# EditorConfig helps developers define and maintain consistent +# coding styles between different editors and IDEs +# https://editorconfig.org/ + + +root = true + +[*] +indent_style = space +indent_size = 2 + +end_of_line = lf +charset = utf-8 +trim_trailing_whitespace = true +insert_final_newline = true +max_line_length = 100 + +[*.py] +indent_style = space +indent_size = 4 + +[*.md] +trim_trailing_whitespace = false diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/sshd/.flake8 b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/sshd/.flake8 new file mode 100644 index 0000000..1962f7e --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/sshd/.flake8 @@ -0,0 +1,19 @@ +[flake8] + +# E221 multiple spaces before operator +# E251 unexpected spaces around keyword / parameter equals + +ignore = E221,E251 + +exclude = + # No need to traverse our git directory + .git, + # There's no value in checking cache directories + __pycache__, + .tox + +# E203: https://github.com/python/black/issues/315 +# ignore = D,E741,W503,W504,H,E501,E203 + +max-line-length = 195 + diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/sshd/.gitignore b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/sshd/.gitignore new file mode 100644 index 0000000..3d35e6a --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/sshd/.gitignore @@ -0,0 +1,6 @@ +.tox +.galaxy_install_info +*kate-swp +__pycache__ +.cache +.directory diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/sshd/.yamllint b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/sshd/.yamllint new file mode 100644 index 0000000..20fd7aa --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/sshd/.yamllint @@ -0,0 +1,40 @@ +--- +# Based on ansible-lint config +extends: default + +rules: + braces: + max-spaces-inside: 1 + level: error + brackets: + max-spaces-inside: 1 + level: error + colons: + max-spaces-after: -1 + level: error + commas: + max-spaces-after: -1 + level: error + comments: disable + comments-indentation: disable + document-start: disable + empty-lines: + max: 3 + level: error + hyphens: + level: error + indentation: + spaces: 2 + key-duplicates: enable + line-length: + max: 195 + level: warning + new-line-at-end-of-file: disable + new-lines: + type: unix + trailing-spaces: disable + truthy: disable + +ignore: | + molecule/ + .github diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/sshd/CONTRIBUTING.md b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/sshd/CONTRIBUTING.md new file mode 100644 index 
0000000..e3cd4cc --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/sshd/CONTRIBUTING.md @@ -0,0 +1,31 @@ +Contributing +============ +If you want to contribute to a project and make it better, your help is very welcome. +Contributing is also a great way to learn more about social coding on Github, new technologies +and their ecosystems and how to make constructive, helpful bug reports, feature requests and the +noblest of all contributions: a good, clean pull request. + +### How to make a clean pull request + +Look for a project's contribution instructions. If there are any, follow them. + +- Create a personal fork of the project on Github. +- Clone the fork on your local machine. Your remote repo on Github is called `origin`. +- Add the original repository as a remote called `upstream`. +- If you created your fork a while ago, be sure to pull upstream changes into your local repository. +- Create a new branch to work on! Branch from `develop` if it exists, else from `master`. +- Implement/fix your feature, comment your code. +- Follow the code style of the project, including indentation. +- If the project has tests, run them! +- Write or adapt tests as needed. +- Add or change the documentation as needed. +- Squash your commits into a single commit. Create a new branch if necessary. +- Push your branch to your fork on Github, the remote `origin`. +- From your fork open a pull request in the correct branch. Target the project's `develop` branch if there is one, else go for `master`! +- If the maintainer requests further changes, just push them to your branch. The PR will be updated automatically. +- Once the pull request is approved and merged you can pull the changes from `upstream` to your local repo and delete + your extra branch(es). + +And last but not least: Always write your commit messages in the present tense. +Your commit message should describe what the commit, when applied, does to the +code – not what you did to the code. diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/sshd/LICENSE b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/sshd/LICENSE new file mode 100644 index 0000000..df702e3 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/sshd/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. 
+ + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [2020] [Bodo Schulz] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/sshd/Makefile b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/sshd/Makefile new file mode 100644 index 0000000..3abaf48 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/sshd/Makefile @@ -0,0 +1,22 @@ +# +export TOX_SCENARIO ?= default +export TOX_ANSIBLE ?= ansible_6.1 + +.PHONY: converge destroy verify test lint + +default: converge + +converge: + @hooks/converge + +destroy: + @hooks/destroy + +verify: + @hooks/verify + +test: + @hooks/test + +lint: + @hooks/lint diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/sshd/README.md b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/sshd/README.md new file mode 100644 index 0000000..4a71e17 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/sshd/README.md @@ -0,0 +1,187 @@ + +# Ansible Role: `bodsch.core.sshd` + +## usage + +```yaml +sshd_config: + port: # 22 + - 22 + address_family: any + listen_address: [] + # - "0.0.0.0:22" + # - "127.0.1.1:22622" + # https://man.openbsd.org/sshd_config#RekeyLimit + rekey_limit: [] # ['default', 'none'] + # - default + # - none + syslog_facility: AUTH # + log_level: INFO # + log_verbose: [] # 'kex.c:*:1000,*:kex_exchange_identification():*,packet.c:*' + login_grace_time: "" # 2m + permit_root_login: prohibit-password + strict_modes: true + max_auth_tries: "" # 6 + max_sessions: "" # 10 + pubkey_authentication: true + + # Expect .ssh/authorized_keys2 to be disregarded by default in future. + authorized_keys_file: + - "/etc/ssh/authorized_keys/%u" + - .ssh/authorized_keys + + authorized_principals_file: "" # false + + authorized_keys_command: "" # false + authorized_keys_command_user: "" # false + + # For this to work you will also need host keys in /etc/ssh/ssh_known_hosts + hostbased_authentication: "" # false + hostbased_accepted_algorithms: "" # + + host_certificate: "" + host_keys: + - "/etc/ssh/ssh_host_rsa_key" + - "/etc/ssh/ssh_host_ecdsa_key" + - "/etc/ssh/ssh_host_ed25519_key" + host_key_agent: "" # + host_key_algorithms: [] # + + # Change to yes if you don't trust ~/.ssh/known_hosts for + # HostbasedAuthentication + ignore_user_known_hosts: "" # false + # Don't read the user's ~/.rhosts and ~/.shosts files + ignore_rhosts: "" # true + + # To disable tunneled clear text passwords, change to no here! + password_authentication: "" # true + permit_empty_passwords: "" # false + + # Change to yes to enable challenge-response passwords (beware issues with + # some PAM modules and threads) + challenge_response_authentication: "" # false + + # Kerberos options + kerberos_authentication: "" # false + kerberos_or_local_passwd: "" # true + kerberos_ticket_cleanup: "" # true + kerberos_get_afs_token: "" # false + + kex_algorithms: [] + + # GSSAPI options + gss_api_authentication: "" # false + gss_api_cleanup_credentials: "" # true + gss_api_strict_acceptor_check: "" # true + gss_api_key_exchange: "" # false + + # Set this to 'yes' to enable PAM authentication, account processing, + # and session processing. If this is enabled, PAM authentication will + # be allowed through the ChallengeResponseAuthentication and + # PasswordAuthentication. Depending on your PAM configuration, + # PAM authentication via ChallengeResponseAuthentication may bypass + # the setting of "PermitRootLogin without-password". 
+ # If you just want the PAM account and session checks to run without + # PAM authentication, then enable this but set PasswordAuthentication + # and ChallengeResponseAuthentication to 'no'. + use_pam: true + + allow_agent_forwarding: "" # true + allow_tcp_forwarding: "" # true + gateway_ports: "" # false + x11_forwarding: "" # false + x11_display_offset: "" # 10 + x11_use_localhost: "" # true + permit_tty: "" # true + print_motd: "" # false + print_last_log: "" # true + tcp_keep_alive: "" # true + permituser_environment: "" # false + compression: "" # delayed + client_alive_interval: "" # 0 + client_alive_count_max: "" # 3 + use_dns: "" # false + pid_file: "" # /var/run/sshd.pid + max_startups: "" # 10:30:100 + permit_tunnel: "" # false + chroot_directory: "" # false + version_addendum: "" # false + + # no default banner path + banner: "" # false + deny_groups: [] + deny_users: [] + ciphers: [] + macs: [] + + # Allow client to pass locale environment variables + accept_env: + - LANG + - LC_* + + # override default of no subsystems + subsystem: + name: sftp + path: "{{ sshd_sftp_server }}" + + # Example of overriding settings on a per-user basis + match_users: + - username: anoncvs + options: + x11_forwarding: false + AllowTcpForwarding: false + PermitTTY: false + ForceCommand: + - cvs + - server + +ssh_config: + - host: "*" + # ForwardAgent: false + # ForwardX11: false + # ForwardX11Trusted: false + # PasswordAuthentication: true + # HostbasedAuthentication: "" + # GSSAPIAuthentication: "" + # GSSAPIDelegateCredentials: "" + # GSSAPIKeyExchange: false + # GSSAPITrustDNS: false + # BatchMode: false + # CheckHostIP: true + # AddressFamily: any + # ConnectTimeout: 0 + # StrictHostKeyChecking: ask + # IdentityFile: + # - "~/.ssh/id_rsa" + # - "~/.ssh/id_dsa" + # - "~/.ssh/id_ecdsa" + # - "~/.ssh/id_ed25519" + # Port: 22 + # Ciphers: + # - aes128-ctr + # - aes192-ctr + # - aes256-ctr + # - aes128-cbc + # - 3des-cbc + # MACs: + # - hmac-md5 + # - hmac-sha1 + # - umac-64@openssh.com + # EscapeChar: "~" + # Tunnel: false + # TunnelDevice: "any:any" + # PermitLocalCommand: false + # VisualHostKey: false + # ProxyCommand: ssh -q -W %h:%p gateway.example.com + # RekeyLimit: 1G 1h + # UserKnownHostsFile: ~/.ssh/known_hosts.d/%k + SendEnv: + - "LANG LC_*" + hash_known_hosts: false +``` + +--- + +## Author and License + +- Bodo Schulz diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/sshd/defaults/main.yml b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/sshd/defaults/main.yml new file mode 100644 index 0000000..307c38e --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/sshd/defaults/main.yml @@ -0,0 +1,6 @@ +--- + +sshd_config: {} +ssh_config: {} + +... 
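Taken together, a minimal play applying the role could look like the sketch below; the host pattern and overridden values are illustrative, and the snake_case keys rely on the role's merge and `bodsch.core.sshd_values` handling shown in `tasks/configure.yml` further down:

```yaml
# Hypothetical playbook — a minimal sketch, not a tested example.
- name: harden sshd
  hosts: all
  become: true
  roles:
    - role: bodsch.core.sshd
      vars:
        sshd_config:
          permit_root_login: "no"
          password_authentication: false   # rendered as 'PasswordAuthentication no'
          max_auth_tries: 3
          x11_forwarding: false
```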
diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/sshd/handlers/main.yml b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/sshd/handlers/main.yml new file mode 100644 index 0000000..67063ad --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/sshd/handlers/main.yml @@ -0,0 +1,22 @@ +--- + +- name: reload systemctl daemon + ansible.builtin.systemd: + daemon_reload: true + when: + - ansible_facts.service_mgr == 'systemd' + +- name: reload sshd + ansible.builtin.service: + name: "{{ sshd_service }}" + state: reloaded + +- name: restart sshd + ansible.builtin.service: + name: "{{ sshd_service }}" + state: restarted + +- name: start sshdgenkeys.service + ansible.builtin.service: + name: sshdgenkeys.service + state: started diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/sshd/hooks/converge b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/sshd/hooks/converge new file mode 100755 index 0000000..0c50932 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/sshd/hooks/converge @@ -0,0 +1,3 @@ +#!/usr/bin/env bash + +hooks/tox.sh "converge" diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/sshd/hooks/destroy b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/sshd/hooks/destroy new file mode 100755 index 0000000..b4a3f8d --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/sshd/hooks/destroy @@ -0,0 +1,3 @@ +#!/usr/bin/env bash + +hooks/tox.sh "destroy" diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/sshd/hooks/lint b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/sshd/hooks/lint new file mode 100755 index 0000000..ef226a0 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/sshd/hooks/lint @@ -0,0 +1,3 @@ +#!/usr/bin/env bash + +hooks/tox.sh "lint" diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/sshd/hooks/molecule.rc b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/sshd/hooks/molecule.rc new file mode 100644 index 0000000..78c8621 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/sshd/hooks/molecule.rc @@ -0,0 +1,74 @@ + +TOX_ARGS= + +if [ -n "${TOX_SCENARIO}" ] +then + TOX_ARGS="--scenario-name ${TOX_SCENARIO}" +fi + +TOX_OPTS="-e ${TOX_ANSIBLE}" + +vercomp() { + + [[ $1 == $2 ]] && return 0 + v1=$(echo "$1" | sed -e 's|-|.|g') + v2=$(echo "$2" | sed -e 's|-|.|g') + + local IFS=. + local i ver1=($v1) ver2=($v2) + # fill empty fields in ver1 with zeros + for ((i=${#ver1[@]}; i<${#ver2[@]}; i++)) + do + ver1[i]=0 + done + + for ((i=0; i<${#ver1[@]}; i++)) + do + if [[ -z ${ver2[i]} ]] + then + # fill empty fields in ver2 with zeros + ver2[i]=0 + fi + if ((10#${ver1[i]} > 10#${ver2[i]})) + then + return 1 + fi + if ((10#${ver1[i]} < 10#${ver2[i]})) + then + return 2 + fi + done + return 0 +} + +install_collection() { + local collection="${1}" + + echo "Install the required collection '${collection}'" + ansible-galaxy collection install ${collection} +} + +remove_collection() { + + local collection="${1}" + + namespace="$(echo "${collection}" | cut -d '.' -f1)" + name="$(echo "${collection}" | cut -d '.' 
-f2)" + + collection="${HOME}/.ansible/collections/ansible_collections/${namespace}/${name}" + + rm \ + --recursive \ + --force \ + "${collection}" +} + +publish() { + + TOKEN="${HOME}/.ansible/galaxy_token" + + if [ -e "${TOKEN}" ] + then + ansible-galaxy import --token=$(cat "${TOKEN}") bodsch # "???" + fi +} diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/sshd/hooks/test b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/sshd/hooks/test new file mode 100755 index 0000000..2869139 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/sshd/hooks/test @@ -0,0 +1,3 @@ +#!/usr/bin/env bash + +hooks/tox.sh "test" diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/sshd/hooks/tox.sh b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/sshd/hooks/tox.sh new file mode 100755 index 0000000..c93de29 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/sshd/hooks/tox.sh @@ -0,0 +1,52 @@ +#!/usr/bin/env bash + +. hooks/molecule.rc + +TOX_TEST="${1}" + +if [ -f "./collections.yml" ] +then + for collection in $(grep -v "#" collections.yml | grep "^ - name: " | awk -F ': ' '{print $2}') + do + collections_installed="$(ansible-galaxy collection list | grep ${collection} 2> /dev/null)" + + if [ -z "${collections_installed}" ] + then + install_collection ${collection} + else + collection_version=$(echo "${collections_installed}" | awk -F ' ' '{print $2}') + version="$(grep -v "#" collections.yml | grep -A1 "^ - name: ${collection}" | grep "version: " 2> /dev/null | awk -F ': ' '{print $2}' | sed -e 's|=||' -e 's|>||' -e 's|"||g')" + + echo "The required collection '${collection}' is installed in version ${collection_version}." + + if [ ! -z "${version}" ] + then + + vercomp "${version}" "${collection_version}" + + case $? 
in + 0) op='=' ;; + 1) op='>' ;; + 2) op='<' ;; + esac + + if [[ $op = "=" ]] || [[ $op = ">" ]] + then + # echo "FAIL: Expected '$3', Actual '$op', Arg1 '$1', Arg2 '$2'" + echo "re-install for version ${version}" + + remove_collection ${collection} + install_collection ${collection} + else + : + # echo "Pass: '$1 $op $2'" + fi + else + : + fi + fi + done + echo "" +fi + +tox ${TOX_OPTS} -- molecule ${TOX_TEST} ${TOX_ARGS} diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/sshd/hooks/verify b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/sshd/hooks/verify new file mode 100755 index 0000000..5f436af --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/sshd/hooks/verify @@ -0,0 +1,3 @@ +#!/usr/bin/env bash + +hooks/tox.sh "verify" diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/sshd/meta/main.yml b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/sshd/meta/main.yml new file mode 100644 index 0000000..05f95e1 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/sshd/meta/main.yml @@ -0,0 +1,26 @@ +--- +galaxy_info: + role_name: sshd + + author: Bodo Schulz + description: Manage sshd configuration + license: Apache + + min_ansible_version: "2.10" + platforms: + - name: ArchLinux + - name: Debian + versions: + # 10 + - buster + - bullseye + - name: Ubuntu + versions: + # 20.04 + - focal + + galaxy_tags: + - system + - sshd + +dependencies: [] diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/sshd/tasks/configure.yml b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/sshd/tasks/configure.yml new file mode 100644 index 0000000..530d02c --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/sshd/tasks/configure.yml @@ -0,0 +1,105 @@ +--- + +- name: merge sshd configuration between defaults and custom + ansible.builtin.set_fact: + ssh_config: "{{ ssh_defaults_config | bodsch.core.merge_lists(ssh_config) | bodsch.core.sshd_values }}" + sshd_config: "{{ sshd_defaults_config | combine(sshd_config, recursive=True) | bodsch.core.sshd_values }}" + tags: + - sshd + - configuration + +# - name: check FIPS mode +# when: +# - sshd_hostkeys_nofips | default([]) +# block: +# - name: check the kernel FIPS mode +# ansible.builtin.slurp: +# src: /proc/sys/crypto/fips_enabled +# register: sshd_kernel_fips_mode +# failed_when: false +# +# - name: check the userspace FIPS mode +# ansible.builtin.slurp: +# src: /etc/system-fips +# register: sshd_userspace_fips_mode +# failed_when: false + +- name: create /etc/ssh/sshd_config.d + ansible.builtin.file: + state: directory + name: /etc/ssh/sshd_config.d + mode: "0750" + tags: + - sshd + - configuration + +- name: create /etc/ssh/ssh_config.d + ansible.builtin.file: + state: directory + name: /etc/ssh/ssh_config.d + mode: "0750" + tags: + - sshd + - configuration + +- name: create /run/sshd for config tests + ansible.builtin.file: + state: directory + name: /run/sshd + mode: "0755" + tags: + - sshd + - configuration + +- name: configure ssh_config + ansible.builtin.template: + src: sshd/ssh_config.j2 + dest: /etc/ssh/ssh_config + mode: "0644" + backup: true + tags: + - sshd + - configuration + +- name: configure sshd.conf + ansible.builtin.template: + src: sshd/sshd_config.j2 + dest: /etc/ssh/sshd_config + mode: "0644" + backup: true + validate: "{{ sshd_binary }} -t -f %s" + notify: + - restart sshd + tags: + - sshd + 
- configuration + +- name: restart sshd if needed + ansible.builtin.meta: flush_handlers + +- name: create overriding settings on a per-user basis + ansible.builtin.template: + src: sshd/sshd_config.d/match_users.conf.j2 + dest: /etc/ssh/sshd_config.d/match_users.conf + mode: "0644" + backup: true + validate: "{{ sshd_binary }} -t -f %s" + notify: + - restart sshd + tags: + - sshd + - configuration + +- name: create subsystem configuration + ansible.builtin.template: + src: sshd/sshd_config.d/subsystem.conf.j2 + dest: /etc/ssh/sshd_config.d/subsystem.conf + mode: "0644" + backup: true + validate: "{{ sshd_binary }} -t -f %s" + notify: + - restart sshd + tags: + - sshd + - configuration + diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/sshd/tasks/install.yml b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/sshd/tasks/install.yml new file mode 100644 index 0000000..3c36dde --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/sshd/tasks/install.yml @@ -0,0 +1,42 @@ +--- + +- name: install sshd + ansible.builtin.package: + name: "{{ sshd_packages }}" + state: present + +- name: ensure host keys are created + block: + # find sshd host keys: + # - /etc/ssh/ssh_host_ecdsa_key.pub + # - /etc/ssh/ssh_host_ed25519_key.pub + # - /etc/ssh/ssh_host_rsa_key.pub + - name: find ssh host keys + ansible.builtin.find: + paths: "/etc/ssh" + file_type: file + patterns: + - "ssh_host_*key" + recurse: true + register: found_host_keys + + - name: define sshd_host_keys + ansible.builtin.set_fact: + sshd_host_keys: "{{ + found_host_keys.files | + sort(attribute='path', reverse=True) | + map(attribute='path') | list }}" + when: + - found_host_keys.files is defined + - found_host_keys.files | count > 0 + +- name: create sshd host keys + when: + - sshd_host_keys | count == 0 + ansible.builtin.command: + cmd: /usr/bin/ssh-keygen -A + register: ssh_keygen + changed_when: ssh_keygen.rc != 0 + failed_when: ssh_keygen.rc != 0 + +... diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/sshd/tasks/main.yml b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/sshd/tasks/main.yml new file mode 100644 index 0000000..af539a1 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/sshd/tasks/main.yml @@ -0,0 +1,15 @@ +--- + +- name: prepare + ansible.builtin.include_tasks: prepare.yml + +- name: install + ansible.builtin.include_tasks: install.yml + +- name: configure + ansible.builtin.include_tasks: configure.yml + +- name: service + ansible.builtin.include_tasks: service.yml + +... diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/sshd/tasks/prepare.yml b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/sshd/tasks/prepare.yml new file mode 100644 index 0000000..d595f25 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/sshd/tasks/prepare.yml @@ -0,0 +1,38 @@ +--- + +- name: include OS specific configuration ({{ ansible_facts.distribution }} ({{ ansible_facts.os_family }}) {{ ansible_facts.distribution_major_version }}) + ansible.builtin.include_vars: "{{ lookup('first_found', params) }}" + vars: + params: + paths: + - "vars" + files: + # eg. debian-10 / ubuntu-20.04 / centos-8 / oraclelinux-8 + - "{{ ansible_facts.distribution | lower }}-{{ ansible_facts.distribution_major_version }}.yml" + # eg. 
archlinux-systemd / archlinux-openrc + - "{{ ansible_facts.distribution | lower }}-{{ ansible_facts.service_mgr | lower }}.yml" + # eg. debian / ubuntu / centos / oraclelinux + - "{{ ansible_facts.distribution | lower }}.yml" + # eg. redhat / debian / archlinux + - "{{ ansible_facts.os_family | lower }}.yml" + # artixlinux + - "{{ ansible_facts.os_family | lower | replace(' ', '') }}.yml" + skip: true + +- name: detect docker environment + ansible.builtin.set_fact: + is_docker_guest: "{{ + ansible_virtualization_role | default('host') == 'guest' and + ansible_virtualization_type | default('none') == 'docker' }}" + +- name: install requirements + ansible.builtin.package: + name: "{{ sshd_dependencies }}" + state: present + when: + - sshd_dependencies | default([]) | count > 0 + +- name: gathering facts now + ansible.builtin.setup: + +... diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/sshd/tasks/service.yml b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/sshd/tasks/service.yml new file mode 100644 index 0000000..08b7acd --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/sshd/tasks/service.yml @@ -0,0 +1,9 @@ +--- + +- name: enable and start sshd + ansible.builtin.service: + name: "{{ sshd_service }}" + enabled: true + state: started + +... diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/sshd/templates/sshd/ssh_config.j2 b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/sshd/templates/sshd/ssh_config.j2 new file mode 100644 index 0000000..fdffa06 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/sshd/templates/sshd/ssh_config.j2 @@ -0,0 +1,85 @@ +#jinja2: trim_blocks: True, lstrip_blocks: True +# {{ ansible_managed }} + +# This is the ssh client system-wide configuration file. See +# ssh_config(5) for more information. This file provides defaults for +# users, and the values can be changed in per-user configuration files +# or on the command line. + +# Configuration data is parsed as follows: +# 1. command line options +# 2. user-specific file +# 3. system-wide file +# Any configuration value is only changed the first time it is set. +# Thus, host-specific definitions should be at the beginning of the +# configuration file, and defaults at the end. + +# Site-wide defaults for some commonly used options. For a comprehensive +# list of available options, their meanings and defaults, please see the +# ssh_config(5) man page. 
+ +Include /etc/ssh/ssh_config.d/*.conf + +{% for h in ssh_config %} + {% set host = h.get('host') %} + {% set _ = h.pop('host') %} +Host {{ host }} + {% for k, v in h.items() %} + {% if k == "IdentityFile" and + v | bodsch.core.type == "list" %} + {% for x in v %} + {{ k }} {{ x }} + {% endfor %} + {% elif k == "SendEnv" and + v | bodsch.core.type == "list" %} + {% for x in v %} + {{ k }} {{ x }} + {% endfor %} + {% else %} + {% set value = v %} + {% if v | bodsch.core.type == "bool" %} + {% set value = v | bodsch.core.config_bool(true_as='yes', false_as='no') %} + {% endif %} + {% if v | bodsch.core.type == "list" %} + {% set value = v | join(',') %} + {% endif %} + {{ k }} {{ value }} + {% endif %} + {% endfor %} + +{% endfor %} +{# +Host * +# ForwardAgent no +# ForwardX11 no +# ForwardX11Trusted yes +# PasswordAuthentication yes +# HostbasedAuthentication no +# GSSAPIAuthentication no +# GSSAPIDelegateCredentials no +# GSSAPIKeyExchange no +# GSSAPITrustDNS no +# BatchMode no +# CheckHostIP yes +# AddressFamily any +# ConnectTimeout 0 +# StrictHostKeyChecking ask +# IdentityFile ~/.ssh/id_rsa +# IdentityFile ~/.ssh/id_dsa +# IdentityFile ~/.ssh/id_ecdsa +# IdentityFile ~/.ssh/id_ed25519 +# Port 22 +# Ciphers aes128-ctr,aes192-ctr,aes256-ctr,aes128-cbc,3des-cbc +# MACs hmac-md5,hmac-sha1,umac-64@openssh.com +# EscapeChar ~ +# Tunnel no +# TunnelDevice any:any +# PermitLocalCommand no +# VisualHostKey no +# ProxyCommand ssh -q -W %h:%p gateway.example.com +# RekeyLimit 1G 1h +# UserKnownHostsFile ~/.ssh/known_hosts.d/%k + SendEnv LANG LC_* + HashKnownHosts yes + GSSAPIAuthentication yes +#} diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/sshd/templates/sshd/sshd_config.d/match_users.conf.j2 b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/sshd/templates/sshd/sshd_config.d/match_users.conf.j2 new file mode 100644 index 0000000..3bcc36b --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/sshd/templates/sshd/sshd_config.d/match_users.conf.j2 @@ -0,0 +1,18 @@ +#jinja2: trim_blocks: True, lstrip_blocks: True +# {{ ansible_managed }} + +{% set _match = sshd_config.get("Match", []) %} +{% for users in _match %} + {% set username = users.get('username') %} + {% set options = users.get('options') %} +Match User {{ username }} + {% for k, v in options.items() %} + {% set value = v %} + {% if v | bodsch.core.type == "bool" %} + {% set value = v | bodsch.core.config_bool(true_as='yes', false_as='no') %} + {% elif v | bodsch.core.type == "list" %} + {% set value = v | join(' ') %} + {% endif %} + {{ k }} {{ value }} + {% endfor %} +{% endfor %} diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/sshd/templates/sshd/sshd_config.d/subsystem.conf.j2 b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/sshd/templates/sshd/sshd_config.d/subsystem.conf.j2 new file mode 100644 index 0000000..5bac274 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/sshd/templates/sshd/sshd_config.d/subsystem.conf.j2 @@ -0,0 +1,7 @@ +#jinja2: trim_blocks: True, lstrip_blocks: True +# {{ ansible_managed }} + +{% set _subsystem = sshd_config.get("Subsystem", {}) %} +{% if _subsystem | count > 0 %} +Subsystem {{ _subsystem.get('name', 'sftp') }} {{ _subsystem.get('path', '/usr/lib/openssh/sftp-server') }} +{% endif %} diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/sshd/templates/sshd/sshd_config.j2 
b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/sshd/templates/sshd/sshd_config.j2 new file mode 100644 index 0000000..3f5860e --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/sshd/templates/sshd/sshd_config.j2 @@ -0,0 +1,140 @@ +#jinja2: trim_blocks: True, lstrip_blocks: True +# {{ ansible_managed }} +{% macro validate_option(value, valid_values=[]) %} + {% if valid_values | count > 0 %} + {% if value not in valid_values %} + {% set value = None %} + {% endif %} + {% endif %} + {% if value | bodsch.core.type == "str" %} + {% if value | length == 0 %} + {% set value = None %} + {% endif %} + {% elif value | bodsch.core.type == "list" %} + {% if value | count == 0 %} + {% set value = None %} + {% endif %} + {% elif value | bodsch.core.type == "bool" %} + {% set value = value | bodsch.core.config_bool(true_as='yes', false_as='no') %} + {% endif %} + {{ value }} +{% endmacro %} +{% set _ = sshd_config.pop("Subsystem") %} +{% set _ = sshd_config.pop("Match") %} + +Include /etc/ssh/sshd_config.d/*.conf + +{% for key, value in sshd_config.items() %} +{# {{ key }}: {{ value }} - {{ value | bodsch.core.type }} #} +{# # {{ value }} | {{ value | bodsch.core.type }} {{ value | string | length }} #} + {% if key == "Port" %} + {% if value | bodsch.core.type in ["int", "string"] %} + {% set value = value | bodsch.core.string_to_list() %} + {% endif %} + {% for x in value %} +{{ key }} {{ x }} + {% endfor %} + {% elif key == "AddressFamily" %} + {% set _valid_values = ["any", "inet", "inet6"] %} + {% set value = validate_option(value=value, valid_values=_valid_values) %} + {% if value | string | trim | length > 0 %} +{{ key -}} {{ value -}} + {% endif %} + {% elif key == "ListenAddress" %} + {% set value = value | bodsch.core.string_to_list() %} + {% for p in value %} +{{ key }} {{ p }} + {% endfor %} + {% elif key == "HostKey" %} + {% set value = value | bodsch.core.string_to_list() %} + {% for p in value %} +{{ key }} {{ p }} + {% endfor %} + {% elif key == "RekeyLimit" %} + {% if value | bodsch.core.type == "string" %} + {% set value = value | bodsch.core.string_to_list() %} + {% endif %} + {% if value | count > 0 %} +{{ key }} {{ value | join(' ') }} + {% endif %} + {% elif key == "SyslogFacility" %} + {% set _valid_values = ["AUTHPRIV", "DAEMON","USER","AUTH","LOCAL0","LOCAL1","LOCAL2","LOCAL3","LOCAL4","LOCAL5","LOCAL6","LOCAL7"] %} + {% set value = validate_option(value=value, valid_values=_valid_values) %} + {% if value | string | trim | length > 0 %} +{{ key -}} {{ value -}} + {% endif %} + {% elif key == "LogLevel" %} + {% set _valid_values = ["QUIET","FATAL","ERROR","INFO","VERBOSE","DEBUG","DEBUG1","DEBUG2","DEBUG3"] %} + {% set value = validate_option(value=value, valid_values=_valid_values) %} + {% if value | string | trim | length > 0 %} +{{ key -}} {{ value -}} + {% endif %} + {% elif key == "LogVerbose" %} + {% if value | bodsch.core.type == "string" %} + {% set value = value | bodsch.core.string_to_list() %} + {% endif %} + {% set value = validate_option(value=value) %} + {% if value | string | trim | length > 0 %} +{{ key }} {{ value | join(',') }} + {% endif %} + {% elif key == "PermitRootLogin" %} + {% set _valid_values = ["yes","prohibit-password","forced-commands-only","no"] %} + {% set value = validate_option(value=value, valid_values=_valid_values) %} + {% if value | string | trim | length > 0 %} +{{ key -}} {{ value -}} + {% endif %} + {% elif key == "AuthorizedKeysFile" %} + {% if value | bodsch.core.type == "string" 
%} + {% set value = value | bodsch.core.string_to_list() %} + {% endif %} +{{ key }} {{ value | join(' ') }} + {% elif key == "Compression" %} + {% set _valid_values = ["yes","delayed","no"] %} + {% set value = validate_option(value=value, valid_values=_valid_values) %} + {% if value | string | trim | length > 0 %} +{{ key -}} {{ value -}} + {% endif %} + {% elif key == "AcceptEnv" %} + {% if value | bodsch.core.type == "string" %} + {% set value = value | bodsch.core.string_to_list() %} + {% endif %} + {% if value | count > 0 %} +{{ key }} {{ value | join(' ') }} + {% endif %} + {# #} + {% elif key == "KexAlgorithms" %} + {% if value | bodsch.core.type == "string" %} + {% set value = value | bodsch.core.string_to_list() %} + {% endif %} + {% if value | count > 0 %} +{{ key }} {{ value | join(',') }} + {% endif %} + {% elif key == "Ciphers" %} + {% if value | bodsch.core.type == "string" %} + {% set value = value | bodsch.core.string_to_list() %} + {% endif %} + {% if value | count > 0 %} +{{ key }} {{ value | join(',') }} + {% endif %} + {% elif key == "MACs" %} + {% if value | bodsch.core.type == "string" %} + {% set value = value | bodsch.core.string_to_list() %} + {% endif %} + {% if value | count > 0 %} +{{ key }} {{ value | join(',') }} + {% endif %} + {% elif key == "HostKeyAlgorithms" %} + {% if value | bodsch.core.type == "string" %} + {% set value = value | bodsch.core.string_to_list() %} + {% endif %} + {% if value | count > 0 %} +{{ key }} {{ value | join(',') }} + {% endif %} + {# #} + {% else %} + {% set value = validate_option(value=value) %} + {% if value | string | trim | length > 0 %} +{{ key -}} {{ value -}} + {% endif %} + {% endif %} +{% endfor %} diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/sshd/vars/archlinux-openrc.yml b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/sshd/vars/archlinux-openrc.yml new file mode 100644 index 0000000..4c72e70 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/sshd/vars/archlinux-openrc.yml @@ -0,0 +1,6 @@ +--- + +sshd_packages: + - openssh + +... diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/sshd/vars/archlinux.yml b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/sshd/vars/archlinux.yml new file mode 100644 index 0000000..a1a2d0c --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/sshd/vars/archlinux.yml @@ -0,0 +1,8 @@ +--- + +sshd_packages: + - openssh + +sshd_sftp_server: /usr/lib/ssh/sftp-server + +... diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/sshd/vars/artixlinux.yml b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/sshd/vars/artixlinux.yml new file mode 100644 index 0000000..4c72e70 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/sshd/vars/artixlinux.yml @@ -0,0 +1,6 @@ +--- + +sshd_packages: + - openssh + +... diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/sshd/vars/debian.yml b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/sshd/vars/debian.yml new file mode 100644 index 0000000..c81cf5b --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/sshd/vars/debian.yml @@ -0,0 +1,3 @@ +--- + +... 
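For orientation, the `sshd_config.j2` template above can be exercised with a small input dictionary. The sketch below is illustrative only: the key names and the list/string handling follow the template's branches, but the concrete values are invented, and the rendered lines in the comments assume the `bodsch.core.type` and `bodsch.core.string_to_list` filters behave as their names suggest.

```yaml
# Hypothetical sshd_config input (example values, not role defaults)
sshd_config:
  # int or string -> converted to a list, one "Port" line per entry
  Port: "22 2222"
  # validated by the macro against ["any", "inet", "inet6"]; invalid values are dropped
  AddressFamily: any
  # validated against ["yes", "prohibit-password", "forced-commands-only", "no"]
  PermitRootLogin: prohibit-password
  # list -> joined with ','
  Ciphers:
    - chacha20-poly1305@openssh.com
    - aes256-gcm@openssh.com

# Expected rendering (roughly):
#   Port 22
#   Port 2222
#   AddressFamily any
#   PermitRootLogin prohibit-password
#   Ciphers chacha20-poly1305@openssh.com,aes256-gcm@openssh.com
```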
diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/sshd/vars/main.yml b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/sshd/vars/main.yml new file mode 100644 index 0000000..483f614 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/sshd/vars/main.yml @@ -0,0 +1,188 @@ +--- + +sshd_packages: + - openssh-server + - openssh-sftp-server + +sshd_sftp_server: /usr/lib/openssh/sftp-server +sshd_service: sshd +sshd_binary: /usr/sbin/sshd + +sshd_hostkeys_nofips: [] + +sshd_defaults_config: + port: # 22 + - 22 + address_family: any + listen_address: [] + # - "0.0.0.0:22" + # https://man.openbsd.org/sshd_config#RekeyLimit + rekey_limit: [] # ['default', 'none'] + # - default + # - none + syslog_facility: AUTH # + log_level: INFO # + log_verbose: [] # 'kex.c:*:1000,*:kex_exchange_identification():*,packet.c:*' + login_grace_time: "" # 2m + permit_root_login: prohibit-password + strict_modes: true + max_auth_tries: "" # 6 + max_sessions: "" # 10 + pubkey_authentication: true + + # Expect .ssh/authorized_keys2 to be disregarded by default in future. + authorized_keys_file: + - .ssh/authorized_keys + + authorized_principals_file: "" # false + + authorized_keys_command: "" # false + authorized_keys_command_user: "" # false + + # For this to work you will also need host keys in /etc/ssh/ssh_known_hosts + hostbased_authentication: "" # false + hostbased_accepted_algorithms: "" # + + host_certificate: "" + host_keys: + - "/etc/ssh/ssh_host_rsa_key" + - "/etc/ssh/ssh_host_ecdsa_key" + - "/etc/ssh/ssh_host_ed25519_key" + host_key_agent: "" # + host_key_algorithms: [] # + + # Change to yes if you don't trust ~/.ssh/known_hosts for + # HostbasedAuthentication + ignore_user_known_hosts: "" # false + # Don't read the user's ~/.rhosts and ~/.shosts files + ignore_rhosts: "" # true + + # To disable tunneled clear text passwords, change to no here! + password_authentication: "" # true + permit_empty_passwords: "" # false + + # Change to yes to enable challenge-response passwords (beware issues with + # some PAM modules and threads) + challenge_response_authentication: "" # false + + # Kerberos options + kerberos_authentication: "" # false + kerberos_or_local_passwd: "" # true + kerberos_ticket_cleanup: "" # true + kerberos_get_afs_token: "" # false + + kex_algorithms: [] + + # GSSAPI options + gss_api_authentication: "" # false + gss_api_cleanup_credentials: "" # true + gss_api_strict_acceptor_check: "" # true + gss_api_key_exchange: "" # false + + # Set this to 'yes' to enable PAM authentication, account processing, + # and session processing. If this is enabled, PAM authentication will + # be allowed through the ChallengeResponseAuthentication and + # PasswordAuthentication. Depending on your PAM configuration, + # PAM authentication via ChallengeResponseAuthentication may bypass + # the setting of "PermitRootLogin without-password". + # If you just want the PAM account and session checks to run without + # PAM authentication, then enable this but set PasswordAuthentication + # and ChallengeResponseAuthentication to 'no'. 
+ use_pam: true + + allow_agent_forwarding: "" # true + allow_tcp_forwarding: "" # true + gateway_ports: "" # false + x11_forwarding: "" # false + x11_display_offset: "" # 10 + x11_use_localhost: "" # true + permit_tty: "" # true + print_motd: "" # false + print_last_log: "" # true + tcp_keep_alive: "" # true + permituser_environment: "" # false + compression: "" # delayed + client_alive_interval: "" # 0 + client_alive_count_max: "" # 3 + use_dns: "" # false + pid_file: "" # /var/run/sshd.pid + max_startups: "" # 10:30:100 + permit_tunnel: "" # false + chroot_directory: "" # false + version_addendum: "" # false + + # no default banner path + banner: "" # false + deny_groups: [] + deny_users: [] + ciphers: [] + macs: [] + + # Allow client to pass locale environment variables + accept_env: + - LANG + - LC_* + + # override default of no subsystems + subsystem: + name: sftp + path: "{{ sshd_sftp_server }}" + + # Example of overriding settings on a per-user basis + match_users: [] + # - username: anoncvs + # options: + # x11_forwarding: false + # AllowTcpForwarding: false + # PermitTTY: false + # ForceCommand: + # - cvs + # - server + +ssh_defaults_config: + - host: "*" + # ForwardAgent: false + # ForwardX11: false + # ForwardX11Trusted: false + # PasswordAuthentication: true + # HostbasedAuthentication: "" + # GSSAPIAuthentication: "" + # GSSAPIDelegateCredentials: "" + # GSSAPIKeyExchange: false + # GSSAPITrustDNS: false + # BatchMode: false + # CheckHostIP: true + # AddressFamily: any + # ConnectTimeout: 0 + # StrictHostKeyChecking: ask + # IdentityFile: + # - "~/.ssh/id_rsa" + # - "~/.ssh/id_dsa" + # - "~/.ssh/id_ecdsa" + # - "~/.ssh/id_ed25519" + # Port: 22 + # Ciphers: + # - aes128-ctr + # - aes192-ctr + # - aes256-ctr + # - aes128-cbc + # - 3des-cbc + # MACs: + # - hmac-md5 + # - hmac-sha1 + # - umac-64@openssh.com + # EscapeChar: "~" + # Tunnel: false + # TunnelDevice: "any:any" + # PermitLocalCommand: false + # VisualHostKey: false + # ProxyCommand: ssh -q -W %h:%p gateway.example.com + # RekeyLimit: 1G 1h + # UserKnownHostsFile: ~/.ssh/known_hosts.d/%k + SendEnv: + - "LANG LC_*" + hash_known_hosts: false + +sshd_host_keys: [] + +... diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/sshd/vars/ubuntu-24.yml b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/sshd/vars/ubuntu-24.yml new file mode 100644 index 0000000..0252c6d --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/sshd/vars/ubuntu-24.yml @@ -0,0 +1,5 @@ +--- + +sshd_service: ssh + +... 
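The per-distribution vars files above override only what differs from `vars/main.yml` (for example, Ubuntu 24 ships the service unit as `ssh` rather than `sshd`). A hypothetical minimal play using the role might look like the following; the role name follows the collection layout, while the host group and the assumption that user-supplied settings are merged over `sshd_defaults_config` are illustrative:

```yaml
---
# Hypothetical play; host group and override values are examples only
- hosts: ssh_servers
  become: true
  roles:
    - role: bodsch.core.sshd
      vars:
        sshd_config:
          PermitRootLogin: "no"   # assumed to be merged over the role defaults
          Port: 22
```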
diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/sysctl/.ansible-lint b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/sysctl/.ansible-lint new file mode 100644 index 0000000..48763f0 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/sysctl/.ansible-lint @@ -0,0 +1,5 @@ +--- + +skip_list: + - name[casing] + - name[template] diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/sysctl/.editorconfig b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/sysctl/.editorconfig new file mode 100644 index 0000000..898cdbd --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/sysctl/.editorconfig @@ -0,0 +1,23 @@ +# EditorConfig helps developers define and maintain consistent +# coding styles between different editors and IDEs +# https://editorconfig.org/ + + +root = true + +[*] +indent_style = space +indent_size = 2 + +end_of_line = lf +charset = utf-8 +trim_trailing_whitespace = true +insert_final_newline = true +max_line_length = 100 + +[*.py] +indent_style = space +indent_size = 4 + +[*.md] +trim_trailing_whitespace = false diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/sysctl/.flake8 b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/sysctl/.flake8 new file mode 100644 index 0000000..1962f7e --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/sysctl/.flake8 @@ -0,0 +1,19 @@ +[flake8] + +# E221 multiple spaces before operator +# E251 unexpected spaces around keyword / parameter equals + +ignore = E221,E251 + +exclude = + # No need to traverse our git directory + .git, + # There's no value in checking cache directories + __pycache__, + .tox + +# E203: https://github.com/python/black/issues/315 +# ignore = D,E741,W503,W504,H,E501,E203 + +max-line-length = 195 + diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/sysctl/.gitignore b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/sysctl/.gitignore new file mode 100644 index 0000000..3d35e6a --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/sysctl/.gitignore @@ -0,0 +1,6 @@ +.tox +.galaxy_install_info +*kate-swp +__pycache__ +.cache +.directory diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/sysctl/.yamllint b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/sysctl/.yamllint new file mode 100644 index 0000000..20fd7aa --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/sysctl/.yamllint @@ -0,0 +1,40 @@ +--- +# Based on ansible-lint config +extends: default + +rules: + braces: + max-spaces-inside: 1 + level: error + brackets: + max-spaces-inside: 1 + level: error + colons: + max-spaces-after: -1 + level: error + commas: + max-spaces-after: -1 + level: error + comments: disable + comments-indentation: disable + document-start: disable + empty-lines: + max: 3 + level: error + hyphens: + level: error + indentation: + spaces: 2 + key-duplicates: enable + line-length: + max: 195 + level: warning + new-line-at-end-of-file: disable + new-lines: + type: unix + trailing-spaces: disable + truthy: disable + +ignore: | + molecule/ + .github diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/sysctl/CONTRIBUTING.md b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/sysctl/CONTRIBUTING.md new 
file mode 100644 index 0000000..e3cd4cc --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/sysctl/CONTRIBUTING.md @@ -0,0 +1,31 @@ +Contributing +============ +If you want to contribute to a project and make it better, your help is very welcome. +Contributing is also a great way to learn more about social coding on Github, new technologies and +and their ecosystems and how to make constructive, helpful bug reports, feature requests and the +noblest of all contributions: a good, clean pull request. + +### How to make a clean pull request + +Look for a project's contribution instructions. If there are any, follow them. + +- Create a personal fork of the project on Github. +- Clone the fork on your local machine. Your remote repo on Github is called `origin`. +- Add the original repository as a remote called `upstream`. +- If you created your fork a while ago be sure to pull upstream changes into your local repository. +- Create a new branch to work on! Branch from `develop` if it exists, else from `master`. +- Implement/fix your feature, comment your code. +- Follow the code style of the project, including indentation. +- If the project has tests run them! +- Write or adapt tests as needed. +- Add or change the documentation as needed. +- Squash your commits into a single commit. Create a new branch if necessary. +- Push your branch to your fork on Github, the remote `origin`. +- From your fork open a pull request in the correct branch. Target the project's `develop` branch if there is one, else go for `master`! +- If the maintainer requests further changes just push them to your branch. The PR will be updated automatically. +- Once the pull request is approved and merged you can pull the changes from `upstream` to your local repo and delete + your extra branch(es). + +And last but not least: Always write your commit messages in the present tense. +Your commit message should describe what the commit, when applied, does to the +code – not what you did to the code. diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/sysctl/LICENSE b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/sysctl/LICENSE new file mode 100644 index 0000000..8c8472f --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/sysctl/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. 
+ + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright 2020-2021 Bodo Schulz + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/sysctl/Makefile b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/sysctl/Makefile new file mode 100644 index 0000000..bfaab7c --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/sysctl/Makefile @@ -0,0 +1,22 @@ +# +export TOX_SCENARIO ?= default +export TOX_ANSIBLE ?= ansible_8.5 + +.PHONY: converge destroy verify test lint + +default: converge + +converge: + @hooks/converge + +destroy: + @hooks/destroy + +verify: + @hooks/verify + +test: + @hooks/test + +lint: + @hooks/lint diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/sysctl/README.md b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/sysctl/README.md new file mode 100644 index 0000000..46a69b7 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/sysctl/README.md @@ -0,0 +1,26 @@ +# Ansible Role: `bodsch.core.sysctl` + +Configure sysctl on your system. + +## Role Variables + +```yaml +sysctl_directory: /etc/sysctl.d + +sysctl_rules: + - name: sshd + rules: + net.ipv4.ip_nonlocal_bind: 1 + net.ipv6.ip_nonlocal_bind: 1 + - name: openvpn + rules: + net.ipv4.ip_forward: 1 + +sysctl_reload: true +``` + +--- + +## Author + +- Bodo Schulz diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/sysctl/defaults/main.yml b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/sysctl/defaults/main.yml new file mode 100644 index 0000000..5f59ac0 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/sysctl/defaults/main.yml @@ -0,0 +1,16 @@ +--- + +sysctl_directory: /etc/sysctl.d + +sysctl_rules: [] +# - name: sshd +# rules: +# net.ipv4.ip_nonlocal_bind: 1 +# net.ipv6.ip_nonlocal_bind: 1 +# - name: openvpn +# rules: +# net.ipv4.ip_forward: 1 + +sysctl_reload: true + +... diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/sysctl/handlers/main.yml b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/sysctl/handlers/main.yml new file mode 100644 index 0000000..298c4d4 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/sysctl/handlers/main.yml @@ -0,0 +1,10 @@ +--- + +- name: reload sysctl + ansible.builtin.shell: + cmd: "sysctl --load {{ sysctl_directory }}/{{ sysctl_config_file }}" + failed_when: false + when: + - sysctl_reload + +... diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/sysctl/meta/main.yml b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/sysctl/meta/main.yml new file mode 100644 index 0000000..d45ee49 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/sysctl/meta/main.yml @@ -0,0 +1,32 @@ +--- + +galaxy_info: + role_name: sysctl + + author: Bodo Schulz + description: manage generic sysctl + + license: Apache + min_ansible_version: "2.9" + + platforms: + - name: ArchLinux + - name: Debian + versions: + # 10 + - buster + # 11 + - bullseye + - bookworm + - name: Ubuntu + versions: + # 20.04 + - focal + + galaxy_tags: + - system + - sysctl + +dependencies: [] + +... 
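Taken together, `sysctl_rules` drives one file per entry under `sysctl_directory`, and the `reload sysctl` handler reloads only the file that changed. A small sketch of the expected effect, using the README's own example values; the rendered file content is inferred from the `sysctl.conf.j2` template that appears further below:

```yaml
# Input (from the README example above)
sysctl_rules:
  - name: sshd
    rules:
      net.ipv4.ip_nonlocal_bind: 1
      net.ipv6.ip_nonlocal_bind: 1

# Expected result (sketch):
#   /etc/sysctl.d/sshd.conf is created containing:
#     net.ipv4.ip_nonlocal_bind = 1
#     net.ipv6.ip_nonlocal_bind = 1
#   and the "reload sysctl" handler runs:
#     sysctl --load /etc/sysctl.d/sshd.conf
```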
diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/sysctl/tasks/configure/main.yml b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/sysctl/tasks/configure/main.yml new file mode 100644 index 0000000..1ebb376 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/sysctl/tasks/configure/main.yml @@ -0,0 +1,28 @@ +--- + +- name: create /etc/sysctl.d + ansible.builtin.file: + state: directory + name: /etc/sysctl.d + mode: "0755" + tags: + - sysctl + +- name: create /etc/sysctl.conf + ansible.builtin.template: + src: etc/sysctl.conf.j2 + dest: /etc/sysctl.conf + mode: "0644" + tags: + - sysctl + +- name: create sysctl setting file + ansible.builtin.include_tasks: sysctl.yml + loop: + "{{ sysctl_rules }}" + loop_control: + loop_var: item + when: + - sysctl_rules is defined + - sysctl_rules | count > 0 + - ansible_connection not in [ "container", "docker", "community.docker.docker" ] diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/sysctl/tasks/configure/sysctl.yml b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/sysctl/tasks/configure/sysctl.yml new file mode 100644 index 0000000..6d4a87a --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/sysctl/tasks/configure/sysctl.yml @@ -0,0 +1,20 @@ +--- + +- name: define sysctl config file + ansible.builtin.set_fact: + sysctl_config_file: "{{ item.name }}.conf" + +- name: create sysctl setting file + ansible.builtin.template: + src: sysctl.conf.j2 + dest: "{{ sysctl_directory }}/{{ sysctl_config_file }}" + mode: "0644" + notify: + - reload sysctl + when: + - item.name is defined + - item.rules is defined + - item.rules | count > 0 + - ansible_connection not in [ "container", "docker", "community.docker.docker" ] + loop_control: + label: "{{ item.name }}" diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/sysctl/tasks/installation.yml b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/sysctl/tasks/installation.yml new file mode 100644 index 0000000..9d04dd2 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/sysctl/tasks/installation.yml @@ -0,0 +1,10 @@ +--- + +- name: install procps + ansible.builtin.package: + name: + - procps + state: present + +... + diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/sysctl/tasks/main.yml b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/sysctl/tasks/main.yml new file mode 100644 index 0000000..2cdbbe2 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/sysctl/tasks/main.yml @@ -0,0 +1,12 @@ +--- + +- name: prepare + ansible.builtin.include_tasks: prepare.yaml + +- name: install + ansible.builtin.include_tasks: installation.yml + +- name: configure + ansible.builtin.include_tasks: configure/main.yml + +...
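Note the connection guard on both configure tasks: inside containers (`container`, `docker`, `community.docker.docker`) kernel parameters usually cannot be set, so the rule files are skipped there. For readability only, here is a hypothetical condensed equivalent of the include/loop mechanics above; it is not the role's code:

```yaml
# Sketch of what tasks/configure/main.yml + sysctl.yml amount to (illustrative)
- name: create sysctl setting file
  ansible.builtin.template:
    src: sysctl.conf.j2
    dest: "{{ sysctl_directory }}/{{ item.name }}.conf"
    mode: "0644"
  loop: "{{ sysctl_rules }}"
  loop_control:
    label: "{{ item.name }}"
  notify:
    - reload sysctl
  when:
    - item.rules | default({}) | count > 0
    - ansible_connection not in ["container", "docker", "community.docker.docker"]
```

One caveat of the condensed form: the `reload sysctl` handler reads `sysctl_config_file`, which the original sets per item via `set_fact`, so the role's two-file layout is not just cosmetic.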
diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/sysctl/tasks/prepare.yaml b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/sysctl/tasks/prepare.yaml new file mode 100644 index 0000000..2fc1894 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/sysctl/tasks/prepare.yaml @@ -0,0 +1,23 @@ +--- + +- name: include OS specific configuration ({{ ansible_facts.distribution }} ({{ ansible_facts.os_family }}) {{ ansible_facts.distribution_major_version }}) + ansible.builtin.include_vars: "{{ lookup('first_found', params) }}" + vars: + params: + paths: + - "vars" + files: + # eg. debian-10 / ubuntu-20.04 / centos-8 / oraclelinux-8 + - "{{ ansible_facts.distribution | lower }}-{{ ansible_facts.distribution_major_version }}.yml" + # eg. archlinux-systemd / archlinux-openrc + - "{{ ansible_facts.distribution | lower }}-{{ ansible_facts.service_mgr | lower }}.yml" + # eg. debian / ubuntu / centos / oraclelinux + - "{{ ansible_facts.distribution | lower }}.yml" + # eg. redhat / debian / archlinux + - "{{ ansible_facts.os_family | lower }}.yml" + # artixlinux + - "{{ ansible_facts.os_family | lower | replace(' ', '') }}.yml" + - default.yml + skip: true + +... diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/sysctl/templates/etc/sysctl.conf.j2 b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/sysctl/templates/etc/sysctl.conf.j2 new file mode 100644 index 0000000..fc8db35 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/sysctl/templates/etc/sysctl.conf.j2 @@ -0,0 +1,68 @@ +# {{ ansible_managed }} +# +# /etc/sysctl.conf - Configuration file for setting system variables +# See /etc/sysctl.d/ for additional system variables. +# See sysctl.conf (5) for information. +# + +#kernel.domainname = example.com + +# Uncomment the following to stop low-level messages on console +#kernel.printk = 3 4 1 3 + +################################################################### +# Functions previously found in netbase +# + +# Uncomment the next two lines to enable Spoof protection (reverse-path filter) +# Turn on Source Address Verification in all interfaces to +# prevent some spoofing attacks +#net.ipv4.conf.default.rp_filter=1 +#net.ipv4.conf.all.rp_filter=1 + +# Uncomment the next line to enable TCP/IP SYN cookies +# See http://lwn.net/Articles/277146/ +# Note: This may impact IPv6 TCP sessions too +#net.ipv4.tcp_syncookies=1 + +# Uncomment the next line to enable packet forwarding for IPv4 +#net.ipv4.ip_forward=1 + +# Uncomment the next line to enable packet forwarding for IPv6 +# Enabling this option disables Stateless Address Autoconfiguration +# based on Router Advertisements for this host +#net.ipv6.conf.all.forwarding=1 + + +################################################################### +# Additional settings - these settings can improve the network +# security of the host and prevent against some network attacks +# including spoofing attacks and man in the middle attacks through +# redirection. Some network environments, however, require that these +# settings are disabled so review and enable them as needed. 
+# +# Do not accept ICMP redirects (prevent MITM attacks) +#net.ipv4.conf.all.accept_redirects = 0 +#net.ipv6.conf.all.accept_redirects = 0 +# _or_ +# Accept ICMP redirects only for gateways listed in our default +# gateway list (enabled by default) +# net.ipv4.conf.all.secure_redirects = 1 +# +# Do not send ICMP redirects (we are not a router) +#net.ipv4.conf.all.send_redirects = 0 +# +# Do not accept IP source route packets (we are not a router) +#net.ipv4.conf.all.accept_source_route = 0 +#net.ipv6.conf.all.accept_source_route = 0 +# +# Log Martian Packets +#net.ipv4.conf.all.log_martians = 1 +# + +################################################################### +# Magic system request Key +# 0=disable, 1=enable all, >1 bitmask of sysrq functions +# See https://www.kernel.org/doc/html/latest/admin-guide/sysrq.html +# for what other values do +#kernel.sysrq=438 diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/sysctl/templates/sysctl.conf.j2 b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/sysctl/templates/sysctl.conf.j2 new file mode 100644 index 0000000..3dcb1fe --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/sysctl/templates/sysctl.conf.j2 @@ -0,0 +1,4 @@ +# {{ ansible_managed }} +{% for k, v in item.rules.items() %} +{{ k }} = {{ v }} +{% endfor %} diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/sysctl/vars/archlinux.yml b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/sysctl/vars/archlinux.yml new file mode 100644 index 0000000..c81cf5b --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/sysctl/vars/archlinux.yml @@ -0,0 +1,3 @@ +--- + +... diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/sysctl/vars/artixlinux.yml b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/sysctl/vars/artixlinux.yml new file mode 100644 index 0000000..c81cf5b --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/sysctl/vars/artixlinux.yml @@ -0,0 +1,3 @@ +--- + +... diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/sysctl/vars/debian.yml b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/sysctl/vars/debian.yml new file mode 100644 index 0000000..c81cf5b --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/sysctl/vars/debian.yml @@ -0,0 +1,3 @@ +--- + +... diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/sysctl/vars/main.yml b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/sysctl/vars/main.yml new file mode 100644 index 0000000..9381f49 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/sysctl/vars/main.yml @@ -0,0 +1,5 @@ +--- + +sysctl_packages: [] + +... 
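The `first_found` lookup in `prepare.yaml` walks from the most to the least specific filename, and `skip: true` lets it fall through silently when a candidate does not exist. For a hypothetical Debian 12 host the candidate order would be (the two `os_family` patterns collapse to the same name on Debian):

```yaml
# Candidate vars files for a Debian 12 host, most specific first
- vars/debian-12.yml       # distribution + major version
- vars/debian-systemd.yml  # distribution + service manager
- vars/debian.yml          # distribution (shipped above, intentionally empty)
- vars/debian.yml          # os_family fallback (same file on Debian)
- vars/default.yml         # last resort (not shipped, skipped)
```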
diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/syslog_ng/.ansible-lint b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/syslog_ng/.ansible-lint new file mode 100644 index 0000000..48763f0 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/syslog_ng/.ansible-lint @@ -0,0 +1,5 @@ +--- + +skip_list: + - name[casing] + - name[template] diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/syslog_ng/.editorconfig b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/syslog_ng/.editorconfig new file mode 100644 index 0000000..898cdbd --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/syslog_ng/.editorconfig @@ -0,0 +1,23 @@ +# EditorConfig helps developers define and maintain consistent +# coding styles between different editors and IDEs +# https://editorconfig.org/ + + +root = true + +[*] +indent_style = space +indent_size = 2 + +end_of_line = lf +charset = utf-8 +trim_trailing_whitespace = true +insert_final_newline = true +max_line_length = 100 + +[*.py] +indent_style = space +indent_size = 4 + +[*.md] +trim_trailing_whitespace = false diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/syslog_ng/.flake8 b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/syslog_ng/.flake8 new file mode 100644 index 0000000..1962f7e --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/syslog_ng/.flake8 @@ -0,0 +1,19 @@ +[flake8] + +# E221 multiple spaces before operator +# E251 unexpected spaces around keyword / parameter equals + +ignore = E221,E251 + +exclude = + # No need to traverse our git directory + .git, + # There's no value in checking cache directories + __pycache__, + .tox + +# E203: https://github.com/python/black/issues/315 +# ignore = D,E741,W503,W504,H,E501,E203 + +max-line-length = 195 + diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/syslog_ng/.gitignore b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/syslog_ng/.gitignore new file mode 100644 index 0000000..3d35e6a --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/syslog_ng/.gitignore @@ -0,0 +1,6 @@ +.tox +.galaxy_install_info +*kate-swp +__pycache__ +.cache +.directory diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/syslog_ng/.yamllint b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/syslog_ng/.yamllint new file mode 100644 index 0000000..20fd7aa --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/syslog_ng/.yamllint @@ -0,0 +1,40 @@ +--- +# Based on ansible-lint config +extends: default + +rules: + braces: + max-spaces-inside: 1 + level: error + brackets: + max-spaces-inside: 1 + level: error + colons: + max-spaces-after: -1 + level: error + commas: + max-spaces-after: -1 + level: error + comments: disable + comments-indentation: disable + document-start: disable + empty-lines: + max: 3 + level: error + hyphens: + level: error + indentation: + spaces: 2 + key-duplicates: enable + line-length: + max: 195 + level: warning + new-line-at-end-of-file: disable + new-lines: + type: unix + trailing-spaces: disable + truthy: disable + +ignore: | + molecule/ + .github diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/syslog_ng/CONTRIBUTING.md 
b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/syslog_ng/CONTRIBUTING.md new file mode 100644 index 0000000..e3cd4cc --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/syslog_ng/CONTRIBUTING.md @@ -0,0 +1,31 @@ +Contributing +============ +If you want to contribute to a project and make it better, your help is very welcome. +Contributing is also a great way to learn more about social coding on Github, new technologies and +and their ecosystems and how to make constructive, helpful bug reports, feature requests and the +noblest of all contributions: a good, clean pull request. + +### How to make a clean pull request + +Look for a project's contribution instructions. If there are any, follow them. + +- Create a personal fork of the project on Github. +- Clone the fork on your local machine. Your remote repo on Github is called `origin`. +- Add the original repository as a remote called `upstream`. +- If you created your fork a while ago be sure to pull upstream changes into your local repository. +- Create a new branch to work on! Branch from `develop` if it exists, else from `master`. +- Implement/fix your feature, comment your code. +- Follow the code style of the project, including indentation. +- If the project has tests run them! +- Write or adapt tests as needed. +- Add or change the documentation as needed. +- Squash your commits into a single commit. Create a new branch if necessary. +- Push your branch to your fork on Github, the remote `origin`. +- From your fork open a pull request in the correct branch. Target the project's `develop` branch if there is one, else go for `master`! +- If the maintainer requests further changes just push them to your branch. The PR will be updated automatically. +- Once the pull request is approved and merged you can pull the changes from `upstream` to your local repo and delete + your extra branch(es). + +And last but not least: Always write your commit messages in the present tense. +Your commit message should describe what the commit, when applied, does to the +code – not what you did to the code. diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/syslog_ng/LICENSE b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/syslog_ng/LICENSE new file mode 100644 index 0000000..df702e3 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/syslog_ng/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. 
+ + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [2020] [Bodo Schulz] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/syslog_ng/Makefile b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/syslog_ng/Makefile new file mode 100644 index 0000000..bfaab7c --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/syslog_ng/Makefile @@ -0,0 +1,22 @@ +# +export TOX_SCENARIO ?= default +export TOX_ANSIBLE ?= ansible_8.5 + +.PHONY: converge destroy verify test lint + +default: converge + +converge: + @hooks/converge + +destroy: + @hooks/destroy + +verify: + @hooks/verify + +test: + @hooks/test + +lint: + @hooks/lint diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/syslog_ng/README.md b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/syslog_ng/README.md new file mode 100644 index 0000000..eb3a70e --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/syslog_ng/README.md @@ -0,0 +1,205 @@ + +# Ansible Role: `bodsch.core.syslog_ng` + + +Installs and configures a classic syslog-ng service for processing log files away from journald. + +This in turn allows a simple, central logging service to be run on the user's own network. + +**This role disables journald and takes over its function!** + +## Usage + +```yaml +syslog_options: {} + +syslog_logs: {} + +syslog_sources: + src: + - comment: messages generated by syslog-ng + system: + +syslog_message_templates: {} + +syslog_journald: + wipe_persistent: true + config: + Storage: volatile + ForwardToSyslog: 'yes' + +syslog_hardened: false + +syslog_systemd_template: default +``` + +### `syslog_options` + +```yaml +syslog_options: + chain_hostnames: false + create_dirs: false + dns-cache: false + flush_lines: 0 + group: "adm" + keep_hostname: true + log_fifo_size: 10000 + mark_freq: 3600 + perm: "0640" + # obsoleted keyword + # please update your configuration! + # keyword='stats_freq', change='Use the stats() block. E.g. stats(freq(x)); + stats_freq: 43200 + time_reopen: 10 + ts_format: iso + use_dns: false + use_fqdn: false + # Available in syslog-ng 4.1 and later + stats: + freq: "" # 1 + level: "" # 1 + lifetime: "" # 1000 + max-dynamics: "" # 10000 + syslog-stats: "" # true +``` + +### `syslog_logs` + +Additional log destinations to be merged with the [default](./vars/main.yml) (`syslog_defaults_logs`) ones. + +`syslog_logs` + +A hash of log entries, each combining a file destination, a filter, and a log statement. +A simplified configuration that should be sufficient for most cases. + +| parameter | required | default | description | +| :---- | :---- | :---- | :----- | +| `source` | `false` | `src` | source of logging messages: `src` or `kernsrc` | +| `destination.file` | `false` | `${key}.log` | log file relative to `/var/log`. (The base directory is created automatically.) | +| `destination.udp` | `false` | `-` | `udp` log destination to a remote syslog server. | +| `destination.tcp` | `false` | `-` | `tcp` log destination to a remote syslog server. | +| `filter.name` | `false` | `${key}` | An optional name for the filter. If it is not specified, `${key}` is used as the name. | +| `filter.filter` | `false` | `program(${key})` | The syslog filter. This can be a simple string or a list of strings.
The list items are joined with `and`. | +| `final` | `false` | `false` | whether to set the final flag | + +> **Only one log destination may be defined!** +> **So either `file`, or `udp` / `tcp`!** + +#### Example + +```yaml +syslog_logs: + kern: + source: kernsrc + destination: + file: kernel.log + filter: + name: kern + filter: facility(kern) + messages: + file_name: messages.log + filter: + filter: + - level(info..alert) + - level(debug..notice) + - not facility(auth, authpriv, mail, cron, kern, local7) + - not program (syslog-ng) + - not program (dhcpd) + - not program(named) + iptables: + source: kernsrc + destination: + file: iptables.log + filter: + filter: message("^(\\[.*\..*\] |)ip6?tables.*") + final: true + remote: + source: net + template: nginx + destination: + file: "remote/nginx/${FULLHOST}.log" + loghost: + source: s_remote + destination: + udp: + ip: "10.10.0.1" + port: 514 + spoof_source: true +``` + + +### `syslog_sources` + +Additional log sources to be merged with the [default](./vars/main.yml) (`syslog_defaults_sources`) ones. + + +| parameter | description | +| :---- | :----- | +| `comment` | an optional comment | +| `$driver` | key is the name of the source driver, typically `unix_stream` or `file`
(underscores are replaced with hyphens)
value is the driver parameter | + + +#### Example + +```yaml +syslog_sources: + src: + - comment: messages generated by syslog-ng + internal: + kernsrc: + - comment: messages from the kernel + file: /proc/kmsg + net: + - comment: messages from syslog-clients + udp: + s_remote: + - comment: remote sources on port 5140 + tcp: + ip: 0.0.0.0 + port: 5140 + udp: + ip: 0.0.0.0 + port: 5140 +``` + + +### `syslog_message_templates` + +You can define your own templates for log files. +This makes it possible to give each log file a suitable output format. + +By default, a template called `tmpl` is created, which is defined as follows: + +`'${YEAR}-${MONTH}-${DAY}T${HOUR}:${MIN}:${SEC} ${LEVEL} ${MSGHDR}${MSG}\n'` + +By default, each destination is assigned this template. +If this is not desired, `use_template` must be set in the `syslog_logs` configuration. + +```yaml +syslog_logs: + ansible: + file_name: ansible.log + filter: + filter: program(ansible) + use_template: false +``` + +If a destination is to have its own output format, the defined template must be specified under `template`. + +```yaml +syslog_message_templates: + nginx: '${MSG}\n' + +syslog_logs: + remote: + source: net + template: nginx + destination: + file: "remote/${FULLHOST}.log" +``` + +--- + +## Author and License + +- Bodo Schulz diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/syslog_ng/defaults/main.yml b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/syslog_ng/defaults/main.yml new file mode 100644 index 0000000..bcbc205 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/syslog_ng/defaults/main.yml @@ -0,0 +1,26 @@ +--- + +syslog_options: {} + +syslog_logs: {} + +syslog_sources: + src: + - comment: messages generated by syslog-ng + system: + +syslog_message_templates: {} + +syslog_journald: + wipe_persistent: false + config: + Storage: volatile + ForwardToSyslog: 'yes' + +syslog_hardened: false + +syslog_systemd_template: default + +syslog_includes: [] + +... 
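For orientation, here is a minimal sketch of how a playbook might apply this role with the defaults above. The `logserver` group and the collector address are assumptions for the example, not part of the role:

```yaml
---
# Sketch only: apply bodsch.core.syslog_ng with a single override.
# "logserver" and the collector IP (documentation range) are placeholders.
- name: configure a central syslog service
  hosts: logserver
  become: true

  vars:
    # forward messages from the default 'src' source to a remote collector;
    # the structure mirrors the loghost example from the README above
    syslog_logs:
      loghost:
        source: src
        destination:
          udp:
            ip: "192.0.2.10"
            port: 514

  roles:
    - role: bodsch.core.syslog_ng
```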
diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/syslog_ng/handlers/main.yml b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/syslog_ng/handlers/main.yml new file mode 100644 index 0000000..bb5098f --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/syslog_ng/handlers/main.yml @@ -0,0 +1,39 @@ +--- +- name: reload systemctl daemon + ansible.builtin.systemd: + daemon_reload: true + when: + - ansible_facts.service_mgr == 'systemd' + +- name: reload syslog-ng + ansible.builtin.service: + name: '{{ syslog_unit_file }}' + state: reloaded + when: + - not ansible_check_mode + - systemd_facts["ansible_facts"]["services"][syslog_unit_file + ".service"]["state"] | default('not-found') == "running" + +- name: restart syslog-ng + ansible.builtin.service: + name: '{{ syslog_unit_file }}' + state: restarted + when: + - not ansible_check_mode + - systemd_facts["ansible_facts"]["services"][syslog_unit_file + ".service"]["state"] | default('not-found') == "stopped" + +- name: validate syslog-ng config + bodsch.core.syslog_cmd: + parameters: + - --syntax-only + check_mode: true + when: + - not ansible_check_mode + +- name: restart systemd-journald + ansible.builtin.service: + name: systemd-journald + state: restarted + +- name: wait for clean startup + ansible.builtin.pause: + seconds: 5 diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/syslog_ng/hooks/converge b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/syslog_ng/hooks/converge new file mode 100755 index 0000000..0c50932 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/syslog_ng/hooks/converge @@ -0,0 +1,3 @@ +#!/usr/bin/env bash + +hooks/tox.sh "converge" diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/syslog_ng/hooks/destroy b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/syslog_ng/hooks/destroy new file mode 100755 index 0000000..b4a3f8d --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/syslog_ng/hooks/destroy @@ -0,0 +1,3 @@ +#!/usr/bin/env bash + +hooks/tox.sh "destroy" diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/syslog_ng/hooks/lint b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/syslog_ng/hooks/lint new file mode 100755 index 0000000..ef226a0 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/syslog_ng/hooks/lint @@ -0,0 +1,3 @@ +#!/usr/bin/env bash + +hooks/tox.sh "lint" diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/syslog_ng/hooks/molecule.rc b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/syslog_ng/hooks/molecule.rc new file mode 100644 index 0000000..78c8621 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/syslog_ng/hooks/molecule.rc @@ -0,0 +1,74 @@ + +TOX_ARGS= + +if [ -n "${TOX_SCENARIO}" ] +then + TOX_ARGS="--scenario-name ${TOX_SCENARIO}" +fi + +TOX_OPTS="-e ${TOX_ANSIBLE}" + +vercomp() { + + [[ $1 == $2 ]] && return 0 + v1=$(echo "$1" | sed -e 's|-|.|g') + v2=$(echo "$2" | sed -e 's|-|.|g') + + local IFS=. 
+ # compare field by field; use the normalized versions computed above + # (dashes replaced by dots), not the raw arguments + # return codes: 0 = equal, 1 = first is newer, 2 = second is newer + local i ver1=($v1) ver2=($v2) + # fill empty fields in ver1 with zeros + for ((i=${#ver1[@]}; i<${#ver2[@]}; i++)) + do + ver1[i]=0 + done + + for ((i=0; i<${#ver1[@]}; i++)) + do + if [[ -z ${ver2[i]} ]] + then + # fill empty fields in ver2 with zeros + ver2[i]=0 + fi + if ((10#${ver1[i]} > 10#${ver2[i]})) + then + return 1 + fi + if ((10#${ver1[i]} < 10#${ver2[i]})) + then + return 2 + fi + done + return 0 +} + +install_collection() { + local collection="${1}" + + echo "Install the required collection '${collection}'" + ansible-galaxy collection install ${collection} +} + +remove_collection() { + + local collection="${1}" + + namespace="$(echo "${collection}" | cut -d '.' -f1)" + name="$(echo "${collection}" | cut -d '.' -f2)" + + collection="${HOME}/.ansible/collections/ansible_collections/${namespace}/${name}" + + rm \ + --recursive \ + --force \ + "${collection}" +} + +publish() { + + TOKEN="${HOME}/.ansible/galaxy_token" + + if [ -e "${TOKEN}" ] + then + ansible-galaxy import --token=$(cat "${TOKEN}") bodsch # "???" + fi +} diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/syslog_ng/hooks/test b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/syslog_ng/hooks/test new file mode 100755 index 0000000..2869139 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/syslog_ng/hooks/test @@ -0,0 +1,3 @@ +#!/usr/bin/env bash + +hooks/tox.sh "test" diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/syslog_ng/hooks/tox.sh b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/syslog_ng/hooks/tox.sh new file mode 100755 index 0000000..c93de29 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/syslog_ng/hooks/tox.sh @@ -0,0 +1,52 @@ +#!/usr/bin/env bash + +. hooks/molecule.rc + +TOX_TEST="${1}" + +if [ -f "./collections.yml" ] +then + for collection in $(grep -v "#" collections.yml | grep "^ - name: " | awk -F ': ' '{print $2}') + do + collections_installed="$(ansible-galaxy collection list | grep ${collection} 2> /dev/null)" + + if [ -z "${collections_installed}" ] + then + install_collection ${collection} + else + collection_version=$(echo "${collections_installed}" | awk -F ' ' '{print $2}') + version="$(grep -v "#" collections.yml | grep -A1 "^ - name: ${collection}" | grep "version: " 2> /dev/null | awk -F ': ' '{print $2}' | sed -e 's|=||' -e 's|>||' -e 's|"||g')" + + echo "The required collection '${collection}' is installed in version ${collection_version}." + + if [ ! -z "${version}" ] + then + + vercomp "${version}" "${collection_version}" + + case $?
in + 0) op='=' ;; + 1) op='>' ;; + 2) op='<' ;; + esac + + if [[ $op = "=" ]] || [[ $op = ">" ]] + then + # echo "FAIL: Expected '$3', Actual '$op', Arg1 '$1', Arg2 '$2'" + echo "re-install for version ${version}" + + remove_collection ${collection} + install_collection ${collection} + else + : + # echo "Pass: '$1 $op $2'" + fi + else + : + fi + fi + done + echo "" +fi + +tox ${TOX_OPTS} -- molecule ${TOX_TEST} ${TOX_ARGS} diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/syslog_ng/hooks/verify b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/syslog_ng/hooks/verify new file mode 100755 index 0000000..5f436af --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/syslog_ng/hooks/verify @@ -0,0 +1,3 @@ +#!/usr/bin/env bash + +hooks/tox.sh "verify" diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/syslog_ng/meta/main.yml b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/syslog_ng/meta/main.yml new file mode 100644 index 0000000..c68ce24 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/syslog_ng/meta/main.yml @@ -0,0 +1,27 @@ +--- +galaxy_info: + role_name: syslog_ng + + author: Bodo Schulz + description: Manage syslog-ng configuration + license: Apache + + min_ansible_version: "2.9" + platforms: + - name: ArchLinux + - name: Debian + versions: + # 10 + - buster + - bullseye + - bookworm + - name: Ubuntu + versions: + # 20.04 + - focal + + galaxy_tags: + - system + - syslog + +dependencies: [] diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/syslog_ng/tasks/configure.yml b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/syslog_ng/tasks/configure.yml new file mode 100644 index 0000000..91d09fe --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/syslog_ng/tasks/configure.yml @@ -0,0 +1,111 @@ +--- + +- name: merge syslog configuration between defaults and custom + ansible.builtin.set_fact: + syslog_options: "{{ syslog_defaults_options | combine(syslog_options, recursive=True) }}" + syslog_sources: "{{ syslog_defaults_sources | combine(syslog_sources, recursive=True) }}" + syslog_message_templates: "{{ syslog_defaults_message_templates | combine(syslog_message_templates, recursive=True) }}" + syslog_logs: "{{ syslog_defaults_logs | combine(syslog_logs, recursive=True) }}" + +- name: define log directories + ansible.builtin.set_fact: + syslog_server_log_destinations: "{{ syslog_logs | bodsch.core.log_directories('/var/log') }}" + +- name: detect config version + bodsch.core.syslog_cmd: + parameters: + - --version + when: + - not running_in_check_mode + register: _syslog_config_version + tags: + - syslog-ng + - configuration + +- name: set syslog config version + ansible.builtin.set_fact: + syslog_config_version: '{{ _syslog_config_version.version }}' + when: + - not running_in_check_mode + - _syslog_config_version is defined + - _syslog_config_version.version is defined + - _syslog_config_version.version | int != 0 + tags: + - syslog-ng + - configuration + +- name: assert syslog_config_version + ansible.builtin.assert: + that: syslog_config_version | length != 0 + msg: "Could not find a valid syslog-ng version :(" + when: + - not running_in_check_mode + tags: + - syslog-ng + - configuration + +- name: create /etc/syslog-ng/conf.d + ansible.builtin.file: + state: directory + name: /etc/syslog-ng/conf.d + mode: "0750" + when: + - not
running_in_check_mode + tags: + - syslog-ng + - configuration + +- name: create directory for syslog clients + ansible.builtin.file: + state: directory + name: "{{ item }}" + mode: "0775" + loop: "{{ syslog_server_log_destinations }}" + when: + - not running_in_check_mode + - syslog_server_log_destinations is defined + - syslog_server_log_destinations | default([]) | count > 0 + tags: + - syslog-ng + - configuration + +- name: write split configuration files + ansible.builtin.template: + src: "conf.d/{{ item }}.j2" + dest: "/etc/syslog-ng/conf.d/{{ item }}" + mode: "0644" + when: + - not running_in_check_mode + notify: + - validate syslog-ng config + - reload syslog-ng + - restart syslog-ng + loop: + - sources.conf + - destinations.conf + - filters.conf + - logs.conf + tags: + - syslog-ng + - configuration + +- name: configure syslog-ng.conf + ansible.builtin.template: + src: syslog-ng.conf.j2 + dest: /etc/syslog-ng/syslog-ng.conf + mode: "0644" + backup: true + when: + - not running_in_check_mode + notify: + - validate syslog-ng config + - reload syslog-ng + - restart syslog-ng + tags: + - syslog-ng + - configuration + +- name: flush handlers + ansible.builtin.meta: flush_handlers + when: + - not running_in_check_mode diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/syslog_ng/tasks/configure_journald.yml b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/syslog_ng/tasks/configure_journald.yml new file mode 100644 index 0000000..9d6065e --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/syslog_ng/tasks/configure_journald.yml @@ -0,0 +1,30 @@ +--- + +- name: deactivate journald + when: + - not running_in_check_mode + - ansible_facts.service_mgr == 'systemd' + - (syslog_journald.config is defined and + syslog_journald.config.items() | count != 0) or + (syslog_journald.wipe_persistent is defined and + syslog_journald.wipe_persistent | bool) + block: + - name: create journald configuration + ansible.builtin.template: + src: journald.conf.j2 + dest: /etc/systemd/journald.conf + mode: "0644" + owner: root + group: root + when: + - syslog_journald.config is defined + - syslog_journald.config.items() | count != 0 + notify: restart systemd-journald + + - name: wipe persistent journal directory + ansible.builtin.file: + path: /var/log/journal + state: absent + when: + - syslog_journald.wipe_persistent is defined + - syslog_journald.wipe_persistent | bool diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/syslog_ng/tasks/install.yml b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/syslog_ng/tasks/install.yml new file mode 100644 index 0000000..67a1f46 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/syslog_ng/tasks/install.yml @@ -0,0 +1,77 @@ +--- + +- name: install syslog-ng + ansible.builtin.package: + name: "{{ syslog_packages }}" + state: present + when: + - not running_in_check_mode + register: install_syslog + tags: + - syslog-ng + - install + +- name: systemd + when: + - not running_in_check_mode + - ansible_facts.service_mgr == "systemd" + block: + - name: populate service facts + ansible.builtin.service_facts: + register: systemd_facts + no_log: true + tags: + - syslog-ng + - install + + - name: set systemd unit name + ansible.builtin.set_fact: + syslog_unit_file: "{{ ansible_facts.services | bodsch.core.get_service('syslog-ng') }}" + tags: + - syslog-ng + - install + + - name: detect systemd template unit +
ansible.builtin.stat: + path: /etc/default/{{ syslog_unit_file }}{{ syslog_systemd_template }} + register: systemd_template + when: + - syslog_unit_file is match('.*@') + tags: + - syslog-ng + - install + + - name: use systemd template unit + when: + - syslog_unit_file is match('.*@') + - systemd_template.stat.exists + block: + - name: set link for template unit + ansible.builtin.file: + src: /lib/systemd/system/{{ syslog_unit_file }}.service + dest: /etc/systemd/system/{{ syslog_unit_file }}{{ syslog_systemd_template }}.service + state: link + notify: + - reload systemctl daemon + tags: + - syslog-ng + - install + + - name: define systemd template unit + ansible.builtin.set_fact: + syslog_unit_file: "{{ syslog_unit_file }}{{ syslog_systemd_template }}" + tags: + - syslog-ng + - install + +- name: create custom fact file + bodsch.core.facts: + name: syslog_ng + facts: + version: "{{ syslog_config_version }}" + service_unit: "{{ syslog_unit_file | default('') }}" + when: + - not running_in_check_mode + tags: + - syslog-ng + - install diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/syslog_ng/tasks/main.yml b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/syslog_ng/tasks/main.yml new file mode 100644 index 0000000..f2e13bf --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/syslog_ng/tasks/main.yml @@ -0,0 +1,23 @@ +--- + +- name: prepare + ansible.builtin.include_tasks: prepare.yml + +- name: install + ansible.builtin.include_tasks: install.yml + +- name: configure + ansible.builtin.include_tasks: configure.yml + +- name: configure journald + ansible.builtin.include_tasks: configure_journald.yml + when: + - ansible_facts.service_mgr is defined + - ansible_facts.service_mgr | lower == "systemd" + +- name: service + ansible.builtin.include_tasks: service.yml + when: + - not ansible_check_mode + +... diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/syslog_ng/tasks/prepare.yml b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/syslog_ng/tasks/prepare.yml new file mode 100644 index 0000000..820840e --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/syslog_ng/tasks/prepare.yml @@ -0,0 +1,36 @@ +--- + +- name: include OS specific configuration ({{ ansible_facts.distribution }} ({{ ansible_facts.os_family }}) {{ ansible_facts.distribution_major_version }}) + ansible.builtin.include_vars: "{{ lookup('first_found', params) }}" + vars: + params: + paths: + - "vars" + files: + # eg. debian-10 / ubuntu-20.04 / centos-8 / oraclelinux-8 + - "{{ ansible_facts.distribution | lower }}-{{ ansible_facts.distribution_major_version }}.yml" + # eg. archlinux-systemd / archlinux-openrc + - "{{ ansible_facts.distribution | lower }}-{{ ansible_facts.service_mgr | lower }}.yml" + # eg. debian / ubuntu / centos / oraclelinux + - "{{ ansible_facts.distribution | lower }}.yml" + # eg. 
redhat / debian / archlinux + - "{{ ansible_facts.os_family | lower }}.yml" + # artixlinux + - "{{ ansible_facts.os_family | lower | replace(' ', '') }}.yml" + skip: true + +- name: detect ansible check_mode + bodsch.core.check_mode: + register: _check_mode + +- name: define running_in_check_mode + ansible.builtin.set_fact: + running_in_check_mode: '{{ _check_mode.check_mode }}' + +- name: install requirements + ansible.builtin.package: + name: "{{ syslog_dependencies }}" + state: present + when: + - syslog_dependencies | default([]) | count > 0 + - not running_in_check_mode diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/syslog_ng/tasks/service.yml b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/syslog_ng/tasks/service.yml new file mode 100644 index 0000000..b01d0b5 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/syslog_ng/tasks/service.yml @@ -0,0 +1,26 @@ +--- + +- name: flush handlers + ansible.builtin.meta: flush_handlers + +- name: systemd + when: + - not running_in_check_mode + - ansible_facts.service_mgr == "systemd" + block: + - name: enable and start {{ syslog_unit_file }}.service + ansible.builtin.service: + name: '{{ syslog_unit_file }}' + enabled: true + state: started + when: + - syslog_unit_file is defined + +- name: enable and start syslog-ng (no systemd) + when: + - not running_in_check_mode + - ansible_facts.service_mgr != "systemd" + ansible.builtin.service: + name: syslog-ng + enabled: true + state: started diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/syslog_ng/templates/conf.d/destinations.conf.j2 b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/syslog_ng/templates/conf.d/destinations.conf.j2 new file mode 100644 index 0000000..5b08d0c --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/syslog_ng/templates/conf.d/destinations.conf.j2 @@ -0,0 +1,94 @@ +#jinja2: trim_blocks: True, lstrip_blocks: True +# {{ ansible_managed }} + +{% for k, v in syslog_logs.items() %} + {% if loop.first %} +# HOST SPECIFIC DESTINATIONS + {% endif %} + +destination d_{{ k }} { + {% if v.destination is defined and v.destination | bodsch.core.type == "dict" and v.destination | count > 0 %} + {% set _net_type = None %} + {% if v.destination.file is defined and v.destination.file | string | length > 0 %} + file( + "/var/log/{{ v.destination.file | default(k + '.log', true) }}" + {% if v.use_template | default('true') and + v.template | default('tmpl') | string | length > 0 %} + template({{ v.template | default('tmpl') }}) + {% endif %} + ); + {% endif %} + {% if v.destination.udp is defined and v.destination.udp | bodsch.core.type == "dict" and v.destination.udp | count > 0 %} + {% set _net = v.destination.udp %} + {% set _net_type = "udp" %} + {% endif %} + {% if v.destination.tcp is defined and v.destination.tcp | bodsch.core.type == "dict" and v.destination.tcp | count > 0 %} + {% set _net = v.destination.tcp %} + {% set _net_type = "tcp" %} + {% endif %} + {% if v.destination.network is defined and v.destination.network | bodsch.core.type == "dict" and v.destination.network | count > 0 %} + {% set _net = v.destination.network %} + {% set _net_type = "network" %} + {% endif %} + {% if v.destination.syslog is defined and v.destination.syslog | bodsch.core.type == "dict" and v.destination.syslog | count > 0 %} + {% set _net = v.destination.syslog %} + {% set _net_type = "syslog" %} + {% endif %} + {% if 
_net_type in ["udp", "tcp", "network", "syslog"] %} + {{ _net_type }}( + {% set network_definition = _net | bodsch.core.syslog_network_definition("destination") %} + {% for k, v in network_definition.items() %} + {% if k == "ip" %} + {{ v }} + {% else %} + {% if v | bodsch.core.type != "dict" %} + {{ k }}{{ v }} + {% else %} + {{ k }}( + {% for kk, vv in v.items() %} + {{ kk }}{{ vv }} + {% endfor %} + ) + {% endif %} + {% endif %} + {% endfor %} + ); + {% endif %} + {% endif %} + {% if v.file_name is defined and v.file_name | string | length > 0 %} + file( + "/var/log/{{ v.file_name | default(k + '.log', true) }}" + {% if v.use_template | default('true') and + v.template | default('tmpl') | string | length > 0 %} + template({{ v.template | default('tmpl') }}) + {% endif %} + ); + {% endif %} +}; + +{% endfor %} + +# COMMON DESTINATIONS +{% if syslog_hardened %} +destination d_grsec { + file( + "/var/log/grsec.log" + template(tmpl) + ); +}; +destination d_pax { + file( + "/var/log/pax.log" + template(tmpl) + ); +}; +{% endif %} +destination d_console { + usertty("root"); +}; +destination d_console_all { + file( + "/dev/tty12" + template(tmpl) + ); +}; diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/syslog_ng/templates/conf.d/filters.conf.j2 b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/syslog_ng/templates/conf.d/filters.conf.j2 new file mode 100644 index 0000000..83f5059 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/syslog_ng/templates/conf.d/filters.conf.j2 @@ -0,0 +1,46 @@ +#jinja2: trim_blocks: True, lstrip_blocks: True +# {{ ansible_managed }} + +{% for k, v in syslog_logs.items() %} + {% set _filter = {} %} + {% set _filter_name = k %} + {% if v.filter is defined and + v.filter | count != 0 and + v.filter.filter | default('') | string | length > 0 %} + {% set _filter = v.filter %} + {% if _filter.filter is defined %} + {% if _filter.filter is iterable and + ( + _filter.filter is not string and + _filter.filter is not mapping + ) %} + {% set filter = _filter.filter | join(' and\n') | indent(2, first=False) %} + {% elif _filter.filter is string %} + {% set filter = _filter.filter %} + {% endif %} + {% else %} + {% set filter = 'program(' + k + ')' %} + {% endif %} + {% else %} + {% set filter = "" %} + {% endif %} + {% if loop.first %} +# HOST SPECIFIC FILTERS + {% endif %} + + {% if filter | string | length > 0 %} +filter f_{{ _filter_name }} { + {{ filter }}; +}; + {% endif %} +{% endfor %} + +# COMMON FILTERS +{% if syslog_hardened %} +filter f_grsec { + message("^(\\[.*\..*\] |)grsec:.*"); +}; +filter f_pax { + message("^(\\[.*\..*\] |)PAX:.*"); +}; +{% endif %} diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/syslog_ng/templates/conf.d/logs.conf.j2 b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/syslog_ng/templates/conf.d/logs.conf.j2 new file mode 100644 index 0000000..248f670 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/syslog_ng/templates/conf.d/logs.conf.j2 @@ -0,0 +1,47 @@ +#jinja2: trim_blocks: True, lstrip_blocks: True +# {{ ansible_managed }} + +{% for k, v in syslog_logs.items() %} + {% set _filter_name = k %} + {% if loop.first %} +# HOST SPECIFIC LOGS + {% endif %} + +log { + source({{ v.source | default('src', true) }}); + {% if v.filter is defined and + v.filter | count != 0 and + v.filter.filter | default('') | string | length > 0 %} + filter(f_{{ _filter_name }}); + {% endif 
%} + destination(d_{{ k }}); + {% if v.final is defined or v.flags is defined %} + {% if v.flags is defined %} + {% if v.flags | bodsch.core.type == "str" %} + flags({{ v.flags }}); + {% elif v.flags | bodsch.core.type == "list" %} + flags({{ v.flags | join(', ') }}); + {% endif %} + {% else %} + flags(final); + {% endif %} + {% endif %}}; +{% endfor %} + +# COMMON LOGS +{% if syslog_hardened %} +log { + source(kernsrc); + filter(f_pax); + destination(d_pax); +}; +log { + source(kernsrc); + filter(f_grsec); + destination(d_grsec); +}; +{% endif %} +log { + source(src); + destination(d_console_all); +}; diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/syslog_ng/templates/conf.d/sources.conf.j2 b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/syslog_ng/templates/conf.d/sources.conf.j2 new file mode 100644 index 0000000..728425b --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/syslog_ng/templates/conf.d/sources.conf.j2 @@ -0,0 +1,40 @@ +#jinja2: trim_blocks: True, lstrip_blocks: True +# {{ ansible_managed }} + +# SOURCES + +{% for id in syslog_sources.keys() %} +source {{ id }} { + {% for dic in syslog_sources.get(id, []) %} + {% if dic.comment is defined %} + # {{ dic.comment }} + {% set _ = dic.pop('comment') %} + {% endif %} + {% for k, v in dic.items() %} + {% if k == "file" %} + {{ k | replace('_', '-') }}({{ '"%s"' | format(v) if v else '' }}); + {% elif k in ["udp", "tcp", "network", "syslog"] %} + {% set _net = v %} + {% set _net_type = k %} + {{ _net_type }}( + {% set network_definition = v | bodsch.core.syslog_network_definition("source") %} + {% for k, v in network_definition.items() %} + {% if v | bodsch.core.type != "dict" %} + {{ k }}{{ v }} + {% else %} + {{ k }}( + {% for kk, vv in v.items() %} + {{ kk }}{{ vv }} + {% endfor %} + ) + {% endif %} + {% endfor %} + ); + {% else %} + {{ k | replace('_', '-') }}({{ '%s' | format(v) if v else '' }}); + {% endif %} + {% endfor %} + {% endfor %} +}; + +{% endfor %} diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/syslog_ng/templates/journald.conf.j2 b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/syslog_ng/templates/journald.conf.j2 new file mode 100644 index 0000000..876be85 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/syslog_ng/templates/journald.conf.j2 @@ -0,0 +1,6 @@ +# {{ ansible_managed }} + +[Journal] +{% for k,v in syslog_journald.config.items() %} +{{ k }} = {{ v }} +{% endfor %} diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/syslog_ng/templates/syslog-ng.conf.j2 b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/syslog_ng/templates/syslog-ng.conf.j2 new file mode 100644 index 0000000..dbc78fb --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/syslog_ng/templates/syslog-ng.conf.j2 @@ -0,0 +1,57 @@ +#jinja2: trim_blocks: True, lstrip_blocks: True +@version: {{ syslog_config_version }} +# {{ ansible_managed }} + +{% if syslog_scl is defined %} +@include "{{ syslog_scl }}" +{% endif %} +{% set data = syslog_options | bodsch.core.verify_syslog_options(version=syslog_config_version) %} + +options { +{% for k, v in data.items() %} + {% if v | bodsch.core.type == "bool" %} + {{ k }}({{ v | bool | bodsch.core.config_bool(true_as='yes', false_as='no') }}); + {% elif v | bodsch.core.type == "str" %} + {% if k in ["perm", "dir_perm", "dir-perm"] %} + {{ k }}({{ 
v }}); + {% else %} + {{ k }}("{{ v }}"); + {% endif %} + {% elif v | bodsch.core.type == "dict" %} + {% if k == "stats" %} + stats ( + {% for sub_key, sub_value in v.items() %} + {% if sub_value | string | length > 0 %} + {% if sub_value | bodsch.core.type == "bool" %} + {{ sub_key }}({{ sub_value | bool | bodsch.core.config_bool(true_as='yes', false_as='no') }}) + {% else %} + {{ sub_key }}({{ sub_value }}) + {% endif %} + {% endif %} + {% endfor %} + {% endif %} + ); + {% else %} + {{ k }}({{ v }}); + {% endif %} +{% endfor %} +}; + +# The custom template for file destinations. +{% if syslog_message_templates is defined and + syslog_message_templates | default({}) | count > 0 %} + {% for k, v in syslog_message_templates.items() %} +template {{ k }} { + template("{{ v }}"); +}; + + {% endfor %} +{% endif %} + +@include "conf.d/sources.conf" +@include "conf.d/destinations.conf" +@include "conf.d/filters.conf" +@include "conf.d/logs.conf" +{% for syslog_include in syslog_includes %} +@include "{{ syslog_include }}" +{% endfor %} diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/syslog_ng/vars/archlinux-openrc.yml b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/syslog_ng/vars/archlinux-openrc.yml new file mode 100644 index 0000000..c52e0d5 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/syslog_ng/vars/archlinux-openrc.yml @@ -0,0 +1,10 @@ +--- + +syslog_dependencies: + - pcre + +syslog_packages: + - syslog-ng + - syslog-ng-openrc + +... diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/syslog_ng/vars/archlinux.yml b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/syslog_ng/vars/archlinux.yml new file mode 100644 index 0000000..7e66a26 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/syslog_ng/vars/archlinux.yml @@ -0,0 +1,9 @@ +--- + +syslog_dependencies: + - pcre + +syslog_packages: + - syslog-ng + +... diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/syslog_ng/vars/artixlinux.yml b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/syslog_ng/vars/artixlinux.yml new file mode 100644 index 0000000..c52e0d5 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/syslog_ng/vars/artixlinux.yml @@ -0,0 +1,10 @@ +--- + +syslog_dependencies: + - pcre + +syslog_packages: + - syslog-ng + - syslog-ng-openrc + +... diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/syslog_ng/vars/debian.yml b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/syslog_ng/vars/debian.yml new file mode 100644 index 0000000..0248012 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/syslog_ng/vars/debian.yml @@ -0,0 +1,9 @@ +--- + +syslog_dependencies: + - iproute2 + +syslog_packages: + - syslog-ng-core + +... 
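To make the rendering logic of `syslog-ng.conf.j2` above concrete, here is a small, hedged sketch of input options and, in comments, the `options {}` lines they should produce: booleans pass through `config_bool` as `yes`/`no`, strings are quoted except for `perm`/`dir_perm`, and a `stats` dict with non-empty values becomes a `stats ( ... );` block. The exact output may also depend on the `verify_syslog_options` filter, whose source is not shown here:

```yaml
# Sketch only: syslog_options input and (as comments) the expected lines
# in the rendered options {} block of syslog-ng.conf.j2 above.
syslog_options:
  keep_hostname: true     # -> keep_hostname(yes);
  perm: "0640"            # -> perm(0640);       perm is rendered without quotes
  ts_format: iso          # -> ts_format("iso");
  flush_lines: 0          # -> flush_lines(0);
  stats:                  # -> stats ( freq(1) syslog-stats(yes) );
    freq: 1               #    empty string values are skipped entirely
    syslog-stats: true
```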
diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/syslog_ng/vars/main.yml b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/syslog_ng/vars/main.yml new file mode 100644 index 0000000..3e6a946 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/core/roles/syslog_ng/vars/main.yml @@ -0,0 +1,114 @@ +--- + +#+ stats_freq(43200); +#+ mark_freq(3600); +#+ ts_format(iso); + +syslog_defaults_message_templates: + tmpl: '${YEAR}-${MONTH}-${DAY}T${HOUR}:${MIN}:${SEC} ${LEVEL} ${MSGHDR}${MSG}\n' + +syslog_defaults_options: + chain_hostnames: false + create_dirs: false + dns-cache: false + flush_lines: 0 + group: "adm" + keep_hostname: true + log_fifo_size: 10000 + mark_freq: 3600 + perm: "0640" + stats_freq: 43200 # obsoleted keyword, please update your configuration; keyword='stats_freq', change='Use the stats() block. E.g. stats(freq(1)); + time_reopen: 10 + ts_format: iso + use_dns: false + use_fqdn: false + stats: + freq: "" # 1 + level: "" # 1 + lifetime: "" # 1000 + max-dynamics: "" # 10000 + syslog-stats: "" # true + +syslog_defaults_sources: + src: + - comment: messages generated by syslog-ng + internal: + kernsrc: + - comment: messages from the kernel + file: /proc/kmsg + +syslog_defaults_logs: + cron: + source: src + destination: + file: cron.log + filter: + name: cron + filter: facility(cron) + kern: + source: kernsrc + destination: + file: kernel.log + filter: + name: kern + filter: facility(kern) + syslog: + destination: + file: syslog.log + filter: + filter: not facility(authpriv) + user: + destination: + file: user.log + filter: + filter: facility(user) + warn: + destination: + file: warn.log + filter: + filter: level(warn..alert) + emergency: + destination: + file: emergency.log + filter: + filter: level(emerg) + authlog: + destination: + file: auth.log + filter: + filter: facility(auth, authpriv) + messages: + destination: + file: messages.log + # filter_linkage: and + filter: + filter: + - level(info..alert) + - level(debug..notice) + - not facility(auth, authpriv, mail, cron, kern, local7) + - not program (syslog-ng) + - not program (dhcpd) + - not program(named) + mail: + destination: + file: mail.log + filter: + filter: facility(mail) + dhcpd: + destination: + file: dhcpd.log + filter: + filter: program(dhcpd) + + # destination console_all { file("/dev/tty12"); }; + # log { source(src); destination(console_all); }; + +syslog_config_version: '' + +syslog_unit_file: syslog-ng + +syslog_dependencies: [] + +syslog_packages: [] + +... 
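Because `tasks/configure.yml` merges these defaults with the user-supplied `syslog_logs` via `combine(..., recursive=True)`, single keys of a default entry can be overridden without restating the whole map. A brief sketch, where the subdirectory name is just an example:

```yaml
# Sketch: override single keys of the syslog_defaults_logs entries above.
# The recursive combine in tasks/configure.yml keeps all other defaults intact.
syslog_logs:
  mail:
    destination:
      file: mail/mail.log   # move the default mail log into a subdirectory
  dhcpd:
    final: true             # add the final flag to the default dhcpd entry
```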
diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/core/test-requirements.txt b/ansible/playbooks/collections/ansible_collections/bodsch/core/test-requirements.txt new file mode 100644 index 0000000..8b2f356 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/core/test-requirements.txt @@ -0,0 +1,12 @@ +ansible-lint +docker +dnspython +flake8 +molecule +molecule-plugins[docker] +netaddr +pytest-testinfra +tox +tox-gh-actions +yamllint +python-dateutil diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/core/tests/test_lookup.yml b/ansible/playbooks/collections/ansible_collections/bodsch/core/tests/test_lookup.yml new file mode 100644 index 0000000..fffaf75 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/core/tests/test_lookup.yml @@ -0,0 +1,71 @@ +--- + +- name: Test rbw_structured Lookup Plugin + hosts: localhost + gather_facts: false + vars: + json_entry: "37588485-5149-42e8-af98-1876f1e472cd" # note containing JSON + login_entry: "4e1f183d-6983-44d9-ba5c-9c64ecdff482" # login with username/password + raw_note: "expressschnitte.org" # note with plain text + broken_json_entry: "" # note with invalid JSON + tasks: + - name: "Test: read string" + debug: + msg: "Raw note: {{ lookup('bodsch.core.rbw', raw_note) }}" + + - name: "Test: login field (username)" + debug: + msg: "Username: {{ lookup('bodsch.core.rbw', login_entry, field='username') }}" + + - name: "Test: login field (password)" + debug: + msg: "Password: {{ lookup('bodsch.core.rbw', login_entry, field='password') }}" + + - name: "Test: parse JSON" + set_fact: + json_data: "{{ lookup('bodsch.core.rbw', json_entry, parse_json=True) }}" + + - name: "Test: multi fetch" + set_fact: + multi_data: "{{ lookup('bodsch.core.rbw', raw_note, login_entry) }}" + + - name: "Test: lookup with index data (name, folder, user)" + set_fact: + indexed_entry: "{{ lookup('bodsch.core.rbw', + {'name': 'expresszuschnitt.de', 'folder': '', 'user': ''}, + field='password', + use_index=True) }}" + + - name: "Output: indexed entry" + debug: + msg: "Indexed Entry: {{ indexed_entry }}" + + - name: "Output: JSON fields" + debug: + msg: + - "Token: {{ json_data.token | default('not set') }}" + - "URL: {{ json_data.url | default('not set') }}" + + - name: "Output: multi fetch" + debug: + msg: + - "Multi Data: {{ multi_data | default('not set') }}" + + - name: "Test: broken JSON → fallback to an empty dict" + set_fact: + broken: "{{ lookup('bodsch.core.rbw', broken_json_entry, parse_json=True) }}" + + - name: "Output: fallback JSON" + debug: + var: broken + + - name: "Test: strict JSON → error" + block: + - set_fact: + broken_strict: "{{ lookup('bodsch.core.rbw', broken_json_entry, parse_json=True, strict_json=True) }}" + - debug: + var: broken_strict + rescue: + - debug: + msg: "As expected: invalid JSON, the error was handled." +
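Outside of this test playbook, the same lookup can pull a secret straight into a task. A hedged sketch, where the entry UUID is a placeholder and only the `field=` option exercised above is used:

```yaml
---
# Sketch: use the bodsch.core.rbw lookup from a regular play.
# The UUID is a placeholder; field='password' mirrors the tests above.
- name: read a secret via the rbw lookup
  hosts: localhost
  gather_facts: false
  vars:
    rbw_entry_id: "00000000-0000-0000-0000-000000000000"
  tasks:
    - name: show the password field of the entry
      ansible.builtin.debug:
        msg: "{{ lookup('bodsch.core.rbw', rbw_entry_id, field='password') }}"
```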
diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/core/tox.ini b/ansible/playbooks/collections/ansible_collections/bodsch/core/tox.ini new file mode 100644 index 0000000..1dd6dfe --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/core/tox.ini @@ -0,0 +1,46 @@ +[tox] +ignore_basepython_conflict = True +skip_missing_interpreters = True + +minversion = 3.25 +toxworkdir = /tmp/.tox/ + +skipsdist = true + +[testenv] +passenv = * + +# allowlist_externals = +# /usr/bin/find +# /bin/sh +# rm + +deps = + -r test-requirements.txt + ansible_4.10: ansible>=4.10,<4.11 + ansible_5.1: ansible>=5.1,<5.2 + ansible_5.2: ansible>=5.2,<5.3 + ansible_5.10: ansible>=5.10,<5.11 + ansible_6.1: ansible>=6.1,<6.2 + ansible_6.7: ansible>=6.7,<6.8 + ansible_7.0: ansible>=7.0,<7.1 + ansible_7.5: ansible>=7.5,<7.6 + ansible_8.0: ansible>=8.0,<8.1 + ansible_8.5: ansible>=8.5,<8.6 + ansible_9.0: ansible>=9.0,<9.1 + ansible_9.5: ansible>=9.5,<9.6 + ansible_10.0: ansible>=10.0,<10.1 + ansible_10.7: ansible>=10.7,<10.8 + ansible_11.0: ansible>=11.0,<11.1 + ansible_11.13: ansible>=11.13,<11.14 + ansible_12.0: ansible>=12.0,<12.1 + ansible_12.3: ansible>=12.3,<12.4 + ansible_13.0: ansible>=13.0,<13.1 + ansible_13.1: ansible>=13.1,<13.2 + +#commands_pre = +# /usr/bin/find {toxinidir} -type f -not -path '{toxworkdir}/*' -path '*/__pycache__/*' -name '*.py[c|o]' -delete +# /bin/sh -c '/usr/bin/find {homedir}/.cache -type d -path "*/molecule_*" -exec rm -rfv \{\} +;' + +commands = + {posargs:molecule test --all --destroy always} diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/.config/ansible-lint.yml b/ansible/playbooks/collections/ansible_collections/bodsch/dns/.config/ansible-lint.yml new file mode 100644 index 0000000..48763f0 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/.config/ansible-lint.yml @@ -0,0 +1,5 @@ +--- + +skip_list: + - name[casing] + - name[template] diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/.config/pycodestyle.cfg b/ansible/playbooks/collections/ansible_collections/bodsch/dns/.config/pycodestyle.cfg new file mode 100644 index 0000000..f538fae --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/.config/pycodestyle.cfg @@ -0,0 +1,65 @@ +[pycodestyle] + +ignore = + E203, + E251, + W503, + W504, + W605 + +exclude = + # No need to traverse our git directory + .git, + .github, + # There's no value in checking cache directories + __pycache__, + .tox, + molecule, + hooks, + tools, + test_*.py + +max-line-length = 155 + +# optional extension: show absolute paths +show-source = true +count = true + +[flake8] + +# E203: https://github.com/python/black/issues/315 +# ignore = D,E741,W503,W504,H,E501,E203 +# E221 multiple spaces before operator +# E251 unexpected spaces around keyword / parameter equals +# E203 for Black compatibility +# W503 for the line-break operator) + +ignore = + E203, + E251, + W503, + W504, + W605 + +exclude = + # No need to traverse our git directory + .git, + .github, + # There's no value in checking cache directories + __pycache__, + .tox, + molecule, + hooks, + tools, + test_*.py + +max-line-length = 155 + +# optional extension: show absolute paths +show-source = true +count = true + +[flake8-length] +max-code-length = 120 +max-comment-length = 300 +max-docstring-length = 200 diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/CONTRIBUTING.md
b/ansible/playbooks/collections/ansible_collections/bodsch/dns/CONTRIBUTING.md new file mode 100644 index 0000000..e3cd4cc --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/CONTRIBUTING.md @@ -0,0 +1,31 @@ +Contributing +============ +If you want to contribute to a project and make it better, your help is very welcome. +Contributing is also a great way to learn more about social coding on GitHub, new technologies +and their ecosystems, and how to make constructive, helpful bug reports, feature requests and the +noblest of all contributions: a good, clean pull request. + +### How to make a clean pull request + +Look for a project's contribution instructions. If there are any, follow them. + +- Create a personal fork of the project on GitHub. +- Clone the fork on your local machine. Your remote repo on GitHub is called `origin`. +- Add the original repository as a remote called `upstream`. +- If you created your fork a while ago, be sure to pull upstream changes into your local repository. +- Create a new branch to work on! Branch from `develop` if it exists, else from `master`. +- Implement/fix your feature, comment your code. +- Follow the code style of the project, including indentation. +- If the project has tests, run them! +- Write or adapt tests as needed. +- Add or change the documentation as needed. +- Squash your commits into a single commit. Create a new branch if necessary. +- Push your branch to your fork on GitHub, the remote `origin`. +- From your fork, open a pull request against the correct branch. Target the project's `develop` branch if there is one, else go for `master`! +- If the maintainer requests further changes, just push them to your branch. The PR will be updated automatically. +- Once the pull request is approved and merged, you can pull the changes from `upstream` to your local repo and delete + your extra branch(es). + +And last but not least: Always write your commit messages in the present tense. +Your commit message should describe what the commit, when applied, does to the +code – not what you did to the code.
diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/FILES.json b/ansible/playbooks/collections/ansible_collections/bodsch/dns/FILES.json new file mode 100644 index 0000000..460a096 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/FILES.json @@ -0,0 +1,6109 @@ +{ + "files": [ + { + "name": ".", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "test-requirements.txt", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "b8760824aa95ece31ab1dde90c40c58cdd0daab2d2274fdf70ad55c46fda12ca", + "format": 1 + }, + { + "name": "tox.ini", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "c94cefed134293a6a727beaaa4c235883ddc585c0c08deadd6043e4f9b80fc28", + "format": 1 + }, + { + "name": "README.md", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "9d00a707ed61144d9bd1eb5e88f80e1add87c2b8d106c27f3bcfce1e8565a903", + "format": 1 + }, + { + "name": "Makefile", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "d5363979d63997d826e3660c0cc7c752f3ac76af25c708712236cb4c217c1535", + "format": 1 + }, + { + "name": "meta", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "meta/runtime.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "db1e92d99f55366772b6e4b1053c55eb40d8dd93c32c6c66f29c97185dabe5e1", + "format": 1 + }, + { + "name": "LICENSE", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "c71d239df91726fc519c6eb72d318ec65820627232b2f796219e87dcf35d0ab4", + "format": 1 + }, + { + "name": "roles", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/bind", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/bind/hooks", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/bind/hooks/converge", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "1ddaaac121ea494491ac3419f0be9725405644c2b31781030d236ebc3d930664", + "format": 1 + }, + { + "name": "roles/bind/hooks/verify", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "d3ffa461db1e1516600a5de841029880521aff844c746e5dedf4bebfccdbf39b", + "format": 1 + }, + { + "name": "roles/bind/hooks/test", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "6137f41add5be2e1a9caa9abd1c1a483c8ce365370fd64eb85dee201a7074403", + "format": 1 + }, + { + "name": "roles/bind/hooks/lint", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "fd048a322d0d05e245101fdae1a26a3f6a23c13bbe4f1fc455448672eb59a0a7", + "format": 1 + }, + { + "name": "roles/bind/hooks/destroy", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "79c679a67141ef9cbfb56c8c7b948ca3df010c56d0c7af6c46535cde58bac8bb", + "format": 1 + }, + { + "name": "roles/bind/hooks/tox.sh", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "c32820da1cc4358e7fa4731358bfa38fcbb221154bd45412aa2d9202f60c8419", + "format": 1 + }, + { + "name": "roles/bind/hooks/molecule.rc", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "7fbafd0a44904bdb4b3b880ef8d2776279fd50416e825c23e43110a71e96bf90", + "format": 1 + }, + { + "name": "roles/bind/defaults", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/bind/defaults/main.yml", + "ftype": "file", + "chksum_type": "sha256", + 
"chksum_sha256": "cc834382afef2e3f88f3f3305aae7ca0b33a4c9aa39364b1cd789376e32c5f9d", + "format": 1 + }, + { + "name": "roles/bind/molecule", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/bind/molecule/zone-transfer", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/bind/molecule/zone-transfer/tests", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/bind/molecule/zone-transfer/tests/test_default.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "8c7a4afe7862258c35f685b33a4a003d39984e00cce86635d350550a24b2cdf3", + "format": 1 + }, + { + "name": "roles/bind/molecule/zone-transfer/group_vars", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/bind/molecule/zone-transfer/group_vars/all", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/bind/molecule/zone-transfer/group_vars/all/vars.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "2f5acc1956e8543c22b70571ae3c1b9b5ef726195efecbd1b7e2b819d5a996c3", + "format": 1 + }, + { + "name": "roles/bind/molecule/zone-transfer/prepare.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "a0261146ede402b20b0674e53b6e6ece2afcefdbbf53d63d8098362d466bbc1c", + "format": 1 + }, + { + "name": "roles/bind/molecule/zone-transfer/molecule.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "632419613bebff3714038ca570c92d55b32a3d25714e6f793fad6e17a9b81ee3", + "format": 1 + }, + { + "name": "roles/bind/molecule/zone-transfer/converge.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "59e6a55d09d4be63dd1700b5c1f5a3cca334c179390dc6440863706c0fcabd66", + "format": 1 + }, + { + "name": "roles/bind/molecule/upgrade", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/bind/molecule/upgrade/tests", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/bind/molecule/upgrade/tests/test_ns1.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "8639f7e8292cbe72349dac21d7fb7f537f835be473ae4577472f9de7f3aebeda", + "format": 1 + }, + { + "name": "roles/bind/molecule/upgrade/tests/test_default.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "d8cb0060d24d5ce912d48f924a2450dade2c6ceb67f1ee4cea0535b2ad857508", + "format": 1 + }, + { + "name": "roles/bind/molecule/upgrade/tests/test_ns3.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "30553a6dae555493c464dba669afdd77b027d323d03ec483f25737553567f26f", + "format": 1 + }, + { + "name": "roles/bind/molecule/upgrade/tests/test_ns2.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "636a97cb3bf6b3b08ece3d651e701d409e1e4b2ba5584b6a0b9162845701b333", + "format": 1 + }, + { + "name": "roles/bind/molecule/upgrade/group_vars", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/bind/molecule/upgrade/group_vars/all", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/bind/molecule/upgrade/group_vars/all/vars.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "ff708e59536098b5e5857dba5805b27740776d5e31cc9f8c5c36d759d751ec36", + "format": 1 + }, + { + "name": 
"roles/bind/molecule/upgrade/host_vars", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/bind/molecule/upgrade/host_vars/ns2.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "1d64c47c900eb393e2cd3f80c27a5e6fe3be460490f4e3a72a992deeae315f2b", + "format": 1 + }, + { + "name": "roles/bind/molecule/upgrade/host_vars/ns3.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "fb742502d50615d1dd990dea48d39e1ad47409cccbdf6fbde7c69bf8de8e2d41", + "format": 1 + }, + { + "name": "roles/bind/molecule/upgrade/host_vars/ns1.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "63b10354d1fd0e7ed86a37c8e72721592f8858823ea7e08211d8c2722c5a67d2", + "format": 1 + }, + { + "name": "roles/bind/molecule/upgrade/requirements.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "123dc2fa844b699b8177bbe1ef4d1d4e8a4d683dc60d45131496efcc2eed46a6", + "format": 1 + }, + { + "name": "roles/bind/molecule/upgrade/prepare.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "276f33f63e581bb900994cb43fd682a55be6b41165edc403823172eba7b7221e", + "format": 1 + }, + { + "name": "roles/bind/molecule/upgrade/molecule.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "05a193cd706c93844ae0ff1684d6297013d003dfd4942f838fbc9b3ab60c1dcc", + "format": 1 + }, + { + "name": "roles/bind/molecule/upgrade/converge.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "2049f46c2e98f61e97f0e26494115d94daa46259c81f07ad0d446dabb9dac25f", + "format": 1 + }, + { + "name": "roles/bind/molecule/require-systemd-unit", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/bind/molecule/require-systemd-unit/tests", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/bind/molecule/require-systemd-unit/tests/test_default.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "159293b6626e861665e9bb6e517d053d105896b6739289b322b5cd21226b05db", + "format": 1 + }, + { + "name": "roles/bind/molecule/require-systemd-unit/group_vars", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/bind/molecule/require-systemd-unit/group_vars/all", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/bind/molecule/require-systemd-unit/group_vars/all/vars.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "ee7db52b6dccc093e0256e6033acf963fa70abde894041f418008ef6bab04c36", + "format": 1 + }, + { + "name": "roles/bind/molecule/require-systemd-unit/prepare.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "fdc5435ef8d9ccd5ce3704f3f655d87a78eb8e4f199fc738197f6aa8afde1756", + "format": 1 + }, + { + "name": "roles/bind/molecule/require-systemd-unit/molecule.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "2bf0da6c7e73c474b6bbbfc83e70729cffdb1d78e030bcc556fadd35bd9b999c", + "format": 1 + }, + { + "name": "roles/bind/molecule/require-systemd-unit/converge.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "00337da5d4b55c30a5f37c7e3268fc119ed22c6c13cd9ee8b83e64d491061de9", + "format": 1 + }, + { + "name": "roles/bind/molecule/default", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/bind/molecule/default/tests", + "ftype": "dir", + 
"chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/bind/molecule/default/tests/test_default.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "d8cb0060d24d5ce912d48f924a2450dade2c6ceb67f1ee4cea0535b2ad857508", + "format": 1 + }, + { + "name": "roles/bind/molecule/default/group_vars", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/bind/molecule/default/group_vars/all", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/bind/molecule/default/group_vars/all/vars.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "144e1381858db7e2636d8698b249684097a154c87f486b543020bfdf91c8681c", + "format": 1 + }, + { + "name": "roles/bind/molecule/default/prepare.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "d292707d693093b9b2335c63fcb667f61d1781ffec41feaf28aafd07541ecb81", + "format": 1 + }, + { + "name": "roles/bind/molecule/default/molecule.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "9ea92e684c4a022509518f2784bad33ebe85de68d630c933b120aabe0946f279", + "format": 1 + }, + { + "name": "roles/bind/molecule/default/converge.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "296e9140e62bf8631297fa0205f63ca2b19298f162c1d0dc0ebcafc1b3656161", + "format": 1 + }, + { + "name": "roles/bind/molecule/configured", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/bind/molecule/configured/tests", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/bind/molecule/configured/tests/test_default.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "8c7a4afe7862258c35f685b33a4a003d39984e00cce86635d350550a24b2cdf3", + "format": 1 + }, + { + "name": "roles/bind/molecule/configured/group_vars", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/bind/molecule/configured/group_vars/all", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/bind/molecule/configured/group_vars/all/vars.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "199e281dd020d3a5428984c81ef1fcc35518dba4b8b38dd6708687e66d3f0543", + "format": 1 + }, + { + "name": "roles/bind/molecule/configured/prepare.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "a0261146ede402b20b0674e53b6e6ece2afcefdbbf53d63d8098362d466bbc1c", + "format": 1 + }, + { + "name": "roles/bind/molecule/configured/molecule.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "632419613bebff3714038ca570c92d55b32a3d25714e6f793fad6e17a9b81ee3", + "format": 1 + }, + { + "name": "roles/bind/molecule/configured/converge.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "59e6a55d09d4be63dd1700b5c1f5a3cca334c179390dc6440863706c0fcabd66", + "format": 1 + }, + { + "name": "roles/bind/README.md", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "b1556cc4532cdd179c8e0f42db2d3f5a7e44aaa89ce21a66859e0e9e9ed1795b", + "format": 1 + }, + { + "name": "roles/bind/Makefile", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "22a0d6abe4d830d86b79c2ef366996400d6b634b9e11d0834c1bb0a9ce1bd12d", + "format": 1 + }, + { + "name": "roles/bind/templates", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + 
"format": 1 + }, + { + "name": "roles/bind/templates/etc", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/bind/templates/etc/named.conf.j2", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "837e410020cf424dbc22d6018063b3cda88d3e607889919529436a5a3b2369fa", + "format": 1 + }, + { + "name": "roles/bind/templates/etc/auth_update.j2", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "70edbb9691c2f6695fb96eaeafaebbfb723d046faaae273cbc0a03df910309ed", + "format": 1 + }, + { + "name": "roles/bind/templates/etc/reverse_zone.j2", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "208cc1e92834480ded611144be44d9266e9f3dd52ef4c9865bb6ae4ff6ce5b9c", + "format": 1 + }, + { + "name": "roles/bind/templates/etc/named.conf.d", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/bind/templates/etc/named.conf.d/acl.j2", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "9368143eae3df109d3288d9ecb9e4763000139fa227da864b9550df17adb91c5", + "format": 1 + }, + { + "name": "roles/bind/templates/etc/named.conf.d/allow-transfer.j2", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "04b3b93b5549deb07d89d50f1fe651921dd036b4974310f15a4aa86182ed9f4a", + "format": 1 + }, + { + "name": "roles/bind/templates/etc/named.conf.d/forwarders.j2", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "543ed5165940441da14de26131cfdf5fa6bfad56bfe4864304045e3a234353cd", + "format": 1 + }, + { + "name": "roles/bind/templates/etc/named.conf.d/includes.j2", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "7859510658321a2127c3cfb603e4e43c83d4365c54d044e85227ebdf1394b88f", + "format": 1 + }, + { + "name": "roles/bind/templates/etc/named.conf.d/recursion.j2", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "7fbb0f8802433bdefc5561474a920c2292ec0ebd0c807aded16f46e43f8cddce", + "format": 1 + }, + { + "name": "roles/bind/templates/etc/named.conf.d/listen.j2", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "db8d68cce40e4243c3f4a896f83cddc599ce52a803b82f8d57da07de4fc3975f", + "format": 1 + }, + { + "name": "roles/bind/templates/etc/named.conf.d/dnssec.j2", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "1402a03b798c7f5d777a150046130d917353fa001fe9f94feba91d13058c05c6", + "format": 1 + }, + { + "name": "roles/bind/templates/etc/named.conf.d/statistics.j2", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "24f578bfc5969d4bdae85c94d80f0eabd819c87b5de23ba9fad1186ffbd7a3c6", + "format": 1 + }, + { + "name": "roles/bind/templates/etc/named.conf.d/dns64.j2", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "c6a26d9c883cafe53ca76839136691560fba84064f98ba8d31ce1d66825595ea", + "format": 1 + }, + { + "name": "roles/bind/templates/etc/named.conf.d/querylog.j2", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "71ace8af2734727ca1a0ae1140dd35ff22a63b736cf8a5923878053608726888", + "format": 1 + }, + { + "name": "roles/bind/templates/etc/named.conf.d/zones.j2", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "431e5030492d1f04fc910fe3e6fa2b245f81ccf58c0de85566e0a148fd148682", + "format": 1 + }, + { + "name": "roles/bind/templates/etc/named.conf.d/logging.j2", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "b902f993cd41fc9f7158353d025a6914aa897c0c932d2d741c7fc8aab2072acc", + "format": 1 + }, + { + "name": 
"roles/bind/templates/etc/named.conf.d/check-names.j2", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "f76e4468089ad320d534c46e5010197152ca0a5c191bc5ecc6f99498bb75d53d", + "format": 1 + }, + { + "name": "roles/bind/templates/etc/bind_zone.j2", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "7ab369d5caa8e77d7090d12112f1d033b1db68e5102f3c7868da8cdaa892a898", + "format": 1 + }, + { + "name": "roles/bind/templates/etc/reverse_zone_ipv6.j2", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "8a480689000dd891132e7e4543b3eac483b9a1a489f5e8719ca354774b6953ca", + "format": 1 + }, + { + "name": "roles/bind/templates/etc/auth_transfer.j2", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "0f158efa562d1d9d3d9b16716aa377823fc4e0c0ce1204d808c13706c3617467", + "format": 1 + }, + { + "name": "roles/bind/templates/init", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/bind/templates/init/systemd", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/bind/templates/init/systemd/override.conf.j2", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "e8beb9f769774a6301f811f8295b4dc926263980992587a0c1236891ed5b9617", + "format": 1 + }, + { + "name": "roles/bind/meta", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/bind/meta/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "5c03d12dcc179e03cfad597ebea1edf347d8ed415dd84da8f03baefb1a8df275", + "format": 1 + }, + { + "name": "roles/bind/meta/.galaxy_install_info", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "e458403a94788659c988ff841900390bf6933de98bc8f7bf259b525c5a8003e1", + "format": 1 + }, + { + "name": "roles/bind/vars", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/bind/vars/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "53a26b53d8c9b134c1d4050e5f1b2dcf459574e8b816f13697e72df7ca64162d", + "format": 1 + }, + { + "name": "roles/bind/vars/debian.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "144e1381858db7e2636d8698b249684097a154c87f486b543020bfdf91c8681c", + "format": 1 + }, + { + "name": "roles/bind/vars/archlinux.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "ee35197bd78d0aeb67be5a9742701a3155d8259437ce162ff5544fe445dcf0c4", + "format": 1 + }, + { + "name": "roles/bind/LICENSE", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "c318b6f42ca9cd8f48502e05161a95a92b154ff5c0647209b208ee9ff9657a04", + "format": 1 + }, + { + "name": "roles/bind/handlers", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/bind/handlers/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "23f3baf6b6753153b21919f58f32278ab8f958bfc37a60f82b20985a82b0f3b3", + "format": 1 + }, + { + "name": "roles/bind/CONTRIBUTING.md", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "af957267f930f931b72fd2dc1d4127af66afe9aea4459075bc1c6b0051109df9", + "format": 1 + }, + { + "name": "roles/bind/tasks", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/bind/tasks/service.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "905c2c931a4af59d3805a3ce356d4f9d750c83a810693d47250f9811682d49b7", + 
"format": 1 + }, + { + "name": "roles/bind/tasks/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "b46d47a60fdf934efbf8da0dd2f231f8568b310bf7602b98341b184b2767f34e", + "format": 1 + }, + { + "name": "roles/bind/tasks/install.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "22ceb5ad1751073198feecc14a89f896ebec5618ec93d423584b81473e2849d6", + "format": 1 + }, + { + "name": "roles/bind/tasks/prepare.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "749ad98dfeb7ebd9a37cfee2e1c737930641acc0f830d2f5398fdcd07eb11dbb", + "format": 1 + }, + { + "name": "roles/bind/tasks/configure", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/bind/tasks/configure/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "79c08205157664928fdd24a8995ff174c2801bab14a0b0962dfae8353e460f58", + "format": 1 + }, + { + "name": "roles/bind/tasks/configure/zones.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "25f30999440fd5916308163c77b0968bef3b19a3ffc59be0a12ef377e70bda11", + "format": 1 + }, + { + "name": "roles/pdns", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/pdns/defaults", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/pdns/defaults/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "aeb75b7ab2ec189ea21fd30dfd374d44c7f547c0f342d742f13c93cc3d22c3a2", + "format": 1 + }, + { + "name": "roles/pdns/molecule", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/pdns/molecule/backend-sqlite", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/pdns/molecule/backend-sqlite/tests", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/pdns/molecule/backend-sqlite/tests/test_default.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "db101ad1a98a0aa728449d6ac966194f00fa2cb8db7fd52a43c4736927ceeb2f", + "format": 1 + }, + { + "name": "roles/pdns/molecule/backend-sqlite/group_vars", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/pdns/molecule/backend-sqlite/group_vars/all", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/pdns/molecule/backend-sqlite/group_vars/all/vars.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "63cf35283c7d1e7e5fc2390be194ea532612ce3c3df38de8a5c4ae07a83deebc", + "format": 1 + }, + { + "name": "roles/pdns/molecule/backend-sqlite/prepare.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "a0261146ede402b20b0674e53b6e6ece2afcefdbbf53d63d8098362d466bbc1c", + "format": 1 + }, + { + "name": "roles/pdns/molecule/backend-sqlite/molecule.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "87b0167452949c6ddaf944f4918508ea7c0a246e2f409710b9f8be4e75b0ad10", + "format": 1 + }, + { + "name": "roles/pdns/molecule/backend-sqlite/converge.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "5729f2075cf82f0b9053ccca7c19a0b901d500e9b53bbab88c7e5a823798a8e9", + "format": 1 + }, + { + "name": "roles/pdns/molecule/default", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": 
"roles/pdns/molecule/default/tests", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/pdns/molecule/default/tests/test_default.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "6075eb6490be1c6dc232f545104be521c25445782bf970b0f32560f36dc4bd24", + "format": 1 + }, + { + "name": "roles/pdns/molecule/default/group_vars", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/pdns/molecule/default/group_vars/all", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/pdns/molecule/default/group_vars/all/vars.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "144e1381858db7e2636d8698b249684097a154c87f486b543020bfdf91c8681c", + "format": 1 + }, + { + "name": "roles/pdns/molecule/default/prepare.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "d292707d693093b9b2335c63fcb667f61d1781ffec41feaf28aafd07541ecb81", + "format": 1 + }, + { + "name": "roles/pdns/molecule/default/molecule.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "3c911de4dcb61dc0c5b86cdd6a8007d4114c4e6fb45dab3014020865a6b202f6", + "format": 1 + }, + { + "name": "roles/pdns/molecule/default/converge.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "9b8857ea71ab2725d306012f0018c32cfa349a430586f7d26772f0a9cf2a357f", + "format": 1 + }, + { + "name": "roles/pdns/molecule/backend-mysql", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/pdns/molecule/backend-mysql/tests", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/pdns/molecule/backend-mysql/tests/test_default.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "02bda32aa094eadeae66934bfbe9d3b964816ecbea70aa3f6e60f4ca5d946ae8", + "format": 1 + }, + { + "name": "roles/pdns/molecule/backend-mysql/collections.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "c05f075d81e1576557a2ae19d87b377434503eae439800d2fa9d108a9a66e201", + "format": 1 + }, + { + "name": "roles/pdns/molecule/backend-mysql/group_vars", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/pdns/molecule/backend-mysql/group_vars/all", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/pdns/molecule/backend-mysql/group_vars/all/vault.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "4dcb7880517c3a4c0a4cceef4eb4c9b0191bbab0427e49f354df32165c9cd330", + "format": 1 + }, + { + "name": "roles/pdns/molecule/backend-mysql/group_vars/all/vars.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "b8eb6b340891ecb4629018e2a9a77f7bc98e2a3d91f3f1db927986c8ef347fa7", + "format": 1 + }, + { + "name": "roles/pdns/molecule/backend-mysql/host_vars", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/pdns/molecule/backend-mysql/host_vars/database", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/pdns/molecule/backend-mysql/host_vars/database/mariadb.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "b159967b979876516732931c29680a1163200e9c5d31f635a045b494414a2517", + "format": 1 + }, + { + "name": 
"roles/pdns/molecule/backend-mysql/host_vars/database/vault.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "4dcb7880517c3a4c0a4cceef4eb4c9b0191bbab0427e49f354df32165c9cd330", + "format": 1 + }, + { + "name": "roles/pdns/molecule/backend-mysql/prepare.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "35607947714218adda0893504d8ed0a44536ad8e3914ce9d7d6609efdba175f4", + "format": 1 + }, + { + "name": "roles/pdns/molecule/backend-mysql/molecule.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "420765a85f59c1c30cfabfce9eaf4629606f6c83137f49724ba1d21bbcb9608e", + "format": 1 + }, + { + "name": "roles/pdns/molecule/backend-mysql/converge.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "51724cbf3392783e8be61dee13ffa48042f1410126983b6c3787abeabd471e6c", + "format": 1 + }, + { + "name": "roles/pdns/molecule/backend-lmdb", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/pdns/molecule/backend-lmdb/tests", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/pdns/molecule/backend-lmdb/tests/test_default.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "968440c209cc1fdef5aafd16c0003ff900f70ad18187148e4382fb1254cf30da", + "format": 1 + }, + { + "name": "roles/pdns/molecule/backend-lmdb/group_vars", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/pdns/molecule/backend-lmdb/group_vars/all", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/pdns/molecule/backend-lmdb/group_vars/all/vars.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "d0027762d89278f7bac8037739e97e010b85c5ef9d7204474b5b593726c39c10", + "format": 1 + }, + { + "name": "roles/pdns/molecule/backend-lmdb/prepare.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "a0261146ede402b20b0674e53b6e6ece2afcefdbbf53d63d8098362d466bbc1c", + "format": 1 + }, + { + "name": "roles/pdns/molecule/backend-lmdb/molecule.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "87b0167452949c6ddaf944f4918508ea7c0a246e2f409710b9f8be4e75b0ad10", + "format": 1 + }, + { + "name": "roles/pdns/molecule/backend-lmdb/converge.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "5729f2075cf82f0b9053ccca7c19a0b901d500e9b53bbab88c7e5a823798a8e9", + "format": 1 + }, + { + "name": "roles/pdns/molecule/configured", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/pdns/molecule/configured/tests", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/pdns/molecule/configured/tests/test_default.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "c8de9f8b4b17ea00e9c57ae13a406d5cb19b3feab2b22dd97e7747ae148a4a37", + "format": 1 + }, + { + "name": "roles/pdns/molecule/configured/group_vars", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/pdns/molecule/configured/group_vars/all", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/pdns/molecule/configured/group_vars/all/vars.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "eade51cec291380c105cf3962f8b32231865d27cebeed55030a44486a1072e1e", + "format": 1 + }, + { + "name": 
"roles/pdns/molecule/configured/prepare.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "a0261146ede402b20b0674e53b6e6ece2afcefdbbf53d63d8098362d466bbc1c", + "format": 1 + }, + { + "name": "roles/pdns/molecule/configured/molecule.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "38cc13c5de32f20093d3b78062ca93496b7efcc053194169bd9c877e4b8aa6ee", + "format": 1 + }, + { + "name": "roles/pdns/molecule/configured/converge.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "5729f2075cf82f0b9053ccca7c19a0b901d500e9b53bbab88c7e5a823798a8e9", + "format": 1 + }, + { + "name": "roles/pdns/.yamllint", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "5845e18e9f23155f423207df9abac970aed687c638620bc2c9ee06706191054b", + "format": 1 + }, + { + "name": "roles/pdns/notes.md", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "46566a876479eb86f88c11d16b24469c9f568119589a48dcf59e3616854d577a", + "format": 1 + }, + { + "name": "roles/pdns/.gitignore", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "c12c7e5fe0f39aa1f156852d9b3503da1edc9ce5f054f26bb1e984c887476313", + "format": 1 + }, + { + "name": "roles/pdns/README.md", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "c1b6e489f2294f2b42735c304c0d8c4cfb12ecad6e30cb39b25f8366e2524710", + "format": 1 + }, + { + "name": "roles/pdns/templates", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/pdns/templates/pdns_backends.conf.j2", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "2eea78ab9851d963bdf41538a610fce1e08d9737cbf633ec24a302e932a59097", + "format": 1 + }, + { + "name": "roles/pdns/templates/pdns_api.conf.j2", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "635c5473cc72b63a9b2a08834848fff1c4b01a324d2afb1eaddf4d9e41db1a00", + "format": 1 + }, + { + "name": "roles/pdns/templates/pdns.conf.j2", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "81a41d9ea3223ce3b26296831c36a7c23540dc9fbaf2c1582eecba40b1070c47", + "format": 1 + }, + { + "name": "roles/pdns/templates/pdns_webserver.conf.j2", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "8c19469f4926b61ad017c622deeee79f36aaf11804f497b8dfbe58e85ca24fef", + "format": 1 + }, + { + "name": "roles/pdns/templates/pdns_general.conf.j2", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "706f19ef936db765ad9e73c02405d3b225f4f08fd4fc9a99b4cbc69ba5903771", + "format": 1 + }, + { + "name": "roles/pdns/meta", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/pdns/meta/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "6cdc37d45f53b3c4858273dc65ad1031ada59bc2953fa5dc032ad58b28fec8f7", + "format": 1 + }, + { + "name": "roles/pdns/vars", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/pdns/vars/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "2fdcd924f7a0ff977a0e929119de8dae5053c5c76d4c2c3936b40a70eb4760d2", + "format": 1 + }, + { + "name": "roles/pdns/vars/debian.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "e4fbf84f08ee5eedea0b7dac7c80ffd862203f3ddb89b2801b7289ae7fc5aaa4", + "format": 1 + }, + { + "name": "roles/pdns/vars/archlinux.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "cb3e6045dd0938c23bdd6b57b53aa9ae8da831de5064fcb175f7025409187e59", + 
"format": 1 + }, + { + "name": "roles/pdns/handlers", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/pdns/handlers/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "72155d6735e8c67c9b2b17633e5747f672cf3e7c078145c0e906c0174f157b1e", + "format": 1 + }, + { + "name": "roles/pdns/tasks", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/pdns/tasks/service.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "ca103ed6edf823ef84aaf4a7edaab7a7b5e2447af98e6d99f05ab8386753c6c6", + "format": 1 + }, + { + "name": "roles/pdns/tasks/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "120f4067975f80362f30fda9033ea008c211cb6b30d9e6e0816f128221f97967", + "format": 1 + }, + { + "name": "roles/pdns/tasks/configure.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "991809b19d7a1e0e55eb0e96e600a2193526619e4d00f05b66e162a5d4e3e855", + "format": 1 + }, + { + "name": "roles/pdns/tasks/database", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/pdns/tasks/database/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "dbabac3a695760252833f4a4e00edc79e4222087def38216beb606c0d67d6825", + "format": 1 + }, + { + "name": "roles/pdns/tasks/database/sqlite3.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "8ee848c3ea15bd9aa3f0249db8884486fa4c5cea897501f1afd68ff9fd7de965", + "format": 1 + }, + { + "name": "roles/pdns/tasks/database/schema_files.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "6965f3de595eeb05124ad7c679ff0f8d24e663ef058f2fd33c54798d7b023905", + "format": 1 + }, + { + "name": "roles/pdns/tasks/database/mysql.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "575623ace2913128a197fbad0ba4813d77bd88726b7ad9924efa699f953cdbdc", + "format": 1 + }, + { + "name": "roles/pdns/tasks/database/lmdb.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "27b1f2a8aa5740f14d59e69b1b830018984b8df8acf7659d12200cf7461228f7", + "format": 1 + }, + { + "name": "roles/pdns/tasks/install.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "b6180f1e16d2fe4942853a0cd087d6503a36fa3923eaf53679520796f7cb441d", + "format": 1 + }, + { + "name": "roles/pdns/tasks/prepare.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "2a6777659fb10734929293c6a194606299974eb0b869a9c2405a88456b3810f6", + "format": 1 + }, + { + "name": "roles/pdns/tasks/repositories", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/pdns/tasks/repositories/debian.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "2d758f01d83b8ce71d46de7156ce8284ce6593054eab6947b9422253f38c33c1", + "format": 1 + }, + { + "name": "roles/pdns/files", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/pdns/files/bind-dnssec.schema.sqlite3.sql", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "b8c9abc2b08d951908eadd456c88349ff15e929a604bc03e76e62da5c1b9e41e", + "format": 1 + }, + { + "name": "roles/pdns/files/archlinux", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/pdns/files/archlinux/bind-dnssec.schema.sqlite3.sql", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": 
"b8c9abc2b08d951908eadd456c88349ff15e929a604bc03e76e62da5c1b9e41e", + "format": 1 + }, + { + "name": "roles/pdns/files/archlinux/schema.sqlite3.sql", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "92a25cdb363597671f19a0206489ab34d74e77c86b3072713db2ce16110090c0", + "format": 1 + }, + { + "name": "roles/pdns/files/archlinux/schema.mysql.sql", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "08205abbd8747deb10a8475ce4372a2d2474f2b3dde6dd637383da0893e02995", + "format": 1 + }, + { + "name": "roles/pdns/files/schema.pgsql.sql", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "710fe153de6467ca76f6c6355fe32506c48f9e0eef1f6a6534ee413bfa7cb120", + "format": 1 + }, + { + "name": "roles/pdns/files/schema.sqlite3.sql", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "92a25cdb363597671f19a0206489ab34d74e77c86b3072713db2ce16110090c0", + "format": 1 + }, + { + "name": "roles/pdns/files/schema.mysql.sql", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "fb4a8408d69c7d25c7bd491cdcc620d6b5195a32e00ca544e0be6fdca53382a4", + "format": 1 + }, + { + "name": "roles/knot", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/knot/hooks", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/knot/hooks/converge", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "1ddaaac121ea494491ac3419f0be9725405644c2b31781030d236ebc3d930664", + "format": 1 + }, + { + "name": "roles/knot/hooks/verify", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "d3ffa461db1e1516600a5de841029880521aff844c746e5dedf4bebfccdbf39b", + "format": 1 + }, + { + "name": "roles/knot/hooks/test", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "6137f41add5be2e1a9caa9abd1c1a483c8ce365370fd64eb85dee201a7074403", + "format": 1 + }, + { + "name": "roles/knot/hooks/lint", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "fd048a322d0d05e245101fdae1a26a3f6a23c13bbe4f1fc455448672eb59a0a7", + "format": 1 + }, + { + "name": "roles/knot/hooks/destroy", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "79c679a67141ef9cbfb56c8c7b948ca3df010c56d0c7af6c46535cde58bac8bb", + "format": 1 + }, + { + "name": "roles/knot/hooks/tox.sh", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "c32820da1cc4358e7fa4731358bfa38fcbb221154bd45412aa2d9202f60c8419", + "format": 1 + }, + { + "name": "roles/knot/hooks/molecule.rc", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "7fbafd0a44904bdb4b3b880ef8d2776279fd50416e825c23e43110a71e96bf90", + "format": 1 + }, + { + "name": "roles/knot/.flake8", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "2fe45659318525388b0574a4104028589a94f17e5f6a1cd796efca0bb1a4f510", + "format": 1 + }, + { + "name": "roles/knot/defaults", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/knot/defaults/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "3301e02033f99e230b93b6d2cbf790bb32129d57082e597f764c541850f27977", + "format": 1 + }, + { + "name": "roles/knot/molecule", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/knot/molecule/default", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/knot/molecule/default/tests", + "ftype": "dir", + 
"chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/knot/molecule/default/tests/test_default.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "28dd42290fb473579289f338d7574b651a985d6bc797684c03c596763d9e5cb4", + "format": 1 + }, + { + "name": "roles/knot/molecule/default/group_vars", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/knot/molecule/default/group_vars/all", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/knot/molecule/default/group_vars/all/vars.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "144e1381858db7e2636d8698b249684097a154c87f486b543020bfdf91c8681c", + "format": 1 + }, + { + "name": "roles/knot/molecule/default/prepare.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "4654bd3e55b8b7355de010b11bf80e6fc9d651a782555e25b23d7b28c4d5cc57", + "format": 1 + }, + { + "name": "roles/knot/molecule/default/molecule.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "d68324888a5bc2ed1a421bb55994adb3d697fdad63b1e256442ff0262a737550", + "format": 1 + }, + { + "name": "roles/knot/molecule/default/converge.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "973f5172299d186aa2012ed41090c1493b44f2c59eb53b2182d75a0b13061c4c", + "format": 1 + }, + { + "name": "roles/knot/molecule/configured", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/knot/molecule/configured/tests", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/knot/molecule/configured/tests/test_default.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "0cb45a8c749d73e31686c5cc2b5b7d6b79b89d39a9944d9484a90a9ea4eaeee0", + "format": 1 + }, + { + "name": "roles/knot/molecule/configured/group_vars", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/knot/molecule/configured/group_vars/all", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/knot/molecule/configured/group_vars/all/vars.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "0e86da247c1b7025b944a1aa96dfffa3be88da2a969ec434b4b755c36e689721", + "format": 1 + }, + { + "name": "roles/knot/molecule/configured/prepare.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "4654bd3e55b8b7355de010b11bf80e6fc9d651a782555e25b23d7b28c4d5cc57", + "format": 1 + }, + { + "name": "roles/knot/molecule/configured/molecule.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "d68324888a5bc2ed1a421bb55994adb3d697fdad63b1e256442ff0262a737550", + "format": 1 + }, + { + "name": "roles/knot/molecule/configured/converge.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "973f5172299d186aa2012ed41090c1493b44f2c59eb53b2182d75a0b13061c4c", + "format": 1 + }, + { + "name": "roles/knot/.ansible-lint", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "3f79cdace5e0ac1512d6b3e35517aa678069b483ffb80ca63e5b51718be5bc31", + "format": 1 + }, + { + "name": "roles/knot/.yamllint", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "c0b8af235d23db527385396b1b168b14f5604ad1f82b912f645919b1c95b76a0", + "format": 1 + }, + { + "name": "roles/knot/.gitignore", + "ftype": "file", + "chksum_type": "sha256", + 
"chksum_sha256": "88b2262e97da40d844c28af01fe4153605745a11e29348301bf82f9a1a5881dc", + "format": 1 + }, + { + "name": "roles/knot/README.md", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "8b3332c23eae692f139ea5719165960efc70e0b89f086ba6fd8e74bc6aecfded", + "format": 1 + }, + { + "name": "roles/knot/Makefile", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "22a0d6abe4d830d86b79c2ef366996400d6b634b9e11d0834c1bb0a9ce1bd12d", + "format": 1 + }, + { + "name": "roles/knot/templates", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/knot/templates/etc", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/knot/templates/etc/knot", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/knot/templates/etc/knot/knot.conf.j2", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "766cd18f4847dc7cec9dcee6488b34a4e7a2db2cd6fb9a86763da2b1452af7fb", + "format": 1 + }, + { + "name": "roles/knot/meta", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/knot/meta/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "96de8b7da0c870b51152c8495ab2a48b5a588fcf25f0c003edfcc048ce5990fc", + "format": 1 + }, + { + "name": "roles/knot/vars", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/knot/vars/artixlinux.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "90630b1e7cb9ecfec6757cc135d56b2c9f89533ce08a2e598afceb764fb8c360", + "format": 1 + }, + { + "name": "roles/knot/vars/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "3d56d273be266115d89e0275ca6c76e2fec1aa5f1517a21a1fc89e5abfbaeb9a", + "format": 1 + }, + { + "name": "roles/knot/vars/debian.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "31619bed671388ef6927fb8e6cc72f6cb5d0c9c5eda5e50afb1fb8776ecf0e43", + "format": 1 + }, + { + "name": "roles/knot/vars/archlinux.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "ea2856a0165119ac0b95b3032433d0a46ed91890fa001b551664948b69853363", + "format": 1 + }, + { + "name": "roles/knot/vars/archlinux-openrc.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "90630b1e7cb9ecfec6757cc135d56b2c9f89533ce08a2e598afceb764fb8c360", + "format": 1 + }, + { + "name": "roles/knot/vars/ubuntu.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "52836cfa682a2333589f984727ea2a7b4963d48769c699cc764934731eefa361", + "format": 1 + }, + { + "name": "roles/knot/LICENSE", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "c318b6f42ca9cd8f48502e05161a95a92b154ff5c0647209b208ee9ff9657a04", + "format": 1 + }, + { + "name": "roles/knot/handlers", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/knot/handlers/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "7cf91f4531ce816dedfb22a16a61aa3e6c8b116f4f8d9fa9148f96d479f400ae", + "format": 1 + }, + { + "name": "roles/knot/tasks", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/knot/tasks/repositories.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "99168eab025520d0f4c354ecd0e8b6b6503ca83787893618b52154c05f942f1f", + "format": 1 + }, + { + "name": 
"roles/knot/tasks/service.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "3ba831e967929815ca98dbe5dbfcec2775003a90e377200e4087fb26311fe54b", + "format": 1 + }, + { + "name": "roles/knot/tasks/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "f4e85833abb8489e53fafbf955dbfe581c65eca8d280602d0ca4938b4cbde71a", + "format": 1 + }, + { + "name": "roles/knot/tasks/configure.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "936ad2e0e313910aaf42d820a25b41de91cde4d6480a6c41a9e80f3aa45e0bc6", + "format": 1 + }, + { + "name": "roles/knot/tasks/install.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "71b022e0cda380ed55118f1fdbeba1e98503ad0c307c5686abadc88351547ca0", + "format": 1 + }, + { + "name": "roles/knot/tasks/prepare.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "240ce4f14f5116e9268c89d80296cef6e5e7175e2f51e3ee00f914d945611ddf", + "format": 1 + }, + { + "name": "roles/knot/.editorconfig", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "ed43c7886023384f96a2b69d1288065a2cdc651bb9ffc9d7f2bbda6550f2abd6", + "format": 1 + }, + { + "name": "roles/dnsmasq", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/dnsmasq/hooks", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/dnsmasq/hooks/converge", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "1ddaaac121ea494491ac3419f0be9725405644c2b31781030d236ebc3d930664", + "format": 1 + }, + { + "name": "roles/dnsmasq/hooks/verify", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "d3ffa461db1e1516600a5de841029880521aff844c746e5dedf4bebfccdbf39b", + "format": 1 + }, + { + "name": "roles/dnsmasq/hooks/test", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "6137f41add5be2e1a9caa9abd1c1a483c8ce365370fd64eb85dee201a7074403", + "format": 1 + }, + { + "name": "roles/dnsmasq/hooks/lint", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "fd048a322d0d05e245101fdae1a26a3f6a23c13bbe4f1fc455448672eb59a0a7", + "format": 1 + }, + { + "name": "roles/dnsmasq/hooks/destroy", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "79c679a67141ef9cbfb56c8c7b948ca3df010c56d0c7af6c46535cde58bac8bb", + "format": 1 + }, + { + "name": "roles/dnsmasq/hooks/tox.sh", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "c32820da1cc4358e7fa4731358bfa38fcbb221154bd45412aa2d9202f60c8419", + "format": 1 + }, + { + "name": "roles/dnsmasq/hooks/molecule.rc", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "7fbafd0a44904bdb4b3b880ef8d2776279fd50416e825c23e43110a71e96bf90", + "format": 1 + }, + { + "name": "roles/dnsmasq/.flake8", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "2fe45659318525388b0574a4104028589a94f17e5f6a1cd796efca0bb1a4f510", + "format": 1 + }, + { + "name": "roles/dnsmasq/defaults", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/dnsmasq/defaults/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "cb9a293caa0eedb0ba7c4ef6ddae9f2f83f7c5b16f1a904c9b1d4e8662834bf9", + "format": 1 + }, + { + "name": "roles/dnsmasq/molecule", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/dnsmasq/molecule/require-systemd-unit", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + 
"format": 1 + }, + { + "name": "roles/dnsmasq/molecule/require-systemd-unit/tests", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/dnsmasq/molecule/require-systemd-unit/tests/test_default.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "159293b6626e861665e9bb6e517d053d105896b6739289b322b5cd21226b05db", + "format": 1 + }, + { + "name": "roles/dnsmasq/molecule/require-systemd-unit/group_vars", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/dnsmasq/molecule/require-systemd-unit/group_vars/all", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/dnsmasq/molecule/require-systemd-unit/group_vars/all/vars.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "ee7db52b6dccc093e0256e6033acf963fa70abde894041f418008ef6bab04c36", + "format": 1 + }, + { + "name": "roles/dnsmasq/molecule/require-systemd-unit/prepare.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "30840b1e2320a5251fdb16acc477675970baed33e2e7596d01338b2370a9a9db", + "format": 1 + }, + { + "name": "roles/dnsmasq/molecule/require-systemd-unit/molecule.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "9ea92e684c4a022509518f2784bad33ebe85de68d630c933b120aabe0946f279", + "format": 1 + }, + { + "name": "roles/dnsmasq/molecule/require-systemd-unit/converge.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "76140a6ab5d8273cd2012cd380b78bdc89b6ac313eb6e4b991c6b3f957005dad", + "format": 1 + }, + { + "name": "roles/dnsmasq/molecule/default", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/dnsmasq/molecule/default/tests", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/dnsmasq/molecule/default/tests/test_default.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "159293b6626e861665e9bb6e517d053d105896b6739289b322b5cd21226b05db", + "format": 1 + }, + { + "name": "roles/dnsmasq/molecule/default/group_vars", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/dnsmasq/molecule/default/group_vars/all", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/dnsmasq/molecule/default/group_vars/all/vars.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "144e1381858db7e2636d8698b249684097a154c87f486b543020bfdf91c8681c", + "format": 1 + }, + { + "name": "roles/dnsmasq/molecule/default/prepare.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "30840b1e2320a5251fdb16acc477675970baed33e2e7596d01338b2370a9a9db", + "format": 1 + }, + { + "name": "roles/dnsmasq/molecule/default/molecule.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "9ea92e684c4a022509518f2784bad33ebe85de68d630c933b120aabe0946f279", + "format": 1 + }, + { + "name": "roles/dnsmasq/molecule/default/converge.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "76140a6ab5d8273cd2012cd380b78bdc89b6ac313eb6e4b991c6b3f957005dad", + "format": 1 + }, + { + "name": "roles/dnsmasq/molecule/configured", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/dnsmasq/molecule/configured/tests", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 
1 + }, + { + "name": "roles/dnsmasq/molecule/configured/tests/test_default.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "159293b6626e861665e9bb6e517d053d105896b6739289b322b5cd21226b05db", + "format": 1 + }, + { + "name": "roles/dnsmasq/molecule/configured/group_vars", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/dnsmasq/molecule/configured/group_vars/all", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/dnsmasq/molecule/configured/group_vars/all/vars.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "2b9058bdfad51193b08f19a6cda8cebec98c5b17fe0ae24290fa53fd26f02702", + "format": 1 + }, + { + "name": "roles/dnsmasq/molecule/configured/prepare.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "30840b1e2320a5251fdb16acc477675970baed33e2e7596d01338b2370a9a9db", + "format": 1 + }, + { + "name": "roles/dnsmasq/molecule/configured/molecule.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "9ea92e684c4a022509518f2784bad33ebe85de68d630c933b120aabe0946f279", + "format": 1 + }, + { + "name": "roles/dnsmasq/molecule/configured/converge.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "76140a6ab5d8273cd2012cd380b78bdc89b6ac313eb6e4b991c6b3f957005dad", + "format": 1 + }, + { + "name": "roles/dnsmasq/.markdown-lint.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "25b7d23f1f5d63f1774adf9badffa424d4591e897febb65fcf0b9f42a327cea3", + "format": 1 + }, + { + "name": "roles/dnsmasq/.ansible-lint", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "3f79cdace5e0ac1512d6b3e35517aa678069b483ffb80ca63e5b51718be5bc31", + "format": 1 + }, + { + "name": "roles/dnsmasq/.yamllint", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "6a8f34854e6109024a678dbdc16a082b21e0134067fcfab10072b547991d8339", + "format": 1 + }, + { + "name": "roles/dnsmasq/.gitignore", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "88b2262e97da40d844c28af01fe4153605745a11e29348301bf82f9a1a5881dc", + "format": 1 + }, + { + "name": "roles/dnsmasq/README.md", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "cc91f68124d0841738b46b65c4c5a34a605db066b6e0a26604770292c3b4cb2a", + "format": 1 + }, + { + "name": "roles/dnsmasq/Makefile", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "22a0d6abe4d830d86b79c2ef366996400d6b634b9e11d0834c1bb0a9ce1bd12d", + "format": 1 + }, + { + "name": "roles/dnsmasq/templates", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/dnsmasq/templates/etc", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/dnsmasq/templates/etc/dnsmasq.d", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/dnsmasq/templates/etc/dnsmasq.d/20-domain.conf.j2", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "e4df14309b8c93d3d0127dfec3240cb9ed5c2bc0e491f44900938d2b9031a84f", + "format": 1 + }, + { + "name": "roles/dnsmasq/templates/etc/dnsmasq.d/20-local.conf.j2", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "8047dcf95b92e53155db5b5ad50a4bd22a59a95e9b779dce1efa7fb50a0dcfb0", + "format": 1 + }, + { + "name": "roles/dnsmasq/templates/etc/dnsmasq.d/20-alias.conf.j2", + "ftype": "file", + "chksum_type": "sha256", + 
"chksum_sha256": "07b414fbd24558ca1f880be664cb62db986de1502887a8ff178c91c0aaa62b1d", + "format": 1 + }, + { + "name": "roles/dnsmasq/templates/etc/dnsmasq.d/10-logging.conf.j2", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "127ccd409658d9abeeff652b584d8f37fcafa418d71724978c0b0ad6b238d011", + "format": 1 + }, + { + "name": "roles/dnsmasq/templates/etc/dnsmasq.d/20-dnssec.conf.j2", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "25f0fa3b9187fc8c88bec813ce546c4fb36f86d5e2c77b7c3c94c47f0e9bc1b3", + "format": 1 + }, + { + "name": "roles/dnsmasq/templates/etc/dnsmasq.d/20-ipset.conf.j2", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "8e808bf24c33ad0a7554a7bb339f7e86bd9e2ca823b6f0972b02935bda504653", + "format": 1 + }, + { + "name": "roles/dnsmasq/templates/etc/dnsmasq.d/20-mx.conf.j2", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "5a635a79eff603618bfba38815aec7ff2e04b382cb97351bc1f6d78326c54e1f", + "format": 1 + }, + { + "name": "roles/dnsmasq/templates/etc/dnsmasq.d/10-interfaces.conf.j2", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "8d1e0535696c2cec9b8f867461b36a134298a2b867e418d2edd0d8ea1afd8350", + "format": 1 + }, + { + "name": "roles/dnsmasq/templates/etc/dnsmasq.d/25-cname-records.conf.j2", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "52fa788002b5bada6108e8c0514c833709017ce8c500d36a4ae4861a478c8bd0", + "format": 1 + }, + { + "name": "roles/dnsmasq/templates/etc/dnsmasq.d/25-srv-records.conf.j2", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "0a3271d11d027316efde412d224d58f8197049455a6ff0cd74647cfcd680f8db", + "format": 1 + }, + { + "name": "roles/dnsmasq/templates/etc/dnsmasq.d/20-address.conf.j2", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "e6d0e046f9d3b43e6d3b344b5329f9d226c55d1527d2cc46fae33b360c7b3713", + "format": 1 + }, + { + "name": "roles/dnsmasq/templates/etc/dnsmasq.d/20-nftset.conf.j2", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "66ae6f7b7ae7e7bde2e03c4a562a133fc84424af0c2fccaaea00965399ceb4cd", + "format": 1 + }, + { + "name": "roles/dnsmasq/templates/etc/dnsmasq.d/20-dhcp.conf.j2", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "1705473c6be713372fe28655f850e97b0305fa27be224584270ba27b7f9b5cd7", + "format": 1 + }, + { + "name": "roles/dnsmasq/templates/etc/dnsmasq.d/25-txt-records.conf.j2", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "a276c2b3d3f2e121757fd74f4d3b7a41df8b4cf32fdab3c13c147f53f8d54ab6", + "format": 1 + }, + { + "name": "roles/dnsmasq/templates/etc/dnsmasq.d/20-pxe.conf.j2", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "2a345bdb5597378df373f4d6b7cc14012bf71a70f044a7aee60b1705ad93e5c8", + "format": 1 + }, + { + "name": "roles/dnsmasq/templates/etc/dnsmasq.d/20-server.conf.j2", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "e5b11163b25f9ad01b475ffdf24ab1d9acfff4ce56f6971be723393f9656c7d1", + "format": 1 + }, + { + "name": "roles/dnsmasq/templates/etc/dnsmasq.d/20-tftp.conf.j2", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "18aa4bd43a9c12cf69a07d958138f07f11a379e5cdc3bf7300de8e6f6c3274b0", + "format": 1 + }, + { + "name": "roles/dnsmasq/templates/etc/dnsmasq.d/25-ptr-records.conf.j2", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "50f8f2b007a6b2cead7d187497c71632878b4898a4baa95f65268cc750022f7a", + "format": 1 + }, + { + "name": "roles/dnsmasq/templates/etc/dnsmasq.conf", + 
"ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "da6daa895e9bcaf065528574928695683a46dc33b72eb54a23b6421369eba65e", + "format": 1 + }, + { + "name": "roles/dnsmasq/templates/etc/dnsmasq.conf.j2", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "212f7ed8c0b31a5c946b66d08d1854469bab1942ce568a4781461d17255ff289", + "format": 1 + }, + { + "name": "roles/dnsmasq/templates/init", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/dnsmasq/templates/init/systemd", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/dnsmasq/templates/init/systemd/override.conf.j2", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "e8beb9f769774a6301f811f8295b4dc926263980992587a0c1236891ed5b9617", + "format": 1 + }, + { + "name": "roles/dnsmasq/meta", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/dnsmasq/meta/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "1dd30daa40aa4f33056b962f12c61409f60e8c5791927aa41e8610f60fb4029a", + "format": 1 + }, + { + "name": "roles/dnsmasq/vars", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/dnsmasq/vars/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "0b84a03c440108038a420f297e9931f607a68a1ec1254c51dd22c6cc39342189", + "format": 1 + }, + { + "name": "roles/dnsmasq/vars/debian.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "144e1381858db7e2636d8698b249684097a154c87f486b543020bfdf91c8681c", + "format": 1 + }, + { + "name": "roles/dnsmasq/vars/archlinux.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "144e1381858db7e2636d8698b249684097a154c87f486b543020bfdf91c8681c", + "format": 1 + }, + { + "name": "roles/dnsmasq/LICENSE", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "c318b6f42ca9cd8f48502e05161a95a92b154ff5c0647209b208ee9ff9657a04", + "format": 1 + }, + { + "name": "roles/dnsmasq/handlers", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/dnsmasq/handlers/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "dc32212927cbd90e0a0ccbdaf5965e071e05ba2c11aa46869a4e3c1e1636f709", + "format": 1 + }, + { + "name": "roles/dnsmasq/CONTRIBUTING.md", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "af957267f930f931b72fd2dc1d4127af66afe9aea4459075bc1c6b0051109df9", + "format": 1 + }, + { + "name": "roles/dnsmasq/tasks", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/dnsmasq/tasks/service.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "cb0d8cd011d3c2c19fcd89edf4b5b7e77d71bc74ec1e8e0f03afb4461eab2be2", + "format": 1 + }, + { + "name": "roles/dnsmasq/tasks/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "68fdaa86d6c51c631842432fa898bce854d43ef00f5e1a87156d09c17177e93d", + "format": 1 + }, + { + "name": "roles/dnsmasq/tasks/configure.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "852220eb4bcb2fcd315a721f50b4eafdb86312b41b98f654a4d7e89d7b70fdb3", + "format": 1 + }, + { + "name": "roles/dnsmasq/tasks/install.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "c9ab3bb482409c482f236bfe5c470f013a83a334bc61c1698d1edb8b14ae3575", + "format": 1 + }, + { + "name": 
"roles/dnsmasq/tasks/prepare.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "742710d04acbf4d594670faf1aef216153df5a88f60b27b1626d87693900046e", + "format": 1 + }, + { + "name": "roles/dnsmasq/.editorconfig", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "ed43c7886023384f96a2b69d1288065a2cdc651bb9ffc9d7f2bbda6550f2abd6", + "format": 1 + }, + { + "name": "roles/pdns_records", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/pdns_records/defaults", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/pdns_records/defaults/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "0825b7083195300b4eb0480c5d5898bf196ff0b591ae27e3016b44c773649a59", + "format": 1 + }, + { + "name": "roles/pdns_records/molecule", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/pdns_records/molecule/backend-sqlite", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/pdns_records/molecule/backend-sqlite/tests", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/pdns_records/molecule/backend-sqlite/tests/test_default.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "1450d190db89a187d95f392315f29c8cab70e7fe98bdfe3dc92b0bb5156f65ae", + "format": 1 + }, + { + "name": "roles/pdns_records/molecule/backend-sqlite/tests/helper", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/pdns_records/molecule/backend-sqlite/tests/helper/dns_utils.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "9b2610eaa7aacbba834b91e6833e81f269bf4ef7a47144a9e5cebfc4edf28c4a", + "format": 1 + }, + { + "name": "roles/pdns_records/molecule/backend-sqlite/tests/helper/molecule.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "ba00a224f2abe4a0d5c5fde368b98360f6cd94717f5a2c3e0014cbd8cdc50dbb", + "format": 1 + }, + { + "name": "roles/pdns_records/molecule/backend-sqlite/tests/helper/dns_utils.save", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "e3900cd66b39a4348f30cc9ad6d39d58b6b694bfdcfae46c06c60f2b5805dbae", + "format": 1 + }, + { + "name": "roles/pdns_records/molecule/backend-sqlite/group_vars", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/pdns_records/molecule/backend-sqlite/group_vars/all", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/pdns_records/molecule/backend-sqlite/group_vars/all/pdns.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "84d6e9ec4045ae82136639ab8e61e2a6563f712b21e72fbfffd2075c29e3b411", + "format": 1 + }, + { + "name": "roles/pdns_records/molecule/backend-sqlite/group_vars/all/vars.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "cf5b11e74a7a41aaa112ab5a1d7a4d4631d188792c0861bd39ea370841c17ba9", + "format": 1 + }, + { + "name": "roles/pdns_records/molecule/backend-sqlite/prepare.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "c023789dc9214de428108412c3546e84d0f65bd259f08e0976cb081f93d87769", + "format": 1 + }, + { + "name": "roles/pdns_records/molecule/backend-sqlite/molecule.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": 
"87b0167452949c6ddaf944f4918508ea7c0a246e2f409710b9f8be4e75b0ad10", + "format": 1 + }, + { + "name": "roles/pdns_records/molecule/backend-sqlite/converge.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "3a7fdba913c55f8517b86e8bfa4724f813314cd0a72f769e9c73dd623baf3204", + "format": 1 + }, + { + "name": "roles/pdns_records/molecule/backend-mysql", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/pdns_records/molecule/backend-mysql/tests", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/pdns_records/molecule/backend-mysql/tests/test_default.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "308f3f1eea1c854f893fb11e6b12702c4c8a7ccab89f1a7b5d2f10cce43e1a26", + "format": 1 + }, + { + "name": "roles/pdns_records/molecule/backend-mysql/tests/helper", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/pdns_records/molecule/backend-mysql/tests/helper/dns_utils.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "9b2610eaa7aacbba834b91e6833e81f269bf4ef7a47144a9e5cebfc4edf28c4a", + "format": 1 + }, + { + "name": "roles/pdns_records/molecule/backend-mysql/tests/helper/molecule.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "ba00a224f2abe4a0d5c5fde368b98360f6cd94717f5a2c3e0014cbd8cdc50dbb", + "format": 1 + }, + { + "name": "roles/pdns_records/molecule/backend-mysql/tests/helper/dns_utils.save", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "e3900cd66b39a4348f30cc9ad6d39d58b6b694bfdcfae46c06c60f2b5805dbae", + "format": 1 + }, + { + "name": "roles/pdns_records/molecule/backend-mysql/group_vars", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/pdns_records/molecule/backend-mysql/group_vars/all", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/pdns_records/molecule/backend-mysql/group_vars/all/pdns.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "fcaac032ed04de5da97592d9036eb2a7da22edf25fc2d2d273a69277608c2354", + "format": 1 + }, + { + "name": "roles/pdns_records/molecule/backend-mysql/group_vars/all/vault.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "4dcb7880517c3a4c0a4cceef4eb4c9b0191bbab0427e49f354df32165c9cd330", + "format": 1 + }, + { + "name": "roles/pdns_records/molecule/backend-mysql/group_vars/all/vars.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "372d87392852050cf1d25e27b5e124caf87c8474d649e2284f04579b862b4596", + "format": 1 + }, + { + "name": "roles/pdns_records/molecule/backend-mysql/host_vars", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/pdns_records/molecule/backend-mysql/host_vars/database", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/pdns_records/molecule/backend-mysql/host_vars/database/mariadb.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "c7e6f104310442630d0e6d2ff550ce10082c3c1f39da43059dbeb7eafbc327c9", + "format": 1 + }, + { + "name": "roles/pdns_records/molecule/backend-mysql/requirements.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "7c683998c22e0a6aa98e5739f0c96f4f766a87974bb0f3c0477f3849622f6c8b", + "format": 1 + }, + { + "name": 
"roles/pdns_records/molecule/backend-mysql/prepare.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "8a4c247ba291a7142ba6dd97825e060d1e600420f68bfd402315fbf04949ef52", + "format": 1 + }, + { + "name": "roles/pdns_records/molecule/backend-mysql/molecule.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "ff2eb51a3fee912b20d689f4bab701d93781c9380b1a1541ae7b804544002cc5", + "format": 1 + }, + { + "name": "roles/pdns_records/molecule/backend-mysql/converge.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "729590a8c6b8cf1f523c0de6dea53f6e93b4c861a1f3be9182867477d5b5875d", + "format": 1 + }, + { + "name": "roles/pdns_records/molecule/backend-lmdb", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/pdns_records/molecule/backend-lmdb/tests", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/pdns_records/molecule/backend-lmdb/tests/test_default.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "1450d190db89a187d95f392315f29c8cab70e7fe98bdfe3dc92b0bb5156f65ae", + "format": 1 + }, + { + "name": "roles/pdns_records/molecule/backend-lmdb/tests/helper", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/pdns_records/molecule/backend-lmdb/tests/helper/dns_utils.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "9b2610eaa7aacbba834b91e6833e81f269bf4ef7a47144a9e5cebfc4edf28c4a", + "format": 1 + }, + { + "name": "roles/pdns_records/molecule/backend-lmdb/tests/helper/molecule.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "ba00a224f2abe4a0d5c5fde368b98360f6cd94717f5a2c3e0014cbd8cdc50dbb", + "format": 1 + }, + { + "name": "roles/pdns_records/molecule/backend-lmdb/group_vars", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/pdns_records/molecule/backend-lmdb/group_vars/all", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/pdns_records/molecule/backend-lmdb/group_vars/all/pdns.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "112a62b1dd94a64fd5b9c73ee0aaa915837cadb396c320ee9d3fbef6aef2a5e6", + "format": 1 + }, + { + "name": "roles/pdns_records/molecule/backend-lmdb/group_vars/all/vars.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "bb57e40d143ea5be67663a0be72c2fd57d270156521904aeec1e99bfe93fff25", + "format": 1 + }, + { + "name": "roles/pdns_records/molecule/backend-lmdb/prepare.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "2ab40ef57b73259b58095c10879e033e1775012271e4b4e68c652531c35d542f", + "format": 1 + }, + { + "name": "roles/pdns_records/molecule/backend-lmdb/molecule.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "87b0167452949c6ddaf944f4918508ea7c0a246e2f409710b9f8be4e75b0ad10", + "format": 1 + }, + { + "name": "roles/pdns_records/molecule/backend-lmdb/converge.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "3a7fdba913c55f8517b86e8bfa4724f813314cd0a72f769e9c73dd623baf3204", + "format": 1 + }, + { + "name": "roles/pdns_records/molecule/configured", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/pdns_records/molecule/configured/tests", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + 
"name": "roles/pdns_records/molecule/configured/tests/test_default.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "308f3f1eea1c854f893fb11e6b12702c4c8a7ccab89f1a7b5d2f10cce43e1a26", + "format": 1 + }, + { + "name": "roles/pdns_records/molecule/configured/tests/helper", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/pdns_records/molecule/configured/tests/helper/dns_utils.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "9b2610eaa7aacbba834b91e6833e81f269bf4ef7a47144a9e5cebfc4edf28c4a", + "format": 1 + }, + { + "name": "roles/pdns_records/molecule/configured/tests/helper/molecule.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "ba00a224f2abe4a0d5c5fde368b98360f6cd94717f5a2c3e0014cbd8cdc50dbb", + "format": 1 + }, + { + "name": "roles/pdns_records/molecule/configured/tests/helper/dns_utils.save", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "e3900cd66b39a4348f30cc9ad6d39d58b6b694bfdcfae46c06c60f2b5805dbae", + "format": 1 + }, + { + "name": "roles/pdns_records/molecule/configured/tests/test_pdns.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "968440c209cc1fdef5aafd16c0003ff900f70ad18187148e4382fb1254cf30da", + "format": 1 + }, + { + "name": "roles/pdns_records/molecule/configured/group_vars", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/pdns_records/molecule/configured/group_vars/all", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/pdns_records/molecule/configured/group_vars/all/pdns.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "bcbf059aa853c5e9a4057da74c4daf929f1edd54d0a55008b8178d655e03cb8b", + "format": 1 + }, + { + "name": "roles/pdns_records/molecule/configured/group_vars/all/vault.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "e903a6f5fa9dc19b3b4a01e5cecc56912b90c66e2fa62e6b183901b91cd9404b", + "format": 1 + }, + { + "name": "roles/pdns_records/molecule/configured/group_vars/all/vars.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "9f75b3dc90afc853930596054b3e27e8ab0df8d9aa6332761125d4c2709b603c", + "format": 1 + }, + { + "name": "roles/pdns_records/molecule/configured/prepare.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "2ab40ef57b73259b58095c10879e033e1775012271e4b4e68c652531c35d542f", + "format": 1 + }, + { + "name": "roles/pdns_records/molecule/configured/molecule.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "632419613bebff3714038ca570c92d55b32a3d25714e6f793fad6e17a9b81ee3", + "format": 1 + }, + { + "name": "roles/pdns_records/molecule/configured/converge.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "3a7fdba913c55f8517b86e8bfa4724f813314cd0a72f769e9c73dd623baf3204", + "format": 1 + }, + { + "name": "roles/pdns_records/.yamllint", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "5845e18e9f23155f423207df9abac970aed687c638620bc2c9ee06706191054b", + "format": 1 + }, + { + "name": "roles/pdns_records/README.md", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "23eb6071653040325f77f6ddd2c2da65de20452af2fb107c7b7953a54a0e1282", + "format": 1 + }, + { + "name": "roles/pdns_records/meta", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/pdns_records/meta/main.yml", + 
"ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "04a79fd78e5c93a06bc5953a3cbe87005fb8ccd38102342a914242053897fa9b", + "format": 1 + }, + { + "name": "roles/pdns_records/vars", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/pdns_records/vars/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "2fdcd924f7a0ff977a0e929119de8dae5053c5c76d4c2c3936b40a70eb4760d2", + "format": 1 + }, + { + "name": "roles/pdns_records/vars/debian.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "e4fbf84f08ee5eedea0b7dac7c80ffd862203f3ddb89b2801b7289ae7fc5aaa4", + "format": 1 + }, + { + "name": "roles/pdns_records/vars/archlinux.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "f86e2ccfbec7285a828d038ec0f44758adb8be3d52558eef0fa6f22415d2489f", + "format": 1 + }, + { + "name": "roles/pdns_records/tasks", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/pdns_records/tasks/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "b6ab4bd92fbcfb838f64216e1c9fbcb29748e8ac10958f3d02ac40341bd7995c", + "format": 1 + }, + { + "name": "roles/pdns_records/tasks/prepare.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "aa2fbc4f5e0cbc443e28289077de3d30f28c4ad7aa837fef943a33b9e8575cbb", + "format": 1 + }, + { + "name": "roles/pdns_records/tasks/domains.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "65dda8efad6ace6241a6f8cf67e43c7247b659dbba58d54372c3781d3256be01", + "format": 1 + }, + { + "name": "roles/hosts", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/hosts/.flake8", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "2fe45659318525388b0574a4104028589a94f17e5f6a1cd796efca0bb1a4f510", + "format": 1 + }, + { + "name": "roles/hosts/defaults", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/hosts/defaults/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "8949634d1c8690ab5caae908a00e619a174a0ce8b35167fbbfcae4bd51b0f8a2", + "format": 1 + }, + { + "name": "roles/hosts/molecule", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/hosts/molecule/default", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/hosts/molecule/default/group_vars", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/hosts/molecule/default/group_vars/all", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/hosts/molecule/default/group_vars/all/vars.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "7a250e803f02906f719c6f46b6c5ff5e76230abd9d11b7e299d538a10a117b66", + "format": 1 + }, + { + "name": "roles/hosts/molecule/default/prepare.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "fdc5435ef8d9ccd5ce3704f3f655d87a78eb8e4f199fc738197f6aa8afde1756", + "format": 1 + }, + { + "name": "roles/hosts/molecule/default/molecule.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "632419613bebff3714038ca570c92d55b32a3d25714e6f793fad6e17a9b81ee3", + "format": 1 + }, + { + "name": "roles/hosts/molecule/default/converge.yml", + "ftype": "file", + "chksum_type": "sha256", + 
"chksum_sha256": "e1c012767d94cedb6c0c95da959262d36a93c75300a6e026ac1dbdad619f4eee", + "format": 1 + }, + { + "name": "roles/hosts/.ansible-lint", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "3f79cdace5e0ac1512d6b3e35517aa678069b483ffb80ca63e5b51718be5bc31", + "format": 1 + }, + { + "name": "roles/hosts/.yamllint", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "c0b8af235d23db527385396b1b168b14f5604ad1f82b912f645919b1c95b76a0", + "format": 1 + }, + { + "name": "roles/hosts/.gitignore", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "88b2262e97da40d844c28af01fe4153605745a11e29348301bf82f9a1a5881dc", + "format": 1 + }, + { + "name": "roles/hosts/README.md", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "a2f9a4697b4ce3786d1d866a73d48aa9e2aaf70306716ee40926372681259c43", + "format": 1 + }, + { + "name": "roles/hosts/templates", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/hosts/templates/etc", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/hosts/templates/etc/hosts.j2", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "e5b44421f6cf807714efc2b511a1756cd5f7ec003d04a1bf0bf2a66ceefef4bf", + "format": 1 + }, + { + "name": "roles/hosts/meta", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/hosts/meta/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "8d175ab420307831bfd418b12c4d7719b981c0d7d029fcca38f30a934ded7c34", + "format": 1 + }, + { + "name": "roles/hosts/vars", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/hosts/vars/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "4f7d8c3d10ccd91b30c5c86fae53239056e544c1b9ab56de11a1262b356b16bb", + "format": 1 + }, + { + "name": "roles/hosts/LICENSE", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "c318b6f42ca9cd8f48502e05161a95a92b154ff5c0647209b208ee9ff9657a04", + "format": 1 + }, + { + "name": "roles/hosts/tasks", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/hosts/tasks/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "e583f4131c584f602a64d584df610398454117fcfe52fc76fba7d2025de539a5", + "format": 1 + }, + { + "name": "roles/hosts/.editorconfig", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "ed43c7886023384f96a2b69d1288065a2cdc651bb9ffc9d7f2bbda6550f2abd6", + "format": 1 + }, + { + "name": "roles/unbound", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/unbound/hooks", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/unbound/hooks/converge", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "60c1a19545c4971b6fff290c5520278ead2c2c36e61981700f786a89fcdba8d8", + "format": 1 + }, + { + "name": "roles/unbound/hooks/_tox_base", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "03502d1cecefbfae0c44115de1394c29618b05a3c1488f2dc4bde321aaf782aa", + "format": 1 + }, + { + "name": "roles/unbound/hooks/verify", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "10ab3def4958f02f40172fc92efe9635de1aca1dae9bac9dca07c4356df8c0c5", + "format": 1 + }, + { + "name": "roles/unbound/hooks/lint", + "ftype": "file", + 
"chksum_type": "sha256", + "chksum_sha256": "2fb99eedbda8b9b02b3e6aafe3cf9afbc990813d05b8cd87ee6ad2a64dc47889", + "format": 1 + }, + { + "name": "roles/unbound/hooks/destroy", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "c05db3d34aa10506944427f4cb1331e6fcc38c97c538d574c503ccd4e14a008f", + "format": 1 + }, + { + "name": "roles/unbound/hooks/tox.sh", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "c32820da1cc4358e7fa4731358bfa38fcbb221154bd45412aa2d9202f60c8419", + "format": 1 + }, + { + "name": "roles/unbound/hooks/molecule.rc", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "7fbafd0a44904bdb4b3b880ef8d2776279fd50416e825c23e43110a71e96bf90", + "format": 1 + }, + { + "name": "roles/unbound/.flake8", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "2fe45659318525388b0574a4104028589a94f17e5f6a1cd796efca0bb1a4f510", + "format": 1 + }, + { + "name": "roles/unbound/defaults", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/unbound/defaults/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "9431a300f3202f4820eead86bc47f8643b2436f6d94aaf5af626a611876e52b1", + "format": 1 + }, + { + "name": "roles/unbound/molecule", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/unbound/molecule/default", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/unbound/molecule/default/tests", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/unbound/molecule/default/tests/test_default.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "b1b7b5bdcddbca86a762796e3d496dcf7b70065fc60b6d5a10cea8a12ce1cef9", + "format": 1 + }, + { + "name": "roles/unbound/molecule/default/group_vars", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/unbound/molecule/default/group_vars/all", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/unbound/molecule/default/group_vars/all/vars.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "144e1381858db7e2636d8698b249684097a154c87f486b543020bfdf91c8681c", + "format": 1 + }, + { + "name": "roles/unbound/molecule/default/prepare.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "cea49f9dd1b49e6fd2b9283cfea1dde8bc61d16df62f53c6c7602bfd07904020", + "format": 1 + }, + { + "name": "roles/unbound/molecule/default/molecule.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "9ea92e684c4a022509518f2784bad33ebe85de68d630c933b120aabe0946f279", + "format": 1 + }, + { + "name": "roles/unbound/molecule/default/converge.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "b2e9b962597d0bc1bdb0eefae7de3d23b505337ac6f2ab3b14c0b4fa04480765", + "format": 1 + }, + { + "name": "roles/unbound/molecule/podman", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/unbound/molecule/podman/tests", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/unbound/molecule/podman/tests/test_default.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "75ec9e5f38a6031538edcb6e2a9d7964a005d8525836f37e87ba390b9febabb7", + "format": 1 + }, + { + "name": 
"roles/unbound/molecule/podman/group_vars", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/unbound/molecule/podman/group_vars/all", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/unbound/molecule/podman/group_vars/all/vars.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "37193c9bf1455f29d96bf6a48016b521c6178cd41a937861bbda7fb21b17702e", + "format": 1 + }, + { + "name": "roles/unbound/molecule/podman/requirements.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "9ce640d98485d845a995cb51fd1b2a7d88722a4aa033270937d6a9947a1026de", + "format": 1 + }, + { + "name": "roles/unbound/molecule/podman/prepare.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "2c538056a23ec5eb1eb9b6cf70c73d3bd51cdc64cb01d6a497a0362143e6019f", + "format": 1 + }, + { + "name": "roles/unbound/molecule/podman/molecule.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "edaa7aa1c125f98cac83935e663d13a54103d74c46f163811a7dc8f4733a63ac", + "format": 1 + }, + { + "name": "roles/unbound/molecule/podman/converge.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "3a59121b574df6afb6c86761c787e8a68778f7d9713464e214159d6f132ae66f", + "format": 1 + }, + { + "name": "roles/unbound/molecule/configured", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/unbound/molecule/configured/tests", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/unbound/molecule/configured/tests/test_default.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "bbc08344b60f91cd02cb627f26278cd95b0275f8df56b793a673e94a903f50a3", + "format": 1 + }, + { + "name": "roles/unbound/molecule/configured/group_vars", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/unbound/molecule/configured/group_vars/all", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/unbound/molecule/configured/group_vars/all/vars.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "fb8cbb0c98417e6d5e33a3ce14f0fed1887f8041343d55afeb50207bfddfaaa3", + "format": 1 + }, + { + "name": "roles/unbound/molecule/configured/prepare.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "a3071e418da5fe7ee03dff837049277d15c22e5ab141b9d2545de68bd96dfcd3", + "format": 1 + }, + { + "name": "roles/unbound/molecule/configured/molecule.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "9ea92e684c4a022509518f2784bad33ebe85de68d630c933b120aabe0946f279", + "format": 1 + }, + { + "name": "roles/unbound/molecule/configured/converge.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "b2e9b962597d0bc1bdb0eefae7de3d23b505337ac6f2ab3b14c0b4fa04480765", + "format": 1 + }, + { + "name": "roles/unbound/.ansible-lint", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "3f79cdace5e0ac1512d6b3e35517aa678069b483ffb80ca63e5b51718be5bc31", + "format": 1 + }, + { + "name": "roles/unbound/.yamllint", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "6a8f34854e6109024a678dbdc16a082b21e0134067fcfab10072b547991d8339", + "format": 1 + }, + { + "name": "roles/unbound/.gitignore", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": 
"88b2262e97da40d844c28af01fe4153605745a11e29348301bf82f9a1a5881dc", + "format": 1 + }, + { + "name": "roles/unbound/README.md", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "9d127a33cc578ba537660a5ff3314e617c6bd1c8cdac089950080640f2a47b0c", + "format": 1 + }, + { + "name": "roles/unbound/Makefile", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "9a669f17d41f0686488447a101bc3c7ff32547c69c5c33900af48af7ea32642d", + "format": 1 + }, + { + "name": "roles/unbound/templates", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/unbound/templates/etc", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/unbound/templates/etc/unbound.conf.j2", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "dee31633487188ac3b3009bb009bfbdbf5ed17de7e9f18ae11037f893045cae6", + "format": 1 + }, + { + "name": "roles/unbound/templates/etc/default.j2", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "2f4bfcfba22b59f3290adc0e083c49006df94acf62d9d5fbac7d09704f755117", + "format": 1 + }, + { + "name": "roles/unbound/templates/etc/unbound.conf.d", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/unbound/templates/etc/unbound.conf.d/remote_control.conf.j2", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "dd0c6ff9ead23ec5be676afd89007a5ed521d9fb7fbbd340b03a05f37970a330", + "format": 1 + }, + { + "name": "roles/unbound/templates/etc/unbound.conf.d/unbound.conf.j2", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "3745a5e814e3bae102897c8eaf9d29f6cbd8c8be38c6965529b9f20d327ad93a", + "format": 1 + }, + { + "name": "roles/unbound/templates/etc/unbound.conf.d/views.conf.j2", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "e619d074d9e4dd6a257b0c603e98b4edf7ad1e9bbdee5a28e3b3221cdafd6139", + "format": 1 + }, + { + "name": "roles/unbound/templates/etc/unbound.conf.d/forward_zone.conf.j2", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "4972764eaef7ec34fc0b8d069c0637588e3fe7614f6ab1e198f2bcc5866d7243", + "format": 1 + }, + { + "name": "roles/unbound/templates/etc/unbound.conf.d/python.conf.j2", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "613322c1207338a290327b948176a6688b6428b3dc4c74d059f64a9854cfbb7c", + "format": 1 + }, + { + "name": "roles/unbound/templates/etc/unbound.conf.d/dnscrypt.conf.j2", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "ca3ec93ae332e35446ef391218138bcd692cf2406a610136b55af030e96024a4", + "format": 1 + }, + { + "name": "roles/unbound/templates/etc/unbound.conf.d/server.conf.j2", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "e01c0358a70719a2fe483a57b75fde439d6c48053bce06d18600ac1665da87f0", + "format": 1 + }, + { + "name": "roles/unbound/templates/etc/unbound.conf.d/auth_zone.conf.j2", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "83c6b5be03b5a33576f88adbf463313e6b2e81b005e1a235fb5ee5d029aa755f", + "format": 1 + }, + { + "name": "roles/unbound/templates/etc/unbound.conf.d/stub_zone.conf.j2", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "af550588def7036f4181acb54dfac1391e13b771ea6b15afe5367b9423b1801a", + "format": 1 + }, + { + "name": "roles/unbound/templates/etc/unbound.conf.d/cache_db.conf.j2", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": 
"a40459586cbdff6ff7173dffa55cdb0984231649f85e5e451a42f879bbde7822", + "format": 1 + }, + { + "name": "roles/unbound/meta", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/unbound/meta/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "27a21f4694de594bcce7e5a40765313514551ae733ed3f875cec7826387dfbf0", + "format": 1 + }, + { + "name": "roles/unbound/vars", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/unbound/vars/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "0ccb7a23222a4be53d6fa88e3d1a9f97deac25b8084beb9df20200412ec4aa4d", + "format": 1 + }, + { + "name": "roles/unbound/vars/debian.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "28f2ff79548e3e3ad8032b43d63b1ba73e520ee0097f159a68d5f5501ea5c42c", + "format": 1 + }, + { + "name": "roles/unbound/vars/archlinux.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "62098842d2ddddb2753e7a57b46555cb49ccc12073cda82356c4ced652ae145e", + "format": 1 + }, + { + "name": "roles/unbound/LICENSE", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "c318b6f42ca9cd8f48502e05161a95a92b154ff5c0647209b208ee9ff9657a04", + "format": 1 + }, + { + "name": "roles/unbound/handlers", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/unbound/handlers/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "9eaf96473e0ca160610ec7f0586c641bf89ef118c22b2a103455af16e5e4f2cc", + "format": 1 + }, + { + "name": "roles/unbound/handlers/.keepme", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", + "format": 1 + }, + { + "name": "roles/unbound/CONTRIBUTING.md", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "af957267f930f931b72fd2dc1d4127af66afe9aea4459075bc1c6b0051109df9", + "format": 1 + }, + { + "name": "roles/unbound/tasks", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/unbound/tasks/service.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "c2bdbe37409023a0f6e30ce7afdffcd41edd77c5799edc899d5c2a91651367b3", + "format": 1 + }, + { + "name": "roles/unbound/tasks/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "78a8b64d937ba0f79adb1181e2c647c64ea3f9fab316e17dd7d385eff34365c6", + "format": 1 + }, + { + "name": "roles/unbound/tasks/configure.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "e427c7d1ba3d83dda75300d6f6738c761bf81478b32e01e06a20d6068f2636af", + "format": 1 + }, + { + "name": "roles/unbound/tasks/install.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "ba4c07f56e9d694d115b0b2d6976c0ab264cad90403e183e5dd268ff24e87878", + "format": 1 + }, + { + "name": "roles/unbound/tasks/prepare.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "3da4dfc54744015748f3e47f4d41ef312683261527efef7fae3312e52586fffb", + "format": 1 + }, + { + "name": "roles/unbound/tasks/chroot.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "c00ad5a88cb1b21f485ebbd674125f0dbed4113cd45f3b0133d6aaccc4b8bbc4", + "format": 1 + }, + { + "name": "roles/unbound/.editorconfig", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "ed43c7886023384f96a2b69d1288065a2cdc651bb9ffc9d7f2bbda6550f2abd6", + 
"format": 1 + }, + { + "name": "roles/fqdn", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/fqdn/hooks", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/fqdn/hooks/converge", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "60c1a19545c4971b6fff290c5520278ead2c2c36e61981700f786a89fcdba8d8", + "format": 1 + }, + { + "name": "roles/fqdn/hooks/_tox_base", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "03502d1cecefbfae0c44115de1394c29618b05a3c1488f2dc4bde321aaf782aa", + "format": 1 + }, + { + "name": "roles/fqdn/hooks/verify", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "10ab3def4958f02f40172fc92efe9635de1aca1dae9bac9dca07c4356df8c0c5", + "format": 1 + }, + { + "name": "roles/fqdn/hooks/lint", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "2fb99eedbda8b9b02b3e6aafe3cf9afbc990813d05b8cd87ee6ad2a64dc47889", + "format": 1 + }, + { + "name": "roles/fqdn/hooks/destroy", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "c05db3d34aa10506944427f4cb1331e6fcc38c97c538d574c503ccd4e14a008f", + "format": 1 + }, + { + "name": "roles/fqdn/hooks/tox.sh", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "c32820da1cc4358e7fa4731358bfa38fcbb221154bd45412aa2d9202f60c8419", + "format": 1 + }, + { + "name": "roles/fqdn/hooks/molecule.rc", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "7fbafd0a44904bdb4b3b880ef8d2776279fd50416e825c23e43110a71e96bf90", + "format": 1 + }, + { + "name": "roles/fqdn/.flake8", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "2fe45659318525388b0574a4104028589a94f17e5f6a1cd796efca0bb1a4f510", + "format": 1 + }, + { + "name": "roles/fqdn/defaults", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/fqdn/defaults/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "59662fb686d4383d4cbb3f50ee0ae2072cfee31501b35c9a95a106e00a57ecaf", + "format": 1 + }, + { + "name": "roles/fqdn/molecule", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/fqdn/molecule/default", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/fqdn/molecule/default/tests", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/fqdn/molecule/default/tests/test_default.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "cfcfa7503f93d00a8b3149ba7f33d2bb991a19fd531e2769f9189d49c6aed2bf", + "format": 1 + }, + { + "name": "roles/fqdn/molecule/default/group_vars", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/fqdn/molecule/default/group_vars/all", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/fqdn/molecule/default/group_vars/all/vars.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "144e1381858db7e2636d8698b249684097a154c87f486b543020bfdf91c8681c", + "format": 1 + }, + { + "name": "roles/fqdn/molecule/default/prepare.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "d292707d693093b9b2335c63fcb667f61d1781ffec41feaf28aafd07541ecb81", + "format": 1 + }, + { + "name": "roles/fqdn/molecule/default/molecule.yml", + "ftype": "file", + "chksum_type": "sha256", + 
"chksum_sha256": "9ea92e684c4a022509518f2784bad33ebe85de68d630c933b120aabe0946f279", + "format": 1 + }, + { + "name": "roles/fqdn/molecule/default/converge.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "d3ec278c6d2ddbed1bcf112c4f23c69a1ae2c7dcd0fc868730cd0f37bf63cbea", + "format": 1 + }, + { + "name": "roles/fqdn/molecule/configured", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/fqdn/molecule/configured/tests", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/fqdn/molecule/configured/tests/test_default.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "cfcfa7503f93d00a8b3149ba7f33d2bb991a19fd531e2769f9189d49c6aed2bf", + "format": 1 + }, + { + "name": "roles/fqdn/molecule/configured/group_vars", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/fqdn/molecule/configured/group_vars/all", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/fqdn/molecule/configured/group_vars/all/vars.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "3a652c88b4b8f60918dde5b93eaea873f9650310dc4ee99bc83b5b06a60ff5ea", + "format": 1 + }, + { + "name": "roles/fqdn/molecule/configured/prepare.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "d292707d693093b9b2335c63fcb667f61d1781ffec41feaf28aafd07541ecb81", + "format": 1 + }, + { + "name": "roles/fqdn/molecule/configured/molecule.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "9ea92e684c4a022509518f2784bad33ebe85de68d630c933b120aabe0946f279", + "format": 1 + }, + { + "name": "roles/fqdn/molecule/configured/converge.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "d3ec278c6d2ddbed1bcf112c4f23c69a1ae2c7dcd0fc868730cd0f37bf63cbea", + "format": 1 + }, + { + "name": "roles/fqdn/.travis.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "a482a01066e073527f104911158f74a56e7b7818c7fc975c5ea56ad7d25a57bd", + "format": 1 + }, + { + "name": "roles/fqdn/.ansible-lint", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "3f79cdace5e0ac1512d6b3e35517aa678069b483ffb80ca63e5b51718be5bc31", + "format": 1 + }, + { + "name": "roles/fqdn/.yamllint", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "6a8f34854e6109024a678dbdc16a082b21e0134067fcfab10072b547991d8339", + "format": 1 + }, + { + "name": "roles/fqdn/.gitignore", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "88b2262e97da40d844c28af01fe4153605745a11e29348301bf82f9a1a5881dc", + "format": 1 + }, + { + "name": "roles/fqdn/README.md", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "db5e7c5183cf02531ff87ebc666e6a20281e933de4d912c9e38a2cff4e67ae73", + "format": 1 + }, + { + "name": "roles/fqdn/Vagrantfile", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "61a0463a0c5162d5309fa7179231d2fd80531577eced571f1ee38577e9371c01", + "format": 1 + }, + { + "name": "roles/fqdn/Makefile", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "9a669f17d41f0686488447a101bc3c7ff32547c69c5c33900af48af7ea32642d", + "format": 1 + }, + { + "name": "roles/fqdn/meta", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/fqdn/meta/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": 
"9f76bbce2be19fcb1cad253744d5290976b3ed9d1e4ac6ddc4204289d08eb040", + "format": 1 + }, + { + "name": "roles/fqdn/vars", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/fqdn/vars/artixlinux.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "144e1381858db7e2636d8698b249684097a154c87f486b543020bfdf91c8681c", + "format": 1 + }, + { + "name": "roles/fqdn/vars/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "07abe04bab991d2463ec38df99dd04d3683aaa0441ad20be85bd584ecaf942bd", + "format": 1 + }, + { + "name": "roles/fqdn/vars/debian.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "021be9a6d33446b31d23db8f4efa5981dec97e1a54c4d12faaffceedec6aa85f", + "format": 1 + }, + { + "name": "roles/fqdn/vars/archlinux.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "144e1381858db7e2636d8698b249684097a154c87f486b543020bfdf91c8681c", + "format": 1 + }, + { + "name": "roles/fqdn/vars/archlinux-openrc.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "144e1381858db7e2636d8698b249684097a154c87f486b543020bfdf91c8681c", + "format": 1 + }, + { + "name": "roles/fqdn/LICENSE", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "c7480e3cc09653abce5655fc5891b112dd628fed4c34e4e9cd919566ad1d2144", + "format": 1 + }, + { + "name": "roles/fqdn/handlers", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/fqdn/handlers/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "94de2a2877e91b94fcd347516f58827e4949440972af3812e1d38ee234d1e306", + "format": 1 + }, + { + "name": "roles/fqdn/tasks", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/fqdn/tasks/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "ac977b894e8c72301722a2d8ef5437bcd210179c7c3b8a3cd3b2257744a820bb", + "format": 1 + }, + { + "name": "roles/fqdn/tasks/configure.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "61d8a71931f0c4ef49b4a4dbdbf031caa4c9335c3013e7333f853833e0b094b0", + "format": 1 + }, + { + "name": "roles/fqdn/tasks/debian.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "f03d06a699252d54c78e444659d6a7d717666f43d939d3abd3eedd6755b1c115", + "format": 1 + }, + { + "name": "roles/fqdn/tasks/prepare.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "1290b0d8745f0ec8e8d1d63c3a0ed12395b8180b24e3932fcab0eba494f1aa5b", + "format": 1 + }, + { + "name": "roles/fqdn/tasks/linux.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "e927d24b79cbcb90b980f9cceec94ddbb4436013dd1cef957839b12c63f69cfa", + "format": 1 + }, + { + "name": "roles/fqdn/tasks/gentoo.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "1e0fb32bc0533509cd8c8cf8b22e3bdeac7fefec061f247fd0051cb97623c06f", + "format": 1 + }, + { + "name": "roles/fqdn/.editorconfig", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "ed43c7886023384f96a2b69d1288065a2cdc651bb9ffc9d7f2bbda6550f2abd6", + "format": 1 + }, + { + "name": "roles/pihole", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/pihole/.flake8", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "2fe45659318525388b0574a4104028589a94f17e5f6a1cd796efca0bb1a4f510", + "format": 1 + }, + { + "name": "roles/pihole/defaults", + 
"ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/pihole/defaults/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "7f69c3d30213cb8a3ab147a69d7c741540c472a43bfd2bcd8b1b4a9f93561fd1", + "format": 1 + }, + { + "name": "roles/pihole/molecule", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/pihole/molecule/default", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/pihole/molecule/default/tests", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/pihole/molecule/default/tests/test_default.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "8bfaab8282fb1536953513daf197814505dfcc1b62ea57262e935caa366a88b1", + "format": 1 + }, + { + "name": "roles/pihole/molecule/default/group_vars", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/pihole/molecule/default/group_vars/all", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/pihole/molecule/default/group_vars/all/vars.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "144e1381858db7e2636d8698b249684097a154c87f486b543020bfdf91c8681c", + "format": 1 + }, + { + "name": "roles/pihole/molecule/default/prepare.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "a181a4c734fc91a6411f427b149c9b34dee1683a6f91587c377c6ba28c3f5051", + "format": 1 + }, + { + "name": "roles/pihole/molecule/default/molecule.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "c8bb4a17d31dd9f9965c2fc1b05b3257aa6e6c3717404181ebad88d75e6f73eb", + "format": 1 + }, + { + "name": "roles/pihole/molecule/default/converge.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "757947033967fa03f457184c1de66675236ae1643af3fa3bb04e3125eb294af8", + "format": 1 + }, + { + "name": "roles/pihole/molecule/family-friendly", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/pihole/molecule/family-friendly/tests", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/pihole/molecule/family-friendly/tests/test_default.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "428606cd9004e7c75b75dfeda4b0bbf120356428c2b2ad96ed2810717567884d", + "format": 1 + }, + { + "name": "roles/pihole/molecule/family-friendly/group_vars", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/pihole/molecule/family-friendly/group_vars/all", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/pihole/molecule/family-friendly/group_vars/all/vars.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "2472d2026099c158b306d652d450c6ea62feb974ef4d48805e980cbec1b4567e", + "format": 1 + }, + { + "name": "roles/pihole/molecule/family-friendly/prepare.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "a181a4c734fc91a6411f427b149c9b34dee1683a6f91587c377c6ba28c3f5051", + "format": 1 + }, + { + "name": "roles/pihole/molecule/family-friendly/molecule.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "659f1fda81a0817c02d3ea8f188e268311dedaf533e47bc1d5c0f105ed55c090", + "format": 1 + }, + { + "name": 
"roles/pihole/molecule/family-friendly/converge.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "0c976812ba5d4a50515aa450c210ca76b8840954f8ab799a0a5171bd667518c0", + "format": 1 + }, + { + "name": "roles/pihole/molecule/configured", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/pihole/molecule/configured/tests", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/pihole/molecule/configured/tests/test_default.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "428606cd9004e7c75b75dfeda4b0bbf120356428c2b2ad96ed2810717567884d", + "format": 1 + }, + { + "name": "roles/pihole/molecule/configured/group_vars", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/pihole/molecule/configured/group_vars/all", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/pihole/molecule/configured/group_vars/all/vars.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "3cd4abacb4cb30d700d791396578d6d81ce01cbe4af6a7d3404807258d5bb4e0", + "format": 1 + }, + { + "name": "roles/pihole/molecule/configured/prepare.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "a181a4c734fc91a6411f427b149c9b34dee1683a6f91587c377c6ba28c3f5051", + "format": 1 + }, + { + "name": "roles/pihole/molecule/configured/molecule.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "659f1fda81a0817c02d3ea8f188e268311dedaf533e47bc1d5c0f105ed55c090", + "format": 1 + }, + { + "name": "roles/pihole/molecule/configured/converge.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "0c976812ba5d4a50515aa450c210ca76b8840954f8ab799a0a5171bd667518c0", + "format": 1 + }, + { + "name": "roles/pihole/.yamllint", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "ac817a8b08bc1d9a70d7aeb065df53866c53dd0cad960b62f5a88fca0f55ae19", + "format": 1 + }, + { + "name": "roles/pihole/notes.md", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "4c138f8d76d1ad6adbb9b37f98e6155185df90dabbd4307390a58713c9b9e754", + "format": 1 + }, + { + "name": "roles/pihole/.gitignore", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "af647b36561b704d36d80eba14afe67029d85c650cc374f53ad3d62db87eee05", + "format": 1 + }, + { + "name": "roles/pihole/README.md", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "f0100ecc4c25c8f8684be756248fc3731a15f86126db18f0997537bede0973a5", + "format": 1 + }, + { + "name": "roles/pihole/Makefile", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "5d5746145ac6f77987737f15174fe444c656e25705cee721821987333f58938d", + "format": 1 + }, + { + "name": "roles/pihole/templates", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/pihole/templates/pihole.toml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "1f16692318964faaf8d64d813bde563286ee6c44104813eb3d1a3fdfec2ef0f2", + "format": 1 + }, + { + "name": "roles/pihole/templates/custom.list.j2", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "567a6ea491a0e0d31687dac0372c89f336077dca6c06121dfeb20737843f29c4", + "format": 1 + }, + { + "name": "roles/pihole/templates/pihole.toml.j2", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "1913878da6a344e36b215a319ef34d90a8068b92f3c8d197b54b3feb325ca9aa", 
+ "format": 1 + }, + { + "name": "roles/pihole/templates/setupVars.conf.j2", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "4a4ec0dc261badef625b73768219c1967e872a0d3ac29b6a2c8e8076065b2f05", + "format": 1 + }, + { + "name": "roles/pihole/meta", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/pihole/meta/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "ecad3f9be99da60fc4c06b03c4dc05f72af8eddcbb648f77c1129ce80531fa33", + "format": 1 + }, + { + "name": "roles/pihole/vars", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/pihole/vars/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "38f8a76cffafa5cc5ebbf5b19b05389cf1dfeb664c9ee5f63c3b6edc67db0351", + "format": 1 + }, + { + "name": "roles/pihole/vars/debian.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "f52d711103d50a437830c6fbcd04fb4bab49a0f82f6d26d1c791c6e8488dd090", + "format": 1 + }, + { + "name": "roles/pihole/vars/archlinux.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "14250c67cfc1068f38414b32a70acc0f7cba7340be436859a6515c8e2ec83475", + "format": 1 + }, + { + "name": "roles/pihole/LICENSE", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "c318b6f42ca9cd8f48502e05161a95a92b154ff5c0647209b208ee9ff9657a04", + "format": 1 + }, + { + "name": "roles/pihole/handlers", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/pihole/handlers/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "7655e6dbe3ae5b341d480b35c75820732422910dbdf7ad45000b4fc931d57396", + "format": 1 + }, + { + "name": "roles/pihole/CONTRIBUTING.md", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "af957267f930f931b72fd2dc1d4127af66afe9aea4459075bc1c6b0051109df9", + "format": 1 + }, + { + "name": "roles/pihole/tasks", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/pihole/tasks/service.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "0bbb8b234c2cc4c55c1d405d7e614a97028229dc10da09df7477b49616c876d4", + "format": 1 + }, + { + "name": "roles/pihole/tasks/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "6f51a6ed926fcd380eba1204c57c9654d3501d19b9186757688da4a99fefb6b7", + "format": 1 + }, + { + "name": "roles/pihole/tasks/configure.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "d973918b42c9dffdae68d8f75e8c76d37d70b62b13124332f7f3db462c0a31f9", + "format": 1 + }, + { + "name": "roles/pihole/tasks/install.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "08c9d2306f9a1f523641f47391fed9bfd5b045e6d805ecf73657d1dc4b2c70c2", + "format": 1 + }, + { + "name": "roles/pihole/tasks/prepare.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "8b8021b7976713cc2d8ada44daff9c4bc8171091fcf96507051119bf8e931f52", + "format": 1 + }, + { + "name": "roles/pihole/tasks/download.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "a5076ba23e11811f08c2a9b2db3fe21b3c8c9a48d62869af88ef85fe159bbff5", + "format": 1 + }, + { + "name": "roles/pihole/tasks/firewall.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "3607f19ce5e56be9c6068bffd5632ed43c49e1acf1587f56308fd85b75d5d1a5", + "format": 1 + }, + { + "name": "roles/pihole/.editorconfig", + "ftype": 
"file", + "chksum_type": "sha256", + "chksum_sha256": "ed43c7886023384f96a2b69d1288065a2cdc651bb9ffc9d7f2bbda6550f2abd6", + "format": 1 + }, + { + "name": "roles/knot_resolver", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/knot_resolver/hooks", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/knot_resolver/hooks/converge", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "1ddaaac121ea494491ac3419f0be9725405644c2b31781030d236ebc3d930664", + "format": 1 + }, + { + "name": "roles/knot_resolver/hooks/verify", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "d3ffa461db1e1516600a5de841029880521aff844c746e5dedf4bebfccdbf39b", + "format": 1 + }, + { + "name": "roles/knot_resolver/hooks/test", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "6137f41add5be2e1a9caa9abd1c1a483c8ce365370fd64eb85dee201a7074403", + "format": 1 + }, + { + "name": "roles/knot_resolver/hooks/lint", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "fd048a322d0d05e245101fdae1a26a3f6a23c13bbe4f1fc455448672eb59a0a7", + "format": 1 + }, + { + "name": "roles/knot_resolver/hooks/destroy", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "79c679a67141ef9cbfb56c8c7b948ca3df010c56d0c7af6c46535cde58bac8bb", + "format": 1 + }, + { + "name": "roles/knot_resolver/hooks/tox.sh", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "c32820da1cc4358e7fa4731358bfa38fcbb221154bd45412aa2d9202f60c8419", + "format": 1 + }, + { + "name": "roles/knot_resolver/hooks/molecule.rc", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "7fbafd0a44904bdb4b3b880ef8d2776279fd50416e825c23e43110a71e96bf90", + "format": 1 + }, + { + "name": "roles/knot_resolver/.flake8", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "2fe45659318525388b0574a4104028589a94f17e5f6a1cd796efca0bb1a4f510", + "format": 1 + }, + { + "name": "roles/knot_resolver/defaults", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/knot_resolver/defaults/main.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "0b5732c1c2b7578b29d310b1c79abe97010b6c4f65568b5313335cb78672b147", + "format": 1 + }, + { + "name": "roles/knot_resolver/molecule", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/knot_resolver/molecule/default", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/knot_resolver/molecule/default/tests", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/knot_resolver/molecule/default/tests/test_default.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "8c9c70e059b44168e95f98232087e9d22f8603e176eeaf6f369d38ac3bcd3bda", + "format": 1 + }, + { + "name": "roles/knot_resolver/molecule/default/group_vars", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/knot_resolver/molecule/default/group_vars/all", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/knot_resolver/molecule/default/group_vars/all/syslog-ng.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "144e1381858db7e2636d8698b249684097a154c87f486b543020bfdf91c8681c", + "format": 1 + }, + { + "name": 
"roles/knot_resolver/molecule/default/group_vars/all/knot.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "e342a00ce9e290391f6e5e346ea52759e5c41c61786fd11bc6c5f9644d01c488", + "format": 1 + }, + { + "name": "roles/knot_resolver/molecule/default/group_vars/all/vars.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "144e1381858db7e2636d8698b249684097a154c87f486b543020bfdf91c8681c", + "format": 1 + }, + { + "name": "roles/knot_resolver/molecule/default/group_vars/all/pacman.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "8bd06c6af3556de715615c48d6bd92b5572003a9c5413ea0d07acdce675f695c", + "format": 1 + }, + { + "name": "roles/knot_resolver/molecule/default/prepare.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "926067c69c32dab2a9251b1a27c57ef93099867317be5d5d7b088577b3f2fa1b", + "format": 1 + }, + { + "name": "roles/knot_resolver/molecule/default/molecule.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "0883959fc4b3048c39ee6cbb65a0969a62654a1bfa39cb5257e47a3da4942134", + "format": 1 + }, + { + "name": "roles/knot_resolver/molecule/default/converge.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "a2b663f52a14e8263b4592349ff8d7cf450c43e928d616cc552a81d7842b6c8a", + "format": 1 + }, + { + "name": "roles/knot_resolver/molecule/configured", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/knot_resolver/molecule/configured/tests", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/knot_resolver/molecule/configured/tests/test_default.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "8c9c70e059b44168e95f98232087e9d22f8603e176eeaf6f369d38ac3bcd3bda", + "format": 1 + }, + { + "name": "roles/knot_resolver/molecule/configured/group_vars", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/knot_resolver/molecule/configured/group_vars/all", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/knot_resolver/molecule/configured/group_vars/all/syslog-ng.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "144e1381858db7e2636d8698b249684097a154c87f486b543020bfdf91c8681c", + "format": 1 + }, + { + "name": "roles/knot_resolver/molecule/configured/group_vars/all/knot.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "e342a00ce9e290391f6e5e346ea52759e5c41c61786fd11bc6c5f9644d01c488", + "format": 1 + }, + { + "name": "roles/knot_resolver/molecule/configured/group_vars/all/vars.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "fa0d194393608858fe589adcf792180234e156f7bbf7e9d2cec8d77bf16d859b", + "format": 1 + }, + { + "name": "roles/knot_resolver/molecule/configured/group_vars/all/pacman.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "8bd06c6af3556de715615c48d6bd92b5572003a9c5413ea0d07acdce675f695c", + "format": 1 + }, + { + "name": "roles/knot_resolver/molecule/configured/prepare.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "926067c69c32dab2a9251b1a27c57ef93099867317be5d5d7b088577b3f2fa1b", + "format": 1 + }, + { + "name": "roles/knot_resolver/molecule/configured/molecule.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "0883959fc4b3048c39ee6cbb65a0969a62654a1bfa39cb5257e47a3da4942134", + "format": 1 + }, + { + 
"name": "roles/knot_resolver/molecule/configured/converge.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "a2b663f52a14e8263b4592349ff8d7cf450c43e928d616cc552a81d7842b6c8a", + "format": 1 + }, + { + "name": "roles/knot_resolver/.ansible-lint", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "3f79cdace5e0ac1512d6b3e35517aa678069b483ffb80ca63e5b51718be5bc31", + "format": 1 + }, + { + "name": "roles/knot_resolver/.yamllint", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "6a8f34854e6109024a678dbdc16a082b21e0134067fcfab10072b547991d8339", + "format": 1 + }, + { + "name": "roles/knot_resolver/.gitignore", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "88b2262e97da40d844c28af01fe4153605745a11e29348301bf82f9a1a5881dc", + "format": 1 + }, + { + "name": "roles/knot_resolver/README.md", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "1ff8e77552f3b30bce600e48d4e8549c81bb501b5cdaa79c162955f7d4aa1cda", + "format": 1 + }, + { + "name": "roles/knot_resolver/Makefile", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "22a0d6abe4d830d86b79c2ef366996400d6b634b9e11d0834c1bb0a9ce1bd12d", + "format": 1 + }, + { + "name": "roles/knot_resolver/templates", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/knot_resolver/templates/kresd.conf.j2", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "fe3c540c4ffdb20bd009a8a0bdf62e68b4a47d85509e3057e67d02eea111eaec", + "format": 1 + }, + { + "name": "roles/knot_resolver/templates/apt_knot.list.j2", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "544f3eec1ff1fe8615c67b585a94ae848339dadad6d7cab1c557c48aa56edbf0", + "format": 1 + }, + { + "name": "roles/knot_resolver/templates/init", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/knot_resolver/templates/init/openrc", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/knot_resolver/templates/init/openrc/conf.d", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/knot_resolver/templates/init/openrc/conf.d/kresd.j2", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "fb8eee6df74e649848254f3a9ae851d1c281d24286d077388816016269deb633", + "format": 1 + }, + { + "name": "roles/knot_resolver/templates/init/openrc/conf.d/kres-cache-gc.j2", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "162d3236d054b89b74cc5eedfe0f07b3f67710b7c42f6f9c49e53bfedd0d5bda", + "format": 1 + }, + { + "name": "roles/knot_resolver/templates/init/openrc/init.d", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/knot_resolver/templates/init/openrc/init.d/kresd.j2", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "22b6e99bd46422b8d7d589f8cf37bb758bd7c071ad130d46cb735b5577537ed7", + "format": 1 + }, + { + "name": "roles/knot_resolver/templates/init/openrc/init.d/kres-cache-gc.j2", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "0c83201339c361927097fe196ba2cd44856c39af786548b53cf1601b7f8c9d96", + "format": 1 + }, + { + "name": "roles/knot_resolver/meta", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/knot_resolver/meta/main.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": 
"5528af1d7d764609683432b48c1412c8ec346bec97c2fee7901a0236096fafd0", + "format": 1 + }, + { + "name": "roles/knot_resolver/vars", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/knot_resolver/vars/debian.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "78fcefeb61295e0a0892a69e6dcd4f5afad4d3c8d12a1b9a4bf840ba55f8ca56", + "format": 1 + }, + { + "name": "roles/knot_resolver/vars/archlinux.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "0caf81cda08bb7261b67a220ac9220a75761d70cfef007858c5c27bcbdeff540", + "format": 1 + }, + { + "name": "roles/knot_resolver/vars/artixlinux.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "21e67a6b372f6238704898b0b39caeb9523f730a127a05a390e588a3a2502177", + "format": 1 + }, + { + "name": "roles/knot_resolver/vars/redhat.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "917c5e94d998776017039829a27ad43218f778f33e6730a432c691a40ccc17aa", + "format": 1 + }, + { + "name": "roles/knot_resolver/vars/archlinux-openrc.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "21e67a6b372f6238704898b0b39caeb9523f730a127a05a390e588a3a2502177", + "format": 1 + }, + { + "name": "roles/knot_resolver/vars/main.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "04ae88d88a8161cf07e69da33742d0ca14ee0c978bb0c0621ea367cd5ee0b013", + "format": 1 + }, + { + "name": "roles/knot_resolver/LICENSE", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "c318b6f42ca9cd8f48502e05161a95a92b154ff5c0647209b208ee9ff9657a04", + "format": 1 + }, + { + "name": "roles/knot_resolver/handlers", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/knot_resolver/handlers/main.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "151a1296b433d3203ae08ddbd8bbc7fbeaa07f807cb05fc15126bdcf5cdbeb86", + "format": 1 + }, + { + "name": "roles/knot_resolver/tasks", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/knot_resolver/tasks/prepare.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "4507f527ec3d3c02bf44e2eaededa24ba667cfdc850d1e59d1c21e250fbdd090", + "format": 1 + }, + { + "name": "roles/knot_resolver/tasks/configure.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "38f8fa0d62148364b6823d33ad943ff17ce332d691b1490504fe16754c5ae5e6", + "format": 1 + }, + { + "name": "roles/knot_resolver/tasks/repositories.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "92046f2e798034c451a4211dcbebeb6d7034da9f69ccb552bce21afb76a962a2", + "format": 1 + }, + { + "name": "roles/knot_resolver/tasks/service.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "725f76260e32eb93741e28b2ab03a40f7e242ae18003e4823b8f49bbb0419798", + "format": 1 + }, + { + "name": "roles/knot_resolver/tasks/main.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "5e20701ef3eeb28fc9cb7569186a96246d6643c491ecc21773cfd738a4cd4a62", + "format": 1 + }, + { + "name": "roles/knot_resolver/tasks/install.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "92df189b19d61021971c577368d4d129e41d013a5e72505c3f94478c12e539e6", + "format": 1 + }, + { + "name": "roles/knot_resolver/.editorconfig", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": 
"ed43c7886023384f96a2b69d1288065a2cdc651bb9ffc9d7f2bbda6550f2abd6", + "format": 1 + }, + { + "name": "roles/pdns_recursor", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/pdns_recursor/defaults", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/pdns_recursor/defaults/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "61bff4b0e3e2aa4e75f17d6b1caa85dd3b7e8bba223879a863e8fd9b9569aa4b", + "format": 1 + }, + { + "name": "roles/pdns_recursor/molecule", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/pdns_recursor/molecule/default", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/pdns_recursor/molecule/default/tests", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/pdns_recursor/molecule/default/tests/test_default.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "fcb75f0c5c861468db2ca6ef73c05cf04719c8ae30d95c0cad3b098fcec927f3", + "format": 1 + }, + { + "name": "roles/pdns_recursor/molecule/default/group_vars", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/pdns_recursor/molecule/default/group_vars/all", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/pdns_recursor/molecule/default/group_vars/all/vars.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "144e1381858db7e2636d8698b249684097a154c87f486b543020bfdf91c8681c", + "format": 1 + }, + { + "name": "roles/pdns_recursor/molecule/default/prepare.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "a3071e418da5fe7ee03dff837049277d15c22e5ab141b9d2545de68bd96dfcd3", + "format": 1 + }, + { + "name": "roles/pdns_recursor/molecule/default/molecule.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "9ea92e684c4a022509518f2784bad33ebe85de68d630c933b120aabe0946f279", + "format": 1 + }, + { + "name": "roles/pdns_recursor/molecule/default/converge.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "b61f41443d30f894b9507b821817e37bddbfb7dee8caee525d8b9f47e4f175cb", + "format": 1 + }, + { + "name": "roles/pdns_recursor/molecule/configured", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/pdns_recursor/molecule/configured/tests", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/pdns_recursor/molecule/configured/tests/test_default.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "fcb75f0c5c861468db2ca6ef73c05cf04719c8ae30d95c0cad3b098fcec927f3", + "format": 1 + }, + { + "name": "roles/pdns_recursor/molecule/configured/group_vars", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/pdns_recursor/molecule/configured/group_vars/all", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/pdns_recursor/molecule/configured/group_vars/all/vars.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "bb7d549cee5048821e48b2bde54c963c0c794725e66eb6cd5cf46601591d22e8", + "format": 1 + }, + { + "name": "roles/pdns_recursor/molecule/configured/prepare.yml", + "ftype": "file", + "chksum_type": 
"sha256", + "chksum_sha256": "a3071e418da5fe7ee03dff837049277d15c22e5ab141b9d2545de68bd96dfcd3", + "format": 1 + }, + { + "name": "roles/pdns_recursor/molecule/configured/molecule.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "9ea92e684c4a022509518f2784bad33ebe85de68d630c933b120aabe0946f279", + "format": 1 + }, + { + "name": "roles/pdns_recursor/molecule/configured/converge.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "b61f41443d30f894b9507b821817e37bddbfb7dee8caee525d8b9f47e4f175cb", + "format": 1 + }, + { + "name": "roles/pdns_recursor/README.md", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "b7f38475529d02ab650288f67bf67f244221f9afa24d613deef1f9da8e0fa35c", + "format": 1 + }, + { + "name": "roles/pdns_recursor/Makefile", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "22a0d6abe4d830d86b79c2ef366996400d6b634b9e11d0834c1bb0a9ce1bd12d", + "format": 1 + }, + { + "name": "roles/pdns_recursor/templates", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/pdns_recursor/templates/etc", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/pdns_recursor/templates/etc/powerdns", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/pdns_recursor/templates/etc/powerdns/recursor.conf.j2", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "067f216b7556c7925e55fbbc686fd113490262afa14078a2f3a140cab22ba6c1", + "format": 1 + }, + { + "name": "roles/pdns_recursor/templates/etc/powerdns/recursor.d", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/pdns_recursor/templates/etc/powerdns/recursor.d/outgoing.j2", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "e1723a889b8df05e1b5e638b55a3152e26f02c934f312f1cd9ca8d6473ab0567", + "format": 1 + }, + { + "name": "roles/pdns_recursor/templates/etc/powerdns/recursor.d/snmp.j2", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "73692ab90be8c916b7ee69226a99a92378109def46c4d2421f023fccd7223d83", + "format": 1 + }, + { + "name": "roles/pdns_recursor/templates/etc/powerdns/recursor.d/carbon.j2", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "5ff6ef5fd88ab707b3a558761462d74d1793bc553b76f5fc2fa1f975d9030800", + "format": 1 + }, + { + "name": "roles/pdns_recursor/templates/etc/powerdns/recursor.d/nod.j2", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "09f5238d911ad84eb339c95f2d52dd3688cd00c3c033f9c8dd8d33f24033868a", + "format": 1 + }, + { + "name": "roles/pdns_recursor/templates/etc/powerdns/recursor.d/packetcache.j2", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "f478b3f984ff4727327db26c863d1b50fc25539c529b37fc26f8f32f7df3fe04", + "format": 1 + }, + { + "name": "roles/pdns_recursor/templates/etc/powerdns/recursor.d/incoming.j2", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "d0465a10d3d0e984d399a3a4e1b4014d59abdd908c53883ec33f9a3ba89615ad", + "format": 1 + }, + { + "name": "roles/pdns_recursor/templates/etc/powerdns/recursor.d/dnssec.j2", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "0c2b1420db438d6be82678bbef6fa89029e00b08165b1fcee71b4c8c26db956a", + "format": 1 + }, + { + "name": "roles/pdns_recursor/templates/etc/powerdns/recursor.d/webservice.j2", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": 
"9cf49f1d4df93bf451d6a0eeed8183f6d2716ae3a919473a8b17dfb55e5c9dc0", + "format": 1 + }, + { + "name": "roles/pdns_recursor/templates/etc/powerdns/recursor.d/recursor.j2", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "769303d55fda1b0dc4d268c408e40617bbfd34ac275c73792b6ac5813325dff2", + "format": 1 + }, + { + "name": "roles/pdns_recursor/templates/etc/powerdns/recursor.d/logging.j2", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "7c9d72badd58ede5e0459ad337cd6aa6cf4481f37d5ef9baa57947b8aef0b8ef", + "format": 1 + }, + { + "name": "roles/pdns_recursor/templates/etc/powerdns/recursor.d/ecs.j2", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "ecbabe6b92de3fe2496aaecacf069d69d7a701e7fe7bcf6c58d4a915758258aa", + "format": 1 + }, + { + "name": "roles/pdns_recursor/templates/etc/powerdns/recursor.d/recordcache.j2", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "9c416373a0183e8602227620b07681767b6ba78564c355d543ba8c1b7707b18a", + "format": 1 + }, + { + "name": "roles/pdns_recursor/templates/recursor.conf.j2", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "f4ec27f41dc4b5cb3db837039818b7918a69819fb8ed9271690d764d68c9eb9d", + "format": 1 + }, + { + "name": "roles/pdns_recursor/meta", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/pdns_recursor/meta/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "2df5a99994959605855035feb5d34772cdccab4c25eab45284cf0b604a1762b2", + "format": 1 + }, + { + "name": "roles/pdns_recursor/vars", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/pdns_recursor/vars/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "08e926336b78070c8e3a5fd4524458f1f18bd6d8b3876283be49a11b4c7581e2", + "format": 1 + }, + { + "name": "roles/pdns_recursor/vars/debian.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "932f917a2b909481ba2257dfa8a1db4bcaa5278576f82060e067f2443579d1c8", + "format": 1 + }, + { + "name": "roles/pdns_recursor/vars/archlinux.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "cfffd8bd443903e1301e89f963d52fb2d60664d287d9ffb7bd62ed649379258b", + "format": 1 + }, + { + "name": "roles/pdns_recursor/handlers", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/pdns_recursor/handlers/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "1d3b09e897e601bfecc4b12210755808d123b6433dca56b445177a95765f9f45", + "format": 1 + }, + { + "name": "roles/pdns_recursor/tasks", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/pdns_recursor/tasks/service.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "d8d79de7889db0c1c5dd7a9d3a2dade72abceda9c6dd4a5592e7c5208189a461", + "format": 1 + }, + { + "name": "roles/pdns_recursor/tasks/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "68fdaa86d6c51c631842432fa898bce854d43ef00f5e1a87156d09c17177e93d", + "format": 1 + }, + { + "name": "roles/pdns_recursor/tasks/configure.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "81b3e6a16ea9eef7acfe21311fb484b2a4f3793d5884f3a2320fb07dd73f099d", + "format": 1 + }, + { + "name": "roles/pdns_recursor/tasks/install.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": 
"e887b3b227af64d54e3f8f03d3662eca4c513aa84d51449961ad6393dae427ea", + "format": 1 + }, + { + "name": "roles/pdns_recursor/tasks/prepare.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "ccda4d7c6dfd2f68bdc46d8a79baca95ce450337a1def4b26118af05fc8f11ae", + "format": 1 + }, + { + "name": "roles/resolv", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/resolv/defaults", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/resolv/defaults/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "110fe14bcc72896bb9fa9f8ea52fd0a0153d3be1bd44f664e3fb8a67242be9b1", + "format": 1 + }, + { + "name": "roles/resolv/molecule", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/resolv/molecule/default", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/resolv/molecule/default/group_vars", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/resolv/molecule/default/group_vars/all", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/resolv/molecule/default/group_vars/all/vars.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "144e1381858db7e2636d8698b249684097a154c87f486b543020bfdf91c8681c", + "format": 1 + }, + { + "name": "roles/resolv/molecule/default/prepare.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "d292707d693093b9b2335c63fcb667f61d1781ffec41feaf28aafd07541ecb81", + "format": 1 + }, + { + "name": "roles/resolv/molecule/default/molecule.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "9ea92e684c4a022509518f2784bad33ebe85de68d630c933b120aabe0946f279", + "format": 1 + }, + { + "name": "roles/resolv/molecule/default/converge.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "4e2cad54736d688aa3ae1728a66c99f8ca006540602928aa5c7f0ed9fbb6499c", + "format": 1 + }, + { + "name": "roles/resolv/molecule/configured", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/resolv/molecule/configured/group_vars", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/resolv/molecule/configured/group_vars/all", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/resolv/molecule/configured/group_vars/all/vars.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "d37bef7e566b01531ed8aee3363e11a1e9c9bf6f53b6e403c5d153d9c4ab280c", + "format": 1 + }, + { + "name": "roles/resolv/molecule/configured/prepare.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "d292707d693093b9b2335c63fcb667f61d1781ffec41feaf28aafd07541ecb81", + "format": 1 + }, + { + "name": "roles/resolv/molecule/configured/molecule.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "9ea92e684c4a022509518f2784bad33ebe85de68d630c933b120aabe0946f279", + "format": 1 + }, + { + "name": "roles/resolv/molecule/configured/converge.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "4e2cad54736d688aa3ae1728a66c99f8ca006540602928aa5c7f0ed9fbb6499c", + "format": 1 + }, + { + "name": "roles/resolv/README.md", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": 
"ac22a1b7a2806d510920bfeb4abe188d3fa769fff12506289b9a500eddc193cd", + "format": 1 + }, + { + "name": "roles/resolv/templates", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/resolv/templates/etc", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/resolv/templates/etc/resolv.conf.j2", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "eb6e451fd01a56299a45f77066e7655ae329c394f2ae160cfb424988dc22f8b3", + "format": 1 + }, + { + "name": "roles/resolv/meta", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/resolv/meta/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "300e80a807df6802a2073b9e8f4bbc7041f6bdd46a9f37291eb8ea525737ea9d", + "format": 1 + }, + { + "name": "roles/resolv/LICENSE", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "c318b6f42ca9cd8f48502e05161a95a92b154ff5c0647209b208ee9ff9657a04", + "format": 1 + }, + { + "name": "roles/resolv/tasks", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/resolv/tasks/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "0063b710aeb38d20573925d1c4561aed01fdb15fa0209d83f49bc04a9768d7d4", + "format": 1 + }, + { + "name": "CONTRIBUTING.md", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "af957267f930f931b72fd2dc1d4127af66afe9aea4459075bc1c6b0051109df9", + "format": 1 + }, + { + "name": "plugins", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "plugins/filter", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "plugins/filter/pdns_recursor.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "36cfda688ebb4b8609bc2d29fd05e653c5c9edda2d0ef672889b9381b5eb0a31", + "format": 1 + }, + { + "name": "plugins/filter/bind.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "8754946d5f8bff7c8399f726c67c12afa418577825de09d0c417d9f9fdb5b8d4", + "format": 1 + }, + { + "name": "plugins/filter/pdns.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "8dd1d959c05ab02f34db3cf4152e315d276cc5769acde2a8604445d15b758cae", + "format": 1 + }, + { + "name": "plugins/filter/knot.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "1c06a3854d0233bfd173fc0de6062789ab8dbba039896b44684d05476e746eb5", + "format": 1 + }, + { + "name": "plugins/filter/resolver_listener.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "ef38eb7ca0d40ad8f5e9b16715f79e8f4cc0b9031ac07be858847a93f16ebc48", + "format": 1 + }, + { + "name": "plugins/filter/unbound.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "d47fcb65c4c4cab37f79f28680b06d2ee6623376890861b2cec7b8ee44706dda", + "format": 1 + }, + { + "name": "plugins/modules", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "plugins/modules/pihole_adlists.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "7707765dd2a835a47e248a9b3fd88da51f6146ee73efaf863ce0ea4ec055eba2", + "format": 1 + }, + { + "name": "plugins/modules/pihole_custom_lists.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "e3d33c38cbe93a8d57f24527a90f242b7ecb02f3d46d74e7c60d0757882ea8be", + "format": 1 + }, + { + "name": "plugins/modules/bind_zone_data.py", + 
"ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "23d91aab34ac0f73c417316c142c358a90e0a5fff0dae4c0fbef6b4467188c67", + "format": 1 + }, + { + "name": "plugins/modules/bind_version.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "6cb2f660cd7892614bfddf836438022013e1c3fa3636dfcf608e8ba9528a8830", + "format": 1 + }, + { + "name": "plugins/modules/pdns_mysql_backend.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "1a4fac837e0a0115ece07412f92305711809b5a33fdf40869b2cd504038b6f01", + "format": 1 + }, + { + "name": "plugins/modules/bind_zone_hash.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "8ddbd29be23e581aa4a3445c33fa88a0872a3577fd6f4af3f9480c85fe787bdd", + "format": 1 + }, + { + "name": "plugins/modules/pihole_command.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "7152170478ec8099cf50aac0f9807b92f2942e12bba050ad6b1f7733753ad4cf", + "format": 1 + }, + { + "name": "plugins/modules/kdig.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "4c17346a9aead7a96d1c29cfdb4e3ab8369012d06e1ce0fbf16d113e963f8ada", + "format": 1 + }, + { + "name": "plugins/modules/pdns_sqlite_backend.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "b16d8fd22d0c02e06b816dfcb05584d2794b25949d9c6c8f560b24b66f5cecc8", + "format": 1 + }, + { + "name": "plugins/modules/pdns_zone_data.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "cccffb378ab882a6053de060a657a928422d473923ff14d04e4bf18c59c2c85f", + "format": 1 + }, + { + "name": "plugins/modules/knot_zone.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "97641f8bed51123aa510552eac433aa12580f8c05bb89572f16eb1b523a177f8", + "format": 1 + }, + { + "name": "plugins/modules/pdns_version.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "cbd26aeafb0ed05b32cb05ab680b3c0dada339f19f0dea8c1f5fc5f8528f57cc", + "format": 1 + }, + { + "name": "plugins/modules/pihole_config.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "b50aff9133f167348cf48cae841736bddf505c456b36e84d32383e09df6753c4", + "format": 1 + }, + { + "name": "plugins/modules/pihole_version.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "1ac09dc66b077afbf10fd0429236bf47f9eef22d9b1473809ef8db587feab38f", + "format": 1 + }, + { + "name": "plugins/modules/pihole_clients.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "44844d709221ed0af2723c67338785f203cf0337fc6ccf5d9ac59d4fb1c44123", + "format": 1 + }, + { + "name": "plugins/modules/pihole_admin_password.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "4ed8d39e8d10c8406358291e1de3561f14359376c59e7584c8a3d42c76845553", + "format": 1 + }, + { + "name": "plugins/modules/pihole_groups.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "4bc09360bd9197b1445aeb10018158107d97ac9595fffaed2e6282ac39e8386e", + "format": 1 + }, + { + "name": "plugins/modules/recursor_version.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "a7d5474ffbac255e5b4d87a8e5c3e662b83c13716ca842dc129f0d6cd8e58ee1", + "format": 1 + }, + { + "name": "plugins/module_utils", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "plugins/module_utils/pdns", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "plugins/module_utils/pdns/config_loader.py", + "ftype": "file", + "chksum_type": "sha256", + 
"chksum_sha256": "79db0467d26d540126ec1f577224aa85bbce7e5609982405ff9460cab6062326", + "format": 1 + }, + { + "name": "plugins/module_utils/pdns/web_api.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "1578c09035478caefc1a966c73693c3c0abdd6842b927b19e09a67c005c816db", + "format": 1 + }, + { + "name": "plugins/module_utils/pdns/records.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "1f45f8bd688902dbe44bfbb573647a1939e9a86e1b91484e78c856a784474c7f", + "format": 1 + }, + { + "name": "plugins/module_utils/pdns/utils.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "1d8e50d81008d4a9814784eb70f7f3f329f8e4fdcd6d6be4cd98b52dff688e1b", + "format": 1 + }, + { + "name": "plugins/module_utils/pihole", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "plugins/module_utils/pihole/client_manager.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "268a29455471ffddec5d8ef7aba9e84cf2e8fdf5558369ceefac694e7089efdc", + "format": 1 + }, + { + "name": "plugins/module_utils/pihole/utils.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "3e7a7aa4c10ce6969286c610711a409019f29dec5564388c9cfafc5397bee27f", + "format": 1 + }, + { + "name": "plugins/module_utils/pihole/group_manager.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "c95b7853b764c28e1dd580389abb626cfb919dbfa7e3bd2bc6d3affc90781958", + "format": 1 + }, + { + "name": "plugins/module_utils/pihole/adlist_manager.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "02d14bcb6fa87f7ab8b0af70dcdfcde90cd333e2baa4b2400727cb582cedeada", + "format": 1 + }, + { + "name": "plugins/module_utils/pihole/database.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "745661940cae4b65fc0fa29b5c8b71ea5a77282eeb711356a9894285e8ec3223", + "format": 1 + }, + { + "name": "plugins/module_utils/pihole/config.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "bac0645c1e0a05f7d755032e3e26e0a0eba6f271589909b9754f88dd2ec4116c", + "format": 1 + }, + { + "name": "plugins/module_utils/pihole/pihole.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "8f3c6d72ad5a641b27162c5efd7c0d91ca25d783877853b8ae00da4e5a5d1f1f", + "format": 1 + }, + { + "name": "plugins/module_utils/utils.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "1248f2410c247fd0fb078ad81147ce15a5aa5b8f17ede48e49ea878507a3f0ff", + "format": 1 + }, + { + "name": "plugins/module_utils/network_type.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "05243290e0cccb3e49e235802c89d3a8082cffc3bb62a6f9b5cb7cc878b4b7af", + "format": 1 + }, + { + "name": "plugins/module_utils/database.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "5f3504696d88f7213575f971c2aa98580fb7bfbe07c67d620c8dbe4d20fc488b", + "format": 1 + }, + { + "name": ".config", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": ".config/pycodestyle.cfg", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "d29dc03e3583d2490acfaa73527741a6721df54bd51d0bab7b4894f055e79c59", + "format": 1 + }, + { + "name": ".config/ansible-lint.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "3f79cdace5e0ac1512d6b3e35517aa678069b483ffb80ca63e5b51718be5bc31", + "format": 1 + } + ], + "format": 1 +} \ No newline at end of file diff --git 
a/ansible/playbooks/collections/ansible_collections/bodsch/dns/LICENSE b/ansible/playbooks/collections/ansible_collections/bodsch/dns/LICENSE new file mode 100644 index 0000000..261eeb9 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. 
Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. 
Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. 
+ You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/MANIFEST.json b/ansible/playbooks/collections/ansible_collections/bodsch/dns/MANIFEST.json new file mode 100644 index 0000000..9048662 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/MANIFEST.json @@ -0,0 +1,41 @@ +{ + "collection_info": { + "namespace": "bodsch", + "name": "dns", + "version": "1.4.0", + "authors": [ + "Bodo Schulz " + ], + "readme": "README.md", + "tags": [ + "infrastructure", + "linux", + "tools", + "system", + "dns" + ], + "description": "A collection of Ansible roles for DNS handling.", + "license": [ + "Apache-2.0" + ], + "license_file": null, + "dependencies": { + "community.general": ">=10.3.0", + "ansible.utils": "*", + "bodsch.core": ">=2.9.0", + "bodsch.systemd": ">=1.4.0" + }, + "repository": "https://github.com/bodsch/ansible-collection-dns", + "documentation": "https://github.com/bodsch/ansible-collection-dns/README.md", + "homepage": "https://github.com/bodsch/ansible-collection-dns", + "issues": "https://github.com/bodsch/ansible-collection-dns/issues" + }, + "file_manifest_file": { + "name": "FILES.json", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "d0b94d0e3f43ea4614052ed33093dfb342619cb2a67c60c7fe8343aa5e682fe6", + "format": 1 + }, + "format": 1 +} \ No newline at end of file diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/Makefile b/ansible/playbooks/collections/ansible_collections/bodsch/dns/Makefile new file mode 100644 index 0000000..883bc91 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/Makefile @@ -0,0 +1,43 @@ +# +export COLLECTION_ROLE ?= +export COLLECTION_SCENARIO ?= default +export TOX_ANSIBLE ?= ansible_9.5 +export TOX_SILENCE ?= true +# -------------------------------------------------------- + +LANG := C.UTF-8 +TEMP_REPO_URL := http://git.boone-schulz.de/ansible/ansible-hooks.git +TEMP_REPO_PATH := collections/hooks +TARGET_DIR := hooks +CACHE_DIR := $(HOME)/.cache/ansible/ansible-hooks + +# -------------------------------------------------------- + +# All targets that simply invoke a script in hooks/ +HOOKS := install uninstall doc prepare converge destroy verify idempotence test lint gh-clean +TARGET_DIR := hooks + +.SILENT: hooks-ready +.PHONY: $(HOOKS) +.ONESHELL: +.DEFAULT_GOAL := converge + +$(HOOKS): | hooks-ready + @hooks/$@ + +hooks-ready: + @if [ ! 
-d "hooks" ] || [ -z "$$(ls -A 'hooks' 2>/dev/null)" ]; then \ + $(MAKE) --no-print-directory fetch-hooks >/dev/null 2>&1; \ + fi + +fetch-hooks: + @if [ -d "$(CACHE_DIR)/.git" ]; then + git -C "$(CACHE_DIR)" fetch --depth=1 --prune origin + def=$$(git -C "$(CACHE_DIR)" remote show origin | awk '/HEAD branch/ {print "origin/"$$NF}') + git -C "$(CACHE_DIR)" reset --hard "$$def" + else + mkdir -p "$(dir $(CACHE_DIR))" + GIT_TERMINAL_PROMPT=0 git clone --depth 1 "$(TEMP_REPO_URL)" "$(CACHE_DIR)" + fi + @mkdir -p "$(TARGET_DIR)" + @rsync -a --delete "$(CACHE_DIR)/$(TEMP_REPO_PATH)/" "$(TARGET_DIR)/" diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/README.md b/ansible/playbooks/collections/ansible_collections/bodsch/dns/README.md new file mode 100644 index 0000000..e5a4792 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/README.md @@ -0,0 +1,63 @@ +# Ansible Collection - bodsch.dns + +A collection of Ansible roles for DNS Stuff. + + +[![GitHub Workflow Status](https://img.shields.io/github/actions/workflow/status/bodsch/ansible-collection-dns/main.yml?branch=main)][ci] +[![GitHub issues](https://img.shields.io/github/issues/bodsch/ansible-collection-dns)][issues] +[![GitHub release (latest by date)](https://img.shields.io/github/v/release/bodsch/ansible-collection-dns)][releases] + +[ci]: https://github.com/bodsch/ansible-collection-dns/actions +[issues]: https://github.com/bodsch/ansible-collection-dns/issues?q=is%3Aopen+is%3Aissue +[releases]: https://github.com/bodsch/ansible-collection-dns/releases + + +## supported operating systems + +* Arch Linux +* Debian based + - Debian 10 / 11 + - Ubuntu 20.10 + +## Contribution + +Please read [Contribution](CONTRIBUTING.md) + +## Development, Branches (Git Tags) + +The `master` Branch is my *Working Horse* includes the "latest, hot shit" and can be complete broken! + +If you want to use something stable, please use a [Tagged Version](https://github.com/bodsch/ansible-collection-prometheus/tags)! + +--- + +## Roles + +| Role | Build State | Description | +|:----------------------------------------------------------- | :---- | :---- | +| [bodsch.dns.bind](./roles/bind/README.md) | [![GitHub Workflow Status](https://img.shields.io/github/actions/workflow/status/bodsch/ansible-collection-dns/bind.yml?branch=main)][bind] | Ansible role to install and configure `bind`. | +| [bodsch.dns.knot](./roles/knot/README.md) | [![GitHub Workflow Status](https://img.shields.io/github/actions/workflow/status/bodsch/ansible-collection-dns/knot.yml?branch=main)][knot] | Ansible role to install and configure `knot`. | +| [bodsch.dns.knot_resolver](./roles/knot_resolver/README.md) | [![GitHub Workflow Status](https://img.shields.io/github/actions/workflow/status/bodsch/ansible-collection-dns/knot_resolver.yml?branch=main)][knot_resolver] | Ansible role to install and configure `knot-resolver`. | +| [bodsch.dns.dnsmasq](./roles/dnsmasq/README.md) | [![GitHub Workflow Status](https://img.shields.io/github/actions/workflow/status/bodsch/ansible-collection-dns/dnsmasq.yml?branch=main)][dnsmasq] | Ansible role to install and configure `dnsmasq`. | +| [bodsch.dns.fqdn](./roles/fqdn/README.md) | [![GitHub Workflow Status](https://img.shields.io/github/actions/workflow/status/bodsch/ansible-collection-dns/fqdn.yml?branch=main)][fqdn] | Ansible role to install and configure `fqdn`. 
| +| [bodsch.dns.hosts](./roles/hosts/README.md) | [![GitHub Workflow Status](https://img.shields.io/github/actions/workflow/status/bodsch/ansible-collection-dns/hosts.yml?branch=main)][hosts] | Ansible role to install and configure `hosts`. | +| [bodsch.dns.resolv](./roles/resolv/README.md) | [![GitHub Workflow Status](https://img.shields.io/github/actions/workflow/status/bodsch/ansible-collection-dns/resolv.yml?branch=main)][resolv] | Ansible role to install and configure `resolv`. | +| [bodsch.dns.unbound](./roles/unbound/README.md) | [![GitHub Workflow Status](https://img.shields.io/github/actions/workflow/status/bodsch/ansible-collection-dns/unbound.yml?branch=main)][unbound] | Ansible role to install and configure `unbound`. | +| [bodsch.dns.pdns](./roles/pdns/README.md) | [![GitHub Workflow Status](https://img.shields.io/github/actions/workflow/status/bodsch/ansible-collection-dns/pdns.yml?branch=main)][pdns] | Ansible role to install and configure `powerdns`. | +| [bodsch.dns.pdns_recursor](./roles/pdns_recursor/README.md) | [![GitHub Workflow Status](https://img.shields.io/github/actions/workflow/status/bodsch/ansible-collection-dns/pdns_recursor.yml?branch=main)][pdns_recursor] | Ansible role to install and configure `pdns_recursor`. | +| [bodsch.dns.pdns_records](./roles/pdns_records/README.md) | [![GitHub Workflow Status](https://img.shields.io/github/actions/workflow/status/bodsch/ansible-collection-dns/pdns_records.yml?branch=main)][pdns_records] | Ansible role to create DNS records for `pdns`. | +| [bodsch.dns.pihole](./roles/pihole/README.md) | [![GitHub Workflow Status](https://img.shields.io/github/actions/workflow/status/bodsch/ansible-collection-dns/pihole.yml?branch=main)][pihole] | Ansible role to install and configure `pihole`. 
| + + +[bind]: https://github.com/bodsch/ansible-collection-dns/actions/workflows/bind.yml +[knot]: https://github.com/bodsch/ansible-collection-dns/actions/workflows/knot.yml +[knot_resolver]: https://github.com/bodsch/ansible-collection-dns/actions/workflows/knot_resolver.yml +[dnsmasq]: https://github.com/bodsch/ansible-collection-dns/actions/workflows/dnsmasq.yml +[fqdn]: https://github.com/bodsch/ansible-collection-dns/actions/workflows/fqdn.yml +[hosts]: https://github.com/bodsch/ansible-collection-dns/actions/workflows/hosts.yml +[resolv]: https://github.com/bodsch/ansible-collection-dns/actions/workflows/resolv.yml +[unbound]: https://github.com/bodsch/ansible-collection-dns/actions/workflows/unbound.yml +[pdns]: https://github.com/bodsch/ansible-collection-dns/actions/workflows/pdns.yml +[pdns_recursor]: https://github.com/bodsch/ansible-collection-dns/actions/workflows/pdns_recursor.yml +[pdns_records]: https://github.com/bodsch/ansible-collection-dns/actions/workflows/pdns_records.yml +[pihole]: https://github.com/bodsch/ansible-collection-dns/actions/workflows/pihole.yml diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/meta/runtime.yml b/ansible/playbooks/collections/ansible_collections/bodsch/dns/meta/runtime.yml new file mode 100644 index 0000000..6d61aff --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/meta/runtime.yml @@ -0,0 +1,3 @@ +--- + +requires_ansible: '>=2.15.0' diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/plugins/filter/bind.py b/ansible/playbooks/collections/ansible_collections/bodsch/dns/plugins/filter/bind.py new file mode 100644 index 0000000..4431732 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/plugins/filter/bind.py @@ -0,0 +1,470 @@ +# python 3 headers, required if submitting to Ansible +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +# import netaddr +import hashlib +import json +import time + +from ansible.utils.display import Display +from ansible_collections.bodsch.dns.plugins.module_utils.network_type import reverse_dns + +# import re + +display = Display() + +# --------------------------------------------------------------------------------------- + +DOCUMENTATION = """ +name: bind +version_added: 0.9.0 +author: "Bodo Schulz (@bodsch) " + +description: TBD +short_description: TBD +""" + +EXAMPLES = """ +""" + +RETURN = """ +""" + +# --------------------------------------------------------------------------------------- + + +class FilterModule(object): + """ """ + + def filters(self): + return { + "zone_type": self.zone_type, + "zone_serial": self.zone_serial, + "forward_zone_data": self.forward_zone_data, + "reverse_zone_data": self.reverse_zone_data, + "zone_filename": self.zone_filename, + } + + def zone_type(self, data, all_addresses): + """ """ + # display.v(f"zone_type({data}, {all_addresses})") + result = None + _type = data.get("type", None) + _primaries = data.get("primaries", None) + _forwarders = data.get("forwarders", None) + + # display.v(f" - type : {_type}") + # display.v(f" - primaries : {_primaries}") + # display.v(f" - forwarders : {_forwarders}") + + if _type and _type in ["primary", "secondary", "forward"]: + # display.v(f" type is defined and {_type}") + result = _type + # display.v(f" 1 = {result}") + + elif not _type and _primaries: + # display.v(" not _type and _primaries") + # display.v(f" - all_addresses : {all_addresses}") + # display.v(f" - primaries : 
{_primaries}") + + primaries_in_all_addresses = [x for x in all_addresses if x in _primaries] + # display.v(f" - primaries_in_all_addresses : {primaries_in_all_addresses}") + + primaries_in_all_addresses = len(primaries_in_all_addresses) > 0 + # display.v(f" {primaries_in_all_addresses}") + + if primaries_in_all_addresses: + result = "primary" + else: + result = "secondary" + # display.v(f" 2 = {result}") + + elif not _type and _forwarders: + # display.v(f" not _type and _forwarders") + result = "forward" + # display.v(f" 3 = {result}") + + # display.v(f" = {result}") + + return result + + def zone_serial(self, domain, zone_hash, exists_hashes, network=None): + """ + define serial for zone data or take existing serial when hash are equal + + input: + domain: + - 'acme-inc.local' + zone_hash: + - '79803e1202406f3051d3b151ed953db2a98c86f61d5c9eead61671377d10320d' + exists_hashes: + - '{ + 'zone_data': { + 'forward': [{ + 'example.com': { + 'filename': 'example.com', + 'hash': '; Hash: 8d591afa6aa30ca0ea7b0293a2468b57b81f591681cd932a7e7a42de5a2a0004 1702835325', + 'sha256': '8d591afa6aa30ca0ea7b0293a2468b57b81f591681cd932a7e7a42de5a2a0004', + 'serial': '1702835325' + } + }, { + 'acme-inc.local': { + 'filename': 'acme-inc.local', + 'hash': '; Hash: 79803e1202406f3051d3b151ed953db2a98c86f61d5c9eead61671377d10320d 1702835326', + 'sha256': '79803e1202406f3051d3b151ed953db2a98c86f61d5c9eead61671377d10320d', + 'serial': '1702835326' + } + }], + 'reverse': [{ + '192.0.2': { + 'filename': '2.0.192.in-addr.arpa', + 'hash': None, + 'sha256': 'None', + 'serial': 'None', + 'network': '192.0.2' + } + }], + }, + }' + network: + - None or + - 'acme-inc.local' + """ + # display.v(f"zone_serial({domain}, {zone_hash}, {exists_hashes}, {network})") + result = dict(hash=zone_hash, serial=int(time.time())) + domain_data = None + + if isinstance(exists_hashes, str): + exists_hashes = json.loads(exists_hashes) + + zone_data = exists_hashes.get("zone_data", []) + + if network: + hashes = zone_data.get("reverse", {}) + domain_data = [x for x in hashes for k, v in x.items() if k == network] + else: + hashes = zone_data.get("forward", {}) + domain_data = [x for x in hashes for k, v in x.items() if k == domain] + + if isinstance(domain_data, list) and len(domain_data) > 0: + domain_data = domain_data[0] + + if network: + domain_data = domain_data.get(network) + else: + domain_data = domain_data.get(domain) + + if domain_data and len(domain_data) > 0: + _serial = domain_data.get("serial", "") + + if _serial and _serial != "None": + result.update({"serial": _serial}) + + # display.v(f" = {result}") + return result + + def forward_zone_data(self, data, soa, ansible_hostname): + """ """ + # display.v(f"forward_zone_data({data}, {soa}, {ansible_hostname})") + + domain = data.get("name") + hostmaster_email = data.get("hostmaster_email", None) + soa_ns_server = data.get("name_servers", []) + other_name_servers = data.get("other_name_servers", None) + mail_servers = data.get("mail_servers", []) + + if not hostmaster_email: + hostmaster_email = f"hostmaster.{domain}." + else: + if not hostmaster_email[:-1] == ".": + hostmaster_email = f"{hostmaster_email}.{domain}." + + # append domain to ns entry, when the tast char not a dot is + for x in range(len(soa_ns_server)): + # display.v(f" - {soa_ns_server[x]}") + if not soa_ns_server[x][-1:] == ".": + soa_ns_server[x] = f"{soa_ns_server[x]}.{domain}." 
+ + if len(soa_ns_server) == 0: + soa_ns_server.append(f"{ansible_hostname}.{domain}.") + + if other_name_servers: + # display.v(f" - {other_name_servers}") + other_name_servers = self.__append(other_name_servers) + + # __append() skips entries that already end in a dot and mutates the list in place + self.__append(mail_servers, domain) + + result = dict( + ttl=soa.get("ttl"), + domain=domain, + soa_name_server=soa_ns_server, + other_name_servers=other_name_servers, + mail=mail_servers, + hostmaster_email=hostmaster_email, + refresh=soa.get("time_to_refresh"), + retry=soa.get("time_to_retry"), + expire=soa.get("time_to_expire"), + minimum=soa.get("minimum_ttl"), + hosts=data.get("hosts", []), + delegate=data.get("delegate", []), + services=data.get("services", []), + text=data.get("text", []), + caa=data.get("caa", []), + naptr=data.get("naptr", []), + ) + + result_hash = self.__hash(result) + + return dict(forward_zone_data=result, zone_hash=result_hash) + + def reverse_zone_data(self, data, soa, ansible_hostname): + """ + input: + data: [ + { + 'name': 'molecule.lan', 'primaries': ['172.17.0.2'], 'name_servers': ['ns1.acme-inc.local.', 'ns2.acme-inc.local.'], + 'hostmaster_email': 'admin', + 'hosts': [ + {'name': 'srv001', 'ip': '172.17.2.1', 'aliases': ['www']}, + {'name': 'srv002', 'ip': '172.17.2.2'} + ] + }, + '172.17' + ], + soa: {'ttl': '1W', 'time_to_refresh': '1D', 'time_to_retry': '1H', 'time_to_expire': '1W', 'minimum_ttl': '1D'}, + ansible_hostname: instance + + """ + # display.v(f"reverse_zone_data({data}, {soa}, {ansible_hostname})") + + revip = None + + if isinstance(data, list) and len(data) == 2: + revip = data[1] + data = data[0] + + result = dict() + + domain = data.get("name") + hostmaster_email = data.get("hostmaster_email", None) + soa_ns_server = data.get("name_servers", []) + other_name_servers = data.get("other_name_servers", None) + + if not hostmaster_email: + hostmaster_email = f"hostmaster.{domain}." + else: + if not hostmaster_email[-1:] == ".": + hostmaster_email = f"{hostmaster_email}.{domain}." + + # append the domain to each NS entry when its last character is not a dot + for x in range(len(soa_ns_server)): + # display.v(f" - {soa_ns_server[x]}") + if not soa_ns_server[x][-1:] == ".": + soa_ns_server[x] = f"{soa_ns_server[x]}.{domain}." 
+ + if len(soa_ns_server) == 0: + soa_ns_server.append(f"{ansible_hostname}.{domain}.") + + if other_name_servers: + # display.v(f" - {other_name_servers}") + other_name_servers = self.__append(other_name_servers) + + reverse_ip = reverse_dns(revip) + + result = dict( + ttl=soa.get("ttl"), + domain=domain, + soa_name_server=soa_ns_server, + other_name_servers=other_name_servers, + hostmaster_email=hostmaster_email, + refresh=soa.get("time_to_refresh"), + retry=soa.get("time_to_retry"), + expire=soa.get("time_to_expire"), + minimum=soa.get("minimum_ttl"), + hosts=data.get("hosts", []), + revip=reverse_ip, + ) + + result_hash = self.__hash(result) + + # display.v(f" = {result} - {result_hash}") + + return dict(reverse_zone_data=result, zone_hash=result_hash) + + def zone_filename(self, data, zone_data): + """ + look up the generated zone filename for the given key (domain or reverse network) + """ + # display.v(f"zone_filename({data}, {zone_data})") + result = None + + zone_data = zone_data.get("zone_data", {}) + + # display.v(f" - zone_data: {zone_data}") + + item = { + k: v + for key, values in zone_data.items() + for x in values + for k, v in x.items() + if k == data + } + + # display.v(f" - item : {item}") + + if item: + result = list(item.values())[0].get("filename") + + # display.v(f"= {result}") + + return result + + def __append(self, data, domain=None): + """ + append a trailing dot (and optionally the domain) to every list element + """ + # display.v(f"__append_dot({data})") + # display.v(f" - {type(data)}") + + if not len(data) > 0: + return data + + if isinstance(data, list): + try: + for x in range(len(data)): + if not data[x][-1:] == ".": + if domain: + data[x] = f"{data[x]}.{domain}." + else: + data[x] = f"{data[x]}." + except Exception: + # fall back: the list holds dicts with a 'name' key instead of plain strings + + for i in data: + if not i.get("name")[-1:] == ".": + if domain: + i["name"] = f"{i['name']}.{domain}." + else: + i["name"] = f"{i['name']}." + + pass + # display.v(f"= {data}") + + return data + + # def __reverse_dns(self, data): + # """ + # """ + # # display.v(f"__reverse_dns({data})") + # if self.__is_valid_ipv4(data): + # reverse_ip = ".".join(data.replace(data + '.', '').split('.')[::-1]) + # reverse_ip += ".in-addr.arpa" + # + # return reverse_ip + # else: + # try: + # _offset = None + # if data.count("/") == 1: + # _prefix = data.split("/")[1] + # _offset = int(9 + int(_prefix) // 2) + # display.v(f" {_prefix} - {_offset}") + # + # _network = netaddr.IPNetwork(str(data)) + # _prefix = _network.prefixlen + # _ipaddress = netaddr.IPAddress(_network) + # reverse_ip = _ipaddress.reverse_dns + # if _offset: + # reverse_ip = reverse_ip[-_offset:] + # + # return reverse_ip + # + # except Exception as e: + # display.v(f" ERROR: {e}") + # pass + # + # return None + + def __hash(self, data): + """ """ + result_str = str(data) + _bytes = result_str.encode("utf-8") + + return hashlib.sha256(_bytes).hexdigest() + + # def __is_valid_ipv4(self, ip): + # """ + # Validates IPv4 addresses. + # """ + # pattern = re.compile(r""" + # ^ + # (?: + # # Dotted variants: + # (?: + # # Decimal 1-255 (no leading 0's) + # [3-9]\d?|2(?:5[0-5]|[0-4]?\d)?|1\d{0,2} + # | + # 0x0*[0-9a-f]{1,2} # Hexadecimal 0x0 - 0xFF (possible leading 0's) + # | + # 0+[1-3]?[0-7]{0,2} # Octal 0 - 0377 (possible leading 0's) + # ) + # (?: # Repeat 0-3 times, separated by a dot + # \.
diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/plugins/filter/pdns.py b/ansible/playbooks/collections/ansible_collections/bodsch/dns/plugins/filter/pdns.py
new file mode 100644
--- /dev/null
+++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/plugins/filter/pdns.py
+# -*- coding: utf-8 -*-
+# Copyright 2023-2024 Bodo Schulz
+
+
+# python 3 headers, required if submitting to Ansible
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+import re
+
+from ansible.plugins.test.core import version_compare
+from ansible.utils.display import Display
+
+# ---------------------------------------------------------------------------------------
+
+display = Display()
+
+
+class FilterModule(object):
+    """ """
+
+    def filters(self):
+        return {
+            "pdns_backend_types": self.backend_types,
+            "pdns_backend_packages": self.backend_packages,
+            "pdns_backend_data": self.backend_data,
+            "pdns_config_upgrades": self.config_upgrades,
+        }
+
+    def backend_types(self, data, version):
+        """
+            input:
+            ```
+            backend_types:
+              - name: 'gmysql:one'
+                user: powerdns
+                host: localhost
+                password: "{{ vault__pdns.database_pdns }}"
+                dbname: pdns
+                credentials: {}
+
+              - name: 'gmysql:two'
+                user: pdns_user
+                host: 192.0.2.15
+                port: 3307
+                password: my_password
+                dbname: dns
+                credentials: {}
+
+              - name: bind
+                config: '/etc/named/named.conf'
+                hybrid: true
+                dnssec-db: '{{ pdns_config_dir }}/dnssec.db'
+
+              - name: gsqlite3
+                database: /var/lib/powerdns/pdns.db
+                dnssec: true
+            ```
+            output:
+            ```
+            ['bind', 'mysql', 'sqlite3']
+            ```
+        """
+        display.v(f"backend_types({data}, {version})")
+        result = []
+        names = set()
+        for entry in data:
+            name = entry.get("name", "")
+            if name.startswith("g"):
+                name = name[1:]  # strip the leading 'g'
+            name = name.split(":")[0]  # drop everything after the ':'
+            names.add(name)
+
+        result = sorted(names)
+
+        display.v(f"= {result}")
+
+        return result
+
+    def backend_packages(self, data, packages):
+        """
+            input:
+            ```
+            data: ['bind', 'mysql', 'sqlite3'],
+            packages: {
+                'geo': 'pdns-backend-geo',
+                'geoip': 'pdns-backend-geoip',
+                'mysql': 'pdns-backend-mysql',
+                'pgsql': 'pdns-backend-pgsql',
+                'sqlite3': 'pdns-backend-sqlite3',
+                ...
+            }
+            ```
+            output:
+            ```
+            ['pdns-backend-mysql', 'pdns-backend-sqlite3']
+            ```
+        """
+        display.v(f"backend_packages({data}, {packages})")
+        result = []
+
+        result = self.flatten([v for k, v in packages.items() if k in data])
+
+        display.v(f"= {result}")
+
+        return result
+
+    def backend_data(self, data, backend):
+        """ """
+        # display.v(f"backend_data({data}, {backend})")
+
+        def normalize(name):
+            if name.startswith("g"):
+                name = name[1:]
+            return name.split(":")[0]
+
+        pattern = re.compile(backend)
+
+        result = [
+            entry for entry in data if pattern.search(normalize(entry.get("name", "")))
+        ]
+
+        # display.v(f"= {result}")
+
+        return result
+
+    def config_upgrades(self, data, version):
+        """
+            replaces deprecated config parameters
+        """
+        # display.v(f"config_upgrades({data}, {version})")
+
+        def replace_keys(obj, version):
+            key_map = {}
+            if version_compare(str(version), "4.5", ">="):
+                # https://doc.powerdns.com/authoritative/upgrading.html?highlight=master#to-4-9-0
+                key_map.update(
+                    {
+                        "allow-unsigned-supermaster": "allow-unsigned-autoprimary",
+                        "master": "primary",
+                        "slave-cycle-interval": "xfr-cycle-interval",
+                        "slave-renotify": "secondary-do-renotify",
+                        "slave": "secondary",
+                        "superslave": "autosecondary",
+                        "domain-metadata-cache-ttl": "zone-metadata-cache-ttl",
+                    }
+                )
+
+            if version_compare(str(version), "4.9", ">="):
+                key_map.update(
+                    {
+                        "supermaster-config": "autoprimary-config",
+                        "supermasters": "autoprimaries",
+                        "supermaster-destdir": "autoprimary-destdir",
+                        "info-all-slaves-query": "info-all-secondaries-query",
+                        "supermaster-query": "autoprimary-query",
+                        "supermaster-name-to-ips": "autoprimary-name-to-ips",
+                        "supermaster-add": "autoprimary-add",
+                        "update-master-query": "update-primary-query",
+                        "info-all-master-query": "info-all-primary-query",
+                    }
+                )
+
+            if isinstance(obj, dict):
+                # replace the keys and recurse into the values
+                return {
+                    key_map.get(k, k): replace_keys(v, version) for k, v in obj.items()
+                }
+            elif isinstance(obj, list):
+                # for lists, process the elements recursively
+                return [replace_keys(item, version) for item in obj]
+            else:
+                return obj
+
+        # replace the keys in the loaded YAML
+        result = replace_keys(data, version)
+
+        # display.v(f"= result: {result}")
+
+        return result
+
+    def flatten(self, lst):
+        """
+            input: nested = [1, [2, [3, 4], 5], 6]
+            output: [1, 2, 3, 4, 5, 6]
+
+            input: [1, 2, 3]
+            output: [1, 2, 3]
+        """
+        result = []
+        for item in lst:
+            if isinstance(item, list):
+                result.extend(self.flatten(item))
+            else:
+                result.append(item)
+        return result
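A quick sanity check of what `config_upgrades` is expected to do for a PowerDNS >= 4.5 target, using keys from the `key_map` above (illustrative, run outside Ansible):

```python
legacy = {"master": "yes", "slave": "no", "slave-cycle-interval": 60}

key_map = {"master": "primary", "slave": "secondary",
           "slave-cycle-interval": "xfr-cycle-interval"}
upgraded = {key_map.get(k, k): v for k, v in legacy.items()}

assert upgraded == {"primary": "yes", "secondary": "no", "xfr-cycle-interval": 60}
```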
diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/plugins/filter/pdns_recursor.py b/ansible/playbooks/collections/ansible_collections/bodsch/dns/plugins/filter/pdns_recursor.py
new file mode 100644
index 0000000..4838748
--- /dev/null
+++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/plugins/filter/pdns_recursor.py
@@ -0,0 +1,59 @@
+# -*- coding: utf-8 -*-
+# Copyright 2023-2024 Bodo Schulz
+
+
+# python 3 headers, required if submitting to Ansible
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+from ansible.plugins.test.core import version_compare
+from ansible.utils.display import Display
+
+# ---------------------------------------------------------------------------------------
+
+display = Display()
+
+
+class FilterModule(object):
+    """ """
+
+    def filters(self):
+        return {
+            "recursor_backwards_compatibility": self.recursor_backwards_compatibility,
+        }
+
+    def recursor_backwards_compatibility(self, data, version):
+        """
+            input:
+            ```
+            pdns_recursor_recursor:
+              forward_zones:
+                - zone: matrix.lan
+                  forwarders:
+                    - 192.168.0.4:53
+                    - 192.168.0.1:5300
+                - zone: google.de
+                  forwarders:
+                    - 127.0.0.1
+                - zone: google.com
+                  forwarders:
+                    - 127.0.0.1
+            ```
+            output:
+            ```
+            ['matrix.lan=192.168.0.4:53;192.168.0.1:5300', 'google.de=127.0.0.1', 'google.com=127.0.0.1']
+            ```
+        """
+        # display.v(f"recursor_backwards_compatibility({data}, {version})")
+        result = []
+
+        if version_compare(str(version), "5", ">="):
+            return data
+
+        for i in data:
+            zone = i.get("zone")
+            forwarders = ";".join(i.get("forwarders", []))
+            result.append(f"{zone}={forwarders}")
+
+        return result
diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/plugins/filter/resolver_listener.py b/ansible/playbooks/collections/ansible_collections/bodsch/dns/plugins/filter/resolver_listener.py
new file mode 100644
index 0000000..607b5c1
--- /dev/null
+++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/plugins/filter/resolver_listener.py
@@ -0,0 +1,88 @@
+# -*- coding: utf-8 -*-
+# Copyright 2023-2024 Bodo Schulz
+
+# BSD 2-clause (see LICENSE or https://opensource.org/licenses/BSD-2-Clause)
+
+"""
+filter plugin file for knot_resolver filters: resolver_listener
+"""
+
+# python 3 headers, required if submitting to Ansible
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+from ansible.utils.display import Display
+
+# ---------------------------------------------------------------------------------------
+
+DOCUMENTATION = """
+name: resolver_listener
+version_added: 0.9.0
+author: "Bodo Schulz (@bodsch) "
+
+description: TBD
+short_description: TBD
+
+"""
+
+EXAMPLES = """
+"""
+
+RETURN = """
+"""
+
+# ---------------------------------------------------------------------------------------
+
+
+display = Display()
+
+
+class FilterModule(object):
+    """ """
+
+    def filters(self):
+        return {
+            "resolver_listener": self.listener,
+        }
+
+    def listener(self, data):
+        """ """
+        result = ""
+        # count = len(data)
+        # display.v("found: {} entries in {} {}".format(count, data, type(data)))
+
+        if isinstance(data, dict):
+            _interfaces = []
+            _ips = []
+            # must be a list: it is concatenated with _listen and _options below
+            _port = []
+            _options = []
+
+            interfaces = data.get("interfaces", [])
+            ips = data.get("ips", [])
+            # default to 0, not "": int("") would raise a ValueError
+            port = data.get("port", 0)
+            options = data.get("options", {})
+
+            if len(interfaces) > 0:
+                _interfaces = ("net." + ",net.".join(interfaces)).split(",")
+
+            if len(ips) > 0:
+                _ips = ("'" + "','".join(ips) + "'").split(",")
+
+            if int(port) > 0:
+                _port = [str(port)]
+
+            if len(options) > 0:
+                for k, v in options.items():
+                    # map the supported options to knot-resolver syntax;
+                    # note that only the last matching option wins here
+                    if k.lower() == "tls" and v:
+                        _options = ["{ tls = true }"]
+                    elif k.lower() == "kind" and v:
+                        _options = [f"{{ {k.lower()} = '{v}' }}"]
+
+            _listen = ["{ " + ", ".join(_interfaces + _ips) + " }"]
+            result = ", ".join(_listen + _port + _options)
+
+        # display.v("result {} {}".format(result, type(result)))
+
+        return result
diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/plugins/filter/unbound.py b/ansible/playbooks/collections/ansible_collections/bodsch/dns/plugins/filter/unbound.py
new file mode 100644
index 0000000..7da9477
--- /dev/null
+++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/plugins/filter/unbound.py
@@ -0,0 +1,61 @@
+# python 3 headers, required if submitting to Ansible
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+from ansible.utils.display import Display
+
+# from ansible_collections.bodsch.dns.plugins.module_utils.network_type import reverse_dns
+
+# import json
+# import netaddr
+# import hashlib
+# import time
+# import re
+
+display = Display()
+
+# ---------------------------------------------------------------------------------------
+
+DOCUMENTATION = """
+name: unbound
+version_added: 0.9.0
+author: "Bodo Schulz (@bodsch) "
+
+description: TBD
+short_description: TBD
+"""
+
+EXAMPLES = """
+"""
+
+RETURN = """
+"""
+
+# ---------------------------------------------------------------------------------------
+
+
+class FilterModule(object):
+    """ """
+
+    def filters(self):
+        return {
+            "unbound_helper": self.unbound_helper,
+        }
+
+    def unbound_helper(self, helper_1, helper_2):
+        """ """
+        display.v(f"unbound_helper({helper_1}, {helper_2})")
+
+        display.v(f" - 1: {helper_1}")
+        display.v(f" - 2: {helper_2}")
+
+        helper_1_exists = helper_1.get("stat", {}).get("path", None)
+        helper_2_exists = helper_2.get("stat", {}).get("path", None)
+
+        if helper_1_exists:
+            return helper_1_exists
+        elif helper_2_exists:
+            return helper_2_exists
+        else:
+            return None
diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/plugins/module_utils/database.py b/ansible/playbooks/collections/ansible_collections/bodsch/dns/plugins/module_utils/database.py
new file mode 100644
index 0000000..e1ad239
--- /dev/null
+++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/plugins/module_utils/database.py
@@ -0,0 +1,569 @@
+#!/usr/bin/python3
+# -*- coding: utf-8 -*-
+
+"""
+database.py
+
+Database helper utilities used in an Ansible context.
+
+This module provides a `Database` helper class with two primary responsibilities:
+
+1) SQLite management
+   - Create an SQLite database file if it does not exist yet, optionally importing an SQL schema.
+   - Remove an SQLite database file.
+
+2) MySQL/MariaDB management (via Ansible mysql_driver abstraction)
+   - Validate configuration attributes (hostname, schema, credentials).
+   - Build connection credentials, connect, execute queries, import SQL scripts, and
+     check for table existence via information_schema.
+
+Integration expectations:
+    The class is designed to be used inside Ansible modules. The provided `module` object is
+    expected to expose:
+      - module.log(msg=...) for debug logging
+      - module.fail_json(...) 
for hard-fail behavior + +Important: + Several instance attributes are expected to be provided externally (e.g. by the calling + Ansible module) before certain operations are executed: + - For sqlite_create(): `schema_file`, `owner`, `group`, `mode` + - For validate()/db_credentials()/db_connect(): `db_hostname`, `db_port`, `db_config`, + `db_socket`, `db_schemaname`, `db_login_username`, `db_login_password`, etc. +""" + +from __future__ import annotations + +import os +import shutil +import sqlite3 +import warnings +from typing import Any, Dict, Optional, Tuple, TypedDict, Union + +# from ansible.module_utils._text import to_native +from ansible.module_utils.common.text.converters import to_native +from ansible.module_utils.mysql import mysql_driver, mysql_driver_fail_msg + + +class AnsibleResult(TypedDict): + """ + Standard Ansible-style operation result. + + Keys: + failed: Whether the operation failed. + changed: Whether the operation changed the system state. + msg: Human-readable message. + """ + + failed: bool + changed: bool + msg: str + + +DbConnectResult = Tuple[bool, str] +DbValidateResult = Tuple[bool, str] +DbExecuteResult = Tuple[bool, bool, Optional[str]] +ImportSqlResult = Tuple[bool, str] +CheckSchemaResult = Tuple[bool, bool, Optional[str]] + + +class Database: + """ + Helper class for SQLite and MySQL/MariaDB operations in an Ansible module context. + + Attributes: + module: Ansible module-like object providing logging and fail_json. + config: MySQL/MariaDB connection config dict (populated by `db_credentials()`). + db_connection: Active DB connection handle (populated by `db_connect()`). + db_cursor: Active cursor handle (populated by `db_connect()` / refreshed by `db_execute()`). + + Externally provided attributes (expected to be set by the caller as needed): + schema_file (str): Path to a SQLite schema file imported by `sqlite_create()`. + owner (str|int|None): Owner for `shutil.chown` in `sqlite_create()`. + group (str|int|None): Group for `shutil.chown` in `sqlite_create()`. + mode (str|int): File mode applied in `sqlite_create()`. + + db_config (str|None): MySQL default file for credentials (optional). + db_socket (str|None): MySQL unix socket (optional). + db_hostname (str|None): MySQL host. + db_port (int): MySQL port. + db_schemaname (str|None): DB schema name. + db_login_username (str|None): Login username. + db_login_password (str|None): Login password. + """ + + def __init__(self, module: Any) -> None: + """ + Initialize the Database helper. + + Args: + module: Ansible module-like object. + + Returns: + None + """ + self.module = module + self.module.log("Database::__init__(module)") + + # MySQL runtime state (set during operation) + self.config: Dict[str, Any] = {} + self.db_connection: Any = None + self.db_cursor: Any = None + + # Optional externally populated defaults / settings + self.schema_file: str = "" + self.owner: Optional[Union[str, int]] = None + self.group: Optional[Union[str, int]] = None + self.mode: Union[str, int] = "0600" + + self.db_config: Optional[str] = None + self.db_socket: Optional[str] = None + self.db_hostname: Optional[str] = None + self.db_port: int = 3306 + self.db_schemaname: Optional[str] = None + self.db_login_username: Optional[str] = None + self.db_login_password: Optional[str] = None + + def sqlite_create(self, database_file: str) -> AnsibleResult: + """ + Create (or initialize) an SQLite database file. + + Behavior: + - Opens (or creates) the SQLite DB file. + - Checks whether non-metadata tables exist. 
+        - If no tables exist, imports SQL schema from `self.schema_file`.
+        - Sets ownership (`shutil.chown`) and file mode (`os.chmod`) after creation.
+
+        Preconditions:
+            - `self.schema_file` must point to a readable SQL schema file if initialization is needed.
+            - `self.owner`, `self.group`, and `self.mode` should be configured if ownership/mode
+              should be enforced.
+
+        Args:
+            database_file: Path to the SQLite database file.
+
+        Returns:
+            AnsibleResult:
+                failed: True if an SQLite error occurred, else False.
+                changed: True if schema was imported (fresh DB), else False.
+                msg: Status message describing the outcome.
+        """
+        self.module.log(f"Database::sqlite_create({database_file})")
+
+        failed = False
+        changed = False
+        msg = ""
+
+        conn: Optional[sqlite3.Connection] = None
+
+        try:
+            conn = sqlite3.connect(
+                database_file, isolation_level=None, detect_types=sqlite3.PARSE_COLNAMES
+            )
+            conn.row_factory = lambda cursor, row: row[0]
+
+            self.module.log(f"SQLite Version: '{sqlite3.sqlite_version}'")
+
+            query = "SELECT name FROM sqlite_schema WHERE type ='table' AND name not LIKE '%metadata%'"
+            cursor = conn.execute(query)
+            schemas = cursor.fetchall()
+            self.module.log(f"  - schemas '{schemas}'")
+
+            if len(schemas) == 0:
+                # Import SQL schema
+                self.module.log(msg="import database schemas")
+
+                with open(self.schema_file, "r", encoding="utf-8") as f:
+                    cursor.executescript(f.read())
+
+                changed = True
+                msg = "Database successfully created."
+            else:
+                msg = "Database already exists."
+
+            # Apply ownership/mode (caller must ensure values are correct and process has permissions)
+            shutil.chown(database_file, self.owner, self.group)
+
+            mode: int
+            if isinstance(self.mode, str):
+                mode = int(self.mode, base=8)
+            else:
+                mode = int(self.mode)
+
+            os.chmod(database_file, mode)
+
+        except sqlite3.Error as er:
+            self.module.log(f"SQLite error: '{(' '.join(er.args))}'")
+            self.module.log(f"Exception class is '{er.__class__}'")
+
+            failed = True
+            msg = " ".join(er.args)
+
+        finally:
+            if conn:
+                conn.close()
+
+        return {"failed": failed, "changed": changed, "msg": msg}
+
+    def sqlite_remove(self, database_file: str) -> AnsibleResult:
+        """
+        Remove an SQLite database file if it exists.
+
+        Args:
+            database_file: Path to the SQLite database file.
+
+        Returns:
+            AnsibleResult:
+                failed: Always False in the current implementation (filesystem errors would raise).
+                changed: True if the file existed and was removed, else False.
+                msg: Status message describing the outcome.
+        """
+        self.module.log(f"Database::sqlite_remove({database_file})")
+
+        failed = False
+        changed = False
+
+        if os.path.exists(database_file):
+            os.remove(database_file)
+            changed = True
+            msg = "The database has been successfully deleted."
+        else:
+            msg = f"The database file '{database_file}' does not exist."
+
+        return {"failed": failed, "changed": changed, "msg": msg}
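A minimal driving sketch for the SQLite path; `FakeModule` is a hypothetical stand-in for the AnsibleModule object the class normally receives:

```python
class FakeModule:
    """Hypothetical stub; provides only what Database actually calls."""

    def log(self, msg):
        print(msg)

    def fail_json(self, **kwargs):
        raise SystemExit(kwargs)


db = Database(FakeModule())
db.schema_file = "/tmp/schema.sql"      # imported only into a fresh DB
db.owner, db.group, db.mode = "root", "root", "0600"

print(db.sqlite_create("/tmp/app.db"))  # {'failed': ..., 'changed': ..., 'msg': ...}
```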
+    def validate(self) -> DbValidateResult:
+        """
+        Validate MySQL/MariaDB database configuration attributes on this instance.
+
+        Validates that the following instance attributes are configured:
+          - db_hostname
+          - db_schemaname
+          - db_login_username
+          - db_login_password
+
+        Returns:
+            tuple[bool, str]: (ok, message)
+                ok: True if all required attributes are present, else False.
+                message: Empty string on success, otherwise an "ERROR: ..." message with details.
+        """
+        msg = ""
+        errors = []
+        result = False
+
+        if not self.db_hostname:
+            errors.append("`database.hostname` was not configured.")
+        if not self.db_schemaname:
+            errors.append("`database.schemaname` was not configured.")
+        if not self.db_login_username:
+            errors.append("`database.login.username` was not configured.")
+        if not self.db_login_password:
+            errors.append("`database.login.password` was not configured.")
+
+        if len(errors) > 0:
+            _msg = ", ".join(errors)
+            msg = f"ERROR: {_msg}"
+        else:
+            result = True
+
+        return (result, msg)
+
+    def db_credentials(
+        self,
+        db_username: Optional[str],
+        db_password: Optional[str],
+        db_schema_name: str,
+    ) -> None:
+        """
+        Build and store MySQL/MariaDB connection credentials in `self.config`.
+
+        Behavior:
+          - Reads credentials from `self.db_config` if configured and file exists.
+          - Uses unix socket if `self.db_socket` is set; otherwise uses host/port.
+          - Explicit `db_username` / `db_password` override config file credentials.
+
+        Args:
+            db_username: Username to authenticate with (overrides config file).
+            db_password: Password to authenticate with (overrides config file).
+            db_schema_name: Target database schema name.
+
+        Returns:
+            None
+        """
+        config: Dict[str, Any] = {}
+
+        if self.db_config and os.path.exists(self.db_config):
+            config["read_default_file"] = self.db_config
+
+        if self.db_socket:
+            config["unix_socket"] = self.db_socket
+        else:
+            config["host"] = self.db_hostname
+            config["port"] = self.db_port
+
+        if db_username is not None:
+            config["user"] = db_username
+        if db_password is not None:
+            config["passwd"] = db_password
+
+        config["db"] = db_schema_name
+
+        self.config = config
+
+    def db_connect(self) -> DbConnectResult:
+        """
+        Connect to MySQL/MariaDB using `self.config`.
+
+        Behavior:
+          - Ensures the Ansible mysql_driver is available; otherwise fails the module.
+          - Treats mysql_driver warnings as errors (`warnings.filterwarnings`).
+          - Creates `self.db_connection` and `self.db_cursor`.
+
+        Returns:
+            tuple[bool, str]: (db_connect_error, message)
+                db_connect_error: False on success, True on failure.
+                message: A human-readable status message.
+        """
+        if mysql_driver is None:
+            self.module.fail_json(msg=mysql_driver_fail_msg)
+        else:
+            warnings.filterwarnings("error", category=mysql_driver.Warning)
+
+        db_connect_error = True
+
+        try:
+            self.db_connection = mysql_driver.connect(**self.config)
+            self.db_cursor = self.db_connection.cursor()
+            db_connect_error = False
+
+        except mysql_driver.Warning as e:
+            message = "unable to connect to database. "
+            message += f"Exception message: {to_native(e)}"
+
+            self.module.log(msg=message)
+            return (db_connect_error, message)
+
+        except Exception as e:
+            message = "unable to connect to database. "
+            message += "check login_host, login_user and login_password are correct "
+            message += f"or {self.db_config} has the credentials. "
+            message += f"Exception message: {to_native(e)}"
+
+            self.module.log(msg=message)
+            return (db_connect_error, message)
+
+        return (db_connect_error, "successfully connected")
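The dict that `db_credentials()` leaves in `self.config` is handed verbatim to `mysql_driver.connect(**config)`; it ends up shaped roughly like this (values illustrative):

```python
config = {
    "host": "localhost",    # or "unix_socket": "/run/mysqld/mysqld.sock" instead
    "port": 3306,
    "user": "pdns_user",
    "passwd": "my_password",
    "db": "pdns",
    # "read_default_file": "/etc/my.cnf",  # only when db_config points to a file
}
```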
+    def db_execute(
+        self,
+        query: str,
+        commit: bool = True,
+        rollback: bool = True,
+        close_cursor: bool = False,
+    ) -> DbExecuteResult:
+        """
+        Execute a single SQL query against an established MySQL/MariaDB connection.
+
+        Behavior:
+            - If the query does not start with "--", it is executed.
+            - Commits on success if `commit=True`.
+            - On exceptions, optionally rolls back if `rollback=True`.
+            - Optionally closes the cursor if `close_cursor=True`.
+
+        Args:
+            query: SQL statement to execute.
+            commit: Whether to commit after executing the query.
+            rollback: Whether to roll back the current transaction on unexpected errors.
+            close_cursor: Whether to close the cursor before returning.
+
+        Returns:
+            tuple[bool, bool, Optional[str]]: (state, db_error, db_message)
+                state: True if the query was executed (and committed when requested);
+                       False for comment-only queries, driver warnings/errors, or
+                       unexpected failures.
+                db_error: True if an unexpected exception occurred, else False.
+                db_message: Error message if `db_error=True`, otherwise None.
+        """
+        if self.db_connection:
+            self.db_cursor = self.db_connection.cursor()
+
+        state = False
+        db_error = False
+        db_message: Optional[str] = None
+
+        try:
+            if not query.startswith("--"):
+                self.db_cursor.execute(query)
+                if commit:
+                    self.db_connection.commit()
+                state = True
+
+        except mysql_driver.Warning as e:
+            try:
+                error_id = e.args[0]
+                error_msg = e.args[1]
+                self.module.log(f"WARNING: {error_id} - {error_msg}")
+            except Exception:
+                self.module.log(f"WARNING: {str(e)}")
+
+        except mysql_driver.Error as e:
+            try:
+                error_id = e.args[0]
+                error_msg = e.args[1]
+
+                if error_id == 1050:  # Table '...' already exists
+                    self.module.log(f"WARNING: {error_msg}")
+            except Exception:
+                self.module.log(f"ERROR: {str(e)}")
+
+        except Exception as e:
+            db_error = True
+            db_message = f"Cannot execute SQL '{query}' : {to_native(e)}"
+
+            if rollback:
+                self.db_connection.rollback()
+
+        finally:
+            if close_cursor and self.db_cursor:
+                self.db_cursor.close()
+
+        return (state, db_error, db_message)
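The `";\n"` split used by `import_sqlfile()` below is a heuristic, not a SQL parser; this standalone snippet shows both what it extracts and a case it silently drops:

```python
sql = "-- schema\nCREATE TABLE a (id INT);\nINSERT INTO a VALUES (1);\n"

commands = [
    x.replace("\n", "").strip()
    for x in sql.split(";\n")
    if not x.replace("\n", "").strip().startswith("--")
]
print(commands)  # ['INSERT INTO a VALUES (1)', '']
# The CREATE TABLE statement is lost: the '-- schema' comment line is glued
# onto it by the split, so the combined string starts with '--' and is filtered.
```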
+    def import_sqlfile(
+        self,
+        sql_file: str,
+        commit: bool = True,
+        rollback: bool = True,
+        close_cursor: bool = False,
+    ) -> ImportSqlResult:
+        """
+        Import a full SQL script file into the connected MySQL/MariaDB database.
+
+        Implementation details:
+            - The file content is split by ";\n" to get statements.
+            - Lines starting with "--" (SQL comments) are ignored.
+            - Statements are executed sequentially via `db_execute()`.
+            - On error, processing stops and (optionally) a rollback is performed.
+
+        Args:
+            sql_file: Path to the SQL script file.
+            commit: Whether to commit after processing all statements successfully.
+            rollback: Whether to roll back if any statement fails.
+            close_cursor: Whether to close the cursor before returning.
+
+        Returns:
+            tuple[bool, str]: (state, message)
+                state: False if file does not exist; otherwise True after processing.
+                       Note: On SQL failure, `state` is set to True in the original logic.
+                message: Human-readable success or error message.
+        """
+        self.module.log(
+            f"Database::import_sqlfile(sql_file={sql_file}, commit={commit}, rollback={rollback}, close_cursor={close_cursor})"
+        )
+
+        if not os.path.exists(sql_file):
+            return (False, f"The file {sql_file} does not exist.")
+
+        state = False
+        db_error = False
+        db_message: Optional[str] = None
+        msg: str
+
+        with open(sql_file, encoding="utf8") as f:
+            sql_data = f.read()
+            sql_commands = sql_data.split(";\n")
+            sql_commands = [
+                x.replace("\n", "").strip()
+                for x in sql_commands
+                if not x.replace("\n", "").strip().startswith("--")
+            ]
+
+        for command in sql_commands:
+            state = False
+            db_error = False
+            db_message = None
+
+            if command:
+                state, db_error, db_message = self.db_execute(
+                    query=command, commit=commit
+                )
+                if db_error:
+                    break
+
+        if rollback and db_error:
+            self.db_connection.rollback()
+
+        if commit and not db_error:
+            self.db_connection.commit()
+
+        if close_cursor and self.db_cursor:
+            self.db_cursor.close()
+
+        if db_error:
+            state = True
+            msg = f"Cannot import file '{sql_file}' : {to_native(db_message)}"
+        else:
+            file_name = os.path.basename(sql_file)
+            msg = f"file '{file_name}' successfully imported."
+
+        return (state, msg)
+
+    def check_table_schema(self, database_table_name: str) -> CheckSchemaResult:
+        """
+        Check whether a database table exists using `information_schema.tables`.
+
+        Args:
+            database_table_name: Table name to verify.
+
+        Returns:
+            tuple[bool, bool, Optional[str]]: (state, db_error, db_error_message)
+                state: True if exactly one row was found (table exists), else False.
+                db_error: Always False in the current implementation (errors are logged only).
+                db_error_message: Message describing the result; empty/None if not found or on error.
+
+        Notes:
+            - The query uses string formatting; if `database_table_name` is user-controlled,
+              parameterize it to avoid SQL injection. In Ansible-controlled inputs this may be acceptable
+              but still not ideal.
+        """
+        state = False
+        db_error = False
+        db_error_message: Optional[str] = ""
+
+        q = f"SELECT * FROM information_schema.tables WHERE table_name = '{database_table_name}'"
+
+        number_of_rows = 0
+
+        try:
+            number_of_rows = self.db_cursor.execute(q)
+            self.db_cursor.fetchone()
+
+        except mysql_driver.Warning as e:
+            try:
+                error_id = e.args[0]
+                error_msg = e.args[1]
+                self.module.log(f"WARNING: {error_id} - {error_msg}")
+            except Exception:
+                self.module.log(f"WARNING: {str(e)}")
+
+        except mysql_driver.Error as e:
+            try:
+                error_id = e.args[0]
+                error_msg = e.args[1]
+
+                if (
+                    error_id == 1050
+                ):  # Table '...' already exists (not typical for SELECT)
+                    self.module.log(f"WARNING: {error_msg}")
+            except Exception:
+                self.module.log(f"ERROR: {str(e)}")
+
+        except Exception as e:
+            self.module.log(f"Cannot execute SQL '{q}' : {to_native(e)}")
+
+        if number_of_rows == 1:
+            state = True
+            db_error = False
+            db_error_message = (
+                f"The database schema '{database_table_name}' has already been created."
+            )
+
+        return (state, db_error, db_error_message)
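As the note in `check_table_schema()` suggests, the lookup is safer with a driver-level placeholder; a hedged sketch of the parameterized form (`cursor` and `database_table_name` as in the method above, `%s` being the MySQLdb/PyMySQL placeholder style):

```python
query = "SELECT * FROM information_schema.tables WHERE table_name = %s"
cursor.execute(query, (database_table_name,))
table_exists = cursor.fetchone() is not None
```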
diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/plugins/module_utils/network_type.py b/ansible/playbooks/collections/ansible_collections/bodsch/dns/plugins/module_utils/network_type.py
new file mode 100644
index 0000000..c70e10f
--- /dev/null
+++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/plugins/module_utils/network_type.py
@@ -0,0 +1,205 @@
+#!/usr/bin/python3
+# -*- coding: utf-8 -*-
+
+"""
+network_type.py
+
+IP address and reverse-DNS helper functions.
+
+This module provides:
+  - `reverse_dns(...)`: Convert an IPv4/IPv6 address (optionally a CIDR network) into its
+    corresponding reverse-DNS domain name.
+  - `is_valid_ipv4(...)`: Validate whether a string is a syntactically valid IPv4 address
+    (supports dotted, decimal, octal, hex notations per the used regex).
+  - `is_valid_ipv6(...)`: Validate whether a string is a syntactically valid IPv6 address.
+
+Implementation notes:
+  - IPv6 reverse computation uses `netaddr.IPNetwork` / `netaddr.IPAddress`.
+  - For IPv6 CIDR inputs, an offset is applied so the returned reverse name corresponds to the
+    reverse-zone boundary implied by the prefix length.
+  - `reverse_dns(...)` logs an error and returns `None` if the input cannot be interpreted
+    as a valid IPv4 or IPv6 address/network.
+"""
+
+from __future__ import absolute_import, print_function
+
+import logging
+import re
+from typing import Optional
+
+import netaddr
+
+__metaclass__ = type
+
+logging.basicConfig(level=logging.DEBUG)
+
+
+def reverse_dns(data: str) -> Optional[str]:
+    """
+    Convert an IP address (IPv4/IPv6) or network (CIDR) into its reverse-DNS name.
+
+    Behavior:
+        - If `data` is a valid IPv4 address, the function returns:
+              "<reversed octets>.in-addr.arpa"
+          Example:
+              "192.0.2.10" -> "10.2.0.192.in-addr.arpa"
+
+        - Otherwise, the function tries to parse `data` as an IPv6 address or IPv6 network via
+          `netaddr.IPNetwork`. If parsing succeeds, it uses `IPAddress(...).reverse_dns` and, for CIDR
+          inputs, trims the result to match the reverse-zone boundary implied by the prefix length.
+
+        - If parsing fails, the function logs an error and returns `None`.
+
+    Args:
+        data: IPv4/IPv6 address string or CIDR network string (e.g. "2001:db8::/64").
+
+    Returns:
+        Optional[str]:
+            - Reverse-DNS domain name (without a trailing dot) on success.
+            - None if the input is not a valid IPv4/IPv6 address/network.
+    """
+    reverse_ip: Optional[str] = None
+    result: Optional[str] = None
+
+    if is_valid_ipv4(data):
+        # Reverse octets for in-addr.arpa.
+        reverse_ip = ".".join(data.split(".")[::-1])
+        result = f"{reverse_ip}.in-addr.arpa"
+    else:
+        try:
+            offset: Optional[int] = None
+            if data.count("/") == 1:
+                prefix_str = data.split("/")[1]
+                # Original logic: compute an offset depending on prefix.
+                offset = int(9 + int(prefix_str) // 2)
+
+            network = netaddr.IPNetwork(str(data))
+            ipaddress = netaddr.IPAddress(network)
+            reverse_ip = (
+                ipaddress.reverse_dns
+            )  # typically ends with a trailing "ip6.arpa."
+
+            if offset:
+                result = reverse_ip[-offset:]
+            else:
+                result = reverse_ip
+
+            if result and result.endswith("."):
+                result = result[:-1]
+
+        except Exception:
+            # Keep original behavior: swallow parsing errors and handle below.
+            pass
+
+    if not result:
+        logging.error(
+            f"PROBLEM: {data} is neither a valid IPv4 nor a valid IPv6 network."
+        )
+        return None
+
+    logging.info(f"= {result}")
+    return result
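Expected values for the two branches (the IPv6 line assumes `netaddr` accepts the `IPNetwork` object as written and follows its `reverse_dns` semantics):

```python
print(reverse_dns("192.0.2.10"))
# -> 10.2.0.192.in-addr.arpa

print(reverse_dns("2001:db8::/64"))
# offset = 9 + 64 // 2 = 41, i.e. 16 nibble labels plus "ip6.arpa"
# -> 0.0.0.0.0.0.0.0.8.b.d.0.1.0.0.2.ip6.arpa
```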
+
+
+def is_valid_ipv4(ip: str) -> bool:
+    """
+    Validate whether a string is a syntactically valid IPv4 address.
+
+    The regex supports multiple IPv4 notations:
+      - dotted decimal/octal/hex variants
+      - pure decimal/octal/hex integer forms
+
+    Args:
+        ip: IPv4 address candidate string.
+
+    Returns:
+        bool: True if `ip` matches the IPv4 pattern, otherwise False.
+    """
+    pattern = re.compile(
+        r"""
+        ^
+        (?:
+          # Dotted variants:
+          (?:
+            # Decimal 1-255 (no leading 0's)
+            [3-9]\d?|2(?:5[0-5]|[0-4]?\d)?|1\d{0,2}
+          |
+            0x0*[0-9a-f]{1,2}  # Hexadecimal 0x0 - 0xFF (possible leading 0's)
+          |
+            0+[1-3]?[0-7]{0,2} # Octal 0 - 0377 (possible leading 0's)
+          )
+          (?:                  # Repeat 0-3 times, separated by a dot
+            \.
+            (?:
+              [3-9]\d?|2(?:5[0-5]|[0-4]?\d)?|1\d{0,2}
+            |
+              0x0*[0-9a-f]{1,2}
+            |
+              0+[1-3]?[0-7]{0,2}
+            )
+          ){0,3}
+        |
+          0x0*[0-9a-f]{1,8}    # Hexadecimal notation, 0x0 - 0xffffffff
+        |
+          0+[0-3]?[0-7]{0,10}  # Octal notation, 0 - 037777777777
+        |
+          # Decimal notation, 1-4294967295:
+          429496729[0-5]|42949672[0-8]\d|4294967[01]\d\d|429496[0-6]\d{3}|
+          42949[0-5]\d{4}|4294[0-8]\d{5}|429[0-3]\d{6}|42[0-8]\d{7}|
+          4[01]\d{8}|[1-3]\d{0,9}|[4-9]\d{0,8}
+        )
+        $
+        """,
+        re.VERBOSE | re.IGNORECASE,
+    )
+
+    return pattern.match(ip) is not None
+
+
+def is_valid_ipv6(ip: str) -> bool:
+    """
+    Validate whether a string is a syntactically valid IPv6 address.
+
+    The regex accepts:
+      - full and compressed IPv6 forms (single '::')
+      - IPv4-mapped tail forms (ending in dotted IPv4)
+
+    Args:
+        ip: IPv6 address candidate string.
+
+    Returns:
+        bool: True if `ip` matches the IPv6 pattern, otherwise False.
+    """
+    pattern = re.compile(
+        r"""
+        ^
+        \s*                         # Leading whitespace
+        (?!.*::.*::)                # Only a single wildcard allowed
+        (?:(?!:)|:(?=:))            # Colon iff it would be part of a wildcard
+        (?:                         # Repeat 6 times:
+            [0-9a-f]{0,4}           #   A group of at most four hexadecimal digits
+            (?:(?<=::)|(?<!::):)    #   Colon unless preceded by wildcard
+        ){6}                        #
+        (?:                         # Either
+            [0-9a-f]{0,4}           #   Another group
+            (?:(?<=::)|(?<!::):)    #   Colon unless preceded by wildcard
+            [0-9a-f]{0,4}           #   Last group
+            (?:(?<=::)              #   Colon iff preceded by exactly one colon
+             |(?<!:)                #
+             |(?<=:)(?<!::):        #
+            )                       # OR
+         |                          #   A v4 address with no leading zeros
+            (?:25[0-4]|2[0-4]\d|1\d\d|[1-9]?\d)
+            (?:\.(?:25[0-4]|2[0-4]\d|1\d\d|[1-9]?\d)){3}
+        )
+        \s*                         # Trailing whitespace
+        $
+        """,
+        re.VERBOSE | re.IGNORECASE | re.DOTALL,
+    )
+
+    return pattern.match(ip) is not None
diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/plugins/module_utils/pdns/config_loader.py b/ansible/playbooks/collections/ansible_collections/bodsch/dns/plugins/module_utils/pdns/config_loader.py
new file mode 100644
--- /dev/null
+++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/plugins/module_utils/pdns/config_loader.py
+#!/usr/bin/python3
+# -*- coding: utf-8 -*-
+
+# Copyright 2023-2024 Bodo Schulz
+# Apache-2.0 (see LICENSE or https://opensource.org/license/apache-2-0)
+# SPDX-License-Identifier: Apache-2.0
+
+from __future__ import absolute_import, print_function
+
+import glob
+import os
+from typing import Any, Dict, List, Union
+
+
+class PowerDNSConfigLoader:
+    """
+    Load and normalize PowerDNS configuration from a main config file plus optional include files.
+
+    This loader reads a PowerDNS-style key/value configuration (e.g. ``pdns.conf``) and:
+      - Parses the main configuration file.
+      - If ``include-dir`` is set and points to a directory, reads all ``*.conf`` files in that directory.
+      - Detects backends via ``launch=...`` and ``launch+...`` directives.
+      - Collects backend-specific settings (keys prefixed with ``<backend>-``) into a structured list.
+
+    Output format:
+      - ``config``: A dictionary containing global settings plus the key ``"backends"``.
+      - ``config["backends"]``: A list of dictionaries; each element has at least
+        ``{"name": "<backend>"}`` and may include additional backend-specific keys.
+
+    The class is designed to be used in an Ansible context: ``module`` is expected to provide
+    ``module.log(...)`` for debug logging.
+
+    Attributes:
+        module: Ansible-like module object, used for logging.
+        config: Aggregated global configuration values.
+        backends: Detected backend names in the effective launch order.
+        backend_configs: Per-backend configuration mapping, keyed by backend name.
+    """
+
+    def __init__(self, module: Any) -> None:
+        """
+        Create a new config loader instance.
+
+        Args:
+            module: An Ansible-like module object providing a ``log(...)`` method.
+
+        Returns:
+            None
+        """
+        self.module = module
+        self.config: Dict[str, Any] = {}
+        self.backends: List[str] = []
+        self.backend_configs: Dict[str, Dict[str, Any]] = {}
+
+        self.module.log("PowerDNSConfigLoader::__init__()")
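For orientation, a `pdns.conf` fragment and the structure `load()` (described next) would produce for it, following the parsing rules documented below (values illustrative):

```python
# pdns.conf fragment:
#   launch=gsqlite3
#   launch+=bind
#   gsqlite3-database=/var/lib/powerdns/pdns.db
#   api=yes
expected = {
    "api": True,
    "backends": [
        {"name": "gsqlite3", "gsqlite3-database": "/var/lib/powerdns/pdns.db"},
        {"name": "bind"},
    ],
}
```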
+
+    def load(self, main_config: str = "/etc/powerdns/pdns.conf") -> Dict[str, Any]:
+        """
+        Load the main configuration file and all include files, then return the normalized config.
+
+        Processing rules:
+          - Reads `main_config`.
+          - If the config contains ``include-dir`` and it is an existing directory, loads all
+            ``*.conf`` files from that directory in sorted order.
+          - Builds ``config["backends"]`` from the detected backend list and collected
+            backend-specific key/value pairs.
+
+        Args:
+            main_config: Path to the primary PowerDNS configuration file.
+
+        Returns:
+            dict[str, Any]: Normalized configuration dictionary with:
+              - global keys (as found in config files)
+              - ``"backends"`` list containing backend dictionaries
+
+        Raises:
+            FileNotFoundError: If `main_config` (or any included file) does not exist.
+            OSError: If a config file cannot be read.
+        """
+        self.module.log(f"PowerDNSConfigLoader::load(main_config={main_config})")
+
+        self._load_file(main_config)
+
+        include_dir = self.config.get("include-dir")
+        if include_dir and os.path.isdir(include_dir):
+            for file_path in sorted(glob.glob(os.path.join(include_dir, "*.conf"))):
+                self._load_file(file_path)
+
+        # Finalize backend list with associated settings.
+        backend_list: List[Dict[str, Any]] = []
+        for backend in self.backends:
+            backend_entry = {"name": backend}
+            for key, value in self.backend_configs.get(backend, {}).items():
+                backend_entry[key] = value
+            backend_list.append(backend_entry)
+
+        self.config["backends"] = backend_list
+
+        return self.config
+
+    def _load_file(self, file_path: str) -> None:
+        """
+        Read and process a single PowerDNS configuration file.
+
+        Supported directives and behavior:
+          - Ignores empty lines and comments starting with ``#``.
+          - Expects ``key=value`` pairs; other lines are ignored.
+          - ``launch=<backends>``:
+              Clears the current backend list and replaces it with the comma-separated values.
+          - ``launch+=<backend>``:
+              Appends the backend name from the value to the backend list (if non-empty).
+          - ``<backend>-<key>=<value>``:
+              If ``<backend>`` is in the current backend list, stores the key/value in
+              `backend_configs[backend]`.
+          - Any other keys are treated as global config and stored in `self.config`.
+
+        Values are normalized via :meth:`_convert_value` (bool/int/float where possible).
+
+        Args:
+            file_path: Path to the configuration file to read.
+
+        Returns:
+            None
+
+        Raises:
+            FileNotFoundError: If `file_path` does not exist.
+            OSError: If the file cannot be opened/read.
+        """
+        self.module.log(f"PowerDNSConfigLoader::_load_file(file_path={file_path})")
+
+        with open(file_path, "r") as f:
+            for line in f:
+                line = line.strip()
+                if not line or line.startswith("#"):
+                    continue
+
+                if "=" not in line:
+                    continue
+
+                key, value = map(str.strip, line.split("=", 1))
+                lowered_key = key.lower()
+
+                if lowered_key == "launch":
+                    self.backends.clear()
+                    if value:
+                        self.backends.extend(
+                            [v.strip() for v in value.split(",") if v.strip()]
+                        )
+
+                elif lowered_key.startswith("launch+"):
+                    if value:
+                        self.backends.append(value.strip())
+
+                else:
+                    is_backend_key = False
+                    for backend in self.backends:
+                        # Example: backend = gsqlite3 -> keys like "gsqlite3-database=..."
+                        if lowered_key.startswith(f"{backend}-"):
+                            self.backend_configs.setdefault(backend, {})[key] = (
+                                self._convert_value(value)
+                            )
+                            is_backend_key = True
+                            break
+
+                    if not is_backend_key:
+                        self.config[key] = self._convert_value(value)
+
+    def _convert_value(self, value: str) -> Union[bool, int, float, str]:
+        """
+        Convert a configuration value to a more specific Python type when possible. 
+ + Conversion rules (in order): + - "yes"/"true" -> True + - "no"/"false" -> False + - digits only -> int + - parseable float -> float + - otherwise returns the original string + + Args: + value: Raw string value from the config file. + + Returns: + bool | int | float | str: Converted value. + """ + lowered = value.lower() + if lowered in ("yes", "true"): + return True + if lowered in ("no", "false"): + return False + if lowered.isdigit(): + return int(value) + try: + return float(value) + except ValueError: + return value diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/plugins/module_utils/pdns/records.py b/ansible/playbooks/collections/ansible_collections/bodsch/dns/plugins/module_utils/pdns/records.py new file mode 100644 index 0000000..c63a4a6 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/plugins/module_utils/pdns/records.py @@ -0,0 +1,471 @@ +""" +records.py + +PowerDNS RRset builder helpers. + +This module converts higher-level record definitions (typically lists of dict-like items) +into RRset dictionaries compatible with the PowerDNS API payload format by calling +`build_rrset(...)`. + +Imported utilities: + - fqdn(zone, name): Build a fully-qualified domain name (usually ensuring trailing dot). + - build_rrset(...): Create a PowerDNS RRset dict. + - reverse_zone_names(module, network=...): Helper to compute reverse names for PTR records. + +Design notes: + - All public helpers return a list of RRset dictionaries (PowerDNS JSON-compatible). + - Input records are typed as Mapping[str, Any] to support both dicts and similar structures. + - `ptr_records()` historically relies on a `module` name in scope; for typing and robustness, + an optional `module` argument is supported (preferred). +""" + +from __future__ import annotations + +import ipaddress +from collections import defaultdict +from typing import Any, DefaultDict, Dict, List, Mapping, Optional, Sequence, Set + +from ansible_collections.bodsch.dns.plugins.module_utils.pdns.utils import ( + build_rrset, + fqdn, +) +from ansible_collections.bodsch.dns.plugins.module_utils.utils import reverse_zone_names + +RRset = Dict[str, Any] +Record = Mapping[str, Any] + + +def host_records( + zone: str, + records: Sequence[Record], + comment: Optional[str] = None, + account: Optional[str] = None, +) -> List[RRset]: + """ + Build A/AAAA/CNAME RRsets for host entries. + + Each input item can describe: + - IPv4 address (`ip`) -> A record + - IPv6 address (`ipv6`) -> AAAA record + - alias list (`aliases`) -> CNAME records pointing to the canonical host name + + Args: + zone: Forward DNS zone (e.g. "example.com" or "example.com."). + records: Host definitions. Each record may contain keys: + - name: host label (required) + - ip: IPv4 address (optional) + - ipv6: IPv6 address (optional) + - aliases: list of alias labels (optional) + - ttl: TTL seconds (optional, default 3600) + comment: Optional comment stored on created RRsets. + account: Optional account string (currently unused here; kept for API symmetry). + + Returns: + list[RRset]: RRset dictionaries (PowerDNS API format). 
A host may produce: + - 0..1 AAAA rrset + - 0..1 A rrset + - 0..N CNAME rrsets (one per alias) + """ + _ = account # explicitly unused + + rrsets: List[RRset] = [] + + for record in records: + name = fqdn(zone, str(record.get("name"))) + ttl = int(record.get("ttl", 3600)) + ipv4 = record.get("ip") + ipv6 = record.get("ipv6") + aliases = record.get("aliases") + + if ipv6: + rrsets.append( + build_rrset( + name=name, + rtype="AAAA", + ttl=ttl, + records=[str(ipv6)], + comment=comment or "", + ) + ) + + if ipv4: + rrsets.append( + build_rrset( + name=name, + rtype="A", + ttl=ttl, + records=[str(ipv4)], + comment=comment or "", + ) + ) + + if aliases: + for a in aliases: + rrsets.append( + build_rrset( + name=fqdn(zone, str(a)), + rtype="CNAME", + ttl=ttl, + records=[name], + comment=comment or "", + ) + ) + + return rrsets + + +def srv_records( + zone: str, + records: Sequence[Record], + comment: Optional[str] = None, + account: Optional[str] = None, +) -> List[RRset]: + """ + Build SRV RRsets grouped by SRV owner name. + + SRV record format (content): + priority weight port target + + Input entries with the same `name` are aggregated into a single RRset. + + Args: + zone: Forward DNS zone. + records: SRV definitions. Each record should contain keys: + - name: SRV owner name (e.g. "_sip._tcp") + - weight: int + - port: int + - target: hostname label (will be fqdn()'d) + - priority: int (optional, default 0) + - ttl: int (optional, default 3600) + comment: Optional comment stored on created RRsets. + account: Optional account string (currently unused here; kept for API symmetry). + + Returns: + list[RRset]: One RRset per unique SRV owner name. + """ + _ = account # explicitly unused + + rrsets: List[RRset] = [] + grouped: DefaultDict[str, List[Record]] = defaultdict(list) + + for service in records: + grouped[str(service["name"])].append(service) + + for srv_name, entries in grouped.items(): + srv_content: List[Dict[str, Any]] = [] + ttl = 3600 + + for entry in entries: + ttl = int(entry.get("ttl", ttl)) + priority = int(entry.get("priority", 0)) + weight = int(entry["weight"]) + port = int(entry["port"]) + target = fqdn(zone, str(entry["target"])) + + srv_content.append( + {"content": f"{priority} {weight} {port} {target}", "disabled": False} + ) + + rrsets.append( + build_rrset( + name=fqdn(zone, srv_name), + rtype="SRV", + ttl=ttl, + records=srv_content, + comment=comment or "", + ) + ) + + return rrsets + + +def mx_records( + zone: str, + records: Sequence[Record], + comment: Optional[str] = None, + account: Optional[str] = None, +) -> List[RRset]: + """ + Build a single MX RRset for the zone apex. + + Each input record contributes one MX entry. + + Args: + zone: Forward DNS zone. + records: MX definitions. Each record may contain keys: + - name: exchange host (string) + - preference: int (optional, default 10) + - ttl: int (optional, default 3600; last value wins) + comment: Optional comment stored on the created RRset. + account: Optional account string (currently unused here; kept for API symmetry). + + Returns: + list[RRset]: A list containing exactly one MX RRset dict for the zone apex. + + Notes: + TTL is overwritten per input record; the last record's TTL wins. + """ + _ = account # explicitly unused + + rrsets: List[RRset] = [] + mx_content: List[Dict[str, Any]] = [] + zone_fqdn = zone if zone.endswith(".") else f"{zone}." 
+ ttl = 3600 + + for record in records: + name = record.get("name") + ttl = int(record.get("ttl", ttl)) + preference = int(record.get("preference", 10)) + + mx_content.append( + dict(content=fqdn(zone, f"{preference} {name}"), disabled=False) + ) + + rrsets.append( + build_rrset( + name=zone_fqdn, + rtype="MX", + ttl=ttl, + records=mx_content, + comment=comment or "", + ) + ) + + return rrsets + + +def txt_records( + zone: str, + records: Sequence[Record], + comment: Optional[str] = None, + account: Optional[str] = None, +) -> List[RRset]: + """ + Build TXT RRsets from TXT entries. + + Each entry can specify either a single string (`text`) or a list of strings. + Each string becomes a TXT record. PowerDNS expects TXT content wrapped in quotes. + + Args: + zone: Forward DNS zone. + records: TXT definitions. Each record may contain keys: + - name: owner name label + - text: str | list[str] + - ttl: int (optional, default 3600) + comment: Optional comment stored on created RRsets. + account: Optional account string (currently unused here; kept for API symmetry). + + Returns: + list[RRset]: One TXT RRset per input entry. + """ + _ = account # explicitly unused + + rrsets: List[RRset] = [] + + for entry in records: + name = fqdn(zone, str(entry.get("name"))) + ttl = int(entry.get("ttl", 3600)) + txt_data = entry.get("text") + + if txt_data is None: + txt_items: List[str] = [] + elif isinstance(txt_data, str): + txt_items = [txt_data] + else: + txt_items = [str(x) for x in txt_data] + + txt_content: List[Dict[str, Any]] = [] + for line in txt_items: + txt_content.append({"content": f'"{line}"', "disabled": False}) + + rrsets.append( + build_rrset( + name=name, + rtype="TXT", + ttl=ttl, + records=txt_content, + comment=comment or "", + ) + ) + + return rrsets + + +def ptr_records( + zone: str, + records: Sequence[Record], + comment: Optional[str] = None, + account: Optional[str] = None, + module: Any = None, +) -> List[RRset]: + """ + Build PTR RRsets for given host records. + + For each input record, one PTR RRset is created using the IPv4 address (`ip`) + and the forward hostname as PTR target. + + Args: + zone: Forward DNS zone (used to build PTR target FQDN). + records: PTR definitions. Each record may contain keys: + - name: host label + - ip: IPv4 address string + - ttl: int (optional, default 3600) + comment: Optional comment stored on created RRsets. + account: Optional account string (currently unused here; kept for API symmetry). + module: Ansible module object passed to `reverse_zone_names(...)`. + If omitted, this function attempts to use a global name `module` + for backward compatibility. + + Returns: + list[RRset]: List of PTR RRset dictionaries. + + Raises: + NameError: If `module` is not provided and no global `module` exists. + """ + _ = account # explicitly unused + + if module is None: + # Backward compatibility: attempt to use global `module` if present + module = globals().get("module") + if module is None: + raise NameError( + "ptr_records() requires `module` (Ansible module) to compute reverse names." 
+            )
+
+    rrsets: List[RRset] = []
+
+    for record in records:
+        name = fqdn(zone, str(record.get("name")))
+        ttl = int(record.get("ttl", 3600))
+        ipv4 = record.get("ip")
+
+        rev_name = reverse_zone_names(module, network=ipv4)
+
+        rrsets.append(
+            build_rrset(
+                name=rev_name,
+                rtype="PTR",
+                ttl=ttl,
+                records=[name],
+                comment=comment or "",
+            )
+        )
+
+    return rrsets
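`build_ptr_rrsets_by_zone()` below builds on the stdlib's `reverse_pointer`; for reference:

```python
import ipaddress

print(ipaddress.ip_address("172.17.2.1").reverse_pointer)
# -> 1.2.17.172.in-addr.arpa   (no trailing dot; the caller appends one)

print(ipaddress.ip_address("2001:db8::1").reverse_pointer.split(".")[-10:])
# -> ['8', 'b', 'd', '0', '1', '0', '0', '2', 'ip6', 'arpa']
```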
+
+
+def build_ptr_rrsets_by_zone(
+    *,
+    forward_zone: str,
+    hosts: Sequence[Record],
+    prefix_v4: int = 24,
+    prefix_v6: Optional[int] = 64,
+    comment: Optional[str] = None,
+    account: Optional[str] = None,
+) -> Dict[str, List[RRset]]:
+    """
+    Build PTR RRsets grouped by reverse-zone.
+
+    Output format:
+        {
+            "<reverse-zone>": [ <rrset>, <rrset>, ... ],
+            ...
+        }
+
+    Reverse-zone computation:
+      - IPv4 reverse zones are generated on octet boundaries: prefix_v4 must be 8, 16, or 24.
+        Default is /24.
+      - IPv6 reverse zones are generated on nibble boundaries: prefix_v6 must be a multiple of 4.
+        Default is /64. If prefix_v6 is None, IPv6 processing is disabled.
+
+    Args:
+        forward_zone: Forward DNS zone (e.g. "example.com").
+        hosts: Host definitions. Each host may contain keys:
+            - name: host label (required)
+            - ttl: int (optional, default 3600)
+            - ip: IPv4 address string (optional)
+            - ipv6: IPv6 address string (optional)
+        prefix_v4: IPv4 reverse-zone prefix (8, 16, 24).
+        prefix_v6: IPv6 reverse-zone prefix (multiple of 4, 1..128) or None to disable IPv6.
+        comment: Optional comment stored on created RRsets.
+        account: Optional account passed through to `build_rrset`.
+
+    Returns:
+        dict[str, list[RRset]]: Mapping reverse-zone -> list of PTR RRset dicts.
+
+    Raises:
+        ValueError: If `prefix_v4` is not one of {8,16,24} or `prefix_v6` is not a valid nibble boundary.
+    """
+
+    def _ipv4_reverse_zone(ip: ipaddress.IPv4Address, prefix: int) -> str:
+        if prefix not in (8, 16, 24):
+            raise ValueError(
+                "prefix_v4 must be one of 8, 16, 24 (octet boundary reverse zones)."
+            )
+        host_octets = (32 - prefix) // 8
+        labels = ip.reverse_pointer.split(".")
+        return ".".join(labels[host_octets:])
+
+    def _ipv6_reverse_zone(ip: ipaddress.IPv6Address, prefix: int) -> str:
+        if prefix % 4 != 0 or not (0 < prefix <= 128):
+            raise ValueError("prefix_v6 must be a multiple of 4 in range 1..128")
+        host_nibbles = (128 - prefix) // 4
+        labels = ip.reverse_pointer.split(".")
+        return ".".join(labels[host_nibbles:])
+
+    targets: DefaultDict[str, DefaultDict[str, Set[str]]] = defaultdict(
+        lambda: defaultdict(set)
+    )
+    ttls: DefaultDict[str, Dict[str, int]] = defaultdict(dict)
+
+    for host in hosts or []:
+        hostname = host.get("name")
+        ttl = int(host.get("ttl", 3600))
+
+        if not hostname:
+            continue
+
+        ptr_target = fqdn(forward_zone, str(hostname))
+
+        ipv4 = host.get("ip")
+        if ipv4:
+            try:
+                ip4 = ipaddress.IPv4Address(str(ipv4))
+                rev_zone = _ipv4_reverse_zone(ip4, prefix_v4)
+                ptr_name = f"{ip4.reverse_pointer}."
+                targets[rev_zone][ptr_name].add(ptr_target)
+                ttls[rev_zone].setdefault(ptr_name, ttl)
+            except Exception:
+                pass
+
+        ipv6 = host.get("ipv6")
+        if ipv6 and prefix_v6 is not None:
+            try:
+                ip6 = ipaddress.IPv6Address(str(ipv6))
+                rev_zone = _ipv6_reverse_zone(ip6, int(prefix_v6))
+                ptr_name = f"{ip6.reverse_pointer}."
+                targets[rev_zone][ptr_name].add(ptr_target)
+                ttls[rev_zone].setdefault(ptr_name, ttl)
+            except Exception:
+                pass
+
+    rrsets_by_zone: Dict[str, List[RRset]] = {}
+
+    for rev_zone, names in targets.items():
+        zone_rrsets: List[RRset] = []
+
+        for ptr_name, ptr_targets in sorted(names.items(), key=lambda x: x[0]):
+            zone_rrsets.append(
+                build_rrset(
+                    name=ptr_name,
+                    rtype="PTR",
+                    ttl=ttls[rev_zone].get(ptr_name, 3600),
+                    records=sorted(ptr_targets),
+                    comment=comment or "",
+                    account=account,
+                )
+            )
+
+        rrsets_by_zone[rev_zone] = zone_rrsets
+
+    return rrsets_by_zone
diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/plugins/module_utils/pdns/utils.py b/ansible/playbooks/collections/ansible_collections/bodsch/dns/plugins/module_utils/pdns/utils.py
new file mode 100644
index 0000000..9059351
--- /dev/null
+++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/plugins/module_utils/pdns/utils.py
@@ -0,0 +1,163 @@
+#!/usr/bin/python3
+# -*- coding: utf-8 -*-
+
+"""
+utils.py
+
+Utility helpers for building PowerDNS-compatible RRset payloads and DNS helper values.
+
+This module provides:
+  - `generate_serial()`: Generate an RFC-1912-style SOA serial number based on UTC date.
+  - `fqdn()`: Normalize record names into fully-qualified domain names (FQDNs).
+  - `build_rrset()`: Build a PowerDNS RRset dictionary suitable for API PATCH/POST payloads.
+
+Intended usage:
+    These helpers are consumed by other PowerDNS-related modules to construct API payloads
+    and normalize hostnames.
+
+Notes:
+  - `generate_serial()` uses a `YYYYMMDDNN` format with a two-digit daily counter.
+  - `fqdn()` ensures trailing dots and supports '@' as zone apex.
+  - `build_rrset()` ensures the rrset `name` has a trailing dot and normalizes record
+    entries into PowerDNS API `{content, disabled}` objects. An optional comment is
+    added under `comments`.
+"""
+
+from __future__ import annotations
+
+import datetime
+from typing import Any, Dict, Mapping, Optional, Sequence, Union
+
+RRsetRecordInput = Union[str, Mapping[str, Any]]
+RRset = Dict[str, Any]
+
+
+def generate_serial(base_serial: Optional[Union[int, str]] = None) -> int:
+    """
+    Generate a date-based SOA serial in the form `YYYYMMDDNN` (UTC).
+
+    The serial is based on today's UTC date and a two-digit counter:
+      - First call of the day: YYYYMMDD01
+      - If `base_serial` starts with today's date (YYYYMMDD), the counter is increased by 1.
+
+    Args:
+        base_serial: Optional existing serial value (int or str). If provided and it matches
+            today's date prefix (YYYYMMDD), its last two digits are interpreted as the counter
+            and incremented.
+
+    Returns:
+        int: The generated serial number.
+
+    Raises:
+        ValueError: If `base_serial` matches today's prefix but its last two characters
+            are not a valid integer counter.
+    """
+    today = datetime.datetime.utcnow().strftime("%Y%m%d")
+    counter = 1
+    serial = int(f"{today}{counter:02d}")
+
+    # Optional: increment the counter if the existing serial is from today.
+    if base_serial and str(base_serial).startswith(today):
+        old_counter = int(str(base_serial)[-2:])
+        counter = old_counter + 1
+        serial = int(f"{today}{counter:02d}")
+
+    return serial
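A worked pass through the counter logic (dates illustrative; the function always uses the current UTC day):

```python
# Suppose today is 2024-06-08 (UTC):
serial = generate_serial()           # -> 2024060801 (fresh day, counter 01)
bumped = generate_serial(serial)     # -> 2024060802 (same-day serial, counter + 1)
stale = generate_serial(2024060701)  # -> 2024060801 (yesterday's serial is ignored)
```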
+
+
+def fqdn(zone: str, name: str) -> str:
+    """
+    Normalize a record name into a fully-qualified domain name (FQDN).
+
+    Rules:
+      - '@' maps to the zone apex (`<zone>.`)
+      - If `name` already ends with '.', it is returned unchanged.
+      - If `name` already ends with `zone`, a trailing dot is added.
+      - Otherwise, `name` is treated as a label relative to `zone` and `.<zone>.` is appended.
+
+    Examples:
+      - fqdn("acme-inc.local", "srv001") -> "srv001.acme-inc.local."
+      - fqdn("acme-inc.local", "srv001.acme-inc.local.") -> "srv001.acme-inc.local."
+      - fqdn("acme-inc.local", "@") -> "acme-inc.local."
+
+    Args:
+        zone: DNS zone name without trailing dot (recommended).
+        name: Record owner name (label, relative name, absolute name, or '@').
+
+    Returns:
+        str: Normalized FQDN with trailing dot.
+    """
+    if name == "@":
+        return f"{zone}."  # root of the zone
+    if name.endswith("."):
+        return name
+    if name.endswith(zone):
+        return f"{name}."
+
+    return f"{name}.{zone}."
+
+
+def build_rrset(
+    name: str,
+    rtype: str,
+    ttl: int,
+    records: Sequence[RRsetRecordInput],
+    changetype: str = "REPLACE",
+    comment: Optional[str] = None,
+    account: Optional[str] = None,
+) -> RRset:
+    """
+    Build a PowerDNS RRset dictionary for API operations.
+
+    The returned structure is suitable for PowerDNS API payloads (e.g. PATCH with rrsets),
+    and has the form:
+
+        {
+            "name": "<name>.",
+            "type": "<rtype>",
+            "ttl": <ttl>,
+            "changetype": "<changetype>",
+            "records": [{"content": "...", "disabled": False}, ...],
+            "comments": [{"content": "...", "account": "..."}]  # only if comment is provided
+        }
+
+    Record normalization:
+      - If an item in `records` is a string, it is used as `content`.
+      - If an item is a mapping, `item["content"]` is used as `content`.
+
+    Args:
+        name: RRset owner name. A trailing dot is enforced.
+        rtype: Record type (e.g. "A", "AAAA", "CNAME", "MX", "TXT", "SRV", "PTR").
+        ttl: Time-to-live in seconds.
+        records: Sequence of record inputs (strings or mappings containing a "content" key).
+        changetype: PowerDNS change type ("REPLACE" or "DELETE"). Default "REPLACE".
+        comment: Optional comment string. If provided, a `comments` section is added.
+        account: Optional account value stored alongside the comment (defaults to empty string).
+
+    Returns:
+        dict[str, Any]: RRset dictionary (PowerDNS JSON-compatible).
+
+    Raises:
+        KeyError: If a mapping in `records` does not contain a "content" key.
+        TypeError: If `ttl` is not an int-like value or `records` contains unsupported items.
+    """
+    rrset: RRset = {
+        "name": name if name.endswith(".") else f"{name}.",
+        "type": rtype,
+        "ttl": int(ttl),
+        "changetype": changetype,
+        "records": [
+            {"content": r if isinstance(r, str) else r["content"], "disabled": False}
+            for r in records
+        ],
+    }
+
+    if comment:
+        rrset["comments"] = [
+            {
+                "content": comment,
+                "account": account or "",
+            }
+        ]
+
+    return rrset
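The payload produced for a plain A record, matching the structure documented above (values illustrative):

```python
rrset = build_rrset(
    name="srv001.acme-inc.local",
    rtype="A",
    ttl=3600,
    records=["172.17.2.1"],
    comment="managed by ansible",
)
# {
#   "name": "srv001.acme-inc.local.",
#   "type": "A",
#   "ttl": 3600,
#   "changetype": "REPLACE",
#   "records": [{"content": "172.17.2.1", "disabled": False}],
#   "comments": [{"content": "managed by ansible", "account": ""}],
# }
```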
+  - Build desired RRsets from higher-level structures using record helper functions
+    (`host_records`, `srv_records`, `mx_records`, `txt_records`, optionally `ptr_records`).
+  - Compare existing RRsets to desired RRsets and compute minimal PATCH payloads.
+
+Notes:
+  - The class expects an Ansible-like `module` object providing `module.log(...)` for logging.
+  - TLS verification is disabled (`verify=False`) for POST/PATCH/DELETE calls, as in the
+    original code. If you need secure TLS, pass `https://...` and enable verification.
+"""
+
+from __future__ import absolute_import, print_function
+
+import fnmatch
+from typing import Any, Dict, List, Mapping, Optional, Sequence, Tuple, Union
+
+import requests
+from ansible_collections.bodsch.dns.plugins.module_utils.pdns.records import (
+    host_records,
+    mx_records,
+    ptr_records,
+    srv_records,
+    txt_records,
+)
+from ansible_collections.bodsch.dns.plugins.module_utils.pdns.utils import (
+    build_rrset,
+    fqdn,
+)
+
+JsonType = Any
+RRset = Dict[str, Any]
+RRsetKey = Tuple[str, str]  # (name, type)
+ExistingRRsets = Dict[RRsetKey, Dict[str, Any]]
+CallUrlResult = Tuple[int, str, JsonType]
+ZoneCreateResult = Tuple[int, str, JsonType]
+ZonePatchResult = Tuple[int, str, JsonType]
+
+
+class PowerDNSWebApi:
+    """
+    PowerDNS API client for zones and RRset management.
+
+    The class wraps common PowerDNS zone operations and provides helper methods
+    to build and compare RRsets.
+
+    Attributes:
+        module: An Ansible-like module object used for logging (must provide `log()`).
+        headers: HTTP headers used for PowerDNS API calls.
+        base_url: Base URL for zone operations:
+            `http://<webserver_address>:<webserver_port>/api/v1/servers/<server_id>/zones`
+    """
+
+    def __init__(self, module: Any, config: Mapping[str, Any]) -> None:
+        """
+        Initialize the API client.
+
+        Args:
+            module: An Ansible-like module object used for logging.
+            config: Client configuration. Expected keys:
+                - server_id (str): PowerDNS server id (e.g. "localhost").
+                - api_key (str): PowerDNS API key.
+                - webserver_address (str): Host/IP where the API is reachable.
+                - webserver_port (int): API port (default 8081).
+
+        Returns:
+            None
+        """
+        self.module = module
+        self.module.log(f"PowerDNSWebApi::__init__(config={config})")
+
+        server_id = config.get("server_id")
+        api_key = config.get("api_key", None)
+        webserver_address = config.get("webserver_address", None)
+        webserver_port = config.get("webserver_port", 8081)
+
+        self.headers: Dict[str, str] = {
+            "Accept": "application/json",
+            "X-API-Key": str(api_key) if api_key is not None else "",
+        }
+
+        self.base_url: str = (
+            f"http://{webserver_address}:{webserver_port}/api/v1/servers/{server_id}/zones"
+        )
+
+    def zone_data(self, zone: str) -> Optional[Dict[str, Any]]:
+        """
+        Retrieve full zone data from PowerDNS.
+
+        Args:
+            zone: Zone name (e.g. "example.com" or "example.com.").
+
+        Returns:
+            Optional[dict[str, Any]]:
+                - Zone JSON object if the zone exists and the API returns HTTP 200/201.
+                - None if the zone does not exist or the request fails.
+        """
+        self.module.log(msg=f"PowerDNSWebApi::zone_data({zone})")
+
+        url = f"{self.base_url}/{zone}."
+        self.module.log(msg=f"  - {url}")
+
+        status_code, _response_text, json_response = self.__call_url(url=url)
+
+        if status_code in (200, 201) and isinstance(json_response, dict):
+            return json_response
+
+        return None
+
+    def zone_exists(self, zone: str) -> Optional[str]:
+        """
+        Check whether a zone exists in PowerDNS and return its kind.
+
+        Args:
+            zone: Zone name.
+
+        Returns:
+            Optional[str]:
+                - Uppercased zone kind (e.g.
"NATIVE", "MASTER", "SLAVE") if present. + - None if the zone does not exist or kind is not available. + """ + self.module.log(msg=f"PowerDNSWebApi::zone_exists({zone})") + + data = self.zone_data(zone) + + if isinstance(data, dict): + kind = data.get("kind", None) + return kind.upper() if isinstance(kind, str) else None + + return None + + def zone_list(self, zone: Optional[str] = None) -> List[Dict[str, Any]]: + """ + List zones from PowerDNS, optionally filtered by a fnmatch pattern. + + Args: + zone: Optional zone filter pattern (fnmatch). + Examples: + - None: list all zones + - "example.com": match only example.com. + - "*.example.com": match sub-zones, depending on PowerDNS zone names + + Returns: + list[dict[str, Any]]: A list of dicts containing: + - name (str) + - kind (str, lowercased) + - serial (int | None) + """ + self.module.log(msg=f"PowerDNSWebApi::zone_list({zone})") + + zone_fqdn: Optional[str] = None + if isinstance(zone, str) and zone: + zone_fqdn = zone if zone.endswith(".") else f"{zone}." + + result: List[Dict[str, Any]] = [] + url = f"{self.base_url}" + + status_code, _response_text, json_response = self.__call_url(url=url) + + if status_code != 200: + self.module.log(msg=f"failed to enumerate zones at {url}: {json_response}") + return result + + if not isinstance(json_response, list): + return result + + for z in json_response: + if not isinstance(z, dict): + continue + if zone_fqdn is None or fnmatch.fnmatch(str(z.get("name", "")), zone_fqdn): + kind = z.get("kind") + result.append( + { + "name": z.get("name"), + "kind": str(kind).lower() if kind is not None else "", + "serial": z.get("serial"), + } + ) + + return result + + def extract_existing_rrsets(self, zone_data: Mapping[str, Any]) -> ExistingRRsets: + """ + Extract relevant RRset state (ttl + enabled record contents) from a zone JSON object. + + Args: + zone_data: Zone JSON object as returned by :meth:`zone_data`. + + Returns: + dict[tuple[str, str], dict[str, Any]]: + Mapping (rrset_name, rrset_type) -> {"ttl": int, "records": list[str]} + Only records with `disabled == False` are included. + """ + self.module.log(msg=f"PowerDNSWebApi::extract_existing_rrsets({zone_data})") + + rrsets: ExistingRRsets = {} + for rr in zone_data.get("rrsets", []) or []: + if not isinstance(rr, dict): + continue + + name = rr.get("name") + rtype = rr.get("type") + if not isinstance(name, str) or not isinstance(rtype, str): + continue + + key: RRsetKey = (name, rtype) + + records = rr.get("records", []) or [] + contents = sorted( + [ + r.get("content") + for r in records + if isinstance(r, dict) + and not r.get("disabled") + and r.get("content") is not None + ] + ) + + rrsets[key] = {"ttl": rr.get("ttl"), "records": contents} + + return rrsets + + def build_full_rrsets(self, zone: str, data: Mapping[str, Any]) -> List[RRset]: + """ + Build the desired RRset list for a zone based on structured input. + + The following keys are supported in `data`: + - hosts: list[dict] -> A/AAAA/CNAME (and PTR optionally) + - services: list[dict] -> SRV + - mail_servers: list[dict] -> MX + - text: list[dict] -> TXT + - create_forward_zones: bool -> if True, also build PTR rrsets from hosts + + Args: + zone: Forward zone name. + data: Structured record definition container. + + Returns: + list[RRset]: List of RRset dictionaries (PowerDNS API format). 
+ """ + self.module.log(msg=f"PowerDNSWebApi::build_full_rrsets({zone}, data)") + + rrsets: List[RRset] = [] + rrsets += host_records(zone=zone, records=data.get("hosts", [])) + rrsets += srv_records(zone=zone, records=data.get("services", [])) + rrsets += mx_records(zone=zone, records=data.get("mail_servers", [])) + rrsets += txt_records(zone=zone, records=data.get("text", [])) + + if bool(data.get("create_forward_zones", False)): + rrsets += ptr_records(zone=zone, records=data.get("hosts", [])) + + return rrsets + + def compare_rrsets( + self, existing: ExistingRRsets, desired: Sequence[RRset] + ) -> List[RRset]: + """ + Compare existing RRsets with desired RRsets and compute minimal REPLACE updates. + + For each desired rrset: + - Compare enabled record contents (set-wise). + - If different from existing, create a minimal rrset payload with: + changetype="REPLACE" and records=[{"content": ..., "disabled": False}, ...] + + Args: + existing: Existing RRset snapshot as produced by :meth:`extract_existing_rrsets`. + desired: Desired RRsets (PowerDNS API rrset dicts). + + Returns: + list[RRset]: List of rrset PATCH entries that must be sent to PowerDNS. + """ + self.module.log("PowerDNSWebApi::compare_rrsets(existing, desired)") + + to_update: List[RRset] = [] + + for rr in desired: + key: RRsetKey = (str(rr.get("name")), str(rr.get("type"))) + existing_rr = existing.get(key) + + new_contents = sorted( + [r.get("content") for r in rr.get("records", []) if isinstance(r, dict)] + ) + existing_contents = existing_rr.get("records") if existing_rr else [] + + if set(new_contents) != set(existing_contents): + rrset: RRset = { + "name": rr.get("name"), + "type": rr.get("type"), + "ttl": rr.get("ttl"), + "changetype": "REPLACE", + "records": [ + {"content": content, "disabled": False} + for content in new_contents + ], + } + to_update.append(rrset) + + return to_update + + def zone_delete(self, base_url: str, zone: str) -> bool: + """ + Delete a zone in PowerDNS. + + This method is currently a stub (original code commented-out). + + Args: + base_url: Base URL for zones (unused in current implementation). + zone: Zone name (unused in current implementation). + + Returns: + bool: Always False (not implemented). + """ + _ = base_url + _ = zone + return False + + def zone_secondary( + self, base_url: str, zone: str, masters: str, comment: str + ) -> bool: + """ + Create a secondary (slave) zone. + + This method is currently a stub (original code commented-out). + + Args: + base_url: Base URL for zones (unused in current implementation). + zone: Zone name (unused in current implementation). + masters: Comma-separated master IP list (unused in current implementation). + comment: Comment string (unused in current implementation). + + Returns: + bool: Always False (not implemented). + """ + _ = base_url + _ = zone + _ = masters + _ = comment + return False + + def zone_primary( + self, + zone: str, + soa: str, + nameservers: Sequence[str], + comment: str, + ttl: int = 60, + wantkind: str = "Master", + ) -> bool: + """ + Create a primary zone (Master/Native) and apply initial SOA+NS rrsets. + + Workflow: + - If the zone already exists as MASTER/NATIVE: returns False. + - Else: + - Create the zone via :meth:`create_zone`. + - PATCH initial rrsets (SOA + NS) via :meth:`patch_zone`. + - Returns True (indicates an attempt was made). + + Args: + zone: Zone name. + soa: SOA content string. + nameservers: Nameserver hostnames (will be fqdn()'d). 
+ comment: Comment string (currently only used in logs; not applied to rrsets here). + ttl: TTL for initial SOA/NS rrsets. + wantkind: "Master" or "Native" (PowerDNS kind title-cased internally). + + Returns: + bool: + - False if the zone already exists as MASTER/NATIVE. + - True if a create+patch attempt was performed (even if patch fails). + """ + self.module.log( + msg=f"PowerDNSWebApi::zone_primary({zone}, {soa}, {nameservers}, {comment}, {ttl}, {wantkind})" + ) + + kind = self.zone_exists(zone) + zone_fqdn = zone if zone.endswith(".") else f"{zone}." + + if kind in ("MASTER", "NATIVE"): + return False + + status_code, _msg, _json_response = self.create_zone( + zone, nameservers, kind=wantkind, masters=None + ) + + if status_code in (200, 201): + rrsets = [ + build_rrset(zone_fqdn, "SOA", ttl, [soa]), + build_rrset(zone_fqdn, "NS", ttl, [fqdn(zone, x) for x in nameservers]), + ] + self.patch_zone(zone, rrsets) + + return True + + def create_zone( + self, + zone: str, + nameservers: Optional[Sequence[str]], + kind: str = "Native", + masters: Optional[Union[str, Sequence[str]]] = None, + ) -> ZoneCreateResult: + """ + Create a zone in PowerDNS. + + Args: + zone: Zone name (without trailing dot is accepted). + nameservers: Nameserver hostnames. Dots are normalized to trailing-dot form. + kind: Zone kind (e.g. "Native", "Master", "Slave"). + masters: Optional masters list for secondary zones. Supported forms: + - comma-separated string + - sequence of strings + + Returns: + tuple[int, str, Any]: (status_code, message, json_response) + status_code: HTTP status code returned by PowerDNS. + message: Human-readable status message. + json_response: Parsed JSON response (type depends on API). + """ + self.module.log( + msg=f"PowerDNSWebApi::create_zone({zone}, {nameservers}, {masters}, {kind})" + ) + + zone_fqdn = zone if zone.endswith(".") else f"{zone}." + + ns_list: List[str] = [] + for ns in nameservers or []: + if not isinstance(ns, str) or not ns: + continue + ns_list.append(ns if ns.endswith(".") else f"{ns}.") + + master_list: List[str] = [] + if masters: + if isinstance(masters, str): + master_list = [m.strip() for m in masters.split(",") if m.strip()] + else: + master_list = [str(m).strip() for m in masters if str(m).strip()] + + data: Dict[str, Any] = { + "kind": str(kind).lower().title(), + "masters": master_list, + "name": zone_fqdn, + "nameservers": ns_list, + } + + url = f"{self.base_url}" + status_code, _response_text, json_response = self.__call_url( + url=url, method="POST", payload=data + ) + + if status_code not in (200, 201): + msg = f"Failed to create zone {zone} at {url}: {json_response}." + else: + msg = f"Zone {zone} at {url} successfully created." + + return status_code, msg, json_response + + def patch_zone(self, zone: str, rrsets: Sequence[RRset]) -> ZonePatchResult: + """ + Patch a zone by sending RRset modifications (typically REPLACE operations). + + Args: + zone: Zone name (without trailing dot is accepted). + rrsets: Sequence of rrset patch entries. + + Returns: + tuple[int, str, Any]: (status_code, message, json_response) + status_code: HTTP status code returned by PowerDNS. + message: Human-readable status message. + json_response: Parsed JSON response (type depends on API). + """ + self.module.log(msg=f"PowerDNSWebApi::patch_zone({zone}, {rrsets})") + + zone_fqdn = zone if zone.endswith(".") else f"{zone}." 
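+        # Illustrative sketch (names and values below are examples, not part of
+        # the original file): the PATCH body built next follows the documented
+        # PowerDNS rrsets shape, e.g. for replacing a single A record:
+        #
+        #   {"rrsets": [{"name": "srv001.example.com.", "type": "A", "ttl": 3600,
+        #                "changetype": "REPLACE",
+        #                "records": [{"content": "192.0.2.10", "disabled": False}]}]}
+        #
+        # PowerDNS acknowledges a successful rrset PATCH with HTTP 204 (No Content).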
+ url = f"{self.base_url}/{zone_fqdn}" + payload: Dict[str, Any] = {"rrsets": list(rrsets)} + + status_code, _response_text, json_response = self.__call_url( + url=url, method="PATCH", payload=payload + ) + + self.module.log("------------------------------") + self.module.log(f" status : {status_code}") + self.module.log(f" response: {json_response}") + self.module.log("------------------------------") + + if status_code in (200, 201, 204): + msg = f"Zone {zone} at {url} successfully updated." + else: + msg = f"Failed to update zone {zone} at {url}: {json_response}." + + return status_code, msg, json_response + + # --------------------------------------------------------------------------- + + def __call_url( + self, + url: str, + method: str = "GET", + payload: Optional[Mapping[str, Any]] = None, + ) -> CallUrlResult: + """ + Perform an HTTP request to the PowerDNS API and return status, raw text and parsed JSON. + + Args: + url: Full request URL. + method: HTTP method ("GET", "POST", "PATCH", "DELETE"). + payload: Optional JSON payload for POST/PATCH. + + Returns: + tuple[int, str, Any]: (status_code, response_text, json_response) + status_code: HTTP status code (or 500 on local/client errors). + response_text: Raw response body as text (or an error string on failures). + json_response: Parsed JSON if possible, otherwise `{}`. + + Notes: + - For unsupported methods this returns (500, ..., {"error": ...}). + - For HTTP errors it returns the server's response status/text/json if available. + - POST/PATCH/DELETE use `verify=False` as in the original implementation. + """ + response: Optional[requests.Response] = None + + try: + authentication: Tuple[()] = () + + if method == "GET": + response = requests.get(url, headers=self.headers, auth=authentication) + + elif method == "POST": + response = requests.post( + url, headers=self.headers, json=payload, verify=False + ) + + elif method == "PATCH": + response = requests.patch( + url, headers=self.headers, json=payload, verify=False + ) + + elif method == "DELETE": + response = requests.delete(url, headers=self.headers, verify=False) + + else: + self.module.log(msg=f"unsupported method: {method}") + return ( + 500, + f"unsupported method: '{method}'", + {"error": f"unsupported method: {method}"}, + ) + + response.raise_for_status() + + try: + json_data: JsonType = response.json() + except Exception: + json_data = {} + + return response.status_code, response.text, json_data + + except requests.exceptions.HTTPError as e: + self.module.log(msg="ERROR (HTTPError)") + self.module.log(msg=f" - {e}") + self.module.log(msg=f" - url: {url}") + self.module.log(msg=f" - payload: {payload}") + + if response is not None: + try: + return response.status_code, response.text, response.json() + except Exception: + return response.status_code, response.text, {} + return 500, f"HTTPError: {e}", {} + + except ConnectionError as e: + self.module.log(msg="ERROR (ConnectionError)") + error_text = ( + f"{type(e).__name__} {(str(e) if len(e.args) == 0 else str(e.args[0]))}" + ) + self.module.log(msg=f" - {error_text}") + return 500, error_text, {} + + except Exception as e: + self.module.log(msg="ERROR (Exception)") + error_text = f"{type(e).__name__}: {str(e)}" + + if response is not None: + try: + return response.status_code, response.text, response.json() + except Exception: + return response.status_code, response.text, {} + + return 500, error_text, {} diff --git 
a/ansible/playbooks/collections/ansible_collections/bodsch/dns/plugins/module_utils/pihole/adlist_manager.py b/ansible/playbooks/collections/ansible_collections/bodsch/dns/plugins/module_utils/pihole/adlist_manager.py new file mode 100644 index 0000000..6b0da3f --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/plugins/module_utils/pihole/adlist_manager.py @@ -0,0 +1,82 @@ +#!/usr/bin/python3 +# -*- coding: utf-8 -*- + +# (c) 2020-2025, Bodo Schulz +# Apache-2.0 (see LICENSE or https://opensource.org/license/apache-2-0) +# SPDX-License-Identifier: Apache-2.0 + +from __future__ import absolute_import, print_function + +import sqlite3 +from typing import Any, Dict, List, Optional + +from ansible_collections.bodsch.dns.plugins.module_utils.pihole.database import DataBase + + +class AdlistManager(DataBase): + """ """ + + def __init__(self, module: any, database: str): + """ """ + self.module = module + + super().__init__(module, database) + + def list_adlists(self): + self.execute("SELECT id, address, enabled FROM adlist") + return self.fetchall() + + def adlist_exists(self, address: str) -> bool: + return self.get_id_by_column("adlist", "address", address) is not None + + def add_adlist( + self, address: str, comment: Optional[str] = None, enabled: bool = True + ) -> Dict[str, Any]: + if self.adlist_exists(address): + return dict(changed=False, msg="Adlist already exists.") + + enabled_int = 1 if enabled else 0 + + try: + self.execute( + "INSERT INTO adlist (address, enabled, comment, date_added) VALUES (?, ?, ?, strftime('%s','now'))", + (address, enabled_int, comment), + ) + self.commit() + return dict(changed=True, msg="Adlist successfully added.") + except sqlite3.DatabaseError as e: + self.module.fail_json(msg=f"Failed to insert adlist: {str(e)}") + + def remove_adlist(self, address: str) -> Dict[str, Any]: + self.execute("DELETE FROM adlist WHERE address = ?", (address,)) + rows_deleted = self.cursor.rowcount + self.commit() + + if rows_deleted > 0: + return dict(changed=True, msg="Adlist removed.") + else: + return dict(changed=False, msg="Adlist not found.") + + def manage_adlists(self, adlists: List[Dict[str, Any]]) -> List[Dict[str, Any]]: + result = [] + + for ad in adlists: + address = ad.get("address") + comment = ad.get("comment") + enabled = ad.get("enabled", True) + + if address and enabled: + result.append({address: self.add_adlist(address, comment, enabled)}) + elif address and not enabled: + result.append({address: self.remove_adlist(address)}) + + return result + + def sync_adlists(self, desired: List[str]) -> List[Dict[str, Any]]: + current = [addr for _, addr, _ in self.list_adlists()] + to_remove = set(current) - set(desired) + + result = [] + for addr in to_remove: + result.append({addr: self.remove_adlist(addr)}) + return result diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/plugins/module_utils/pihole/client_manager.py b/ansible/playbooks/collections/ansible_collections/bodsch/dns/plugins/module_utils/pihole/client_manager.py new file mode 100644 index 0000000..693ae50 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/plugins/module_utils/pihole/client_manager.py @@ -0,0 +1,111 @@ +# -*- coding: utf-8 -*- + +# (c) 2020-2025, Bodo Schulz +# Apache-2.0 (see LICENSE or https://opensource.org/license/apache-2-0) +# SPDX-License-Identifier: Apache-2.0 + +from __future__ import absolute_import, print_function + +from typing import Any, Dict, List, Optional, Tuple + +from 
ansible_collections.bodsch.dns.plugins.module_utils.pihole.database import DataBase


class ClientManager(DataBase):
+    """ """
+
+    def __init__(self, module: Any, database: str):
+        """ """
+        self.module = module
+
+        super().__init__(module, database)
+
+    def list_clients(self) -> List[Tuple[int, str, int]]:
+        self.execute("SELECT id, ip, comment FROM client")
+        return self.fetchall()
+
+    def client_by_ip(self, ip: str) -> Optional[int]:
+        return self.get_id_by_column("client", "ip", ip)
+
+    def add_or_update_client(
+        self, ip: str, comment: str = "", groups: Optional[List[str]] = None
+    ) -> Dict[str, Any]:
+        # avoid a mutable default argument for `groups`
+        groups = groups or []
+        changed = False
+        client_id = self.client_by_ip(ip)
+
+        if client_id:
+            # check whether the comment has changed
+            current_comment = self.get_client_comment(client_id)
+            if current_comment != comment:
+                self.execute(
+                    "UPDATE client SET comment = ? WHERE id = ?", (comment, client_id)
+                )
+                changed = True
+        else:
+            self.execute(
+                "INSERT INTO client (ip, comment) VALUES (?, ?)", (ip, comment)
+            )
+            client_id = self.cursor.lastrowid
+            changed = True
+
+        # compare group membership
+        current_group_ids = self.get_client_groups(client_id)
+
+        # determine the target group ids
+        target_group_ids = []
+        for group in groups:
+            group_id = self.get_id_by_column("group", "name", group)
+            if group_id is not None:
+                target_group_ids.append(group_id)
+
+        target_group_ids.sort()
+
+        if current_group_ids != target_group_ids:
+            self.execute(
+                "DELETE FROM client_by_group WHERE client_id = ?", (client_id,)
+            )
+            for gid in target_group_ids:
+                self.execute(
+                    "INSERT INTO client_by_group (client_id, group_id) VALUES (?, ?)",
+                    (client_id, gid),
+                )
+            changed = True
+
+        if changed:
+            self.commit()
+
+        return dict(
+            changed=changed,
+            msg=f"Client '{ip}' {'updated' if changed else 'unchanged'}.",
+        )
+
+    def remove_client(self, ip: str) -> Dict[str, Any]:
+        client_id = self.client_by_ip(ip)
+        if not client_id:
+            return dict(changed=False, msg=f"Client '{ip}' not found.")
+
+        self.execute("DELETE FROM client WHERE id = ?", (client_id,))
+        self.commit()
+        return dict(changed=True, msg=f"Client '{ip}' removed.")
+
+    def manage_clients(self, clients: List[Dict[str, Any]]) -> List[Dict[str, Any]]:
+        results = []
+        for c in clients:
+            ip = c.get("ip")
+            comment = c.get("comment", "")
+            groups = c.get("groups", [])
+            if not ip:
+                continue
+            results.append({ip: self.add_or_update_client(ip, comment, groups)})
+        return results
+
+    def get_client_comment(self, client_id: int) -> Optional[str]:
+        self.execute("SELECT comment FROM client WHERE id = ?", (client_id,))
+        row = self.fetchone()
+        return row[0] if row else None
+
+    def get_client_groups(self, client_id: int) -> List[int]:
+        self.execute(
+            "SELECT group_id FROM client_by_group WHERE client_id = ?", (client_id,)
+        )
+        return sorted(row[0] for row in self.fetchall())
diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/plugins/module_utils/pihole/config.py b/ansible/playbooks/collections/ansible_collections/bodsch/dns/plugins/module_utils/pihole/config.py
new file mode 100644
index 0000000..8e9073e
--- /dev/null
+++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/plugins/module_utils/pihole/config.py
@@ -0,0 +1,158 @@
+#!/usr/bin/python3
+# -*- coding: utf-8 -*-
+
+# (c) 2020-2025, Bodo Schulz
+# Apache-2.0 (see LICENSE or https://opensource.org/license/apache-2-0)
+# SPDX-License-Identifier: Apache-2.0
+
+from __future__ import absolute_import, print_function
+
+import json
+from pathlib import Path
+from typing import Any, Dict, Generator, Tuple
+
+import toml
+from ansible_collections.bodsch.dns.plugins.module_utils.pihole.pihole import PiHole
+from ansible_collections.bodsch.dns.plugins.module_utils.pihole.utils import (
+    flatten_config_dict,
+    is_equal,
+    normalize_value,
+)
+
+
+class ConfigManager(PiHole):
+    """ """
+
+    def __init__(self, module: Any):
+        """ """
+        self.module = module
+
+        super().__init__(module)
+
+    def load_toml(self, path: str) -> Dict[str, Any]:
+        """ """
+        # self.module.log(f"ConfigManager::load_toml(path={path})")
+
+        toml_path = Path(path)
+        if not toml_path.exists():
+            raise FileNotFoundError(f"TOML configuration file not found: {path}")
+
+        try:
+            with toml_path.open("r", encoding="utf-8") as f:
+                return toml.load(f)
+        except Exception as e:
+            raise RuntimeError(f"Failed to load TOML config: {e}")
+
+    def set_config(self, config: dict):
+        """
+        e.g. /usr/bin/pihole-FTL --config dns.hosts '[ "192.168.0.4 matrix.vpn", "192.168.0.4 matrix.lan" ]'
+        """
+        # self.module.log("ConfigManager::set_config(config)")
+        results = []
+
+        # renamed from `toml` to avoid shadowing the imported toml module
+        current_config = self.load_toml("/etc/pihole/pihole.toml")
+
+        current_gen = flatten_config_dict(current_config)
+        desired_gen = flatten_config_dict(config)
+
+        diff = self.changed_entries(current_gen, desired_gen)
+
+        for key, val in diff.items():
+            res = {}
+
+            changed = self.ensure(key, val)
+
+            res[key] = dict(
+                msg=f"successfully changed to '{val}'.",
+                changed=changed,
+            )
+
+            results.append(res)
+
+        return results
+
+    def _config(self, param: str, value: Any):
+        """ """
+        # self.module.log(f"ConfigManager::_config(param={param}, value={value})")
+
+        if isinstance(value, bool):
+            val = "true" if value else "false"
+        elif isinstance(value, (list, dict)):
+            val = json.dumps(value)
+        else:
+            val = str(value)
+
+        cmd = [self.pihole_ftl_bin, "--config", f"{param}", f"{val}"]
+
+        # self.module.log(f"  - {cmd}")
+
+        rc, out, err = self._exec(cmd)
+        if rc == 0:
+            return True
+        else:
+            self.module.log("pihole-FTL config failed")
+            self.module.log(f"  param={param}")
+            self.module.log(f"  value={val}")
+            self.module.log(f"  stderr={err}")
+            return False
+
+    def ensure(self, param: str, desired: Any):
+        changed = self._config(param, desired)
+        return changed
+
+    def exit(self):
+        return dict(changed=self.changed, results=self.results)
+
+    def changed_entries(
+        self,
+        current_gen: Generator[Tuple[str, Any], None, None],
+        desired_gen: Generator[Tuple[str, Any], None, None],
+    ) -> Dict[str, Any]:
+        """ """
+        # self.module.log("ConfigManager::changed_entries(current_gen, desired_gen)")
+
+        current = dict(current_gen)
+        desired = dict(desired_gen)
+
+        changed = {}
+
+        for key, des_val in desired.items():
+            cur_val = current.get(key)
+
+            if not is_equal(cur_val, des_val):
+                changed[key] = des_val
+
+        return changed
+
+    def changed_entries_older(
+        self,
+        current_gen: Generator[Tuple[str, Any], None, None],
+        desired_gen: Generator[Tuple[str, Any], None, None],
+    ) -> Dict[str, Any]:
+        """ """
+        # self.module.log("ConfigManager::changed_entries_older(current_gen, desired_gen)")
+
+        current = dict(current_gen)
+        desired = dict(desired_gen)
+
+        changed = {}
+
+        # only check keys from the desired config and compare them with current
+        for key, des_val in desired.items():
+            cur_val = current.get(key)
+
+            # normalize values before comparing
+            norm_cur = normalize_value(cur_val)
+            norm_des = normalize_value(des_val)
+
+            if norm_cur != norm_des:
+                self.module.log(f"  - '{key}' changed from '{norm_cur}' to '{norm_des}'")
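+                # Illustrative note: normalize_value() makes the string
+                # "55.000000" read from pihole.toml compare equal to a desired
+                # integer 55, and "true" equal to True, so only genuine changes
+                # reach the assignment below.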
changed[key] = des_val + + return changed diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/plugins/module_utils/pihole/database.py b/ansible/playbooks/collections/ansible_collections/bodsch/dns/plugins/module_utils/pihole/database.py new file mode 100644 index 0000000..b83ab56 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/plugins/module_utils/pihole/database.py @@ -0,0 +1,58 @@ +import sqlite3 +from pathlib import Path +from typing import Any, Optional + + +class DataBase: + def __init__(self, module: Any, database: str): + self.module = module + + # self.module.log(f"DataBase::__init__(module, database={database})") + + db_file = Path(database) + + if not db_file.exists(): + raise FileNotFoundError(f"Pi-hole DB not found at: {db_file}") + + self.conn = sqlite3.connect( + db_file, isolation_level=None, detect_types=sqlite3.PARSE_COLNAMES + ) + self.cursor = self.conn.cursor() + + def execute(self, query: str, params: tuple = ()): + """ """ + # self.module.log(f"DataBase::execute(query={query}, params={params})") + try: + self.cursor.execute(query, params) + except sqlite3.DatabaseError as e: + error_details = {"error": str(e), "query": query, "params": params} + self.module.fail_json(msg="Database query failed", **error_details) + + def fetchall(self): + """ """ + # self.module.log("DataBase::fetchall()") + return self.cursor.fetchall() + + def fetchone(self): + """ """ + # self.module.log("DataBase::fetchone()") + return self.cursor.fetchone() + + def commit(self): + """ """ + # self.module.log("DataBase::commit()") + self.conn.commit() + + def close(self): + """ """ + # self.module.log("DataBase::close()") + self.conn.close() + + def get_id_by_column(self, table: str, column: str, value: Any) -> Optional[int]: + """ """ + # self.module.log(f"DataBase::get_id_by_column(table={table}, column={column}, value={value})") + + query = f'SELECT id FROM "{table}" WHERE {column} = ?' 
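+        # Note: only the *value* is bound as a SQL parameter; table and column
+        # names are interpolated because SQLite placeholders cannot stand in for
+        # identifiers. Callers must therefore pass only trusted, hard-coded
+        # identifiers, as the managers in this package do, e.g.
+        #   self.get_id_by_column("adlist", "address", "https://example.org/list.txt")
+        # (the URL is an illustrative value).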
+ self.execute(query, (value,)) + row = self.fetchone() + return row[0] if row else None diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/plugins/module_utils/pihole/group_manager.py b/ansible/playbooks/collections/ansible_collections/bodsch/dns/plugins/module_utils/pihole/group_manager.py new file mode 100644 index 0000000..a9eb356 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/plugins/module_utils/pihole/group_manager.py @@ -0,0 +1,94 @@ +#!/usr/bin/python3 +# -*- coding: utf-8 -*- + +# (c) 2020-2025, Bodo Schulz +# Apache-2.0 (see LICENSE or https://opensource.org/license/apache-2-0) +# SPDX-License-Identifier: Apache-2.0 + +from __future__ import absolute_import, print_function + +import sqlite3 +from typing import Any, Dict, List, Optional, Tuple + +from ansible_collections.bodsch.dns.plugins.module_utils.pihole.database import DataBase + + +class GroupManager(DataBase): + """ """ + + def __init__(self, module: any, database: str): + """ """ + self.module = module + + # self.module.log(f"GroupManager::__init__(module, database={database})") + + super().__init__(module, database) + + def list_groups(self) -> List[Tuple[int, str, int]]: + """ """ + # self.module.log("GroupManager::list_groups()") + + self.execute("SELECT id, name, enabled FROM 'group'") + return self.fetchall() + + def group_exists(self, name: str) -> bool: + """ """ + # self.module.log(f"GroupManager::group_exists(name={name})") + return self.get_id_by_column("group", "name", name) is not None + + def add_group( + self, name: str, description: Optional[str] = None, enabled: bool = True + ): + """ """ + # self.module.log(f"GroupManager::add_group(name={name}, description={description}, enabled={enabled})") + if self.group_exists(name): + return dict(changed=False, msg="Group already created.") + + enabled_int = 1 if enabled else 0 + + try: + self.execute( + "INSERT INTO 'group' (enabled, name, description, date_added) VALUES (?, ?, ?, strftime('%s','now'))", + (enabled_int, name, description), + ) + self.commit() + return dict(changed=True, msg="Group successfully created.") + except sqlite3.DatabaseError as e: + return dict( + failed=True, + changed=False, + msg=f"Failed to insert group '{name}': {str(e)}", + ) + + def remove_group(self, name: str) -> Dict[str, Any]: + """ """ + # self.module.log(f"GroupManager::remove_group(name={name})") + try: + self.execute("DELETE FROM 'group' WHERE name = ?", (name,)) + rows_deleted = self.cursor.rowcount + self.commit() + + if rows_deleted > 0: + return dict(changed=True, msg="Group removed.") + else: + return dict(changed=False, msg="Group not found.") + except sqlite3.DatabaseError as e: + self.module.fail_json(msg=f"Failed to remove group: {str(e)}") + + def manage_groups( + self, groups: List[Dict[str, Any]] + ) -> List[Dict[str, Dict[str, Any]]]: + """ """ + # self.module.log(f"GroupManager::manage_groups(groups={groups})") + result = [] + for g in groups: + name = g.get("name") + description = g.get("description") + enabled = g.get("enabled", True) + + if name and enabled: + result.append({name: self.add_group(name, description, enabled)}) + elif name and not enabled: + result.append({name: self.remove_group(name)}) + + return result diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/plugins/module_utils/pihole/pihole.py b/ansible/playbooks/collections/ansible_collections/bodsch/dns/plugins/module_utils/pihole/pihole.py new file mode 100644 index 0000000..b5b891c --- /dev/null +++ 
b/ansible/playbooks/collections/ansible_collections/bodsch/dns/plugins/module_utils/pihole/pihole.py
@@ -0,0 +1,282 @@
+#!/usr/bin/python3
+# -*- coding: utf-8 -*-
+
+# (c) 2020-2025, Bodo Schulz
+# Apache-2.0 (see LICENSE or https://opensource.org/license/apache-2-0)
+# SPDX-License-Identifier: Apache-2.0
+
+from __future__ import absolute_import, print_function
+
+import re
+from pathlib import Path
+from typing import Any, Dict, List, Optional, Tuple
+
+from ansible_collections.bodsch.core.plugins.module_utils.checksum import Checksum
+
+
+class PiHole:
+    """ """
+
+    # --- class-level state & regexes ---
+    _LINE_RE = re.compile(r"^\s*-\s+(?P<domain>\S+)(?:\s+\((?P<reason>[^)]+)\))?")
+    _HEADER_RE = re.compile(r"^\s*\[\s*(?P<mark>[✓✗])\s*]")
+    _COMMENT = "Domain already in the specified list"
+    _ACTION_RE = re.compile(r"^(?P<action>Added|Failed to add)\s+\d+\s+domain\(s\):$")
+
+    def __init__(self, module):
+        self.module = module
+
+        # self.module.log("PiHole::__init__()")
+
+        self.pihole_bin = self.module.get_bin_path("pihole", False)
+        self.pihole_ftl_bin = self.module.get_bin_path("pihole-FTL", False)
+
+    def status(self):
+        """ """
+        args = [self.pihole_bin]
+        args.append("status")
+        rc, out, err = self._exec(args)
+
+        return rc
+
+    def version(self):
+        """ """
+        args = [self.pihole_ftl_bin]
+        args.append("--version")
+
+        rc, out, err = self._exec(args)
+
+        _output = []
+        _output += out.splitlines()
+        _output += err.splitlines()
+
+        msg = "unknown message"
+
+        pattern = re.compile(r".*v(?P<major>\d+)\.(?P<minor>\d+)\.(?P<patch>\*|\d+)$")
+
+        version = next(
+            (m.groupdict() for s in _output if (m := pattern.search(s))), None
+        )
+
+        result = dict(msg=msg)
+
+        if version and isinstance(version, dict):
+            # version_full_string = version.get("version")
+            version_major_string = version.get("major")
+            version_minor_string = version.get("minor")
+            version_patch_string = version.get("patch")
+
+            version_full_string = (
+                f"{version_major_string}.{version_minor_string}.{version_patch_string}"
+            )
+
+            result = dict(
+                msg=msg,
+                full_version=version_full_string,
+                version=dict(
+                    major=int(version_major_string),
+                    minor=int(version_minor_string),
+                    patch=int(version_patch_string),
+                ),
+                executable=self.pihole_ftl_bin,
+            )
+
+        return result
+
+    def import_list(
+        self, domains: List[str], list_type: str, comment: str
+    ) -> Dict[str, Any]:
+        """
+        Import an allow- or deny-list.
+
+        :param domains: list of domain strings.
+        :param list_type: "allow" or "deny".
+        :param comment: comment that is passed to pihole.
+        :returns: dict with the keys "changed", "added", "present".
+ """ + # self.module.log(f"PiHole::import_list({domains}, {list_type}, {comment})") + + if list_type not in ("allow", "deny"): + raise ValueError("list_type muss 'allow' oder 'deny' sein") + + cmd = [self.pihole_bin, list_type, *domains, "--comment", comment] + rc, out, err = self._exec(cmd, check_rc=True) + parsed = self._parse_output(out + "\n" + err) + + return { + "changed": bool(parsed["added"]), + "added": parsed["added"], + # "duplicates": parsed["duplicates"], + "present": parsed["present"], + } + + def import_allow(self, domains: List[str], comment: str = "allowlist import"): + return self.import_list(domains, "allow", comment) + + def import_deny(self, domains: List[str], comment: str = "denylist import"): + return self.import_list(domains, "deny", comment) + + def admin_password(self, password: str): + """ """ + # self.module.log(f"PiHole::admin_password({password})") + old_checksum = None + cur_checksum = None + + checksum = Checksum(self.module) + cur_checksum = checksum.checksum(password) + + _file = Path("/etc/pihole") / ".admin.checksum" + + if _file.exists(): + with open(_file, "r") as f: + old_checksum = f.read().rstrip("\n") + + if old_checksum == cur_checksum: + return dict( + changed=False, + failed=False, + msg="This admin password has already been set.", + ) + + args = [self.pihole_bin, "setpassword", password] + + rc, out, err = self._exec(args) + + if rc == 0: + with open(_file, "w") as f: + f.write(cur_checksum) + + return dict( + changed=True, + failed=False, + msg="The admin password has been successfully changed.", + ) + + return dict(changed=False, failed=True, msg=err) + + def update_gravity(self): + """ """ + _changed = False + + args = [self.pihole_bin, "updateGravity"] + + rc, out, err = self._exec(args) + + if rc == 0: + + # 'Status: No changes detected' + m = re.search(r"(?m)(?<=Status:\s).*", out) + if m: + status = m.group(0) + + _changed = "no changes detected" not in status.strip().casefold() + + return dict(changed=_changed, failed=False, msg=status) + + return dict(changed=_changed, failed=True, msg="An error has occurred.") + + def reload_lists(self): + return self.reload(command="reloadlists") + + def reload_dns(self): + return self.reload(command="reloaddns") + + def reload(self, command: str = "reloadlists"): + """ + reloaddns : Update the lists and flush the cache without restarting the DNS server + reloadlists: Update the lists WITHOUT flushing the cache or restarting the DNS server + """ + _changed = False + + args = [self.pihole_bin, command] + + rc, out, err = self._exec(args) + + if rc == 0: + if command == "reloadlists": + m = re.search(r"(?m)Reloading DNS lists.*", out) + if m: + _changed = True + status = "DNS lists have been successfully reloaded." + + if command == "reloaddns": + m = re.search(r"(?m)Flushing DNS cache.*", out) + if m: + _changed = True + status = "Lists have been successfully reloaded." 
+
+            return dict(changed=_changed, failed=False, msg=status)
+
+        return dict(changed=_changed, failed=True, msg="An error has occurred.")
+
+    # -------------------
+
+    def _exec(self, commands: List[str], check_rc: bool = True) -> Tuple[int, str, str]:
+        """ """
+        # self.module.log(f"PiHole::_exec({commands}, {check_rc})")
+
+        rc, out, err = self.module.run_command(commands, check_rc=check_rc)
+
+        if rc != 0:
+            self.module.log(msg=f"  out: '{out}'")
+            self.module.log(msg=f"  err: '{err}'")
+
+        return rc, out, err
+
+    def _parse_output(self, raw: str) -> Dict[str, List[str]]:
+        """
+        Parse the combined stdout+stderr output of `pihole allow|deny`.
+        Returns a dict with the lists "added", "duplicates" and "invalid".
+        """
+        raw = re.sub(r"Logout attempt.*?Unauthorized!\n?", "", raw, flags=re.S)
+        raw = re.sub(r"(?m)^/opt/pihole/api\.sh:.*readonly variable\n?", "", raw)
+
+        # # self.module.log(f"PiHole::_parse_output({raw})")
+
+        added: List[str] = []
+        duplicates: List[str] = []
+        present: List[str] = []
+
+        current: Optional[List[str]] = None
+
+        for line in raw.splitlines():
+            line = line.strip()
+            if not line or line == self._COMMENT:
+                continue
+
+            # header line?
+            hmatch = self._HEADER_RE.match(line)
+            if hmatch:
+                _end = hmatch.end()
+                text = line[_end:].strip()
+                m = self._ACTION_RE.match(text)
+                if not m:
+                    continue
+                action = m.group("action")
+                current = added if action == "Added" else []  # note: temporary
+                # for "Failed to add" both target lists are filled below,
+                # decided per domain by the individual reason
+                if action == "Failed to add":
+                    current = None
+                continue
+
+            # detail line?
+            lmatch = self._LINE_RE.match(line)
+            if not lmatch:
+                continue
+
+            dom = lmatch.group("domain")
+            reason = (lmatch.group("reason") or "").lower()
+
+            if current is added:
+                added.append(dom)
+            else:
+                # split failed domains by reason
+                if "already" in reason or "exists" in reason:
+                    duplicates.append(dom)
+                else:
+                    present.append(dom)
+
+        return {
+            "added": added,
+            "duplicates": duplicates,
+            "present": present,
+        }
diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/plugins/module_utils/pihole/utils.py b/ansible/playbooks/collections/ansible_collections/bodsch/dns/plugins/module_utils/pihole/utils.py
new file mode 100644
index 0000000..8309b28
--- /dev/null
+++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/plugins/module_utils/pihole/utils.py
@@ -0,0 +1,78 @@
+# import math
+from typing import Any, Dict, Generator, List
+from urllib.parse import urlparse
+
+
+def sanitize_adlist(adlists: List[Dict[str, Any]]) -> List[Dict[str, Any]]:
+    seen = set()
+    cleaned = []
+
+    for ad in adlists:
+        address = ad.get("address")
+        if not address:
+            continue
+
+        # normalize (e.g., strip whitespace, lowercase if needed)
+        address = address.strip()
+
+        # validate URL
+        parsed = urlparse(address)
+        if not parsed.scheme.startswith("http"):
+            continue  # skip invalid
+
+        if address in seen:
+            continue  # skip duplicates
+
+        seen.add(address)
+
+        cleaned.append(
+            dict(
+                address=address,
+                comment=ad.get("comment", ""),
+                enabled=ad.get("enabled", True),
+            )
+        )
+
+    return cleaned
+
+
+def flatten_config_dict(
+    data: Dict[str, Any], prefix: str = ""
+) -> Generator[tuple[str, Any], None, None]:
+    for key, value in data.items():
+        full_key = f"{prefix}.{key}" if prefix else key
+
+        # skip the value if it is None or empty (str, list, tuple)
+        if value is None:
+            continue
+        if isinstance(value, (str, list, tuple)) and len(value) == 0:
+            continue
+
+        if isinstance(value,
dict):
+            # recurse into sub-dicts
+            yield from flatten_config_dict(value, prefix=full_key)
+        else:
+            yield full_key, value
+
+
+def normalize_value(val: Any) -> Any:
+    if isinstance(val, str):
+        val_lower = val.lower()
+        if val_lower == "true":
+            return True
+        elif val_lower == "false":
+            return False
+        elif val.isdigit():
+            return int(val)
+        try:
+            # float parsing, e.g. "55.000000"
+            return float(val)
+        except ValueError:
+            pass
+    elif isinstance(val, float) and val.is_integer():
+        return int(val)
+    return val
+
+
+def is_equal(a: Any, b: Any) -> bool:
+    return normalize_value(a) == normalize_value(b)
diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/plugins/module_utils/utils.py b/ansible/playbooks/collections/ansible_collections/bodsch/dns/plugins/module_utils/utils.py
new file mode 100644
index 0000000..b85a359
--- /dev/null
+++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/plugins/module_utils/utils.py
@@ -0,0 +1,134 @@
+#!/usr/bin/python3
+# -*- coding: utf-8 -*-
+
+"""
+utils.py
+
+Reverse-zone helper for DNS automation (Ansible context).
+
+This module provides `reverse_zone_names(...)`, a utility that derives the reverse-DNS
+domain name for an IPv4 or IPv6 address/network.
+
+Key points:
+  - IPv4 inputs return `<reversed octets>.in-addr.arpa` (without trailing dot).
+  - IPv6 inputs are processed via `netaddr` and trimmed for CIDR networks to the
+    reverse-zone boundary (nibble-based).
+  - The function expects an Ansible-like `module` object for logging (`module.log(...)`).
+"""
+
+from __future__ import absolute_import, print_function
+
+from typing import Any, Optional
+
+import netaddr
+from ansible_collections.bodsch.dns.plugins.module_utils.network_type import (
+    is_valid_ipv4,
+)
+
+
+def reverse_zone_names(module: Any, network: str) -> Optional[str]:
+    """
+    Compute the reverse-DNS zone name for an IPv4/IPv6 address or network.
+
+    For IPv4, the reverse name is created by reversing the dotted-quad octets and appending
+    ``.in-addr.arpa`` (no trailing dot).
+
+    For IPv6, the function uses `netaddr.IPNetwork` / `netaddr.IPAddress.reverse_dns`. If the input
+    is a CIDR network (contains '/'), an offset is derived from the prefix length and the computed
+    reverse DNS name is trimmed to match the reverse-zone boundary implied by the prefix.
+
+    Args:
+        module: An Ansible-like module object used for logging. Must provide `module.log(...)`.
+        network: IPv4/IPv6 address string or CIDR network string (e.g. "192.0.2.10" or "2001:db8::/64").
+
+    Returns:
+        Optional[str]:
+            - Reverse zone name (without trailing dot) if the input could be processed.
+            - None if the input is neither a valid IPv4 nor a valid IPv6 network/address.
+    """
+    module.log(f"reverse_zone_names({network})")
+
+    result: Optional[str] = None
+
+    if is_valid_ipv4(network):
+        reverse_ip = ".".join(network.split(".")[::-1])
+        result = f"{reverse_ip}.in-addr.arpa"
+        return result
+
+    try:
+        offset: Optional[int] = None
+
+        if network.count("/") == 1:
+            prefix = network.split("/")[1]
+            offset = int(9 + int(prefix) // 2)
+
+        ip_net = netaddr.IPNetwork(str(network))
+        ip_addr = netaddr.IPAddress(ip_net)
+        reverse_ip = ip_addr.reverse_dns  # usually ends with a trailing "ip6.arpa."
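+        # Worked example: for "2001:db8::/64" the prefix is 64, so
+        # offset = 9 + 64 // 2 = 41. reverse_dns of the network address consists
+        # of 32 dot-separated nibbles plus "ip6.arpa."; keeping the last 41
+        # characters leaves the 16 prefix nibbles plus the suffix, i.e.
+        # "0.0.0.0.0.0.0.0.8.b.d.0.1.0.0.2.ip6.arpa." before the trailing dot
+        # is stripped below.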
+ + result = reverse_ip[-offset:] if offset else reverse_ip + + if result.endswith("."): + result = result[:-1] + + except Exception as e: + module.log(msg=f" => ERROR: {e}") + result = None + + if not result: + module.log( + f" PROBLEM: {network} is neither a valid IPv4 nor a valid IPv6 network." + ) + + return result + + +def reverse_zone_names_OLD(module, network): + """ """ + module.log(f"reverse_zone_names({network})") + + # ---------------------------------------------------- + reverse_ip = None + + if is_valid_ipv4(network): + + # module.log(f" + + reverse_ip = ".".join(network.replace(network + ".", "").split(".")[::-1]) + # reverse_ip = ".".join(ip.split(".")[::-1]) + + result = f"{reverse_ip}.in-addr.arpa" + + else: + try: + _offset = None + if network.count("/") == 1: + _prefix = network.split("/")[1] + _offset = int(9 + int(_prefix) // 2) + # module.log(msg=f" - {_prefix} - {_offset}") + + _network = netaddr.IPNetwork(str(network)) + _prefix = _network.prefixlen + _ipaddress = netaddr.IPAddress(_network) + reverse_ip = _ipaddress.reverse_dns + + if _offset: + result = reverse_ip[-_offset:] + + if result[-1] == ".": + result = result[:-1] + + except Exception as e: + module.log(msg=f" => ERROR: {e}") + pass + + if not result: + module.log( + msg=f" PROBLEM: {network} is neither a valid IPv4 nor a valid IPv6 network." + ) + + # module.log(msg=f" = '{result}'") + + return result diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/plugins/modules/bind_version.py b/ansible/playbooks/collections/ansible_collections/bodsch/dns/plugins/modules/bind_version.py new file mode 100644 index 0000000..09a8300 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/plugins/modules/bind_version.py @@ -0,0 +1,156 @@ +#!/usr/bin/python3 +# -*- coding: utf-8 -*- + +# (c) 2021, Bodo Schulz + +from __future__ import absolute_import, division, print_function + +import re + +from ansible.module_utils.basic import AnsibleModule + +# --------------------------------------------------------------------------------------- + +DOCUMENTATION = """ +module: bind_version +version_added: 0.9.0 +author: "Bodo Schulz (@bodsch) " + +short_description: return the version of installed bind +description: return the version of installed bind + +options: + validate_version: + description: check against the installed version. 
+    type: str
+    required: false
+
+"""
+
+EXAMPLES = r"""
+- name: detect bind version
+  become: true
+  bodsch.dns.bind_version:
+  register: bind_version
+  check_mode: false
+  ignore_errors: true
+
+- name: detect bind version
+  become: true
+  bodsch.dns.bind_version:
+    validate_version: '9.18.0'
+  register: bind_version
+  check_mode: false
+  ignore_errors: true
+"""
+
+RETURN = """
+"""
+
+# ---------------------------------------------------------------------------------------
+
+
+class BindVersion(object):
+    """
+    Main Class
+    """
+
+    module = None
+
+    def __init__(self, module):
+        """
+        Initialize all needed Variables
+        """
+        self.module = module
+
+        self.validate_version = module.params.get("validate_version")
+        self.named_bin = module.get_bin_path("named", False)
+
+    def run(self):
+        """
+        runner
+        """
+        result = dict(rc=127, failed=True, changed=False, full_version="unknown")
+
+        if not self.named_bin:
+            return dict(rc=0, failed=False, changed=False, msg="no named installed")
+
+        rc, out, err = self._exec(["-v"])
+
+        if rc == 0:
+            _failed = True
+            msg = "unknown message"
+
+            #
+            # named -v
+            # BIND 9.18.19-1~deb12u1-Debian (Extended Support Version)
+            pattern = re.compile(
+                r"^BIND (?P<version>(?P<major>\d+)\.(?P<minor>\d+)\.(?P<patch>\*|\d+)).*"
+            )
+            version = re.search(pattern, out)
+            if version:
+                version_full_string = version.group("version")
+                version_major_string = version.group("major")
+                version_minor_string = version.group("minor")
+                version_patch_string = version.group("patch")
+
+                if self.validate_version:
+                    if version_full_string == self.validate_version:
+                        _failed = False
+                        msg = f"version {self.validate_version} successfully installed."
+                    else:
+                        _failed = True
+                        msg = f"version {self.validate_version} is not installed."
+                else:
+                    _failed = False
+                    msg = "named is installed."
+
+                result = dict(
+                    failed=_failed,
+                    rc=0,
+                    msg=msg,
+                    full_version=version_full_string,
+                    version=dict(
+                        major=int(version_major_string),
+                        minor=int(version_minor_string),
+                        patch=int(version_patch_string),
+                    ),
+                    executable=self.named_bin,
+                )
+
+        return result
+
+    def _exec(self, args):
+        """ """
+        cmd = [self.named_bin] + args
+
+        rc, out, err = self.module.run_command(cmd, check_rc=True)
+        # self.module.log(msg=" rc : '{}'".format(rc))
+        # self.module.log(msg=" out: '{}' ({})".format(out, type(out)))
+        # self.module.log(msg=" err: '{}'".format(err))
+        return rc, out, err
+
+
+# ===========================================
+# Module execution.
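+# Illustrative note: the version regex above turns the line
+#   "BIND 9.18.19-1~deb12u1-Debian (Extended Support Version)"
+# into version="9.18.19", major="9", minor="18", patch="19", which run()
+# converts to the structured `version` result returned to Ansible.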
+# + + +def main(): + + module = AnsibleModule( + argument_spec=dict(validate_version=dict(required=False, type="str")), + supports_check_mode=True, + ) + + icinga = BindVersion(module) + result = icinga.run() + + module.log(msg="= result: {}".format(result)) + + module.exit_json(**result) + + +# import module snippets +if __name__ == "__main__": + main() diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/plugins/modules/bind_zone_data.py b/ansible/playbooks/collections/ansible_collections/bodsch/dns/plugins/modules/bind_zone_data.py new file mode 100644 index 0000000..0dadd99 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/plugins/modules/bind_zone_data.py @@ -0,0 +1,281 @@ +#!/usr/bin/python3 +# -*- coding: utf-8 -*- + +# (c) 2023, Bodo Schulz + +from __future__ import absolute_import, division, print_function + +import os +import re + +import netaddr +from ansible.module_utils.basic import AnsibleModule + +# --------------------------------------------------------------------------------------- + +DOCUMENTATION = """ +module: bind_zone_data +version_added: 0.9.0 +author: "Bodo Schulz (@bodsch) " + +short_description: TBD +description: TBD + +options: + zone_directory: + description: [] + type: str + required: true + zone_data: + description: [] + type: raw + required: true +""" + +EXAMPLES = r""" +""" + +RETURN = """ +""" + +# --------------------------------------------------------------------------------------- + + +class BindZoneData(object): + """ + Main Class + """ + + module = None + + def __init__(self, module): + """ """ + self.module = module + + self.zone_directory = module.params.get("zone_directory") + self.zone_data = module.params.get("zone_data") + + self.ipv6 = False + + def run(self): + """ + runner + """ + forward_zones = self.define_zone_forward_names() + reverse_zones = self.define_zone_reverse_names() + reverse_zones += self.define_zone_reverse_names(ipv6=True) + + # self.module.log(msg=f" forward_zones: '{forward_zones}'") + # self.module.log(msg=f" reverse_zones: '{reverse_zones}'") + + forward_data = self.forward_zone_data(forward_zones) + reverse_data = self.reverse_zone_data(reverse_zones) + # reverse_data = self.reverse_zone_data(reverse_zones) + + result = dict(zone_data=dict(forward=forward_data, reverse=reverse_data)) + + return result + + def forward_zone_data(self, forward_zones): + """ """ + self.module.log(msg=f"forward_zone_data({forward_zones})") + result = [] + for name in forward_zones: + self.module.log(msg=f" - '{name}'") + + res = {} + res[name] = {} + + hash, serial = self.read_zone_file(name) + + res[name] = dict(filename=str(name), sha256=str(hash), serial=str(serial)) + + result.append(res) + + self.module.log(msg=f" = '{result}'") + + return result + + def reverse_zone_data(self, reverse_zones): + """ """ + self.module.log(msg=f"reverse_zone_data({reverse_zones})") + + result = [] + for name in reverse_zones: + self.module.log(msg=f" - '{name}'") + + filename = self.reverse_zone_names(name) + + res = {} + res[name] = {} + + hash, serial = self.read_zone_file(filename) + + res[name] = dict( + filename=str(filename), + sha256=str(hash), + serial=str(serial), + network=str(name), + ) + + result.append(res) + + self.module.log(msg=f" = '{result}'") + + return result + + def read_zone_file(self, zone_file): + """ """ + # self.module.log(msg=f"read_zone_file({zone_file})") + # line = None + hash = None + serial = None + _file_name = os.path.join(self.zone_directory, zone_file) + + # 
self.module.log(msg=f" zone_directory: '{self.zone_directory}'") + # self.module.log(msg=f" zone_file : '{zone_file}'") + # self.module.log(msg=f" file_name : '{_file_name}'") + # self.module.log(msg=f" : '{os.path.join(self.zone_directory, _file_name)}'") + + if os.path.exists(_file_name): + with open(_file_name, "r") as f: + # zone_data = f.readlines() + # read first 4 lines from file + zone_data = [next(f) for _ in range(5)] + # self.module.log(msg=f" : {zone_data}") + pattern = re.compile( + r"; Hash:.*(?P[0-9A-Za-z]{64}) (?P[0-9]+)", + re.MULTILINE, + ) + + # find regex in list + # [0] # Read Note + _list = list(filter(pattern.match, zone_data)) + + if isinstance(_list, list) and len(_list) > 0: + line = _list[0].strip() + if len(line) > 0: + arr = line.split(" ") + hash = arr[2] + serial = arr[3] + + self.module.log(msg=f"= hash: {hash}, serial: {serial}") + + return (hash, serial) + + def define_zone_forward_names(self): + """ """ + return [ + x.get("name") + for x in self.zone_data + if x.get("state", "present") and x.get("create_forward_zones", True) + ] + + def define_zone_reverse_names(self, ipv6=False): + """ """ + # self.module.log(msg=f"define_zone_reverse_names({ipv6})") + + networks = [] + + if not ipv6: + networks = [ + x.get("networks", []) + for x in self.zone_data + if x.get("state", "present") and x.get("create_reverse_zones", True) + ] + else: + networks = [ + x.get("ipv6_networks", []) + for x in self.zone_data + if x.get("state", "present") and x.get("create_reverse_zones", True) + ] + + # self.module.log(msg=f" - {networks} (type(networks))") + if networks: + # flatten list of lists + networks = [x for row in networks for x in row] + else: + networks = [] + + # self.module.log(msg=f" = {networks}") + return networks + + def define_zone_networks(self): + """ """ + networks = [x.get("networks") for x in self.zone_data] + # flatten list of lists + return [x for row in networks for x in row] + + def reverse_zone_names(self, network): + """ """ + # self.module.log(msg=f"reverse_zone_names({network})") + + # ---------------------------------------------------- + from ansible_collections.bodsch.dns.plugins.module_utils.network_type import ( + is_valid_ipv4, + ) + + reverse_ip = None + + if is_valid_ipv4(network): + reverse_ip = ".".join(network.replace(network + ".", "").split(".")[::-1]) + # reverse_ip = ".".join(ip.split(".")[::-1]) + + result = f"{reverse_ip}.in-addr.arpa" + + else: + try: + _offset = None + if network.count("/") == 1: + _prefix = network.split("/")[1] + _offset = int(9 + int(_prefix) // 2) + # self.module.log(msg=f" - {_prefix} - {_offset}") + + _network = netaddr.IPNetwork(str(network)) + _prefix = _network.prefixlen + _ipaddress = netaddr.IPAddress(_network) + reverse_ip = _ipaddress.reverse_dns + + if _offset: + result = reverse_ip[-_offset:] + + if result[-1] == ".": + result = result[:-1] + + except Exception as e: + self.module.log(msg=f" => ERROR: {e}") + pass + + if not result: + self.module.log( + msg=f" PROBLEM: {network} is neither a valid IPv4 nor a valid IPv6 network." 
+ ) + + # self.module.log(msg=f" = '{result}'") + + return result + + +def main(): + + arguments = dict( + zone_directory=dict(required=True, type="str"), + zone_data=dict(required=True, type="raw"), + ) + + module = AnsibleModule( + argument_spec=arguments, + supports_check_mode=True, + ) + + icinga = BindZoneData(module) + result = icinga.run() + + module.log(msg=f"= result: {result}") + + module.exit_json(**result) + + +# import module snippets +if __name__ == "__main__": + main() diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/plugins/modules/bind_zone_hash.py b/ansible/playbooks/collections/ansible_collections/bodsch/dns/plugins/modules/bind_zone_hash.py new file mode 100644 index 0000000..159373e --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/plugins/modules/bind_zone_hash.py @@ -0,0 +1,232 @@ +#!/usr/bin/python3 +# -*- coding: utf-8 -*- + +# (c) 2023, Bodo Schulz + +from __future__ import absolute_import, division, print_function + +import os +import re + +import netaddr +from ansible.module_utils.basic import AnsibleModule + +# --------------------------------------------------------------------------------------- + +DOCUMENTATION = """ +module: bind_zone_hash +version_added: 0.9.0 +author: "Bodo Schulz (@bodsch) " + +short_description: TBD +description: TBD + +options: + zone_directory: + description: [] + type: str + required: true + zone_file: + description: [] + type: str + required: false + zone_data: + description: [] + type: raw + required: true + reverse_zone: + description: [] + type: bool + required: false + default: false + networks: + description: [] + type: raw + required: false + ipv6: + description: [] + type: bool + required: false + default: false + +""" + +EXAMPLES = r""" +""" + +RETURN = """ +""" + +# --------------------------------------------------------------------------------------- + + +class BindZoneHash(object): + """ + Main Class + """ + + module = None + + def __init__(self, module): + """ """ + self.module = module + + self.zone_directory = module.params.get("zone_directory") + self.zone_file = module.params.get("zone_file") + self.zone_data = module.params.get("zone_data") + self.reverse_zone = module.params.get("reverse_zone") + self.networks = module.params.get("networks") + self.ipv6 = module.params.get("ipv6") + + # self.module.log(msg=f"{self.networks}") + + def run(self): + """ + runner + """ + _hash = [] + + if self.reverse_zone: + zone_files = self.define_zone_networks() + else: + zone_files = self.define_zone_names() + + for name in zone_files: + self.module.log(msg=f" - '{name}'") + + if self.reverse_zone: + network = name + name = self.reverse_zone_names(name) + + line = self.read_zone_file(name) + + if not self.reverse_zone: + _hash.append(dict(name=str(name), hash=line)) + else: + _hash.append(dict(name=str(name), hash=line, network=str(network))) + + self.module.log(msg=f" = '{_hash}'") + + result = dict(failed=False, changed=False, hash=_hash) + + return result + + def read_zone_file(self, zone_file): + + # self.module.log(msg=f"read_zone_file({zone_file})") + + line = None + _file_name = os.path.join(self.zone_directory, zone_file) + + # self.module.log(msg=f"'{_file_name}'") + + if os.path.exists(self.zone_directory) and os.path.exists(_file_name): + with open(os.path.join(self.zone_directory, _file_name), "r") as f: + # read first 4 lines from file + zone_data = f.readlines() + # zone_data = [next(f) for _ in range(14)] + + # self.module.log(msg=f" - '{zone_data}'") + + 
+                pattern = re.compile(
+                    r"; Hash:.*(?P<hash>[0-9A-Za-z]{64}) (?P<serial>[0-9]+)",
+                    re.MULTILINE,
+                )
+
+                # find regex in list
+                _list = list(filter(pattern.match, zone_data))
+
+                self.module.log(msg=f" => '{_list}'")
+
+                if isinstance(_list, list) and len(_list) > 0:
+                    line = _list[0].strip()
+
+        return line
+
+    def define_zone_names(self):
+        """ """
+        return [x.get("name") for x in self.zone_data]
+
+    def define_zone_networks(self):
+        """ """
+        networks = [x.get("networks") for x in self.zone_data]
+        # flatten list of lists
+        return [x for row in networks for x in row]
+
+    def reverse_zone_names(self, network):
+        """ """
+        result = None
+        # create reverse names
+        if not self.ipv6:
+            result = (
+                ".".join(network.replace(network + ".", "").split(".")[::-1])
+                + ".in-addr.arpa"
+            )
+        else:
+            # (item.1 | ansible.utils.ipaddr('revdns'))[-(9+(item.1|regex_replace('^.*/','')|int)//2):-1] }}
+            _network = netaddr.IPNetwork(str(network))
+            _ipaddress = netaddr.IPAddress(_network)
+            result = _ipaddress.reverse_dns
+
+        self.module.log(msg=f" = '{result}'")
+
+        return result
+
+
+def main():
+
+    arguments = dict(
+        zone_directory=dict(required=True, type="str"),
+        zone_file=dict(required=False, type="str"),
+        zone_data=dict(required=True, type="raw"),
+        reverse_zone=dict(required=False, type="bool", default=False),
+        networks=dict(required=False, type="raw"),
+        ipv6=dict(default=False, type="bool"),
+    )
+
+    module = AnsibleModule(
+        argument_spec=arguments,
+        supports_check_mode=True,
+    )
+
+    bind_zone_hash = BindZoneHash(module)
+    result = bind_zone_hash.run()
+
+    module.log(msg=f"= result: {result}")
+
+    module.exit_json(**result)
+
+
+# import module snippets
+if __name__ == "__main__":
+    main()
diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/plugins/modules/kdig.py b/ansible/playbooks/collections/ansible_collections/bodsch/dns/plugins/modules/kdig.py
new file mode 100644
index 0000000..747e08b
--- /dev/null
+++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/plugins/modules/kdig.py
@@ -0,0 +1,510 @@
+#!/usr/bin/python3
+# -*- coding: utf-8 -*-
+
+# (c) 2022, Bodo Schulz
+# Apache-2.0 (see LICENSE or https://opensource.org/licenses/Apache-2.0)
+
+from __future__ import absolute_import, division, print_function
+
+import hashlib
+import json
+import os
+import re
+import tempfile
+import time
+from typing import Any, Dict, List, Mapping, Optional, Protocol, Sequence, Tuple
+
+from ansible.module_utils.basic import AnsibleModule
+
+# ---------------------------------------------------------------------------------------
+
+DOCUMENTATION = r"""
+---
+module: kdig
+short_description: Maintain a trusted DNSKEY file (trust anchor) using kdig
+version_added: "0.9.0"
+author:
+  - Bodo Schulz (@bodsch)
+
+description:
+  - Queries DNSKEY records for the root zone (C(.)) using C(kdig).
+  - Filters DNSKEY records by the configured flag value (typically C(257) for KSK).
+  - Writes the matching records to a trust key file in a deterministic order to ensure idempotency.
+ - Persists a SHA-256 checksum and only rewrites the trust key file when the checksum changes. + - If the trust key file already exists and needs to be updated, it is renamed with a timestamp suffix as a backup. + +options: + root_dns: + description: + - Root DNS server to query (kdig target after C(@)). + - Use any root server name or IP (for example C(k.root-servers.net) or C(198.41.0.4)). + type: str + required: false + default: k.root-servers.net + + signing_key: + description: + - DNSKEY flag value to select from the response. + - Typical values are C(257) (KSK) or C(256) (ZSK), depending on your use case. + type: int + required: false + default: 257 + + trust_keyfile: + description: + - Path to the trusted DNSKEY file to manage. + - The file content will be replaced atomically when updates are required. + type: str + required: false + default: /etc/trusted-key.key + + parameters: + description: + - Additional parameters appended to the C(kdig) invocation. + - Use this to pass transport and formatting options (for example C(+tcp), C(+timeout=2), C(+retry=3), C(+json)). + - JSON output (C(+json)) is preferred for robust parsing; if not provided, the module will attempt JSON first and fall back to text parsing. + type: list + elements: str + required: false + +notes: + - Check mode is supported. + - The checksum file path is fixed to C(/etc/.trusted-key.key.checksum) by the module implementation. + - Requires the C(kdig) executable on the target host. + +requirements: + - kdig (Knot DNS utilities) +""" + +EXAMPLES = r""" +- name: Maintain root trust anchor file using default root server + bodsch.dns.kdig: + trust_keyfile: /etc/trusted-key.key + +- name: Prefer JSON output and use TCP with custom timeouts + bodsch.dns.kdig: + root_dns: k.root-servers.net + signing_key: 257 + trust_keyfile: /etc/trusted-key.key + parameters: + - +json + - +tcp + - +timeout=2 + - +retry=3 + +- name: Use a specific root server IP + bodsch.dns.kdig: + root_dns: 198.41.0.4 + trust_keyfile: /etc/trusted-key.key +""" + +RETURN = r""" +changed: + description: + - Whether the trust key file was updated. + returned: always + type: bool + +failed: + description: + - Indicates failure (for example, kdig is missing, kdig execution failed, or no matching DNSKEY records were found). + returned: always + type: bool + +msg: + description: + - Human readable status or error message. + returned: always + type: str + sample: + - "/etc/trusted-key.key successfully updated" + - "/etc/trusted-key.key is up-to-date" + - "no installed kdig found" + - "No DNSKEY records with flags 257 found in kdig output." + +rc: + description: + - Return code when the module fails before running normal processing (for example missing kdig). + returned: sometimes + type: int + sample: 1 +""" + + +# --------------------------------------------------------------------------------------- + + +class AnsibleModuleLike(Protocol): + """Typing contract for the minimal AnsibleModule API used by this helper. + + The real AnsibleModule provides a much larger surface. This protocol intentionally + declares only the attributes and methods that are accessed by this module, to + improve static type checking without coupling to Ansible internals. + """ + + params: Mapping[str, Any] + check_mode: bool + + def get_bin_path(self, arg: str, required: bool = False) -> Optional[str]: + ... + + def run_command( + self, args: Sequence[str], check_rc: bool = True + ) -> Tuple[int, str, str]: + ... + + def log(self, msg: str = "", **kwargs: Any) -> None: + ... 
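+# A minimal sketch of how the protocol above enables testing: any object that
+# provides the declared attributes and methods satisfies the type, so the Kdig
+# helper below can be exercised without a real AnsibleModule. The FakeModule
+# name and the parameter values are illustrative, not part of the collection:
+#
+#     class FakeModule:
+#         params = {"root_dns": "k.root-servers.net", "signing_key": 257,
+#                   "trust_keyfile": "/tmp/trusted-key.key", "parameters": []}
+#         check_mode = True
+#
+#         def get_bin_path(self, arg, required=False):
+#             return "/usr/bin/kdig"
+#
+#         def run_command(self, args, check_rc=True):
+#             return 0, "", ""
+#
+#         def log(self, msg="", **kwargs):
+#             pass
+#
+#     result = Kdig(FakeModule()).run()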
+ + +class Kdig(object): + """Maintain a trusted DNSKEY file using `kdig`. + + The module queries DNSKEY records (typically for the root zone) from a configured + root server, selects the records matching `signing_key` (e.g. 257 for KSK), and + writes them to `trust_keyfile` in a canonical, deterministic order. + + To support idempotent Ansible runs, a SHA-256 checksum of the canonical content is + persisted in `trust_keyfile_checksum` and compared on subsequent runs. + + JSON output (`+json`) is preferred when available because it allows robust parsing. + The implementation falls back to parsing textual `+answer` output for older kdig + builds that do not support JSON. + """ + + module = None + + def __init__(self, module: AnsibleModuleLike): + """Initialize helper state from Ansible module parameters. + + Args: + module: The Ansible module instance (or a compatible object) providing + parameter access, logging, and command execution. + """ + self.module = module + + self.module.log("Kdig::__init__()") + + self._kdig_bin = module.get_bin_path("kdig", True) + + self.root_dns = module.params.get("root_dns") + self.signing_key = module.params.get("signing_key") + self.trust_keyfile = module.params.get("trust_keyfile") + self.parameters = module.params.get("parameters") + + self.trust_keyfile_checksum = "/etc/.trusted-key.key.checksum" + + def run(self) -> Dict[str, Any]: + """Execute the DNSKEY query and update trust files if required. + + Workflow: + 1. Read the previously stored checksum (if present). + 2. Query DNSKEY records using `kdig` (prefer JSON mode). + 3. Extract and canonically sort all DNSKEY records with the configured + `signing_key`. + 4. Compute a SHA-256 checksum over the canonical content. + 5. If the checksum differs, back up the previous trust file and atomically + write the new trust file and checksum. + + Returns: + An Ansible-style result dictionary suitable for `exit_json()`. + """ + self.module.log("Kdig::run()") + + result: Dict[str, Any] = dict(failed=True, ansible_module_results="failed") + + _checksum = "" + _old_checksum = "" + + if not self._kdig_bin: + return dict(rc=1, failed=True, msg="no installed kdig found") + + if os.path.isfile(self.trust_keyfile_checksum): + with open(self.trust_keyfile_checksum, "r", encoding="utf-8") as fp: + # Normalize to avoid newline-related false positives. + _old_checksum = fp.readline().strip() + + self.module.log(f" - _old_checksum: {_old_checksum}") + + args: List[str] = [] + args.append(self._kdig_bin) + args.append("DNSKEY") + args.append(".") + args.append(f"@{self.root_dns}") + args.append("+noall") + args.append("+answer") + + params: List[str] = ( + [str(x) for x in self.parameters] + if isinstance(self.parameters, list) + else [] + ) + has_json = "+json" in params + + # Preferred execution: JSON output (deterministic parsing + sorting). + json_args = args + (params if has_json else (["+json"] + params)) + + # Fallback execution: plain +answer output (older kdig versions may not support +json). + text_args = args + ([p for p in params if p != "+json"] if has_json else params) + + rc, out, err = self._exec(json_args) + if rc != 0 and json_args != text_args: + self.module.log( + msg=f" - kdig JSON mode failed, retrying without +json: {err}" + ) + rc, out, err = self._exec(text_args) + + if rc == 0: + # Prefer JSON parsing. If JSON is not available (older kdig) or parsing fails, + # fall back to the textual regex approach. 
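+            # For orientation, a matching answer line (text mode) looks roughly
+            # like this (key material shortened, illustrative only):
+            #
+            #   .  86400  IN  DNSKEY  257 3 8 AwEAAa...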
+ matches = self._extract_dnskeys(out) + + if not matches: + return dict( + failed=True, + changed=False, + msg=f"No DNSKEY records with flags {self.signing_key} found in kdig output.", + ) + + # Canonical representation for stable checksums and file content. + dnskey = "\n".join(matches) + "\n" + + _checksum = self.__checksum(dnskey) + + self.module.log(f" - _checksum: {_checksum}") + + if _old_checksum != _checksum: + """ + rename old trust file + """ + self.module.log(" - changed ...") + + if self.module.check_mode: + return dict( + failed=False, + changed=True, + msg=f"{self.trust_keyfile} would be updated (check mode).", + ) + + if os.path.isfile(self.trust_keyfile): + date_string = time.strftime("%Y%m%d%H%M%S") + _trust_keyfile_backup = f"{self.trust_keyfile}_{date_string}" + + os.rename(self.trust_keyfile, _trust_keyfile_backup) + + self._atomic_write(self.trust_keyfile, dnskey) + + """ + persist checksum + """ + self._atomic_write(self.trust_keyfile_checksum, _checksum + "\n") + + result = dict( + failed=False, + changed=True, + msg=f"{self.trust_keyfile} successfully updated", + ) + else: + result = dict( + failed=False, + changed=False, + msg=f"{self.trust_keyfile} is up-to-date", + ) + + else: + result = dict( + failed=True, + changed=False, + msg=err or "kdig execution failed", + ) + + return result + + def _extract_dnskeys(self, out: str) -> List[str]: + """Extract matching DNSKEY records from kdig output. + + The function prefers kdig's JSON output (+json) and falls back to parsing the + textual +answer output for older kdig versions. + + Returns: + A canonical, deterministic list of DNSKEY record lines. + """ + obj = self._parse_kdig_json(out) + if obj is not None: + records = self._dnskeys_from_json(obj) + if records: + return records + return self._dnskeys_from_text(out) + + def _parse_kdig_json(self, out: str) -> Optional[Dict[str, Any]]: + """Parse kdig JSON output. + + kdig typically prints a single JSON object. Some builds may include non-JSON + noise; therefore we extract the first '{' .. last '}' block and parse it. + """ + start = out.find("{") + end = out.rfind("}") + if start == -1 or end == -1 or end <= start: + return None + + payload = out[start: end + 1] + try: + parsed = json.loads(payload) + except Exception: + return None + + return parsed if isinstance(parsed, dict) else None + + def _dnskeys_from_json(self, obj: Dict[str, Any]) -> List[str]: + """Extract matching DNSKEY records from a parsed kdig JSON object.""" + answers = obj.get("answerRRs") + if not isinstance(answers, list): + return [] + + extracted: List[Tuple[int, int, int, str, str]] = [] + # tuple: (flags, protocol, algorithm, key, rendered_line) + + for rr in answers: + if not isinstance(rr, dict): + continue + + if rr.get("TYPEname") != "DNSKEY": + continue + + rdata = rr.get("rdataDNSKEY") + if not isinstance(rdata, str): + continue + + parts = rdata.split() + if len(parts) < 4: + continue + + try: + flags = int(parts[0]) + protocol = int(parts[1]) + algorithm = int(parts[2]) + except ValueError: + continue + + if flags != int(self.signing_key): + continue + + name = rr.get("NAME") or "." + ttl = rr.get("TTL") + class_name = rr.get("CLASSname") or "IN" + type_name = rr.get("TYPEname") or "DNSKEY" + + ttl_str = str(ttl) if isinstance(ttl, int) else "0" + line = f"{name} {ttl_str} {class_name} {type_name} {rdata}".rstrip() + key = " ".join(parts[3:]) + + extracted.append((flags, protocol, algorithm, key, line)) + + # Deterministic order independent from server output order. 
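+        # The sort key is (flags, protocol, algorithm, key material); e.g. an
+        # (illustrative) "257 3 8 AwEAAa..." record sorts by 257, then 3, then 8,
+        # and finally by the base64 key, so equal responses always serialize
+        # identically regardless of the order the server returned them in.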
+        extracted.sort(key=lambda t: (t[0], t[1], t[2], t[3]))
+        return [t[4] for t in extracted]
+
+    def _dnskeys_from_text(self, out: str) -> List[str]:
+        """Extract matching DNSKEY records from textual kdig +answer output."""
+        pattern = re.compile(
+            r"(?P<key>^.*\sDNSKEY\s+{}\s+.*$)".format(self.signing_key), re.MULTILINE
+        )
+        matches = [m.group("key").rstrip() for m in re.finditer(pattern, out)]
+        matches.sort()
+        return matches
+
+    def _atomic_write(self, path: str, data: str) -> None:
+        """Atomically write text data to a file.
+
+        The content is written to a temporary file in the target directory and then
+        moved into place via `os.replace()`, ensuring readers never observe partial
+        writes.
+
+        Args:
+            path: Destination file path.
+            data: Text content to write.
+        """
+        self.module.log(f"Kdig::_atomic_write(path: {path}, data: {data})")
+
+        parent = os.path.dirname(path) or "."
+        os.makedirs(parent, exist_ok=True)
+
+        with tempfile.NamedTemporaryFile(
+            "w",
+            delete=False,
+            dir=parent,
+            encoding="utf-8"
+        ) as tf:
+            tf.write(data)
+            tmp_name = tf.name
+
+        os.replace(tmp_name, path)
+        os.chmod(path, 0o644)
+
+    def _exec(self, commands: List[str], check_rc: bool = True) -> Tuple[int, str, str]:
+        """Execute a command via Ansible's `run_command()`.
+
+        Args:
+            commands: The fully prepared argument vector.
+            check_rc: If True, Ansible will treat non-zero return codes as fatal.
+
+        Returns:
+            A tuple of `(rc, stdout, stderr)`.
+        """
+        self.module.log(f"Kdig::_exec(commands: {commands}, check_rc: {check_rc})")
+
+        rc, out, err = self.module.run_command(commands, check_rc=check_rc)
+
+        if rc != 0:
+            self.module.log(msg=f"  out: '{out}'")
+            self.module.log(msg=f"  err: '{err}'")
+
+        return rc, out, err
+
+    def __checksum(self, plaintext: str) -> str:
+        """Compute a SHA-256 checksum for the provided text.
+
+        Args:
+            plaintext: Canonical DNSKEY text representation.
+
+        Returns:
+            The hex-encoded SHA-256 digest.
+        """
+        self.module.log(f"Kdig::__checksum(plaintext: {plaintext})")
+
+        _bytes = plaintext.encode("utf-8")
+        _hash = hashlib.sha256(_bytes)
+        checksum = _hash.hexdigest()
+
+        return checksum
+
+
+def main():
+    """Ansible module entry point.
+
+    Parses module arguments, executes the helper, and returns the result via
+    `exit_json()`.
+ """ + module = AnsibleModule( + argument_spec=dict( + root_dns=dict(required=False, default="k.root-servers.net", type="str"), + signing_key=dict(required=False, default=257, type="int"), + trust_keyfile=dict( + required=False, default="/etc/trusted-key.key", type="str" + ), + parameters=dict(required=False, type="list"), + ), + supports_check_mode=True, + ) + + c = Kdig(module) + result = c.run() + + module.log(msg=f"= result: {result}") + + module.exit_json(**result) + + +# import module snippets +if __name__ == "__main__": + main() diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/plugins/modules/knot_zone.py b/ansible/playbooks/collections/ansible_collections/bodsch/dns/plugins/modules/knot_zone.py new file mode 100644 index 0000000..de8df2d --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/plugins/modules/knot_zone.py @@ -0,0 +1,334 @@ +#!/usr/bin/python3 +# -*- coding: utf-8 -*- + +# (c) 2020-2023, Bodo Schulz +# BSD 2-clause (see LICENSE or https://opensource.org/licenses/BSD-2-Clause) + +from __future__ import absolute_import, division, print_function + +import hashlib +import json +import os + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.bodsch.core.plugins.module_utils.directory import ( + create_directory, +) + +__metaclass__ = type + +ANSIBLE_METADATA = { + "metadata_version": "0.1", + "status": ["preview"], + "supported_by": "community", +} + +DOCUMENTATION = """ +module: knot_zone +version_added: 0.9.0 +author: "Bodo Schulz (@bodsch) " + +short_description: TBD +description: TBD + +options: + state: + description: + - Whether to install (C(present)), or remove (C(absent)) a package. + default: present + required: true + + zone: + description: [] + required: true + type: str + zone_ttl: + description: [] + required: true + type: int + zone_soa: + description: [] + required: true + type: dict + name_servers: + description: [] + required: true + type: dict + records: + description: [] + required: true + type: dict + debug: + description: [] + required: false + type: bool + default: false + database_path: + description: [] + required: true + type: str + owner: + description: [] + required: false + type: str + group: + description: [] + required: false + type: str + mode: + description: [] + required: false + type: str + default: 0666 + +""" + +EXAMPLES = r""" +""" + +RETURN = """ +""" + +# --------------------------------------------------------------------------------------- + + +class KnotZoneConfig(object): + """ """ + + module = None + + def __init__(self, module): + """ """ + self.module = module + + self.state = module.params.get("state") + # + self.zone = module.params.get("zone") + self.zone_ttl = module.params.get("zone_ttl") + self.zone_soa = module.params.get("zone_soa") + self.name_servers = module.params.get("name_servers") + self.records = module.params.get("records") + + self.database_path = module.params.get("database_path") + self.owner = module.params.get("owner") + self.group = module.params.get("group") + self.mode = module.params.get("mode") + + self.zone_path = f"{self.database_path}" + self.config_file = f"{self.zone_path}/{self.zone}.zone" + self.config_checksum = f"{self.zone_path}/.{self.zone}.checksum" + self.config_serial = f"{self.zone_path}/.{self.zone}.serial" + + # self.module.log(msg="---------------------------------------------") + # self.module.log(msg="zone_path : {}".format(self.zone_path)) + # self.module.log(msg="config_file : 
+        # self.module.log(msg="config_file    : {}".format(self.config_file))
+        # self.module.log(msg="config_checksum: {}".format(self.config_checksum))
+        # self.module.log(msg="config_serial  : {}".format(self.config_serial))
+        # self.module.log(msg="---------------------------------------------")
+
+    def run(self):
+        """
+        run
+        """
+        if self.state == "absent":
+            """
+            remove generated zone files
+            """
+            _changed = False
+            for f in [self.config_file, self.config_checksum, self.config_serial]:
+                if os.path.isfile(f):
+                    _changed = True
+                    os.remove(f)
+
+            return dict(changed=_changed, failed=False, msg="zone removed")
+
+        create_directory(directory=self.zone_path, mode="0750")
+
+        _checksum = ""
+        _old_checksum = ""
+        _data_changed = False
+
+        data = dict()
+
+        data.update(
+            {
+                "zone": self.zone,
+                "zone_ttl": self.zone_ttl,
+                "soa": self.zone_soa,
+                "name_servers": self.name_servers,
+                "records": self.records,
+            }
+        )
+
+        # self.module.log(msg="---------------------------------------------")
+        # self.module.log(msg="data : {}".format(json.dumps(data, sort_keys=True)))
+        # self.module.log(msg="---------------------------------------------")
+
+        _checksum = self.__checksum(json.dumps(data, sort_keys=True))
+
+        if os.path.isfile(self.config_checksum):
+            with open(self.config_checksum, "r") as fp:
+                _old_checksum = fp.readlines()[0]
+
+        if _old_checksum == _checksum:
+            return dict(
+                changed=False,
+                failed=False,
+                msg=f"zone file {self.zone} has no changes.",
+            )
+        else:
+            if os.path.isfile(self.config_file):
+                msg = f"zone file {self.zone} successfully updated."
+            else:
+                msg = f"zone file {self.zone} successfully created."
+
+            _data_changed = True
+
+            soa_serial = self.__zone_serial()
+            data["soa"]["serial"] = soa_serial
+
+            self.__write_template(self.config_file, data)
+
+            with open(self.config_serial, "w") as fp:
+                fp.write(soa_serial)
+
+            with open(self.config_checksum, "w") as fp:
+                fp.write(_checksum)
+
+        return dict(changed=_data_changed, failed=False, soa_serial=soa_serial, msg=msg)
+
+    def __zone_serial(self):
+        """ """
+        from datetime import datetime
+
+        now = datetime.now().strftime("%Y%m%d")
+        id = "01"
+
+        if os.path.isfile(self.config_serial):
+            with open(self.config_serial, "r") as fp:
+                _serial = fp.read()
+
+            # self.module.log(msg="serial : {}".format(_serial))
+            # self.module.log(msg="date   : {}".format(_serial[:-2]))
+            # self.module.log(msg="number : {}".format(_serial[8:]))
+
+            if now == _serial[:-2]:
+                id = int(_serial[8:])
+                id = "{0:02d}".format(id + 1)
+
+        _serial = f"{now}{id}"
+
+        # self.module.log(msg="serial : {}".format(_serial))
+
+        return _serial
+
+    def __checksum(self, plaintext):
+        """
+        create checksum from string
+        """
+        _bytes = plaintext.encode("utf-8")
+        _hash = hashlib.sha256(_bytes)
+
+        return _hash.hexdigest()
+
+    def __write_template(self, file_name, data):
+        """ """
+        tpl = """
+$ORIGIN {{ item.zone }}.
+$TTL {{ item.zone_ttl }}
+
+@         SOA     {{ item.soa.primary_dns }}. {{ item.soa.hostmaster }}. (
+                                         {{ item.soa.serial.ljust(2) }}          ; serial
+                                         {{ (item.soa.refresh | default('6h')).ljust(10) }} ; refresh
+                                         {{ (item.soa.retry | default('1h')).ljust(10) }} ; retry
+                                         {{ (item.soa.expire | default('1w')).ljust(10) }} ; expire
+                                         {{ item.soa.minimum | default('1d') }})          ; minimum
+
+{% if item.name_servers | count > 0 %}
+  {% for k, v in item.name_servers.items() %}
+{{ (v.ttl | default('3600')).ljust(10).rjust(42) }} {{ "NS".ljust(19) }} {{ (k + '.') }}
+{{ (k + '.').ljust(30) }} {{ (v.ttl | default('3600')).ljust(10) }} {{ "A".ljust(19) }} {{ v.ip }}
+  {% endfor %}
+{% endif %}
+
+{% if item.records | count > 0 %}
+  {% for k, v in item.records.items() %}
+    {% if v.description is defined and v.description | length != 0 %}
+;; {{ v.description }}
+    {% endif %}
+    {% if k == '@' %}
+      {% set source = item.zone %}
+    {% else %}
+      {% set source = v %}
+    {% endif -%}
+    {% if v.type == 'A' %}
+{{ (k + '.').ljust(30) }} {{ (v.ttl | default('3600')).ljust(10) }} {{ v.type.ljust(20) }} {{ v.ip }}
+      {% if v.aliases is defined and v.aliases | count > 0 %}
+        {% for a in v.aliases %}
+          {% set _source = a + '.' + item.zone %}
+          {% set _type = 'CNAME' %}
+{{ (_source + '.').ljust(30) }} {{ (v.ttl | default(item.zone_ttl) | string).ljust(10) }} {{ _type.ljust(20) }} {{ (k + '.') }}
+        {% endfor %}
+      {% endif %}
+    {% endif %}
+    {% if v.type == 'CNAME' %}
+{{ (k + '.').ljust(30) }} {{ (v.ttl | default('3600')).ljust(10) }} {{ v.type.ljust(20) }} {{ v.target }}.
+    {% endif %}
+    {% if v.type == 'TXT' %}
+{{ (source + '.').ljust(30) }} {{ (v.ttl | default('3600')).ljust(10) }} {{ v.type.ljust(20) }} "{{ v.text }}"
+    {% endif %}
+    {% if v.type == 'SRV' %}
+{{ (k + '.').ljust(30) }} {{ (v.ttl | default('3600')).ljust(10) }} {{ v.type.ljust(20) }} {{ v.priority }} {{ v.weight }} {{ v.port }} {{ v.target }}.
+    {% endif %}
+  {% endfor %}
+{% endif %}
+
+"""
+        from jinja2 import Template
+
+        tm = Template(tpl, trim_blocks=True, lstrip_blocks=True)
+        d = tm.render(item=data)
+
+        with open(file_name, "w") as fp:
+            fp.write(d)
+
+        return True
+
+
+# ---------------------------------------------------------------------------------------
+# Module execution.
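+# The serial written by __zone_serial() follows the common YYYYMMDDNN scheme:
+# the current date plus a two-digit counter that is bumped for every further
+# change on the same day. A sketch with illustrative values:
+#
+#     first change of the day  -> "2023102401"
+#     next change, same day    -> "2023102402"
+#     first change, next day   -> "2023102501"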
+# + + +def main(): + """ """ + args = dict( + state=dict(default="present", choices=["absent", "present"]), + # + zone=dict(required=True, type="str"), + zone_ttl=dict(required=True, type="int"), + zone_soa=dict(required=True, type="dict"), + name_servers=dict(required=True, type="dict"), + records=dict(required=True, type="dict"), + debug=dict(required=False, type="bool", default=False), + database_path=dict(required=True, type="str"), + owner=dict(required=False, type="str"), + group=dict(required=False, type="str"), + mode=dict(required=False, type="str", default="0666"), + ) + + module = AnsibleModule( + argument_spec=args, + supports_check_mode=True, + ) + + c = KnotZoneConfig(module) + result = c.run() + + module.exit_json(**result) + + +# import module snippets +if __name__ == "__main__": + main() diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/plugins/modules/pdns_mysql_backend.py b/ansible/playbooks/collections/ansible_collections/bodsch/dns/plugins/modules/pdns_mysql_backend.py new file mode 100644 index 0000000..1cfa4f5 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/plugins/modules/pdns_mysql_backend.py @@ -0,0 +1,286 @@ +#!/usr/bin/python3 +# -*- coding: utf-8 -*- + +# (c) 2021-2025, Bodo Schulz + +from __future__ import absolute_import, division, print_function + +import os + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.bodsch.dns.plugins.module_utils.database import Database + +# --------------------------------------------------------------------------------------- + +DOCUMENTATION = r""" +--- +module: pdns_mysql_backend +short_description: Ensure a PowerDNS MariaDB/MySQL backend schema is present (import schema if missing) +version_added: "0.9.0" +author: + - Bodo Schulz (@bodsch) + +description: + - Connects to a MariaDB/MySQL server and verifies the PowerDNS schema by checking for the C(domains) table. + - If the schema is missing, imports the given SQL schema file. + - Intended to bootstrap the PowerDNS gmysql backend schema. + - Note: While C(state=delete) is accepted by the argument spec, this module implementation only performs schema validation/import. + +options: + state: + description: + - Desired state. + - C(create) validates the schema and imports it if missing. + - C(delete) is currently not implemented for MariaDB/MySQL by this module code path. + type: str + default: create + choices: [create, delete] + + database: + description: + - Database connection parameters for MariaDB/MySQL. + type: dict + required: true + suboptions: + hostname: + description: + - Database hostname or IP. + type: str + required: false + port: + description: + - Database port. + type: int + required: false + default: 3306 + socket: + description: + - Path to the UNIX socket (optional, alternative to hostname/port). + type: str + required: false + config_file: + description: + - Optional client config file used by the underlying database helper. + type: str + required: false + schemaname: + description: + - Database/schema name to connect to. + type: str + required: false + login: + description: + - Login credentials. + type: dict + required: false + suboptions: + username: + description: + - Login user. + type: str + required: false + password: + description: + - Login password. + type: str + required: false + no_log: true + + schema_file: + description: + - Path to the SQL schema file to import if the PowerDNS schema is missing. 
+ type: str + required: true + + owner: + description: + - Optional compatibility parameter (not used directly by this module code path). + type: str + required: false + + group: + description: + - Optional compatibility parameter (not used directly by this module code path). + type: str + required: false + + mode: + description: + - Optional compatibility parameter (not used directly by this module code path). + type: str + default: "0644" + required: false + +notes: + - Check mode is supported. + +requirements: + - MariaDB/MySQL connectivity as provided by the collection's C(Database) helper utilities. +""" + +EXAMPLES = r""" +- name: Ensure PowerDNS schema exists in MariaDB and import if missing + become: true + bodsch.dns.pdns_mysql_backend: + state: create + database: + hostname: 127.0.0.1 + port: 3306 + schemaname: powerdns + login: + username: pdns + password: secret + schema_file: /usr/share/pdns/schema.mysql.sql + +- name: Use a UNIX socket and a client config file + become: true + bodsch.dns.pdns_mysql_backend: + state: create + database: + socket: /run/mysqld/mysqld.sock + config_file: /root/.my.cnf + schemaname: powerdns + schema_file: /usr/share/pdns/schema.mysql.sql +""" + +RETURN = r""" +changed: + description: + - Whether the module imported the schema. + returned: always + type: bool + +failed: + description: + - Indicates failure. + returned: always + type: bool + +msg: + description: + - Human readable status or error message. + returned: always + type: str + sample: + - "schema already present" + - "imported schema successfully" + - "connection failed:
" + +rc: + description: + - Return code used by the module implementation (may be absent depending on the executed code path). + returned: sometimes + type: int +""" + +# --------------------------------------------------------------------------------------- + + +class PdnsBackendMariadb(Database): + """ + Main Class + """ + + module = None + + def __init__(self, module): + """ + Initialize all needed Variables + """ + self.module = module + + self.state = module.params.get("state") + self.database = module.params.get("database") + self.schema_file = module.params.get("schema_file") + self.owner = module.params.get("owner") + self.group = module.params.get("group") + self.mode = module.params.get("mode") + + self.db_hostname = self.database.get("hostname", None) + self.db_port = self.database.get("port", 3306) + self.db_socket = self.database.get("socket", None) + self.db_config = self.database.get("config_file", None) + self.db_schemaname = self.database.get("schemaname", None) + self.db_login_username = self.database.get("login", {}).get("username", None) + self.db_login_password = self.database.get("login", {}).get("password", None) + + def run(self): + """ + runner + """ + result = dict(rc=127, failed=True, changed=False, full_version="unknown") + + result = self._mariadb() + + return result + + def _mariadb(self): + """ + mysql / mariadb support + """ + + valid, msg = self.validate() + + if not valid: + return dict(failed=True, msg=msg) + + self.db_credentials( + self.db_login_username, self.db_login_password, self.db_schemaname + ) + + db_connect_error, db_message = self.db_connect() + + if db_connect_error: + return dict(failed=True, msg=db_message) + + state, db_error, db_error_message = self.check_table_schema("domains") + + if state: + return dict(changed=False, msg=db_error_message) + + # import DB schema + if os.path.exists(self.schema_file): + """ """ + # file_name = os.path.basename(self.schema_file) + # self.module.log(msg=f"import schema from '{file_name}'") + + state, _msg = self.import_sqlfile( + sql_file=self.schema_file, + commit=True, + rollback=True, + close_cursor=False, + ) + + return dict(failed=False, changed=(not state), msg=_msg) + + +def main(): + + arguments = dict( + state=dict(default="create", choices=["create", "delete"]), + database=dict(required=True, type="dict"), + owner=dict(required=False, type="str"), + group=dict(required=False, type="str"), + mode=dict(required=False, type="str", default="0644"), + schema_file=dict( + required=True, + type="str", + ), + ) + + module = AnsibleModule( + argument_spec=arguments, + supports_check_mode=True, + ) + + r = PdnsBackendMariadb(module) + result = r.run() + + # module.log(msg="= result: {}".format(result)) + + module.exit_json(**result) + + +# import module snippets +if __name__ == "__main__": + main() diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/plugins/modules/pdns_sqlite_backend.py b/ansible/playbooks/collections/ansible_collections/bodsch/dns/plugins/modules/pdns_sqlite_backend.py new file mode 100644 index 0000000..1d0d3be --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/plugins/modules/pdns_sqlite_backend.py @@ -0,0 +1,268 @@ +#!/usr/bin/python3 +# -*- coding: utf-8 -*- + +# (c) 2021, Bodo Schulz + +from __future__ import absolute_import, division, print_function + +import os + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.bodsch.core.plugins.module_utils.directory import ( + create_directory, +) +from 
ansible_collections.bodsch.dns.plugins.module_utils.database import Database + +# --------------------------------------------------------------------------------------- + +DOCUMENTATION = r""" +--- +module: pdns_sqlite_backend +short_description: Create, delete or recreate a PowerDNS SQLite backend database and import the schema +version_added: "0.9.0" +author: + - Bodo Schulz (@bodsch) + +description: + - Manages a PowerDNS SQLite backend database file. + - Ensures the parent directory exists (created with mode C(0750)) before operating on the database. + - Imports the given SQL schema file when creating the database (implementation provided by the collection database helper). + - Can delete or recreate the database. + +options: + state: + description: + - Desired state of the SQLite backend. + - C(create) ensures the database exists and schema is present. + - C(delete) removes the database. + - C(recreate) deletes and recreates the database. + type: str + default: create + choices: [create, delete, recreate] + + database: + description: + - SQLite database definition. + - The module expects the database file path in C(database.database). + type: dict + required: true + suboptions: + database: + description: + - Path to the SQLite database file. + type: str + required: true + + schema_file: + description: + - Path to the SQL schema file to import when creating the database. + type: str + required: true + + owner: + description: + - Owner for the created directory/database (handled by collection helpers). + type: str + required: false + + group: + description: + - Group for the created directory/database (handled by collection helpers). + type: str + required: false + + mode: + description: + - File mode for the SQLite database file (octal string, e.g. C(0644)). + type: str + default: "0644" + required: false + +notes: + - Check mode is supported (C(supports_check_mode=true)). +""" + +EXAMPLES = r""" +- name: Create PowerDNS SQLite backend and import schema if needed + become: true + bodsch.dns.pdns_sqlite_backend: + state: create + database: + database: /var/lib/powerdns/pdns.sqlite3 + schema_file: /usr/share/pdns/schema.sqlite3.sql + owner: pdns + group: pdns + mode: "0640" + +- name: Recreate PowerDNS SQLite backend (drop + create) + become: true + bodsch.dns.pdns_sqlite_backend: + state: recreate + database: + database: /var/lib/powerdns/pdns.sqlite3 + schema_file: /usr/share/pdns/schema.sqlite3.sql + owner: pdns + group: pdns + mode: "0640" + +- name: Delete PowerDNS SQLite backend database file + become: true + bodsch.dns.pdns_sqlite_backend: + state: delete + database: + database: /var/lib/powerdns/pdns.sqlite3 + schema_file: /usr/share/pdns/schema.sqlite3.sql +""" + +RETURN = r""" +rc: + description: + - Return code used by the module implementation. + returned: always + type: int + sample: 0 + +changed: + description: + - Whether the module made changes (e.g. created/imported schema, deleted database, recreated database). + returned: always + type: bool + +failed: + description: + - Indicates failure. + returned: always + type: bool + +msg: + description: + - Human readable status or error message. + returned: always + type: str + sample: + - "Database successfully created." + - "Database successfully recreated." + - "The database has been successfully deleted." 
+""" + +# --------------------------------------------------------------------------------------- + + +class PdnsBackendSqlite(Database): + """ + Main Class + """ + + module = None + + def __init__(self, module): + """ + Initialize all needed Variables + """ + self.module = module + + self.module.log("PdnsBackendSqlite::__init__()") + + self.state = module.params.get("state") + self.database = module.params.get("database") + self.schema_file = module.params.get("schema_file") + self.owner = module.params.get("owner") + self.group = module.params.get("group") + self.mode = module.params.get("mode") + + def run(self): + """ + runner + """ + self.module.log("PdnsBackendSqlite::run()") + + result = dict(rc=127, failed=True, changed=False, full_version="unknown") + + res = [] + + if isinstance(self.database, list): + for db in self.database: + self.module.log(msg=f" db: '{db}'") + + dbname = db.get("database") + dirname = os.path.dirname(dbname) + + self.module.log(msg=f" dirname: '{dirname}'") + + create_directory( + directory=dirname, owner=self.owner, group=self.group, mode="0750" + ) + + result = self._sqlite(dbname) + + res.append(result) + elif isinstance(self.database, dict): + self.module.log(msg=f" self.database: '{self.database}'") + + dbname = self.database.get("database") + dirname = os.path.dirname(dbname) + + self.module.log(msg=f" dirname: '{dirname}'") + + create_directory( + directory=dirname, owner=self.owner, group=self.group, mode="0750" + ) + + result = self._sqlite(dbname) + + res.append(result) + + return result + + def _sqlite(self, dbname): + """ """ + self.module.log(msg=f"PdnsBackendSqlite::_sqlite({dbname})") + + if self.state == "create": + """ """ + return self.sqlite_create(dbname) + + elif self.state == "delete": + """ """ + return self.sqlite_remove(dbname) + + elif self.state == "recreate": + """ """ + self.sqlite_remove(dbname) + self.sqlite_create(dbname) + + return dict( + failed=False, changed=True, msg="Database successfully recreated." 
+ ) + + +def main(): + + arguments = dict( + state=dict(default="create", choices=["create", "delete", "recreate"]), + database=dict(required=True, type="dict"), + owner=dict(required=False, type="str"), + group=dict(required=False, type="str"), + mode=dict(required=False, type="str", default="0644"), + schema_file=dict( + required=True, + type="str", + ), + ) + + module = AnsibleModule( + argument_spec=arguments, + supports_check_mode=True, + ) + + r = PdnsBackendSqlite(module) + result = r.run() + + # module.log(msg="= result: {}".format(result)) + + module.exit_json(**result) + + +# import module snippets +if __name__ == "__main__": + main() diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/plugins/modules/pdns_version.py b/ansible/playbooks/collections/ansible_collections/bodsch/dns/plugins/modules/pdns_version.py new file mode 100644 index 0000000..1c320f5 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/plugins/modules/pdns_version.py @@ -0,0 +1,248 @@ +#!/usr/bin/python3 +# -*- coding: utf-8 -*- + +# (c) 2021, Bodo Schulz + +from __future__ import absolute_import, division, print_function + +import re + +from ansible.module_utils.basic import AnsibleModule + +# --------------------------------------------------------------------------------------- + +DOCUMENTATION = r""" +--- +module: pdns_version +short_description: Return the installed PowerDNS Authoritative Server version +version_added: "0.9.0" +author: + - Bodo Schulz (@bodsch) + +description: + - Executes C(pdns_server --version) on the target host and parses the version string. + - Optionally validates the installed version against a desired version string. + +options: + validate_version: + description: + - If set, the module validates the installed PowerDNS version against this value. + - The module fails when the installed version does not match. + type: str + required: false + +notes: + - Check mode is supported. + - If C(pdns_server) is not installed, the module returns C(failed=false) and C(msg="no pdns installed"). +""" + +EXAMPLES = r""" +- name: Detect PowerDNS version + become: true + bodsch.dns.pdns_version: + register: pdns_version + check_mode: false + ignore_errors: true + +- name: Validate PowerDNS version + become: true + bodsch.dns.pdns_version: + validate_version: "4.9.18" + register: pdns_version + check_mode: false + ignore_errors: true + +- name: Show parsed version + ansible.builtin.debug: + msg: "PowerDNS {{ pdns_version.full_version }} ({{ pdns_version.version.major }}.{{ pdns_version.version.minor }}.{{ pdns_version.version.patch }})" +""" + +RETURN = r""" +failed: + description: + - Indicates whether the module considers the result a failure. + - With C(validate_version) set, C(true) if the installed version does not match. + returned: always + type: bool + +changed: + description: + - Always C(false); the module is read-only. + returned: always + type: bool + +rc: + description: + - Return code used by the module implementation. + returned: always + type: int + sample: 0 + +msg: + description: + - Human readable status message. + returned: always + type: str + sample: + - "pdns is installed." + - "no pdns installed" + - "version 4.9.18 successful installed." + - "version 4.9.18 not installed." + +full_version: + description: + - Parsed version string as reported by C(pdns_server --version). + returned: when pdns_server is installed + type: str + sample: "4.9.18" + +version: + description: + - Parsed version components. 
+  returned: when pdns_server is installed
+  type: dict
+  contains:
+    major:
+      description: Major version.
+      type: int
+      returned: always
+    minor:
+      description: Minor version.
+      type: int
+      returned: always
+    patch:
+      description: Patch version.
+      type: int
+      returned: always
+
+executable:
+  description:
+    - Absolute path to the C(pdns_server) executable discovered on the target.
+  returned: always
+  type: str
+  sample: "/usr/sbin/pdns_server"
+"""
+
+
+# ---------------------------------------------------------------------------------------
+
+
+class PdnsVersion(object):
+    """
+    Main Class
+    """
+
+    module = None
+
+    def __init__(self, module):
+        """
+        Initialize all needed Variables
+        """
+        self.module = module
+
+        self.validate_version = module.params.get("validate_version")
+        self.pdns_bin = module.get_bin_path("pdns_server", False)
+
+    def run(self):
+        """
+        runner
+        """
+        result = dict(rc=127, failed=True, changed=False, full_version="unknown")
+
+        if not self.pdns_bin:
+            return dict(rc=0, failed=False, changed=False, msg="no pdns installed")
+
+        args = []
+        args.append(self.pdns_bin)
+        args.append("--version")
+
+        # self.module.log(msg=f"  args : '{args}'")
+
+        rc, out, err = self._exec(args)
+
+        _output = []
+        _output += out.splitlines()
+        _output += err.splitlines()
+
+        msg = "unknown message"
+
+        pattern = re.compile(
+            r".*PowerDNS Authoritative Server (?P<version>(?P<major>\d+)\.(?P<minor>\d+)\.(?P<patch>\*|\d+)).*"
+        )
+
+        version = next(
+            (m.groupdict() for s in _output if (m := pattern.search(s))), None
+        )
+
+        if version and isinstance(version, dict):
+            version_full_string = version.get("version")
+            version_major_string = version.get("major")
+            version_minor_string = version.get("minor")
+            version_patch_string = version.get("patch")
+
+            if self.validate_version:
+                if version_full_string == self.validate_version:
+                    _failed = False
+                    msg = f"version {self.validate_version} successful installed."
+                else:
+                    _failed = True
+                    msg = f"version {self.validate_version} not installed."
+            else:
+                _failed = False
+                msg = "pdns is installed."
+
+            result = dict(
+                failed=_failed,
+                rc=0,
+                msg=msg,
+                full_version=version_full_string,
+                version=dict(
+                    major=int(version_major_string),
+                    minor=int(version_minor_string),
+                    patch=int(version_patch_string),
+                ),
+                executable=self.pdns_bin,
+            )
+
+        return result
+
+    def _exec(self, commands):
+        """ """
+        rc, out, err = self.module.run_command(commands, check_rc=False)
+
+        return (rc, out, err)
+
+
+# ===========================================
+# Module execution.
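+# For illustration: assuming `pdns_server --version` emits a banner line such
+# as "PowerDNS Authoritative Server 4.9.18 ..." (the exact wording may differ
+# between builds), the pattern above captures:
+#
+#     {"version": "4.9.18", "major": "4", "minor": "9", "patch": "18"}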
+# + + +def main(): + + arguments = dict(validate_version=dict(required=False, type="str")) + + module = AnsibleModule( + argument_spec=arguments, + supports_check_mode=True, + ) + + r = PdnsVersion(module) + result = r.run() + + # module.log(msg="= result: {}".format(result)) + + module.exit_json(**result) + + +# import module snippets +if __name__ == "__main__": + main() diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/plugins/modules/pdns_zone_data.py b/ansible/playbooks/collections/ansible_collections/bodsch/dns/plugins/modules/pdns_zone_data.py new file mode 100644 index 0000000..1b9e9a6 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/plugins/modules/pdns_zone_data.py @@ -0,0 +1,660 @@ +#!/usr/bin/python3 +# -*- coding: utf-8 -*- + +# (c) 2023-2025, Bodo Schulz + +from __future__ import absolute_import, division, print_function + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.bodsch.core.plugins.module_utils.module_results import results +from ansible_collections.bodsch.dns.plugins.module_utils.pdns.config_loader import ( + PowerDNSConfigLoader, +) +from ansible_collections.bodsch.dns.plugins.module_utils.pdns.records import ( + build_ptr_rrsets_by_zone, +) +from ansible_collections.bodsch.dns.plugins.module_utils.pdns.utils import ( + build_rrset, + fqdn, + generate_serial, +) +from ansible_collections.bodsch.dns.plugins.module_utils.pdns.web_api import ( + PowerDNSWebApi, +) + +# --------------------------------------------------------------------------------------- + +DOCUMENTATION = r""" +--- +module: pdns_zone_data +short_description: Synchronize PowerDNS forward zones and optional reverse (PTR) zones via the PowerDNS API +version_added: "0.9.0" +author: + - Bodo Schulz (@bodsch) + +description: + - Reads PowerDNS configuration from the local PowerDNS configuration files to obtain API settings. + - For each forward zone in C(zone_data), ensures the zone exists (creates it if missing) and synchronizes all RRsets. + - Optionally creates and synchronizes reverse zones (IPv4/IPv6) and their PTR RRsets derived from the host definitions. + - Changes are applied via the PowerDNS HTTP API (PATCH). + +options: + zone_data: + description: + - List of zone definitions to manage. + - Each item describes a forward zone and its records, and can optionally trigger creation/sync of reverse PTR zones. + type: raw + required: true + +notes: + - Check mode is supported. + - Requires the C(pdnsutil) binary on the target host; if not found, the module returns C(changed=false) with a message. + - PowerDNS must have API enabled and reachable using the configuration loaded from the local PowerDNS configuration. + +requirements: + - PowerDNS authoritative server with API enabled. + - C(pdnsutil) installed on the target host. +""" + +EXAMPLES = r""" +- name: Manage a forward zone and records + bodsch.dns.pdns_zone_data: + zone_data: + - name: example.com + name_servers: + - ns1.example.com + - ns2.example.com + hosts: + - name: www + type: A + address: 203.0.113.10 + - name: api + type: A + address: 203.0.113.11 + records: + - name: "@" + type: MX + ttl: 3600 + content: + - "10 mail.example.com." 
+ - name: "@" + type: TXT + ttl: 3600 + content: + - "v=spf1 -all" + +- name: Manage forward zone and create reverse zones from hosts (PTR) + bodsch.dns.pdns_zone_data: + zone_data: + - name: example.com + name_servers: + - ns1.example.com + - ns2.example.com + create_reverse_zones: true + reverse_prefix_v4: 24 + reverse_prefix_v6: 64 + hosts: + - name: host1 + address: + - 192.0.2.10 + - "2001:db8::10" + - name: host2 + address: + - 192.0.2.11 + +- name: Use custom reverse SOA/NS timing values + bodsch.dns.pdns_zone_data: + zone_data: + - name: example.com + name_servers: + - ns1.example.com + create_reverse_zones: true + reverse_zone_ttl: 3600 + reverse_zone_refresh: 10800 + reverse_zone_retry: 3600 + reverse_zone_expire: 604800 + reverse_zone_minimum: 3600 + hosts: + - name: host1 + address: + - 192.0.2.10 +""" + +RETURN = r""" +changed: + description: + - Whether any zone (forward and/or reverse) was changed. + returned: always + type: bool + +failed: + description: + - Indicates failure (for example missing API configuration or API errors). + returned: always + type: bool + +msg: + description: + - Per-zone results list. Each element contains a dict mapping zone name to its result. + returned: always + type: list + elements: dict + sample: + - example.com: + failed: false + changed: true + msg: "zone succesfully updated." + - 2.0.192.in-addr.arpa: + failed: false + changed: false + msg: "reverse zone is up-to-date." + +rc: + description: + - Not returned by this module. + returned: never + type: int +""" + +# --------------------------------------------------------------------------------------- + + +class PdnsZoneData(object): + """ + Main Class + """ + + module = None + + def __init__(self, module): + """ """ + self.module = module + + self.module.log("PdnsZoneData::__init__()") + + self.zone_data = module.params.get("zone_data") + self._pdnsutil_bin = module.get_bin_path("pdnsutil", True) + + def run(self): + """ """ + self.module.log("PdnsZoneData::run()") + + if not self._pdnsutil_bin: + return dict(failed=False, changed=False, msg="no pdns installed.") + + cfg_invalid, pdns_cfg, msg = self.pdns_config_loader() + + if cfg_invalid: + return dict(failed=True, msg=msg) + + config = dict( + server_id=pdns_cfg.get("server-id", "localhost"), + api_key=pdns_cfg.get("api_key"), + webserver_address=pdns_cfg.get("webserver_address"), + webserver_port=pdns_cfg.get("webserver_port"), + ) + + pdns_api = PowerDNSWebApi(module=self.module, config=config) + + result_state = [] + + for d in self.zone_data: + """ """ + forward_zone = d.get("name") + nameservers = d.get("name_servers", []) + create_reverse_zones = bool(d.get("create_reverse_zones", False)) + reverse_prefix_v4 = d.get("reverse_prefix_v4", 24) + reverse_prefix_v6 = d.get("reverse_prefix_v6", 64) + + self.module.log(f" - {forward_zone}") + + res = {} + + # ----------------------------------------------------------------- + # forward zone + # ----------------------------------------------------------------- + zone_rrsets = {} + zone_data = pdns_api.zone_data(forward_zone) + + if zone_data: + zone_rrsets = pdns_api.extract_existing_rrsets(zone_data) + else: + # keine zone vorhanden + _ = self.create_zone(pdns_api, forward_zone, nameservers) + zone_data = pdns_api.zone_data(forward_zone) + if zone_data: + zone_rrsets = pdns_api.extract_existing_rrsets(zone_data) + + # prevent legacy PTR handling in PowerDNSWebApi.build_full_rrsets() + d_forward = dict(d) + d_forward["create_forward_zones"] = False + + zone_new_rrsets = 
pdns_api.build_full_rrsets(forward_zone, d_forward) + rrset_changes = pdns_api.compare_rrsets(zone_rrsets, zone_new_rrsets) + + if rrset_changes: + status_code, _, json_resp = pdns_api.patch_zone( + forward_zone, rrset_changes + ) + + if status_code in [200, 201, 204]: + res[forward_zone] = dict( + failed=False, changed=True, msg="zone succesfully updated." + ) + else: + res[forward_zone] = dict(failed=True, changed=False, msg=json_resp) + else: + res[forward_zone] = dict( + failed=False, changed=False, msg="zone is up-to-date." + ) + + # ----------------------------------------------------------------- + # reverse zones (PTR) + # ----------------------------------------------------------------- + if create_reverse_zones: + self.module.log(" create reverse zones") + + ptr_rrsets_by_zone = build_ptr_rrsets_by_zone( + forward_zone=forward_zone, + hosts=d.get("hosts", []), + prefix_v4=reverse_prefix_v4, + prefix_v6=reverse_prefix_v6, + comment="ansible automation", + ) + + for rev_zone, desired_ptr_rrsets in sorted(ptr_rrsets_by_zone.items()): + if not desired_ptr_rrsets: + continue + + res[rev_zone] = self._sync_reverse_zone( + pdns_api, + rev_zone=rev_zone, + forward_zone=forward_zone, + nameservers=nameservers, + desired_ptr_rrsets=desired_ptr_rrsets, + cfg=d, + ) + + result_state.append(res) + + _state, _changed, _failed, state, changed, failed = results( + self.module, result_state + ) + + result = dict(changed=_changed, failed=_failed, msg=result_state) + + return result + + def pdns_config_loader(self): + """ """ + self.module.log("PdnsZoneData::pdns_config_loader()") + + config_loader = PowerDNSConfigLoader(module=self.module) + pdns_cfg = config_loader.load() + + config_values = { + "api": pdns_cfg.get("api"), + "webserver": pdns_cfg.get("webserver"), + "api_key": pdns_cfg.get("api-key"), + "webserver_address": pdns_cfg.get("webserver-address"), + "webserver_port": pdns_cfg.get("webserver-port"), + } + + # self.module.log(msg=f" config_values: '{config_values}'") + + missing_keys = [key for key, value in config_values.items() if value is None] + + if missing_keys: + _keys = ", ".join(missing_keys) + + return (True, None, f"Missing configuration(s): {_keys}") + else: + return (False, config_values, "configuration are valid.") + + def create_zone(self, pdns_api, zone, nameservers): + self.module.log( + f"PdnsZoneData::create_zone(pdns_api={pdns_api}, zone={zone}, nameservers={nameservers})" + ) + + if isinstance(nameservers, list) and nameservers: + ns = nameservers[0] + else: + ns = "nsX" + + serial = generate_serial() + + # ns darf short oder fqdn sein – fqdn sauber lassen, sonst an zone hängen + if "." in ns: + mname = ns if ns.endswith(".") else f"{ns}." + else: + mname = f"{ns}.{zone}." + + rname = f"hostmaster.{zone}." + + soa = f"{mname} {rname} {serial} 3600 1800 604800 86400" + + changed = pdns_api.zone_primary( + zone=zone, + soa=soa, + nameservers=nameservers, + ttl=640, + comment="ansible automation", + wantkind="native", + ) + + return changed + + def _normalize_reverse_nameservers_OLD(self, forward_zone, nameservers): + """ """ + self.module.log( + f"PdnsZoneData::_normalize_reverse_nameservers(forward_zone={forward_zone}, nameservers={nameservers})" + ) + + if not isinstance(nameservers, list) or len(nameservers) == 0: + return [fqdn(forward_zone, "nsX")] + + normalized = [] + for ns in nameservers: + # absolute external NS? keep, just ensure trailing dot + if isinstance(ns, str) and ("." 
in ns) and (not ns.endswith(forward_zone)): + normalized.append(ns if ns.endswith(".") else f"{ns}.") + else: + normalized.append(fqdn(forward_zone, ns)) + + return normalized + + def _create_reverse_zone(self, pdns_api, reverse_zone, forward_zone, nameservers): + """ """ + self.module.log( + "PdnsZoneData::_create_reverse_zone(pdns_api=%s, reverse_zone=%s, forward_zone=%s, nameservers=%s)" + % (pdns_api, reverse_zone, forward_zone, nameservers) + ) + + if isinstance(nameservers, list) and len(nameservers) > 0: + primary_ns = str(nameservers[0]) + else: + primary_ns = fqdn(forward_zone, "nsX") + nameservers = [fqdn(forward_zone, "nsX")] + + if not primary_ns.endswith("."): + primary_ns = f"{primary_ns}." + + rname = fqdn(forward_zone, "hostmaster") + if not rname.endswith("."): + rname = f"{rname}." + + serial = generate_serial() + + soa = f"{primary_ns} {rname} {serial} 3600 1800 604800 86400" + self.module.log(f" SOA: {soa}") + + return pdns_api.zone_primary( + zone=reverse_zone, + soa=soa, + nameservers=nameservers, + ttl=640, + comment="ansible automation", + wantkind="native", + ) + + def _reverse_nameservers(self, forward_zone, nameservers): + """ + Reverse-Zonen müssen NS als FQDN der Forward-Zone bekommen + (nicht ns1.). + """ + self.module.log( + f"PdnsZoneData::_reverse_nameservers(forward_zone={forward_zone}, nameservers={nameservers})" + ) + + if not isinstance(nameservers, list) or not nameservers: + return [fqdn(forward_zone, "nsX")] + + out = [] + for ns in nameservers: + if not isinstance(ns, str) or not ns: + continue + # Wenn bereits FQDN (oder extern), nur trailing dot erzwingen + if "." in ns: + out.append(ns if ns.endswith(".") else f"{ns}.") + else: + out.append(fqdn(forward_zone, ns)) + return out + + def ensure_reverse_zone_soa_ns( + self, + pdns_api, + reverse_zone, + forward_zone, + nameservers, + ttl=3600, + refresh=10800, + retry=3600, + expire=604800, + minimum=3600, + ): + """ + Stellt SOA/NS für Reverse-Zonen korrekt ein: + SOA MNAME = erster NS aus Forward-Zone (FQDN) + SOA RNAME = hostmaster. 
(FQDN) + Serial = generate_serial(existing_serial) + """ + self.module.log( + f"PdnsZoneData::ensure_reverse_zone_soa_ns(pdns_api, reverse_zone, forward_zone, nameservers={nameservers}, ...)" + ) + + ns_fqdns = self._reverse_nameservers(forward_zone, nameservers) + primary_ns = ns_fqdns[0].rstrip(".") + rname = fqdn(forward_zone, "hostmaster").rstrip(".") + + zone_data = pdns_api.zone_data(reverse_zone) + existing_serial = ( + zone_data.get("serial") if isinstance(zone_data, dict) else None + ) + serial = generate_serial(existing_serial) + + soa = f"{primary_ns} {rname} {serial} {refresh} {retry} {expire} {minimum}" + + rrsets = [ + build_rrset(f"{reverse_zone}.", "SOA", ttl, [soa]), + build_rrset(f"{reverse_zone}.", "NS", ttl, ns_fqdns), + ] + + status_code, _, json_resp = pdns_api.patch_zone(reverse_zone, rrsets) + + if status_code not in [200, 201, 204]: + return dict(failed=True, changed=False, msg=json_resp) + + return dict(failed=False, changed=True, msg="reverse SOA/NS updated.") + + # ---------------------------------------------------------------------------------------------- + + @staticmethod + def _strip_dot(value): + return value[:-1] if isinstance(value, str) and value.endswith(".") else value + + def _normalize_reverse_nameservers(self, forward_zone, nameservers): + """Reverse-Zonen müssen NS als FQDN der Forward-Zone bekommen.""" + if not isinstance(nameservers, list) or len(nameservers) == 0: + return [fqdn(forward_zone, "nsX")] + + normalized = [] + for ns in nameservers: + if not isinstance(ns, str) or not ns: + continue + + # extern oder bereits fqdn -> nur trailing dot erzwingen + if ("." in ns) and (not ns.endswith(forward_zone)): + normalized.append(ns if ns.endswith(".") else f"{ns}.") + else: + normalized.append(fqdn(forward_zone, ns)) + + return normalized + + def _extract_existing_soa_serial(self, zone_fqdn, existing_rrsets, zone_data=None): + """Try SOA content first; fallback to zone_data.serial.""" + existing_rr = ( + existing_rrsets.get((zone_fqdn, "SOA")) if existing_rrsets else None + ) + if existing_rr: + records = existing_rr.get("records") or [] + if records: + parts = str(records[0]).split() + if len(parts) >= 3: + try: + return int(parts[2]) + except Exception: + pass + + if isinstance(zone_data, dict): + try: + serial = zone_data.get("serial") + return int(serial) if serial is not None else None + except Exception: + return None + + return None + + def _build_reverse_zone_soa_ns_rrsets( + self, + *, + reverse_zone, + forward_zone, + nameservers, + serial, + ttl, + refresh, + retry, + expire, + minimum, + comment="ansible automation", + ): + """Build desired SOA+NS rrsets for a reverse zone. + + PowerDNS expects SOA mname and rname as FQDNs with trailing dots. + """ + zone_fqdn = reverse_zone if reverse_zone.endswith(".") else f"{reverse_zone}." + + ns_fqdns = self._normalize_reverse_nameservers(forward_zone, nameservers) + + primary_ns = ns_fqdns[0] if ns_fqdns[0].endswith(".") else f"{ns_fqdns[0]}." + rname = fqdn(forward_zone, "hostmaster") + rname = rname if rname.endswith(".") else f"{rname}." 
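+ # SOA rdata field order: MNAME RNAME SERIAL REFRESH RETRY EXPIRE MINIMUM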
+ + soa = f"{primary_ns} {rname} {int(serial)} {int(refresh)} {int(retry)} {int(expire)} {int(minimum)}" + + return [ + build_rrset(zone_fqdn, "SOA", int(ttl), [soa], comment=comment), + build_rrset(zone_fqdn, "NS", int(ttl), ns_fqdns, comment=comment), + ] + + def _sync_reverse_zone( + self, + pdns_api, + *, + rev_zone, + forward_zone, + nameservers, + desired_ptr_rrsets, + cfg, + ): + """One reverse-zone sync: SOA/NS + PTR, with serial bump only on changes.""" + ttl = int(cfg.get("reverse_zone_ttl", 3600)) + refresh = int(cfg.get("reverse_zone_refresh", 10800)) + retry = int(cfg.get("reverse_zone_retry", 3600)) + expire = int(cfg.get("reverse_zone_expire", 604800)) + minimum = int(cfg.get("reverse_zone_minimum", 3600)) + + # ensure zone exists + rev_zone_data = pdns_api.zone_data(rev_zone) + if not rev_zone_data: + ns_fqdns = self._normalize_reverse_nameservers(forward_zone, nameservers) + _ = self._create_reverse_zone( + pdns_api, + reverse_zone=rev_zone, + forward_zone=forward_zone, + nameservers=ns_fqdns, + ) + rev_zone_data = pdns_api.zone_data(rev_zone) + + if not rev_zone_data: + return dict( + failed=True, + changed=False, + msg="failed to read reverse zone after create", + ) + + existing_rrsets = pdns_api.extract_existing_rrsets(rev_zone_data) + + zone_fqdn = rev_zone if rev_zone.endswith(".") else f"{rev_zone}." + base_serial = self._extract_existing_soa_serial( + zone_fqdn, existing_rrsets, zone_data=rev_zone_data + ) + if base_serial is None: + base_serial = generate_serial() + + # baseline desired (keep current serial) to test if anything changes + desired_soa_ns = self._build_reverse_zone_soa_ns_rrsets( + reverse_zone=rev_zone, + forward_zone=forward_zone, + nameservers=nameservers, + serial=base_serial, + ttl=ttl, + refresh=refresh, + retry=retry, + expire=expire, + minimum=minimum, + ) + desired_all = list(desired_soa_ns) + list(desired_ptr_rrsets) + changes = pdns_api.compare_rrsets(existing_rrsets, desired_all) + + if not changes: + return dict(failed=False, changed=False, msg="reverse zone is up-to-date.") + + # bump serial only if we actually change something + new_serial = generate_serial(base_serial) + desired_soa_ns_bumped = self._build_reverse_zone_soa_ns_rrsets( + reverse_zone=rev_zone, + forward_zone=forward_zone, + nameservers=nameservers, + serial=new_serial, + ttl=ttl, + refresh=refresh, + retry=retry, + expire=expire, + minimum=minimum, + ) + desired_all_bumped = list(desired_soa_ns_bumped) + list(desired_ptr_rrsets) + changes_bumped = pdns_api.compare_rrsets(existing_rrsets, desired_all_bumped) + + status_code, _, json_resp = pdns_api.patch_zone(rev_zone, changes_bumped) + + if status_code in [200, 201, 204]: + return dict( + failed=False, changed=True, msg="reverse zone successfully updated." 
+ )
+
+ return dict(failed=True, changed=False, msg=json_resp)
+
+
+def main():
+
+ arguments = dict(
+ zone_data=dict(required=True, type="raw"),
+ )
+
+ module = AnsibleModule(
+ argument_spec=arguments,
+ supports_check_mode=True,
+ )
+
+ r = PdnsZoneData(module)
+ result = r.run()
+
+ module.log(msg=f"= result: {result}")
+
+ module.exit_json(**result)
+
+
+# import module snippets
+if __name__ == "__main__":
+ main()
diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/plugins/modules/pihole_adlists.py b/ansible/playbooks/collections/ansible_collections/bodsch/dns/plugins/modules/pihole_adlists.py
new file mode 100644
index 0000000..0a91608
--- /dev/null
+++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/plugins/modules/pihole_adlists.py
@@ -0,0 +1,100 @@
+#!/usr/bin/python3
+# -*- coding: utf-8 -*-
+
+# (c) 2021, Bodo Schulz
+
+from __future__ import absolute_import, division, print_function
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.bodsch.core.plugins.module_utils.module_results import results
+from ansible_collections.bodsch.dns.plugins.module_utils.pihole.adlist_manager import (
+ AdlistManager,
+)
+from ansible_collections.bodsch.dns.plugins.module_utils.pihole.utils import (
+ sanitize_adlist,
+)
+
+# ---------------------------------------------------------------------------------------
+
+DOCUMENTATION = """
+module: pihole_adlists
+version_added: 0.9.0
+author: "Bodo Schulz (@bodsch) "
+
+short_description: manage Pi-hole adlists
+description: manage the adlist entries in the Pi-hole gravity database
+
+options:
+ adlists:
+ description: list of adlists to manage.
+ type: list
+ required: false
+
+"""
+
+EXAMPLES = r"""
+
+"""
+
+RETURN = """
+"""
+
+# ---------------------------------------------------------------------------------------
+
+
+class PiHoleAdlist(AdlistManager):
+ """ """
+
+ module = None
+
+ def __init__(self, module: any):
+ """ """
+ self.module = module
+
+ self.adlists = module.params.get("adlists")
+
+ super().__init__(module, database="/etc/pihole/gravity.db")
+
+ def run(self):
+ """ """
+ result = dict(rc=127, failed=True, changed=False, msg="unknown")
+
+ sanitized = sanitize_adlist(self.adlists)
+
+ result_state = self.manage_adlists(adlists=sanitized)
+
+ _state, _changed, _failed, state, changed, failed = results(
+ self.module, result_state
+ )
+
+ result = dict(changed=_changed, failed=failed, state=result_state)
+
+ return result
+
+
+def main():
+
+ argument_spec = dict(
+ adlists=dict(required=False, type="list"),
+ )
+
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ )
+
+ p = PiHoleAdlist(module)
+ result = p.run()
+
+ # module.log(msg=f"= result: {result}")
+
+ module.exit_json(**result)
+
+
+# import module snippets
+if __name__ == "__main__":
+ main()
diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/plugins/modules/pihole_admin_password.py b/ansible/playbooks/collections/ansible_collections/bodsch/dns/plugins/modules/pihole_admin_password.py
new file mode 100644
index 0000000..74306a0
--- /dev/null
+++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/plugins/modules/pihole_admin_password.py
@@ -0,0 +1,77 @@
+#!/usr/bin/python3
+# -*- coding: utf-8 -*-
+
+# (c) 2021, Bodo Schulz
+
+from __future__ import absolute_import, division, print_function
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.bodsch.dns.plugins.module_utils.pihole.pihole import PiHole
+
+# ---------------------------------------------------------------------------------------
+
+DOCUMENTATION = """
+module: pihole_admin_password
+version_added: 0.9.0
+author: "Bodo Schulz (@bodsch) "
+
+short_description: set the Pi-hole admin password
+description: set the password for the Pi-hole web interface
+
+options:
+ password:
+ description: the new admin password.
+ type: str
+ required: true
+
+"""
+
+EXAMPLES = r"""
+
+"""
+
+RETURN = """
+"""
+
+# ---------------------------------------------------------------------------------------
+
+
+class PiholeAdminPassword(PiHole):
+ """ """
+
+ module = None
+
+ def __init__(self, module: any):
+ """ """
+ self.module = module
+ self.password = module.params.get("password")
+
+ super().__init__(module)
+
+ def run(self):
+ """ """
+ return self.admin_password(self.password)
+
+
+def main():
+
+ argument_spec = dict(
+ password=dict(required=True, type="str", no_log=True),
+ )
+
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ )
+
+ p = PiholeAdminPassword(module)
+ result = p.run()
+
+ # module.log(msg=f"= result: {result}")
+
+ module.exit_json(**result)
+
+
+# import module snippets
+if __name__ == "__main__":
+ main()
diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/plugins/modules/pihole_clients.py b/ansible/playbooks/collections/ansible_collections/bodsch/dns/plugins/modules/pihole_clients.py
new file mode 100644
index 0000000..7ef81b2
--- /dev/null
+++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/plugins/modules/pihole_clients.py
@@ -0,0 +1,95 @@
+#!/usr/bin/python3
+# -*- coding: utf-8 -*-
+
+# (c) 2021, Bodo Schulz
+
+from __future__ import absolute_import, division, print_function
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.bodsch.core.plugins.module_utils.module_results import results
+from ansible_collections.bodsch.dns.plugins.module_utils.pihole.client_manager import (
+ ClientManager,
+)
+
+# ---------------------------------------------------------------------------------------
+
+DOCUMENTATION = """
+module: pihole_clients
+version_added: 0.9.0
+author: "Bodo Schulz (@bodsch) "
+
+short_description: manage Pi-hole clients
+description: manage the client entries in the Pi-hole gravity database
+
+options:
+ clients:
+ description: list of clients to manage.
+ type: list
+ required: false
+
+"""
+
+EXAMPLES = r"""
+
+"""
+
+RETURN = """
+"""
+
+# ---------------------------------------------------------------------------------------
+
+
+class PiHoleClients(ClientManager):
+ """ """
+
+ module = None
+
+ def __init__(self, module: any):
+ """ """
+ self.module = module
+
+ self.clients = module.params.get("clients")
+
+ super().__init__(module, database="/etc/pihole/gravity.db")
+
+ def run(self):
+ """ """
+ result = dict(rc=127, failed=True, changed=False, msg="unknown")
+
+ result_state = self.manage_clients(clients=self.clients)
+
+ _state, _changed, _failed, state, changed, failed = results(
+ self.module, result_state
+ )
+
+ result = dict(changed=_changed, failed=failed, state=result_state)
+
+ return result
+
+
+def main():
+
+ argument_spec = dict(
+ clients=dict(required=False, type="list"),
+ )
+
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ )
+
+ p = PiHoleClients(module)
+ result = p.run()
+
+ # module.log(msg=f"= result: {result}")
+
+ module.exit_json(**result)
+
+
+# import module snippets
+if __name__ == "__main__":
+ main()
diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/plugins/modules/pihole_command.py b/ansible/playbooks/collections/ansible_collections/bodsch/dns/plugins/modules/pihole_command.py
new file mode 100644
index 0000000..69a135f
--- /dev/null
+++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/plugins/modules/pihole_command.py
@@ -0,0 +1,85 @@
+#!/usr/bin/python3
+# -*- coding: utf-8 -*-
+
+# (c) 2021, Bodo Schulz
+
+from __future__ import absolute_import, division, print_function
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.bodsch.dns.plugins.module_utils.pihole.pihole import PiHole
+
+# ---------------------------------------------------------------------------------------
+
+DOCUMENTATION = """
+module: pihole_command
+version_added: 0.9.0
+author: "Bodo Schulz (@bodsch) "
+
+short_description: run a Pi-hole maintenance command
+description: run one of the supported Pi-hole commands (update_gravity, reloaddns or reloadlists)
+
+options:
+ command:
+ description: the Pi-hole command to run.
+ type: str
+ required: false
+ default: reloadlists
+ choices:
+ - update_gravity
+ - reloaddns
+ - reloadlists
+
+"""
+
+EXAMPLES = r"""
+
+"""
+
+RETURN = """
+"""
+
+# ---------------------------------------------------------------------------------------
+
+
+class PiholeCommand(PiHole):
+ """ """
+
+ module = None
+
+ def __init__(self, module: any):
+ """ """
+ self.module = module
+ self.command = module.params.get("command")
+
+ super().__init__(module)
+
+ def run(self):
+ """ """
+ if self.command == "update_gravity":
+ return self.update_gravity()
+ if self.command == "reloadlists":
+ return self.reload_lists()
+ if self.command == "reloaddns":
+ return self.reload_dns()
+
+
+def main():
+
+ argument_spec = dict(
+ command=dict(
+ default="reloadlists",
+ choices=["update_gravity", "reloaddns", "reloadlists"],
+ ),
+ )
+
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ )
+
+ p = PiholeCommand(module)
+ result = p.run()
+
+ # module.log(msg=f"= result: {result}")
+
+ module.exit_json(**result)
+
+
+# import module snippets
+if __name__ == "__main__":
+ main()
diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/plugins/modules/pihole_config.py b/ansible/playbooks/collections/ansible_collections/bodsch/dns/plugins/modules/pihole_config.py
new file mode 100644
index 0000000..e4cf50e
--- /dev/null
+++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/plugins/modules/pihole_config.py
@@ -0,0 +1,119 @@
+#!/usr/bin/python3
+# -*- coding: utf-8 -*-
+
+# (c) 2021, Bodo Schulz
+
+from __future__ import absolute_import, division, print_function
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.bodsch.core.plugins.module_utils.module_results import results
+from ansible_collections.bodsch.dns.plugins.module_utils.pihole.config import (
+ ConfigManager,
+)
+from packaging.version import Version
+
+# ---------------------------------------------------------------------------------------
+
+DOCUMENTATION = """
+module: pihole_config
+version_added: 0.9.0
+author: "Bodo Schulz (@bodsch) "
+
+short_description: manage the Pi-hole configuration
+description: apply the given configuration to a Pi-hole instance
+
+options:
+ config:
+ description: the Pi-hole configuration as a dictionary.
+ type: dict
+ required: true
+
+"""
+
+EXAMPLES = r"""
+
+"""
+
+RETURN = """
+"""
+
+# ---------------------------------------------------------------------------------------
+
+
+class PiHoleConfig(ConfigManager):
+ """ """
+
+ module = None
+
+ def __init__(self, module: any):
+ """ """
+ self.module = module
+
+ self.config = module.params.get("config")
+
+ super().__init__(module)
+
+ def run(self):
+ """ """
+ result = dict(rc=127, failed=True, changed=False, msg="unknown")
+
+ version = self.version()
+ # self.module.log(f"{version}")
+ version = version.get("full_version")
+
+ if Version(version) >= Version("6.3"):
+ # self.module.log(f"{version}")
+ # self.module.log(f"{self.config}")
+
+ dns = self.config.get("dns", {})
+ dns_domain = dns.get("domain", None)
+
+ # self.module.log(f"{type(dns_domain)}")
+
+ if dns_domain and isinstance(dns_domain, dict):
+ pass
+ # dns_domain_name = dns_domain.get("name")
+ else:
+ dns.pop("domain", None)
+ dns["domain"] = {"name": dns_domain}
+ self.config["dns"] = dns
+
+ # self.module.log(f"{self.config}")
+
+ result_state = self.set_config(config=self.config)
+
+ _state, _changed, _failed, state, changed, failed = results(
+ self.module, result_state
+ )
+
+ result = dict(changed=_changed, failed=failed, state=result_state)
+
+ return result
+
+
+def main():
+
+ argument_spec = dict(
+ config=dict(required=True, type="dict"),
+ )
+
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ )
+
+ p = PiHoleConfig(module)
+ result = p.run()
+
+ # module.log(msg=f"= result: {result}")
+
+ module.exit_json(**result)
+
+
+# import module snippets
+if __name__ == "__main__":
+ main()
diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/plugins/modules/pihole_custom_lists.py b/ansible/playbooks/collections/ansible_collections/bodsch/dns/plugins/modules/pihole_custom_lists.py
new file mode 100644
index 0000000..34c2f2f
--- /dev/null
+++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/plugins/modules/pihole_custom_lists.py
@@ -0,0 +1,113 @@
+#!/usr/bin/python3
+# -*- coding: utf-8 -*-
+
+# (c) 2021, Bodo Schulz
+
+from __future__ import absolute_import, division, print_function
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.bodsch.core.plugins.module_utils.module_results import results
+from ansible_collections.bodsch.dns.plugins.module_utils.pihole.pihole import PiHole
+
+# ---------------------------------------------------------------------------------------
+
+DOCUMENTATION = """
+module: pihole_custom_lists
+version_added: 0.9.0
+author: "Bodo Schulz (@bodsch) "
+
+short_description: manage Pi-hole allow and deny lists
+description: import entries into the Pi-hole allow and deny lists
+
+options:
+ allow_list:
+ description: list of domains for the allow list.
+ type: list
+ required: false
+ deny_list:
+ description: list of domains for the deny list.
+ type: list
+ required: false
+
+"""
+
+EXAMPLES = r"""
+
+"""
+
+RETURN = """
+"""
+
+# ---------------------------------------------------------------------------------------
+
+
+class PiholeCustomLists(PiHole):
+ """ """
+
+ module = None
+
+ def __init__(self, module: any):
+ """ """
+ self.module = module
+
+ self.allow_list = module.params.get("allow_list")
+ self.deny_list = module.params.get("deny_list")
+
+ super().__init__(module)
+
+ def run(self):
+ """ """
+ result = dict(rc=127, failed=True, changed=False, msg="unknown")
+
+ # pihole_status = self.status()
+
+ result_state = []
+
+ if len(self.allow_list) > 0:
+ res = {}
+ result_allow = self.import_allow(self.allow_list)
+ # self.module.log(f"{result_allow}")
+
+ res["allow"] = result_allow
+ result_state.append(res)
+
+ if len(self.deny_list) > 0:
+ res = {}
+ result_deny = self.import_deny(self.deny_list)
+ # self.module.log(f"{result_deny}")
+
+ res["deny"] = result_deny
+ result_state.append(res)
+
+ _state, _changed, _failed, state, changed, failed = results(
+ self.module, result_state
+ )
+
+ result = dict(changed=_changed, failed=failed, state=result_state)
+
+ return result
+
+
+def main():
+
+ argument_spec = dict(
+ allow_list=dict(required=False, type="list"),
+ deny_list=dict(required=False, type="list"),
+ )
+
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ )
+
+ p = PiholeCustomLists(module)
+ result = p.run()
+
+ # module.log(msg=f"= result: {result}")
+
+ module.exit_json(**result)
+
+
+# import module snippets
+if __name__ == "__main__":
+ main()
diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/plugins/modules/pihole_groups.py b/ansible/playbooks/collections/ansible_collections/bodsch/dns/plugins/modules/pihole_groups.py
new file mode 100644
index 0000000..047a1e6
--- /dev/null
+++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/plugins/modules/pihole_groups.py
@@ -0,0 +1,95 @@
+#!/usr/bin/python3
+# -*- coding: utf-8 -*-
+
+# (c) 2021, Bodo Schulz
+
+from __future__ import absolute_import, division, print_function
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.bodsch.core.plugins.module_utils.module_results import results
+from ansible_collections.bodsch.dns.plugins.module_utils.pihole.group_manager import (
+ GroupManager,
+)
+
+# ---------------------------------------------------------------------------------------
+
+DOCUMENTATION = """
+module: pihole_groups
+version_added: 0.9.0
+author: "Bodo Schulz (@bodsch) "
+
+short_description: manage Pi-hole groups
+description: manage the group entries in the Pi-hole gravity database
+
+options:
+ groups:
+ description: list of groups to manage.
+ type: list
+ required: false
+
+"""
+
+EXAMPLES = r"""
+
+"""
+
+RETURN = """
+"""
+
+# ---------------------------------------------------------------------------------------
+
+
+class PiHoleGroups(GroupManager):
+ """ """
+
+ module = None
+
+ def __init__(self, module: any):
+ """ """
+ self.module = module
+
+ self.groups = module.params.get("groups")
+
+ super().__init__(module, database="/etc/pihole/gravity.db")
+
+ def run(self):
+ """ """
+ result = dict(rc=127, failed=True, changed=False, msg="unknown")
+
+ result_state = self.manage_groups(groups=self.groups)
+
+ _state, _changed, _failed, state, changed, failed = results(
+ self.module, result_state
+ )
+
+ result = dict(changed=_changed, failed=failed, state=result_state)
+
+ return result
+
+
+def main():
+
+ argument_spec = dict(
+ groups=dict(required=False, type="list"),
+ )
+
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ )
+
+ p = PiHoleGroups(module)
+ result = p.run()
+
+ # module.log(msg=f"= result: {result}")
+
+ module.exit_json(**result)
+
+
+# import module snippets
+if __name__ == "__main__":
+ main()
diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/plugins/modules/pihole_version.py b/ansible/playbooks/collections/ansible_collections/bodsch/dns/plugins/modules/pihole_version.py
new file mode 100644
index 0000000..7382a4f
--- /dev/null
+++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/plugins/modules/pihole_version.py
@@ -0,0 +1,114 @@
+#!/usr/bin/python3
+# -*- coding: utf-8 -*-
+
+# (c) 2024-2025, Bodo Schulz
+
+from __future__ import absolute_import, division, print_function
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.bodsch.dns.plugins.module_utils.pihole.pihole import PiHole
+
+# ---------------------------------------------------------------------------------------
+
+DOCUMENTATION = """
+module: pihole_version
+version_added: 0.9.0
+author: "Bodo Schulz (@bodsch) "
+
+short_description: return the version of the installed pihole
+description: return the version of the installed pihole
+
+options:
+ validate_version:
+ description: check against the installed version.
+ type: str
+ required: false
+
+"""
+
+EXAMPLES = r"""
+- name: detect pihole version
+ become: true
+ bodsch.dns.pihole_version:
+ register: pihole_version
+ check_mode: false
+ ignore_errors: true
+
+- name: detect pihole version
+ become: true
+ bodsch.dns.pihole_version:
+ validate_version: '9.18.0'
+ register: pihole_version
+ check_mode: false
+ ignore_errors: true
+"""
+
+RETURN = """
+"""
+
+# ---------------------------------------------------------------------------------------
+
+
+class PiholeVersion(PiHole):
+ """
+ Main Class
+ """
+
+ module = None
+
+ def __init__(self, module):
+ """ """
+ self.module = module
+
+ super().__init__(module)
+
+ self.validate_version = module.params.get("validate_version")
+
+ def run(self):
+ """ """
+ version: dict = self.version()
+
+ # self.module.log(f" version: {version}")
+
+ if self.validate_version:
+ if version.get("full_version") == self.validate_version:
+ _failed = False
+ msg = f"version {self.validate_version} successfully installed."
+ else:
+ _failed = True
+ msg = f"version {self.validate_version} not installed."
+ else:
+ _failed = False
+ msg = "pihole is installed."
+
+ version["failed"] = _failed
+ version["msg"] = msg
+
+ return version
+
+
+# ===========================================
+# Module execution.
+#
+
+
+def main():
+
+ arguments = dict(validate_version=dict(required=False, type="str"))
+
+ module = AnsibleModule(
+ argument_spec=arguments,
+ supports_check_mode=True,
+ )
+
+ r = PiholeVersion(module)
+ result = r.run()
+
+ # module.log(msg="= result: {}".format(result))
+
+ module.exit_json(**result)
+
+
+# import module snippets
+if __name__ == "__main__":
+ main()
diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/plugins/modules/recursor_version.py b/ansible/playbooks/collections/ansible_collections/bodsch/dns/plugins/modules/recursor_version.py
new file mode 100644
index 0000000..e53e96a
--- /dev/null
+++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/plugins/modules/recursor_version.py
@@ -0,0 +1,196 @@
+#!/usr/bin/python3
+# -*- coding: utf-8 -*-
+
+# (c) 2021, Bodo Schulz
+
+from __future__ import absolute_import, division, print_function
+
+import re
+
+from ansible.module_utils.basic import AnsibleModule
+
+# ---------------------------------------------------------------------------------------
+
+DOCUMENTATION = """
+module: recursor_version
+version_added: 0.9.0
+author: "Bodo Schulz (@bodsch) "
+
+short_description: return the version of the installed powerdns-recursor
+description: return the version of the installed powerdns-recursor
+
+options:
+ validate_version:
+ description: check against the installed version.
+ type: str
+ required: false
+
+"""
+
+EXAMPLES = r"""
+- name: detect powerdns-recursor version
+ become: true
+ bodsch.dns.recursor_version:
+ register: recursor_version
+ check_mode: false
+ ignore_errors: true
+
+- name: detect powerdns-recursor version
+ become: true
+ bodsch.dns.recursor_version:
+ validate_version: '9.18.0'
+ register: recursor_version
+ check_mode: false
+ ignore_errors: true
+"""
+
+RETURN = """
+"""
+
+# ---------------------------------------------------------------------------------------
+
+
+class RecursorVersion(object):
+ """
+ Main Class
+ """
+
+ module = None
+
+ def __init__(self, module):
+ """
+ Initialize all needed Variables
+ """
+ self.module = module
+
+ self.validate_version = module.params.get("validate_version")
+ self.pdns_bin = module.get_bin_path("pdns_recursor", False)
+
+ def run(self):
+ """
+ runner
+ """
+ result = dict(rc=127, failed=True, changed=False, full_version="unknown")
+
+ if not self.pdns_bin:
+ return dict(rc=0, failed=False, changed=False, msg="no pdns installed")
+
+ args = []
+ args.append(self.pdns_bin)
+ args.append("--version")
+
+ self.module.log(msg=f" args : '{args}'")
+
+ rc, out, err = self._exec(args)
+
+ # self.module.log(msg=f"= out: {out} {type(out)}")
+ # self.module.log(msg=f"= err: {err} {type(err)}")
+
+ _out = out.splitlines()
+ _err = err.splitlines()
+
+ # self.module.log(msg=f"= _out: {_out} {type(_out)}")
+ # self.module.log(msg=f"= _err: {_err} {type(_err)}")
+
+ _output = []
+ _output += _out
+ _output += _err
+
+ # if rc == 0:
+ # output = out
+ # else:
+ # output = err
+
+ # self.module.log(msg=f"= output: {_output}")
+ # self.module.log(msg=f"= output: {set(_output)}")
+
+ msg = "unknown message"
+
+ pattern = re.compile(
+ r".*PowerDNS Recursor (?P<version>(?P<major>\d+)\.(?P<minor>\d+)\.(?P<patch>\*|\d+)).*"
+ )
+
+ version = next(
+ (m.groupdict() for s in _output if (m := pattern.search(s))), None
+ )
+
+ # version = next(re.search(pattern, s).group(0) for s in _output if re.search(pattern, s))
+ # version = re.search(pattern, _output)
+ if version:
+
+ self.module.log(msg=f"= version: {version} {type(version)}")
+ if isinstance(version, dict):
+ version_full_string = version.get("version")
+ version_major_string = version.get("major")
+ version_minor_string = version.get("minor")
+ version_patch_string = version.get("patch")
+ else:
+ version_full_string = version.group("version")
+ version_major_string = version.group("major")
+ version_minor_string = version.group("minor")
+ version_patch_string = version.group("patch")
+
+ if self.validate_version:
+ if version_full_string == self.validate_version:
+ _failed = False
+ msg = f"version {self.validate_version} successfully installed."
+ else:
+ _failed = True
+ msg = f"version {self.validate_version} not installed."
+ else:
+ _failed = False
+ msg = "powerdns-recursor is installed."
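+ # build the structured module result from the parsed version parts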
+
+ result = dict(
+ failed=_failed,
+ rc=0,
+ msg=msg,
+ full_version=version_full_string,
+ version=dict(
+ major=int(version_major_string),
+ minor=int(version_minor_string),
+ patch=int(version_patch_string),
+ ),
+ executable=self.pdns_bin,
+ )
+
+ return result
+
+ def _exec(self, commands):
+ """ """
+ # self.module.log(msg=f" commands: '{commands}'")
+ rc, out, err = self.module.run_command(commands, check_rc=False)
+
+ self.module.log(msg=f" rc : '{rc}'")
+ if int(rc) != 0:
+ self.module.log(msg=f" out: '{out}'")
+ self.module.log(msg=f" err: '{err}'")
+ for line in err.splitlines():
+ self.module.log(msg=f" {line}")
+
+ return (rc, out, err)
+
+
+# ===========================================
+# Module execution.
+#
+
+
+def main():
+
+ module = AnsibleModule(
+ argument_spec=dict(validate_version=dict(required=False, type="str")),
+ supports_check_mode=True,
+ )
+
+ r = RecursorVersion(module)
+ result = r.run()
+
+ module.log(msg=f"= result: {result}")
+
+ module.exit_json(**result)
+
+
+# import module snippets
+if __name__ == "__main__":
+ main()
diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/bind/CONTRIBUTING.md b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/bind/CONTRIBUTING.md
new file mode 100644
index 0000000..e3cd4cc
--- /dev/null
+++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/bind/CONTRIBUTING.md
@@ -0,0 +1,31 @@
+Contributing
+============
+If you want to contribute to a project and make it better, your help is very welcome.
+Contributing is also a great way to learn more about social coding on Github, new technologies and
+their ecosystems and how to make constructive, helpful bug reports, feature requests and the
+noblest of all contributions: a good, clean pull request.
+
+### How to make a clean pull request
+
+Look for a project's contribution instructions. If there are any, follow them.
+
+- Create a personal fork of the project on Github.
+- Clone the fork on your local machine. Your remote repo on Github is called `origin`.
+- Add the original repository as a remote called `upstream`.
+- If you created your fork a while ago be sure to pull upstream changes into your local repository.
+- Create a new branch to work on! Branch from `develop` if it exists, else from `master`.
+- Implement/fix your feature, comment your code.
+- Follow the code style of the project, including indentation.
+- If the project has tests run them!
+- Write or adapt tests as needed.
+- Add or change the documentation as needed.
+- Squash your commits into a single commit. Create a new branch if necessary.
+- Push your branch to your fork on Github, the remote `origin`.
+- From your fork open a pull request in the correct branch. Target the project's `develop` branch if there is one, else go for `master`!
+- If the maintainer requests further changes just push them to your branch. The PR will be updated automatically.
+- Once the pull request is approved and merged you can pull the changes from `upstream` to your local repo and delete
+ your extra branch(es).
+
+And last but not least: Always write your commit messages in the present tense.
+Your commit message should describe what the commit, when applied, does to the
+code – not what you did to the code.
diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/bind/LICENSE b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/bind/LICENSE new file mode 100644 index 0000000..8c8472f --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/bind/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." 
+ + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. 
+ + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. 
We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright 2020-2021 Bodo Schulz + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/bind/Makefile b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/bind/Makefile new file mode 100644 index 0000000..3abaf48 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/bind/Makefile @@ -0,0 +1,22 @@ +# +export TOX_SCENARIO ?= default +export TOX_ANSIBLE ?= ansible_6.1 + +.PHONY: converge destroy verify test lint + +default: converge + +converge: + @hooks/converge + +destroy: + @hooks/destroy + +verify: + @hooks/verify + +test: + @hooks/test + +lint: + @hooks/lint diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/bind/README.md b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/bind/README.md new file mode 100644 index 0000000..36165f7 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/bind/README.md @@ -0,0 +1,321 @@ + +# Ansible Role: `bodsch.dns.bind` + +Ansible role to install and configure bind on various linux systems. + + +## usage + +```yaml +# List of zones for which this name server is authoritative +bind_zones: [] + +# List of acls. +bind_acls: [] + +# Key binding for secondary servers +bind_dns_keys: [] +# - name: primary_key +# algorithm: hmac-sha256 +# secret: "azertyAZERTY123456" + +# Key binding for DDNS hosts +bind_update_keys: [] +# - name: ddns_host_key +# algorithm: hmac-sha256 +# secret: "azertyAZERTY123456" + +# List of IPv4 address of the network interface(s) to listen on. Set to "any" +# to listen on all interfaces +bind_listen: + ipv4: + - port: 53 + addresses: + - "127.0.0.1" + ipv6: + - port: 53 + addresses: + - "::1" + +# List of hosts that are allowed to query this DNS server. +bind_allow_query: + - "localhost" + +# A key-value list mapping server-IPs to TSIG keys for signing requests +bind_key_mapping: {} + +# Determines whether recursion should be allowed. +# - If you are building an AUTHORITATIVE DNS server, do NOT enable recursion. +# - If you are building a RECURSIVE (caching) DNS server, you need to enable +# recursion. +# - If your recursive DNS server has a public IP address, you MUST enable access +# control to limit queries to your legitimate users. Failing to do so will +# cause your server to become part of large scale DNS amplification +# attacks. Implementing BCP38 within your network would greatly +# reduce such attack surface +# Typically, an authoritative name server should have recursion turned OFF. +bind_recursion: false +bind_allow_recursion: + - "any" + +# Allows BIND to be set up as a caching name server +bind_forward_only: false + +# List of name servers to forward DNS requests to. 
+bind_forwarders: []
+
+# DNS round robin order (random or cyclic)
+bind_rrset_order: "random"
+
+# statistics channels configuration
+bind_statistics:
+ channels: false
+ port: 8053
+ host: 127.0.0.1
+ allow:
+ - "127.0.0.1"
+
+# DNSSEC configuration
+# NOTE In version 9.16.0 the dnssec-enable option was made obsolete and in 9.18.0 the option was entirely removed.
+bind_dnssec:
+ enable: true
+# dnssec-validation ( yes | no | auto );
+ validation: true
+
+bind_extra_include_files: []
+
+# SOA information
+bind_zone_soa:
+ ttl: "1W"
+ time_to_refresh: "1D"
+ time_to_retry: "1H"
+ time_to_expire: "1W"
+ minimum_ttl: "1D"
+
+bind_logging: {}
+
+# File mode for primary zone files (needs to be something like 0660 for dynamic updates)
+bind_zone_file_mode: "0640"
+
+# DNS64 support
+bind_dns64: false
+bind_dns64_clients:
+ - "any"
+```
+
+### `bind_listen`
+
+```yaml
+bind_listen:
+ ipv4:
+ - port: 53
+ addresses:
+ - "127.0.0.1"
+ - "{{ ansible_default_ipv4.address }}"
+ - port: 5353
+ addresses:
+ - "127.0.1.1"
+ ipv6:
+ - port: 53
+ addresses:
+ - "{{ ansible_default_ipv4.address }}"
+```
+
+
+### `bind_logging`
+
+```yaml
+bind_logging:
+ enable: true
+ channels:
+ - channel: general
+ file: "data/general.log"
+ versions: 3
+ size: 10M
+ print_time: true # true | false
+ print_category: true
+ print_severity: true
+ severity: dynamic # critical | error | warning | notice | info | debug [level] | dynamic
+ - channel: query
+ file: "data/query.log"
+ versions: 5
+ size: 10M
+ print_time: "" # true | false
+ severity: info #
+ - channel: dnssec
+ file: "data/dnssec.log"
+ versions: 5
+ size: 10M
+ print_time: "" # true | false
+ severity: info #
+ - channel: notify
+ file: "data/notify.log"
+ versions: 5
+ size: 10M
+ print_time: "" # true | false
+ severity: info #
+ - channel: transfers
+ file: "data/transfers.log"
+ versions: 5
+ size: 10M
+ print_time: "" # true | false
+ severity: info #
+ - channel: slog
+ syslog: security # kern | user | mail | daemon | auth | syslog | lpr |
+ # news | uucp | cron | authpriv | ftp |
+ # local0 | local1 | local2 | local3 |
+ # local4 | local5 | local6 | local7
+ # file: "data/transfers.log"
+ #versions: 5
+ #size: 10M
+ print_time: "" # true | false
+ severity: info #
+ categories:
+ "xfer-out":
+ - transfers
+ - slog
+ "xfer-in":
+ - transfers
+ - slog
+ notify:
+ - notify
+ "lame-servers":
+ - general
+ config:
+ - general
+ default:
+ - general
+ security:
+ - general
+ - slog
+ dnssec:
+ - dnssec
+ queries:
+ - query
+```
+
+### `bind_zones`
+
+```yaml
+bind_zones:
+ - name: 'example.com'
+ # default: primary [primary, secondary, forward]
+ # type:
+ create_forward_zones: true
+ # Skip creation of reverse zones
+ create_reverse_zones: false
+ # for type: secondary
+ #primaries:
+ # - 10.11.0.4
+ networks:
+ - '192.0.2'
+ ipv6_networks:
+ - '2001:db9::/48'
+ name_servers:
+ - ns1.acme-inc.local.
+ - ns2.acme-inc.local.
+ hostmaster_email: admin + # + allow_updates: + - "10.0.1.2" + - 'key "external-dns"' + allow_transfers: + - 'key "external-dns"' + hosts: + - name: srv001 + ip: 192.0.2.1 + ipv6: '2001:db9::1' + aliases: + - www + - name: srv002 + ip: 192.0.2.2 + ipv6: '2001:db9::2' + - name: mail001 + ip: 192.0.2.10 + ipv6: '2001:db9::3' + mail_servers: + - name: mail001 + preference: 10 + + - name: 'acme-inc.local' + primaries: + - 10.11.0.4 + networks: + - '10.11' + ipv6_networks: + - '2001:db8::/48' + name_servers: + - ns1 + - ns2 + hosts: + - name: ns1 + ip: 10.11.0.4 + - name: ns2 + ip: 10.11.0.5 + - name: srv001 + ip: 10.11.1.1 + ipv6: 2001:db8::1 + aliases: + - www + - name: srv002 + ip: 10.11.1.2 + ipv6: 2001:db8::2 + aliases: + - mysql + - name: mail001 + ip: 10.11.2.1 + ipv6: 2001:db8::d:1 + aliases: + - smtp + - mail-in + - name: mail002 + ip: 10.11.2.2 + ipv6: 2001:db8::d:2 + - name: mail003 + ip: 10.11.2.3 + ipv6: 2001:db8::d:3 + aliases: + - imap + - mail-out + - name: srv010 + ip: 10.11.0.10 + - name: srv011 + ip: 10.11.0.11 + - name: srv012 + ip: 10.11.0.12 + mail_servers: + - name: mail001 + preference: 10 + - name: mail002 + preference: 20 + services: + - name: _ldap._tcp + weight: 100 + port: 88 + target: srv010 + text: + - name: _kerberos + text: KERBEROS.ACME-INC.COM + - name: '@' + text: + - 'some text' + - 'more text' +``` + + +## Contribution + +Please read [Contribution](CONTRIBUTING.md) + +## Development, Branches (Git Tags) + + +## Author + +- Bodo Schulz + +## License + +[Apache](LICENSE) + +**FREE SOFTWARE, HELL YEAH!** diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/bind/defaults/main.yml b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/bind/defaults/main.yml new file mode 100644 index 0000000..9448d23 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/bind/defaults/main.yml @@ -0,0 +1,100 @@ +--- + +# List of zones for which this name server is authoritative +bind_zones: [] + +# List of acls. +bind_acls: [] + +# Key binding for secondary servers +bind_dns_keys: [] +# - name: primary_key +# algorithm: hmac-sha256 +# secret: "azertyAZERTY123456" + +# Key binding for DDNS updates +bind_update_keys: [] +# - name: update_key +# algorithm: hmac-sha256 +# secret: "azertyAZERTY123456" + +# List of IPv4 address of the network interface(s) to listen on. Set to "any" +# to listen on all interfaces +bind_listen: + ipv4: + - port: 53 + addresses: + - "127.0.0.1" + ipv6: + - port: 53 + addresses: + - "::1" + +# List of hosts that are allowed to query this DNS server. +bind_allow_query: + - "localhost" + +# A key-value list mapping server-IPs to TSIG keys for signing requests +bind_key_mapping: {} + +# Determines whether recursion should be allowed. Typically, an authoritative +# name server should have recursion turned OFF. +bind_recursion: false +bind_allow_recursion: + - "any" + +# Allows BIND to be set up as a caching name server +bind_forward_only: false + +# List of name servers to forward DNS requests to. +bind_forwarders: [] + +# DNS round robin order (random or cyclic) +bind_rrset_order: "random" + +# statistics channels configuration +bind_statistics: + channels: false + port: 8053 + host: 127.0.0.1 + allow: + - "127.0.0.1" + +# DNSSEC configuration +# NOTE In version 9.16.0 the dnssec-enable option was made obsolete and in 9.18.0 the option was entirely removed. 
+bind_dnssec: + enable: true + # dnssec-validation ( yes | no | auto ); + validation: auto + +bind_extra_include_files: [] + +# SOA information +bind_zone_soa: + ttl: "1W" + time_to_refresh: "1D" + time_to_retry: "1H" + time_to_expire: "1W" + minimum_ttl: "1D" + +bind_logging: + enable: true + channels: + - channel: general + file: "data/general.log" + versions: 3 + size: 10M + print_time: true # true | false + print_category: true + print_severity: true + severity: dynamic # critical | error | warning | notice | info | debug [level] | dynamic + +# File mode for primary zone files (needs to be something like 0660 for dynamic updates) +bind_zone_file_mode: "0640" + +# DNS64 support +bind_dns64: false +bind_dns64_clients: + - "any" + +... diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/bind/handlers/main.yml b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/bind/handlers/main.yml new file mode 100644 index 0000000..fe76ebf --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/bind/handlers/main.yml @@ -0,0 +1,17 @@ +--- + +- name: systemctl daemon-reload + become: true + ansible.builtin.systemd: + daemon_reload: true + force: true + when: + - ansible_facts.service_mgr | lower == "systemd" + +- name: reload bind + become: true + ansible.builtin.service: + name: "{{ bind_service }}" + state: reloaded + +... diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/bind/hooks/converge b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/bind/hooks/converge new file mode 100755 index 0000000..0c50932 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/bind/hooks/converge @@ -0,0 +1,3 @@ +#!/usr/bin/env bash + +hooks/tox.sh "converge" diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/bind/hooks/destroy b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/bind/hooks/destroy new file mode 100755 index 0000000..b4a3f8d --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/bind/hooks/destroy @@ -0,0 +1,3 @@ +#!/usr/bin/env bash + +hooks/tox.sh "destroy" diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/bind/hooks/lint b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/bind/hooks/lint new file mode 100755 index 0000000..ef226a0 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/bind/hooks/lint @@ -0,0 +1,3 @@ +#!/usr/bin/env bash + +hooks/tox.sh "lint" diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/bind/hooks/molecule.rc b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/bind/hooks/molecule.rc new file mode 100644 index 0000000..78c8621 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/bind/hooks/molecule.rc @@ -0,0 +1,74 @@ + +TOX_ARGS= + +if [ -n "${TOX_SCENARIO}" ] +then + TOX_ARGS="--scenario-name ${TOX_SCENARIO}" +fi + +TOX_OPTS="-e ${TOX_ANSIBLE}" + +vercomp() { + + [[ $1 == $2 ]] && return 0 + v1=$(echo "$1" | sed -e 's|-|.|g') + v2=$(echo "$2" | sed -e 's|-|.|g') + + local IFS=. 
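+ # compare the dot-separated fields of the dash-normalized versions numerically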
+    local i ver1=($v1) ver2=($v2)
+    # fill empty fields in ver1 with zeros
+    for ((i=${#ver1[@]}; i<${#ver2[@]}; i++))
+    do
+        ver1[i]=0
+    done
+
+    for ((i=0; i<${#ver1[@]}; i++))
+    do
+        if [[ -z ${ver2[i]} ]]
+        then
+            # fill empty fields in ver2 with zeros
+            ver2[i]=0
+        fi
+        if ((10#${ver1[i]} > 10#${ver2[i]}))
+        then
+            return 1
+        fi
+        if ((10#${ver1[i]} < 10#${ver2[i]}))
+        then
+            return 2
+        fi
+    done
+    return 0
+}
+
+install_collection() {
+    local collection="${1}"
+
+    echo "Install the required collection '${collection}'"
+    ansible-galaxy collection install ${collection}
+}
+
+remove_collection() {
+
+    local collection="${1}"
+
+    namespace="$(echo "${collection}" | cut -d '.' -f1)"
+    name="$(echo "${collection}" | cut -d '.' -f2)"
+
+    collection="${HOME}/.ansible/collections/ansible_collections/${namespace}/${name}"
+
+    rm \
+        --recursive \
+        --force \
+        "${collection}"
+}
+
+publish() {
+
+    TOKEN="${HOME}/.ansible/galaxy_token"
+
+    if [ -e "${TOKEN}" ]
+    then
+        ansible-galaxy import --token=$(cat "${TOKEN}") bodsch # "???"
+    fi
+}
diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/bind/hooks/test b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/bind/hooks/test
new file mode 100755
index 0000000..2869139
--- /dev/null
+++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/bind/hooks/test
@@ -0,0 +1,3 @@
+#!/usr/bin/env bash
+
+hooks/tox.sh "test"
diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/bind/hooks/tox.sh b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/bind/hooks/tox.sh
new file mode 100755
index 0000000..c93de29
--- /dev/null
+++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/bind/hooks/tox.sh
@@ -0,0 +1,52 @@
+#!/usr/bin/env bash
+
+. hooks/molecule.rc
+
+TOX_TEST="${1}"
+
+if [ -f "./collections.yml" ]
+then
+  for collection in $(grep -v "#" collections.yml | grep "^ - name: " | awk -F ': ' '{print $2}')
+  do
+    collections_installed="$(ansible-galaxy collection list | grep ${collection} 2> /dev/null)"
+
+    if [ -z "${collections_installed}" ]
+    then
+      install_collection ${collection}
+    else
+      collection_version=$(echo "${collections_installed}" | awk -F ' ' '{print $2}')
+      version="$(grep -v "#" collections.yml | grep -A1 "^ - name: ${collection}" | grep "version: " 2> /dev/null | awk -F ': ' '{print $2}' | sed -e 's|=||' -e 's|>||' -e 's|"||g')"
+
+      echo "The required collection '${collection}' is installed in version ${collection_version}."
+
+      if [ ! -z "${version}" ]
+      then
+
+        vercomp "${version}" "${collection_version}"
+
+        case $? in
+          0) op='=' ;;
+          1) op='>' ;;
+          2) op='<' ;;
+        esac
+
+        if [[ $op = "=" ]] || [[ $op = ">" ]]
+        then
+          # echo "FAIL: Expected '$3', Actual '$op', Arg1 '$1', Arg2 '$2'"
+          echo "re-install for version ${version}"
+
+          remove_collection ${collection}
+          install_collection ${collection}
+        else
+          :
+          # echo "Pass: '$1 $op $2'"
+        fi
+      else
+        :
+      fi
+    fi
+  done
+  echo ""
+fi
+
+tox ${TOX_OPTS} -- molecule ${TOX_TEST} ${TOX_ARGS}
diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/bind/hooks/verify b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/bind/hooks/verify
new file mode 100755
index 0000000..5f436af
--- /dev/null
+++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/bind/hooks/verify
@@ -0,0 +1,3 @@
+#!/usr/bin/env bash
+
+hooks/tox.sh "verify"
diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/bind/meta/.galaxy_install_info b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/bind/meta/.galaxy_install_info
new file mode 100644
index 0000000..853c66e
--- /dev/null
+++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/bind/meta/.galaxy_install_info
@@ -0,0 +1,2 @@
+install_date: 'Fr 19 Nov 2021  05:32:37 '
+version: ''
diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/bind/meta/main.yml b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/bind/meta/main.yml
new file mode 100644
index 0000000..10ddf7c
--- /dev/null
+++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/bind/meta/main.yml
@@ -0,0 +1,33 @@
+---
+
+galaxy_info:
+  role_name: bind
+
+  author: Bodo Schulz
+  description: Ansible role to install and configure the ISC BIND server
+
+  license: Apache
+  min_ansible_version: "2.9"
+
+  platforms:
+    - name: ArchLinux
+    - name: Debian
+      versions:
+        # 10
+        - buster
+        # 11
+        - bullseye
+        # 12
+        - bookworm
+    - name: Ubuntu
+      versions:
+        # 20.04
+        - focal
+
+  galaxy_tags:
+    - system
+    - dns
+
+dependencies: []
+
+...
diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/bind/molecule/configured/converge.yml b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/bind/molecule/configured/converge.yml new file mode 100644 index 0000000..c7131a2 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/bind/molecule/configured/converge.yml @@ -0,0 +1,12 @@ +--- + +- name: converge + hosts: all + any_errors_fatal: true + gather_facts: true + + environment: + NETRC: '' + + roles: + - role: bodsch.dns.bind diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/bind/molecule/configured/group_vars/all/vars.yml b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/bind/molecule/configured/group_vars/all/vars.yml new file mode 100644 index 0000000..a754d41 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/bind/molecule/configured/group_vars/all/vars.yml @@ -0,0 +1,227 @@ +--- + +bind_statistics: + channels: true + allow: + - any + host: "127.0.0.1" + +bind_allow_query: + - any + +bind_listen: + ipv4: + - port: 53 + addresses: + - "127.0.0.1" + - "10.11.0.1" + - port: 5353 + addresses: + - "127.0.1.1" + # ipv6: + # - port: 53 + # addresses: + # - "{{ ansible_facts.default_ipv6.address }}" + +bind_acls: + - name: acl1 + match_list: + - 10.11.0.0/24 + +bind_forwarders: + - '9.9.9.9' + - '141.1.1.1' + +bind_recursion: true +bind_dns64: true + +bind_check_names: + - master + - ignore + +bind_logging: + enable: true + channels: + - channel: general + file: "data/general.log" + versions: 3 + size: 10M + print_time: true # true | false + print_category: true + print_severity: true + severity: dynamic # critical | error | warning | notice | info | debug [level] | dynamic + - channel: query + file: "data/query.log" + versions: 5 + size: 10M + print_time: "" # true | false + severity: debug2 # + - channel: dnssec + file: "data/dnssec.log" + versions: 5 + size: 10M + print_time: "" # true | false + severity: info # + - channel: notify + file: "data/notify.log" + versions: 5 + size: 10M + print_time: "" # true | false + severity: info # + - channel: transfers + file: "data/transfers.log" + versions: 5 + size: 10M + print_time: "" # true | false + severity: info # + - channel: slog + syslog: security # kern | user | mail | daemon | auth | syslog | lpr | + # news | uucp | cron | authpriv | ftp | + # local0 | local1 | local2 | local3 | + # local4 | local5 | local6 | local7 + print_time: "" # true | false + severity: info # + categories: + "xfer-out": + - transfers + - slog + "xfer-in": + - transfers + - slog + notify: + - notify + "lame-servers": + - general + config: + - general + default: + - general + security: + - general + - slog + dnssec: + - dnssec + queries: + - query + +bind_zone_soa: + minimum_ttl: "32H" + ttl: "48H" + time_to_refresh: "24H" + time_to_retry: "2H" + time_to_expire: "2D" + +bind_zones: + - name: 'acme-inc.local' + type: primary + create_forward_zones: true + create_reverse_zones: true + #primaries: + # - 10.11.0.1 + networks: + - '10.11.0' + ipv6_networks: + - '2001:db8::/48' + name_servers: + - ns1 + - ns2 + hosts: + - name: ns1 + ip: 10.11.0.1 + - name: ns2 + ip: 10.11.0.2 + - name: srv001 + ip: 10.11.1.1 + ipv6: 2001:db8::1 + aliases: + - www + - name: srv002 + ip: 10.11.1.2 + ipv6: 2001:db8::2 + aliases: + - mysql + - name: mail001 + ip: 10.11.2.1 + ipv6: 2001:db8::d:1 + aliases: + - smtp + - mail-in + - name: mail002 + ip: 10.11.2.2 + ipv6: 2001:db8::d:2 + 
- name: mail003 + ip: 10.11.2.3 + ipv6: 2001:db8::d:3 + aliases: + - imap + - mail-out + - name: srv010 + ip: 10.11.0.10 + - name: srv011 + ip: 10.11.0.11 + - name: srv012 + ip: 10.11.0.12 + mail_servers: + - name: mail001 + preference: 10 + - name: mail002 + preference: 20 + services: + - name: _ldap._tcp + weight: 100 + port: 88 + target: srv010 + text: + - name: _kerberos + text: KERBEROS.ACME-INC.COM + - name: '@' + text: + - 'some text' + - 'more text' + + - name: cm.local + # type: primary # default: primary [primary, secondary, forward] + # create_forward_zones: true + # create_reverse_zones: true # Skip creation of reverse zones + primaries: + - "{{ ansible_facts.default_ipv4.address }}" # Primary server(s) for this zone + name_servers: + - 'dns' + networks: + - '192.168.124' + hosts: + - name: '@' + name_servers: 'dns.cm.local.' + ip: "{{ ansible_facts.default_ipv4.address }}" + - name: dns + ip: "{{ ansible_facts.default_ipv4.address }}" + - name: cms + ip: 192.168.124.21 + aliases: + - content-management-server + - name: mls + ip: 192.168.124.30 + aliases: + - master-live-server + - name: rls-01 + ip: 192.168.124.35 + aliases: + - replication-live-server-01 + + # - name: matrix.local + # type: primary + # create_reverse_zones: true # Skip creation of reverse zones + # primaries: + # - "{{ ansible_facts.default_ipv4.address }}" # Primary server(s) for this zone + # name_servers: + # - 'dns' + # networks: + # - '192.168.111' + # ipv6_networks: + # - '2001:0db8:85a3::8a2e:0370:7334/48' + # hosts: + # - name: '@' + # name_servers: 'dns.cm.local.' + # ip: "{{ ansible_facts.default_ipv4.address }}" + +... diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/bind/molecule/configured/molecule.yml b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/bind/molecule/configured/molecule.yml new file mode 100644 index 0000000..0437c1f --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/bind/molecule/configured/molecule.yml @@ -0,0 +1,68 @@ +--- + +role_name_check: 1 + +dependency: + name: galaxy + +driver: + name: docker + +platforms: + - name: instance + image: "ghcr.io/bodsch/docker-ansible/ansible-${DISTRIBUTION:-debian:12}" + command: ${MOLECULE_DOCKER_COMMAND:-""} + docker_host: "${DOCKER_HOST:-unix:///run/docker.sock}" + privileged: true + pre_build_image: true + cgroupns_mode: host + volumes: + - /sys/fs/cgroup:/sys/fs/cgroup:rw + - /var/lib/containerd + capabilities: + - SYS_ADMIN + tmpfs: + - /run + - /tmp + tty: true + environment: + container: docker + groups: + - dns + docker_networks: + - name: bind + ipam_config: + - subnet: "10.11.0.0/24" + gateway: "10.11.0.254" + networks: + - name: bind + ipv4_address: "10.11.0.1" + +provisioner: + name: ansible + ansible_facts.args: + - --diff + - -v + config_options: + defaults: + deprecation_warnings: true + callback_result_format: yaml + callbacks_enabled: profile_tasks + gathering: smart + fact_caching: jsonfile + fact_caching_timeout: 8640 + fact_caching_connection: ansible_facts + +scenario: + test_sequence: + - destroy + - dependency + - syntax + - create + - prepare + - converge + - verify + - destroy + +verifier: + name: testinfra diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/bind/molecule/configured/prepare.yml b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/bind/molecule/configured/prepare.yml new file mode 100644 index 0000000..34b7c5e --- /dev/null +++ 
b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/bind/molecule/configured/prepare.yml @@ -0,0 +1,63 @@ +--- + +- name: information + hosts: all + gather_facts: true + + pre_tasks: + - name: arch- / artixlinux + when: + - ansible_facts.distribution | lower == 'archlinux' or + ansible_facts.os_family | lower == 'artix linux' + block: + - name: update pacman system + become: true + ansible.builtin.command: + argv: + - pacman + - --refresh + - --sync + - --sysupgrade + - --noconfirm + register: pacman + changed_when: pacman.rc != 0 + failed_when: pacman.rc != 0 + + - name: create depends service + ansible.builtin.copy: + mode: 0755 + dest: /etc/init.d/net + content: | + #!/usr/bin/openrc-run + true + when: + - ansible_facts.os_family | lower == 'artix linux' + + - name: make sure python3-apt is installed (only debian based) + ansible.builtin.package: + name: + - python3-apt + state: present + when: + - ansible_facts.os_family | lower == 'debian' + + - name: update package cache + become: true + ansible.builtin.package: + update_cache: true + + - name: install dependencies + ansible.builtin.package: + name: + - iproute2 + state: present + + - name: environment + ansible.builtin.debug: + msg: + - "os family : {{ ansible_facts.distribution }} ({{ ansible_facts.os_family }})" + - "distribution version : {{ ansible_facts.distribution_major_version }}" + - "ansible version : {{ ansible_version.full }}" + - "python version : {{ ansible_facts.python.version.major }}.{{ ansible_facts.python.version.minor }}" + +... diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/bind/molecule/configured/tests/test_default.py b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/bind/molecule/configured/tests/test_default.py new file mode 100644 index 0000000..ca39f68 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/bind/molecule/configured/tests/test_default.py @@ -0,0 +1,378 @@ +# coding: utf-8 +from __future__ import unicode_literals + +import json +import os + +import pytest +import testinfra.utils.ansible_runner +from ansible.parsing.dataloader import DataLoader +from ansible.template import Templar + +HOST = "all" + +testinfra_hosts = testinfra.utils.ansible_runner.AnsibleRunner( + os.environ["MOLECULE_INVENTORY_FILE"] +).get_hosts(HOST) + + +def pp_json(json_thing, sort=True, indents=2): + if type(json_thing) is str: + print(json.dumps(json.loads(json_thing), sort_keys=sort, indent=indents)) + else: + print(json.dumps(json_thing, sort_keys=sort, indent=indents)) + return None + + +def base_directory(): + """ """ + cwd = os.getcwd() + + if "group_vars" in os.listdir(cwd): + directory = "../.." + molecule_directory = "." + else: + directory = "." 
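+        # cwd is the role root, so scenario files are looked up under
+        # molecule/<MOLECULE_SCENARIO_NAME>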
+ molecule_directory = f"molecule/{os.environ.get('MOLECULE_SCENARIO_NAME')}" + + return directory, molecule_directory + + +def read_ansible_yaml(file_name, role_name): + """ """ + read_file = None + + for e in ["yml", "yaml"]: + test_file = f"{file_name}.{e}" + if os.path.isfile(test_file): + read_file = test_file + break + + return f"file={read_file} name={role_name}" + + +def dig(host, domains): + + local_dns = "@127.0.0.1" + + for d in domains: + output_msg = "" + domain = d.get("domain") + dns_type = d.get("type", "A").upper() + result = d.get("result") + + if dns_type == "PTR": + dig_type = "-x" + else: + dig_type = f"-t {dns_type}" + + command = f"dig {dig_type} {domain} {local_dns} +short" + print(f"{command}") + cmd = host.run(command) + + if cmd.succeeded: + output = cmd.stdout + output_arr = sorted(output.splitlines()) + + if len(output_arr) == 1: + output_msg = output.strip() + if len(output_arr) > 1: + output_msg = ",".join(output_arr) + + print(f"[{domain} - {dns_type}] => {output_msg}") + print(f" {len(output)} - {type(output)}") + print(f" {output_msg}") + + return output_msg == result + else: + return cmd.failed + + +@pytest.fixture() +def get_vars(host): + """ + parse ansible variables + - defaults/main.yml + - vars/main.yml + - vars/${DISTRIBUTION}.yaml + - molecule/${MOLECULE_SCENARIO_NAME}/group_vars/all/vars.yml + """ + base_dir, molecule_dir = base_directory() + distribution = host.system_info.distribution + operation_system = None + + if distribution in ["debian", "ubuntu"]: + operation_system = "debian" + elif distribution in ["redhat", "ol", "centos", "rocky", "almalinux"]: + operation_system = "redhat" + elif distribution in ["arch", "artix"]: + operation_system = f"{distribution}linux" + + # print(" -> {} / {}".format(distribution, os)) + # print(" -> {}".format(base_dir)) + + file_defaults = read_ansible_yaml(f"{base_dir}/defaults/main", "role_defaults") + file_vars = read_ansible_yaml(f"{base_dir}/vars/main", "role_vars") + file_distibution = read_ansible_yaml( + f"{base_dir}/vars/{operation_system}", "role_distibution" + ) + file_molecule = read_ansible_yaml( + f"{molecule_dir}/group_vars/all/vars", "test_vars" + ) + # file_host_molecule = read_ansible_yaml("{}/host_vars/{}/vars".format(base_dir, HOST), "host_vars") + + defaults_vars = ( + host.ansible("include_vars", file_defaults) + .get("ansible_facts") + .get("role_defaults") + ) + vars_vars = ( + host.ansible("include_vars", file_vars).get("ansible_facts").get("role_vars") + ) + distibution_vars = ( + host.ansible("include_vars", file_distibution) + .get("ansible_facts") + .get("role_distibution") + ) + molecule_vars = ( + host.ansible("include_vars", file_molecule) + .get("ansible_facts") + .get("test_vars") + ) + # host_vars = host.ansible("include_vars", file_host_molecule).get("ansible_facts").get("host_vars") + + ansible_vars = defaults_vars + ansible_vars.update(vars_vars) + ansible_vars.update(distibution_vars) + ansible_vars.update(molecule_vars) + # ansible_vars.update(host_vars) + + templar = Templar(loader=DataLoader(), variables=ansible_vars) + result = templar.template(ansible_vars, fail_on_undefined=False) + + return result + + +def test_directories(host, get_vars): + """ + used config directory + """ + pp_json(get_vars) + + directories = [ + get_vars.get("bind_dir"), + get_vars.get("bind_conf_dir"), + get_vars.get("bind_zone_dir"), + get_vars.get("bind_secondary_dir"), + ] + + for dirs in directories: + d = host.file(dirs) + assert d.is_directory + + +def test_files(host, get_vars): + """ 
+ created config files + """ + files = [get_vars.get("bind_config", "/etc/bind/named.conf")] + + for _file in files: + f = host.file(_file) + assert f.is_file + + +def test_cache_files(host, get_vars): + """ + created config files + """ + bind_dir = get_vars.get("bind_dir", "/var/cache/bind") + + files = [ + f"{bind_dir}/0.0.0.0.8.b.d.0.1.0.0.2.ip6.arpa", + f"{bind_dir}/0.11.10.in-addr.arpa", + f"{bind_dir}/acme-inc.local", + f"{bind_dir}/124.168.192.in-addr.arpa", + f"{bind_dir}/cm.local", + ] + + for _file in files: + f = host.file(_file) + assert f.is_file + + +def test_service_running_and_enabled(host, get_vars): + """ + running service + """ + service_name = get_vars.get("bind_service", "bind9") + + service = host.service(service_name) + assert service.is_running + assert service.is_enabled + + +def test_listening_socket(host, get_vars): + """ """ + listening = host.socket.get_listening_sockets() + + for i in listening: + print(i) + + bind_port = "53" + bind_address = "127.0.0.1" + + listen = [] + listen.append(f"tcp://{bind_address}:{bind_port}") + listen.append(f"udp://{bind_address}:{bind_port}") + + for spec in listen: + socket = host.socket(spec) + assert socket.is_listening + + +def test_records_A(host): + """ """ + domains = [ + {"domain": "ns1.acme-inc.local", "type": "A", "result": "10.11.0.1"}, + {"domain": "ns2.acme-inc.local", "type": "A", "result": "10.11.0.2"}, + {"domain": "srv001.acme-inc.local", "type": "A", "result": "10.11.1.1"}, + {"domain": "srv002.acme-inc.local", "type": "A", "result": "10.11.1.2"}, + {"domain": "mail001.acme-inc.local", "type": "A", "result": "10.11.2.1"}, + {"domain": "mail002.acme-inc.local", "type": "A", "result": "10.11.2.2"}, + {"domain": "mail003.acme-inc.local", "type": "A", "result": "10.11.2.3"}, + {"domain": "srv010.acme-inc.local", "type": "A", "result": "10.11.0.10"}, + {"domain": "srv011.acme-inc.local", "type": "A", "result": "10.11.0.11"}, + {"domain": "srv012.acme-inc.local", "type": "A", "result": "10.11.0.12"}, + # + {"domain": "cms.cm.local", "type": "A", "result": "192.168.124.21"}, + ] + + assert dig(host, domains) + + +def test_records_PTR(host): + """ """ + domains = [ + # IPv4 Reverse lookups + {"domain": "10.11.0.1", "type": "PTR", "result": "ns1.acme-inc.local."}, + {"domain": "10.11.0.2", "type": "PTR", "result": "ns2.acme-inc.local."}, + {"domain": "10.11.1.1", "type": "PTR", "result": "srv001.acme-inc.local."}, + {"domain": "10.11.1.2", "type": "PTR", "result": "srv002.acme-inc.local."}, + {"domain": "10.11.2.1", "type": "PTR", "result": "mail001.acme-inc.local."}, + {"domain": "10.11.2.2", "type": "PTR", "result": "mail002.acme-inc.local."}, + {"domain": "10.11.2.3", "type": "PTR", "result": "mail003.acme-inc.local."}, + {"domain": "10.11.0.10", "type": "PTR", "result": "srv010.acme-inc.local."}, + {"domain": "10.11.0.11", "type": "PTR", "result": "srv011.acme-inc.local."}, + {"domain": "10.11.0.12", "type": "PTR", "result": "srv012.acme-inc.local."}, + # # IPv6 Reverse lookups + {"domain": "2001:db8::1", "type": "PTR", "result": "srv001.acme-inc.local."}, + # + {"domain": "192.168.124.21", "type": "PTR", "result": "cms.cm.local"}, + ] + + assert dig(host, domains) + + +def test_records_CNAME(host): + """ """ + domains = [ + # IPv4 Alias lookups + { + "domain": "www.acme-inc.local", + "type": "CNAME", + "result": "srv001.acme-inc.local.", + }, + { + "domain": "mysql.acme-inc.local", + "type": "CNAME", + "result": "srv002.acme-inc.local.", + }, + { + "domain": "smtp.acme-inc.local", + "type": "CNAME", + "result": 
"mail001.acme-inc.local.", + }, + { + "domain": "mail-in.acme-inc.local", + "type": "CNAME", + "result": "mail001.acme-inc.local.", + }, + { + "domain": "imap.acme-inc.local", + "type": "CNAME", + "result": "mail003.acme-inc.local.", + }, + { + "domain": "mail-out.acme-inc.local", + "type": "CNAME", + "result": "mail003.acme-inc.local.", + }, + # + {"domain": "cms.cm.local", "type": "CNAME", "result": "192.168.124.21"}, + ] + + assert dig(host, domains) + + +def test_records_AAAA(host): + """ """ + domains = [ + # IPv6 Forward lookups + {"domain": "srv001.acme-inc.local", "type": "AAAA", "result": "2001:db8::1"}, + ] + + assert dig(host, domains) + + +def test_records_NS(host): + """ """ + domains = [ + # NS records lookup + { + "domain": "acme-inc.local", + "type": "NS", + "result": "ns1.acme-inc.local.,ns2.acme-inc.local.", + }, + {"domain": "cm.local", "type": "NS", "result": "dns.cm.local."}, + ] + + assert dig(host, domains) + + +def test_records_MX(host): + """ """ + domains = [ + # MX records lookup + { + "domain": "acme-inc.local", + "type": "MX", + "result": "10 mail001.acme-inc.local.,20 mail002.acme-inc.local.", + }, + ] + + assert dig(host, domains) + + +def test_records_SRV(host): + """ """ + domains = [ + # Service records lookup + { + "domain": "_ldap._tcp.acme-inc.local", + "type": "SRV", + "result": "0 100 88 srv010.acme-inc.local.", + }, + ] + + assert dig(host, domains) + + +def test_records_TXT(host): + """ """ + domains = [ + # TXT records lookup + {"domain": "acme-inc.local", "type": "TXT", "result": '"more text","some text"'}, + ] + + assert dig(host, domains) diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/bind/molecule/default/converge.yml b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/bind/molecule/default/converge.yml new file mode 100644 index 0000000..d7c4032 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/bind/molecule/default/converge.yml @@ -0,0 +1,12 @@ +--- + +- name: converge + hosts: instance + any_errors_fatal: false + gather_facts: true + + environment: + NETRC: '' + + roles: + - role: bodsch.dns.bind diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/bind/molecule/default/group_vars/all/vars.yml b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/bind/molecule/default/group_vars/all/vars.yml new file mode 100644 index 0000000..c81cf5b --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/bind/molecule/default/group_vars/all/vars.yml @@ -0,0 +1,3 @@ +--- + +... 
diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/bind/molecule/default/molecule.yml b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/bind/molecule/default/molecule.yml
new file mode 100644
index 0000000..fda92e3
--- /dev/null
+++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/bind/molecule/default/molecule.yml
@@ -0,0 +1,55 @@
+---
+
+role_name_check: 1
+
+dependency:
+  name: galaxy
+
+driver:
+  name: docker
+
+platforms:
+  - name: instance
+    image: "ghcr.io/bodsch/docker-ansible/ansible-${DISTRIBUTION:-debian:12}"
+    command: ${MOLECULE_DOCKER_COMMAND:-""}
+    docker_host: "${DOCKER_HOST:-unix:///run/docker.sock}"
+    privileged: true
+    pre_build_image: true
+    cgroupns_mode: host
+    volumes:
+      - /sys/fs/cgroup:/sys/fs/cgroup:rw
+      - /var/lib/containerd
+    capabilities:
+      - SYS_ADMIN
+    tmpfs:
+      - /run
+      - /tmp
+
+provisioner:
+  name: ansible
+  ansible_args:
+    - --diff
+    - -v
+  config_options:
+    defaults:
+      deprecation_warnings: true
+      callback_result_format: yaml
+      callbacks_enabled: profile_tasks
+      gathering: smart
+      fact_caching: jsonfile
+      fact_caching_timeout: 8640
+      fact_caching_connection: ansible_facts
+
+scenario:
+  test_sequence:
+    - destroy
+    - dependency
+    - syntax
+    - create
+    - prepare
+    - converge
+    - verify
+    - destroy
+
+verifier:
+  name: testinfra
diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/bind/molecule/default/prepare.yml b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/bind/molecule/default/prepare.yml
new file mode 100644
index 0000000..4c14c51
--- /dev/null
+++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/bind/molecule/default/prepare.yml
@@ -0,0 +1,57 @@
+---
+
+- name: information
+  hosts: all
+  gather_facts: true
+
+  pre_tasks:
+    - name: arch- / artixlinux
+      when:
+        - ansible_facts.distribution | lower == 'archlinux' or
+          ansible_facts.os_family | lower == 'artix linux'
+      block:
+        - name: update pacman system
+          ansible.builtin.command: |
+            pacman --refresh --sync --sysupgrade --noconfirm
+          register: pacman
+          changed_when: pacman.rc != 0
+          failed_when: pacman.rc != 0
+
+        - name: create depends service
+          ansible.builtin.copy:
+            mode: 0755
+            dest: /etc/init.d/net
+            content: |
+              #!/usr/bin/openrc-run
+              true
+          when:
+            - ansible_facts.os_family | lower == 'artix linux'
+
+    - name: make sure python3-apt is installed (only debian based)
+      ansible.builtin.package:
+        name:
+          - python3-apt
+        state: present
+      when:
+        - ansible_facts.os_family | lower == 'debian'
+
+    - name: update package cache
+      become: true
+      ansible.builtin.package:
+        update_cache: true
+
+    - name: install dependencies
+      ansible.builtin.package:
+        name:
+          - iproute2
+        state: present
+
+    - name: environment
+      ansible.builtin.debug:
+        msg:
+          - "os family : {{ ansible_facts.distribution }} ({{ ansible_facts.os_family }})"
+          - "distribution version : {{ ansible_facts.distribution_major_version }}"
+          - "ansible version : {{ ansible_version.full }}"
+          - "python version : {{ ansible_facts.python.version.major }}.{{ ansible_facts.python.version.minor }}"
+
+...
diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/bind/molecule/default/tests/test_default.py b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/bind/molecule/default/tests/test_default.py new file mode 100644 index 0000000..3006563 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/bind/molecule/default/tests/test_default.py @@ -0,0 +1,175 @@ +# coding: utf-8 +from __future__ import unicode_literals + +import json +import os + +import pytest +import testinfra.utils.ansible_runner +from ansible.parsing.dataloader import DataLoader +from ansible.template import Templar + +HOST = "all" + +testinfra_hosts = testinfra.utils.ansible_runner.AnsibleRunner( + os.environ["MOLECULE_INVENTORY_FILE"] +).get_hosts(HOST) + + +def pp_json(json_thing, sort=True, indents=2): + if type(json_thing) is str: + print(json.dumps(json.loads(json_thing), sort_keys=sort, indent=indents)) + else: + print(json.dumps(json_thing, sort_keys=sort, indent=indents)) + return None + + +def base_directory(): + """ """ + cwd = os.getcwd() + + if "group_vars" in os.listdir(cwd): + directory = "../.." + molecule_directory = "." + else: + directory = "." + molecule_directory = f"molecule/{os.environ.get('MOLECULE_SCENARIO_NAME')}" + + return directory, molecule_directory + + +def read_ansible_yaml(file_name, role_name): + """ """ + read_file = None + + for e in ["yml", "yaml"]: + test_file = f"{file_name}.{e}" + if os.path.isfile(test_file): + read_file = test_file + break + + return f"file={read_file} name={role_name}" + + +@pytest.fixture() +def get_vars(host): + """ + parse ansible variables + - defaults/main.yml + - vars/main.yml + - vars/${DISTRIBUTION}.yaml + - molecule/${MOLECULE_SCENARIO_NAME}/group_vars/all/vars.yml + """ + base_dir, molecule_dir = base_directory() + distribution = host.system_info.distribution + operation_system = None + + if distribution in ["debian", "ubuntu"]: + operation_system = "debian" + elif distribution in ["redhat", "ol", "centos", "rocky", "almalinux"]: + operation_system = "redhat" + elif distribution in ["arch", "artix"]: + operation_system = f"{distribution}linux" + + # print(" -> {} / {}".format(distribution, os)) + # print(" -> {}".format(base_dir)) + + file_defaults = read_ansible_yaml(f"{base_dir}/defaults/main", "role_defaults") + file_vars = read_ansible_yaml(f"{base_dir}/vars/main", "role_vars") + file_distibution = read_ansible_yaml( + f"{base_dir}/vars/{operation_system}", "role_distibution" + ) + file_molecule = read_ansible_yaml( + f"{molecule_dir}/group_vars/all/vars", "test_vars" + ) + # file_host_molecule = read_ansible_yaml("{}/host_vars/{}/vars".format(base_dir, HOST), "host_vars") + + defaults_vars = ( + host.ansible("include_vars", file_defaults) + .get("ansible_facts") + .get("role_defaults") + ) + vars_vars = ( + host.ansible("include_vars", file_vars).get("ansible_facts").get("role_vars") + ) + distibution_vars = ( + host.ansible("include_vars", file_distibution) + .get("ansible_facts") + .get("role_distibution") + ) + molecule_vars = ( + host.ansible("include_vars", file_molecule) + .get("ansible_facts") + .get("test_vars") + ) + # host_vars = host.ansible("include_vars", file_host_molecule).get("ansible_facts").get("host_vars") + + ansible_vars = defaults_vars + ansible_vars.update(vars_vars) + ansible_vars.update(distibution_vars) + ansible_vars.update(molecule_vars) + # ansible_vars.update(host_vars) + + templar = Templar(loader=DataLoader(), variables=ansible_vars) + result = 
templar.template(ansible_vars, fail_on_undefined=False) + + return result + + +def test_directories(host, get_vars): + """ + used config directory + """ + pp_json(get_vars) + + directories = [ + get_vars.get("bind_dir"), + get_vars.get("bind_conf_dir"), + get_vars.get("bind_zone_dir"), + get_vars.get("bind_secondary_dir"), + ] + + for dirs in directories: + d = host.file(dirs) + assert d.is_directory + + +def test_files(host, get_vars): + """ + created config files + """ + files = [get_vars.get("bind_config", "/etc/bind/named.conf")] + + for _file in files: + f = host.file(_file) + assert f.is_file + + +def test_service_running_and_enabled(host, get_vars): + """ + running service + """ + service_name = get_vars.get("bind_service", "bind9") + + service = host.service(service_name) + assert service.is_running + assert service.is_enabled + + +def test_listening_socket(host, get_vars): + """ """ + listening = host.socket.get_listening_sockets() + + for i in listening: + print(i) + + bind_port = "53" + bind_address = "127.0.0.1" + + listen = [] + listen.append(f"tcp://{bind_address}:{bind_port}") + listen.append(f"udp://{bind_address}:{bind_port}") + + for spec in listen: + socket = host.socket(spec) + assert socket.is_listening diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/bind/molecule/require-systemd-unit/converge.yml b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/bind/molecule/require-systemd-unit/converge.yml new file mode 100644 index 0000000..5780dc5 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/bind/molecule/require-systemd-unit/converge.yml @@ -0,0 +1,10 @@ +--- +- name: converge + hosts: all + any_errors_fatal: false + + environment: + NETRC: '' + + roles: + - role: bodsch.dns.bind diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/bind/molecule/require-systemd-unit/group_vars/all/vars.yml b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/bind/molecule/require-systemd-unit/group_vars/all/vars.yml new file mode 100644 index 0000000..5c3d38c --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/bind/molecule/require-systemd-unit/group_vars/all/vars.yml @@ -0,0 +1,28 @@ +--- + +dnsmasq_systemd: + unit: + after: + - ssh.service + wants: [] + requires: [] + +dnsmasq_addresses: + - address: 192.168.202.133 + name: node1.test.com + - address: 127.0.0.1 + name: youtubei.googleapis.com + +dnsmasq_interfaces: + listen_address: "127.0.0.1" + +dnsmasq_server: + nameservers: + - 192.168.0.1 + - 46.182.19.48 + - 9.9.9.9 + forwarders: + - domain: matrix.lan + address: 127.0.0.1#5353 + +... 
diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/bind/molecule/require-systemd-unit/molecule.yml b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/bind/molecule/require-systemd-unit/molecule.yml
new file mode 100644
index 0000000..e8abaaa
--- /dev/null
+++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/bind/molecule/require-systemd-unit/molecule.yml
@@ -0,0 +1,55 @@
+---
+
+role_name_check: 1
+
+dependency:
+  name: galaxy
+
+driver:
+  name: docker
+
+platforms:
+  - name: instance
+    image: "ghcr.io/bodsch/docker-ansible/ansible-${DISTRIBUTION:-debian:12}"
+    command: ${MOLECULE_DOCKER_COMMAND:-""}
+    docker_host: "${DOCKER_HOST:-unix:///run/docker.sock}"
+    privileged: true
+    pre_build_image: true
+    cgroupns_mode: host
+    volumes:
+      - /sys/fs/cgroup:/sys/fs/cgroup:rw
+      - /var/lib/containerd
+    capabilities:
+      - SYS_ADMIN
+    tmpfs:
+      - /run
+      - /tmp
+
+provisioner:
+  name: ansible
+  ansible_args:
+    - --diff
+    # - -v
+  config_options:
+    defaults:
+      deprecation_warnings: true
+      callback_result_format: yaml
+      callbacks_enabled: profile_tasks
+      # remote_tmp: /tmp/ansible-${USER}
+      fact_caching: "jsonfile"
+      fact_caching_timeout: 8640
+      fact_caching_connection: ".facts"
+
+scenario:
+  test_sequence:
+    - destroy
+    - dependency
+    - syntax
+    - create
+    - prepare
+    - converge
+    - verify
+    - destroy
+
+verifier:
+  name: testinfra
diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/bind/molecule/require-systemd-unit/prepare.yml b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/bind/molecule/require-systemd-unit/prepare.yml
new file mode 100644
index 0000000..c235be1
--- /dev/null
+++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/bind/molecule/require-systemd-unit/prepare.yml
@@ -0,0 +1,51 @@
+---
+
+- name: information
+  hosts: all
+  gather_facts: true
+
+  pre_tasks:
+    - name: arch- / artixlinux
+      when:
+        - ansible_facts.distribution | lower == 'archlinux' or
+          ansible_facts.os_family | lower == 'artix linux'
+      block:
+        - name: update pacman system
+          ansible.builtin.command: |
+            pacman --refresh --sync --sysupgrade --noconfirm
+          register: pacman
+          changed_when: pacman.rc != 0
+          failed_when: pacman.rc != 0
+
+        - name: create depends service
+          ansible.builtin.copy:
+            mode: 0755
+            dest: /etc/init.d/net
+            content: |
+              #!/usr/bin/openrc-run
+              true
+          when:
+            - ansible_facts.os_family | lower == 'artix linux'
+
+    - name: make sure python3-apt is installed (only debian based)
+      ansible.builtin.package:
+        name:
+          - python3-apt
+        state: present
+      when:
+        - ansible_facts.os_family | lower == 'debian'
+
+    - name: update package cache
+      become: true
+      ansible.builtin.package:
+        update_cache: true
+
+    - name: environment
+      ansible.builtin.debug:
+        msg:
+          - "os family : {{ ansible_facts.distribution }} ({{ ansible_facts.os_family }})"
+          - "distribution version : {{ ansible_facts.distribution_major_version }}"
+          - "ansible version : {{ ansible_version.full }}"
+          - "python version : {{ ansible_facts.python.version.major }}.{{ ansible_facts.python.version.minor }}"
+
+...
diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/bind/molecule/require-systemd-unit/tests/test_default.py b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/bind/molecule/require-systemd-unit/tests/test_default.py new file mode 100644 index 0000000..a26a7dd --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/bind/molecule/require-systemd-unit/tests/test_default.py @@ -0,0 +1,198 @@ +# coding: utf-8 +from __future__ import unicode_literals + +import json +import os + +import pytest +import testinfra.utils.ansible_runner +from ansible.parsing.dataloader import DataLoader +from ansible.template import Templar + +HOST = "instance" + +testinfra_hosts = testinfra.utils.ansible_runner.AnsibleRunner( + os.environ["MOLECULE_INVENTORY_FILE"] +).get_hosts(HOST) + + +def pp_json(json_thing, sort=True, indents=2): + if type(json_thing) is str: + print(json.dumps(json.loads(json_thing), sort_keys=sort, indent=indents)) + else: + print(json.dumps(json_thing, sort_keys=sort, indent=indents)) + return None + + +def base_directory(): + """ """ + cwd = os.getcwd() + + if "group_vars" in os.listdir(cwd): + directory = "../.." + molecule_directory = "." + else: + directory = "." + molecule_directory = f"molecule/{os.environ.get('MOLECULE_SCENARIO_NAME')}" + + return directory, molecule_directory + + +def read_ansible_yaml(file_name, role_name): + """ """ + read_file = None + + for e in ["yml", "yaml"]: + test_file = f"{file_name}.{e}" + if os.path.isfile(test_file): + read_file = test_file + break + + return f"file={read_file} name={role_name}" + + +@pytest.fixture() +def get_vars(host): + """ + parse ansible variables + - defaults/main.yml + - vars/main.yml + - vars/${DISTRIBUTION}.yaml + - molecule/${MOLECULE_SCENARIO_NAME}/group_vars/all/vars.yml + """ + base_dir, molecule_dir = base_directory() + distribution = host.system_info.distribution + operation_system = None + + if distribution in ["debian", "ubuntu"]: + operation_system = "debian" + elif distribution in ["redhat", "ol", "centos", "rocky", "almalinux"]: + operation_system = "redhat" + elif distribution in ["arch", "artix"]: + operation_system = f"{distribution}linux" + + # print(" -> {} / {}".format(distribution, os)) + # print(" -> {}".format(base_dir)) + + file_defaults = read_ansible_yaml(f"{base_dir}/defaults/main", "role_defaults") + file_vars = read_ansible_yaml(f"{base_dir}/vars/main", "role_vars") + file_distibution = read_ansible_yaml( + f"{base_dir}/vars/{operation_system}", "role_distibution" + ) + file_molecule = read_ansible_yaml( + f"{molecule_dir}/group_vars/all/vars", "test_vars" + ) + # file_host_molecule = read_ansible_yaml("{}/host_vars/{}/vars".format(base_dir, HOST), "host_vars") + + defaults_vars = ( + host.ansible("include_vars", file_defaults) + .get("ansible_facts") + .get("role_defaults") + ) + vars_vars = ( + host.ansible("include_vars", file_vars).get("ansible_facts").get("role_vars") + ) + distibution_vars = ( + host.ansible("include_vars", file_distibution) + .get("ansible_facts") + .get("role_distibution") + ) + molecule_vars = ( + host.ansible("include_vars", file_molecule) + .get("ansible_facts") + .get("test_vars") + ) + # host_vars = host.ansible("include_vars", file_host_molecule).get("ansible_facts").get("host_vars") + + ansible_vars = defaults_vars + ansible_vars.update(vars_vars) + ansible_vars.update(distibution_vars) + ansible_vars.update(molecule_vars) + # ansible_vars.update(host_vars) + + templar = 
Templar(loader=DataLoader(), variables=ansible_vars) + result = templar.template(ansible_vars, fail_on_undefined=False) + + return result + + +def test_directories(host, get_vars): + """ + used config directory + """ + pp_json(get_vars) + + directories = [ + get_vars.get("dnsmasq_config_directory"), + ] + + for dirs in directories: + d = host.file(dirs) + assert d.is_directory + + +def test_files(host, get_vars): + """ + created config files + """ + files = [get_vars.get("dnsmasq_config_file")] + + for _file in files: + f = host.file(_file) + assert f.is_file + + +# def test_user(host, get_vars): +# """ +# created user +# """ +# shell = '/bin/false' +# +# distribution = host.system_info.distribution +# +# if distribution in ['centos', 'redhat', 'ol']: +# shell = "/sbin/nologin" +# elif distribution == "arch": +# shell = "/usr/bin/nologin" +# +# user_name = "mysql" +# u = host.user(user_name) +# g = host.group(user_name) +# +# assert g.exists +# assert u.exists +# assert user_name in u.groups +# assert u.shell == shell + + +def test_service_running_and_enabled(host, get_vars): + """ + running service + """ + service_name = "dnsmasq" + + service = host.service(service_name) + assert service.is_running + assert service.is_enabled + + +def test_listening_socket(host, get_vars): + """ """ + listening = host.socket.get_listening_sockets() + + for i in listening: + print(i) + + _conf_global = get_vars.get("dnsmasq_global", {}) + _conf_interfaces = get_vars.get("dnsmasq_interfaces", {}) + + bind_port = _conf_global.get("port", 53) + bind_address = _conf_interfaces.get("listen_address", "0.0.0.0") + + listen = [] + listen.append(f"tcp://{bind_address}:{bind_port}") + listen.append(f"udp://{bind_address}:{bind_port}") + + for spec in listen: + socket = host.socket(spec) + assert socket.is_listening diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/bind/molecule/upgrade/converge.yml b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/bind/molecule/upgrade/converge.yml new file mode 100644 index 0000000..a6b17f1 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/bind/molecule/upgrade/converge.yml @@ -0,0 +1,22 @@ +--- + +- name: converge + hosts: all + any_errors_fatal: false + + # environment: + # NETRC: '' + # + # vars: + # ansible_facts.all_ipv4_addresses: [] + # bind_dnssec_enable: false + # bind_check_names: 'master ignore' + # bind_query_log: + # name: querylog + # file: data/query.log + # versions: 200 + # size: "10m" + + roles: + # - role: bertvv.bind + - role: bodsch.dns.bind diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/bind/molecule/upgrade/group_vars/all/vars.yml b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/bind/molecule/upgrade/group_vars/all/vars.yml new file mode 100644 index 0000000..c4bb450 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/bind/molecule/upgrade/group_vars/all/vars.yml @@ -0,0 +1,89 @@ +--- + +# ----------------------------------------------------------------------------------------- + +bind_statistics_channels: true +bind_statistics_allow: + - any + +# bind_zone_dir: /var/local/named-zones +bind_zone_file_mode: '0660' + +# bind_allow_query: +# - any + +bind_listen_ipv4: + - any + +bind_listen_ipv6: + - any + +# bind_acls: +# - name: acl1 +# match_list: +# - 10.11.0.0/16 +# +# bind_forwarders: +# - '8.8.8.8' +# - '8.8.4.4' + +bind_recursion: true +bind_dns64: true +bind_query_log: 
'data/query.log' +bind_check_names: 'master ignore' +bind_zone_minimum_ttl: "2D" +bind_zone_ttl: "2W" +bind_zone_time_to_refresh: "2D" +bind_zone_time_to_retry: "2H" +bind_zone_time_to_expire: "2W" +bind_statistics_host: "{{ ansible_facts.default_ipv4.address }}" + +# ----------------------------------------------------------------------------------------- + +bind_statistics: + channels: true + allow: + - any + host: "{{ ansible_facts.default_ipv4.address }}" + +bind_allow_query: + - any + +bind_listen: + ipv4: + - port: 53 + addresses: + - "127.0.0.1" + - "{{ ansible_facts.default_ipv4.address }}" + - port: 5353 + addresses: + - "127.0.1.1" + ipv6: + - port: 53 + addresses: + - "{{ ansible_facts.default_ipv4.address }}" + +bind_acls: + - name: acl1 + match_list: + - 10.11.0/24 + +bind_forwarders: + - '9.9.9.9' + - '141.1.1.1' + +bind_recursion: true +bind_dns64: true + +bind_check_names: + - master + - ignore + +bind_zone_soa: + minimum_ttl: "32H" + ttl: "48H" + time_to_refresh: "24H" + time_to_retry: "2H" + time_to_expire: "2D" + +... diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/bind/molecule/upgrade/host_vars/ns1.yml b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/bind/molecule/upgrade/host_vars/ns1.yml new file mode 100644 index 0000000..5f32ea0 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/bind/molecule/upgrade/host_vars/ns1.yml @@ -0,0 +1,98 @@ +--- + +# Primary Configuration + +bind_zones: + # Primary server for domain example.com (use zone type autodetection) + - name: 'example.com' + primaries: + - 10.11.0.1 + networks: + - '192.0.2' + ipv6_networks: + - '2001:db9::/48' + name_servers: + - ns1.acme-inc.local. + - ns2.acme-inc.local. + hostmaster_email: admin + hosts: + - name: srv001 + ip: 192.0.2.1 + ipv6: '2001:db9::1' + aliases: + - www + - name: srv002 + ip: 192.0.2.2 + ipv6: '2001:db9::2' + - name: mail001 + ip: 192.0.2.10 + ipv6: '2001:db9::3' + mail_servers: + - name: mail001 + preference: 10 + + # Primary server for domain acme-inc.local (specify zone type explicitly) + - name: 'acme-inc.local' + type: primary + primaries: + - 10.11.0.1 + networks: + - '10.11' + ipv6_networks: + - '2001:db8::/48' + name_servers: + - ns1 + - ns2 + hosts: + - name: ns1 + ip: 10.11.0.1 + - name: ns2 + ip: 10.11.0.2 + - name: srv001 + ip: 10.11.1.1 + ipv6: 2001:db8::1 + aliases: + - www + - name: srv002 + ip: 10.11.1.2 + ipv6: 2001:db8::2 + aliases: + - mysql + - name: mail001 + ip: 10.11.2.1 + ipv6: 2001:db8::d:1 + aliases: + - smtp + - mail-in + - name: mail002 + ip: 10.11.2.2 + ipv6: 2001:db8::d:2 + - name: mail003 + ip: 10.11.2.3 + ipv6: 2001:db8::d:3 + aliases: + - imap + - mail-out + - name: srv010 + ip: 10.11.0.10 + - name: srv011 + ip: 10.11.0.11 + - name: srv012 + ip: 10.11.0.12 + mail_servers: + - name: mail001 + preference: 10 + - name: mail002 + preference: 20 + services: + - name: _ldap._tcp + weight: 100 + port: 88 + target: srv010 + text: + - name: _kerberos + text: KERBEROS.ACME-INC.COM + - name: '@' + text: + - 'some text' + - 'more text' diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/bind/molecule/upgrade/host_vars/ns2.yml b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/bind/molecule/upgrade/host_vars/ns2.yml new file mode 100644 index 0000000..d587c8a --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/bind/molecule/upgrade/host_vars/ns2.yml @@ -0,0 +1,21 @@ +--- +# Secondary Configuration + 
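+# For orientation, a rough sketch of the zone statement such a secondary
+# definition should end up as in named.conf (assumed; zone file name and
+# layout depend on the role's templates):
+#
+#   zone "example.com" IN {
+#     type secondary;
+#     primaries { 10.11.0.1; };
+#     file "secondary/example.com";
+#   };
+#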
+bind_zones: + # Secondary server for domain example.com (use zone type autodetection) + - name: 'example.com' + primaries: + - 10.11.0.1 + networks: + - '192.0.2' + ipv6_networks: + - '2001:db9::/48' + # Secondary server for domain acme-inc.local (specify zone type explicitly) + - name: 'acme-inc.local' + type: secondary + primaries: + - 10.11.0.1 + networks: + - '10.11' + ipv6_networks: + - '2001:db8::/48' diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/bind/molecule/upgrade/host_vars/ns3.yml b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/bind/molecule/upgrade/host_vars/ns3.yml new file mode 100644 index 0000000..f782ca1 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/bind/molecule/upgrade/host_vars/ns3.yml @@ -0,0 +1,27 @@ +--- +# Forwarder Configuration + +bind_dnssec: + enable: false + validation: no + +bind_zones: + # Forwarder for domain example.com (use zone type autodetection) + - name: 'example.com' + forwarders: + - 10.11.0.1 + - 10.11.0.2 + networks: + - '192.0.2' + ipv6_networks: + - '2001:db9::/48' + # Forwarder server for domain acme-inc.local (specify zone type explicitly) + - name: 'acme-inc.local' + type: forward + forwarders: + - 10.11.0.1 + - 10.11.0.2 + networks: + - '10.11' + ipv6_networks: + - '2001:db8::/48' diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/bind/molecule/upgrade/molecule.yml b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/bind/molecule/upgrade/molecule.yml new file mode 100644 index 0000000..407494f --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/bind/molecule/upgrade/molecule.yml @@ -0,0 +1,116 @@ +--- + +role_name_check: 1 + +dependency: + name: galaxy + +driver: + name: docker + +platforms: + - name: ns1 + image: "ghcr.io/bodsch/docker-ansible/ansible-${DISTRIBUTION:-debian:12}" + command: ${MOLECULE_DOCKER_COMMAND:-""} + docker_host: "${DOCKER_HOST:-unix:///run/docker.sock}" + privileged: true + pre_build_image: true + cgroupns_mode: host + volumes: + - /sys/fs/cgroup:/sys/fs/cgroup:rw + - /var/lib/containerd + capabilities: + - SYS_ADMIN + tmpfs: + - /run + - /tmp + tty: true + environment: + container: docker + groups: + - dns + docker_networks: + - name: bind + ipam_config: + - subnet: "10.11.0.0/24" + gateway: "10.11.0.254" + networks: + - name: bind + ipv4_address: "10.11.0.1" + + - name: ns2 + image: "ghcr.io/bodsch/docker-ansible/ansible-${DISTRIBUTION:-debian:12}" + command: ${MOLECULE_DOCKER_COMMAND:-""} + docker_host: "${DOCKER_HOST:-unix:///run/docker.sock}" + privileged: true + pre_build_image: true + cgroupns_mode: host + volumes: + - /sys/fs/cgroup:/sys/fs/cgroup:rw + - /var/lib/containerd + capabilities: + - SYS_ADMIN + tmpfs: + - /run + - /tmp + tty: true + environment: + container: docker + groups: + - dns + networks: + - name: bind + ipv4_address: "10.11.0.2" + + - name: ns3 + image: "ghcr.io/bodsch/docker-ansible/ansible-${DISTRIBUTION:-debian:12}" + command: ${MOLECULE_DOCKER_COMMAND:-""} + docker_host: "${DOCKER_HOST:-unix:///run/docker.sock}" + privileged: true + pre_build_image: true + cgroupns_mode: host + volumes: + - /sys/fs/cgroup:/sys/fs/cgroup:rw + - /var/lib/containerd + capabilities: + - SYS_ADMIN + tmpfs: + - /run + - /tmp + tty: true + environment: + container: docker + groups: + - dns + networks: + - name: bind + ipv4_address: "10.11.0.3" + +provisioner: + name: ansible + ansible_facts.args: + - --diff + - -v + config_options: + 
defaults: + deprecation_warnings: true + callback_result_format: yaml + callbacks_enabled: profile_tasks + gathering: smart + fact_caching: jsonfile + fact_caching_timeout: 8640 + fact_caching_connection: ansible_facts + +scenario: + test_sequence: + - destroy + - dependency + - syntax + - create + - prepare + - converge + - verify + - destroy + +verifier: + name: testinfra diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/bind/molecule/upgrade/prepare.yml b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/bind/molecule/upgrade/prepare.yml new file mode 100644 index 0000000..6605484 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/bind/molecule/upgrade/prepare.yml @@ -0,0 +1,75 @@ +--- + +- name: information + hosts: all + gather_facts: true + + pre_tasks: + - name: arch- / artixlinux + when: + - ansible_facts.distribution | lower == 'archlinux' or + ansible_facts.os_family | lower == 'artix linux' + block: + - name: update pacman system + ansible.builtin.command: | + pacman --refresh --sync --sysupgrade --noconfirm + register: pacman + changed_when: pacman.rc != 0 + failed_when: pacman.rc != 0 + + - name: create depends service + ansible.builtin.copy: + mode: 0755 + dest: /etc/init.d/net + content: | + #!/usr/bin/openrc-run + true + when: + - ansible_facts.os_family | lower == 'artix linux' + + - name: make sure python3-apt is installed (only debian based) + ansible.builtin.package: + name: + - python3-apt + state: present + when: + - ansible_facts.os_family | lower == 'debian' + + - name: update package cache + become: true + ansible.builtin.package: + update_cache: true + + - name: install dependencies + ansible.builtin.package: + name: + - iproute2 + state: present + + - name: environment + ansible.builtin.debug: + msg: + - "os family : {{ ansible_facts.distribution }} ({{ ansible_facts.os_family }})" + - "distribution version : {{ ansible_facts.distribution_major_version }}" + - "ansible version : {{ ansible_version.full }}" + - "python version : {{ ansible_facts.python.version.major }}.{{ ansible_facts.python.version.minor }}" + +- name: install bertvv.bind + hosts: all + gather_facts: true + + vars: + ansible_all_ipv4_addresses: [] + bind_dnssec_enable: false + bind_check_names: 'master ignore' + + pre_tasks: + - name: do facts module to get latest information + ansible.builtin.setup: + gather_subset: + - 'all' + + roles: + - role: bertvv.bind + +... 
diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/bind/molecule/upgrade/requirements.yml b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/bind/molecule/upgrade/requirements.yml new file mode 100644 index 0000000..b4c12ff --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/bind/molecule/upgrade/requirements.yml @@ -0,0 +1,4 @@ +--- + +- name: bertvv.bind + src: https://github.com/bertvv/ansible-role-bind.git diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/bind/molecule/upgrade/tests/test_default.py b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/bind/molecule/upgrade/tests/test_default.py new file mode 100644 index 0000000..3006563 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/bind/molecule/upgrade/tests/test_default.py @@ -0,0 +1,175 @@ +# coding: utf-8 +from __future__ import unicode_literals + +import json +import os + +import pytest +import testinfra.utils.ansible_runner +from ansible.parsing.dataloader import DataLoader +from ansible.template import Templar + +HOST = "all" + +testinfra_hosts = testinfra.utils.ansible_runner.AnsibleRunner( + os.environ["MOLECULE_INVENTORY_FILE"] +).get_hosts(HOST) + + +def pp_json(json_thing, sort=True, indents=2): + if type(json_thing) is str: + print(json.dumps(json.loads(json_thing), sort_keys=sort, indent=indents)) + else: + print(json.dumps(json_thing, sort_keys=sort, indent=indents)) + return None + + +def base_directory(): + """ """ + cwd = os.getcwd() + + if "group_vars" in os.listdir(cwd): + directory = "../.." + molecule_directory = "." + else: + directory = "." + molecule_directory = f"molecule/{os.environ.get('MOLECULE_SCENARIO_NAME')}" + + return directory, molecule_directory + + +def read_ansible_yaml(file_name, role_name): + """ """ + read_file = None + + for e in ["yml", "yaml"]: + test_file = f"{file_name}.{e}" + if os.path.isfile(test_file): + read_file = test_file + break + + return f"file={read_file} name={role_name}" + + +@pytest.fixture() +def get_vars(host): + """ + parse ansible variables + - defaults/main.yml + - vars/main.yml + - vars/${DISTRIBUTION}.yaml + - molecule/${MOLECULE_SCENARIO_NAME}/group_vars/all/vars.yml + """ + base_dir, molecule_dir = base_directory() + distribution = host.system_info.distribution + operation_system = None + + if distribution in ["debian", "ubuntu"]: + operation_system = "debian" + elif distribution in ["redhat", "ol", "centos", "rocky", "almalinux"]: + operation_system = "redhat" + elif distribution in ["arch", "artix"]: + operation_system = f"{distribution}linux" + + # print(" -> {} / {}".format(distribution, os)) + # print(" -> {}".format(base_dir)) + + file_defaults = read_ansible_yaml(f"{base_dir}/defaults/main", "role_defaults") + file_vars = read_ansible_yaml(f"{base_dir}/vars/main", "role_vars") + file_distibution = read_ansible_yaml( + f"{base_dir}/vars/{operation_system}", "role_distibution" + ) + file_molecule = read_ansible_yaml( + f"{molecule_dir}/group_vars/all/vars", "test_vars" + ) + # file_host_molecule = read_ansible_yaml("{}/host_vars/{}/vars".format(base_dir, HOST), "host_vars") + + defaults_vars = ( + host.ansible("include_vars", file_defaults) + .get("ansible_facts") + .get("role_defaults") + ) + vars_vars = ( + host.ansible("include_vars", file_vars).get("ansible_facts").get("role_vars") + ) + distibution_vars = ( + host.ansible("include_vars", file_distibution) + .get("ansible_facts") + 
.get("role_distibution") + ) + molecule_vars = ( + host.ansible("include_vars", file_molecule) + .get("ansible_facts") + .get("test_vars") + ) + # host_vars = host.ansible("include_vars", file_host_molecule).get("ansible_facts").get("host_vars") + + ansible_vars = defaults_vars + ansible_vars.update(vars_vars) + ansible_vars.update(distibution_vars) + ansible_vars.update(molecule_vars) + # ansible_vars.update(host_vars) + + templar = Templar(loader=DataLoader(), variables=ansible_vars) + result = templar.template(ansible_vars, fail_on_undefined=False) + + return result + + +def test_directories(host, get_vars): + """ + used config directory + """ + pp_json(get_vars) + + directories = [ + get_vars.get("bind_dir"), + get_vars.get("bind_conf_dir"), + get_vars.get("bind_zone_dir"), + get_vars.get("bind_secondary_dir"), + ] + + for dirs in directories: + d = host.file(dirs) + assert d.is_directory + + +def test_files(host, get_vars): + """ + created config files + """ + files = [get_vars.get("bind_config", "/etc/bind/named.conf")] + + for _file in files: + f = host.file(_file) + assert f.is_file + + +def test_service_running_and_enabled(host, get_vars): + """ + running service + """ + service_name = get_vars.get("bind_service", "bind9") + + service = host.service(service_name) + assert service.is_running + assert service.is_enabled + + +def test_listening_socket(host, get_vars): + """ """ + listening = host.socket.get_listening_sockets() + + for i in listening: + print(i) + + bind_port = "53" + bind_address = "127.0.0.1" + + listen = [] + listen.append(f"tcp://{bind_address}:{bind_port}") + listen.append(f"udp://{bind_address}:{bind_port}") + + for spec in listen: + socket = host.socket(spec) + assert socket.is_listening diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/bind/molecule/upgrade/tests/test_ns1.py b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/bind/molecule/upgrade/tests/test_ns1.py new file mode 100644 index 0000000..865c23f --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/bind/molecule/upgrade/tests/test_ns1.py @@ -0,0 +1,326 @@ +# coding: utf-8 +from __future__ import unicode_literals + +import json +import os + +import pytest +import testinfra.utils.ansible_runner +from ansible.parsing.dataloader import DataLoader +from ansible.template import Templar + +HOST = "ns1" + +testinfra_hosts = testinfra.utils.ansible_runner.AnsibleRunner( + os.environ["MOLECULE_INVENTORY_FILE"] +).get_hosts(HOST) + + +def pp_json(json_thing, sort=True, indents=2): + if type(json_thing) is str: + print(json.dumps(json.loads(json_thing), sort_keys=sort, indent=indents)) + else: + print(json.dumps(json_thing, sort_keys=sort, indent=indents)) + return None + + +def base_directory(): + """ """ + cwd = os.getcwd() + + if "group_vars" in os.listdir(cwd): + directory = "../.." + molecule_directory = "." + else: + directory = "." 
+        molecule_directory = f"molecule/{os.environ.get('MOLECULE_SCENARIO_NAME')}"
+
+    return directory, molecule_directory
+
+
+def read_ansible_yaml(file_name, role_name):
+    """ """
+    read_file = None
+
+    for e in ["yml", "yaml"]:
+        test_file = f"{file_name}.{e}"
+        if os.path.isfile(test_file):
+            read_file = test_file
+            break
+
+    return f"file={read_file} name={role_name}"
+
+
+@pytest.fixture()
+def get_vars(host):
+    """
+    parse ansible variables
+    - defaults/main.yml
+    - vars/main.yml
+    - vars/${DISTRIBUTION}.yaml
+    - molecule/${MOLECULE_SCENARIO_NAME}/group_vars/all/vars.yml
+    """
+    base_dir, molecule_dir = base_directory()
+    distribution = host.system_info.distribution
+    operation_system = None
+
+    if distribution in ["debian", "ubuntu"]:
+        operation_system = "debian"
+    elif distribution in ["redhat", "ol", "centos", "rocky", "almalinux"]:
+        operation_system = "redhat"
+    elif distribution in ["arch", "artix"]:
+        operation_system = f"{distribution}linux"
+
+    # print(" -> {} / {}".format(distribution, os))
+    # print(" -> {}".format(base_dir))
+
+    file_defaults = read_ansible_yaml(f"{base_dir}/defaults/main", "role_defaults")
+    file_vars = read_ansible_yaml(f"{base_dir}/vars/main", "role_vars")
+    file_distibution = read_ansible_yaml(
+        f"{base_dir}/vars/{operation_system}", "role_distibution"
+    )
+    file_molecule = read_ansible_yaml(
+        f"{molecule_dir}/group_vars/all/vars", "test_vars"
+    )
+    # file_host_molecule = read_ansible_yaml("{}/host_vars/{}/vars".format(base_dir, HOST), "host_vars")
+
+    defaults_vars = (
+        host.ansible("include_vars", file_defaults)
+        .get("ansible_facts")
+        .get("role_defaults")
+    )
+    vars_vars = (
+        host.ansible("include_vars", file_vars).get("ansible_facts").get("role_vars")
+    )
+    distibution_vars = (
+        host.ansible("include_vars", file_distibution)
+        .get("ansible_facts")
+        .get("role_distibution")
+    )
+    molecule_vars = (
+        host.ansible("include_vars", file_molecule)
+        .get("ansible_facts")
+        .get("test_vars")
+    )
+    # host_vars = host.ansible("include_vars", file_host_molecule).get("ansible_facts").get("host_vars")
+
+    ansible_vars = defaults_vars
+    ansible_vars.update(vars_vars)
+    ansible_vars.update(distibution_vars)
+    ansible_vars.update(molecule_vars)
+    # ansible_vars.update(host_vars)
+
+    templar = Templar(loader=DataLoader(), variables=ansible_vars)
+    result = templar.template(ansible_vars, fail_on_undefined=False)
+
+    return result
+
+
+def test_directories(host, get_vars):
+    """
+    used config directory
+    """
+    pp_json(get_vars)
+
+    directories = [
+        get_vars.get("bind_dir"),
+        get_vars.get("bind_conf_dir"),
+        get_vars.get("bind_zone_dir"),
+        get_vars.get("bind_secondary_dir"),
+    ]
+
+    for dirs in directories:
+        d = host.file(dirs)
+        assert d.is_directory
+
+
+def test_files(host, get_vars):
+    """
+    created config files
+    """
+    bind_dir = get_vars.get("bind_dir", "/var/cache/bind")
+
+    files = [
+        f"{bind_dir}/0.0.0.0.8.b.d.0.1.0.0.2.ip6.arpa",
+        f"{bind_dir}/11.10.in-addr.arpa",
+        f"{bind_dir}/acme-inc.local",
+    ]
+
+    for _file in files:
+        f = host.file(_file)
+        assert f.is_file
+
+
+def dig(host, domains):
+    """
+    resolve every entry in domains and compare it with the expected result;
+    a single mismatch or failed lookup fails the whole check
+    """
+    local_dns = "@127.0.0.1"
+    results = []
+
+    for d in domains:
+        output_msg = ""
+        domain = d.get("domain")
+        dns_type = d.get("type", "A").upper()
+        result = d.get("result")
+
+        if dns_type == "PTR":
+            dig_type = "-x"
+        else:
+            dig_type = f"-t {dns_type}"
+
+        command = f"dig {dig_type} {domain} {local_dns} +short"
+        # print(f"{command}")
+        cmd = host.run(command)
+
+        if not cmd.succeeded:
+            # a failed dig call must fail the test instead of being ignored
+            results.append(False)
+            continue
+
+        output = cmd.stdout
+        output_arr = sorted(output.splitlines())
+
+        if len(output_arr) == 1:
+            output_msg = output.strip()
+        if len(output_arr) > 1:
+            output_msg = ",".join(output_arr)
+
+        # print(f"[{domain} - {dns_type}] => {output_msg}")
+        # collect one result per record so every entry is verified
+        results.append(output_msg == result)
+
+    return all(results)
+
+
+def test_records_A(host):
+    """ """
+    domains = [
+        {"domain": "ns1.acme-inc.local", "type": "A", "result": "10.11.0.1"},
+        {"domain": "ns2.acme-inc.local", "type": "A", "result": "10.11.0.2"},
+        {"domain": "srv001.acme-inc.local", "type": "A", "result": "10.11.1.1"},
+        {"domain": "srv002.acme-inc.local", "type": "A", "result": "10.11.1.2"},
+        {"domain": "mail001.acme-inc.local", "type": "A", "result": "10.11.2.1"},
+        {"domain": "mail002.acme-inc.local", "type": "A", "result": "10.11.2.2"},
+        {"domain": "mail003.acme-inc.local", "type": "A", "result": "10.11.2.3"},
+        {"domain": "srv010.acme-inc.local", "type": "A", "result": "10.11.0.10"},
+        {"domain": "srv011.acme-inc.local", "type": "A", "result": "10.11.0.11"},
+        {"domain": "srv012.acme-inc.local", "type": "A", "result": "10.11.0.12"},
+    ]
+
+    assert dig(host, domains)
+
+
+def test_records_PTR(host):
+    """ """
+    domains = [
+        # IPv4 Reverse lookups
+        {"domain": "10.11.0.1", "type": "PTR", "result": "ns1.acme-inc.local."},
+        {"domain": "10.11.0.2", "type": "PTR", "result": "ns2.acme-inc.local."},
+        {"domain": "10.11.1.1", "type": "PTR", "result": "srv001.acme-inc.local."},
+        {"domain": "10.11.1.2", "type": "PTR", "result": "srv002.acme-inc.local."},
+        {"domain": "10.11.2.1", "type": "PTR", "result": "mail001.acme-inc.local."},
+        {"domain": "10.11.2.2", "type": "PTR", "result": "mail002.acme-inc.local."},
+        {"domain": "10.11.2.3", "type": "PTR", "result": "mail003.acme-inc.local."},
+        {"domain": "10.11.0.10", "type": "PTR", "result": "srv010.acme-inc.local."},
+        {"domain": "10.11.0.11", "type": "PTR", "result": "srv011.acme-inc.local."},
+        {"domain": "10.11.0.12", "type": "PTR", "result": "srv012.acme-inc.local."},
+        # # IPv6 Reverse lookups
+        {"domain": "2001:db8::1", "type": "PTR", "result": "srv001.acme-inc.local."},
+    ]
+
+    assert dig(host, domains)
+
+
+def test_records_CNAME(host):
+    """ """
+    domains = [
+        # IPv4 Alias lookups
+        {
+            "domain": "www.acme-inc.local",
+            "type": "CNAME",
+            "result": "srv001.acme-inc.local.",
+        },
+        {
+            "domain": "mysql.acme-inc.local",
+            "type": "CNAME",
+            "result": "srv002.acme-inc.local.",
+        },
+        {
+            "domain": "smtp.acme-inc.local",
+            "type": "CNAME",
+            "result": "mail001.acme-inc.local.",
+        },
+        {
+            "domain": "mail-in.acme-inc.local",
+            "type": "CNAME",
+            "result": "mail001.acme-inc.local.",
+        },
+        {
+            "domain": "imap.acme-inc.local",
+            "type": "CNAME",
+            "result": "mail003.acme-inc.local.",
+        },
+        {
+            "domain": "mail-out.acme-inc.local",
+            "type": "CNAME",
+            "result": "mail003.acme-inc.local.",
+        },
+    ]
+
+    assert dig(host, domains)
+
+
+def test_records_AAAA(host):
+    """ """
+    domains = [
+        # IPv6 Forward lookups
+        {"domain": "srv001.acme-inc.local", "type": "AAAA", "result": "2001:db8::1"},
+    ]
+
+    assert dig(host, domains)
+
+
+def test_records_NS(host):
+    """ """
+    domains = [
+        # NS records lookup
+        {
+            "domain": "acme-inc.local",
+            "type": "NS",
+            "result": "ns1.acme-inc.local.,ns2.acme-inc.local.",
+        },
+    ]
+
+    assert dig(host, domains)
+
+
+def test_records_MX(host):
+    """ """
+    domains = [
+        # MX records lookup
+        {
+            "domain": "acme-inc.local",
+            "type": "MX",
+            "result": "10 mail001.acme-inc.local.,20 mail002.acme-inc.local.",
+        },
+    ]
+
+    assert dig(host, domains)
+
+
+def test_records_SRV(host):
+    """ """
+
domains = [ + # Service records lookup + { + "domain": "_ldap._tcp.acme-inc.local", + "type": "SRV", + "result": "0 100 88 srv010.acme-inc.local.", + }, + ] + + assert dig(host, domains) + + +def test_records_TXT(host): + """ """ + domains = [ + # TXT records lookup + {"domain": "acme-inc.local", "type": "TXT", "result": '"more text","some text"'}, + ] + + assert dig(host, domains) diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/bind/molecule/upgrade/tests/test_ns2.py b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/bind/molecule/upgrade/tests/test_ns2.py new file mode 100644 index 0000000..929bc7a --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/bind/molecule/upgrade/tests/test_ns2.py @@ -0,0 +1,151 @@ +# coding: utf-8 +from __future__ import unicode_literals + +import json +import os + +import pytest +import testinfra.utils.ansible_runner +from ansible.parsing.dataloader import DataLoader +from ansible.template import Templar + +HOST = "ns2" + +testinfra_hosts = testinfra.utils.ansible_runner.AnsibleRunner( + os.environ["MOLECULE_INVENTORY_FILE"] +).get_hosts(HOST) + + +def pp_json(json_thing, sort=True, indents=2): + if type(json_thing) is str: + print(json.dumps(json.loads(json_thing), sort_keys=sort, indent=indents)) + else: + print(json.dumps(json_thing, sort_keys=sort, indent=indents)) + return None + + +def base_directory(): + """ """ + cwd = os.getcwd() + + if "group_vars" in os.listdir(cwd): + directory = "../.." + molecule_directory = "." + else: + directory = "." + molecule_directory = f"molecule/{os.environ.get('MOLECULE_SCENARIO_NAME')}" + + return directory, molecule_directory + + +def read_ansible_yaml(file_name, role_name): + """ """ + read_file = None + + for e in ["yml", "yaml"]: + test_file = f"{file_name}.{e}" + if os.path.isfile(test_file): + read_file = test_file + break + + return f"file={read_file} name={role_name}" + + +@pytest.fixture() +def get_vars(host): + """ + parse ansible variables + - defaults/main.yml + - vars/main.yml + - vars/${DISTRIBUTION}.yaml + - molecule/${MOLECULE_SCENARIO_NAME}/group_vars/all/vars.yml + """ + base_dir, molecule_dir = base_directory() + distribution = host.system_info.distribution + operation_system = None + + if distribution in ["debian", "ubuntu"]: + operation_system = "debian" + elif distribution in ["redhat", "ol", "centos", "rocky", "almalinux"]: + operation_system = "redhat" + elif distribution in ["arch", "artix"]: + operation_system = f"{distribution}linux" + + # print(" -> {} / {}".format(distribution, os)) + # print(" -> {}".format(base_dir)) + + file_defaults = read_ansible_yaml(f"{base_dir}/defaults/main", "role_defaults") + file_vars = read_ansible_yaml(f"{base_dir}/vars/main", "role_vars") + file_distibution = read_ansible_yaml( + f"{base_dir}/vars/{operation_system}", "role_distibution" + ) + file_molecule = read_ansible_yaml( + f"{molecule_dir}/group_vars/all/vars", "test_vars" + ) + # file_host_molecule = read_ansible_yaml("{}/host_vars/{}/vars".format(base_dir, HOST), "host_vars") + + defaults_vars = ( + host.ansible("include_vars", file_defaults) + .get("ansible_facts") + .get("role_defaults") + ) + vars_vars = ( + host.ansible("include_vars", file_vars).get("ansible_facts").get("role_vars") + ) + distibution_vars = ( + host.ansible("include_vars", file_distibution) + .get("ansible_facts") + .get("role_distibution") + ) + molecule_vars = ( + host.ansible("include_vars", file_molecule) + .get("ansible_facts") + 
.get("test_vars") + ) + # host_vars = host.ansible("include_vars", file_host_molecule).get("ansible_facts").get("host_vars") + + ansible_vars = defaults_vars + ansible_vars.update(vars_vars) + ansible_vars.update(distibution_vars) + ansible_vars.update(molecule_vars) + # ansible_vars.update(host_vars) + + templar = Templar(loader=DataLoader(), variables=ansible_vars) + result = templar.template(ansible_vars, fail_on_undefined=False) + + return result + + +def test_directories(host, get_vars): + """ + used config directory + """ + pp_json(get_vars) + + directories = [ + get_vars.get("bind_dir"), + get_vars.get("bind_conf_dir"), + get_vars.get("bind_zone_dir"), + get_vars.get("bind_secondary_dir"), + ] + + for dirs in directories: + d = host.file(dirs) + assert d.is_directory + + +def test_files(host, get_vars): + """ + created config files + """ + bind_dir = get_vars.get("bind_secondary_dir", "/var/cache/bind/secondary") + + files = [ + f"{bind_dir}/0.0.0.0.8.b.d.0.1.0.0.2.ip6.arpa", + f"{bind_dir}/11.10.in-addr.arpa", + f"{bind_dir}/acme-inc.local", + ] + + for _file in files: + f = host.file(_file) + assert f.is_file diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/bind/molecule/upgrade/tests/test_ns3.py b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/bind/molecule/upgrade/tests/test_ns3.py new file mode 100644 index 0000000..19da03c --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/bind/molecule/upgrade/tests/test_ns3.py @@ -0,0 +1,151 @@ +# coding: utf-8 +from __future__ import unicode_literals + +import json +import os + +import pytest +import testinfra.utils.ansible_runner +from ansible.parsing.dataloader import DataLoader +from ansible.template import Templar + +HOST = "ns3" + +testinfra_hosts = testinfra.utils.ansible_runner.AnsibleRunner( + os.environ["MOLECULE_INVENTORY_FILE"] +).get_hosts(HOST) + + +def pp_json(json_thing, sort=True, indents=2): + if type(json_thing) is str: + print(json.dumps(json.loads(json_thing), sort_keys=sort, indent=indents)) + else: + print(json.dumps(json_thing, sort_keys=sort, indent=indents)) + return None + + +def base_directory(): + """ """ + cwd = os.getcwd() + + if "group_vars" in os.listdir(cwd): + directory = "../.." + molecule_directory = "." + else: + directory = "." 
+ molecule_directory = f"molecule/{os.environ.get('MOLECULE_SCENARIO_NAME')}" + + return directory, molecule_directory + + +def read_ansible_yaml(file_name, role_name): + """ """ + read_file = None + + for e in ["yml", "yaml"]: + test_file = f"{file_name}.{e}" + if os.path.isfile(test_file): + read_file = test_file + break + + return f"file={read_file} name={role_name}" + + +@pytest.fixture() +def get_vars(host): + """ + parse ansible variables + - defaults/main.yml + - vars/main.yml + - vars/${DISTRIBUTION}.yaml + - molecule/${MOLECULE_SCENARIO_NAME}/group_vars/all/vars.yml + """ + base_dir, molecule_dir = base_directory() + distribution = host.system_info.distribution + operation_system = None + + if distribution in ["debian", "ubuntu"]: + operation_system = "debian" + elif distribution in ["redhat", "ol", "centos", "rocky", "almalinux"]: + operation_system = "redhat" + elif distribution in ["arch", "artix"]: + operation_system = f"{distribution}linux" + + # print(" -> {} / {}".format(distribution, os)) + # print(" -> {}".format(base_dir)) + + file_defaults = read_ansible_yaml(f"{base_dir}/defaults/main", "role_defaults") + file_vars = read_ansible_yaml(f"{base_dir}/vars/main", "role_vars") + file_distibution = read_ansible_yaml( + f"{base_dir}/vars/{operation_system}", "role_distibution" + ) + file_molecule = read_ansible_yaml( + f"{molecule_dir}/group_vars/all/vars", "test_vars" + ) + # file_host_molecule = read_ansible_yaml("{}/host_vars/{}/vars".format(base_dir, HOST), "host_vars") + + defaults_vars = ( + host.ansible("include_vars", file_defaults) + .get("ansible_facts") + .get("role_defaults") + ) + vars_vars = ( + host.ansible("include_vars", file_vars).get("ansible_facts").get("role_vars") + ) + distibution_vars = ( + host.ansible("include_vars", file_distibution) + .get("ansible_facts") + .get("role_distibution") + ) + molecule_vars = ( + host.ansible("include_vars", file_molecule) + .get("ansible_facts") + .get("test_vars") + ) + # host_vars = host.ansible("include_vars", file_host_molecule).get("ansible_facts").get("host_vars") + + ansible_vars = defaults_vars + ansible_vars.update(vars_vars) + ansible_vars.update(distibution_vars) + ansible_vars.update(molecule_vars) + # ansible_vars.update(host_vars) + + templar = Templar(loader=DataLoader(), variables=ansible_vars) + result = templar.template(ansible_vars, fail_on_undefined=False) + + return result + + +def test_directories(host, get_vars): + """ + used config directory + """ + pp_json(get_vars) + + directories = [ + get_vars.get("bind_dir"), + get_vars.get("bind_conf_dir"), + get_vars.get("bind_zone_dir"), + get_vars.get("bind_secondary_dir"), + ] + + for dirs in directories: + d = host.file(dirs) + assert d.is_directory + + +# def test_files(host, get_vars): +# """ +# created config files +# """ +# bind_dir = get_vars.get("bind_secondary_dir", "/var/cache/bind/secondary") +# +# files = [ +# f"{bind_dir}/0.0.0.0.8.b.d.0.1.0.0.2.ip6.arpa", +# f"{bind_dir}/11.10.in-addr.arpa", +# f"{bind_dir}/acme-inc.local" +# ] +# +# for _file in files: +# f = host.file(_file) +# assert f.is_file diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/bind/molecule/zone-transfer/converge.yml b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/bind/molecule/zone-transfer/converge.yml new file mode 100644 index 0000000..c7131a2 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/bind/molecule/zone-transfer/converge.yml @@ -0,0 +1,12 @@ +--- + +- name: converge + 
hosts: all + any_errors_fatal: true + gather_facts: true + + environment: + NETRC: '' + + roles: + - role: bodsch.dns.bind diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/bind/molecule/zone-transfer/group_vars/all/vars.yml b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/bind/molecule/zone-transfer/group_vars/all/vars.yml new file mode 100644 index 0000000..17e782f --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/bind/molecule/zone-transfer/group_vars/all/vars.yml @@ -0,0 +1,242 @@ +--- + +bind_statistics: + channels: true + allow: + - any + host: "127.0.0.1" + +bind_allow_query: + - any + +bind_dns_keys: + - name: primary_key + algorithm: hmac-sha256 + secret: "azertyAZERTY123456" + - name: primary-dns + algorithm: hmac-sha256 + secret: "Rz6sUQHdTG7MMv3w608TZYig" + +bind_listen: + ipv4: + - port: 53 + addresses: + - "127.0.0.1" + - "10.11.0.1" + - port: 5353 + addresses: + - "127.0.1.1" + # ipv6: + # - port: 53 + # addresses: + # - "{{ ansible_facts.default_ipv6.address }}" + +bind_acls: + - name: acl1 + match_list: + - 10.11.0.0/24 + +bind_forwarders: + - '9.9.9.9' + - '141.1.1.1' + +bind_recursion: true +bind_dns64: true + +bind_check_names: + - master + - ignore + +bind_logging: + enable: true + channels: + - channel: general + file: "data/general.log" + versions: 3 + size: 10M + print_time: true # true | false + print_category: true + print_severity: true + severity: dynamic # critical | error | warning | notice | info | debug [level] | dynamic + - channel: query + file: "data/query.log" + versions: 5 + size: 10M + print_time: "" # true | false + severity: debug2 # + - channel: dnssec + file: "data/dnssec.log" + versions: 5 + size: 10M + print_time: "" # true | false + severity: info # + - channel: notify + file: "data/notify.log" + versions: 5 + size: 10M + print_time: "" # true | false + severity: info # + - channel: transfers + file: "data/transfers.log" + versions: 5 + size: 10M + print_time: "" # true | false + severity: info # + - channel: slog + syslog: security # kern | user | mail | daemon | auth | syslog | lpr | + # news | uucp | cron | authpriv | ftp | + # local0 | local1 | local2 | local3 | + # local4 | local5 | local6 | local7 + print_time: "" # true | false + severity: info # + categories: + "xfer-out": + - transfers + - slog + "xfer-in": + - transfers + - slog + notify: + - notify + "lame-servers": + - general + config: + - general + default: + - general + security: + - general + - slog + dnssec: + - dnssec + queries: + - query + +bind_zone_soa: + minimum_ttl: "32H" + ttl: "48H" + time_to_refresh: "24H" + time_to_retry: "2H" + time_to_expire: "2D" + +bind_zones: + - name: 'acme-inc.local' + type: primary + create_forward_zones: true + create_reverse_zones: true + #primaries: + # - 10.11.0.1 + networks: + - '10.11.0' + ipv6_networks: + - '2001:db8::/48' + name_servers: + - ns1 + - ns2 + # + allow_updates: + - "10.0.1.2" + - 'key "external-dns"' + allow_transfers: + - 'key "external-dns"' + + hosts: + - name: ns1 + ip: 10.11.0.1 + - name: ns2 + ip: 10.11.0.2 + - name: srv001 + ip: 10.11.1.1 + ipv6: 2001:db8::1 + aliases: + - www + - name: srv002 + ip: 10.11.1.2 + ipv6: 2001:db8::2 + aliases: + - mysql + - name: mail001 + ip: 10.11.2.1 + ipv6: 2001:db8::d:1 + aliases: + - smtp + - mail-in + - name: mail002 + ip: 10.11.2.2 + ipv6: 2001:db8::d:2 + - name: mail003 + ip: 10.11.2.3 + ipv6: 2001:db8::d:3 + aliases: + - imap + - mail-out + - name: srv010 + ip: 10.11.0.10 + - name: srv011 + 
ip: 10.11.0.11
+      - name: srv012
+        ip: 10.11.0.12
+    mail_servers:
+      - name: mail001
+        preference: 10
+      - name: mail002
+        preference: 20
+    services:
+      - name: _ldap._tcp
+        weight: 100
+        port: 88
+        target: srv010
+    text:
+      - name: _kerberos
+        text: KERBEROS.ACME-INC.COM
+      - name: '@'
+        text:
+          - 'some text'
+          - 'more text'
+
+  - name: cm.local
+    # type: primary                 # default: primary [primary, secondary, forward]
+    # create_forward_zones: true
+    # create_reverse_zones: true    # Skip creation of reverse zones
+    primaries:
+      - "{{ ansible_facts.default_ipv4.address }}"    # Primary server(s) for this zone
+    name_servers:
+      - 'dns'
+    networks:
+      - '192.168.124'
+    hosts:
+      - name: '@'
+        name_servers: 'dns.cm.local.'
+        ip: "{{ ansible_facts.default_ipv4.address }}"
+      - name: dns
+        ip: "{{ ansible_facts.default_ipv4.address }}"
+      - name: cms
+        ip: 192.168.124.21
+        aliases:
+          - content-management-server
+      - name: mls
+        ip: 192.168.124.30
+        aliases:
+          - master-live-server
+      - name: rls-01
+        ip: 192.168.124.35
+        aliases:
+          - replication-live-server-01
+
+  # - name: matrix.local
+  #   type: primary
+  #   create_reverse_zones: true    # Skip creation of reverse zones
+  #   primaries:
+  #     - "{{ ansible_facts.default_ipv4.address }}"    # Primary server(s) for this zone
+  #   name_servers:
+  #     - 'dns'
+  #   networks:
+  #     - '192.168.111'
+  #   ipv6_networks:
+  #     - '2001:0db8:85a3::8a2e:0370:7334/48'
+  #   hosts:
+  #     - name: '@'
+  #       name_servers: 'dns.cm.local.'
+  #       ip: "{{ ansible_facts.default_ipv4.address }}"
+
+...
diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/bind/molecule/zone-transfer/molecule.yml b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/bind/molecule/zone-transfer/molecule.yml
new file mode 100644
index 0000000..0437c1f
--- /dev/null
+++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/bind/molecule/zone-transfer/molecule.yml
@@ -0,0 +1,68 @@
+---
+
+role_name_check: 1
+
+dependency:
+  name: galaxy
+
+driver:
+  name: docker
+
+platforms:
+  - name: instance
+    image: "ghcr.io/bodsch/docker-ansible/ansible-${DISTRIBUTION:-debian:12}"
+    command: ${MOLECULE_DOCKER_COMMAND:-""}
+    docker_host: "${DOCKER_HOST:-unix:///run/docker.sock}"
+    privileged: true
+    pre_build_image: true
+    cgroupns_mode: host
+    volumes:
+      - /sys/fs/cgroup:/sys/fs/cgroup:rw
+      - /var/lib/containerd
+    capabilities:
+      - SYS_ADMIN
+    tmpfs:
+      - /run
+      - /tmp
+    tty: true
+    environment:
+      container: docker
+    groups:
+      - dns
+    docker_networks:
+      - name: bind
+        ipam_config:
+          - subnet: "10.11.0.0/24"
+            gateway: "10.11.0.254"
+    networks:
+      - name: bind
+        ipv4_address: "10.11.0.1"
+
+provisioner:
+  name: ansible
+  ansible_args:
+    - --diff
+    - -v
+  config_options:
+    defaults:
+      deprecation_warnings: true
+      callback_result_format: yaml
+      callbacks_enabled: profile_tasks
+      gathering: smart
+      fact_caching: jsonfile
+      fact_caching_timeout: 8640
+      fact_caching_connection: ansible_facts
+
+scenario:
+  test_sequence:
+    - destroy
+    - dependency
+    - syntax
+    - create
+    - prepare
+    - converge
+    - verify
+    - destroy
+
+verifier:
+  name: testinfra
diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/bind/molecule/zone-transfer/prepare.yml b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/bind/molecule/zone-transfer/prepare.yml
new file mode 100644
index 0000000..34b7c5e
--- /dev/null
+++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/bind/molecule/zone-transfer/prepare.yml
@@ -0,0 +1,63 @@
+---
+
+- name: information
+  hosts: all
+
gather_facts: true + + pre_tasks: + - name: arch- / artixlinux + when: + - ansible_facts.distribution | lower == 'archlinux' or + ansible_facts.os_family | lower == 'artix linux' + block: + - name: update pacman system + become: true + ansible.builtin.command: + argv: + - pacman + - --refresh + - --sync + - --sysupgrade + - --noconfirm + register: pacman + changed_when: pacman.rc != 0 + failed_when: pacman.rc != 0 + + - name: create depends service + ansible.builtin.copy: + mode: 0755 + dest: /etc/init.d/net + content: | + #!/usr/bin/openrc-run + true + when: + - ansible_facts.os_family | lower == 'artix linux' + + - name: make sure python3-apt is installed (only debian based) + ansible.builtin.package: + name: + - python3-apt + state: present + when: + - ansible_facts.os_family | lower == 'debian' + + - name: update package cache + become: true + ansible.builtin.package: + update_cache: true + + - name: install dependencies + ansible.builtin.package: + name: + - iproute2 + state: present + + - name: environment + ansible.builtin.debug: + msg: + - "os family : {{ ansible_facts.distribution }} ({{ ansible_facts.os_family }})" + - "distribution version : {{ ansible_facts.distribution_major_version }}" + - "ansible version : {{ ansible_version.full }}" + - "python version : {{ ansible_facts.python.version.major }}.{{ ansible_facts.python.version.minor }}" + +... diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/bind/molecule/zone-transfer/tests/test_default.py b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/bind/molecule/zone-transfer/tests/test_default.py new file mode 100644 index 0000000..ca39f68 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/bind/molecule/zone-transfer/tests/test_default.py @@ -0,0 +1,378 @@ +# coding: utf-8 +from __future__ import unicode_literals + +import json +import os + +import pytest +import testinfra.utils.ansible_runner +from ansible.parsing.dataloader import DataLoader +from ansible.template import Templar + +HOST = "all" + +testinfra_hosts = testinfra.utils.ansible_runner.AnsibleRunner( + os.environ["MOLECULE_INVENTORY_FILE"] +).get_hosts(HOST) + + +def pp_json(json_thing, sort=True, indents=2): + if type(json_thing) is str: + print(json.dumps(json.loads(json_thing), sort_keys=sort, indent=indents)) + else: + print(json.dumps(json_thing, sort_keys=sort, indent=indents)) + return None + + +def base_directory(): + """ """ + cwd = os.getcwd() + + if "group_vars" in os.listdir(cwd): + directory = "../.." + molecule_directory = "." + else: + directory = "." 
+        molecule_directory = f"molecule/{os.environ.get('MOLECULE_SCENARIO_NAME')}"
+
+    return directory, molecule_directory
+
+
+def read_ansible_yaml(file_name, role_name):
+    """ """
+    read_file = None
+
+    for e in ["yml", "yaml"]:
+        test_file = f"{file_name}.{e}"
+        if os.path.isfile(test_file):
+            read_file = test_file
+            break
+
+    return f"file={read_file} name={role_name}"
+
+
+def dig(host, domains):
+    """
+    resolve every entry in domains and compare it with the expected result;
+    a single mismatch or failed lookup fails the whole check
+    """
+    local_dns = "@127.0.0.1"
+    results = []
+
+    for d in domains:
+        output_msg = ""
+        domain = d.get("domain")
+        dns_type = d.get("type", "A").upper()
+        result = d.get("result")
+
+        if dns_type == "PTR":
+            dig_type = "-x"
+        else:
+            dig_type = f"-t {dns_type}"
+
+        command = f"dig {dig_type} {domain} {local_dns} +short"
+        print(f"{command}")
+        cmd = host.run(command)
+
+        if not cmd.succeeded:
+            # a failed dig call must fail the test instead of being ignored
+            results.append(False)
+            continue
+
+        output = cmd.stdout
+        output_arr = sorted(output.splitlines())
+
+        if len(output_arr) == 1:
+            output_msg = output.strip()
+        if len(output_arr) > 1:
+            output_msg = ",".join(output_arr)
+
+        print(f"[{domain} - {dns_type}] => {output_msg}")
+        # collect one result per record so every entry is verified
+        results.append(output_msg == result)
+
+    return all(results)
+
+
+@pytest.fixture()
+def get_vars(host):
+    """
+    parse ansible variables
+    - defaults/main.yml
+    - vars/main.yml
+    - vars/${DISTRIBUTION}.yaml
+    - molecule/${MOLECULE_SCENARIO_NAME}/group_vars/all/vars.yml
+    """
+    base_dir, molecule_dir = base_directory()
+    distribution = host.system_info.distribution
+    operation_system = None
+
+    if distribution in ["debian", "ubuntu"]:
+        operation_system = "debian"
+    elif distribution in ["redhat", "ol", "centos", "rocky", "almalinux"]:
+        operation_system = "redhat"
+    elif distribution in ["arch", "artix"]:
+        operation_system = f"{distribution}linux"
+
+    # print(" -> {} / {}".format(distribution, os))
+    # print(" -> {}".format(base_dir))
+
+    file_defaults = read_ansible_yaml(f"{base_dir}/defaults/main", "role_defaults")
+    file_vars = read_ansible_yaml(f"{base_dir}/vars/main", "role_vars")
+    file_distibution = read_ansible_yaml(
+        f"{base_dir}/vars/{operation_system}", "role_distibution"
+    )
+    file_molecule = read_ansible_yaml(
+        f"{molecule_dir}/group_vars/all/vars", "test_vars"
+    )
+    # file_host_molecule = read_ansible_yaml("{}/host_vars/{}/vars".format(base_dir, HOST), "host_vars")
+
+    defaults_vars = (
+        host.ansible("include_vars", file_defaults)
+        .get("ansible_facts")
+        .get("role_defaults")
+    )
+    vars_vars = (
+        host.ansible("include_vars", file_vars).get("ansible_facts").get("role_vars")
+    )
+    distibution_vars = (
+        host.ansible("include_vars", file_distibution)
+        .get("ansible_facts")
+        .get("role_distibution")
+    )
+    molecule_vars = (
+        host.ansible("include_vars", file_molecule)
+        .get("ansible_facts")
+        .get("test_vars")
+    )
+    # host_vars = host.ansible("include_vars", file_host_molecule).get("ansible_facts").get("host_vars")
+
+    ansible_vars = defaults_vars
+    ansible_vars.update(vars_vars)
+    ansible_vars.update(distibution_vars)
+    ansible_vars.update(molecule_vars)
+    # ansible_vars.update(host_vars)
+
+    templar = Templar(loader=DataLoader(), variables=ansible_vars)
+    result = templar.template(ansible_vars, fail_on_undefined=False)
+
+    return result
+
+
+def test_directories(host, get_vars):
+    """
+    used config directory
+    """
+    pp_json(get_vars)
+
+    directories = [
+        get_vars.get("bind_dir"),
+        get_vars.get("bind_conf_dir"),
+        get_vars.get("bind_zone_dir"),
+        get_vars.get("bind_secondary_dir"),
+    ]
+
+    for dirs in directories:
+        d = host.file(dirs)
+        assert d.is_directory
+
+
+def test_files(host, get_vars):
+    """
+    created config files
+    """
+    files = [get_vars.get("bind_config", "/etc/bind/named.conf")]
+
+    for _file in files:
+        f = host.file(_file)
+        assert f.is_file
+
+
+def test_cache_files(host, get_vars):
+    """
+    created zone files
+    """
+    bind_dir = get_vars.get("bind_dir", "/var/cache/bind")
+
+    files = [
+        f"{bind_dir}/0.0.0.0.8.b.d.0.1.0.0.2.ip6.arpa",
+        f"{bind_dir}/0.11.10.in-addr.arpa",
+        f"{bind_dir}/acme-inc.local",
+        f"{bind_dir}/124.168.192.in-addr.arpa",
+        f"{bind_dir}/cm.local",
+    ]
+
+    for _file in files:
+        f = host.file(_file)
+        assert f.is_file
+
+
+def test_service_running_and_enabled(host, get_vars):
+    """
+    running service
+    """
+    service_name = get_vars.get("bind_service", "bind9")
+
+    service = host.service(service_name)
+    assert service.is_running
+    assert service.is_enabled
+
+
+def test_listening_socket(host, get_vars):
+    """ """
+    listening = host.socket.get_listening_sockets()
+
+    for i in listening:
+        print(i)
+
+    bind_port = "53"
+    bind_address = "127.0.0.1"
+
+    listen = []
+    listen.append(f"tcp://{bind_address}:{bind_port}")
+    listen.append(f"udp://{bind_address}:{bind_port}")
+
+    for spec in listen:
+        socket = host.socket(spec)
+        assert socket.is_listening
+
+
+def test_records_A(host):
+    """ """
+    domains = [
+        {"domain": "ns1.acme-inc.local", "type": "A", "result": "10.11.0.1"},
+        {"domain": "ns2.acme-inc.local", "type": "A", "result": "10.11.0.2"},
+        {"domain": "srv001.acme-inc.local", "type": "A", "result": "10.11.1.1"},
+        {"domain": "srv002.acme-inc.local", "type": "A", "result": "10.11.1.2"},
+        {"domain": "mail001.acme-inc.local", "type": "A", "result": "10.11.2.1"},
+        {"domain": "mail002.acme-inc.local", "type": "A", "result": "10.11.2.2"},
+        {"domain": "mail003.acme-inc.local", "type": "A", "result": "10.11.2.3"},
+        {"domain": "srv010.acme-inc.local", "type": "A", "result": "10.11.0.10"},
+        {"domain": "srv011.acme-inc.local", "type": "A", "result": "10.11.0.11"},
+        {"domain": "srv012.acme-inc.local", "type": "A", "result": "10.11.0.12"},
+        #
+        {"domain": "cms.cm.local", "type": "A", "result": "192.168.124.21"},
+    ]
+
+    assert dig(host, domains)
+
+
+def test_records_PTR(host):
+    """ """
+    domains = [
+        # IPv4 Reverse lookups
+        {"domain": "10.11.0.1", "type": "PTR", "result": "ns1.acme-inc.local."},
+        {"domain": "10.11.0.2", "type": "PTR", "result": "ns2.acme-inc.local."},
+        {"domain": "10.11.1.1", "type": "PTR", "result": "srv001.acme-inc.local."},
+        {"domain": "10.11.1.2", "type": "PTR", "result": "srv002.acme-inc.local."},
+        {"domain": "10.11.2.1", "type": "PTR", "result": "mail001.acme-inc.local."},
+        {"domain": "10.11.2.2", "type": "PTR", "result": "mail002.acme-inc.local."},
+        {"domain": "10.11.2.3", "type": "PTR", "result": "mail003.acme-inc.local."},
+        {"domain": "10.11.0.10", "type": "PTR", "result": "srv010.acme-inc.local."},
+        {"domain": "10.11.0.11", "type": "PTR", "result": "srv011.acme-inc.local."},
+        {"domain": "10.11.0.12", "type": "PTR", "result": "srv012.acme-inc.local."},
+        # # IPv6 Reverse lookups
+        {"domain": "2001:db8::1", "type": "PTR", "result": "srv001.acme-inc.local."},
+        #
+        # dig -x returns the name with a trailing dot
+        {"domain": "192.168.124.21", "type": "PTR", "result": "cms.cm.local."},
+    ]
+
+    assert dig(host, domains)
+
+
+def test_records_CNAME(host):
+    """ """
+    domains = [
+        # IPv4 Alias lookups
+        {
+            "domain": "www.acme-inc.local",
+            "type": "CNAME",
+            "result": "srv001.acme-inc.local.",
+        },
+        {
+            "domain": "mysql.acme-inc.local",
+            "type": "CNAME",
+            "result": "srv002.acme-inc.local.",
+        },
+        {
+            "domain": "smtp.acme-inc.local",
+            "type": "CNAME",
+            "result": "mail001.acme-inc.local.",
+        },
+        {
+            "domain": "mail-in.acme-inc.local",
+            "type": "CNAME",
+            "result": "mail001.acme-inc.local.",
+        },
+        {
+            "domain": "imap.acme-inc.local",
+            "type": "CNAME",
+            "result": "mail003.acme-inc.local.",
+        },
+        {
+            "domain": "mail-out.acme-inc.local",
+            "type": "CNAME",
+            "result": "mail003.acme-inc.local.",
+        },
+        #
+        # the alias defined for cms in the group_vars; a CNAME query for
+        # cms.cm.local itself returns nothing, since cms is an A record
+        {
+            "domain": "content-management-server.cm.local",
+            "type": "CNAME",
+            "result": "cms.cm.local.",
+        },
+    ]
+
+    assert dig(host, domains)
+
+
+def test_records_AAAA(host):
+    """ """
+    domains = [
+        # IPv6 Forward lookups
+        {"domain": "srv001.acme-inc.local", "type": "AAAA", "result": "2001:db8::1"},
+    ]
+
+    assert dig(host, domains)
+
+
+def test_records_NS(host):
+    """ """
+    domains = [
+        # NS records lookup
+        {
+            "domain": "acme-inc.local",
+            "type": "NS",
+            "result": "ns1.acme-inc.local.,ns2.acme-inc.local.",
+        },
+        {"domain": "cm.local", "type": "NS", "result": "dns.cm.local."},
+    ]
+
+    assert dig(host, domains)
+
+
+def test_records_MX(host):
+    """ """
+    domains = [
+        # MX records lookup
+        {
+            "domain": "acme-inc.local",
+            "type": "MX",
+            "result": "10 mail001.acme-inc.local.,20 mail002.acme-inc.local.",
+        },
+    ]
+
+    assert dig(host, domains)
+
+
+def test_records_SRV(host):
+    """ """
+    domains = [
+        # Service records lookup
+        {
+            "domain": "_ldap._tcp.acme-inc.local",
+            "type": "SRV",
+            "result": "0 100 88 srv010.acme-inc.local.",
+        },
+    ]
+
+    assert dig(host, domains)
+
+
+def test_records_TXT(host):
+    """ """
+    domains = [
+        # TXT records lookup
+        {"domain": "acme-inc.local", "type": "TXT", "result": '"more text","some text"'},
+    ]
+
+    assert dig(host, domains)
diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/bind/tasks/configure/main.yml b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/bind/tasks/configure/main.yml
new file mode 100644
index 0000000..c4e0853
--- /dev/null
+++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/bind/tasks/configure/main.yml
@@ -0,0 +1,91 @@
+---
+
+- name: do facts module to get latest information
+  ansible.builtin.setup:
+    # gather_subset:
+    #   - 'network'
+    #   - 'all_ipv4_addresses'
+    #   - 'all_ipv6_addresses'
+
+- name: merge bind configuration between defaults and custom
+  ansible.builtin.set_fact:
+    bind_listen: "{{ bind_defaults_listen | combine(bind_listen, recursive=True) }}"
+    bind_statistics: "{{ bind_defaults_statistics | combine(bind_statistics, recursive=True) }}"
+    bind_logging: "{{ bind_defaults_logging | combine(bind_logging, recursive=True) }}"
+    bind_zone_soa: "{{ bind_defaults_zone_soa | combine(bind_zone_soa, recursive=True) }}"
+
+- name: create runtime directories
+  become: true
+  ansible.builtin.file:
+    state: directory
+    path: "{{ item }}"
+    owner: "{{ bind_owner }}"
+    group: "{{ bind_group }}"
+    mode: "0770"
+  loop:
+    - "{{ bind_dir }}/dynamic"
+    - "{{ bind_dir }}/data"
+    - "{{ bind_zone_dir }}"
+  tags:
+    - bind
+
+- name: create directory for cached secondary zones
+  become: true
+  ansible.builtin.file:
+    state: directory
+    path: "{{ bind_secondary_dir }}"
+    owner: "{{ bind_owner }}"
+    group: "{{ bind_group }}"
+    mode: "0770"
+  tags:
+    - bind
+
+- name: create extra config for authenticated XFR request
+  become: true
+  ansible.builtin.template:
+    src: etc/auth_transfer.j2
+    dest: "{{ bind_auth_file }}"
+    owner: root
+    group: "{{ bind_group }}"
+    mode: "0640"
+  when:
+    - bind_dns_keys is defined
+    - bind_dns_keys | length > 0
+  notify:
+    - reload bind
+  tags:
+    - bind
+
+- name: create extra config for authenticated DDNS updates
+  become: true
+
ansible.builtin.template: + src: etc/auth_update.j2 + dest: "{{ bind_auth_update_file }}" + owner: root + group: "{{ bind_group }}" + mode: '0640' + when: + - bind_update_keys is defined + - bind_update_keys | length > 0 + notify: + - reload bind + tags: + - bind + +- name: configure + ansible.builtin.include_tasks: configure/zones.yml + +- name: create main bind config file {{ bind_config }} + become: true + ansible.builtin.template: + src: etc/named.conf.j2 + dest: "{{ bind_config }}" + owner: "{{ bind_owner }}" + group: "{{ bind_group }}" + mode: "0640" + backup: true + validate: 'named-checkconf %s' + notify: + - reload bind + +... diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/bind/tasks/configure/zones.yml b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/bind/tasks/configure/zones.yml new file mode 100644 index 0000000..02e0709 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/bind/tasks/configure/zones.yml @@ -0,0 +1,145 @@ +--- + +- name: do facts module to get latest information + ansible.builtin.setup: + gather_subset: + - 'network' + - 'all_ipv4_addresses' + - 'all_ipv6_addresses' + +- name: define a list of all host IP addresses + ansible.builtin.set_fact: + host_all_addresses: "{{ ansible_facts.all_ipv4_addresses | union(ansible_facts.all_ipv6_addresses) }}" + tags: bind + +- name: read zone data + bodsch.dns.bind_zone_data: + zone_directory: "{{ bind_zone_dir }}" + zone_data: "{{ bind_zones }}" + register: bind_zone_data + tags: bind + +- name: Create forward lookup zone file + become: true + ansible.builtin.template: + src: etc/bind_zone.j2 + dest: "{{ bind_zone_dir }}/{{ item.name | bodsch.dns.zone_filename(bind_zone_data) }}" + owner: "{{ bind_owner }}" + group: "{{ bind_group }}" + mode: "{{ bind_zone_file_mode }}" + backup: true + validate: 'named-checkzone -d {{ item.name }} %s' + loop: "{{ bind_zones }}" + loop_control: + label: "{{ item.name }}" + when: > + (item.name | bodsch.dns.zone_filename(bind_zone_data) | string | length > 0) and + (item.state | default('present') == 'present') and + (item.create_forward_zones | default('true') | bool) and + ((item.type is defined and item.type == 'primary') or + (item.type is not defined and item.primaries is defined and + (host_all_addresses | intersect(item.primaries) | length > 0))) + # notify: reload bind + tags: bind + +# - name: reverse lookup zone file +# run_once: true +# debug: +# msg: +# - "{{ (item.1 | bodsch.dns.zone_filename(bind_zone_data) | string | length > 0) }}" +# - "{{ (item.state | default('present') == 'present') }}" +# - "{{ (item.create_reverse_zones is not defined or item.create_reverse_zones) }}" +# - "{{ ( +# (item[0].type is defined and item[0].type == 'primary') or +# (item[0].type is not defined and item[0].primaries is defined and +# (host_all_addresses | intersect(item[0].primaries) | length > 0)) +# ) +# }}" +# - "{{ (host_all_addresses | intersect(item[0].primaries)) }}" +# with_subelements: +# - "{{ bind_zones }}" +# - networks +# - flags: +# skip_missing: true +# loop_control: +# label: "{{ item.1 }}" + +- name: Create reverse lookup zone file + become: true + ansible.builtin.template: + src: etc/reverse_zone.j2 + dest: "{{ bind_zone_dir }}/{{ item.1 | bodsch.dns.zone_filename(bind_zone_data) }}" + owner: "{{ bind_owner }}" + group: "{{ bind_group }}" + mode: "{{ bind_zone_file_mode }}" + backup: true + # validate: "named-checkzone {{ ('.'.join(item.1.replace(item.1 + '.','').split('.')[::-1])) 
}}.in-addr.arpa %s" + with_subelements: + - "{{ bind_zones }}" + - networks + - flags: + skip_missing: true + loop_control: + label: "{{ item.1 }}" + when: > + (item.1 | bodsch.dns.zone_filename(bind_zone_data) | string | length > 0) and + (item.state | default('present') == 'present') and + (item.create_reverse_zones | default('true') | bool) and + ( + (item[0].type is defined and item[0].type == 'primary') or + (item[0].type is not defined and item[0].primaries is defined and + (host_all_addresses | intersect(item[0].primaries) | length > 0)) + ) + # notify: reload bind + tags: bind + +# - name: reverse IPv6 lookup zone file +# run_once: true +# debug: +# msg: +# - "{{ (item.1 | bodsch.dns.zone_filename(bind_zone_data) | string | length > 0) }}" +# - "{{ (item.state | default('present') == 'present') }}" +# - "{{ (item.create_reverse_zones is not defined or item.create_reverse_zones) }}" +# - "{{ ( +# (item[0].type is defined and item[0].type == 'primary') or +# (item[0].type is not defined and item[0].primaries is defined and +# (host_all_addresses | intersect(item[0].primaries) | length > 0)) +# ) +# }}" +# - "{{ (host_all_addresses | intersect(item[0].primaries)) }}" +# with_subelements: +# - "{{ bind_zones }}" +# - ipv6_networks +# - flags: +# skip_missing: true +# loop_control: +# label: "{{ item.1 }}" + +- name: Create reverse IPv6 lookup zone file + ansible.builtin.template: + src: etc/reverse_zone_ipv6.j2 + dest: "{{ bind_zone_dir }}/{{ item.1 | bodsch.dns.zone_filename(bind_zone_data) }}" + owner: "{{ bind_owner }}" + group: "{{ bind_group }}" + mode: "{{ bind_zone_file_mode }}" + backup: true + # validate: "named-checkzone {{ (item.1 | ansible.utils.ipaddr('revdns'))[-(9+(item.1 | regex_replace('^.*/','')|int)//2):] }} %s" + become: true + with_subelements: + - "{{ bind_zones }}" + - ipv6_networks + - flags: + skip_missing: true + loop_control: + label: "{{ item.1 }}" + when: > + (item.1 | bodsch.dns.zone_filename(bind_zone_data) | string | length > 0) and + (item.state | default('present') == 'present') and + (item.create_reverse_zones | default('true') | bool) and + ((item[0].type is defined and item[0].type == 'primary') or + (item[0].type is not defined and item[0].primaries is defined and + (host_all_addresses | intersect(item[0].primaries) | length > 0))) + # notify: reload bind + tags: bind + +... diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/bind/tasks/install.yml b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/bind/tasks/install.yml new file mode 100644 index 0000000..bf8df26 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/bind/tasks/install.yml @@ -0,0 +1,25 @@ +--- + +- name: install bind + become: true + ansible.builtin.package: + name: "{{ bind_packages }}" + state: present + tags: + - bind + +- name: detect bind version + become: true + bodsch.dns.bind_version: + register: bind_version + check_mode: false + ignore_errors: true + +- name: create custom fact file + bodsch.core.facts: + name: bind + facts: + full_version: "{{ bind_version.full_version }}" + version: "{{ bind_version.version }}" + +... 
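install.yml persists the detected named version as a local custom fact, and templates elsewhere in the role branch on it; the dnssec snippet further below, for example, only emits dnssec-enable for named older than 9.18.0. A minimal sketch of reading the registered result back (the debug task itself is assumed, not part of the role):

```yaml
# sketch: consuming the bind_version result registered by the task above
- name: show detected bind version
  ansible.builtin.debug:
    msg: "named {{ bind_version.version }} ({{ bind_version.full_version }})"
  when:
    - bind_version.version is defined
```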
diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/bind/tasks/main.yml b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/bind/tasks/main.yml new file mode 100644 index 0000000..30c5ff3 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/bind/tasks/main.yml @@ -0,0 +1,15 @@ +--- + +- name: prepare + ansible.builtin.include_tasks: prepare.yml + +- name: install + ansible.builtin.include_tasks: install.yml + +- name: configure + ansible.builtin.include_tasks: configure/main.yml + +- name: service + ansible.builtin.include_tasks: service.yml + +... diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/bind/tasks/prepare.yml b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/bind/tasks/prepare.yml new file mode 100644 index 0000000..366578c --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/bind/tasks/prepare.yml @@ -0,0 +1,31 @@ +--- + +- name: include OS specific configuration ({{ ansible_facts.distribution }} ({{ ansible_facts.os_family }}) {{ ansible_facts.distribution_major_version }}) + ansible.builtin.include_vars: "{{ lookup('first_found', params) }}" + vars: + params: + paths: + - "vars" + files: + # eg. debian-10 / ubuntu-20.04 / centos-8 / oraclelinux-8 + - "{{ ansible_facts.distribution | lower }}-{{ ansible_facts.distribution_major_version }}.yml" + # eg. archlinux-systemd / archlinux-openrc + - "{{ ansible_facts.distribution | lower }}-{{ ansible_facts.service_mgr | lower }}.yml" + # eg. debian / ubuntu / centos / oraclelinux + - "{{ ansible_facts.distribution | lower }}.yml" + # eg. redhat / debian / archlinux + - "{{ ansible_facts.os_family | lower }}.yml" + - default.yaml + skip: true + +- name: install dependency + ansible.builtin.package: + name: "{{ bind_requirements }}" + state: present + when: + - bind_requirements | default([]) | count > 0 + +- name: get latest system information + ansible.builtin.setup: + +... diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/bind/tasks/service.yml b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/bind/tasks/service.yml new file mode 100644 index 0000000..58ca438 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/bind/tasks/service.yml @@ -0,0 +1,12 @@ +--- + +- name: ensure bind is enabled on boot + become: true + ansible.builtin.service: + name: "{{ bind_service }}" + enabled: true + state: started + tags: + - bind + +... 
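The configure tasks above notify a reload bind handler whose definition does not appear in this diff; a plausible sketch, under the assumption that it simply reloads the service unit named by bind_service:

```yaml
# hypothetical handlers/main.yml entry backing the "reload bind" notifications
- name: reload bind
  become: true
  ansible.builtin.service:
    name: "{{ bind_service }}"
    state: reloaded
```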
diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/bind/templates/etc/auth_transfer.j2 b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/bind/templates/etc/auth_transfer.j2 new file mode 100644 index 0000000..5743c16 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/bind/templates/etc/auth_transfer.j2 @@ -0,0 +1,22 @@ +#jinja2: trim_blocks: True, lstrip_blocks: True +// +// {{ ansible_managed }} + +{% if bind_key_mapping | length > 0 %} + {% for primary in bind_key_mapping.keys() %} +server {{ primary }} { + keys { {{ bind_key_mapping[primary] }}; }; +}; + {% endfor %} + +{% endif %} +server {{ ansible_facts.default_ipv4.address }} { + keys { {% for mykey in bind_dns_keys %} {{ mykey.name }}; {% endfor %} }; +}; + +{% for mykey in bind_dns_keys %} +key {{ mykey.name }} { + algorithm {{ mykey.algorithm }}; + secret "{{ mykey.secret }}"; +}; +{% endfor %} diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/bind/templates/etc/auth_update.j2 b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/bind/templates/etc/auth_update.j2 new file mode 100644 index 0000000..768e011 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/bind/templates/etc/auth_update.j2 @@ -0,0 +1,13 @@ +#jinja2: trim_blocks: True, lstrip_blocks: True +# Authentication definitions to allow DDNS update +# +# Copyright 2025 Buo-ren Lin +# SPDX-License-Identifier: Apache-2.0 +# +# {{ ansible_managed }} +{% for mykey in bind_update_keys %} +key {{ mykey.name }} { + algorithm {{ mykey.algorithm }}; + secret "{{ mykey.secret }}"; +}; +{% endfor %} diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/bind/templates/etc/bind_zone.j2 b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/bind/templates/etc/bind_zone.j2 new file mode 100644 index 0000000..ed671fe --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/bind/templates/etc/bind_zone.j2 @@ -0,0 +1,134 @@ +#jinja2: trim_blocks: True, lstrip_blocks: True +{# + # First create a dict holding the entire zone information and create a hash + # from it, that it can be compared with subsequent task executions. In this + # way the serial will only be updated if there are some content changes. + #} +{% set _data = item | bodsch.dns.forward_zone_data(bind_zone_soa, ansible_facts.hostname) %} +{% set _zone_data = _data.get("forward_zone_data", None) %} +{% set _zone_hash = _data.get("zone_hash", None) %} +{% set _zone = _zone_data['domain'] | bodsch.dns.zone_serial(_zone_hash, bind_zone_data) %} +; {{ ansible_managed }} +; +; Zone file for {{ _zone_data['domain'] }} +; Hash: {{ _zone['hash'] }} {{ _zone['serial'] }} + +$TTL {{ _zone_data['ttl'] }} +$ORIGIN {{ _zone_data['domain'] }}. 
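+{#
+  Example render with the molecule group_vars above: "$TTL 48H" and
+  "$ORIGIN acme-inc.local." followed by the SOA record assembled from
+  bind_zone_soa, with the serial derived from the zone content hash.
+#}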
+ +; soa +@ IN SOA {{ _zone_data['soa_name_server'] | first }} {{ _zone_data['hostmaster_email'] }} ( + {{ (_zone['serial'] | string).ljust(20) }} ; serial + {{ (_zone_data['refresh'] | string).ljust(20) }} ; refresh (1 week) + {{ (_zone_data['retry'] | string).ljust(20) }} ; retry (1 day) + {{ (_zone_data['expire'] | string).ljust(20) }} ; expire (4 weeks) + {{ (_zone_data['minimum'] | string).ljust(20) }} ; minimum (1 day) + ) +; -------------------------------------------------------------------------------------- +; dns +{% if _zone_data['soa_name_server'] | length > 0 %} + {% for ns in _zone_data['soa_name_server'] %} + IN NS {{ ns }} + {% endfor %} +{% endif %} +{% if _zone_data.other_name_servers is defined and + _zone_data.other_name_servers | bodsch.core.type == "list" %} + {% for ns in _zone_data.other_name_servers %} + IN NS {{ ns }} + {% endfor %} +{% endif %} +; -------------------------------------------------------------------------------------- +; mail +{% for mail in _zone_data['mail'] %} +{% if loop.first %}@{% else %} {% endif %} IN MX {{ mail.preference | default('90') }} {{ mail.name }} +{% endfor %} +; -------------------------------------------------------------------------------------- +; delegate +{% if _zone_data['delegate'] | length > 0 %} + {% for host in _zone_data['delegate'] %} +{{ host.zone.ljust(20) }} IN NS {{ host.dns }} + {% endfor %} +{% endif %} +; -------------------------------------------------------------------------------------- +; hosts +{% if _zone_data['hosts'] | length > 0 %} + {% for host in _zone_data['hosts'] %} + {% set ttl = '' %} + {% if host.ttl is defined and host.ttl | string | length > 0 %} + {% set ttl = host.ttl %} + {% endif %} + {% if host.ip is defined %} + {% if host.ip is string %} + {% if "$GENERATE" not in host.name.upper() %} +{{ host.name.ljust(20) }}{{ (ttl | string).rjust(6) }} IN A {{ host.ip }} + {% endif %} + {% if "$GENERATE" in host.name.upper() %} +{{ host.name.ljust(20) }}{{ (ttl | string).rjust(6) }} IN A {{ host.ip }} + {% endif %} + {% else %} + {% for ip in host.ip %} +{{ host.name.ljust(20) }}{{ (ttl | string).rjust(6) }} IN A {{ ip }} + {% endfor %} + {% endif %} + {% endif %} + {% if host.ipv6 is defined %} + {% if host.ipv6 is string %} +{{ host.name.ljust(20) }}{{ (ttl | string).rjust(6) }} IN AAAA {{ host.ipv6 }} + {% else %} + {% for ip6 in host.ipv6 %} +{{ host.name.ljust(20) }}{{ (ttl | string).rjust(6) }} IN AAAA {{ ip6 }} + {% endfor %} + {% endif %} + {% endif %} + {% if host.aliases is defined %} + {% for alias in host.aliases %} + {% if "$GENERATE" not in host.name.upper() %} +{{ (alias.name | default(alias)).ljust(20) }}{{ (ttl | string).rjust(6) }} IN {{ alias.type | default('cname')|upper}} {{ host.name }} + {% endif %} + {% if "$GENERATE" in host.name.upper() %} +{{ alias.ljust(20) }} IN CNAME {{ host.name.rsplit(None, 1)[1] }} + {% endif %} + {% endfor %} + {% endif %} + {% if host.sshfp is defined %} + {% for sshfp in host.sshfp %} +{{ host.name.ljust(20) }} IN SSHFP {{ sshfp }} + {% endfor %} + {% endif %} + {% endfor %} +{% else %} +{{ ansible_facts.hostname.ljust(26) }} IN A {{ ansible_facts.default_ipv4.address }} +{% endif %} +; -------------------------------------------------------------------------------------- +; services +{% for service in _zone_data['services'] %} +{{ service.name.ljust(20) }}{{ (service.ttl | string).rjust(6) if service.ttl is defined else ''.ljust(6) }} IN SRV {{ service.priority | default('0') }} {{ service.weight | default('0') }} {{ service.port }} {{ 
service.target }} +{% endfor %} +; -------------------------------------------------------------------------------------- +; text +{% for text in _zone_data['text'] %} + {% if text.text is string %} +{{ text.name.ljust(20) }} IN TXT "{{ text.text }}" + {% else %} + {% for entry in text.text %} +{{ text.name.ljust(20) }} IN TXT "{{ entry }}" + {% endfor %} + {% endif %} +{% endfor %} +; -------------------------------------------------------------------------------------- +; caa +{% for caa in _zone_data['caa'] %} + {% if caa.text is string %} +{{ caa.name.ljust(20) }} IN CAA {{ caa.text }} + {% else %} + {% for entry in caa.text %} +{{ caa.name.ljust(20) }} IN CAA {{ entry }} + {% endfor %} + {% endif %} +{% endfor %} +; -------------------------------------------------------------------------------------- +; naptr +{% for naptr in _zone_data['naptr'] %} +{{ naptr.name.ljust(20) }} IN NAPTR {{ naptr.order|default('100') }} {{ naptr.pref|default('10') }} "{{ naptr.flags }}" "{{ naptr.service }}" "{{ naptr.regex }}" {{ naptr.replacement }} +{% endfor %} +; -------------------------------------------------------------------------------------- diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/bind/templates/etc/named.conf.d/acl.j2 b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/bind/templates/etc/named.conf.d/acl.j2 new file mode 100644 index 0000000..59559fa --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/bind/templates/etc/named.conf.d/acl.j2 @@ -0,0 +1,8 @@ + +{% for acl in bind_acls %} +acl "{{ acl.name }}" { + {% for match in acl.match_list %} + {{ match }}; + {% endfor %} +}; +{% endfor %} diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/bind/templates/etc/named.conf.d/allow-transfer.j2 b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/bind/templates/etc/named.conf.d/allow-transfer.j2 new file mode 100644 index 0000000..5f88259 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/bind/templates/etc/named.conf.d/allow-transfer.j2 @@ -0,0 +1,13 @@ +{% if bind_acls is defined and + bind_acls | count > 0 %} + + allow-transfer { + {% for acl in bind_acls %} + "{{ acl.name }}"; + {% endfor %} + }; +{% else %} + allow-transfer { + none; + }; +{% endif %} diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/bind/templates/etc/named.conf.d/check-names.j2 b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/bind/templates/etc/named.conf.d/check-names.j2 new file mode 100644 index 0000000..f84e7ef --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/bind/templates/etc/named.conf.d/check-names.j2 @@ -0,0 +1,9 @@ +{% if bind_check_names is defined and + bind_check_names | string | length > 0 %} + + {% set _check_names = bind_check_names %} + {% if bind_check_names | bodsch.core.type == "list" %} + {% set _check_names = bind_check_names | join(' ') %} + {% endif %} + check-names {{ _check_names }}; +{% endif %} diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/bind/templates/etc/named.conf.d/dns64.j2 b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/bind/templates/etc/named.conf.d/dns64.j2 new file mode 100644 index 0000000..9022ce8 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/bind/templates/etc/named.conf.d/dns64.j2 @@ -0,0 +1,8 @@ +{% if bind_dns64 %} + + dns64 
64:ff9b::/96 { + clients { + {{ bind_dns64_clients | join(';\n') | indent(6) }}; + }; + }; +{% endif %} diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/bind/templates/etc/named.conf.d/dnssec.j2 b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/bind/templates/etc/named.conf.d/dnssec.j2 new file mode 100644 index 0000000..364b814 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/bind/templates/etc/named.conf.d/dnssec.j2 @@ -0,0 +1,24 @@ +{% if bind_dnssec is defined and + bind_dnssec | bodsch.core.type == "dict" %} + + {% if bind_dnssec.enable is defined and + bind_dnssec.enable | string | length > 0 and + bind_version.full_version is version('9.18.0', '<') %} + dnssec-enable {{ bind_dnssec.enable | bodsch.core.config_bool(true_as='yes', false_as='no') }}; + {% else %} + /* + NOTE: In version 9.16.0 the dnssec-enable option was made obsolete and in 9.18.0 the option was entirely removed. + dnssec-enable {{ bind_dnssec.enable | bodsch.core.config_bool(true_as='yes', false_as='no') }}; + */ + {% endif %} + {% if bind_dnssec.validation is defined and + bind_dnssec.validation | string | length > 0 %} + {% if bind_dnssec.validation | bodsch.core.type == "string" and + bind_dnssec.validation == "auto" %} + dnssec-validation {{ bind_dnssec.validation }}; + {% endif %} + {% if bind_dnssec.validation | bodsch.core.type == "bool" %} + dnssec-validation {{ bind_dnssec.validation | bodsch.core.config_bool(true_as='yes', false_as='no') }}; + {% endif %} + {% endif %} +{% endif %} diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/bind/templates/etc/named.conf.d/forwarders.j2 b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/bind/templates/etc/named.conf.d/forwarders.j2 new file mode 100644 index 0000000..36fc658 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/bind/templates/etc/named.conf.d/forwarders.j2 @@ -0,0 +1,17 @@ + {% if bind_forwarders is defined and + bind_forwarders | count > 0 %} + + forwarders { + {{ bind_forwarders | join(';\n') | indent(4) }}; + }; + {% endif %} + {% if bind_forward_only is defined and + bind_forward_only | string | length > 0 and + bind_forward_only | bool %} + + forward only; + {% endif %} + + rrset-order { + order {{ bind_rrset_order }}; + }; diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/bind/templates/etc/named.conf.d/includes.j2 b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/bind/templates/etc/named.conf.d/includes.j2 new file mode 100644 index 0000000..9a5b049 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/bind/templates/etc/named.conf.d/includes.j2 @@ -0,0 +1,6 @@ +{% for file in bind_default_zone_files %} +include "{{ file }}"; +{% endfor %} +{% for file in bind_extra_include_files %} +include "{{ file }}"; +{% endfor %} diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/bind/templates/etc/named.conf.d/listen.j2 b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/bind/templates/etc/named.conf.d/listen.j2 new file mode 100644 index 0000000..4382ab3 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/bind/templates/etc/named.conf.d/listen.j2 @@ -0,0 +1,25 @@ +{% if bind_listen is defined and + bind_listen | bodsch.core.type == "dict" %} + + {% if bind_listen.ipv4 is defined and + bind_listen.ipv4 | bodsch.core.type == 
"list" %} + {% for listener in bind_listen.ipv4 %} + {% if listener.port is defined and listener.addresses is defined %} + listen-on port {{ listener.port | default('53') }} { + {{ listener.addresses | join(';\n') | indent(4) }}; + }; + {% endif %} + {% endfor %} + {% endif %} + {% if bind_listen.ipv6 is defined and + bind_listen.ipv6 | bodsch.core.type == "list" %} + + {% for listener in bind_listen.ipv6 %} + {% if listener.port is defined and listener.addresses is defined %} + listen-on-v6 port {{ listener.port | default('53') }} { + {{ listener.addresses | join(';\n') | indent(4) }}; + }; + {% endif %} + {% endfor %} + {% endif %} +{% endif %} diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/bind/templates/etc/named.conf.d/logging.j2 b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/bind/templates/etc/named.conf.d/logging.j2 new file mode 100644 index 0000000..3f69b0b --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/bind/templates/etc/named.conf.d/logging.j2 @@ -0,0 +1,45 @@ +{% if bind_logging is defined %} + {% if bind_logging.enable is defined and + bind_logging.enable | string | length > 0 and + bind_logging.enable %} + {% if bind_logging.channels is defined %} + +logging { + {% for channel in bind_logging.channels %} + channel {{ channel.channel }} { + {% if channel.file is defined%} + file "{{ channel.file }}"; + {% endif %} + {% if channel.syslog is defined%} + syslog {{ channel.syslog }}; + {% endif %} + {% if channel.print_category is defined and + channel.print_category | bodsch.core.type == "bool" %} + print-category yes; + {% endif %} + {% if channel.print_severity is defined and + channel.print_severity | bodsch.core.type == "bool" %} + print-severity yes; + {% endif %} + {% if channel.print_time is defined and + channel.print_time | bodsch.core.type == "bool" %} + print-time {{ channel.print_time | bodsch.core.config_bool(true_as='yes', false_as='no') }}; + {% endif %} + {% if channel.severity is defined and + channel.severity in ["critical","error","warning","notice","info","dynamic"] %} + severity {{ channel.severity }}; + {% endif %} + }; + {% endfor %} + {% if bind_logging.categories is defined and + bind_logging.categories | bodsch.core.type == "dict" and + bind_logging.categories | count > 0 %} + + {% for categories, values in bind_logging.categories.items() %} + category {{ categories }} { {{ values | join('; ') }}; }; + {% endfor %} + {% endif %} +}; + {% endif %} + {% endif %} +{% endif %} diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/bind/templates/etc/named.conf.d/querylog.j2 b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/bind/templates/etc/named.conf.d/querylog.j2 new file mode 100644 index 0000000..050f8f8 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/bind/templates/etc/named.conf.d/querylog.j2 @@ -0,0 +1,8 @@ +{% if bind_logging is defined and + bind_logging | bodsch.core.type == "dict" %} + {% if bind_logging.enable is defined and + bind_logging.enable | string | length > 0 %} + + querylog {{ bind_logging.enable | bodsch.core.config_bool(true_as='yes', false_as='no') }}; + {% endif %} +{% endif %} diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/bind/templates/etc/named.conf.d/recursion.j2 b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/bind/templates/etc/named.conf.d/recursion.j2 new file mode 100644 index 0000000..21911b3 
--- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/bind/templates/etc/named.conf.d/recursion.j2 @@ -0,0 +1,10 @@ + + recursion {{ bind_recursion | bodsch.core.config_bool(true_as='yes', false_as='no') }}; + {% if bind_recursion is defined and + bind_recursion | string | length > 0 and + bind_recursion | bool %} + + allow-recursion { + {{ bind_allow_recursion | join(';\n') | indent(4) }}; + }; + {% endif %} diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/bind/templates/etc/named.conf.d/statistics.j2 b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/bind/templates/etc/named.conf.d/statistics.j2 new file mode 100644 index 0000000..4940f88 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/bind/templates/etc/named.conf.d/statistics.j2 @@ -0,0 +1,12 @@ +{% if bind_statistics is defined and + bind_statistics | bodsch.core.type == "dict" %} + {% if bind_statistics.channels is defined and + bind_statistics.channels | string | length > 0 %} + +statistics-channels { + inet {{ bind_statistics.host }} port {{ bind_statistics.port }} allow { + {{ bind_statistics.allow| join(';\n') | indent(4) }}; + }; +}; + {% endif %} +{% endif %} diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/bind/templates/etc/named.conf.d/zones.j2 b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/bind/templates/etc/named.conf.d/zones.j2 new file mode 100644 index 0000000..67e02bc --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/bind/templates/etc/named.conf.d/zones.j2 @@ -0,0 +1,212 @@ +{% if bind_zones is defined and + bind_zones | default([]) | bodsch.core.type == "list" and + bind_zones | count > 0 %} + + {% for bind_zone in bind_zones %} + {% if bind_zone.create_forward_zones | default('true') | bool %} +{# Start: set zone type #} +{# {% set _all_addresses = ansible_facts.all_ipv4_addresses | union(ansible_facts.all_ipv6_addresses) %} #} +{#// -- {{ bind_zone.name }} --------------------------------------- +// type : {{ bind_zone.type | default('') }} +// primaries : {{ bind_zone.primaries | default([]) }} +// forwarders: {{ bind_zone.forwarders | default([]) }} +// ------------------------------------------------------------ +#} +{# +{% if bind_zone.type is defined and bind_zone.type == 'primary' %} +{% set _type = 'primary' %} +{% elif bind_zone.type is defined and bind_zone.type == 'secondary' %} +{% set _type = 'secondary' %} +{% elif bind_zone.type is defined and bind_zone.type == 'forward' %} +{% set _type = 'forward' %} +{% elif bind_zone.type is not defined and bind_zone.primaries is defined and (_all_addresses|intersect(bind_zone.primaries)|length > 0) %} +{% set _type = 'primary' %} +{% elif bind_zone.type is not defined and bind_zone.primaries is defined and not (_all_addresses|intersect(bind_zone.primaries)|length > 0) %} +{% set _type = 'secondary' %} +{% elif bind_zone.type is not defined and bind_zone.forwarders is defined %} +{% set _type = 'forward' %} +{% endif %} +// #1: '{{ _type }}' +// ------------------------------------------------------------ +#} +{# {% set _all_addresses = ansible_facts.all_ipv4_addresses | union(ansible_facts.all_ipv6_addresses) %} #} + {% set _type = bind_zone | bodsch.dns.zone_type(all_addresses=host_all_addresses) %} +{#// #2: '{{ _type }}' +// ------------------------------------------------------------ +#} + +zone "{{ bind_zone.name }}" IN { + {% if (_type | 
string).upper() == 'PRIMARY' %} + type master; + file "{{ bind_zone_dir }}/{{ bind_zone.name }}"; + notify yes; + + {# -------------------------------------------- #} + {% if bind_zone.allow_updates | default([]) | count > 0 %} + {% if bind_zone.allow_updates | bodsch.core.type == "list" %} + allow-update { + {% for u in bind_zone.allow_updates %} + {{ u }}; + {% endfor %} + }; + {% else %} + allow-update { + none; + }; + {% endif %} + {% endif %} + {# -------------------------------------------- #} + {# -------------------------------------------- #} + {% if bind_zone.allow_transfers | default([]) | count > 0 %} + {% if bind_zone.allow_transfers | bodsch.core.type == "list" %} + allow-transfer { + {% for u in bind_zone.allow_transfers %} + {{ u }}; + {% endfor %} + }; + {% endif %} + {% endif %} + {# -------------------------------------------- #} + + {% if bind_zone.also_notify is defined %} + also-notify { + {{ bind_zone.also_notify | join(';\n') | indent(4) }}; + }; + {% endif %} + + {% elif (_type | string).upper() == 'SECONDARY' %} + type slave; + masters { + {{ bind_zone.primaries | join(';\n') | indent(4) }}; + }; + file "{{ bind_secondary_dir }}/{{ bind_zone.name }}"; + {% elif (_type | string).upper() == 'FORWARD' %} + type forward; + forward only; + forwarders { + {{ bind_zone.forwarders | join(';\n') | indent(4) }}; + }; + {% endif %} +}; + {% endif %} + + {% if bind_zone.networks is defined %} + {% if bind_zone.create_reverse_zones | default('true') | bool %} + {% set _type = bind_zone | bodsch.dns.zone_type(all_addresses=bind_zone.networks) %} + {% for network in bind_zone.networks %} + +zone "{{ ('.'.join(network.replace(network+'.','').split('.')[::-1])) }}.in-addr.arpa" IN { + {% if (_type | string).upper() == 'PRIMARY' %} + type master; + file "{{ bind_zone_dir }}/{{ ('.'.join(network.replace(network+'.','').split('.')[::-1])) }}.in-addr.arpa"; + notify yes; + {% if bind_zone.also_notify is defined %} + also-notify { + {{ bind_zone.also_notify | join(';\n') | indent(4) }}; + }; + {% endif %} + {# -------------------------------------------- #} + {% if bind_zone.allow_updates | default([]) | count > 0 %} + {% if bind_zone.allow_updates | bodsch.core.type == "list" %} + allow-update { + {% for u in bind_zone.allow_updates %} + {{ u }}; + {% endfor %} + }; + {% else %} + allow-update { + none; + }; + {% endif %} + {% endif %} + {# -------------------------------------------- #} + {# -------------------------------------------- #} + {% if bind_zone.allow_transfers | default([]) | count > 0 %} + {% if bind_zone.allow_transfers | bodsch.core.type == "list" %} + allow-transfer { + {% for u in bind_zone.allow_transfers %} + {{ u }}; + {% endfor %} + }; + {% endif %} + {% endif %} + {# -------------------------------------------- #} + {% elif (_type | string).upper() == 'SECONDARY' %} + type slave; + masters { + {{ bind_zone.primaries | join(';\n') | indent(4) }}; + }; + file "{{ bind_secondary_dir }}/{{ ('.'.join(network.replace(network+'.','').split('.')[::-1])) }}.in-addr.arpa"; + {% elif (_type | string).upper() == 'FORWARD' %} + type forward; + forward only; + forwarders { + {{ bind_zone.forwarders | join(';\n') | indent(4) }}; + }; + {% endif %} +}; + {% endfor %} + {% endif %} + {% endif %} + + {% if bind_zone.ipv6_networks is defined %} + {% if bind_zone.create_reverse_zones | default('true') | bool %} + {% set _type = bind_zone | bodsch.dns.zone_type(all_addresses=bind_zone.ipv6_networks) %} + {% for network in bind_zone.ipv6_networks %} + +zone "{{ (network | 
ansible.utils.ipaddr('revdns'))[-(9+(network | regex_replace('^.*/','') | int)//2):] }}" IN { + {% if (_type | string).upper() == 'PRIMARY' %} + type master; + file "{{ bind_zone_dir }}/{{ (network | ansible.utils.ipaddr('revdns'))[-(9+(network | regex_replace('^.*/','') | int)//2):-1] }}"; + notify yes; + {% if bind_zone.also_notify is defined %} + also-notify { + {{ bind_zone.also_notify | join(';\n') | indent(4) }}; + }; + {% endif %} + {# -------------------------------------------- #} + {% if bind_zone.allow_updates | default([]) | count > 0 %} + {% if bind_zone.allow_updates | bodsch.core.type == "list" %} + allow-update { + {% for u in bind_zone.allow_updates %} + {{ u }}; + {% endfor %} + }; + {% else %} + allow-update { + none; + }; + {% endif %} + {% endif %} + {# -------------------------------------------- #} + {# -------------------------------------------- #} + {% if bind_zone.allow_transfers | default([]) | count > 0 %} + {% if bind_zone.allow_transfers | bodsch.core.type == "list" %} + allow-transfer { + {% for u in bind_zone.allow_transfers %} + {{ u }}; + {% endfor %} + }; + {% endif %} + {% endif %} + {# -------------------------------------------- #} + {% elif (_type | string).upper() == 'SECONDARY' %} + type slave; + masters { + {{ bind_zone.primaries | join(';\n') | indent(4) }}; + }; + file "{{ bind_secondary_dir }}/{{ (network | ansible.utils.ipaddr('revdns'))[-(9+(network | regex_replace('^.*/','') | int)//2):-1] }}"; + {% elif (_type | string).upper() == 'FORWARD' %} + type forward; + forward only; + forwarders { + {{ bind_zone.forwarders | join(';\n') | indent(4) }}; + }; + {% endif %} +}; + {% endfor %} + {% endif %} + {% endif %} + + {% endfor %} +{% endif %} diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/bind/templates/etc/named.conf.j2 b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/bind/templates/etc/named.conf.j2 new file mode 100644 index 0000000..eac15dc --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/bind/templates/etc/named.conf.j2 @@ -0,0 +1,37 @@ +#jinja2: trim_blocks: True, lstrip_blocks: True +// +// named.conf +// +// {{ ansible_managed }} +{% include('named.conf.d/acl.j2') %} +// ------------------------------------------------------------------- +options { +{% include('named.conf.d/listen.j2') %} + directory "{{ bind_dir }}"; + dump-file "{{ bind_dir }}/data/cache_dump.db"; + statistics-file "{{ bind_dir }}/data/named_stats.txt"; + memstatistics-file "{{ bind_dir }}/data/named_mem_stats.txt"; + + /* Path to ISC DLV key */ + bindkeys-file "{{ bind_bindkeys_file }}"; + + managed-keys-directory "{{ bind_dir }}/dynamic"; + pid-file "{{ bind_pid_file }}"; + session-keyfile "{{ bind_session_keyfile }}"; + + allow-query { + {{ bind_allow_query | join(';\n') | indent(4) }}; + }; +{% include('named.conf.d/allow-transfer.j2') %} +{% include('named.conf.d/check-names.j2') %} +{% include('named.conf.d/recursion.j2') %} +{% include('named.conf.d/forwarders.j2') %} +{% include('named.conf.d/dnssec.j2') %} +{% include('named.conf.d/querylog.j2') %} +{% include('named.conf.d/dns64.j2') %} +}; +// ------------------------------------------------------------------- +{% include('named.conf.d/includes.j2') %} +{% include('named.conf.d/statistics.j2') %} +{% include('named.conf.d/logging.j2') %} +{% include('named.conf.d/zones.j2') %} diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/bind/templates/etc/reverse_zone.j2
b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/bind/templates/etc/reverse_zone.j2 new file mode 100644 index 0000000..7d94e17 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/bind/templates/etc/reverse_zone.j2 @@ -0,0 +1,79 @@ +#jinja2: trim_blocks: True, lstrip_blocks: True +{% set _data = item | bodsch.dns.reverse_zone_data(bind_zone_soa, ansible_facts.hostname) %} +{% set _zone_data = _data.get("reverse_zone_data", None) %} +{% set _zone_hash = _data.get("zone_hash", None) %} +{% set _zone = _zone_data['domain'] | bodsch.dns.zone_serial(_zone_hash, bind_zone_data, network=item.1) %} +; {{ ansible_managed }} +; +; Reverse zone file for {{ _zone_data['domain'] }} +; Hash: {{ _zone['hash'] }} {{ _zone['serial'] }} + +$TTL {{ _zone_data['ttl'] }} +$ORIGIN {{ _zone_data['revip'] }}. +; soa +@ IN SOA {{ _zone_data['soa_name_server'] | first }} {{ _zone_data['hostmaster_email'] }} ( + {{ (_zone['serial'] | string).ljust(20) }} ; serial + {{ (_zone_data['refresh'] | string).ljust(20) }} ; refresh (1 week) + {{ (_zone_data['retry'] | string).ljust(20) }} ; retry (1 day) + {{ (_zone_data['expire'] | string).ljust(20) }} ; expire (4 weeks) + {{ (_zone_data['minimum'] | string).ljust(20) }} ; minimum (1 day) + ) +; -------------------------------------------------------------------------------------- +; dns +{% if _zone_data['soa_name_server'] | length > 0 %} + {% for ns in _zone_data['soa_name_server'] %} + IN NS {{ ns }} + {% endfor %} +{% endif %} +{% if _zone_data.other_name_servers is defined and + _zone_data.other_name_servers | bodsch.core.type == "list" %} + {% for ns in _zone_data.other_name_servers %} + IN NS {{ ns }} + {% endfor %} +{% endif %} + +; -------------------------------------------------------------------------------------- +; hosts +{% if _zone_data['hosts'] | length > 0 %} + {% for host in _zone_data['hosts'] %} + {% set ttl = '' %} + {% if host.ttl is defined and host.ttl | string | length > 0 %} + {% set ttl = host.ttl %} + {% endif %} + {% if host.ip is defined %} + {% if host.ip == item.1 %} +@ IN PTR {{ host.name }}.{{ _zone_data['domain'] }}. + {% else %} +{# + ; name {{ host.name }} + ; ip {{ host.ip }} + ; .1 {{ item.1 }} + ; {{ host.ip.startswith(item.1) }} +#} + {% if host.ip is string and host.ip.startswith(item.1) %} + {% if host.name == '@' %} +{{ ('.'.join(host.ip.replace(item.1 + '.','').split('.')[::-1])).ljust(20) }}{{ (ttl | string).rjust(6) }} IN PTR {{ _zone_data['domain'] }}. + {% else %} + {% if "$GENERATE" not in host.name.upper() %} +{{ ('.'.join(host.ip.replace(item.1+'.','').split('.')[::-1])).ljust(20) }}{{ (ttl | string).rjust(6) }} IN PTR {{ host.name }}.{{ _zone_data['domain'] }}. + {% endif %} + {% if "$GENERATE" in host.name.upper() %} +{{ host.name.rsplit(None, 1)[0] }} {{ ('.'.join(host.ip.replace(item.1+'.','').split('.')[::-1])).ljust(20) }} IN PTR {{ host.name.rsplit(None, 1)[1] }}.{{ _zone_data['domain'] }}. + {% endif %} + {% endif %} + {% else %} + {% for ip in host.ip %} + {% if ip.startswith(item.1) %} +{{ ('.'.join(ip.replace(item.1+'.','').split('.')[::-1])).ljust(20) }}{{ (ttl | string).rjust(6) }} IN PTR {{ _zone_data['domain'] }}. + {% if host.name == '@' %} + {% else %} +{{ ('.'.join(ip.replace(item.1+'.','').split('.')[::-1])).ljust(20) }}{{ (ttl | string).rjust(6) }} IN PTR {{ host.name }}.{{ _zone_data['domain'] }}. 
+ {% endif %} + {% endif %} + {% endfor %} + {% endif %} + {% endif %} + {% endif %} + {% endfor %} +{% endif %} +; -------------------------------------------------------------------------------------- diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/bind/templates/etc/reverse_zone_ipv6.j2 b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/bind/templates/etc/reverse_zone_ipv6.j2 new file mode 100644 index 0000000..4d9f90d --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/bind/templates/etc/reverse_zone_ipv6.j2 @@ -0,0 +1,74 @@ +#jinja2: trim_blocks: True, lstrip_blocks: True +{% set _data = item | bodsch.dns.reverse_zone_data(bind_zone_soa, ansible_facts.hostname) %} +{% set _zone_data = _data.get("reverse_zone_data", None) %} +{% set _zone_hash = _data.get("zone_hash", None) %} +{% set _zone = _zone_data['domain'] | bodsch.dns.zone_serial(_zone_hash, bind_zone_data, network=item.1) %} +; {{ ansible_managed }} +; +; Reverse zone file for {{ _zone_data['domain'] }} +; Hash: {{ _zone['hash'] }} {{ _zone['serial'] }} + +$TTL {{ _zone_data['ttl'] }} +$ORIGIN {{ _zone_data['revip'] }} + +; soa +@ IN SOA {{ _zone_data['soa_name_server'] | first }} {{ _zone_data['hostmaster_email'] }} ( + {{ (_zone['serial'] | string).ljust(20) }} ; serial + {{ (_zone_data['refresh'] | string).ljust(20) }} ; refresh (1 week) + {{ (_zone_data['retry'] | string).ljust(20) }} ; retry (1 day) + {{ (_zone_data['expire'] | string).ljust(20) }} ; expire (4 weeks) + {{ (_zone_data['minimum'] | string).ljust(20) }} ; minimum (1 day) + ) +; -------------------------------------------------------------------------------------- +; dns +{% if _zone_data['soa_name_server'] | length > 0 %} + {% for ns in _zone_data['soa_name_server'] %} + IN NS {{ ns }} + {% endfor %} +{% endif %} +{% if _zone_data.other_name_servers is defined and + _zone_data.other_name_servers | bodsch.core.type == "list" %} + {% for ns in _zone_data.other_name_servers %} + IN NS {{ ns }} + {% endfor %} +{% endif %} + +; -------------------------------------------------------------------------------------- +; hosts +{% if _zone_data['hosts'] | length > 0 %} + {% for host in _zone_data['hosts'] %} + {% set ttl = '' %} + {% if host.ttl is defined and host.ttl | string | length > 0 %} + {% set ttl = host.ttl %} + {% endif %} + {% if host.ipv6 is defined %} + {% if host.ipv6 == item.1 %} +@ IN PTR {{ host.name }}.{{ _zone_data['domain'] }}. + {% else %} +{# + ; name {{ host.name }} + ; ip {{ host.ipv6 }} + ; .1 {{ item.1 }} + ; {{ host.ipv6.startswith(item.1) }} +#} + {% if host.ipv6 is string and host.ipv6.startswith(item.1 | regex_replace(':\/.*$','')) %} + {% if host.name == '@' %} +{{ host.ipv6 | ansible.utils.ipaddr('revdns') }}{{ (ttl | string).ljust(6) }} IN PTR {{ _zone_data['domain'] }}. + {% else %} +{{ host.ipv6 | ansible.utils.ipaddr('revdns') }}{{ (ttl | string).ljust(6) }} IN PTR {{ host.name }}.{{ _zone_data['domain'] }}. + {% endif %} + {% else %} + {% for ip in host.ipv6 %} + {% if ip.startswith(item.1 | regex_replace(':\/.*$','')) %} +{{ ip | ansible.utils.ipaddr('revdns') }}{{ (ttl | string).ljust(6) }} IN PTR {{ _zone_data['domain'] }}. + {% if host.name == '@' %} + {% else %} +{{ ip | ansible.utils.ipaddr('revdns') }}{{ (ttl | string).ljust(6) }} IN PTR {{ host.name }}.{{ _zone_data['domain'] }}. 
+ {% endif %} + {% endif %} + {% endfor %} + {% endif %} + {% endif %} + {% endif %} + {% endfor %} +{% endif %} diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/bind/templates/init/systemd/override.conf.j2 b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/bind/templates/init/systemd/override.conf.j2 new file mode 100644 index 0000000..16430b5 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/bind/templates/init/systemd/override.conf.j2 @@ -0,0 +1,19 @@ +#jinja2: trim_blocks: True, lstrip_blocks: True +# {{ ansible_managed }} + +[Unit] +{% if dnsmasq_systemd.unit.after is defined and + dnsmasq_systemd.unit.after | count > 0 %} +After = +After = network.target {{ dnsmasq_systemd.unit.after | join(' ') }} +{% endif %} +{% if dnsmasq_systemd.unit.wants is defined and + dnsmasq_systemd.unit.wants | count > 0 %} +Wants = +Wants = nss-lookup.target {{ dnsmasq_systemd.unit.wants | join(' ') }} +{% endif %} +{% if dnsmasq_systemd.unit.requires is defined and + dnsmasq_systemd.unit.requires | count > 0 %} +Requires = +Requires = network.target {{ dnsmasq_systemd.unit.requires | join(' ') }} +{% endif %} diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/bind/vars/archlinux.yml b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/bind/vars/archlinux.yml new file mode 100644 index 0000000..37c919e --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/bind/vars/archlinux.yml @@ -0,0 +1,24 @@ +--- + +bind_packages: + - python-netaddr + - python-dnspython + - bind + - bind-tools + +bind_service: named + +# Main config file +bind_config: /etc/named.conf + +# Zone files included in the installation +bind_default_zone_files: [] + +# Directory with run-time stuff +bind_dir: /var/named +bind_conf_dir: "{{ bind_dir }}" + +bind_owner: root +bind_group: named + +... diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/bind/vars/debian.yml b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/bind/vars/debian.yml new file mode 100644 index 0000000..c81cf5b --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/bind/vars/debian.yml @@ -0,0 +1,3 @@ +--- + +... 
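
The per-distribution layout above (`vars/archlinux.yml`, the empty `vars/debian.yml`, and the `vars/main.yml` that follows, which carries the Debian-flavoured values) implies the usual `first_found` loading pattern. The role's task files are not part of this hunk, so the loader below is only a hypothetical sketch of how such files are typically selected:

```yaml
# Hypothetical sketch - the role's tasks are not included in this diff.
- name: include OS specific configuration
  ansible.builtin.include_vars: "{{ lookup('first_found', params) }}"
  vars:
    params:
      files:
        - "{{ ansible_facts.distribution | lower }}.yml"  # e.g. archlinux.yml, debian.yml
        - "{{ ansible_facts.os_family | lower }}.yml"
        - main.yml                                        # fallback
      paths:
        - "vars"
```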
diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/bind/vars/main.yml b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/bind/vars/main.yml new file mode 100644 index 0000000..0d07efc --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/bind/vars/main.yml @@ -0,0 +1,138 @@ +--- + +bind_requirements: [] + +bind_packages: + - python3-netaddr + - python3-dnspython + - bind9 + - bind9utils + - bind9-dnsutils + +bind_service: bind9 + +# Main config file +bind_config: /etc/bind/named.conf + +# Localhost zone +bind_default_zone_files: + - /etc/bind/named.conf.default-zones + +# Directory with run-time stuff +bind_dir: /var/cache/bind +bind_conf_dir: "/etc/bind" +bind_auth_file: "{{ bind_conf_dir }}/auth_transfer.conf" +bind_auth_update_file: "{{ bind_conf_dir }}/auth_update.conf" + +bind_owner: root +bind_group: bind + +bind_bindkeys_file: "/etc/named.iscdlv.key" +bind_pid_file: "/run/named/named.pid" +bind_session_keyfile: "/run/named/session.key" + +# Custom location for zone files +bind_zone_dir: "{{ bind_dir }}" +bind_secondary_dir: "{{ bind_dir }}/secondary" + +bind_defaults_listen: + ipv4: + - port: 53 + addresses: + - "127.0.0.1" + ipv6: + - port: 53 + addresses: + - "::1" + +# statistics channels configuration +bind_defaults_statistics: + channels: false + port: 8053 + host: 127.0.0.1 + allow: + - "127.0.0.1" + +# DNSSEC configuration +bind_defaults_dnssec: + enable: true + validation: auto + +# SOA information +bind_defaults_zone_soa: + ttl: "86400" # 1 day + time_to_refresh: "604800" # refresh (1 week) + time_to_retry: "86400" # retry (1 day) + time_to_expire: "2419200" # expire (4 weeks) + minimum_ttl: "86400" # minimum (1 day) + +bind_defaults_query_log: + enable: true + +bind_defaults_logging: + enable: false + channels: + - channel: general + file: "data/general.log" + versions: 3 + size: 10M + print_time: true # true | false + print_category: true + print_severity: true + severity: dynamic # critical | error | warning | notice | info | debug [level] | dynamic + # - channel: query + # file: "data/query.log" + # versions: 5 + # size: 10M + # print_time: "" # true | false + # severity: info # + # - channel: dnssec + # file: "data/dnssec.log" + # versions: 5 + # size: 10M + # print_time: "" # true | false + # severity: info # + # - channel: notify + # file: "data/notify.log" + # versions: 5 + # size: 10M + # print_time: "" # true | false + # severity: info # + # - channel: transfers + # file: "data/transfers.log" + # versions: 5 + # size: 10M + # print_time: "" # true | false + # severity: info # + # - channel: slog + # syslog: security # kern | user | mail | daemon | auth | syslog | lpr | + # # news | uucp | cron | authpriv | ftp | + # # local0 | local1 | local2 | local3 | + # # local4 | local5 | local6 | local7 + # # file: "data/transfers.log" + # #versions: 5 + # #size: 10M + # print_time: "" # true | false + # severity: info # + categories: + # "xfer-out": + # - transfers + # - slog + # "xfer-in": + # - transfers + # - slog + # notify: + # - notify + # "lame-servers": + # - general + config: + - general + default: + - general + # security: + # - general + # - slog + # dnssec: + # - dnssec + # queries: + # - query diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/dnsmasq/.ansible-lint b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/dnsmasq/.ansible-lint new file mode 100644 index 0000000..48763f0 --- /dev/null +++ 
b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/dnsmasq/.ansible-lint @@ -0,0 +1,5 @@ +--- + +skip_list: + - name[casing] + - name[template] diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/dnsmasq/.editorconfig b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/dnsmasq/.editorconfig new file mode 100644 index 0000000..898cdbd --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/dnsmasq/.editorconfig @@ -0,0 +1,23 @@ +# EditorConfig helps developers define and maintain consistent +# coding styles between different editors and IDEs +# https://editorconfig.org/ + + +root = true + +[*] +indent_style = space +indent_size = 2 + +end_of_line = lf +charset = utf-8 +trim_trailing_whitespace = true +insert_final_newline = true +max_line_length = 100 + +[*.py] +indent_style = space +indent_size = 4 + +[*.md] +trim_trailing_whitespace = false diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/dnsmasq/.flake8 b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/dnsmasq/.flake8 new file mode 100644 index 0000000..1962f7e --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/dnsmasq/.flake8 @@ -0,0 +1,19 @@ +[flake8] + +# E221 multiple spaces before operator +# E251 unexpected spaces around keyword / parameter equals + +ignore = E221,E251 + +exclude = + # No need to traverse our git directory + .git, + # There's no value in checking cache directories + __pycache__, + .tox + +# E203: https://github.com/python/black/issues/315 +# ignore = D,E741,W503,W504,H,E501,E203 + +max-line-length = 195 + diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/dnsmasq/.gitignore b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/dnsmasq/.gitignore new file mode 100644 index 0000000..3d35e6a --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/dnsmasq/.gitignore @@ -0,0 +1,6 @@ +.tox +.galaxy_install_info +*kate-swp +__pycache__ +.cache +.directory diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/dnsmasq/.markdown-lint.yml b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/dnsmasq/.markdown-lint.yml new file mode 100644 index 0000000..fb1accf --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/dnsmasq/.markdown-lint.yml @@ -0,0 +1,6 @@ +--- + +# First line in a file should be a top-level heading +MD041: false + +... 
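
The `.ansible-lint` skip list above suppresses `name[casing]` and `name[template]`, which matches how this role names things: its tasks and handlers use lowercase names (for example `systemctl daemon-reload` later in this diff), and `name[template]` fires whenever a Jinja expression appears anywhere but the end of a name. An illustrative task in that style, which stock ansible-lint would otherwise flag on both rules (the `dnsmasq_service` variable is hypothetical):

```yaml
# Illustrative only - shows the naming style the skip_list permits.
- name: "restart {{ dnsmasq_service | default('dnsmasq') }} after config change"
  ansible.builtin.service:
    name: dnsmasq
    state: restarted
```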
diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/dnsmasq/.yamllint b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/dnsmasq/.yamllint new file mode 100644 index 0000000..20fd7aa --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/dnsmasq/.yamllint @@ -0,0 +1,40 @@ +--- +# Based on ansible-lint config +extends: default + +rules: + braces: + max-spaces-inside: 1 + level: error + brackets: + max-spaces-inside: 1 + level: error + colons: + max-spaces-after: -1 + level: error + commas: + max-spaces-after: -1 + level: error + comments: disable + comments-indentation: disable + document-start: disable + empty-lines: + max: 3 + level: error + hyphens: + level: error + indentation: + spaces: 2 + key-duplicates: enable + line-length: + max: 195 + level: warning + new-line-at-end-of-file: disable + new-lines: + type: unix + trailing-spaces: disable + truthy: disable + +ignore: | + molecule/ + .github diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/dnsmasq/CONTRIBUTING.md b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/dnsmasq/CONTRIBUTING.md new file mode 100644 index 0000000..e3cd4cc --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/dnsmasq/CONTRIBUTING.md @@ -0,0 +1,31 @@ +Contributing +============ +If you want to contribute to a project and make it better, your help is very welcome. +Contributing is also a great way to learn more about social coding on Github, new technologies +and their ecosystems and how to make constructive, helpful bug reports, feature requests and the +noblest of all contributions: a good, clean pull request. + +### How to make a clean pull request + +Look for a project's contribution instructions. If there are any, follow them. + +- Create a personal fork of the project on Github. +- Clone the fork on your local machine. Your remote repo on Github is called `origin`. +- Add the original repository as a remote called `upstream`. +- If you created your fork a while ago be sure to pull upstream changes into your local repository. +- Create a new branch to work on! Branch from `develop` if it exists, else from `master`. +- Implement/fix your feature, comment your code. +- Follow the code style of the project, including indentation. +- If the project has tests run them! +- Write or adapt tests as needed. +- Add or change the documentation as needed. +- Squash your commits into a single commit. Create a new branch if necessary. +- Push your branch to your fork on Github, the remote `origin`. +- From your fork open a pull request in the correct branch. Target the project's `develop` branch if there is one, else go for `master`! +- If the maintainer requests further changes just push them to your branch. The PR will be updated automatically. +- Once the pull request is approved and merged you can pull the changes from `upstream` to your local repo and delete + your extra branch(es). + +And last but not least: Always write your commit messages in the present tense. +Your commit message should describe what the commit, when applied, does to the +code – not what you did to the code.
diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/dnsmasq/LICENSE b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/dnsmasq/LICENSE new file mode 100644 index 0000000..8c8472f --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/dnsmasq/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." 
+ + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. 
+ + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. 
We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright 2020-2021 Bodo Schulz + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/dnsmasq/Makefile b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/dnsmasq/Makefile new file mode 100644 index 0000000..3abaf48 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/dnsmasq/Makefile @@ -0,0 +1,22 @@ +# +export TOX_SCENARIO ?= default +export TOX_ANSIBLE ?= ansible_6.1 + +.PHONY: converge destroy verify test lint + +default: converge + +converge: + @hooks/converge + +destroy: + @hooks/destroy + +verify: + @hooks/verify + +test: + @hooks/test + +lint: + @hooks/lint diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/dnsmasq/README.md b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/dnsmasq/README.md new file mode 100644 index 0000000..e3c39d8 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/dnsmasq/README.md @@ -0,0 +1,313 @@ + +# Ansible Role: `dnsmasq` + +Ansible role to install and configure dnsmasq on various linux systems. 
+ +[sourcecode](https://thekelleys.org.uk/gitweb/?p=dnsmasq.git;a=summary) + + +[![GitHub Workflow Status](https://img.shields.io/github/actions/workflow/status/bodsch/ansible-dnsmasq/main.yml?branch=main)][ci] +[![GitHub issues](https://img.shields.io/github/issues/bodsch/ansible-dnsmasq)][issues] +[![GitHub release (latest by date)](https://img.shields.io/github/v/release/bodsch/ansible-dnsmasq)][releases] +[![Ansible Quality Score](https://img.shields.io/ansible/quality/50067?label=role%20quality)][quality] + +[ci]: https://github.com/bodsch/ansible-dnsmasq/actions +[issues]: https://github.com/bodsch/ansible-dnsmasq/issues?q=is%3Aopen+is%3Aissue +[releases]: https://github.com/bodsch/ansible-dnsmasq/releases +[quality]: https://galaxy.ansible.com/bodsch/dnsmasq + + +## usage + +```yaml +dnsmasq_systemd: + unit: + after: [] + wants: [] + requires: [] + +dnsmasq_global: {} +# port: 53 +# user: "" +# group: "" +# filterwin2k: false +# resolv_file: "" +# strict_order: false +# no_hosts: false +# no_resolv: false +# no_poll: false +# domain_needed: false +# bogus_priv: false +# cache_size: 150 +# all_servers: false +# no_negcache: false +# conf_file: "" +# conf_dir: "" + +dnsmasq_interfaces: + listen_address: "127.0.0.1" + # Define specific interfaces to listen on + interfaces: [] + # - "{{ ansible_default_ipv4['interface'] }}" + # - eth0 + # - eth1 + # Define any interface to not listen on + except_interfaces: [] + # - eth1 + # Defines if DNSMasq only listens on specific interfaces instead of all interfaces + bind_only: false + +dnsmasq_logging: + log_queries: false + log_facility: /var/log/dnsmasq.log + log_dhcp: false + +dnsmasq_addresses: [] +# - address: 192.168.202.133 +# name: node1.test.com + +dnsmasq_alias: {} + +dnsmasq_dhcp: +# enabled: false +# dhcp_authoritative: false +# dhcp_boot: "pxelinux.0,{{ inventory_hostname }},{{ dnsmasq_domain }}" +# dhcp_hosts: [] +# dhcp_options: [] +# dhcp_options_tagged: [] +# dhcp_range: [] + +dnsmasq_dnssec: {} +# enabled: false +# conf_file: "" +# dnssec_check_unsigned: false + +dnsmasq_domain: + name: example.org + custom: [] + +dnsmasq_ipset: {} + +dnsmasq_local: {} + +dnsmasq_mx: {} + +dnsmasq_nftset: {} + +dnsmasq_pxe: {} + +dnsmasq_server: {} +# nameservers: [] +# forwarders: [] + +dnsmasq_tftp: {} +# enabled: false +# tftp_root: "" +# tftp_no_fail: false +# tftp_secure: false +# tftp_no_blocksize: false + +dnsmasq_records: + cname: [] +# - target: +# cnames: +# - cname + ptr: [] + srv: [] + txt: [] +``` +### `dnsmasq_systemd` + +Adds a possibility to make the service dependent on others. + +For example, if binding to a VPN network interface is desired and the VPN must be started beforehand. 
+ +```yaml +dnsmasq_systemd: + unit: + after: + - ssh.service + wants: [] + requires: [] +``` + + +### `dnsmasq_global` + +```yaml +dnsmasq_global: + port: 53 + user: "" + group: "" + filterwin2k: false + resolv_file: "" + strict_order: false + no_hosts: false + no_resolv: false + no_poll: false + domain_needed: false + bogus_priv: false + cache_size: 150 + all_servers: false + no_negcache: false + conf_file: "" + conf_dir: "" +``` + +### `dnsmasq_interfaces` + +```yaml +dnsmasq_interfaces: + listen_address: "127.0.0.1" + # Define specific interfaces to listen on + interfaces: [] + # - "{{ ansible_default_ipv4['interface'] }}" + # - eth0 + # - eth1 + # Define any interface to not listen on + except_interfaces: [] + # - eth1 + # Defines if DNSMasq only listens on specific interfaces instead of all interfaces + bind_only: false +``` + +### `dnsmasq_logging` + +```yaml +dnsmasq_logging: + log_queries: false + log_facility: /var/log/dnsmasq.log + log_dhcp: false +``` + +### `dnsmasq_addresses` + +```yaml +dnsmasq_addresses: [] +# - address: 192.168.202.133 +# name: node1.test.com +``` + +### `dnsmasq_alias` + +```yaml +dnsmasq_alias: {} +``` + +### `dnsmasq_dhcp` + +```yaml +dnsmasq_dhcp: {} +# enabled: false +# dhcp_authoritative: false +# dhcp_boot: "pxelinux.0,{{ inventory_hostname }},{{ dnsmasq_domain }}" +# dhcp_hosts: [] +# dhcp_options: [] +# dhcp_options_tagged: [] +# dhcp_range: [] +``` + +### `dnsmasq_dnssec` + +```yaml +dnsmasq_dnssec: {} +# enabled: false +# conf_file: "" +# dnssec_check_unsigned: false +``` + +### `dnsmasq_domain` + +```yaml +dnsmasq_domain: + name: example.org + # Define custom domains per subnet, ip range, etc. + custom: + - domain: "example.local" + network: + - 192.168.10.0/24 # Define as range +``` + +### `dnsmasq_ipset` + +```yaml +dnsmasq_ipset: {} +``` + +### `dnsmasq_local` + +```yaml +dnsmasq_local: {} +``` + +### `dnsmasq_mx` + +```yaml +dnsmasq_mx: {} +``` + +### `dnsmasq_nftset` + +```yaml +dnsmasq_nftset: {} +``` + +### `dnsmasq_pxe` + +```yaml +dnsmasq_pxe: {} +``` + +### `dnsmasq_server` + +```yaml +dnsmasq_server: {} +# nameservers: [] +# forwarders: [] +``` + +### `dnsmasq_tftp` + +```yaml +dnsmasq_tftp: {} +# enabled: false +# tftp_root: "" +# tftp_no_fail: false +# tftp_secure: false +# tftp_no_blocksize: false +``` + +### `dnsmasq_records` + +```yaml +dnsmasq_records: + cname: [] +# - target: +# cnames: +# - cname + ptr: [] + srv: [] + txt: [] +``` + + +## Contribution + +Please read [Contribution](CONTRIBUTING.md) + +## Development, Branches (Git Tags) + +The `master` branch is my *working horse*; it includes the "latest, hot shit" and can be completely broken! + +If you want to use something stable, please use a [Tagged Version](https://github.com/bodsch/ansible-dnsmasq/tags)!
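
## Example Playbook

A minimal sketch of pulling the role in through its collection; the FQCN `bodsch.dns.dnsmasq` is the same one the bundled molecule scenario uses, while the host group and variables here are purely illustrative:

```yaml
- hosts: dns_servers          # illustrative host group
  become: true

  vars:
    dnsmasq_interfaces:
      listen_address: "127.0.0.1"

  roles:
    - role: bodsch.dns.dnsmasq
```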
+ + +## Author + +- Bodo Schulz + +## License + +[Apache](LICENSE) + +**FREE SOFTWARE, HELL YEAH!** diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/dnsmasq/defaults/main.yml b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/dnsmasq/defaults/main.yml new file mode 100644 index 0000000..caade47 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/dnsmasq/defaults/main.yml @@ -0,0 +1,54 @@ +--- + +dnsmasq_systemd: + unit: + after: [] + wants: [] + requires: [] + +dnsmasq_global: {} + +dnsmasq_interfaces: + listen_address: "127.0.0.1" + interfaces: [] + except_interfaces: [] + bind_only: false + +dnsmasq_logging: + log_queries: false + log_facility: /var/log/dnsmasq.log + log_dhcp: false + +dnsmasq_addresses: [] + +dnsmasq_alias: {} + +dnsmasq_dhcp: {} + +dnsmasq_dnssec: {} + +dnsmasq_domain: + name: example.org + custom: [] + +dnsmasq_ipset: {} + +dnsmasq_local: {} + +dnsmasq_mx: {} + +dnsmasq_nftset: {} + +dnsmasq_pxe: {} + +dnsmasq_server: {} + +dnsmasq_tftp: {} + +dnsmasq_records: + cname: [] + ptr: [] + srv: [] + txt: [] + +... diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/dnsmasq/handlers/main.yml b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/dnsmasq/handlers/main.yml new file mode 100644 index 0000000..c72b859 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/dnsmasq/handlers/main.yml @@ -0,0 +1,32 @@ +--- + +- name: systemctl daemon-reload + become: true + ansible.builtin.systemd: + daemon_reload: true + force: true + when: + - ansible_facts.service_mgr | lower == "systemd" + +- name: validate configuration + ansible.builtin.command: + cmd: dnsmasq --test + register: dnsmasq_test + changed_when: dnsmasq_test.rc != 0 + failed_when: dnsmasq_test.rc != 0 + +- name: restart service + ansible.builtin.service: + name: dnsmasq + state: restarted + listen: restart dnsmasq + when: service_default_state | default('started') == 'started' + +- name: reload service + ansible.builtin.service: + name: dnsmasq + state: reloaded + listen: reload dnsmasq + when: service_default_state | default('started') == 'started' + +... 
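
The handlers above are meant to be chained from configuration tasks: `validate configuration` runs `dnsmasq --test` before a restart, and the restart/reload handlers answer to the `listen` topics `restart dnsmasq` / `reload dnsmasq`. A hypothetical task that would drive them (the template source path is illustrative; the role's task files are not part of this hunk):

```yaml
# Hypothetical sketch - the template name is illustrative.
- name: write dnsmasq configuration
  ansible.builtin.template:
    src: dnsmasq.conf.j2
    dest: /etc/dnsmasq.conf
    mode: "0644"
  notify:
    - validate configuration   # handler name defined above
    - restart dnsmasq          # 'listen' topic of the restart handler
```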
diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/dnsmasq/hooks/converge b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/dnsmasq/hooks/converge new file mode 100755 index 0000000..0c50932 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/dnsmasq/hooks/converge @@ -0,0 +1,3 @@ +#!/usr/bin/env bash + +hooks/tox.sh "converge" diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/dnsmasq/hooks/destroy b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/dnsmasq/hooks/destroy new file mode 100755 index 0000000..b4a3f8d --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/dnsmasq/hooks/destroy @@ -0,0 +1,3 @@ +#!/usr/bin/env bash + +hooks/tox.sh "destroy" diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/dnsmasq/hooks/lint b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/dnsmasq/hooks/lint new file mode 100755 index 0000000..ef226a0 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/dnsmasq/hooks/lint @@ -0,0 +1,3 @@ +#!/usr/bin/env bash + +hooks/tox.sh "lint" diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/dnsmasq/hooks/molecule.rc b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/dnsmasq/hooks/molecule.rc new file mode 100644 index 0000000..78c8621 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/dnsmasq/hooks/molecule.rc @@ -0,0 +1,74 @@ + +TOX_ARGS= + +if [ -n "${TOX_SCENARIO}" ] +then + TOX_ARGS="--scenario-name ${TOX_SCENARIO}" +fi + +TOX_OPTS="-e ${TOX_ANSIBLE}" + +vercomp() { + + [[ $1 == $2 ]] && return 0 + v1=$(echo "$1" | sed -e 's|-|.|g') + v2=$(echo "$2" | sed -e 's|-|.|g') + + local IFS=. + local i ver1=($v1) ver2=($v2) + # fill empty fields in ver1 with zeros + for ((i=${#ver1[@]}; i<${#ver2[@]}; i++)) + do + ver1[i]=0 + done + + for ((i=0; i<${#ver1[@]}; i++)) + do + if [[ -z ${ver2[i]} ]] + then + # fill empty fields in ver2 with zeros + ver2[i]=0 + fi + if ((10#${ver1[i]} > 10#${ver2[i]})) + then + return 1 + fi + if ((10#${ver1[i]} < 10#${ver2[i]})) + then + return 2 + fi + done + return 0 +} + +install_collection() { + local collection="${1}" + + echo "Install the required collection '${collection}'" + ansible-galaxy collection install ${collection} +} + +remove_collection() { + + local collection="${1}" + + namespace="$(echo "${collection}" | cut -d '.' -f1)" + name="$(echo "${collection}" | cut -d '.' -f2)" + + collection="${HOME}/.ansible/collections/ansible_collections/${namespace}/${name}" + + rm \ + --recursive \ + --force \ + "${collection}" +} + +publish() { + + TOKEN="${HOME}/.ansible/galaxy_token" + + if [ -e "${TOKEN}" ] + then + ansible-galaxy import --token=$(cat "${TOKEN}") bodsch # "???"
+  fi
+}
diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/dnsmasq/hooks/test b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/dnsmasq/hooks/test
new file mode 100755
index 0000000..2869139
--- /dev/null
+++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/dnsmasq/hooks/test
@@ -0,0 +1,3 @@
+#!/usr/bin/env bash
+
+hooks/tox.sh "test"
diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/dnsmasq/hooks/tox.sh b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/dnsmasq/hooks/tox.sh
new file mode 100755
index 0000000..c93de29
--- /dev/null
+++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/dnsmasq/hooks/tox.sh
@@ -0,0 +1,52 @@
+#!/usr/bin/env bash
+
+. hooks/molecule.rc
+
+TOX_TEST="${1}"
+
+if [ -f "./collections.yml" ]
+then
+  for collection in $(grep -v "#" collections.yml | grep "^  - name: " | awk -F ': ' '{print $2}')
+  do
+    collections_installed="$(ansible-galaxy collection list | grep ${collection} 2> /dev/null)"
+
+    if [ -z "${collections_installed}" ]
+    then
+      install_collection ${collection}
+    else
+      collection_version=$(echo "${collections_installed}" | awk -F ' ' '{print $2}')
+      version="$(grep -v "#" collections.yml | grep -A1 "^  - name: ${collection}" | grep "version: " 2> /dev/null | awk -F ': ' '{print $2}' | sed -e 's|=||' -e 's|>||' -e 's|"||g')"
+
+      echo "The required collection '${collection}' is installed in version ${collection_version}."
+
+      if [ -n "${version}" ]
+      then
+
+        vercomp "${version}" "${collection_version}"
+
+        case $? in
+          0) op='=' ;;
+          1) op='>' ;;
+          2) op='<' ;;
+        esac
+
+        if [[ $op = "=" ]] || [[ $op = ">" ]]
+        then
+          # echo "FAIL: Expected '$3', Actual '$op', Arg1 '$1', Arg2 '$2'"
+          echo "re-install for version ${version}"
+
+          remove_collection ${collection}
+          install_collection ${collection}
+        else
+          :
+          # echo "Pass: '$1 $op $2'"
+        fi
+      else
+        :
+      fi
+    fi
+  done
+  echo ""
+fi
+
+tox ${TOX_OPTS} -- molecule ${TOX_TEST} ${TOX_ARGS}
diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/dnsmasq/hooks/verify b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/dnsmasq/hooks/verify
new file mode 100755
index 0000000..5f436af
--- /dev/null
+++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/dnsmasq/hooks/verify
@@ -0,0 +1,3 @@
+#!/usr/bin/env bash
+
+hooks/tox.sh "verify"
diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/dnsmasq/meta/main.yml b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/dnsmasq/meta/main.yml
new file mode 100644
index 0000000..d73cb96
--- /dev/null
+++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/dnsmasq/meta/main.yml
@@ -0,0 +1,32 @@
+---
+
+galaxy_info:
+  role_name: dnsmasq
+
+  author: Bodo Schulz
+  description: Ansible role to install and configure a dnsmasq server
+
+  license: Apache
+  min_ansible_version: "2.9"
+
+  platforms:
+    - name: ArchLinux
+    - name: Debian
+      versions:
+        # 10
+        - buster
+        # 11
+        - bullseye
+        - bookworm
+    - name: Ubuntu
+      versions:
+        # 20.04
+        - focal
+
+  galaxy_tags:
+    - system
+    - dns
+
+dependencies: []
+
+...
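Note that hooks/tox.sh above only installs or re-installs a collection when an optional collections.yml sits next to the role, and its grep/awk pipeline expects exactly the two-line item layout sketched below (namespace and version are illustrative). The sed step strips `=`, `>` and quotes, so pinned and minimum-version specs are normalized the same way before vercomp compares them:

    ---
    collections:
      - name: bodsch.core
        version: ">=2.10.1"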
diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/dnsmasq/molecule/configured/converge.yml b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/dnsmasq/molecule/configured/converge.yml
new file mode 100644
index 0000000..2460676
--- /dev/null
+++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/dnsmasq/molecule/configured/converge.yml
@@ -0,0 +1,12 @@
+---
+
+- name: converge
+  hosts: instance
+  any_errors_fatal: false
+  gather_facts: true
+
+  environment:
+    NETRC: ''
+
+  roles:
+    - role: bodsch.dns.dnsmasq
diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/dnsmasq/molecule/configured/group_vars/all/vars.yml b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/dnsmasq/molecule/configured/group_vars/all/vars.yml
new file mode 100644
index 0000000..2218e41
--- /dev/null
+++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/dnsmasq/molecule/configured/group_vars/all/vars.yml
@@ -0,0 +1,21 @@
+---
+
+dnsmasq_addresses:
+  - address: 192.168.202.133
+    name: node1.test.com
+  - address: 127.0.0.1
+    name: youtubei.googleapis.com
+
+dnsmasq_interfaces:
+  listen_address: "127.0.0.1"
+
+dnsmasq_server:
+  nameservers:
+    - 192.168.0.1
+    - 46.182.19.48
+    - 9.9.9.9
+  forwarders:
+    - domain: matrix.lan
+      address: 127.0.0.1#5353
+
+...
diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/dnsmasq/molecule/configured/molecule.yml b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/dnsmasq/molecule/configured/molecule.yml
new file mode 100644
index 0000000..fda92e3
--- /dev/null
+++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/dnsmasq/molecule/configured/molecule.yml
@@ -0,0 +1,55 @@
+---
+
+role_name_check: 1
+
+dependency:
+  name: galaxy
+
+driver:
+  name: docker
+
+platforms:
+  - name: instance
+    image: "ghcr.io/bodsch/docker-ansible/ansible-${DISTRIBUTION:-debian:12}"
+    command: ${MOLECULE_DOCKER_COMMAND:-""}
+    docker_host: "${DOCKER_HOST:-unix:///run/docker.sock}"
+    privileged: true
+    pre_build_image: true
+    cgroupns_mode: host
+    volumes:
+      - /sys/fs/cgroup:/sys/fs/cgroup:rw
+      - /var/lib/containerd
+    capabilities:
+      - SYS_ADMIN
+    tmpfs:
+      - /run
+      - /tmp
+
+provisioner:
+  name: ansible
+  ansible_args:
+    - --diff
+    - -v
+  config_options:
+    defaults:
+      deprecation_warnings: true
+      callback_result_format: yaml
+      callbacks_enabled: profile_tasks
+      gathering: smart
+      fact_caching: jsonfile
+      fact_caching_timeout: 8640
+      fact_caching_connection: ansible_facts
+
+scenario:
+  test_sequence:
+    - destroy
+    - dependency
+    - syntax
+    - create
+    - prepare
+    - converge
+    - verify
+    - destroy
+
+verifier:
+  name: testinfra
diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/dnsmasq/molecule/configured/prepare.yml b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/dnsmasq/molecule/configured/prepare.yml
new file mode 100644
index 0000000..c001b80
--- /dev/null
+++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/dnsmasq/molecule/configured/prepare.yml
@@ -0,0 +1,48 @@
+---
+
+- name: information
+  hosts: all
+  gather_facts: true
+
+  pre_tasks:
+    - name: arch- / artixlinux
+      when:
+        - ansible_facts.distribution | lower == 'archlinux' or
+          ansible_facts.os_family | lower == 'artix linux'
+      block:
+        - name: update pacman system
+          ansible.builtin.command: |
+            pacman --refresh --sync --sysupgrade --noconfirm
+          register: pacman
+          changed_when: pacman.rc != 0
+
failed_when: pacman.rc != 0 + + - name: create depends service + ansible.builtin.copy: + mode: 0755 + dest: /etc/init.d/net + content: | + #!/usr/bin/openrc-run + true + when: + - ansible_facts.os_family | lower == 'artix linux' + + - name: make sure python3-apt is installed (only debian based) + ansible.builtin.package: + name: + - python3-apt + state: present + when: + - ansible_facts.os_family | lower == 'debian' + + - name: update package cache + become: true + ansible.builtin.package: + update_cache: true + + - debug: + msg: + - "os family : {{ ansible_facts.distribution }} ({{ ansible_facts.os_family }})" + - "distribution version : {{ ansible_facts.distribution_major_version }}" + - "ansible version : {{ ansible_version.full }}" + - "python version : {{ ansible_facts.python.version.major }}.{{ ansible_facts.python.version.minor }}" diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/dnsmasq/molecule/configured/tests/test_default.py b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/dnsmasq/molecule/configured/tests/test_default.py new file mode 100644 index 0000000..a26a7dd --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/dnsmasq/molecule/configured/tests/test_default.py @@ -0,0 +1,198 @@ +# coding: utf-8 +from __future__ import unicode_literals + +import json +import os + +import pytest +import testinfra.utils.ansible_runner +from ansible.parsing.dataloader import DataLoader +from ansible.template import Templar + +HOST = "instance" + +testinfra_hosts = testinfra.utils.ansible_runner.AnsibleRunner( + os.environ["MOLECULE_INVENTORY_FILE"] +).get_hosts(HOST) + + +def pp_json(json_thing, sort=True, indents=2): + if type(json_thing) is str: + print(json.dumps(json.loads(json_thing), sort_keys=sort, indent=indents)) + else: + print(json.dumps(json_thing, sort_keys=sort, indent=indents)) + return None + + +def base_directory(): + """ """ + cwd = os.getcwd() + + if "group_vars" in os.listdir(cwd): + directory = "../.." + molecule_directory = "." + else: + directory = "." 
+ molecule_directory = f"molecule/{os.environ.get('MOLECULE_SCENARIO_NAME')}" + + return directory, molecule_directory + + +def read_ansible_yaml(file_name, role_name): + """ """ + read_file = None + + for e in ["yml", "yaml"]: + test_file = f"{file_name}.{e}" + if os.path.isfile(test_file): + read_file = test_file + break + + return f"file={read_file} name={role_name}" + + +@pytest.fixture() +def get_vars(host): + """ + parse ansible variables + - defaults/main.yml + - vars/main.yml + - vars/${DISTRIBUTION}.yaml + - molecule/${MOLECULE_SCENARIO_NAME}/group_vars/all/vars.yml + """ + base_dir, molecule_dir = base_directory() + distribution = host.system_info.distribution + operation_system = None + + if distribution in ["debian", "ubuntu"]: + operation_system = "debian" + elif distribution in ["redhat", "ol", "centos", "rocky", "almalinux"]: + operation_system = "redhat" + elif distribution in ["arch", "artix"]: + operation_system = f"{distribution}linux" + + # print(" -> {} / {}".format(distribution, os)) + # print(" -> {}".format(base_dir)) + + file_defaults = read_ansible_yaml(f"{base_dir}/defaults/main", "role_defaults") + file_vars = read_ansible_yaml(f"{base_dir}/vars/main", "role_vars") + file_distibution = read_ansible_yaml( + f"{base_dir}/vars/{operation_system}", "role_distibution" + ) + file_molecule = read_ansible_yaml( + f"{molecule_dir}/group_vars/all/vars", "test_vars" + ) + # file_host_molecule = read_ansible_yaml("{}/host_vars/{}/vars".format(base_dir, HOST), "host_vars") + + defaults_vars = ( + host.ansible("include_vars", file_defaults) + .get("ansible_facts") + .get("role_defaults") + ) + vars_vars = ( + host.ansible("include_vars", file_vars).get("ansible_facts").get("role_vars") + ) + distibution_vars = ( + host.ansible("include_vars", file_distibution) + .get("ansible_facts") + .get("role_distibution") + ) + molecule_vars = ( + host.ansible("include_vars", file_molecule) + .get("ansible_facts") + .get("test_vars") + ) + # host_vars = host.ansible("include_vars", file_host_molecule).get("ansible_facts").get("host_vars") + + ansible_vars = defaults_vars + ansible_vars.update(vars_vars) + ansible_vars.update(distibution_vars) + ansible_vars.update(molecule_vars) + # ansible_vars.update(host_vars) + + templar = Templar(loader=DataLoader(), variables=ansible_vars) + result = templar.template(ansible_vars, fail_on_undefined=False) + + return result + + +def test_directories(host, get_vars): + """ + used config directory + """ + pp_json(get_vars) + + directories = [ + get_vars.get("dnsmasq_config_directory"), + ] + + for dirs in directories: + d = host.file(dirs) + assert d.is_directory + + +def test_files(host, get_vars): + """ + created config files + """ + files = [get_vars.get("dnsmasq_config_file")] + + for _file in files: + f = host.file(_file) + assert f.is_file + + +# def test_user(host, get_vars): +# """ +# created user +# """ +# shell = '/bin/false' +# +# distribution = host.system_info.distribution +# +# if distribution in ['centos', 'redhat', 'ol']: +# shell = "/sbin/nologin" +# elif distribution == "arch": +# shell = "/usr/bin/nologin" +# +# user_name = "mysql" +# u = host.user(user_name) +# g = host.group(user_name) +# +# assert g.exists +# assert u.exists +# assert user_name in u.groups +# assert u.shell == shell + + +def test_service_running_and_enabled(host, get_vars): + """ + running service + """ + service_name = "dnsmasq" + + service = host.service(service_name) + assert service.is_running + assert service.is_enabled + + +def test_listening_socket(host, 
get_vars):
+    """ """
+    listening = host.socket.get_listening_sockets()
+
+    for i in listening:
+        print(i)
+
+    _conf_global = get_vars.get("dnsmasq_global", {})
+    _conf_interfaces = get_vars.get("dnsmasq_interfaces", {})
+
+    bind_port = _conf_global.get("port", 53)
+    bind_address = _conf_interfaces.get("listen_address", "0.0.0.0")
+
+    listen = []
+    listen.append(f"tcp://{bind_address}:{bind_port}")
+    listen.append(f"udp://{bind_address}:{bind_port}")
+
+    for spec in listen:
+        socket = host.socket(spec)
+        assert socket.is_listening
diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/dnsmasq/molecule/default/converge.yml b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/dnsmasq/molecule/default/converge.yml
new file mode 100644
index 0000000..2460676
--- /dev/null
+++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/dnsmasq/molecule/default/converge.yml
@@ -0,0 +1,12 @@
+---
+
+- name: converge
+  hosts: instance
+  any_errors_fatal: false
+  gather_facts: true
+
+  environment:
+    NETRC: ''
+
+  roles:
+    - role: bodsch.dns.dnsmasq
diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/dnsmasq/molecule/default/group_vars/all/vars.yml b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/dnsmasq/molecule/default/group_vars/all/vars.yml
new file mode 100644
index 0000000..c81cf5b
--- /dev/null
+++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/dnsmasq/molecule/default/group_vars/all/vars.yml
@@ -0,0 +1,3 @@
+---
+
+...
diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/dnsmasq/molecule/default/molecule.yml b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/dnsmasq/molecule/default/molecule.yml
new file mode 100644
index 0000000..fda92e3
--- /dev/null
+++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/dnsmasq/molecule/default/molecule.yml
@@ -0,0 +1,55 @@
+---
+
+role_name_check: 1
+
+dependency:
+  name: galaxy
+
+driver:
+  name: docker
+
+platforms:
+  - name: instance
+    image: "ghcr.io/bodsch/docker-ansible/ansible-${DISTRIBUTION:-debian:12}"
+    command: ${MOLECULE_DOCKER_COMMAND:-""}
+    docker_host: "${DOCKER_HOST:-unix:///run/docker.sock}"
+    privileged: true
+    pre_build_image: true
+    cgroupns_mode: host
+    volumes:
+      - /sys/fs/cgroup:/sys/fs/cgroup:rw
+      - /var/lib/containerd
+    capabilities:
+      - SYS_ADMIN
+    tmpfs:
+      - /run
+      - /tmp
+
+provisioner:
+  name: ansible
+  ansible_args:
+    - --diff
+    - -v
+  config_options:
+    defaults:
+      deprecation_warnings: true
+      callback_result_format: yaml
+      callbacks_enabled: profile_tasks
+      gathering: smart
+      fact_caching: jsonfile
+      fact_caching_timeout: 8640
+      fact_caching_connection: ansible_facts
+
+scenario:
+  test_sequence:
+    - destroy
+    - dependency
+    - syntax
+    - create
+    - prepare
+    - converge
+    - verify
+    - destroy
+
+verifier:
+  name: testinfra
diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/dnsmasq/molecule/default/prepare.yml b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/dnsmasq/molecule/default/prepare.yml
new file mode 100644
index 0000000..c001b80
--- /dev/null
+++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/dnsmasq/molecule/default/prepare.yml
@@ -0,0 +1,48 @@
+---
+
+- name: information
+  hosts: all
+  gather_facts: true
+
+  pre_tasks:
+    - name: arch- / artixlinux
+      when:
+        - ansible_facts.distribution | lower == 'archlinux' or
+          ansible_facts.os_family | lower == 'artix linux'
+      block:
+        - name: update pacman system
+          ansible.builtin.command: |
+            pacman --refresh --sync --sysupgrade --noconfirm
+          register: pacman
+          changed_when: pacman.rc != 0
+          failed_when: pacman.rc != 0
+
+        - name: create depends service
+          ansible.builtin.copy:
+            mode: 0755
+            dest: /etc/init.d/net
+            content: |
+              #!/usr/bin/openrc-run
+              true
+          when:
+            - ansible_facts.os_family | lower == 'artix linux'
+
+    - name: make sure python3-apt is installed (only debian based)
+      ansible.builtin.package:
+        name:
+          - python3-apt
+        state: present
+      when:
+        - ansible_facts.os_family | lower == 'debian'
+
+    - name: update package cache
+      become: true
+      ansible.builtin.package:
+        update_cache: true
+
+    - debug:
+        msg:
+          - "os family : {{ ansible_facts.distribution }} ({{ ansible_facts.os_family }})"
+          - "distribution version : {{ ansible_facts.distribution_major_version }}"
+          - "ansible version : {{ ansible_version.full }}"
+          - "python version : {{ ansible_facts.python.version.major }}.{{ ansible_facts.python.version.minor }}"
diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/dnsmasq/molecule/default/tests/test_default.py b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/dnsmasq/molecule/default/tests/test_default.py
new file mode 100644
index 0000000..a26a7dd
--- /dev/null
+++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/dnsmasq/molecule/default/tests/test_default.py
@@ -0,0 +1,198 @@
+# coding: utf-8
+from __future__ import unicode_literals
+
+import json
+import os
+
+import pytest
+import testinfra.utils.ansible_runner
+from ansible.parsing.dataloader import DataLoader
+from ansible.template import Templar
+
+HOST = "instance"
+
+testinfra_hosts = testinfra.utils.ansible_runner.AnsibleRunner(
+    os.environ["MOLECULE_INVENTORY_FILE"]
+).get_hosts(HOST)
+
+
+def pp_json(json_thing, sort=True, indents=2):
+    if type(json_thing) is str:
+        print(json.dumps(json.loads(json_thing), sort_keys=sort, indent=indents))
+    else:
+        print(json.dumps(json_thing, sort_keys=sort, indent=indents))
+    return None
+
+
+def base_directory():
+    """ """
+    cwd = os.getcwd()
+
+    if "group_vars" in os.listdir(cwd):
+        directory = "../.."
+        molecule_directory = "."
+    else:
+        directory = "."
+ molecule_directory = f"molecule/{os.environ.get('MOLECULE_SCENARIO_NAME')}" + + return directory, molecule_directory + + +def read_ansible_yaml(file_name, role_name): + """ """ + read_file = None + + for e in ["yml", "yaml"]: + test_file = f"{file_name}.{e}" + if os.path.isfile(test_file): + read_file = test_file + break + + return f"file={read_file} name={role_name}" + + +@pytest.fixture() +def get_vars(host): + """ + parse ansible variables + - defaults/main.yml + - vars/main.yml + - vars/${DISTRIBUTION}.yaml + - molecule/${MOLECULE_SCENARIO_NAME}/group_vars/all/vars.yml + """ + base_dir, molecule_dir = base_directory() + distribution = host.system_info.distribution + operation_system = None + + if distribution in ["debian", "ubuntu"]: + operation_system = "debian" + elif distribution in ["redhat", "ol", "centos", "rocky", "almalinux"]: + operation_system = "redhat" + elif distribution in ["arch", "artix"]: + operation_system = f"{distribution}linux" + + # print(" -> {} / {}".format(distribution, os)) + # print(" -> {}".format(base_dir)) + + file_defaults = read_ansible_yaml(f"{base_dir}/defaults/main", "role_defaults") + file_vars = read_ansible_yaml(f"{base_dir}/vars/main", "role_vars") + file_distibution = read_ansible_yaml( + f"{base_dir}/vars/{operation_system}", "role_distibution" + ) + file_molecule = read_ansible_yaml( + f"{molecule_dir}/group_vars/all/vars", "test_vars" + ) + # file_host_molecule = read_ansible_yaml("{}/host_vars/{}/vars".format(base_dir, HOST), "host_vars") + + defaults_vars = ( + host.ansible("include_vars", file_defaults) + .get("ansible_facts") + .get("role_defaults") + ) + vars_vars = ( + host.ansible("include_vars", file_vars).get("ansible_facts").get("role_vars") + ) + distibution_vars = ( + host.ansible("include_vars", file_distibution) + .get("ansible_facts") + .get("role_distibution") + ) + molecule_vars = ( + host.ansible("include_vars", file_molecule) + .get("ansible_facts") + .get("test_vars") + ) + # host_vars = host.ansible("include_vars", file_host_molecule).get("ansible_facts").get("host_vars") + + ansible_vars = defaults_vars + ansible_vars.update(vars_vars) + ansible_vars.update(distibution_vars) + ansible_vars.update(molecule_vars) + # ansible_vars.update(host_vars) + + templar = Templar(loader=DataLoader(), variables=ansible_vars) + result = templar.template(ansible_vars, fail_on_undefined=False) + + return result + + +def test_directories(host, get_vars): + """ + used config directory + """ + pp_json(get_vars) + + directories = [ + get_vars.get("dnsmasq_config_directory"), + ] + + for dirs in directories: + d = host.file(dirs) + assert d.is_directory + + +def test_files(host, get_vars): + """ + created config files + """ + files = [get_vars.get("dnsmasq_config_file")] + + for _file in files: + f = host.file(_file) + assert f.is_file + + +# def test_user(host, get_vars): +# """ +# created user +# """ +# shell = '/bin/false' +# +# distribution = host.system_info.distribution +# +# if distribution in ['centos', 'redhat', 'ol']: +# shell = "/sbin/nologin" +# elif distribution == "arch": +# shell = "/usr/bin/nologin" +# +# user_name = "mysql" +# u = host.user(user_name) +# g = host.group(user_name) +# +# assert g.exists +# assert u.exists +# assert user_name in u.groups +# assert u.shell == shell + + +def test_service_running_and_enabled(host, get_vars): + """ + running service + """ + service_name = "dnsmasq" + + service = host.service(service_name) + assert service.is_running + assert service.is_enabled + + +def test_listening_socket(host, 
get_vars): + """ """ + listening = host.socket.get_listening_sockets() + + for i in listening: + print(i) + + _conf_global = get_vars.get("dnsmasq_global", {}) + _conf_interfaces = get_vars.get("dnsmasq_interfaces", {}) + + bind_port = _conf_global.get("port", 53) + bind_address = _conf_interfaces.get("listen_address", "0.0.0.0") + + listen = [] + listen.append(f"tcp://{bind_address}:{bind_port}") + listen.append(f"udp://{bind_address}:{bind_port}") + + for spec in listen: + socket = host.socket(spec) + assert socket.is_listening diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/dnsmasq/molecule/require-systemd-unit/converge.yml b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/dnsmasq/molecule/require-systemd-unit/converge.yml new file mode 100644 index 0000000..2460676 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/dnsmasq/molecule/require-systemd-unit/converge.yml @@ -0,0 +1,12 @@ +--- + +- name: converge + hosts: instance + any_errors_fatal: false + gather_facts: true + + environment: + NETRC: '' + + roles: + - role: bodsch.dns.dnsmasq diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/dnsmasq/molecule/require-systemd-unit/group_vars/all/vars.yml b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/dnsmasq/molecule/require-systemd-unit/group_vars/all/vars.yml new file mode 100644 index 0000000..5c3d38c --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/dnsmasq/molecule/require-systemd-unit/group_vars/all/vars.yml @@ -0,0 +1,28 @@ +--- + +dnsmasq_systemd: + unit: + after: + - ssh.service + wants: [] + requires: [] + +dnsmasq_addresses: + - address: 192.168.202.133 + name: node1.test.com + - address: 127.0.0.1 + name: youtubei.googleapis.com + +dnsmasq_interfaces: + listen_address: "127.0.0.1" + +dnsmasq_server: + nameservers: + - 192.168.0.1 + - 46.182.19.48 + - 9.9.9.9 + forwarders: + - domain: matrix.lan + address: 127.0.0.1#5353 + +... 
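Assuming the override template renders one directive per listed unit, a drop-in generated from the group_vars above would look roughly like this (a sketch only; init/systemd/override.conf.j2 itself is not shown in this excerpt):

    # /etc/systemd/system/dnsmasq.service.d/override.conf
    [Unit]
    After=ssh.service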
diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/dnsmasq/molecule/require-systemd-unit/molecule.yml b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/dnsmasq/molecule/require-systemd-unit/molecule.yml
new file mode 100644
index 0000000..fda92e3
--- /dev/null
+++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/dnsmasq/molecule/require-systemd-unit/molecule.yml
@@ -0,0 +1,55 @@
+---
+
+role_name_check: 1
+
+dependency:
+  name: galaxy
+
+driver:
+  name: docker
+
+platforms:
+  - name: instance
+    image: "ghcr.io/bodsch/docker-ansible/ansible-${DISTRIBUTION:-debian:12}"
+    command: ${MOLECULE_DOCKER_COMMAND:-""}
+    docker_host: "${DOCKER_HOST:-unix:///run/docker.sock}"
+    privileged: true
+    pre_build_image: true
+    cgroupns_mode: host
+    volumes:
+      - /sys/fs/cgroup:/sys/fs/cgroup:rw
+      - /var/lib/containerd
+    capabilities:
+      - SYS_ADMIN
+    tmpfs:
+      - /run
+      - /tmp
+
+provisioner:
+  name: ansible
+  ansible_args:
+    - --diff
+    - -v
+  config_options:
+    defaults:
+      deprecation_warnings: true
+      callback_result_format: yaml
+      callbacks_enabled: profile_tasks
+      gathering: smart
+      fact_caching: jsonfile
+      fact_caching_timeout: 8640
+      fact_caching_connection: ansible_facts
+
+scenario:
+  test_sequence:
+    - destroy
+    - dependency
+    - syntax
+    - create
+    - prepare
+    - converge
+    - verify
+    - destroy
+
+verifier:
+  name: testinfra
diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/dnsmasq/molecule/require-systemd-unit/prepare.yml b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/dnsmasq/molecule/require-systemd-unit/prepare.yml
new file mode 100644
index 0000000..c001b80
--- /dev/null
+++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/dnsmasq/molecule/require-systemd-unit/prepare.yml
@@ -0,0 +1,48 @@
+---
+
+- name: information
+  hosts: all
+  gather_facts: true
+
+  pre_tasks:
+    - name: arch- / artixlinux
+      when:
+        - ansible_facts.distribution | lower == 'archlinux' or
+          ansible_facts.os_family | lower == 'artix linux'
+      block:
+        - name: update pacman system
+          ansible.builtin.command: |
+            pacman --refresh --sync --sysupgrade --noconfirm
+          register: pacman
+          changed_when: pacman.rc != 0
+          failed_when: pacman.rc != 0
+
+        - name: create depends service
+          ansible.builtin.copy:
+            mode: 0755
+            dest: /etc/init.d/net
+            content: |
+              #!/usr/bin/openrc-run
+              true
+          when:
+            - ansible_facts.os_family | lower == 'artix linux'
+
+    - name: make sure python3-apt is installed (only debian based)
+      ansible.builtin.package:
+        name:
+          - python3-apt
+        state: present
+      when:
+        - ansible_facts.os_family | lower == 'debian'
+
+    - name: update package cache
+      become: true
+      ansible.builtin.package:
+        update_cache: true
+
+    - debug:
+        msg:
+          - "os family : {{ ansible_facts.distribution }} ({{ ansible_facts.os_family }})"
+          - "distribution version : {{ ansible_facts.distribution_major_version }}"
+          - "ansible version : {{ ansible_version.full }}"
+          - "python version : {{ ansible_facts.python.version.major }}.{{ ansible_facts.python.version.minor }}"
diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/dnsmasq/molecule/require-systemd-unit/tests/test_default.py b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/dnsmasq/molecule/require-systemd-unit/tests/test_default.py
new file mode 100644
index 0000000..a26a7dd
--- /dev/null
+++
b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/dnsmasq/molecule/require-systemd-unit/tests/test_default.py @@ -0,0 +1,198 @@ +# coding: utf-8 +from __future__ import unicode_literals + +import json +import os + +import pytest +import testinfra.utils.ansible_runner +from ansible.parsing.dataloader import DataLoader +from ansible.template import Templar + +HOST = "instance" + +testinfra_hosts = testinfra.utils.ansible_runner.AnsibleRunner( + os.environ["MOLECULE_INVENTORY_FILE"] +).get_hosts(HOST) + + +def pp_json(json_thing, sort=True, indents=2): + if type(json_thing) is str: + print(json.dumps(json.loads(json_thing), sort_keys=sort, indent=indents)) + else: + print(json.dumps(json_thing, sort_keys=sort, indent=indents)) + return None + + +def base_directory(): + """ """ + cwd = os.getcwd() + + if "group_vars" in os.listdir(cwd): + directory = "../.." + molecule_directory = "." + else: + directory = "." + molecule_directory = f"molecule/{os.environ.get('MOLECULE_SCENARIO_NAME')}" + + return directory, molecule_directory + + +def read_ansible_yaml(file_name, role_name): + """ """ + read_file = None + + for e in ["yml", "yaml"]: + test_file = f"{file_name}.{e}" + if os.path.isfile(test_file): + read_file = test_file + break + + return f"file={read_file} name={role_name}" + + +@pytest.fixture() +def get_vars(host): + """ + parse ansible variables + - defaults/main.yml + - vars/main.yml + - vars/${DISTRIBUTION}.yaml + - molecule/${MOLECULE_SCENARIO_NAME}/group_vars/all/vars.yml + """ + base_dir, molecule_dir = base_directory() + distribution = host.system_info.distribution + operation_system = None + + if distribution in ["debian", "ubuntu"]: + operation_system = "debian" + elif distribution in ["redhat", "ol", "centos", "rocky", "almalinux"]: + operation_system = "redhat" + elif distribution in ["arch", "artix"]: + operation_system = f"{distribution}linux" + + # print(" -> {} / {}".format(distribution, os)) + # print(" -> {}".format(base_dir)) + + file_defaults = read_ansible_yaml(f"{base_dir}/defaults/main", "role_defaults") + file_vars = read_ansible_yaml(f"{base_dir}/vars/main", "role_vars") + file_distibution = read_ansible_yaml( + f"{base_dir}/vars/{operation_system}", "role_distibution" + ) + file_molecule = read_ansible_yaml( + f"{molecule_dir}/group_vars/all/vars", "test_vars" + ) + # file_host_molecule = read_ansible_yaml("{}/host_vars/{}/vars".format(base_dir, HOST), "host_vars") + + defaults_vars = ( + host.ansible("include_vars", file_defaults) + .get("ansible_facts") + .get("role_defaults") + ) + vars_vars = ( + host.ansible("include_vars", file_vars).get("ansible_facts").get("role_vars") + ) + distibution_vars = ( + host.ansible("include_vars", file_distibution) + .get("ansible_facts") + .get("role_distibution") + ) + molecule_vars = ( + host.ansible("include_vars", file_molecule) + .get("ansible_facts") + .get("test_vars") + ) + # host_vars = host.ansible("include_vars", file_host_molecule).get("ansible_facts").get("host_vars") + + ansible_vars = defaults_vars + ansible_vars.update(vars_vars) + ansible_vars.update(distibution_vars) + ansible_vars.update(molecule_vars) + # ansible_vars.update(host_vars) + + templar = Templar(loader=DataLoader(), variables=ansible_vars) + result = templar.template(ansible_vars, fail_on_undefined=False) + + return result + + +def test_directories(host, get_vars): + """ + used config directory + """ + pp_json(get_vars) + + directories = [ + get_vars.get("dnsmasq_config_directory"), + ] + + for dirs in directories: + d = 
host.file(dirs) + assert d.is_directory + + +def test_files(host, get_vars): + """ + created config files + """ + files = [get_vars.get("dnsmasq_config_file")] + + for _file in files: + f = host.file(_file) + assert f.is_file + + +# def test_user(host, get_vars): +# """ +# created user +# """ +# shell = '/bin/false' +# +# distribution = host.system_info.distribution +# +# if distribution in ['centos', 'redhat', 'ol']: +# shell = "/sbin/nologin" +# elif distribution == "arch": +# shell = "/usr/bin/nologin" +# +# user_name = "mysql" +# u = host.user(user_name) +# g = host.group(user_name) +# +# assert g.exists +# assert u.exists +# assert user_name in u.groups +# assert u.shell == shell + + +def test_service_running_and_enabled(host, get_vars): + """ + running service + """ + service_name = "dnsmasq" + + service = host.service(service_name) + assert service.is_running + assert service.is_enabled + + +def test_listening_socket(host, get_vars): + """ """ + listening = host.socket.get_listening_sockets() + + for i in listening: + print(i) + + _conf_global = get_vars.get("dnsmasq_global", {}) + _conf_interfaces = get_vars.get("dnsmasq_interfaces", {}) + + bind_port = _conf_global.get("port", 53) + bind_address = _conf_interfaces.get("listen_address", "0.0.0.0") + + listen = [] + listen.append(f"tcp://{bind_address}:{bind_port}") + listen.append(f"udp://{bind_address}:{bind_port}") + + for spec in listen: + socket = host.socket(spec) + assert socket.is_listening diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/dnsmasq/tasks/configure.yml b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/dnsmasq/tasks/configure.yml new file mode 100644 index 0000000..a064b62 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/dnsmasq/tasks/configure.yml @@ -0,0 +1,54 @@ +--- + +- name: create configuration directory + ansible.builtin.file: + state: directory + path: /etc/dnsmasq.d + owner: root + group: root + mode: "0750" + +- name: create configuration files + ansible.builtin.template: + src: etc/dnsmasq.d/{{ item }}.j2 + dest: "{{ dnsmasq_config_directory }}/{{ item }}" + owner: root + group: root + mode: "0644" + backup: true + loop: + - 10-interfaces.conf + - 10-logging.conf + - 20-address.conf + - 20-alias.conf + - 20-dhcp.conf + - 20-dnssec.conf + - 20-domain.conf + - 20-ipset.conf + - 20-local.conf + - 20-mx.conf + - 20-nftset.conf + - 20-pxe.conf + - 20-server.conf + - 20-tftp.conf + - 25-cname-records.conf + - 25-ptr-records.conf + - 25-srv-records.conf + - 25-txt-records.conf + notify: + - validate configuration + - restart dnsmasq + +- name: create dnsmasq.conf + ansible.builtin.template: + src: etc/dnsmasq.conf.j2 + dest: /etc/dnsmasq.conf + owner: root + group: root + mode: "0644" + backup: true + notify: + - validate configuration + - restart dnsmasq + +... 
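Every changed fragment above queues a single validation of the complete configuration via the handler chain. A per-file alternative (not what this role does) would be the template module's validate parameter together with dnsmasq's --conf-file switch, a sketch:

    - name: create dnsmasq.conf  # sketch with inline validation, not part of the role
      ansible.builtin.template:
        src: etc/dnsmasq.conf.j2
        dest: /etc/dnsmasq.conf
        mode: "0644"
        validate: dnsmasq --test --conf-file=%s

Because --conf-file tests the rendered file in isolation, the whole-config handler check used here remains the safer choice for fragments under /etc/dnsmasq.d.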
diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/dnsmasq/tasks/install.yml b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/dnsmasq/tasks/install.yml
new file mode 100644
index 0000000..b254a4f
--- /dev/null
+++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/dnsmasq/tasks/install.yml
@@ -0,0 +1,36 @@
+---
+
+- name: install dnsmasq
+  become: true
+  ansible.builtin.package:
+    name: "{{ dnsmasq_packages }}"
+    state: present
+
+- name: support overrides for systemd
+  when:
+    - ansible_facts.service_mgr | lower == "systemd"
+    - dnsmasq_systemd is defined
+    - dnsmasq_systemd.unit is defined
+    - dnsmasq_systemd.unit | count > 0
+    - (
+        (dnsmasq_systemd.unit.after is defined and dnsmasq_systemd.unit.after | count > 0) or
+        (dnsmasq_systemd.unit.wants is defined and dnsmasq_systemd.unit.wants | count > 0) or
+        (dnsmasq_systemd.unit.requires is defined and dnsmasq_systemd.unit.requires | count > 0)
+      )
+  block:
+    - name: ensure dnsmasq.service.d is present
+      ansible.builtin.file:
+        dest: /etc/systemd/system/dnsmasq.service.d
+        state: directory
+        mode: "0755"
+
+    - name: create override.conf for systemd
+      ansible.builtin.template:
+        src: init/systemd/override.conf.j2
+        dest: "/etc/systemd/system/dnsmasq.service.d/override.conf"
+        mode: "0444"
+      notify:
+        - systemctl daemon-reload
+        - restart service
+
+...
diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/dnsmasq/tasks/main.yml b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/dnsmasq/tasks/main.yml
new file mode 100644
index 0000000..af539a1
--- /dev/null
+++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/dnsmasq/tasks/main.yml
@@ -0,0 +1,15 @@
+---
+
+- name: prepare
+  ansible.builtin.include_tasks: prepare.yml
+
+- name: install
+  ansible.builtin.include_tasks: install.yml
+
+- name: configure
+  ansible.builtin.include_tasks: configure.yml
+
+- name: service
+  ansible.builtin.include_tasks: service.yml
+
+...
diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/dnsmasq/tasks/prepare.yml b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/dnsmasq/tasks/prepare.yml
new file mode 100644
index 0000000..3a20c1e
--- /dev/null
+++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/dnsmasq/tasks/prepare.yml
@@ -0,0 +1,52 @@
+---
+
+- name: include OS specific configuration ({{ ansible_facts.distribution }} ({{ ansible_facts.os_family }}) {{ ansible_facts.distribution_major_version }})
+  ansible.builtin.include_vars: "{{ lookup('first_found', params) }}"
+  vars:
+    params:
+      paths:
+        - "vars"
+      files:
+        # eg. debian-10 / ubuntu-20.04 / centos-8 / oraclelinux-8
+        - "{{ ansible_facts.distribution | lower }}-{{ ansible_facts.distribution_major_version }}.yml"
+        # eg. archlinux-systemd / archlinux-openrc
+        - "{{ ansible_facts.distribution | lower }}-{{ ansible_facts.service_mgr | lower }}.yml"
+        # eg. debian / ubuntu / centos / oraclelinux
+        - "{{ ansible_facts.distribution | lower }}.yml"
+        # eg. redhat / debian / archlinux
+        - "{{ ansible_facts.os_family | lower }}.yml"
+        - default.yaml
+      skip: true
+
+- name: update package cache
+  ansible.builtin.package:
+    update_cache: true
+
+- name: install dependency
+  ansible.builtin.package:
+    name: "{{ dnsmasq_requirements }}"
+    state: present
+
+- name: get latest system information
+  ansible.builtin.setup:
+
+- name: merge dnsmasq configuration between defaults and custom
+  ansible.builtin.set_fact:
+    dnsmasq_global: "{{ dnsmasq_defaults_global | combine(dnsmasq_global, recursive=True) }}"
+    dnsmasq_interfaces: "{{ dnsmasq_defaults_interfaces | combine(dnsmasq_interfaces, recursive=True) }}"
+    dnsmasq_logging: "{{ dnsmasq_defaults_logging | combine(dnsmasq_logging, recursive=True) }}"
+    dnsmasq_addresses: "{{ dnsmasq_defaults_addresses | union(dnsmasq_addresses) }}"
+    dnsmasq_alias: "{{ dnsmasq_defaults_alias | combine(dnsmasq_alias, recursive=True) }}"
+    dnsmasq_dhcp: "{{ dnsmasq_defaults_dhcp | combine(dnsmasq_dhcp, recursive=True) }}"
+    dnsmasq_dnssec: "{{ dnsmasq_defaults_dnssec | combine(dnsmasq_dnssec, recursive=True) }}"
+    dnsmasq_domain: "{{ dnsmasq_defaults_domain | combine(dnsmasq_domain, recursive=True) }}"
+    dnsmasq_ipset: "{{ dnsmasq_defaults_ipset | combine(dnsmasq_ipset, recursive=True) }}"
+    dnsmasq_local: "{{ dnsmasq_defaults_local | combine(dnsmasq_local, recursive=True) }}"
+    dnsmasq_mx: "{{ dnsmasq_defaults_mx | combine(dnsmasq_mx, recursive=True) }}"
+    dnsmasq_nftset: "{{ dnsmasq_defaults_nftset | combine(dnsmasq_nftset, recursive=True) }}"
+    dnsmasq_pxe: "{{ dnsmasq_defaults_pxe | combine(dnsmasq_pxe, recursive=True) }}"
+    dnsmasq_server: "{{ dnsmasq_defaults_server | combine(dnsmasq_server, recursive=True) }}"
+    dnsmasq_tftp: "{{ dnsmasq_defaults_tftp | combine(dnsmasq_tftp, recursive=True) }}"
+    dnsmasq_records: "{{ dnsmasq_defaults_records | combine(dnsmasq_records, recursive=True) }}"
+
+...
diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/dnsmasq/tasks/service.yml b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/dnsmasq/tasks/service.yml
new file mode 100644
index 0000000..b2bc5a9
--- /dev/null
+++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/dnsmasq/tasks/service.yml
@@ -0,0 +1,38 @@
+---
+
+- name: look up existing systemd-resolved
+  ansible.builtin.shell: |
+    set -o pipefail && systemctl show --property FragmentPath systemd-resolved | cut -d '=' -f2
+  args:
+    executable: /bin/bash
+  changed_when: false
+  register: _systemd_resolved
+  when:
+    - ansible_facts.service_mgr | lower == "systemd"
+
+- name: stop and disable (systemd-resolved) service
+  ansible.builtin.service:
+    name: systemd-resolved
+    state: stopped
+    enabled: false
+  when:
+    - not ansible_check_mode
+    - dnsmasq_resolved_disabled | default('true') | bool
+    - ansible_facts.service_mgr | lower == "systemd"
+    - not _systemd_resolved.stdout_lines | count == 0
+  tags:
+    - configuration
+    - dnsmasq
+    - dnsmasq-stop-disable-service
+
+- name: restart dnsmasq if needed
+  ansible.builtin.meta: flush_handlers
+
+- name: ensure dnsmasq is enabled on boot
+  become: true
+  ansible.builtin.service:
+    name: dnsmasq
+    enabled: true
+    state: started
+
+...
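systemd-resolved is stopped here because its stub listener occupies 127.0.0.53:53, which dnsmasq needs, and the flush_handlers call makes any queued validate/restart handler run before the final enable-and-start task. An illustrative ad-hoc check for port conflicts (not part of the role):

    - name: show current listeners on port 53  # illustrative only
      ansible.builtin.shell: ss -tulpn | grep ':53 ' || true
      changed_when: false
      register: _port53

    - name: print listeners  # illustrative only
      ansible.builtin.debug:
        var: _port53.stdout_lines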
diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/dnsmasq/templates/etc/dnsmasq.conf b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/dnsmasq/templates/etc/dnsmasq.conf new file mode 100644 index 0000000..ebb3177 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/dnsmasq/templates/etc/dnsmasq.conf @@ -0,0 +1,689 @@ +# Configuration file for dnsmasq. +# +# Format is one option per line, legal options are the same +# as the long options legal on the command line. See +# "/usr/sbin/dnsmasq --help" or "man 8 dnsmasq" for details. + +# Listen on this specific port instead of the standard DNS port +# (53). Setting this to zero completely disables DNS function, +# leaving only DHCP and/or TFTP. +#port=5353 + +# The following two options make you a better netizen, since they +# tell dnsmasq to filter out queries which the public DNS cannot +# answer, and which load the servers (especially the root servers) +# unnecessarily. If you have a dial-on-demand link they also stop +# these requests from bringing up the link unnecessarily. + +# Never forward plain names (without a dot or domain part) +#domain-needed +# Never forward addresses in the non-routed address spaces. +#bogus-priv + +# Uncomment these to enable DNSSEC validation and caching: +# (Requires dnsmasq to be built with DNSSEC option.) +#conf-file=%%PREFIX%%/share/dnsmasq/trust-anchors.conf +#dnssec + +# Replies which are not DNSSEC signed may be legitimate, because the domain +# is unsigned, or may be forgeries. Setting this option tells dnsmasq to +# check that an unsigned reply is OK, by finding a secure proof that a DS +# record somewhere between the root and the domain does not exist. +# The cost of setting this is that even queries in unsigned domains will need +# one or more extra DNS queries to verify. +#dnssec-check-unsigned + +# Uncomment this to filter useless windows-originated DNS requests +# which can trigger dial-on-demand links needlessly. +# Note that (amongst other things) this blocks all SRV requests, +# so don't use it if you use eg Kerberos, SIP, XMMP or Google-talk. +# This option only affects forwarding, SRV records originating for +# dnsmasq (via srv-host= lines) are not suppressed by it. +#filterwin2k + +# Change this line if you want dns to get its upstream servers from +# somewhere other that /etc/resolv.conf +#resolv-file= + +# By default, dnsmasq will send queries to any of the upstream +# servers it knows about and tries to favour servers to are known +# to be up. Uncommenting this forces dnsmasq to try each query +# with each server strictly in the order they appear in +# /etc/resolv.conf +#strict-order + +# If you don't want dnsmasq to read /etc/resolv.conf or any other +# file, getting its servers from this file instead (see below), then +# uncomment this. +#no-resolv + +# If you don't want dnsmasq to poll /etc/resolv.conf or other resolv +# files for changes and re-read them then uncomment this. +#no-poll + +# Add other name servers here, with domain specs if they are for +# non-public domains. +#server=/localnet/192.168.0.1 + +# Example of routing PTR queries to nameservers: this will send all +# address->name queries for 192.168.3/24 to nameserver 10.1.2.3 +#server=/3.168.192.in-addr.arpa/10.1.2.3 + +# Add local-only domains here, queries in these domains are answered +# from /etc/hosts or DHCP only. +#local=/localnet/ + +# Add domains which you want to force to an IP address here. 
+# The example below send any host in double-click.net to a local +# web-server. +#address=/double-click.net/127.0.0.1 + +# --address (and --server) work with IPv6 addresses too. +#address=/www.thekelleys.org.uk/fe80::20d:60ff:fe36:f83 + +# Add the IPs of all queries to yahoo.com, google.com, and their +# subdomains to the vpn and search ipsets: +#ipset=/yahoo.com/google.com/vpn,search + +# Add the IPs of all queries to yahoo.com, google.com, and their +# subdomains to netfilters sets, which is equivalent to +# 'nft add element ip test vpn { ... }; nft add element ip test search { ... }' +#nftset=/yahoo.com/google.com/ip#test#vpn,ip#test#search + +# Use netfilters sets for both IPv4 and IPv6: +# This adds all addresses in *.yahoo.com to vpn4 and vpn6 for IPv4 and IPv6 addresses. +#nftset=/yahoo.com/4#ip#test#vpn4 +#nftset=/yahoo.com/6#ip#test#vpn6 + +# You can control how dnsmasq talks to a server: this forces +# queries to 10.1.2.3 to be routed via eth1 +# server=10.1.2.3@eth1 + +# and this sets the source (ie local) address used to talk to +# 10.1.2.3 to 192.168.1.1 port 55 (there must be an interface with that +# IP on the machine, obviously). +# server=10.1.2.3@192.168.1.1#55 + +# If you want dnsmasq to change uid and gid to something other +# than the default, edit the following lines. +#user= +#group= + +# If you want dnsmasq to listen for DHCP and DNS requests only on +# specified interfaces (and the loopback) give the name of the +# interface (eg eth0) here. +# Repeat the line for more than one interface. +#interface= +# Or you can specify which interface _not_ to listen on +#except-interface= +# Or which to listen on by address (remember to include 127.0.0.1 if +# you use this.) +#listen-address= +# If you want dnsmasq to provide only DNS service on an interface, +# configure it as shown above, and then use the following line to +# disable DHCP and TFTP on it. +#no-dhcp-interface= + +# On systems which support it, dnsmasq binds the wildcard address, +# even when it is listening on only some interfaces. It then discards +# requests that it shouldn't reply to. This has the advantage of +# working even when interfaces come and go and change address. If you +# want dnsmasq to really bind only the interfaces it is listening on, +# uncomment this option. About the only time you may need this is when +# running another nameserver on the same machine. +#bind-interfaces + +# If you don't want dnsmasq to read /etc/hosts, uncomment the +# following line. +#no-hosts +# or if you want it to read another file, as well as /etc/hosts, use +# this. +#addn-hosts=/etc/banner_add_hosts + +# Set this (and domain: see below) if you want to have a domain +# automatically added to simple names in a hosts-file. +#expand-hosts + +# Set the domain for dnsmasq. this is optional, but if it is set, it +# does the following things. +# 1) Allows DHCP hosts to have fully qualified domain names, as long +# as the domain part matches this setting. +# 2) Sets the "domain" DHCP option thereby potentially setting the +# domain of all systems configured by DHCP +# 3) Provides the domain part for "expand-hosts" +#domain=thekelleys.org.uk + +# Set a different domain for a particular subnet +#domain=wireless.thekelleys.org.uk,192.168.2.0/24 + +# Same idea, but range rather then subnet +#domain=reserved.thekelleys.org.uk,192.68.3.100,192.168.3.200 + +# Uncomment this to enable the integrated DHCP server, you need +# to supply the range of addresses available for lease and optionally +# a lease time. 
If you have more than one network, you will need to +# repeat this for each network on which you want to supply DHCP +# service. +#dhcp-range=192.168.0.50,192.168.0.150,12h + +# This is an example of a DHCP range where the netmask is given. This +# is needed for networks we reach the dnsmasq DHCP server via a relay +# agent. If you don't know what a DHCP relay agent is, you probably +# don't need to worry about this. +#dhcp-range=192.168.0.50,192.168.0.150,255.255.255.0,12h + +# This is an example of a DHCP range which sets a tag, so that +# some DHCP options may be set only for this network. +#dhcp-range=set:red,192.168.0.50,192.168.0.150 + +# Use this DHCP range only when the tag "green" is set. +#dhcp-range=tag:green,192.168.0.50,192.168.0.150,12h + +# Specify a subnet which can't be used for dynamic address allocation, +# is available for hosts with matching --dhcp-host lines. Note that +# dhcp-host declarations will be ignored unless there is a dhcp-range +# of some type for the subnet in question. +# In this case the netmask is implied (it comes from the network +# configuration on the machine running dnsmasq) it is possible to give +# an explicit netmask instead. +#dhcp-range=192.168.0.0,static + +# Enable DHCPv6. Note that the prefix-length does not need to be specified +# and defaults to 64 if missing/ +#dhcp-range=1234::2, 1234::500, 64, 12h + +# Do Router Advertisements, BUT NOT DHCP for this subnet. +#dhcp-range=1234::, ra-only + +# Do Router Advertisements, BUT NOT DHCP for this subnet, also try and +# add names to the DNS for the IPv6 address of SLAAC-configured dual-stack +# hosts. Use the DHCPv4 lease to derive the name, network segment and +# MAC address and assume that the host will also have an +# IPv6 address calculated using the SLAAC algorithm. +#dhcp-range=1234::, ra-names + +# Do Router Advertisements, BUT NOT DHCP for this subnet. +# Set the lifetime to 46 hours. (Note: minimum lifetime is 2 hours.) +#dhcp-range=1234::, ra-only, 48h + +# Do DHCP and Router Advertisements for this subnet. Set the A bit in the RA +# so that clients can use SLAAC addresses as well as DHCP ones. +#dhcp-range=1234::2, 1234::500, slaac + +# Do Router Advertisements and stateless DHCP for this subnet. Clients will +# not get addresses from DHCP, but they will get other configuration information. +# They will use SLAAC for addresses. +#dhcp-range=1234::, ra-stateless + +# Do stateless DHCP, SLAAC, and generate DNS names for SLAAC addresses +# from DHCPv4 leases. +#dhcp-range=1234::, ra-stateless, ra-names + +# Do router advertisements for all subnets where we're doing DHCPv6 +# Unless overridden by ra-stateless, ra-names, et al, the router +# advertisements will have the M and O bits set, so that the clients +# get addresses and configuration from DHCPv6, and the A bit reset, so the +# clients don't use SLAAC addresses. +#enable-ra + +# Supply parameters for specified hosts using DHCP. There are lots +# of valid alternatives, so we will give examples of each. Note that +# IP addresses DO NOT have to be in the range given above, they just +# need to be on the same network. The order of the parameters in these +# do not matter, it's permissible to give name, address and MAC in any +# order. 
+ +# Always allocate the host with Ethernet address 11:22:33:44:55:66 +# The IP address 192.168.0.60 +#dhcp-host=11:22:33:44:55:66,192.168.0.60 + +# Always set the name of the host with hardware address +# 11:22:33:44:55:66 to be "fred" +#dhcp-host=11:22:33:44:55:66,fred + +# Always give the host with Ethernet address 11:22:33:44:55:66 +# the name fred and IP address 192.168.0.60 and lease time 45 minutes +#dhcp-host=11:22:33:44:55:66,fred,192.168.0.60,45m + +# Give a host with Ethernet address 11:22:33:44:55:66 or +# 12:34:56:78:90:12 the IP address 192.168.0.60. Dnsmasq will assume +# that these two Ethernet interfaces will never be in use at the same +# time, and give the IP address to the second, even if it is already +# in use by the first. Useful for laptops with wired and wireless +# addresses. +#dhcp-host=11:22:33:44:55:66,12:34:56:78:90:12,192.168.0.60 + +# Give the machine which says its name is "bert" IP address +# 192.168.0.70 and an infinite lease +#dhcp-host=bert,192.168.0.70,infinite + +# Always give the host with client identifier 01:02:02:04 +# the IP address 192.168.0.60 +#dhcp-host=id:01:02:02:04,192.168.0.60 + +# Always give the InfiniBand interface with hardware address +# 80:00:00:48:fe:80:00:00:00:00:00:00:f4:52:14:03:00:28:05:81 the +# ip address 192.168.0.61. The client id is derived from the prefix +# ff:00:00:00:00:00:02:00:00:02:c9:00 and the last 8 pairs of +# hex digits of the hardware address. +#dhcp-host=id:ff:00:00:00:00:00:02:00:00:02:c9:00:f4:52:14:03:00:28:05:81,192.168.0.61 + +# Always give the host with client identifier "marjorie" +# the IP address 192.168.0.60 +#dhcp-host=id:marjorie,192.168.0.60 + +# Enable the address given for "judge" in /etc/hosts +# to be given to a machine presenting the name "judge" when +# it asks for a DHCP lease. +#dhcp-host=judge + +# Never offer DHCP service to a machine whose Ethernet +# address is 11:22:33:44:55:66 +#dhcp-host=11:22:33:44:55:66,ignore + +# Ignore any client-id presented by the machine with Ethernet +# address 11:22:33:44:55:66. This is useful to prevent a machine +# being treated differently when running under different OS's or +# between PXE boot and OS boot. +#dhcp-host=11:22:33:44:55:66,id:* + +# Send extra options which are tagged as "red" to +# the machine with Ethernet address 11:22:33:44:55:66 +#dhcp-host=11:22:33:44:55:66,set:red + +# Send extra options which are tagged as "red" to +# any machine with Ethernet address starting 11:22:33: +#dhcp-host=11:22:33:*:*:*,set:red + +# Give a fixed IPv6 address and name to client with +# DUID 00:01:00:01:16:d2:83:fc:92:d4:19:e2:d8:b2 +# Note the MAC addresses CANNOT be used to identify DHCPv6 clients. +# Note also that the [] around the IPv6 address are obligatory. +#dhcp-host=id:00:01:00:01:16:d2:83:fc:92:d4:19:e2:d8:b2, fred, [1234::5] + +# Ignore any clients which are not specified in dhcp-host lines +# or /etc/ethers. Equivalent to ISC "deny unknown-clients". +# This relies on the special "known" tag which is set when +# a host is matched. +#dhcp-ignore=tag:!known + +# Send extra options which are tagged as "red" to any machine whose +# DHCP vendorclass string includes the substring "Linux" +#dhcp-vendorclass=set:red,Linux + +# Send extra options which are tagged as "red" to any machine one +# of whose DHCP userclass strings includes the substring "accounts" +#dhcp-userclass=set:red,accounts + +# Send extra options which are tagged as "red" to any machine whose +# MAC address matches the pattern. 
+#dhcp-mac=set:red,00:60:8C:*:*:*
+
+# If this line is uncommented, dnsmasq will read /etc/ethers and act
+# on the ethernet-address/IP pairs found there just as if they had
+# been given as --dhcp-host options. Useful if you keep
+# MAC-address/host mappings there for other purposes.
+#read-ethers
+
+# Send options to hosts which ask for a DHCP lease.
+# See RFC 2132 for details of available options.
+# Common options can be given to dnsmasq by name:
+# run "dnsmasq --help dhcp" to get a list.
+# Note that all the common settings, such as netmask and
+# broadcast address, DNS server and default route, are given
+# sane defaults by dnsmasq. You very likely will not need
+# any dhcp-options. If you use Windows clients and Samba, there
+# are some options which are recommended; they are detailed at the
+# end of this section.
+
+# Override the default route supplied by dnsmasq, which assumes the
+# router is the same machine as the one running dnsmasq.
+#dhcp-option=3,1.2.3.4
+
+# Do the same thing, but using the option name
+#dhcp-option=option:router,1.2.3.4
+
+# Override the default route supplied by dnsmasq and send no default
+# route at all. Note that this only works for the options sent by
+# default (1, 3, 6, 12, 28); the same line will send a zero-length option
+# for all other option numbers.
+#dhcp-option=3
+
+# Set the NTP time server addresses to 192.168.0.4 and 10.10.0.5
+#dhcp-option=option:ntp-server,192.168.0.4,10.10.0.5
+
+# Send DHCPv6 option. Note [] around IPv6 addresses.
+#dhcp-option=option6:dns-server,[1234::77],[1234::88]
+
+# Send DHCPv6 option for nameservers as the machine running
+# dnsmasq and another.
+#dhcp-option=option6:dns-server,[::],[1234::88]
+
+# Ask client to poll for option changes every six hours. (RFC4242)
+#dhcp-option=option6:information-refresh-time,6h
+
+# Set option 58 client renewal time (T1). Defaults to half of the
+# lease time if not specified. (RFC2132)
+#dhcp-option=option:T1,1m
+
+# Set option 59 rebinding time (T2). Defaults to 7/8 of the
+# lease time if not specified. (RFC2132)
+#dhcp-option=option:T2,2m
+
+# Set the NTP time server address to be the same machine as
+# is running dnsmasq
+#dhcp-option=42,0.0.0.0
+
+# Set the NIS domain name to "welly"
+#dhcp-option=40,welly
+
+# Set the default time-to-live to 50
+#dhcp-option=23,50
+
+# Set the "all subnets are local" flag
+#dhcp-option=27,1
+
+# Send the etherboot magic flag and then etherboot options (a string).
+#dhcp-option=128,e4:45:74:68:00:00
+#dhcp-option=129,NIC=eepro100
+
+# Specify an option which will only be sent to the "red" network
+# (see dhcp-range for the declaration of the "red" network)
+# Note that the tag: part must precede the option: part.
+#dhcp-option = tag:red, option:ntp-server, 192.168.1.1
+
+# The following DHCP options set up dnsmasq in the same way as is specified
+# for the ISC dhcpd in
+# http://www.samba.org/samba/ftp/docs/textdocs/DHCP-Server-Configuration.txt
+# adapted for a typical dnsmasq installation where the host running
+# dnsmasq is also the host running samba.
+# You may want to uncomment some or all of them if you use
+# Windows clients and Samba.
+#dhcp-option=19,0 # option ip-forwarding off
+#dhcp-option=44,0.0.0.0 # set netbios-over-TCP/IP nameserver(s) aka WINS server(s)
+#dhcp-option=45,0.0.0.0 # netbios datagram distribution server
+#dhcp-option=46,8 # netbios node type
+
+# Send an empty WPAD option. This may be REQUIRED to get windows 7 to behave.
+#dhcp-option=252,"\n"
+
+# Send RFC-3397 DNS domain search DHCP option. WARNING: Your DHCP client
+# probably doesn't support this......
+#dhcp-option=option:domain-search,eng.apple.com,marketing.apple.com
+
+# Send RFC-3442 classless static routes (note the netmask encoding)
+#dhcp-option=121,192.168.1.0/24,1.2.3.4,10.0.0.0/8,5.6.7.8
+
+# Send vendor-class specific options encapsulated in DHCP option 43.
+# The meaning of the options is defined by the vendor-class so
+# options are sent only when the client supplied vendor class
+# matches the class given here. (A substring match is OK, so "MSFT"
+# matches "MSFT" and "MSFT 5.0"). This example sets the
+# mtftp address to 0.0.0.0 for PXEClients.
+#dhcp-option=vendor:PXEClient,1,0.0.0.0
+
+# Send microsoft-specific option to tell windows to release the DHCP lease
+# when it shuts down. Note the "i" flag, to tell dnsmasq to send the
+# value as a four-byte integer - that's what microsoft wants. See
+# http://technet2.microsoft.com/WindowsServer/en/library/a70f1bb7-d2d4-49f0-96d6-4b7414ecfaae1033.mspx?mfr=true
+#dhcp-option=vendor:MSFT,2,1i
+
+# Send the Encapsulated-vendor-class ID needed by some configurations of
+# Etherboot to allow it to recognise the DHCP server.
+#dhcp-option=vendor:Etherboot,60,"Etherboot"
+
+# Send options to PXELinux. Note that we need to send the options even
+# though they don't appear in the parameter request list, so we need
+# to use dhcp-option-force here.
+# See http://syslinux.zytor.com/pxe.php#special for details.
+# Magic number - needed before anything else is recognised
+#dhcp-option-force=208,f1:00:74:7e
+# Configuration file name
+#dhcp-option-force=209,configs/common
+# Path prefix
+#dhcp-option-force=210,/tftpboot/pxelinux/files/
+# Reboot time. (Note 'i' to send 32-bit value)
+#dhcp-option-force=211,30i
+
+# Set the boot filename for netboot/PXE. You will only need
+# this if you want to boot machines over the network and you will need
+# a TFTP server; either dnsmasq's built-in TFTP server or an
+# external one. (See below for how to enable the TFTP server.)
+#dhcp-boot=pxelinux.0
+
+# The same as above, but use custom tftp-server instead of the machine running dnsmasq
+#dhcp-boot=pxelinux,server.name,192.168.1.100
+
+# Boot for iPXE. The idea is to send two different
+# filenames, the first loads iPXE, and the second tells iPXE what to
+# load. The dhcp-match sets the ipxe tag for requests from iPXE.
+#dhcp-boot=undionly.kpxe
+#dhcp-match=set:ipxe,175 # iPXE sends a 175 option.
+#dhcp-boot=tag:ipxe,http://boot.ipxe.org/demo/boot.php
+
+# Encapsulated options for iPXE. All the options are
+# encapsulated within option 175
+#dhcp-option=encap:175, 1, 5b # priority code
+#dhcp-option=encap:175, 176, 1b # no-proxydhcp
+#dhcp-option=encap:175, 177, string # bus-id
+#dhcp-option=encap:175, 189, 1b # BIOS drive code
+#dhcp-option=encap:175, 190, user # iSCSI username
+#dhcp-option=encap:175, 191, pass # iSCSI password
+
+# Test for the architecture of a netboot client. PXE clients are
+# supposed to send their architecture as option 93. (See RFC 4578)
+#dhcp-match=peecees, option:client-arch, 0 #x86-32
+#dhcp-match=itanics, option:client-arch, 2 #IA64
+#dhcp-match=hammers, option:client-arch, 6 #x86-64
+#dhcp-match=mactels, option:client-arch, 7 #EFI x86-64
+
+# Do real PXE, rather than just booting a single file; this is an
+# alternative to dhcp-boot.
+#pxe-prompt="What system shall I netboot?"
+# or with timeout before first available action is taken:
+#pxe-prompt="Press F8 for menu.", 60
+
+# Available boot services for PXE.
+#pxe-service=x86PC, "Boot from local disk"
+
+# Loads /pxelinux.0 from dnsmasq TFTP server.
+#pxe-service=x86PC, "Install Linux", pxelinux
+
+# Loads /pxelinux.0 from TFTP server at 1.2.3.4.
+# Beware this fails on old PXE ROMS.
+#pxe-service=x86PC, "Install Linux", pxelinux, 1.2.3.4
+
+# Use bootserver on network, found by multicast or broadcast.
+#pxe-service=x86PC, "Install windows from RIS server", 1
+
+# Use bootserver at a known IP address.
+#pxe-service=x86PC, "Install windows from RIS server", 1, 1.2.3.4
+
+# If you have multicast-FTP available,
+# information for that can be passed in a similar way using options 1
+# to 5. See page 19 of
+# http://download.intel.com/design/archives/wfm/downloads/pxespec.pdf
+
+
+# Enable dnsmasq's built-in TFTP server
+#enable-tftp
+
+# Set the root directory for files available via TFTP.
+#tftp-root=/var/ftpd
+
+# Do not abort if the tftp-root is unavailable
+#tftp-no-fail
+
+# Make the TFTP server more secure: with this set, only files owned by
+# the user dnsmasq is running as will be sent over the net.
+#tftp-secure
+
+# This option stops dnsmasq from negotiating a larger blocksize for TFTP
+# transfers. It will slow things down, but may rescue some broken TFTP
+# clients.
+#tftp-no-blocksize
+
+# Set the boot file name only when the "red" tag is set.
+#dhcp-boot=tag:red,pxelinux.red-net
+
+# An example of dhcp-boot with an external TFTP server: the name and IP
+# address of the server are given after the filename.
+# Can fail with old PXE ROMS. Overridden by --pxe-service.
+#dhcp-boot=/var/ftpd/pxelinux.0,boothost,192.168.0.3
+
+# If there are multiple external tftp servers having the same name
+# (using /etc/hosts) then that name can be specified as the
+# tftp_servername (the third option to dhcp-boot) and in that
+# case dnsmasq resolves this name and returns the resultant IP
+# addresses in round robin fashion. This facility can be used to
+# load balance the tftp load among a set of servers.
+#dhcp-boot=/var/ftpd/pxelinux.0,boothost,tftp_server_name
+
+# Set the limit on DHCP leases; the default is 150
+#dhcp-lease-max=150
+
+# The DHCP server needs somewhere on disk to keep its lease database.
+# This defaults to a sane location, but if you want to change it, use
+# the line below.
+#dhcp-leasefile=/var/lib/misc/dnsmasq.leases
+
+# Set the DHCP server to authoritative mode. In this mode it will barge in
+# and take over the lease for any client which broadcasts on the network,
+# whether it has a record of the lease or not. This avoids long timeouts
+# when a machine wakes up on a new network. DO NOT enable this if there's
+# the slightest chance that you might end up accidentally configuring a DHCP
+# server for your campus/company. The ISC server uses
+# the same option, and this URL provides more information:
+# http://www.isc.org/files/auth.html
+#dhcp-authoritative
+
+# Set the DHCP server to enable DHCPv4 Rapid Commit Option per RFC 4039.
+# In this mode it will respond to a DHCPDISCOVER message including a Rapid Commit
+# option with a DHCPACK including a Rapid Commit option and fully committed address
+# and configuration information. This must only be enabled if either the server is
+# the only server for the subnet, or multiple servers are present and they each
+# commit a binding for all clients.
+#dhcp-rapid-commit
+
+# Run an executable when a DHCP lease is created or destroyed.
+# The arguments sent to the script are "add" or "del",
+# then the MAC address, the IP address and finally the hostname
+# if there is one.
+#dhcp-script=/bin/echo
+
+# Set the cachesize here.
+#cache-size=150
+
+# If you want to disable negative caching, uncomment this.
+#no-negcache
+
+# Normally responses which come from /etc/hosts and the DHCP lease
+# file have Time-To-Live set as zero, which conventionally means
+# do not cache further. If you are happy to trade lower load on the
+# server for potentially stale data, you can set a time-to-live (in
+# seconds) here.
+#local-ttl=
+
+# If you want dnsmasq to detect attempts by Verisign to send queries
+# to unregistered .com and .net hosts to its sitefinder service and
+# have dnsmasq instead return the correct NXDOMAIN response, uncomment
+# this line. You can add similar lines to do the same for other
+# registries which have implemented wildcard A records.
+#bogus-nxdomain=64.94.110.11
+
+# If you want to fix up DNS results from upstream servers, use the
+# alias option. This only works for IPv4.
+# This alias makes a result of 1.2.3.4 appear as 5.6.7.8
+#alias=1.2.3.4,5.6.7.8
+# and this maps 1.2.3.x to 5.6.7.x
+#alias=1.2.3.0,5.6.7.0,255.255.255.0
+# and this maps 192.168.0.10->192.168.0.40 to 10.0.0.10->10.0.0.40
+#alias=192.168.0.10-192.168.0.40,10.0.0.0,255.255.255.0
+
+# Change these lines if you want dnsmasq to serve MX records.
+
+# Return an MX record named "maildomain.com" with target
+# servermachine.com and preference 50
+#mx-host=maildomain.com,servermachine.com,50
+
+# Set the default target for MX records created using the localmx option.
+#mx-target=servermachine.com
+
+# Return an MX record pointing to the mx-target for all local
+# machines.
+#localmx
+
+# Return an MX record pointing to itself for all local machines.
+#selfmx
+
+# Change the following lines if you want dnsmasq to serve SRV
+# records. These are useful if you want to serve ldap requests for
+# Active Directory and other windows-originated DNS requests.
+# See RFC 2782.
+# You may add multiple srv-host lines.
+# The fields are <name>,<target>,<port>,<priority>,<weight>
+# If the domain part is missing from the name (so that it just has the
+# service and protocol sections) then the domain given by the domain=
+# config option is used. (Note that expand-hosts does not need to be
+# set for this to work.)
+
+# A SRV record sending LDAP for the example.com domain to
+# ldapserver.example.com port 389
+#srv-host=_ldap._tcp.example.com,ldapserver.example.com,389
+
+# A SRV record sending LDAP for the example.com domain to
+# ldapserver.example.com port 389 (using domain=)
+#domain=example.com
+#srv-host=_ldap._tcp,ldapserver.example.com,389
+
+# Two SRV records for LDAP, each with different priorities
+#srv-host=_ldap._tcp.example.com,ldapserver.example.com,389,1
+#srv-host=_ldap._tcp.example.com,ldapserver.example.com,389,2
+
+# A SRV record indicating that there is no LDAP server for the domain
+# example.com
+#srv-host=_ldap._tcp.example.com
+
+# The following line shows how to make dnsmasq serve an arbitrary PTR
+# record. This is useful for DNS-SD. (Note that the
+# domain-name expansion done for SRV records _does_not
+# occur for PTR records.)
+#ptr-record=_http._tcp.dns-sd-services,"New Employee Page._http._tcp.dns-sd-services"
+
+# Change the following lines to enable dnsmasq to serve TXT records.
+# These are used for things like SPF and zeroconf. (Note that the
+# domain-name expansion done for SRV records _does_not
+# occur for TXT records.)
+
+#Example SPF.
+#txt-record=example.com,"v=spf1 a -all"
+
+#Example zeroconf
+#txt-record=_http._tcp.example.com,name=value,paper=A4
+
+# Provide an alias for a "local" DNS name. Note that this _only_ works
+# for targets which are names from DHCP or /etc/hosts. Give host
+# "bert" another name, bertrand
+#cname=bertrand,bert
+
+# For debugging purposes, log each DNS query as it passes through
+# dnsmasq.
+#log-queries
+
+# Log lots of extra information about DHCP transactions.
+#log-dhcp
+
+# Include another lot of configuration options.
+#conf-file=/etc/dnsmasq.more.conf
+#conf-dir=/etc/dnsmasq.d
+
+# Include all the files in a directory except those ending in .bak
+#conf-dir=/etc/dnsmasq.d,.bak
+
+# Include all files in a directory which end in .conf
+#conf-dir=/etc/dnsmasq.d/,*.conf
+
+# If a DHCP client claims that its name is "wpad", ignore that.
+# This fixes a security hole. See CERT Vulnerability VU#598349
+#dhcp-name-match=set:wpad-ignore,wpad
+#dhcp-ignore-names=tag:wpad-ignore
diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/dnsmasq/templates/etc/dnsmasq.conf.j2 b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/dnsmasq/templates/etc/dnsmasq.conf.j2
new file mode 100644
index 0000000..3a7b800
--- /dev/null
+++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/dnsmasq/templates/etc/dnsmasq.conf.j2
@@ -0,0 +1,68 @@
+#jinja2: trim_blocks: True, lstrip_blocks: True
+# {{ ansible_managed }}
+
+port={{ dnsmasq_global.port }}
+
+# If you want dnsmasq to change uid and gid to something other
+# than the default, edit the following lines.
+{% if dnsmasq_global.user is defined and
+   dnsmasq_global.user | string | length > 0 %}
+user={{ dnsmasq_global.user }}
+{% endif %}
+{% if dnsmasq_global.group is defined and
+   dnsmasq_global.group | string | length > 0 %}
+group={{ dnsmasq_global.group }}
+{% endif %}
+{% if dnsmasq_global.filterwin2k is defined and
+   dnsmasq_global.filterwin2k %}
+# Uncomment this to filter useless windows-originated DNS requests
+# which can trigger dial-on-demand links needlessly.
+# Note that (amongst other things) this blocks all SRV requests,
+# so don't use it if you use eg Kerberos, SIP, XMPP or Google-talk.
+# This option only affects forwarding, SRV records originating from
+# dnsmasq (via srv-host= lines) are not suppressed by it.
+filterwin2k
+{% endif %}
+{% if dnsmasq_global.resolv_file is defined and
+   dnsmasq_global.resolv_file | string | length > 0 %}
+# Change this line if you want dns to get its upstream servers from
+# somewhere other than /etc/resolv.conf
+resolv-file={{ dnsmasq_global.resolv_file }}
+{% endif %}
+{% if dnsmasq_global.strict_order is defined and
+   dnsmasq_global.strict_order %}
+# By default, dnsmasq will send queries to any of the upstream
+# servers it knows about and tries to favour servers that are known
+# to be up. Uncommenting this forces dnsmasq to try each query
+# with each server strictly in the order they appear in
+# /etc/resolv.conf
+strict-order
+{% endif %}
+{% if dnsmasq_global.no_hosts %}
+no-hosts
+{% endif %}
+{% if dnsmasq_global.no_resolv %}
+no-resolv
+{% endif %}
+{% if dnsmasq_global.no_poll %}
+no-poll
+{% endif %}
+{% if dnsmasq_global.domain_needed %}
+domain-needed
+{% endif %}
+{% if dnsmasq_global.bogus_priv %}
+bogus-priv
+{% endif %}
+{% if dnsmasq_global.cache_size is defined and
+   dnsmasq_global.cache_size | string | length > 0 %}
+cache-size={{ dnsmasq_global.cache_size }}
+{% endif %}
+{% if dnsmasq_global.all_servers %}
+all-servers
+{% endif %}
+{% if dnsmasq_global.no_negcache %}
+no-negcache
+{% endif %}
+# Include another lot of configuration options.
+#conf-file=/etc/dnsmasq.more.conf
+conf-dir={{ dnsmasq_config_directory }}/,*.conf
diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/dnsmasq/templates/etc/dnsmasq.d/10-interfaces.conf.j2 b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/dnsmasq/templates/etc/dnsmasq.d/10-interfaces.conf.j2
new file mode 100644
index 0000000..43b8247
--- /dev/null
+++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/dnsmasq/templates/etc/dnsmasq.d/10-interfaces.conf.j2
@@ -0,0 +1,26 @@
+#jinja2: trim_blocks: True, lstrip_blocks: True
+# {{ ansible_managed }}
+
+{% if dnsmasq_interfaces.listen_address is defined and
+   dnsmasq_interfaces.listen_address | string | length > 0 %}
+# Or which to listen on by address (remember to include 127.0.0.1 if
+# you use this.)
+listen-address={{ dnsmasq_interfaces.listen_address }}
+{% endif %}
+{% if dnsmasq_interfaces.interfaces is defined and
+   dnsmasq_interfaces.interfaces | count > 0 %}
+
+  {% for item in dnsmasq_interfaces.interfaces %}
+interface={{ item }}
+  {% endfor %}
+{% endif %}
+{% if dnsmasq_interfaces.bind_only is defined and
+   dnsmasq_interfaces.bind_only %}
+bind-interfaces
+{% endif %}
+{% if dnsmasq_interfaces.except_interfaces is defined and
+   dnsmasq_interfaces.except_interfaces | count > 0 %}
+  {% for item in dnsmasq_interfaces.except_interfaces %}
+except-interface={{ item }}
+  {% endfor %}
+{% endif %}
diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/dnsmasq/templates/etc/dnsmasq.d/10-logging.conf.j2 b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/dnsmasq/templates/etc/dnsmasq.d/10-logging.conf.j2
new file mode 100644
index 0000000..b02a4c1
--- /dev/null
+++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/dnsmasq/templates/etc/dnsmasq.d/10-logging.conf.j2
@@ -0,0 +1,30 @@
+#jinja2: trim_blocks: True, lstrip_blocks: True
+# {{ ansible_managed }}
+
+{% if dnsmasq_logging.log_queries %}
+log-queries
+log-facility={{ dnsmasq_log_facility }}
+{% endif %}
+{% if dnsmasq_logging.log_dhcp %}
+# Log lots of extra information about DHCP transactions.
+log-dhcp
+{% endif %}
+{#
+--log-debug
+  Enable extra logging intended for debugging rather than information.
+--log-async[=<lines>]
+  Enable asynchronous logging and optionally set the limit on the number of lines which will be queued by dnsmasq when writing to the syslog is slow.
+  Dnsmasq can log asynchronously: this allows it to continue functioning without being blocked by syslog,
+  and allows syslog to use dnsmasq for DNS queries without risking deadlock. If the queue of log-lines
+  becomes full, dnsmasq will log the overflow, and the number of messages lost. 
+ The default queue length is 5, a sane value would be 5-25, and a maximum limit of 100 is imposed. +#} +{# +# For debugging purposes, log each DNS query as it passes through +# dnsmasq. +#log-queries + +# Log lots of extra information about DHCP transactions. +#log-dhcp + +#} diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/dnsmasq/templates/etc/dnsmasq.d/20-address.conf.j2 b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/dnsmasq/templates/etc/dnsmasq.d/20-address.conf.j2 new file mode 100644 index 0000000..cb5a9d5 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/dnsmasq/templates/etc/dnsmasq.d/20-address.conf.j2 @@ -0,0 +1,9 @@ +#jinja2: trim_blocks: True, lstrip_blocks: True +# {{ ansible_managed }} + +{% if dnsmasq_addresses is defined and + dnsmasq_addresses | count > 0 %} +{% for item in dnsmasq_addresses %} +address=/{{ item.get('name') }}/{{ item.get('address') }} +{% endfor %} +{% endif %} diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/dnsmasq/templates/etc/dnsmasq.d/20-alias.conf.j2 b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/dnsmasq/templates/etc/dnsmasq.d/20-alias.conf.j2 new file mode 100644 index 0000000..72bdaff --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/dnsmasq/templates/etc/dnsmasq.d/20-alias.conf.j2 @@ -0,0 +1,13 @@ +#jinja2: trim_blocks: True, lstrip_blocks: True +# {{ ansible_managed }} + +{# +# If you want to fix up DNS results from upstream servers, use the +# alias option. This only works for IPv4. +# This alias makes a result of 1.2.3.4 appear as 5.6.7.8 +#alias=1.2.3.4,5.6.7.8 +# and this maps 1.2.3.x to 5.6.7.x +#alias=1.2.3.0,5.6.7.0,255.255.255.0 +# and this maps 192.168.0.10->192.168.0.40 to 10.0.0.10->10.0.0.40 +#alias=192.168.0.10-192.168.0.40,10.0.0.0,255.255.255.0 +#} diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/dnsmasq/templates/etc/dnsmasq.d/20-dhcp.conf.j2 b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/dnsmasq/templates/etc/dnsmasq.d/20-dhcp.conf.j2 new file mode 100644 index 0000000..58eb914 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/dnsmasq/templates/etc/dnsmasq.d/20-dhcp.conf.j2 @@ -0,0 +1,411 @@ +#jinja2: trim_blocks: True, lstrip_blocks: True +# {{ ansible_managed }} + +{% if dnsmasq_dhcp.enabled %} +dhcp-client-update + {% if dnsmasq_dhcp.dhcp_boot is defined and + dnsmasq_dhcp.dhcp_boot | string | length > 0 %} +dhcp-boot={{ dnsmasq_dhcp.dhcp_boot }} + {% endif %} + {% if dnsmasq_dhcp.dhcp_hosts is defined and + dnsmasq_dhcp.dhcp_hosts | count > 0 %} + {% for item in dnsmasq_dhcp.dhcp_hosts %} +dhcp-host={{ item.mac_address | join(',') }}{% if item.name is defined %},{{ item.name }}.{{ dnsmasq_domain }}{% endif %}{% if item.address is defined %},{{ item.address }}{% endif %}{% if item.lease_time is defined %},{{ item.lease_time }}{% endif %} + {% endfor %} + {% endif %} + {% if dnsmasq_dhcp.dhcp_options is defined and + dnsmasq_dhcp.dhcp_options | count > 0 %} + {% for item in dnsmasq_dhcp.dhcp_options %} +dhcp-option=option:{{ item.option }},{{ item.value | join(',') }} + {% endfor %} + {% endif %} + {% if dnsmasq_dhcp.dhcp_options_tagged is defined and + dnsmasq_dhcp.dhcp_options_tagged | count > 0 %} + {% for item in dnsmasq_dhcp.dhcp_options_tagged %} +dhcp-option=tag:{{ item.tag }},option:{{ item.option }},{{ item.value | join(',') }} + {% endfor %} + {% 
endif %} + {% if dnsmasq_dhcp.dhcp_range is defined and + dnsmasq_dhcp.dhcp_range | count > 0 %} + {% for item in dnsmasq_dhcp.dhcp_range %} +dhcp-range={% if item.set is defined %}set:{{ item.set }},{% endif %}{{ item.start }},{{ item.end }},{{ item.netmask }}{% if item.lease_time is defined %},{{ item.lease_time }}{% endif %} + {% endfor %} + {% endif %} +{% endif %} +{% if dnsmasq_dhcp.dhcp_authoritative %} +dhcp-authoritative +{% endif %} + +{# +# Uncomment this to enable the integrated DHCP server, you need +# to supply the range of addresses available for lease and optionally +# a lease time. If you have more than one network, you will need to +# repeat this for each network on which you want to supply DHCP +# service. +#dhcp-range=192.168.0.50,192.168.0.150,12h + +# This is an example of a DHCP range where the netmask is given. This +# is needed for networks we reach the dnsmasq DHCP server via a relay +# agent. If you don't know what a DHCP relay agent is, you probably +# don't need to worry about this. +#dhcp-range=192.168.0.50,192.168.0.150,255.255.255.0,12h + +# This is an example of a DHCP range which sets a tag, so that +# some DHCP options may be set only for this network. +#dhcp-range=set:red,192.168.0.50,192.168.0.150 + +# Use this DHCP range only when the tag "green" is set. +#dhcp-range=tag:green,192.168.0.50,192.168.0.150,12h + +# Specify a subnet which can't be used for dynamic address allocation, +# is available for hosts with matching --dhcp-host lines. Note that +# dhcp-host declarations will be ignored unless there is a dhcp-range +# of some type for the subnet in question. +# In this case the netmask is implied (it comes from the network +# configuration on the machine running dnsmasq) it is possible to give +# an explicit netmask instead. +#dhcp-range=192.168.0.0,static + +# Enable DHCPv6. Note that the prefix-length does not need to be specified +# and defaults to 64 if missing/ +#dhcp-range=1234::2, 1234::500, 64, 12h + +# Do Router Advertisements, BUT NOT DHCP for this subnet. +#dhcp-range=1234::, ra-only + +# Do Router Advertisements, BUT NOT DHCP for this subnet, also try and +# add names to the DNS for the IPv6 address of SLAAC-configured dual-stack +# hosts. Use the DHCPv4 lease to derive the name, network segment and +# MAC address and assume that the host will also have an +# IPv6 address calculated using the SLAAC algorithm. +#dhcp-range=1234::, ra-names + +# Do Router Advertisements, BUT NOT DHCP for this subnet. +# Set the lifetime to 46 hours. (Note: minimum lifetime is 2 hours.) +#dhcp-range=1234::, ra-only, 48h + +# Do DHCP and Router Advertisements for this subnet. Set the A bit in the RA +# so that clients can use SLAAC addresses as well as DHCP ones. +#dhcp-range=1234::2, 1234::500, slaac + +# Do Router Advertisements and stateless DHCP for this subnet. Clients will +# not get addresses from DHCP, but they will get other configuration information. +# They will use SLAAC for addresses. +#dhcp-range=1234::, ra-stateless + +# Do stateless DHCP, SLAAC, and generate DNS names for SLAAC addresses +# from DHCPv4 leases. +#dhcp-range=1234::, ra-stateless, ra-names + +# Do router advertisements for all subnets where we're doing DHCPv6 +# Unless overridden by ra-stateless, ra-names, et al, the router +# advertisements will have the M and O bits set, so that the clients +# get addresses and configuration from DHCPv6, and the A bit reset, so the +# clients don't use SLAAC addresses. +#enable-ra + +# Supply parameters for specified hosts using DHCP. 
There are lots +# of valid alternatives, so we will give examples of each. Note that +# IP addresses DO NOT have to be in the range given above, they just +# need to be on the same network. The order of the parameters in these +# do not matter, it's permissible to give name, address and MAC in any +# order. + +# Always allocate the host with Ethernet address 11:22:33:44:55:66 +# The IP address 192.168.0.60 +#dhcp-host=11:22:33:44:55:66,192.168.0.60 + +# Always set the name of the host with hardware address +# 11:22:33:44:55:66 to be "fred" +#dhcp-host=11:22:33:44:55:66,fred + +# Always give the host with Ethernet address 11:22:33:44:55:66 +# the name fred and IP address 192.168.0.60 and lease time 45 minutes +#dhcp-host=11:22:33:44:55:66,fred,192.168.0.60,45m + +# Give a host with Ethernet address 11:22:33:44:55:66 or +# 12:34:56:78:90:12 the IP address 192.168.0.60. Dnsmasq will assume +# that these two Ethernet interfaces will never be in use at the same +# time, and give the IP address to the second, even if it is already +# in use by the first. Useful for laptops with wired and wireless +# addresses. +#dhcp-host=11:22:33:44:55:66,12:34:56:78:90:12,192.168.0.60 + +# Give the machine which says its name is "bert" IP address +# 192.168.0.70 and an infinite lease +#dhcp-host=bert,192.168.0.70,infinite + +# Always give the host with client identifier 01:02:02:04 +# the IP address 192.168.0.60 +#dhcp-host=id:01:02:02:04,192.168.0.60 + +# Always give the InfiniBand interface with hardware address +# 80:00:00:48:fe:80:00:00:00:00:00:00:f4:52:14:03:00:28:05:81 the +# ip address 192.168.0.61. The client id is derived from the prefix +# ff:00:00:00:00:00:02:00:00:02:c9:00 and the last 8 pairs of +# hex digits of the hardware address. +#dhcp-host=id:ff:00:00:00:00:00:02:00:00:02:c9:00:f4:52:14:03:00:28:05:81,192.168.0.61 + +# Always give the host with client identifier "marjorie" +# the IP address 192.168.0.60 +#dhcp-host=id:marjorie,192.168.0.60 + +# Enable the address given for "judge" in /etc/hosts +# to be given to a machine presenting the name "judge" when +# it asks for a DHCP lease. +#dhcp-host=judge + +# Never offer DHCP service to a machine whose Ethernet +# address is 11:22:33:44:55:66 +#dhcp-host=11:22:33:44:55:66,ignore + +# Ignore any client-id presented by the machine with Ethernet +# address 11:22:33:44:55:66. This is useful to prevent a machine +# being treated differently when running under different OS's or +# between PXE boot and OS boot. +#dhcp-host=11:22:33:44:55:66,id:* + +# Send extra options which are tagged as "red" to +# the machine with Ethernet address 11:22:33:44:55:66 +#dhcp-host=11:22:33:44:55:66,set:red + +# Send extra options which are tagged as "red" to +# any machine with Ethernet address starting 11:22:33: +#dhcp-host=11:22:33:*:*:*,set:red + +# Give a fixed IPv6 address and name to client with +# DUID 00:01:00:01:16:d2:83:fc:92:d4:19:e2:d8:b2 +# Note the MAC addresses CANNOT be used to identify DHCPv6 clients. +# Note also that the [] around the IPv6 address are obligatory. +#dhcp-host=id:00:01:00:01:16:d2:83:fc:92:d4:19:e2:d8:b2, fred, [1234::5] + +# Ignore any clients which are not specified in dhcp-host lines +# or /etc/ethers. Equivalent to ISC "deny unknown-clients". +# This relies on the special "known" tag which is set when +# a host is matched. 
+#dhcp-ignore=tag:!known + +# Send extra options which are tagged as "red" to any machine whose +# DHCP vendorclass string includes the substring "Linux" +#dhcp-vendorclass=set:red,Linux + +# Send extra options which are tagged as "red" to any machine one +# of whose DHCP userclass strings includes the substring "accounts" +#dhcp-userclass=set:red,accounts + +# Send extra options which are tagged as "red" to any machine whose +# MAC address matches the pattern. +#dhcp-mac=set:red,00:60:8C:*:*:* + +# If this line is uncommented, dnsmasq will read /etc/ethers and act +# on the ethernet-address/IP pairs found there just as if they had +# been given as --dhcp-host options. Useful if you keep +# MAC-address/host mappings there for other purposes. +#read-ethers + +# Send options to hosts which ask for a DHCP lease. +# See RFC 2132 for details of available options. +# Common options can be given to dnsmasq by name: +# run "dnsmasq --help dhcp" to get a list. +# Note that all the common settings, such as netmask and +# broadcast address, DNS server and default route, are given +# sane defaults by dnsmasq. You very likely will not need +# any dhcp-options. If you use Windows clients and Samba, there +# are some options which are recommended, they are detailed at the +# end of this section. + +# Override the default route supplied by dnsmasq, which assumes the +# router is the same machine as the one running dnsmasq. +#dhcp-option=3,1.2.3.4 + +# Do the same thing, but using the option name +#dhcp-option=option:router,1.2.3.4 + +# Override the default route supplied by dnsmasq and send no default +# route at all. Note that this only works for the options sent by +# default (1, 3, 6, 12, 28) the same line will send a zero-length option +# for all other option numbers. +#dhcp-option=3 + +# Set the NTP time server addresses to 192.168.0.4 and 10.10.0.5 +#dhcp-option=option:ntp-server,192.168.0.4,10.10.0.5 + +# Send DHCPv6 option. Note [] around IPv6 addresses. +#dhcp-option=option6:dns-server,[1234::77],[1234::88] + +# Send DHCPv6 option for namservers as the machine running +# dnsmasq and another. +#dhcp-option=option6:dns-server,[::],[1234::88] + +# Ask client to poll for option changes every six hours. (RFC4242) +#dhcp-option=option6:information-refresh-time,6h + +# Set option 58 client renewal time (T1). Defaults to half of the +# lease time if not specified. (RFC2132) +#dhcp-option=option:T1,1m + +# Set option 59 rebinding time (T2). Defaults to 7/8 of the +# lease time if not specified. (RFC2132) +#dhcp-option=option:T2,2m + +# Set the NTP time server address to be the same machine as +# is running dnsmasq +#dhcp-option=42,0.0.0.0 + +# Set the NIS domain name to "welly" +#dhcp-option=40,welly + +# Set the default time-to-live to 50 +#dhcp-option=23,50 + +# Set the "all subnets are local" flag +#dhcp-option=27,1 + +# Send the etherboot magic flag and then etherboot options (a string). +#dhcp-option=128,e4:45:74:68:00:00 +#dhcp-option=129,NIC=eepro100 + +# Specify an option which will only be sent to the "red" network +# (see dhcp-range for the declaration of the "red" network) +# Note that the tag: part must precede the option: part. +#dhcp-option = tag:red, option:ntp-server, 192.168.1.1 + +# The following DHCP options set up dnsmasq in the same way as is specified +# for the ISC dhcpcd in +# http://www.samba.org/samba/ftp/docs/textdocs/DHCP-Server-Configuration.txt +# adapted for a typical dnsmasq installation where the host running +# dnsmasq is also the host running samba. 
+# you may want to uncomment some or all of them if you use +# Windows clients and Samba. +#dhcp-option=19,0 # option ip-forwarding off +#dhcp-option=44,0.0.0.0 # set netbios-over-TCP/IP nameserver(s) aka WINS server(s) +#dhcp-option=45,0.0.0.0 # netbios datagram distribution server +#dhcp-option=46,8 # netbios node type + +# Send an empty WPAD option. This may be REQUIRED to get windows 7 to behave. +#dhcp-option=252,"\n" + +# Send RFC-3397 DNS domain search DHCP option. WARNING: Your DHCP client +# probably doesn't support this...... +#dhcp-option=option:domain-search,eng.apple.com,marketing.apple.com + +# Send RFC-3442 classless static routes (note the netmask encoding) +#dhcp-option=121,192.168.1.0/24,1.2.3.4,10.0.0.0/8,5.6.7.8 + +# Send vendor-class specific options encapsulated in DHCP option 43. +# The meaning of the options is defined by the vendor-class so +# options are sent only when the client supplied vendor class +# matches the class given here. (A substring match is OK, so "MSFT" +# matches "MSFT" and "MSFT 5.0"). This example sets the +# mtftp address to 0.0.0.0 for PXEClients. +#dhcp-option=vendor:PXEClient,1,0.0.0.0 + +# Send microsoft-specific option to tell windows to release the DHCP lease +# when it shuts down. Note the "i" flag, to tell dnsmasq to send the +# value as a four-byte integer - that's what microsoft wants. See +# http://technet2.microsoft.com/WindowsServer/en/library/a70f1bb7-d2d4-49f0-96d6-4b7414ecfaae1033.mspx?mfr=true +#dhcp-option=vendor:MSFT,2,1i + +# Send the Encapsulated-vendor-class ID needed by some configurations of +# Etherboot to allow is to recognise the DHCP server. +#dhcp-option=vendor:Etherboot,60,"Etherboot" + +# Send options to PXELinux. Note that we need to send the options even +# though they don't appear in the parameter request list, so we need +# to use dhcp-option-force here. +# See http://syslinux.zytor.com/pxe.php#special for details. +# Magic number - needed before anything else is recognised +#dhcp-option-force=208,f1:00:74:7e +# Configuration file name +#dhcp-option-force=209,configs/common +# Path prefix +#dhcp-option-force=210,/tftpboot/pxelinux/files/ +# Reboot time. (Note 'i' to send 32-bit value) +#dhcp-option-force=211,30i + +# Set the boot filename for netboot/PXE. You will only need +# this if you want to boot machines over the network and you will need +# a TFTP server; either dnsmasq's built-in TFTP server or an +# external one. (See below for how to enable the TFTP server.) +#dhcp-boot=pxelinux.0 + +# The same as above, but use custom tftp-server instead machine running dnsmasq +#dhcp-boot=pxelinux,server.name,192.168.1.100 + +# Boot for iPXE. The idea is to send two different +# filenames, the first loads iPXE, and the second tells iPXE what to +# load. The dhcp-match sets the ipxe tag for requests from iPXE. +#dhcp-boot=undionly.kpxe +#dhcp-match=set:ipxe,175 # iPXE sends a 175 option. +#dhcp-boot=tag:ipxe,http://boot.ipxe.org/demo/boot.php + +# Encapsulated options for iPXE. All the options are +# encapsulated within option 175 +#dhcp-option=encap:175, 1, 5b # priority code +#dhcp-option=encap:175, 176, 1b # no-proxydhcp +#dhcp-option=encap:175, 177, string # bus-id +#dhcp-option=encap:175, 189, 1b # BIOS drive code +#dhcp-option=encap:175, 190, user # iSCSI username +#dhcp-option=encap:175, 191, pass # iSCSI password + +# Test for the architecture of a netboot client. PXE clients are +# supposed to send their architecture as option 93. 
(See RFC 4578) +#dhcp-match=peecees, option:client-arch, 0 #x86-32 +#dhcp-match=itanics, option:client-arch, 2 #IA64 +#dhcp-match=hammers, option:client-arch, 6 #x86-64 +#dhcp-match=mactels, option:client-arch, 7 #EFI x86-64 + + +# Set the boot file name only when the "red" tag is set. +#dhcp-boot=tag:red,pxelinux.red-net + +# An example of dhcp-boot with an external TFTP server: the name and IP +# address of the server are given after the filename. +# Can fail with old PXE ROMS. Overridden by --pxe-service. +#dhcp-boot=/var/ftpd/pxelinux.0,boothost,192.168.0.3 + +# If there are multiple external tftp servers having a same name +# (using /etc/hosts) then that name can be specified as the +# tftp_servername (the third option to dhcp-boot) and in that +# case dnsmasq resolves this name and returns the resultant IP +# addresses in round robin fashion. This facility can be used to +# load balance the tftp load among a set of servers. +#dhcp-boot=/var/ftpd/pxelinux.0,boothost,tftp_server_name + +# Set the limit on DHCP leases, the default is 150 +#dhcp-lease-max=150 + +# The DHCP server needs somewhere on disk to keep its lease database. +# This defaults to a sane location, but if you want to change it, use +# the line below. +#dhcp-leasefile=/var/lib/misc/dnsmasq.leases + +# Set the DHCP server to authoritative mode. In this mode it will barge in +# and take over the lease for any client which broadcasts on the network, +# whether it has a record of the lease or not. This avoids long timeouts +# when a machine wakes up on a new network. DO NOT enable this if there's +# the slightest chance that you might end up accidentally configuring a DHCP +# server for your campus/company accidentally. The ISC server uses +# the same option, and this URL provides more information: +# http://www.isc.org/files/auth.html +#dhcp-authoritative + +# Set the DHCP server to enable DHCPv4 Rapid Commit Option per RFC 4039. +# In this mode it will respond to a DHCPDISCOVER message including a Rapid Commit +# option with a DHCPACK including a Rapid Commit option and fully committed address +# and configuration information. This must only be enabled if either the server is +# the only server for the subnet, or multiple servers are present and they each +# commit a binding for all clients. +#dhcp-rapid-commit + +# Run an executable when a DHCP lease is created or destroyed. +# The arguments sent to the script are "add" or "del", +# then the MAC address, the IP address and finally the hostname +# if there is one. +#dhcp-script=/bin/echo + +# If you want dnsmasq to provide only DNS service on an interface, +# configure it as shown above, and then use the following line to +# disable DHCP and TFTP on it. +#no-dhcp-interface= +#} diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/dnsmasq/templates/etc/dnsmasq.d/20-dnssec.conf.j2 b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/dnsmasq/templates/etc/dnsmasq.d/20-dnssec.conf.j2 new file mode 100644 index 0000000..e6f4478 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/dnsmasq/templates/etc/dnsmasq.d/20-dnssec.conf.j2 @@ -0,0 +1,17 @@ +#jinja2: trim_blocks: True, lstrip_blocks: True +# {{ ansible_managed }} + +{# +# Uncomment these to enable DNSSEC validation and caching: +# (Requires dnsmasq to be built with DNSSEC option.) 
+#conf-file=%%PREFIX%%/share/dnsmasq/trust-anchors.conf
+#dnssec
+
+# Replies which are not DNSSEC signed may be legitimate, because the domain
+# is unsigned, or may be forgeries. Setting this option tells dnsmasq to
+# check that an unsigned reply is OK, by finding a secure proof that a DS
+# record somewhere between the root and the domain does not exist.
+# The cost of setting this is that even queries in unsigned domains will need
+# one or more extra DNS queries to verify.
+#dnssec-check-unsigned
+#}
diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/dnsmasq/templates/etc/dnsmasq.d/20-domain.conf.j2 b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/dnsmasq/templates/etc/dnsmasq.d/20-domain.conf.j2
new file mode 100644
index 0000000..774efc8
--- /dev/null
+++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/dnsmasq/templates/etc/dnsmasq.d/20-domain.conf.j2
@@ -0,0 +1,30 @@
+#jinja2: trim_blocks: True, lstrip_blocks: True
+# {{ ansible_managed }}
+
+{% if dnsmasq_domain.name is defined %}
+domain={{ dnsmasq_domain.name }}
+{% endif %}
+
+{% if dnsmasq_domain.custom is defined and
+   dnsmasq_domain.custom | count > 0 %}
+  {% for item in dnsmasq_domain.custom %}
+domain={{ item.get('domain') }},{{ item.get('network') | join(',') }}
+  {% endfor %}
+{% endif %}
+{#
+
+# Set the domain for dnsmasq. This is optional, but if it is set, it
+# does the following things.
+# 1) Allows DHCP hosts to have fully qualified domain names, as long
+# as the domain part matches this setting.
+# 2) Sets the "domain" DHCP option thereby potentially setting the
+# domain of all systems configured by DHCP
+# 3) Provides the domain part for "expand-hosts"
+#domain=thekelleys.org.uk
+
+# Set a different domain for a particular subnet
+#domain=wireless.thekelleys.org.uk,192.168.2.0/24
+
+# Same idea, but range rather than subnet
+#domain=reserved.thekelleys.org.uk,192.168.3.100,192.168.3.200
+#}
diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/dnsmasq/templates/etc/dnsmasq.d/20-ipset.conf.j2 b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/dnsmasq/templates/etc/dnsmasq.d/20-ipset.conf.j2
new file mode 100644
index 0000000..f42af56
--- /dev/null
+++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/dnsmasq/templates/etc/dnsmasq.d/20-ipset.conf.j2
@@ -0,0 +1,8 @@
+#jinja2: trim_blocks: True, lstrip_blocks: True
+# {{ ansible_managed }}
+
+{#
+# Add the IPs of all queries to yahoo.com, google.com, and their
+# subdomains to the vpn and search ipsets:
+#ipset=/yahoo.com/google.com/vpn,search
+#}
diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/dnsmasq/templates/etc/dnsmasq.d/20-local.conf.j2 b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/dnsmasq/templates/etc/dnsmasq.d/20-local.conf.j2
new file mode 100644
index 0000000..081bb45
--- /dev/null
+++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/dnsmasq/templates/etc/dnsmasq.d/20-local.conf.j2
@@ -0,0 +1,15 @@
+#jinja2: trim_blocks: True, lstrip_blocks: True
+# {{ ansible_managed }}
+
+{#
+# Add local-only domains here, queries in these domains are answered
+# from /etc/hosts or DHCP only.
+#local=/localnet/
+
+# Normally responses which come from /etc/hosts and the DHCP lease
+# file have Time-To-Live set as zero, which conventionally means
+# do not cache further. If you are happy to trade lower load on the
+# server for potentially stale data, you can set a time-to-live (in
+# seconds) here.
+#local-ttl=
+#}
diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/dnsmasq/templates/etc/dnsmasq.d/20-mx.conf.j2 b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/dnsmasq/templates/etc/dnsmasq.d/20-mx.conf.j2
new file mode 100644
index 0000000..659caea
--- /dev/null
+++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/dnsmasq/templates/etc/dnsmasq.d/20-mx.conf.j2
@@ -0,0 +1,20 @@
+#jinja2: trim_blocks: True, lstrip_blocks: True
+# {{ ansible_managed }}
+
+{#
+# Change these lines if you want dnsmasq to serve MX records.
+
+# Return an MX record named "maildomain.com" with target
+# servermachine.com and preference 50
+#mx-host=maildomain.com,servermachine.com,50
+
+# Set the default target for MX records created using the localmx option.
+#mx-target=servermachine.com
+
+# Return an MX record pointing to the mx-target for all local
+# machines.
+#localmx
+
+# Return an MX record pointing to itself for all local machines.
+#selfmx
+#}
diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/dnsmasq/templates/etc/dnsmasq.d/20-nftset.conf.j2 b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/dnsmasq/templates/etc/dnsmasq.d/20-nftset.conf.j2
new file mode 100644
index 0000000..00a2530
--- /dev/null
+++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/dnsmasq/templates/etc/dnsmasq.d/20-nftset.conf.j2
@@ -0,0 +1,14 @@
+#jinja2: trim_blocks: True, lstrip_blocks: True
+# {{ ansible_managed }}
+
+{#
+# Add the IPs of all queries to yahoo.com, google.com, and their
+# subdomains to netfilters sets, which is equivalent to
+# 'nft add element ip test vpn { ... }; nft add element ip test search { ... }'
+#nftset=/yahoo.com/google.com/ip#test#vpn,ip#test#search
+
+# Use netfilters sets for both IPv4 and IPv6:
+# This adds all addresses in *.yahoo.com to vpn4 and vpn6 for IPv4 and IPv6 addresses.
+#nftset=/yahoo.com/4#ip#test#vpn4
+#nftset=/yahoo.com/6#ip#test#vpn6
+#}
diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/dnsmasq/templates/etc/dnsmasq.d/20-pxe.conf.j2 b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/dnsmasq/templates/etc/dnsmasq.d/20-pxe.conf.j2
new file mode 100644
index 0000000..c3d9e1f
--- /dev/null
+++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/dnsmasq/templates/etc/dnsmasq.d/20-pxe.conf.j2
@@ -0,0 +1,26 @@
+#jinja2: trim_blocks: True, lstrip_blocks: True
+# {{ ansible_managed }}
+
+{#
+# Do real PXE, rather than just booting a single file; this is an
+# alternative to dhcp-boot.
+#pxe-prompt="What system shall I netboot?"
+# or with timeout before first available action is taken:
+#pxe-prompt="Press F8 for menu.", 60
+
+# Available boot services for PXE.
+#pxe-service=x86PC, "Boot from local disk"
+
+# Loads /pxelinux.0 from dnsmasq TFTP server.
+#pxe-service=x86PC, "Install Linux", pxelinux
+
+# Loads /pxelinux.0 from TFTP server at 1.2.3.4.
+# Beware this fails on old PXE ROMS.
+#pxe-service=x86PC, "Install Linux", pxelinux, 1.2.3.4
+
+# Use bootserver on network, found by multicast or broadcast.
+#pxe-service=x86PC, "Install windows from RIS server", 1
+
+# Use bootserver at a known IP address.
+#pxe-service=x86PC, "Install windows from RIS server", 1, 1.2.3.4
+#}
diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/dnsmasq/templates/etc/dnsmasq.d/20-server.conf.j2 b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/dnsmasq/templates/etc/dnsmasq.d/20-server.conf.j2
new file mode 100644
index 0000000..b269446
--- /dev/null
+++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/dnsmasq/templates/etc/dnsmasq.d/20-server.conf.j2
@@ -0,0 +1,35 @@
+#jinja2: trim_blocks: True, lstrip_blocks: True
+# {{ ansible_managed }}
+
+{% if dnsmasq_server.forwarders is defined and
+   dnsmasq_server.forwarders | count > 0 %}
+  {% for item in dnsmasq_server.forwarders %}
+server=/{{ item.domain }}/{{ item.address }}
+  {% endfor %}
+{% endif %}
+
+{% if dnsmasq_server.nameservers is defined and
+   dnsmasq_server.nameservers | count > 0 %}
+  {% for item in dnsmasq_server.nameservers %}
+server={{ item }}
+  {% endfor %}
+{% endif %}
+
+{#
+# Add other name servers here, with domain specs if they are for
+# non-public domains.
+#server=/localnet/192.168.0.1
+
+# Example of routing PTR queries to nameservers: this will send all
+# address->name queries for 192.168.3/24 to nameserver 10.1.2.3
+#server=/3.168.192.in-addr.arpa/10.1.2.3
+
+# You can control how dnsmasq talks to a server: this forces
+# queries to 10.1.2.3 to be routed via eth1
+# server=10.1.2.3@eth1
+
+# and this sets the source (ie local) address used to talk to
+# 10.1.2.3 to 192.168.1.1 port 55 (there must be an interface with that
+# IP on the machine, obviously).
+# server=10.1.2.3@192.168.1.1#55
+#}
diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/dnsmasq/templates/etc/dnsmasq.d/20-tftp.conf.j2 b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/dnsmasq/templates/etc/dnsmasq.d/20-tftp.conf.j2
new file mode 100644
index 0000000..f05142d
--- /dev/null
+++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/dnsmasq/templates/etc/dnsmasq.d/20-tftp.conf.j2
@@ -0,0 +1,29 @@
+#jinja2: trim_blocks: True, lstrip_blocks: True
+# {{ ansible_managed }}
+
+{% if dnsmasq_tftp.enabled is defined and
+   dnsmasq_tftp.enabled %}
+enable-tftp
+  {% if dnsmasq_tftp.tftp_root is defined and
+     dnsmasq_tftp.tftp_root | string | length > 0 %}
+tftp-root={{ dnsmasq_tftp.tftp_root }}
+  {% endif %}
+  {% if dnsmasq_tftp.tftp_no_fail is defined and
+     dnsmasq_tftp.tftp_no_fail %}
+# Do not abort if the tftp-root is unavailable
+tftp-no-fail
+  {% endif %}
+  {% if dnsmasq_tftp.tftp_secure is defined and
+     dnsmasq_tftp.tftp_secure %}
+# Make the TFTP server more secure: with this set, only files owned by
+# the user dnsmasq is running as will be sent over the net.
+tftp-secure
+  {% endif %}
+  {% if dnsmasq_tftp.tftp_no_blocksize is defined and
+     dnsmasq_tftp.tftp_no_blocksize %}
+# This option stops dnsmasq from negotiating a larger blocksize for TFTP
+# transfers. It will slow things down, but may rescue some broken TFTP
+# clients.
+tftp-no-blocksize
+  {% endif %}
+{% endif %}
diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/dnsmasq/templates/etc/dnsmasq.d/25-cname-records.conf.j2 b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/dnsmasq/templates/etc/dnsmasq.d/25-cname-records.conf.j2
new file mode 100644
index 0000000..e510e7c
--- /dev/null
+++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/dnsmasq/templates/etc/dnsmasq.d/25-cname-records.conf.j2
@@ -0,0 +1,16 @@
+#jinja2: trim_blocks: True, lstrip_blocks: True
+# {{ ansible_managed }}
+
+{% if dnsmasq_records.cname is defined and
+   dnsmasq_records.cname | count > 0 %}
+  {% for item in dnsmasq_records.cname %}
+    {% set cnames = item.cnames | join(',') %}
+cname={{ cnames }},{{ item.target }}{{ ',' if item.ttl is defined else '' }}{{ item.ttl | default('') }}
+  {% endfor %}
+{% endif %}
+{#
+# Provide an alias for a "local" DNS name. Note that this _only_ works
+# for targets which are names from DHCP or /etc/hosts. Give host
+# "bert" another name, bertrand
+#cname=bertrand,bert
+#}
diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/dnsmasq/templates/etc/dnsmasq.d/25-ptr-records.conf.j2 b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/dnsmasq/templates/etc/dnsmasq.d/25-ptr-records.conf.j2
new file mode 100644
index 0000000..78efbda
--- /dev/null
+++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/dnsmasq/templates/etc/dnsmasq.d/25-ptr-records.conf.j2
@@ -0,0 +1,14 @@
+#jinja2: trim_blocks: True, lstrip_blocks: True
+# {{ ansible_managed }}
+
+{% if dnsmasq_records.ptr is defined and
+   dnsmasq_records.ptr | count > 0 %}
+
+{% endif %}
+{#
+# The following line shows how to make dnsmasq serve an arbitrary PTR
+# record. This is useful for DNS-SD. (Note that the
+# domain-name expansion done for SRV records _does_not
+# occur for PTR records.)
+#ptr-record=_http._tcp.dns-sd-services,"New Employee Page._http._tcp.dns-sd-services"
+#}
diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/dnsmasq/templates/etc/dnsmasq.d/25-srv-records.conf.j2 b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/dnsmasq/templates/etc/dnsmasq.d/25-srv-records.conf.j2
new file mode 100644
index 0000000..a5aa7c9
--- /dev/null
+++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/dnsmasq/templates/etc/dnsmasq.d/25-srv-records.conf.j2
@@ -0,0 +1,36 @@
+#jinja2: trim_blocks: True, lstrip_blocks: True
+# {{ ansible_managed }}
+
+{% if dnsmasq_records.srv is defined and
+   dnsmasq_records.srv | count > 0 %}
+
+{% endif %}
+{#
+# Change the following lines if you want dnsmasq to serve SRV
+# records. These are useful if you want to serve ldap requests for
+# Active Directory and other windows-originated DNS requests.
+# See RFC 2782.
+# You may add multiple srv-host lines.
+# The fields are <name>,<target>,<port>,<priority>,<weight>
+# If the domain part is missing from the name (so that it just has the
+# service and protocol sections) then the domain given by the domain=
+# config option is used. (Note that expand-hosts does not need to be
+# set for this to work.)
+ +# A SRV record sending LDAP for the example.com domain to +# ldapserver.example.com port 389 +#srv-host=_ldap._tcp.example.com,ldapserver.example.com,389 + +# A SRV record sending LDAP for the example.com domain to +# ldapserver.example.com port 389 (using domain=) +#domain=example.com +#srv-host=_ldap._tcp,ldapserver.example.com,389 + +# Two SRV records for LDAP, each with different priorities +#srv-host=_ldap._tcp.example.com,ldapserver.example.com,389,1 +#srv-host=_ldap._tcp.example.com,ldapserver.example.com,389,2 + +# A SRV record indicating that there is no LDAP server for the domain +# example.com +#srv-host=_ldap._tcp.example.com +#} diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/dnsmasq/templates/etc/dnsmasq.d/25-txt-records.conf.j2 b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/dnsmasq/templates/etc/dnsmasq.d/25-txt-records.conf.j2 new file mode 100644 index 0000000..1a9d197 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/dnsmasq/templates/etc/dnsmasq.d/25-txt-records.conf.j2 @@ -0,0 +1,19 @@ +#jinja2: trim_blocks: True, lstrip_blocks: True +# {{ ansible_managed }} + +{% if dnsmasq_records.txt is defined and + dnsmasq_records.txt | count > 0 %} + +{% endif %} +{# +# Change the following lines to enable dnsmasq to serve TXT records. +# These are used for things like SPF and zeroconf. (Note that the +# domain-name expansion done for SRV records _does_not +# occur for TXT records.) + +#Example SPF. +#txt-record=example.com,"v=spf1 a -all" + +#Example zeroconf +#txt-record=_http._tcp.example.com,name=value,paper=A4 +#} diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/dnsmasq/templates/init/systemd/override.conf.j2 b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/dnsmasq/templates/init/systemd/override.conf.j2 new file mode 100644 index 0000000..16430b5 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/dnsmasq/templates/init/systemd/override.conf.j2 @@ -0,0 +1,19 @@ +#jinja2: trim_blocks: True, lstrip_blocks: True +# {{ ansible_managed }} + +[Unit] +{% if dnsmasq_systemd.unit.after is defined and + dnsmasq_systemd.unit.after | count > 0 %} +After = +After = network.target {{ dnsmasq_systemd.unit.after | join(' ') }} +{% endif %} +{% if dnsmasq_systemd.unit.wants is defined and + dnsmasq_systemd.unit.wants | count > 0 %} +Wants = +Wants = nss-lookup.target {{ dnsmasq_systemd.unit.wants | join(' ') }} +{% endif %} +{% if dnsmasq_systemd.unit.requires is defined and + dnsmasq_systemd.unit.requires | count > 0 %} +Requires = +Requires = network.target {{ dnsmasq_systemd.unit.requires | join(' ') }} +{% endif %} diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/dnsmasq/vars/archlinux.yml b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/dnsmasq/vars/archlinux.yml new file mode 100644 index 0000000..c81cf5b --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/dnsmasq/vars/archlinux.yml @@ -0,0 +1,3 @@ +--- + +... diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/dnsmasq/vars/debian.yml b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/dnsmasq/vars/debian.yml new file mode 100644 index 0000000..c81cf5b --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/dnsmasq/vars/debian.yml @@ -0,0 +1,3 @@ +--- + +... 
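The init/systemd/override.conf.j2 template above relies on systemd's reset-then-assign idiom for drop-ins: list-valued unit settings such as After=, Wants= and Requires= are additive across unit files, so an empty assignment first clears the list inherited from the packaged dnsmasq.service before the next line re-populates it. A minimal sketch of inventory data that would drive this template, assuming the role merges user-supplied values into dnsmasq_systemd as the other templates imply (the unit name systemd-networkd.service is purely illustrative):

dnsmasq_systemd:
  unit:
    after:
      - systemd-networkd.service
    wants: []
    requires: []

With these values only the After= block is rendered, producing a drop-in of the form:

[Unit]
After =
After = network.target systemd-networkd.service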
diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/dnsmasq/vars/main.yml b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/dnsmasq/vars/main.yml new file mode 100644 index 0000000..22bd310 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/dnsmasq/vars/main.yml @@ -0,0 +1,147 @@ +--- + +dnsmasq_requirements: + - iproute2 + +dnsmasq_packages: + - dnsmasq + +dnsmasq_config_file: /etc/dnsmasq.conf +dnsmasq_config_directory: /etc/dnsmasq.d + +dnsmasq_defaults_global: + port: 53 + user: "" + group: "" + filterwin2k: false + resolv_file: "" + strict_order: false + no_hosts: false + no_resolv: false + no_poll: false + domain_needed: false + bogus_priv: false + cache_size: 150 + all_servers: false + no_negcache: false + conf_file: "" + conf_dir: "" + +dnsmasq_defaults_interfaces: + listen_address: "127.0.0.1" + interfaces: [] + except_interfaces: [] + bind_only: false + +dnsmasq_defaults_logging: + log_queries: false + log_facility: /var/log/dnsmasq.log + log_dhcp: false + +dnsmasq_defaults_addresses: [] +# - address: 192.168.202.133 +# name: node1.test.com + +dnsmasq_defaults_alias: {} + +dnsmasq_defaults_dhcp: + enabled: false + dhcp_authoritative: false + dhcp_boot: "pxelinux.0,{{ inventory_hostname }},{{ dnsmasq_domain }}" + dhcp_hosts: [] +# - address: 192.168.0.60 +# lease_time: 1h +# mac_address: +# - "11:22:33:44:55:66" # Multiple MAC addresses may be assigned +# # - "12:34:56:78:90:12" +# name: fred + dhcp_options: [] +# - option: dns-server +# value: +# - 192.168.2.200 +# - 192.168.2.201 +# # - option: domain-name +# # value: +# # - "another.{{ dnsmasq_pri_domain_name }}" +# - option: domain-search +# value: +# - "dev.{{ dnsmasq_pri_domain_name }}" +# - "prod.{{ dnsmasq_pri_domain_name }}" +# - "test.{{ dnsmasq_pri_domain_name }}" +# - option: ntp-server +# value: +# - 192.168.2.200 +# - 192.168.2.201 +# - option: router +# value: +# - 192.168.2.1 + dhcp_options_tagged: [] +# - option: router +# tag: net1 +# value: +# - 192.168.1.200 +# - option: router +# tag: net2 +# value: +# - 192.168.2.200 + dhcp_range: [] +# - start: 192.168.1.128 +# end: 192.168.1.224 +# netmask: 255.255.255.0 +# lease_time: 24h # Define a specific lease time if desired..Default is 1h +# set: net1 +# - start: 192.168.2.128 +# end: 192.168.2.224 +# netmask: 255.255.255.0 +# lease_time: 24h +# set: net2 + +dnsmasq_defaults_dnssec: + enabled: false + conf_file: "" + dnssec_check_unsigned: false + +dnsmasq_defaults_domain: + name: example.org + custom: [] + +# dnsmasq_defaults_domain: "" +dnsmasq_defaults_ipset: {} + +dnsmasq_defaults_local: {} + +dnsmasq_defaults_mx: {} + +dnsmasq_defaults_nftset: {} + +dnsmasq_defaults_pxe: {} + +dnsmasq_defaults_server: + nameservers: [] + forwarders: [] +# dnsmasq_enable_forwarders: true +# dnsmasq_nameservers: +# # dns2.digitalcourage.de +# - 46.182.19.48 +# # dns9.quad9.net +# - 9.9.9.9 +# dnsmasq_conditional_forwarders: [] +# # - address: 172.16.24.1 +# # domain: etsbv.internal + +dnsmasq_defaults_tftp: + enabled: false + tftp_root: "" + tftp_no_fail: false + tftp_secure: false + tftp_no_blocksize: false + +dnsmasq_defaults_records: + cname: [] +# - target: +# cnames: +# - cname + ptr: [] + srv: [] + txt: [] +... 
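The commented blocks in vars/main.yml double as documentation for the data structures the templates expect. A minimal sketch of host variables that would enable the DHCP server through 20-dhcp.conf.j2, assuming the role merges these user-supplied dnsmasq_* dictionaries over the dnsmasq_defaults_* values above (all addresses, MAC addresses and names are illustrative):

dnsmasq_dhcp:
  enabled: true
  dhcp_authoritative: true
  dhcp_range:
    - set: lan
      start: 192.168.10.100
      end: 192.168.10.199
      netmask: 255.255.255.0
      lease_time: 12h
  dhcp_hosts:
    - name: printer
      mac_address:
        - "11:22:33:44:55:66"
      address: 192.168.10.20
      lease_time: infinite
  dhcp_options:
    - option: router
      value:
        - 192.168.10.1

Rendered, this yields a dhcp-range=set:lan,192.168.10.100,192.168.10.199,255.255.255.0,12h line, a dhcp-host line joining the MAC address, the name (suffixed with the dnsmasq domain), the address and the lease time, and dhcp-option=option:router,192.168.10.1.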
diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/fqdn/.ansible-lint b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/fqdn/.ansible-lint new file mode 100644 index 0000000..48763f0 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/fqdn/.ansible-lint @@ -0,0 +1,5 @@ +--- + +skip_list: + - name[casing] + - name[template] diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/fqdn/.editorconfig b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/fqdn/.editorconfig new file mode 100644 index 0000000..898cdbd --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/fqdn/.editorconfig @@ -0,0 +1,23 @@ +# EditorConfig helps developers define and maintain consistent +# coding styles between different editors and IDEs +# https://editorconfig.org/ + + +root = true + +[*] +indent_style = space +indent_size = 2 + +end_of_line = lf +charset = utf-8 +trim_trailing_whitespace = true +insert_final_newline = true +max_line_length = 100 + +[*.py] +indent_style = space +indent_size = 4 + +[*.md] +trim_trailing_whitespace = false diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/fqdn/.flake8 b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/fqdn/.flake8 new file mode 100644 index 0000000..1962f7e --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/fqdn/.flake8 @@ -0,0 +1,19 @@ +[flake8] + +# E221 multiple spaces before operator +# E251 unexpected spaces around keyword / parameter equals + +ignore = E221,E251 + +exclude = + # No need to traverse our git directory + .git, + # There's no value in checking cache directories + __pycache__, + .tox + +# E203: https://github.com/python/black/issues/315 +# ignore = D,E741,W503,W504,H,E501,E203 + +max-line-length = 195 + diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/fqdn/.gitignore b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/fqdn/.gitignore new file mode 100644 index 0000000..3d35e6a --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/fqdn/.gitignore @@ -0,0 +1,6 @@ +.tox +.galaxy_install_info +*kate-swp +__pycache__ +.cache +.directory diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/fqdn/.travis.yml b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/fqdn/.travis.yml new file mode 100644 index 0000000..2004fe4 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/fqdn/.travis.yml @@ -0,0 +1,54 @@ +--- +sudo: required +dist: trusty + +language: python +python: "2.7" + +env: + - ANSIBLE_VERSION=latest + - ANSIBLE_VERSION=2.2.1.0 + - ANSIBLE_VERSION=2.2.0.0 + - ANSIBLE_VERSION=2.1.4 + - ANSIBLE_VERSION=2.1.3 + - ANSIBLE_VERSION=2.1.2 + - ANSIBLE_VERSION=2.1.1.0 + - ANSIBLE_VERSION=2.1.0.0 + - ANSIBLE_VERSION=2.0.2.0 + - ANSIBLE_VERSION=2.0.1.0 + - ANSIBLE_VERSION=2.0.0.2 + - ANSIBLE_VERSION=2.0.0.1 + - ANSIBLE_VERSION=2.0.0.0 + +branches: + only: + - master + +before_install: + - sudo apt-get update -qq + - sudo apt-get install -y python-apt aptitude + +install: + # Install Ansible + - if [ "$ANSIBLE_VERSION" = "latest" ]; then pip install ansible; else pip install --no-binary ansible ansible==$ANSIBLE_VERSION; fi + - if [ "$ANSIBLE_VERSION" = "latest" ]; then pip install ansible-lint; fi + +script: + # Check the role/playbook's syntax + - ansible-playbook -i 
tests/inventory tests/test.yml --syntax-check + + # Run the role/playbook with ansible-playbook + - ansible-playbook -i tests/inventory tests/test.yml -vvvv + + # Run the role/playbook again, checking to make sure it's idempotent + - > + ansible-playbook -i tests/inventory tests/test.yml + | grep -q 'changed=0.*failed=0' + && (echo 'Idempotence test: pass' && exit 0) + || (echo 'Idempotence test: fail' && exit 1) + + # Check the role/playbook for practices and behaviour that could potentially be improved + - if [ "$ANSIBLE_VERSION" = "latest" ]; then ansible-lint tests/test.yml || true; fi + +notifications: + webhooks: https://galaxy.ansible.com/api/v1/notifications/ diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/fqdn/.yamllint b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/fqdn/.yamllint new file mode 100644 index 0000000..20fd7aa --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/fqdn/.yamllint @@ -0,0 +1,40 @@ +--- +# Based on ansible-lint config +extends: default + +rules: + braces: + max-spaces-inside: 1 + level: error + brackets: + max-spaces-inside: 1 + level: error + colons: + max-spaces-after: -1 + level: error + commas: + max-spaces-after: -1 + level: error + comments: disable + comments-indentation: disable + document-start: disable + empty-lines: + max: 3 + level: error + hyphens: + level: error + indentation: + spaces: 2 + key-duplicates: enable + line-length: + max: 195 + level: warning + new-line-at-end-of-file: disable + new-lines: + type: unix + trailing-spaces: disable + truthy: disable + +ignore: | + molecule/ + .github diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/fqdn/LICENSE b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/fqdn/LICENSE new file mode 100644 index 0000000..9d3cdfc --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/fqdn/LICENSE @@ -0,0 +1,22 @@ +The MIT License (MIT) + +Copyright (c) 2014 Roman Gorodeckij + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. 
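
The `.travis.yml` above encodes the classic two-pass idempotence check: apply the playbook, apply it again, and fail unless the second run reports `changed=0`. Travis CI is only of historical interest here; purely as an illustration (the workflow layout, checkout action version, and paths are assumptions), the same pattern in a GitHub Actions job would look like:

```yaml
# Hypothetical GitHub Actions port of the idempotence check from .travis.yml above.
jobs:
  idempotence:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v4
      - run: pip install ansible
      # first run applies changes ...
      - run: ansible-playbook -i tests/inventory tests/test.yml
      # ... the second run must not change anything
      - run: |
          ansible-playbook -i tests/inventory tests/test.yml \
            | grep -q 'changed=0.*failed=0' \
            || { echo 'Idempotence test: fail'; exit 1; }
```
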
+
diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/fqdn/Makefile b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/fqdn/Makefile new file mode 100644 index 0000000..40857c8 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/fqdn/Makefile @@ -0,0 +1,20 @@
+#
+export TOX_SCENARIO ?= default
+# export TOX_PYTHON ?= py310
+export TOX_ANSIBLE ?= ansible_6.1
+
+.PHONY: converge destroy verify lint
+
+default: converge
+
+converge:
+	@hooks/converge
+
+destroy:
+	@hooks/destroy
+
+verify:
+	@hooks/verify
+
+lint:
+	@hooks/lint
diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/fqdn/README.md b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/fqdn/README.md new file mode 100644 index 0000000..5f4950e --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/fqdn/README.md @@ -0,0 +1,57 @@
+fqdn [![Build Status](https://travis-ci.org/holms/ansible-fqdn.svg?branch=master)](https://travis-ci.org/holms/ansible-fqdn)
+====
+
+Sets the fully qualified domain name (FQDN).
+
+Requirements
+------------
+
+Ansible version 2.10+
+
+## Platforms
+
+* ArchLinux
+* ArtixLinux
+* Ubuntu
+* Debian
+* CentOS
+* RedHat
+* Windows
+
+Role Variables
+--------------
+
+The role reads its configuration from a single `fqdn` dictionary (see `defaults/main.yml`):
+
+| Variable name | Variable value | Default |
+|---------------|----------------|---------|
+|*fqdn.hostname* | FQDN (eg. vm1.test.com) | `inventory_hostname` |
+|*fqdn.short_hostname* | short hostname (eg. vm1) | `inventory_hostname_short` |
+|*fqdn.ip_address* | ip address (eg. 192.168.0.20) | `ansible_facts.default_ipv4.address` |
+
+Example
+-------
+
+```yaml
+- hosts: mx.mydomain.com:mx
+  user: root
+
+  roles:
+    - role: fqdn
+      vars:
+        fqdn:
+          hostname: "mx.mydomain.com"
+          short_hostname: "mx"
+```
+
+License
+-------
+
+MIT
+
+Author Information
+------------------
+
+Roman Gorodeckij
+John Brooker (jb-github@outlook.com)
+Bodo Schulz (bodo@boone-schulz.de)
diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/fqdn/Vagrantfile b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/fqdn/Vagrantfile new file mode 100644 index 0000000..036b903 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/fqdn/Vagrantfile @@ -0,0 +1,77 @@
+# -*- mode: ruby -*-
+# vi: set ft=ruby ts=2 sw=2 tw=0 et :
+
+role = File.basename(File.expand_path(File.dirname(__FILE__)))
+
+boxes = [
+  {
+    :name => "ubuntu-1204",
+    :box => "bento/ubuntu-12.04",
+    :ip => '10.0.0.11',
+    :cpu => "50",
+    :ram => "256"
+  },
+  {
+    :name => "ubuntu-1404",
+    :box => "bento/ubuntu-14.04",
+    :ip => '10.0.0.12',
+    :cpu => "50",
+    :ram => "256"
+  },
+  {
+    :name => "ubuntu-1604",
+    :box => "bento/ubuntu-16.04",
+    :ip => '10.0.0.13',
+    :cpu => "50",
+    :ram => "256"
+  },
+  {
+    :name => "debian-711",
+    :box => "bento/debian-7.11",
+    :ip => '10.0.0.14',
+    :cpu => "50",
+    :ram => "256"
+  },
+  {
+    :name => "debian-86",
+    :box => "bento/debian-8.6",
+    :ip => '10.0.0.15',
+    :cpu => "50",
+    :ram => "256"
+  },
+  {
+    :name => "centos-6.8",
+    :box => "bento/centos-6.8",
+    :ip => '10.0.0.16',
+    :cpu => "50",
+    :ram => "256"
+  },
+  {
+    :name => "centos-7.3",
+    :box => "bento/centos-7.3",
+    :ip => '10.0.0.17',
+    :cpu => "50",
+    :ram => "256"
+  },
+]
+
+Vagrant.configure("2") do |config|
+  boxes.each do |box|
+    config.vm.define box[:name] do |vms|
+      vms.vm.box = box[:box]
+      vms.vm.hostname = "ansible-#{role}-#{box[:name]}"
+
+      vms.vm.provider "virtualbox" do |v|
+        v.customize ["modifyvm", :id, "--cpuexecutioncap", box[:cpu]]
v.customize ["modifyvm", :id, "--memory", box[:ram]] + end + + vms.vm.network :private_network, ip: box[:ip] + + vms.vm.provision :ansible do |ansible| + ansible.playbook = "tests/vagrant.yml" + ansible.verbose = "vv" + end + end + end +end diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/fqdn/defaults/main.yml b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/fqdn/defaults/main.yml new file mode 100644 index 0000000..0711b0e --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/fqdn/defaults/main.yml @@ -0,0 +1,6 @@ +--- + +fqdn: + hostname: "{{ inventory_hostname }}" + short_hostname: "{{ inventory_hostname_short }}" + ip_address: "{{ ansible_facts.default_ipv4.address }}" diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/fqdn/handlers/main.yml b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/fqdn/handlers/main.yml new file mode 100644 index 0000000..f3d7b0a --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/fqdn/handlers/main.yml @@ -0,0 +1,15 @@ +--- + +- name: restart hostname + ansible.builtin.service: + name: "hostname{{ '.sh' if (ansible_facts.distribution_release == 'wheezy') else '' }}" + state: restarted + when: + - not ansible_facts.service_mgr == 'systemd' + +- name: restart systemd-logind + ansible.builtin.service: + name: systemd-logind + state: restarted + when: + - ansible_facts.service_mgr == 'systemd' diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/fqdn/hooks/_tox_base b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/fqdn/hooks/_tox_base new file mode 100644 index 0000000..a15f7c3 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/fqdn/hooks/_tox_base @@ -0,0 +1,9 @@ + +TOX_ARGS= + +if [ -n "${TOX_SCENARIO}" ] +then + TOX_ARGS="--scenario-name ${TOX_SCENARIO}" +fi + +TOX_OPTS="-e ${TOX_ANSIBLE}" diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/fqdn/hooks/converge b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/fqdn/hooks/converge new file mode 100755 index 0000000..5df5ad6 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/fqdn/hooks/converge @@ -0,0 +1,5 @@ +#!/usr/bin/env bash + +. hooks/_tox_base + +tox ${TOX_OPTS} -- molecule converge ${TOX_ARGS} diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/fqdn/hooks/destroy b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/fqdn/hooks/destroy new file mode 100755 index 0000000..98fcf16 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/fqdn/hooks/destroy @@ -0,0 +1,5 @@ +#!/usr/bin/env bash + +. hooks/_tox_base + +tox ${TOX_OPTS} -- molecule destroy ${TOX_ARGS} diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/fqdn/hooks/lint b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/fqdn/hooks/lint new file mode 100755 index 0000000..6cf7ff3 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/fqdn/hooks/lint @@ -0,0 +1,5 @@ +#!/usr/bin/env bash + +. 
hooks/_tox_base
+
+tox ${TOX_OPTS} -- molecule lint ${TOX_ARGS}
diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/fqdn/hooks/molecule.rc b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/fqdn/hooks/molecule.rc new file mode 100644 index 0000000..78c8621 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/fqdn/hooks/molecule.rc @@ -0,0 +1,75 @@
+
+TOX_ARGS=
+
+if [ -n "${TOX_SCENARIO}" ]
+then
+  TOX_ARGS="--scenario-name ${TOX_SCENARIO}"
+fi
+
+TOX_OPTS="-e ${TOX_ANSIBLE}"
+
+vercomp() {
+
+  [[ $1 == $2 ]] && return 0
+  v1=$(echo "$1" | sed -e 's|-|.|g')
+  v2=$(echo "$2" | sed -e 's|-|.|g')
+
+  local IFS=.
+  # compare the dot-normalised copies, not the raw arguments
+  local i ver1=($v1) ver2=($v2)
+  # fill empty fields in ver1 with zeros
+  for ((i=${#ver1[@]}; i<${#ver2[@]}; i++))
+  do
+    ver1[i]=0
+  done
+
+  for ((i=0; i<${#ver1[@]}; i++))
+  do
+    if [[ -z ${ver2[i]} ]]
+    then
+      # fill empty fields in ver2 with zeros
+      ver2[i]=0
+    fi
+    if ((10#${ver1[i]} > 10#${ver2[i]}))
+    then
+      return 1
+    fi
+    if ((10#${ver1[i]} < 10#${ver2[i]}))
+    then
+      return 2
+    fi
+  done
+  return 0
+}
+
+install_collection() {
+  local collection="${1}"
+
+  echo "Install the required collection '${collection}'"
+  ansible-galaxy collection install ${collection}
+}
+
+remove_collection() {
+
+  local collection="${1}"
+
+  namespace="$(echo "${collection}" | cut -d '.' -f1)"
+  name="$(echo "${collection}" | cut -d '.' -f2)"
+
+  collection="${HOME}/.ansible/collections/ansible_collections/${namespace}/${name}"
+
+  rm \
+    --recursive \
+    --force \
+    "${collection}"
+}
+
+publish() {
+
+  TOKEN="${HOME}/.ansible/galaxy_token"
+
+  if [ -e "${TOKEN}" ]
+  then
+    ansible-galaxy import --token=$(cat "${TOKEN}") bodsch # "???"
+  fi
+}
diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/fqdn/hooks/tox.sh b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/fqdn/hooks/tox.sh new file mode 100755 index 0000000..c93de29 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/fqdn/hooks/tox.sh @@ -0,0 +1,52 @@
+#!/usr/bin/env bash
+
+. hooks/molecule.rc
+
+TOX_TEST="${1}"
+
+if [ -f "./collections.yml" ]
+then
+  for collection in $(grep -v "#" collections.yml | grep "^ - name: " | awk -F ': ' '{print $2}')
+  do
+    collections_installed="$(ansible-galaxy collection list | grep ${collection} 2> /dev/null)"
+
+    if [ -z "${collections_installed}" ]
+    then
+      install_collection ${collection}
+    else
+      collection_version=$(echo "${collections_installed}" | awk -F ' ' '{print $2}')
+      version="$(grep -v "#" collections.yml | grep -A1 "^ - name: ${collection}" | grep "version: " 2> /dev/null | awk -F ': ' '{print $2}' | sed -e 's|=||' -e 's|>||' -e 's|"||g')"
+
+      echo "The required collection '${collection}' is installed in version ${collection_version}."
+
+      if [ ! -z "${version}" ]
+      then
+
+        vercomp "${version}" "${collection_version}"
+
+        case $? 
in + 0) op='=' ;; + 1) op='>' ;; + 2) op='<' ;; + esac + + if [[ $op = "=" ]] || [[ $op = ">" ]] + then + # echo "FAIL: Expected '$3', Actual '$op', Arg1 '$1', Arg2 '$2'" + echo "re-install for version ${version}" + + remove_collection ${collection} + install_collection ${collection} + else + : + # echo "Pass: '$1 $op $2'" + fi + else + : + fi + fi + done + echo "" +fi + +tox ${TOX_OPTS} -- molecule ${TOX_TEST} ${TOX_ARGS} diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/fqdn/hooks/verify b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/fqdn/hooks/verify new file mode 100755 index 0000000..79a38d4 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/fqdn/hooks/verify @@ -0,0 +1,5 @@ +#!/usr/bin/env bash + +. hooks/_tox_base + +tox ${TOX_OPTS} -- molecule verify ${TOX_ARGS} diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/fqdn/meta/main.yml b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/fqdn/meta/main.yml new file mode 100644 index 0000000..a1900c9 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/fqdn/meta/main.yml @@ -0,0 +1,34 @@ +--- + +galaxy_info: + role_name: fqdn + + author: Bodo Schulz + description: ansible role for managing FQDN information. + + license: Apache + min_ansible_version: "2.9" + + platforms: + - name: ArchLinux + - name: Debian + versions: + # 10 + - buster + - bullseye + - bookworm + - name: Ubuntu + versions: + # 20.04 + - focal + # 22.04 + - jammy + + galaxy_tags: + - system + - dns + - networking + +dependencies: [] + +... diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/fqdn/molecule/configured/converge.yml b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/fqdn/molecule/configured/converge.yml new file mode 100644 index 0000000..91c44cf --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/fqdn/molecule/configured/converge.yml @@ -0,0 +1,10 @@ +--- +- name: converge + hosts: all + any_errors_fatal: false + + environment: + NETRC: '' + + roles: + - role: bodsch.dns.fqdn diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/fqdn/molecule/configured/group_vars/all/vars.yml b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/fqdn/molecule/configured/group_vars/all/vars.yml new file mode 100644 index 0000000..7c96285 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/fqdn/molecule/configured/group_vars/all/vars.yml @@ -0,0 +1,8 @@ +--- + +fqdn: + hostname: "instance.molecule.local" + short_hostname: "instance" + ip_address: "{{ ansible_facts.default_ipv4.address }}" + +... 
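
`hooks/tox.sh` above drives the whole test loop: it parses a `collections.yml` in the role root for `- name:` / `version:` pairs, compares each pin against the output of `ansible-galaxy collection list` via `vercomp`, and re-installs the collection when the pinned version is greater than or equal to the installed one. The file itself is not part of this diff; a shape that would satisfy those greps (the top-level key and version operator are assumptions) might be:

```yaml
# Assumed layout of collections.yml, matching the greps in hooks/tox.sh (not in this diff).
collections:
  - name: bodsch.core
    version: ">=2.10.1"   # tox.sh strips '=', '>' and quotes before comparing
```
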
diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/fqdn/molecule/configured/molecule.yml b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/fqdn/molecule/configured/molecule.yml new file mode 100644 index 0000000..fda92e3 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/fqdn/molecule/configured/molecule.yml @@ -0,0 +1,55 @@
+---
+
+role_name_check: 1
+
+dependency:
+  name: galaxy
+
+driver:
+  name: docker
+
+platforms:
+  - name: instance
+    image: "ghcr.io/bodsch/docker-ansible/ansible-${DISTRIBUTION:-debian:12}"
+    command: ${MOLECULE_DOCKER_COMMAND:-""}
+    docker_host: "${DOCKER_HOST:-unix:///run/docker.sock}"
+    privileged: true
+    pre_build_image: true
+    cgroupns_mode: host
+    volumes:
+      - /sys/fs/cgroup:/sys/fs/cgroup:rw
+      - /var/lib/containerd
+    capabilities:
+      - SYS_ADMIN
+    tmpfs:
+      - /run
+      - /tmp
+
+provisioner:
+  name: ansible
+  ansible_args:
+    - --diff
+    - -v
+  config_options:
+    defaults:
+      deprecation_warnings: true
+      callback_result_format: yaml
+      callbacks_enabled: profile_tasks
+      gathering: smart
+      fact_caching: jsonfile
+      fact_caching_timeout: 8640
+      fact_caching_connection: ansible_facts
+
+scenario:
+  test_sequence:
+    - destroy
+    - dependency
+    - syntax
+    - create
+    - prepare
+    - converge
+    - verify
+    - destroy
+
+verifier:
+  name: testinfra
diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/fqdn/molecule/configured/prepare.yml b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/fqdn/molecule/configured/prepare.yml new file mode 100644 index 0000000..4c14c51 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/fqdn/molecule/configured/prepare.yml @@ -0,0 +1,57 @@
+---
+
+- name: information
+  hosts: all
+  gather_facts: true
+
+  pre_tasks:
+    - name: arch- / artixlinux
+      when:
+        - ansible_facts.distribution | lower == 'archlinux' or
+          ansible_facts.os_family | lower == 'artix linux'
+      block:
+        - name: update pacman system
+          ansible.builtin.command: |
+            pacman --refresh --sync --sysupgrade --noconfirm
+          register: pacman
+          changed_when: pacman.rc != 0
+          failed_when: pacman.rc != 0
+
+        - name: create depends service
+          ansible.builtin.copy:
+            mode: 0755
+            dest: /etc/init.d/net
+            content: |
+              #!/usr/bin/openrc-run
+              true
+          when:
+            - ansible_facts.os_family | lower == 'artix linux'
+
+    - name: make sure python3-apt is installed (only debian based)
+      ansible.builtin.package:
+        name:
+          - python3-apt
+        state: present
+      when:
+        - ansible_facts.os_family | lower == 'debian'
+
+    - name: update package cache
+      become: true
+      ansible.builtin.package:
+        update_cache: true
+
+    - name: install dependencies
+      ansible.builtin.package:
+        name:
+          - iproute2
+        state: present
+
+    - name: environment
+      ansible.builtin.debug:
+        msg:
+          - "os family : {{ ansible_facts.distribution }} ({{ ansible_facts.os_family }})"
+          - "distribution version : {{ ansible_facts.distribution_major_version }}"
+          - "ansible version : {{ ansible_version.full }}"
+          - "python version : {{ ansible_facts.python.version.major }}.{{ ansible_facts.python.version.minor }}"
+
+...
diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/fqdn/molecule/configured/tests/test_default.py b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/fqdn/molecule/configured/tests/test_default.py new file mode 100644 index 0000000..af15554 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/fqdn/molecule/configured/tests/test_default.py @@ -0,0 +1,132 @@ +# coding: utf-8 +from __future__ import unicode_literals + +import json +import os + +import pytest +import testinfra.utils.ansible_runner +from ansible.parsing.dataloader import DataLoader +from ansible.template import Templar + +HOST = "instance" + +testinfra_hosts = testinfra.utils.ansible_runner.AnsibleRunner( + os.environ["MOLECULE_INVENTORY_FILE"] +).get_hosts(HOST) + + +def pp_json(json_thing, sort=True, indents=2): + if type(json_thing) is str: + print(json.dumps(json.loads(json_thing), sort_keys=sort, indent=indents)) + else: + print(json.dumps(json_thing, sort_keys=sort, indent=indents)) + return None + + +def base_directory(): + """...""" + cwd = os.getcwd() + + if "group_vars" in os.listdir(cwd): + directory = "../.." + molecule_directory = "." + else: + directory = "." + molecule_directory = "molecule/{}".format( + os.environ.get("MOLECULE_SCENARIO_NAME") + ) + + return directory, molecule_directory + + +def read_ansible_yaml(file_name, role_name): + ext_arr = ["yml", "yaml"] + + read_file = None + + for e in ext_arr: + test_file = "{}.{}".format(file_name, e) + if os.path.isfile(test_file): + read_file = test_file + break + + return "file={} name={}".format(read_file, role_name) + + +@pytest.fixture() +def get_vars(host): + """ + parse ansible variables + - defaults/main.yml + - vars/main.yml + - vars/${DISTRIBUTION}.yaml + - molecule/${MOLECULE_SCENARIO_NAME}/group_vars/all/vars.yml + """ + base_dir, molecule_dir = base_directory() + distribution = host.system_info.distribution + + print(" -> {}".format(distribution)) + print(" -> {}".format(base_dir)) + + if distribution in ["debian", "ubuntu"]: + os = "debian" + elif distribution in ["redhat", "ol", "centos", "rocky", "almalinux"]: + os = "redhat" + elif distribution in ["arch"]: + os = "archlinux" + + print(" -> {} / {}".format(distribution, os)) + + file_defaults = read_ansible_yaml( + "{}/defaults/main".format(base_dir), "role_defaults" + ) + file_vars = read_ansible_yaml("{}/vars/main".format(base_dir), "role_vars") + file_distibution = read_ansible_yaml( + "{}/vars/{}".format(base_dir, os), "role_distibution" + ) + file_molecule = read_ansible_yaml( + "{}/group_vars/all/vars".format(molecule_dir), "test_vars" + ) + # file_host_molecule = read_ansible_yaml("{}/host_vars/{}/vars".format(base_dir, HOST), "host_vars") + + defaults_vars = ( + host.ansible("include_vars", file_defaults) + .get("ansible_facts") + .get("role_defaults") + ) + vars_vars = ( + host.ansible("include_vars", file_vars).get("ansible_facts").get("role_vars") + ) + distibution_vars = ( + host.ansible("include_vars", file_distibution) + .get("ansible_facts") + .get("role_distibution") + ) + molecule_vars = ( + host.ansible("include_vars", file_molecule) + .get("ansible_facts") + .get("test_vars") + ) + # host_vars = host.ansible("include_vars", file_host_molecule).get("ansible_facts").get("host_vars") + + ansible_vars = defaults_vars + ansible_vars.update(vars_vars) + ansible_vars.update(distibution_vars) + ansible_vars.update(molecule_vars) + # ansible_vars.update(host_vars) + + templar = Templar(loader=DataLoader(), 
variables=ansible_vars)
+    result = templar.template(ansible_vars, fail_on_undefined=False)
+
+    return result
+
+
+def test_files(host):
+    """ """
+    files = []
+    files.append("/etc/hostname")
+
+    for _file in files:
+        f = host.file(_file)
+        assert f.is_file
diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/fqdn/molecule/default/converge.yml b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/fqdn/molecule/default/converge.yml new file mode 100644 index 0000000..91c44cf --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/fqdn/molecule/default/converge.yml @@ -0,0 +1,10 @@
+---
+- name: converge
+  hosts: all
+  any_errors_fatal: false
+
+  environment:
+    NETRC: ''
+
+  roles:
+    - role: bodsch.dns.fqdn
diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/fqdn/molecule/default/group_vars/all/vars.yml b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/fqdn/molecule/default/group_vars/all/vars.yml new file mode 100644 index 0000000..c81cf5b --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/fqdn/molecule/default/group_vars/all/vars.yml @@ -0,0 +1,3 @@
+---
+
+...
diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/fqdn/molecule/default/molecule.yml b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/fqdn/molecule/default/molecule.yml new file mode 100644 index 0000000..fda92e3 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/fqdn/molecule/default/molecule.yml @@ -0,0 +1,55 @@
+---
+
+role_name_check: 1
+
+dependency:
+  name: galaxy
+
+driver:
+  name: docker
+
+platforms:
+  - name: instance
+    image: "ghcr.io/bodsch/docker-ansible/ansible-${DISTRIBUTION:-debian:12}"
+    command: ${MOLECULE_DOCKER_COMMAND:-""}
+    docker_host: "${DOCKER_HOST:-unix:///run/docker.sock}"
+    privileged: true
+    pre_build_image: true
+    cgroupns_mode: host
+    volumes:
+      - /sys/fs/cgroup:/sys/fs/cgroup:rw
+      - /var/lib/containerd
+    capabilities:
+      - SYS_ADMIN
+    tmpfs:
+      - /run
+      - /tmp
+
+provisioner:
+  name: ansible
+  ansible_args:
+    - --diff
+    - -v
+  config_options:
+    defaults:
+      deprecation_warnings: true
+      callback_result_format: yaml
+      callbacks_enabled: profile_tasks
+      gathering: smart
+      fact_caching: jsonfile
+      fact_caching_timeout: 8640
+      fact_caching_connection: ansible_facts
+
+scenario:
+  test_sequence:
+    - destroy
+    - dependency
+    - syntax
+    - create
+    - prepare
+    - converge
+    - verify
+    - destroy
+
+verifier:
+  name: testinfra
diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/fqdn/molecule/default/prepare.yml b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/fqdn/molecule/default/prepare.yml new file mode 100644 index 0000000..4c14c51 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/fqdn/molecule/default/prepare.yml @@ -0,0 +1,57 @@
+---
+
+- name: information
+  hosts: all
+  gather_facts: true
+
+  pre_tasks:
+    - name: arch- / artixlinux
+      when:
+        - ansible_facts.distribution | lower == 'archlinux' or
+          ansible_facts.os_family | lower == 'artix linux'
+      block:
+        - name: update pacman system
+          ansible.builtin.command: |
+            pacman --refresh --sync --sysupgrade --noconfirm
+          register: pacman
+          changed_when: pacman.rc != 0
+          failed_when: pacman.rc != 0
+
+        - name: create depends service
+          ansible.builtin.copy:
+            mode: 0755
+            dest: /etc/init.d/net
+            content: |
+              #!/usr/bin/openrc-run
+              true
when: + - ansible_facts.os_family | lower == 'artix linux' + + - name: make sure python3-apt is installed (only debian based) + ansible.builtin.package: + name: + - python3-apt + state: present + when: + - ansible_facts.os_family | lower == 'debian' + + - name: update package cache + become: true + ansible.builtin.package: + update_cache: true + + - name: install dependencies + ansible.builtin.package: + name: + - iproute2 + state: present + + - name: environment + ansible.builtin.debug: + msg: + - "os family : {{ ansible_facts.distribution }} ({{ ansible_facts.os_family }})" + - "distribution version : {{ ansible_facts.distribution_major_version }}" + - "ansible version : {{ ansible_version.full }}" + - "python version : {{ ansible_facts.python.version.major }}.{{ ansible_facts.python.version.minor }}" + +... diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/fqdn/molecule/default/tests/test_default.py b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/fqdn/molecule/default/tests/test_default.py new file mode 100644 index 0000000..af15554 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/fqdn/molecule/default/tests/test_default.py @@ -0,0 +1,132 @@ +# coding: utf-8 +from __future__ import unicode_literals + +import json +import os + +import pytest +import testinfra.utils.ansible_runner +from ansible.parsing.dataloader import DataLoader +from ansible.template import Templar + +HOST = "instance" + +testinfra_hosts = testinfra.utils.ansible_runner.AnsibleRunner( + os.environ["MOLECULE_INVENTORY_FILE"] +).get_hosts(HOST) + + +def pp_json(json_thing, sort=True, indents=2): + if type(json_thing) is str: + print(json.dumps(json.loads(json_thing), sort_keys=sort, indent=indents)) + else: + print(json.dumps(json_thing, sort_keys=sort, indent=indents)) + return None + + +def base_directory(): + """...""" + cwd = os.getcwd() + + if "group_vars" in os.listdir(cwd): + directory = "../.." + molecule_directory = "." + else: + directory = "." 
+ molecule_directory = "molecule/{}".format( + os.environ.get("MOLECULE_SCENARIO_NAME") + ) + + return directory, molecule_directory + + +def read_ansible_yaml(file_name, role_name): + ext_arr = ["yml", "yaml"] + + read_file = None + + for e in ext_arr: + test_file = "{}.{}".format(file_name, e) + if os.path.isfile(test_file): + read_file = test_file + break + + return "file={} name={}".format(read_file, role_name) + + +@pytest.fixture() +def get_vars(host): + """ + parse ansible variables + - defaults/main.yml + - vars/main.yml + - vars/${DISTRIBUTION}.yaml + - molecule/${MOLECULE_SCENARIO_NAME}/group_vars/all/vars.yml + """ + base_dir, molecule_dir = base_directory() + distribution = host.system_info.distribution + + print(" -> {}".format(distribution)) + print(" -> {}".format(base_dir)) + + if distribution in ["debian", "ubuntu"]: + os = "debian" + elif distribution in ["redhat", "ol", "centos", "rocky", "almalinux"]: + os = "redhat" + elif distribution in ["arch"]: + os = "archlinux" + + print(" -> {} / {}".format(distribution, os)) + + file_defaults = read_ansible_yaml( + "{}/defaults/main".format(base_dir), "role_defaults" + ) + file_vars = read_ansible_yaml("{}/vars/main".format(base_dir), "role_vars") + file_distibution = read_ansible_yaml( + "{}/vars/{}".format(base_dir, os), "role_distibution" + ) + file_molecule = read_ansible_yaml( + "{}/group_vars/all/vars".format(molecule_dir), "test_vars" + ) + # file_host_molecule = read_ansible_yaml("{}/host_vars/{}/vars".format(base_dir, HOST), "host_vars") + + defaults_vars = ( + host.ansible("include_vars", file_defaults) + .get("ansible_facts") + .get("role_defaults") + ) + vars_vars = ( + host.ansible("include_vars", file_vars).get("ansible_facts").get("role_vars") + ) + distibution_vars = ( + host.ansible("include_vars", file_distibution) + .get("ansible_facts") + .get("role_distibution") + ) + molecule_vars = ( + host.ansible("include_vars", file_molecule) + .get("ansible_facts") + .get("test_vars") + ) + # host_vars = host.ansible("include_vars", file_host_molecule).get("ansible_facts").get("host_vars") + + ansible_vars = defaults_vars + ansible_vars.update(vars_vars) + ansible_vars.update(distibution_vars) + ansible_vars.update(molecule_vars) + # ansible_vars.update(host_vars) + + templar = Templar(loader=DataLoader(), variables=ansible_vars) + result = templar.template(ansible_vars, fail_on_undefined=False) + + return result + + +def test_files(host): + """ """ + files = [] + files.append("/etc/hostname") + + for _file in files: + f = host.file(_file) + assert f.is_file diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/fqdn/tasks/configure.yml b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/fqdn/tasks/configure.yml new file mode 100644 index 0000000..c75be4b --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/fqdn/tasks/configure.yml @@ -0,0 +1,53 @@ +--- + +- name: ensure that dbus service are running + ansible.builtin.service: + name: dbus + state: started + +- name: set hostname with hostname command + ansible.builtin.hostname: + name: "{{ fqdn.short_hostname }}" + when: + - ansible_facts.service_mgr == 'systemd' + notify: + - restart hostname + - restart systemd-logind + +- name: set hostname with conf.d + ansible.builtin.lineinfile: + dest: /etc/conf.d/hostname + line: 'hostname="{{ fqdn.short_hostname }}"' + regexp: "hostname=" + mode: "0644" + when: + - ansible_facts.service_mgr == 'openrc' + +- name: update hosts file (backups will be 
made) + ansible.builtin.lineinfile: + dest: /etc/hosts + regexp: "^{{ fqdn.ip_address }}" + line: "{{ fqdn.ip_address }} {{ fqdn.hostname }} {{ fqdn.short_hostname }}" + state: present + backup: true + mode: "0644" + when: + - fqdn.ip_address is defined + - not is_docker_guest + notify: + - restart hostname + - restart systemd-logind + +- name: set hostname to /etc/hostname + ansible.builtin.lineinfile: + path: /etc/hostname + line: '{{ fqdn.short_hostname }}' + create: true + mode: "0644" + +- name: re-gather facts + ansible.builtin.setup: + when: + - ansible_facts.hostname != fqdn.hostname + +... diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/fqdn/tasks/debian.yml b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/fqdn/tasks/debian.yml new file mode 100644 index 0000000..85bcb6f --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/fqdn/tasks/debian.yml @@ -0,0 +1,5 @@ +--- +- name: Debian | Install dbus + ansible.builtin.apt: + name: dbus + state: present diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/fqdn/tasks/gentoo.yml b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/fqdn/tasks/gentoo.yml new file mode 100644 index 0000000..dbefb4b --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/fqdn/tasks/gentoo.yml @@ -0,0 +1,9 @@ +--- + +- name: Update conf.d + ansible.builtin.lineinfile: + dest: /etc/conf.d/hostname + line: "hostname={{ fqdn.short_hostname }}" + regexp: "hostname=" + +... diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/fqdn/tasks/linux.yml b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/fqdn/tasks/linux.yml new file mode 100644 index 0000000..39eeb6b --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/fqdn/tasks/linux.yml @@ -0,0 +1,25 @@ +--- + +- name: set hostname with hostname command + ansible.builtin.hostname: + name: "{{ fqdn.short_hostname }}" + notify: + - restart hostname + - restart systemd-logind + +- name: Re-gather facts + ansible.builtin.setup: + when: ansible_facts.hostname != fqdn.hostname + +- name: Build hosts file (backups will be made) + ansible.builtin.lineinfile: + dest: /etc/hosts + regexp: "^{{ fqdn.ip_address }}" + line: "{{ fqdn.ip_address }} {{ fqdn.hostname }} {{ fqdn.short_hostname }}" + state: present + backup: yes + when: + - fqdn.ip_address is defined + notify: + - restart hostname + - restart systemd-logind diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/fqdn/tasks/main.yml b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/fqdn/tasks/main.yml new file mode 100644 index 0000000..26380f5 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/fqdn/tasks/main.yml @@ -0,0 +1,9 @@ +--- + +- name: prepare + ansible.builtin.include_tasks: prepare.yml + +- name: configure + ansible.builtin.include_tasks: configure.yml + +... 
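
With `prepare.yml` and `configure.yml` in place, the role sets the short hostname through systemd or OpenRC, pins the FQDN in `/etc/hosts` and `/etc/hostname`, and re-gathers facts when the hostname changed. A minimal play wiring it up, using the `fqdn` dictionary from `defaults/main.yml` (host name and address are placeholders):

```yaml
---
# Example invocation of the role; names and IP are placeholders.
- hosts: all
  become: true

  roles:
    - role: bodsch.dns.fqdn
      vars:
        fqdn:
          hostname: "node01.example.internal"
          short_hostname: "node01"
          ip_address: "192.0.2.10"
```
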
diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/fqdn/tasks/prepare.yml b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/fqdn/tasks/prepare.yml new file mode 100644 index 0000000..0188b35 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/fqdn/tasks/prepare.yml @@ -0,0 +1,43 @@ +--- + +- name: include OS specific configuration ({{ ansible_facts.distribution }} ({{ ansible_facts.os_family }}) {{ ansible_facts.distribution_major_version }}) + ansible.builtin.include_vars: "{{ lookup('first_found', params) }}" + vars: + params: + paths: + - "vars" + files: + # eg. debian-10 / ubuntu-20.04 / centos-8 / oraclelinux-8 + - "{{ ansible_facts.distribution | lower }}-{{ ansible_facts.distribution_major_version }}.yml" + # eg. archlinux-systemd / archlinux-openrc + - "{{ ansible_facts.distribution | lower }}-{{ ansible_facts.service_mgr | lower }}.yml" + # eg. artixlinux + - "{{ ansible_facts.distribution | lower | replace(' ', '') }}.yml" + # eg. debian / ubuntu / centos / oraclelinux + - "{{ ansible_facts.distribution | lower }}.yml" + # eg. redhat / debian / archlinux + - "{{ ansible_facts.os_family | lower }}.yml" + - default.yaml + skip: true + +- name: detect docker environment + ansible.builtin.set_fact: + is_docker_guest: "{{ + ansible_facts.virtualization_role | default('host') == 'guest' and + ansible_facts.virtualization_type | default('none') == 'docker' }}" + +- name: update package cache + ansible.builtin.package: + update_cache: true + +- name: install requirements + ansible.builtin.package: + name: "{{ fqdn_dependencies }}" + state: present + when: + - fqdn_dependencies | default([]) | count > 0 + +- name: re-read ansible facts + ansible.builtin.setup: + +... diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/fqdn/vars/archlinux-openrc.yml b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/fqdn/vars/archlinux-openrc.yml new file mode 100644 index 0000000..c81cf5b --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/fqdn/vars/archlinux-openrc.yml @@ -0,0 +1,3 @@ +--- + +... diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/fqdn/vars/archlinux.yml b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/fqdn/vars/archlinux.yml new file mode 100644 index 0000000..c81cf5b --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/fqdn/vars/archlinux.yml @@ -0,0 +1,3 @@ +--- + +... diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/fqdn/vars/artixlinux.yml b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/fqdn/vars/artixlinux.yml new file mode 100644 index 0000000..c81cf5b --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/fqdn/vars/artixlinux.yml @@ -0,0 +1,3 @@ +--- + +... diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/fqdn/vars/debian.yml b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/fqdn/vars/debian.yml new file mode 100644 index 0000000..26cd537 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/fqdn/vars/debian.yml @@ -0,0 +1,7 @@ +--- + +fqdn_dependencies: + - iproute2 + - dbus + +... 
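
`prepare.yml` resolves distribution-specific variables with a `first_found` lookup, walking from the most specific file name (distribution plus major version) down to the OS family, and skipping silently when nothing matches. Pinning extra dependencies for a single release therefore just means dropping in a more specific vars file; the file below is hypothetical and not part of this change:

```yaml
---
# vars/debian-12.yml (hypothetical): wins over vars/debian.yml because the
# "<distribution>-<major version>.yml" pattern is listed first in prepare.yml.
fqdn_dependencies:
  - iproute2
  - dbus
...
```
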
diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/fqdn/vars/main.yml b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/fqdn/vars/main.yml new file mode 100644 index 0000000..c97eb0d --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/fqdn/vars/main.yml @@ -0,0 +1,5 @@ +--- + +fqdn_dependencies: [] + +... diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/hosts/.ansible-lint b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/hosts/.ansible-lint new file mode 100644 index 0000000..48763f0 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/hosts/.ansible-lint @@ -0,0 +1,5 @@ +--- + +skip_list: + - name[casing] + - name[template] diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/hosts/.editorconfig b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/hosts/.editorconfig new file mode 100644 index 0000000..898cdbd --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/hosts/.editorconfig @@ -0,0 +1,23 @@ +# EditorConfig helps developers define and maintain consistent +# coding styles between different editors and IDEs +# https://editorconfig.org/ + + +root = true + +[*] +indent_style = space +indent_size = 2 + +end_of_line = lf +charset = utf-8 +trim_trailing_whitespace = true +insert_final_newline = true +max_line_length = 100 + +[*.py] +indent_style = space +indent_size = 4 + +[*.md] +trim_trailing_whitespace = false diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/hosts/.flake8 b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/hosts/.flake8 new file mode 100644 index 0000000..1962f7e --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/hosts/.flake8 @@ -0,0 +1,19 @@ +[flake8] + +# E221 multiple spaces before operator +# E251 unexpected spaces around keyword / parameter equals + +ignore = E221,E251 + +exclude = + # No need to traverse our git directory + .git, + # There's no value in checking cache directories + __pycache__, + .tox + +# E203: https://github.com/python/black/issues/315 +# ignore = D,E741,W503,W504,H,E501,E203 + +max-line-length = 195 + diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/hosts/.gitignore b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/hosts/.gitignore new file mode 100644 index 0000000..3d35e6a --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/hosts/.gitignore @@ -0,0 +1,6 @@ +.tox +.galaxy_install_info +*kate-swp +__pycache__ +.cache +.directory diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/hosts/.yamllint b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/hosts/.yamllint new file mode 100644 index 0000000..e3f52af --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/hosts/.yamllint @@ -0,0 +1,36 @@ +--- +# Based on ansible-lint config +extends: default + +rules: + braces: + max-spaces-inside: 1 + level: error + brackets: + max-spaces-inside: 1 + level: error + colons: + max-spaces-after: -1 + level: error + commas: + max-spaces-after: -1 + level: error + comments: disable + comments-indentation: disable + document-start: disable + empty-lines: + max: 3 + level: error + hyphens: + level: error + indentation: + spaces: 2 + key-duplicates: enable + line-length: + max: 
195 + level: warning + new-line-at-end-of-file: disable + new-lines: + type: unix + trailing-spaces: disable + truthy: disable diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/hosts/LICENSE b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/hosts/LICENSE new file mode 100644 index 0000000..8c8472f --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/hosts/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." 
+ + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. 
+ + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. 
We also recommend that a
      file or class name and description of purpose be included on the
      same "printed page" as the copyright notice for easier
      identification within third-party archives.
+
+   Copyright 2020-2021 Bodo Schulz
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/hosts/README.md b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/hosts/README.md new file mode 100644 index 0000000..4abb66c --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/hosts/README.md @@ -0,0 +1,63 @@
+
+# Ansible Role: `bodsch.dns.hosts`
+
+Ansible role for configuring entries in the `/etc/hosts` file.
+
+
+## usage
+
+```yaml
+hosts_add_default_ipv4: true
+hosts_add_basic_ipv6: false
+
+hosts_add_ansible_managed_hosts: false
+hosts_add_ansible_managed_hosts_groups:
+  - 'all'
+
+hosts_cloud_template_location: "/etc/cloud/templates/hosts.{{ ansible_facts.os_family | lower }}.tmpl"
+
+hosts_ip_protocol: 'ipv4'
+
+hosts_network_interface: "{{ ansible_facts.default_ipv4.interface }}"
+
+hosts_file_backup: false
+
+# Custom hosts entries to be added
+hosts_entries: []
+
+# Custom host file snippets to be added
+hosts_file_snippets: []
+```
+
+### `hosts_entries`
+
+```yaml
+hosts_entries:
+  - ip: 192.168.11.1
+    name: test.molecule.local
+    aliases:
+      - test
+      - foo-test.molecule.local
+```
+
+### `hosts_file_snippets`
+
+```yaml
+```
+
+## Contribution
+
+Please read [Contribution](CONTRIBUTING.md)
+
+## Development, Branches (Git Tags)
+
+
+## Author
+
+- Bodo Schulz
+
+## License
+
+[Apache](LICENSE)
+
+**FREE SOFTWARE, HELL YEAH!**
diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/hosts/defaults/main.yml b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/hosts/defaults/main.yml new file mode 100644 index 0000000..f355416 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/hosts/defaults/main.yml @@ -0,0 +1,29 @@
+---
+
+# If set to true, an entry for `ansible_facts.hostname`, bound to the host's default IPv4 address, is added.
+hosts_add_default_ipv4: true
+# If set to true, basic IPv6 entries (localhost6, ip6-localnet, etc) are added.
+hosts_add_basic_ipv6: false
+# If set to true, an entry for every host managed by Ansible is added.
+# Note that this makes `hosts_add_default_ipv4` unnecessary, as that entry will be added by this setting as well.
+hosts_add_ansible_managed_hosts: false
+# Select specific groups of Ansible managed hosts to be added in the hosts file.
+hosts_add_ansible_managed_hosts_groups:
+  - 'all'
+# for using cloud templates
+hosts_cloud_template_location: "/etc/cloud/templates/hosts.{{ ansible_facts.os_family | lower }}.tmpl"
+
+# IP protocol to use
+hosts_ip_protocol: 'ipv4'
+
+hosts_network_interface: "{{ ansible_facts.default_ipv4.interface }}"
+
+hosts_file_backup: false
+
+# Custom hosts entries to be added
+hosts_entries: []
+
+# Custom host file snippets to be added
+hosts_file_snippets: []
+
+...
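
`hosts_entries` above maps one IP to a canonical name plus aliases; the rendering template is not part of this excerpt, so the resulting `/etc/hosts` line shown in the comment below is an assumption based on the conventional `<ip> <name> <aliases...>` layout. The README's `hosts_file_snippets` example is empty; the shape sketched here, a list of raw lines, is likewise a guess from the variable name:

```yaml
hosts_entries:
  - ip: 192.168.11.1
    name: test.molecule.local
    aliases:
      - test
      - foo-test.molecule.local
# presumably rendered as:
#   192.168.11.1 test.molecule.local test foo-test.molecule.local

# hosts_file_snippets: shape is a guess, the README leaves the example empty
hosts_file_snippets:
  - "10.11.0.254 gateway.molecule.local"
```
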
diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/hosts/meta/main.yml b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/hosts/meta/main.yml new file mode 100644 index 0000000..a6b387e --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/hosts/meta/main.yml @@ -0,0 +1,32 @@ +--- + +galaxy_info: + role_name: hosts + + author: Bodo Schulz + description: Ansible role for configuring entries in the /etc/hosts file. + + license: Apache + min_ansible_version: "2.9" + + platforms: + - name: ArchLinux + - name: Debian + versions: + # 10 + - buster + # 11 + - bullseye + # 12 + - bookworm + - name: Ubuntu + versions: + # 20.04 + - focal + + galaxy_tags: + - system + - dns + +dependencies: [] + +... diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/hosts/molecule/default/converge.yml b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/hosts/molecule/default/converge.yml new file mode 100644 index 0000000..a8b121f --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/hosts/molecule/default/converge.yml @@ -0,0 +1,11 @@ +--- + +- name: converge + hosts: instance + any_errors_fatal: false + become: false + + roles: + - role: bodsch.dns.hosts + +... diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/hosts/molecule/default/group_vars/all/vars.yml b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/hosts/molecule/default/group_vars/all/vars.yml new file mode 100644 index 0000000..af48164 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/hosts/molecule/default/group_vars/all/vars.yml @@ -0,0 +1,23 @@ +--- + +# If set to true, an entry for `ansible_facts.hostname`, bound to the host's default IPv4 address, is added. +hosts_add_default_ipv4: true +# If set to true, basic IPv6 entries (localhost6, ip6-localnet, etc.) are added. +hosts_add_basic_ipv6: true +# If set to true, an entry for every host managed by Ansible is added. +# Note that this makes `hosts_add_default_ipv4` unnecessary, as that entry is added by this setting as well. +hosts_add_ansible_managed_hosts: true +# Select specific groups of Ansible managed hosts to be added to the hosts file. +hosts_add_ansible_managed_hosts_groups: + - 'all' + +# for using cloud templates +hosts_cloud_template_location: "/etc/cloud/templates/hosts.{{ ansible_facts.os_family | lower }}.tmpl" + +hosts_entries: + - ip: 192.168.11.1 + name: test.molecule.local + aliases: + - test + - foo-test.molecule.local +...
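With `hosts_add_ansible_managed_hosts: true`, the template walks `groups[group]` for every group named in `hosts_add_ansible_managed_hosts_groups`, so the rendered entries can be narrowed to a subset of the inventory. A sketch (the group name `dns` is illustrative; any inventory group works):

```yaml
# group_vars/all/vars.yml -- hypothetical; only members of the 'dns'
# inventory group end up in the generated /etc/hosts
hosts_add_ansible_managed_hosts: true
hosts_add_ansible_managed_hosts_groups:
  - 'dns'
```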
diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/hosts/molecule/default/molecule.yml b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/hosts/molecule/default/molecule.yml new file mode 100644 index 0000000..0437c1f --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/hosts/molecule/default/molecule.yml @@ -0,0 +1,68 @@ +--- + +role_name_check: 1 + +dependency: + name: galaxy + +driver: + name: docker + +platforms: + - name: instance + image: "ghcr.io/bodsch/docker-ansible/ansible-${DISTRIBUTION:-debian:12}" + command: ${MOLECULE_DOCKER_COMMAND:-""} + docker_host: "${DOCKER_HOST:-unix:///run/docker.sock}" + privileged: true + pre_build_image: true + cgroupns_mode: host + volumes: + - /sys/fs/cgroup:/sys/fs/cgroup:rw + - /var/lib/containerd + capabilities: + - SYS_ADMIN + tmpfs: + - /run + - /tmp + tty: true + environment: + container: docker + groups: + - dns + docker_networks: + - name: bind + ipam_config: + - subnet: "10.11.0.0/24" + gateway: "10.11.0.254" + networks: + - name: bind + ipv4_address: "10.11.0.1" + +provisioner: + name: ansible + ansible_args: + - --diff + - -v + config_options: + defaults: + deprecation_warnings: true + callback_result_format: yaml + callbacks_enabled: profile_tasks + gathering: smart + fact_caching: jsonfile + fact_caching_timeout: 8640 + fact_caching_connection: ansible_facts + +scenario: + test_sequence: + - destroy + - dependency + - syntax + - create + - prepare + - converge + - verify + - destroy + +verifier: + name: testinfra diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/hosts/molecule/default/prepare.yml b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/hosts/molecule/default/prepare.yml new file mode 100644 index 0000000..c235be1 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/hosts/molecule/default/prepare.yml @@ -0,0 +1,51 @@ +--- + +- name: information + hosts: all + gather_facts: true + + pre_tasks: + - name: arch- / artixlinux + when: + - ansible_facts.distribution | lower == 'archlinux' or + ansible_facts.os_family | lower == 'artix linux' + block: + - name: update pacman system + ansible.builtin.command: | + pacman --refresh --sync --sysupgrade --noconfirm + register: pacman + changed_when: pacman.rc != 0 + failed_when: pacman.rc != 0 + + - name: create depends service + ansible.builtin.copy: + mode: 0755 + dest: /etc/init.d/net + content: | + #!/usr/bin/openrc-run + true + when: + - ansible_facts.os_family | lower == 'artix linux' + + - name: make sure python3-apt is installed (only debian based) + ansible.builtin.package: + name: + - python3-apt + state: present + when: + - ansible_facts.os_family | lower == 'debian' + + - name: update package cache + become: true + ansible.builtin.package: + update_cache: true + + - name: environment + ansible.builtin.debug: + msg: + - "os family : {{ ansible_facts.distribution }} ({{ ansible_facts.os_family }})" + - "distribution version : {{ ansible_facts.distribution_major_version }}" + - "ansible version : {{ ansible_version.full }}" + - "python version : {{ ansible_facts.python.version.major }}.{{ ansible_facts.python.version.minor }}" + +...
diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/hosts/tasks/main.yml b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/hosts/tasks/main.yml new file mode 100644 index 0000000..4f091d2 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/hosts/tasks/main.yml @@ -0,0 +1,57 @@ +--- + +- name: install dependencies + ansible.builtin.package: + name: "{{ hosts_dependencies }}" + state: present + when: + - hosts_dependencies | default([]) | count > 0 + +- name: re-read ansible facts + ansible.builtin.setup: + +- name: detect docker environment + ansible.builtin.set_fact: + is_docker_guest: "{{ + ansible_facts.virtualization_role | default('host') == 'guest' and + ansible_facts.virtualization_type | default('none') == 'docker' }}" + +- name: detect if cloud template is present + become: true + ansible.builtin.stat: + path: "{{ hosts_cloud_template_location }}" + register: hosts_cloud_template + +- name: add managed block to {{ hosts_file }} + ansible.builtin.template: + src: etc/hosts.j2 + dest: "{{ hosts_file }}" + owner: root + group: root + mode: "0644" + backup: "{{ hosts_file_backup }}" + when: + - not is_docker_guest + +- name: add managed block to {{ hosts_file }}.docker + ansible.builtin.template: + src: etc/hosts.j2 + dest: "{{ hosts_file }}.docker" + owner: root + group: root + mode: "0644" + backup: "{{ hosts_file_backup }}" + when: + - is_docker_guest + +- name: add host entries to cloud template file + become: true + ansible.builtin.template: + src: etc/hosts.j2 + dest: "{{ hosts_cloud_template_location }}" + owner: root + group: root + mode: "0644" + backup: "{{ hosts_file_backup }}" + when: + hosts_cloud_template.stat.exists diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/hosts/templates/etc/hosts.j2 b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/hosts/templates/etc/hosts.j2 new file mode 100644 index 0000000..2ea81f3 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/hosts/templates/etc/hosts.j2 @@ -0,0 +1,85 @@ +#jinja2: trim_blocks: True, lstrip_blocks: True +# {{ ansible_managed }} + +127.0.0.1 localhost.localdomain localhost + +{% if hosts_add_default_ipv4 is defined and + hosts_add_default_ipv4 | string | length > 0 and + hosts_add_default_ipv4 %} + {% set _ipv4_address = "" %} + {% set _ipv4_fqdn = "" %} + {% set _hostname = "" %} + {% set _domainname = "" %} + {% if ansible_facts['default_ipv4']['address'] is defined %} + {% set _ipv4_address = ansible_facts['default_ipv4']['address'] %} + {% endif %} + {% if ansible_facts['fqdn'] is defined %} + {% set _ipv4_fqdn = ansible_facts['fqdn'] %} + {% endif %} + {% if ansible_facts['hostname'] is defined %} + {% set _hostname = ansible_facts['hostname'] %} + {% endif %} + {% if ansible_facts['domain'] is defined %} + {% set _domainname = ansible_facts['domain'] %} + {% endif %} +{{ _ipv4_address.ljust(15) }} {{ _ipv4_fqdn.ljust(25) }} {{ _hostname if _hostname | string | length > 0 and _hostname != _ipv4_fqdn else '' }} +{% endif %} +{% if hosts_add_basic_ipv6 is defined and + hosts_add_basic_ipv6 | string | length > 0 and + hosts_add_basic_ipv6 %} + +# basic IPv6 entries +::1 localhost6.localdomain6 localhost6 ip6-localhost ip6-loopback +fe00::0 ip6-localnet +ff00::0 ip6-mcastprefix +ff02::1 ip6-allnodes +ff02::2 ip6-allrouters +{% endif %} +{% if hosts_add_ansible_managed_hosts_groups is defined and + hosts_add_ansible_managed_hosts_groups | string | length > 0 and + 
hosts_add_ansible_managed_hosts_groups %} + +# ansible managed hosts + {% for group in hosts_add_ansible_managed_hosts_groups %} + {% for host in groups[group] | sort %} + {% set address = None %} + {% if hosts_network_interface is defined and + hosts_network_interface | string | length > 0 %} + {% set interface_config = hostvars.get(host).get('ansible_' + hosts_network_interface) %} + {% if interface_config is defined and + interface_config | count > 0 %} + {% set protocol_config = interface_config.get(hosts_ip_protocol) %} + {% if protocol_config is defined and + protocol_config | count > 0 %} + {% set address = protocol_config.get('address') | default(None) %} + {% endif %} + {% endif %} + {% else %} + {% set protocol_config = hostvars.get(host).get('ansible_default_' + hosts_ip_protocol) %} + {% if protocol_config is defined and + protocol_config | count > 0 %} + {% set address = protocol_config.get('address') | default(None) %} + {% endif %} + {% endif %} + {% if address %} + {% set host_fqdn = hostvars.get(host).get('ansible_facts').get('fqdn') %} + {% set host_short = hostvars.get(host).get('ansible_facts').get('hostname') %} +{{ address.ljust(15) }} {{ host_fqdn.ljust(25) }} {{ host_short if host_fqdn != host_short else '' }} + {% endif %} + {% endfor %} + {% endfor %} +{% endif %} +{% if hosts_entries is defined and + hosts_entries | count > 0 %} + + {% for host in hosts_entries %} +{{ host.ip.ljust(15) }} {{ host.name.ljust(25) }} {{ host.aliases | default([]) | join(' ') }} + {% endfor %} +{% endif %} +{% if hosts_file_snippets | length != 0 %} + + {% for snippet in hosts_file_snippets %} + {% include snippet %} + + {% endfor %} +{% endif %} diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/hosts/vars/main.yml b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/hosts/vars/main.yml new file mode 100644 index 0000000..d1ddbd5 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/hosts/vars/main.yml @@ -0,0 +1,8 @@ +--- + +hosts_dependencies: + - iproute2 + +hosts_file: /etc/hosts + +...
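Taken together, a minimal play applying the role with a few overrides might look like this (a sketch; the IP, host name, and alias are hypothetical):

```yaml
---
- name: manage /etc/hosts
  hosts: all
  become: true

  roles:
    - role: bodsch.dns.hosts
      vars:
        hosts_add_basic_ipv6: true
        hosts_entries:
          - ip: 10.11.0.10
            name: db.molecule.local
            aliases:
              - db
```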
diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/knot/.ansible-lint b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/knot/.ansible-lint new file mode 100644 index 0000000..48763f0 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/knot/.ansible-lint @@ -0,0 +1,5 @@ +--- + +skip_list: + - name[casing] + - name[template] diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/knot/.editorconfig b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/knot/.editorconfig new file mode 100644 index 0000000..898cdbd --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/knot/.editorconfig @@ -0,0 +1,23 @@ +# EditorConfig helps developers define and maintain consistent +# coding styles between different editors and IDEs +# https://editorconfig.org/ + + +root = true + +[*] +indent_style = space +indent_size = 2 + +end_of_line = lf +charset = utf-8 +trim_trailing_whitespace = true +insert_final_newline = true +max_line_length = 100 + +[*.py] +indent_style = space +indent_size = 4 + +[*.md] +trim_trailing_whitespace = false diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/knot/.flake8 b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/knot/.flake8 new file mode 100644 index 0000000..1962f7e --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/knot/.flake8 @@ -0,0 +1,19 @@ +[flake8] + +# E221 multiple spaces before operator +# E251 unexpected spaces around keyword / parameter equals + +ignore = E221,E251 + +exclude = + # No need to traverse our git directory + .git, + # There's no value in checking cache directories + __pycache__, + .tox + +# E203: https://github.com/python/black/issues/315 +# ignore = D,E741,W503,W504,H,E501,E203 + +max-line-length = 195 + diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/knot/.gitignore b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/knot/.gitignore new file mode 100644 index 0000000..3d35e6a --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/knot/.gitignore @@ -0,0 +1,6 @@ +.tox +.galaxy_install_info +*kate-swp +__pycache__ +.cache +.directory diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/knot/.yamllint b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/knot/.yamllint new file mode 100644 index 0000000..e3f52af --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/knot/.yamllint @@ -0,0 +1,36 @@ +--- +# Based on ansible-lint config +extends: default + +rules: + braces: + max-spaces-inside: 1 + level: error + brackets: + max-spaces-inside: 1 + level: error + colons: + max-spaces-after: -1 + level: error + commas: + max-spaces-after: -1 + level: error + comments: disable + comments-indentation: disable + document-start: disable + empty-lines: + max: 3 + level: error + hyphens: + level: error + indentation: + spaces: 2 + key-duplicates: enable + line-length: + max: 195 + level: warning + new-line-at-end-of-file: disable + new-lines: + type: unix + trailing-spaces: disable + truthy: disable diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/knot/LICENSE b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/knot/LICENSE new file mode 100644 index 0000000..8c8472f --- /dev/null +++ 
b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/knot/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. 
Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright 2020-2021 Bodo Schulz + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. 
+ You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/knot/Makefile b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/knot/Makefile new file mode 100644 index 0000000..3abaf48 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/knot/Makefile @@ -0,0 +1,22 @@ +# +export TOX_SCENARIO ?= default +export TOX_ANSIBLE ?= ansible_6.1 + +.PHONY: converge destroy verify test lint + +default: converge + +converge: + @hooks/converge + +destroy: + @hooks/destroy + +verify: + @hooks/verify + +test: + @hooks/test + +lint: + @hooks/lint diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/knot/README.md b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/knot/README.md new file mode 100644 index 0000000..d20165f --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/knot/README.md @@ -0,0 +1,122 @@ + +# Ansible Role: `knot` + +This role installs and fully configures [knot](https://github.com/CZ-NIC/knot). + +[![GitHub Workflow Status](https://img.shields.io/github/actions/workflow/status/bodsch/ansible-knot/main.yml?branch=main)][ci] +[![GitHub issues](https://img.shields.io/github/issues/bodsch/ansible-knot)][issues] +[![GitHub release (latest by date)](https://img.shields.io/github/v/release/bodsch/ansible-knot)][releases] +[![Ansible Quality Score](https://img.shields.io/ansible/quality/50067?label=role%20quality)][quality] + +[ci]: https://github.com/bodsch/ansible-knot/actions +[issues]: https://github.com/bodsch/ansible-knot/issues?q=is%3Aopen+is%3Aissue +[releases]: https://github.com/bodsch/ansible-knot/releases +[quality]: https://galaxy.ansible.com/bodsch/knot + + +## Requirements & Dependencies + +None known. + +### Operating systems + +Tested on + +* ArchLinux +* Debian based + - Debian 10 / 11 + - Ubuntu 20.04 + +## configuration + +### default + +```yaml +knot_user: knot +knot_group: knot + +knot_config: {} + +knot_zones: {} +``` + +### knot config + +```yaml +knot_config: + server: + listen: + - '127.0.0.1@5353' + + log: + syslog: + any: debug + + database: + storage: "{{ knot_database }}" + + template: + default: + storage: "{{ knot_database }}" + file: "%s.zone" + + zone: + molecule.local: {} +``` + +### knot zones + +```yaml +knot_zones: + molecule.local: + state: present + ttl: 3600 + soa: + primary_dns: 'dns.molecule.local' + hostmaster: 'hostmaster.molecule.local' + refresh: 6h + retry: 1h + expire: 1w + minimum: 1d + name_servers: + dns.molecule.local: + ip: '{{ ansible_default_ipv4.address }}' + records: + router.molecule.local: + type: 'A' + ip: '{{ ansible_default_ipv4.address }}' + + ldap.molecule.local: + type: 'CNAME' + target: 'router.molecule.local' +``` + + +### `knotc` CLI + +```bash +knotc conf-begin +knotc conf-set zone.domain molecule.local +knotc conf-commit + +knotc zone-begin molecule.local +knotc zone-set molecule.local @ 7200 SOA dns hostmaster 1 86400 900 691200 3600 +knotc zone-set molecule.local dns 3600 A 172.17.0.2 +knotc zone-set molecule.local router 3600 A 172.17.0.2 +knotc zone-set molecule.local www 
3600 A 172.17.0.5 +knotc zone-set molecule.local ldap 3600 CNAME router +knotc zone-set molecule.local _https._tcp 3600 SRV "10 20 443 www" +knotc zone-commit molecule.local +``` + +More details can be found in the [Knot DNS operation documentation](https://www.knot-dns.cz/docs/3.1/html/operation.html). + +## Author + +- Bodo Schulz + +## License + +[Apache](LICENSE) + +`FREE SOFTWARE, HELL YEAH!` diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/knot/defaults/main.yml b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/knot/defaults/main.yml new file mode 100644 index 0000000..9353cb3 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/knot/defaults/main.yml @@ -0,0 +1,10 @@ +--- + +knot_user: knot +knot_group: knot + +knot_config: {} + +knot_zones: {} + +... diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/knot/handlers/main.yml b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/knot/handlers/main.yml new file mode 100644 index 0000000..93ef5f6 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/knot/handlers/main.yml @@ -0,0 +1,13 @@ +--- + +- name: 'knot restart' + ansible.builtin.service: + name: 'knot' + state: 'restarted' + +- name: 'knot reload' + ansible.builtin.service: + name: 'knot' + state: 'reloaded' + +... diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/knot/hooks/converge b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/knot/hooks/converge new file mode 100755 index 0000000..0c50932 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/knot/hooks/converge @@ -0,0 +1,3 @@ +#!/usr/bin/env bash + +hooks/tox.sh "converge" diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/knot/hooks/destroy b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/knot/hooks/destroy new file mode 100755 index 0000000..b4a3f8d --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/knot/hooks/destroy @@ -0,0 +1,3 @@ +#!/usr/bin/env bash + +hooks/tox.sh "destroy" diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/knot/hooks/lint b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/knot/hooks/lint new file mode 100755 index 0000000..ef226a0 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/knot/hooks/lint @@ -0,0 +1,3 @@ +#!/usr/bin/env bash + +hooks/tox.sh "lint" diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/knot/hooks/molecule.rc b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/knot/hooks/molecule.rc new file mode 100644 index 0000000..78c8621 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/knot/hooks/molecule.rc @@ -0,0 +1,74 @@ + +TOX_ARGS= + +if [ -n "${TOX_SCENARIO}" ] +then + TOX_ARGS="--scenario-name ${TOX_SCENARIO}" +fi + +TOX_OPTS="-e ${TOX_ANSIBLE}" + +vercomp() { + + [[ $1 == $2 ]] && return 0 + v1=$(echo "$1" | sed -e 's|-|.|g') + v2=$(echo "$2" | sed -e 's|-|.|g') + + local IFS=. 
+ local i ver1=($v1) ver2=($v2) + # fill empty fields in ver1 with zeros + for ((i=${#ver1[@]}; i<${#ver2[@]}; i++)) + do + ver1[i]=0 + done + + for ((i=0; i<${#ver1[@]}; i++)) + do + if [[ -z ${ver2[i]} ]] + then + # fill empty fields in ver2 with zeros + ver2[i]=0 + fi + if ((10#${ver1[i]} > 10#${ver2[i]})) + then + return 1 + fi + if ((10#${ver1[i]} < 10#${ver2[i]})) + then + return 2 + fi + done + return 0 +} + +install_collection() { + local collection="${1}" + + echo "Install the required collection '${collection}'" + ansible-galaxy collection install ${collection} +} + +remove_collection() { + + local collection="${1}" + + namespace="$(echo "${collection}" | cut -d '.' -f1)" + name="$(echo "${collection}" | cut -d '.' -f2)" + + collection="${HOME}/.ansible/collections/ansible_collections/${namespace}/${name}" + + rm \ + --recursive \ + --force \ + "${collection}" +} + +publish() { + + TOKEN="${HOME}/.ansible/galaxy_token" + + if [ -e "${TOKEN}" ] + then + ansible-galaxy import --token=$(cat "${TOKEN}") bodsch # "???" + fi +} diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/knot/hooks/test b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/knot/hooks/test new file mode 100755 index 0000000..2869139 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/knot/hooks/test @@ -0,0 +1,3 @@ +#!/usr/bin/env bash + +hooks/tox.sh "test" diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/knot/hooks/tox.sh b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/knot/hooks/tox.sh new file mode 100755 index 0000000..c93de29 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/knot/hooks/tox.sh @@ -0,0 +1,52 @@ +#!/usr/bin/env bash + +. hooks/molecule.rc + +TOX_TEST="${1}" + +if [ -f "./collections.yml" ] +then + for collection in $(grep -v "#" collections.yml | grep "^ - name: " | awk -F ': ' '{print $2}') + do + collections_installed="$(ansible-galaxy collection list | grep ${collection} 2> /dev/null)" + + if [ -z "${collections_installed}" ] + then + install_collection ${collection} + else + collection_version=$(echo "${collections_installed}" | awk -F ' ' '{print $2}') + version="$(grep -v "#" collections.yml | grep -A1 "^ - name: ${collection}" | grep "version: " 2> /dev/null | awk -F ': ' '{print $2}' | sed -e 's|=||' -e 's|>||' -e 's|"||g')" + + echo "The required collection '${collection}' is installed in version ${collection_version}." + + if [ ! -z "${version}" ] + then + + vercomp "${version}" "${collection_version}" + + case $? 
in + 0) op='=' ;; + 1) op='>' ;; + 2) op='<' ;; + esac + + if [[ $op = "=" ]] || [[ $op = ">" ]] + then + # echo "FAIL: Expected '$3', Actual '$op', Arg1 '$1', Arg2 '$2'" + echo "re-install for version ${version}" + + remove_collection ${collection} + install_collection ${collection} + else + : + # echo "Pass: '$1 $op $2'" + fi + else + : + fi + fi + done + echo "" +fi + +tox ${TOX_OPTS} -- molecule ${TOX_TEST} ${TOX_ARGS} diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/knot/hooks/verify b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/knot/hooks/verify new file mode 100755 index 0000000..5f436af --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/knot/hooks/verify @@ -0,0 +1,3 @@ +#!/usr/bin/env bash + +hooks/tox.sh "verify" diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/knot/meta/main.yml b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/knot/meta/main.yml new file mode 100644 index 0000000..36426bc --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/knot/meta/main.yml @@ -0,0 +1,33 @@ +--- + +galaxy_info: + role_name: knot + + author: Bodo Schulz + description: installs, configures knot + + license: Apache + min_ansible_version: "2.9" + + platforms: + - name: ArchLinux + - name: Debian + versions: + # 10 + - buster + # 11 + - bullseye + # 12 + - bookworm + - name: Ubuntu + versions: + # 20.04 + - focal + # 22.04 + - jammy + + galaxy_tags: + - dns + - knot + +dependencies: [] + +... diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/knot/molecule/configured/converge.yml b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/knot/molecule/configured/converge.yml new file mode 100644 index 0000000..6584cdd --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/knot/molecule/configured/converge.yml @@ -0,0 +1,13 @@ +--- +- name: converge + hosts: all + any_errors_fatal: false + gather_facts: true + + environment: + NETRC: '' + + roles: + - role: bodsch.dns.knot + +... diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/knot/molecule/configured/group_vars/all/vars.yml b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/knot/molecule/configured/group_vars/all/vars.yml new file mode 100644 index 0000000..f93b1d6 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/knot/molecule/configured/group_vars/all/vars.yml @@ -0,0 +1,70 @@ +--- + +knot_config: + server: + rundir: "/run/knot" + user: "{{ knot_user }}:{{ knot_group }}" + listen: + - '127.0.0.1@5353' + + log: + syslog: + any: info + stderr: + server: notice + control: notice + zone: notice + any: notice + + database: + storage: "{{ knot_database }}" + + template: + default: + storage: "{{ knot_database }}" + file: "%s.zone" + + acl: + update_acl: + address: 127.0.0.1 + action: update + key: molecule + + zone: + molecule.local: + # master: master + acl: update_acl + file: molecule.local.zone + + key: + molecule: + algorithm: hmac-sha256 + # base64 encoded! 
+ # echo 8z4cp9L0MSESNg3Gv3cHXKQPFvSfDxwt | base64 -w0 + secret: OHo0Y3A5TDBNU0VTTmczR3YzY0hYS1FQRnZTZkR4d3QK + +knot_zones: + molecule.local: + ttl: 3600 + soa: + primary_dns: 'dns.molecule.local' + hostmaster: 'hostmaster.molecule.local' + refresh: 6h + retry: 1h + expire: 1w + minimum: 1d + name_servers: + dns.molecule.local: + ip: '{{ ansible_facts.default_ipv4.address }}' + records: + this.molecule.local: + type: 'A' + ip: '{{ ansible_facts.default_ipv4.address }}' + docker.molecule.local: + type: 'A' + ip: 192.168.110.10 + + there.molecule.local: + type: 'CNAME' + target: 'this.molecule.local' +... diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/knot/molecule/configured/molecule.yml b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/knot/molecule/configured/molecule.yml new file mode 100644 index 0000000..dbbfc67 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/knot/molecule/configured/molecule.yml @@ -0,0 +1,56 @@ +--- + +role_name_check: 1 + + +dependency: + name: galaxy + +driver: + name: docker + +platforms: + - name: instance + image: "ghcr.io/bodsch/docker-ansible/ansible-${DISTRIBUTION:-debian:12}" + command: ${MOLECULE_DOCKER_COMMAND:-""} + docker_host: "${DOCKER_HOST:-unix:///run/docker.sock}" + privileged: true + pre_build_image: true + cgroupns_mode: host + volumes: + - /sys/fs/cgroup:/sys/fs/cgroup:rw + - /var/lib/containerd + capabilities: + - SYS_ADMIN + tmpfs: + - /run + - /tmp + +provisioner: + name: ansible + ansible_args: + - --diff + - -v + config_options: + defaults: + deprecation_warnings: true + callback_result_format: yaml + callbacks_enabled: profile_tasks + gathering: smart + fact_caching: jsonfile + fact_caching_timeout: 8640 + fact_caching_connection: ansible_facts + +scenario: + test_sequence: + - destroy + - dependency + - syntax + - create + - prepare + - converge + - verify + - destroy + +verifier: + name: testinfra diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/knot/molecule/configured/prepare.yml b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/knot/molecule/configured/prepare.yml new file mode 100644 index 0000000..d680e48 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/knot/molecule/configured/prepare.yml @@ -0,0 +1,49 @@ +--- + +- name: information + hosts: all + gather_facts: true + + pre_tasks: + - name: arch- / artixlinux + when: + - ansible_facts.distribution | lower == 'archlinux' or + ansible_facts.os_family | lower == 'artix linux' + block: + - name: update pacman system + ansible.builtin.command: | + pacman --refresh --sync --sysupgrade --noconfirm + register: pacman + changed_when: pacman.rc != 0 + failed_when: pacman.rc != 0 + + - name: create depends service + ansible.builtin.copy: + mode: 0755 + dest: /etc/init.d/net + content: | + #!/usr/bin/openrc-run + true + when: + - ansible_facts.os_family | lower == 'artix linux' + + - name: make sure python3-apt is installed (only debian based) + ansible.builtin.package: + name: + - python3-apt + state: present + when: + - ansible_facts.os_family | lower == 'debian' + + - name: update package cache + become: true + ansible.builtin.package: + update_cache: true + + - name: environment + ansible.builtin.debug: + msg: + - "os family : {{ ansible_facts.distribution }} ({{ ansible_facts.os_family }})" + - "distribution version : {{ ansible_facts.distribution_major_version }}" + - "ansible version : {{ 
ansible_version.full }}" + - "python version : {{ ansible_facts.python.version.major }}.{{ ansible_facts.python.version.minor }}" diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/knot/molecule/configured/tests/test_default.py b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/knot/molecule/configured/tests/test_default.py new file mode 100644 index 0000000..bf68cb5 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/knot/molecule/configured/tests/test_default.py @@ -0,0 +1,128 @@ +import os +import pprint + +import pytest +import testinfra.utils.ansible_runner +from ansible.parsing.dataloader import DataLoader +from ansible.template import Templar + +pp = pprint.PrettyPrinter() + +testinfra_hosts = testinfra.utils.ansible_runner.AnsibleRunner( + os.environ["MOLECULE_INVENTORY_FILE"] +).get_hosts("all") + + +def base_directory(): + cwd = os.getcwd() + + if "group_vars" in os.listdir(cwd): + directory = "../.." + molecule_directory = "." + else: + directory = "." + molecule_directory = "molecule/{}".format( + os.environ.get("MOLECULE_SCENARIO_NAME") + ) + + return directory, molecule_directory + + +@pytest.fixture() +def get_vars(host): + """ """ + base_dir, molecule_dir = base_directory() + distribution = host.system_info.distribution + operating_system = None + + if distribution in ["debian", "ubuntu"]: + operating_system = "debian" + elif distribution in ["redhat", "ol", "centos", "rocky", "almalinux"]: + operating_system = "redhat" + elif distribution in ["arch", "artix"]: + operating_system = f"{distribution}linux" + + file_defaults = f"file={base_dir}/defaults/main.yaml name=role_defaults" + file_vars = f"file={base_dir}/vars/main.yaml name=role_vars" + file_molecule = f"file={molecule_dir}/group_vars/all/vars.yaml name=test_vars" + file_distribution = ( + f"file={base_dir}/vars/{operating_system}.yaml name=role_distribution" + ) + + defaults_vars = ( + host.ansible("include_vars", file_defaults) + .get("ansible_facts") + .get("role_defaults") + ) + vars_vars = ( + host.ansible("include_vars", file_vars).get("ansible_facts").get("role_vars") + ) + distribution_vars = ( + host.ansible("include_vars", file_distribution) + .get("ansible_facts") + .get("role_distribution") + ) + molecule_vars = ( + host.ansible("include_vars", file_molecule) + .get("ansible_facts") + .get("test_vars") + ) + + ansible_vars = defaults_vars + ansible_vars.update(vars_vars) + ansible_vars.update(distribution_vars) + ansible_vars.update(molecule_vars) + + templar = Templar(loader=DataLoader(), variables=ansible_vars) + result = templar.template(ansible_vars, fail_on_undefined=False) + + return result + + +def test_packages(host): + """ """ + distribution = host.system_info.distribution + release = host.system_info.release + + print(f"distribution: {distribution}") + print(f"release : {release}") + + packages = [] + packages.append("knot") + + # artix is not supported + if distribution != "artix": + for package in packages: + p = host.package(package) + assert p.is_installed + + +@pytest.mark.parametrize("dirs", ["/etc/knot", "/var/lib/knot"]) +def test_directories(host, dirs): + + d = host.file(dirs) + assert d.is_directory + + +def test_service_running_and_enabled(host): + service = host.service("knot") + assert service.is_running + assert service.is_enabled + + +def test_listening_socket(host, get_vars): + """ """ + listening = host.socket.get_listening_sockets() + + for i in listening: + print(i) + + bind_address = "127.0.0.1" 
+ bind_port = 5353 + + listen = [] + listen.append(f"tcp://{bind_address}:{bind_port}") + + for spec in listen: + socket = host.socket(spec) + assert socket.is_listening diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/knot/molecule/default/converge.yml b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/knot/molecule/default/converge.yml new file mode 100644 index 0000000..6584cdd --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/knot/molecule/default/converge.yml @@ -0,0 +1,13 @@ +--- +- name: converge + hosts: all + any_errors_fatal: false + gather_facts: true + + environment: + NETRC: '' + + roles: + - role: bodsch.dns.knot + +... diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/knot/molecule/default/group_vars/all/vars.yml b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/knot/molecule/default/group_vars/all/vars.yml new file mode 100644 index 0000000..c81cf5b --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/knot/molecule/default/group_vars/all/vars.yml @@ -0,0 +1,3 @@ +--- + +... diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/knot/molecule/default/molecule.yml b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/knot/molecule/default/molecule.yml new file mode 100644 index 0000000..dbbfc67 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/knot/molecule/default/molecule.yml @@ -0,0 +1,56 @@ +--- + +role_name_check: 1 + + +dependency: + name: galaxy + +driver: + name: docker + +platforms: + - name: instance + image: "ghcr.io/bodsch/docker-ansible/ansible-${DISTRIBUTION:-debian:12}" + command: ${MOLECULE_DOCKER_COMMAND:-""} + docker_host: "${DOCKER_HOST:-unix:///run/docker.sock}" + privileged: true + pre_build_image: true + cgroupns_mode: host + volumes: + - /sys/fs/cgroup:/sys/fs/cgroup:rw + - /var/lib/containerd + capabilities: + - SYS_ADMIN + tmpfs: + - /run + - /tmp + +provisioner: + name: ansible + ansible_args: + - --diff + - -v + config_options: + defaults: + deprecation_warnings: true + callback_result_format: yaml + callbacks_enabled: profile_tasks + gathering: smart + fact_caching: jsonfile + fact_caching_timeout: 8640 + fact_caching_connection: ansible_facts + +scenario: + test_sequence: + - destroy + - dependency + - syntax + - create + - prepare + - converge + - verify + - destroy + +verifier: + name: testinfra diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/knot/molecule/default/prepare.yml b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/knot/molecule/default/prepare.yml new file mode 100644 index 0000000..d680e48 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/knot/molecule/default/prepare.yml @@ -0,0 +1,49 @@ +--- + +- name: information + hosts: all + gather_facts: true + + pre_tasks: + - name: arch- / artixlinux + when: + - ansible_facts.distribution | lower == 'archlinux' or + ansible_facts.os_family | lower == 'artix linux' + block: + - name: update pacman system + ansible.builtin.command: | + pacman --refresh --sync --sysupgrade --noconfirm + register: pacman + changed_when: pacman.rc != 0 + failed_when: pacman.rc != 0 + + - name: create depends service + ansible.builtin.copy: + mode: 0755 + dest: /etc/init.d/net + content: | + #!/usr/bin/openrc-run + true + when: + - ansible_facts.os_family | lower == 'artix linux' 
+ + - name: make sure python3-apt is installed (only debian based) + ansible.builtin.package: + name: + - python3-apt + state: present + when: + - ansible_facts.os_family | lower == 'debian' + + - name: update package cache + become: true + ansible.builtin.package: + update_cache: true + + - name: environment + ansible.builtin.debug: + msg: + - "os family : {{ ansible_facts.distribution }} ({{ ansible_facts.os_family }})" + - "distribution version : {{ ansible_facts.distribution_major_version }}" + - "ansible version : {{ ansible_version.full }}" + - "python version : {{ ansible_facts.python.version.major }}.{{ ansible_facts.python.version.minor }}" diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/knot/molecule/default/tests/test_default.py b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/knot/molecule/default/tests/test_default.py new file mode 100644 index 0000000..85b4f62 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/knot/molecule/default/tests/test_default.py @@ -0,0 +1,128 @@ +import os +import pprint + +import pytest +import testinfra.utils.ansible_runner +from ansible.parsing.dataloader import DataLoader +from ansible.template import Templar + +pp = pprint.PrettyPrinter() + +testinfra_hosts = testinfra.utils.ansible_runner.AnsibleRunner( + os.environ["MOLECULE_INVENTORY_FILE"] +).get_hosts("all") + + +def base_directory(): + cwd = os.getcwd() + + if "group_vars" in os.listdir(cwd): + directory = "../.." + molecule_directory = "." + else: + directory = "." + molecule_directory = "molecule/{}".format( + os.environ.get("MOLECULE_SCENARIO_NAME") + ) + + return directory, molecule_directory + + +@pytest.fixture() +def get_vars(host): + """ """ + base_dir, molecule_dir = base_directory() + distribution = host.system_info.distribution + operating_system = None + + if distribution in ["debian", "ubuntu"]: + operating_system = "debian" + elif distribution in ["redhat", "ol", "centos", "rocky", "almalinux"]: + operating_system = "redhat" + elif distribution in ["arch", "artix"]: + operating_system = f"{distribution}linux" + + file_defaults = f"file={base_dir}/defaults/main.yaml name=role_defaults" + file_vars = f"file={base_dir}/vars/main.yaml name=role_vars" + file_molecule = f"file={molecule_dir}/group_vars/all/vars.yaml name=test_vars" + file_distribution = ( + f"file={base_dir}/vars/{operating_system}.yaml name=role_distribution" + ) + + defaults_vars = ( + host.ansible("include_vars", file_defaults) + .get("ansible_facts") + .get("role_defaults") + ) + vars_vars = ( + host.ansible("include_vars", file_vars).get("ansible_facts").get("role_vars") + ) + distribution_vars = ( + host.ansible("include_vars", file_distribution) + .get("ansible_facts") + .get("role_distribution") + ) + molecule_vars = ( + host.ansible("include_vars", file_molecule) + .get("ansible_facts") + .get("test_vars") + ) + + ansible_vars = defaults_vars + ansible_vars.update(vars_vars) + ansible_vars.update(distribution_vars) + ansible_vars.update(molecule_vars) + + templar = Templar(loader=DataLoader(), variables=ansible_vars) + result = templar.template(ansible_vars, fail_on_undefined=False) + + return result + + +def test_packages(host): + """ """ + distribution = host.system_info.distribution + release = host.system_info.release + + print(f"distribution: {distribution}") + print(f"release : {release}") + + packages = [] + packages.append("knot") + + # artix is not supported + if distribution != "artix": + for package in 
packages: + p = host.package(package) + assert p.is_installed + + +@pytest.mark.parametrize("dirs", ["/etc/knot", "/var/lib/knot"]) +def test_directories(host, dirs): + + d = host.file(dirs) + assert d.is_directory + + +def test_service_running_and_enabled(host): + service = host.service("knot") + assert service.is_running + assert service.is_enabled + + +def test_listening_socket(host, get_vars): + """ """ + listening = host.socket.get_listening_sockets() + + for i in listening: + print(i) + + bind_address = "127.0.0.1" + bind_port = 53 + + listen = [] + listen.append(f"tcp://{bind_address}:{bind_port}") + + for spec in listen: + socket = host.socket(spec) + assert socket.is_listening diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/knot/tasks/configure.yml b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/knot/tasks/configure.yml new file mode 100644 index 0000000..17a62a4 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/knot/tasks/configure.yml @@ -0,0 +1,35 @@ +--- + +- name: re-read ansible facts to get the latest information + ansible.builtin.setup: + +- name: update configuration + ansible.builtin.template: + src: etc/knot/knot.conf.j2 + dest: /etc/knot/knot.conf + # owner: root + group: "{{ knot_group }}" + mode: "0640" + validate: "{{ knot_knotc }} -c %s conf-check" + backup: true + notify: + - knot restart + +- name: handle zone configs + bodsch.dns.knot_zone: + state: "{{ item.value.state | default('present') }}" + database_path: "{{ knot_database }}" + zone: "{{ item.key }}" + zone_ttl: "{{ item.value.ttl | default('3600') }}" + zone_soa: "{{ item.value.soa | default({}) }}" + name_servers: "{{ item.value.name_servers | default({}) }}" + records: "{{ item.value.records | default({}) }}" + loop: + "{{ knot_zones | dict2items }}" + loop_control: + label: "{{ item.key }}" + notify: + - knot reload + register: _knot_zone + +... 
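Since `tasks/configure.yml` passes `item.value.state | default('present')` to `bodsch.dns.knot_zone` for every entry of `knot_zones`, a zone can be removed again by overriding `state` inside the same mapping; a minimal sketch (the zone name is hypothetical):

```yaml
# setting state: absent hands the zone to knot_zone for removal;
# all other zone keys (ttl, soa, records, ...) can be omitted
knot_zones:
  old.molecule.local:
    state: absent
```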
diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/knot/tasks/install.yml b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/knot/tasks/install.yml new file mode 100644 index 0000000..f889ef8 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/knot/tasks/install.yml @@ -0,0 +1,51 @@ +--- + +- name: installing knot-dns + ansible.builtin.package: + name: "{{ knot_packages }}" + state: present + +- name: python support + when: + - knot_python_packages is defined + - knot_python_packages | count > 0 + block: + - name: create pip requirements file + bodsch.core.pip_requirements: + name: knot + requirements: "{{ knot_python_packages }}" + register: pip_requirements + + - name: fail if pip not installed + ansible.builtin.fail: + msg: python pip is not installed + when: + - not pip_requirements.pip.present + + - name: install knot python packages # noqa no-handler + ansible.builtin.pip: + state: present + requirements: "{{ pip_requirements.requirements_file }}" + extra_args: "{{ knot_python_extra_args | default([]) | bodsch.core.python_extra_args(python_version=ansible_facts.python.version) | default(omit) }}" + register: pip_install + ignore_errors: true + no_log: true + when: + - pip_requirements.requirements_file is defined + - pip_requirements.changed + + - name: re-read ansible facts to get the latest information + ansible.builtin.setup: + + rescue: + - name: remove python requirements file + bodsch.core.pip_requirements: + name: knot + requirements: "{{ knot_python_packages }}" + state: absent + + - name: exit with fail + ansible.builtin.fail: + msg: Sorry, but a serious error occurred when installing the required Python packages. + +... diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/knot/tasks/main.yml b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/knot/tasks/main.yml new file mode 100644 index 0000000..90583d6 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/knot/tasks/main.yml @@ -0,0 +1,18 @@ +--- + +- name: prepare + ansible.builtin.include_tasks: prepare.yml + +- name: repositories + ansible.builtin.include_tasks: repositories.yml + +- name: install + ansible.builtin.include_tasks: install.yml + +- name: configure + ansible.builtin.include_tasks: configure.yml + +- name: service + ansible.builtin.include_tasks: service.yml + +... diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/knot/tasks/prepare.yml b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/knot/tasks/prepare.yml new file mode 100644 index 0000000..0466ad8 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/knot/tasks/prepare.yml @@ -0,0 +1,35 @@ +--- + +- name: include OS specific configuration + ansible.builtin.include_vars: "{{ lookup('first_found', params) }}" + vars: + params: + paths: + - "vars" + files: + # eg. debian-10 / ubuntu-20.04 / centos-8 / oraclelinux-8 + - "{{ ansible_facts.distribution | lower }}-{{ ansible_facts.distribution_major_version }}.yml" + # eg. archlinux-systemd / archlinux-openrc + - "{{ ansible_facts.distribution | lower }}-{{ ansible_facts.service_mgr | lower }}.yml" + # eg. debian / ubuntu / centos / oraclelinux + - "{{ ansible_facts.distribution | lower }}.yml" + # eg. 
redhat / debian / archlinux + - "{{ ansible_facts.os_family | lower }}.yml" + # artixlinux + - "{{ ansible_facts.os_family | lower | replace(' ', '') }}.yml" + - default.yml + skip: true + +- name: install dependencies + ansible.builtin.package: + name: "{{ knot_dependencies }}" + state: present + when: + - knot_dependencies is defined + - knot_dependencies | count > 0 + +- name: merge knot configuration between defaults and custom + ansible.builtin.set_fact: + knot_config: "{{ knot_defaults_config | combine(knot_config, recursive=True) }}" + +... diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/knot/tasks/repositories.yml b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/knot/tasks/repositories.yml new file mode 100644 index 0000000..8d669ab --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/knot/tasks/repositories.yml @@ -0,0 +1,37 @@ +--- + +- name: add CZ.NIC apt sources + when: + - ansible_facts.os_family | lower == 'debian' + bodsch.core.apt_sources: + name: cznic-labs-knot-dns + filename: cznic-labs-knot-dns.sources + uris: + - https://pkg.labs.nic.cz/knot-dns + suites: "{{ ansible_facts.distribution_release | lower }}" + components: + - main + signed_by: /usr/share/keyrings/cznic-labs-pkg.gpg + key: + method: download + url: "https://pkg.labs.nic.cz/gpg" + dest: "/usr/share/keyrings/cznic-labs-pkg.gpg" + dearmor: true + validate: true + update_cache: true + +- name: add knot ppa for ubuntu {{ ansible_facts.distribution_release }} + when: + - ansible_facts.distribution | lower == 'ubuntu' + ansible.builtin.apt_repository: + repo: ppa:cz.nic-labs/knot-dns-latest + codename: "{{ ansible_facts.distribution_release }}" + state: present + update_cache: true + validate_certs: false + +- name: update package cache + ansible.builtin.package: + update_cache: true + +... diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/knot/tasks/service.yml b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/knot/tasks/service.yml new file mode 100644 index 0000000..5a973f7 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/knot/tasks/service.yml @@ -0,0 +1,9 @@ +--- + +- name: start and enable knot service + ansible.builtin.service: + name: "knot" + state: started + enabled: true + +...
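On a Debian 12 host, for example, the `first_found` lookup in prepare.yml above would probe the candidate files in the order below and include only the first one that exists (a sketch of the search order, not role output):

```yaml
# Evaluated top to bottom; only the first existing file is loaded:
#   vars/debian-12.yml          # distribution + major version
#   vars/debian-systemd.yml     # distribution + service manager
#   vars/debian.yml             # distribution        -> shipped by this role
#   vars/debian.yml             # os_family (identical on Debian)
#   vars/default.yml            # fallback; skip: true avoids a hard failure
```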
diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/knot/templates/etc/knot/knot.conf.j2 b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/knot/templates/etc/knot/knot.conf.j2 new file mode 100644 index 0000000..f9a4ab3 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/knot/templates/etc/knot/knot.conf.j2 @@ -0,0 +1,197 @@ +#jinja2: trim_blocks: True, lstrip_blocks: True +# {{ ansible_managed }} + +{% if knot_config %} + {% set _server = knot_config.server %} +server: + {% if _server.identity is defined and _server.identity | string | length > 0 %} + identity: {{ _server.identity }} + {% endif %} + {% if _server.version is defined and _server.version | string | length > 0 %} + version: {{ _server.version }} + {% endif %} + {% if _server.nsid is defined and _server.nsid | string | length > 0 %} + nsid: {{ _server.nsid }} + {% endif %} + {% if _server.rundir is defined and _server.rundir | string | length > 0 %} + rundir: {{ _server.rundir }} + {% endif %} + user: {{ knot_user }}:{{ knot_group }} + {% if _server.pidfile is defined and _server.pidfile | string | length > 0 %} + pidfile: {{ _server.pidfile }} + {% endif %} + {% if _server.udp_workers is defined and _server.udp_workers | string | length > 0 %} + udp-workers: {{ _server.udp_workers }} + {% endif %} + {% if _server.tcp_workers is defined and _server.tcp_workers | string | length > 0 %} + tcp-workers: {{ _server.tcp_workers }} + {% endif %} + {% if _server.background_workers is defined and _server.background_workers | string | length > 0 %} + background-workers: {{ _server.background_workers }} + {% endif %} + {% if _server.async_start is defined and _server.async_start | string | length > 0 %} + async-start: {{ _server.async_start | bool | bodsch.core.config_bool(true_as='on', false_as='off') }} + {% endif %} + {% if _server.tcp_idle_timeout is defined and _server.tcp_idle_timeout | string | length > 0 %} + tcp-idle-timeout: {{ _server.tcp_idle_timeout }} + {% endif %} + {% if _server.tcp_io_timeout is defined and _server.tcp_io_timeout | string | length > 0 %} + tcp-io-timeout: {{ _server.tcp_io_timeout }} + {% endif %} + {% if _server.tcp_remote_io_timeout is defined and _server.tcp_remote_io_timeout | string | length > 0 %} + tcp-remote-io-timeout: {{ _server.tcp_remote_io_timeout }} + {% endif %} + {% if _server.tcp_max_clients is defined and _server.tcp_max_clients | string | length > 0 %} + tcp-max-clients: {{ _server.tcp_max_clients }} + {% endif %} + {% if _server.tcp_reuseport is defined and _server.tcp_reuseport | string | length > 0 %} + tcp-reuseport: {{ _server.tcp_reuseport }} + {% endif %} + {% if _server.socket_affinity is defined and _server.socket_affinity | string | length > 0 %} + socket-affinity: {{ _server.socket_affinity }} + {% endif %} + {% if _server.udp_max_payload is defined and _server.udp_max_payload | string | length > 0 %} + udp-max-payload: {{ _server.udp_max_payload }} + {% endif %} + {% if _server.udp_max_payload_ipv4 is defined and _server.udp_max_payload_ipv4 | string | length > 0 %} + udp-max-payload-ipv4: {{ _server.udp_max_payload_ipv4 }} + {% endif %} + {% if _server.udp_max_payload_ipv6 is defined and _server.udp_max_payload_ipv6 | string | length > 0 %} + udp-max-payload-ipv6: {{ _server.udp_max_payload_ipv6 }} + {% endif %} + {% if _server.edns_client_subnet is defined and _server.edns_client_subnet | string | length > 0 %} + edns-client-subnet: {{ _server.edns_client_subnet }} + {% endif %} + {% if 
_server.answer_rotation is defined and _server.answer_rotation | string | length > 0 %} + answer-rotation: {{ _server.answer_rotation }} + {% endif %} + {% if _server.listen %} + {% for listen in _server.listen %} + listen: {{ listen }} + {% endfor %} + {% endif %} + {% if _server.listen_xdp is defined and _server.listen_xdp | string | length > 0 %} + listen-xdp: {{ _server.listen_xdp }} + {% endif %} + {% if knot_config.log %} + +log: + {% set _valid_target = ["stdout", "stderr", "syslog"] %} + {% set _valid_level = ["critical", "error", "warning", "notice", "info", "debug"] %} + {% set _valid_keys = ["target", "server", "control", "zone", "any"] %} + {% for k, v in knot_config.log.items() %} + - target: {{ k }} + {% for x, y in v.items() %} + {% if y not in _valid_level %} + {% set y = "info" %} + {% endif %} + {% if x in _valid_keys %} + {{ x }}: {{ y }} + {% endif %} + {% endfor %} + {% endfor %} + {% endif %} + {% if knot_config.key %} + +key: + {% for k, v in knot_config.key.items() %} + - id: {{ k }} + {% for x,y in v.items() %} + {{ x }}: {{ y }} + {% endfor %} + {% endfor %} + {% endif %} + {% if knot_config.database %} + +database: + {% for x,y in knot_config.database.items() %} + {{ x | replace('_', '-') }}: {{ y }} + {% endfor %} + {% endif %} + {% if knot_config.remote %} + +remote: + {% for k, v in knot_config.remote.items() %} + - id: {{ k }} + {% for x,y in v.items() %} + {{ x }}: {{ y }} + {% endfor %} + {% endfor %} + {% endif %} + {% if knot_config.acl %} + +acl: + {% for k, v in knot_config.acl.items() %} + - id: {{ k }} + {% for x,y in v.items() %} + {{ x }}: {{ y }} + {% endfor %} + {% endfor %} + {% endif %} + {% if knot_config.template %} + +{% if knot_config.mod_synthrecord %} + +mod-synthrecord: + {% for k, v in knot_config.mod_synthrecord.items() %} + - id: {{ k }} + {% for x,y in v.items() %} + {{ x }}: {{ y }} + {% endfor %} + {% endfor %} + {% endif %} + +template: + {% for k, v in knot_config.template.items() %} + - id: {{ k }} + {% for x,y in v.items() %} + {{ x }}: {{ y }} + {% endfor %} + {% endfor %} + {% endif %} + {% if knot_config.zone %} + +zone: + {% for k, v in knot_config.zone.items() %} + - domain: {{ k }} + {% for x,y in v.items() %} + {{ x }}: {{ y }} + {% endfor %} + {% endfor %} + {% endif %} + {% if knot_config.control %} + +control: + {% for x,y in knot_config.control.items() %} + {{ x }}: {{ y }} + {% endfor %} + {% endif %} + {% if knot_config.statistics %} + +statistics: + {% for x,y in knot_config.statistics.items() %} + {{ x }}: {{ y }} + {% endfor %} + {% endif %} + {% if knot_config.submission %} + +submission: + {% for k, v in knot_config.submission.items() %} + - id: {{ k }} + {% for x,y in v.items() %} + {{ x }}: {{ y }} + {% endfor %} + {% endfor %} + {% endif %} + {% if knot_config.policy %} + +policy: + {% for k, v in knot_config.policy.items() %} + - id: {{ k }} + {% for x,y in v.items() %} + {{ x }}: {{ y }} + {% endfor %} + {% endfor %} + {% endif %} + +{% endif %} diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/knot/vars/archlinux-openrc.yml b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/knot/vars/archlinux-openrc.yml new file mode 100644 index 0000000..f4fd547 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/knot/vars/archlinux-openrc.yml @@ -0,0 +1,13 @@ +--- + +knot_packages: + - knot-openrc + +knot_dependencies: + - python-pip + +knot_python_packages: + - name: jinja2 + version: 3.0.1 + +... 
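With the role defaults from vars/main.yml (shown next), the template above should render roughly the following knot.conf; the empty default sections (remote, acl, zone, statistics, submission, policy) are skipped by their `{% if %}` guards. This is an approximation for orientation, not verified output:

```yaml
# Approximate rendering of etc/knot/knot.conf.j2 with the role defaults.
server:
  rundir: /run/knot
  user: knot:knot
  listen: 127.0.0.1@53

log:
  - target: syslog
    any: info

database:
  storage: /var/lib/knot

template:
  - id: default
    storage: /var/lib/knot
    file: %s.zone

control:
  listen: /run/knot/knot.sock
  timeout: 5
```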
diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/knot/vars/archlinux.yml b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/knot/vars/archlinux.yml new file mode 100644 index 0000000..6249212 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/knot/vars/archlinux.yml @@ -0,0 +1,6 @@ +--- + +knot_packages: + - knot + +... diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/knot/vars/artixlinux.yml b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/knot/vars/artixlinux.yml new file mode 100644 index 0000000..f4fd547 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/knot/vars/artixlinux.yml @@ -0,0 +1,13 @@ +--- + +knot_packages: + - knot-openrc + +knot_dependencies: + - python-pip + +knot_python_packages: + - name: jinja2 + version: 3.0.1 + +... diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/knot/vars/debian.yml b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/knot/vars/debian.yml new file mode 100644 index 0000000..e484f0a --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/knot/vars/debian.yml @@ -0,0 +1,7 @@ +--- + +knot_dependencies: + - gpg + - iproute2 + +... diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/knot/vars/main.yml b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/knot/vars/main.yml new file mode 100644 index 0000000..af63059 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/knot/vars/main.yml @@ -0,0 +1,188 @@ +--- + +knot_dependencies: [] + +knot_rundir: /run/knot +knot_user: knot +knot_group: knot + +knot_database: /var/lib/knot + +knot_knotc: /usr/sbin/knotc + +knot_defaults_config: + # https://www.knot-dns.cz/docs/3.0/singlehtml/#server-section + server: + rundir: "{{ knot_rundir }}" + user: "{{ knot_user }}:{{ knot_group }}" + listen: + - '127.0.0.1@53' + # identity: [STR] + # version: [STR] + # nsid: [STR|HEXSTR] + # pidfile: STR + # udp-workers: INT + # tcp-workers: INT + # background-workers: INT + # async-start: BOOL + # tcp-idle-timeout: TIME + # tcp-io-timeout: INT + # tcp-remote-io-timeout: INT + # tcp-max-clients: INT + # tcp-reuseport: BOOL + # socket-affinity: BOOL + # udp-max-payload: SIZE + # udp-max-payload-ipv4: SIZE + # udp-max-payload-ipv6: SIZE + # edns-client-subnet: BOOL + # answer-rotation: BOOL + # listen-xdp: STR[@INT] | ADDR[@INT] ... + + # https://www.knot-dns.cz/docs/3.0/singlehtml/#logging-section + log: + syslog: + any: info + # server: critical | error | warning | notice | info | debug + # control: critical | error | warning | notice | info | debug + # zone: critical | error | warning | notice | info | debug + # any: critical | error | warning | notice | info | debug + + # https://www.knot-dns.cz/docs/3.0/singlehtml/#database-section + database: + storage: "{{ knot_database }}" + # journal-db: STR + # journal-db-mode: robust | asynchronous + # journal-db-max-size: SIZE + # kasp-db: STR + # kasp-db-max-size: SIZE + # timer-db: STR + # timer-db-max-size: SIZE + # catalog-db: STR + # catalog-db-max-size: SIZE + + # https://www.knot-dns.cz/docs/3.0/singlehtml/#remote-section + remote: {} + + # https://www.knot-dns.cz/docs/3.0/singlehtml/#acl-section + acl: {} + # STR: + # address: ADDR[/INT] | ADDR-ADDR ... + # key: key_id ... + # remote: remote_id ... + # action: notify | transfer | update ...
+ # deny: BOOL + # update-type: STR ... + # update-owner: key | zone | name + # update-owner-match: sub-or-equal | equal | sub + # update-owner-name: STR ... + + # https://www.knot-dns.cz/docs/3.1/html/modules.html#synthrecord-automatic-forward-reverse-records + mod_synthrecord: {} + # mod_synthrecord: + # STR + # type: forward | reverse + # prefix: STR + # origin: DNAME + # ttl: INT + # network: ADDR[/INT] | ADDR-ADDR ... + # reverse-short: BOOL + + # https://www.knot-dns.cz/docs/3.0/singlehtml/#template-section + template: + default: + storage: "{{ knot_database }}" + file: "%s.zone" + + # https://www.knot-dns.cz/docs/3.0/singlehtml/#zone-section + zone: {} + # DOMAIN: + # template: template_id + # storage: STR + # file: STR + # master: remote_id ... + # ddns-master: remote_id + # notify: remote_id ... + # acl: acl_id ... + # semantic-checks: BOOL + # zonefile-sync: TIME + # zonefile-load: none | difference | difference-no-serial | whole + # journal-content: none | changes | all + # journal-max-usage: SIZE + # journal-max-depth: INT + # zone-max-size : SIZE + # adjust-threads: INT + # dnssec-signing: BOOL + # dnssec-validation: BOOL + # dnssec-policy: policy_id + # serial-policy: increment | unixtime | dateserial + # refresh-min-interval: TIME + # refresh-max-interval: TIME + # catalog-role: none | interpret + # catalog-template: template_id + # module: STR/STR ... + + # https://www.knot-dns.cz/docs/3.0/singlehtml/#control-section + control: + listen: /run/knot/knot.sock + timeout: 5 + + # https://www.knot-dns.cz/docs/3.0/singlehtml/#statistics-section + statistics: {} + # timer: + # file: + # append: + + # https://www.knot-dns.cz/docs/3.0/singlehtml/#key-section + # https://gist.github.com/amphineko/615618f8026ddd4faad52c75ea9daeb0 + key: + # STR: + # hmac-md5 | hmac-sha1 | hmac-sha224 | hmac-sha256 | hmac-sha384 | hmac-sha512 + # algorithm: hmac-sha256 + # secret: + + # https://www.knot-dns.cz/docs/3.0/singlehtml/#submission-section + submission: {} + # STR: + # parent: remote_id ... + # check-interval: TIME + # timeout: TIME + + # https://www.knot-dns.cz/docs/3.0/singlehtml/#policy-section + policy: {} + # STR: + # keystore: keystore_id + # manual: BOOL + # single-type-signing: BOOL + # algorithm: rsasha1 | rsasha1-nsec3-sha1 | rsasha256 | rsasha512 | ecdsap256sha256 | ecdsap384sha384 | ed25519 | ed448 + # ksk-size: SIZE + # zsk-size: SIZE + # ksk-shared: BOOL + # dnskey-ttl: TIME + # zone-max-ttl: TIME + # ksk-lifetime: TIME + # zsk-lifetime: TIME + # propagation-delay: TIME + # rrsig-lifetime: TIME + # rrsig-refresh: TIME + # rrsig-pre-refresh: TIME + # reproducible-signing: BOOL + # nsec3: BOOL + # nsec3-iterations: INT + # nsec3-opt-out: BOOL + # nsec3-salt-length: INT + # nsec3-salt-lifetime: TIME + # signing-threads: INT + # ksk-submission: submission_id + # ds-push: remote_id + # cds-cdnskey-publish: none | delete-dnssec | rollover | always | double-ds + # cds-digest-type: sha256 | sha384 + # offline-ksk: BOOL + # unsafe-operation: none | no-check-keyset | no-update-dnskey | no-update-nsec | no-update-expired ... + +knot_packages: + - knot + - knot-dnsutils + +knot_python_packages: [] + +... 
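Because prepare.yml merges user input over these defaults with `combine(knot_config, recursive=True)`, a playbook only needs to set the keys it wants to change; nested dictionaries are deep-merged, while lists such as `server.listen` are replaced wholesale. A hedged example override (values are illustrative):

```yaml
# Hypothetical group_vars override, merged recursively over knot_defaults_config:
knot_config:
  server:
    listen:
      - '192.0.2.10@53'     # replaces the default 127.0.0.1@53 list entirely
  log:
    syslog:
      any: warning          # deep-merged into the default log section
```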
diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/knot/vars/ubuntu.yml b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/knot/vars/ubuntu.yml new file mode 100644 index 0000000..ed3d6fd --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/knot/vars/ubuntu.yml @@ -0,0 +1,8 @@ +--- + +knot_dependencies: + - gpg + - iproute2 + - python3-pip + +... diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/knot_resolver/.ansible-lint b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/knot_resolver/.ansible-lint new file mode 100644 index 0000000..48763f0 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/knot_resolver/.ansible-lint @@ -0,0 +1,5 @@ +--- + +skip_list: + - name[casing] + - name[template] diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/knot_resolver/.editorconfig b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/knot_resolver/.editorconfig new file mode 100644 index 0000000..898cdbd --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/knot_resolver/.editorconfig @@ -0,0 +1,23 @@ +# EditorConfig helps developers define and maintain consistent +# coding styles between different editors and IDEs +# https://editorconfig.org/ + + +root = true + +[*] +indent_style = space +indent_size = 2 + +end_of_line = lf +charset = utf-8 +trim_trailing_whitespace = true +insert_final_newline = true +max_line_length = 100 + +[*.py] +indent_style = space +indent_size = 4 + +[*.md] +trim_trailing_whitespace = false diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/knot_resolver/.flake8 b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/knot_resolver/.flake8 new file mode 100644 index 0000000..1962f7e --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/knot_resolver/.flake8 @@ -0,0 +1,19 @@ +[flake8] + +# E221 multiple spaces before operator +# E251 unexpected spaces around keyword / parameter equals + +ignore = E221,E251 + +exclude = + # No need to traverse our git directory + .git, + # There's no value in checking cache directories + __pycache__, + .tox + +# E203: https://github.com/python/black/issues/315 +# ignore = D,E741,W503,W504,H,E501,E203 + +max-line-length = 195 + diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/knot_resolver/.gitignore b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/knot_resolver/.gitignore new file mode 100644 index 0000000..3d35e6a --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/knot_resolver/.gitignore @@ -0,0 +1,6 @@ +.tox +.galaxy_install_info +*kate-swp +__pycache__ +.cache +.directory diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/knot_resolver/.yamllint b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/knot_resolver/.yamllint new file mode 100644 index 0000000..20fd7aa --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/knot_resolver/.yamllint @@ -0,0 +1,40 @@ +--- +# Based on ansible-lint config +extends: default + +rules: + braces: + max-spaces-inside: 1 + level: error + brackets: + max-spaces-inside: 1 + level: error + colons: + max-spaces-after: -1 + level: error + commas: + max-spaces-after: -1 + level: error + comments: disable + comments-indentation: disable + 
document-start: disable + empty-lines: + max: 3 + level: error + hyphens: + level: error + indentation: + spaces: 2 + key-duplicates: enable + line-length: + max: 195 + level: warning + new-line-at-end-of-file: disable + new-lines: + type: unix + trailing-spaces: disable + truthy: disable + +ignore: | + molecule/ + .github diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/knot_resolver/LICENSE b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/knot_resolver/LICENSE new file mode 100644 index 0000000..8c8472f --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/knot_resolver/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. 
The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright 2020-2021 Bodo Schulz + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/knot_resolver/Makefile b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/knot_resolver/Makefile new file mode 100644 index 0000000..3abaf48 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/knot_resolver/Makefile @@ -0,0 +1,22 @@ +# +export TOX_SCENARIO ?= default +export TOX_ANSIBLE ?= ansible_6.1 + +.PHONY: converge destroy verify test lint + +default: converge + +converge: + @hooks/converge + +destroy: + @hooks/destroy + +verify: + @hooks/verify + +test: + @hooks/test + +lint: + @hooks/lint diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/knot_resolver/README.md b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/knot_resolver/README.md new file mode 100644 index 0000000..2c2b750 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/knot_resolver/README.md @@ -0,0 +1,96 @@ + +# Ansible Role: `knot-resolver` + +This role will fully configure and install [knot-resolver](https://github.com/CZ-NIC/knot-resolver). 
+ +[pkg.labs.nic.cz](https://pkg.labs.nic.cz/doc/?project=knot-resolver) + +[![GitHub Workflow Status](https://img.shields.io/github/actions/workflow/status/bodsch/ansible-knot-resolver/main.yml?branch=main)][ci] +[![GitHub issues](https://img.shields.io/github/issues/bodsch/ansible-knot-resolver)][issues] +[![GitHub release (latest by date)](https://img.shields.io/github/v/release/bodsch/ansible-knot-resolver)][releases] +[![Ansible Quality Score](https://img.shields.io/ansible/quality/50067?label=role%20quality)][quality] + +[ci]: https://github.com/bodsch/ansible-knot-resolver/actions +[issues]: https://github.com/bodsch/ansible-knot-resolver/issues?q=is%3Aopen+is%3Aissue +[releases]: https://github.com/bodsch/ansible-knot-resolver/releases +[quality]: https://galaxy.ansible.com/bodsch/knot_resolver + + +## Requirements & Dependencies + +None known. + +### Operating systems + +Tested on + +* ArchLinux +* Debian based + - Debian 10 / 11 / 12 + - Ubuntu 20.04 / 22.04 + + +## Configuration + +### Defaults + +```yaml +knot_resolver_support_ipv6: false + +knot_resolver_listener: [] + +knot_resolver_systemd: + instances: 2 + unit: knot-resolver + +knot_resolver_cache: + directory: /var/cache/knot-resolver + max_ttl: 3600 + size: 200 + garbage_interval: 1000 + +knot_resolver_internal_domain: [] + +knot_resolver_views: [] +``` + +### Listener + +```yaml +knot_resolver_listener: + - name: localhost + interfaces: + - eth0 + ips: + - '127.0.0.1' + port: 53 + options: + tls: false +``` + + +### Internal domains + +```yaml +knot_resolver_internal_domain: + - domains: + - 'molecule.lan' + - 'matrix.lan' + - '0.172.in-addr.arpa' + policy: + stub: '127.0.0.1@5353' +``` + +### Views + +```yaml +knot_resolver_views: + - pass: + - '127.0.0.0/8' + - '192.168.0.0/24' + - drop: + - '0.0.0.0/0' +``` + +## Author + +- Bodo Schulz + +## License + +[Apache](LICENSE) + +**FREE SOFTWARE, HELL YEAH!** diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/knot_resolver/defaults/main.yaml b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/knot_resolver/defaults/main.yaml new file mode 100644 index 0000000..b8d0501 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/knot_resolver/defaults/main.yaml @@ -0,0 +1,32 @@ +--- + +knot_resolver_support_ipv6: false + +knot_resolver_listener: [] + +knot_resolver_cache: + directory: /var/cache/knot-resolver + max_ttl: 3600 + size: 200 + garbage_interval: 1000 + +knot_resolver_log: {} + +knot_resolver_prometheus: {} + +knot_resolver_predict: {} + +knot_resolver_systemd: + instances: 2 + unit: knot-resolver + +knot_resolver_trusted: + dns_server: "k.root-servers.net" + signing_key: "257" + keyfile: /etc/trusted-key.key + +knot_resolver_internal_domain: [] + +knot_resolver_views: [] + +...
diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/knot_resolver/handlers/main.yaml b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/knot_resolver/handlers/main.yaml new file mode 100644 index 0000000..213c726 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/knot_resolver/handlers/main.yaml @@ -0,0 +1,28 @@ +--- + +- name: restart kresd (systemd) + listen: "restart kresd" + ansible.builtin.service: + name: "{{ item }}" + state: restarted + loop: "{{ knot_resolver_service }}" + when: + - ansible_facts.service_mgr | lower == 'systemd' + +- name: restart kresd (openrc) + listen: "restart kresd" + ansible.builtin.service: + name: kresd + state: restarted + when: + - ansible_facts.service_mgr | lower == 'openrc' + +- name: restart kres-cache-gc (openrc) + listen: "restart kres-cache-gc" + ansible.builtin.service: + name: kres-cache-gc + state: restarted + when: + - ansible_facts.service_mgr | lower == 'openrc' + +... diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/knot_resolver/hooks/converge b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/knot_resolver/hooks/converge new file mode 100755 index 0000000..0c50932 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/knot_resolver/hooks/converge @@ -0,0 +1,3 @@ +#!/usr/bin/env bash + +hooks/tox.sh "converge" diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/knot_resolver/hooks/destroy b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/knot_resolver/hooks/destroy new file mode 100755 index 0000000..b4a3f8d --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/knot_resolver/hooks/destroy @@ -0,0 +1,3 @@ +#!/usr/bin/env bash + +hooks/tox.sh "destroy" diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/knot_resolver/hooks/lint b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/knot_resolver/hooks/lint new file mode 100755 index 0000000..ef226a0 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/knot_resolver/hooks/lint @@ -0,0 +1,3 @@ +#!/usr/bin/env bash + +hooks/tox.sh "lint" diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/knot_resolver/hooks/molecule.rc b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/knot_resolver/hooks/molecule.rc new file mode 100644 index 0000000..78c8621 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/knot_resolver/hooks/molecule.rc @@ -0,0 +1,74 @@ + +TOX_ARGS= + +if [ -n "${TOX_SCENARIO}" ] +then + TOX_ARGS="--scenario-name ${TOX_SCENARIO}" +fi + +TOX_OPTS="-e ${TOX_ANSIBLE}" + +vercomp() { + + [[ $1 == $2 ]] && return 0 + v1=$(echo "$1" | sed -e 's|-|.|g') + v2=$(echo "$2" | sed -e 's|-|.|g') + + local IFS=. 
+ local i ver1=($v1) ver2=($v2) # split the dash-normalised copies, not the raw arguments + # fill empty fields in ver1 with zeros + for ((i=${#ver1[@]}; i<${#ver2[@]}; i++)) + do + ver1[i]=0 + done + + for ((i=0; i<${#ver1[@]}; i++)) + do + if [[ -z ${ver2[i]} ]] + then + # fill empty fields in ver2 with zeros + ver2[i]=0 + fi + if ((10#${ver1[i]} > 10#${ver2[i]})) + then + return 1 + fi + if ((10#${ver1[i]} < 10#${ver2[i]})) + then + return 2 + fi + done + return 0 +} + +install_collection() { + local collection="${1}" + + echo "Install the required collection '${collection}'" + ansible-galaxy collection install ${collection} +} + +remove_collection() { + + local collection="${1}" + + namespace="$(echo "${collection}" | cut -d '.' -f1)" + name="$(echo "${collection}" | cut -d '.' -f2)" + + collection="${HOME}/.ansible/collections/ansible_collections/${namespace}/${name}" + + rm \ + --recursive \ + --force \ + "${collection}" +} + +publish() { + + TOKEN="${HOME}/.ansible/galaxy_token" + + if [ -e "${TOKEN}" ] + then + ansible-galaxy import --token=$(cat "${TOKEN}") bodsch # "???" + fi +} diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/knot_resolver/hooks/test b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/knot_resolver/hooks/test new file mode 100755 index 0000000..2869139 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/knot_resolver/hooks/test @@ -0,0 +1,3 @@ +#!/usr/bin/env bash + +hooks/tox.sh "test" diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/knot_resolver/hooks/tox.sh b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/knot_resolver/hooks/tox.sh new file mode 100755 index 0000000..c93de29 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/knot_resolver/hooks/tox.sh @@ -0,0 +1,52 @@ +#!/usr/bin/env bash + +. hooks/molecule.rc + +TOX_TEST="${1}" + +if [ -f "./collections.yml" ] +then + for collection in $(grep -v "#" collections.yml | grep "^ - name: " | awk -F ': ' '{print $2}') + do + collections_installed="$(ansible-galaxy collection list | grep ${collection} 2> /dev/null)" + + if [ -z "${collections_installed}" ] + then + install_collection ${collection} + else + collection_version=$(echo "${collections_installed}" | awk -F ' ' '{print $2}') + version="$(grep -v "#" collections.yml | grep -A1 "^ - name: ${collection}" | grep "version: " 2> /dev/null | awk -F ': ' '{print $2}' | sed -e 's|=||' -e 's|>||' -e 's|"||g')" + + echo "The required collection '${collection}' is installed in version ${collection_version}." + + if [ ! -z "${version}" ] + then + + vercomp "${version}" "${collection_version}" + + case $?
in + 0) op='=' ;; + 1) op='>' ;; + 2) op='<' ;; + esac + + if [[ $op = "=" ]] || [[ $op = ">" ]] + then + # echo "FAIL: Expected '$3', Actual '$op', Arg1 '$1', Arg2 '$2'" + echo "re-install for version ${version}" + + remove_collection ${collection} + install_collection ${collection} + else + : + # echo "Pass: '$1 $op $2'" + fi + else + : + fi + fi + done + echo "" +fi + +tox ${TOX_OPTS} -- molecule ${TOX_TEST} ${TOX_ARGS} diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/knot_resolver/hooks/verify b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/knot_resolver/hooks/verify new file mode 100755 index 0000000..5f436af --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/knot_resolver/hooks/verify @@ -0,0 +1,3 @@ +#!/usr/bin/env bash + +hooks/tox.sh "verify" diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/knot_resolver/meta/main.yaml b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/knot_resolver/meta/main.yaml new file mode 100644 index 0000000..2a40dd7 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/knot_resolver/meta/main.yaml @@ -0,0 +1,35 @@ +--- + +galaxy_info: + role_name: knot_resolver + namespace: bodsch + + author: Bodo Schulz + description: Installs, Configures and Manages knot resolver + + + license: Apache + min_ansible_version: "2.9" + + platforms: + - name: ArchLinux + - name: Debian + versions: + # 10 + - buster + - bullseye + - bookworm + - name: Ubuntu + versions: + # 20.04 + - focal + + galaxy_tags: + - dns + - resolver + - knot + - knotresolver + +dependencies: [] + +... diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/knot_resolver/molecule/configured/converge.yml b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/knot_resolver/molecule/configured/converge.yml new file mode 100644 index 0000000..acaf036 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/knot_resolver/molecule/configured/converge.yml @@ -0,0 +1,10 @@ +--- +- name: converge + hosts: all + any_errors_fatal: false + + environment: + NETRC: '' + + roles: + - role: bodsch.dns.knot_resolver diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/knot_resolver/molecule/configured/group_vars/all/knot.yaml b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/knot_resolver/molecule/configured/group_vars/all/knot.yaml new file mode 100644 index 0000000..b210fcb --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/knot_resolver/molecule/configured/group_vars/all/knot.yaml @@ -0,0 +1,48 @@ +--- + +knot_config: + server: + rundir: "/run/knot" + user: "{{ knot_user }}:{{ knot_group }}" + listen: + - '127.0.0.1@5353' + log: + syslog: + any: info + stderr: + server: critical + any: debug + database: + storage: "{{ knot_database }}" + template: + default: + storage: "{{ knot_database }}" + file: "%s.zone" + zone: + molecule.lan: {} + +knot_zones: + molecule.lan: + ttl: 3600 + soa: + primary_dns: 'dns.molecule.lan' + hostmaster: 'hostmaster.molecule.lan' + refresh: 6h + retry: 1h + expire: 1w + minimum: 1d + name_servers: + dns.molecule.lan: + ip: '{{ ansible_facts.default_ipv4.address }}' + records: + this.molecule.lan: + type: 'A' + ip: '{{ ansible_facts.default_ipv4.address }}' + docker.molecule.lan: + type: 'A' + ip: 192.168.110.10 + + there.molecule.lan: + type: 'CNAME' + target: 
'this.molecule.lan' +... diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/knot_resolver/molecule/configured/group_vars/all/pacman.yml b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/knot_resolver/molecule/configured/group_vars/all/pacman.yml new file mode 100644 index 0000000..b984dc6 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/knot_resolver/molecule/configured/group_vars/all/pacman.yml @@ -0,0 +1,10 @@ +--- + +pacman_custom_mirrors: + - file: /etc/pacman.d/mirrorlist-arch + "ARCH MIRRORS": + enabled: true + servers: + - http://mirror.i3d.net/pub/archlinux/$repo/os/$arch + +... diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/knot_resolver/molecule/configured/group_vars/all/syslog-ng.yml b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/knot_resolver/molecule/configured/group_vars/all/syslog-ng.yml new file mode 100644 index 0000000..c81cf5b --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/knot_resolver/molecule/configured/group_vars/all/syslog-ng.yml @@ -0,0 +1,3 @@ +--- + +... diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/knot_resolver/molecule/configured/group_vars/all/vars.yml b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/knot_resolver/molecule/configured/group_vars/all/vars.yml new file mode 100644 index 0000000..99abeef --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/knot_resolver/molecule/configured/group_vars/all/vars.yml @@ -0,0 +1,44 @@ +--- + +knot_resolver_listener: + - name: localhost + #interfaces: + # - eth0 + ips: + - '127.0.0.1' + port: 53 + # options: + # tls: true + - name: tls + ips: + - '127.0.0.1' + port: 853 + options: + kind: tls + - name: webmgmt + ips: + - '127.0.0.1' + port: 8453 + options: + kind: webmgmt + +knot_resolver_internal_domain: + - domains: + - 'molecule.lan' + - '0.172.in-addr.arpa' + policy: + stub: '127.0.0.1@5353' + +knot_resolver_cache: + # directory: /var/cache/knot-resolver + max_ttl: 1200 + size: 100 + +knot_resolver_views: + - pass: + - '127.0.0.0/8' + - '192.168.0.0/24' + - drop: + - '0.0.0.0/0' + +... 
diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/knot_resolver/molecule/configured/molecule.yml b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/knot_resolver/molecule/configured/molecule.yml new file mode 100644 index 0000000..3fb1c8f --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/knot_resolver/molecule/configured/molecule.yml @@ -0,0 +1,54 @@ +--- + +role_name_check: 1 + + +dependency: + name: galaxy + +driver: + name: docker + +platforms: + - name: instance + image: "ghcr.io/bodsch/docker-ansible/ansible-${DISTRIBUTION:-debian:12}" + command: ${MOLECULE_DOCKER_COMMAND:-""} + docker_host: "${DOCKER_HOST:-unix:///run/docker.sock}" + privileged: true + pre_build_image: true + cgroupns_mode: host + volumes: + - /sys/fs/cgroup:/sys/fs/cgroup:rw + - /var/lib/containerd + capabilities: + - SYS_ADMIN + tmpfs: + - /run + - /tmp + +provisioner: + name: ansible + ansible_args: + - --diff + - -v + config_options: + defaults: + deprecation_warnings: true + callback_result_format: yaml + callbacks_enabled: profile_tasks + gathering: smart + fact_caching_timeout: 320 + +scenario: + test_sequence: + - destroy + - dependency + - create + - prepare + - converge + - idempotence + - verify + - destroy + +verifier: + name: testinfra diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/knot_resolver/molecule/configured/prepare.yml b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/knot_resolver/molecule/configured/prepare.yml new file mode 100644 index 0000000..93906df --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/knot_resolver/molecule/configured/prepare.yml @@ -0,0 +1,57 @@ +--- + +- name: information + hosts: all + gather_facts: true + + pre_tasks: + - name: arch- / artixlinux + when: + - ansible_facts.distribution | lower == 'archlinux' or + ansible_facts.os_family | lower == 'artix linux' + block: + - name: update pacman system + ansible.builtin.command: | + pacman --refresh --sync --sysupgrade --noconfirm + register: pacman + changed_when: pacman.rc != 0 + failed_when: pacman.rc != 0 + + - name: create depends service + ansible.builtin.copy: + mode: 0755 + dest: /etc/init.d/net + content: | + #!/usr/bin/openrc-run + true + when: + - ansible_facts.os_family | lower == 'artix linux' + + - name: make sure python3-apt is installed (only debian based) + ansible.builtin.package: + name: + - python3-apt + state: present + when: + - ansible_facts.os_family | lower == 'debian' + + - name: update package cache + become: true + ansible.builtin.package: + update_cache: true + + - name: environment + ansible.builtin.debug: + msg: + - "os family : {{ ansible_facts.distribution }} ({{ ansible_facts.os_family }})" + - "distribution version : {{ ansible_facts.distribution_major_version }}" + - "ansible version : {{ ansible_version.full }}" + - "python version : {{ ansible_facts.python.version.major }}.{{ ansible_facts.python.version.minor }}" + +- name: prepare container + hosts: all + gather_facts: true + + roles: + - role: bodsch.core.syslog_ng + - role: bodsch.dns.knot diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/knot_resolver/molecule/configured/tests/test_default.py b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/knot_resolver/molecule/configured/tests/test_default.py new file mode 100644 index 0000000..b012923 --- /dev/null +++ 
b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/knot_resolver/molecule/configured/tests/test_default.py @@ -0,0 +1,116 @@ +import os +import pprint + +import pytest +import testinfra.utils.ansible_runner +from ansible.parsing.dataloader import DataLoader +from ansible.template import Templar + +pp = pprint.PrettyPrinter() + +testinfra_hosts = testinfra.utils.ansible_runner.AnsibleRunner( + os.environ["MOLECULE_INVENTORY_FILE"] +).get_hosts("all") + + +def base_directory(): + cwd = os.getcwd() + + if "group_vars" in os.listdir(cwd): + directory = "../.." + molecule_directory = "." + else: + directory = "." + molecule_directory = "molecule/{}".format( + os.environ.get("MOLECULE_SCENARIO_NAME") + ) + + return directory, molecule_directory + + +@pytest.fixture() +def get_vars(host): + """ """ + base_dir, molecule_dir = base_directory() + distribution = host.system_info.distribution + operating_system = None + + if distribution in ["debian", "ubuntu"]: + operating_system = "debian" + elif distribution in ["redhat", "ol", "centos", "rocky", "almalinux"]: + operating_system = "redhat" + elif distribution in ["arch", "artix"]: + operating_system = f"{distribution}linux" + + file_defaults = f"file={base_dir}/defaults/main.yaml name=role_defaults" + file_vars = f"file={base_dir}/vars/main.yaml name=role_vars" + file_molecule = f"file={molecule_dir}/group_vars/all/vars.yml name=test_vars" + file_distribution = ( + f"file={base_dir}/vars/{operating_system}.yaml name=role_distribution" + ) + + defaults_vars = ( + host.ansible("include_vars", file_defaults) + .get("ansible_facts") + .get("role_defaults") + ) + vars_vars = ( + host.ansible("include_vars", file_vars).get("ansible_facts").get("role_vars") + ) + distribution_vars = ( + host.ansible("include_vars", file_distribution) + .get("ansible_facts") + .get("role_distribution") + ) + molecule_vars = ( + host.ansible("include_vars", file_molecule) + .get("ansible_facts") + .get("test_vars") + ) + + ansible_vars = defaults_vars + ansible_vars.update(vars_vars) + ansible_vars.update(distribution_vars) + ansible_vars.update(molecule_vars) + + templar = Templar(loader=DataLoader(), variables=ansible_vars) + result = templar.template(ansible_vars, fail_on_undefined=False) + + return result + + +def test_packages(host): + """ """ + distribution = host.system_info.distribution + release = host.system_info.release + + print(f"distribution: {distribution}") + print(f"release : {release}") + + packages = [] + packages.append("knot-resolver") + + # artix is not supported + if distribution != "artix": + for package in packages: + p = host.package(package) + assert p.is_installed + + +@pytest.mark.parametrize("dirs", ["/etc/knot-resolver", "/usr/lib/knot-resolver"]) +def test_directories(host, dirs): + distribution = host.system_info.distribution + # release = host.system_info.release + + if distribution in ["redhat", "ol", "centos", "rocky", "almalinux"]: + dirs = dirs.replace("/lib/", "/lib64/") + + d = host.file(dirs) + assert d.is_directory + + +# def test_service_running_and_enabled(host): +# +# service = host.service('docker') +# assert service.is_running +# assert service.is_enabled diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/knot_resolver/molecule/default/converge.yml b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/knot_resolver/molecule/default/converge.yml new file mode 100644 index 0000000..acaf036 --- /dev/null +++
b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/knot_resolver/molecule/default/converge.yml @@ -0,0 +1,10 @@ +--- +- name: converge + hosts: all + any_errors_fatal: false + + environment: + NETRC: '' + + roles: + - role: bodsch.dns.knot_resolver diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/knot_resolver/molecule/default/group_vars/all/knot.yaml b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/knot_resolver/molecule/default/group_vars/all/knot.yaml new file mode 100644 index 0000000..b210fcb --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/knot_resolver/molecule/default/group_vars/all/knot.yaml @@ -0,0 +1,48 @@ +--- + +knot_config: + server: + rundir: "/run/knot" + user: "{{ knot_user }}:{{ knot_group }}" + listen: + - '127.0.0.1@5353' + log: + syslog: + any: info + stderr: + server: critical + any: debug + database: + storage: "{{ knot_database }}" + template: + default: + storage: "{{ knot_database }}" + file: "%s.zone" + zone: + molecule.lan: {} + +knot_zones: + molecule.lan: + ttl: 3600 + soa: + primary_dns: 'dns.molecule.lan' + hostmaster: 'hostmaster.molecule.lan' + refresh: 6h + retry: 1h + expire: 1w + minimum: 1d + name_servers: + dns.molecule.lan: + ip: '{{ ansible_facts.default_ipv4.address }}' + records: + this.molecule.lan: + type: 'A' + ip: '{{ ansible_facts.default_ipv4.address }}' + docker.molecule.lan: + type: 'A' + ip: 192.168.110.10 + + there.molecule.lan: + type: 'CNAME' + target: 'this.molecule.lan' +... diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/knot_resolver/molecule/default/group_vars/all/pacman.yml b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/knot_resolver/molecule/default/group_vars/all/pacman.yml new file mode 100644 index 0000000..b984dc6 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/knot_resolver/molecule/default/group_vars/all/pacman.yml @@ -0,0 +1,10 @@ +--- + +pacman_custom_mirrors: + - file: /etc/pacman.d/mirrorlist-arch + "ARCH MIRRORS": + enabled: true + servers: + - http://mirror.i3d.net/pub/archlinux/$repo/os/$arch + +... diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/knot_resolver/molecule/default/group_vars/all/syslog-ng.yml b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/knot_resolver/molecule/default/group_vars/all/syslog-ng.yml new file mode 100644 index 0000000..c81cf5b --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/knot_resolver/molecule/default/group_vars/all/syslog-ng.yml @@ -0,0 +1,3 @@ +--- + +... diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/knot_resolver/molecule/default/group_vars/all/vars.yml b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/knot_resolver/molecule/default/group_vars/all/vars.yml new file mode 100644 index 0000000..c81cf5b --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/knot_resolver/molecule/default/group_vars/all/vars.yml @@ -0,0 +1,3 @@ +--- + +... 
diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/knot_resolver/molecule/default/molecule.yml b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/knot_resolver/molecule/default/molecule.yml new file mode 100644 index 0000000..3fb1c8f --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/knot_resolver/molecule/default/molecule.yml @@ -0,0 +1,54 @@ +--- + +role_name_check: 1 + + +dependency: + name: galaxy + +driver: + name: docker + +platforms: + - name: instance + image: "ghcr.io/bodsch/docker-ansible/ansible-${DISTRIBUTION:-debian:12}" + command: ${MOLECULE_DOCKER_COMMAND:-""} + docker_host: "${DOCKER_HOST:-unix:///run/docker.sock}" + privileged: true + pre_build_image: true + cgroupns_mode: host + volumes: + - /sys/fs/cgroup:/sys/fs/cgroup:rw + - /var/lib/containerd + capabilities: + - SYS_ADMIN + tmpfs: + - /run + - /tmp + +provisioner: + name: ansible + ansible_args: + - --diff + - -v + config_options: + defaults: + deprecation_warnings: true + callback_result_format: yaml + callbacks_enabled: profile_tasks + gathering: smart + fact_caching_timeout: 320 + +scenario: + test_sequence: + - destroy + - dependency + - create + - prepare + - converge + - idempotence + - verify + - destroy + +verifier: + name: testinfra diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/knot_resolver/molecule/default/prepare.yml b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/knot_resolver/molecule/default/prepare.yml new file mode 100644 index 0000000..93906df --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/knot_resolver/molecule/default/prepare.yml @@ -0,0 +1,57 @@ +--- + +- name: information + hosts: all + gather_facts: true + + pre_tasks: + - name: arch- / artixlinux + when: + - ansible_facts.distribution | lower == 'archlinux' or + ansible_facts.os_family | lower == 'artix linux' + block: + - name: update pacman system + ansible.builtin.command: | + pacman --refresh --sync --sysupgrade --noconfirm + register: pacman + changed_when: pacman.rc != 0 + failed_when: pacman.rc != 0 + + - name: create depends service + ansible.builtin.copy: + mode: 0755 + dest: /etc/init.d/net + content: | + #!/usr/bin/openrc-run + true + when: + - ansible_facts.os_family | lower == 'artix linux' + + - name: make sure python3-apt is installed (only debian based) + ansible.builtin.package: + name: + - python3-apt + state: present + when: + - ansible_facts.os_family | lower == 'debian' + + - name: update package cache + become: true + ansible.builtin.package: + update_cache: true + + - name: environment + ansible.builtin.debug: + msg: + - "os family : {{ ansible_facts.distribution }} ({{ ansible_facts.os_family }})" + - "distribution version : {{ ansible_facts.distribution_major_version }}" + - "ansible version : {{ ansible_version.full }}" + - "python version : {{ ansible_facts.python.version.major }}.{{ ansible_facts.python.version.minor }}" + +- name: prepare container + hosts: all + gather_facts: true + + roles: + - role: bodsch.core.syslog_ng + - role: bodsch.dns.knot diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/knot_resolver/molecule/default/tests/test_default.py b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/knot_resolver/molecule/default/tests/test_default.py new file mode 100644 index 0000000..b012923 --- /dev/null +++ 
b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/knot_resolver/molecule/default/tests/test_default.py @@ -0,0 +1,116 @@ +import os +import pprint + +import pytest +import testinfra.utils.ansible_runner +from ansible.parsing.dataloader import DataLoader +from ansible.template import Templar + +pp = pprint.PrettyPrinter() + +testinfra_hosts = testinfra.utils.ansible_runner.AnsibleRunner( + os.environ["MOLECULE_INVENTORY_FILE"] +).get_hosts("all") + + +def base_directory(): + cwd = os.getcwd() + + if "group_vars" in os.listdir(cwd): + directory = "../.." + molecule_directory = "." + else: + directory = "." + molecule_directory = "molecule/{}".format( + os.environ.get("MOLECULE_SCENARIO_NAME") + ) + + return directory, molecule_directory + + +@pytest.fixture() +def get_vars(host): + """ """ + base_dir, molecule_dir = base_directory() + distribution = host.system_info.distribution + operating_system = None + + if distribution in ["debian", "ubuntu"]: + operating_system = "debian" + elif distribution in ["redhat", "ol", "centos", "rocky", "almalinux"]: + operating_system = "redhat" + elif distribution in ["arch", "artix"]: + operating_system = f"{distribution}linux" + + file_defaults = f"file={base_dir}/defaults/main.yaml name=role_defaults" + file_vars = f"file={base_dir}/vars/main.yaml name=role_vars" + file_molecule = f"file={molecule_dir}/group_vars/all/vars.yml name=test_vars" + file_distribution = ( + f"file={base_dir}/vars/{operating_system}.yaml name=role_distribution" + ) + + defaults_vars = ( + host.ansible("include_vars", file_defaults) + .get("ansible_facts") + .get("role_defaults") + ) + vars_vars = ( + host.ansible("include_vars", file_vars).get("ansible_facts").get("role_vars") + ) + distribution_vars = ( + host.ansible("include_vars", file_distribution) + .get("ansible_facts") + .get("role_distribution") + ) + molecule_vars = ( + host.ansible("include_vars", file_molecule) + .get("ansible_facts") + .get("test_vars") + ) + + ansible_vars = defaults_vars + ansible_vars.update(vars_vars) + ansible_vars.update(distribution_vars) + ansible_vars.update(molecule_vars) + + templar = Templar(loader=DataLoader(), variables=ansible_vars) + result = templar.template(ansible_vars, fail_on_undefined=False) + + return result + + +def test_packages(host): + """ """ + distribution = host.system_info.distribution + release = host.system_info.release + + print(f"distribution: {distribution}") + print(f"release : {release}") + + packages = [] + packages.append("knot-resolver") + + # artix is not supported + if distribution != "artix": + for package in packages: + p = host.package(package) + assert p.is_installed + + +@pytest.mark.parametrize("dirs", ["/etc/knot-resolver", "/usr/lib/knot-resolver"]) +def test_directories(host, dirs): + distribution = host.system_info.distribution + # release = host.system_info.release + + if distribution in ["redhat", "ol", "centos", "rocky", "almalinux"]: + dirs = dirs.replace("/lib/", "/lib64/") + + d = host.file(dirs) + assert d.is_directory + + +# def test_service_running_and_enabled(host): +# +# service = host.service('docker') +# assert service.is_running +# assert service.is_enabled diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/knot_resolver/tasks/configure.yaml b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/knot_resolver/tasks/configure.yaml new file mode 100644 index 0000000..7df02f1 --- /dev/null +++ 
b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/knot_resolver/tasks/configure.yaml @@ -0,0 +1,63 @@ +--- + +- name: user and group handling + when: + - knot_resolver_owner != "root" + block: + - name: create kresd group + ansible.builtin.group: + name: "{{ knot_resolver_group }}" + state: present + system: true + + - name: create kresd user + ansible.builtin.user: + name: "{{ knot_resolver_owner }}" + groups: "{{ knot_resolver_group }}" + append: true + shell: /usr/sbin/nologin + system: true + createhome: false + home: /nonexistent + +- name: merge knot_resolver configuration between defaults and custom + ansible.builtin.set_fact: + knot_resolver_cache: "{{ knot_resolver_defaults_cache | combine(knot_resolver_cache, recursive=True) }}" + knot_resolver_log: "{{ knot_resolver_defaults_log | combine(knot_resolver_log, recursive=True) }}" + knot_resolver_prometheus: "{{ knot_resolver_defaults_prometheus | combine(knot_resolver_prometheus, recursive=True) }}" + knot_resolver_predict: "{{ knot_resolver_defaults_predict | combine(knot_resolver_predict, recursive=True) }}" + knot_resolver_service: "{{ knot_resolver_systemd.unit | bodsch.dns.knot_resolver_service( + os_family=ansible_facts.os_family, + count=knot_resolver_systemd.instances | default(0), + service='kresd@.service') }}" + +- name: create knot resolver configuration + ansible.builtin.template: + src: kresd.conf.j2 + dest: /etc/knot-resolver/kresd.conf + mode: "0664" + backup: true + notify: + - restart kresd + +- name: openrc + when: + - ansible_facts.service_mgr | lower == 'openrc' + block: + - name: create kres-cache-gc config + ansible.builtin.template: + src: init/openrc/conf.d/kres-cache-gc.j2 + dest: /etc/conf.d/kres-cache-gc + mode: "0640" + notify: + - restart kres-cache-gc + + - name: create kresd config + ansible.builtin.template: + src: init/openrc/conf.d/kresd.j2 + dest: /etc/conf.d/kresd + mode: "0640" + notify: + - restart kresd + +... diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/knot_resolver/tasks/install.yaml b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/knot_resolver/tasks/install.yaml new file mode 100644 index 0000000..6d66466 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/knot_resolver/tasks/install.yaml @@ -0,0 +1,32 @@ +--- + +- name: install knot resolver packages + ansible.builtin.package: + name: "{{ knot_resolver_packages }}" + state: present + +- name: openrc + when: + - ansible_facts.service_mgr | lower == 'openrc' + block: + - name: create kres-cache-gc init script + ansible.builtin.template: + src: init/openrc/init.d/kres-cache-gc.j2 + dest: /etc/init.d/kres-cache-gc + mode: "0755" + + - name: create kresd init script + ansible.builtin.template: + src: init/openrc/init.d/kresd.j2 + dest: /etc/init.d/kresd + mode: "0755" + +- name: update trusted-key + bodsch.dns.kdig: + root_dns: "{{ knot_resolver_trusted.dns_server | default('k.root-servers.net') }}" + signing_key: "{{ knot_resolver_trusted.signing_key | default('257') | int }}" + trust_keyfile: "{{ knot_resolver_trusted.keyfile | default('/etc/trusted-key.key') }}" + notify: + - restart kresd + +... 
diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/knot_resolver/tasks/main.yaml b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/knot_resolver/tasks/main.yaml new file mode 100644 index 0000000..d34efb8 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/knot_resolver/tasks/main.yaml @@ -0,0 +1,18 @@ +--- + +- name: prepare + ansible.builtin.include_tasks: prepare.yaml + +- name: repositories + ansible.builtin.include_tasks: repositories.yaml + +- name: install + ansible.builtin.include_tasks: install.yaml + +- name: configure + ansible.builtin.include_tasks: configure.yaml + +- name: service + ansible.builtin.include_tasks: service.yaml + +... diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/knot_resolver/tasks/prepare.yaml b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/knot_resolver/tasks/prepare.yaml new file mode 100644 index 0000000..80b6726 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/knot_resolver/tasks/prepare.yaml @@ -0,0 +1,34 @@ +--- + +- name: include OS specific configuration + ansible.builtin.include_vars: "{{ lookup('first_found', params) }}" + vars: + params: + paths: + - "vars" + files: + # eg. debian-10 / ubuntu-20.04 / centos-8 / oraclelinux-8 + - "{{ ansible_facts.distribution | lower }}-{{ ansible_facts.distribution_major_version }}.yaml" + # eg. archlinux-systemd / archlinux-openrc + - "{{ ansible_facts.distribution | lower }}-{{ ansible_facts.service_mgr | lower }}.yaml" + # eg. debian / ubuntu / centos / oraclelinux + - "{{ ansible_facts.distribution | lower }}.yaml" + # eg. redhat / debian / archlinux + - "{{ ansible_facts.os_family | lower }}.yaml" + # artixlinux + - "{{ ansible_facts.os_family | lower | replace(' ', '') }}.yaml" + - default.yaml + skip: true + +- name: install dependencies + ansible.builtin.package: + name: "{{ knot_resolver_dependencies }}" + state: present + when: + - knot_resolver_dependencies is defined + - knot_resolver_dependencies | count > 0 + +# - name: get latest system information + # ansible.builtin.setup: + +... diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/knot_resolver/tasks/repositories.yaml b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/knot_resolver/tasks/repositories.yaml new file mode 100644 index 0000000..a3d0f3b --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/knot_resolver/tasks/repositories.yaml @@ -0,0 +1,27 @@ +--- + +- name: add CZ.NIC apt sources + when: + - ansible_facts.os_family | lower == 'debian' + bodsch.core.apt_sources: + name: cznic-labs-knot-resolver + filename: cznic-labs-knot-resolver.sources + uris: + - https://pkg.labs.nic.cz/knot-resolver + suites: "{{ ansible_facts.distribution_release | lower }}" + components: + - main + signed_by: /usr/share/keyrings/cznic-labs-pkg.gpg + key: + method: download + url: "https://pkg.labs.nic.cz/gpg" + dest: "/usr/share/keyrings/cznic-labs-pkg.gpg" + dearmor: true + validate: true + update_cache: true + +- name: update package cache + ansible.builtin.package: + update_cache: true + +... 
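The first_found cascade in prepare.yaml above resolves the most specific vars file that exists: distribution-plus-release files beat distribution-plus-service-manager files, which beat plain distribution files, which beat os_family files, with default.yaml as the last resort. As an illustration only, a hypothetical vars/debian-12.yaml would win over vars/debian.yaml on a bookworm host, because "debian-12.yaml" comes first in the candidate list; the values here just restate the role's Debian package set to show the shape such an override takes:

```yaml
---
# hypothetical vars/debian-12.yaml; picked up first on a Debian 12 host
# because "debian-12.yaml" precedes "debian.yaml" in the first_found list
knot_resolver_packages:
  - knot-dnsutils
  - knot-resolver
  - knot-resolver-module-http
...
```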
diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/knot_resolver/tasks/service.yaml b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/knot_resolver/tasks/service.yaml new file mode 100644 index 0000000..9c27a7c --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/knot_resolver/tasks/service.yaml @@ -0,0 +1,20 @@ +--- + +- name: start and enable Knot Resolver service (systemd) + ansible.builtin.service: + name: "{{ item }}" + state: started + enabled: true + loop: "{{ knot_resolver_service }}" + when: + - ansible_facts.service_mgr | lower == 'systemd' + +- name: start and enable Knot Resolver service (openrc) + ansible.builtin.service: + name: kresd + state: started + enabled: true + when: + - ansible_facts.service_mgr | lower == 'openrc' + +... diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/knot_resolver/templates/apt_knot.list.j2 b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/knot_resolver/templates/apt_knot.list.j2 new file mode 100644 index 0000000..ae24016 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/knot_resolver/templates/apt_knot.list.j2 @@ -0,0 +1,4 @@ +# {{ ansible_managed }} + +deb http://download.opensuse.org/repositories/home:/CZ-NIC:/knot-resolver-latest/{{ ansible_facts.os_family }}_{{ ansible_facts.distribution_major_version }} / +# deb https://deb.knot-dns.cz/knot-latest/ {{ ansible_facts.distribution_release | lower }} main diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/knot_resolver/templates/init/openrc/conf.d/kres-cache-gc.j2 b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/knot_resolver/templates/init/openrc/conf.d/kres-cache-gc.j2 new file mode 100644 index 0000000..59abe41 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/knot_resolver/templates/init/openrc/conf.d/kres-cache-gc.j2 @@ -0,0 +1,14 @@ +# Configuration for /etc/init.d/kres-cache-gc + +# Time interval in milliseconds how often to run GC. +garbage_interval={{ knot_resolver_cache.garbage_interval }} + +command_user="{{ knot_resolver_owner }}:{{ knot_resolver_group }}" + +# Path to directory with knot resolver's cache. +# If not specified, it tries to get it from /etc/conf.d/kresd and if +# it's not even there, defaults to /var/cache/knot-resolver. +cachedir={{ knot_resolver_cache.directory }} + +# Additional arguments to pass to kres-cache-gc. +#command_args= diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/knot_resolver/templates/init/openrc/conf.d/kresd.j2 b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/knot_resolver/templates/init/openrc/conf.d/kresd.j2 new file mode 100644 index 0000000..cdcf23f --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/knot_resolver/templates/init/openrc/conf.d/kresd.j2 @@ -0,0 +1,20 @@ +# Configuration for /etc/init.d/kresd + +# Config file path. +cfgfile="/etc/knot-resolver/kresd.conf" + +command_user="{{ knot_resolver_owner }}:{{ knot_resolver_group }}" + +# Cache (working) directory. +cachedir="{{ knot_resolver_cache.directory }}" + +# A process which will be used to log the standard output from the kresd +# process. Default is logger(1) which redirects the output to syslog. +# If you want to disable this, set output_logger="". +#output_logger="logger -t kresd -p daemon.info" + +# Path to the logging file. 
Default is to use the output_logger instead. +#output_log= + +# Uncomment to define soft dependency on kres-cache-gc. +rc_want="kres-cache-gc" diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/knot_resolver/templates/init/openrc/init.d/kres-cache-gc.j2 b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/knot_resolver/templates/init/openrc/init.d/kres-cache-gc.j2 new file mode 100644 index 0000000..440471c --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/knot_resolver/templates/init/openrc/init.d/kres-cache-gc.j2 @@ -0,0 +1,22 @@ +#!/sbin/openrc-run + +description="Knot Resolver Cache Garbage Collector" + +: ${command_user:="kresd"} +: ${garbage_interval:=1000} +: ${output_logger="logger -t kres-cache-gc -p daemon.info"} +: ${wait:=50} # milliseconds + +command="/usr/sbin/kres-cache-gc" +command_background="yes" +pidfile="/run/$RC_SVCNAME.pid" +start_stop_daemon_args="--wait $wait" + +start_pre() { + if [ -z "$cachedir" ] && [ -e /etc/conf.d/kresd ]; then + cachedir=$(. /etc/conf.d/kresd; echo "$cachedir") + fi + : ${cachedir:="/var/cache/knot-resolver"} + + command_args="-c $cachedir -d $garbage_interval ${command_args:-}" +} diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/knot_resolver/templates/init/openrc/init.d/kresd.j2 b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/knot_resolver/templates/init/openrc/init.d/kresd.j2 new file mode 100644 index 0000000..83486ea --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/knot_resolver/templates/init/openrc/init.d/kresd.j2 @@ -0,0 +1,33 @@ +#!/sbin/openrc-run + +: ${wait:=100} + +command="/usr/sbin/kresd" +command_args="--noninteractive --config=$cfgfile $cachedir" +command_background="yes" +pidfile="/run/$RC_SVCNAME.pid" +start_stop_daemon_args="--chdir=$cachedir --wait $wait" +required_files="$cfgfile" + +depend() { + need net + before kres-cache-gc +} + +start_pre() { + [ "$output_log" ] && checkpath -f -m 640 -o "$command_user" "$output_log" + checkpath -d -m 750 -o "$command_user" "$cachedir" + chown -R "$command_user" "$cachedir" + + ebegin "check capabilities for ${command}" + if [ -z "$(getcap ${command})" ] + then + setcap 'cap_net_bind_service,cap_setpcap=+ep' /usr/sbin/kresd + fi +} + +stop_post() { + + ebegin "clear control sockets" + rm -fv /var/cache/knot-resolver/control/* +} diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/knot_resolver/templates/kresd.conf.j2 b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/knot_resolver/templates/kresd.conf.j2 new file mode 100644 index 0000000..d0c389d --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/knot_resolver/templates/kresd.conf.j2 @@ -0,0 +1,161 @@ +#jinja2: trim_blocks: True, lstrip_blocks: True +-- {{ ansible_managed }} +-- +-- Refer to manual: https://knot-resolver.readthedocs.org/en/stable/ + +{% if knot_resolver_log.level is defined and + knot_resolver_log.level | length > 0 %} +log_level('{{ knot_resolver_log.level }}') +{% endif %} +{% if knot_resolver_log.target is defined and + knot_resolver_log.target | length > 0 %} +log_target('{{ knot_resolver_log.target }}') +{% endif %} + +user( + '{{ knot_resolver_owner }}', + '{{ knot_resolver_group }}' +) + +-- trust_anchors.add_file('/var/lib/knot-resolver/root.key') +-- trust_anchors.hold_down_time = 3 * day +trust_anchors.refresh_time = 10 * sec +trust_anchors.keep_removed = 
2 + +{% if knot_resolver_support_ipv6 %} +net.ipv6 = true +{% else %} +net.ipv6 = false +{% endif %} + +{% if knot_resolver_listener is defined and + knot_resolver_listener | count > 0 %} + {% for listener in knot_resolver_listener %} +net.listen({{ listener | bodsch.dns.resolver_listener }}) + {% endfor %} +{% endif %} + +{%- if knot_resolver_support_ipv6 %} +net.listen('::1', 53, { kind = 'dns', freebind = true }) +net.listen('::1', 853, { kind = 'tls', freebind = true }) +{% endif %} + +{%- if knot_resolver_service_ip4 is defined and + knot_resolver_service_ip4 | length > 0 %} +net.listen('{{ knot_resolver_service_ip4 }}', 53, { kind = 'dns' }) +net.listen('{{ knot_resolver_service_ip4 }}', 853, { kind = 'tls' }) +{% endif %} + +-- Load useful modules +modules = { + 'stats', -- Track internal statistics + 'predict', -- Prefetch expiring/frequent records + 'cache', + 'serve_stale < cache', + 'workarounds < iterate', + 'hints > iterate', -- Load /etc/hosts and allow custom root hints +} + +-- module configuration + +-- - stats + +-- - http +{% if knot_resolver_prometheus.enabled %} +modules.load('http') +-- Set Prometheus namespace +http.prometheus.namespace = '{{ knot_resolver_prometheus.namespace }}' +http.config() +{% endif %} + +-- - predict +-- Prefetch learning +predict.config({ + window = {{ knot_resolver_predict.window }}, -- 90 minutes sampling window + period = {{ knot_resolver_predict.period }}*(60/15) -- track last 48 hours +}) + +-- - cache config +cache.open( {{ knot_resolver_cache.size }} * MB, 'lmdb://{{ knot_resolver_cache.directory }}') +{% if knot_resolver_cache.min_ttl is defined %} +cache.min_ttl({{ knot_resolver_cache.min_ttl | int }}) +{% endif %} +{% if knot_resolver_cache.max_ttl is defined %} +cache.max_ttl({{ knot_resolver_cache.max_ttl | int }}) +{% endif %} + +{% if knot_resolver_log.level == "debug" %} +policy.add( + policy.all( + policy.DEBUG_ALWAYS + ) +) +{% endif %} + +{% if knot_resolver_views is defined and + knot_resolver_views | length > 0 %} +modules.load('view') + {% for view in knot_resolver_views %} + {% if view.pass is defined and view.pass | count > 0%} + {% for k,v in view.items() %} + {% for ip in v %} +view:addr('{{ ip }}', policy.all(policy.PASS)) + {% endfor %} + {% endfor %} + {% endif %} + {% if view.drop is defined and view.drop | count > 0%} + {% for k,v in view.items() %} + {% for ip in v %} +view:addr('{{ ip }}', policy.all(policy.DROP)) + {% endfor %} + {% endfor %} + {% endif %} + {% endfor %} +{% endif %} + +-- policy.add( +-- policy.suffix( +-- policy.REQTRACE, policy.todnames({'.'}) +-- ) +-- ) + +{% if knot_resolver_internal_domain is defined and + knot_resolver_internal_domain | length > 0 %} + {# https://knot-resolver.readthedocs.io/en/stable/modules-policy.html#replacing-part-of-the-dns-tree #} +-- define list of internal-only domains +-- forward only queries for names under domain example.com to a single IP address + {% for domain in knot_resolver_internal_domain %} + +internal_domains = policy.todnames({ '{{ domain.domains | join("', '") }}' }) + +policy.add( + policy.suffix( + policy.FLAGS( + {'NO_CACHE', 'NO_EDNS'} + ), + internal_domains + ) +) +policy.add( + policy.suffix( + {% if domain.policy.forward is defined %} + policy.FORWARD( + '{{ domain.policy.forward | default('127.0.1.1@53') }}' + ), + {% elif domain.policy.stub is defined %} + policy.STUB( + '{{ domain.policy.stub | default('127.0.1.1@53') }}' + ), + {% endif %} + internal_domains + ) +) + {% endfor %} +{% endif %} + +-- Block Firefox DoH +policy.add( + 
policy.suffix( + policy.DENY, { todname('use-application-dns.net') } + ) +) diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/knot_resolver/vars/archlinux-openrc.yaml b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/knot_resolver/vars/archlinux-openrc.yaml new file mode 100644 index 0000000..8e6b924 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/knot_resolver/vars/archlinux-openrc.yaml @@ -0,0 +1,10 @@ +--- + +knot_resolver_owner: knot-resolver +knot_resolver_group: knot-resolver + +knot_resolver_dependencies: + - lua51-http + - lua51-cqueues + +... diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/knot_resolver/vars/archlinux.yaml b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/knot_resolver/vars/archlinux.yaml new file mode 100644 index 0000000..e646a6b --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/knot_resolver/vars/archlinux.yaml @@ -0,0 +1,11 @@ +--- + +knot_resolver_owner: knot-resolver +knot_resolver_group: knot-resolver + +knot_resolver_dependencies: + - lua51-http + - lua51-cqueues + - iproute2 + +... diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/knot_resolver/vars/artixlinux.yaml b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/knot_resolver/vars/artixlinux.yaml new file mode 100644 index 0000000..8e6b924 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/knot_resolver/vars/artixlinux.yaml @@ -0,0 +1,10 @@ +--- + +knot_resolver_owner: knot-resolver +knot_resolver_group: knot-resolver + +knot_resolver_dependencies: + - lua51-http + - lua51-cqueues + +... diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/knot_resolver/vars/debian.yaml b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/knot_resolver/vars/debian.yaml new file mode 100644 index 0000000..b5ca400 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/knot_resolver/vars/debian.yaml @@ -0,0 +1,13 @@ +--- + +knot_resolver_dependencies: + - gpg + - xz-utils + - iproute2 + +knot_resolver_packages: + - knot-dnsutils + - knot-resolver + - knot-resolver-module-http + +... diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/knot_resolver/vars/main.yaml b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/knot_resolver/vars/main.yaml new file mode 100644 index 0000000..5f5b9e3 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/knot_resolver/vars/main.yaml @@ -0,0 +1,31 @@ +--- + +knot_resolver_owner: knot-resolver +knot_resolver_group: knot-resolver + +knot_resolver_defaults_cache: + directory: /var/cache/knot-resolver + min_ttl: 120 + max_ttl: 3600 + size: 200 + garbage_interval: 1000 + +knot_resolver_defaults_log: + level: '' # info + target: syslog + +knot_resolver_defaults_prometheus: + enabled: true + namespace: 'resolver_' + +knot_resolver_defaults_predict: + enabled: true + window: 90 + period: 48 + +knot_resolver_dependencies: [] + +knot_resolver_packages: + - knot-resolver + +... 
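The view and forwarding sections of kresd.conf.j2 above iterate over plain data structures rather than hard-coded rules. A hypothetical group_vars sketch showing the shapes the template expects; every address here is illustrative. Each knot_resolver_views entry is a one-key mapping (pass or drop) onto a list of addresses, and each knot_resolver_internal_domain entry couples a list of domains with either a forward or a stub policy target:

```yaml
# illustrative values only
knot_resolver_views:
  - pass:
      - "127.0.0.0/8"
  - drop:
      - "10.11.0.0/24"

knot_resolver_internal_domain:
  - domains:
      - molecule.lan
    policy:
      forward: "192.0.2.53@53"
```

Rendered through the template, this would emit view:addr('127.0.0.0/8', policy.all(policy.PASS)), a matching DROP rule, and a policy.FORWARD rule scoped to molecule.lan.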
diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/knot_resolver/vars/redhat.yaml b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/knot_resolver/vars/redhat.yaml new file mode 100644 index 0000000..b8d2790 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/knot_resolver/vars/redhat.yaml @@ -0,0 +1,9 @@ +--- + +knot_resolver_dependencies: + - iproute + +knot_resolver_packages: + - knot-resolver + - knot-resolver-module-http +... diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pdns/.gitignore b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pdns/.gitignore new file mode 100644 index 0000000..3adf759 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pdns/.gitignore @@ -0,0 +1,19 @@ +### Ansible ### +*.retry +.ansible_cache + +### Python ### +# Byte-compiled / optimized / DLL files +.pytest_cache/ +__pycache__/ +*.py[cod] +*$py.class + +### Molecule ### +.tox +.cache +.molecule +.vagrant + +#venv +venv diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pdns/.yamllint b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pdns/.yamllint new file mode 100644 index 0000000..8827676 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pdns/.yamllint @@ -0,0 +1,33 @@ +--- +# Based on ansible-lint config +extends: default + +rules: + braces: + max-spaces-inside: 1 + level: error + brackets: + max-spaces-inside: 1 + level: error + colons: + max-spaces-after: -1 + level: error + commas: + max-spaces-after: -1 + level: error + comments: disable + comments-indentation: disable + document-start: disable + empty-lines: + max: 3 + level: error + hyphens: + level: error + indentation: disable + key-duplicates: enable + line-length: disable + new-line-at-end-of-file: disable + new-lines: + type: unix + trailing-spaces: disable + truthy: disable diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pdns/README.md b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pdns/README.md new file mode 100644 index 0000000..0ba42a2 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pdns/README.md @@ -0,0 +1,27 @@ +# Ansible Role: `bodsch.dns.pdns` + +Ansible role to install and configure PowerDNS on various Linux systems. + + +## usage + +```yaml + +``` + +## Contribution + +Please read [Contribution](CONTRIBUTING.md) + +## Development, Branches (Git Tags) + + +## Author + +- Bodo Schulz + +## License + +[Apache](LICENSE) + +**FREE SOFTWARE, HELL YEAH!** diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pdns/defaults/main.yml b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pdns/defaults/main.yml new file mode 100644 index 0000000..7534d19 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pdns/defaults/main.yml @@ -0,0 +1,85 @@ +--- + +pdns_config_include: "{{ pdns_config_dir }}/pdns.d" + +# The user and group the PowerDNS Authoritative Server process will run as. +# NOTE: at the moment, we don't create a user as we assume the package creates +# a "pdns" user and group. 
If you change these variables, make sure to create + # the user and groups before applying this role + # pdns_user: "pdns" + # pdns_group: "pdns" +pdns_file_owner: "root" +pdns_file_group: "{{ pdns_group }}" + +pdns_service: + # Name of the PowerDNS Authoritative Server Service + name: "pdns" + # State of the PowerDNS Authoritative Server service + state: "started" + enabled: true +# systemd_overrides: +# LimitNOFILE: 10000 + +# When True, disable the automated restart of the PowerDNS service +# pdns_disable_handlers: False + +# dict containing all configuration options, except for backend +# configuration and the "config-dir", "setuid" and "setgid" directives. +pdns_config: {} +# pdns_config: +# master: yes +# slave: no +# local-address: '192.0.2.53' +# local-ipv6: '2001:DB8:1::53' +# local-port: '5300' + +# A dict with all the backends you'd like to configure. +# This default starts just the bind-backend with an empty config file +pdns_backends: + - name: bind + config: '/dev/null' + # check-interval: 10 + # dnssec-db: "{{ pdns_config_dir }}/dnssec.db" + # dnssec-db-journal-mode: WAL + # hybrid: true + # ignore-broken-records: false + # supermaster-config: /var/lib/powerdns/supermaster.conf + # supermaster-destdir: /var/lib/powerdns/zones.slave.d + # supermasters: [] + # + # - name: gsqlite3 + # database: /var/lib/powerdns/pdns.db + # dnssec: true + # pragma-journal-mode: true + # pragma-synchronous: true + # pragma-foreign-keys: true + # + # - name: gmysql + # host: 10.11.0.10 + # dbname: pdns + # user: pdns + # password: "{{ vault__pdns.databases.pdns }}" + # # # https://doc.powerdns.com/authoritative/backends/generic-mysql.html?highlight=gmysql#settings + # # host: "" # Host (ip address) to connect to. Mutually exclusive with gmysql-socket. + # # # Warning: When specified as a hostname a chicken/egg situation might arise where the database is needed to resolve the IP address of the database. It is best to supply an IP address of the database here. + # # port: "" # The port to connect to on gmysql-host. Default: 3306. + # # socket: "" # Connect to the UNIX socket at this path. Mutually exclusive with gmysql-host. + # # dbname: "" # Name of the database to connect to. Default: “powerdns”. + # # user: "" # User to connect as. Default: “powerdns”. + # # group: "" # Group to connect as. Default: “client”. + # # password: "" # The password for gmysql-user. + # # dnssec: "" # Enable DNSSEC processing for this backend. Default: no. + # # innodb-read-committed: "" # Use the InnoDB READ-COMMITTED transaction isolation level. Default: yes. + # # ssl: "" # Deprecated since version 5.0.0. + # # timeout: "" # The timeout in seconds for each attempt to read from, or write to the server. A value of 0 will disable the timeout. 
Default: 10 + # # thread-cleanup: "" # Only enable this if you are certain you need to + + # - name: lmdb + # filename: /var/lib/powerdns/pdns.lmdb + # shards: 64 + # sync-mode: nometasync + # # schema-version: 5 + # random-ids: true + # map-size: 16000 + # # flag-deleted: + # # lightning-stream: diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pdns/files/archlinux/bind-dnssec.schema.sqlite3.sql b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pdns/files/archlinux/bind-dnssec.schema.sqlite3.sql new file mode 100644 index 0000000..afef620 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pdns/files/archlinux/bind-dnssec.schema.sqlite3.sql @@ -0,0 +1,28 @@ +create table domainmetadata ( + id INTEGER PRIMARY KEY, + domain VARCHAR(255) COLLATE NOCASE, + kind VARCHAR(32) COLLATE NOCASE, + content TEXT +); + +create index domainmetanameindex on domainmetadata(domain); + +create table cryptokeys ( + id INTEGER PRIMARY KEY, + domain VARCHAR(255) COLLATE NOCASE, + flags INT NOT NULL, + active BOOL, + published BOOL DEFAULT 1, + content TEXT +); + +create index domainnameindex on cryptokeys(domain); + +create table tsigkeys ( + id INTEGER PRIMARY KEY, + name VARCHAR(255) COLLATE NOCASE, + algorithm VARCHAR(50) COLLATE NOCASE, + secret VARCHAR(255) +); + +create unique index namealgoindex on tsigkeys(name, algorithm); diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pdns/files/archlinux/schema.mysql.sql b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pdns/files/archlinux/schema.mysql.sql new file mode 100644 index 0000000..322a0ff --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pdns/files/archlinux/schema.mysql.sql @@ -0,0 +1,108 @@ +CREATE TABLE domains ( + id INT AUTO_INCREMENT, + name VARCHAR(255) NOT NULL, + master VARCHAR(128) DEFAULT NULL, + last_check INT DEFAULT NULL, + type VARCHAR(8) NOT NULL, + notified_serial INT UNSIGNED DEFAULT NULL, + account VARCHAR(40) CHARACTER SET 'utf8' DEFAULT NULL, + options VARCHAR(64000) DEFAULT NULL, + catalog VARCHAR(255) DEFAULT NULL, + PRIMARY KEY (id) +) Engine=InnoDB CHARACTER SET 'latin1'; + +CREATE UNIQUE INDEX name_index ON domains(name); +CREATE INDEX catalog_idx ON domains(catalog); + + +CREATE TABLE records ( + id BIGINT AUTO_INCREMENT, + domain_id INT DEFAULT NULL, + name VARCHAR(255) DEFAULT NULL, + type VARCHAR(10) DEFAULT NULL, + content VARCHAR(64000) DEFAULT NULL, + ttl INT DEFAULT NULL, + prio INT DEFAULT NULL, + disabled TINYINT(1) DEFAULT 0, + ordername VARCHAR(255) BINARY DEFAULT NULL, + auth TINYINT(1) DEFAULT 1, + PRIMARY KEY (id) +) Engine=InnoDB CHARACTER SET 'latin1'; + +CREATE INDEX nametype_index ON records(name,type); +CREATE INDEX domain_id ON records(domain_id); +CREATE INDEX ordername ON records (ordername); + + +CREATE TABLE supermasters ( + ip VARCHAR(64) NOT NULL, + nameserver VARCHAR(255) NOT NULL, + account VARCHAR(40) CHARACTER SET 'utf8' NOT NULL, + PRIMARY KEY (ip, nameserver) +) Engine=InnoDB CHARACTER SET 'latin1'; + + +CREATE TABLE comments ( + id INT AUTO_INCREMENT, + domain_id INT NOT NULL, + name VARCHAR(255) NOT NULL, + type VARCHAR(10) NOT NULL, + modified_at INT NOT NULL, + account VARCHAR(40) CHARACTER SET 'utf8' DEFAULT NULL, + comment TEXT CHARACTER SET 'utf8' NOT NULL, + PRIMARY KEY (id) +) Engine=InnoDB CHARACTER SET 'latin1'; + +CREATE INDEX comments_name_type_idx ON comments (name, type); +CREATE INDEX comments_order_idx ON 
comments (domain_id, modified_at); + + +CREATE TABLE domainmetadata ( + id INT AUTO_INCREMENT, + domain_id INT NOT NULL, + kind VARCHAR(32), + content TEXT, + PRIMARY KEY (id) +) Engine=InnoDB CHARACTER SET 'latin1'; + +CREATE INDEX domainmetadata_idx ON domainmetadata (domain_id, kind); + + +CREATE TABLE cryptokeys ( + id INT AUTO_INCREMENT, + domain_id INT NOT NULL, + flags INT NOT NULL, + active BOOL, + published BOOL DEFAULT 1, + content TEXT, + PRIMARY KEY(id) +) Engine=InnoDB CHARACTER SET 'latin1'; + +CREATE INDEX domainidindex ON cryptokeys(domain_id); + + +CREATE TABLE tsigkeys ( + id INT AUTO_INCREMENT, + name VARCHAR(255), + algorithm VARCHAR(50), + secret VARCHAR(255), + PRIMARY KEY (id) +) Engine=InnoDB CHARACTER SET 'latin1'; + +CREATE UNIQUE INDEX namealgoindex ON tsigkeys(name, algorithm); + +--- +--- Using this SQL causes Mysql to create foreign keys on your database. This will +--- make sure that no records, comments or keys exists for domains that you already +--- removed. This is not enabled by default, because we're not sure what the +--- consequences are from a performance point of view. If you do have feedback, +--- please let us know how this affects your setup. +--- +--- Please note that it's not possible to apply this, before you cleaned up your +--- database, as the foreign keys do not exist. +--- +ALTER TABLE records ADD CONSTRAINT `records_domain_id_ibfk` FOREIGN KEY (`domain_id`) REFERENCES `domains` (`id`) ON DELETE CASCADE ON UPDATE CASCADE; +ALTER TABLE comments ADD CONSTRAINT `comments_domain_id_ibfk` FOREIGN KEY (`domain_id`) REFERENCES `domains` (`id`) ON DELETE CASCADE ON UPDATE CASCADE; +ALTER TABLE domainmetadata ADD CONSTRAINT `domainmetadata_domain_id_ibfk` FOREIGN KEY (`domain_id`) REFERENCES `domains` (`id`) ON DELETE CASCADE ON UPDATE CASCADE; +ALTER TABLE cryptokeys ADD CONSTRAINT `cryptokeys_domain_id_ibfk` FOREIGN KEY (`domain_id`) REFERENCES `domains` (`id`) ON DELETE CASCADE ON UPDATE CASCADE; + diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pdns/files/archlinux/schema.sqlite3.sql b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pdns/files/archlinux/schema.sqlite3.sql new file mode 100644 index 0000000..b34e3a4 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pdns/files/archlinux/schema.sqlite3.sql @@ -0,0 +1,93 @@ +PRAGMA foreign_keys = 1; + +CREATE TABLE domains ( + id INTEGER PRIMARY KEY, + name VARCHAR(255) NOT NULL COLLATE NOCASE, + master VARCHAR(128) DEFAULT NULL, + last_check INTEGER DEFAULT NULL, + type VARCHAR(8) NOT NULL, + notified_serial INTEGER DEFAULT NULL, + account VARCHAR(40) DEFAULT NULL, + options VARCHAR(65535) DEFAULT NULL, + catalog VARCHAR(255) DEFAULT NULL +); + +CREATE UNIQUE INDEX name_index ON domains(name); +CREATE INDEX catalog_idx ON domains(catalog); + + +CREATE TABLE records ( + id INTEGER PRIMARY KEY, + domain_id INTEGER DEFAULT NULL, + name VARCHAR(255) DEFAULT NULL, + type VARCHAR(10) DEFAULT NULL, + content VARCHAR(65535) DEFAULT NULL, + ttl INTEGER DEFAULT NULL, + prio INTEGER DEFAULT NULL, + disabled BOOLEAN DEFAULT 0, + ordername VARCHAR(255), + auth BOOL DEFAULT 1, + FOREIGN KEY(domain_id) REFERENCES domains(id) ON DELETE CASCADE ON UPDATE CASCADE +); + +CREATE INDEX records_lookup_idx ON records(name, type); +CREATE INDEX records_lookup_id_idx ON records(domain_id, name, type); +CREATE INDEX records_order_idx ON records(domain_id, ordername); + + +CREATE TABLE supermasters ( + ip VARCHAR(64) NOT NULL, + 
nameserver VARCHAR(255) NOT NULL COLLATE NOCASE, + account VARCHAR(40) NOT NULL +); + +CREATE UNIQUE INDEX ip_nameserver_pk ON supermasters(ip, nameserver); + + +CREATE TABLE comments ( + id INTEGER PRIMARY KEY, + domain_id INTEGER NOT NULL, + name VARCHAR(255) NOT NULL, + type VARCHAR(10) NOT NULL, + modified_at INT NOT NULL, + account VARCHAR(40) DEFAULT NULL, + comment VARCHAR(65535) NOT NULL, + FOREIGN KEY(domain_id) REFERENCES domains(id) ON DELETE CASCADE ON UPDATE CASCADE +); + +CREATE INDEX comments_idx ON comments(domain_id, name, type); +CREATE INDEX comments_order_idx ON comments (domain_id, modified_at); + + +CREATE TABLE domainmetadata ( + id INTEGER PRIMARY KEY, + domain_id INT NOT NULL, + kind VARCHAR(32) COLLATE NOCASE, + content TEXT, + FOREIGN KEY(domain_id) REFERENCES domains(id) ON DELETE CASCADE ON UPDATE CASCADE +); + +CREATE INDEX domainmetaidindex ON domainmetadata(domain_id); + + +CREATE TABLE cryptokeys ( + id INTEGER PRIMARY KEY, + domain_id INT NOT NULL, + flags INT NOT NULL, + active BOOL, + published BOOL DEFAULT 1, + content TEXT, + FOREIGN KEY(domain_id) REFERENCES domains(id) ON DELETE CASCADE ON UPDATE CASCADE +); + +CREATE INDEX domainidindex ON cryptokeys(domain_id); + + +CREATE TABLE tsigkeys ( + id INTEGER PRIMARY KEY, + name VARCHAR(255) COLLATE NOCASE, + algorithm VARCHAR(50) COLLATE NOCASE, + secret VARCHAR(255) +); + +CREATE UNIQUE INDEX namealgoindex ON tsigkeys(name, algorithm); diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pdns/files/bind-dnssec.schema.sqlite3.sql b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pdns/files/bind-dnssec.schema.sqlite3.sql new file mode 100644 index 0000000..afef620 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pdns/files/bind-dnssec.schema.sqlite3.sql @@ -0,0 +1,28 @@ +create table domainmetadata ( + id INTEGER PRIMARY KEY, + domain VARCHAR(255) COLLATE NOCASE, + kind VARCHAR(32) COLLATE NOCASE, + content TEXT +); + +create index domainmetanameindex on domainmetadata(domain); + +create table cryptokeys ( + id INTEGER PRIMARY KEY, + domain VARCHAR(255) COLLATE NOCASE, + flags INT NOT NULL, + active BOOL, + published BOOL DEFAULT 1, + content TEXT +); + +create index domainnameindex on cryptokeys(domain); + +create table tsigkeys ( + id INTEGER PRIMARY KEY, + name VARCHAR(255) COLLATE NOCASE, + algorithm VARCHAR(50) COLLATE NOCASE, + secret VARCHAR(255) +); + +create unique index namealgoindex on tsigkeys(name, algorithm); diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pdns/files/schema.mysql.sql b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pdns/files/schema.mysql.sql new file mode 100644 index 0000000..0f3a6cc --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pdns/files/schema.mysql.sql @@ -0,0 +1,92 @@ +CREATE TABLE domains ( + id INT AUTO_INCREMENT, + name VARCHAR(255) NOT NULL, + master VARCHAR(128) DEFAULT NULL, + last_check INT DEFAULT NULL, + type VARCHAR(8) NOT NULL, + notified_serial INT UNSIGNED DEFAULT NULL, + account VARCHAR(40) CHARACTER SET 'utf8' DEFAULT NULL, + options VARCHAR(64000) DEFAULT NULL, + catalog VARCHAR(255) DEFAULT NULL, + PRIMARY KEY (id) +) Engine=InnoDB CHARACTER SET 'latin1'; + +CREATE UNIQUE INDEX name_index ON domains(name); +CREATE INDEX catalog_idx ON domains(catalog); + + +CREATE TABLE records ( + id BIGINT AUTO_INCREMENT, + domain_id INT DEFAULT NULL, + name VARCHAR(255) DEFAULT NULL, + type 
VARCHAR(10) DEFAULT NULL, + content VARCHAR(64000) DEFAULT NULL, + ttl INT DEFAULT NULL, + prio INT DEFAULT NULL, + disabled TINYINT(1) DEFAULT 0, + ordername VARCHAR(255) BINARY DEFAULT NULL, + auth TINYINT(1) DEFAULT 1, + PRIMARY KEY (id) +) Engine=InnoDB CHARACTER SET 'latin1'; + +CREATE INDEX nametype_index ON records(name,type); +CREATE INDEX domain_id ON records(domain_id); +CREATE INDEX ordername ON records (ordername); + + +CREATE TABLE supermasters ( + ip VARCHAR(64) NOT NULL, + nameserver VARCHAR(255) NOT NULL, + account VARCHAR(40) CHARACTER SET 'utf8' NOT NULL, + PRIMARY KEY (ip, nameserver) +) Engine=InnoDB CHARACTER SET 'latin1'; + + +CREATE TABLE comments ( + id INT AUTO_INCREMENT, + domain_id INT NOT NULL, + name VARCHAR(255) NOT NULL, + type VARCHAR(10) NOT NULL, + modified_at INT NOT NULL, + account VARCHAR(40) CHARACTER SET 'utf8' DEFAULT NULL, + comment TEXT CHARACTER SET 'utf8' NOT NULL, + PRIMARY KEY (id) +) Engine=InnoDB CHARACTER SET 'latin1'; + +CREATE INDEX comments_name_type_idx ON comments (name, type); +CREATE INDEX comments_order_idx ON comments (domain_id, modified_at); + + +CREATE TABLE domainmetadata ( + id INT AUTO_INCREMENT, + domain_id INT NOT NULL, + kind VARCHAR(32), + content TEXT, + PRIMARY KEY (id) +) Engine=InnoDB CHARACTER SET 'latin1'; + +CREATE INDEX domainmetadata_idx ON domainmetadata (domain_id, kind); + + +CREATE TABLE cryptokeys ( + id INT AUTO_INCREMENT, + domain_id INT NOT NULL, + flags INT NOT NULL, + active BOOL, + published BOOL DEFAULT 1, + content TEXT, + PRIMARY KEY(id) +) Engine=InnoDB CHARACTER SET 'latin1'; + +CREATE INDEX domainidindex ON cryptokeys(domain_id); + + +CREATE TABLE tsigkeys ( + id INT AUTO_INCREMENT, + name VARCHAR(255), + algorithm VARCHAR(50), + secret VARCHAR(255), + PRIMARY KEY (id) +) Engine=InnoDB CHARACTER SET 'latin1'; + +CREATE UNIQUE INDEX namealgoindex ON tsigkeys(name, algorithm); diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pdns/files/schema.pgsql.sql b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pdns/files/schema.pgsql.sql new file mode 100644 index 0000000..10f542c --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pdns/files/schema.pgsql.sql @@ -0,0 +1,98 @@ +CREATE TABLE domains ( + id SERIAL PRIMARY KEY, + name VARCHAR(255) NOT NULL, + master VARCHAR(128) DEFAULT NULL, + last_check INT DEFAULT NULL, + type TEXT NOT NULL, + notified_serial BIGINT DEFAULT NULL, + account VARCHAR(40) DEFAULT NULL, + options TEXT DEFAULT NULL, + catalog TEXT DEFAULT NULL, + CONSTRAINT c_lowercase_name CHECK (((name)::TEXT = LOWER((name)::TEXT))) +); + +CREATE UNIQUE INDEX name_index ON domains(name); +CREATE INDEX catalog_idx ON domains(catalog); + + +CREATE TABLE records ( + id BIGSERIAL PRIMARY KEY, + domain_id INT DEFAULT NULL, + name VARCHAR(255) DEFAULT NULL, + type VARCHAR(10) DEFAULT NULL, + content VARCHAR(65535) DEFAULT NULL, + ttl INT DEFAULT NULL, + prio INT DEFAULT NULL, + disabled BOOL DEFAULT 'f', + ordername VARCHAR(255), + auth BOOL DEFAULT 't', + CONSTRAINT domain_exists + FOREIGN KEY(domain_id) REFERENCES domains(id) + ON DELETE CASCADE, + CONSTRAINT c_lowercase_name CHECK (((name)::TEXT = LOWER((name)::TEXT))) +); + +CREATE INDEX rec_name_index ON records(name); +CREATE INDEX nametype_index ON records(name,type); +CREATE INDEX domain_id ON records(domain_id); +CREATE INDEX recordorder ON records (domain_id, ordername text_pattern_ops); + + +CREATE TABLE supermasters ( + ip INET NOT NULL, + nameserver 
VARCHAR(255) NOT NULL, + account VARCHAR(40) NOT NULL, + PRIMARY KEY(ip, nameserver) +); + + +CREATE TABLE comments ( + id SERIAL PRIMARY KEY, + domain_id INT NOT NULL, + name VARCHAR(255) NOT NULL, + type VARCHAR(10) NOT NULL, + modified_at INT NOT NULL, + account VARCHAR(40) DEFAULT NULL, + comment VARCHAR(65535) NOT NULL, + CONSTRAINT domain_exists + FOREIGN KEY(domain_id) REFERENCES domains(id) + ON DELETE CASCADE, + CONSTRAINT c_lowercase_name CHECK (((name)::TEXT = LOWER((name)::TEXT))) +); + +CREATE INDEX comments_domain_id_idx ON comments (domain_id); +CREATE INDEX comments_name_type_idx ON comments (name, type); +CREATE INDEX comments_order_idx ON comments (domain_id, modified_at); + + +CREATE TABLE domainmetadata ( + id SERIAL PRIMARY KEY, + domain_id INT REFERENCES domains(id) ON DELETE CASCADE, + kind VARCHAR(32), + content TEXT +); + +CREATE INDEX domainidmetaindex ON domainmetadata(domain_id); + + +CREATE TABLE cryptokeys ( + id SERIAL PRIMARY KEY, + domain_id INT REFERENCES domains(id) ON DELETE CASCADE, + flags INT NOT NULL, + active BOOL, + published BOOL DEFAULT TRUE, + content TEXT +); + +CREATE INDEX domainidindex ON cryptokeys(domain_id); + + +CREATE TABLE tsigkeys ( + id SERIAL PRIMARY KEY, + name VARCHAR(255), + algorithm VARCHAR(50), + secret VARCHAR(255), + CONSTRAINT c_lowercase_name CHECK (((name)::TEXT = LOWER((name)::TEXT))) +); + +CREATE UNIQUE INDEX namealgoindex ON tsigkeys(name, algorithm); diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pdns/files/schema.sqlite3.sql b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pdns/files/schema.sqlite3.sql new file mode 100644 index 0000000..b34e3a4 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pdns/files/schema.sqlite3.sql @@ -0,0 +1,93 @@ +PRAGMA foreign_keys = 1; + +CREATE TABLE domains ( + id INTEGER PRIMARY KEY, + name VARCHAR(255) NOT NULL COLLATE NOCASE, + master VARCHAR(128) DEFAULT NULL, + last_check INTEGER DEFAULT NULL, + type VARCHAR(8) NOT NULL, + notified_serial INTEGER DEFAULT NULL, + account VARCHAR(40) DEFAULT NULL, + options VARCHAR(65535) DEFAULT NULL, + catalog VARCHAR(255) DEFAULT NULL +); + +CREATE UNIQUE INDEX name_index ON domains(name); +CREATE INDEX catalog_idx ON domains(catalog); + + +CREATE TABLE records ( + id INTEGER PRIMARY KEY, + domain_id INTEGER DEFAULT NULL, + name VARCHAR(255) DEFAULT NULL, + type VARCHAR(10) DEFAULT NULL, + content VARCHAR(65535) DEFAULT NULL, + ttl INTEGER DEFAULT NULL, + prio INTEGER DEFAULT NULL, + disabled BOOLEAN DEFAULT 0, + ordername VARCHAR(255), + auth BOOL DEFAULT 1, + FOREIGN KEY(domain_id) REFERENCES domains(id) ON DELETE CASCADE ON UPDATE CASCADE +); + +CREATE INDEX records_lookup_idx ON records(name, type); +CREATE INDEX records_lookup_id_idx ON records(domain_id, name, type); +CREATE INDEX records_order_idx ON records(domain_id, ordername); + + +CREATE TABLE supermasters ( + ip VARCHAR(64) NOT NULL, + nameserver VARCHAR(255) NOT NULL COLLATE NOCASE, + account VARCHAR(40) NOT NULL +); + +CREATE UNIQUE INDEX ip_nameserver_pk ON supermasters(ip, nameserver); + + +CREATE TABLE comments ( + id INTEGER PRIMARY KEY, + domain_id INTEGER NOT NULL, + name VARCHAR(255) NOT NULL, + type VARCHAR(10) NOT NULL, + modified_at INT NOT NULL, + account VARCHAR(40) DEFAULT NULL, + comment VARCHAR(65535) NOT NULL, + FOREIGN KEY(domain_id) REFERENCES domains(id) ON DELETE CASCADE ON UPDATE CASCADE +); + +CREATE INDEX comments_idx ON comments(domain_id, name, type); +CREATE INDEX 
comments_order_idx ON comments (domain_id, modified_at); + + +CREATE TABLE domainmetadata ( + id INTEGER PRIMARY KEY, + domain_id INT NOT NULL, + kind VARCHAR(32) COLLATE NOCASE, + content TEXT, + FOREIGN KEY(domain_id) REFERENCES domains(id) ON DELETE CASCADE ON UPDATE CASCADE +); + +CREATE INDEX domainmetaidindex ON domainmetadata(domain_id); + + +CREATE TABLE cryptokeys ( + id INTEGER PRIMARY KEY, + domain_id INT NOT NULL, + flags INT NOT NULL, + active BOOL, + published BOOL DEFAULT 1, + content TEXT, + FOREIGN KEY(domain_id) REFERENCES domains(id) ON DELETE CASCADE ON UPDATE CASCADE +); + +CREATE INDEX domainidindex ON cryptokeys(domain_id); + + +CREATE TABLE tsigkeys ( + id INTEGER PRIMARY KEY, + name VARCHAR(255) COLLATE NOCASE, + algorithm VARCHAR(50) COLLATE NOCASE, + secret VARCHAR(255) +); + +CREATE UNIQUE INDEX namealgoindex ON tsigkeys(name, algorithm); diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pdns/handlers/main.yml b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pdns/handlers/main.yml new file mode 100644 index 0000000..d95af7a --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pdns/handlers/main.yml @@ -0,0 +1,27 @@ +--- + +- name: systemctl daemon-reload + become: true + ansible.builtin.systemd: + daemon_reload: true + force: true + when: + - ansible_facts.service_mgr | lower == "systemd" + +- name: reload pdns + become: true + ansible.builtin.service: + name: "{{ pdns_service.name }}" + state: reloaded + when: + - not running_in_check_mode + +- name: restart pdns + become: true + ansible.builtin.service: + name: "{{ pdns_service.name }}" + state: restarted + when: + - not running_in_check_mode + +... diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pdns/meta/main.yml b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pdns/meta/main.yml new file mode 100644 index 0000000..36cfaf7 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pdns/meta/main.yml @@ -0,0 +1,29 @@ +--- + +galaxy_info: + role_name: pdns + + author: Bodo Schulz + description: Ansible role to install and configure the PowerDNS Authoritative DNS Server + + license: Apache + min_ansible_version: "2.12" + + platforms: + - name: ArchLinux + - name: Debian + versions: + # 11 + - bullseye + - bookworm + - name: Ubuntu + versions: + # 20.04 + - focal + + galaxy_tags: + - system + - dns + - pdns + - powerdns + - auth diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pdns/molecule/backend-lmdb/converge.yml b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pdns/molecule/backend-lmdb/converge.yml new file mode 100644 index 0000000..05aed5a --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pdns/molecule/backend-lmdb/converge.yml @@ -0,0 +1,12 @@ +--- + +- name: converge + hosts: all + any_errors_fatal: true + gather_facts: true + + environment: + NETRC: '' + + roles: + - role: bodsch.dns.pdns diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pdns/molecule/backend-lmdb/group_vars/all/vars.yml b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pdns/molecule/backend-lmdb/group_vars/all/vars.yml new file mode 100644 index 0000000..cb9486e --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pdns/molecule/backend-lmdb/group_vars/all/vars.yml @@ -0,0 +1,39 @@ +--- + +pdns_backends: + - 
name: lmdb + filename: /var/lib/powerdns/pdns.lmdb + shards: 64 + sync-mode: nometasync + # schema-version: 5 + random-ids: true + map-size: 16000 + # flag-deleted: + # lightning-stream: + +pdns_config: + master: true + slave: false + also-notify: "" + local-address: '127.0.0.1' + local-port: '5300' + log-dns-details: true + loglevel: "5" # 0 = emergency, 1 = alert, 2 = critical, 3 = error, 4 = warning, 5 = notice, 6 = info, 7 = debug + +pdns_webserver: + enabled: false + address: "{{ ansible_facts.default_ipv4.address }}" + allow-from: + - "127.0.0.1" + - "::1" + - "10.11.0.0/24" + - "192.168.0.0/24" + connection-timeout: 5 + hash-plaintext-credentials: false + loglevel: normal + max-bodysize: 2 + password: + port: 8081 + print-arguments: true + +... diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pdns/molecule/backend-lmdb/molecule.yml b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pdns/molecule/backend-lmdb/molecule.yml new file mode 100644 index 0000000..068d236 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pdns/molecule/backend-lmdb/molecule.yml @@ -0,0 +1,70 @@ +--- + +role_name_check: 1 + +dependency: + name: galaxy + +driver: + name: docker + +platforms: + - name: instance + image: "ghcr.io/bodsch/docker-ansible/ansible-${DISTRIBUTION:-debian:12}" + command: ${MOLECULE_DOCKER_COMMAND:-""} + docker_host: "${DOCKER_HOST:-unix:///run/docker.sock}" + privileged: true + pre_build_image: true + cgroupns_mode: host + volumes: + - /sys/fs/cgroup:/sys/fs/cgroup:rw + - /var/lib/containerd + capabilities: + - SYS_ADMIN + tmpfs: + - /run + - /tmp + published_ports: + - 8081:8081 + tty: true + environment: + container: docker + groups: + - dns + docker_networks: + - name: bind + ipam_config: + - subnet: "10.11.0.0/24" + gateway: "10.11.0.254" + networks: + - name: bind + ipv4_address: "10.11.0.1" + +provisioner: + name: ansible + ansible_args: + - --diff + - -v + config_options: + defaults: + deprecation_warnings: true + callback_result_format: yaml + callbacks_enabled: profile_tasks + gathering: smart + fact_caching: jsonfile + fact_caching_timeout: 8640 + fact_caching_connection: ansible_facts + +scenario: + test_sequence: + - destroy + - dependency + - syntax + - create + - prepare + - converge + - verify + - destroy + +verifier: + name: testinfra diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pdns/molecule/backend-lmdb/prepare.yml b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pdns/molecule/backend-lmdb/prepare.yml new file mode 100644 index 0000000..34b7c5e --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pdns/molecule/backend-lmdb/prepare.yml @@ -0,0 +1,63 @@ +--- + +- name: information + hosts: all + gather_facts: true + + pre_tasks: + - name: arch- / artixlinux + when: + - ansible_facts.distribution | lower == 'archlinux' or + ansible_facts.os_family | lower == 'artix linux' + block: + - name: update pacman system + become: true + ansible.builtin.command: + argv: + - pacman + - --refresh + - --sync + - --sysupgrade + - --noconfirm + register: pacman + changed_when: pacman.rc != 0 + failed_when: pacman.rc != 0 + + - name: create depends service + ansible.builtin.copy: + mode: 0755 + dest: /etc/init.d/net + content: | + #!/usr/bin/openrc-run + true + when: + - ansible_facts.os_family | lower == 'artix linux' + + - name: make sure python3-apt is installed (only debian based) + 
ansible.builtin.package: + name: + - python3-apt + state: present + when: + - ansible_facts.os_family | lower == 'debian' + + - name: update package cache + become: true + ansible.builtin.package: + update_cache: true + + - name: install dependencies + ansible.builtin.package: + name: + - iproute2 + state: present + + - name: environment + ansible.builtin.debug: + msg: + - "os family : {{ ansible_facts.distribution }} ({{ ansible_facts.os_family }})" + - "distribution version : {{ ansible_facts.distribution_major_version }}" + - "ansible version : {{ ansible_version.full }}" + - "python version : {{ ansible_facts.python.version.major }}.{{ ansible_facts.python.version.minor }}" + +... diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pdns/molecule/backend-lmdb/tests/test_default.py b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pdns/molecule/backend-lmdb/tests/test_default.py new file mode 100644 index 0000000..c3b1595 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pdns/molecule/backend-lmdb/tests/test_default.py @@ -0,0 +1,331 @@ +from __future__ import annotations + +import json +import os +import re +from pathlib import Path +from typing import Any, Dict, List, Mapping, Optional, Sequence + +import pytest +from ansible.parsing.dataloader import DataLoader +from jinja2 import ChainableUndefined +from jinja2.nativetypes import NativeEnvironment + +# --- helper ---------------------------------------------------------------- + + +def pp_json(json_thing, sort=True, indents=2): + + if type(json_thing) is str: + print(json.dumps(json.loads(json_thing), sort_keys=sort, indent=indents)) + else: + print(json.dumps(json_thing, sort_keys=sort, indent=indents)) + + return None + + +# --- paths ----------------------------------------------------------------- + + +def base_directory() -> tuple[Path, Path]: + """ + Returns: + role_dir: role root (contains defaults/, vars/, tasks/, ...) + scenario_dir: molecule scenario dir (contains group_vars/, ...) + """ + cwd = Path.cwd() + + # depending on tox/molecule, pytest runs either in scenario/tests or in the role root + if (cwd / "group_vars").is_dir(): + # .../molecule//tests -> role root is ../.. + return (cwd / "../..").resolve(), cwd.resolve() + + scenario = os.environ.get("MOLECULE_SCENARIO_NAME", "default") + return cwd.resolve(), (cwd / "molecule" / scenario).resolve() + + +def _normalize_os(distribution: str) -> Optional[str]: + d = (distribution or "").strip().lower() + if d in ("debian", "ubuntu"): + return "debian" + if d in ("arch", "artix"): + return f"{d}linux" + return None + + +# --- load vars files (YAML) ------------------------------------------------ + + +def _load_vars_file(loader: DataLoader, file_base: Path) -> Dict[str, Any]: + """ + Pass file_base without an extension, e.g. role_dir/'defaults'/'main'. + Loads main.yml or main.yaml via the Ansible DataLoader (Vault compatible). 
+ """ + for ext in ("yml", "yaml"): + p = file_base.with_suffix(f".{ext}") + if not p.is_file(): + continue + + data = loader.load_from_file(str(p)) + if data is None: + return {} + if not isinstance(data, dict): + raise TypeError(f"{p} must be a mapping/dict, got {type(data)}") + return data + + return {} + + +# --- jinja rendering (multi-pass) ------------------------------------------ + +_JINJA_MARKER = re.compile(r"({{.*?}}|{%-?.*?-%}|{#.*?#})", re.S) + + +def _find_unrendered_templates(obj: Any, prefix: str = "") -> List[str]: + found: List[str] = [] + + if isinstance(obj, str): + if _JINJA_MARKER.search(obj): + found.append(prefix or "") + return found + + if isinstance(obj, Mapping): + for k, v in obj.items(): + key = str(k) + found.extend( + _find_unrendered_templates(v, f"{prefix}.{key}" if prefix else key) + ) + return found + + if isinstance(obj, Sequence) and not isinstance(obj, (str, bytes, bytearray)): + for i, v in enumerate(obj): + found.extend(_find_unrendered_templates(v, f"{prefix}[{i}]")) + return found + + return found + + +def _make_jinja_env() -> NativeEnvironment: + """ + NativeEnvironment: gibt bei reinen Expressions native Typen zurück, + sonst Strings. Undefined ist 'chainable', damit ansible_facts.foo.bar + nicht hart explodiert, sondern Undefined liefert (ähnlich fail_on_undefined=False). + """ + env = NativeEnvironment(undefined=ChainableUndefined, autoescape=False) + + # Ansible-ähnliche lookup/query Minimalimplementierung (nur env erlaubt) + def _lookup(plugin: str, term: Any, *rest: Any, **kwargs: Any) -> Any: + if plugin != "env": + raise ValueError( + f"lookup('{plugin}', ...) not supported in tests (allowlist: env)" + ) + # Ansible lookup('env','X') -> '' wenn nicht gesetzt (damit default(..., true) greift) + if isinstance(term, (list, tuple)): + vals = [os.environ.get(str(t), "") for t in term] + return vals[0] if kwargs.get("wantlist") is False else vals + return os.environ.get(str(term), "") + + def _query(plugin: str, term: Any, *rest: Any, **kwargs: Any) -> List[Any]: + # query() ist wantlist=True + kwargs["wantlist"] = True + res = _lookup(plugin, term, *rest, **kwargs) + return res if isinstance(res, list) else [res] + + env.globals["lookup"] = _lookup + env.globals["query"] = _query + return env + + +def _render_obj( + env: NativeEnvironment, obj: Any, ctx: Dict[str, Any], *, skip_keys: frozenset[str] +) -> Any: + if isinstance(obj, str): + if not _JINJA_MARKER.search(obj): + return obj + tmpl = env.from_string(obj) + return tmpl.render(**ctx) + + if isinstance(obj, Mapping): + out: Dict[str, Any] = {} + for k, v in obj.items(): + ks = str(k) + if ks in skip_keys: + out[ks] = v + else: + out[ks] = _render_obj(env, v, ctx, skip_keys=skip_keys) + return out + + if isinstance(obj, list): + return [_render_obj(env, v, ctx, skip_keys=skip_keys) for v in obj] + + if isinstance(obj, tuple): + return tuple(_render_obj(env, v, ctx, skip_keys=skip_keys) for v in obj) + + return obj + + +def render_all_vars(data: Dict[str, Any], passes: int = 8) -> Dict[str, Any]: + """ + Multi-pass: damit Werte wie + system_architecture -> ..., + und danach defaults_release.file -> ...{{ system_architecture }}... + sauber aufgelöst werden. 
+ """ + env = _make_jinja_env() + + current: Dict[str, Any] = data + last_leftovers: Optional[List[str]] = None + + for _ in range(max(1, passes)): + # Kontext ist immer der aktuelle Stand + rendered = _render_obj( + env, current, current, skip_keys=frozenset({"ansible_facts"}) + ) + if not isinstance(rendered, dict): + raise TypeError(f"Rendered vars are not a dict anymore: {type(rendered)}") + + leftovers = _find_unrendered_templates(rendered) + if not leftovers: + return rendered + + # kein Fortschritt mehr + if leftovers == last_leftovers: + current = rendered + break + + last_leftovers = leftovers + current = rendered + + # optional: hart fehlschlagen, wenn noch Templates übrig sind (sonst wird es still falsch) + if os.environ.get("ANSIBLE_TEST_ALLOW_UNRESOLVED_TEMPLATES", "0") != "1": + leftovers = _find_unrendered_templates(current) + if leftovers: + raise AssertionError( + "Unresolved templates after rendering:\n- " + "\n- ".join(leftovers) + ) + + return current + + +# --- pytest fixture -------------------------------------------------------- + + +@pytest.fixture() +def get_vars(host) -> Dict[str, Any]: + role_dir, scenario_dir = base_directory() + + loader = DataLoader() + loader.set_basedir(str(role_dir)) + + distribution = getattr(host.system_info, "distribution", "") or "" + os_id = _normalize_os(distribution) + + merged: Dict[str, Any] = {} + merged.update(_load_vars_file(loader, role_dir / "defaults" / "main")) + merged.update(_load_vars_file(loader, role_dir / "vars" / "main")) + + if os_id: + merged.update(_load_vars_file(loader, role_dir / "vars" / os_id)) + + merged.update(_load_vars_file(loader, scenario_dir / "group_vars" / "all" / "vars")) + + # Facts als Input (keine Templates) + setup = host.ansible("setup") + facts = setup.get("ansible_facts", {}) if isinstance(setup, dict) else {} + if isinstance(facts, dict): + merged["ansible_facts"] = facts + merged.setdefault( + "ansible_system", facts.get("system") or facts.get("ansible_system") + ) + merged.setdefault( + "ansible_architecture", + facts.get("architecture") or facts.get("ansible_architecture"), + ) + + result = render_all_vars(merged, passes=8) + + return result + + +# --- tests ----------------------------------------------------------------- + + +def test_directories(host, get_vars): + """ + used config directory + """ + print(get_vars) + + directories = [ + "/etc/powerdns", + "/var/lib/powerdns", + "/var/spool/powerdns", + get_vars.get("pdns_config_include"), + ] + + for dirs in directories: + d = host.file(dirs) + assert d.is_directory + + +def test_files(host, get_vars): + """ + created config files + """ + files = [ + "/etc/powerdns/pdns.conf", + "/etc/powerdns/pdns.d/pdns_general.conf", + "/etc/powerdns/pdns.d/pdns_backends.conf", + "/etc/powerdns/pdns.d/pdns_webserver.conf", + "/etc/powerdns/pdns.d/pdns_api.conf", + "/etc/ansible/facts.d/pdns.fact", + "/usr/bin/pdnsutil", + ] + + for _file in files: + f = host.file(_file) + assert f.is_file + + +def test_lmbd_files(host, get_vars): + """ """ + + files = [ + "/var/lib/powerdns/pdns.lmdb", + "/var/lib/powerdns/pdns.lmdb-lock", + ] + + for _file in files: + f = host.file(_file) + assert f.is_file + + +def test_service_running_and_enabled(host, get_vars): + """ + running service + """ + service_name = get_vars.get("pdns_service").get("name", None) + + if service_name: + service = host.service(service_name) + assert service.is_running + assert service.is_enabled + + +def test_listening_socket(host, get_vars): + """ """ + listening = 
host.socket.get_listening_sockets()
+
+    for i in listening:
+        print(i)
+
+    bind_port = "5300"
+    bind_address = "127.0.0.1"
+
+    listen = []
+    listen.append(f"tcp://{bind_address}:{bind_port}")
+    listen.append(f"udp://{bind_address}:{bind_port}")
+
+    for spec in listen:
+        socket = host.socket(spec)
+        assert socket.is_listening diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pdns/molecule/backend-mysql/collections.yml b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pdns/molecule/backend-mysql/collections.yml new file mode 100644 index 0000000..3d5db19 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pdns/molecule/backend-mysql/collections.yml @@ -0,0 +1,6 @@ +---
+
+collections:
+  - name: bodsch.database
+
+... diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pdns/molecule/backend-mysql/converge.yml b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pdns/molecule/backend-mysql/converge.yml new file mode 100644 index 0000000..e19ee63 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pdns/molecule/backend-mysql/converge.yml @@ -0,0 +1,12 @@ +---
+
+- name: converge
+  hosts: instance
+  any_errors_fatal: true
+  gather_facts: true
+
+  environment:
+    NETRC: ''
+
+  roles:
+    - role: bodsch.dns.pdns diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pdns/molecule/backend-mysql/group_vars/all/vars.yml b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pdns/molecule/backend-mysql/group_vars/all/vars.yml new file mode 100644 index 0000000..51a8b2f --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pdns/molecule/backend-mysql/group_vars/all/vars.yml @@ -0,0 +1,34 @@ +---
+
+pdns_backends:
+  - name: gmysql
+    host: 10.11.0.10
+    dbname: pdns
+    user: pdns
+    password: "{{ vault__pdns.databases.pdns }}"
+    # # https://doc.powerdns.com/authoritative/backends/generic-mysql.html?highlight=gmysql#settings
+    # host: ""                   # Host (ip address) to connect to. Mutually exclusive with gmysql-socket.
+    #                            # Warning: When specified as a hostname a chicken/egg situation might arise where the database is needed to resolve the IP address of the database. It is best to supply an IP address of the database here.
+    # port: ""                   # The port to connect to on gmysql-host. Default: 3306.
+    # socket: ""                 # Connect to the UNIX socket at this path. Mutually exclusive with gmysql-host.
+    # dbname: ""                 # Name of the database to connect to. Default: “powerdns”.
+    # user: ""                   # User to connect as. Default: “powerdns”.
+    # group: ""                  # Group to connect as. Default: “client”.
+    # password: ""               # The password for gmysql-user.
+    # dnssec: ""                 # Enable DNSSEC processing for this backend. Default: no.
+    # innodb-read-committed: ""  # Use the InnoDB READ-COMMITTED transaction isolation level. Default: yes.
+    # ssl: ""                    # Deprecated since version 5.0.0.
+    # timeout: ""                # The timeout in seconds for each attempt to read from, or write to the server. A value of 0 will disable the timeout. Default: 10
+    # thread-cleanup: ""         # Only enable this if you are certain you need to
+    # credentials: {}
+
+pdns_config:
+  master: true
+  slave: false
+  also-notify: ""
+  local-address: '127.0.0.1'
+  local-port: '5300'
+  log-dns-details: true
+  loglevel: "3"
+
+...
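The gmysql values above should surface in /etc/powerdns/pdns.d/pdns_backends.conf, a file the scenario's tests already assert exists. A minimal testinfra sketch of a content-level follow-up check; the `launch=gmysql` / `gmysql-<key>=<value>` patterns are assumptions about how the role renders backend entries, not verified output:

```python
# Sketch only: content-level check for the rendered gmysql backend.
# The "launch=gmysql" / "gmysql-host=..." patterns are assumptions about
# how the role writes pdns_backends.conf.
def test_gmysql_backend_rendered(host):
    cfg = host.file("/etc/powerdns/pdns.d/pdns_backends.conf")
    assert cfg.contains("launch=gmysql")
    assert cfg.contains("gmysql-host=10.11.0.10")
    assert cfg.contains("gmysql-dbname=pdns")
```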
diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pdns/molecule/backend-mysql/group_vars/all/vault.yml b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pdns/molecule/backend-mysql/group_vars/all/vault.yml new file mode 100644 index 0000000..8162f5f --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pdns/molecule/backend-mysql/group_vars/all/vault.yml @@ -0,0 +1,8 @@ +--- + +vault__pdns: + databases: + root: root + pdns: powerdns + +... diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pdns/molecule/backend-mysql/host_vars/database/mariadb.yml b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pdns/molecule/backend-mysql/host_vars/database/mariadb.yml new file mode 100644 index 0000000..edd2565 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pdns/molecule/backend-mysql/host_vars/database/mariadb.yml @@ -0,0 +1,29 @@ +--- + +mariadb_system_users: + - username: root + password: "{{ vault__pdns.databases.root }}" + home: /root + update: true + ignore: true + +mariadb_databases: + - name: pdns + +mariadb_users: + - name: pdns + host: "%" + password: "{{ vault__pdns.databases.pdns }}" + priv: "pdns.*:ALL" + encrypted: false + +mariadb_config_mysqld: + bind_address: 0.0.0.0 + socket: "{{ mariadb_socket }}" + skip_external_locking: + skip_name_resolve: 1 + performance_schema: 1 + expire_logs_days: 2 + max_connections: 20 + +... diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pdns/molecule/backend-mysql/host_vars/database/vault.yml b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pdns/molecule/backend-mysql/host_vars/database/vault.yml new file mode 100644 index 0000000..8162f5f --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pdns/molecule/backend-mysql/host_vars/database/vault.yml @@ -0,0 +1,8 @@ +--- + +vault__pdns: + databases: + root: root + pdns: powerdns + +... 
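The host_vars above provision the `database` container that this scenario's molecule.yml pins to 10.11.0.10. The shipped verifier only targets the `instance` host; a sketch of an additional verifier module aimed at the database container instead, assuming MariaDB stays on its default port 3306 (the scenario sets `bind_address: 0.0.0.0` but no explicit port):

```python
import os

import testinfra.utils.ansible_runner

# Run these checks against the "database" container rather than "instance".
testinfra_hosts = testinfra.utils.ansible_runner.AnsibleRunner(
    os.environ["MOLECULE_INVENTORY_FILE"]
).get_hosts("database")


def test_mariadb_listening(host):
    # bind_address is 0.0.0.0 in host_vars; 3306 is MariaDB's default port
    # (an assumption here, since the scenario does not set it explicitly).
    assert host.socket("tcp://0.0.0.0:3306").is_listening
```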
diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pdns/molecule/backend-mysql/molecule.yml b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pdns/molecule/backend-mysql/molecule.yml new file mode 100644 index 0000000..d837d32 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pdns/molecule/backend-mysql/molecule.yml @@ -0,0 +1,85 @@ +--- + +role_name_check: 1 + +dependency: + name: galaxy + +driver: + name: docker + +platforms: + - name: instance + image: "ghcr.io/bodsch/docker-ansible/ansible-${DISTRIBUTION:-debian:12}" + command: ${MOLECULE_DOCKER_COMMAND:-""} + docker_host: "${DOCKER_HOST:-unix:///run/docker.sock}" + privileged: true + pre_build_image: true + cgroupns_mode: host + volumes: + - /sys/fs/cgroup:/sys/fs/cgroup:rw + - /var/lib/containerd + capabilities: + - SYS_ADMIN + tmpfs: + - /run + - /tmp + tty: true + environment: + container: docker + groups: + - dns + docker_networks: + - name: pdns + ipam_config: + - subnet: "10.11.0.0/24" + gateway: "10.11.0.254" + networks: + - name: pdns + ipv4_address: "10.11.0.1" + + - name: database + image: "ghcr.io/bodsch/docker-ansible/ansible-debian:13" + command: ${MOLECULE_DOCKER_COMMAND:-""} + docker_host: "${DOCKER_HOST:-unix:///run/docker.sock}" + privileged: true + pre_build_image: true + cgroupns_mode: host + volumes: + - /sys/fs/cgroup:/sys/fs/cgroup:rw + - /var/lib/containerd + capabilities: + - SYS_TIME + tmpfs: + - /run + - /tmp + networks: + - name: pdns + ipv4_address: "10.11.0.10" + +provisioner: + name: ansible + ansible_args: + - --diff + - -v + config_options: + defaults: + deprecation_warnings: true + callback_result_format: yaml + callbacks_enabled: profile_tasks + gathering: smart + fact_caching_timeout: 320 + +scenario: + test_sequence: + - destroy + - dependency + - create + - prepare + - converge + - idempotence + - verify + - destroy + +verifier: + name: testinfra diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pdns/molecule/backend-mysql/prepare.yml b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pdns/molecule/backend-mysql/prepare.yml new file mode 100644 index 0000000..383fe44 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pdns/molecule/backend-mysql/prepare.yml @@ -0,0 +1,69 @@ +--- + +- name: information + hosts: all + gather_facts: true + + pre_tasks: + - name: arch- / artixlinux + when: + - ansible_facts.distribution | lower == 'archlinux' or + ansible_facts.os_family | lower == 'artix linux' + block: + - name: update pacman system + become: true + ansible.builtin.command: + argv: + - pacman + - --refresh + - --sync + - --sysupgrade + - --noconfirm + register: pacman + changed_when: pacman.rc != 0 + failed_when: pacman.rc != 0 + + - name: create depends service + ansible.builtin.copy: + mode: 0755 + dest: /etc/init.d/net + content: | + #!/usr/bin/openrc-run + true + when: + - ansible_facts.os_family | lower == 'artix linux' + + - name: make sure python3-apt is installed (only debian based) + ansible.builtin.package: + name: + - python3-apt + state: present + when: + - ansible_facts.os_family | lower == 'debian' + + - name: update package cache + become: true + ansible.builtin.package: + update_cache: true + + - name: install dependencies + ansible.builtin.package: + name: + - iproute2 + state: present + + - name: environment + ansible.builtin.debug: + msg: + - "os family : {{ ansible_facts.distribution }} ({{ ansible_facts.os_family 
}})" + - "distribution version : {{ ansible_facts.distribution_major_version }}" + - "ansible version : {{ ansible_version.full }}" + - "python version : {{ ansible_facts.python.version.major }}.{{ ansible_facts.python.version.minor }}" + +- name: prepare database + hosts: database + gather_facts: true + + roles: + - role: bodsch.database.mariadb +... diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pdns/molecule/backend-mysql/tests/test_default.py b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pdns/molecule/backend-mysql/tests/test_default.py new file mode 100644 index 0000000..48966ca --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pdns/molecule/backend-mysql/tests/test_default.py @@ -0,0 +1,335 @@ +from __future__ import annotations + +import json +import os +import re +from pathlib import Path +from typing import Any, Dict, List, Mapping, Optional, Sequence + +import pytest +from ansible.parsing.dataloader import DataLoader + +import testinfra.utils.ansible_runner +from jinja2 import ChainableUndefined +from jinja2.nativetypes import NativeEnvironment + +testinfra_hosts = testinfra.utils.ansible_runner.AnsibleRunner( + os.environ["MOLECULE_INVENTORY_FILE"] + ).get_hosts("instance") + + +# --- helper ---------------------------------------------------------------- + + +def pp_json(json_thing, sort=True, indents=2): + + if type(json_thing) is str: + print(json.dumps(json.loads(json_thing), sort_keys=sort, indent=indents)) + else: + print(json.dumps(json_thing, sort_keys=sort, indent=indents)) + + return None + + +# --- paths ----------------------------------------------------------------- + + +def base_directory() -> tuple[Path, Path]: + """ + Returns: + role_dir: role root (contains defaults/, vars/, tasks/, ...) + scenario_dir: molecule scenario dir (contains group_vars/, ...) + """ + cwd = Path.cwd() + + # pytest läuft je nach tox/molecule entweder im scenario/tests oder im role-root + if (cwd / "group_vars").is_dir(): + # .../molecule//tests -> role root ist ../.. + return (cwd / "../..").resolve(), cwd.resolve() + + scenario = os.environ.get("MOLECULE_SCENARIO_NAME", "default") + return cwd.resolve(), (cwd / "molecule" / scenario).resolve() + + +def _normalize_os(distribution: str) -> Optional[str]: + d = (distribution or "").strip().lower() + if d in ("debian", "ubuntu"): + return "debian" + if d in ("arch", "artix"): + return f"{d}linux" + return None + + +# --- load vars files (YAML) ------------------------------------------------ + + +def _load_vars_file(loader: DataLoader, file_base: Path) -> Dict[str, Any]: + """ + file_base ohne Extension übergeben, z.B. role_dir/'defaults'/'main' + Lädt main.yml oder main.yaml via Ansible DataLoader (Vault kompatibel). 
+ """ + for ext in ("yml", "yaml"): + p = file_base.with_suffix(f".{ext}") + if not p.is_file(): + continue + + data = loader.load_from_file(str(p)) + if data is None: + return {} + if not isinstance(data, dict): + raise TypeError(f"{p} must be a mapping/dict, got {type(data)}") + return data + + return {} + + +# --- jinja rendering (multi-pass) ------------------------------------------ + +_JINJA_MARKER = re.compile(r"({{.*?}}|{%-?.*?-%}|{#.*?#})", re.S) + + +def _find_unrendered_templates(obj: Any, prefix: str = "") -> List[str]: + found: List[str] = [] + + if isinstance(obj, str): + if _JINJA_MARKER.search(obj): + found.append(prefix or "") + return found + + if isinstance(obj, Mapping): + for k, v in obj.items(): + key = str(k) + found.extend( + _find_unrendered_templates(v, f"{prefix}.{key}" if prefix else key) + ) + return found + + if isinstance(obj, Sequence) and not isinstance(obj, (str, bytes, bytearray)): + for i, v in enumerate(obj): + found.extend(_find_unrendered_templates(v, f"{prefix}[{i}]")) + return found + + return found + + +def _make_jinja_env() -> NativeEnvironment: + """ + NativeEnvironment: gibt bei reinen Expressions native Typen zurück, + sonst Strings. Undefined ist 'chainable', damit ansible_facts.foo.bar + nicht hart explodiert, sondern Undefined liefert (ähnlich fail_on_undefined=False). + """ + env = NativeEnvironment(undefined=ChainableUndefined, autoescape=False) + + # Ansible-ähnliche lookup/query Minimalimplementierung (nur env erlaubt) + def _lookup(plugin: str, term: Any, *rest: Any, **kwargs: Any) -> Any: + if plugin != "env": + raise ValueError( + f"lookup('{plugin}', ...) not supported in tests (allowlist: env)" + ) + # Ansible lookup('env','X') -> '' wenn nicht gesetzt (damit default(..., true) greift) + if isinstance(term, (list, tuple)): + vals = [os.environ.get(str(t), "") for t in term] + return vals[0] if kwargs.get("wantlist") is False else vals + return os.environ.get(str(term), "") + + def _query(plugin: str, term: Any, *rest: Any, **kwargs: Any) -> List[Any]: + # query() ist wantlist=True + kwargs["wantlist"] = True + res = _lookup(plugin, term, *rest, **kwargs) + return res if isinstance(res, list) else [res] + + env.globals["lookup"] = _lookup + env.globals["query"] = _query + return env + + +def _render_obj( + env: NativeEnvironment, obj: Any, ctx: Dict[str, Any], *, skip_keys: frozenset[str] +) -> Any: + if isinstance(obj, str): + if not _JINJA_MARKER.search(obj): + return obj + tmpl = env.from_string(obj) + return tmpl.render(**ctx) + + if isinstance(obj, Mapping): + out: Dict[str, Any] = {} + for k, v in obj.items(): + ks = str(k) + if ks in skip_keys: + out[ks] = v + else: + out[ks] = _render_obj(env, v, ctx, skip_keys=skip_keys) + return out + + if isinstance(obj, list): + return [_render_obj(env, v, ctx, skip_keys=skip_keys) for v in obj] + + if isinstance(obj, tuple): + return tuple(_render_obj(env, v, ctx, skip_keys=skip_keys) for v in obj) + + return obj + + +def render_all_vars(data: Dict[str, Any], passes: int = 8) -> Dict[str, Any]: + """ + Multi-pass: damit Werte wie + system_architecture -> ..., + und danach defaults_release.file -> ...{{ system_architecture }}... + sauber aufgelöst werden. 
+ """ + env = _make_jinja_env() + + current: Dict[str, Any] = data + last_leftovers: Optional[List[str]] = None + + for _ in range(max(1, passes)): + # Kontext ist immer der aktuelle Stand + rendered = _render_obj( + env, current, current, skip_keys=frozenset({"ansible_facts"}) + ) + if not isinstance(rendered, dict): + raise TypeError(f"Rendered vars are not a dict anymore: {type(rendered)}") + + leftovers = _find_unrendered_templates(rendered) + if not leftovers: + return rendered + + # kein Fortschritt mehr + if leftovers == last_leftovers: + current = rendered + break + + last_leftovers = leftovers + current = rendered + + # optional: hart fehlschlagen, wenn noch Templates übrig sind (sonst wird es still falsch) + if os.environ.get("ANSIBLE_TEST_ALLOW_UNRESOLVED_TEMPLATES", "0") != "1": + leftovers = _find_unrendered_templates(current) + if leftovers: + raise AssertionError( + "Unresolved templates after rendering:\n- " + "\n- ".join(leftovers) + ) + + return current + + +# --- pytest fixture -------------------------------------------------------- + + +@pytest.fixture() +def get_vars(host) -> Dict[str, Any]: + role_dir, scenario_dir = base_directory() + + loader = DataLoader() + loader.set_basedir(str(role_dir)) + + distribution = getattr(host.system_info, "distribution", "") or "" + os_id = _normalize_os(distribution) + + merged: Dict[str, Any] = {} + merged.update(_load_vars_file(loader, role_dir / "defaults" / "main")) + merged.update(_load_vars_file(loader, role_dir / "vars" / "main")) + + if os_id: + merged.update(_load_vars_file(loader, role_dir / "vars" / os_id)) + + merged.update(_load_vars_file(loader, scenario_dir / "group_vars" / "all" / "vars")) + + # Facts als Input (keine Templates) + setup = host.ansible("setup") + facts = setup.get("ansible_facts", {}) if isinstance(setup, dict) else {} + if isinstance(facts, dict): + merged["ansible_facts"] = facts + merged.setdefault( + "ansible_system", facts.get("system") or facts.get("ansible_system") + ) + merged.setdefault( + "ansible_architecture", + facts.get("architecture") or facts.get("ansible_architecture"), + ) + + result = render_all_vars(merged, passes=8) + + return result + + +# --- tests ----------------------------------------------------------------- + + +def test_directories(host, get_vars): + """ + used config directory + """ + distribution = host.system_info.distribution + release = host.system_info.release + + print(f"distribution: {distribution}") + print(f"release : {release}") + # print(get_vars) + + directories = [ + "/etc/powerdns", + get_vars.get("pdns_config_include"), + ] + + if distribution in ['arch', 'artix']: + directories.append("/usr/lib/powerdns") + + if distribution in ['debian', 'ubuntu']: + directories.append("/var/lib/powerdns",) + directories.append("/var/spool/powerdns") + + for dirs in directories: + d = host.file(dirs) + assert d.is_directory + + +def test_files(host, get_vars): + """ + created config files + """ + files = [ + "/etc/powerdns/pdns.conf", + "/etc/powerdns/pdns.d/pdns_general.conf", + "/etc/powerdns/pdns.d/pdns_backends.conf", + "/etc/powerdns/pdns.d/pdns_webserver.conf", + "/etc/powerdns/pdns.d/pdns_api.conf", + "/etc/ansible/facts.d/pdns.fact", + "/usr/bin/pdnsutil", + ] + + for _file in files: + f = host.file(_file) + assert f.is_file + + +def test_service_running_and_enabled(host, get_vars): + """ + running service + """ + service_name = get_vars.get("pdns_service").get("name", None) + + if service_name: + service = host.service(service_name) + assert service.is_running + 
assert service.is_enabled + + +def test_listening_socket(host, get_vars): + """ """ + listening = host.socket.get_listening_sockets() + + for i in listening: + print(i) + + bind_port = "5300" + bind_address = "127.0.0.1" + + listen = [] + listen.append(f"tcp://{bind_address}:{bind_port}") + listen.append(f"udp://{bind_address}:{bind_port}") + + for spec in listen: + socket = host.socket(spec) + assert socket.is_listening diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pdns/molecule/backend-sqlite/converge.yml b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pdns/molecule/backend-sqlite/converge.yml new file mode 100644 index 0000000..05aed5a --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pdns/molecule/backend-sqlite/converge.yml @@ -0,0 +1,12 @@ +--- + +- name: converge + hosts: all + any_errors_fatal: true + gather_facts: true + + environment: + NETRC: '' + + roles: + - role: bodsch.dns.pdns diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pdns/molecule/backend-sqlite/group_vars/all/vars.yml b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pdns/molecule/backend-sqlite/group_vars/all/vars.yml new file mode 100644 index 0000000..ab60e1b --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pdns/molecule/backend-sqlite/group_vars/all/vars.yml @@ -0,0 +1,40 @@ +--- + +pdns_backends: + - name: gsqlite3 + database: /var/lib/powerdns/pdns.db + dnssec: true + pragma-journal-mode: true + pragma-synchronous: true + pragma-foreign-keys: true + +pdns_config: + master: true + slave: false + also-notify: "" + local-address: '127.0.0.1' + local-port: '5300' + log-dns-details: true + loglevel: 6 # 0 = emergency, 1 = alert, 2 = critical, 3 = error, 4 = warning, 5 = notice, 6 = info, 7 = debug + +pdns_webserver: + enabled: true + address: "{{ ansible_facts.default_ipv4.address }}" + allow-from: + - "127.0.0.1" + - "::1" + - "10.11.0.0/24" + - "192.168.0.0/24" + connection-timeout: 5 + hash-plaintext-credentials: false + loglevel: normal # (none, normal, detailed) + max-bodysize: 2 + # password: + port: 8081 + print-arguments: false + +pdns_api: + enabled: true + key: tNSNNEiFxeDe/3nhqA + +... 
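With `pdns_webserver` and `pdns_api` enabled above, and the scenario's molecule.yml publishing port 8081, the API can be probed from the Docker host. A standard-library sketch: `/api/v1/servers/localhost` is the regular PowerDNS API entry point and the key comes from this scenario's group_vars, while `localhost:8081` assumes the `published_ports` mapping holds on the machine running molecule:

```python
import json
import urllib.request

# Probe the PowerDNS API with the key from this scenario's group_vars.
# localhost:8081 assumes the published_ports mapping from molecule.yml.
req = urllib.request.Request(
    "http://localhost:8081/api/v1/servers/localhost",
    headers={"X-API-Key": "tNSNNEiFxeDe/3nhqA"},
)
with urllib.request.urlopen(req, timeout=5) as resp:
    server = json.load(resp)

# "daemon_type" is expected to be "authoritative" for pdns
print(server["daemon_type"], server["version"])
```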
diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pdns/molecule/backend-sqlite/molecule.yml b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pdns/molecule/backend-sqlite/molecule.yml new file mode 100644 index 0000000..068d236 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pdns/molecule/backend-sqlite/molecule.yml @@ -0,0 +1,70 @@ +---
+
+role_name_check: 1
+
+dependency:
+  name: galaxy
+
+driver:
+  name: docker
+
+platforms:
+  - name: instance
+    image: "ghcr.io/bodsch/docker-ansible/ansible-${DISTRIBUTION:-debian:12}"
+    command: ${MOLECULE_DOCKER_COMMAND:-""}
+    docker_host: "${DOCKER_HOST:-unix:///run/docker.sock}"
+    privileged: true
+    pre_build_image: true
+    cgroupns_mode: host
+    volumes:
+      - /sys/fs/cgroup:/sys/fs/cgroup:rw
+      - /var/lib/containerd
+    capabilities:
+      - SYS_ADMIN
+    tmpfs:
+      - /run
+      - /tmp
+    published_ports:
+      - 8081:8081
+    tty: true
+    environment:
+      container: docker
+    groups:
+      - dns
+    docker_networks:
+      - name: bind
+        ipam_config:
+          - subnet: "10.11.0.0/24"
+            gateway: "10.11.0.254"
+    networks:
+      - name: bind
+        ipv4_address: "10.11.0.1"
+
+provisioner:
+  name: ansible
+  ansible_args:
+    - --diff
+    - -v
+  config_options:
+    defaults:
+      deprecation_warnings: true
+      callback_result_format: yaml
+      callbacks_enabled: profile_tasks
+      gathering: smart
+      fact_caching: jsonfile
+      fact_caching_timeout: 8640
+      fact_caching_connection: ansible_facts
+
+scenario:
+  test_sequence:
+    - destroy
+    - dependency
+    - syntax
+    - create
+    - prepare
+    - converge
+    - verify
+    - destroy
+
+verifier:
+  name: testinfra diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pdns/molecule/backend-sqlite/prepare.yml b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pdns/molecule/backend-sqlite/prepare.yml new file mode 100644 index 0000000..34b7c5e --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pdns/molecule/backend-sqlite/prepare.yml @@ -0,0 +1,63 @@ +---
+
+- name: information
+  hosts: all
+  gather_facts: true
+
+  pre_tasks:
+    - name: arch- / artixlinux
+      when:
+        - ansible_facts.distribution | lower == 'archlinux' or
+          ansible_facts.os_family | lower == 'artix linux'
+      block:
+        - name: update pacman system
+          become: true
+          ansible.builtin.command:
+            argv:
+              - pacman
+              - --refresh
+              - --sync
+              - --sysupgrade
+              - --noconfirm
+          register: pacman
+          changed_when: pacman.rc != 0
+          failed_when: pacman.rc != 0
+
+        - name: create depends service
+          ansible.builtin.copy:
+            mode: 0755
+            dest: /etc/init.d/net
+            content: |
+              #!/usr/bin/openrc-run
+              true
+          when:
+            - ansible_facts.os_family | lower == 'artix linux'
+
+    - name: make sure python3-apt is installed (only debian based)
+      ansible.builtin.package:
+        name:
+          - python3-apt
+        state: present
+      when:
+        - ansible_facts.os_family | lower == 'debian'
+
+    - name: update package cache
+      become: true
+      ansible.builtin.package:
+        update_cache: true
+
+    - name: install dependencies
+      ansible.builtin.package:
+        name:
+          - iproute2
+        state: present
+
+    - name: environment
+      ansible.builtin.debug:
+        msg:
+          - "os family : {{ ansible_facts.distribution }} ({{ ansible_facts.os_family }})"
+          - "distribution version : {{ ansible_facts.distribution_major_version }}"
+          - "ansible version : {{ ansible_version.full }}"
+          - "python version : {{ ansible_facts.python.version.major }}.{{ ansible_facts.python.version.minor }}"
+
+...
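The verifier below re-renders the role's variables with a multi-pass Jinja loop (`render_all_vars`). A standalone sketch of why a single pass is not enough once a templated variable references another templated variable; the variable names are illustrative only:

```python
from jinja2 import ChainableUndefined
from jinja2.nativetypes import NativeEnvironment

env = NativeEnvironment(undefined=ChainableUndefined, autoescape=False)

data = {
    "ansible_architecture": "x86_64",
    "system_architecture": "{{ ansible_architecture }}",
    "release_file": "pdns-{{ system_architecture }}.tar.gz",
}

# Pass 1 resolves system_architecture; release_file still contains a
# template afterwards and only resolves fully on pass 2.
for _ in range(2):
    data = {
        key: env.from_string(value).render(**data) if isinstance(value, str) else value
        for key, value in data.items()
    }

assert data["release_file"] == "pdns-x86_64.tar.gz"
```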
diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pdns/molecule/backend-sqlite/tests/test_default.py b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pdns/molecule/backend-sqlite/tests/test_default.py new file mode 100644 index 0000000..45bc53e --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pdns/molecule/backend-sqlite/tests/test_default.py @@ -0,0 +1,337 @@ +from __future__ import annotations + +import json +import os +import re +from pathlib import Path +from typing import Any, Dict, List, Mapping, Optional, Sequence + +import pytest +from ansible.parsing.dataloader import DataLoader +from jinja2 import ChainableUndefined +from jinja2.nativetypes import NativeEnvironment + +# --- helper ---------------------------------------------------------------- + + +def pp_json(json_thing, sort=True, indents=2): + + if type(json_thing) is str: + print(json.dumps(json.loads(json_thing), sort_keys=sort, indent=indents)) + else: + print(json.dumps(json_thing, sort_keys=sort, indent=indents)) + + return None + + +# --- paths ----------------------------------------------------------------- + + +def base_directory() -> tuple[Path, Path]: + """ + Returns: + role_dir: role root (contains defaults/, vars/, tasks/, ...) + scenario_dir: molecule scenario dir (contains group_vars/, ...) + """ + cwd = Path.cwd() + + # pytest läuft je nach tox/molecule entweder im scenario/tests oder im role-root + if (cwd / "group_vars").is_dir(): + # .../molecule//tests -> role root ist ../.. + return (cwd / "../..").resolve(), cwd.resolve() + + scenario = os.environ.get("MOLECULE_SCENARIO_NAME", "default") + return cwd.resolve(), (cwd / "molecule" / scenario).resolve() + + +def _normalize_os(distribution: str) -> Optional[str]: + d = (distribution or "").strip().lower() + if d in ("debian", "ubuntu"): + return "debian" + if d in ("arch", "artix"): + return f"{d}linux" + return None + + +# --- load vars files (YAML) ------------------------------------------------ + + +def _load_vars_file(loader: DataLoader, file_base: Path) -> Dict[str, Any]: + """ + file_base ohne Extension übergeben, z.B. role_dir/'defaults'/'main' + Lädt main.yml oder main.yaml via Ansible DataLoader (Vault kompatibel). + """ + for ext in ("yml", "yaml"): + p = file_base.with_suffix(f".{ext}") + if not p.is_file(): + continue + + data = loader.load_from_file(str(p)) + if data is None: + return {} + if not isinstance(data, dict): + raise TypeError(f"{p} must be a mapping/dict, got {type(data)}") + return data + + return {} + + +# --- jinja rendering (multi-pass) ------------------------------------------ + +_JINJA_MARKER = re.compile(r"({{.*?}}|{%-?.*?-%}|{#.*?#})", re.S) + + +def _find_unrendered_templates(obj: Any, prefix: str = "") -> List[str]: + found: List[str] = [] + + if isinstance(obj, str): + if _JINJA_MARKER.search(obj): + found.append(prefix or "") + return found + + if isinstance(obj, Mapping): + for k, v in obj.items(): + key = str(k) + found.extend( + _find_unrendered_templates(v, f"{prefix}.{key}" if prefix else key) + ) + return found + + if isinstance(obj, Sequence) and not isinstance(obj, (str, bytes, bytearray)): + for i, v in enumerate(obj): + found.extend(_find_unrendered_templates(v, f"{prefix}[{i}]")) + return found + + return found + + +def _make_jinja_env() -> NativeEnvironment: + """ + NativeEnvironment: gibt bei reinen Expressions native Typen zurück, + sonst Strings. 
Undefined ist 'chainable', damit ansible_facts.foo.bar + nicht hart explodiert, sondern Undefined liefert (ähnlich fail_on_undefined=False). + """ + env = NativeEnvironment(undefined=ChainableUndefined, autoescape=False) + + # Ansible-ähnliche lookup/query Minimalimplementierung (nur env erlaubt) + def _lookup(plugin: str, term: Any, *rest: Any, **kwargs: Any) -> Any: + if plugin != "env": + raise ValueError( + f"lookup('{plugin}', ...) not supported in tests (allowlist: env)" + ) + # Ansible lookup('env','X') -> '' wenn nicht gesetzt (damit default(..., true) greift) + if isinstance(term, (list, tuple)): + vals = [os.environ.get(str(t), "") for t in term] + return vals[0] if kwargs.get("wantlist") is False else vals + return os.environ.get(str(term), "") + + def _query(plugin: str, term: Any, *rest: Any, **kwargs: Any) -> List[Any]: + # query() ist wantlist=True + kwargs["wantlist"] = True + res = _lookup(plugin, term, *rest, **kwargs) + return res if isinstance(res, list) else [res] + + env.globals["lookup"] = _lookup + env.globals["query"] = _query + return env + + +def _render_obj( + env: NativeEnvironment, obj: Any, ctx: Dict[str, Any], *, skip_keys: frozenset[str] +) -> Any: + if isinstance(obj, str): + if not _JINJA_MARKER.search(obj): + return obj + tmpl = env.from_string(obj) + return tmpl.render(**ctx) + + if isinstance(obj, Mapping): + out: Dict[str, Any] = {} + for k, v in obj.items(): + ks = str(k) + if ks in skip_keys: + out[ks] = v + else: + out[ks] = _render_obj(env, v, ctx, skip_keys=skip_keys) + return out + + if isinstance(obj, list): + return [_render_obj(env, v, ctx, skip_keys=skip_keys) for v in obj] + + if isinstance(obj, tuple): + return tuple(_render_obj(env, v, ctx, skip_keys=skip_keys) for v in obj) + + return obj + + +def render_all_vars(data: Dict[str, Any], passes: int = 8) -> Dict[str, Any]: + """ + Multi-pass: damit Werte wie + system_architecture -> ..., + und danach defaults_release.file -> ...{{ system_architecture }}... + sauber aufgelöst werden. 
+ """ + env = _make_jinja_env() + + current: Dict[str, Any] = data + last_leftovers: Optional[List[str]] = None + + for _ in range(max(1, passes)): + # Kontext ist immer der aktuelle Stand + rendered = _render_obj( + env, current, current, skip_keys=frozenset({"ansible_facts"}) + ) + if not isinstance(rendered, dict): + raise TypeError(f"Rendered vars are not a dict anymore: {type(rendered)}") + + leftovers = _find_unrendered_templates(rendered) + if not leftovers: + return rendered + + # kein Fortschritt mehr + if leftovers == last_leftovers: + current = rendered + break + + last_leftovers = leftovers + current = rendered + + # optional: hart fehlschlagen, wenn noch Templates übrig sind (sonst wird es still falsch) + if os.environ.get("ANSIBLE_TEST_ALLOW_UNRESOLVED_TEMPLATES", "0") != "1": + leftovers = _find_unrendered_templates(current) + if leftovers: + raise AssertionError( + "Unresolved templates after rendering:\n- " + "\n- ".join(leftovers) + ) + + return current + + +# --- pytest fixture -------------------------------------------------------- + + +@pytest.fixture() +def get_vars(host) -> Dict[str, Any]: + role_dir, scenario_dir = base_directory() + + loader = DataLoader() + loader.set_basedir(str(role_dir)) + + distribution = getattr(host.system_info, "distribution", "") or "" + os_id = _normalize_os(distribution) + + merged: Dict[str, Any] = {} + merged.update(_load_vars_file(loader, role_dir / "defaults" / "main")) + merged.update(_load_vars_file(loader, role_dir / "vars" / "main")) + + if os_id: + merged.update(_load_vars_file(loader, role_dir / "vars" / os_id)) + + merged.update(_load_vars_file(loader, scenario_dir / "group_vars" / "all" / "vars")) + + # Facts als Input (keine Templates) + setup = host.ansible("setup") + facts = setup.get("ansible_facts", {}) if isinstance(setup, dict) else {} + if isinstance(facts, dict): + merged["ansible_facts"] = facts + merged.setdefault( + "ansible_system", facts.get("system") or facts.get("ansible_system") + ) + merged.setdefault( + "ansible_architecture", + facts.get("architecture") or facts.get("ansible_architecture"), + ) + + result = render_all_vars(merged, passes=8) + + return result + + +# --- tests ----------------------------------------------------------------- + + +def test_directories(host, get_vars): + """ + used config directory + """ + distribution = host.system_info.distribution + release = host.system_info.release + + print(f"distribution: {distribution}") + print(f"release : {release}") + # print(get_vars) + + directories = [ + "/etc/powerdns", + get_vars.get("pdns_config_include"), + ] + + if distribution in ['arch', 'artix']: + directories.append("/usr/lib/powerdns") + + if distribution in ['debian', 'ubuntu']: + directories.append("/var/lib/powerdns",) + directories.append("/var/spool/powerdns") + + for dirs in directories: + d = host.file(dirs) + assert d.is_directory + + +def test_files(host, get_vars): + """ + created config files + """ + files = [ + "/etc/powerdns/pdns.conf", + "/etc/powerdns/pdns.d/pdns_general.conf", + "/etc/powerdns/pdns.d/pdns_backends.conf", + "/etc/powerdns/pdns.d/pdns_webserver.conf", + "/etc/powerdns/pdns.d/pdns_api.conf", + "/etc/ansible/facts.d/pdns.fact", + "/usr/bin/pdnsutil", + ] + + for _file in files: + f = host.file(_file) + assert f.is_file + + +def test_sqlite_files(host, get_vars): + """ """ + base_directory = "/var/lib/powerdns" + _file = os.path.join(base_directory, "pdns.db") + + f = host.file(_file) + assert f.is_file + + +def test_service_running_and_enabled(host, 
get_vars): + """ + running service + """ + service_name = get_vars.get("pdns_service").get("name", None) + + if service_name: + service = host.service(service_name) + assert service.is_running + assert service.is_enabled + + +def test_listening_socket(host, get_vars): + """ """ + listening = host.socket.get_listening_sockets() + + for i in listening: + print(i) + + bind_port = "5300" + bind_address = "127.0.0.1" + + listen = [] + listen.append(f"tcp://{bind_address}:{bind_port}") + listen.append(f"udp://{bind_address}:{bind_port}") + + for spec in listen: + socket = host.socket(spec) + assert socket.is_listening diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pdns/molecule/configured/converge.yml b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pdns/molecule/configured/converge.yml new file mode 100644 index 0000000..05aed5a --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pdns/molecule/configured/converge.yml @@ -0,0 +1,12 @@ +--- + +- name: converge + hosts: all + any_errors_fatal: true + gather_facts: true + + environment: + NETRC: '' + + roles: + - role: bodsch.dns.pdns diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pdns/molecule/configured/group_vars/all/vars.yml b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pdns/molecule/configured/group_vars/all/vars.yml new file mode 100644 index 0000000..d3e6dd5 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pdns/molecule/configured/group_vars/all/vars.yml @@ -0,0 +1,12 @@ +--- + +pdns_config: + master: true + slave: false + also-notify: "" + local-address: '127.0.0.1' + local-port: '5300' + log-dns-details: "on" + loglevel: 6 + +... 
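Besides the file and socket assertions in this scenario's tests, liveness can be probed through `pdns_control`, which ships with the authoritative server and is documented to answer `PONG` to its `ping` command. A hedged testinfra sketch, not part of the shipped verifier:

```python
# Sketch: runtime liveness probe via pdns_control (shipped with pdns).
# "pdns_control ping" is documented to answer "PONG" when the daemon is up.
def test_pdns_control_ping(host):
    cmd = host.run("pdns_control ping")
    assert cmd.rc == 0
    assert "PONG" in cmd.stdout
```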
diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pdns/molecule/configured/molecule.yml b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pdns/molecule/configured/molecule.yml new file mode 100644 index 0000000..3b4aec5 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pdns/molecule/configured/molecule.yml @@ -0,0 +1,68 @@ +---
+
+role_name_check: 1
+
+dependency:
+  name: galaxy
+
+driver:
+  name: docker
+
+platforms:
+  - name: instance
+    image: "ghcr.io/bodsch/docker-ansible/ansible-${DISTRIBUTION:-debian:12}"
+    command: ${MOLECULE_DOCKER_COMMAND:-""}
+    docker_host: "${DOCKER_HOST:-unix:///run/docker.sock}"
+    privileged: true
+    pre_build_image: true
+    cgroupns_mode: host
+    volumes:
+      - /sys/fs/cgroup:/sys/fs/cgroup:rw
+      - /var/lib/containerd
+    capabilities:
+      - SYS_ADMIN
+    tmpfs:
+      - /run
+      - /tmp
+    tty: true
+    environment:
+      container: docker
+    groups:
+      - dns
+    docker_networks:
+      - name: bind
+        ipam_config:
+          - subnet: "10.11.0.0/24"
+            gateway: "10.11.0.254"
+    networks:
+      - name: bind
+        ipv4_address: "10.11.0.1"
+
+provisioner:
+  name: ansible
+  ansible_args:
+    - --diff
+    - -vv
+  config_options:
+    defaults:
+      deprecation_warnings: true
+      callback_result_format: yaml
+      callbacks_enabled: profile_tasks
+      gathering: smart
+      fact_caching: jsonfile
+      fact_caching_timeout: 8640
+      fact_caching_connection: ansible_facts
+
+scenario:
+  test_sequence:
+    - destroy
+    - dependency
+    - syntax
+    - create
+    - prepare
+    - converge
+    - verify
+    - destroy
+
+verifier:
+  name: testinfra diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pdns/molecule/configured/prepare.yml b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pdns/molecule/configured/prepare.yml new file mode 100644 index 0000000..34b7c5e --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pdns/molecule/configured/prepare.yml @@ -0,0 +1,63 @@ +---
+
+- name: information
+  hosts: all
+  gather_facts: true
+
+  pre_tasks:
+    - name: arch- / artixlinux
+      when:
+        - ansible_facts.distribution | lower == 'archlinux' or
+          ansible_facts.os_family | lower == 'artix linux'
+      block:
+        - name: update pacman system
+          become: true
+          ansible.builtin.command:
+            argv:
+              - pacman
+              - --refresh
+              - --sync
+              - --sysupgrade
+              - --noconfirm
+          register: pacman
+          changed_when: pacman.rc != 0
+          failed_when: pacman.rc != 0
+
+        - name: create depends service
+          ansible.builtin.copy:
+            mode: 0755
+            dest: /etc/init.d/net
+            content: |
+              #!/usr/bin/openrc-run
+              true
+          when:
+            - ansible_facts.os_family | lower == 'artix linux'
+
+    - name: make sure python3-apt is installed (only debian based)
+      ansible.builtin.package:
+        name:
+          - python3-apt
+        state: present
+      when:
+        - ansible_facts.os_family | lower == 'debian'
+
+    - name: update package cache
+      become: true
+      ansible.builtin.package:
+        update_cache: true
+
+    - name: install dependencies
+      ansible.builtin.package:
+        name:
+          - iproute2
+        state: present
+
+    - name: environment
+      ansible.builtin.debug:
+        msg:
+          - "os family : {{ ansible_facts.distribution }} ({{ ansible_facts.os_family }})"
+          - "distribution version : {{ ansible_facts.distribution_major_version }}"
+          - "ansible version : {{ ansible_version.full }}"
+          - "python version : {{ ansible_facts.python.version.major }}.{{ ansible_facts.python.version.minor }}"
+
+...
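The test module below builds its Jinja environment with `ChainableUndefined`; a minimal demonstration of the behaviour its docstring describes, namely that a chained attribute lookup on missing facts stays `Undefined` instead of raising:

```python
from jinja2 import ChainableUndefined, Undefined
from jinja2.nativetypes import NativeEnvironment

env = NativeEnvironment(undefined=ChainableUndefined, autoescape=False)

# With the default Undefined, ansible_facts.foo.bar would raise an
# UndefinedError; ChainableUndefined lets the whole chain stay undefined.
value = env.from_string("{{ ansible_facts.foo.bar }}").render(ansible_facts={})
assert isinstance(value, Undefined)
```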
diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pdns/molecule/configured/tests/test_default.py b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pdns/molecule/configured/tests/test_default.py new file mode 100644 index 0000000..d6c8e19 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pdns/molecule/configured/tests/test_default.py @@ -0,0 +1,328 @@ +from __future__ import annotations + +import json +import os +import re +from pathlib import Path +from typing import Any, Dict, List, Mapping, Optional, Sequence + +import pytest +from ansible.parsing.dataloader import DataLoader +from jinja2 import ChainableUndefined +from jinja2.nativetypes import NativeEnvironment + +# --- helper ---------------------------------------------------------------- + + +def pp_json(json_thing, sort=True, indents=2): + + if type(json_thing) is str: + print(json.dumps(json.loads(json_thing), sort_keys=sort, indent=indents)) + else: + print(json.dumps(json_thing, sort_keys=sort, indent=indents)) + + return None + + +# --- paths ----------------------------------------------------------------- + + +def base_directory() -> tuple[Path, Path]: + """ + Returns: + role_dir: role root (contains defaults/, vars/, tasks/, ...) + scenario_dir: molecule scenario dir (contains group_vars/, ...) + """ + cwd = Path.cwd() + + # pytest läuft je nach tox/molecule entweder im scenario/tests oder im role-root + if (cwd / "group_vars").is_dir(): + # .../molecule//tests -> role root ist ../.. + return (cwd / "../..").resolve(), cwd.resolve() + + scenario = os.environ.get("MOLECULE_SCENARIO_NAME", "default") + return cwd.resolve(), (cwd / "molecule" / scenario).resolve() + + +def _normalize_os(distribution: str) -> Optional[str]: + d = (distribution or "").strip().lower() + if d in ("debian", "ubuntu"): + return "debian" + if d in ("arch", "artix"): + return f"{d}linux" + return None + + +# --- load vars files (YAML) ------------------------------------------------ + + +def _load_vars_file(loader: DataLoader, file_base: Path) -> Dict[str, Any]: + """ + file_base ohne Extension übergeben, z.B. role_dir/'defaults'/'main' + Lädt main.yml oder main.yaml via Ansible DataLoader (Vault kompatibel). + """ + for ext in ("yml", "yaml"): + p = file_base.with_suffix(f".{ext}") + if not p.is_file(): + continue + + data = loader.load_from_file(str(p)) + if data is None: + return {} + if not isinstance(data, dict): + raise TypeError(f"{p} must be a mapping/dict, got {type(data)}") + return data + + return {} + + +# --- jinja rendering (multi-pass) ------------------------------------------ + +_JINJA_MARKER = re.compile(r"({{.*?}}|{%-?.*?-%}|{#.*?#})", re.S) + + +def _find_unrendered_templates(obj: Any, prefix: str = "") -> List[str]: + found: List[str] = [] + + if isinstance(obj, str): + if _JINJA_MARKER.search(obj): + found.append(prefix or "") + return found + + if isinstance(obj, Mapping): + for k, v in obj.items(): + key = str(k) + found.extend( + _find_unrendered_templates(v, f"{prefix}.{key}" if prefix else key) + ) + return found + + if isinstance(obj, Sequence) and not isinstance(obj, (str, bytes, bytearray)): + for i, v in enumerate(obj): + found.extend(_find_unrendered_templates(v, f"{prefix}[{i}]")) + return found + + return found + + +def _make_jinja_env() -> NativeEnvironment: + """ + NativeEnvironment: gibt bei reinen Expressions native Typen zurück, + sonst Strings. 
Undefined ist 'chainable', damit ansible_facts.foo.bar + nicht hart explodiert, sondern Undefined liefert (ähnlich fail_on_undefined=False). + """ + env = NativeEnvironment(undefined=ChainableUndefined, autoescape=False) + + # Ansible-ähnliche lookup/query Minimalimplementierung (nur env erlaubt) + def _lookup(plugin: str, term: Any, *rest: Any, **kwargs: Any) -> Any: + if plugin != "env": + raise ValueError( + f"lookup('{plugin}', ...) not supported in tests (allowlist: env)" + ) + # Ansible lookup('env','X') -> '' wenn nicht gesetzt (damit default(..., true) greift) + if isinstance(term, (list, tuple)): + vals = [os.environ.get(str(t), "") for t in term] + return vals[0] if kwargs.get("wantlist") is False else vals + return os.environ.get(str(term), "") + + def _query(plugin: str, term: Any, *rest: Any, **kwargs: Any) -> List[Any]: + # query() ist wantlist=True + kwargs["wantlist"] = True + res = _lookup(plugin, term, *rest, **kwargs) + return res if isinstance(res, list) else [res] + + env.globals["lookup"] = _lookup + env.globals["query"] = _query + return env + + +def _render_obj( + env: NativeEnvironment, obj: Any, ctx: Dict[str, Any], *, skip_keys: frozenset[str] +) -> Any: + if isinstance(obj, str): + if not _JINJA_MARKER.search(obj): + return obj + tmpl = env.from_string(obj) + return tmpl.render(**ctx) + + if isinstance(obj, Mapping): + out: Dict[str, Any] = {} + for k, v in obj.items(): + ks = str(k) + if ks in skip_keys: + out[ks] = v + else: + out[ks] = _render_obj(env, v, ctx, skip_keys=skip_keys) + return out + + if isinstance(obj, list): + return [_render_obj(env, v, ctx, skip_keys=skip_keys) for v in obj] + + if isinstance(obj, tuple): + return tuple(_render_obj(env, v, ctx, skip_keys=skip_keys) for v in obj) + + return obj + + +def render_all_vars(data: Dict[str, Any], passes: int = 8) -> Dict[str, Any]: + """ + Multi-pass: damit Werte wie + system_architecture -> ..., + und danach defaults_release.file -> ...{{ system_architecture }}... + sauber aufgelöst werden. 
+ """ + env = _make_jinja_env() + + current: Dict[str, Any] = data + last_leftovers: Optional[List[str]] = None + + for _ in range(max(1, passes)): + # Kontext ist immer der aktuelle Stand + rendered = _render_obj( + env, current, current, skip_keys=frozenset({"ansible_facts"}) + ) + if not isinstance(rendered, dict): + raise TypeError(f"Rendered vars are not a dict anymore: {type(rendered)}") + + leftovers = _find_unrendered_templates(rendered) + if not leftovers: + return rendered + + # kein Fortschritt mehr + if leftovers == last_leftovers: + current = rendered + break + + last_leftovers = leftovers + current = rendered + + # optional: hart fehlschlagen, wenn noch Templates übrig sind (sonst wird es still falsch) + if os.environ.get("ANSIBLE_TEST_ALLOW_UNRESOLVED_TEMPLATES", "0") != "1": + leftovers = _find_unrendered_templates(current) + if leftovers: + raise AssertionError( + "Unresolved templates after rendering:\n- " + "\n- ".join(leftovers) + ) + + return current + + +# --- pytest fixture -------------------------------------------------------- + + +@pytest.fixture() +def get_vars(host) -> Dict[str, Any]: + role_dir, scenario_dir = base_directory() + + loader = DataLoader() + loader.set_basedir(str(role_dir)) + + distribution = getattr(host.system_info, "distribution", "") or "" + os_id = _normalize_os(distribution) + + merged: Dict[str, Any] = {} + merged.update(_load_vars_file(loader, role_dir / "defaults" / "main")) + merged.update(_load_vars_file(loader, role_dir / "vars" / "main")) + + if os_id: + merged.update(_load_vars_file(loader, role_dir / "vars" / os_id)) + + merged.update(_load_vars_file(loader, scenario_dir / "group_vars" / "all" / "vars")) + + # Facts als Input (keine Templates) + setup = host.ansible("setup") + facts = setup.get("ansible_facts", {}) if isinstance(setup, dict) else {} + if isinstance(facts, dict): + merged["ansible_facts"] = facts + merged.setdefault( + "ansible_system", facts.get("system") or facts.get("ansible_system") + ) + merged.setdefault( + "ansible_architecture", + facts.get("architecture") or facts.get("ansible_architecture"), + ) + + result = render_all_vars(merged, passes=8) + + return result + + +# --- tests ----------------------------------------------------------------- + + +def test_directories(host, get_vars): + """ + used config directory + """ + distribution = host.system_info.distribution + release = host.system_info.release + + print(f"distribution: {distribution}") + print(f"release : {release}") + # print(get_vars) + + directories = [ + "/etc/powerdns", + get_vars.get("pdns_config_include"), + ] + + if distribution in ['arch', 'artix']: + directories.append("/usr/lib/powerdns") + + if distribution in ['debian', 'ubuntu']: + directories.append("/var/lib/powerdns",) + directories.append("/var/spool/powerdns") + + for dirs in directories: + d = host.file(dirs) + assert d.is_directory + + +def test_files(host, get_vars): + """ + created config files + """ + files = [ + "/etc/powerdns/pdns.conf", + "/etc/powerdns/pdns.d/pdns_general.conf", + "/etc/powerdns/pdns.d/pdns_backends.conf", + "/etc/powerdns/pdns.d/pdns_webserver.conf", + "/etc/powerdns/pdns.d/pdns_api.conf", + "/etc/ansible/facts.d/pdns.fact", + "/usr/bin/pdnsutil", + ] + + for _file in files: + f = host.file(_file) + assert f.is_file + + +def test_service_running_and_enabled(host, get_vars): + """ + running service + """ + service_name = get_vars.get("pdns_service").get("name", None) + + if service_name: + service = host.service(service_name) + assert service.is_running + 
assert service.is_enabled
+
+
+def test_listening_socket(host, get_vars):
+    """ """
+    listening = host.socket.get_listening_sockets()
+
+    for i in listening:
+        print(i)
+
+    bind_port = "5300"
+    bind_address = "127.0.0.1"
+
+    listen = []
+    listen.append(f"tcp://{bind_address}:{bind_port}")
+    listen.append(f"udp://{bind_address}:{bind_port}")
+
+    for spec in listen:
+        socket = host.socket(spec)
+        assert socket.is_listening diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pdns/molecule/default/converge.yml b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pdns/molecule/default/converge.yml new file mode 100644 index 0000000..5a68b0d --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pdns/molecule/default/converge.yml @@ -0,0 +1,12 @@ +---
+
+- name: converge
+  hosts: instance
+  any_errors_fatal: false
+  gather_facts: true
+
+  environment:
+    NETRC: ''
+
+  roles:
+    - role: bodsch.dns.pdns diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pdns/molecule/default/group_vars/all/vars.yml b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pdns/molecule/default/group_vars/all/vars.yml new file mode 100644 index 0000000..c81cf5b --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pdns/molecule/default/group_vars/all/vars.yml @@ -0,0 +1,3 @@ +---
+
+... diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pdns/molecule/default/molecule.yml b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pdns/molecule/default/molecule.yml new file mode 100644 index 0000000..3e63c5f --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pdns/molecule/default/molecule.yml @@ -0,0 +1,55 @@ +---
+
+role_name_check: 1
+
+dependency:
+  name: galaxy
+
+driver:
+  name: docker
+
+platforms:
+  - name: instance
+    image: "ghcr.io/bodsch/docker-ansible/ansible-${DISTRIBUTION:-debian:12}"
+    command: ${MOLECULE_DOCKER_COMMAND:-""}
+    docker_host: "${DOCKER_HOST:-unix:///run/docker.sock}"
+    privileged: true
+    pre_build_image: true
+    cgroupns_mode: host
+    volumes:
+      - /sys/fs/cgroup:/sys/fs/cgroup:rw
+      - /var/lib/containerd
+    capabilities:
+      - SYS_ADMIN
+    tmpfs:
+      - /run
+      - /tmp
+
+provisioner:
+  name: ansible
+  ansible_args:
+    - --diff
+    - -vv
+  config_options:
+    defaults:
+      deprecation_warnings: true
+      callback_result_format: yaml
+      callbacks_enabled: profile_tasks
+      gathering: smart
+      fact_caching: jsonfile
+      fact_caching_timeout: 8640
+      fact_caching_connection: ansible_facts
+
+scenario:
+  test_sequence:
+    - destroy
+    - dependency
+    - syntax
+    - create
+    - prepare
+    - converge
+    - verify
+    - destroy
+
+verifier:
+  name: testinfra diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pdns/molecule/default/prepare.yml b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pdns/molecule/default/prepare.yml new file mode 100644 index 0000000..4c14c51 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pdns/molecule/default/prepare.yml @@ -0,0 +1,57 @@ +---
+
+- name: information
+  hosts: all
+  gather_facts: true
+
+  pre_tasks:
+    - name: arch- / artixlinux
+      when:
+        - ansible_facts.distribution | lower == 'archlinux' or
+          ansible_facts.os_family | lower == 'artix linux'
+      block:
+        - name: update pacman system
+          ansible.builtin.command: |
+            pacman --refresh --sync --sysupgrade --noconfirm
+          register: pacman
+
+
+        - name: create depends service
+          ansible.builtin.copy:
+            mode: "0755"
+            dest: /etc/init.d/net
+            content: |
+              #!/usr/bin/openrc-run
+              true
+          when:
+            - ansible_facts.os_family | lower == 'artix linux'
+
+    - name: make sure python3-apt is installed (only debian based)
+      ansible.builtin.package:
+        name:
+          - python3-apt
+        state: present
+      when:
+        - ansible_facts.os_family | lower == 'debian'
+
+    - name: update package cache
+      become: true
+      ansible.builtin.package:
+        update_cache: true
+
+    - name: install dependencies
+      ansible.builtin.package:
+        name:
+          - iproute2
+        state: present
+
+    - name: environment
+      ansible.builtin.debug:
+        msg:
+          - "os family            : {{ ansible_facts.distribution }} ({{ ansible_facts.os_family }})"
+          - "distribution version : {{ ansible_facts.distribution_major_version }}"
+          - "ansible version      : {{ ansible_version.full }}"
+          - "python version       : {{ ansible_facts.python.version.major }}.{{ ansible_facts.python.version.minor }}"
+
+...
diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pdns/molecule/default/tests/test_default.py b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pdns/molecule/default/tests/test_default.py
new file mode 100644
index 0000000..9331093
--- /dev/null
+++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pdns/molecule/default/tests/test_default.py
@@ -0,0 +1,328 @@
+from __future__ import annotations
+
+import json
+import os
+import re
+from pathlib import Path
+from typing import Any, Dict, List, Mapping, Optional, Sequence
+
+import pytest
+from ansible.parsing.dataloader import DataLoader
+from jinja2 import ChainableUndefined
+from jinja2.nativetypes import NativeEnvironment
+
+# --- helper ----------------------------------------------------------------
+
+
+def pp_json(json_thing, sort=True, indents=2):
+    if isinstance(json_thing, str):
+        print(json.dumps(json.loads(json_thing), sort_keys=sort, indent=indents))
+    else:
+        print(json.dumps(json_thing, sort_keys=sort, indent=indents))
+
+    return None
+
+
+# --- paths -----------------------------------------------------------------
+
+
+def base_directory() -> tuple[Path, Path]:
+    """
+    Returns:
+        role_dir: role root (contains defaults/, vars/, tasks/, ...)
+        scenario_dir: molecule scenario dir (contains group_vars/, ...)
+    """
+    cwd = Path.cwd()
+
+    # depending on the tox/molecule invocation, pytest runs either in the
+    # scenario's tests directory or in the role root
+    if (cwd / "group_vars").is_dir():
+        # .../molecule/<scenario>/tests -> role root is ../..
+        return (cwd / "../..").resolve(), cwd.resolve()
+
+    scenario = os.environ.get("MOLECULE_SCENARIO_NAME", "default")
+    return cwd.resolve(), (cwd / "molecule" / scenario).resolve()
+
+
+def _normalize_os(distribution: str) -> Optional[str]:
+    d = (distribution or "").strip().lower()
+    if d in ("debian", "ubuntu"):
+        return "debian"
+    if d in ("arch", "artix"):
+        return f"{d}linux"
+    return None
+
+
+# --- load vars files (YAML) ------------------------------------------------
+
+
+def _load_vars_file(loader: DataLoader, file_base: Path) -> Dict[str, Any]:
+    """
+    Pass file_base without an extension, e.g. role_dir / 'defaults' / 'main'.
+    Loads main.yml or main.yaml via the Ansible DataLoader (vault compatible).
+    """
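+    # e.g. _load_vars_file(loader, role_dir / "defaults" / "main") tries
+    # defaults/main.yml first, then defaults/main.yaml, and returns {} if
+    # neither file exists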
+ """ + for ext in ("yml", "yaml"): + p = file_base.with_suffix(f".{ext}") + if not p.is_file(): + continue + + data = loader.load_from_file(str(p)) + if data is None: + return {} + if not isinstance(data, dict): + raise TypeError(f"{p} must be a mapping/dict, got {type(data)}") + return data + + return {} + + +# --- jinja rendering (multi-pass) ------------------------------------------ + +_JINJA_MARKER = re.compile(r"({{.*?}}|{%-?.*?-%}|{#.*?#})", re.S) + + +def _find_unrendered_templates(obj: Any, prefix: str = "") -> List[str]: + found: List[str] = [] + + if isinstance(obj, str): + if _JINJA_MARKER.search(obj): + found.append(prefix or "") + return found + + if isinstance(obj, Mapping): + for k, v in obj.items(): + key = str(k) + found.extend( + _find_unrendered_templates(v, f"{prefix}.{key}" if prefix else key) + ) + return found + + if isinstance(obj, Sequence) and not isinstance(obj, (str, bytes, bytearray)): + for i, v in enumerate(obj): + found.extend(_find_unrendered_templates(v, f"{prefix}[{i}]")) + return found + + return found + + +def _make_jinja_env() -> NativeEnvironment: + """ + NativeEnvironment: gibt bei reinen Expressions native Typen zurück, + sonst Strings. Undefined ist 'chainable', damit ansible_facts.foo.bar + nicht hart explodiert, sondern Undefined liefert (ähnlich fail_on_undefined=False). + """ + env = NativeEnvironment(undefined=ChainableUndefined, autoescape=False) + + # Ansible-ähnliche lookup/query Minimalimplementierung (nur env erlaubt) + def _lookup(plugin: str, term: Any, *rest: Any, **kwargs: Any) -> Any: + if plugin != "env": + raise ValueError( + f"lookup('{plugin}', ...) not supported in tests (allowlist: env)" + ) + # Ansible lookup('env','X') -> '' wenn nicht gesetzt (damit default(..., true) greift) + if isinstance(term, (list, tuple)): + vals = [os.environ.get(str(t), "") for t in term] + return vals[0] if kwargs.get("wantlist") is False else vals + return os.environ.get(str(term), "") + + def _query(plugin: str, term: Any, *rest: Any, **kwargs: Any) -> List[Any]: + # query() ist wantlist=True + kwargs["wantlist"] = True + res = _lookup(plugin, term, *rest, **kwargs) + return res if isinstance(res, list) else [res] + + env.globals["lookup"] = _lookup + env.globals["query"] = _query + return env + + +def _render_obj( + env: NativeEnvironment, obj: Any, ctx: Dict[str, Any], *, skip_keys: frozenset[str] +) -> Any: + if isinstance(obj, str): + if not _JINJA_MARKER.search(obj): + return obj + tmpl = env.from_string(obj) + return tmpl.render(**ctx) + + if isinstance(obj, Mapping): + out: Dict[str, Any] = {} + for k, v in obj.items(): + ks = str(k) + if ks in skip_keys: + out[ks] = v + else: + out[ks] = _render_obj(env, v, ctx, skip_keys=skip_keys) + return out + + if isinstance(obj, list): + return [_render_obj(env, v, ctx, skip_keys=skip_keys) for v in obj] + + if isinstance(obj, tuple): + return tuple(_render_obj(env, v, ctx, skip_keys=skip_keys) for v in obj) + + return obj + + +def render_all_vars(data: Dict[str, Any], passes: int = 8) -> Dict[str, Any]: + """ + Multi-pass: damit Werte wie + system_architecture -> ..., + und danach defaults_release.file -> ...{{ system_architecture }}... + sauber aufgelöst werden. 
+ """ + env = _make_jinja_env() + + current: Dict[str, Any] = data + last_leftovers: Optional[List[str]] = None + + for _ in range(max(1, passes)): + # Kontext ist immer der aktuelle Stand + rendered = _render_obj( + env, current, current, skip_keys=frozenset({"ansible_facts"}) + ) + if not isinstance(rendered, dict): + raise TypeError(f"Rendered vars are not a dict anymore: {type(rendered)}") + + leftovers = _find_unrendered_templates(rendered) + if not leftovers: + return rendered + + # kein Fortschritt mehr + if leftovers == last_leftovers: + current = rendered + break + + last_leftovers = leftovers + current = rendered + + # optional: hart fehlschlagen, wenn noch Templates übrig sind (sonst wird es still falsch) + if os.environ.get("ANSIBLE_TEST_ALLOW_UNRESOLVED_TEMPLATES", "0") != "1": + leftovers = _find_unrendered_templates(current) + if leftovers: + raise AssertionError( + "Unresolved templates after rendering:\n- " + "\n- ".join(leftovers) + ) + + return current + + +# --- pytest fixture -------------------------------------------------------- + + +@pytest.fixture() +def get_vars(host) -> Dict[str, Any]: + role_dir, scenario_dir = base_directory() + + loader = DataLoader() + loader.set_basedir(str(role_dir)) + + distribution = getattr(host.system_info, "distribution", "") or "" + os_id = _normalize_os(distribution) + + merged: Dict[str, Any] = {} + merged.update(_load_vars_file(loader, role_dir / "defaults" / "main")) + merged.update(_load_vars_file(loader, role_dir / "vars" / "main")) + + if os_id: + merged.update(_load_vars_file(loader, role_dir / "vars" / os_id)) + + merged.update(_load_vars_file(loader, scenario_dir / "group_vars" / "all" / "vars")) + + # Facts als Input (keine Templates) + setup = host.ansible("setup") + facts = setup.get("ansible_facts", {}) if isinstance(setup, dict) else {} + if isinstance(facts, dict): + merged["ansible_facts"] = facts + merged.setdefault( + "ansible_system", facts.get("system") or facts.get("ansible_system") + ) + merged.setdefault( + "ansible_architecture", + facts.get("architecture") or facts.get("ansible_architecture"), + ) + + result = render_all_vars(merged, passes=8) + + return result + + +# --- tests ----------------------------------------------------------------- + + +def test_directories(host, get_vars): + """ + used config directory + """ + distribution = host.system_info.distribution + release = host.system_info.release + + print(f"distribution: {distribution}") + print(f"release : {release}") + # print(get_vars) + + directories = [ + "/etc/powerdns", + get_vars.get("pdns_config_include"), + ] + + if distribution in ['arch', 'artix']: + directories.append("/usr/lib/powerdns") + + if distribution in ['debian', 'ubuntu']: + directories.append("/var/lib/powerdns",) + directories.append("/var/spool/powerdns") + + for dirs in directories: + d = host.file(dirs) + assert d.is_directory + + +def test_files(host, get_vars): + """ + created config files + """ + files = [ + "/etc/powerdns/pdns.conf", + "/etc/powerdns/pdns.d/pdns_general.conf", + "/etc/powerdns/pdns.d/pdns_backends.conf", + "/etc/powerdns/pdns.d/pdns_webserver.conf", + "/etc/powerdns/pdns.d/pdns_api.conf", + "/etc/ansible/facts.d/pdns.fact", + "/usr/bin/pdnsutil", + ] + + for _file in files: + f = host.file(_file) + assert f.is_file + + +def test_service_running_and_enabled(host, get_vars): + """ + running service + """ + service_name = get_vars.get("pdns_service").get("name", None) + + if service_name: + service = host.service(service_name) + assert service.is_running + 
+
+
+# --- tests -----------------------------------------------------------------
+
+
+def test_directories(host, get_vars):
+    """
+    used config directory
+    """
+    distribution = host.system_info.distribution
+    release = host.system_info.release
+
+    print(f"distribution: {distribution}")
+    print(f"release     : {release}")
+    # print(get_vars)
+
+    directories = [
+        "/etc/powerdns",
+        get_vars.get("pdns_config_include"),
+    ]
+
+    if distribution in ['arch', 'artix']:
+        directories.append("/usr/lib/powerdns")
+
+    if distribution in ['debian', 'ubuntu']:
+        directories.append("/var/lib/powerdns")
+        directories.append("/var/spool/powerdns")
+
+    for directory in directories:
+        d = host.file(directory)
+        assert d.is_directory
+
+
+def test_files(host, get_vars):
+    """
+    created config files
+    """
+    files = [
+        "/etc/powerdns/pdns.conf",
+        "/etc/powerdns/pdns.d/pdns_general.conf",
+        "/etc/powerdns/pdns.d/pdns_backends.conf",
+        "/etc/powerdns/pdns.d/pdns_webserver.conf",
+        "/etc/powerdns/pdns.d/pdns_api.conf",
+        "/etc/ansible/facts.d/pdns.fact",
+        "/usr/bin/pdnsutil",
+    ]
+
+    for _file in files:
+        f = host.file(_file)
+        assert f.is_file
+
+
+def test_service_running_and_enabled(host, get_vars):
+    """
+    running service
+    """
+    service_name = get_vars.get("pdns_service", {}).get("name")
+
+    if service_name:
+        service = host.service(service_name)
+        assert service.is_running
+        assert service.is_enabled
+
+
+def test_listening_socket(host, get_vars):
+    """
+    listening sockets
+    """
+    listening = host.socket.get_listening_sockets()
+
+    for i in listening:
+        print(i)
+
+    bind_port = "53"
+    bind_address = "127.0.0.1"
+
+    listen = []
+    listen.append(f"tcp://{bind_address}:{bind_port}")
+    listen.append(f"udp://{bind_address}:{bind_port}")
+
+    for spec in listen:
+        socket = host.socket(spec)
+        assert socket.is_listening
diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pdns/notes.md b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pdns/notes.md
new file mode 100644
index 0000000..fd78c91
--- /dev/null
+++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pdns/notes.md
@@ -0,0 +1,3 @@
+
+https://oprtr.org/einrichtung-eines-unabhangigen-dyndns-dienstes-mit-powerdns-und-mysql-mariadb/
+https://pablintino.com/en/how-to-setup-powerdns-with-replication/
diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pdns/tasks/configure.yml b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pdns/tasks/configure.yml
new file mode 100644
index 0000000..8cb3fdd
--- /dev/null
+++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pdns/tasks/configure.yml
@@ -0,0 +1,60 @@
+---
+
+- name: create directory for pdns configuration
+  ansible.builtin.file:
+    name: "{{ pdns_config_dir }}"
+    state: directory
+    owner: "{{ pdns_file_owner }}"
+    group: "{{ pdns_file_group }}"
+    mode: "0750"
+
+- name: create directory for pdns 'include-dir'
+  ansible.builtin.file:
+    name: "{{ pdns_config_include }}"
+    state: directory
+    owner: "{{ pdns_file_owner }}"
+    group: "{{ pdns_file_group }}"
+    mode: "0750"
+  when:
+    - pdns_config_include is defined
+
+- name: generate the pdns configuration files
+  ansible.builtin.template:
+    src: "{{ item }}.conf.j2"
+    dest: "{{ pdns_config_include }}/{{ item }}.conf"
+    owner: "{{ pdns_file_owner }}"
+    group: "{{ pdns_file_group }}"
+    mode: "0644"
+    backup: true
+  loop:
+    - pdns_general
+    - pdns_backends
+    - pdns_webserver
+    - pdns_api
+  notify:
+    - restart pdns
+
+- name: remove built-in pdns backend bind configuration
+  ansible.builtin.file:
+    dest: "{{ pdns_config_include }}/bind.conf"
+    state: absent
+  notify:
+    - restart pdns
+
+- name: remove built-in pdns backend lmdb configuration
+  ansible.builtin.file:
+    dest: "{{ pdns_config_include }}/lmdb.conf"
+    state: absent
+  notify:
+    - restart pdns
+
+- name: generate the pdns configuration
+  ansible.builtin.template:
+    src: pdns.conf.j2
+    dest: "{{ pdns_config_dir }}/pdns.conf"
+    owner: "{{ pdns_file_owner }}"
+    group: "{{ pdns_file_group }}"
+    mode: "0640"
+    backup: true
+  notify:
+    - restart pdns
diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pdns/tasks/database/lmdb.yml b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pdns/tasks/database/lmdb.yml
new file mode 100644
index 0000000..03003a6
--- /dev/null
+++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pdns/tasks/database/lmdb.yml
@@ -0,0 +1,14 @@
+---
+# https://doc.powerdns.com/authoritative/backends/lmdb.html
+
+- name: define lmdb backend data
+  ansible.builtin.set_fact:
+    _pdns_backend_data: "{{ pdns_backends | bodsch.dns.pdns_backend_data('lmdb') }}"
+
+- name: create directory for lmdb database
+  ansible.builtin.file:
+    name: "{{ _pdns_backend_data[0].filename | dirname }}"
+    owner: "{{ pdns_owner }}"
+    group: "{{ pdns_group }}"
+    state: directory
+    mode: "0750"
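+
+# Illustrative only (not shipped with the role): an entry in `pdns_backends`
+# that would feed `_pdns_backend_data` above; the keys follow the lmdb backend
+# settings documented at the URL at the top of this file:
+#
+# pdns_backends:
+#   - name: lmdb
+#     filename: /var/lib/powerdns/pdns.lmdb
+#     sync-mode: sync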
diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pdns/tasks/database/main.yml b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pdns/tasks/database/main.yml
new file mode 100644
index 0000000..eecf66c
--- /dev/null
+++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pdns/tasks/database/main.yml
@@ -0,0 +1,68 @@
+---
+
+- name: define backend types
+  ansible.builtin.set_fact:
+    pdns_backend_types: "{{ pdns_backends | bodsch.dns.pdns_backend_types(version=pdns_version.full_version) }}"
+
+- name: backend handling
+  block:
+    - name: define backend packages
+      ansible.builtin.set_fact:
+        pdns_backend_packages: "{{ pdns_backend_types | bodsch.dns.pdns_backend_packages(_pdns_backend_packages) }}"
+
+    - name: install pdns backends
+      ansible.builtin.package:
+        name: "{{ pdns_backend_packages }}"
+        state: present
+      when:
+        - pdns_backend_packages is defined
+        - pdns_backend_packages | count > 0
+
+- name: find schema files for powerdns backends
+  ansible.builtin.find:
+    paths:
+      - "/usr/share/pdns-backend-sqlite3/schema"
+      - "/usr/share/pdns-backend-mysql/schema"
+      - "/usr/share/pdns-backend-bind/schema"
+      - "{{ pdns_config_dir }}/backends/"
+    file_type: file
+    follow: true
+    recurse: true
+    depth: 2
+    use_regex: true
+    patterns:
+      - '^schema.*\.sql$'
+  register: found_files
+  # no_log: true
+
+- name: prepare schema files
+  ansible.builtin.include_tasks: schema_files.yml
+  when:
+    - found_files.matched | default(0) | int == 0
+
+- name: mysql database
+  ansible.builtin.include_tasks: database/mysql.yml
+  when:
+    - pdns_backends is defined
+    - "'mysql' in pdns_backend_types"
+  tags:
+    - pdns
+    - pdns_backend
+
+- name: sqlite database
+  ansible.builtin.include_tasks: database/sqlite3.yml
+  when:
+    - pdns_backends is defined
+    - "'sqlite3' in pdns_backend_types"
+  tags:
+    - pdns
+    - pdns_backend
+
+- name: lmdb database
+  ansible.builtin.include_tasks: database/lmdb.yml
+  when:
+    - pdns_backends is defined
+    - "'lmdb' in pdns_backend_types"
+  tags:
+    - pdns
+    - pdns_backend
diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pdns/tasks/database/mysql.yml b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pdns/tasks/database/mysql.yml
new file mode 100644
index 0000000..1b5c51f
--- /dev/null
+++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pdns/tasks/database/mysql.yml
@@ -0,0 +1,50 @@
+---
+# https://doc.powerdns.com/authoritative/backends/generic-mysql.html
+
+- name: define mysql backend data
+  ansible.builtin.set_fact:
+    _pdns_backend_data: "{{ pdns_backends | bodsch.dns.pdns_backend_data('mysql') }}"
+
+- name: find schema file for mysql database
+  ansible.builtin.find:
+    paths:
+      - "/usr/share/pdns-backend-mysql/schema"
+      - "{{ pdns_config_dir }}/backends"
+    file_type: file
+    patterns:
+      - schema.mysql.sql
+    recurse: true
+  register: found_files
+  no_log: true
+
+- name: define mysql schema file
+  ansible.builtin.set_fact:
+    pdns_mysql_schema: "{{ found_files.files | sort(attribute='path', reverse=True) | map(attribute='path') | list | first }}"
+  when:
+    - found_files.files is defined
+    - found_files.files | count > 0
+
+- name: validate mysql schema file
+  ansible.builtin.fail:
+    msg: "I can't find a valid mysql schema file!"
+  when:
+    - pdns_mysql_schema | default('') | string | length == 0
+  run_once: true
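+
+# Illustrative input (not part of the role defaults): a gmysql entry in
+# `pdns_backends` that the task below consumes; the key names follow the
+# generic-mysql backend settings linked at the top of this file:
+#
+# pdns_backends:
+#   - name: gmysql
+#     host: 127.0.0.1
+#     dbname: pdns
+#     user: pdns
+#     password: "{{ vault__pdns.databases.pdns }}"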
+
+- name: create powerdns mysql databases
+  bodsch.dns.pdns_mysql_backend:
+    state: create
+    owner: "{{ pdns_owner }}"
+    group: "{{ pdns_group }}"
+    mode: "0644"
+    database:
+      hostname: "{{ _pdns_backend_data[0].host }}"
+      port: "{{ _pdns_backend_data[0].port | default(omit) }}"
+      socket: "{{ _pdns_backend_data[0].socket | default(omit) }}"
+      config_file: "{{ _pdns_backend_data[0].config_file | default(omit) }}"
+      schemaname: "{{ _pdns_backend_data[0].dbname | default(omit) }}"
+    login:
+      username: "{{ _pdns_backend_data[0].user | default(omit) }}"
+      password: "{{ _pdns_backend_data[0].password | default(omit) }}"
+    schema_file: "{{ pdns_mysql_schema }}"
diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pdns/tasks/database/schema_files.yml b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pdns/tasks/database/schema_files.yml
new file mode 100644
index 0000000..6afd3f3
--- /dev/null
+++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pdns/tasks/database/schema_files.yml
@@ -0,0 +1,24 @@
+---
+
+- name: create directory for powerdns backend schema files
+  ansible.builtin.file:
+    name: "{{ pdns_config_dir }}/backends"
+    owner: "{{ pdns_owner }}"
+    group: "{{ pdns_group }}"
+    state: directory
+    mode: "0755"
+
+- name: copy backend schema files
+  ansible.builtin.copy:
+    src: "{{ item }}"
+    dest: "{{ pdns_config_dir }}/backends/"
+    mode: "0640"
+  loop:
+    - bind-dnssec.schema.sqlite3.sql
+    - schema.mysql.sql
+    - schema.pgsql.sql
+    - schema.sqlite3.sql
+  loop_control:
+    label: "{{ item }}"
+
+...
diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pdns/tasks/database/sqlite3.yml b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pdns/tasks/database/sqlite3.yml
new file mode 100644
index 0000000..7ac61cc
--- /dev/null
+++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pdns/tasks/database/sqlite3.yml
@@ -0,0 +1,42 @@
+---
+# https://doc.powerdns.com/authoritative/backends/generic-sqlite3.html
+
+- name: define sqlite backend data
+  ansible.builtin.set_fact:
+    _pdns_backend_data: "{{ pdns_backends | bodsch.dns.pdns_backend_data('sqlite') }}"
+
+- name: find schema file for sqlite database
+  ansible.builtin.find:
+    paths:
+      - "/usr/share/pdns-backend-sqlite3/schema"
+      - "{{ pdns_config_dir }}/backends/"
+    file_type: file
+    patterns:
+      - schema.sqlite3.sql
+    recurse: true
+  register: found_files
+  no_log: true
+
+- name: define sqlite schema file
+  ansible.builtin.set_fact:
+    pdns_sqlite_schema: "{{ found_files.files | sort(attribute='path', reverse=True) | map(attribute='path') | list | first }}"
+  when:
+    - found_files.files is defined
+    - found_files.files | count > 0
+
+- name: validate sqlite schema file
+  ansible.builtin.fail:
+    msg: "I can't find a valid sqlite schema file!"
+  when:
+    - pdns_sqlite_schema | default('') | string | length == 0
+  run_once: true
+
+- name: create powerdns sqlite databases
+  bodsch.dns.pdns_sqlite_backend:
+    state: create
+    owner: "{{ pdns_owner }}"
+    group: "{{ pdns_group }}"
+    mode: "0644"
+    database: "{{ _pdns_backend_data[0] }}"
+    schema_file: "{{ pdns_sqlite_schema }}"
diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pdns/tasks/install.yml b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pdns/tasks/install.yml
new file mode 100644
index 0000000..cc18a00
--- /dev/null
+++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pdns/tasks/install.yml
@@ -0,0 +1,57 @@
+---
+
+- name: repositories
+  ansible.builtin.include_tasks: repositories/debian.yml
+  when:
+    - ansible_facts.os_family | lower == 'debian'
+    - pdns_external_repo | default('false') | bool
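+
+# Illustrative: set `pdns_external_repo: true` in the inventory to pull the
+# packages from the upstream PowerDNS repository instead of the distribution
+# packages; see repositories/debian.yml below.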
+
+- name: install pdns
+  become: true
+  ansible.builtin.package:
+    name: "{{ pdns_packages }}"
+    state: present
+
+- name: detect pdns version
+  become: true
+  bodsch.dns.pdns_version:
+  register: pdns_version
+  check_mode: false
+  ignore_errors: true
+
+- name: create custom fact file
+  bodsch.core.facts:
+    name: pdns
+    facts:
+      full_version: "{{ pdns_version.full_version }}"
+      version: "{{ pdns_version.version }}"
+
+# -----------------------------------------------------------------
+
+- name: systemd
+  when:
+    - ansible_facts.service_mgr == "systemd"
+  block:
+    - name: populate service facts
+      ansible.builtin.service_facts:
+      register: systemd_facts
+      no_log: true
+      tags:
+        - pdns
+        - install
+
+    - name: set systemd unit name
+      ansible.builtin.set_fact:
+        resolved_unit_file: "{{ ansible_facts.services | bodsch.systemd.service('systemd-resolved', state='running') }}"
+      tags:
+        - pdns
+        - install
+
+    - name: disable systemd-resolved
+      ansible.builtin.service:
+        name: "{{ resolved_unit_file | default('systemd-resolved') }}"
+        state: stopped
+        enabled: false
+      when:
+        - resolved_unit_file | default('') | string | length > 0
+        - resolved_unit_file | default('') != 'null'
diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pdns/tasks/main.yml b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pdns/tasks/main.yml
new file mode 100644
index 0000000..84b1ef4
--- /dev/null
+++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pdns/tasks/main.yml
@@ -0,0 +1,18 @@
+---
+
+- name: prepare
+  ansible.builtin.include_tasks: prepare.yml
+
+- name: install
+  ansible.builtin.include_tasks: install.yml
+
+- name: database
+  ansible.builtin.include_tasks: database/main.yml
+
+- name: configure
+  ansible.builtin.include_tasks: configure.yml
+
+- name: service
+  ansible.builtin.include_tasks: service.yml
+
+...
diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pdns/tasks/prepare.yml b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pdns/tasks/prepare.yml
new file mode 100644
index 0000000..51fcf8f
--- /dev/null
+++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pdns/tasks/prepare.yml
@@ -0,0 +1,39 @@
+---
+
+- name: include OS specific configuration ({{ ansible_facts.distribution }} ({{ ansible_facts.os_family }}) {{ ansible_facts.distribution_major_version }})
+  ansible.builtin.include_vars: "{{ lookup('first_found', params) }}"
+  vars:
+    params:
+      paths:
+        - "vars"
+      files:
+        # eg. debian-10 / ubuntu-20.04 / centos-8 / oraclelinux-8
+        - "{{ ansible_facts.distribution | lower }}-{{ ansible_facts.distribution_major_version }}.yml"
+        # eg. archlinux-systemd / archlinux-openrc
+        - "{{ ansible_facts.distribution | lower }}-{{ ansible_facts.service_mgr | lower }}.yml"
+        # eg. debian / ubuntu / centos / oraclelinux
+        - "{{ ansible_facts.distribution | lower }}.yml"
+        # eg. redhat / debian / archlinux
+        - "{{ ansible_facts.os_family | lower }}.yml"
+        - default.yaml
+      skip: true
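+
+# Illustrative resolution order: on Ubuntu 24 with systemd, the lookup above
+# tries ubuntu-24.yml, ubuntu-systemd.yml, ubuntu.yml, debian.yml and finally
+# default.yaml, loading the first file that exists below vars/.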
+
+- name: detect ansible check_mode
+  bodsch.core.check_mode:
+  register: _check_mode
+
+- name: define running_in_check_mode
+  ansible.builtin.set_fact:
+    running_in_check_mode: '{{ _check_mode.check_mode }}'
+
+- name: install dependencies
+  ansible.builtin.package:
+    name: "{{ pdns_dependencies }}"
+    state: present
+  when:
+    - pdns_dependencies | default([]) | count > 0
+
+- name: get latest system information
+  ansible.builtin.setup:
+
+...
diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pdns/tasks/repositories/debian.yml b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pdns/tasks/repositories/debian.yml
new file mode 100644
index 0000000..b4cc6f3
--- /dev/null
+++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pdns/tasks/repositories/debian.yml
@@ -0,0 +1,58 @@
+---
+
+# - name: Install gnupg
+#   ansible.builtin.package:
+#     name: gnupg
+#     state: present
+
+- name: add powerdns apt sources
+  when:
+    - ansible_facts.os_family | lower == 'debian'
+  bodsch.core.apt_sources:
+    name: pdns-auth-master
+    filename: pdns-auth-master.sources
+    uris:
+      - http://repo.powerdns.com/debian
+    suites: "{{ ansible_facts.distribution_release | lower }}-auth-master"
+    components:
+      - main
+    signed_by: /etc/apt/keyrings/pdns-auth-master-pub.asc
+    key:
+      method: download
+      url: "https://repo.powerdns.com/CBC8B383-pub.asc"
+      dest: "/etc/apt/keyrings/pdns-auth-master-pub.asc"
+      dearmor: true
+      validate: true
+    update_cache: true
+
+# - name: add apt signing key
+#   ansible.builtin.apt_key:
+#     url: https://repo.powerdns.com/CBC8B383-pub.asc
+#     # id: "{{ pdns_install_repo['gpg_key_id'] | default('') }}"
+#     state: present
+#   register: apt_key
+#   changed_when: apt_key.changed
+#   failed_when: apt_key.failed
+#
+# - name: Add the PowerDNS APT Repository
+#   ansible.builtin.apt_repository:
+#     # filename: "{{ pdns_install_repo['name'] }}"
+#     repo: deb [signed-by=/etc/apt/keyrings/auth-master-pub.asc] http://repo.powerdns.com/debian bookworm-auth-master main
+#     state: present
+#   register: apt_repo
+#   changed_when: apt_repo.changed
+#   failed_when: apt_repo.failed
+#
+# - name: Update the APT cache
+#   ansible.builtin.apt:
+#     update_cache: true
+#   when:
+#     - apt_key.changed or apt_repo.changed
+
+# - name: Pin the PowerDNS APT Repository
+#   ansible.builtin.template:
+#     src: pdns.pin.j2
+#     dest: /etc/apt/preferences.d/pdns
+#     owner: root
+#     group: root
+#     mode: 0644
diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pdns/tasks/service.yml b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pdns/tasks/service.yml
new file mode 100644
index 0000000..e0f683b
--- /dev/null
+++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pdns/tasks/service.yml
@@ -0,0 +1,12 @@
+---
+
+- name: Start and enable the PowerDNS service
+  throttle: 1
+  ansible.builtin.service:
+    name: "{{ pdns_service.name }}"
+    state: "{{ pdns_service.state }}"
+    enabled: "{{ pdns_service.enabled }}"
+  tags:
+    - service
+
+...
diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pdns/templates/pdns.conf.j2 b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pdns/templates/pdns.conf.j2 new file mode 100644 index 0000000..2f5460b --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pdns/templates/pdns.conf.j2 @@ -0,0 +1,751 @@ +#jinja2: trim_blocks: True, lstrip_blocks: True +# {{ ansible_managed }} + +config-dir={{ pdns_config_dir }} +setuid={{ pdns_owner }} +setgid={{ pdns_group }} + +# create parent object +launch= + +include-dir={{ pdns_config_include }} + +{# +# Autogenerated configuration file template + +################################# +# ignore-unknown-settings Configuration settings to ignore if they are unknown +# +# ignore-unknown-settings= + +################################# +# 8bit-dns Allow 8bit dns queries +# +# 8bit-dns=no + +################################# +# allow-axfr-ips Allow zonetransfers only to these subnets +# +# allow-axfr-ips=127.0.0.0/8,::1 + +################################# +# allow-dnsupdate-from A global setting to allow DNS updates from these IP ranges. +# +# allow-dnsupdate-from=127.0.0.0/8,::1 + +################################# +# allow-notify-from Allow AXFR NOTIFY from these IP ranges. If empty, drop all incoming notifies. +# +# allow-notify-from=0.0.0.0/0,::/0 + +################################# +# allow-unsigned-autoprimary Allow autoprimaries to create zones without TSIG signed NOTIFY +# +# allow-unsigned-autoprimary=yes + +################################# +# allow-unsigned-notify Allow unsigned notifications for TSIG secured zones +# +# allow-unsigned-notify=yes + +################################# +# also-notify When notifying a zone, also notify these nameservers +# +# also-notify= + +################################# +# any-to-tcp Answer ANY queries with tc=1, shunting to TCP +# +# any-to-tcp=yes + +################################# +# api Enable/disable the REST API (including HTTP listener) +# +# api=no + +################################# +# api-key Static pre-shared authentication key for access to the REST API +# +# api-key= + +################################# +# autosecondary Act as an autosecondary +# +# autosecondary=no + +################################# +# axfr-fetch-timeout Maximum time in seconds for inbound AXFR to start or be idle after starting +# +# axfr-fetch-timeout=10 + +################################# +# axfr-lower-serial Also AXFR a zone from a primary with a lower serial +# +# axfr-lower-serial=no + +################################# +# cache-ttl Seconds to store packets in the PacketCache +# +# cache-ttl=20 + +################################# +# carbon-instance If set overwrites the instance name default +# +# carbon-instance=auth + +################################# +# carbon-interval Number of seconds between carbon (graphite) updates +# +# carbon-interval=30 + +################################# +# carbon-namespace If set overwrites the first part of the carbon string +# +# carbon-namespace=pdns + +################################# +# carbon-ourname If set, overrides our reported hostname for carbon stats +# +# carbon-ourname= + +################################# +# carbon-server If set, send metrics in carbon (graphite) format to this server IP address +# +# carbon-server= + +################################# +# chroot If set, chroot to this directory for more security +# +# chroot= + +################################# +# config-dir Location of 
configuration directory (pdns.conf) +# +# config-dir=/etc/powerdns + +################################# +# config-name Name of this virtual configuration - will rename the binary image +# +# config-name= + +################################# +# consistent-backends Assume individual zones are not divided over backends. Send only ANY lookup operations to the backend to reduce the number of lookups +# +# consistent-backends=yes + +################################# +# control-console Debugging switch - don't use +# +# control-console=no + +################################# +# daemon Operate as a daemon +# +# daemon=no + +################################# +# default-api-rectify Default API-RECTIFY value for zones +# +# default-api-rectify=yes + +################################# +# default-catalog-zone Catalog zone to assign newly created primary zones (via the API) to +# +# default-catalog-zone= + +################################# +# default-ksk-algorithm Default KSK algorithm +# +# default-ksk-algorithm=ecdsa256 + +################################# +# default-ksk-size Default KSK size (0 means default) +# +# default-ksk-size=0 + +################################# +# default-publish-cdnskey Default value for PUBLISH-CDNSKEY +# +# default-publish-cdnskey= + +################################# +# default-publish-cds Default value for PUBLISH-CDS +# +# default-publish-cds= + +################################# +# default-soa-content Default SOA content +# +# default-soa-content=a.misconfigured.dns.server.invalid hostmaster.@ 0 10800 3600 604800 3600 + +################################# +# default-soa-edit Default SOA-EDIT value +# +# default-soa-edit= + +################################# +# default-soa-edit-signed Default SOA-EDIT value for signed zones +# +# default-soa-edit-signed= + +################################# +# default-ttl Seconds a result is valid if not set otherwise +# +# default-ttl=3600 + +################################# +# default-zsk-algorithm Default ZSK algorithm +# +# default-zsk-algorithm= + +################################# +# default-zsk-size Default ZSK size (0 means default) +# +# default-zsk-size=0 + +################################# +# delay-notifications Configure a delay to send out notifications, no delay by default +# +# delay-notifications=0 + +################################# +# direct-dnskey Fetch DNSKEY, CDS and CDNSKEY RRs from backend during DNSKEY or CDS/CDNSKEY synthesis +# +# direct-dnskey=no + +################################# +# disable-axfr Disable zonetransfers but do allow TCP queries +# +# disable-axfr=no + +################################# +# disable-axfr-rectify Disable the rectify step during an outgoing AXFR. Only required for regression testing. +# +# disable-axfr-rectify=no + +################################# +# disable-syslog Disable logging to syslog, useful when running inside a supervisor that logs stdout +# +# disable-syslog=no + +################################# +# distributor-threads Default number of Distributor (backend) threads to start +# +# distributor-threads=3 + +################################# +# dname-processing If we should support DNAME records +# +# dname-processing=no + +################################# +# dnssec-key-cache-ttl Seconds to cache DNSSEC keys from the database +# +# dnssec-key-cache-ttl=30 + +################################# +# dnsupdate Enable/Disable DNS update (RFC2136) support. Default is no. 
+# +# dnsupdate=no + +################################# +# domain-metadata-cache-ttl Seconds to cache zone metadata from the database +# +# domain-metadata-cache-ttl= + +################################# +# edns-cookie-secret When set, set a server cookie when responding to a query with a Client cookie (in hex) +# +# edns-cookie-secret= + +################################# +# edns-subnet-processing If we should act on EDNS Subnet options +# +# edns-subnet-processing=no + +################################# +# enable-lua-records Process LUA records for all zones (metadata overrides this) +# +# enable-lua-records=no + +################################# +# entropy-source If set, read entropy from this file +# +# entropy-source=/dev/urandom + +################################# +# expand-alias Expand ALIAS records +# +# expand-alias=no + +################################# +# forward-dnsupdate A global setting to allow DNS update packages that are for a Secondary zone, to be forwarded to the primary. +# +# forward-dnsupdate=yes + +################################# +# forward-notify IP addresses to forward received notifications to regardless of primary or secondary settings +# +# forward-notify= + +################################# +# guardian Run within a guardian process +# +# guardian=no + +################################# +# include-dir Include *.conf files from this directory +# +# include-dir= + +################################# +# launch Which backends to launch and order to query them in +# +# launch= + +################################# +# load-modules Load this module - supply absolute or relative path +# +# load-modules= + +################################# +# local-address Local IP addresses to which we bind +# +# local-address=0.0.0.0, :: + +################################# +# local-address-nonexist-fail Fail to start if one or more of the local-address's do not exist on this server +# +# local-address-nonexist-fail=yes + +################################# +# local-port The port on which we listen +# +# local-port=53 + +################################# +# log-dns-details If PDNS should log DNS non-erroneous details +# +# log-dns-details=no + +################################# +# log-dns-queries If PDNS should log all incoming DNS queries +# +# log-dns-queries=no + +################################# +# log-timestamp Print timestamps in log lines +# +# log-timestamp=yes + +################################# +# logging-facility Log under a specific facility +# +# logging-facility= + +################################# +# loglevel Amount of logging. Higher is more. Do not set below 3 +# +# loglevel=4 + +################################# +# loglevel-show Include log level indicator in log output +# +# loglevel-show=no + +################################# +# lua-axfr-script Script to be used to edit incoming AXFRs +# +# lua-axfr-script= + +################################# +# lua-consistent-hashes-cleanup-interval Pre-computed hashes cleanup interval (in seconds) +# +# lua-consistent-hashes-cleanup-interval=3600 + +################################# +# lua-consistent-hashes-expire-delay Cleanup pre-computed hashes that haven't been used for the given delay (in seconds). 
See pickchashed() LUA function +# +# lua-consistent-hashes-expire-delay=86400 + +################################# +# lua-dnsupdate-policy-script Lua script with DNS update policy handler +# +# lua-dnsupdate-policy-script= + +################################# +# lua-health-checks-expire-delay Stops doing health checks after the record hasn't been used for that delay (in seconds) +# +# lua-health-checks-expire-delay=3600 + +################################# +# lua-health-checks-interval LUA records health checks monitoring interval in seconds +# +# lua-health-checks-interval=5 + +################################# +# lua-prequery-script Lua script with prequery handler (DO NOT USE) +# +# lua-prequery-script= + +################################# +# lua-records-exec-limit LUA records scripts execution limit (instructions count). Values <= 0 mean no limit +# +# lua-records-exec-limit=1000 + +################################# +# lua-records-insert-whitespace Insert whitespace when combining LUA chunks +# +# lua-records-insert-whitespace=yes + +################################# +# max-cache-entries Maximum number of entries in the query cache +# +# max-cache-entries=1000000 + +################################# +# max-ent-entries Maximum number of empty non-terminals in a zone +# +# max-ent-entries=100000 + +################################# +# max-generate-steps Maximum number of $GENERATE steps when loading a zone from a file +# +# max-generate-steps=0 + +################################# +# max-include-depth Maximum number of nested $INCLUDE directives while processing a zone file +# +# max-include-depth=20 + +################################# +# max-nsec3-iterations Limit the number of NSEC3 hash iterations +# +# max-nsec3-iterations=100 + +################################# +# max-packet-cache-entries Maximum number of entries in the packet cache +# +# max-packet-cache-entries=1000000 + +################################# +# max-queue-length Maximum queuelength before considering situation lost +# +# max-queue-length=5000 + +################################# +# max-signature-cache-entries Maximum number of signatures cache entries +# +# max-signature-cache-entries= + +################################# +# max-tcp-connection-duration Maximum time in seconds that a TCP DNS connection is allowed to stay open. 
+# +# max-tcp-connection-duration=0 + +################################# +# max-tcp-connections Maximum number of TCP connections +# +# max-tcp-connections=20 + +################################# +# max-tcp-connections-per-client Maximum number of simultaneous TCP connections per client +# +# max-tcp-connections-per-client=0 + +################################# +# max-tcp-transactions-per-conn Maximum number of subsequent queries per TCP connection +# +# max-tcp-transactions-per-conn=0 + +################################# +# module-dir Default directory for modules +# +# module-dir=/usr/lib/powerdns + +################################# +# negquery-cache-ttl Seconds to store negative query results in the QueryCache +# +# negquery-cache-ttl=60 + +################################# +# no-shuffle Set this to prevent random shuffling of answers - for regression testing +# +# no-shuffle=off + +################################# +# non-local-bind Enable binding to non-local addresses by using FREEBIND / BINDANY socket options +# +# non-local-bind=no + +################################# +# only-notify Only send AXFR NOTIFY to these IP addresses or netmasks +# +# only-notify=0.0.0.0/0,::/0 + +################################# +# outgoing-axfr-expand-alias Expand ALIAS records during outgoing AXFR +# +# outgoing-axfr-expand-alias=no + +################################# +# overload-queue-length Maximum queuelength moving to packetcache only +# +# overload-queue-length=0 + +################################# +# prevent-self-notification Don't send notifications to what we think is ourself +# +# prevent-self-notification=yes + +################################# +# primary Act as a primary +# +# primary=no + +################################# +# proxy-protocol-from A Proxy Protocol header is only allowed from these subnets, and is mandatory then too. +# +# proxy-protocol-from= + +################################# +# proxy-protocol-maximum-size The maximum size of a proxy protocol payload, including the TLV values +# +# proxy-protocol-maximum-size=512 + +################################# +# query-cache-ttl Seconds to store query results in the QueryCache +# +# query-cache-ttl=20 + +################################# +# query-local-address Source IP addresses for sending queries +# +# query-local-address=0.0.0.0 :: + +################################# +# query-logging Hint backends that queries should be logged +# +# query-logging=no + +################################# +# queue-limit Maximum number of milliseconds to queue a query +# +# queue-limit=1500 + +################################# +# receiver-threads Default number of receiver threads to start +# +# receiver-threads=1 + +################################# +# resolver Use this resolver for ALIAS and the internal stub resolver +# +# resolver=no + +################################# +# retrieval-threads Number of AXFR-retrieval threads for secondary operation +# +# retrieval-threads=2 + +################################# +# reuseport Enable higher performance on compliant kernels by using SO_REUSEPORT allowing each receiver thread to open its own socket +# +# reuseport=no + +################################# +# rng Specify the random number generator to use. Valid values are auto,sodium,openssl,getrandom,arc4random,urandom. +# +# rng=auto + +################################# +# secondary Act as a secondary +# +# secondary=no + +################################# +# secondary-check-signature-freshness Check signatures in SOA freshness check. 
Sets DO flag on SOA queries. Outside some very problematic scenarios, say yes here. +# +# secondary-check-signature-freshness=yes + +################################# +# secondary-do-renotify If this secondary should send out notifications after receiving zone transfers from a primary +# +# secondary-do-renotify=no + +################################# +# security-poll-suffix Zone name from which to query security update notifications +# +# security-poll-suffix=secpoll.powerdns.com. + +################################# +# send-signed-notify Send TSIG secured NOTIFY if TSIG key is configured for a zone +# +# send-signed-notify=yes + +################################# +# server-id Returned when queried for 'id.server' TXT or NSID, defaults to hostname - disabled or custom +# +# server-id= + +################################# +# setgid If set, change group id to this gid for more security +# +setgid=powerdns + +################################# +# setuid If set, change user id to this uid for more security +# +setuid=powerdns + +################################# +# signing-threads Default number of signer threads to start +# +# signing-threads=3 + +################################# +# socket-dir Where the controlsocket will live, /var/run/pdns when unset and not chrooted. Set to the RUNTIME_DIRECTORY environment variable when that variable has a value (e.g. under systemd). +# +# socket-dir= + +################################# +# svc-autohints Transparently fill ipv6hint=auto ipv4hint=auto SVC params with AAAA/A records for the target name of the record (if within the same zone) +# +# svc-autohints=no + +################################# +# tcp-control-address If set, PowerDNS can be controlled over TCP on this address +# +# tcp-control-address= + +################################# +# tcp-control-port If set, PowerDNS can be controlled over TCP on this address +# +# tcp-control-port=53000 + +################################# +# tcp-control-range If set, remote control of PowerDNS is possible over these networks only +# +# tcp-control-range=127.0.0.0/8, 10.0.0.0/8, 192.168.0.0/16, 172.16.0.0/12, ::1/128, fe80::/10 + +################################# +# tcp-control-secret If set, PowerDNS can be controlled over TCP after passing this secret +# +# tcp-control-secret= + +################################# +# tcp-fast-open Enable TCP Fast Open support on the listening sockets, using the supplied numerical value as the queue size +# +# tcp-fast-open=0 + +################################# +# tcp-idle-timeout Maximum time in seconds that a TCP DNS connection is allowed to stay open while being idle +# +# tcp-idle-timeout=5 + +################################# +# traceback-handler Enable the traceback handler (Linux only) +# +# traceback-handler=yes + +################################# +# trusted-notification-proxy IP address of incoming notification proxy +# +# trusted-notification-proxy= + +################################# +# udp-truncation-threshold Maximum UDP response size before we truncate +# +# udp-truncation-threshold=1232 + +################################# +# upgrade-unknown-types Transparently upgrade known TYPExxx records. 
Recommended to keep off, except for PowerDNS upgrades until data sources are cleaned up +# +# upgrade-unknown-types=no + +################################# +# version-string PowerDNS version in packets - full, anonymous, powerdns or custom +# +# version-string=full + +################################# +# webserver Start a webserver for monitoring (api=yes also enables the HTTP listener) +# +# webserver=no + +################################# +# webserver-address IP Address of webserver/API to listen on +# +# webserver-address=127.0.0.1 + +################################# +# webserver-allow-from Webserver/API access is only allowed from these subnets +# +# webserver-allow-from=127.0.0.1,::1 + +################################# +# webserver-connection-timeout Webserver/API request/response timeout in seconds +# +# webserver-connection-timeout=5 + +################################# +# webserver-hash-plaintext-credentials Whether to hash passwords and api keys supplied in plaintext, to prevent keeping the plaintext version in memory at runtime +# +# webserver-hash-plaintext-credentials=no + +################################# +# webserver-loglevel Amount of logging in the webserver (none, normal, detailed) +# +# webserver-loglevel=normal + +################################# +# webserver-max-bodysize Webserver/API maximum request/response body size in megabytes +# +# webserver-max-bodysize=2 + +################################# +# webserver-password Password required for accessing the webserver +# +# webserver-password= + +################################# +# webserver-port Port of webserver/API to listen on +# +# webserver-port=8081 + +################################# +# webserver-print-arguments If the webserver should print arguments +# +# webserver-print-arguments=no + +################################# +# workaround-11804 Workaround for issue 11804: send single RR per AXFR chunk +# +# workaround-11804=no + +################################# +# write-pid Write a PID file +# +# write-pid=yes + +################################# +# xfr-cycle-interval Schedule primary/secondary SOA freshness checks once every .. 
seconds
+#
+# xfr-cycle-interval=60
+
+#################################
+# xfr-max-received-mbytes	Maximum number of megabytes received from an incoming XFR
+#
+# xfr-max-received-mbytes=100
+
+#################################
+# zone-cache-refresh-interval	Seconds to cache list of known zones
+#
+# zone-cache-refresh-interval=300
+
+#################################
+# zone-metadata-cache-ttl	Seconds to cache zone metadata from the database
+#
+# zone-metadata-cache-ttl=60
+
+#}
diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pdns/templates/pdns_api.conf.j2 b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pdns/templates/pdns_api.conf.j2
new file mode 100644
index 0000000..490c9ee
--- /dev/null
+++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pdns/templates/pdns_api.conf.j2
@@ -0,0 +1,7 @@
+#jinja2: trim_blocks: True, lstrip_blocks: True
+# {{ ansible_managed }}
+
+{% if pdns_api is defined %}
+api = {{ pdns_api.enabled | bodsch.core.config_bool(true_as='yes', false_as='no') }}
+api-key = {{ pdns_api.key }}
+{% endif %}
diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pdns/templates/pdns_backends.conf.j2 b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pdns/templates/pdns_backends.conf.j2
new file mode 100644
index 0000000..7aba8ad
--- /dev/null
+++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pdns/templates/pdns_backends.conf.j2
@@ -0,0 +1,20 @@
+#jinja2: trim_blocks: True, lstrip_blocks: True
+# {{ ansible_managed }}
+{% set pdns_backends = pdns_backends | bodsch.dns.pdns_config_upgrades(version=pdns_version.get("full_version")) %}
+
+{% for backend in pdns_backends %}
+  {% set backend_name = backend.get("name") | ansible.builtin.split(':') | first %}
+  {% set _ = backend.pop("name") %}
+  {% set _ = backend.pop("credentials", None) %}
+  {% set backend_string = backend_name | replace(':', '-') %}
+launch+={{ backend_name }}
+  {% for backend_item, value in backend.items() %}
+    {% if value | bodsch.core.type == "bool" %}
+      {% set value = value | bodsch.core.config_bool(true_as='yes', false_as='no') %}
+    {% endif %}
+{{ backend_string }}-{{ backend_item }} = {{ value }}
+  {% endfor %}
+
+{% endfor -%}
diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pdns/templates/pdns_general.conf.j2 b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pdns/templates/pdns_general.conf.j2
new file mode 100644
index 0000000..cd8a701
--- /dev/null
+++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pdns/templates/pdns_general.conf.j2
@@ -0,0 +1,19 @@
+#jinja2: trim_blocks: True, lstrip_blocks: True
+# {{ ansible_managed }}
+{% set pdns_config = pdns_config | bodsch.dns.pdns_config_upgrades(version=pdns_version.get("full_version")) %}
+
+{% for config_item, value in pdns_config.items() | sort() %}
+  {% if config_item not in ["config-dir", "launch", "setuid", "setgid"] %}
+    {% if value | bodsch.core.type == "bool" %}
+{{ config_item }}={{ value | bodsch.core.config_bool(true_as='yes', false_as='no') }}
+    {% elif value == None %}
+{{ config_item }}=
+    {% elif value is string %}
+{{ config_item }}={{ value | string }}
+    {% elif value | bodsch.core.type == "list" %}
+{{ config_item }}={{ value | join(',') }}
+    {% else %}
+{{ config_item }}={{ value | string }}
+    {% endif %}
+  {% endif %}
+{% endfor %}
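+{# Illustrative only: with
+   pdns_config: {"local-address": "127.0.0.1", "local-port": 5300, "master": true}
+   the loop above renders:
+     local-address=127.0.0.1
+     local-port=5300
+     master=yes
+#}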
diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pdns/templates/pdns_webserver.conf.j2 b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pdns/templates/pdns_webserver.conf.j2
new file mode 100644
index 0000000..03c5976
--- /dev/null
+++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pdns/templates/pdns_webserver.conf.j2
@@ -0,0 +1,30 @@
+#jinja2: trim_blocks: True, lstrip_blocks: True
+# {{ ansible_managed }}
+
+{% if pdns_webserver is defined and
+      pdns_webserver.enabled is defined and
+      pdns_webserver.enabled %}
+  {% set _ = pdns_webserver.pop("enabled") %}
+  {% if pdns_version.full_version is version_compare('4.8', '<=') %}
+    {% set _ = pdns_webserver.pop("connection-timeout", None) %}
+  {% endif %}
+  {% if pdns_version.full_version is version_compare('4.6', '<=') %}
+    {% set _ = pdns_webserver.pop("hash-plaintext-credentials", None) %}
+  {% endif %}
+webserver=yes
+  {% for config_item, value in pdns_webserver.items() %}
+    {% if config_item not in ["enabled"] %}
+      {% if value | bodsch.core.type == "bool" %}
+webserver-{{ config_item }}={{ value | bodsch.core.config_bool(true_as='yes', false_as='no') }}
+      {% elif value == None %}
+webserver-{{ config_item }}=
+      {% elif value is string %}
+webserver-{{ config_item }}={{ value | string }}
+      {% elif value | bodsch.core.type == "list" %}
+webserver-{{ config_item }}={{ value | join(',') }}
+      {% else %}
+webserver-{{ config_item }}={{ value | string }}
+      {% endif %}
+    {% endif %}
+  {% endfor %}
+{% endif %}
diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pdns/vars/archlinux.yml b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pdns/vars/archlinux.yml
new file mode 100644
index 0000000..1324b5b
--- /dev/null
+++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pdns/vars/archlinux.yml
@@ -0,0 +1,17 @@
+---
+
+pdns_lib_dir: /usr/lib/powerdns
+
+pdns_packages:
+  - powerdns
+  - python-requests
+  - python-urllib3
+  - python-netaddr
+
+_pdns_backend_packages:
+  mysql:
+    - mariadb-libs
+    - python-mysqlclient
+
+pdns_owner: powerdns
+pdns_group: powerdns
diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pdns/vars/debian.yml b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pdns/vars/debian.yml
new file mode 100644
index 0000000..9fde552
--- /dev/null
+++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pdns/vars/debian.yml
@@ -0,0 +1,31 @@
+---
+
+pdns_packages:
+  - pdns-server
+
+pdns_dependencies:
+  - gnupg
+  - python3-requests
+
+# The directory where the PowerDNS Authoritative Server configuration is located
+pdns_config_dir: "/etc/powerdns"
+
+pdns_owner: pdns
+pdns_group: pdns
+
+# List of PowerDNS Authoritative Server Backends packages on Debian
+_pdns_backend_packages:
+  geo: pdns-backend-geo
+  geoip: pdns-backend-geoip
+  mysql:
+    - pdns-backend-mysql
+    - python3-mysqldb
+  pgsql: pdns-backend-pgsql
+  sqlite3: pdns-backend-sqlite3
+  ldap: pdns-backend-ldap
+  lmdb: pdns-backend-lmdb
+  lua: pdns-backend-lua
+  mydns: pdns-backend-mydns
+  pipe: pdns-backend-pipe
+  remote: pdns-backend-remote
+  tinydns: pdns-backend-tinydns
diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pdns/vars/main.yml b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pdns/vars/main.yml
new file mode 100644
index 0000000..3c542d6
--- /dev/null
+++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pdns/vars/main.yml
@@ -0,0 +1,24 @@
+---
+
+pdns_dependencies: []
+
+# The directory where the PowerDNS configuration is located
+pdns_config_dir: '/etc/powerdns'
+
+pdns_lib_dir: /var/lib/powerdns
+
+pdns_defaults_webserver:
+  enabled: false
+  address: 127.0.0.1
+  allow-from: 127.0.0.1,::1
+  connection-timeout: 5
+  hash-plaintext-credentials: false
+  loglevel: normal
+  max-bodysize: 2
+  password:
+  port: 8081
+  print-arguments: false
+
+pdns_defaults_xfr:
+  cycle-interval: 60
+  max-received-mbytes: 100
diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pdns_records/.yamllint b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pdns_records/.yamllint
new file mode 100644
index 0000000..8827676
--- /dev/null
+++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pdns_records/.yamllint
@@ -0,0 +1,33 @@
+---
+# Based on ansible-lint config
+extends: default
+
+rules:
+  braces:
+    max-spaces-inside: 1
+    level: error
+  brackets:
+    max-spaces-inside: 1
+    level: error
+  colons:
+    max-spaces-after: -1
+    level: error
+  commas:
+    max-spaces-after: -1
+    level: error
+  comments: disable
+  comments-indentation: disable
+  document-start: disable
+  empty-lines:
+    max: 3
+    level: error
+  hyphens:
+    level: error
+  indentation: disable
+  key-duplicates: enable
+  line-length: disable
+  new-line-at-end-of-file: disable
+  new-lines:
+    type: unix
+  trailing-spaces: disable
+  truthy: disable
diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pdns_records/README.md b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pdns_records/README.md
new file mode 100644
index 0000000..f4449df
--- /dev/null
+++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pdns_records/README.md
@@ -0,0 +1,94 @@
+# Ansible Role: `bodsch.dns.pdns_records`
+
+Ansible role to configure DNS records for PowerDNS.
+ + +## Usage + +```yaml +pdns_zones: + - name: 'acme-inc.local' + type: primary + create_forward_zones: true + create_reverse_zones: true + #primaries: + # - 10.11.0.1 + networks: + - '10.11.0' + ipv6_networks: + - '2001:db8::' + name_servers: + - ns1 + - ns2 + + hosts: + - name: ns1 + ip: 10.11.0.1 + - name: ns2 + ip: 10.11.0.2 + - name: srv001 + ip: 10.11.1.1 + ipv6: 2001:db8::1 + aliases: + - www + - foo + + mail_servers: + - name: mail001 + preference: 10 + - name: mail002 + preference: 20 + + services: + - name: _ldap._tcp + weight: 100 + port: 631 + target: srv010 + - name: _ldap._tcp + weight: 50 + port: 631 + target: srv010 + - name: _imap._tcp + weight: 50 + port: 143 + target: mail001 + + text: + - name: _kerberos + text: KERBEROS.ACME-INC.COM + - name: '@' + text: + - 'some text' + - 'more text' + + - name: 'matrix.vpn' + type: primary + name_servers: + - ns + + hosts: + - name: ns + ip: 192.168.0.4 + - name: dunkelzahn + ip: 192.168.0.4 + aliases: + - home + - vpn +``` + +## Contribution + +Please read [Contribution](CONTRIBUTING.md) + +## Development, Branches (Git Tags) + + +## Author + +- Bodo Schulz + +## License + +[Apache](LICENSE) + +**FREE SOFTWARE, HELL YEAH!** diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pdns_records/defaults/main.yml b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pdns_records/defaults/main.yml new file mode 100644 index 0000000..f1126fd --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pdns_records/defaults/main.yml @@ -0,0 +1,77 @@ +--- + +pdns_config_include: "{{ pdns_config_dir }}/pdns.d" + +# The user and group the PowerDNS Authoritative Server process will run as. +# NOTE: at the moment, we don't create a user as we assume the package creates +# a "pdns" user and group. If you change these variables, make sure to create +# the user and group before applying this role +# pdns_user: "pdns" +# pdns_group: "pdns" +pdns_file_owner: "root" +pdns_file_group: "{{ pdns_group }}" + +pdns_service: + # Name of the PowerDNS Authoritative Server Service + name: "pdns" + # State of the PowerDNS Authoritative Server service + state: "started" + enabled: true +# systemd_overrides: +# LimitNOFILE: 10000 + +# When True, disable the automated restart of the PowerDNS service +# pdns_disable_handlers: False + +# dict containing all configuration options, except for backend +# configuration and the "config-dir", "setuid" and "setgid" directives. +pdns_config: {} +# pdns_config: +# master: yes +# slave: no +# local-address: '192.0.2.53' +# local-ipv6: '2001:DB8:1::53' +# local-port: '5300' + +# A dict with all the backends you'd like to configure.
+# This default starts just the bind-backend with an empty config file +pdns_backends: + - name: bind + config: '/dev/null' + # check-interval: 10 + # dnssec-db: "{{ pdns_config_dir }}/dnssec.db" + # dnssec-db-journal-mode: WAL + # hybrid: true + # ignore-broken-records: false + # supermaster-config: /var/lib/powerdns/supermaster.conf + # supermaster-destdir: /var/lib/powerdns/zones.slave.d + # supermasters: [] + # + # - name: gsqlite3 + # database: /var/lib/powerdns/pdns.db + # dnssec: true + # pragma-journal-mode: true + # pragma-synchronous: true + # pragma-foreign-keys: true + # + # - name: gmysql + # host: 10.11.0.10 + # dbname: pdns + # user: pdns + # password: "{{ vault__pdns.databases.pdns }}" + # # # https://doc.powerdns.com/authoritative/backends/generic-mysql.html?highlight=gmysql#settings + # # host: "" # Host (ip address) to connect to. Mutually exclusive with gmysql-socket. + # # # Warning: When specified as a hostname a chicken/egg situation might arise where the database is needed to resolve the IP address of the database. It is best to supply an IP address of the database here. + # # port: "" # The port to connect to on gmysql-host. Default: 3306. + # # socket: "" # Connect to the UNIX socket at this path. Mutually exclusive with gmysql-host. + # # dbname: "" # Name of the database to connect to. Default: “powerdns”. + # # user: "" # User to connect as. Default: “powerdns”. + # # group: "" # Group to connect as. Default: “client”. + # # password: "" # The password for gmysql-user. + # # dnssec: "" # Enable DNSSEC processing for this backend. Default: no. + # # innodb-read-committed: "" # Use the InnoDB READ-COMMITTED transaction isolation level. Default: yes. + # # ssl: "" # Deprecated since version 5.0.0. + # # timeout: "" # The timeout in seconds for each attempt to read from, or write to the server. A value of 0 will disable the timeout.
Default: 10 + # # thread-cleanup: "" # Only enable this if you are certain you need to + +pdns_zones: [] diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pdns_records/meta/main.yml b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pdns_records/meta/main.yml new file mode 100644 index 0000000..f6f8218 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pdns_records/meta/main.yml @@ -0,0 +1,28 @@ +--- + +galaxy_info: + role_name: pdns_records + + author: Bodo Schulz + description: Ansible role to create DNS records for PowerDNS + + license: Apache + min_ansible_version: "2.12" + + platforms: + - name: ArchLinux + - name: Debian + versions: + # 11 / 12 + - bullseye + - bookworm + - name: Ubuntu + versions: + # 20.04 + - focal + + galaxy_tags: + - system + - dns + - pdns + - powerdns diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pdns_records/molecule/backend-lmdb/converge.yml b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pdns_records/molecule/backend-lmdb/converge.yml new file mode 100644 index 0000000..b63d241 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pdns_records/molecule/backend-lmdb/converge.yml @@ -0,0 +1,33 @@ +--- + +- name: prepare instance + hosts: all + gather_facts: true + pre_tasks: + - name: "archlinux: install dnspython" + when: + - ansible_facts.distribution | lower == 'archlinux' or + ansible_facts.os_family | lower == 'artix linux' + ansible.builtin.package: + name: + - python-dnspython + state: present + + - name: "debian: install dnspython" + when: + - ansible_facts.os_family | lower == 'debian' + ansible.builtin.package: + name: + - python3-dnspython + state: present + +- name: converge + hosts: all + any_errors_fatal: true + gather_facts: true + + environment: + NETRC: '' + + roles: + - role: bodsch.dns.pdns_records diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pdns_records/molecule/backend-lmdb/group_vars/all/pdns.yml b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pdns_records/molecule/backend-lmdb/group_vars/all/pdns.yml new file mode 100644 index 0000000..a42d4a7 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pdns_records/molecule/backend-lmdb/group_vars/all/pdns.yml @@ -0,0 +1,43 @@ +--- + +pdns_backends: + - name: lmdb + filename: /var/lib/powerdns/pdns.lmdb + shards: 64 + sync-mode: nometasync + # schema-version: 5 + random-ids: true + map-size: 16000 + # flag-deleted: + # lightning-stream: + +pdns_config: + master: true + slave: false + also-notify: "" + local-address: '127.0.0.1' + local-port: '5300' + log-dns-details: true + loglevel: "5" # 0 = emergency, 1 = alert, 2 = critical, 3 = error, 4 = warning, 5 = notice, 6 = info, 7 = debug + +pdns_webserver: + enabled: true + address: "{{ ansible_facts.default_ipv4.address }}" + allow-from: + - "127.0.0.1" + - "::1" + - "10.11.0.0/24" + - "192.168.0.0/24" + connection-timeout: 5 + hash-plaintext-credentials: false + loglevel: normal + max-bodysize: 2 + password: + port: 8081 + print-arguments: true + +pdns_api: + enabled: true + key: tNSN-E1FxeDe/3nhqA + +...
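For orientation: the `pdns_webserver.conf.j2` template added earlier in this diff renders each remaining key of the `pdns_webserver` dict above as a `webserver-<key>` directive, passes booleans through `bodsch.core.config_bool` (yes/no) and joins lists with commas. Assuming PowerDNS 4.9 or newer (so neither `connection-timeout` nor `hash-plaintext-credentials` is popped) and 10.11.0.1 as the instance's default IPv4 on this scenario's docker network, the rendered file should look roughly like this sketch:

```ini
# sketch; assumed destination: {{ pdns_config_dir }}/pdns.d/pdns_webserver.conf
webserver=yes
webserver-address=10.11.0.1
webserver-allow-from=127.0.0.1,::1,10.11.0.0/24,192.168.0.0/24
webserver-connection-timeout=5
webserver-hash-plaintext-credentials=no
webserver-loglevel=normal
webserver-max-bodysize=2
webserver-password=
webserver-port=8081
webserver-print-arguments=yes
```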
diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pdns_records/molecule/backend-lmdb/group_vars/all/vars.yml b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pdns_records/molecule/backend-lmdb/group_vars/all/vars.yml new file mode 100644 index 0000000..38aefd3 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pdns_records/molecule/backend-lmdb/group_vars/all/vars.yml @@ -0,0 +1,78 @@ +--- + +pdns_config: + master: true + slave: false + also-notify: "" + local-address: '127.0.0.1' + local-port: '5300' + log-dns-details: true + loglevel: "5" # 0 = emergency, 1 = alert, 2 = critical, 3 = error, 4 = warning, 5 = notice, 6 = info, 7 = debug + +pdns_zones: + - name: 'acme-inc.local' + + create_forward_zones: true + create_reverse_zones: true + + name_servers: + - ns1 + - ns2 + + hosts: + - name: ns1 + ip: 10.11.0.1 + - name: ns2 + ip: 10.11.0.2 + - name: srv001 + ip: 10.11.1.1 + ipv6: 2001:db8::1 + aliases: + - www + - foo + - name: mail001 + ip: 10.11.2.1 + + mail_servers: + - name: mail001 + preference: 10 + - name: mail002 + preference: 20 + + services: + - name: _ldap._tcp + weight: 100 + port: 631 + target: srv010 + - name: _ldap._tcp + weight: 50 + port: 631 + target: srv010 + - name: _imap._tcp + weight: 50 + port: 143 + target: mail001 + + text: + - name: _kerberos + text: KERBEROS.ACME-INC.COM + - name: '@' + text: + - 'some text' + - 'more text' + + - name: 'matrix.vpn' + type: primary + create_reverse_zones: true + name_servers: + - ns + + hosts: + - name: ns + ip: 192.168.0.4 + - name: dunkelzahn + ip: 192.168.0.4 + aliases: + - home + - vpn +... diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pdns_records/molecule/backend-lmdb/molecule.yml b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pdns_records/molecule/backend-lmdb/molecule.yml new file mode 100644 index 0000000..068d236 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pdns_records/molecule/backend-lmdb/molecule.yml @@ -0,0 +1,70 @@ +--- + +role_name_check: 1 + +dependency: + name: galaxy + +driver: + name: docker + +platforms: + - name: instance + image: "ghcr.io/bodsch/docker-ansible/ansible-${DISTRIBUTION:-debian:12}" + command: ${MOLECULE_DOCKER_COMMAND:-""} + docker_host: "${DOCKER_HOST:-unix:///run/docker.sock}" + privileged: true + pre_build_image: true + cgroupns_mode: host + volumes: + - /sys/fs/cgroup:/sys/fs/cgroup:rw + - /var/lib/containerd + capabilities: + - SYS_ADMIN + tmpfs: + - /run + - /tmp + published_ports: + - 8081:8081 + tty: true + environment: + container: docker + groups: + - dns + docker_networks: + - name: bind + ipam_config: + - subnet: "10.11.0.0/24" + gateway: "10.11.0.254" + networks: + - name: bind + ipv4_address: "10.11.0.1" + +provisioner: + name: ansible + ansible_args: + - --diff + - -v + config_options: + defaults: + deprecation_warnings: true + callback_result_format: yaml + callbacks_enabled: profile_tasks + gathering: smart + fact_caching: jsonfile + fact_caching_timeout: 8640 + fact_caching_connection: ansible_facts + +scenario: + test_sequence: + - destroy + - dependency + - syntax + - create + - prepare + - converge + - verify + - destroy + +verifier: + name: testinfra diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pdns_records/molecule/backend-lmdb/prepare.yml
b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pdns_records/molecule/backend-lmdb/prepare.yml new file mode 100644 index 0000000..71ef4f0 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pdns_records/molecule/backend-lmdb/prepare.yml @@ -0,0 +1,69 @@ +--- + +- name: information + hosts: all + gather_facts: true + + pre_tasks: + - name: arch- / artixlinux + when: + - ansible_facts.distribution | lower == 'archlinux' or + ansible_facts.os_family | lower == 'artix linux' + block: + - name: update pacman system + become: true + ansible.builtin.command: + argv: + - pacman + - --refresh + - --sync + - --sysupgrade + - --noconfirm + register: pacman + changed_when: pacman.rc != 0 + failed_when: pacman.rc != 0 + + - name: create depends service + ansible.builtin.copy: + mode: 0755 + dest: /etc/init.d/net + content: | + #!/usr/bin/openrc-run + true + when: + - ansible_facts.os_family | lower == 'artix linux' + + - name: make sure python3-apt is installed (only debian based) + ansible.builtin.package: + name: + - python3-apt + state: present + when: + - ansible_facts.os_family | lower == 'debian' + + - name: update package cache + become: true + ansible.builtin.package: + update_cache: true + + - name: install dependencies + ansible.builtin.package: + name: + - iproute2 + state: present + + - name: environment + ansible.builtin.debug: + msg: + - "os family : {{ ansible_facts.distribution }} ({{ ansible_facts.os_family }})" + - "distribution version : {{ ansible_facts.distribution_major_version }}" + - "ansible version : {{ ansible_version.full }}" + - "python version : {{ ansible_facts.python.version.major }}.{{ ansible_facts.python.version.minor }}" + +- name: install powerdns + hosts: instance + gather_facts: true + + roles: + - role: bodsch.dns.pdns +... 
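The test helpers that follow resolve records with dnspython directly on the instance. For orientation, this is what a single `hosts` entry from the scenario's `group_vars/all/vars.yml` is expected to expand into; the record set in the comments mirrors the assertions in `test_default.py` further down:

```yaml
# input: one entry from pdns_zones[0].hosts
- name: srv001
  ip: 10.11.1.1
  ipv6: 2001:db8::1
  aliases:
    - www
    - foo
# expected records (as asserted by the testinfra checks):
#   srv001.acme-inc.local.   A      10.11.1.1
#   srv001.acme-inc.local.   AAAA   2001:db8::1
#   1.1.11.10.in-addr.arpa.  PTR    srv001.acme-inc.local.
#   www.acme-inc.local.      CNAME  srv001.acme-inc.local.
#   foo.acme-inc.local.      CNAME  srv001.acme-inc.local.
```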
diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pdns_records/molecule/backend-lmdb/tests/helper/dns_utils.py b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pdns_records/molecule/backend-lmdb/tests/helper/dns_utils.py new file mode 100644 index 0000000..5372c5c --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pdns_records/molecule/backend-lmdb/tests/helper/dns_utils.py @@ -0,0 +1,385 @@ +# tests/helpers/remote_dns.py +from __future__ import annotations + +import json +import re +import shlex +from dataclasses import dataclass +from typing import Any, Dict, List, Tuple + +ERR_PREFIX = "__REMOTE_DNS_HELPER_ERROR__" + + +@dataclass(frozen=True) +class RemoteDnsLookupError(RuntimeError): + kind: str + message: str + rc: int + stderr: str + + def __str__(self) -> str: + return f"{self.kind}: {self.message} (rc={self.rc})\n{self.stderr}".strip() + + +REMOTE_DNS_SCRIPT = rf""" +import sys +import json +import traceback + +ERR_PREFIX = {ERR_PREFIX!r} + +def fatal(code: int, kind: str, message: str, **extra): + payload = {{ + "kind": kind, + "message": message, + **extra, + }} + sys.stderr.write(ERR_PREFIX + json.dumps(payload, ensure_ascii=False) + "\n") + sys.exit(code) + +try: + import ipaddress + import dns.exception + import dns.message + import dns.query + import dns.rcode + import dns.rdatatype + import dns.reversename +except Exception as e: + fatal( + 10, + "import_error", + f"{{type(e).__name__}}: {{e}}", + traceback=traceback.format_exc(), + hint="Install dnspython (Debian/Ubuntu: apt-get install -y python3-dnspython).", + ) + +def is_ip(s: str) -> bool: + try: + ipaddress.ip_address(s) + return True + except ValueError: + return False + +def normalize_qname(name: str) -> str: + name = name.strip() + return name if name.endswith(".") else name + "." 
+ +def ptr_qname(name_or_ip: str) -> str: + s = name_or_ip.strip().rstrip(".") + if s.endswith("in-addr.arpa") or s.endswith("ip6.arpa"): + return normalize_qname(s) + if is_ip(s): + return dns.reversename.from_address(s).to_text() + return normalize_qname(s) + +def answers_to_short(values): + if not values: + return None + uniq = sorted(set(values)) + return ",".join(uniq) if len(uniq) > 1 else uniq[0] + +def main(): + # argv: name type server port timeout tcp(0/1) + if len(sys.argv) != 7: + fatal(2, "usage_error", f"Expected 6 args, got {{len(sys.argv)-1}}: {{sys.argv[1:]}}") + + name = sys.argv[1] + qtype = sys.argv[2].upper() + server = sys.argv[3] + port = int(sys.argv[4]) + timeout = float(sys.argv[5]) + use_tcp = bool(int(sys.argv[6])) + + try: + if qtype == "PTR": + qname = ptr_qname(name) + rdtype = dns.rdatatype.PTR + else: + qname = normalize_qname(name) + rdtype = dns.rdatatype.from_text(qtype) + except Exception as e: + fatal(3, "type_error", f"{{type(e).__name__}}: {{e}}", qtype=qtype) + + q = dns.message.make_query(qname, rdtype) + + try: + if use_tcp: + resp = dns.query.tcp(q, where=server, port=port, timeout=timeout) + else: + resp = dns.query.udp(q, where=server, port=port, timeout=timeout) + + rcode = resp.rcode() + + if rcode == dns.rcode.NXDOMAIN: + print("", end="") + return + + if rcode != dns.rcode.NOERROR: + fatal( + 12, + "rcode_error", + f"RCODE={{dns.rcode.to_text(rcode)}}", + rcode=int(rcode), + qname=qname, + qtype=qtype, + server=server, + port=port, + ) + + answers = [] + for rrset in resp.answer: + if rrset.rdtype != rdtype: + continue + for item in rrset: + # IMPORTANT: keep dig-like textual form (do NOT rstrip('.')) + answers.append(item.to_text()) + + out = answers_to_short(answers) + print(out or "", end="") + + except dns.exception.Timeout as e: + fatal( + 20, + "timeout", + f"{{type(e).__name__}}: {{e}}", + qname=qname, + qtype=qtype, + server=server, + port=port, + timeout=timeout, + ) + except dns.exception.DNSException as e: + fatal( + 21, + "dns_exception", + f"{{type(e).__name__}}: {{e}}", + qname=qname, + qtype=qtype, + server=server, + port=port, + ) + except Exception as e: + fatal( + 99, + "unexpected", + f"{{type(e).__name__}}: {{e}}", + traceback=traceback.format_exc(), + ) + +if __name__ == "__main__": + main() +""" + + +def _extract_remote_error(stderr: str) -> dict[str, Any] | None: + m = re.search(re.escape(ERR_PREFIX) + r"(\{.*\})", stderr, flags=re.DOTALL) + if not m: + return None + try: + return json.loads(m.group(1)) + except Exception: + return None + + +def dns_lookup_on_host( + host, + dns_name: str, + dns_type: str, + server_ip: str, + server_port: int, + timeout_s: float = 2.0, + use_tcp: bool = False, +) -> str | None: + args = [ + dns_name, + dns_type, + server_ip, + str(server_port), + str(timeout_s), + "1" if use_tcp else "0", + ] + quoted_args = " ".join(shlex.quote(a) for a in args) + + cmd = f"python3 - {quoted_args} <<'PY'\n{REMOTE_DNS_SCRIPT}\nPY" + r = host.run(cmd) + + out = (r.stdout or "").strip() + err = (r.stderr or "").strip() + + if getattr(r, "rc", 0) != 0: + payload = _extract_remote_error(err) or {} + kind = str(payload.get("kind") or "remote_error") + msg = str(payload.get("message") or "Remote DNS helper failed") + raise RemoteDnsLookupError(kind=kind, message=msg, rc=int(r.rc), stderr=err) + + return out or None + + +def _strip_final_dot(name: str) -> str: + return name[:-1] if name.endswith(".") else name + + +def _norm_name(name: str) -> str: + # DNS names are case-insensitive + return 
_strip_final_dot(name.strip()).lower() + + +def _normalize_value(rrtype: str, value: str) -> str: + t = rrtype.upper().strip() + v = value.strip() + + # Single-name types + if t in {"CNAME", "NS", "PTR"}: + return _norm_name(v) + + # MX: "pref exchange." + if t == "MX": + parts = v.split() + if len(parts) >= 2: + pref = parts[0].strip() + exch = _norm_name(parts[1]) + return f"{pref} {exch}" + return v + + # SRV: "prio weight port target." + if t == "SRV": + parts = v.split() + if len(parts) >= 4: + prio, weight, port = parts[0].strip(), parts[1].strip(), parts[2].strip() + target = _norm_name(parts[3]) + return f"{prio} {weight} {port} {target}" + return v + + # SOA: "mname rname serial refresh retry expire minimum" + if t == "SOA": + parts = v.split() + if len(parts) >= 2: + parts[0] = _norm_name(parts[0]) + parts[1] = _norm_name(parts[1]) + return " ".join(parts) + return v + + return v + + +def dns_values_equal(rrtype: str, actual: str, expected: str) -> bool: + # allow expected to be written with/without trailing dot for name-like types + return _normalize_value(rrtype, actual) == _normalize_value(rrtype, expected) + + +def dig_python( + host, + get_vars: Dict[str, Any], + domains: List[Dict[str, Any]], +) -> Tuple[bool, Dict[str, Dict[str, Any]]]: + pdns_cfg = get_vars.get("pdns_config", {}) or {} + local_dns_address = str(pdns_cfg.get("local-address", "127.0.0.1")).strip() + local_dns_port = int(pdns_cfg.get("local-port", 53)) + + result_state: List[Dict[str, Any]] = [] + + for d in domains: + domain = d.get("domain") + rrtype = d.get("type", "A") + expected = d.get("result") + + error: str | None = None + try: + value = dns_lookup_on_host( + host=host, + dns_name=domain, + dns_type=rrtype, + server_ip=local_dns_address, + server_port=local_dns_port, + timeout_s=2.0, + use_tcp=False, + ) + except RemoteDnsLookupError as e: + value = None + error = str(e) + + output_msg = value or "" + + ok = False + if expected is None: + ok = output_msg == "" + else: + ok = dns_values_equal(str(rrtype), output_msg, str(expected)) + + entry: Dict[str, Any] = { + "output": output_msg, + "cmd": f"python3(dnspython) {rrtype} {domain} @{local_dns_address}:{local_dns_port}", + "failed": not ok, + } + if error: + entry["failed"] = True + entry["error"] = error + + result_state.append({domain: entry}) + + combined = {k: v for item in result_state for k, v in item.items()} + failed = { + k: v for k, v in combined.items() if isinstance(v, dict) and v.get("failed") + } + return (len(failed) > 0, failed) + + +def extract_error(failed: dict[str, dict[str, Any]]) -> list[str]: + """ """ + seen: set[str] = set() + + for _, info in failed.items(): + err = info.get("failed") + if err: + seen.add(info.get("cmd")) + + return seen + +def extract_unique_errors(failed: dict[str, dict[str, Any]]) -> list[str]: + """ + Extracts `error` strings from a molecule-style `failed` dict and removes duplicates + while preserving first-seen order. + + It also normalizes the remote helper error format: + "\\n__REMOTE_DNS_HELPER_ERROR__{json...}" + into a short, stable message (kind/message + optional hint). 
+ """ + seen: set[str] = set() + unique: list[str] = [] + + for _, info in failed.items(): + err = info.get("error") + if not isinstance(err, str) or not err.strip(): + continue + + normalized = _normalize_error_text(err) + if normalized not in seen: + seen.add(normalized) + unique.append(normalized) + + return unique + + +def _normalize_error_text(err: str) -> str: + err = err.strip() + + # If the remote helper JSON marker is present, prefer the JSON payload (stable & dedup-friendly). + if ERR_PREFIX in err: + _, payload = err.split(ERR_PREFIX, 1) + payload = payload.strip() + + try: + data = json.loads(payload) + kind = str(data.get("kind") or "remote_error") + message = str(data.get("message") or "").strip() + hint = data.get("hint") + parts = [f"{kind}: {message}".strip()] + if isinstance(hint, str) and hint.strip(): + parts.append(f"hint: {hint.strip()}") + return "\n".join(parts).strip() + except Exception: + # Fall back to raw error text if JSON parsing fails + return err + + return err diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pdns_records/molecule/backend-lmdb/tests/helper/molecule.py b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pdns_records/molecule/backend-lmdb/tests/helper/molecule.py new file mode 100644 index 0000000..08eb131 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pdns_records/molecule/backend-lmdb/tests/helper/molecule.py @@ -0,0 +1,277 @@ +from __future__ import annotations + +import json +import os +import re +from pathlib import Path +from typing import Any, Dict, List, Mapping, Optional, Sequence + +import pytest +import testinfra.utils.ansible_runner +from ansible.parsing.dataloader import DataLoader +from jinja2 import ChainableUndefined +from jinja2.nativetypes import NativeEnvironment + +# --- helper ---------------------------------------------------------------- + + +def pp_json(json_thing, sort=True, indents=2): + + if type(json_thing) is str: + print(json.dumps(json.loads(json_thing), sort_keys=sort, indent=indents)) + else: + print(json.dumps(json_thing, sort_keys=sort, indent=indents)) + + return None + + +def local_facts(host, fact: Optional[str] = None) -> Dict: + """ + return local facts + """ + local_fact = host.ansible("setup").get("ansible_facts").get("ansible_local") + + print(f"local_fact : {local_fact}") + + if local_fact and fact: + return local_fact.get(fact, {}) + else: + return dict() + + +def infra_hosts(host_name: Optional[str] = None): + """ """ + if not host_name: + host_name = "all" + + result = testinfra.utils.ansible_runner.AnsibleRunner( + os.environ["MOLECULE_INVENTORY_FILE"] + ).get_hosts(host_name) + + print(f"result: {result}") + print(f" {type(result)}") + + return result + + +# --- paths ----------------------------------------------------------------- + + +def base_directory() -> tuple[Path, Path]: + """ + Returns: + role_dir: role root (contains defaults/, vars/, tasks/, ...) + scenario_dir: molecule scenario dir (contains group_vars/, ...) + """ + cwd = Path.cwd() + + # pytest läuft je nach tox/molecule entweder im scenario/tests oder im role-root + if (cwd / "group_vars").is_dir(): + # .../molecule//tests -> role root ist ../.. 
+ return (cwd / "../..").resolve(), cwd.resolve() + + scenario = os.environ.get("MOLECULE_SCENARIO_NAME", "default") + return cwd.resolve(), (cwd / "molecule" / scenario).resolve() + + +def _normalize_os(distribution: str) -> Optional[str]: + d = (distribution or "").strip().lower() + if d in ("debian", "ubuntu"): + return "debian" + if d in ("arch", "artix"): + return f"{d}linux" + return None + + +# --- load vars files (YAML) ------------------------------------------------ + + +def _load_vars_file(loader: DataLoader, file_base: Path) -> Dict[str, Any]: + """ + file_base ohne Extension übergeben, z.B. role_dir/'defaults'/'main' + Lädt main.yml oder main.yaml via Ansible DataLoader (Vault kompatibel). + """ + for ext in ("yml", "yaml"): + p = file_base.with_suffix(f".{ext}") + if not p.is_file(): + continue + + data = loader.load_from_file(str(p)) + if data is None: + return {} + if not isinstance(data, dict): + raise TypeError(f"{p} must be a mapping/dict, got {type(data)}") + return data + + return {} + + +# --- jinja rendering (multi-pass) ------------------------------------------ + +_JINJA_MARKER = re.compile(r"({{.*?}}|{%-?.*?-%}|{#.*?#})", re.S) + + +def _find_unrendered_templates(obj: Any, prefix: str = "") -> List[str]: + found: List[str] = [] + + if isinstance(obj, str): + if _JINJA_MARKER.search(obj): + found.append(prefix or "") + return found + + if isinstance(obj, Mapping): + for k, v in obj.items(): + key = str(k) + found.extend( + _find_unrendered_templates(v, f"{prefix}.{key}" if prefix else key) + ) + return found + + if isinstance(obj, Sequence) and not isinstance(obj, (str, bytes, bytearray)): + for i, v in enumerate(obj): + found.extend(_find_unrendered_templates(v, f"{prefix}[{i}]")) + return found + + return found + + +def _make_jinja_env() -> NativeEnvironment: + """ + NativeEnvironment: gibt bei reinen Expressions native Typen zurück, + sonst Strings. Undefined ist 'chainable', damit ansible_facts.foo.bar + nicht hart explodiert, sondern Undefined liefert (ähnlich fail_on_undefined=False). + """ + env = NativeEnvironment(undefined=ChainableUndefined, autoescape=False) + + # Ansible-ähnliche lookup/query Minimalimplementierung (nur env erlaubt) + def _lookup(plugin: str, term: Any, *rest: Any, **kwargs: Any) -> Any: + if plugin != "env": + raise ValueError( + f"lookup('{plugin}', ...) 
not supported in tests (allowlist: env)" + ) + # Ansible lookup('env','X') -> '' wenn nicht gesetzt (damit default(..., true) greift) + if isinstance(term, (list, tuple)): + vals = [os.environ.get(str(t), "") for t in term] + return vals[0] if kwargs.get("wantlist") is False else vals + return os.environ.get(str(term), "") + + def _query(plugin: str, term: Any, *rest: Any, **kwargs: Any) -> List[Any]: + # query() ist wantlist=True + kwargs["wantlist"] = True + res = _lookup(plugin, term, *rest, **kwargs) + return res if isinstance(res, list) else [res] + + env.globals["lookup"] = _lookup + env.globals["query"] = _query + return env + + +def _render_obj( + env: NativeEnvironment, obj: Any, ctx: Dict[str, Any], *, skip_keys: frozenset[str] +) -> Any: + if isinstance(obj, str): + if not _JINJA_MARKER.search(obj): + return obj + tmpl = env.from_string(obj) + return tmpl.render(**ctx) + + if isinstance(obj, Mapping): + out: Dict[str, Any] = {} + for k, v in obj.items(): + ks = str(k) + if ks in skip_keys: + out[ks] = v + else: + out[ks] = _render_obj(env, v, ctx, skip_keys=skip_keys) + return out + + if isinstance(obj, list): + return [_render_obj(env, v, ctx, skip_keys=skip_keys) for v in obj] + + if isinstance(obj, tuple): + return tuple(_render_obj(env, v, ctx, skip_keys=skip_keys) for v in obj) + + return obj + + +def render_all_vars(data: Dict[str, Any], passes: int = 8) -> Dict[str, Any]: + """ + Multi-pass: damit Werte wie + system_architecture -> ..., + und danach defaults_release.file -> ...{{ system_architecture }}... + sauber aufgelöst werden. + """ + env = _make_jinja_env() + + current: Dict[str, Any] = data + last_leftovers: Optional[List[str]] = None + + for _ in range(max(1, passes)): + # Kontext ist immer der aktuelle Stand + rendered = _render_obj( + env, current, current, skip_keys=frozenset({"ansible_facts"}) + ) + if not isinstance(rendered, dict): + raise TypeError(f"Rendered vars are not a dict anymore: {type(rendered)}") + + leftovers = _find_unrendered_templates(rendered) + if not leftovers: + return rendered + + # kein Fortschritt mehr + if leftovers == last_leftovers: + current = rendered + break + + last_leftovers = leftovers + current = rendered + + # optional: hart fehlschlagen, wenn noch Templates übrig sind (sonst wird es still falsch) + if os.environ.get("ANSIBLE_TEST_ALLOW_UNRESOLVED_TEMPLATES", "0") != "1": + leftovers = _find_unrendered_templates(current) + if leftovers: + raise AssertionError( + "Unresolved templates after rendering:\n- " + "\n- ".join(leftovers) + ) + + return current + + +# --- pytest fixture -------------------------------------------------------- + + +@pytest.fixture() +def get_vars(host) -> Dict[str, Any]: + role_dir, scenario_dir = base_directory() + + loader = DataLoader() + loader.set_basedir(str(role_dir)) + + distribution = getattr(host.system_info, "distribution", "") or "" + os_id = _normalize_os(distribution) + + merged: Dict[str, Any] = {} + merged.update(_load_vars_file(loader, role_dir / "defaults" / "main")) + merged.update(_load_vars_file(loader, role_dir / "vars" / "main")) + + if os_id: + merged.update(_load_vars_file(loader, role_dir / "vars" / os_id)) + + merged.update(_load_vars_file(loader, scenario_dir / "group_vars" / "all" / "vars")) + + # Facts als Input (keine Templates) + setup = host.ansible("setup") + facts = setup.get("ansible_facts", {}) if isinstance(setup, dict) else {} + if isinstance(facts, dict): + merged["ansible_facts"] = facts + merged.setdefault( + "ansible_system", facts.get("system") or 
facts.get("ansible_system") + ) + merged.setdefault( + "ansible_architecture", + facts.get("architecture") or facts.get("ansible_architecture"), + ) + + result = render_all_vars(merged, passes=8) + + return result diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pdns_records/molecule/backend-lmdb/tests/test_default.py b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pdns_records/molecule/backend-lmdb/tests/test_default.py new file mode 100644 index 0000000..c7fddc8 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pdns_records/molecule/backend-lmdb/tests/test_default.py @@ -0,0 +1,176 @@ +from __future__ import annotations, unicode_literals + +from helper.dns_utils import dig_python, extract_error, extract_unique_errors +from helper.molecule import infra_hosts, get_vars + +testinfra_hosts = infra_hosts(host_name="instance") + +# --- tests ----------------------------------------------------------------- + +def _exec_dns_test(host, get_vars, domains): + + has_failed, failed = dig_python(host=host, get_vars=get_vars, domains=domains) + + if has_failed: + print(failed) + unique_errors = extract_unique_errors(failed) + error = extract_error(failed) + print("\n".join(error)) + print("\n".join(unique_errors)) + assert False + +def test_records_A(host, get_vars): + """ """ + domains = [ + {"domain": "ns1.acme-inc.local", "type": "A", "result": "10.11.0.1"}, + {"domain": "ns2.acme-inc.local", "type": "A", "result": "10.11.0.2"}, + # {"domain": "ns3.acme-inc.local", "type": "A", "result": "10.11.0.2"}, + {"domain": "srv001.acme-inc.local", "type": "A", "result": "10.11.1.1"}, + # {"domain": "srv002.acme-inc.local", "type": "A", "result": "10.11.1.2"}, + {"domain": "mail001.acme-inc.local", "type": "A", "result": "10.11.2.1"}, + # {"domain": "mail002.acme-inc.local", "type": "A", "result": "10.11.2.2"}, + # {"domain": "mail003.acme-inc.local", "type": "A", "result": "10.11.2.3"}, + # {"domain": "srv010.acme-inc.local", "type": "A", "result": "10.11.0.10"}, + # {"domain": "srv011.acme-inc.local", "type": "A", "result": "10.11.0.11"}, + # {"domain": "srv012.acme-inc.local", "type": "A", "result": "10.11.0.12"}, + # # + # {"domain": "cms.cm.local", "type": "A", "result": "192.168.124.21"}, + ] + + _exec_dns_test(host, get_vars, domains) + + +def test_records_PTR(host, get_vars): + """ """ + domains = [ + # IPv4 Reverse lookups + {"domain": "10.11.0.1", "type": "PTR", "result": "ns1.acme-inc.local."}, + {"domain": "10.11.0.2", "type": "PTR", "result": "ns2.acme-inc.local."}, + {"domain": "10.11.1.1", "type": "PTR", "result": "srv001.acme-inc.local."}, + # {"domain": "10.11.1.2", "type": "PTR", "result": "srv002.acme-inc.local."}, + {"domain": "10.11.2.1", "type": "PTR", "result": "mail001.acme-inc.local."}, + # {"domain": "10.11.2.2", "type": "PTR", "result": "mail002.acme-inc.local."}, + # {"domain": "10.11.2.3", "type": "PTR", "result": "mail003.acme-inc.local."}, + # {"domain": "10.11.0.10", "type": "PTR", "result": "srv010.acme-inc.local."}, + # {"domain": "10.11.0.11", "type": "PTR", "result": "srv011.acme-inc.local."}, + # {"domain": "10.11.0.12", "type": "PTR", "result": "srv012.acme-inc.local."}, + # # # IPv6 Reverse lookups + # {"domain": "2001:db8::1", "type": "PTR", "result": "srv001.acme-inc.local."}, + # # + # {"domain": "192.168.124.21", "type": "PTR", "result": "cms.cm.local"}, + ] + + _exec_dns_test(host, get_vars, domains) + + +def test_records_CNAME(host, get_vars): + """ """ + domains = [ + # IPv4 
Alias lookups + { + "domain": "www.acme-inc.local", + "type": "CNAME", + "result": "srv001.acme-inc.local.", + }, + { + "domain": "foo.acme-inc.local", + "type": "CNAME", + "result": "srv001.acme-inc.local.", + }, + # { + # "domain": "smtp.acme-inc.local", + # "type": "CNAME", + # "result": "mail001.acme-inc.local.", + # }, + # { + # "domain": "mail-in.acme-inc.local", + # "type": "CNAME", + # "result": "mail001.acme-inc.local.", + # }, + # { + # "domain": "imap.acme-inc.local", + # "type": "CNAME", + # "result": "mail003.acme-inc.local.", + # }, + # { + # "domain": "mail-out.acme-inc.local", + # "type": "CNAME", + # "result": "mail003.acme-inc.local.", + # }, + # # + # {"domain": "cms.cm.local", "type": "CNAME", "result": "192.168.124.21"}, + ] + + _exec_dns_test(host, get_vars, domains) + + +def test_records_AAAA(host, get_vars): + """ """ + domains = [ + # IPv6 Forward lookups + {"domain": "srv001.acme-inc.local", "type": "AAAA", "result": "2001:db8::1"}, + ] + + has_failed, failed = dig_python(host=host, get_vars=get_vars, domains=domains) + + if has_failed: + unique_errors = extract_unique_errors(failed) + print("\n\n".join(unique_errors)) + assert False + + +def test_records_NS(host, get_vars): + """ """ + domains = [ + # NS records lookup + { + "domain": "acme-inc.local", + "type": "NS", + "result": "ns1.acme-inc.local.,ns2.acme-inc.local.", + }, + # {"domain": "cm.local", "type": "NS", "result": "dns.cm.local."}, + ] + + _exec_dns_test(host, get_vars, domains) + + +def test_records_MX(host, get_vars): + """ """ + domains = [ + # MX records lookup + { + "domain": "acme-inc.local", + "type": "MX", + "result": "10 mail001.acme-inc.local.,20 mail002.acme-inc.local.", + }, + ] + + _exec_dns_test(host, get_vars, domains) + + +def test_records_SRV(host, get_vars): + """ """ + domains = [ + # Service records lookup + { + "domain": "_ldap._tcp.acme-inc.local", + "type": "SRV", + "result": "0 100 631 srv010.acme-inc.local.,0 50 631 srv010.acme-inc.local.", + }, + ] + + _exec_dns_test(host, get_vars, domains) + + +def test_records_TXT(host, get_vars): + """ """ + domains = [ + # TXT records lookup + { + "domain": "acme-inc.local", + "type": "TXT", + "result": '"more text","some text"', + }, + ] + + _exec_dns_test(host, get_vars, domains) diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pdns_records/molecule/backend-mysql/converge.yml b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pdns_records/molecule/backend-mysql/converge.yml new file mode 100644 index 0000000..0bc8ddf --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pdns_records/molecule/backend-mysql/converge.yml @@ -0,0 +1,33 @@ +--- + +- name: prepare instance + hosts: all + gather_facts: true + pre_tasks: + - name: "archlinux: install dnspython" + when: + - ansible_facts.distribution | lower == 'archlinux' or + ansible_facts.os_family | lower == 'artix linux' + ansible.builtin.package: + name: + - python-dnspython + state: present + + - name: "debian: install dnspython" + when: + - ansible_facts.os_family | lower == 'debian' + ansible.builtin.package: + name: + - python3-dnspython + state: present + +- name: converge + hosts: instance + any_errors_fatal: true + gather_facts: true + + environment: + NETRC: '' + + roles: + - role: bodsch.dns.pdns_records diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pdns_records/molecule/backend-mysql/group_vars/all/pdns.yml 
b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pdns_records/molecule/backend-mysql/group_vars/all/pdns.yml new file mode 100644 index 0000000..48396e9 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pdns_records/molecule/backend-mysql/group_vars/all/pdns.yml @@ -0,0 +1,54 @@ +--- + +pdns_backends: + - name: gmysql + host: 10.11.0.10 + dbname: pdns + user: pdns + password: "{{ vault__pdns.databases.pdns }}" + # # https://doc.powerdns.com/authoritative/backends/generic-mysql.html?highlight=gmysql#settings + # host: "" # Host (ip address) to connect to. Mutually exclusive with gmysql-socket. + # # Warning: When specified as a hostname a chicken/egg situation might arise where the database is needed to resolve the IP address of the database. It is best to supply an IP address of the database here. + # port: "" # The port to connect to on gmysql-host. Default: 3306. + # socket: "" # Connect to the UNIX socket at this path. Mutually exclusive with gmysql-host. + # dbname: "" # Name of the database to connect to. Default: “powerdns”. + # user: "" # User to connect as. Default: “powerdns”. + # group: "" # Group to connect as. Default: “client”. + # password: "" # The password for gmysql-user. + # dnssec: "" # Enable DNSSEC processing for this backend. Default: no. + # innodb-read-committed: "" # Use the InnoDB READ-COMMITTED transaction isolation level. Default: yes. + # ssl: "" # Deprecated since version 5.0.0. + # timeout: "" # The timeout in seconds for each attempt to read from, or write to the server. A value of 0 will disable the timeout. Default: 10 + # thread-cleanup: "" # Only enable this if you are certain you need to + # credentials: {} + +pdns_config: + master: true + slave: false + also-notify: "" + local-address: '127.0.0.1' + local-port: '5300' + log-dns-details: true + loglevel: "3" + +pdns_webserver: + enabled: true + address: "{{ ansible_facts.default_ipv4.address }}" + allow-from: + - "127.0.0.1" + - "::1" + - "10.11.0.0/24" + - "192.168.0.0/24" + connection-timeout: 5 + hash-plaintext-credentials: false + loglevel: normal # (none, normal, detailed) + max-bodysize: 2 + # password: + port: 8081 + print-arguments: false + +pdns_api: + enabled: true + key: tNsNNE10Fx3De/3nhQA + +...
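Analogous to the webserver template, the `gmysql` entry above presumably ends up as `launch`/`gmysql-*` directives in the generated backend configuration. The `gmysql-<key>` names are PowerDNS's documented settings; that the pdns role emits exactly this layout is an assumption here, not confirmed by the diff:

```ini
# sketch of the assumed generated backend config
launch=gmysql
gmysql-host=10.11.0.10
gmysql-dbname=pdns
gmysql-user=pdns
gmysql-password=...   # resolved from vault__pdns.databases.pdns
```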
diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pdns_records/molecule/backend-mysql/group_vars/all/vars.yml b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pdns_records/molecule/backend-mysql/group_vars/all/vars.yml new file mode 100644 index 0000000..679551e --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pdns_records/molecule/backend-mysql/group_vars/all/vars.yml @@ -0,0 +1,74 @@ +--- + +pdns_config: + master: true + slave: false + also-notify: "" + local-address: '127.0.0.1' + local-port: '5300' + log-dns-details: true + loglevel: "1" # 0 = emergency, 1 = alert, 2 = critical, 3 = error, 4 = warning, 5 = notice, 6 = info, 7 = debug + +pdns_zones: + - name: 'acme-inc.local' + + create_forward_zones: true + create_reverse_zones: true + + name_servers: + - ns1 + - ns2 + + hosts: + - name: ns1 + ip: 10.11.0.1 + - name: ns2 + ip: 10.11.0.2 + - name: srv001 + ip: 10.11.1.1 + ipv6: 2001:db8::1 + aliases: + - www + - foo + + mail_servers: + - name: mail001 + preference: 10 + - name: mail002 + preference: 20 + + services: + - name: _ldap._tcp + weight: 100 + port: 631 + target: srv010 + - name: _ldap._tcp + weight: 50 + port: 631 + target: srv010 + - name: _imap._tcp + weight: 50 + port: 143 + target: mail001 + + text: + - name: _kerberos + text: KERBEROS.ACME-INC.COM + - name: '@' + text: + - 'some text' + - 'more text' + + - name: 'matrix.vpn' + name_servers: + - ns + + hosts: + - name: ns + ip: 192.168.0.4 + - name: dunkelzahn + ip: 192.168.0.4 + aliases: + - home + - vpn +... diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pdns_records/molecule/backend-mysql/group_vars/all/vault.yml b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pdns_records/molecule/backend-mysql/group_vars/all/vault.yml new file mode 100644 index 0000000..8162f5f --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pdns_records/molecule/backend-mysql/group_vars/all/vault.yml @@ -0,0 +1,8 @@ +--- + +vault__pdns: + databases: + root: root + pdns: powerdns + +... diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pdns_records/molecule/backend-mysql/host_vars/database/mariadb.yml b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pdns_records/molecule/backend-mysql/host_vars/database/mariadb.yml new file mode 100644 index 0000000..aff6b55 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pdns_records/molecule/backend-mysql/host_vars/database/mariadb.yml @@ -0,0 +1,30 @@ +--- + +mariadb_system_users: + - username: root + password: "{{ vault__pdns.databases.root }}" + home: /root + update: true + ignore: true + +mariadb_databases: + - name: pdns + +mariadb_users: + - name: pdns + host: "%" + password: "{{ vault__pdns.databases.pdns }}" + priv: "pdns.*:ALL" + encrypted: false + +mariadb_config_mysqld: + # bind_address: 127.0.0.1 + bind_address: 0.0.0.0 + socket: "{{ mariadb_socket }}" + skip_external_locking: + skip_name_resolve: 1 + performance_schema: 1 + expire_logs_days: 2 + max_connections: 20 + +...
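One design note on the database fixture above: `mariadb_users` grants `pdns.*:ALL` to `pdns@'%'`, which is convenient for a throwaway molecule container. If the grant should be scoped to the scenario's fixed docker subnet instead, a narrower host pattern would do (a sketch, not part of the role defaults):

```yaml
mariadb_users:
  - name: pdns
    host: "10.11.0.%"   # only the scenario network, instead of '%'
    password: "{{ vault__pdns.databases.pdns }}"
    priv: "pdns.*:ALL"
    encrypted: false
```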
diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pdns_records/molecule/backend-mysql/molecule.yml b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pdns_records/molecule/backend-mysql/molecule.yml new file mode 100644 index 0000000..29ff5a2 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pdns_records/molecule/backend-mysql/molecule.yml @@ -0,0 +1,87 @@ +--- + +role_name_check: 1 + +dependency: + name: galaxy + +driver: + name: docker + +platforms: + - name: instance + image: "ghcr.io/bodsch/docker-ansible/ansible-${DISTRIBUTION:-debian:12}" + command: ${MOLECULE_DOCKER_COMMAND:-""} + docker_host: "${DOCKER_HOST:-unix:///run/docker.sock}" + privileged: true + pre_build_image: true + cgroupns_mode: host + volumes: + - /sys/fs/cgroup:/sys/fs/cgroup:rw + - /var/lib/containerd + capabilities: + - SYS_ADMIN + tmpfs: + - /run + - /tmp + tty: true + environment: + container: docker + groups: + - dns + docker_networks: + - name: pdns + ipam_config: + - subnet: "10.11.0.0/24" + gateway: "10.11.0.254" + networks: + - name: pdns + ipv4_address: "10.11.0.1" + + - name: database + image: "ghcr.io/bodsch/docker-ansible/ansible-debian:12" + command: ${MOLECULE_DOCKER_COMMAND:-""} + docker_host: "${DOCKER_HOST:-unix:///run/docker.sock}" + privileged: true + pre_build_image: true + cgroupns_mode: host + volumes: + - /sys/fs/cgroup:/sys/fs/cgroup:rw + - /var/lib/containerd + capabilities: + - SYS_TIME + tmpfs: + - /run + - /tmp + networks: + - name: pdns + ipv4_address: "10.11.0.10" + +provisioner: + name: ansible + ansible_args: + - --diff + - -v + config_options: + defaults: + deprecation_warnings: true + callback_result_format: yaml + callbacks_enabled: profile_tasks + gathering: smart + fact_caching: jsonfile + fact_caching_timeout: 8640 + fact_caching_connection: ansible_facts + +scenario: + test_sequence: + - destroy + - dependency + - syntax + - create + - prepare + - converge + - verify + - destroy + +verifier: + name: testinfra diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pdns_records/molecule/backend-mysql/prepare.yml b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pdns_records/molecule/backend-mysql/prepare.yml new file mode 100644 index 0000000..2268a4a --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pdns_records/molecule/backend-mysql/prepare.yml @@ -0,0 +1,76 @@ +--- + +- name: information + hosts: all + gather_facts: true + + pre_tasks: + - name: arch- / artixlinux + when: + - ansible_facts.distribution | lower == 'archlinux' or + ansible_facts.os_family | lower == 'artix linux' + block: + - name: update pacman system + become: true + ansible.builtin.command: + argv: + - pacman + - --refresh + - --sync + - --sysupgrade + - --noconfirm + register: pacman + changed_when: pacman.rc != 0 + failed_when: pacman.rc != 0 + + - name: create depends service + ansible.builtin.copy: + mode: 0755 + dest: /etc/init.d/net + content: | + #!/usr/bin/openrc-run + true + when: + - ansible_facts.os_family | lower == 'artix linux' + + - name: make sure python3-apt is installed (only debian based) + ansible.builtin.package: + name: + - python3-apt + state: present + when: + - ansible_facts.os_family | lower == 'debian' + + - name: update package cache + become: true + ansible.builtin.package: + update_cache: true + + - name: install dependencies + ansible.builtin.package: + name: + - iproute2 + state: present + + - name:
environment + ansible.builtin.debug: + msg: + - "os family : {{ ansible_facts.distribution }} ({{ ansible_facts.os_family }})" + - "distribution version : {{ ansible_facts.distribution_major_version }}" + - "ansible version : {{ ansible_version.full }}" + - "python version : {{ ansible_facts.python.version.major }}.{{ ansible_facts.python.version.minor }}" + +- name: prepare database + hosts: database + gather_facts: true + + roles: + - role: bodsch.mariadb + +- name: install powerdns + hosts: instance + gather_facts: true + + roles: + - role: bodsch.dns.pdns +... diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pdns_records/molecule/backend-mysql/requirements.yml b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pdns_records/molecule/backend-mysql/requirements.yml new file mode 100644 index 0000000..6b40f3d --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pdns_records/molecule/backend-mysql/requirements.yml @@ -0,0 +1,11 @@ +--- + +collections: + - name: community.mysql + +roles: + - name: bodsch.mariadb + src: bodsch.mariadb + version: 2.6.0 + +... diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pdns_records/molecule/backend-mysql/tests/helper/dns_utils.py b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pdns_records/molecule/backend-mysql/tests/helper/dns_utils.py new file mode 100644 index 0000000..5372c5c --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pdns_records/molecule/backend-mysql/tests/helper/dns_utils.py @@ -0,0 +1,385 @@ +# tests/helpers/remote_dns.py +from __future__ import annotations + +import json +import re +import shlex +from dataclasses import dataclass +from typing import Any, Dict, List, Tuple + +ERR_PREFIX = "__REMOTE_DNS_HELPER_ERROR__" + + +@dataclass(frozen=True) +class RemoteDnsLookupError(RuntimeError): + kind: str + message: str + rc: int + stderr: str + + def __str__(self) -> str: + return f"{self.kind}: {self.message} (rc={self.rc})\n{self.stderr}".strip() + + +REMOTE_DNS_SCRIPT = rf""" +import sys +import json +import traceback + +ERR_PREFIX = {ERR_PREFIX!r} + +def fatal(code: int, kind: str, message: str, **extra): + payload = {{ + "kind": kind, + "message": message, + **extra, + }} + sys.stderr.write(ERR_PREFIX + json.dumps(payload, ensure_ascii=False) + "\n") + sys.exit(code) + +try: + import ipaddress + import dns.exception + import dns.message + import dns.query + import dns.rcode + import dns.rdatatype + import dns.reversename +except Exception as e: + fatal( + 10, + "import_error", + f"{{type(e).__name__}}: {{e}}", + traceback=traceback.format_exc(), + hint="Install dnspython (Debian/Ubuntu: apt-get install -y python3-dnspython).", + ) + +def is_ip(s: str) -> bool: + try: + ipaddress.ip_address(s) + return True + except ValueError: + return False + +def normalize_qname(name: str) -> str: + name = name.strip() + return name if name.endswith(".") else name + "." 
+ +def ptr_qname(name_or_ip: str) -> str: + s = name_or_ip.strip().rstrip(".") + if s.endswith("in-addr.arpa") or s.endswith("ip6.arpa"): + return normalize_qname(s) + if is_ip(s): + return dns.reversename.from_address(s).to_text() + return normalize_qname(s) + +def answers_to_short(values): + if not values: + return None + uniq = sorted(set(values)) + return ",".join(uniq) if len(uniq) > 1 else uniq[0] + +def main(): + # argv: name type server port timeout tcp(0/1) + if len(sys.argv) != 7: + fatal(2, "usage_error", f"Expected 6 args, got {{len(sys.argv)-1}}: {{sys.argv[1:]}}") + + name = sys.argv[1] + qtype = sys.argv[2].upper() + server = sys.argv[3] + port = int(sys.argv[4]) + timeout = float(sys.argv[5]) + use_tcp = bool(int(sys.argv[6])) + + try: + if qtype == "PTR": + qname = ptr_qname(name) + rdtype = dns.rdatatype.PTR + else: + qname = normalize_qname(name) + rdtype = dns.rdatatype.from_text(qtype) + except Exception as e: + fatal(3, "type_error", f"{{type(e).__name__}}: {{e}}", qtype=qtype) + + q = dns.message.make_query(qname, rdtype) + + try: + if use_tcp: + resp = dns.query.tcp(q, where=server, port=port, timeout=timeout) + else: + resp = dns.query.udp(q, where=server, port=port, timeout=timeout) + + rcode = resp.rcode() + + if rcode == dns.rcode.NXDOMAIN: + print("", end="") + return + + if rcode != dns.rcode.NOERROR: + fatal( + 12, + "rcode_error", + f"RCODE={{dns.rcode.to_text(rcode)}}", + rcode=int(rcode), + qname=qname, + qtype=qtype, + server=server, + port=port, + ) + + answers = [] + for rrset in resp.answer: + if rrset.rdtype != rdtype: + continue + for item in rrset: + # IMPORTANT: keep dig-like textual form (do NOT rstrip('.')) + answers.append(item.to_text()) + + out = answers_to_short(answers) + print(out or "", end="") + + except dns.exception.Timeout as e: + fatal( + 20, + "timeout", + f"{{type(e).__name__}}: {{e}}", + qname=qname, + qtype=qtype, + server=server, + port=port, + timeout=timeout, + ) + except dns.exception.DNSException as e: + fatal( + 21, + "dns_exception", + f"{{type(e).__name__}}: {{e}}", + qname=qname, + qtype=qtype, + server=server, + port=port, + ) + except Exception as e: + fatal( + 99, + "unexpected", + f"{{type(e).__name__}}: {{e}}", + traceback=traceback.format_exc(), + ) + +if __name__ == "__main__": + main() +""" + + +def _extract_remote_error(stderr: str) -> dict[str, Any] | None: + m = re.search(re.escape(ERR_PREFIX) + r"(\{.*\})", stderr, flags=re.DOTALL) + if not m: + return None + try: + return json.loads(m.group(1)) + except Exception: + return None + + +def dns_lookup_on_host( + host, + dns_name: str, + dns_type: str, + server_ip: str, + server_port: int, + timeout_s: float = 2.0, + use_tcp: bool = False, +) -> str | None: + args = [ + dns_name, + dns_type, + server_ip, + str(server_port), + str(timeout_s), + "1" if use_tcp else "0", + ] + quoted_args = " ".join(shlex.quote(a) for a in args) + + cmd = f"python3 - {quoted_args} <<'PY'\n{REMOTE_DNS_SCRIPT}\nPY" + r = host.run(cmd) + + out = (r.stdout or "").strip() + err = (r.stderr or "").strip() + + if getattr(r, "rc", 0) != 0: + payload = _extract_remote_error(err) or {} + kind = str(payload.get("kind") or "remote_error") + msg = str(payload.get("message") or "Remote DNS helper failed") + raise RemoteDnsLookupError(kind=kind, message=msg, rc=int(r.rc), stderr=err) + + return out or None + + +def _strip_final_dot(name: str) -> str: + return name[:-1] if name.endswith(".") else name + + +def _norm_name(name: str) -> str: + # DNS names are case-insensitive + return 
_strip_final_dot(name.strip()).lower() + + +def _normalize_value(rrtype: str, value: str) -> str: + t = rrtype.upper().strip() + v = value.strip() + + # Single-name types + if t in {"CNAME", "NS", "PTR"}: + return _norm_name(v) + + # MX: "pref exchange." + if t == "MX": + parts = v.split() + if len(parts) >= 2: + pref = parts[0].strip() + exch = _norm_name(parts[1]) + return f"{pref} {exch}" + return v + + # SRV: "prio weight port target." + if t == "SRV": + parts = v.split() + if len(parts) >= 4: + prio, weight, port = parts[0].strip(), parts[1].strip(), parts[2].strip() + target = _norm_name(parts[3]) + return f"{prio} {weight} {port} {target}" + return v + + # SOA: "mname rname serial refresh retry expire minimum" + if t == "SOA": + parts = v.split() + if len(parts) >= 2: + parts[0] = _norm_name(parts[0]) + parts[1] = _norm_name(parts[1]) + return " ".join(parts) + return v + + return v + + +def dns_values_equal(rrtype: str, actual: str, expected: str) -> bool: + # allow expected to be written with/without trailing dot for name-like types + return _normalize_value(rrtype, actual) == _normalize_value(rrtype, expected) + + +def dig_python( + host, + get_vars: Dict[str, Any], + domains: List[Dict[str, Any]], +) -> Tuple[bool, Dict[str, Dict[str, Any]]]: + pdns_cfg = get_vars.get("pdns_config", {}) or {} + local_dns_address = str(pdns_cfg.get("local-address", "127.0.0.1")).strip() + local_dns_port = int(pdns_cfg.get("local-port", 53)) + + result_state: List[Dict[str, Any]] = [] + + for d in domains: + domain = d.get("domain") + rrtype = d.get("type", "A") + expected = d.get("result") + + error: str | None = None + try: + value = dns_lookup_on_host( + host=host, + dns_name=domain, + dns_type=rrtype, + server_ip=local_dns_address, + server_port=local_dns_port, + timeout_s=2.0, + use_tcp=False, + ) + except RemoteDnsLookupError as e: + value = None + error = str(e) + + output_msg = value or "" + + ok = False + if expected is None: + ok = output_msg == "" + else: + ok = dns_values_equal(str(rrtype), output_msg, str(expected)) + + entry: Dict[str, Any] = { + "output": output_msg, + "cmd": f"python3(dnspython) {rrtype} {domain} @{local_dns_address}:{local_dns_port}", + "failed": not ok, + } + if error: + entry["failed"] = True + entry["error"] = error + + result_state.append({domain: entry}) + + combined = {k: v for item in result_state for k, v in item.items()} + failed = { + k: v for k, v in combined.items() if isinstance(v, dict) and v.get("failed") + } + return (len(failed) > 0, failed) + + +def extract_error(failed: dict[str, dict[str, Any]]) -> list[str]: + """ """ + seen: set[str] = set() + + for _, info in failed.items(): + err = info.get("failed") + if err: + seen.add(info.get("cmd")) + + return seen + +def extract_unique_errors(failed: dict[str, dict[str, Any]]) -> list[str]: + """ + Extracts `error` strings from a molecule-style `failed` dict and removes duplicates + while preserving first-seen order. + + It also normalizes the remote helper error format: + "\\n__REMOTE_DNS_HELPER_ERROR__{json...}" + into a short, stable message (kind/message + optional hint). 
+ """ + seen: set[str] = set() + unique: list[str] = [] + + for _, info in failed.items(): + err = info.get("error") + if not isinstance(err, str) or not err.strip(): + continue + + normalized = _normalize_error_text(err) + if normalized not in seen: + seen.add(normalized) + unique.append(normalized) + + return unique + + +def _normalize_error_text(err: str) -> str: + err = err.strip() + + # If the remote helper JSON marker is present, prefer the JSON payload (stable & dedup-friendly). + if ERR_PREFIX in err: + _, payload = err.split(ERR_PREFIX, 1) + payload = payload.strip() + + try: + data = json.loads(payload) + kind = str(data.get("kind") or "remote_error") + message = str(data.get("message") or "").strip() + hint = data.get("hint") + parts = [f"{kind}: {message}".strip()] + if isinstance(hint, str) and hint.strip(): + parts.append(f"hint: {hint.strip()}") + return "\n".join(parts).strip() + except Exception: + # Fall back to raw error text if JSON parsing fails + return err + + return err diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pdns_records/molecule/backend-mysql/tests/helper/dns_utils.save b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pdns_records/molecule/backend-mysql/tests/helper/dns_utils.save new file mode 100644 index 0000000..80d6824 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pdns_records/molecule/backend-mysql/tests/helper/dns_utils.save @@ -0,0 +1,160 @@ +# tests/helpers/remote_dns.py +from __future__ import annotations + +import shlex +from typing import Any, Dict, List, Tuple + +REMOTE_DNS_SCRIPT = r""" +import sys +import ipaddress +import dns.exception +import dns.message +import dns.query +import dns.rcode +import dns.rdatatype +import dns.reversename + +def is_ip(s: str) -> bool: + try: + ipaddress.ip_address(s) + return True + except ValueError: + return False + +def normalize_qname(name: str) -> str: + name = name.strip() + return name if name.endswith(".") else name + "." 
+ +def ptr_qname(name_or_ip: str) -> str: + s = name_or_ip.strip().rstrip(".") + if s.endswith("in-addr.arpa") or s.endswith("ip6.arpa"): + return normalize_qname(s) + if is_ip(s): + return dns.reversename.from_address(s).to_text() + return normalize_qname(s) + +def answers_to_short(values): + if not values: + return None + uniq = sorted(set(values)) + return ",".join(uniq) if len(uniq) > 1 else uniq[0] + +def main(): + # argv: name type server port timeout tcp(0/1) + name = sys.argv[1] + qtype = sys.argv[2].upper() + server = sys.argv[3] + port = int(sys.argv[4]) + timeout = float(sys.argv[5]) + use_tcp = bool(int(sys.argv[6])) + + if qtype == "PTR": + qname = ptr_qname(name) + rdtype = dns.rdatatype.PTR + else: + qname = normalize_qname(name) + rdtype = dns.rdatatype.from_text(qtype) + + q = dns.message.make_query(qname, rdtype) + + try: + if use_tcp: + resp = dns.query.tcp(q, where=server, port=port, timeout=timeout) + else: + resp = dns.query.udp(q, where=server, port=port, timeout=timeout) + + if resp.rcode() != dns.rcode.NOERROR: + print("", end="") + return + + answers = [] + for rrset in resp.answer: + if rrset.rdtype != rdtype: + continue + for item in rrset: + answers.append(item.to_text()) # .rstrip(".")) + + out = answers_to_short(answers) + print(out or "", end="") + + except dns.exception.DNSException: + print("", end="") + +if __name__ == "__main__": + main() +""" + + +def dns_lookup_on_host( + host, + dns_name: str, + dns_type: str, + server_ip: str, + server_port: int, + timeout_s: float = 2.0, + use_tcp: bool = False, +) -> str | None: + args = [ + dns_name, + dns_type, + server_ip, + str(server_port), + str(timeout_s), + "1" if use_tcp else "0", + ] + quoted_args = " ".join(shlex.quote(a) for a in args) + + cmd = f"python3 - {quoted_args} <<'PY'\n" f"{REMOTE_DNS_SCRIPT}\n" f"PY" + + r = host.run(cmd) + out = (r.stdout or "").strip() + err = (r.stderr or "").strip() + + if err: + print(err) + + return out or None + + +def dig_python( + host, get_vars: Dict[str, Any], domains: List[Dict[str, Any]] +) -> Tuple[bool, Dict[str, Dict[str, Any]]]: + pdns_cfg = get_vars.get("pdns_config", {}) or {} + local_dns_address = str(pdns_cfg.get("local-address", "127.0.0.1")).strip() + local_dns_port = int(pdns_cfg.get("local-port", 53)) + + result_state: List[Dict[str, Any]] = [] + + for d in domains: + domain = d.get("domain") + rrtype = d.get("type", "A") + expected = d.get("result") + + value = dns_lookup_on_host( + host=host, + dns_name=domain, + dns_type=rrtype, + server_ip=local_dns_address, + server_port=local_dns_port, + timeout_s=2.0, + use_tcp=False, + ) + output_msg = value or "" + + result_state.append( + { + domain: { + "output": output_msg, + "cmd": f"python3(dnspython) {rrtype} {domain} @{local_dns_address}:{local_dns_port}", + "failed": output_msg != expected, + } + } + ) + + # print(result_state) + + combined = {k: v for item in result_state for k, v in item.items()} + failed = { + k: v for k, v in combined.items() if isinstance(v, dict) and v.get("failed") + } + return (len(failed) > 0, failed) diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pdns_records/molecule/backend-mysql/tests/helper/molecule.py b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pdns_records/molecule/backend-mysql/tests/helper/molecule.py new file mode 100644 index 0000000..08eb131 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pdns_records/molecule/backend-mysql/tests/helper/molecule.py @@ -0,0 +1,277 @@ 
+from __future__ import annotations + +import json +import os +import re +from pathlib import Path +from typing import Any, Dict, List, Mapping, Optional, Sequence + +import pytest +import testinfra.utils.ansible_runner +from ansible.parsing.dataloader import DataLoader +from jinja2 import ChainableUndefined +from jinja2.nativetypes import NativeEnvironment + +# --- helper ---------------------------------------------------------------- + + +def pp_json(json_thing, sort=True, indents=2): + + if type(json_thing) is str: + print(json.dumps(json.loads(json_thing), sort_keys=sort, indent=indents)) + else: + print(json.dumps(json_thing, sort_keys=sort, indent=indents)) + + return None + + +def local_facts(host, fact: Optional[str] = None) -> Dict: + """ + return local facts + """ + local_fact = host.ansible("setup").get("ansible_facts").get("ansible_local") + + print(f"local_fact : {local_fact}") + + if local_fact and fact: + return local_fact.get(fact, {}) + else: + return dict() + + +def infra_hosts(host_name: Optional[str] = None): + """ """ + if not host_name: + host_name = "all" + + result = testinfra.utils.ansible_runner.AnsibleRunner( + os.environ["MOLECULE_INVENTORY_FILE"] + ).get_hosts(host_name) + + print(f"result: {result}") + print(f" {type(result)}") + + return result + + +# --- paths ----------------------------------------------------------------- + + +def base_directory() -> tuple[Path, Path]: + """ + Returns: + role_dir: role root (contains defaults/, vars/, tasks/, ...) + scenario_dir: molecule scenario dir (contains group_vars/, ...) + """ + cwd = Path.cwd() + + # pytest läuft je nach tox/molecule entweder im scenario/tests oder im role-root + if (cwd / "group_vars").is_dir(): + # .../molecule//tests -> role root ist ../.. + return (cwd / "../..").resolve(), cwd.resolve() + + scenario = os.environ.get("MOLECULE_SCENARIO_NAME", "default") + return cwd.resolve(), (cwd / "molecule" / scenario).resolve() + + +def _normalize_os(distribution: str) -> Optional[str]: + d = (distribution or "").strip().lower() + if d in ("debian", "ubuntu"): + return "debian" + if d in ("arch", "artix"): + return f"{d}linux" + return None + + +# --- load vars files (YAML) ------------------------------------------------ + + +def _load_vars_file(loader: DataLoader, file_base: Path) -> Dict[str, Any]: + """ + file_base ohne Extension übergeben, z.B. role_dir/'defaults'/'main' + Lädt main.yml oder main.yaml via Ansible DataLoader (Vault kompatibel). 
+ """ + for ext in ("yml", "yaml"): + p = file_base.with_suffix(f".{ext}") + if not p.is_file(): + continue + + data = loader.load_from_file(str(p)) + if data is None: + return {} + if not isinstance(data, dict): + raise TypeError(f"{p} must be a mapping/dict, got {type(data)}") + return data + + return {} + + +# --- jinja rendering (multi-pass) ------------------------------------------ + +_JINJA_MARKER = re.compile(r"({{.*?}}|{%-?.*?-%}|{#.*?#})", re.S) + + +def _find_unrendered_templates(obj: Any, prefix: str = "") -> List[str]: + found: List[str] = [] + + if isinstance(obj, str): + if _JINJA_MARKER.search(obj): + found.append(prefix or "") + return found + + if isinstance(obj, Mapping): + for k, v in obj.items(): + key = str(k) + found.extend( + _find_unrendered_templates(v, f"{prefix}.{key}" if prefix else key) + ) + return found + + if isinstance(obj, Sequence) and not isinstance(obj, (str, bytes, bytearray)): + for i, v in enumerate(obj): + found.extend(_find_unrendered_templates(v, f"{prefix}[{i}]")) + return found + + return found + + +def _make_jinja_env() -> NativeEnvironment: + """ + NativeEnvironment: gibt bei reinen Expressions native Typen zurück, + sonst Strings. Undefined ist 'chainable', damit ansible_facts.foo.bar + nicht hart explodiert, sondern Undefined liefert (ähnlich fail_on_undefined=False). + """ + env = NativeEnvironment(undefined=ChainableUndefined, autoescape=False) + + # Ansible-ähnliche lookup/query Minimalimplementierung (nur env erlaubt) + def _lookup(plugin: str, term: Any, *rest: Any, **kwargs: Any) -> Any: + if plugin != "env": + raise ValueError( + f"lookup('{plugin}', ...) not supported in tests (allowlist: env)" + ) + # Ansible lookup('env','X') -> '' wenn nicht gesetzt (damit default(..., true) greift) + if isinstance(term, (list, tuple)): + vals = [os.environ.get(str(t), "") for t in term] + return vals[0] if kwargs.get("wantlist") is False else vals + return os.environ.get(str(term), "") + + def _query(plugin: str, term: Any, *rest: Any, **kwargs: Any) -> List[Any]: + # query() ist wantlist=True + kwargs["wantlist"] = True + res = _lookup(plugin, term, *rest, **kwargs) + return res if isinstance(res, list) else [res] + + env.globals["lookup"] = _lookup + env.globals["query"] = _query + return env + + +def _render_obj( + env: NativeEnvironment, obj: Any, ctx: Dict[str, Any], *, skip_keys: frozenset[str] +) -> Any: + if isinstance(obj, str): + if not _JINJA_MARKER.search(obj): + return obj + tmpl = env.from_string(obj) + return tmpl.render(**ctx) + + if isinstance(obj, Mapping): + out: Dict[str, Any] = {} + for k, v in obj.items(): + ks = str(k) + if ks in skip_keys: + out[ks] = v + else: + out[ks] = _render_obj(env, v, ctx, skip_keys=skip_keys) + return out + + if isinstance(obj, list): + return [_render_obj(env, v, ctx, skip_keys=skip_keys) for v in obj] + + if isinstance(obj, tuple): + return tuple(_render_obj(env, v, ctx, skip_keys=skip_keys) for v in obj) + + return obj + + +def render_all_vars(data: Dict[str, Any], passes: int = 8) -> Dict[str, Any]: + """ + Multi-pass: damit Werte wie + system_architecture -> ..., + und danach defaults_release.file -> ...{{ system_architecture }}... + sauber aufgelöst werden. 
+ """ + env = _make_jinja_env() + + current: Dict[str, Any] = data + last_leftovers: Optional[List[str]] = None + + for _ in range(max(1, passes)): + # Kontext ist immer der aktuelle Stand + rendered = _render_obj( + env, current, current, skip_keys=frozenset({"ansible_facts"}) + ) + if not isinstance(rendered, dict): + raise TypeError(f"Rendered vars are not a dict anymore: {type(rendered)}") + + leftovers = _find_unrendered_templates(rendered) + if not leftovers: + return rendered + + # kein Fortschritt mehr + if leftovers == last_leftovers: + current = rendered + break + + last_leftovers = leftovers + current = rendered + + # optional: hart fehlschlagen, wenn noch Templates übrig sind (sonst wird es still falsch) + if os.environ.get("ANSIBLE_TEST_ALLOW_UNRESOLVED_TEMPLATES", "0") != "1": + leftovers = _find_unrendered_templates(current) + if leftovers: + raise AssertionError( + "Unresolved templates after rendering:\n- " + "\n- ".join(leftovers) + ) + + return current + + +# --- pytest fixture -------------------------------------------------------- + + +@pytest.fixture() +def get_vars(host) -> Dict[str, Any]: + role_dir, scenario_dir = base_directory() + + loader = DataLoader() + loader.set_basedir(str(role_dir)) + + distribution = getattr(host.system_info, "distribution", "") or "" + os_id = _normalize_os(distribution) + + merged: Dict[str, Any] = {} + merged.update(_load_vars_file(loader, role_dir / "defaults" / "main")) + merged.update(_load_vars_file(loader, role_dir / "vars" / "main")) + + if os_id: + merged.update(_load_vars_file(loader, role_dir / "vars" / os_id)) + + merged.update(_load_vars_file(loader, scenario_dir / "group_vars" / "all" / "vars")) + + # Facts als Input (keine Templates) + setup = host.ansible("setup") + facts = setup.get("ansible_facts", {}) if isinstance(setup, dict) else {} + if isinstance(facts, dict): + merged["ansible_facts"] = facts + merged.setdefault( + "ansible_system", facts.get("system") or facts.get("ansible_system") + ) + merged.setdefault( + "ansible_architecture", + facts.get("architecture") or facts.get("ansible_architecture"), + ) + + result = render_all_vars(merged, passes=8) + + return result diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pdns_records/molecule/backend-mysql/tests/test_default.py b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pdns_records/molecule/backend-mysql/tests/test_default.py new file mode 100644 index 0000000..51e3671 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pdns_records/molecule/backend-mysql/tests/test_default.py @@ -0,0 +1,176 @@ +from __future__ import annotations, unicode_literals + +from helper.dns_utils import dig_python, extract_error, extract_unique_errors +from helper.molecule import infra_hosts, get_vars + +testinfra_hosts = infra_hosts(host_name="instance") + +# --- tests ----------------------------------------------------------------- + +def _exec_dns_test(host, get_vars, domains): + + has_failed, failed = dig_python(host=host, get_vars=get_vars, domains=domains) + + if has_failed: + print(failed) + unique_errors = extract_unique_errors(failed) + error = extract_error(failed) + print("\n".join(error)) + print("\n".join(unique_errors)) + assert False + +def test_records_A(host, get_vars): + """ """ + domains = [ + {"domain": "ns1.acme-inc.local", "type": "A", "result": "10.11.0.1"}, + {"domain": "ns2.acme-inc.local", "type": "A", "result": "10.11.0.2"}, + # {"domain": "ns3.acme-inc.local", "type": "A", 
"result": "10.11.0.2"}, + # {"domain": "srv001.acme-inc.local", "type": "A", "result": "10.11.1.1"}, + # {"domain": "srv002.acme-inc.local", "type": "A", "result": "10.11.1.2"}, + # {"domain": "mail001.acme-inc.local", "type": "A", "result": "10.11.2.1"}, + # {"domain": "mail002.acme-inc.local", "type": "A", "result": "10.11.2.2"}, + # {"domain": "mail003.acme-inc.local", "type": "A", "result": "10.11.2.3"}, + # {"domain": "srv010.acme-inc.local", "type": "A", "result": "10.11.0.10"}, + # {"domain": "srv011.acme-inc.local", "type": "A", "result": "10.11.0.11"}, + # {"domain": "srv012.acme-inc.local", "type": "A", "result": "10.11.0.12"}, + # # + # {"domain": "cms.cm.local", "type": "A", "result": "192.168.124.21"}, + ] + + _exec_dns_test(host, get_vars, domains) + + +def test_records_PTR(host, get_vars): + """ """ + domains = [ + # IPv4 Reverse lookups + {"domain": "10.11.0.1", "type": "PTR", "result": "ns1.acme-inc.local."}, + {"domain": "10.11.0.2", "type": "PTR", "result": "ns2.acme-inc.local."}, + {"domain": "10.11.1.1", "type": "PTR", "result": "srv001.acme-inc.local."}, + # {"domain": "10.11.1.2", "type": "PTR", "result": "srv002.acme-inc.local."}, + # {"domain": "10.11.2.1", "type": "PTR", "result": "mail001.acme-inc.local."}, + # {"domain": "10.11.2.2", "type": "PTR", "result": "mail002.acme-inc.local."}, + # {"domain": "10.11.2.3", "type": "PTR", "result": "mail003.acme-inc.local."}, + # {"domain": "10.11.0.10", "type": "PTR", "result": "srv010.acme-inc.local."}, + # {"domain": "10.11.0.11", "type": "PTR", "result": "srv011.acme-inc.local."}, + # {"domain": "10.11.0.12", "type": "PTR", "result": "srv012.acme-inc.local."}, + # # # IPv6 Reverse lookups + # {"domain": "2001:db8::1", "type": "PTR", "result": "srv001.acme-inc.local."}, + # # + # {"domain": "192.168.124.21", "type": "PTR", "result": "cms.cm.local"}, + ] + + _exec_dns_test(host, get_vars, domains) + + +def test_records_CNAME(host, get_vars): + """ """ + domains = [ + # IPv4 Alias lookups + { + "domain": "www.acme-inc.local", + "type": "CNAME", + "result": "srv001.acme-inc.local.", + }, + { + "domain": "foo.acme-inc.local", + "type": "CNAME", + "result": "srv001.acme-inc.local.", + }, + # { + # "domain": "smtp.acme-inc.local", + # "type": "CNAME", + # "result": "mail001.acme-inc.local.", + # }, + # { + # "domain": "mail-in.acme-inc.local", + # "type": "CNAME", + # "result": "mail001.acme-inc.local.", + # }, + # { + # "domain": "imap.acme-inc.local", + # "type": "CNAME", + # "result": "mail003.acme-inc.local.", + # }, + # { + # "domain": "mail-out.acme-inc.local", + # "type": "CNAME", + # "result": "mail003.acme-inc.local.", + # }, + # # + # {"domain": "cms.cm.local", "type": "CNAME", "result": "192.168.124.21"}, + ] + + _exec_dns_test(host, get_vars, domains) + + +def test_records_AAAA(host, get_vars): + """ """ + domains = [ + # IPv6 Forward lookups + {"domain": "srv001.acme-inc.local", "type": "AAAA", "result": "2001:db8::1"}, + ] + + has_failed, failed = dig_python(host=host, get_vars=get_vars, domains=domains) + + if has_failed: + unique_errors = extract_unique_errors(failed) + print("\n\n".join(unique_errors)) + assert False + + +def test_records_NS(host, get_vars): + """ """ + domains = [ + # NS records lookup + { + "domain": "acme-inc.local", + "type": "NS", + "result": "ns1.acme-inc.local.,ns2.acme-inc.local.", + }, + # {"domain": "cm.local", "type": "NS", "result": "dns.cm.local."}, + ] + + _exec_dns_test(host, get_vars, domains) + + +def test_records_MX(host, get_vars): + """ """ + domains = [ + # MX records 
lookup + { + "domain": "acme-inc.local", + "type": "MX", + "result": "10 mail001.acme-inc.local.,20 mail002.acme-inc.local.", + }, + ] + + _exec_dns_test(host, get_vars, domains) + + +def test_records_SRV(host, get_vars): + """ """ + domains = [ + # Service records lookup + { + "domain": "_ldap._tcp.acme-inc.local", + "type": "SRV", + "result": "0 100 631 srv010.acme-inc.local.,0 50 631 srv010.acme-inc.local.", + }, + ] + + _exec_dns_test(host, get_vars, domains) + + +def test_records_TXT(host, get_vars): + """ """ + domains = [ + # TXT records lookup + { + "domain": "acme-inc.local", + "type": "TXT", + "result": '"more text","some text"', + }, + ] + + _exec_dns_test(host, get_vars, domains) diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pdns_records/molecule/backend-sqlite/converge.yml b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pdns_records/molecule/backend-sqlite/converge.yml new file mode 100644 index 0000000..b63d241 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pdns_records/molecule/backend-sqlite/converge.yml @@ -0,0 +1,33 @@ +--- + +- name: prepare instance + hosts: all + gather_facts: true + pre_tasks: + - name: "archlinux: install dnspython" + when: + - ansible_facts.distribution | lower == 'archlinux' or + ansible_facts.os_family | lower == 'artix linux' + ansible.builtin.package: + name: + - python-dnspython + state: present + + - name: "debian: install dnspython" + when: + - ansible_facts.os_family | lower == 'debian' + ansible.builtin.package: + name: + - python3-dnspython + state: present + +- name: converge + hosts: all + any_errors_fatal: true + gather_facts: true + + environment: + NETRC: '' + + roles: + - role: bodsch.dns.pdns_records diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pdns_records/molecule/backend-sqlite/group_vars/all/pdns.yml b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pdns_records/molecule/backend-sqlite/group_vars/all/pdns.yml new file mode 100644 index 0000000..cc40141 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pdns_records/molecule/backend-sqlite/group_vars/all/pdns.yml @@ -0,0 +1,44 @@ +--- + +pdns_backends: + - name: gsqlite3 + database: /var/lib/powerdns/pdns.db + dnssec: true + pragma-journal-mode: true + pragma-synchronous: true + pragma-foreign-keys: true + +pdns_config: + master: true + slave: false + also-notify: "" + local-address: '127.0.0.1' + local-port: '5300' + disable-syslog: true + log-dns-details: true + log-dns-queries: true + log-timestamp: true + loglevel: 7 # 0 = emergency, 1 = alert, 2 = critical, 3 = error, 4 = warning, 5 = notice, 6 = info, 7 = debug + server-id: localhost + +pdns_webserver: + enabled: true + address: "{{ ansible_facts.default_ipv4.address }}" + allow-from: + - "127.0.0.1" + - "::1" + - "10.11.0.0/24" + - "192.168.0.0/24" + connection-timeout: 5 + hash-plaintext-credentials: false + loglevel: normal # (none, normal, detailed) + max-bodysize: 2 + # password: + port: 8081 + print-arguments: false + +pdns_api: + enabled: true + key: tNSNNEiFxeDe/3nhqA + +... 
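A note for readers of this scenario config (an aside, not part of the patch): the verifier derives the DNS endpoint it queries directly from `pdns_config`, so the `local-address` and `local-port` values above are exactly what the dnspython helper targets. A minimal sketch of that derivation, using the values from this file:

    # Sketch only: mirrors how dig_python() in tests/helper/dns_utils.py
    # turns the scenario's pdns_config into the resolver endpoint under test.
    pdns_config = {"local-address": "127.0.0.1", "local-port": "5300"}

    address = str(pdns_config.get("local-address", "127.0.0.1")).strip()
    port = int(pdns_config.get("local-port", 53))  # YAML string '5300' -> int 5300

    print(f"querying @{address}:{port}")  # -> querying @127.0.0.1:5300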
diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pdns_records/molecule/backend-sqlite/group_vars/all/vars.yml b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pdns_records/molecule/backend-sqlite/group_vars/all/vars.yml new file mode 100644 index 0000000..8a5371c --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pdns_records/molecule/backend-sqlite/group_vars/all/vars.yml @@ -0,0 +1,98 @@ +--- + +pdns_config: + master: true + slave: false + also-notify: "" + local-address: '127.0.0.1' + local-port: '5300' + disable-syslog: true + log-dns-details: true + log-dns-queries: true + log-timestamp: true + loglevel: 7 # 0 = emergency, 1 = alert, 2 = critical, 3 = error, 4 = warning, 5 = notice, 6 = info, 7 = debug + server-id: localhost + +pdns_zones: + - name: 'acme-inc.local' + type: primary + + create_forward_zones: true + create_reverse_zones: true + + #primaries: + # - 10.11.0.1 + networks: + - '10.11.0' + ipv6_networks: + - '2001:db8::' + name_servers: + - ns1 + - ns2 + + hosts: + - name: ns1 + ip: 10.11.0.1 + - name: ns2 + ip: 10.11.0.2 + - name: srv001 + ip: 10.11.1.1 + ipv6: 2001:db8::1 + aliases: + - www + - foo + - name: mail001 + ip: 10.11.2.1 + + mail_servers: + - name: mail001 + preference: 10 + - name: mail002 + preference: 20 + + services: + - name: _ldap._tcp + weight: 100 + port: 631 + target: srv010 + - name: _ldap._tcp + weight: 50 + port: 631 + target: srv010 + - name: _imap._tcp + weight: 50 + port: 143 + target: mail001 + + text: + - name: _kerberos + text: KERBEROS.ACME-INC.COM + - name: '@' + text: + - 'some text' + - 'more text' + + - name: 'matrix.vpn' + type: primary + create_reverse_zones: true + + # create_forward_zones: true + # create_reverse_zones: true + #primaries: + # - 10.11.0.1 + # networks: + # - '10.11.0' + # ipv6_networks: + # - '2001:db8::' + name_servers: + - ns + + hosts: + - name: ns + ip: 192.168.0.4 + - name: dunkelzahn + ip: 192.168.0.4 + aliases: + - home + - vpn +... 
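As a reading aid for the zone data above (reviewer aside, not part of the patch): each `hosts` entry fans out into the forward, reverse, and alias records that the scenario tests later assert. A hypothetical sketch of that fan-out for `srv001`; the actual record generation lives in the role's templates, not in this code:

    # Hypothetical illustration of the record fan-out for one host entry.
    zone = "acme-inc.local"
    host = {"name": "srv001", "ip": "10.11.1.1", "ipv6": "2001:db8::1", "aliases": ["www", "foo"]}

    fqdn = f"{host['name']}.{zone}"
    records = [("A", fqdn, host["ip"])]
    if host.get("ipv6"):
        records.append(("AAAA", fqdn, host["ipv6"]))              # see test_records_AAAA
    records.append(("PTR", host["ip"], fqdn + "."))               # via the generated reverse zone
    for alias in host.get("aliases", []):
        records.append(("CNAME", f"{alias}.{zone}", fqdn + "."))  # www/foo -> srv001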
diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pdns_records/molecule/backend-sqlite/molecule.yml b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pdns_records/molecule/backend-sqlite/molecule.yml
new file mode 100644
index 0000000..068d236
--- /dev/null
+++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pdns_records/molecule/backend-sqlite/molecule.yml
@@ -0,0 +1,70 @@
+---
+
+role_name_check: 1
+
+dependency:
+  name: galaxy
+
+driver:
+  name: docker
+
+platforms:
+  - name: instance
+    image: "ghcr.io/bodsch/docker-ansible/ansible-${DISTRIBUTION:-debian:12}"
+    command: ${MOLECULE_DOCKER_COMMAND:-""}
+    docker_host: "${DOCKER_HOST:-unix:///run/docker.sock}"
+    privileged: true
+    pre_build_image: true
+    cgroupns_mode: host
+    volumes:
+      - /sys/fs/cgroup:/sys/fs/cgroup:rw
+      - /var/lib/containerd
+    capabilities:
+      - SYS_ADMIN
+    tmpfs:
+      - /run
+      - /tmp
+    published_ports:
+      - 8081:8081
+    tty: true
+    environment:
+      container: docker
+    groups:
+      - dns
+    docker_networks:
+      - name: bind
+        ipam_config:
+          - subnet: "10.11.0.0/24"
+            gateway: "10.11.0.254"
+    networks:
+      - name: bind
+        ipv4_address: "10.11.0.1"
+
+provisioner:
+  name: ansible
+  ansible_args:
+    - --diff
+    - -v
+  config_options:
+    defaults:
+      deprecation_warnings: true
+      callback_result_format: yaml
+      callbacks_enabled: profile_tasks
+      gathering: smart
+      fact_caching: jsonfile
+      fact_caching_timeout: 8640
+      fact_caching_connection: ansible_facts
+
+scenario:
+  test_sequence:
+    - destroy
+    - dependency
+    - syntax
+    - create
+    - prepare
+    - converge
+    - verify
+    - destroy
+
+verifier:
+  name: testinfra
diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pdns_records/molecule/backend-sqlite/prepare.yml b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pdns_records/molecule/backend-sqlite/prepare.yml
new file mode 100644
index 0000000..7ac3389
--- /dev/null
+++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pdns_records/molecule/backend-sqlite/prepare.yml
@@ -0,0 +1,70 @@
+---
+
+- name: information
+  hosts: all
+  gather_facts: true
+
+  pre_tasks:
+    - name: arch- / artixlinux
+      when:
+        - ansible_facts.distribution | lower == 'archlinux' or
+          ansible_facts.os_family | lower == 'artix linux'
+      block:
+        - name: update pacman system
+          become: true
+          ansible.builtin.command:
+            argv:
+              - pacman
+              - --refresh
+              - --sync
+              - --sysupgrade
+              - --noconfirm
+          register: pacman
+          changed_when: pacman.rc != 0
+          failed_when: pacman.rc != 0
+
+        - name: create depends service
+          ansible.builtin.copy:
+            mode: 0755
+            dest: /etc/init.d/net
+            content: |
+              #!/usr/bin/openrc-run
+              true
+          when:
+            - ansible_facts.os_family | lower == 'artix linux'
+
+    - name: make sure python3-apt is installed (only debian based)
+      ansible.builtin.package:
+        name:
+          - python3-apt
+        state: present
+      when:
+        - ansible_facts.os_family | lower == 'debian'
+
+    - name: update package cache
+      become: true
+      ansible.builtin.package:
+        update_cache: true
+
+    - name: install dependencies
+      ansible.builtin.package:
+        name:
+          - iproute2
+        state: present
+
+    - name: environment
+      ansible.builtin.debug:
+        msg:
+          - "os family : {{ ansible_facts.distribution }} ({{ ansible_facts.os_family }})"
+          - "distribution version : {{ ansible_facts.distribution_major_version }}"
+          - "ansible version : {{ ansible_version.full }}"
+          - "python version : {{ ansible_facts.python.version.major }}.{{ ansible_facts.python.version.minor }}"
+
+- name: install powerdns
+  hosts:
instance + gather_facts: true + + roles: + - role: bodsch.dns.pdns + +... diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pdns_records/molecule/backend-sqlite/tests/helper/dns_utils.py b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pdns_records/molecule/backend-sqlite/tests/helper/dns_utils.py new file mode 100644 index 0000000..5372c5c --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pdns_records/molecule/backend-sqlite/tests/helper/dns_utils.py @@ -0,0 +1,385 @@ +# tests/helpers/remote_dns.py +from __future__ import annotations + +import json +import re +import shlex +from dataclasses import dataclass +from typing import Any, Dict, List, Tuple + +ERR_PREFIX = "__REMOTE_DNS_HELPER_ERROR__" + + +@dataclass(frozen=True) +class RemoteDnsLookupError(RuntimeError): + kind: str + message: str + rc: int + stderr: str + + def __str__(self) -> str: + return f"{self.kind}: {self.message} (rc={self.rc})\n{self.stderr}".strip() + + +REMOTE_DNS_SCRIPT = rf""" +import sys +import json +import traceback + +ERR_PREFIX = {ERR_PREFIX!r} + +def fatal(code: int, kind: str, message: str, **extra): + payload = {{ + "kind": kind, + "message": message, + **extra, + }} + sys.stderr.write(ERR_PREFIX + json.dumps(payload, ensure_ascii=False) + "\n") + sys.exit(code) + +try: + import ipaddress + import dns.exception + import dns.message + import dns.query + import dns.rcode + import dns.rdatatype + import dns.reversename +except Exception as e: + fatal( + 10, + "import_error", + f"{{type(e).__name__}}: {{e}}", + traceback=traceback.format_exc(), + hint="Install dnspython (Debian/Ubuntu: apt-get install -y python3-dnspython).", + ) + +def is_ip(s: str) -> bool: + try: + ipaddress.ip_address(s) + return True + except ValueError: + return False + +def normalize_qname(name: str) -> str: + name = name.strip() + return name if name.endswith(".") else name + "." 
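+# Added commentary (illustrative, not original code): fatal() above writes a
+# machine-readable marker to stderr, which the calling test helper parses back
+# into a RemoteDnsLookupError, e.g.:
+#   __REMOTE_DNS_HELPER_ERROR__{"kind": "timeout", "message": "Timeout: ...", "qname": "...", ...}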
+ +def ptr_qname(name_or_ip: str) -> str: + s = name_or_ip.strip().rstrip(".") + if s.endswith("in-addr.arpa") or s.endswith("ip6.arpa"): + return normalize_qname(s) + if is_ip(s): + return dns.reversename.from_address(s).to_text() + return normalize_qname(s) + +def answers_to_short(values): + if not values: + return None + uniq = sorted(set(values)) + return ",".join(uniq) if len(uniq) > 1 else uniq[0] + +def main(): + # argv: name type server port timeout tcp(0/1) + if len(sys.argv) != 7: + fatal(2, "usage_error", f"Expected 6 args, got {{len(sys.argv)-1}}: {{sys.argv[1:]}}") + + name = sys.argv[1] + qtype = sys.argv[2].upper() + server = sys.argv[3] + port = int(sys.argv[4]) + timeout = float(sys.argv[5]) + use_tcp = bool(int(sys.argv[6])) + + try: + if qtype == "PTR": + qname = ptr_qname(name) + rdtype = dns.rdatatype.PTR + else: + qname = normalize_qname(name) + rdtype = dns.rdatatype.from_text(qtype) + except Exception as e: + fatal(3, "type_error", f"{{type(e).__name__}}: {{e}}", qtype=qtype) + + q = dns.message.make_query(qname, rdtype) + + try: + if use_tcp: + resp = dns.query.tcp(q, where=server, port=port, timeout=timeout) + else: + resp = dns.query.udp(q, where=server, port=port, timeout=timeout) + + rcode = resp.rcode() + + if rcode == dns.rcode.NXDOMAIN: + print("", end="") + return + + if rcode != dns.rcode.NOERROR: + fatal( + 12, + "rcode_error", + f"RCODE={{dns.rcode.to_text(rcode)}}", + rcode=int(rcode), + qname=qname, + qtype=qtype, + server=server, + port=port, + ) + + answers = [] + for rrset in resp.answer: + if rrset.rdtype != rdtype: + continue + for item in rrset: + # IMPORTANT: keep dig-like textual form (do NOT rstrip('.')) + answers.append(item.to_text()) + + out = answers_to_short(answers) + print(out or "", end="") + + except dns.exception.Timeout as e: + fatal( + 20, + "timeout", + f"{{type(e).__name__}}: {{e}}", + qname=qname, + qtype=qtype, + server=server, + port=port, + timeout=timeout, + ) + except dns.exception.DNSException as e: + fatal( + 21, + "dns_exception", + f"{{type(e).__name__}}: {{e}}", + qname=qname, + qtype=qtype, + server=server, + port=port, + ) + except Exception as e: + fatal( + 99, + "unexpected", + f"{{type(e).__name__}}: {{e}}", + traceback=traceback.format_exc(), + ) + +if __name__ == "__main__": + main() +""" + + +def _extract_remote_error(stderr: str) -> dict[str, Any] | None: + m = re.search(re.escape(ERR_PREFIX) + r"(\{.*\})", stderr, flags=re.DOTALL) + if not m: + return None + try: + return json.loads(m.group(1)) + except Exception: + return None + + +def dns_lookup_on_host( + host, + dns_name: str, + dns_type: str, + server_ip: str, + server_port: int, + timeout_s: float = 2.0, + use_tcp: bool = False, +) -> str | None: + args = [ + dns_name, + dns_type, + server_ip, + str(server_port), + str(timeout_s), + "1" if use_tcp else "0", + ] + quoted_args = " ".join(shlex.quote(a) for a in args) + + cmd = f"python3 - {quoted_args} <<'PY'\n{REMOTE_DNS_SCRIPT}\nPY" + r = host.run(cmd) + + out = (r.stdout or "").strip() + err = (r.stderr or "").strip() + + if getattr(r, "rc", 0) != 0: + payload = _extract_remote_error(err) or {} + kind = str(payload.get("kind") or "remote_error") + msg = str(payload.get("message") or "Remote DNS helper failed") + raise RemoteDnsLookupError(kind=kind, message=msg, rc=int(r.rc), stderr=err) + + return out or None + + +def _strip_final_dot(name: str) -> str: + return name[:-1] if name.endswith(".") else name + + +def _norm_name(name: str) -> str: + # DNS names are case-insensitive + return 
_strip_final_dot(name.strip()).lower()
+
+
+def _normalize_value(rrtype: str, value: str) -> str:
+    t = rrtype.upper().strip()
+    v = value.strip()
+
+    # Single-name types
+    if t in {"CNAME", "NS", "PTR"}:
+        return _norm_name(v)
+
+    # MX: "pref exchange."
+    if t == "MX":
+        parts = v.split()
+        if len(parts) >= 2:
+            pref = parts[0].strip()
+            exch = _norm_name(parts[1])
+            return f"{pref} {exch}"
+        return v
+
+    # SRV: "prio weight port target."
+    if t == "SRV":
+        parts = v.split()
+        if len(parts) >= 4:
+            prio, weight, port = parts[0].strip(), parts[1].strip(), parts[2].strip()
+            target = _norm_name(parts[3])
+            return f"{prio} {weight} {port} {target}"
+        return v
+
+    # SOA: "mname rname serial refresh retry expire minimum"
+    if t == "SOA":
+        parts = v.split()
+        if len(parts) >= 2:
+            parts[0] = _norm_name(parts[0])
+            parts[1] = _norm_name(parts[1])
+            return " ".join(parts)
+        return v
+
+    return v
+
+
+def dns_values_equal(rrtype: str, actual: str, expected: str) -> bool:
+    # allow expected to be written with/without trailing dot for name-like types
+    return _normalize_value(rrtype, actual) == _normalize_value(rrtype, expected)
+
+
+def dig_python(
+    host,
+    get_vars: Dict[str, Any],
+    domains: List[Dict[str, Any]],
+) -> Tuple[bool, Dict[str, Dict[str, Any]]]:
+    pdns_cfg = get_vars.get("pdns_config", {}) or {}
+    local_dns_address = str(pdns_cfg.get("local-address", "127.0.0.1")).strip()
+    local_dns_port = int(pdns_cfg.get("local-port", 53))
+
+    result_state: List[Dict[str, Any]] = []
+
+    for d in domains:
+        domain = d.get("domain")
+        rrtype = d.get("type", "A")
+        expected = d.get("result")
+
+        error: str | None = None
+        try:
+            value = dns_lookup_on_host(
+                host=host,
+                dns_name=domain,
+                dns_type=rrtype,
+                server_ip=local_dns_address,
+                server_port=local_dns_port,
+                timeout_s=2.0,
+                use_tcp=False,
+            )
+        except RemoteDnsLookupError as e:
+            value = None
+            error = str(e)
+
+        output_msg = value or ""
+
+        if expected is None:
+            ok = output_msg == ""
+        else:
+            ok = dns_values_equal(str(rrtype), output_msg, str(expected))
+
+        entry: Dict[str, Any] = {
+            "output": output_msg,
+            "cmd": f"python3(dnspython) {rrtype} {domain} @{local_dns_address}:{local_dns_port}",
+            "failed": not ok,
+        }
+        if error:
+            entry["failed"] = True
+            entry["error"] = error
+
+        result_state.append({domain: entry})
+
+    combined = {k: v for item in result_state for k, v in item.items()}
+    failed = {
+        k: v for k, v in combined.items() if isinstance(v, dict) and v.get("failed")
+    }
+    return (len(failed) > 0, failed)
+
+
+def extract_error(failed: dict[str, dict[str, Any]]) -> list[str]:
+    """
+    Collect the `cmd` strings of all failed lookups, deduplicated and sorted.
+    """
+    seen: set[str] = set()
+
+    for _, info in failed.items():
+        if info.get("failed") and info.get("cmd"):
+            seen.add(info["cmd"])
+
+    return sorted(seen)
+
+
+def extract_unique_errors(failed: dict[str, dict[str, Any]]) -> list[str]:
+    """
+    Extracts `error` strings from a molecule-style `failed` dict and removes duplicates
+    while preserving first-seen order.
+
+    It also normalizes the remote helper error format:
+        "\\n__REMOTE_DNS_HELPER_ERROR__{json...}"
+    into a short, stable message (kind/message + optional hint).
+ """ + seen: set[str] = set() + unique: list[str] = [] + + for _, info in failed.items(): + err = info.get("error") + if not isinstance(err, str) or not err.strip(): + continue + + normalized = _normalize_error_text(err) + if normalized not in seen: + seen.add(normalized) + unique.append(normalized) + + return unique + + +def _normalize_error_text(err: str) -> str: + err = err.strip() + + # If the remote helper JSON marker is present, prefer the JSON payload (stable & dedup-friendly). + if ERR_PREFIX in err: + _, payload = err.split(ERR_PREFIX, 1) + payload = payload.strip() + + try: + data = json.loads(payload) + kind = str(data.get("kind") or "remote_error") + message = str(data.get("message") or "").strip() + hint = data.get("hint") + parts = [f"{kind}: {message}".strip()] + if isinstance(hint, str) and hint.strip(): + parts.append(f"hint: {hint.strip()}") + return "\n".join(parts).strip() + except Exception: + # Fall back to raw error text if JSON parsing fails + return err + + return err diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pdns_records/molecule/backend-sqlite/tests/helper/dns_utils.save b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pdns_records/molecule/backend-sqlite/tests/helper/dns_utils.save new file mode 100644 index 0000000..80d6824 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pdns_records/molecule/backend-sqlite/tests/helper/dns_utils.save @@ -0,0 +1,160 @@ +# tests/helpers/remote_dns.py +from __future__ import annotations + +import shlex +from typing import Any, Dict, List, Tuple + +REMOTE_DNS_SCRIPT = r""" +import sys +import ipaddress +import dns.exception +import dns.message +import dns.query +import dns.rcode +import dns.rdatatype +import dns.reversename + +def is_ip(s: str) -> bool: + try: + ipaddress.ip_address(s) + return True + except ValueError: + return False + +def normalize_qname(name: str) -> str: + name = name.strip() + return name if name.endswith(".") else name + "." 
+ +def ptr_qname(name_or_ip: str) -> str: + s = name_or_ip.strip().rstrip(".") + if s.endswith("in-addr.arpa") or s.endswith("ip6.arpa"): + return normalize_qname(s) + if is_ip(s): + return dns.reversename.from_address(s).to_text() + return normalize_qname(s) + +def answers_to_short(values): + if not values: + return None + uniq = sorted(set(values)) + return ",".join(uniq) if len(uniq) > 1 else uniq[0] + +def main(): + # argv: name type server port timeout tcp(0/1) + name = sys.argv[1] + qtype = sys.argv[2].upper() + server = sys.argv[3] + port = int(sys.argv[4]) + timeout = float(sys.argv[5]) + use_tcp = bool(int(sys.argv[6])) + + if qtype == "PTR": + qname = ptr_qname(name) + rdtype = dns.rdatatype.PTR + else: + qname = normalize_qname(name) + rdtype = dns.rdatatype.from_text(qtype) + + q = dns.message.make_query(qname, rdtype) + + try: + if use_tcp: + resp = dns.query.tcp(q, where=server, port=port, timeout=timeout) + else: + resp = dns.query.udp(q, where=server, port=port, timeout=timeout) + + if resp.rcode() != dns.rcode.NOERROR: + print("", end="") + return + + answers = [] + for rrset in resp.answer: + if rrset.rdtype != rdtype: + continue + for item in rrset: + answers.append(item.to_text()) # .rstrip(".")) + + out = answers_to_short(answers) + print(out or "", end="") + + except dns.exception.DNSException: + print("", end="") + +if __name__ == "__main__": + main() +""" + + +def dns_lookup_on_host( + host, + dns_name: str, + dns_type: str, + server_ip: str, + server_port: int, + timeout_s: float = 2.0, + use_tcp: bool = False, +) -> str | None: + args = [ + dns_name, + dns_type, + server_ip, + str(server_port), + str(timeout_s), + "1" if use_tcp else "0", + ] + quoted_args = " ".join(shlex.quote(a) for a in args) + + cmd = f"python3 - {quoted_args} <<'PY'\n" f"{REMOTE_DNS_SCRIPT}\n" f"PY" + + r = host.run(cmd) + out = (r.stdout or "").strip() + err = (r.stderr or "").strip() + + if err: + print(err) + + return out or None + + +def dig_python( + host, get_vars: Dict[str, Any], domains: List[Dict[str, Any]] +) -> Tuple[bool, Dict[str, Dict[str, Any]]]: + pdns_cfg = get_vars.get("pdns_config", {}) or {} + local_dns_address = str(pdns_cfg.get("local-address", "127.0.0.1")).strip() + local_dns_port = int(pdns_cfg.get("local-port", 53)) + + result_state: List[Dict[str, Any]] = [] + + for d in domains: + domain = d.get("domain") + rrtype = d.get("type", "A") + expected = d.get("result") + + value = dns_lookup_on_host( + host=host, + dns_name=domain, + dns_type=rrtype, + server_ip=local_dns_address, + server_port=local_dns_port, + timeout_s=2.0, + use_tcp=False, + ) + output_msg = value or "" + + result_state.append( + { + domain: { + "output": output_msg, + "cmd": f"python3(dnspython) {rrtype} {domain} @{local_dns_address}:{local_dns_port}", + "failed": output_msg != expected, + } + } + ) + + # print(result_state) + + combined = {k: v for item in result_state for k, v in item.items()} + failed = { + k: v for k, v in combined.items() if isinstance(v, dict) and v.get("failed") + } + return (len(failed) > 0, failed) diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pdns_records/molecule/backend-sqlite/tests/helper/molecule.py b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pdns_records/molecule/backend-sqlite/tests/helper/molecule.py new file mode 100644 index 0000000..08eb131 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pdns_records/molecule/backend-sqlite/tests/helper/molecule.py @@ -0,0 +1,277 @@ 
+from __future__ import annotations + +import json +import os +import re +from pathlib import Path +from typing import Any, Dict, List, Mapping, Optional, Sequence + +import pytest +import testinfra.utils.ansible_runner +from ansible.parsing.dataloader import DataLoader +from jinja2 import ChainableUndefined +from jinja2.nativetypes import NativeEnvironment + +# --- helper ---------------------------------------------------------------- + + +def pp_json(json_thing, sort=True, indents=2): + + if type(json_thing) is str: + print(json.dumps(json.loads(json_thing), sort_keys=sort, indent=indents)) + else: + print(json.dumps(json_thing, sort_keys=sort, indent=indents)) + + return None + + +def local_facts(host, fact: Optional[str] = None) -> Dict: + """ + return local facts + """ + local_fact = host.ansible("setup").get("ansible_facts").get("ansible_local") + + print(f"local_fact : {local_fact}") + + if local_fact and fact: + return local_fact.get(fact, {}) + else: + return dict() + + +def infra_hosts(host_name: Optional[str] = None): + """ """ + if not host_name: + host_name = "all" + + result = testinfra.utils.ansible_runner.AnsibleRunner( + os.environ["MOLECULE_INVENTORY_FILE"] + ).get_hosts(host_name) + + print(f"result: {result}") + print(f" {type(result)}") + + return result + + +# --- paths ----------------------------------------------------------------- + + +def base_directory() -> tuple[Path, Path]: + """ + Returns: + role_dir: role root (contains defaults/, vars/, tasks/, ...) + scenario_dir: molecule scenario dir (contains group_vars/, ...) + """ + cwd = Path.cwd() + + # pytest läuft je nach tox/molecule entweder im scenario/tests oder im role-root + if (cwd / "group_vars").is_dir(): + # .../molecule//tests -> role root ist ../.. + return (cwd / "../..").resolve(), cwd.resolve() + + scenario = os.environ.get("MOLECULE_SCENARIO_NAME", "default") + return cwd.resolve(), (cwd / "molecule" / scenario).resolve() + + +def _normalize_os(distribution: str) -> Optional[str]: + d = (distribution or "").strip().lower() + if d in ("debian", "ubuntu"): + return "debian" + if d in ("arch", "artix"): + return f"{d}linux" + return None + + +# --- load vars files (YAML) ------------------------------------------------ + + +def _load_vars_file(loader: DataLoader, file_base: Path) -> Dict[str, Any]: + """ + file_base ohne Extension übergeben, z.B. role_dir/'defaults'/'main' + Lädt main.yml oder main.yaml via Ansible DataLoader (Vault kompatibel). 
+ """ + for ext in ("yml", "yaml"): + p = file_base.with_suffix(f".{ext}") + if not p.is_file(): + continue + + data = loader.load_from_file(str(p)) + if data is None: + return {} + if not isinstance(data, dict): + raise TypeError(f"{p} must be a mapping/dict, got {type(data)}") + return data + + return {} + + +# --- jinja rendering (multi-pass) ------------------------------------------ + +_JINJA_MARKER = re.compile(r"({{.*?}}|{%-?.*?-%}|{#.*?#})", re.S) + + +def _find_unrendered_templates(obj: Any, prefix: str = "") -> List[str]: + found: List[str] = [] + + if isinstance(obj, str): + if _JINJA_MARKER.search(obj): + found.append(prefix or "") + return found + + if isinstance(obj, Mapping): + for k, v in obj.items(): + key = str(k) + found.extend( + _find_unrendered_templates(v, f"{prefix}.{key}" if prefix else key) + ) + return found + + if isinstance(obj, Sequence) and not isinstance(obj, (str, bytes, bytearray)): + for i, v in enumerate(obj): + found.extend(_find_unrendered_templates(v, f"{prefix}[{i}]")) + return found + + return found + + +def _make_jinja_env() -> NativeEnvironment: + """ + NativeEnvironment: gibt bei reinen Expressions native Typen zurück, + sonst Strings. Undefined ist 'chainable', damit ansible_facts.foo.bar + nicht hart explodiert, sondern Undefined liefert (ähnlich fail_on_undefined=False). + """ + env = NativeEnvironment(undefined=ChainableUndefined, autoescape=False) + + # Ansible-ähnliche lookup/query Minimalimplementierung (nur env erlaubt) + def _lookup(plugin: str, term: Any, *rest: Any, **kwargs: Any) -> Any: + if plugin != "env": + raise ValueError( + f"lookup('{plugin}', ...) not supported in tests (allowlist: env)" + ) + # Ansible lookup('env','X') -> '' wenn nicht gesetzt (damit default(..., true) greift) + if isinstance(term, (list, tuple)): + vals = [os.environ.get(str(t), "") for t in term] + return vals[0] if kwargs.get("wantlist") is False else vals + return os.environ.get(str(term), "") + + def _query(plugin: str, term: Any, *rest: Any, **kwargs: Any) -> List[Any]: + # query() ist wantlist=True + kwargs["wantlist"] = True + res = _lookup(plugin, term, *rest, **kwargs) + return res if isinstance(res, list) else [res] + + env.globals["lookup"] = _lookup + env.globals["query"] = _query + return env + + +def _render_obj( + env: NativeEnvironment, obj: Any, ctx: Dict[str, Any], *, skip_keys: frozenset[str] +) -> Any: + if isinstance(obj, str): + if not _JINJA_MARKER.search(obj): + return obj + tmpl = env.from_string(obj) + return tmpl.render(**ctx) + + if isinstance(obj, Mapping): + out: Dict[str, Any] = {} + for k, v in obj.items(): + ks = str(k) + if ks in skip_keys: + out[ks] = v + else: + out[ks] = _render_obj(env, v, ctx, skip_keys=skip_keys) + return out + + if isinstance(obj, list): + return [_render_obj(env, v, ctx, skip_keys=skip_keys) for v in obj] + + if isinstance(obj, tuple): + return tuple(_render_obj(env, v, ctx, skip_keys=skip_keys) for v in obj) + + return obj + + +def render_all_vars(data: Dict[str, Any], passes: int = 8) -> Dict[str, Any]: + """ + Multi-pass: damit Werte wie + system_architecture -> ..., + und danach defaults_release.file -> ...{{ system_architecture }}... + sauber aufgelöst werden. 
+ """ + env = _make_jinja_env() + + current: Dict[str, Any] = data + last_leftovers: Optional[List[str]] = None + + for _ in range(max(1, passes)): + # Kontext ist immer der aktuelle Stand + rendered = _render_obj( + env, current, current, skip_keys=frozenset({"ansible_facts"}) + ) + if not isinstance(rendered, dict): + raise TypeError(f"Rendered vars are not a dict anymore: {type(rendered)}") + + leftovers = _find_unrendered_templates(rendered) + if not leftovers: + return rendered + + # kein Fortschritt mehr + if leftovers == last_leftovers: + current = rendered + break + + last_leftovers = leftovers + current = rendered + + # optional: hart fehlschlagen, wenn noch Templates übrig sind (sonst wird es still falsch) + if os.environ.get("ANSIBLE_TEST_ALLOW_UNRESOLVED_TEMPLATES", "0") != "1": + leftovers = _find_unrendered_templates(current) + if leftovers: + raise AssertionError( + "Unresolved templates after rendering:\n- " + "\n- ".join(leftovers) + ) + + return current + + +# --- pytest fixture -------------------------------------------------------- + + +@pytest.fixture() +def get_vars(host) -> Dict[str, Any]: + role_dir, scenario_dir = base_directory() + + loader = DataLoader() + loader.set_basedir(str(role_dir)) + + distribution = getattr(host.system_info, "distribution", "") or "" + os_id = _normalize_os(distribution) + + merged: Dict[str, Any] = {} + merged.update(_load_vars_file(loader, role_dir / "defaults" / "main")) + merged.update(_load_vars_file(loader, role_dir / "vars" / "main")) + + if os_id: + merged.update(_load_vars_file(loader, role_dir / "vars" / os_id)) + + merged.update(_load_vars_file(loader, scenario_dir / "group_vars" / "all" / "vars")) + + # Facts als Input (keine Templates) + setup = host.ansible("setup") + facts = setup.get("ansible_facts", {}) if isinstance(setup, dict) else {} + if isinstance(facts, dict): + merged["ansible_facts"] = facts + merged.setdefault( + "ansible_system", facts.get("system") or facts.get("ansible_system") + ) + merged.setdefault( + "ansible_architecture", + facts.get("architecture") or facts.get("ansible_architecture"), + ) + + result = render_all_vars(merged, passes=8) + + return result diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pdns_records/molecule/backend-sqlite/tests/test_default.py b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pdns_records/molecule/backend-sqlite/tests/test_default.py new file mode 100644 index 0000000..c7fddc8 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pdns_records/molecule/backend-sqlite/tests/test_default.py @@ -0,0 +1,176 @@ +from __future__ import annotations, unicode_literals + +from helper.dns_utils import dig_python, extract_error, extract_unique_errors +from helper.molecule import infra_hosts, get_vars + +testinfra_hosts = infra_hosts(host_name="instance") + +# --- tests ----------------------------------------------------------------- + +def _exec_dns_test(host, get_vars, domains): + + has_failed, failed = dig_python(host=host, get_vars=get_vars, domains=domains) + + if has_failed: + print(failed) + unique_errors = extract_unique_errors(failed) + error = extract_error(failed) + print("\n".join(error)) + print("\n".join(unique_errors)) + assert False + +def test_records_A(host, get_vars): + """ """ + domains = [ + {"domain": "ns1.acme-inc.local", "type": "A", "result": "10.11.0.1"}, + {"domain": "ns2.acme-inc.local", "type": "A", "result": "10.11.0.2"}, + # {"domain": "ns3.acme-inc.local", "type": 
"A", "result": "10.11.0.2"}, + {"domain": "srv001.acme-inc.local", "type": "A", "result": "10.11.1.1"}, + # {"domain": "srv002.acme-inc.local", "type": "A", "result": "10.11.1.2"}, + {"domain": "mail001.acme-inc.local", "type": "A", "result": "10.11.2.1"}, + # {"domain": "mail002.acme-inc.local", "type": "A", "result": "10.11.2.2"}, + # {"domain": "mail003.acme-inc.local", "type": "A", "result": "10.11.2.3"}, + # {"domain": "srv010.acme-inc.local", "type": "A", "result": "10.11.0.10"}, + # {"domain": "srv011.acme-inc.local", "type": "A", "result": "10.11.0.11"}, + # {"domain": "srv012.acme-inc.local", "type": "A", "result": "10.11.0.12"}, + # # + # {"domain": "cms.cm.local", "type": "A", "result": "192.168.124.21"}, + ] + + _exec_dns_test(host, get_vars, domains) + + +def test_records_PTR(host, get_vars): + """ """ + domains = [ + # IPv4 Reverse lookups + {"domain": "10.11.0.1", "type": "PTR", "result": "ns1.acme-inc.local."}, + {"domain": "10.11.0.2", "type": "PTR", "result": "ns2.acme-inc.local."}, + {"domain": "10.11.1.1", "type": "PTR", "result": "srv001.acme-inc.local."}, + # {"domain": "10.11.1.2", "type": "PTR", "result": "srv002.acme-inc.local."}, + {"domain": "10.11.2.1", "type": "PTR", "result": "mail001.acme-inc.local."}, + # {"domain": "10.11.2.2", "type": "PTR", "result": "mail002.acme-inc.local."}, + # {"domain": "10.11.2.3", "type": "PTR", "result": "mail003.acme-inc.local."}, + # {"domain": "10.11.0.10", "type": "PTR", "result": "srv010.acme-inc.local."}, + # {"domain": "10.11.0.11", "type": "PTR", "result": "srv011.acme-inc.local."}, + # {"domain": "10.11.0.12", "type": "PTR", "result": "srv012.acme-inc.local."}, + # # # IPv6 Reverse lookups + # {"domain": "2001:db8::1", "type": "PTR", "result": "srv001.acme-inc.local."}, + # # + # {"domain": "192.168.124.21", "type": "PTR", "result": "cms.cm.local"}, + ] + + _exec_dns_test(host, get_vars, domains) + + +def test_records_CNAME(host, get_vars): + """ """ + domains = [ + # IPv4 Alias lookups + { + "domain": "www.acme-inc.local", + "type": "CNAME", + "result": "srv001.acme-inc.local.", + }, + { + "domain": "foo.acme-inc.local", + "type": "CNAME", + "result": "srv001.acme-inc.local.", + }, + # { + # "domain": "smtp.acme-inc.local", + # "type": "CNAME", + # "result": "mail001.acme-inc.local.", + # }, + # { + # "domain": "mail-in.acme-inc.local", + # "type": "CNAME", + # "result": "mail001.acme-inc.local.", + # }, + # { + # "domain": "imap.acme-inc.local", + # "type": "CNAME", + # "result": "mail003.acme-inc.local.", + # }, + # { + # "domain": "mail-out.acme-inc.local", + # "type": "CNAME", + # "result": "mail003.acme-inc.local.", + # }, + # # + # {"domain": "cms.cm.local", "type": "CNAME", "result": "192.168.124.21"}, + ] + + _exec_dns_test(host, get_vars, domains) + + +def test_records_AAAA(host, get_vars): + """ """ + domains = [ + # IPv6 Forward lookups + {"domain": "srv001.acme-inc.local", "type": "AAAA", "result": "2001:db8::1"}, + ] + + has_failed, failed = dig_python(host=host, get_vars=get_vars, domains=domains) + + if has_failed: + unique_errors = extract_unique_errors(failed) + print("\n\n".join(unique_errors)) + assert False + + +def test_records_NS(host, get_vars): + """ """ + domains = [ + # NS records lookup + { + "domain": "acme-inc.local", + "type": "NS", + "result": "ns1.acme-inc.local.,ns2.acme-inc.local.", + }, + # {"domain": "cm.local", "type": "NS", "result": "dns.cm.local."}, + ] + + _exec_dns_test(host, get_vars, domains) + + +def test_records_MX(host, get_vars): + """ """ + domains = [ + # MX records 
lookup + { + "domain": "acme-inc.local", + "type": "MX", + "result": "10 mail001.acme-inc.local.,20 mail002.acme-inc.local.", + }, + ] + + _exec_dns_test(host, get_vars, domains) + + +def test_records_SRV(host, get_vars): + """ """ + domains = [ + # Service records lookup + { + "domain": "_ldap._tcp.acme-inc.local", + "type": "SRV", + "result": "0 100 631 srv010.acme-inc.local.,0 50 631 srv010.acme-inc.local.", + }, + ] + + _exec_dns_test(host, get_vars, domains) + + +def test_records_TXT(host, get_vars): + """ """ + domains = [ + # TXT records lookup + { + "domain": "acme-inc.local", + "type": "TXT", + "result": '"more text","some text"', + }, + ] + + _exec_dns_test(host, get_vars, domains) diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pdns_records/molecule/configured/converge.yml b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pdns_records/molecule/configured/converge.yml new file mode 100644 index 0000000..b63d241 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pdns_records/molecule/configured/converge.yml @@ -0,0 +1,33 @@ +--- + +- name: prepare instance + hosts: all + gather_facts: true + pre_tasks: + - name: "archlinux: install dnspython" + when: + - ansible_facts.distribution | lower == 'archlinux' or + ansible_facts.os_family | lower == 'artix linux' + ansible.builtin.package: + name: + - python-dnspython + state: present + + - name: "debian: install dnspython" + when: + - ansible_facts.os_family | lower == 'debian' + ansible.builtin.package: + name: + - python3-dnspython + state: present + +- name: converge + hosts: all + any_errors_fatal: true + gather_facts: true + + environment: + NETRC: '' + + roles: + - role: bodsch.dns.pdns_records diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pdns_records/molecule/configured/group_vars/all/pdns.yml b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pdns_records/molecule/configured/group_vars/all/pdns.yml new file mode 100644 index 0000000..6d0d256 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pdns_records/molecule/configured/group_vars/all/pdns.yml @@ -0,0 +1,109 @@ +--- + +# pdns_mysql_databases_credentials: +# 'gmysql:one: +# 'priv_user: root +# 'priv_password: "{{ vault__pdns.databases.root }}" +# 'priv_host: +# - "localhost" + +pdns_config: + master: true + slave: false + also-notify: "" + local-address: '127.0.0.1' + local-port: '5300' + log-dns-details: "on" + loglevel: 6 + +pdns_webserver: + enabled: true + address: "{{ ansible_facts.default_ipv4.address }}" + allow-from: + - "127.0.0.1" + - "::1" + - "10.11.0.0/24" + - "192.168.0.0/24" + connection-timeout: 5 + hash-plaintext-credentials: false + loglevel: normal + max-bodysize: 2 + password: + port: 8081 + print-arguments: true + +pdns_api: + enabled: true + key: tNSN-E1FxeDe/3nhqA + +pdns_backends: + - name: lmdb + filename: /var/lib/powerdns/pdns.lmdb + shards: 64 + sync-mode: nometasync + # schema-version: 5 + random-ids: true + map-size: 16000 + # flag-deleted: + # lightning-stream: + +# https://doc.powerdns.com/authoritative/backends/index.html?highlight=backend +# pdns_backends: +# - name: 'gmysql:one' +# user: powerdns +# host: localhost +# password: "{{ vault__pdns.databases.pdns }}" +# dbname: pdns +# # # https://doc.powerdns.com/authoritative/backends/generic-mysql.html?highlight=gmysql#settings +# # host: "" # Host (ip address) to connect to. Mutually exclusive with gmysql-socket. 
+# # # Warning: When specified as a hostname a chicken/egg situation might arise where the database is needed to resolve the IP address of the database. It is best to supply an IP address of the database here. +# # port: "" # The port to connect to on gmysql-host. Default: 3306. +# # socket: "" # Connect to the UNIX socket at this path. Mutually exclusive with gmysql-host. +# # dbname: "" # Name of the database to connect to. Default: “powerdns”. +# # user: "" # User to connect as. Default: “powerdns”. +# # group: "" # Group to connect as. Default: “client”. +# # password: "" # The password to for gmysql-user. +# # dnssec: "" # Enable DNSSEC processing for this backend. Default: no. +# # innodb-read-committed: "" # Use the InnoDB READ-COMMITTED transaction isolation level. Default: yes. +# # ssl: "" # Deprecated since version 5.0.0. +# # timeout: "" # The timeout in seconds for each attempt to read from, or write to the server. A value of 0 will disable the timeout. Default: 10 +# # thread-cleanup: "" # Only enable this if you are certain you need to +# # credentials: {} +# # +# # - name: 'gmysql:two' +# # user: pdns_user +# # host: 192.0.2.15 +# # port: 3307 +# # password: "{{ vault__pdns.databases.external }}" +# # dbname: dns +# # credentials: {} +# # +# # - name: bind +# # config: '/etc/named/named.conf' +# # hybrid: true +# # dnssec-db: '{{ pdns_config_dir }}/dnssec.db' +# +# # - name: gsqlite3 +# # database: /var/lib/powerdns/pdns.db +# # dnssec: true +# # pragma-journal-mode: true +# # pragma-synchronous: true +# # pragma-foreign-keys: true + + +# pdns_rec_install_repo: "{{ pdns_rec_powerdns_repo_44 }}" + +# pdns_rec_config: +# allow-from: "10.0.0.0/8" +# local-address: "10.0.4.1" +# forward-zones: +# - matrix.lan=10.0.4.1:5300 +# webserver: true +# webserver-address: "{{ ansible_facts.default_ipv4.address }}" +# webserver-port: 8082 +# webserver-allow-from: +# - 127.0.0.1/32 +# - 10.0.2.4/32 +# webserver-loglevel: "normal" + +... diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pdns_records/molecule/configured/group_vars/all/vars.yml b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pdns_records/molecule/configured/group_vars/all/vars.yml new file mode 100644 index 0000000..74201a6 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pdns_records/molecule/configured/group_vars/all/vars.yml @@ -0,0 +1,74 @@ +--- + +pdns_config: + master: true + slave: false + also-notify: "" + local-address: '127.0.0.1' + local-port: '5300' + log-dns-details: "on" + loglevel: 6 + +pdns_zones: + - name: 'acme-inc.local' + + create_forward_zones: true + create_reverse_zones: true + + name_servers: + - ns1 + - ns2 + + hosts: + - name: ns1 + ip: 10.11.0.1 + - name: ns2 + ip: 10.11.0.2 + - name: srv001 + ip: 10.11.1.1 + ipv6: 2001:db8::1 + aliases: + - www + - foo + + mail_servers: + - name: mail001 + preference: 10 + - name: mail002 + preference: 20 + + services: + - name: _ldap._tcp + weight: 100 + port: 631 + target: srv010 + - name: _ldap._tcp + weight: 50 + port: 631 + target: srv010 + - name: _imap._tcp + weight: 50 + port: 143 + target: mail001 + + text: + - name: _kerberos + text: KERBEROS.ACME-INC.COM + - name: '@' + text: + - 'some text' + - 'more text' + + - name: 'matrix.vpn' + name_servers: + - ns + + hosts: + - name: ns + ip: 192.168.0.4 + - name: dunkelzahn + ip: 192.168.0.4 + aliases: + - home + - vpn +... 
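One detail worth calling out (reviewer aside, not part of the patch): multi-value expectations in the scenario tests, such as the TXT result '"more text","some text"', are ordered that way because the remote helper's answers_to_short() joins the sorted unique answer strings. A worked example of that function:

    # answers_to_short() as defined in the remote DNS helper script:
    # multi-value answers are deduplicated, sorted, then comma-joined.
    def answers_to_short(values):
        if not values:
            return None
        uniq = sorted(set(values))
        return ",".join(uniq) if len(uniq) > 1 else uniq[0]

    # '"more text"' sorts before '"some text"', hence the expected TXT string.
    assert answers_to_short(['"some text"', '"more text"']) == '"more text","some text"'
    assert answers_to_short(["ns2.acme-inc.local.", "ns1.acme-inc.local."]) == (
        "ns1.acme-inc.local.,ns2.acme-inc.local."
    )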
diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pdns_records/molecule/configured/group_vars/all/vault.yml b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pdns_records/molecule/configured/group_vars/all/vault.yml
new file mode 100644
index 0000000..db29e95
--- /dev/null
+++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pdns_records/molecule/configured/group_vars/all/vault.yml
@@ -0,0 +1,8 @@
+---
+
+vault__pdns:
+  databases:
+    root: root
+    pdns: powerdns
+    external: powerdns
+...
diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pdns_records/molecule/configured/molecule.yml b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pdns_records/molecule/configured/molecule.yml
new file mode 100644
index 0000000..0437c1f
--- /dev/null
+++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pdns_records/molecule/configured/molecule.yml
@@ -0,0 +1,68 @@
+---
+
+role_name_check: 1
+
+dependency:
+  name: galaxy
+
+driver:
+  name: docker
+
+platforms:
+  - name: instance
+    image: "ghcr.io/bodsch/docker-ansible/ansible-${DISTRIBUTION:-debian:12}"
+    command: ${MOLECULE_DOCKER_COMMAND:-""}
+    docker_host: "${DOCKER_HOST:-unix:///run/docker.sock}"
+    privileged: true
+    pre_build_image: true
+    cgroupns_mode: host
+    volumes:
+      - /sys/fs/cgroup:/sys/fs/cgroup:rw
+      - /var/lib/containerd
+    capabilities:
+      - SYS_ADMIN
+    tmpfs:
+      - /run
+      - /tmp
+    tty: true
+    environment:
+      container: docker
+    groups:
+      - dns
+    docker_networks:
+      - name: bind
+        ipam_config:
+          - subnet: "10.11.0.0/24"
+            gateway: "10.11.0.254"
+    networks:
+      - name: bind
+        ipv4_address: "10.11.0.1"
+
+provisioner:
+  name: ansible
+  ansible_args:
+    - --diff
+    - -v
+  config_options:
+    defaults:
+      deprecation_warnings: true
+      callback_result_format: yaml
+      callbacks_enabled: profile_tasks
+      gathering: smart
+      fact_caching: jsonfile
+      fact_caching_timeout: 8640
+      fact_caching_connection: ansible_facts
+
+scenario:
+  test_sequence:
+    - destroy
+    - dependency
+    - syntax
+    - create
+    - prepare
+    - converge
+    - verify
+    - destroy
+
+verifier:
+  name: testinfra
diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pdns_records/molecule/configured/prepare.yml b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pdns_records/molecule/configured/prepare.yml
new file mode 100644
index 0000000..71ef4f0
--- /dev/null
+++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pdns_records/molecule/configured/prepare.yml
@@ -0,0 +1,69 @@
+---
+
+- name: information
+  hosts: all
+  gather_facts: true
+
+  pre_tasks:
+    - name: arch- / artixlinux
+      when:
+        - ansible_facts.distribution | lower == 'archlinux' or
+          ansible_facts.os_family | lower == 'artix linux'
+      block:
+        - name: update pacman system
+          become: true
+          ansible.builtin.command:
+            argv:
+              - pacman
+              - --refresh
+              - --sync
+              - --sysupgrade
+              - --noconfirm
+          register: pacman
+          changed_when: pacman.rc != 0
+          failed_when: pacman.rc != 0
+
+        - name: create depends service
+          ansible.builtin.copy:
+            mode: 0755
+            dest: /etc/init.d/net
+            content: |
+              #!/usr/bin/openrc-run
+              true
+          when:
+            - ansible_facts.os_family | lower == 'artix linux'
+
+    - name: make sure python3-apt is installed (only debian based)
+      ansible.builtin.package:
+        name:
+          - python3-apt
+        state: present
+      when:
+        - ansible_facts.os_family | lower == 'debian'
+
+    - name: update package cache
+      become: true
+      ansible.builtin.package:
+        update_cache: true
+
+
- name: install dependencies + ansible.builtin.package: + name: + - iproute2 + state: present + + - name: environment + ansible.builtin.debug: + msg: + - "os family : {{ ansible_facts.distribution }} ({{ ansible_facts.os_family }})" + - "distribution version : {{ ansible_facts.distribution_major_version }}" + - "ansible version : {{ ansible_version.full }}" + - "python version : {{ ansible_facts.python.version.major }}.{{ ansible_facts.python.version.minor }}" + +- name: install powerdns + hosts: instance + gather_facts: true + + roles: + - role: bodsch.dns.pdns +... diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pdns_records/molecule/configured/tests/helper/dns_utils.py b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pdns_records/molecule/configured/tests/helper/dns_utils.py new file mode 100644 index 0000000..5372c5c --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pdns_records/molecule/configured/tests/helper/dns_utils.py @@ -0,0 +1,385 @@ +# tests/helpers/remote_dns.py +from __future__ import annotations + +import json +import re +import shlex +from dataclasses import dataclass +from typing import Any, Dict, List, Tuple + +ERR_PREFIX = "__REMOTE_DNS_HELPER_ERROR__" + + +@dataclass(frozen=True) +class RemoteDnsLookupError(RuntimeError): + kind: str + message: str + rc: int + stderr: str + + def __str__(self) -> str: + return f"{self.kind}: {self.message} (rc={self.rc})\n{self.stderr}".strip() + + +REMOTE_DNS_SCRIPT = rf""" +import sys +import json +import traceback + +ERR_PREFIX = {ERR_PREFIX!r} + +def fatal(code: int, kind: str, message: str, **extra): + payload = {{ + "kind": kind, + "message": message, + **extra, + }} + sys.stderr.write(ERR_PREFIX + json.dumps(payload, ensure_ascii=False) + "\n") + sys.exit(code) + +try: + import ipaddress + import dns.exception + import dns.message + import dns.query + import dns.rcode + import dns.rdatatype + import dns.reversename +except Exception as e: + fatal( + 10, + "import_error", + f"{{type(e).__name__}}: {{e}}", + traceback=traceback.format_exc(), + hint="Install dnspython (Debian/Ubuntu: apt-get install -y python3-dnspython).", + ) + +def is_ip(s: str) -> bool: + try: + ipaddress.ip_address(s) + return True + except ValueError: + return False + +def normalize_qname(name: str) -> str: + name = name.strip() + return name if name.endswith(".") else name + "." 
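+# Example: normalize_qname("ns1.acme-inc.local") -> "ns1.acme-inc.local." while an
+# already rooted name is returned unchanged. The trailing dot denotes the DNS root,
+# matching dig's presentation format, which is why the expected results in the
+# tests keep their final dot (see the "do NOT rstrip('.')" note further down).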
+ +def ptr_qname(name_or_ip: str) -> str: + s = name_or_ip.strip().rstrip(".") + if s.endswith("in-addr.arpa") or s.endswith("ip6.arpa"): + return normalize_qname(s) + if is_ip(s): + return dns.reversename.from_address(s).to_text() + return normalize_qname(s) + +def answers_to_short(values): + if not values: + return None + uniq = sorted(set(values)) + return ",".join(uniq) if len(uniq) > 1 else uniq[0] + +def main(): + # argv: name type server port timeout tcp(0/1) + if len(sys.argv) != 7: + fatal(2, "usage_error", f"Expected 6 args, got {{len(sys.argv)-1}}: {{sys.argv[1:]}}") + + name = sys.argv[1] + qtype = sys.argv[2].upper() + server = sys.argv[3] + port = int(sys.argv[4]) + timeout = float(sys.argv[5]) + use_tcp = bool(int(sys.argv[6])) + + try: + if qtype == "PTR": + qname = ptr_qname(name) + rdtype = dns.rdatatype.PTR + else: + qname = normalize_qname(name) + rdtype = dns.rdatatype.from_text(qtype) + except Exception as e: + fatal(3, "type_error", f"{{type(e).__name__}}: {{e}}", qtype=qtype) + + q = dns.message.make_query(qname, rdtype) + + try: + if use_tcp: + resp = dns.query.tcp(q, where=server, port=port, timeout=timeout) + else: + resp = dns.query.udp(q, where=server, port=port, timeout=timeout) + + rcode = resp.rcode() + + if rcode == dns.rcode.NXDOMAIN: + print("", end="") + return + + if rcode != dns.rcode.NOERROR: + fatal( + 12, + "rcode_error", + f"RCODE={{dns.rcode.to_text(rcode)}}", + rcode=int(rcode), + qname=qname, + qtype=qtype, + server=server, + port=port, + ) + + answers = [] + for rrset in resp.answer: + if rrset.rdtype != rdtype: + continue + for item in rrset: + # IMPORTANT: keep dig-like textual form (do NOT rstrip('.')) + answers.append(item.to_text()) + + out = answers_to_short(answers) + print(out or "", end="") + + except dns.exception.Timeout as e: + fatal( + 20, + "timeout", + f"{{type(e).__name__}}: {{e}}", + qname=qname, + qtype=qtype, + server=server, + port=port, + timeout=timeout, + ) + except dns.exception.DNSException as e: + fatal( + 21, + "dns_exception", + f"{{type(e).__name__}}: {{e}}", + qname=qname, + qtype=qtype, + server=server, + port=port, + ) + except Exception as e: + fatal( + 99, + "unexpected", + f"{{type(e).__name__}}: {{e}}", + traceback=traceback.format_exc(), + ) + +if __name__ == "__main__": + main() +""" + + +def _extract_remote_error(stderr: str) -> dict[str, Any] | None: + m = re.search(re.escape(ERR_PREFIX) + r"(\{.*\})", stderr, flags=re.DOTALL) + if not m: + return None + try: + return json.loads(m.group(1)) + except Exception: + return None + + +def dns_lookup_on_host( + host, + dns_name: str, + dns_type: str, + server_ip: str, + server_port: int, + timeout_s: float = 2.0, + use_tcp: bool = False, +) -> str | None: + args = [ + dns_name, + dns_type, + server_ip, + str(server_port), + str(timeout_s), + "1" if use_tcp else "0", + ] + quoted_args = " ".join(shlex.quote(a) for a in args) + + cmd = f"python3 - {quoted_args} <<'PY'\n{REMOTE_DNS_SCRIPT}\nPY" + r = host.run(cmd) + + out = (r.stdout or "").strip() + err = (r.stderr or "").strip() + + if getattr(r, "rc", 0) != 0: + payload = _extract_remote_error(err) or {} + kind = str(payload.get("kind") or "remote_error") + msg = str(payload.get("message") or "Remote DNS helper failed") + raise RemoteDnsLookupError(kind=kind, message=msg, rc=int(r.rc), stderr=err) + + return out or None + + +def _strip_final_dot(name: str) -> str: + return name[:-1] if name.endswith(".") else name + + +def _norm_name(name: str) -> str: + # DNS names are case-insensitive + return 
_strip_final_dot(name.strip()).lower()
+
+
+def _normalize_value(rrtype: str, value: str) -> str:
+    t = rrtype.upper().strip()
+    v = value.strip()
+
+    # Single-name types
+    if t in {"CNAME", "NS", "PTR"}:
+        return _norm_name(v)
+
+    # MX: "pref exchange."
+    if t == "MX":
+        parts = v.split()
+        if len(parts) >= 2:
+            pref = parts[0].strip()
+            exch = _norm_name(parts[1])
+            return f"{pref} {exch}"
+        return v
+
+    # SRV: "prio weight port target."
+    if t == "SRV":
+        parts = v.split()
+        if len(parts) >= 4:
+            prio, weight, port = parts[0].strip(), parts[1].strip(), parts[2].strip()
+            target = _norm_name(parts[3])
+            return f"{prio} {weight} {port} {target}"
+        return v
+
+    # SOA: "mname rname serial refresh retry expire minimum"
+    if t == "SOA":
+        parts = v.split()
+        if len(parts) >= 2:
+            parts[0] = _norm_name(parts[0])
+            parts[1] = _norm_name(parts[1])
+            return " ".join(parts)
+        return v
+
+    return v
+
+
+def dns_values_equal(rrtype: str, actual: str, expected: str) -> bool:
+    # allow expected to be written with/without trailing dot for name-like types
+    return _normalize_value(rrtype, actual) == _normalize_value(rrtype, expected)
+
+
+def dig_python(
+    host,
+    get_vars: Dict[str, Any],
+    domains: List[Dict[str, Any]],
+) -> Tuple[bool, Dict[str, Dict[str, Any]]]:
+    pdns_cfg = get_vars.get("pdns_config", {}) or {}
+    local_dns_address = str(pdns_cfg.get("local-address", "127.0.0.1")).strip()
+    local_dns_port = int(pdns_cfg.get("local-port", 53))
+
+    result_state: List[Dict[str, Any]] = []
+
+    for d in domains:
+        domain = d.get("domain")
+        rrtype = d.get("type", "A")
+        expected = d.get("result")
+
+        error: str | None = None
+        try:
+            value = dns_lookup_on_host(
+                host=host,
+                dns_name=domain,
+                dns_type=rrtype,
+                server_ip=local_dns_address,
+                server_port=local_dns_port,
+                timeout_s=2.0,
+                use_tcp=False,
+            )
+        except RemoteDnsLookupError as e:
+            value = None
+            error = str(e)
+
+        output_msg = value or ""
+
+        ok = False
+        if expected is None:
+            ok = output_msg == ""
+        else:
+            ok = dns_values_equal(str(rrtype), output_msg, str(expected))
+
+        entry: Dict[str, Any] = {
+            "output": output_msg,
+            "cmd": f"python3(dnspython) {rrtype} {domain} @{local_dns_address}:{local_dns_port}",
+            "failed": not ok,
+        }
+        if error:
+            entry["failed"] = True
+            entry["error"] = error
+
+        result_state.append({domain: entry})
+
+    combined = {k: v for item in result_state for k, v in item.items()}
+    failed = {
+        k: v for k, v in combined.items() if isinstance(v, dict) and v.get("failed")
+    }
+    return (len(failed) > 0, failed)
+
+
+def extract_error(failed: dict[str, dict[str, Any]]) -> set[str]:
+    """Collect the `cmd` strings of all failed lookups."""
+    seen: set[str] = set()
+
+    for _, info in failed.items():
+        err = info.get("failed")
+        if err:
+            seen.add(info.get("cmd"))
+
+    return seen
+
+
+def extract_unique_errors(failed: dict[str, dict[str, Any]]) -> list[str]:
+    """
+    Extracts `error` strings from a molecule-style `failed` dict and removes duplicates
+    while preserving first-seen order.
+
+    It also normalizes the remote helper error format:
+        "\\n__REMOTE_DNS_HELPER_ERROR__{json...}"
+    into a short, stable message (kind/message + optional hint).
+ """ + seen: set[str] = set() + unique: list[str] = [] + + for _, info in failed.items(): + err = info.get("error") + if not isinstance(err, str) or not err.strip(): + continue + + normalized = _normalize_error_text(err) + if normalized not in seen: + seen.add(normalized) + unique.append(normalized) + + return unique + + +def _normalize_error_text(err: str) -> str: + err = err.strip() + + # If the remote helper JSON marker is present, prefer the JSON payload (stable & dedup-friendly). + if ERR_PREFIX in err: + _, payload = err.split(ERR_PREFIX, 1) + payload = payload.strip() + + try: + data = json.loads(payload) + kind = str(data.get("kind") or "remote_error") + message = str(data.get("message") or "").strip() + hint = data.get("hint") + parts = [f"{kind}: {message}".strip()] + if isinstance(hint, str) and hint.strip(): + parts.append(f"hint: {hint.strip()}") + return "\n".join(parts).strip() + except Exception: + # Fall back to raw error text if JSON parsing fails + return err + + return err diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pdns_records/molecule/configured/tests/helper/dns_utils.save b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pdns_records/molecule/configured/tests/helper/dns_utils.save new file mode 100644 index 0000000..80d6824 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pdns_records/molecule/configured/tests/helper/dns_utils.save @@ -0,0 +1,160 @@ +# tests/helpers/remote_dns.py +from __future__ import annotations + +import shlex +from typing import Any, Dict, List, Tuple + +REMOTE_DNS_SCRIPT = r""" +import sys +import ipaddress +import dns.exception +import dns.message +import dns.query +import dns.rcode +import dns.rdatatype +import dns.reversename + +def is_ip(s: str) -> bool: + try: + ipaddress.ip_address(s) + return True + except ValueError: + return False + +def normalize_qname(name: str) -> str: + name = name.strip() + return name if name.endswith(".") else name + "." 
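+# Example: ptr_qname("10.11.0.1") -> "1.0.11.10.in-addr.arpa." via dnspython's
+# dns.reversename; an already reversed name or a plain hostname passes through
+# with only a root dot appended.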
+ +def ptr_qname(name_or_ip: str) -> str: + s = name_or_ip.strip().rstrip(".") + if s.endswith("in-addr.arpa") or s.endswith("ip6.arpa"): + return normalize_qname(s) + if is_ip(s): + return dns.reversename.from_address(s).to_text() + return normalize_qname(s) + +def answers_to_short(values): + if not values: + return None + uniq = sorted(set(values)) + return ",".join(uniq) if len(uniq) > 1 else uniq[0] + +def main(): + # argv: name type server port timeout tcp(0/1) + name = sys.argv[1] + qtype = sys.argv[2].upper() + server = sys.argv[3] + port = int(sys.argv[4]) + timeout = float(sys.argv[5]) + use_tcp = bool(int(sys.argv[6])) + + if qtype == "PTR": + qname = ptr_qname(name) + rdtype = dns.rdatatype.PTR + else: + qname = normalize_qname(name) + rdtype = dns.rdatatype.from_text(qtype) + + q = dns.message.make_query(qname, rdtype) + + try: + if use_tcp: + resp = dns.query.tcp(q, where=server, port=port, timeout=timeout) + else: + resp = dns.query.udp(q, where=server, port=port, timeout=timeout) + + if resp.rcode() != dns.rcode.NOERROR: + print("", end="") + return + + answers = [] + for rrset in resp.answer: + if rrset.rdtype != rdtype: + continue + for item in rrset: + answers.append(item.to_text()) # .rstrip(".")) + + out = answers_to_short(answers) + print(out or "", end="") + + except dns.exception.DNSException: + print("", end="") + +if __name__ == "__main__": + main() +""" + + +def dns_lookup_on_host( + host, + dns_name: str, + dns_type: str, + server_ip: str, + server_port: int, + timeout_s: float = 2.0, + use_tcp: bool = False, +) -> str | None: + args = [ + dns_name, + dns_type, + server_ip, + str(server_port), + str(timeout_s), + "1" if use_tcp else "0", + ] + quoted_args = " ".join(shlex.quote(a) for a in args) + + cmd = f"python3 - {quoted_args} <<'PY'\n" f"{REMOTE_DNS_SCRIPT}\n" f"PY" + + r = host.run(cmd) + out = (r.stdout or "").strip() + err = (r.stderr or "").strip() + + if err: + print(err) + + return out or None + + +def dig_python( + host, get_vars: Dict[str, Any], domains: List[Dict[str, Any]] +) -> Tuple[bool, Dict[str, Dict[str, Any]]]: + pdns_cfg = get_vars.get("pdns_config", {}) or {} + local_dns_address = str(pdns_cfg.get("local-address", "127.0.0.1")).strip() + local_dns_port = int(pdns_cfg.get("local-port", 53)) + + result_state: List[Dict[str, Any]] = [] + + for d in domains: + domain = d.get("domain") + rrtype = d.get("type", "A") + expected = d.get("result") + + value = dns_lookup_on_host( + host=host, + dns_name=domain, + dns_type=rrtype, + server_ip=local_dns_address, + server_port=local_dns_port, + timeout_s=2.0, + use_tcp=False, + ) + output_msg = value or "" + + result_state.append( + { + domain: { + "output": output_msg, + "cmd": f"python3(dnspython) {rrtype} {domain} @{local_dns_address}:{local_dns_port}", + "failed": output_msg != expected, + } + } + ) + + # print(result_state) + + combined = {k: v for item in result_state for k, v in item.items()} + failed = { + k: v for k, v in combined.items() if isinstance(v, dict) and v.get("failed") + } + return (len(failed) > 0, failed) diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pdns_records/molecule/configured/tests/helper/molecule.py b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pdns_records/molecule/configured/tests/helper/molecule.py new file mode 100644 index 0000000..08eb131 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pdns_records/molecule/configured/tests/helper/molecule.py @@ -0,0 +1,277 @@ +from 
__future__ import annotations
+
+import json
+import os
+import re
+from pathlib import Path
+from typing import Any, Dict, List, Mapping, Optional, Sequence
+
+import pytest
+import testinfra.utils.ansible_runner
+from ansible.parsing.dataloader import DataLoader
+from jinja2 import ChainableUndefined
+from jinja2.nativetypes import NativeEnvironment
+
+# --- helper ----------------------------------------------------------------
+
+
+def pp_json(json_thing, sort=True, indents=2):
+
+    if type(json_thing) is str:
+        print(json.dumps(json.loads(json_thing), sort_keys=sort, indent=indents))
+    else:
+        print(json.dumps(json_thing, sort_keys=sort, indent=indents))
+
+    return None
+
+
+def local_facts(host, fact: Optional[str] = None) -> Dict:
+    """
+    return local facts
+    """
+    local_fact = host.ansible("setup").get("ansible_facts").get("ansible_local")
+
+    print(f"local_fact : {local_fact}")
+
+    if local_fact and fact:
+        return local_fact.get(fact, {})
+    else:
+        return dict()
+
+
+def infra_hosts(host_name: Optional[str] = None):
+    """ """
+    if not host_name:
+        host_name = "all"
+
+    result = testinfra.utils.ansible_runner.AnsibleRunner(
+        os.environ["MOLECULE_INVENTORY_FILE"]
+    ).get_hosts(host_name)
+
+    print(f"result: {result}")
+    print(f" {type(result)}")
+
+    return result
+
+
+# --- paths -----------------------------------------------------------------
+
+
+def base_directory() -> tuple[Path, Path]:
+    """
+    Returns:
+        role_dir: role root (contains defaults/, vars/, tasks/, ...)
+        scenario_dir: molecule scenario dir (contains group_vars/, ...)
+    """
+    cwd = Path.cwd()
+
+    # depending on tox/molecule, pytest runs either in scenario/tests or in the role root
+    if (cwd / "group_vars").is_dir():
+        # .../molecule//tests -> role root is ../..
+        return (cwd / "../..").resolve(), cwd.resolve()
+
+    scenario = os.environ.get("MOLECULE_SCENARIO_NAME", "default")
+    return cwd.resolve(), (cwd / "molecule" / scenario).resolve()
+
+
+def _normalize_os(distribution: str) -> Optional[str]:
+    d = (distribution or "").strip().lower()
+    if d in ("debian", "ubuntu"):
+        return "debian"
+    if d in ("arch", "artix"):
+        return f"{d}linux"
+    return None
+
+
+# --- load vars files (YAML) ------------------------------------------------
+
+
+def _load_vars_file(loader: DataLoader, file_base: Path) -> Dict[str, Any]:
+    """
+    Pass file_base without extension, e.g. role_dir/'defaults'/'main'.
+    Loads main.yml or main.yaml via the Ansible DataLoader (Vault compatible).
+    """
+    for ext in ("yml", "yaml"):
+        p = file_base.with_suffix(f".{ext}")
+        if not p.is_file():
+            continue
+
+        data = loader.load_from_file(str(p))
+        if data is None:
+            return {}
+        if not isinstance(data, dict):
+            raise TypeError(f"{p} must be a mapping/dict, got {type(data)}")
+        return data
+
+    return {}
+
+
+# --- jinja rendering (multi-pass) ------------------------------------------
+
+_JINJA_MARKER = re.compile(r"({{.*?}}|{%-?.*?-%}|{#.*?#})", re.S)
+
+
+def _find_unrendered_templates(obj: Any, prefix: str = "") -> List[str]:
+    found: List[str] = []
+
+    if isinstance(obj, str):
+        if _JINJA_MARKER.search(obj):
+            found.append(prefix or "")
+        return found
+
+    if isinstance(obj, Mapping):
+        for k, v in obj.items():
+            key = str(k)
+            found.extend(
+                _find_unrendered_templates(v, f"{prefix}.{key}" if prefix else key)
+            )
+        return found
+
+    if isinstance(obj, Sequence) and not isinstance(obj, (str, bytes, bytearray)):
+        for i, v in enumerate(obj):
+            found.extend(_find_unrendered_templates(v, f"{prefix}[{i}]"))
+        return found
+
+    return found
+
+
+def _make_jinja_env() -> NativeEnvironment:
+    """
+    NativeEnvironment: returns native types for pure expressions, strings
+    otherwise. Undefined is 'chainable', so ansible_facts.foo.bar does not
+    blow up hard but yields Undefined (similar to fail_on_undefined=False).
+    """
+    env = NativeEnvironment(undefined=ChainableUndefined, autoescape=False)
+
+    # minimal Ansible-like lookup/query implementation (allowlist: env only)
+    def _lookup(plugin: str, term: Any, *rest: Any, **kwargs: Any) -> Any:
+        if plugin != "env":
+            raise ValueError(
+                f"lookup('{plugin}', ...) not supported in tests (allowlist: env)"
+            )
+        # Ansible lookup('env','X') -> '' when unset (so default(..., true) kicks in)
+        if isinstance(term, (list, tuple)):
+            vals = [os.environ.get(str(t), "") for t in term]
+            return vals[0] if kwargs.get("wantlist") is False else vals
+        return os.environ.get(str(term), "")
+
+    def _query(plugin: str, term: Any, *rest: Any, **kwargs: Any) -> List[Any]:
+        # query() is wantlist=True
+        kwargs["wantlist"] = True
+        res = _lookup(plugin, term, *rest, **kwargs)
+        return res if isinstance(res, list) else [res]
+
+    env.globals["lookup"] = _lookup
+    env.globals["query"] = _query
+    return env
+
+
+def _render_obj(
+    env: NativeEnvironment, obj: Any, ctx: Dict[str, Any], *, skip_keys: frozenset[str]
+) -> Any:
+    if isinstance(obj, str):
+        if not _JINJA_MARKER.search(obj):
+            return obj
+        tmpl = env.from_string(obj)
+        return tmpl.render(**ctx)
+
+    if isinstance(obj, Mapping):
+        out: Dict[str, Any] = {}
+        for k, v in obj.items():
+            ks = str(k)
+            if ks in skip_keys:
+                out[ks] = v
+            else:
+                out[ks] = _render_obj(env, v, ctx, skip_keys=skip_keys)
+        return out
+
+    if isinstance(obj, list):
+        return [_render_obj(env, v, ctx, skip_keys=skip_keys) for v in obj]
+
+    if isinstance(obj, tuple):
+        return tuple(_render_obj(env, v, ctx, skip_keys=skip_keys) for v in obj)
+
+    return obj
+
+
+def render_all_vars(data: Dict[str, Any], passes: int = 8) -> Dict[str, Any]:
+    """
+    Multi-pass: so that values like
+        system_architecture -> ...,
+    and afterwards defaults_release.file -> ...{{ system_architecture }}...
+    are resolved cleanly.
+    """
+    env = _make_jinja_env()
+
+    current: Dict[str, Any] = data
+    last_leftovers: Optional[List[str]] = None
+
+    for _ in range(max(1, passes)):
+        # the context is always the current state
+        rendered = _render_obj(
+            env, current, current, skip_keys=frozenset({"ansible_facts"})
+        )
+        if not isinstance(rendered, dict):
+            raise TypeError(f"Rendered vars are not a dict anymore: {type(rendered)}")
+
+        leftovers = _find_unrendered_templates(rendered)
+        if not leftovers:
+            return rendered
+
+        # no further progress
+        if leftovers == last_leftovers:
+            current = rendered
+            break
+
+        last_leftovers = leftovers
+        current = rendered
+
+    # optional: fail hard if templates remain unresolved (otherwise it silently goes wrong)
+    if os.environ.get("ANSIBLE_TEST_ALLOW_UNRESOLVED_TEMPLATES", "0") != "1":
+        leftovers = _find_unrendered_templates(current)
+        if leftovers:
+            raise AssertionError(
+                "Unresolved templates after rendering:\n- " + "\n- ".join(leftovers)
+            )
+
+    return current
+
+
+# --- pytest fixture --------------------------------------------------------
+
+
+@pytest.fixture()
+def get_vars(host) -> Dict[str, Any]:
+    role_dir, scenario_dir = base_directory()
+
+    loader = DataLoader()
+    loader.set_basedir(str(role_dir))
+
+    distribution = getattr(host.system_info, "distribution", "") or ""
+    os_id = _normalize_os(distribution)
+
+    merged: Dict[str, Any] = {}
+    merged.update(_load_vars_file(loader, role_dir / "defaults" / "main"))
+    merged.update(_load_vars_file(loader, role_dir / "vars" / "main"))
+
+    if os_id:
+        merged.update(_load_vars_file(loader, role_dir / "vars" / os_id))
+
+    merged.update(_load_vars_file(loader, scenario_dir / "group_vars" / "all" / "vars"))
+
+    # facts as input (no templates)
+    setup = host.ansible("setup")
+    facts = setup.get("ansible_facts", {}) if isinstance(setup, dict) else {}
+    if isinstance(facts, dict):
+        merged["ansible_facts"] = facts
+        merged.setdefault(
+            "ansible_system", facts.get("system") or facts.get("ansible_system")
+        )
+        merged.setdefault(
+            "ansible_architecture",
+            facts.get("architecture") or facts.get("ansible_architecture"),
+        )
+
+    result = render_all_vars(merged, passes=8)
+
+    return result
diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pdns_records/molecule/configured/tests/test_default.py b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pdns_records/molecule/configured/tests/test_default.py
new file mode 100644
index 0000000..51e3671
--- /dev/null
+++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pdns_records/molecule/configured/tests/test_default.py
@@ -0,0 +1,176 @@
+from __future__ import annotations, unicode_literals
+
+from helper.dns_utils import dig_python, extract_error, extract_unique_errors
+from helper.molecule import infra_hosts, get_vars
+
+testinfra_hosts = infra_hosts(host_name="instance")
+
+# --- tests -----------------------------------------------------------------
+
+def _exec_dns_test(host, get_vars, domains):
+
+    has_failed, failed = dig_python(host=host, get_vars=get_vars, domains=domains)
+
+    if has_failed:
+        print(failed)
+        unique_errors = extract_unique_errors(failed)
+        error = extract_error(failed)
+        print("\n".join(error))
+        print("\n".join(unique_errors))
+        assert False
+
+def test_records_A(host, get_vars):
+    """ """
+    domains = [
+        {"domain": "ns1.acme-inc.local", "type": "A", "result": "10.11.0.1"},
+        {"domain": "ns2.acme-inc.local", "type": "A", "result": "10.11.0.2"},
+        # {"domain": "ns3.acme-inc.local", "type": "A", "result": "10.11.0.2"},
+        # {"domain": "srv001.acme-inc.local", "type": "A", "result": "10.11.1.1"},
+        # {"domain": "srv002.acme-inc.local", "type": "A", "result": "10.11.1.2"},
+        # {"domain": "mail001.acme-inc.local", "type": "A", "result": "10.11.2.1"},
+        # {"domain": "mail002.acme-inc.local", "type": "A", "result": "10.11.2.2"},
+        # {"domain": "mail003.acme-inc.local", "type": "A", "result": "10.11.2.3"},
+        # {"domain": "srv010.acme-inc.local", "type": "A", "result": "10.11.0.10"},
+        # {"domain": "srv011.acme-inc.local", "type": "A", "result": "10.11.0.11"},
+        # {"domain": "srv012.acme-inc.local", "type": "A", "result": "10.11.0.12"},
+        # #
+        # {"domain": "cms.cm.local", "type": "A", "result": "192.168.124.21"},
+    ]
+
+    _exec_dns_test(host, get_vars, domains)
+
+
+def test_records_PTR(host, get_vars):
+    """ """
+    domains = [
+        # IPv4 Reverse lookups
+        {"domain": "10.11.0.1", "type": "PTR", "result": "ns1.acme-inc.local."},
+        {"domain": "10.11.0.2", "type": "PTR", "result": "ns2.acme-inc.local."},
+        {"domain": "10.11.1.1", "type": "PTR", "result": "srv001.acme-inc.local."},
+        # {"domain": "10.11.1.2", "type": "PTR", "result": "srv002.acme-inc.local."},
+        # {"domain": "10.11.2.1", "type": "PTR", "result": "mail001.acme-inc.local."},
+        # {"domain": "10.11.2.2", "type": "PTR", "result": "mail002.acme-inc.local."},
+        # {"domain": "10.11.2.3", "type": "PTR", "result": "mail003.acme-inc.local."},
+        # {"domain": "10.11.0.10", "type": "PTR", "result": "srv010.acme-inc.local."},
+        # {"domain": "10.11.0.11", "type": "PTR", "result": "srv011.acme-inc.local."},
+        # {"domain": "10.11.0.12", "type": "PTR", "result": "srv012.acme-inc.local."},
+        # # # IPv6 Reverse lookups
+        # {"domain": "2001:db8::1", "type": "PTR", "result": "srv001.acme-inc.local."},
+        # #
+        # {"domain": "192.168.124.21", "type": "PTR", "result": "cms.cm.local"},
+    ]
+
+    _exec_dns_test(host, get_vars, domains)
+
+
+def test_records_CNAME(host, get_vars):
+    """ """
+    domains = [
+        # IPv4 Alias lookups
+        {
+            "domain": "www.acme-inc.local",
+            "type": "CNAME",
+            "result": "srv001.acme-inc.local.",
+        },
+        {
+            "domain": "foo.acme-inc.local",
+            "type": "CNAME",
+            "result": "srv001.acme-inc.local.",
+        },
+        # {
+        #     "domain": "smtp.acme-inc.local",
+        #     "type": "CNAME",
+        #     "result": "mail001.acme-inc.local.",
+        # },
+        # {
+        #     "domain": "mail-in.acme-inc.local",
+        #     "type": "CNAME",
+        #     "result": "mail001.acme-inc.local.",
+        # },
+        # {
+        #     "domain": "imap.acme-inc.local",
+        #     "type": "CNAME",
+        #     "result": "mail003.acme-inc.local.",
+        # },
+        # {
+        #     "domain": "mail-out.acme-inc.local",
+        #     "type": "CNAME",
+        #     "result": "mail003.acme-inc.local.",
+        # },
+        # #
+        # {"domain": "cms.cm.local", "type": "CNAME", "result": "192.168.124.21"},
+    ]
+
+    _exec_dns_test(host, get_vars, domains)
+
+
+def test_records_AAAA(host, get_vars):
+    """ """
+    domains = [
+        # IPv6 Forward lookups
+        {"domain": "srv001.acme-inc.local", "type": "AAAA", "result": "2001:db8::1"},
+    ]
+
+    has_failed, failed = dig_python(host=host, get_vars=get_vars, domains=domains)
+
+    if has_failed:
+        unique_errors = extract_unique_errors(failed)
+        print("\n\n".join(unique_errors))
+        assert False
+
+
+def test_records_NS(host, get_vars):
+    """ """
+    domains = [
+        # NS records lookup
+        {
+            "domain": "acme-inc.local",
+            "type": "NS",
+            "result": "ns1.acme-inc.local.,ns2.acme-inc.local.",
+        },
+        # {"domain": "cm.local", "type": "NS", "result": "dns.cm.local."},
+    ]
+
+    _exec_dns_test(host, get_vars, domains)
+
+
+def test_records_MX(host, get_vars):
+    """ """
+    domains = [
+        # MX records lookup
+        {
+            "domain": "acme-inc.local",
+            "type": "MX",
+            "result": "10 mail001.acme-inc.local.,20 mail002.acme-inc.local.",
+        },
+    ]
+
+    _exec_dns_test(host, get_vars, domains)
+
+
+def test_records_SRV(host, get_vars):
+    """ """
+    domains = [
+        # Service records lookup
+        {
+            "domain": "_ldap._tcp.acme-inc.local",
+            "type": "SRV",
+            "result": "0 100 631 srv010.acme-inc.local.,0 50 631 srv010.acme-inc.local.",
+        },
+    ]
+
+    _exec_dns_test(host, get_vars, domains)
+
+
+def test_records_TXT(host, get_vars):
+    """ """
+    domains = [
+        # TXT records lookup
+        {
+            "domain": "acme-inc.local",
+            "type": "TXT",
+            "result": '"more text","some text"',
+        },
+    ]
+
+    _exec_dns_test(host, get_vars, domains)
diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pdns_records/molecule/configured/tests/test_pdns.py b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pdns_records/molecule/configured/tests/test_pdns.py
new file mode 100644
index 0000000..c3b1595
--- /dev/null
+++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pdns_records/molecule/configured/tests/test_pdns.py
@@ -0,0 +1,331 @@
+from __future__ import annotations
+
+import json
+import os
+import re
+from pathlib import Path
+from typing import Any, Dict, List, Mapping, Optional, Sequence
+
+import pytest
+from ansible.parsing.dataloader import DataLoader
+from jinja2 import ChainableUndefined
+from jinja2.nativetypes import NativeEnvironment
+
+# --- helper ----------------------------------------------------------------
+
+
+def pp_json(json_thing, sort=True, indents=2):
+
+    if type(json_thing) is str:
+        print(json.dumps(json.loads(json_thing), sort_keys=sort, indent=indents))
+    else:
+        print(json.dumps(json_thing, sort_keys=sort, indent=indents))
+
+    return None
+
+
+# --- paths -----------------------------------------------------------------
+
+
+def base_directory() -> tuple[Path, Path]:
+    """
+    Returns:
+        role_dir: role root (contains defaults/, vars/, tasks/, ...)
+        scenario_dir: molecule scenario dir (contains group_vars/, ...)
+    """
+    cwd = Path.cwd()
+
+    # depending on tox/molecule, pytest runs either in scenario/tests or in the role root
+    if (cwd / "group_vars").is_dir():
+        # .../molecule//tests -> role root is ../..
+        return (cwd / "../..").resolve(), cwd.resolve()
+
+    scenario = os.environ.get("MOLECULE_SCENARIO_NAME", "default")
+    return cwd.resolve(), (cwd / "molecule" / scenario).resolve()
+
+
+def _normalize_os(distribution: str) -> Optional[str]:
+    d = (distribution or "").strip().lower()
+    if d in ("debian", "ubuntu"):
+        return "debian"
+    if d in ("arch", "artix"):
+        return f"{d}linux"
+    return None
+
+
+# --- load vars files (YAML) ------------------------------------------------
+
+
+def _load_vars_file(loader: DataLoader, file_base: Path) -> Dict[str, Any]:
+    """
+    Pass file_base without extension, e.g. role_dir/'defaults'/'main'.
+    Loads main.yml or main.yaml via the Ansible DataLoader (Vault compatible).
+    """
+    for ext in ("yml", "yaml"):
+        p = file_base.with_suffix(f".{ext}")
+        if not p.is_file():
+            continue
+
+        data = loader.load_from_file(str(p))
+        if data is None:
+            return {}
+        if not isinstance(data, dict):
+            raise TypeError(f"{p} must be a mapping/dict, got {type(data)}")
+        return data
+
+    return {}
+
+
+# --- jinja rendering (multi-pass) ------------------------------------------
+
+_JINJA_MARKER = re.compile(r"({{.*?}}|{%-?.*?-%}|{#.*?#})", re.S)
+
+
+def _find_unrendered_templates(obj: Any, prefix: str = "") -> List[str]:
+    found: List[str] = []
+
+    if isinstance(obj, str):
+        if _JINJA_MARKER.search(obj):
+            found.append(prefix or "")
+        return found
+
+    if isinstance(obj, Mapping):
+        for k, v in obj.items():
+            key = str(k)
+            found.extend(
+                _find_unrendered_templates(v, f"{prefix}.{key}" if prefix else key)
+            )
+        return found
+
+    if isinstance(obj, Sequence) and not isinstance(obj, (str, bytes, bytearray)):
+        for i, v in enumerate(obj):
+            found.extend(_find_unrendered_templates(v, f"{prefix}[{i}]"))
+        return found
+
+    return found
+
+
+def _make_jinja_env() -> NativeEnvironment:
+    """
+    NativeEnvironment: returns native types for pure expressions, strings
+    otherwise. Undefined is 'chainable', so ansible_facts.foo.bar does not
+    blow up hard but yields Undefined (similar to fail_on_undefined=False).
+    """
+    env = NativeEnvironment(undefined=ChainableUndefined, autoescape=False)
+
+    # minimal Ansible-like lookup/query implementation (allowlist: env only)
+    def _lookup(plugin: str, term: Any, *rest: Any, **kwargs: Any) -> Any:
+        if plugin != "env":
+            raise ValueError(
+                f"lookup('{plugin}', ...) not supported in tests (allowlist: env)"
+            )
+        # Ansible lookup('env','X') -> '' when unset (so default(..., true) kicks in)
+        if isinstance(term, (list, tuple)):
+            vals = [os.environ.get(str(t), "") for t in term]
+            return vals[0] if kwargs.get("wantlist") is False else vals
+        return os.environ.get(str(term), "")
+
+    def _query(plugin: str, term: Any, *rest: Any, **kwargs: Any) -> List[Any]:
+        # query() is wantlist=True
+        kwargs["wantlist"] = True
+        res = _lookup(plugin, term, *rest, **kwargs)
+        return res if isinstance(res, list) else [res]
+
+    env.globals["lookup"] = _lookup
+    env.globals["query"] = _query
+    return env
+
+
+def _render_obj(
+    env: NativeEnvironment, obj: Any, ctx: Dict[str, Any], *, skip_keys: frozenset[str]
+) -> Any:
+    if isinstance(obj, str):
+        if not _JINJA_MARKER.search(obj):
+            return obj
+        tmpl = env.from_string(obj)
+        return tmpl.render(**ctx)
+
+    if isinstance(obj, Mapping):
+        out: Dict[str, Any] = {}
+        for k, v in obj.items():
+            ks = str(k)
+            if ks in skip_keys:
+                out[ks] = v
+            else:
+                out[ks] = _render_obj(env, v, ctx, skip_keys=skip_keys)
+        return out
+
+    if isinstance(obj, list):
+        return [_render_obj(env, v, ctx, skip_keys=skip_keys) for v in obj]
+
+    if isinstance(obj, tuple):
+        return tuple(_render_obj(env, v, ctx, skip_keys=skip_keys) for v in obj)
+
+    return obj
+
+
+def render_all_vars(data: Dict[str, Any], passes: int = 8) -> Dict[str, Any]:
+    """
+    Multi-pass: so that values like
+        system_architecture -> ...,
+    and afterwards defaults_release.file -> ...{{ system_architecture }}...
+    are resolved cleanly.
+    """
+    env = _make_jinja_env()
+
+    current: Dict[str, Any] = data
+    last_leftovers: Optional[List[str]] = None
+
+    for _ in range(max(1, passes)):
+        # the context is always the current state
+        rendered = _render_obj(
+            env, current, current, skip_keys=frozenset({"ansible_facts"})
+        )
+        if not isinstance(rendered, dict):
+            raise TypeError(f"Rendered vars are not a dict anymore: {type(rendered)}")
+
+        leftovers = _find_unrendered_templates(rendered)
+        if not leftovers:
+            return rendered
+
+        # no further progress
+        if leftovers == last_leftovers:
+            current = rendered
+            break
+
+        last_leftovers = leftovers
+        current = rendered
+
+    # optional: fail hard if templates remain unresolved (otherwise it silently goes wrong)
+    if os.environ.get("ANSIBLE_TEST_ALLOW_UNRESOLVED_TEMPLATES", "0") != "1":
+        leftovers = _find_unrendered_templates(current)
+        if leftovers:
+            raise AssertionError(
+                "Unresolved templates after rendering:\n- " + "\n- ".join(leftovers)
+            )
+
+    return current
+
+
+# --- pytest fixture --------------------------------------------------------
+
+
+@pytest.fixture()
+def get_vars(host) -> Dict[str, Any]:
+    role_dir, scenario_dir = base_directory()
+
+    loader = DataLoader()
+    loader.set_basedir(str(role_dir))
+
+    distribution = getattr(host.system_info, "distribution", "") or ""
+    os_id = _normalize_os(distribution)
+
+    merged: Dict[str, Any] = {}
+    merged.update(_load_vars_file(loader, role_dir / "defaults" / "main"))
+    merged.update(_load_vars_file(loader, role_dir / "vars" / "main"))
+
+    if os_id:
+        merged.update(_load_vars_file(loader, role_dir / "vars" / os_id))
+
+    merged.update(_load_vars_file(loader, scenario_dir / "group_vars" / "all" / "vars"))
+
+    # facts as input (no templates)
+    setup = host.ansible("setup")
+    facts = setup.get("ansible_facts", {}) if isinstance(setup, dict) else {}
+    if isinstance(facts, dict):
+        merged["ansible_facts"] = facts
+        merged.setdefault(
+            "ansible_system", facts.get("system") or facts.get("ansible_system")
+        )
+        merged.setdefault(
+            "ansible_architecture",
+            facts.get("architecture") or facts.get("ansible_architecture"),
+        )
+
+    result = render_all_vars(merged, passes=8)
+
+    return result
+
+
+# --- tests -----------------------------------------------------------------
+
+
+def test_directories(host, get_vars):
+    """
+    used config directory
+    """
+    print(get_vars)
+
+    directories = [
+        "/etc/powerdns",
+        "/var/lib/powerdns",
+        "/var/spool/powerdns",
+        get_vars.get("pdns_config_include"),
+    ]
+
+    for dirs in directories:
+        d = host.file(dirs)
+        assert d.is_directory
+
+
+def test_files(host, get_vars):
+    """
+    created config files
+    """
+    files = [
+        "/etc/powerdns/pdns.conf",
+        "/etc/powerdns/pdns.d/pdns_general.conf",
+        "/etc/powerdns/pdns.d/pdns_backends.conf",
+        "/etc/powerdns/pdns.d/pdns_webserver.conf",
+        "/etc/powerdns/pdns.d/pdns_api.conf",
+        "/etc/ansible/facts.d/pdns.fact",
+        "/usr/bin/pdnsutil",
+    ]
+
+    for _file in files:
+        f = host.file(_file)
+        assert f.is_file
+
+
+def test_lmdb_files(host, get_vars):
+    """ """
+
+    files = [
+        "/var/lib/powerdns/pdns.lmdb",
+        "/var/lib/powerdns/pdns.lmdb-lock",
+    ]
+
+    for _file in files:
+        f = host.file(_file)
+        assert f.is_file
+
+
+def test_service_running_and_enabled(host, get_vars):
+    """
+    running service
+    """
+    service_name = get_vars.get("pdns_service").get("name", None)
+
+    if service_name:
+        service = host.service(service_name)
+        assert service.is_running
+        assert service.is_enabled
+
+
+def test_listening_socket(host, get_vars):
+    """ """
+    listening = host.socket.get_listening_sockets()
+
+    for i in listening:
+        print(i)
+
+    bind_port = "5300"
+    bind_address = "127.0.0.1"
+
+    listen = []
+    listen.append(f"tcp://{bind_address}:{bind_port}")
+    listen.append(f"udp://{bind_address}:{bind_port}")
+
+    for spec in listen:
+        socket = host.socket(spec)
+        assert socket.is_listening
diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pdns_records/tasks/domains.yml b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pdns_records/tasks/domains.yml
new file mode 100644
index 0000000..8ec226f
--- /dev/null
+++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pdns_records/tasks/domains.yml
@@ -0,0 +1,14 @@
+---
+
+- name: create domain zones
+  bodsch.dns.pdns_zone_data:
+    zone_data: "{{ pdns_zones }}"
+  register: pdns_zone_created
+
+- name: created zones
+  ansible.builtin.debug:
+    msg: "{{ pdns_zone_created }}"
+  when:
+    - pdns_zone_created.failed or
+      pdns_zone_created.changed
+...
diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pdns_records/tasks/main.yml b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pdns_records/tasks/main.yml
new file mode 100644
index 0000000..ab34ddc
--- /dev/null
+++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pdns_records/tasks/main.yml
@@ -0,0 +1,9 @@
+---
+
+- name: prepare
+  ansible.builtin.include_tasks: prepare.yml
+
+- name: domains
+  ansible.builtin.include_tasks: domains.yml
+
+...
diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pdns_records/tasks/prepare.yml b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pdns_records/tasks/prepare.yml
new file mode 100644
index 0000000..e839e60
--- /dev/null
+++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pdns_records/tasks/prepare.yml
@@ -0,0 +1,35 @@
+---
+
+- name: include OS specific configuration ({{ ansible_facts.distribution }} ({{ ansible_facts.os_family }}) {{ ansible_facts.distribution_major_version }})
+  ansible.builtin.include_vars: "{{ lookup('first_found', params) }}"
+  vars:
+    params:
+      paths:
+        - "vars"
+      files:
+        # eg. debian-10 / ubuntu-20.04 / centos-8 / oraclelinux-8
+        - "{{ ansible_facts.distribution | lower }}-{{ ansible_facts.distribution_major_version }}.yml"
+        # eg. archlinux-systemd / archlinux-openrc
+        - "{{ ansible_facts.distribution | lower }}-{{ ansible_facts.service_mgr | lower }}.yml"
+        # eg. debian / ubuntu / centos / oraclelinux
+        - "{{ ansible_facts.distribution | lower }}.yml"
+        # eg. redhat / debian / archlinux
+        - "{{ ansible_facts.os_family | lower }}.yml"
+        - default.yaml
+      skip: true
+
+- name: detect ansible check_mode
+  bodsch.core.check_mode:
+  register: _check_mode
+
+- name: define running_in_check_mode
+  ansible.builtin.set_fact:
+    running_in_check_mode: '{{ _check_mode.check_mode }}'
+
+- name: install dependencies
+  ansible.builtin.package:
+    name: "{{ pdns_dependencies }}"
+    state: present
+  when:
+    - pdns_dependencies | default([]) | count > 0
+...
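The `first_found` lookup in `tasks/prepare.yml` above walks its candidate list in order and includes the first vars file that exists; `skip: true` keeps a complete miss non-fatal. A rough, hypothetical Python equivalent of that resolution order (fact values are illustrative):

```python
from pathlib import Path

def resolve_os_vars(facts: dict, base: str = "vars") -> str | None:
    # candidate order mirrors the task above: the most specific file wins
    candidates = [
        f"{facts['distribution'].lower()}-{facts['distribution_major_version']}.yml",
        f"{facts['distribution'].lower()}-{facts['service_mgr'].lower()}.yml",
        f"{facts['distribution'].lower()}.yml",
        f"{facts['os_family'].lower()}.yml",
        "default.yaml",
    ]
    for name in candidates:
        path = Path(base) / name
        if path.is_file():
            return str(path)
    return None  # skip: true -> a complete miss is not an error

# e.g. {'distribution': 'Debian', 'distribution_major_version': '12',
#       'service_mgr': 'systemd', 'os_family': 'Debian'}
# tries vars/debian-12.yml, vars/debian-systemd.yml, vars/debian.yml, ...
```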
diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pdns_records/vars/archlinux.yml b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pdns_records/vars/archlinux.yml new file mode 100644 index 0000000..2dec90c --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pdns_records/vars/archlinux.yml @@ -0,0 +1,15 @@ +--- + +pdns_packages: + - powerdns + - python-requests + - python-urllib3 + - python-netaddr + +_pdns_backend_packages: + mysql: + - mariadb-libs + - python-mysqlclient + +pdns_owner: powerdns +pdns_group: powerdns diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pdns_records/vars/debian.yml b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pdns_records/vars/debian.yml new file mode 100644 index 0000000..9fde552 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pdns_records/vars/debian.yml @@ -0,0 +1,31 @@ +--- + +pdns_packages: + - pdns-server + +pdns_dependencies: + - gnupg + - python3-requests + +# The directory where the PowerDNS Authoritative Server configuration is located +pdns_config_dir: "/etc/powerdns" + +pdns_owner: pdns +pdns_group: pdns + +# List of PowerDNS Authoritative Server Backends packages on Debian +_pdns_backend_packages: + geo: pdns-backend-geo + geoip: pdns-backend-geoip + mysql: + - pdns-backend-mysql + - python3-mysqldb + pgsql: pdns-backend-pgsql + sqlite3: pdns-backend-sqlite3 + ldap: pdns-backend-ldap + lmdb: pdns-backend-lmdb + lua: pdns-backend-lua + mydns: pdns-backend-mydns + pipe: pdns-backend-pipe + remote: pdns-backend-remote + tinydns: pdns-backend-tinydns diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pdns_records/vars/main.yml b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pdns_records/vars/main.yml new file mode 100644 index 0000000..3c542d6 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pdns_records/vars/main.yml @@ -0,0 +1,24 @@ +--- + +pdns_dependencies: [] + +# The directory where the PowerDNS configuration is located +pdns_config_dir: '/etc/powerdns' + +pdns_lib_dir: /var/lib/powerdns + +pdns_defaults_webserver: + enabled: false + address: 127.0.0.1 + allow-from: 127.0.0.1,::1 + connection-timeout: 5 + hash-plaintext-credentials: false + loglevel: normal + max-bodysize: 2 + password: + port: 8081 + print-arguments: false + +pdns_defaults_xfr: + cycle-interval: 60 + max-received-mbytes: 100 diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pdns_recursor/Makefile b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pdns_recursor/Makefile new file mode 100644 index 0000000..3abaf48 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pdns_recursor/Makefile @@ -0,0 +1,22 @@ +# +export TOX_SCENARIO ?= default +export TOX_ANSIBLE ?= ansible_6.1 + +.PHONY: converge destroy verify test lint + +default: converge + +converge: + @hooks/converge + +destroy: + @hooks/destroy + +verify: + @hooks/verify + +test: + @hooks/test + +lint: + @hooks/lint diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pdns_recursor/README.md b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pdns_recursor/README.md new file mode 100644 index 0000000..f1143cd --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pdns_recursor/README.md @@ -0,0 +1,27 @@ +# 
Ansible Role: `bodsch.dns.pdns_recursor`
+
+Ansible role to install and configure powerdns-recursor on various Linux systems.
+
+
+## Usage
+
+```yaml
+
+```
+
+## Contribution
+
+Please read [Contribution](CONTRIBUTING.md)
+
+## Development, Branches (Git Tags)
+
+
+## Author
+
+- Bodo Schulz
+
+## License
+
+[Apache](LICENSE)
+
+**FREE SOFTWARE, HELL YEAH!**
diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pdns_recursor/defaults/main.yml b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pdns_recursor/defaults/main.yml
new file mode 100644
index 0000000..3db8634
--- /dev/null
+++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pdns_recursor/defaults/main.yml
@@ -0,0 +1,20 @@
+---
+
+pdns_recursor_carbon: {}
+pdns_recursor_dnssec: {}
+pdns_recursor_ecs: {}
+pdns_recursor_incoming: {}
+pdns_recursor_logging: {}
+pdns_recursor_nod: {}
+pdns_recursor_outgoing: {}
+pdns_recursor_packetcache: {}
+pdns_recursor_recordcache: {}
+pdns_recursor_recursor: {}
+pdns_recursor_snmp: {}
+pdns_recursor_webservice: {}
+
+# State of the PowerDNS Recursor service
+pdns_recursor_service:
+  name: pdns-recursor
+  state: started
+  enabled: true
diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pdns_recursor/handlers/main.yml b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pdns_recursor/handlers/main.yml
new file mode 100644
index 0000000..b07e748
--- /dev/null
+++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pdns_recursor/handlers/main.yml
@@ -0,0 +1,27 @@
+---
+
+- name: systemctl daemon-reload
+  become: true
+  ansible.builtin.systemd:
+    daemon_reload: true
+    force: true
+  when:
+    - ansible_facts.service_mgr | lower == "systemd"
+
+- name: reload powerdns-recursor
+  become: true
+  ansible.builtin.service:
+    name: "{{ pdns_recursor_service.name }}"
+    state: reloaded
+  when:
+    - not running_in_check_mode
+
+- name: restart powerdns-recursor
+  become: true
+  ansible.builtin.service:
+    name: "{{ pdns_recursor_service.name }}"
+    state: restarted
+  when:
+    - not running_in_check_mode
+
+...
diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pdns_recursor/meta/main.yml b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pdns_recursor/meta/main.yml
new file mode 100644
index 0000000..3cec97a
--- /dev/null
+++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pdns_recursor/meta/main.yml
@@ -0,0 +1,32 @@
+---
+
+galaxy_info:
+  role_name: pdns_recursor
+
+  author: Bodo Schulz
+  description: Ansible role to install and configure PowerDNS Recursor
+
+  license: Apache
+  min_ansible_version: "2.9"
+
+  platforms:
+    - name: ArchLinux
+    - name: Debian
+      versions:
+        # 10
+        - buster
+        # 11
+        - bullseye
+        - bookworm
+    - name: Ubuntu
+      versions:
+        # 20.04
+        - focal
+
+  galaxy_tags:
+    - system
+    - dns
+
+dependencies: []
+
+...
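The defaults above expose one (empty) dict per PowerDNS Recursor configuration section (carbon, dnssec, incoming, ...). The template that renders them into `/etc/powerdns/recursor.conf` (the file the verifier checks below) is not part of this diff; purely as an illustration of the data shape, a hypothetical flattening of such a section dict into `key=value` lines could look like this (`flatten_section` and its input are invented for the example):

```python
def flatten_section(options: dict) -> list[str]:
    # hypothetical renderer: one "key=value" line per option; lists are
    # comma-joined, booleans become yes/no; the role's real template may differ
    lines = []
    for key, value in sorted(options.items()):
        if isinstance(value, bool):
            value = "yes" if value else "no"
        elif isinstance(value, (list, tuple)):
            value = ",".join(str(v) for v in value)
        lines.append(f"{key}={value}")
    return lines

# illustrative input in the shape of pdns_recursor_incoming:
print("\n".join(flatten_section({"allow-from": ["127.0.0.1", "10.0.0.0/8"], "port": 53})))
# allow-from=127.0.0.1,10.0.0.0/8
# port=53
```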
diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pdns_recursor/molecule/configured/converge.yml b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pdns_recursor/molecule/configured/converge.yml new file mode 100644 index 0000000..d58c328 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pdns_recursor/molecule/configured/converge.yml @@ -0,0 +1,12 @@ +--- + +- name: converge + hosts: instance + any_errors_fatal: false + gather_facts: true + + environment: + NETRC: '' + + roles: + - role: bodsch.dns.pdns_recursor diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pdns_recursor/molecule/configured/group_vars/all/vars.yml b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pdns_recursor/molecule/configured/group_vars/all/vars.yml new file mode 100644 index 0000000..c8a2f8e --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pdns_recursor/molecule/configured/group_vars/all/vars.yml @@ -0,0 +1,16 @@ +--- + +pdns_recursor_recursor: + forward_zones: + - zone: matrix.lan + forwarders: + - 192.168.0.4 + - 192.168.0.1:5300 + - zone: google.de + forwarders: + - 127.0.0.1 + - zone: google.com + forwarders: + - 127.0.0.1 + +... diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pdns_recursor/molecule/configured/molecule.yml b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pdns_recursor/molecule/configured/molecule.yml new file mode 100644 index 0000000..fda92e3 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pdns_recursor/molecule/configured/molecule.yml @@ -0,0 +1,55 @@ +--- + +role_name_check: 1 + +dependency: + name: galaxy + +driver: + name: docker + +platforms: + - name: instance + image: "ghcr.io/bodsch/docker-ansible/ansible-${DISTRIBUTION:-debian:12}" + command: ${MOLECULE_DOCKER_COMMAND:-""} + docker_host: "${DOCKER_HOST:-unix:///run/docker.sock}" + privileged: true + pre_build_image: true + cgroupns_mode: host + volumes: + - /sys/fs/cgroup:/sys/fs/cgroup:rw + - /var/lib/containerd + capabilities: + - SYS_ADMIN + tmpfs: + - /run + - /tmp + +provisioner: + name: ansible + ansible_facts.args: + - --diff + - -v + config_options: + defaults: + deprecation_warnings: true + callback_result_format: yaml + callbacks_enabled: profile_tasks + gathering: smart + fact_caching: jsonfile + fact_caching_timeout: 8640 + fact_caching_connection: ansible_facts + +scenario: + test_sequence: + - destroy + - dependency + - syntax + - create + - prepare + - converge + - verify + - destroy + +verifier: + name: testinfra diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pdns_recursor/molecule/configured/prepare.yml b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pdns_recursor/molecule/configured/prepare.yml new file mode 100644 index 0000000..437874d --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pdns_recursor/molecule/configured/prepare.yml @@ -0,0 +1,54 @@ +--- + +- name: information + hosts: all + gather_facts: true + + pre_tasks: + - name: arch- / artixlinux + when: + - ansible_facts.distribution | lower == 'archlinux' or + ansible_facts.os_family | lower == 'artix linux' + block: + - name: update pacman system + ansible.builtin.command: | + pacman --refresh --sync --sysupgrade --noconfirm + register: pacman + changed_when: pacman.rc != 0 + failed_when: pacman.rc != 0 + + - 
name: create depends service + ansible.builtin.copy: + mode: 0755 + dest: /etc/init.d/net + content: | + #!/usr/bin/openrc-run + true + when: + - ansible_facts.os_family | lower == 'artix linux' + + - name: make sure python3-apt is installed (only debian based) + ansible.builtin.package: + name: + - python3-apt + state: present + when: + - ansible_facts.os_family | lower == 'debian' + + - name: update package cache + become: true + ansible.builtin.package: + update_cache: true + + - name: install dependencies + ansible.builtin.package: + name: + - iproute2 + state: present + + - debug: + msg: + - "os family : {{ ansible_facts.distribution }} ({{ ansible_facts.os_family }})" + - "distribution version : {{ ansible_facts.distribution_major_version }}" + - "ansible version : {{ ansible_version.full }}" + - "python version : {{ ansible_facts.python.version.major }}.{{ ansible_facts.python.version.minor }}" diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pdns_recursor/molecule/configured/tests/test_default.py b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pdns_recursor/molecule/configured/tests/test_default.py new file mode 100644 index 0000000..a1b1129 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pdns_recursor/molecule/configured/tests/test_default.py @@ -0,0 +1,197 @@ +# coding: utf-8 +from __future__ import unicode_literals + +import json +import os + +import pytest +import testinfra.utils.ansible_runner +from ansible.parsing.dataloader import DataLoader +from ansible.template import Templar + +HOST = "instance" + +testinfra_hosts = testinfra.utils.ansible_runner.AnsibleRunner( + os.environ["MOLECULE_INVENTORY_FILE"] +).get_hosts(HOST) + + +def pp_json(json_thing, sort=True, indents=2): + if type(json_thing) is str: + print(json.dumps(json.loads(json_thing), sort_keys=sort, indent=indents)) + else: + print(json.dumps(json_thing, sort_keys=sort, indent=indents)) + return None + + +def base_directory(): + """ """ + cwd = os.getcwd() + + if "group_vars" in os.listdir(cwd): + directory = "../.." + molecule_directory = "." + else: + directory = "." 
+    molecule_directory = f"molecule/{os.environ.get('MOLECULE_SCENARIO_NAME')}"
+
+    return directory, molecule_directory
+
+
+def read_ansible_yaml(file_name, role_name):
+    """ """
+    read_file = None
+
+    for e in ["yml", "yaml"]:
+        test_file = f"{file_name}.{e}"
+        if os.path.isfile(test_file):
+            read_file = test_file
+            break
+
+    return f"file={read_file} name={role_name}"
+
+
+@pytest.fixture()
+def get_vars(host):
+    """
+    parse ansible variables
+    - defaults/main.yml
+    - vars/main.yml
+    - vars/${DISTRIBUTION}.yaml
+    - molecule/${MOLECULE_SCENARIO_NAME}/group_vars/all/vars.yml
+    """
+    base_dir, molecule_dir = base_directory()
+    distribution = host.system_info.distribution
+    operating_system = None
+
+    if distribution in ["debian", "ubuntu"]:
+        operating_system = "debian"
+    elif distribution in ["redhat", "ol", "centos", "rocky", "almalinux"]:
+        operating_system = "redhat"
+    elif distribution in ["arch", "artix"]:
+        operating_system = f"{distribution}linux"
+
+    # print(" -> {} / {}".format(distribution, os))
+    # print(" -> {}".format(base_dir))
+
+    file_defaults = read_ansible_yaml(f"{base_dir}/defaults/main", "role_defaults")
+    file_vars = read_ansible_yaml(f"{base_dir}/vars/main", "role_vars")
+    file_distribution = read_ansible_yaml(
+        f"{base_dir}/vars/{operating_system}", "role_distribution"
+    )
+    file_molecule = read_ansible_yaml(
+        f"{molecule_dir}/group_vars/all/vars", "test_vars"
+    )
+    # file_host_molecule = read_ansible_yaml("{}/host_vars/{}/vars".format(base_dir, HOST), "host_vars")
+
+    defaults_vars = (
+        host.ansible("include_vars", file_defaults)
+        .get("ansible_facts")
+        .get("role_defaults")
+    )
+    vars_vars = (
+        host.ansible("include_vars", file_vars).get("ansible_facts").get("role_vars")
+    )
+    distribution_vars = (
+        host.ansible("include_vars", file_distribution)
+        .get("ansible_facts")
+        .get("role_distribution")
+    )
+    molecule_vars = (
+        host.ansible("include_vars", file_molecule)
+        .get("ansible_facts")
+        .get("test_vars")
+    )
+    # host_vars = host.ansible("include_vars", file_host_molecule).get("ansible_facts").get("host_vars")
+
+    ansible_vars = defaults_vars
+    ansible_vars.update(vars_vars)
+    ansible_vars.update(distribution_vars)
+    ansible_vars.update(molecule_vars)
+    # ansible_vars.update(host_vars)
+
+    templar = Templar(loader=DataLoader(), variables=ansible_vars)
+    result = templar.template(ansible_vars, fail_on_undefined=False)
+
+    return result
+
+
+def test_directories(host, get_vars):
+    """
+    used config directory
+    """
+    pp_json(get_vars)
+
+    directories = [
+        "/etc/powerdns/recursor.d",
+        "/var/cache/ansible/pdns_recursor",
+    ]
+
+    for dirs in directories:
+        d = host.file(dirs)
+        assert d.is_directory
+
+
+def test_files(host, get_vars):
+    """
+    created config files
+    """
+    files = [
+        "/etc/powerdns/recursor.conf",
+        "/usr/lib/systemd/system/pdns-recursor.service",
+        "/usr/sbin/pdns_recursor",
+    ]
+
+    for _file in files:
+        f = host.file(_file)
+        assert f.is_file
+
+
+# def test_user(host, get_vars):
+#     """
+#     created user
+#     """
+#     shell = '/bin/false'
+#
+#     distribution = host.system_info.distribution
+#
+#     if distribution in ['centos', 'redhat', 'ol']:
+#         shell = "/sbin/nologin"
+#     elif distribution == "arch":
+#         shell = "/usr/bin/nologin"
+#
+#     user_name = "mysql"
+#     u = host.user(user_name)
+#     g = host.group(user_name)
+#
+#     assert g.exists
+#     assert u.exists
+#     assert user_name in u.groups
+#     assert u.shell == shell
+
+
+def test_service_running_and_enabled(host, get_vars):
+    """
+    running service
+    """
+    service_name = "pdns-recursor"
+
+    service = host.service(service_name)
+    assert service.is_running
+    assert service.is_enabled
+
+
+def test_listening_socket(host, get_vars):
+    """ """
+    listening = host.socket.get_listening_sockets()
+
+    for i in listening:
+        print(i)
+
+    listen = []
+    listen.append(f"tcp://127.0.0.1:53")
+    listen.append(f"udp://127.0.0.1:53")
+
+    for spec in listen:
+        socket = host.socket(spec)
+        assert socket.is_listening
diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pdns_recursor/molecule/default/converge.yml b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pdns_recursor/molecule/default/converge.yml
new file mode 100644
index 0000000..d58c328
--- /dev/null
+++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pdns_recursor/molecule/default/converge.yml
@@ -0,0 +1,12 @@
+---
+
+- name: converge
+  hosts: instance
+  any_errors_fatal: false
+  gather_facts: true
+
+  environment:
+    NETRC: ''
+
+  roles:
+    - role: bodsch.dns.pdns_recursor
diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pdns_recursor/molecule/default/group_vars/all/vars.yml b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pdns_recursor/molecule/default/group_vars/all/vars.yml
new file mode 100644
index 0000000..c81cf5b
--- /dev/null
+++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pdns_recursor/molecule/default/group_vars/all/vars.yml
@@ -0,0 +1,3 @@
+---
+
+...
diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pdns_recursor/molecule/default/molecule.yml b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pdns_recursor/molecule/default/molecule.yml
new file mode 100644
index 0000000..fda92e3
--- /dev/null
+++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pdns_recursor/molecule/default/molecule.yml
@@ -0,0 +1,55 @@
+---
+
+role_name_check: 1
+
+dependency:
+  name: galaxy
+
+driver:
+  name: docker
+
+platforms:
+  - name: instance
+    image: "ghcr.io/bodsch/docker-ansible/ansible-${DISTRIBUTION:-debian:12}"
+    command: ${MOLECULE_DOCKER_COMMAND:-""}
+    docker_host: "${DOCKER_HOST:-unix:///run/docker.sock}"
+    privileged: true
+    pre_build_image: true
+    cgroupns_mode: host
+    volumes:
+      - /sys/fs/cgroup:/sys/fs/cgroup:rw
+      - /var/lib/containerd
+    capabilities:
+      - SYS_ADMIN
+    tmpfs:
+      - /run
+      - /tmp
+
+provisioner:
+  name: ansible
+  ansible_args:
+    - --diff
+    - -v
+  config_options:
+    defaults:
+      deprecation_warnings: true
+      callback_result_format: yaml
+      callbacks_enabled: profile_tasks
+      gathering: smart
+      fact_caching: jsonfile
+      fact_caching_timeout: 8640
+      fact_caching_connection: ansible_facts
+
+scenario:
+  test_sequence:
+    - destroy
+    - dependency
+    - syntax
+    - create
+    - prepare
+    - converge
+    - verify
+    - destroy
+
+verifier:
+  name: testinfra
diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pdns_recursor/molecule/default/prepare.yml b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pdns_recursor/molecule/default/prepare.yml
new file mode 100644
index 0000000..437874d
--- /dev/null
+++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pdns_recursor/molecule/default/prepare.yml
@@ -0,0 +1,54 @@
+---
+
+- name: information
+  hosts: all
+  gather_facts: true
+
+  pre_tasks:
+    - name: arch- / artixlinux
+      when:
+        - ansible_facts.distribution | lower == 'archlinux' or
+          ansible_facts.os_family | lower == 'artix linux'
+      block:
+        - name: update pacman system
+
ansible.builtin.command: | + pacman --refresh --sync --sysupgrade --noconfirm + register: pacman + changed_when: pacman.rc != 0 + failed_when: pacman.rc != 0 + + - name: create depends service + ansible.builtin.copy: + mode: 0755 + dest: /etc/init.d/net + content: | + #!/usr/bin/openrc-run + true + when: + - ansible_facts.os_family | lower == 'artix linux' + + - name: make sure python3-apt is installed (only debian based) + ansible.builtin.package: + name: + - python3-apt + state: present + when: + - ansible_facts.os_family | lower == 'debian' + + - name: update package cache + become: true + ansible.builtin.package: + update_cache: true + + - name: install dependencies + ansible.builtin.package: + name: + - iproute2 + state: present + + - debug: + msg: + - "os family : {{ ansible_facts.distribution }} ({{ ansible_facts.os_family }})" + - "distribution version : {{ ansible_facts.distribution_major_version }}" + - "ansible version : {{ ansible_version.full }}" + - "python version : {{ ansible_facts.python.version.major }}.{{ ansible_facts.python.version.minor }}" diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pdns_recursor/molecule/default/tests/test_default.py b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pdns_recursor/molecule/default/tests/test_default.py new file mode 100644 index 0000000..a1b1129 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pdns_recursor/molecule/default/tests/test_default.py @@ -0,0 +1,197 @@ +# coding: utf-8 +from __future__ import unicode_literals + +import json +import os + +import pytest +import testinfra.utils.ansible_runner +from ansible.parsing.dataloader import DataLoader +from ansible.template import Templar + +HOST = "instance" + +testinfra_hosts = testinfra.utils.ansible_runner.AnsibleRunner( + os.environ["MOLECULE_INVENTORY_FILE"] +).get_hosts(HOST) + + +def pp_json(json_thing, sort=True, indents=2): + if type(json_thing) is str: + print(json.dumps(json.loads(json_thing), sort_keys=sort, indent=indents)) + else: + print(json.dumps(json_thing, sort_keys=sort, indent=indents)) + return None + + +def base_directory(): + """ """ + cwd = os.getcwd() + + if "group_vars" in os.listdir(cwd): + directory = "../.." + molecule_directory = "." + else: + directory = "." 
+    molecule_directory = f"molecule/{os.environ.get('MOLECULE_SCENARIO_NAME')}"
+
+    return directory, molecule_directory
+
+
+def read_ansible_yaml(file_name, role_name):
+    """ """
+    read_file = None
+
+    for e in ["yml", "yaml"]:
+        test_file = f"{file_name}.{e}"
+        if os.path.isfile(test_file):
+            read_file = test_file
+            break
+
+    return f"file={read_file} name={role_name}"
+
+
+@pytest.fixture()
+def get_vars(host):
+    """
+    parse ansible variables
+    - defaults/main.yml
+    - vars/main.yml
+    - vars/${DISTRIBUTION}.yaml
+    - molecule/${MOLECULE_SCENARIO_NAME}/group_vars/all/vars.yml
+    """
+    base_dir, molecule_dir = base_directory()
+    distribution = host.system_info.distribution
+    operating_system = None
+
+    if distribution in ["debian", "ubuntu"]:
+        operating_system = "debian"
+    elif distribution in ["redhat", "ol", "centos", "rocky", "almalinux"]:
+        operating_system = "redhat"
+    elif distribution in ["arch", "artix"]:
+        operating_system = f"{distribution}linux"
+
+    # print(" -> {} / {}".format(distribution, os))
+    # print(" -> {}".format(base_dir))
+
+    file_defaults = read_ansible_yaml(f"{base_dir}/defaults/main", "role_defaults")
+    file_vars = read_ansible_yaml(f"{base_dir}/vars/main", "role_vars")
+    file_distribution = read_ansible_yaml(
+        f"{base_dir}/vars/{operating_system}", "role_distribution"
+    )
+    file_molecule = read_ansible_yaml(
+        f"{molecule_dir}/group_vars/all/vars", "test_vars"
+    )
+    # file_host_molecule = read_ansible_yaml("{}/host_vars/{}/vars".format(base_dir, HOST), "host_vars")
+
+    defaults_vars = (
+        host.ansible("include_vars", file_defaults)
+        .get("ansible_facts")
+        .get("role_defaults")
+    )
+    vars_vars = (
+        host.ansible("include_vars", file_vars).get("ansible_facts").get("role_vars")
+    )
+    distribution_vars = (
+        host.ansible("include_vars", file_distribution)
+        .get("ansible_facts")
+        .get("role_distribution")
+    )
+    molecule_vars = (
+        host.ansible("include_vars", file_molecule)
+        .get("ansible_facts")
+        .get("test_vars")
+    )
+    # host_vars = host.ansible("include_vars", file_host_molecule).get("ansible_facts").get("host_vars")
+
+    ansible_vars = defaults_vars
+    ansible_vars.update(vars_vars)
+    ansible_vars.update(distribution_vars)
+    ansible_vars.update(molecule_vars)
+    # ansible_vars.update(host_vars)
+
+    templar = Templar(loader=DataLoader(), variables=ansible_vars)
+    result = templar.template(ansible_vars, fail_on_undefined=False)
+
+    return result
+
+
+def test_directories(host, get_vars):
+    """
+    used config directory
+    """
+    pp_json(get_vars)
+
+    directories = [
+        "/etc/powerdns/recursor.d",
+        "/var/cache/ansible/pdns_recursor",
+    ]
+
+    for dirs in directories:
+        d = host.file(dirs)
+        assert d.is_directory
+
+
+def test_files(host, get_vars):
+    """
+    created config files
+    """
+    files = [
+        "/etc/powerdns/recursor.conf",
+        "/usr/lib/systemd/system/pdns-recursor.service",
+        "/usr/sbin/pdns_recursor",
+    ]
+
+    for _file in files:
+        f = host.file(_file)
+        assert f.is_file
+
+
+# def test_user(host, get_vars):
+#     """
+#     created user
+#     """
+#     shell = '/bin/false'
+#
+#     distribution = host.system_info.distribution
+#
+#     if distribution in ['centos', 'redhat', 'ol']:
+#         shell = "/sbin/nologin"
+#     elif distribution == "arch":
+#         shell = "/usr/bin/nologin"
+#
+#     user_name = "mysql"
+#     u = host.user(user_name)
+#     g = host.group(user_name)
+#
+#     assert g.exists
+#     assert u.exists
+#     assert user_name in u.groups
+#     assert u.shell == shell
+
+
+def test_service_running_and_enabled(host, get_vars):
+    """
+    running service
+    """
+    service_name = "pdns-recursor"
+
+    service =
host.service(service_name) + assert service.is_running + assert service.is_enabled + + +def test_listening_socket(host, get_vars): + """ """ + listening = host.socket.get_listening_sockets() + + for i in listening: + print(i) + + listen = [] + listen.append(f"tcp://127.0.0.1:53") + listen.append(f"udp://127.0.0.1:53") + + for spec in listen: + socket = host.socket(spec) + assert socket.is_listening diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pdns_recursor/tasks/configure.yml b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pdns_recursor/tasks/configure.yml new file mode 100644 index 0000000..bb2267c --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pdns_recursor/tasks/configure.yml @@ -0,0 +1,165 @@ +--- + +- name: detect powerdns-recursor version + become: true + bodsch.dns.recursor_version: + register: pdns_recursor_version + check_mode: false + ignore_errors: true + +- name: create custom fact file + bodsch.core.facts: + name: pdns_recursor + facts: + full_version: "{{ pdns_recursor_version.full_version }}" + version: "{{ pdns_recursor_version.version }}" + +- name: merge powerdns-recursor configuration between defaults and custom + ansible.builtin.set_fact: + pdns_recursor_carbon: "{{ pdns_recursor_defaults_carbon | combine(pdns_recursor_carbon, recursive=True) }}" + pdns_recursor_dnssec: "{{ pdns_recursor_defaults_dnssec | combine(pdns_recursor_dnssec, recursive=True) }}" + pdns_recursor_ecs: "{{ pdns_recursor_defaults_ecs | combine(pdns_recursor_ecs, recursive=True) }}" + pdns_recursor_incoming: "{{ pdns_recursor_defaults_incoming | combine(pdns_recursor_incoming, recursive=True) }}" + pdns_recursor_logging: "{{ pdns_recursor_defaults_logging | combine(pdns_recursor_logging, recursive=True) }}" + pdns_recursor_nod: "{{ pdns_recursor_defaults_nod | combine(pdns_recursor_nod, recursive=True) }}" + pdns_recursor_outgoing: "{{ pdns_recursor_defaults_outgoing | combine(pdns_recursor_outgoing, recursive=True) }}" + pdns_recursor_packetcache: "{{ pdns_recursor_defaults_packetcache | combine(pdns_recursor_packetcache, recursive=True) }}" + pdns_recursor_recordcache: "{{ pdns_recursor_defaults_recordcache | combine(pdns_recursor_recordcache, recursive=True) }}" + pdns_recursor_recursor: "{{ pdns_recursor_defaults_recursor | combine(pdns_recursor_recursor, recursive=True) }}" + pdns_recursor_snmp: "{{ pdns_recursor_defaults_snmp | combine(pdns_recursor_snmp, recursive=True) }}" + pdns_recursor_webservice: "{{ pdns_recursor_defaults_webservice | combine(pdns_recursor_webservice, recursive=True) }}" + +- name: create the powerdns Recursor 'include-dir' directory + ansible.builtin.file: + name: "{{ pdns_recursor_recursor.include_dir }}" + state: directory + owner: "{{ pdns_recursor_owner }}" + group: "{{ pdns_recursor_group }}" + mode: "0755" + when: + - pdns_recursor_recursor.include_dir | default('') | string | length > 0 + register: _pdns_recursor_configuration_include_dir + +- name: generate the powerdns-recursor configuration + ansible.builtin.template: + src: etc/powerdns/recursor.conf.j2 + dest: "{{ pdns_recursor_config_dir }}/recursor.conf" + owner: "{{ pdns_recursor_owner }}" + group: "{{ pdns_recursor_group }}" + mode: "0640" + backup: true + notify: + - restart powerdns-recursor + + + +# - block: +# +# - name: Ensure the PowerDNS Recursor drop-in unit overrides directory exists (systemd) +# file: +# name: "/etc/systemd/system/{{ pdns_recursor_service_name }}.service.d" +# state: directory +# 
owner: root +# group: root +# +# - name: Override the PowerDNS Recursor unit (systemd) +# template: +# src: "override-service.systemd.conf.j2" +# dest: "/etc/systemd/system/{{ pdns_recursor_service_name }}.service.d/override.conf" +# owner: root +# group: root +# when: pdns_recursor_service_overrides | length > 0 +# register: _pdns_recursor_override_unit +# +# - name: Reload systemd +# command: systemctl daemon-reload +# when: not pdns_recursor_disable_handlers +# and _pdns_recursor_override_unit.changed +# +# when: ansible_facts.service_mgr == "systemd" +# +# - name: Ensure that the PowerDNS Recursor configuration directory exists +# file: +# name: "{{ pdns_recursor_config_dir }}" +# state: directory +# owner: "{{ pdns_recursor_file_owner }}" +# group: "{{ pdns_recursor_file_group }}" +# mode: 0750 +# +# - name: Generate the PowerDNS Recursor configuration +# template: +# src: recursor.conf.j2 +# dest: "{{ pdns_recursor_config_dir }}/{{ pdns_recursor_config_file }}" +# owner: "{{ pdns_recursor_file_owner }}" +# group: "{{ pdns_recursor_file_group }}" +# mode: 0640 +# register: _pdns_recursor_configuration +# +# - name: Ensure that the PowerDNS Recursor 'include-dir' directory exists +# file: +# name: "{{ pdns_recursor_config['include-dir'] }}" +# state: directory +# owner: "{{ pdns_recursor_file_owner }}" +# group: "{{ pdns_recursor_file_group }}" +# mode: "{{ pdns_recursor_config_include_dir_mode }}" +# when: "pdns_recursor_config['include-dir'] is defined" +# register: _pdns_recursor_configuration_include_dir +# +# - block: +# +# - name: Ensure that the PowerDNS Recursor configuration from-files directory exists +# ansible.builtin.file: +# name: "{{ item.dest | dirname }}" +# state: directory +# owner: "{{ pdns_recursor_file_owner }}" +# group: "{{ pdns_recursor_file_group }}" +# mode: "{{ pdns_recursor_config_from_files_dir_mode }}" +# loop: "{{ pdns_recursor_config_from_files }}" +# +# - name: Copy the PowerDNS Recursor configuration from-files files +# ansible.builtin.copy: +# content: "{{ item.content | default(omit) }}" +# src: "{{ item.src | default(omit) }}" +# dest: "{{ item.dest }}" +# owner: "{{ pdns_recursor_file_owner }}" +# group: "{{ pdns_recursor_file_group }}" +# mode: "{{ pdns_recursor_config_from_files_dir_mode }}" +# loop: "{{ pdns_recursor_config_from_files }}" +# register: _pdns_recursor_configuration_from_files +# +# when: "pdns_recursor_config_from_files | length > 0" +# +# - name: Generate the PowerDNS Recursor Lua config-file +# copy: +# dest: "{{ pdns_recursor_config_lua }}" +# content: "{{ pdns_recursor_config_lua_file_content }}" +# owner: "{{ pdns_recursor_file_owner }}" +# group: "{{ pdns_recursor_file_group }}" +# mode: 0640 +# register: _pdns_recursor_lua_file_configuraton +# when: "pdns_recursor_config_lua_file_content | length > 0" +# +# - name: Generate PowerDNS Recursor Lua dns-script +# copy: +# dest: "{{ pdns_recursor_config_dns_script }}" +# content: "{{ pdns_recursor_config_dns_script_file_content }}" +# owner: "{{ pdns_recursor_file_owner }}" +# group: "{{ pdns_recursor_file_group }}" +# mode: 0640 +# register: _pdns_recursor_dns_script_configuration +# when: "pdns_recursor_config_dns_script_file_content | length > 0" +# +# - name: Restart PowerDNS Recursor +# service: +# name: "{{ pdns_recursor_service_name }}" +# state: restarted +# sleep: 1 +# when: not pdns_recursor_disable_handlers +# and pdns_recursor_service_state != 'stopped' +# and (_pdns_recursor_override_unit.changed +# or _pdns_recursor_configuration_include_dir.changed +# or 
_pdns_recursor_configuration.changed +# or _pdns_recursor_configuration_from_files.changed +# or _pdns_recursor_lua_file_configuraton.changed +# or _pdns_recursor_dns_script_configuration.changed) +# diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pdns_recursor/tasks/install.yml b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pdns_recursor/tasks/install.yml new file mode 100644 index 0000000..2a5891f --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pdns_recursor/tasks/install.yml @@ -0,0 +1,25 @@ +--- + +- name: install powerdns-recursor + become: true + ansible.builtin.package: + name: "{{ pdns_recursor_packages }}" + state: present + tags: + - pdns_recursor + +- name: detect powerdns-recursor version + become: true + bodsch.dns.recursor_version: + register: pdns_recursor_version + check_mode: false + ignore_errors: true + +- name: create custom fact file + bodsch.core.facts: + name: pdns_recursor + facts: + full_version: "{{ pdns_recursor_version.full_version }}" + version: "{{ pdns_recursor_version.version }}" + +... diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pdns_recursor/tasks/main.yml b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pdns_recursor/tasks/main.yml new file mode 100644 index 0000000..af539a1 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pdns_recursor/tasks/main.yml @@ -0,0 +1,15 @@ +--- + +- name: prepare + ansible.builtin.include_tasks: prepare.yml + +- name: install + ansible.builtin.include_tasks: install.yml + +- name: configure + ansible.builtin.include_tasks: configure.yml + +- name: service + ansible.builtin.include_tasks: service.yml + +... diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pdns_recursor/tasks/prepare.yml b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pdns_recursor/tasks/prepare.yml new file mode 100644 index 0000000..4306185 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pdns_recursor/tasks/prepare.yml @@ -0,0 +1,39 @@ +--- + +- name: include OS specific configuration ({{ ansible_facts.distribution }} ({{ ansible_facts.os_family }}) {{ ansible_facts.distribution_major_version }}) + ansible.builtin.include_vars: "{{ lookup('first_found', params) }}" + vars: + params: + paths: + - "vars" + files: + # eg. debian-10 / ubuntu-20.04 / centos-8 / oraclelinux-8 + - "{{ ansible_facts.distribution | lower }}-{{ ansible_facts.distribution_major_version }}.yml" + # eg. archlinux-systemd / archlinux-openrc + - "{{ ansible_facts.distribution | lower }}-{{ ansible_facts.service_mgr | lower }}.yml" + # eg. debian / ubuntu / centos / oraclelinux + - "{{ ansible_facts.distribution | lower }}.yml" + # eg. redhat / debian / archlinux + - "{{ ansible_facts.os_family | lower }}.yml" + - default.yaml + skip: true + +- name: detect ansible check_mode + bodsch.core.check_mode: + register: _check_mode + +- name: define running_in_check_mode + ansible.builtin.set_fact: + running_in_check_mode: '{{ _check_mode.check_mode }}' + +- name: install dependency + ansible.builtin.package: + name: "{{ pdns_recursor_requirements }}" + state: present + when: + - pdns_recursor_requirements | default([]) | count > 0 + +- name: get latest system information + ansible.builtin.setup: + +... 
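A note for readers tracing the testinfra suites above: the get_vars() fixture layers four include_vars results, later sources winning. A minimal Python sketch of that precedence, with invented values (not collection code):

# Precedence sketch for get_vars(): defaults/main.yml < vars/main.yml
# < vars/<os>.yml < molecule group_vars. All values below are invented.
defaults_vars = {"pdns_recursor_service": {"name": "pdns-recursor", "state": "started"}}
vars_vars = {}
distribution_vars = {"pdns_recursor_packages": ["pdns-recursor"]}
molecule_vars = {"pdns_recursor_service": {"state": "stopped"}}

ansible_vars = dict(defaults_vars)
for layer in (vars_vars, distribution_vars, molecule_vars):
    ansible_vars.update(layer)  # later layers win; update() is shallow

print(ansible_vars["pdns_recursor_service"])  # {'state': 'stopped'} - 'name' is gone

Because dict.update() is shallow, a molecule override replaces a whole top-level key; the role's own set_fact tasks instead deep-merge with combine(recursive=True).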
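Likewise, tasks/prepare.yml above resolves OS-specific variables with a first_found lookup. A rough, hypothetical Python equivalent of the candidate order (facts and paths are illustrative only):

import os

def first_found_vars(facts, base="vars"):
    # Candidate order mirrors tasks/prepare.yml; 'skip: true' means
    # finding no file at all is an accepted outcome.
    candidates = [
        f"{facts['distribution']}-{facts['distribution_major_version']}.yml",
        f"{facts['distribution']}-{facts['service_mgr']}.yml",
        f"{facts['distribution']}.yml",
        f"{facts['os_family']}.yml",
        "default.yaml",
    ]
    for name in candidates:
        path = os.path.join(base, name.lower())
        if os.path.isfile(path):
            return path
    return None

# On a hypothetical Debian 12 host: vars/debian-12.yml wins if present,
# then vars/debian-systemd.yml, vars/debian.yml, ..., default.yaml.
print(first_found_vars({"distribution": "Debian", "distribution_major_version": "12",
                        "service_mgr": "systemd", "os_family": "Debian"}))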
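The recursor.d templates that follow all share one pattern: Recursor >= 5 gets a structured YAML section, anything older gets flat key=value settings. A self-contained sketch of that switch in plain jinja2, with a simplified stand-in for Ansible's version_compare test (this is not the collection's rendering path):

import operator
from jinja2 import Environment

def version_compare(value, version, op=">="):
    # simplified stand-in for Ansible's 'version_compare' test
    ops = {">=": operator.ge, "<": operator.lt}
    parse = lambda v: tuple(int(p) for p in str(v).split(".") if p.isdigit())
    return ops[op](parse(value), parse(version))

env = Environment(trim_blocks=True, lstrip_blocks=True)
env.tests["version_compare"] = version_compare

template = env.from_string(
    "{% if full_version is version_compare('5', '>=') %}\n"
    "carbon:\n  interval: {{ interval }}\n"
    "{% elif full_version is version_compare('5', '<') %}\n"
    "carbon-interval={{ interval }}\n"
    "{% endif %}\n"
)

print(template.render(full_version="4.8.4", interval=30))  # carbon-interval=30
print(template.render(full_version="5.0.2", interval=30))  # carbon: / interval: 30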
diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pdns_recursor/tasks/service.yml b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pdns_recursor/tasks/service.yml new file mode 100644 index 0000000..e7ef8c2 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pdns_recursor/tasks/service.yml @@ -0,0 +1,15 @@ +--- + +- name: merge powerdns configuration between defaults and custom + ansible.builtin.set_fact: + pdns_recursor_service: "{{ pdns_recursor_defaults_service | combine(pdns_recursor_service, recursive=True) }}" + +- name: Set the status of the PowerDNS Recursor service + ansible.builtin.service: + name: "{{ pdns_recursor_service.name }}" + state: "{{ pdns_recursor_service.state }}" + enabled: "{{ pdns_recursor_service.enabled }}" + tags: + - service + +... diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pdns_recursor/templates/etc/powerdns/recursor.conf.j2 b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pdns_recursor/templates/etc/powerdns/recursor.conf.j2 new file mode 100644 index 0000000..ba1acac --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pdns_recursor/templates/etc/powerdns/recursor.conf.j2 @@ -0,0 +1,18 @@ +#jinja2: trim_blocks: True, lstrip_blocks: True +# {{ ansible_managed }} + +# Version: {{ pdns_recursor_version.full_version }} + +{% include('recursor.d/carbon.j2') -%} +{%- include('recursor.d/dnssec.j2') -%} +{%- include('recursor.d/ecs.j2') -%} +{%- include('recursor.d/incoming.j2') -%} +{%- include('recursor.d/logging.j2') -%} +{%- include('recursor.d/nod.j2') -%} +{%- include('recursor.d/outgoing.j2') -%} +{%- include('recursor.d/packetcache.j2') -%} +{%- include('recursor.d/recordcache.j2') -%} +{%- include('recursor.d/recursor.j2') -%} +{%- include('recursor.d/snmp.j2') -%} +{%- include('recursor.d/webservice.j2') -%} +{##} diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pdns_recursor/templates/etc/powerdns/recursor.d/carbon.j2 b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pdns_recursor/templates/etc/powerdns/recursor.d/carbon.j2 new file mode 100644 index 0000000..f2e1a7d --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pdns_recursor/templates/etc/powerdns/recursor.d/carbon.j2 @@ -0,0 +1,44 @@ +{% set values = pdns_recursor_carbon | bodsch.core.remove_empty_values %} +{% if values | count > 0 %} +## -- carbon ------------------------------------------------------------------ + {% if pdns_recursor_version.full_version is version_compare('5', '>=') %} +carbon: + {{ values | to_nice_yaml(indent=2,sort_keys=False) | indent(2, False) }} + {% elif pdns_recursor_version.full_version is version_compare('5', '<') %} + {% set v = values.get("instance", None) %} + {% if v and v | string | length > 0 %} +carbon-instance={{ v }} + {% endif %} + {% set v = values.get("interval", None) %} + {% if v and v | string | length > 0 %} +carbon-interval={{ v }} + {% endif %} + {% set v = values.get("ns", None) %} + {% if v and v | string | length > 0 %} +carbon-namespace={{ v }} + {% endif %} + {% set v = values.get("ourname", None) %} + {% if v and v | string | length > 0 %} +carbon-ourname={{ v }} + {% endif %} + {% set v = values.get("server", None) %} + {% if v and v | count > 0 %} +carbon-server={{ v | join(',') }} + {% endif %} + + {% endif %} +{% endif %} +{# +######### SECTION carbon ######### +carbon: +##### If 
set overwrites the instance name default +# instance: recursor +##### Number of seconds between carbon (graphite) updates +# interval: 30 +##### If set overwrites the first part of the carbon string +# ns: pdns +##### If set, overrides our reported hostname for carbon stats +# ourname: '' +##### If set, send metrics in carbon (graphite) format to this server IP address +# server: [] +#} diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pdns_recursor/templates/etc/powerdns/recursor.d/dnssec.j2 b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pdns_recursor/templates/etc/powerdns/recursor.d/dnssec.j2 new file mode 100644 index 0000000..d6e580c --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pdns_recursor/templates/etc/powerdns/recursor.d/dnssec.j2 @@ -0,0 +1,71 @@ +{% set values = pdns_recursor_dnssec | bodsch.core.remove_empty_values %} +{% if values | count > 0 %} +## -- dnssec ------------------------------------------------------------------ + {% if pdns_recursor_version.full_version is version_compare('5', '>=') %} +dnssec: + {{ values | to_nice_yaml(indent=2,sort_keys=False) | indent(2, False) }} + {# ----------------------------------------------------------------------- #} + {% elif pdns_recursor_version.full_version is version_compare('5', '<') %} + {% if pdns_recursor_version.full_version is version_compare('4.7', '>=') %} +aggressive-cache-max-nsec3-hash-cost={{ values.get("aggressive_cache_max_nsec3_hash_cost") }} + {% endif %} +aggressive-nsec-cache-size={{ values.get("aggressive_nsec_cache_size") }} +dnssec={{ values.get("validation") }} +dnssec-log-bogus={{ values.get("log_bogus") | bodsch.core.config_bool(true_as='yes', false_as='no') }} + {% if pdns_recursor_version.full_version is version_compare('4.7', '>=') %} +max-dnskeys={{ values.get("max_dnskeys") }} +max-ds-per-zone={{ values.get("max_ds_per_zone") }} +max-nsec3-hash-computations-per-query={{ values.get("max_nsec3_hash_computations_per_query") }} +max-nsec3s-per-record={{ values.get("max_nsec3s_per_record") }} +max-rrsigs-per-record={{ values.get("max_rrsigs_per_record") }} +max-signature-validations-per-query={{ values.get("max_signature_validations_per_query") }} + {% endif %} +allow-trust-anchor-query={{ values.get("allow_trust_anchor_query", False) | bool | bodsch.core.config_bool(true_as='yes', false_as='no') }} + {% if values.get("x_dnssec_names", []) | count > 0 %} +x-dnssec-names={{ values.get("x_dnssec_names") | join(', ') }} + {% endif %} + + {% endif %} +{% endif %} +{# +######### SECTION dnssec ######### +dnssec: +##### Maximum estimated NSEC3 cost for a given query to consider aggressive use of the NSEC3 cache +# aggressive_cache_max_nsec3_hash_cost: 150 +##### The minimum expected hit ratio to store NSEC3 records into the aggressive cache +# aggressive_cache_min_nsec3_hit_ratio: 2000 +##### The number of records to cache in the aggressive cache. 
If set to a value greater than 0, and DNSSEC processing or validation is enabled, the recursor will cache NSEC and NSEC3 records to generate negative answers, as defined in rfc8198 +# aggressive_nsec_cache_size: 100000 +##### List of DNSSEC algorithm numbers that are considered unsupported +# disabled_algorithms: [] +##### Log DNSSEC bogus validations +# log_bogus: false +##### Maximum number of DNSKEYs with the same algorithm and tag to consider when validating a given record +# max_dnskeys: 2 +##### Maximum number of DS records to consider per zone +# max_ds_per_zone: 8 +##### Maximum number of NSEC3 hashes that we are willing to compute during DNSSEC validation, per incoming query +# max_nsec3_hash_computations_per_query: 600 +##### Maximum number of NSEC3s to consider when validating a given denial of existence +# max_nsec3s_per_record: 10 +##### Maximum number of RRSIGs to consider when validating a given record +# max_rrsigs_per_record: 2 +##### Maximum number of RRSIG signatures we are willing to validate per incoming query +# max_signature_validations_per_query: 30 +##### A sequence of negative trust anchors +# negative_trustanchors: [] +##### Maximum number of iterations allowed for an NSEC3 record +# nsec3_max_iterations: 50 +##### Allow the signature inception to be off by this number of seconds +# signature_inception_skew: 60 +##### A path to a zone file containing trust anchors +# trustanchorfile: '' +##### Interval (in hours) to read the trust anchors file +# trustanchorfile_interval: 24 +##### Sequence of trust anchors +# trustanchors: [] +##### DNSSEC mode: off/process-no-validate/process (default)/log-fail/validate +# validation: process +##### Collect DNSSEC statistics for names or suffixes in this list in separate x-dnssec counters +# x_dnssec_names: [] +#} diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pdns_recursor/templates/etc/powerdns/recursor.d/ecs.j2 b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pdns_recursor/templates/etc/powerdns/recursor.d/ecs.j2 new file mode 100644 index 0000000..3b3ade8 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pdns_recursor/templates/etc/powerdns/recursor.d/ecs.j2 @@ -0,0 +1,55 @@ +{% set values = pdns_recursor_ecs | bodsch.core.remove_empty_values %} +{% if values | count > 0 %} +## -- ecs --------------------------------------------------------------------- + {% if pdns_recursor_version.full_version is version_compare('5', '>=') %} +ecs: + {{ values | to_nice_yaml(indent=2,sort_keys=False) | indent(2, False) }} + {% elif pdns_recursor_version.full_version is version_compare('5', '<') %} +ecs-add-for={{ values.get("add_for") | join(', ') }} +ecs-cache-limit-ttl={{ values.get("cache_limit_ttl") }} +ecs-ipv4-bits={{ values.get("ipv4_bits") }} +ecs-ipv4-cache-bits={{ values.get("ipv4_cache_bits") }} +ecs-ipv4-never-cache={{ values.get("ipv4_never_cache", False) | bool | bodsch.core.config_bool(true_as='yes', false_as='no') }} +ecs-ipv6-bits={{ values.get("ipv6_bits") }} +ecs-ipv6-cache-bits={{ values.get("ipv6_cache_bits") }} +ecs-ipv6-never-cache={{ values.get("ipv6_never_cache", False) | bool | bodsch.core.config_bool(true_as='yes', false_as='no') }} +ecs-minimum-ttl-override={{ values.get("minimum_ttl_override") }} +ecs-scope-zero-address={{ values.get("scope_zero_address") }} + + {% endif %} +{% endif %} +{# +######### SECTION ecs ######### +ecs: +##### List of client netmasks for which EDNS Client Subnet will be added +# add_for: +# - 
0.0.0.0/0
+# - ::/0
+# - '!127.0.0.0/8'
+# - '!10.0.0.0/8'
+# - '!100.64.0.0/10'
+# - '!169.254.0.0/16'
+# - '!192.168.0.0/16'
+# - '!172.16.0.0/12'
+# - '!::1/128'
+# - '!fc00::/7'
+# - '!fe80::/10'
+##### Minimum TTL to cache ECS response
+# cache_limit_ttl: 0
+##### Number of bits of IPv4 address to pass for EDNS Client Subnet
+# ipv4_bits: 24
+##### Maximum number of bits of IPv4 mask to cache ECS response
+# ipv4_cache_bits: 24
+##### If we should never cache IPv4 ECS responses
+# ipv4_never_cache: false
+##### Number of bits of IPv6 address to pass for EDNS Client Subnet
+# ipv6_bits: 56
+##### Maximum number of bits of IPv6 mask to cache ECS response
+# ipv6_cache_bits: 56
+##### If we should never cache IPv6 ECS responses
+# ipv6_never_cache: false
+##### The minimum TTL for records in ECS-specific answers
+# minimum_ttl_override: 1
+##### Address to send to allow-listed authoritative servers for incoming queries with ECS prefix-length source of 0
+# scope_zero_address: ''
+#}
diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pdns_recursor/templates/etc/powerdns/recursor.d/incoming.j2 b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pdns_recursor/templates/etc/powerdns/recursor.d/incoming.j2
new file mode 100644
index 0000000..c8e850d
--- /dev/null
+++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pdns_recursor/templates/etc/powerdns/recursor.d/incoming.j2
@@ -0,0 +1,124 @@
+{% set values = pdns_recursor_incoming | bodsch.core.remove_empty_values %}
+{% if values | count > 0 %}
+## -- incoming ----------------------------------------------------------------
+  {% if pdns_recursor_version.full_version is version_compare('5', '>=') %}
+incoming:
+  {{ values | to_nice_yaml(indent=2,sort_keys=False) | indent(2, False) }}
+  {% elif pdns_recursor_version.full_version is version_compare('5', '<') %}
+
+allow-from={{ values.get("allow_from") | join(', ') }}
+allow-from-file={{ values.get("allow_from_file") }}
+  {% if values.get("allow_notify_for", None) %}
+allow-notify-for={{ values.get("allow_notify_for") | join(', ') }}
+  {% endif %}
+allow-notify-for-file={{ values.get("allow_notify_for_file") }}
+  {% if values.get("allow_notify_from", None) %}
+allow-notify-from={{ values.get("allow_notify_from") | join(', ') }}
+  {% endif %}
+allow-notify-from-file={{ values.get("allow_notify_from_file") }}
+{# allow-trust-anchor-query is rendered by recursor.d/dnssec.j2 #}
+distribution-load-factor={{ values.get("distribution_load_factor") }}
+distribution-pipe-buffer-size={{ values.get("distribution_pipe_buffer_size") }}
+distributor-threads={{ values.get("distributor_threads") }}
+edns-outgoing-bufsize={{ values.get("edns_bufsize", 1232) }}
+edns-padding-from={{ values.get("edns_padding_from") | join(', ') }}
+edns-padding-mode={{ values.get("edns_padding_mode") }}
+edns-padding-tag={{ values.get("edns_padding_tag") }}
+gettag-needs-edns-options={{ values.get("gettag_needs_edns_options", False) | bool | bodsch.core.config_bool(true_as='yes', false_as='no') }}
+local-address={{ values.get("listen") | join(', ') }}
+local-port={{ values.get("port") }}
+max-tcp-clients={{ values.get("max_tcp_clients") }}
+max-tcp-per-client={{ values.get("max_tcp_per_client") }}
+max-tcp-queries-per-connection={{ values.get("max_tcp_queries_per_connection") }}
+max-udp-queries-per-round={{ values.get("max_udp_queries_per_round") }}
+non-local-bind={{ values.get("non_local_bind", False) | bool | bodsch.core.config_bool(true_as='yes', false_as='no') }}
+pdns-distributes-queries={{
values.get("pdns_distributes_queries", False) | bool | bodsch.core.config_bool(true_as='yes', false_as='no') }} + {% if values.get("proxy_protocol_from", None) %} +proxy-protocol-from={{ values.get("proxy_protocol_from") | join(', ') }} + {% endif %} +proxy-protocol-maximum-size={{ values.get("proxy_protocol_maximum_size") }} +tcp-fast-open={{ values.get("tcp_fast_open") }} +client-tcp-timeout={{ values.get("tcp_timeout") }} +udp-truncation-threshold={{ values.get("udp_truncation_threshold") }} +use-incoming-edns-subnet={{ values.get("use_incoming_edns_subnet", False) | bool | bodsch.core.config_bool(true_as='yes', false_as='no') }} + + {% endif %} +{% endif %} +{# +######### SECTION incoming ######### +incoming: +##### If set, only allow these comma separated netmasks to recurse +# allow_from: +# - 127.0.0.0/8 +# - 10.0.0.0/8 +# - 100.64.0.0/10 +# - 169.254.0.0/16 +# - 192.168.0.0/16 +# - 172.16.0.0/12 +# - ::1/128 +# - fc00::/7 +# - fe80::/10 +##### If set, load allowed netmasks from this file +# allow_from_file: '' +##### Allow 'no recursion desired (RD=0)' queries. +# allow_no_rd: false +##### If set, NOTIFY requests for these zones will be allowed +# allow_notify_for: [] +##### If set, load NOTIFY-allowed zones from this file +# allow_notify_for_file: '' +##### If set, NOTIFY requests from these comma separated netmasks will be allowed +# allow_notify_from: [] +##### If set, load NOTIFY-allowed netmasks from this file +# allow_notify_from_file: '' +##### The load factor used when PowerDNS is distributing queries to worker threads +# distribution_load_factor: 0.0 +##### Size in bytes of the internal buffer of the pipe used by the distributor to pass incoming queries to a worker thread +# distribution_pipe_buffer_size: 0 +##### Launch this number of distributor threads, distributing queries to other threads +# distributor_threads: 0 +##### List of netmasks (proxy IP in case of proxy-protocol presence, client IP otherwise) for which EDNS padding will be enabled in responses, provided that 'edns-padding-mode' applies +# edns_padding_from: [] +##### Whether to add EDNS padding to all responses ('always') or only to responses for queries containing the EDNS padding option ('padded-queries-only', the default). In both modes, padding will only be added to responses for queries coming from 'setting-edns-padding-from' sources +# edns_padding_mode: padded-queries-only +##### Packetcache tag associated to responses sent with EDNS padding, to prevent sending these to clients for which padding is not enabled. +# edns_padding_tag: 7830 +##### If EDNS Options should be extracted before calling the gettag() hook +# gettag_needs_edns_options: false +##### IP addresses to listen on, separated by spaces or commas. Also accepts ports. 
+# listen: +# - 127.0.0.1 +##### Maximum number of requests handled concurrently per TCP connection +# max_concurrent_requests_per_tcp_connection: 10 +##### Maximum number of simultaneous TCP clients +# max_tcp_clients: 1024 +##### If set, maximum number of TCP sessions per client (IP address) +# max_tcp_per_client: 0 +##### If set, maximum number of TCP queries in a TCP connection +# max_tcp_queries_per_connection: 0 +##### Maximum number of UDP queries processed per recvmsg() round, before returning back to normal processing +# max_udp_queries_per_round: 10000 +##### Enable binding to non-local addresses by using FREEBIND / BINDANY socket options +# non_local_bind: false +##### If PowerDNS itself should distribute queries over threads +# pdns_distributes_queries: false +##### port to listen on +# port: 53 +##### A Proxy Protocol header should not be used for these listen addresses. +# proxy_protocol_exceptions: [] +##### A Proxy Protocol header is required from these subnets +# proxy_protocol_from: [] +##### The maximum size of a proxy protocol payload, including the TLV values +# proxy_protocol_maximum_size: 512 +##### Sequence of ProxyMapping +# proxymappings: [] +##### Enable SO_REUSEPORT allowing multiple recursors processes to listen to 1 address +# reuseport: true +##### Enable TCP Fast Open support on the listening sockets, using the supplied numerical value as the queue size +# tcp_fast_open: 0 +##### Timeout in seconds when talking to TCP clients +# tcp_timeout: 2 +##### Maximum UDP response size before we truncate +# udp_truncation_threshold: 1232 +##### Pass along received EDNS Client Subnet information +# use_incoming_edns_subnet: false +#} diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pdns_recursor/templates/etc/powerdns/recursor.d/logging.j2 b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pdns_recursor/templates/etc/powerdns/recursor.d/logging.j2 new file mode 100644 index 0000000..02523e2 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pdns_recursor/templates/etc/powerdns/recursor.d/logging.j2 @@ -0,0 +1,60 @@ +{% set values = pdns_recursor_logging | bodsch.core.remove_empty_values %} +{% if values | count > 0 %} +## -- logging ----------------------------------------------------------------- + {% if pdns_recursor_version.full_version is version_compare('5', '>=') %} +logging: + {{ values | to_nice_yaml(indent=2,sort_keys=False) | indent(2, False) }} + {# ----------------------------------------------------------------------- #} + {% elif pdns_recursor_version.full_version is version_compare('5', '<') %} +disable-syslog={{ values.get("disable_syslog") | bodsch.core.config_bool(true_as='yes', false_as='no') }} +log-common-errors={{ values.get("common_errors") | bodsch.core.config_bool(true_as='yes', false_as='no') }} +log-rpz-changes={{ values.get("rpz_changes") | bodsch.core.config_bool(true_as='yes', false_as='no') }} +log-timestamp={{ values.get("timestamp") | bodsch.core.config_bool(true_as='yes', false_as='no') }} +logging-facility={{ values.get("facility") }} +loglevel={{ values.get("loglevel") }} +quiet={{ values.get("quiet") | bodsch.core.config_bool(true_as='yes', false_as='no') }} +statistics-interval={{ values.get("statistics_interval") }} +structured-logging={{ values.get("structured_logging") | bodsch.core.config_bool(true_as='yes', false_as='no') }} + {% if pdns_recursor_version.full_version is version_compare('4.7', '>=') %} +structured-logging-backend={{ 
values.get("structured_logging_backend") }} + {% endif %} +trace={{ values.get("trace") }} + + {% endif %} +{% endif %} +{# +######### SECTION logging ######### +logging: +##### If we should log rather common errors +# common_errors: false +##### Disable logging to syslog, useful when running inside a supervisor that logs stderr +# disable_syslog: false +##### Sequence of dnstap servers +# dnstap_framestream_servers: [] +##### Sequence of NOD dnstap servers +# dnstap_nod_framestream_servers: [] +##### Facility to log messages as. 0 corresponds to local0 +# facility: '' +##### Amount of logging. Higher is more. Do not set below 3 +# loglevel: 6 +##### List of outgoing protobuf servers +# outgoing_protobuf_servers: [] +##### Sequence of protobuf servers +# protobuf_servers: [] +##### Compute the latency of queries in protobuf messages by using the timestamp set by the kernel when the query was received (when available) +# protobuf_use_kernel_timestamp: false +##### Suppress logging of questions and answers +# quiet: true +##### Log additions and removals to RPZ zones at Info level +# rpz_changes: false +##### Number of seconds between printing of recursor statistics, 0 to disable +# statistics_interval: 1800 +##### Prefer structured logging +# structured_logging: true +##### Structured logging backend +# structured_logging_backend: default +##### Print timestamps in log lines, useful to disable when running with a tool that timestamps stderr already +# timestamp: true +##### if we should output heaps of logging. set to 'fail' to only log failing domains +# trace: no +#} diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pdns_recursor/templates/etc/powerdns/recursor.d/nod.j2 b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pdns_recursor/templates/etc/powerdns/recursor.d/nod.j2 new file mode 100644 index 0000000..d8e362b --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pdns_recursor/templates/etc/powerdns/recursor.d/nod.j2 @@ -0,0 +1,46 @@ +{% set values = pdns_recursor_nod | bodsch.core.remove_empty_values %} +{% if values | count > 0 %} +## -- nod --------------------------------------------------------------------- + {% if pdns_recursor_version.full_version is version_compare('5', '>=') %} +nod: + {{ values | to_nice_yaml(indent=2,sort_keys=False) | indent(2, False) }} + {% elif pdns_recursor_version.full_version is version_compare('5', '<') %} + + {% endif %} +{% endif %} +{# +######### SECTION nod ######### +nod: +##### Size of the DB used to track new domains in terms of number of cells. Defaults to 67108864 +# db_size: 67108864 +##### Interval (in seconds) to write the NOD and UDR DB snapshots +# db_snapshot_interval: 600 +##### Persist new domain tracking data here to persist between restarts +# history_dir: /usr/var/lib/pdns-recursor/nod +##### List of domains (and implicitly all subdomains) which will never be considered a new domain +# ignore_list: [] +##### File with a list of domains (and implicitly all subdomains) which will never be considered a new domain +# ignore_list_file: '' +##### Log newly observed domains. +# log: true +##### Perform a DNS lookup newly observed domains as a subdomain of the configured domain +# lookup: '' +##### If protobuf is configured, the tag to use for messages containing newly observed domains. Defaults to 'pdns-nod' +# pb_tag: pdns-nod +##### Track newly observed domains (i.e. never seen before). 
+# tracking: false +##### Size of the DB used to track unique responses in terms of number of cells. Defaults to 67108864 +# unique_response_db_size: 67108864 +##### Persist unique response tracking data here to persist between restarts +# unique_response_history_dir: /usr/var/lib/pdns-recursor/udr +##### List of domains (and implicitly all subdomains) which will never be considered for UDR +# unique_response_ignore_list: [] +##### File with list of domains (and implicitly all subdomains) which will never be considered for UDR +# unique_response_ignore_list_file: '' +##### Log unique responses +# unique_response_log: true +##### If protobuf is configured, the tag to use for messages containing unique DNS responses. Defaults to 'pdns-udr' +# unique_response_pb_tag: pdns-udr +##### Track unique responses (tuple of query name, type and RR). +# unique_response_tracking: false +#} diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pdns_recursor/templates/etc/powerdns/recursor.d/outgoing.j2 b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pdns_recursor/templates/etc/powerdns/recursor.d/outgoing.j2 new file mode 100644 index 0000000..a06e826 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pdns_recursor/templates/etc/powerdns/recursor.d/outgoing.j2 @@ -0,0 +1,138 @@ +{% set values = pdns_recursor_outgoing | bodsch.core.remove_empty_values %} +{% if values | count > 0 %} +## -- outgoing ---------------------------------------------------------------- + {% if pdns_recursor_version.full_version is version_compare('5', '>=') %} +outgoing: + {{ values | to_nice_yaml(indent=2,sort_keys=False) | indent(2, False) }} + {% elif pdns_recursor_version.full_version is version_compare('5', '<') %} +dont-query={{ values.get("dont_query") | join(', ') }} + {% if values.get("dont_throttle_names", []) | count > 0 %} +dont-throttle-names={{ values.get("dont_throttle_names") | join(', ') }} + {% endif %} + {% if values.get("dont_throttle_netmasks", []) | count > 0 %} +dont-throttle-netmasks={{ values.get("dont_throttle_netmasks") | join(', ') }} + {% endif %} + {% if values.get("dot_to_auth_names", []) | count > 0 %} +dot-to-auth-names={{ values.get("dot_to_auth_names") | join(', ') }} + {% endif %} +dot-to-port-853={{ values.get("dot_to_port_853") | bodsch.core.config_bool(true_as='yes', false_as='no') }} +lowercase-outgoing={{ values.get("lowercase") | bodsch.core.config_bool(true_as='yes', false_as='no') }} + {% if pdns_recursor_version.full_version is version_compare('4.7', '>=') %} +max-busy-dot-probes={{ values.get("max_busy_dot_probes") }} + {% endif %} +max-ns-address-qperq={{ values.get("max_ns_address_qperq") }} + {% if pdns_recursor_version.full_version is version_compare('4.7', '>=') %} +max-ns-per-resolve={{ values.get("max_ns_per_resolve") }} + {% endif %} +max-qperq={{ values.get("max_qperq") }} +network-timeout={{ values.get("network_timeout") }} +non-resolving-ns-max-fails={{ values.get("non_resolving_ns_max_fails") }} +non-resolving-ns-throttle-time={{ values.get("non_resolving_ns_throttle_time") }} +server-down-max-fails={{ values.get("server_down_max_fails") }} +server-down-throttle-time={{ values.get("server_down_throttle_time") }} + {% if pdns_recursor_version.full_version is version_compare('4.7', '>=') %} +edns-padding-out={{ values.get("edns_padding") | bodsch.core.config_bool(true_as='yes', false_as='no') }} + {% endif %} +{# + {% if values.get("edns_subnet_allow_list", []) | count > 0 %} 
+edns-subnet-allow-list={{ values.get("edns_subnet_allow_list") | join(', ') }} +# deprecated: alternative="edns-subnet-allow-list" +# edns-subnet-whitelist={{ values.get("") }} + {% endif %} +#} +single-socket={{ values.get("single_socket") | bodsch.core.config_bool(true_as='yes', false_as='no') }} + {% if values.get("source_address", []) | count > 0 %} +query-local-address={{ values.get("source_address") | join(', ') }} + {% endif %} + {% if values.get("udp_source_port_avoid", []) | count > 0 %} +udp-source-port-avoid={{ values.get("udp_source_port_avoid") | join(', ') }} + {% endif %} +udp-source-port-max={{ values.get("udp_source_port_max") }} +udp-source-port-min={{ values.get("udp_source_port_min") }} + + {% endif %} +{% endif %} +{# +######### SECTION outgoing ######### +outgoing: +##### Determines the probability of a server marked down to be used anyway +# bypass_server_throttling_probability: 25 +##### If set, do not query these netmasks for DNS data +# dont_query: +# - 127.0.0.0/8 +# - 10.0.0.0/8 +# - 100.64.0.0/10 +# - 169.254.0.0/16 +# - 192.168.0.0/16 +# - 172.16.0.0/12 +# - ::1/128 +# - fc00::/7 +# - fe80::/10 +# - 0.0.0.0/8 +# - 192.0.0.0/24 +# - 192.0.2.0/24 +# - 198.51.100.0/24 +# - 203.0.113.0/24 +# - 240.0.0.0/4 +# - ::/96 +# - ::ffff:0:0/96 +# - 100::/64 +# - 2001:db8::/32 +##### Do not throttle nameservers with this name or suffix +# dont_throttle_names: [] +##### Do not throttle nameservers with this IP netmask +# dont_throttle_netmasks: [] +##### Use DoT to authoritative servers with these names or suffixes +# dot_to_auth_names: [] +##### Force DoT connection to target port 853 if DoT compiled in +# dot_to_port_853: true +##### Outgoing EDNS buffer size +# edns_bufsize: 1232 +##### Whether to add EDNS padding to outgoing DoT messages +# edns_padding: true +##### List of netmasks and domains that we should enable EDNS subnet for +# edns_subnet_allow_list: [] +##### Force outgoing questions to lowercase +# lowercase: false +##### Maximum number of concurrent DoT probes +# max_busy_dot_probes: 0 +##### Maximum outgoing NS address queries per query +# max_ns_address_qperq: 10 +##### Maximum number of NS records to consider to resolve a name, 0 is no limit +# max_ns_per_resolve: 13 +##### Maximum outgoing queries per query +# max_qperq: 50 +##### Wait this number of milliseconds for network i/o +# network_timeout: 1500 +##### Number of failed address resolves of a nameserver to start throttling it, 0 is disabled +# non_resolving_ns_max_fails: 5 +##### Number of seconds to throttle a nameserver with a name failing to resolve +# non_resolving_ns_throttle_time: 60 +##### Maximum number of consecutive timeouts (and unreachables) to mark a server as down ( 0 => disabled ) +# server_down_max_fails: 64 +##### Number of seconds to throttle all queries to a server after being marked as down +# server_down_throttle_time: 60 +##### If set, only use a single socket for outgoing queries +# single_socket: false +##### Source IP address for sending queries +# source_address: +# - 0.0.0.0 +##### Enable TCP Fast Open support on outgoing sockets +# tcp_fast_open_connect: false +##### Time TCP/DoT connections are left idle in milliseconds or 0 if no limit +# tcp_max_idle_ms: 10000 +##### Maximum number of idle TCP/DoT connections to a specific IP per thread, 0 means do not keep idle connections open +# tcp_max_idle_per_auth: 10 +##### Maximum number of idle TCP/DoT connections per thread +# tcp_max_idle_per_thread: 100 +##### Maximum total number of queries per TCP/DoT connection, 0 means 
no limit
+# tcp_max_queries: 0
+##### List of comma separated UDP port numbers to avoid
+# udp_source_port_avoid:
+# - '4791'
+# - '11211'
+##### Maximum UDP port to bind on
+# udp_source_port_max: 65535
+##### Minimum UDP port to bind on
+# udp_source_port_min: 1024
+#}
diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pdns_recursor/templates/etc/powerdns/recursor.d/packetcache.j2 b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pdns_recursor/templates/etc/powerdns/recursor.d/packetcache.j2
new file mode 100644
index 0000000..83a0600
--- /dev/null
+++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pdns_recursor/templates/etc/powerdns/recursor.d/packetcache.j2
@@ -0,0 +1,26 @@
+{% set values = pdns_recursor_packetcache | bodsch.core.remove_empty_values %}
+{% if values | count > 0 %}
+## -- packetcache -------------------------------------------------------------
+  {% if pdns_recursor_version.full_version is version_compare('5', '>=') %}
+packetcache:
+  {{ values | to_nice_yaml(indent=2,sort_keys=False) | indent(2, False) }}
+  {% elif pdns_recursor_version.full_version is version_compare('5', '<') %}
+
+  {% endif %}
+{% endif %}
+{#
+######### SECTION packetcache #########
+packetcache:
+##### Disable packetcache
+# disable: false
+##### maximum number of entries to keep in the packetcache
+# max_entries: 500000
+##### maximum number of seconds to keep a cached NxDomain or NoData entry in packetcache
+# negative_ttl: 60
+##### maximum number of seconds to keep a cached servfail entry in packetcache
+# servfail_ttl: 60
+##### Number of shards in the packet cache
+# shards: 1024
+##### maximum number of seconds to keep a cached entry in packetcache
+# ttl: 86400
+#}
diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pdns_recursor/templates/etc/powerdns/recursor.d/recordcache.j2 b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pdns_recursor/templates/etc/powerdns/recursor.d/recordcache.j2
new file mode 100644
index 0000000..43a961c
--- /dev/null
+++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pdns_recursor/templates/etc/powerdns/recursor.d/recordcache.j2
@@ -0,0 +1,36 @@
+{% set values = pdns_recursor_recordcache | bodsch.core.remove_empty_values %}
+{% if values | count > 0 %}
+## -- recordcache -------------------------------------------------------------
+  {% if pdns_recursor_version.full_version is version_compare('5', '>=') %}
+recordcache:
+  {{ values | to_nice_yaml(indent=2,sort_keys=False) | indent(2, False) }}
+  {% elif pdns_recursor_version.full_version is version_compare('5', '<') %}
+
+  {% endif %}
+{% endif %}
+{#
+######### SECTION recordcache #########
+recordcache:
+##### Limit answers to ANY queries in size
+# limit_qtype_any: true
+##### Replace records in record cache only after this % of original TTL has passed
+# locked_ttl_perc: 0
+##### maximum number of seconds to keep a Bogus (positive or negative) cached entry in memory
+# max_cache_bogus_ttl: 3600
+##### If set, maximum number of entries in the main cache
+# max_entries: 1000000
+##### maximum number of seconds to keep a negative cached entry in memory
+# max_negative_ttl: 3600
+##### Maximum size of RRSet in cache
+# max_rrset_size: 256
+##### maximum number of seconds to keep a cached entry in memory
+# max_ttl: 86400
+##### If a record is requested from the cache and only this % of original TTL remains, refetch
+# refresh_on_ttl_perc: 0
+##### Number of times a record's ttl is extended by 30s to be served stale
+# serve_stale_extensions: 0
+##### Number of shards in the record cache
+# shards: 1024
+##### Sequence of ZoneToCache entries
+# zonetocaches: []
+#}
diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pdns_recursor/templates/etc/powerdns/recursor.d/recursor.j2 b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pdns_recursor/templates/etc/powerdns/recursor.d/recursor.j2
new file mode 100644
index 0000000..a88d497
--- /dev/null
+++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pdns_recursor/templates/etc/powerdns/recursor.d/recursor.j2
@@ -0,0 +1,883 @@
+{% set values = pdns_recursor_recursor | bodsch.core.remove_empty_values %}
+{% if values | count > 0 %}
+## -- recursor ----------------------------------------------------------------
+  {% if pdns_recursor_version.full_version is version_compare('5', '>=') %}
+recursor:
+  {{ values | to_nice_yaml(indent=2,sort_keys=False) | indent(2, False) }}
+  {% elif pdns_recursor_version.full_version is version_compare('5', '<') %}
+config-dir={{ values.get("config_dir") }}
+  {% if values.get("cpu_map", []) | count > 0 %}
+cpu-map={{ values.get("cpu_map") | join(', ') }}
+  {% endif %}
+daemon={{ values.get("daemon") | bodsch.core.config_bool(true_as='yes', false_as='no') }}
+  {% set _zones = "" %}
+  {% if values.get("forward_zones", []) | count > 0 %}
+    {% set _zones = values.get("forward_zones") | bodsch.dns.recursor_backwards_compatibility(version=pdns_recursor_version.full_version) | join(', ') %}
+  {% endif %}
+  {% if _zones | string | length > 0 %}
+forward-zones={{ _zones }}
+  {% endif %}
+  {% set v = values.get("forward_zones_file", None) %}
+  {% if v and v | string | length > 0 %}
+forward-zones-file={{ v }}
+  {% endif %}
+  {% set v = values.get("forward_zones_recurse", None) %}
+  {% if v and v | string | length > 0 %}
+forward-zones-recurse={{ v }}
+  {% endif %}
+include-dir={{ values.get("include_dir") }}
+lua-config-file={{ values.get("lua_config_file") }}
+  {% set v = values.get("lua_dns_script", None) %}
+  {% if v and v | string | length > 0 %}
+lua-dns-script={{ v }}
+  {% endif %}
+  {% set v = values.get("setgid", None) %}
+  {% if v and v | string | length > 0 %}
+setgid={{ v }}
+  {% endif %}
+  {% set v = values.get("setuid", None) %}
+  {% if v and v | string | length > 0 %}
+setuid={{ v }}
+  {% endif %}
+stack-size={{ values.get("stack_size") }}
+  {% if values.get("stats_api_blacklist", []) | count > 0 %}
+stats-api-blacklist={{ values.get("stats_api_blacklist") | default([]) | join(', ') }}
+  {% endif %}
+  {% if values.get("stats_api_disabled_list", []) | count > 0 %}
+stats-api-disabled-list={{ values.get("stats_api_disabled_list") | default([]) | join(', ') }}
+  {% endif %}
+  {% if values.get("stats_carbon_blacklist", []) | count > 0 %}
+stats-carbon-blacklist={{ values.get("stats_carbon_blacklist") | default([]) | join(', ') }}
+  {% endif %}
+  {% if values.get("stats_carbon_disabled_list", []) | count > 0 %}
+stats-carbon-disabled-list={{ values.get("stats_carbon_disabled_list") | default([]) | join(', ') }}
+  {% endif %}
+  {% if values.get("stats_rec_control_blacklist", []) | count > 0 %}
+stats-rec-control-blacklist={{ values.get("stats_rec_control_blacklist") | default([]) | join(', ') }}
+  {% endif %}
+  {% if values.get("stats_rec_control_disabled_list", []) | count > 0 %}
+stats-rec-control-disabled-list={{ values.get("stats_rec_control_disabled_list") | default([]) | join(', ') }}
+  {% endif %}
+stats-ringbuffer-entries={{ values.get("stats_ringbuffer_entries") }} + {% if values.get("stats_snmp_blacklist", []) | count > 0 %} +stats-snmp-blacklist={{ values.get("stats_snmp_blacklist") | default([]) | join(', ') }} + {% endif %} + {% if values.get("stats_snmp_disabled_list", []) | count > 0 %} +stats-snmp-disabled-list={{ values.get("stats_snmp_disabled_list") | default([]) | join(', ') }} + {% endif %} +threads={{ values.get("threads") }} + + {% endif %} +{% endif %} +{# +######### SECTION recursor ######### +recursor: +##### Allow queries for trustanchor.server CH TXT and negativetrustanchor.server CH TXT +# allow_trust_anchor_query: false +##### Sequence of AllowedAdditionalQType +# allowed_additional_qtypes: [] +##### Answer ANY queries with tc=1, shunting to TCP +# any_to_tcp: false +##### Zones for which we have authoritative data, comma separated domain=file pairs +# auth_zones: [] +##### switch to chroot jail +# chroot: '' +##### Location of configuration directory (recursor.conf or recursor.yml) +# config_dir: /etc/powerdns +##### Name of this virtual configuration - will rename the binary image +# config_name: '' +##### Thread to CPU mapping, space separated thread-id=cpu1,cpu2..cpuN pairs +# cpu_map: '' +##### Operate as a daemon +# daemon: false +##### internal use only +# devonly_regression_test_mode: false +##### DNS64 prefix +# dns64_prefix: '' +##### Path to 'hosts' file +# etc_hosts_file: /etc/hosts +##### If set, event traces are collected and sent out via protobuf logging (1), logfile (2) or both (3) +# event_trace_enabled: 0 +##### If we should serve up contents from /etc/hosts +# export_etc_hosts: false +##### Also serve up the contents of /etc/hosts with this suffix +# export_etc_hosts_search_suffix: '' +##### If set, send an EDNS Extended Error extension on resolution failures, like DNSSEC validation errors +# extended_resolution_errors: true +##### Zones for which we forward queries, comma separated domain=ip pairs +# forward_zones: [] +##### File with (+)domain=ip pairs for forwarding +# forward_zones_file: '' +##### Zones for which we forward queries with recursion bit, comma separated domain=ip pairs +# forward_zones_recurse: [] +##### Sequence of ForwardingCatalogZone +# forwarding_catalog_zones: [] +##### If set, load root hints from this file +# hint_file: '' +##### Configuration settings to ignore if they are unknown +# ignore_unknown_settings: [] +##### Include settings files from this directory.
+# include_dir: '' +##### Number of latency values to calculate the qa-latency average +# latency_statistic_size: 10000 +##### More powerful configuration options +# lua_config_file: '' +##### Filename containing an optional Lua script that will be used to modify dns answers +# lua_dns_script: '' +##### More powerful configuration options +# lua_global_include_dir: '' +##### Number of seconds between calls to the lua user defined maintenance() function +# lua_maintenance_interval: 1 +##### maximum number of queries that can be chained to an outgoing request, 0 is no limit +# max_chain_length: 0 +##### Maximum number of CNAME records followed +# max_cnames_followed: 10 +##### Maximum number of $GENERATE steps when loading a zone from a file +# max_generate_steps: 0 +##### Maximum nested $INCLUDE depth when loading a zone from a file +# max_include_depth: 20 +##### Maximum number of simultaneous Mtasker threads +# max_mthreads: 2048 +##### Maximum number of internal recursion calls per query, 0 for unlimited +# max_recursion_depth: 16 +##### Maximum total wall-clock time per query in milliseconds, 0 for unlimited +# max_total_msec: 7000 +##### The minimum TTL +# minimum_ttl_override: 1 +##### When an NXDOMAIN exists in cache for a name with fewer labels than the qname, send NXDOMAIN without doing a lookup (see RFC 8020) +# nothing_below_nxdomain: dnssec +##### Path to the Public Suffix List file, if any +# public_suffix_list_file: '' +##### RFC9156 max minimize count +# qname_max_minimize_count: 10 +##### Use Query Name Minimization +# qname_minimization: true +##### RFC9156 minimize one label parameter +# qname_minimize_one_label: 4 +##### If set, believe that an NXDOMAIN from the root means the TLD does not exist +# root_nx_trust: true +##### Sequence of RPZ entries +# rpzs: [] +##### Save parent NS set to be used if child NS set fails +# save_parent_ns_set: true +##### Domain name from which to query security update notifications +# security_poll_suffix: secpoll.powerdns.com.
+##### If we should be authoritative for RFC 1918 private IP space +# serve_rfc1918: true +##### If we should be authoritative for RFC 6303 private IP space +# serve_rfc6303: true +##### Returned when queried for 'id.server' TXT or NSID, defaults to hostname, set custom or 'disabled' +# server_id: '*runtime determined*' +##### If set, change group id to this gid for more security +# setgid: '' +##### If set, change user id to this uid for more security +# setuid: '' +##### Where the controlsocket will live, /var/run/pdns-recursor when unset and not chrooted +# socket_dir: '' +##### Group of socket +# socket_group: '' +##### Permissions for socket +# socket_mode: '' +##### Owner of socket +# socket_owner: '' +##### Sequence of sort lists +# sortlists: [] +##### If non-zero, assume spoofing after this many near misses +# spoof_nearmiss_max: 1 +##### Size of the stack cache, per mthread +# stack_cache_size: 100 +##### stack size per mthread +# stack_size: 200000 +##### List of statistics that are disabled when retrieving the complete list of statistics via the API +# stats_api_disabled_list: +# - cache-bytes +# - packetcache-bytes +# - special-memory-usage +# - ecs-v4-response-bits-1 +# - ecs-v4-response-bits-2 +# - ecs-v4-response-bits-3 +# - ecs-v4-response-bits-4 +# - ecs-v4-response-bits-5 +# - ecs-v4-response-bits-6 +# - ecs-v4-response-bits-7 +# - ecs-v4-response-bits-8 +# - ecs-v4-response-bits-9 +# - ecs-v4-response-bits-10 +# - ecs-v4-response-bits-11 +# - ecs-v4-response-bits-12 +# - ecs-v4-response-bits-13 +# - ecs-v4-response-bits-14 +# - ecs-v4-response-bits-15 +# - ecs-v4-response-bits-16 +# - ecs-v4-response-bits-17 +# - ecs-v4-response-bits-18 +# - ecs-v4-response-bits-19 +# - ecs-v4-response-bits-20 +# - ecs-v4-response-bits-21 +# - ecs-v4-response-bits-22 +# - ecs-v4-response-bits-23 +# - ecs-v4-response-bits-24 +# - ecs-v4-response-bits-25 +# - ecs-v4-response-bits-26 +# - ecs-v4-response-bits-27 +# - ecs-v4-response-bits-28 +# - ecs-v4-response-bits-29 +# - ecs-v4-response-bits-30 +# - ecs-v4-response-bits-31 +# - ecs-v4-response-bits-32 +# - ecs-v6-response-bits-1 +# - ecs-v6-response-bits-2 +# - ecs-v6-response-bits-3 +# - ecs-v6-response-bits-4 +# - ecs-v6-response-bits-5 +# - ecs-v6-response-bits-6 +# - ecs-v6-response-bits-7 +# - ecs-v6-response-bits-8 +# - ecs-v6-response-bits-9 +# - ecs-v6-response-bits-10 +# - ecs-v6-response-bits-11 +# - ecs-v6-response-bits-12 +# - ecs-v6-response-bits-13 +# - ecs-v6-response-bits-14 +# - ecs-v6-response-bits-15 +# - ecs-v6-response-bits-16 +# - ecs-v6-response-bits-17 +# - ecs-v6-response-bits-18 +# - ecs-v6-response-bits-19 +# - ecs-v6-response-bits-20 +# - ecs-v6-response-bits-21 +# - ecs-v6-response-bits-22 +# - ecs-v6-response-bits-23 +# - ecs-v6-response-bits-24 +# - ecs-v6-response-bits-25 +# - ecs-v6-response-bits-26 +# - ecs-v6-response-bits-27 +# - ecs-v6-response-bits-28 +# - ecs-v6-response-bits-29 +# - ecs-v6-response-bits-30 +# - ecs-v6-response-bits-31 +# - ecs-v6-response-bits-32 +# - ecs-v6-response-bits-33 +# - ecs-v6-response-bits-34 +# - ecs-v6-response-bits-35 +# - ecs-v6-response-bits-36 +# - ecs-v6-response-bits-37 +# - ecs-v6-response-bits-38 +# - ecs-v6-response-bits-39 +# - ecs-v6-response-bits-40 +# - ecs-v6-response-bits-41 +# - ecs-v6-response-bits-42 +# - ecs-v6-response-bits-43 +# - ecs-v6-response-bits-44 +# - ecs-v6-response-bits-45 +# - ecs-v6-response-bits-46 +# - ecs-v6-response-bits-47 +# - ecs-v6-response-bits-48 +# - ecs-v6-response-bits-49 +# - ecs-v6-response-bits-50 +# - 
ecs-v6-response-bits-51 +# - ecs-v6-response-bits-52 +# - ecs-v6-response-bits-53 +# - ecs-v6-response-bits-54 +# - ecs-v6-response-bits-55 +# - ecs-v6-response-bits-56 +# - ecs-v6-response-bits-57 +# - ecs-v6-response-bits-58 +# - ecs-v6-response-bits-59 +# - ecs-v6-response-bits-60 +# - ecs-v6-response-bits-61 +# - ecs-v6-response-bits-62 +# - ecs-v6-response-bits-63 +# - ecs-v6-response-bits-64 +# - ecs-v6-response-bits-65 +# - ecs-v6-response-bits-66 +# - ecs-v6-response-bits-67 +# - ecs-v6-response-bits-68 +# - ecs-v6-response-bits-69 +# - ecs-v6-response-bits-70 +# - ecs-v6-response-bits-71 +# - ecs-v6-response-bits-72 +# - ecs-v6-response-bits-73 +# - ecs-v6-response-bits-74 +# - ecs-v6-response-bits-75 +# - ecs-v6-response-bits-76 +# - ecs-v6-response-bits-77 +# - ecs-v6-response-bits-78 +# - ecs-v6-response-bits-79 +# - ecs-v6-response-bits-80 +# - ecs-v6-response-bits-81 +# - ecs-v6-response-bits-82 +# - ecs-v6-response-bits-83 +# - ecs-v6-response-bits-84 +# - ecs-v6-response-bits-85 +# - ecs-v6-response-bits-86 +# - ecs-v6-response-bits-87 +# - ecs-v6-response-bits-88 +# - ecs-v6-response-bits-89 +# - ecs-v6-response-bits-90 +# - ecs-v6-response-bits-91 +# - ecs-v6-response-bits-92 +# - ecs-v6-response-bits-93 +# - ecs-v6-response-bits-94 +# - ecs-v6-response-bits-95 +# - ecs-v6-response-bits-96 +# - ecs-v6-response-bits-97 +# - ecs-v6-response-bits-98 +# - ecs-v6-response-bits-99 +# - ecs-v6-response-bits-100 +# - ecs-v6-response-bits-101 +# - ecs-v6-response-bits-102 +# - ecs-v6-response-bits-103 +# - ecs-v6-response-bits-104 +# - ecs-v6-response-bits-105 +# - ecs-v6-response-bits-106 +# - ecs-v6-response-bits-107 +# - ecs-v6-response-bits-108 +# - ecs-v6-response-bits-109 +# - ecs-v6-response-bits-110 +# - ecs-v6-response-bits-111 +# - ecs-v6-response-bits-112 +# - ecs-v6-response-bits-113 +# - ecs-v6-response-bits-114 +# - ecs-v6-response-bits-115 +# - ecs-v6-response-bits-116 +# - ecs-v6-response-bits-117 +# - ecs-v6-response-bits-118 +# - ecs-v6-response-bits-119 +# - ecs-v6-response-bits-120 +# - ecs-v6-response-bits-121 +# - ecs-v6-response-bits-122 +# - ecs-v6-response-bits-123 +# - ecs-v6-response-bits-124 +# - ecs-v6-response-bits-125 +# - ecs-v6-response-bits-126 +# - ecs-v6-response-bits-127 +# - ecs-v6-response-bits-128 +##### List of statistics that are prevented from being exported via Carbon +# stats_carbon_disabled_list: +# - cache-bytes +# - packetcache-bytes +# - special-memory-usage +# - ecs-v4-response-bits-1 +# - ecs-v4-response-bits-2 +# - ecs-v4-response-bits-3 +# - ecs-v4-response-bits-4 +# - ecs-v4-response-bits-5 +# - ecs-v4-response-bits-6 +# - ecs-v4-response-bits-7 +# - ecs-v4-response-bits-8 +# - ecs-v4-response-bits-9 +# - ecs-v4-response-bits-10 +# - ecs-v4-response-bits-11 +# - ecs-v4-response-bits-12 +# - ecs-v4-response-bits-13 +# - ecs-v4-response-bits-14 +# - ecs-v4-response-bits-15 +# - ecs-v4-response-bits-16 +# - ecs-v4-response-bits-17 +# - ecs-v4-response-bits-18 +# - ecs-v4-response-bits-19 +# - ecs-v4-response-bits-20 +# - ecs-v4-response-bits-21 +# - ecs-v4-response-bits-22 +# - ecs-v4-response-bits-23 +# - ecs-v4-response-bits-24 +# - ecs-v4-response-bits-25 +# - ecs-v4-response-bits-26 +# - ecs-v4-response-bits-27 +# - ecs-v4-response-bits-28 +# - ecs-v4-response-bits-29 +# - ecs-v4-response-bits-30 +# - ecs-v4-response-bits-31 +# - ecs-v4-response-bits-32 +# - ecs-v6-response-bits-1 +# - ecs-v6-response-bits-2 +# - ecs-v6-response-bits-3 +# - ecs-v6-response-bits-4 +# - ecs-v6-response-bits-5 +# - ecs-v6-response-bits-6 +# - 
ecs-v6-response-bits-7 +# - ecs-v6-response-bits-8 +# - ecs-v6-response-bits-9 +# - ecs-v6-response-bits-10 +# - ecs-v6-response-bits-11 +# - ecs-v6-response-bits-12 +# - ecs-v6-response-bits-13 +# - ecs-v6-response-bits-14 +# - ecs-v6-response-bits-15 +# - ecs-v6-response-bits-16 +# - ecs-v6-response-bits-17 +# - ecs-v6-response-bits-18 +# - ecs-v6-response-bits-19 +# - ecs-v6-response-bits-20 +# - ecs-v6-response-bits-21 +# - ecs-v6-response-bits-22 +# - ecs-v6-response-bits-23 +# - ecs-v6-response-bits-24 +# - ecs-v6-response-bits-25 +# - ecs-v6-response-bits-26 +# - ecs-v6-response-bits-27 +# - ecs-v6-response-bits-28 +# - ecs-v6-response-bits-29 +# - ecs-v6-response-bits-30 +# - ecs-v6-response-bits-31 +# - ecs-v6-response-bits-32 +# - ecs-v6-response-bits-33 +# - ecs-v6-response-bits-34 +# - ecs-v6-response-bits-35 +# - ecs-v6-response-bits-36 +# - ecs-v6-response-bits-37 +# - ecs-v6-response-bits-38 +# - ecs-v6-response-bits-39 +# - ecs-v6-response-bits-40 +# - ecs-v6-response-bits-41 +# - ecs-v6-response-bits-42 +# - ecs-v6-response-bits-43 +# - ecs-v6-response-bits-44 +# - ecs-v6-response-bits-45 +# - ecs-v6-response-bits-46 +# - ecs-v6-response-bits-47 +# - ecs-v6-response-bits-48 +# - ecs-v6-response-bits-49 +# - ecs-v6-response-bits-50 +# - ecs-v6-response-bits-51 +# - ecs-v6-response-bits-52 +# - ecs-v6-response-bits-53 +# - ecs-v6-response-bits-54 +# - ecs-v6-response-bits-55 +# - ecs-v6-response-bits-56 +# - ecs-v6-response-bits-57 +# - ecs-v6-response-bits-58 +# - ecs-v6-response-bits-59 +# - ecs-v6-response-bits-60 +# - ecs-v6-response-bits-61 +# - ecs-v6-response-bits-62 +# - ecs-v6-response-bits-63 +# - ecs-v6-response-bits-64 +# - ecs-v6-response-bits-65 +# - ecs-v6-response-bits-66 +# - ecs-v6-response-bits-67 +# - ecs-v6-response-bits-68 +# - ecs-v6-response-bits-69 +# - ecs-v6-response-bits-70 +# - ecs-v6-response-bits-71 +# - ecs-v6-response-bits-72 +# - ecs-v6-response-bits-73 +# - ecs-v6-response-bits-74 +# - ecs-v6-response-bits-75 +# - ecs-v6-response-bits-76 +# - ecs-v6-response-bits-77 +# - ecs-v6-response-bits-78 +# - ecs-v6-response-bits-79 +# - ecs-v6-response-bits-80 +# - ecs-v6-response-bits-81 +# - ecs-v6-response-bits-82 +# - ecs-v6-response-bits-83 +# - ecs-v6-response-bits-84 +# - ecs-v6-response-bits-85 +# - ecs-v6-response-bits-86 +# - ecs-v6-response-bits-87 +# - ecs-v6-response-bits-88 +# - ecs-v6-response-bits-89 +# - ecs-v6-response-bits-90 +# - ecs-v6-response-bits-91 +# - ecs-v6-response-bits-92 +# - ecs-v6-response-bits-93 +# - ecs-v6-response-bits-94 +# - ecs-v6-response-bits-95 +# - ecs-v6-response-bits-96 +# - ecs-v6-response-bits-97 +# - ecs-v6-response-bits-98 +# - ecs-v6-response-bits-99 +# - ecs-v6-response-bits-100 +# - ecs-v6-response-bits-101 +# - ecs-v6-response-bits-102 +# - ecs-v6-response-bits-103 +# - ecs-v6-response-bits-104 +# - ecs-v6-response-bits-105 +# - ecs-v6-response-bits-106 +# - ecs-v6-response-bits-107 +# - ecs-v6-response-bits-108 +# - ecs-v6-response-bits-109 +# - ecs-v6-response-bits-110 +# - ecs-v6-response-bits-111 +# - ecs-v6-response-bits-112 +# - ecs-v6-response-bits-113 +# - ecs-v6-response-bits-114 +# - ecs-v6-response-bits-115 +# - ecs-v6-response-bits-116 +# - ecs-v6-response-bits-117 +# - ecs-v6-response-bits-118 +# - ecs-v6-response-bits-119 +# - ecs-v6-response-bits-120 +# - ecs-v6-response-bits-121 +# - ecs-v6-response-bits-122 +# - ecs-v6-response-bits-123 +# - ecs-v6-response-bits-124 +# - ecs-v6-response-bits-125 +# - ecs-v6-response-bits-126 +# - ecs-v6-response-bits-127 +# - 
ecs-v6-response-bits-128 +# - cumul-clientanswers +# - cumul-authanswers +# - policy-hits +# - proxy-mapping-total +# - remote-logger-count +##### List of statistics that are prevented from being exported via rec_control get-all +# stats_rec_control_disabled_list: +# - cache-bytes +# - packetcache-bytes +# - special-memory-usage +# - ecs-v4-response-bits-1 +# - ecs-v4-response-bits-2 +# - ecs-v4-response-bits-3 +# - ecs-v4-response-bits-4 +# - ecs-v4-response-bits-5 +# - ecs-v4-response-bits-6 +# - ecs-v4-response-bits-7 +# - ecs-v4-response-bits-8 +# - ecs-v4-response-bits-9 +# - ecs-v4-response-bits-10 +# - ecs-v4-response-bits-11 +# - ecs-v4-response-bits-12 +# - ecs-v4-response-bits-13 +# - ecs-v4-response-bits-14 +# - ecs-v4-response-bits-15 +# - ecs-v4-response-bits-16 +# - ecs-v4-response-bits-17 +# - ecs-v4-response-bits-18 +# - ecs-v4-response-bits-19 +# - ecs-v4-response-bits-20 +# - ecs-v4-response-bits-21 +# - ecs-v4-response-bits-22 +# - ecs-v4-response-bits-23 +# - ecs-v4-response-bits-24 +# - ecs-v4-response-bits-25 +# - ecs-v4-response-bits-26 +# - ecs-v4-response-bits-27 +# - ecs-v4-response-bits-28 +# - ecs-v4-response-bits-29 +# - ecs-v4-response-bits-30 +# - ecs-v4-response-bits-31 +# - ecs-v4-response-bits-32 +# - ecs-v6-response-bits-1 +# - ecs-v6-response-bits-2 +# - ecs-v6-response-bits-3 +# - ecs-v6-response-bits-4 +# - ecs-v6-response-bits-5 +# - ecs-v6-response-bits-6 +# - ecs-v6-response-bits-7 +# - ecs-v6-response-bits-8 +# - ecs-v6-response-bits-9 +# - ecs-v6-response-bits-10 +# - ecs-v6-response-bits-11 +# - ecs-v6-response-bits-12 +# - ecs-v6-response-bits-13 +# - ecs-v6-response-bits-14 +# - ecs-v6-response-bits-15 +# - ecs-v6-response-bits-16 +# - ecs-v6-response-bits-17 +# - ecs-v6-response-bits-18 +# - ecs-v6-response-bits-19 +# - ecs-v6-response-bits-20 +# - ecs-v6-response-bits-21 +# - ecs-v6-response-bits-22 +# - ecs-v6-response-bits-23 +# - ecs-v6-response-bits-24 +# - ecs-v6-response-bits-25 +# - ecs-v6-response-bits-26 +# - ecs-v6-response-bits-27 +# - ecs-v6-response-bits-28 +# - ecs-v6-response-bits-29 +# - ecs-v6-response-bits-30 +# - ecs-v6-response-bits-31 +# - ecs-v6-response-bits-32 +# - ecs-v6-response-bits-33 +# - ecs-v6-response-bits-34 +# - ecs-v6-response-bits-35 +# - ecs-v6-response-bits-36 +# - ecs-v6-response-bits-37 +# - ecs-v6-response-bits-38 +# - ecs-v6-response-bits-39 +# - ecs-v6-response-bits-40 +# - ecs-v6-response-bits-41 +# - ecs-v6-response-bits-42 +# - ecs-v6-response-bits-43 +# - ecs-v6-response-bits-44 +# - ecs-v6-response-bits-45 +# - ecs-v6-response-bits-46 +# - ecs-v6-response-bits-47 +# - ecs-v6-response-bits-48 +# - ecs-v6-response-bits-49 +# - ecs-v6-response-bits-50 +# - ecs-v6-response-bits-51 +# - ecs-v6-response-bits-52 +# - ecs-v6-response-bits-53 +# - ecs-v6-response-bits-54 +# - ecs-v6-response-bits-55 +# - ecs-v6-response-bits-56 +# - ecs-v6-response-bits-57 +# - ecs-v6-response-bits-58 +# - ecs-v6-response-bits-59 +# - ecs-v6-response-bits-60 +# - ecs-v6-response-bits-61 +# - ecs-v6-response-bits-62 +# - ecs-v6-response-bits-63 +# - ecs-v6-response-bits-64 +# - ecs-v6-response-bits-65 +# - ecs-v6-response-bits-66 +# - ecs-v6-response-bits-67 +# - ecs-v6-response-bits-68 +# - ecs-v6-response-bits-69 +# - ecs-v6-response-bits-70 +# - ecs-v6-response-bits-71 +# - ecs-v6-response-bits-72 +# - ecs-v6-response-bits-73 +# - ecs-v6-response-bits-74 +# - ecs-v6-response-bits-75 +# - ecs-v6-response-bits-76 +# - ecs-v6-response-bits-77 +# - ecs-v6-response-bits-78 +# - ecs-v6-response-bits-79 +# - 
ecs-v6-response-bits-80 +# - ecs-v6-response-bits-81 +# - ecs-v6-response-bits-82 +# - ecs-v6-response-bits-83 +# - ecs-v6-response-bits-84 +# - ecs-v6-response-bits-85 +# - ecs-v6-response-bits-86 +# - ecs-v6-response-bits-87 +# - ecs-v6-response-bits-88 +# - ecs-v6-response-bits-89 +# - ecs-v6-response-bits-90 +# - ecs-v6-response-bits-91 +# - ecs-v6-response-bits-92 +# - ecs-v6-response-bits-93 +# - ecs-v6-response-bits-94 +# - ecs-v6-response-bits-95 +# - ecs-v6-response-bits-96 +# - ecs-v6-response-bits-97 +# - ecs-v6-response-bits-98 +# - ecs-v6-response-bits-99 +# - ecs-v6-response-bits-100 +# - ecs-v6-response-bits-101 +# - ecs-v6-response-bits-102 +# - ecs-v6-response-bits-103 +# - ecs-v6-response-bits-104 +# - ecs-v6-response-bits-105 +# - ecs-v6-response-bits-106 +# - ecs-v6-response-bits-107 +# - ecs-v6-response-bits-108 +# - ecs-v6-response-bits-109 +# - ecs-v6-response-bits-110 +# - ecs-v6-response-bits-111 +# - ecs-v6-response-bits-112 +# - ecs-v6-response-bits-113 +# - ecs-v6-response-bits-114 +# - ecs-v6-response-bits-115 +# - ecs-v6-response-bits-116 +# - ecs-v6-response-bits-117 +# - ecs-v6-response-bits-118 +# - ecs-v6-response-bits-119 +# - ecs-v6-response-bits-120 +# - ecs-v6-response-bits-121 +# - ecs-v6-response-bits-122 +# - ecs-v6-response-bits-123 +# - ecs-v6-response-bits-124 +# - ecs-v6-response-bits-125 +# - ecs-v6-response-bits-126 +# - ecs-v6-response-bits-127 +# - ecs-v6-response-bits-128 +# - cumul-clientanswers +# - cumul-authanswers +# - policy-hits +# - proxy-mapping-total +# - remote-logger-count +##### maximum number of packets to store statistics for +# stats_ringbuffer_entries: 10000 +##### List of statistics that are prevented from being exported via SNMP +# stats_snmp_disabled_list: +# - cache-bytes +# - packetcache-bytes +# - special-memory-usage +# - ecs-v4-response-bits-1 +# - ecs-v4-response-bits-2 +# - ecs-v4-response-bits-3 +# - ecs-v4-response-bits-4 +# - ecs-v4-response-bits-5 +# - ecs-v4-response-bits-6 +# - ecs-v4-response-bits-7 +# - ecs-v4-response-bits-8 +# - ecs-v4-response-bits-9 +# - ecs-v4-response-bits-10 +# - ecs-v4-response-bits-11 +# - ecs-v4-response-bits-12 +# - ecs-v4-response-bits-13 +# - ecs-v4-response-bits-14 +# - ecs-v4-response-bits-15 +# - ecs-v4-response-bits-16 +# - ecs-v4-response-bits-17 +# - ecs-v4-response-bits-18 +# - ecs-v4-response-bits-19 +# - ecs-v4-response-bits-20 +# - ecs-v4-response-bits-21 +# - ecs-v4-response-bits-22 +# - ecs-v4-response-bits-23 +# - ecs-v4-response-bits-24 +# - ecs-v4-response-bits-25 +# - ecs-v4-response-bits-26 +# - ecs-v4-response-bits-27 +# - ecs-v4-response-bits-28 +# - ecs-v4-response-bits-29 +# - ecs-v4-response-bits-30 +# - ecs-v4-response-bits-31 +# - ecs-v4-response-bits-32 +# - ecs-v6-response-bits-1 +# - ecs-v6-response-bits-2 +# - ecs-v6-response-bits-3 +# - ecs-v6-response-bits-4 +# - ecs-v6-response-bits-5 +# - ecs-v6-response-bits-6 +# - ecs-v6-response-bits-7 +# - ecs-v6-response-bits-8 +# - ecs-v6-response-bits-9 +# - ecs-v6-response-bits-10 +# - ecs-v6-response-bits-11 +# - ecs-v6-response-bits-12 +# - ecs-v6-response-bits-13 +# - ecs-v6-response-bits-14 +# - ecs-v6-response-bits-15 +# - ecs-v6-response-bits-16 +# - ecs-v6-response-bits-17 +# - ecs-v6-response-bits-18 +# - ecs-v6-response-bits-19 +# - ecs-v6-response-bits-20 +# - ecs-v6-response-bits-21 +# - ecs-v6-response-bits-22 +# - ecs-v6-response-bits-23 +# - ecs-v6-response-bits-24 +# - ecs-v6-response-bits-25 +# - ecs-v6-response-bits-26 +# - ecs-v6-response-bits-27 +# - ecs-v6-response-bits-28 +# - 
ecs-v6-response-bits-29 +# - ecs-v6-response-bits-30 +# - ecs-v6-response-bits-31 +# - ecs-v6-response-bits-32 +# - ecs-v6-response-bits-33 +# - ecs-v6-response-bits-34 +# - ecs-v6-response-bits-35 +# - ecs-v6-response-bits-36 +# - ecs-v6-response-bits-37 +# - ecs-v6-response-bits-38 +# - ecs-v6-response-bits-39 +# - ecs-v6-response-bits-40 +# - ecs-v6-response-bits-41 +# - ecs-v6-response-bits-42 +# - ecs-v6-response-bits-43 +# - ecs-v6-response-bits-44 +# - ecs-v6-response-bits-45 +# - ecs-v6-response-bits-46 +# - ecs-v6-response-bits-47 +# - ecs-v6-response-bits-48 +# - ecs-v6-response-bits-49 +# - ecs-v6-response-bits-50 +# - ecs-v6-response-bits-51 +# - ecs-v6-response-bits-52 +# - ecs-v6-response-bits-53 +# - ecs-v6-response-bits-54 +# - ecs-v6-response-bits-55 +# - ecs-v6-response-bits-56 +# - ecs-v6-response-bits-57 +# - ecs-v6-response-bits-58 +# - ecs-v6-response-bits-59 +# - ecs-v6-response-bits-60 +# - ecs-v6-response-bits-61 +# - ecs-v6-response-bits-62 +# - ecs-v6-response-bits-63 +# - ecs-v6-response-bits-64 +# - ecs-v6-response-bits-65 +# - ecs-v6-response-bits-66 +# - ecs-v6-response-bits-67 +# - ecs-v6-response-bits-68 +# - ecs-v6-response-bits-69 +# - ecs-v6-response-bits-70 +# - ecs-v6-response-bits-71 +# - ecs-v6-response-bits-72 +# - ecs-v6-response-bits-73 +# - ecs-v6-response-bits-74 +# - ecs-v6-response-bits-75 +# - ecs-v6-response-bits-76 +# - ecs-v6-response-bits-77 +# - ecs-v6-response-bits-78 +# - ecs-v6-response-bits-79 +# - ecs-v6-response-bits-80 +# - ecs-v6-response-bits-81 +# - ecs-v6-response-bits-82 +# - ecs-v6-response-bits-83 +# - ecs-v6-response-bits-84 +# - ecs-v6-response-bits-85 +# - ecs-v6-response-bits-86 +# - ecs-v6-response-bits-87 +# - ecs-v6-response-bits-88 +# - ecs-v6-response-bits-89 +# - ecs-v6-response-bits-90 +# - ecs-v6-response-bits-91 +# - ecs-v6-response-bits-92 +# - ecs-v6-response-bits-93 +# - ecs-v6-response-bits-94 +# - ecs-v6-response-bits-95 +# - ecs-v6-response-bits-96 +# - ecs-v6-response-bits-97 +# - ecs-v6-response-bits-98 +# - ecs-v6-response-bits-99 +# - ecs-v6-response-bits-100 +# - ecs-v6-response-bits-101 +# - ecs-v6-response-bits-102 +# - ecs-v6-response-bits-103 +# - ecs-v6-response-bits-104 +# - ecs-v6-response-bits-105 +# - ecs-v6-response-bits-106 +# - ecs-v6-response-bits-107 +# - ecs-v6-response-bits-108 +# - ecs-v6-response-bits-109 +# - ecs-v6-response-bits-110 +# - ecs-v6-response-bits-111 +# - ecs-v6-response-bits-112 +# - ecs-v6-response-bits-113 +# - ecs-v6-response-bits-114 +# - ecs-v6-response-bits-115 +# - ecs-v6-response-bits-116 +# - ecs-v6-response-bits-117 +# - ecs-v6-response-bits-118 +# - ecs-v6-response-bits-119 +# - ecs-v6-response-bits-120 +# - ecs-v6-response-bits-121 +# - ecs-v6-response-bits-122 +# - ecs-v6-response-bits-123 +# - ecs-v6-response-bits-124 +# - ecs-v6-response-bits-125 +# - ecs-v6-response-bits-126 +# - ecs-v6-response-bits-127 +# - ecs-v6-response-bits-128 +# - cumul-clientanswers +# - cumul-authanswers +# - policy-hits +# - proxy-mapping-total +# - remote-logger-count +##### Set interval (in seconds) of the re-resolve checks of system resolver subsystem. +# system_resolver_interval: 0 +##### Check for potential self-resolve, default enabled. 
+# system_resolver_self_resolve_check: true +##### Set TTL of system resolver feature, 0 (default) is disabled +# system_resolver_ttl: 0 +##### Launch this number of threads listening for and processing TCP queries +# tcp_threads: 1 +##### Launch this number of threads +# threads: 2 +##### string reported on version.pdns or version.bind +# version_string: '*runtime determined*' +##### Write a PID file +# write_pid: true + +#} diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pdns_recursor/templates/etc/powerdns/recursor.d/snmp.j2 b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pdns_recursor/templates/etc/powerdns/recursor.d/snmp.j2 new file mode 100644 index 0000000..1af02ad --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pdns_recursor/templates/etc/powerdns/recursor.d/snmp.j2 @@ -0,0 +1,27 @@ +{% set values = pdns_recursor_snmp | bodsch.core.remove_empty_values %} +{% if values | count > 0 %} +## -- snmp -------------------------------------------------------------------- + {% if pdns_recursor_version.full_version is version_compare('5', '>=') %} +snmp: + {{ values | to_nice_yaml(indent=2,sort_keys=False) | indent(2, False) }} + {% elif pdns_recursor_version.full_version is version_compare('5', '<') %} +snmp-agent={{ values.get("agent") | bodsch.core.config_bool(true_as='yes', false_as='no') }} + {% set v = values.get("daemon_socket", None) %} + {% if v and v | string | length > 0 %} +snmp-daemon-socket={{ v }} + {% endif %} + {% set v = values.get("master_socket", None) %} + {% if v and v | string | length > 0 %} +snmp-master-socket={{ v }} + {% endif %} + + {% endif %} +{% endif %} +{# +######### SECTION snmp ######### +snmp: +##### If set, register as an SNMP agent +# agent: false +##### If set and snmp-agent is set, the socket to use to register to the SNMP daemon +# daemon_socket: '' +#} diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pdns_recursor/templates/etc/powerdns/recursor.d/webservice.j2 b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pdns_recursor/templates/etc/powerdns/recursor.d/webservice.j2 new file mode 100644 index 0000000..321d091 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pdns_recursor/templates/etc/powerdns/recursor.d/webservice.j2 @@ -0,0 +1,43 @@ +{% set values = pdns_recursor_webservice | bodsch.core.remove_empty_values %} +{% if values | count > 0 %} +## -- webservice -------------------------------------------------------------- + {% if pdns_recursor_version.full_version is version_compare('5', '>=') %} +webservice: + {{ values | to_nice_yaml(indent=2,sort_keys=False) | indent(2, False) }} + {% elif pdns_recursor_version.full_version is version_compare('5', '<') %} +webserver={{ values.get("webserver") | bodsch.core.config_bool(true_as='yes', false_as='no') }} +webserver-address={{ values.get("address") }} +webserver-allow-from={{ values.get("allow_from") | join(', ') }} +webserver-hash-plaintext-credentials={{ values.get("hash_plaintext_credentials") | bodsch.core.config_bool(true_as='yes', false_as='no') }} +webserver-loglevel={{ values.get("loglevel") }} +webserver-password={{ values.get("password") }} +webserver-port={{ values.get("port") }} +api-config-dir={{ values.get("api_dir") }} +api-key={{ values.get("api_key") }} + + {% endif %} +{% endif %} +{# +######### SECTION webservice ######### +webservice: +##### IP Address of webserver to listen on +# address: 127.0.0.1 
+##### Webserver access is only allowed from these subnets +# allow_from: +# - 127.0.0.1 +# - ::1 +##### Directory where REST API stores config and zones +# api_dir: '' +##### Static pre-shared authentication key for access to the REST API +# api_key: '' +##### Whether to hash passwords and api keys supplied in plaintext, to prevent keeping the plaintext version in memory at runtime +# hash_plaintext_credentials: false +##### Amount of logging in the webserver (none, normal, detailed) +# loglevel: normal +##### Password required for accessing the webserver +# password: '' +##### Port of webserver to listen on +# port: 8082 +##### Start a webserver (for REST API) +# webserver: false +#} diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pdns_recursor/templates/recursor.conf.j2 b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pdns_recursor/templates/recursor.conf.j2 new file mode 100644 index 0000000..56a7452 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pdns_recursor/templates/recursor.conf.j2 @@ -0,0 +1,29 @@ +config-dir={{ pdns_recursor_config_dir }} +setuid={{ pdns_recursor_user }} +setgid={{ pdns_recursor_group }} + +{% for config_item, value in pdns_recursor_config.items() | sort() %} +{% if config_item not in ["config-dir", "setuid", "setgid"] %} +{% if config_item == 'threads' %} +{{ config_item }}={{ value | string }} +{% elif value is sameas True %} +{{ config_item }}=yes +{% elif value is sameas False %} +{{ config_item }}=no +{% elif value is string %} +{{ config_item }}={{ value | string }} +{% elif value is sequence %} +{{ config_item }}={{ value | join(',') }} +{% else %} +{{ config_item }}={{ value | string }} +{% endif %} +{% endif %} +{% endfor %} + +{% if pdns_recursor_config_lua_file_content != "" %} +lua-config-file={{ pdns_recursor_config_lua }} +{% endif %} + +{% if pdns_recursor_config_dns_script_file_content != "" %} +lua-dns-script={{ pdns_recursor_config_dns_script }} +{% endif %} diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pdns_recursor/vars/archlinux.yml b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pdns_recursor/vars/archlinux.yml new file mode 100644 index 0000000..fca1ca4 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pdns_recursor/vars/archlinux.yml @@ -0,0 +1,13 @@ +--- + +pdns_recursor_packages: + - powerdns-recursor + +pdns_recursor_defaults_service: + name: pdns-recursor + # pdns-recursor@.service + +pdns_recursor_owner: pdns-recursor +pdns_recursor_group: pdns-recursor + +... 
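The recursor.conf.j2 template above flattens the `pdns_recursor_config` dictionary into the classic key=value format: booleans are rendered as `yes`/`no`, sequences are comma-joined, and `config-dir`, `setuid` and `setgid` are excluded from the loop because they are written explicitly at the top of the file. A minimal sketch of that mapping, using hypothetical keys and values rather than role defaults:

```yaml
# Hypothetical input for recursor.conf.j2 (illustrative, not role defaults):
pdns_recursor_config:
  daemon: false                  # boolean  -> rendered as: daemon=no
  quiet: true                    # boolean  -> rendered as: quiet=yes
  threads: 4                     # explicit 'threads' branch -> threads=4
  local-address:                 # sequence -> local-address=127.0.0.1,::1
    - 127.0.0.1
    - ::1
  max-cache-entries: "1000000"   # string   -> max-cache-entries=1000000
```

Keys are emitted in sorted order, and `lua-config-file` / `lua-dns-script` lines are only appended when `pdns_recursor_config_lua_file_content` or `pdns_recursor_config_dns_script_file_content` is non-empty.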
diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pdns_recursor/vars/debian.yml b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pdns_recursor/vars/debian.yml new file mode 100644 index 0000000..083b5f0 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pdns_recursor/vars/debian.yml @@ -0,0 +1,29 @@ +--- + +pdns_recursor_packages: + - pdns-recursor + +pdns_recursor_owner: pdns +pdns_recursor_group: pdns + +pdns_recursor_defaults_recursor: + config_dir: /etc/powerdns + cpu_map: [] + daemon: false + forward_zones: [] + forward_zones_file: '' + forward_zones_recurse: [] + forwarding_catalog_zones: [] + include_dir: /etc/powerdns/recursor.d + lua_config_file: /etc/powerdns/recursor.lua + lua_dns_script: '' + setgid: "" + setuid: "" + stack_cache_size: 100 + stack_size: 200000 + stats_api_disabled_list: [] + stats_carbon_disabled_list: [] + stats_rec_control_disabled_list: [] + stats_ringbuffer_entries: 10000 + stats_snmp_disabled_list: [] + threads: 2 diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pdns_recursor/vars/main.yml b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pdns_recursor/vars/main.yml new file mode 100644 index 0000000..a97f6d9 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pdns_recursor/vars/main.yml @@ -0,0 +1,268 @@ +--- + +pdns_recursor_requirements: [] +pdns_recursor_packages: [] + +# The directory where the PowerDNS configuration is located +pdns_recursor_config_dir: '/etc/powerdns' + +pdns_recursor_owner: pdns-recursor +pdns_recursor_group: pdns-recursor + +pdns_recursor_defaults_service: + name: pdns-recursor + # pdns-recursor@.service + +pdns_recursor_defaults_carbon: + instance: "" # recursor + interval: "" # 30 + ns: "" # pdns + ourname: '' + server: [] + +pdns_recursor_defaults_dnssec: + aggressive_cache_max_nsec3_hash_cost: 150 + aggressive_cache_min_nsec3_hit_ratio: 2000 + aggressive_nsec_cache_size: 100000 + disabled_algorithms: [] + log_bogus: false + max_dnskeys: 2 + max_ds_per_zone: 8 + max_nsec3_hash_computations_per_query: 600 + max_nsec3s_per_record: 10 + max_rrsigs_per_record: 2 + max_signature_validations_per_query: 30 + negative_trustanchors: [] + nsec3_max_iterations: 50 + signature_inception_skew: 60 + trustanchorfile: '' + trustanchorfile_interval: 24 + trustanchors: [] + validation: process + x_dnssec_names: [] + +pdns_recursor_defaults_ecs: + add_for: + - 0.0.0.0/0 + - ::/0 + - '!127.0.0.0/8' + - '!10.0.0.0/8' + - '!100.64.0.0/10' + - '!169.254.0.0/16' + - '!192.168.0.0/16' + - '!172.16.0.0/12' + - '!::1/128' + - '!fc00::/7' + - '!fe80::/10' + cache_limit_ttl: 0 + ipv4_bits: 24 + ipv4_cache_bits: 24 + ipv4_never_cache: false + ipv6_bits: 56 + ipv6_cache_bits: 56 + ipv6_never_cache: false + minimum_ttl_override: 1 + scope_zero_address: '' + +pdns_recursor_defaults_incoming: + allow_from: + - 127.0.0.0/8 + - 10.0.0.0/8 + - 100.64.0.0/10 + - 169.254.0.0/16 + - 192.168.0.0/16 + - 172.16.0.0/12 + - ::1/128 + - fc00::/7 + - fe80::/10 + allow_from_file: '' + allow_no_rd: false + allow_notify_for: [] + allow_notify_for_file: '' + allow_notify_from: [] + allow_notify_from_file: '' + distribution_load_factor: 0.0 + distribution_pipe_buffer_size: 0 + distributor_threads: 0 + edns_padding_from: [] + edns_padding_mode: padded-queries-only + edns_padding_tag: 7830 + gettag_needs_edns_options: false + listen: + - 127.0.0.1 + max_concurrent_requests_per_tcp_connection: 10 + 
max_tcp_clients: 1024 + max_tcp_per_client: 0 + max_tcp_queries_per_connection: 0 + max_udp_queries_per_round: 10000 + non_local_bind: false + pdns_distributes_queries: false + port: 53 + proxy_protocol_exceptions: [] + proxy_protocol_from: [] + proxy_protocol_maximum_size: 512 + proxymappings: [] + reuseport: true + tcp_fast_open: 0 + tcp_timeout: 2 + udp_truncation_threshold: 1232 + use_incoming_edns_subnet: false + +pdns_recursor_defaults_logging: + common_errors: false + disable_syslog: true + dnstap_framestream_servers: [] + dnstap_nod_framestream_servers: [] + facility: '' + loglevel: 2 + outgoing_protobuf_servers: [] + protobuf_servers: [] + protobuf_use_kernel_timestamp: false + quiet: true + rpz_changes: false + statistics_interval: 1800 + structured_logging: true + structured_logging_backend: default + timestamp: true + trace: fail # set to 'fail' to only log failing domains + +pdns_recursor_defaults_nod: + db_size: 67108864 + db_snapshot_interval: 600 + history_dir: /usr/var/lib/pdns-recursor/nod + ignore_list: [] + ignore_list_file: '' + log: true + lookup: '' + pb_tag: pdns-nod + tracking: false + unique_response_db_size: 67108864 + unique_response_history_dir: /usr/var/lib/pdns-recursor/udr + unique_response_ignore_list: [] + unique_response_log: true + unique_response_pb_tag: pdns-udr + unique_response_tracking: false + +pdns_recursor_defaults_outgoing: + bypass_server_throttling_probability: 25 + dont_query: + - 127.0.0.0/8 + - 10.0.0.0/8 + - 100.64.0.0/10 + - 169.254.0.0/16 + - 192.168.0.0/16 + - 172.16.0.0/12 + - ::1/128 + - fc00::/7 + - fe80::/10 + - 0.0.0.0/8 + - 192.0.0.0/24 + - 192.0.2.0/24 + - 198.51.100.0/24 + - 203.0.113.0/24 + - 240.0.0.0/4 + - ::/96 + - ::ffff:0:0/96 + - 100::/64 + - 2001:db8::/32 + dont_throttle_names: [] + dont_throttle_netmasks: [] + dot_to_auth_names: [] + dot_to_port_853: true + edns_bufsize: 1232 + edns_padding: true + edns_subnet_allow_list: [] + + lowercase: false + max_busy_dot_probes: 0 + max_ns_address_qperq: 10 + max_ns_per_resolve: 13 + max_qperq: 50 + network_timeout: 1500 + non_resolving_ns_max_fails: 5 + non_resolving_ns_throttle_time: 60 + server_down_max_fails: 64 + server_down_throttle_time: 60 + single_socket: false + source_address: + - 0.0.0.0 + tcp_fast_open_connect: false + tcp_max_idle_ms: 10000 + tcp_max_idle_per_auth: 10 + tcp_max_idle_per_thread: 100 + tcp_max_queries: 0 + udp_source_port_avoid: + - '4791' + - '11211' + udp_source_port_max: 65535 + udp_source_port_min: 1024 + +pdns_recursor_defaults_packetcache: + disable: false + max_entries: 500000 + negative_ttl: 60 + servfail_ttl: 60 + shards: 1024 + ttl: 86400 + +pdns_recursor_defaults_recordcache: + limit_qtype_any: true + locked_ttl_perc: 0 + max_cache_bogus_ttl: 3600 + max_entries: 1000000 + max_negative_ttl: 3600 + max_rrset_size: 256 + max_ttl: 86400 + refresh_on_ttl_perc: 0 + serve_stale_extensions: 0 + shards: 1024 + zonetocaches: [] + +pdns_recursor_defaults_recursor: + config_dir: /etc/powerdns + cpu_map: [] + daemon: false + forward_zones: [] + forward_zones_file: '' + forward_zones_recurse: [] + forwarding_catalog_zones: [] + include_dir: /etc/powerdns/recursor.d + lua_config_file: '' + lua_dns_script: '' + setgid: pdns-recursor + setuid: pdns-recursor + stack_cache_size: 100 + stack_size: 200000 + stats_api_disabled_list: [] + stats_carbon_disabled_list: [] + stats_rec_control_disabled_list: [] + stats_ringbuffer_entries: 10000 + stats_snmp_disabled_list: [] + system_resolver_interval: 0 + system_resolver_self_resolve_check: true + system_resolver_ttl: 
0 + tcp_threads: 1 + threads: 2 + +pdns_recursor_defaults_snmp: + agent: false + daemon_socket: '' + +pdns_recursor_defaults_webservice: + address: 127.0.0.1 + allow_from: + - 127.0.0.1 + - ::1 + api_dir: "" + api_key: "" + hash_plaintext_credentials: false + loglevel: normal + password: "" + port: 8082 + webserver: false + +# ------------- + +pdns_recursor_defaults_lua: + config_file: "" + dns_script: "" diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pihole/.editorconfig b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pihole/.editorconfig new file mode 100644 index 0000000..898cdbd --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pihole/.editorconfig @@ -0,0 +1,23 @@ +# EditorConfig helps developers define and maintain consistent +# coding styles between different editors and IDEs +# https://editorconfig.org/ + + +root = true + +[*] +indent_style = space +indent_size = 2 + +end_of_line = lf +charset = utf-8 +trim_trailing_whitespace = true +insert_final_newline = true +max_line_length = 100 + +[*.py] +indent_style = space +indent_size = 4 + +[*.md] +trim_trailing_whitespace = false diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pihole/.flake8 b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pihole/.flake8 new file mode 100644 index 0000000..1962f7e --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pihole/.flake8 @@ -0,0 +1,19 @@ +[flake8] + +# E221 multiple spaces before operator +# E251 unexpected spaces around keyword / parameter equals + +ignore = E221,E251 + +exclude = + # No need to traverse our git directory + .git, + # There's no value in checking cache directories + __pycache__, + .tox + +# E203: https://github.com/python/black/issues/315 +# ignore = D,E741,W503,W504,H,E501,E203 + +max-line-length = 195 + diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pihole/.gitignore b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pihole/.gitignore new file mode 100644 index 0000000..8262c53 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pihole/.gitignore @@ -0,0 +1,7 @@ +.ansible +.tox +.galaxy_install_info +*kate-swp +__pycache__ +.cache +.directory diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pihole/.yamllint b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pihole/.yamllint new file mode 100644 index 0000000..a66c74c --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pihole/.yamllint @@ -0,0 +1,40 @@ +--- +# Based on ansible-lint config +extends: default + +rules: + braces: + max-spaces-inside: 1 + level: error + brackets: + max-spaces-inside: 1 + level: error + colons: + max-spaces-after: -1 + level: error + commas: + max-spaces-after: -1 + level: error + comments: + min-spaces-from-content: 1 # prettier compatibility + comments-indentation: disable + document-start: disable + empty-lines: + max: 3 + level: error + hyphens: + level: error + indentation: + spaces: 2 + key-duplicates: enable + line-length: + max: 195 + level: warning + new-line-at-end-of-file: disable + new-lines: + type: unix + trailing-spaces: disable + truthy: disable + octal-values: + forbid-implicit-octal: true # yamllint defaults to false + forbid-explicit-octal: true # yamllint defaults to false diff --git 
a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pihole/CONTRIBUTING.md b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pihole/CONTRIBUTING.md new file mode 100644 index 0000000..e3cd4cc --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pihole/CONTRIBUTING.md @@ -0,0 +1,31 @@ +Contributing +============ +If you want to contribute to a project and make it better, your help is very welcome. +Contributing is also a great way to learn more about social coding on GitHub, new technologies +and their ecosystems, and how to make constructive, helpful bug reports, feature requests and the +noblest of all contributions: a good, clean pull request. + +### How to make a clean pull request + +Look for a project's contribution instructions. If there are any, follow them. + +- Create a personal fork of the project on GitHub. +- Clone the fork on your local machine. Your remote repo on GitHub is called `origin`. +- Add the original repository as a remote called `upstream`. +- If you created your fork a while ago, be sure to pull upstream changes into your local repository. +- Create a new branch to work on! Branch from `develop` if it exists, else from `master`. +- Implement/fix your feature, comment your code. +- Follow the code style of the project, including indentation. +- If the project has tests, run them! +- Write or adapt tests as needed. +- Add or change the documentation as needed. +- Squash your commits into a single commit. Create a new branch if necessary. +- Push your branch to your fork on GitHub, the remote `origin`. +- From your fork open a pull request in the correct branch. Target the project's `develop` branch if there is one, else go for `master`! +- If the maintainer requests further changes, just push them to your branch. The PR will be updated automatically. +- Once the pull request is approved and merged, you can pull the changes from `upstream` to your local repo and delete + your extra branch(es). + +And last but not least: Always write your commit messages in the present tense. +Your commit message should describe what the commit, when applied, does to the +code – not what you did to the code. diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pihole/LICENSE b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pihole/LICENSE new file mode 100644 index 0000000..8c8472f --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pihole/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity.
+ + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright 2020-2021 Bodo Schulz + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
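The license appendix above asks for the boilerplate notice to be enclosed in the comment syntax of the target file format. As a sketch, wrapped in YAML comment syntax for one of this collection's YAML files, it would read as below; only the `#` prefix is format-specific, the notice text itself stays verbatim:

```yaml
---
# Copyright 2020-2021 Bodo Schulz
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
```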
diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pihole/Makefile b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pihole/Makefile new file mode 100644 index 0000000..5430887 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pihole/Makefile @@ -0,0 +1,25 @@ +# +export TOX_SCENARIO ?= default +export TOX_ANSIBLE ?= ansible_8.5 + +.PHONY: converge destroy verify test lint gh-clean + +default: converge + +converge: + @hooks/converge + +destroy: + @hooks/destroy + +verify: + @hooks/verify + +test: + @hooks/test + +lint: + @hooks/lint + +gh-clean: + @hooks/gh-clean diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pihole/README.md b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pihole/README.md new file mode 100644 index 0000000..58c3ac1 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pihole/README.md @@ -0,0 +1,267 @@ +# ansible-pihole + + +```yaml +pihole_version: 6.1.2 + +pihole_direct_download: false + +pihole_arch: + install_type: archive # source | archive + source_repository: https://github.com/pi-hole/pi-hole.git + archive: https://github.com/pi-hole/pi-hole/archive/refs/tags/v{{ pihole_version }}.tar.gz + +pihole_config: {} +# dns: +# upstreams: +# - '1.1.1.1' +# - '1.0.0.1' +# - '9.9.9.9' +# CNAMEdeepInspect: true +# blockESNI: true +# EDNS0ECS: true +# ignoreLocalhost: false +# showDNSSEC: true +# analyzeOnlyAandAAAA: false +# piholePTR: PI.HOLE +# replyWhenBusy: ALLOW +# blockTTL: 2 +# domainNeeded: false +# expandHosts: false +# domain: pi.hole +# bogusPriv: true +# dnssec: false +# interface: "{{ ansible_default_ipv4.interface }}" +# listeningMode: LOCAL +# queryLogging: true +# port: 53 +# cache: +# size: 10000 +# optimizer: 3600 +# upstreamBlockedTTL: 86400 +# blocking: +# active: true +# mode: NULL +# edns: TEXT +# specialDomains: +# mozillaCanary: true +# iCloudPrivateRelay: true +# designatedResolver: true +# reply: +# host: +# force4: false +# force6: false +# blocking: +# force4: false +# force6: false +# rateLimit: +# count: 1000 +# interval: 60 +# dhcp: +# active: false +# ipv6: false +# rapidCommit: false +# multiDNS: false +# logging: false +# ignoreUnknownClients: false +# ntp: +# ipv4: +# active: true +# ipv6: +# active: true +# sync: +# active: true +# server: pool.ntp.org +# interval: 3600 +# count: 8 +# rtc: +# set: false +# utc: true +# resolver: +# resolveIPv4: true +# resolveIPv6: true +# networkNames: true +# refreshNames: IPV4_ONLY +# database: +# DBimport: true +# maxDBdays: 91 +# DBinterval: 60 +# useWAL: true +# network: +# parseARPcache: true +# expire: 91 +# webserver: +# domain: pi.hole +# port: 80 +# threads: 50 +# headers: +# - 'X-DNS-Prefetch-Control: off' +# - "Content-Security-Policy: default-src 'self' 'unsafe-inline';" +# - 'X-Frame-Options: DENY' +# - 'X-XSS-Protection: 0' +# - 'X-Content-Type-Options: nosniff' +# - 'Referrer-Policy: strict-origin-when-cross-origin' +# serve_all: false +# session: +# timeout: 3200 +# restore: true +# tls: +# cert: /etc/pihole/tls.pem +# paths: +# webroot: /var/www/html +# webhome: /admin/ +# interface: +# boxed: true +# theme: default-light +# api: +# max_sessions: 16 +# prettyJSON: true +# pwhash: $BALLOON-SHA256$v=1$s=1024,t=32$RSqXeRU/3QMJIAVOw0jvRA==$SlkKXw2Xhq5Y0OlnGiH+BOpm0MPdPYn3vgXHqucnRjg= +# app_sudo: false +# cli_pw: true +# maxHistory: 86400 +# maxClients: 10 +# client_history_global_max: true +# allow_destructive: true +# 
temp:
+# limit: 60.0
+# unit: C
+# files:
+# pid: /run/pihole-FTL.pid
+# database: /etc/pihole/pihole-FTL.db
+# gravity: /etc/pihole/gravity.db
+# gravity_tmp: /var/tmp
+# macvendor: /etc/pihole/macvendor.db
+# log:
+# ftl: /var/log/pihole/FTL.log
+# dnsmasq: /var/log/pihole/pihole.log
+# webserver: /var/log/pihole/webserver.log
+# misc:
+# privacylevel: 0
+# delay_startup: 10
+# nice: -10
+# addr2line: true
+# etc_dnsmasq_d: false
+# extraLogging: false
+# readOnly: false
+# check:
+# load: true
+# shmem: 90
+# disk: 90
+# debug:
+# database: false
+# networking: false
+# locks: false
+# queries: false
+# flags: false
+# shmem: false
+# gc: false
+# arp: false
+# regex: false
+# api: false
+# tls: false
+# overtime: false
+# status: false
+# caps: false
+# dnssec: false
+# vectors: false
+# resolver: false
+# edns0: false
+# clients: false
+# aliasclients: false
+# events: false
+# helper: false
+# config: false
+# inotify: false
+# webserver: false
+# extra: false
+# reserved: false
+# ntp: false
+# netlink: false
+# all: false
+
+pihole_groups: []
+# - name: admin
+# description: "Admin Group"
+# enabled: true
+
+pihole_clients: []
+# - ip: 192.168.0.10
+# name: laptop
+# comment: "Kids Laptop"
+# enabled: true
+# groups:
+# - kids
+
+# Custom lists (optional)
+pihole_custom_denylists: []
+# Examples of additional blocklists
+# - address: "https://raw.githubusercontent.com/StevenBlack/hosts/master/hosts"
+# - address: "https://someonewhocares.org/hosts/zero/hosts"
+# - address: "https://raw.githubusercontent.com/AdguardTeam/AdguardFilters/master/BaseFilter/sections/adservers.txt"
+# - address: "https://pgl.yoyo.org/adservers/serverlist.php?hostformat=hosts&showintro=0&mimetype=plaintext"
+# - address: "https://raw.githubusercontent.com/crazy-max/WindowsSpyBlocker/master/data/hosts/spy.txt"
+# - address: "https://raw.githubusercontent.com/hoshsadiq/adblock-nocoin-list/master/hosts.txt" # crypto-mining blocker
+# - address: "https://raw.githubusercontent.com/Perflyst/PiHoleBlocklist/master/SmartTV-AGH.txt" # Smart TV telemetry
+# - address: https://raw.githubusercontent.com/StevenBlack/hosts/master/hosts
+# comment: "default list"
+# enabled: true
+
+pihole_domain_allowlist: []
+ # Examples of domains that should NOT be blocked
+ # Frequently needed domains:
+ # - "googleadservices.com" # Google Ads (needed for some shopping sites)
+ # - "googlesyndication.com" # Google AdSense
+ # - "amazon-adsystem.com" # Amazon advertising
+ # - "doubleclick.net" # Google DoubleClick (needed for some sites)
+ #
+ # Microsoft/Windows Updates:
+ # - "delivery.mp.microsoft.com" # Windows Updates
+ # - "tlu.dl.delivery.mp.microsoft.com"
+ # - "download.windowsupdate.com"
+ #
+ # Social Media & Messaging:
+ # - "graph.facebook.com" # Facebook API
+ # - "scontent.xx.fbcdn.net" # Facebook content
+ # - "web.whatsapp.com" # WhatsApp Web
+ #
+ # Streaming Services:
+ # - "widget-cdn.rpxnow.com" # For various login widgets
+ # - "secure.netflix.com" # Netflix
+ # - "tv.youtube.com" # YouTube TV
+ #
+ # Gaming:
+ # - "clientconfig.rpx.ol.epicgames.com" # Epic Games
+ # - "tracking.epicgames.com" # Epic Games (sometimes needed)
+
+pihole_domain_denylist: []
+ # Examples of additional domains to block
+ # Social media (if desired):
+ # - "facebook.com"
+ # - "twitter.com"
+ # - "instagram.com"
+ # - "tiktok.com"
+ # - "snapchat.com"
+ #
+ # Tracking & Analytics:
+ # - "google-analytics.com"
+ # - "googletagmanager.com"
+ # - "hotjar.com"
+ # - "mouseflow.com"
+ # - "crazyegg.com"
+ #
+ 
# Advertising & affiliate:
+ # - "outbrain.com"
+ # - "taboola.com"
+ # - "shareasale.com"
+ # - "commission-junction.com"
+ #
+ # Crypto mining:
+ # - "coin-hive.com"
+ # - "coinhive.com"
+ # - "jsecoin.com"
+ #
+ # Malware/phishing (examples):
+ # - "malware-domain.com"
+ # - "phishing-site.net"
+```
diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pihole/defaults/main.yml b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pihole/defaults/main.yml
new file mode 100644
index 0000000..5f6a26e
--- /dev/null
+++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pihole/defaults/main.yml
@@ -0,0 +1,264 @@
+---
+
+pihole_version: 6.1.2
+
+pihole_direct_download: false
+
+pihole_arch:
+ install_type: archive # source | archive
+ source_repository: https://github.com/pi-hole/pi-hole.git
+ archive: https://github.com/pi-hole/pi-hole/archive/refs/tags/v{{ pihole_version }}.tar.gz
+
+pihole_config: {}
+# dns:
+# upstreams:
+# - '1.1.1.1'
+# - '1.0.0.1'
+# - '9.9.9.9'
+# CNAMEdeepInspect: true
+# blockESNI: true
+# EDNS0ECS: true
+# ignoreLocalhost: false
+# showDNSSEC: true
+# analyzeOnlyAandAAAA: false
+# piholePTR: PI.HOLE
+# replyWhenBusy: ALLOW
+# blockTTL: 2
+# domainNeeded: false
+# expandHosts: false
+# domain: pi.hole
+# bogusPriv: true
+# dnssec: false
+# interface: "{{ ansible_facts.default_ipv4.interface }}"
+# listeningMode: LOCAL
+# queryLogging: true
+# port: 53
+# cache:
+# size: 10000
+# optimizer: 3600
+# upstreamBlockedTTL: 86400
+# blocking:
+# active: true
+# mode: NULL
+# edns: TEXT
+# specialDomains:
+# mozillaCanary: true
+# iCloudPrivateRelay: true
+# designatedResolver: true
+# reply:
+# host:
+# force4: false
+# force6: false
+# blocking:
+# force4: false
+# force6: false
+# rateLimit:
+# count: 1000
+# interval: 60
+# dhcp:
+# active: false
+# ipv6: false
+# rapidCommit: false
+# multiDNS: false
+# logging: false
+# ignoreUnknownClients: false
+# ntp:
+# ipv4:
+# active: true
+# ipv6:
+# active: true
+# sync:
+# active: true
+# server: pool.ntp.org
+# interval: 3600
+# count: 8
+# rtc:
+# set: false
+# utc: true
+# resolver:
+# resolveIPv4: true
+# resolveIPv6: true
+# networkNames: true
+# refreshNames: IPV4_ONLY
+# database:
+# DBimport: true
+# maxDBdays: 91
+# DBinterval: 60
+# useWAL: true
+# network:
+# parseARPcache: true
+# expire: 91
+# webserver:
+# domain: pi.hole
+# port: 80
+# threads: 50
+# headers:
+# - 'X-DNS-Prefetch-Control: off'
+# - "Content-Security-Policy: default-src 'self' 'unsafe-inline';"
+# - 'X-Frame-Options: DENY'
+# - 'X-XSS-Protection: 0'
+# - 'X-Content-Type-Options: nosniff'
+# - 'Referrer-Policy: strict-origin-when-cross-origin'
+# serve_all: false
+# session:
+# timeout: 3200
+# restore: true
+# tls:
+# cert: /etc/pihole/tls.pem
+# paths:
+# webroot: /var/www/html
+# webhome: /admin/
+# interface:
+# boxed: true
+# theme: default-light
+# api:
+# max_sessions: 16
+# prettyJSON: true
+# pwhash: $BALLOON-SHA256$v=1$s=1024,t=32$RSqXeRU/3QMJIAVOw0jvRA==$SlkKXw2Xhq5Y0OlnGiH+BOpm0MPdPYn3vgXHqucnRjg=
+# app_sudo: false
+# cli_pw: true
+# maxHistory: 86400
+# maxClients: 10
+# client_history_global_max: true
+# allow_destructive: true
+# temp:
+# limit: 60.0
+# unit: C
+# files:
+# pid: /run/pihole-FTL.pid
+# database: /etc/pihole/pihole-FTL.db
+# gravity: /etc/pihole/gravity.db
+# gravity_tmp: /var/tmp
+# macvendor: /etc/pihole/macvendor.db
+# log:
+# ftl: /var/log/pihole/FTL.log
+# dnsmasq: /var/log/pihole/pihole.log
+# webserver: /var/log/pihole/webserver.log
+# misc:
+# 
privacylevel: 0
+# delay_startup: 10
+# nice: -10
+# addr2line: true
+# etc_dnsmasq_d: false
+# extraLogging: false
+# readOnly: false
+# check:
+# load: true
+# shmem: 90
+# disk: 90
+# debug:
+# database: false
+# networking: false
+# locks: false
+# queries: false
+# flags: false
+# shmem: false
+# gc: false
+# arp: false
+# regex: false
+# api: false
+# tls: false
+# overtime: false
+# status: false
+# caps: false
+# dnssec: false
+# vectors: false
+# resolver: false
+# edns0: false
+# clients: false
+# aliasclients: false
+# events: false
+# helper: false
+# config: false
+# inotify: false
+# webserver: false
+# extra: false
+# reserved: false
+# ntp: false
+# netlink: false
+# all: false
+
+pihole_groups: []
+# - name: admin
+# description: "Admin Group"
+# enabled: true
+
+pihole_clients: []
+# - ip: 192.168.0.10
+# name: laptop
+# comment: "Kids Laptop"
+# enabled: true
+# groups:
+# - kids
+
+# Custom lists (optional)
+pihole_custom_denylists: []
+# Examples of additional blocklists
+# - address: "https://raw.githubusercontent.com/StevenBlack/hosts/master/hosts"
+# - address: "https://someonewhocares.org/hosts/zero/hosts"
+# - address: "https://raw.githubusercontent.com/AdguardTeam/AdguardFilters/master/BaseFilter/sections/adservers.txt"
+# - address: "https://pgl.yoyo.org/adservers/serverlist.php?hostformat=hosts&showintro=0&mimetype=plaintext"
+# - address: "https://raw.githubusercontent.com/crazy-max/WindowsSpyBlocker/master/data/hosts/spy.txt"
+# - address: "https://raw.githubusercontent.com/hoshsadiq/adblock-nocoin-list/master/hosts.txt" # crypto-mining blocker
+# - address: "https://raw.githubusercontent.com/Perflyst/PiHoleBlocklist/master/SmartTV-AGH.txt" # Smart TV telemetry
+# - address: https://raw.githubusercontent.com/StevenBlack/hosts/master/hosts
+# comment: "default list"
+# enabled: true
+
+pihole_domain_allowlist: []
+ # Examples of domains that should NOT be blocked
+ # Frequently needed domains:
+ # - "googleadservices.com" # Google Ads (needed for some shopping sites)
+ # - "googlesyndication.com" # Google AdSense
+ # - "amazon-adsystem.com" # Amazon advertising
+ # - "doubleclick.net" # Google DoubleClick (needed for some sites)
+ #
+ # Microsoft/Windows Updates:
+ # - "delivery.mp.microsoft.com" # Windows Updates
+ # - "tlu.dl.delivery.mp.microsoft.com"
+ # - "download.windowsupdate.com"
+ #
+ # Social Media & Messaging:
+ # - "graph.facebook.com" # Facebook API
+ # - "scontent.xx.fbcdn.net" # Facebook content
+ # - "web.whatsapp.com" # WhatsApp Web
+ #
+ # Streaming Services:
+ # - "widget-cdn.rpxnow.com" # For various login widgets
+ # - "secure.netflix.com" # Netflix
+ # - "tv.youtube.com" # YouTube TV
+ #
+ # Gaming:
+ # - "clientconfig.rpx.ol.epicgames.com" # Epic Games
+ # - "tracking.epicgames.com" # Epic Games (sometimes needed)
+
+pihole_domain_denylist: []
+ # Examples of additional domains to block
+ # Social media (if desired):
+ # - "facebook.com"
+ # - "twitter.com"
+ # - "instagram.com"
+ # - "tiktok.com"
+ # - "snapchat.com"
+ #
+ # Tracking & Analytics:
+ # - "google-analytics.com"
+ # - "googletagmanager.com"
+ # - "hotjar.com"
+ # - "mouseflow.com"
+ # - "crazyegg.com"
+ #
+ # Advertising & affiliate:
+ # - "outbrain.com"
+ # - "taboola.com"
+ # - "shareasale.com"
+ # - "commission-junction.com"
+ #
+ # Crypto mining:
+ # - "coin-hive.com"
+ # - "coinhive.com"
+ # - "jsecoin.com"
+ #
+ # Malware/phishing (examples):
+ # - "malware-domain.com"
+ # - "phishing-site.net"
diff --git
a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pihole/handlers/main.yml b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pihole/handlers/main.yml
new file mode 100644
index 0000000..a40acf1
--- /dev/null
+++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pihole/handlers/main.yml
@@ -0,0 +1,16 @@
+---
+
+- name: restart pihole-FTL
+ ansible.builtin.systemd:
+ name: pihole-FTL
+ state: restarted
+
+- name: pihole update gravity
+ bodsch.dns.pihole_command:
+ command: update_gravity
+ register: gravity_result
+
+- name: pihole reload lists
+ bodsch.dns.pihole_command:
+ command: reloadlists
+ register: reload_result
diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pihole/meta/main.yml b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pihole/meta/main.yml
new file mode 100644
index 0000000..30cb564
--- /dev/null
+++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pihole/meta/main.yml
@@ -0,0 +1,24 @@
+---
+
+galaxy_info:
+ role_name: pihole
+ namespace: bodsch
+
+ author: Bodo Schulz
+ description: installs and configures pihole
+
+ license: Apache-2.0
+ # min_ansible_version: "2.9"
+
+ platforms:
+ - name: ArchLinux
+ - name: Debian
+ versions:
+ # 12
+ - bookworm
+
+ galaxy_tags: []
+
+dependencies: []
+
+...
diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pihole/molecule/configured/converge.yml b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pihole/molecule/configured/converge.yml
new file mode 100644
index 0000000..d616057
--- /dev/null
+++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pihole/molecule/configured/converge.yml
@@ -0,0 +1,12 @@
+---
+
+- name: converge
+ hosts: all
+ any_errors_fatal: true
+ become: false
+
+ environment:
+ NETRC: ''
+
+ roles:
+ - role: bodsch.dns.pihole
diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pihole/molecule/configured/group_vars/all/vars.yml b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pihole/molecule/configured/group_vars/all/vars.yml
new file mode 100644
index 0000000..f055354
--- /dev/null
+++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pihole/molecule/configured/group_vars/all/vars.yml
@@ -0,0 +1,138 @@
+---
+
+pihole_admin_password: "SuperSicher2024!"
+
+pihole_config:
+ dns:
+ interface: "{{ ansible_facts.default_ipv4.interface }}"
+ domain: pi.hole
+ cache:
+ size: 10000
+ optimizer: 3600
+ upstreams:
+ - "1.1.1.1"
+ - "9.9.9.9"
+ bogusPriv: true
+ dnssec: false
+ webserver:
+ domain: pi.hole
+ port: 8080
+ session:
+ timeout: 1600
+ misc:
+ delay_startup: 10
+
+pihole_groups:
+ - name: admin
+ description: "Admin Group"
+ enabled: true
+ - name: kids
+ description: "Kids only"
+ enabled: true
+
+pihole_clients:
+ - ip: 192.168.0.10
+ name: laptop
+ comment: "Kids' laptop"
+ enabled: true
+ groups:
+ - kids
+
+ - ip: 192.168.0.20
+ name: necromonger
+ enabled: true
+ groups:
+ - admin
+
+ - ip: 192.168.178.1
+ name: router
+ enabled: true
+ groups:
+ - admin
+
+ - ip: 192.168.178.11
+ name: admin-pc
+ enabled: true
+ groups:
+ - admin
+
+# Additional blocklists
+pihole_custom_denylists:
+ - address: https://raw.githubusercontent.com/StevenBlack/hosts/master/hosts
+ #
+ - address: https://raw.githubusercontent.com/PolishFiltersTeam/KADhosts/master/KADhosts.txt
+ - address: https://raw.githubusercontent.com/FadeMind/hosts.extras/master/add.Spam/hosts
+ - address: https://v.firebog.net/hosts/static/w3kbl.txt
+ - address: https://raw.githubusercontent.com/matomo-org/referrer-spam-blacklist/master/spammers.txt
+ - address: https://someonewhocares.org/hosts/zero/hosts
+ - address: https://raw.githubusercontent.com/VeleSila/yhosts/master/hosts
+ - address: https://winhelp2002.mvps.org/hosts.txt
+ - address: https://v.firebog.net/hosts/neohostsbasic.txt
+ - address: https://raw.githubusercontent.com/RooneyMcNibNug/pihole-stuff/master/SNAFU.txt
+ - address: https://paulgb.github.io/BarbBlock/blacklists/hosts-file.txt
+ #
+ - address: https://adaway.org/hosts.txt
+ - address: https://v.firebog.net/hosts/AdguardDNS.txt
+ - address: https://v.firebog.net/hosts/Admiral.txt
+ - address: https://raw.githubusercontent.com/anudeepND/blacklist/master/adservers.txt
+ - address: https://v.firebog.net/hosts/Easylist.txt
+ - address: https://pgl.yoyo.org/adservers/serverlist.php?hostformat=hosts&showintro=0&mimetype=plaintext
+ - address: https://raw.githubusercontent.com/FadeMind/hosts.extras/master/UncheckyAds/hosts
+ - address: https://raw.githubusercontent.com/bigdargon/hostsVN/master/hosts
+ #
+ - address: https://v.firebog.net/hosts/Easyprivacy.txt
+ - address: https://v.firebog.net/hosts/Prigent-Ads.txt
+ - address: https://raw.githubusercontent.com/FadeMind/hosts.extras/master/add.2o7Net/hosts
+ - address: https://raw.githubusercontent.com/crazy-max/WindowsSpyBlocker/master/data/hosts/spy.txt
+ - address: https://hostfiles.frogeye.fr/firstparty-trackers-hosts.txt
+ - address: https://raw.githubusercontent.com/Perflyst/PiHoleBlocklist/master/android-tracking.txt
+ - address: https://raw.githubusercontent.com/Perflyst/PiHoleBlocklist/master/SmartTV.txt
+ - address: https://raw.githubusercontent.com/Perflyst/PiHoleBlocklist/master/AmazonFireTV.txt
+ - address: https://gitlab.com/quidsup/notrack-blocklists/raw/master/notrack-blocklist.txt
+ #
+ - address: https://raw.githubusercontent.com/DandelionSprout/adfilt/master/Alternate%20versions%20Anti-Malware%20List/AntiMalwareHosts.txt
+ - address: https://v.firebog.net/hosts/Prigent-Crypto.txt
+ - address: https://raw.githubusercontent.com/FadeMind/hosts.extras/master/add.Risk/hosts
+ - address: https://bitbucket.org/ethanr/dns-blacklists/raw/8575c9f96e5b4a1308f2f12394abd86d0927a4a0/bad_lists/Mandiant_APT1_Report_Appendix_D.txt
+ - address:
https://phishing.army/download/phishing_army_blocklist_extended.txt
+ - address: https://gitlab.com/quidsup/notrack-blocklists/raw/master/notrack-malware.txt
+ - address: https://v.firebog.net/hosts/RPiList-Malware.txt
+ - address: https://raw.githubusercontent.com/Spam404/lists/master/main-blacklist.txt
+ - address: https://raw.githubusercontent.com/AssoEchap/stalkerware-indicators/master/generated/hosts
+ - address: https://urlhaus.abuse.ch/downloads/hostfile/
+ - address: https://lists.cyberhost.uk/malware.txt
+ - address: https://malware-filter.gitlab.io/malware-filter/phishing-filter-hosts.txt
+ - address: https://v.firebog.net/hosts/Prigent-Malware.txt
+ - address: https://raw.githubusercontent.com/jarelllama/Scam-Blocklist/main/lists/wildcard_domains/scams.txt
+ - address: https://v.firebog.net/hosts/RPiList-Phishing.txt
+
+# Allow important domains
+pihole_domain_allowlist:
+ # Microsoft Updates
+ - "delivery.mp.microsoft.com"
+ - "tlu.dl.delivery.mp.microsoft.com"
+ - "download.windowsupdate.com"
+ # Google services (needed for some sites)
+ - "googleadservices.com"
+ - "googlesyndication.com"
+ # Social Media APIs
+ - "graph.facebook.com"
+ - "scontent.xx.fbcdn.net"
+ # Streaming
+ - "widget-cdn.rpxnow.com"
+ - "secure.netflix.com"
+ # Gaming
+ - "clientconfig.rpx.ol.epicgames.com"
+
+# Block additionally
+pihole_domain_denylist:
+ # Tracking
+ - "google-analytics.com"
+ - "googletagmanager.com"
+ - "hotjar.com"
+ - "mouseflow.com"
+ # Social media (optional)
+ - "tiktok.com"
+ # Advertising
+ - "outbrain.com"
+ - "taboola.com"
diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pihole/molecule/configured/molecule.yml b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pihole/molecule/configured/molecule.yml
new file mode 100644
index 0000000..e46b840
--- /dev/null
+++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pihole/molecule/configured/molecule.yml
@@ -0,0 +1,55 @@
+---
+role_name_check: 1
+
+dependency:
+ name: galaxy
+
+driver:
+ name: docker
+
+platforms:
+ - name: instance
+ image: "ghcr.io/bodsch/docker-ansible/ansible-${DISTRIBUTION:-debian:12}"
+ command: ${MOLECULE_DOCKER_COMMAND:-""}
+ docker_host: "${DOCKER_HOST:-unix:///run/docker.sock}"
+ privileged: true
+ pre_build_image: true
+ cgroupns_mode: host
+ volumes:
+ - /sys/fs/cgroup:/sys/fs/cgroup:rw
+ - /var/lib/containerd
+ capabilities:
+ - ALL
+ tmpfs:
+ - /run
+ - /tmp
+ published_ports:
+ - 80:80
+ - 8080:8080
+
+provisioner:
+ name: ansible
+ ansible_args:
+ - --diff
+ - -v
+ config_options:
+ defaults:
+ deprecation_warnings: true
+ callback_result_format: yaml
+ callbacks_enabled: profile_tasks
+ gathering: smart
+ fact_caching_timeout: 320
+
+scenario:
+ test_sequence:
+ - destroy
+ - dependency
+ - create
+ - prepare
+ - converge
+ - idempotence
+ - verify
+ - destroy
+
+verifier:
+ name: testinfra
diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pihole/molecule/configured/prepare.yml b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pihole/molecule/configured/prepare.yml
new file mode 100644
index 0000000..032f3d2
--- /dev/null
+++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pihole/molecule/configured/prepare.yml
@@ -0,0 +1,55 @@
+---
+
+- name: information
+ hosts: all
+ gather_facts: true
+
+ pre_tasks:
+ - name: arch- / artixlinux
+ when:
+ - ansible_facts.distribution | lower == 'archlinux' or
+ ansible_facts.os_family | lower == 'artix linux'
+ block:
+ - name: update pacman system + ansible.builtin.command: | + pacman --refresh --sync --sysupgrade --noconfirm + register: pacman + changed_when: pacman.rc != 0 + failed_when: pacman.rc != 0 + + - name: create depends service + ansible.builtin.copy: + mode: "0755" + dest: /etc/init.d/net + content: | + #!/usr/bin/openrc-run + true + when: + - ansible_facts.os_family | lower == 'artix linux' + + - name: make sure python3-apt is installed (only debian based) + ansible.builtin.package: + name: + - python3-apt + state: present + when: + - ansible_facts.os_family | lower == 'debian' + + - name: update package cache + become: true + ansible.builtin.package: + update_cache: true + + - name: install dependencies + ansible.builtin.package: + name: + - iproute2 + state: present + + - name: environment + ansible.builtin.debug: + msg: + - "os family : {{ ansible_facts.distribution }} ({{ ansible_facts.os_family }})" + - "distribution version : {{ ansible_facts.distribution_major_version }}" + - "ansible version : {{ ansible_version.full }}" + - "python version : {{ ansible_facts.python.version.major }}.{{ ansible_facts.python.version.minor }}" diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pihole/molecule/configured/tests/test_default.py b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pihole/molecule/configured/tests/test_default.py new file mode 100644 index 0000000..46f8440 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pihole/molecule/configured/tests/test_default.py @@ -0,0 +1,164 @@ +# coding: utf-8 +from __future__ import unicode_literals + +import json +import os + +import pytest +import testinfra.utils.ansible_runner +from ansible.parsing.dataloader import DataLoader +from ansible.template import Templar + +HOST = "instance" + +testinfra_hosts = testinfra.utils.ansible_runner.AnsibleRunner( + os.environ["MOLECULE_INVENTORY_FILE"] +).get_hosts(HOST) + + +def pp_json(json_thing, sort=True, indents=2): + if type(json_thing) is str: + print(json.dumps(json.loads(json_thing), sort_keys=sort, indent=indents)) + else: + print(json.dumps(json_thing, sort_keys=sort, indent=indents)) + return None + + +def base_directory(): + """ """ + cwd = os.getcwd() + + if "group_vars" in os.listdir(cwd): + directory = "../.." + molecule_directory = "." + else: + directory = "." 
+ molecule_directory = f"molecule/{os.environ.get('MOLECULE_SCENARIO_NAME')}" + + return directory, molecule_directory + + +def read_ansible_yaml(file_name, role_name): + """ """ + read_file = None + + for e in ["yml", "yaml"]: + test_file = "{}.{}".format(file_name, e) + if os.path.isfile(test_file): + read_file = test_file + break + + return f"file={read_file} name={role_name}" + + +@pytest.fixture() +def get_vars(host): + """ + parse ansible variables + - defaults/main.yml + - vars/main.yml + - vars/${DISTRIBUTION}.yaml + - molecule/${MOLECULE_SCENARIO_NAME}/group_vars/all/vars.yml + """ + base_dir, molecule_dir = base_directory() + distribution = host.system_info.distribution + operation_system = None + + if distribution in ["debian", "ubuntu"]: + operation_system = "debian" + elif distribution in ["redhat", "ol", "centos", "rocky", "almalinux"]: + operation_system = "redhat" + elif distribution in ["arch", "artix"]: + operation_system = f"{distribution}linux" + + # print(" -> {} / {}".format(distribution, os)) + # print(" -> {}".format(base_dir)) + + file_defaults = read_ansible_yaml(f"{base_dir}/defaults/main", "role_defaults") + file_vars = read_ansible_yaml(f"{base_dir}/vars/main", "role_vars") + file_distribution = read_ansible_yaml( + f"{base_dir}/vars/{operation_system}", "role_distribution" + ) + file_molecule = read_ansible_yaml( + f"{molecule_dir}/group_vars/all/vars", "test_vars" + ) + # file_host_molecule = read_ansible_yaml("{}/host_vars/{}/vars".format(base_dir, HOST), "host_vars") + + defaults_vars = ( + host.ansible("include_vars", file_defaults) + .get("ansible_facts") + .get("role_defaults") + ) + vars_vars = ( + host.ansible("include_vars", file_vars).get("ansible_facts").get("role_vars") + ) + distribution_vars = ( + host.ansible("include_vars", file_distribution) + .get("ansible_facts") + .get("role_distribution") + ) + molecule_vars = ( + host.ansible("include_vars", file_molecule) + .get("ansible_facts") + .get("test_vars") + ) + # host_vars = host.ansible("include_vars", file_host_molecule).get("ansible_facts").get("host_vars") + + ansible_vars = defaults_vars + ansible_vars.update(vars_vars) + ansible_vars.update(distribution_vars) + ansible_vars.update(molecule_vars) + # ansible_vars.update(host_vars) + + templar = Templar(loader=DataLoader(), variables=ansible_vars) + result = templar.template(ansible_vars, fail_on_undefined=False) + + return result + + +def local_facts(host): + """ + return local facts + """ + return host.ansible("setup").get("ansible_facts").get("ansible_local").get("pihole") + + +def test_files(host, get_vars): + """ """ + files = ["/etc/pihole/pihole.toml", "/opt/pihole/api.sh", "/usr/local/bin/pihole"] + + for file in files: + f = host.file(file) + assert f.exists + assert f.is_file + + +def test_user(host, get_vars): + """ """ + user = "pihole" + group = "pihole" + + assert host.group(group).exists + assert host.user(user).exists + assert group in host.user(user).groups + + +def test_service(host, get_vars): + """ """ + service = host.service("pihole-FTL") + assert service.is_enabled + assert service.is_running + + +def test_open_port(host, get_vars): + """ """ + # version = local_facts(host).get("major_version") + + for i in host.socket.get_listening_sockets(): + print(i) + + service = host.socket("udp://0.0.0.0:53") + assert service.is_listening + + service = host.socket("tcp://0.0.0.0:8080") + assert service.is_listening diff --git 
a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pihole/molecule/default/converge.yml b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pihole/molecule/default/converge.yml
new file mode 100644
index 0000000..e3bc2ad
--- /dev/null
+++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pihole/molecule/default/converge.yml
@@ -0,0 +1,12 @@
+---
+
+- name: converge
+ hosts: instance
+ any_errors_fatal: false
+ gather_facts: true
+
+ environment:
+ NETRC: ''
+
+ roles:
+ - role: bodsch.dns.pihole
diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pihole/molecule/default/group_vars/all/vars.yml b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pihole/molecule/default/group_vars/all/vars.yml
new file mode 100644
index 0000000..c81cf5b
--- /dev/null
+++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pihole/molecule/default/group_vars/all/vars.yml
@@ -0,0 +1,3 @@
+---
+
+...
diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pihole/molecule/default/molecule.yml b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pihole/molecule/default/molecule.yml
new file mode 100644
index 0000000..1b79bf4
--- /dev/null
+++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pihole/molecule/default/molecule.yml
@@ -0,0 +1,55 @@
+---
+
+role_name_check: 1
+
+dependency:
+ name: galaxy
+
+driver:
+ name: docker
+
+platforms:
+ - name: instance
+ image: "ghcr.io/bodsch/docker-ansible/ansible-${DISTRIBUTION:-debian:12}"
+ command: ${MOLECULE_DOCKER_COMMAND:-""}
+ docker_host: "${DOCKER_HOST:-unix:///run/docker.sock}"
+ privileged: true
+ pre_build_image: true
+ cgroupns_mode: host
+ volumes:
+ - /sys/fs/cgroup:/sys/fs/cgroup:rw
+ - /var/lib/containerd
+ capabilities:
+ - SYS_ADMIN
+ tmpfs:
+ - /run
+ - /tmp
+ published_ports:
+ - 80:80
+
+provisioner:
+ name: ansible
+ ansible_args:
+ - --diff
+ - -v
+ config_options:
+ defaults:
+ deprecation_warnings: true
+ callback_result_format: yaml
+ callbacks_enabled: profile_tasks
+ gathering: smart
+ fact_caching_timeout: 320
+
+scenario:
+ test_sequence:
+ - destroy
+ - dependency
+ - create
+ - prepare
+ - converge
+ - idempotence
+ - verify
+ - destroy
+
+verifier:
+ name: testinfra
diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pihole/molecule/default/prepare.yml b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pihole/molecule/default/prepare.yml
new file mode 100644
index 0000000..032f3d2
--- /dev/null
+++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pihole/molecule/default/prepare.yml
@@ -0,0 +1,55 @@
+---
+
+- name: information
+ hosts: all
+ gather_facts: true
+
+ pre_tasks:
+ - name: arch- / artixlinux
+ when:
+ - ansible_facts.distribution | lower == 'archlinux' or
+ ansible_facts.os_family | lower == 'artix linux'
+ block:
+ - name: update pacman system
+ ansible.builtin.command: |
+ pacman --refresh --sync --sysupgrade --noconfirm
+ register: pacman
+ changed_when: pacman.rc != 0
+ failed_when: pacman.rc != 0
+
+ - name: create depends service
+ ansible.builtin.copy:
+ mode: "0755"
+ dest: /etc/init.d/net
+ content: |
+ #!/usr/bin/openrc-run
+ true
+ when:
+ - ansible_facts.os_family | lower == 'artix linux'
+
+ - name: make sure python3-apt is installed (only debian based)
+ ansible.builtin.package:
+ name:
+ - python3-apt
+ state: present
+ when:
+ - ansible_facts.os_family | lower ==
'debian' + + - name: update package cache + become: true + ansible.builtin.package: + update_cache: true + + - name: install dependencies + ansible.builtin.package: + name: + - iproute2 + state: present + + - name: environment + ansible.builtin.debug: + msg: + - "os family : {{ ansible_facts.distribution }} ({{ ansible_facts.os_family }})" + - "distribution version : {{ ansible_facts.distribution_major_version }}" + - "ansible version : {{ ansible_version.full }}" + - "python version : {{ ansible_facts.python.version.major }}.{{ ansible_facts.python.version.minor }}" diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pihole/molecule/default/tests/test_default.py b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pihole/molecule/default/tests/test_default.py new file mode 100644 index 0000000..d729dc1 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pihole/molecule/default/tests/test_default.py @@ -0,0 +1,164 @@ +# coding: utf-8 +from __future__ import unicode_literals + +import json +import os + +import pytest +import testinfra.utils.ansible_runner +from ansible.parsing.dataloader import DataLoader +from ansible.template import Templar + +HOST = "instance" + +testinfra_hosts = testinfra.utils.ansible_runner.AnsibleRunner( + os.environ["MOLECULE_INVENTORY_FILE"] +).get_hosts(HOST) + + +def pp_json(json_thing, sort=True, indents=2): + if type(json_thing) is str: + print(json.dumps(json.loads(json_thing), sort_keys=sort, indent=indents)) + else: + print(json.dumps(json_thing, sort_keys=sort, indent=indents)) + return None + + +def base_directory(): + """ """ + cwd = os.getcwd() + + if "group_vars" in os.listdir(cwd): + directory = "../.." + molecule_directory = "." + else: + directory = "." 
+ molecule_directory = f"molecule/{os.environ.get('MOLECULE_SCENARIO_NAME')}" + + return directory, molecule_directory + + +def read_ansible_yaml(file_name, role_name): + """ """ + read_file = None + + for e in ["yml", "yaml"]: + test_file = "{}.{}".format(file_name, e) + if os.path.isfile(test_file): + read_file = test_file + break + + return f"file={read_file} name={role_name}" + + +@pytest.fixture() +def get_vars(host): + """ + parse ansible variables + - defaults/main.yml + - vars/main.yml + - vars/${DISTRIBUTION}.yaml + - molecule/${MOLECULE_SCENARIO_NAME}/group_vars/all/vars.yml + """ + base_dir, molecule_dir = base_directory() + distribution = host.system_info.distribution + operation_system = None + + if distribution in ["debian", "ubuntu"]: + operation_system = "debian" + elif distribution in ["redhat", "ol", "centos", "rocky", "almalinux"]: + operation_system = "redhat" + elif distribution in ["arch", "artix"]: + operation_system = f"{distribution}linux" + + # print(" -> {} / {}".format(distribution, os)) + # print(" -> {}".format(base_dir)) + + file_defaults = read_ansible_yaml(f"{base_dir}/defaults/main", "role_defaults") + file_vars = read_ansible_yaml(f"{base_dir}/vars/main", "role_vars") + file_distribution = read_ansible_yaml( + f"{base_dir}/vars/{operation_system}", "role_distribution" + ) + file_molecule = read_ansible_yaml( + f"{molecule_dir}/group_vars/all/vars", "test_vars" + ) + # file_host_molecule = read_ansible_yaml("{}/host_vars/{}/vars".format(base_dir, HOST), "host_vars") + + defaults_vars = ( + host.ansible("include_vars", file_defaults) + .get("ansible_facts") + .get("role_defaults") + ) + vars_vars = ( + host.ansible("include_vars", file_vars).get("ansible_facts").get("role_vars") + ) + distribution_vars = ( + host.ansible("include_vars", file_distribution) + .get("ansible_facts") + .get("role_distribution") + ) + molecule_vars = ( + host.ansible("include_vars", file_molecule) + .get("ansible_facts") + .get("test_vars") + ) + # host_vars = host.ansible("include_vars", file_host_molecule).get("ansible_facts").get("host_vars") + + ansible_vars = defaults_vars + ansible_vars.update(vars_vars) + ansible_vars.update(distribution_vars) + ansible_vars.update(molecule_vars) + # ansible_vars.update(host_vars) + + templar = Templar(loader=DataLoader(), variables=ansible_vars) + result = templar.template(ansible_vars, fail_on_undefined=False) + + return result + + +def local_facts(host): + """ + return local facts + """ + return host.ansible("setup").get("ansible_facts").get("ansible_local").get("pihole") + + +def test_files(host, get_vars): + """ """ + files = ["/etc/pihole/pihole.toml", "/opt/pihole/api.sh", "/usr/local/bin/pihole"] + + for file in files: + f = host.file(file) + assert f.exists + assert f.is_file + + +def test_user(host, get_vars): + """ """ + user = "pihole" + group = "pihole" + + assert host.group(group).exists + assert host.user(user).exists + assert group in host.user(user).groups + + +def test_service(host, get_vars): + """ """ + service = host.service("pihole-FTL") + assert service.is_enabled + assert service.is_running + + +def test_open_port(host, get_vars): + """ """ + # version = local_facts(host).get("major_version") + + for i in host.socket.get_listening_sockets(): + print(i) + + service = host.socket("udp://0.0.0.0:53") + assert service.is_listening + + # service = host.socket("tcp://0.0.0.0:80") + # assert service.is_listening diff --git 
a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pihole/molecule/family-friendly/converge.yml b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pihole/molecule/family-friendly/converge.yml
new file mode 100644
index 0000000..d616057
--- /dev/null
+++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pihole/molecule/family-friendly/converge.yml
@@ -0,0 +1,12 @@
+---
+
+- name: converge
+ hosts: all
+ any_errors_fatal: true
+ become: false
+
+ environment:
+ NETRC: ''
+
+ roles:
+ - role: bodsch.dns.pihole
diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pihole/molecule/family-friendly/group_vars/all/vars.yml b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pihole/molecule/family-friendly/group_vars/all/vars.yml
new file mode 100644
index 0000000..e81b3bb
--- /dev/null
+++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pihole/molecule/family-friendly/group_vars/all/vars.yml
@@ -0,0 +1,41 @@
+---
+
+pihole_admin_password: "Familie2024"
+
+pihole_config:
+ dns:
+ domain: pi.hole
+ interface: "{{ ansible_facts.default_ipv4.interface }}"
+ webserver:
+ domain: pi.hole
+ port: 8080
+ debug:
+ status: true
+
+pihole_custom_denylists:
+ - address: "https://raw.githubusercontent.com/StevenBlack/hosts/master/alternates/fakenews-gambling-porn-social/hosts"
+ - address: "https://raw.githubusercontent.com/crazy-max/WindowsSpyBlocker/master/data/hosts/spy.txt"
+
+pihole_domain_allowlist:
+ # Education
+ - "khan-academy.org"
+ - "khanacademy.org"
+ - "wikipedia.org"
+ - "stackoverflow.com"
+ # Safe search engines
+ - "duckduckgo.com"
+ - "startpage.com"
+
+pihole_domain_denylist:
+ # Block social media entirely
+ - "facebook.com"
+ - "instagram.com"
+ - "tiktok.com"
+ - "snapchat.com"
+ - "twitter.com"
+ # Gaming / time sinks
+ - "twitch.tv"
+ - "youtube.com" # Caution: may break other services
+ # Gambling
+ - "bet365.com"
+ - "pokerstars.com"
diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pihole/molecule/family-friendly/molecule.yml b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pihole/molecule/family-friendly/molecule.yml
new file mode 100644
index 0000000..e46b840
--- /dev/null
+++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pihole/molecule/family-friendly/molecule.yml
@@ -0,0 +1,55 @@
+---
+role_name_check: 1
+
+dependency:
+ name: galaxy
+
+driver:
+ name: docker
+
+platforms:
+ - name: instance
+ image: "ghcr.io/bodsch/docker-ansible/ansible-${DISTRIBUTION:-debian:12}"
+ command: ${MOLECULE_DOCKER_COMMAND:-""}
+ docker_host: "${DOCKER_HOST:-unix:///run/docker.sock}"
+ privileged: true
+ pre_build_image: true
+ cgroupns_mode: host
+ volumes:
+ - /sys/fs/cgroup:/sys/fs/cgroup:rw
+ - /var/lib/containerd
+ capabilities:
+ - ALL
+ tmpfs:
+ - /run
+ - /tmp
+ published_ports:
+ - 80:80
+ - 8080:8080
+
+provisioner:
+ name: ansible
+ ansible_args:
+ - --diff
+ - -v
+ config_options:
+ defaults:
+ deprecation_warnings: true
+ callback_result_format: yaml
+ callbacks_enabled: profile_tasks
+ gathering: smart
+ fact_caching_timeout: 320
+
+scenario:
+ test_sequence:
+ - destroy
+ - dependency
+ - create
+ - prepare
+ - converge
+ - idempotence
+ - verify
+ - destroy
+
+verifier:
+ name: testinfra
diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pihole/molecule/family-friendly/prepare.yml
b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pihole/molecule/family-friendly/prepare.yml new file mode 100644 index 0000000..032f3d2 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pihole/molecule/family-friendly/prepare.yml @@ -0,0 +1,55 @@ +--- + +- name: information + hosts: all + gather_facts: true + + pre_tasks: + - name: arch- / artixlinux + when: + - ansible_facts.distribution | lower == 'archlinux' or + ansible_facts.os_family | lower == 'artix linux' + block: + - name: update pacman system + ansible.builtin.command: | + pacman --refresh --sync --sysupgrade --noconfirm + register: pacman + changed_when: pacman.rc != 0 + failed_when: pacman.rc != 0 + + - name: create depends service + ansible.builtin.copy: + mode: "0755" + dest: /etc/init.d/net + content: | + #!/usr/bin/openrc-run + true + when: + - ansible_facts.os_family | lower == 'artix linux' + + - name: make sure python3-apt is installed (only debian based) + ansible.builtin.package: + name: + - python3-apt + state: present + when: + - ansible_facts.os_family | lower == 'debian' + + - name: update package cache + become: true + ansible.builtin.package: + update_cache: true + + - name: install dependencies + ansible.builtin.package: + name: + - iproute2 + state: present + + - name: environment + ansible.builtin.debug: + msg: + - "os family : {{ ansible_facts.distribution }} ({{ ansible_facts.os_family }})" + - "distribution version : {{ ansible_facts.distribution_major_version }}" + - "ansible version : {{ ansible_version.full }}" + - "python version : {{ ansible_facts.python.version.major }}.{{ ansible_facts.python.version.minor }}" diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pihole/molecule/family-friendly/tests/test_default.py b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pihole/molecule/family-friendly/tests/test_default.py new file mode 100644 index 0000000..46f8440 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pihole/molecule/family-friendly/tests/test_default.py @@ -0,0 +1,164 @@ +# coding: utf-8 +from __future__ import unicode_literals + +import json +import os + +import pytest +import testinfra.utils.ansible_runner +from ansible.parsing.dataloader import DataLoader +from ansible.template import Templar + +HOST = "instance" + +testinfra_hosts = testinfra.utils.ansible_runner.AnsibleRunner( + os.environ["MOLECULE_INVENTORY_FILE"] +).get_hosts(HOST) + + +def pp_json(json_thing, sort=True, indents=2): + if type(json_thing) is str: + print(json.dumps(json.loads(json_thing), sort_keys=sort, indent=indents)) + else: + print(json.dumps(json_thing, sort_keys=sort, indent=indents)) + return None + + +def base_directory(): + """ """ + cwd = os.getcwd() + + if "group_vars" in os.listdir(cwd): + directory = "../.." + molecule_directory = "." + else: + directory = "." 
+ molecule_directory = f"molecule/{os.environ.get('MOLECULE_SCENARIO_NAME')}" + + return directory, molecule_directory + + +def read_ansible_yaml(file_name, role_name): + """ """ + read_file = None + + for e in ["yml", "yaml"]: + test_file = "{}.{}".format(file_name, e) + if os.path.isfile(test_file): + read_file = test_file + break + + return f"file={read_file} name={role_name}" + + +@pytest.fixture() +def get_vars(host): + """ + parse ansible variables + - defaults/main.yml + - vars/main.yml + - vars/${DISTRIBUTION}.yaml + - molecule/${MOLECULE_SCENARIO_NAME}/group_vars/all/vars.yml + """ + base_dir, molecule_dir = base_directory() + distribution = host.system_info.distribution + operation_system = None + + if distribution in ["debian", "ubuntu"]: + operation_system = "debian" + elif distribution in ["redhat", "ol", "centos", "rocky", "almalinux"]: + operation_system = "redhat" + elif distribution in ["arch", "artix"]: + operation_system = f"{distribution}linux" + + # print(" -> {} / {}".format(distribution, os)) + # print(" -> {}".format(base_dir)) + + file_defaults = read_ansible_yaml(f"{base_dir}/defaults/main", "role_defaults") + file_vars = read_ansible_yaml(f"{base_dir}/vars/main", "role_vars") + file_distribution = read_ansible_yaml( + f"{base_dir}/vars/{operation_system}", "role_distribution" + ) + file_molecule = read_ansible_yaml( + f"{molecule_dir}/group_vars/all/vars", "test_vars" + ) + # file_host_molecule = read_ansible_yaml("{}/host_vars/{}/vars".format(base_dir, HOST), "host_vars") + + defaults_vars = ( + host.ansible("include_vars", file_defaults) + .get("ansible_facts") + .get("role_defaults") + ) + vars_vars = ( + host.ansible("include_vars", file_vars).get("ansible_facts").get("role_vars") + ) + distribution_vars = ( + host.ansible("include_vars", file_distribution) + .get("ansible_facts") + .get("role_distribution") + ) + molecule_vars = ( + host.ansible("include_vars", file_molecule) + .get("ansible_facts") + .get("test_vars") + ) + # host_vars = host.ansible("include_vars", file_host_molecule).get("ansible_facts").get("host_vars") + + ansible_vars = defaults_vars + ansible_vars.update(vars_vars) + ansible_vars.update(distribution_vars) + ansible_vars.update(molecule_vars) + # ansible_vars.update(host_vars) + + templar = Templar(loader=DataLoader(), variables=ansible_vars) + result = templar.template(ansible_vars, fail_on_undefined=False) + + return result + + +def local_facts(host): + """ + return local facts + """ + return host.ansible("setup").get("ansible_facts").get("ansible_local").get("pihole") + + +def test_files(host, get_vars): + """ """ + files = ["/etc/pihole/pihole.toml", "/opt/pihole/api.sh", "/usr/local/bin/pihole"] + + for file in files: + f = host.file(file) + assert f.exists + assert f.is_file + + +def test_user(host, get_vars): + """ """ + user = "pihole" + group = "pihole" + + assert host.group(group).exists + assert host.user(user).exists + assert group in host.user(user).groups + + +def test_service(host, get_vars): + """ """ + service = host.service("pihole-FTL") + assert service.is_enabled + assert service.is_running + + +def test_open_port(host, get_vars): + """ """ + # version = local_facts(host).get("major_version") + + for i in host.socket.get_listening_sockets(): + print(i) + + service = host.socket("udp://0.0.0.0:53") + assert service.is_listening + + service = host.socket("tcp://0.0.0.0:8080") + assert service.is_listening diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pihole/notes.md 
b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pihole/notes.md
new file mode 100644
index 0000000..c1b01bd
--- /dev/null
+++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pihole/notes.md
@@ -0,0 +1,29 @@
+
+/usr/bin/pihole-FTL --config dns.hosts '[ "192.168.0.4 matrix.vpn", "192.168.0.4 matrix.lan" ]'
+
+#!/bin/bash
+
+WHITELIST="/path/to/whitelist.txt"
+BLACKLIST="/path/to/blacklist.txt"
+
+# Import the whitelist
+if [[ -f "$WHITELIST" ]]; then
+ while IFS= read -r domain || [ -n "$domain" ]; do
+ [[ -z "$domain" || "$domain" == \#* ]] && continue
+ pihole allow "$domain" --comment "Whitelist import"
+ done < "$WHITELIST"
+fi
+
+# Import the blacklist
+if [[ -f "$BLACKLIST" ]]; then
+ while IFS= read -r domain || [ -n "$domain" ]; do
+ [[ -z "$domain" || "$domain" == \#* ]] && continue
+ pihole deny "$domain" --comment "Blacklist import"
+ done < "$BLACKLIST"
+fi
+
+# Reload the lists
+pihole reloadlists
+
+
+git config --global --add safe.directory /var/www/html/admin
diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pihole/tasks/configure.yml b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pihole/tasks/configure.yml
new file mode 100644
index 0000000..54cc020
--- /dev/null
+++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pihole/tasks/configure.yml
@@ -0,0 +1,45 @@
+---
+
+- name: configure pihole
+ bodsch.dns.pihole_config:
+ config: "{{ pihole_config | default({}) }}"
+ when:
+ - pihole_config | default({}) | count > 0
+ notify:
+ - restart pihole-FTL
+
+- name: set pi-hole admin password
+ bodsch.dns.pihole_admin_password:
+ password: "{{ pihole_admin_password }}"
+ when:
+ - pihole_admin_password | default('') | string | length > 0
+
+- name: create pihole groups
+ bodsch.dns.pihole_groups:
+ groups: "{{ pihole_groups | default([]) }}"
+ when:
+ - pihole_groups | default([]) | count > 0
+
+- name: create pihole clients
+ bodsch.dns.pihole_clients:
+ clients: "{{ pihole_clients | default([]) }}"
+ when:
+ - pihole_clients | default([]) | count > 0
+
+- name: create pihole adlists
+ bodsch.dns.pihole_adlists:
+ adlists: "{{ pihole_custom_denylists | default([]) }}"
+ when:
+ - pihole_custom_denylists | default([]) | count > 0
+ notify:
+ - pihole update gravity
+
+- name: add custom lists
+ bodsch.dns.pihole_custom_lists:
+ allow_list: "{{ pihole_domain_allowlist | default([]) }}"
+ deny_list: "{{ pihole_domain_denylist | default([]) }}"
+ register: import_result
+ changed_when: false
+ notify:
+ - pihole reload lists
+
diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pihole/tasks/download.yml b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pihole/tasks/download.yml
new file mode 100644
index 0000000..407c62b
--- /dev/null
+++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pihole/tasks/download.yml
@@ -0,0 +1,65 @@
+---
+
+- name: download
+ when:
+ - not running_in_check_mode
+ - pihole_arch.install_type == "archive"
+ block:
+ - name: create download directory
+ become: false
+ delegate_to: "{{ pihole_delegate_to }}"
+ run_once: "{{ 'false' if pihole_direct_download else 'true' }}"
+ ansible.builtin.file:
+ path: "{{ pihole_local_tmp_directory }}"
+ state: directory
+ mode: "0750"
+
+ - name: detect the downloaded pihole archive
+ become: false
+ delegate_to: "{{ pihole_delegate_to }}"
+ run_once: "{{ 'false' if pihole_direct_download else 'true' }}"
+ ansible.builtin.stat:
+ path: "{{
pihole_local_tmp_directory }}/{{ pihole_arch.archive | basename }}"
+ register: stat_pihole_archive
+
+ - name: download pihole binary archive
+ when:
+ - stat_pihole_archive.stat is defined
+ - not stat_pihole_archive.stat.exists | default('false')
+ become: false
+ delegate_to: "{{ pihole_delegate_to }}"
+ run_once: "{{ 'false' if pihole_direct_download else 'true' }}"
+ ansible.builtin.get_url:
+ url: "{{ pihole_arch.archive }}"
+ dest: "{{ pihole_local_tmp_directory }}/{{ pihole_arch.archive | basename }}"
+ mode: "0640"
+ register: _download_archive
+ until: _download_archive is succeeded
+ retries: 5
+ delay: 2
+ check_mode: false
+
+- name: detect extracted binary
+ become: false
+ delegate_to: "{{ pihole_delegate_to }}"
+ run_once: "{{ 'false' if pihole_direct_download else 'true' }}"
+ ansible.builtin.stat:
+ path: "{{ pihole_local_tmp_directory }}/pihole"
+ register: stat_pihole_binary
+
+- name: extract archive
+ become: false
+ delegate_to: "{{ pihole_delegate_to }}"
+ run_once: "{{ 'false' if pihole_direct_download else 'true' }}"
+ ansible.builtin.unarchive:
+ src: "{{ pihole_local_tmp_directory }}/{{ pihole_arch.archive | basename }}"
+ dest: "{{ pihole_local_tmp_directory }}/"
+ copy: false
+ extra_opts:
+ - --strip-components=1
+ when:
+ - not running_in_check_mode
+ - stat_pihole_binary.stat is defined
+ - not stat_pihole_binary.stat.exists | default('false')
+
+...
diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pihole/tasks/firewall.yml b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pihole/tasks/firewall.yml
new file mode 100644
index 0000000..6f5ba6a
--- /dev/null
+++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pihole/tasks/firewall.yml
@@ -0,0 +1,15 @@
+---
+
+- name: configure firewall for pi-hole
+ community.general.ufw:
+ rule: allow
+ port: "{{ item.split('/')[0] }}"
+ proto: "{{ item.split('/')[1] if '/' in item else 'tcp' }}"
+ loop:
+ - "53/tcp"
+ - "53/udp"
+ - "{{ pihole_web_port | default('80') }}/tcp"
+ when:
+ - pihole_enable_firewall | default(false)
+
+...
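Note that tasks/firewall.yml is not referenced by tasks/main.yml below, so it has to be pulled in explicitly, and it only acts when `pihole_enable_firewall` is set. A minimal sketch of a play doing that (the `dns_servers` group name and the port value are assumptions, not part of the role):

```yaml
---
- name: open pi-hole ports with ufw
  hosts: dns_servers               # hypothetical inventory group
  become: true

  vars:
    pihole_enable_firewall: true
    pihole_web_port: "8080"        # feeds the "{{ pihole_web_port }}/tcp" rule above

  tasks:
    - name: apply the firewall rules shipped with the role
      ansible.builtin.include_role:
        name: bodsch.dns.pihole
        tasks_from: firewall
```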
diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pihole/tasks/install.yml b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pihole/tasks/install.yml
new file mode 100644
index 0000000..286868b
--- /dev/null
+++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pihole/tasks/install.yml
@@ -0,0 +1,114 @@
+---
+
+- name: create pihole user
+ ansible.builtin.user:
+ name: pihole
+ system: true
+ shell: /bin/false
+ home: /opt/pihole
+ create_home: false
+
+- name: create remote tmp directory
+ ansible.builtin.file:
+ path: "{{ pihole_remote_tmp_directory }}"
+ state: directory
+ mode: "0755"
+
+- name: detect pihole installer on '{{ pihole_delegate_to }}'
+ become: false
+ delegate_to: "{{ pihole_delegate_to }}"
+ run_once: "{{ 'false' if pihole_direct_download else 'true' }}"
+ ansible.builtin.stat:
+ path: "{{ pihole_local_tmp_directory }}/automated install/basic-install.sh"
+ register: stat_file_installer
+
+- name: fail when pihole installer is missing
+ ansible.builtin.fail:
+ msg: "missing pihole installer on ansible controller"
+ when:
+ - not running_in_check_mode
+ - not stat_file_installer.stat.exists
+
+- name: detect installed pihole installer
+ ansible.builtin.stat:
+ path: "{{ pihole_remote_tmp_directory }}/basic-install.sh"
+ register: stat_pihole_installer
+
+- name: propagate pihole installer
+ when:
+ - not running_in_check_mode
+ - stat_file_installer.stat.exists
+ - stat_pihole_installer.stat is defined and not stat_pihole_installer.stat.exists | default('false')
+ ansible.builtin.copy:
+ src: "{{ pihole_local_tmp_directory }}/automated install/basic-install.sh"
+ dest: "{{ pihole_remote_tmp_directory }}/basic-install.sh"
+ mode: "0755"
+ remote_src: "{{ 'true' if pihole_direct_download else 'false' }}"
+ no_log: true
+
+- name: detect installed pihole
+ ansible.builtin.stat:
+ path: /etc/pihole/pihole.toml
+ register: stat_pihole_toml
+
+- name: run pihole installer
+ when:
+ - not running_in_check_mode
+ - stat_file_installer.stat.exists
+ - stat_pihole_toml.stat is defined and not stat_pihole_toml.stat.exists | default('false')
+ block:
+ - name: create pi-hole directories
+ ansible.builtin.file:
+ path: "{{ item }}"
+ state: directory
+ owner: pihole
+ group: pihole
+ mode: "0755"
+ loop:
+ - /etc/pihole
+ - /opt/pihole
+ - /var/log/pihole
+
+ - name: create setupVars.conf
+ ansible.builtin.template:
+ src: setupVars.conf.j2
+ dest: /etc/pihole/setupVars.conf
+ owner: pihole
+ group: pihole
+ mode: "0640"
+
+ - name: patch pi-hole installer for arch-linux
+ ansible.builtin.shell: |
+ sed -i \
+ -e '/^[[:space:]]*[^#].*package_manager_detect[[:space:]]*$/s/^/# /' \
+ -e '/^[[:space:]]*[^#].*notify_package_updates_available[[:space:]]*$/s/^/# /' \
+ -e '/^[[:space:]]*[^#].*build_dependency_package[[:space:]]*$/s/^/# /' \
+ -e '/^[[:space:]]*[^#].*install_dependent_packages[[:space:]]*$/s/^/# /' \
+ {{ pihole_remote_tmp_directory }}/basic-install.sh
+ when:
+ - ansible_facts.distribution | lower == 'archlinux'
+
+ - name: install pi-hole
+ # remote_user: pihole
+ become: true
+ ansible.builtin.shell: |
+ export PIHOLE_SKIP_OS_CHECK=true
+ bash {{ pihole_remote_tmp_directory }}/basic-install.sh --unattended
+ register: pihole_install_result
+ changed_when: pihole_install_result.rc == 0
+ notify:
+ - restart pihole-FTL
+
+- name: detect pihole version
+ become: true
+ bodsch.dns.pihole_version:
+ register: pihole_version
+ check_mode: false
+ ignore_errors: true
+
+- name: create custom fact file
+ 
bodsch.core.facts: + name: pihole + facts: + full_version: "{{ pihole_version.full_version }}" + version: "{{ pihole_version.version }}" diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pihole/tasks/main.yml b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pihole/tasks/main.yml new file mode 100644 index 0000000..21f6c55 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pihole/tasks/main.yml @@ -0,0 +1,18 @@ +--- + +- name: prepare + ansible.builtin.include_tasks: prepare.yml + +- name: download + ansible.builtin.include_tasks: download.yml + +- name: install + ansible.builtin.include_tasks: install.yml + +- name: configure + ansible.builtin.include_tasks: configure.yml + +- name: service + ansible.builtin.include_tasks: service.yml + +... diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pihole/tasks/prepare.yml b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pihole/tasks/prepare.yml new file mode 100644 index 0000000..a881bbe --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pihole/tasks/prepare.yml @@ -0,0 +1,40 @@ +--- + +- name: include OS specific configuration + ansible.builtin.include_vars: "{{ lookup('first_found', params) }}" + vars: + params: + paths: + - "vars" + files: + # eg. debian-10 / ubuntu-20 / centos-8 / oraclelinux-8 + - "{{ ansible_facts.distribution | lower }}-{{ ansible_facts.distribution_major_version }}.yml" + # eg. archlinux-systemd / archlinux-openrc + - "{{ ansible_facts.distribution | lower }}-{{ ansible_facts.service_mgr | lower }}.yml" + # eg. debian / ubuntu / centos / oraclelinux + - "{{ ansible_facts.distribution | lower }}.yml" + # eg. redhat / debian + - "{{ ansible_facts.os_family | lower }}.yml" + - main.yml + skip: true + +- name: detect ansible check_mode + bodsch.core.check_mode: + register: _check_mode + +- name: define running_in_check_mode + ansible.builtin.set_fact: + running_in_check_mode: '{{ _check_mode.check_mode }}' + +- name: install dependencies + ansible.builtin.package: + name: "{{ pihole_dependencies }}" + state: present + when: + - pihole_dependencies | default([]) | count > 0 + +- name: merge pihole configuration between defaults and custom + ansible.builtin.set_fact: + pihole_config: "{{ pihole_defaults_config | combine(pihole_config, recursive=True) }}" + +... diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pihole/tasks/service.yml b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pihole/tasks/service.yml new file mode 100644 index 0000000..570d0ab --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pihole/tasks/service.yml @@ -0,0 +1,9 @@ +--- + +- name: ensure pi-hole services are running + ansible.builtin.systemd: + name: pihole-FTL + state: started + enabled: true + +... 
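tasks/main.yml above chains prepare → download → install → configure → service. A minimal sketch of a play driving the role end to end, using only variables that appear in defaults/main.yml and tasks/configure.yml (the host group and password are placeholders):

```yaml
---
- name: install and configure pi-hole
  hosts: dns_servers                    # hypothetical inventory group
  become: true

  vars:
    pihole_version: 6.1.2
    pihole_admin_password: "change-me"  # placeholder; keep in a vault in practice
    pihole_config:
      dns:
        upstreams:
          - "9.9.9.9"
          - "1.1.1.1"
      webserver:
        port: 8080

  roles:
    - role: bodsch.dns.pihole
```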
diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pihole/templates/custom.list.j2 b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pihole/templates/custom.list.j2
new file mode 100644
index 0000000..86cb23f
--- /dev/null
+++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pihole/templates/custom.list.j2
@@ -0,0 +1,6 @@
+#jinja2: trim_blocks: True, lstrip_blocks: True
+{% if data is defined %}
+ {% for item in data %}
+{{ item }}
+ {% endfor %}
+{% endif %}
diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pihole/templates/pihole.toml b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pihole/templates/pihole.toml
new file mode 100644
index 0000000..ec7f9b3
--- /dev/null
+++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pihole/templates/pihole.toml
@@ -0,0 +1,1205 @@
+# Pi-hole configuration file (v6.2.3)
+# Encoding: UTF-8
+# This file is managed by pihole-FTL
+# Last updated on 2025-06-18 17:35:43 UTC
+
+[dns]
+ # Array of upstream DNS servers used by Pi-hole
+ # Example: [ "8.8.8.8", "127.0.0.1#5335", "docker-resolver" ]
+ #
+ # Possible values are:
+ # array of IP addresses and/or hostnames, optionally with a port (#...)
+ upstreams = [
+ "192.168.0.4",
+ "4.2.2.1",
+ "9.9.9.9",
+ "1.1.1.1"
+ ] ### CHANGED, default = []
+
+ # Use this option to control deep CNAME inspection. Disabling it might be beneficial
+ # for very low-end devices
+ CNAMEdeepInspect = true
+
+ # Should _esni. subdomains be blocked by default? Encrypted Server Name Indication
+ # (ESNI) is certainly a good step into the right direction to enhance privacy on the
+ # web. It prevents on-path observers, including ISPs, coffee shop owners and
+ # firewalls, from intercepting the TLS Server Name Indication (SNI) extension by
+ # encrypting it. This prevents the SNI from being used to determine which websites
+ # users are visiting.
+ # ESNI will obviously cause issues for pixelserv-tls which will be unable to generate
+ # matching certificates on-the-fly when it cannot read the SNI. Cloudflare and Firefox
+ # are already enabling ESNI. According to the IETF draft (link above), we can easily
+ # restore pixelserv-tls's operation by replying NXDOMAIN to _esni. subdomains of
+ # blocked domains as this mimics a "not configured for this domain" behavior.
+ blockESNI = true
+
+ # Should we overwrite the query source when client information is provided through
+ # EDNS0 client subnet (ECS) information? This allows Pi-hole to obtain client IPs even
+ # if they are hidden behind the NAT of a router. This feature has been requested and
+ # discussed on Discourse where further information how to use it can be found:
+ # https://discourse.pi-hole.net/t/support-for-add-subnet-option-from-dnsmasq-ecs-edns0-client-subnet/35940
+ EDNS0ECS = true
+
+ # Should FTL hide queries made by localhost?
+ ignoreLocalhost = false
+
+ # Should FTL analyze and show internally generated DNSSEC queries?
+ showDNSSEC = true
+
+ # Should FTL analyze *only* A and AAAA queries?
+ analyzeOnlyAandAAAA = false
+
+ # Controls whether and how FTL will reply for addresses for which a local interface
+ # exists. Changing this setting causes FTL to restart.
+ #
+ # Possible values are:
+ # - "NONE"
+ # Pi-hole will not respond automatically on PTR requests to local interface
+ # addresses. Ensure pi.hole and/or hostname records exist elsewhere.
+ # - "HOSTNAME"
+ # Serve the machine's hostname.
The hostname is queried from the kernel through + # uname(2)->nodename. If the machine has multiple network interfaces, it can + # also have multiple nodenames. In this case, it is unspecified and up to the + # kernel which one will be returned. On Linux, the returned string is what has + # been set using sethostname(2) which is typically what has been set in + # /etc/hostname. + # - "HOSTNAMEFQDN" + # Serve the machine's hostname (see limitations above) as fully qualified domain + # by adding the local domain. If no local domain has been defined (config option + # dns.domain), FTL tries to query the domain name from the kernel using + # getdomainname(2). If this fails, FTL appends ".no_fqdn_available" to the + # hostname. + # - "PI.HOLE" + # Respond with "pi.hole". + piholePTR = "PI.HOLE" + + # How should FTL handle queries when the gravity database is not available? + # + # Possible values are: + # - "BLOCK" + # Block all queries when the database is busy. + # - "ALLOW" + # Allow all queries when the database is busy. + # - "REFUSE" + # Refuse all queries which arrive while the database is busy. + # - "DROP" + # Just drop the queries, i.e., never reply to them at all. Despite "REFUSE" + # sounding similar to "DROP", it turned out that many clients will just + # immediately retry, causing up to several thousands of queries per second. This + # does not happen in "DROP" mode. + replyWhenBusy = "ALLOW" + + # FTL's internal TTL to be handed out for blocked queries in seconds. This setting + # allows users to select a value different from the dnsmasq config option local-ttl. + # This is useful in the context of locally used hostnames that are known to stay constant + # over long times (printers, etc.). + # Note that large values may render whitelisting ineffective due to client-side + # caching of blocked queries. + blockTTL = 2 + + # Array of custom DNS records + # Example: hosts = [ "127.0.0.1 mylocal", "192.168.0.1 therouter" ] + # + # Possible values are: + # Array of custom DNS records each one in HOSTS form: "IP HOSTNAME" + hosts = [ + "192.168.0.4 matrix.lan" + ] ### CHANGED, default = [] + + # If set, A and AAAA queries for plain names, without dots or domain parts, are never + # forwarded to upstream nameservers + domainNeeded = true ### CHANGED, default = false + + # If set, the domain is added to simple names (without a period) in /etc/hosts in the + # same way as for DHCP-derived names + expandHosts = true ### CHANGED, default = false + + # The DNS domain used by your Pi-hole. + # + # This DNS domain is purely local. FTL may answer queries from its local cache and + # configuration but *never* forwards any requests upstream *unless* you have + # configured a dns.revServer exactly for this domain. In the latter case, all queries + # for this domain are sent exclusively to this server (including reverse lookups). + # + # For DHCP, this has two effects; firstly it causes the DHCP server to return the + # domain to any hosts which request it, and secondly it sets the domain which it is + # legal for DHCP-configured hosts to claim. The intention is to constrain hostnames so + # that an untrusted host on the LAN cannot advertise its name via DHCP as e.g. + # "google.com" and capture traffic not meant for it. If no domain suffix is specified, + # then any DHCP hostname with a domain part (i.e., with a period) will be disallowed and + # logged. If a domain is specified, then hostnames with a domain part are allowed, + # provided the domain part matches the suffix. 
In addition, when a suffix is set then + # hostnames without a domain part have the suffix added as an optional domain part. + # For instance, we can set domain=mylab.com and have a machine whose DHCP hostname is + # "laptop". The IP address for that machine is available both as "laptop" and + # "laptop.mylab.com". + # + # You can disable setting a domain by setting this option to an empty string. + # + # Possible values are: + # + domain = "matrix.lan" ### CHANGED, default = "lan" + + # Should all reverse lookups for private IP ranges (i.e., 192.168.x.y, etc) which are + # not found in /etc/hosts or the DHCP leases file be answered with "no such domain" + # rather than being forwarded upstream? + bogusPriv = true + + # Validate DNS replies using DNSSEC? + dnssec = false + + # Interface to use for DNS (see also dnsmasq.listening.mode) and DHCP (if enabled) + # + # Possible values are: + # a valid interface name + interface = "eth0" + + # Add A, AAAA and PTR records to the DNS. This adds one or more names to the DNS with + # associated IPv4 (A) and IPv6 (AAAA) records + # + # Possible values are: + # [,....],[],[][,] + hostRecord = "" + + # Pi-hole interface listening modes + # + # Possible values are: + # - "LOCAL" + # Allow only local requests. This setting accepts DNS queries only from hosts + # whose address is on a local subnet, i.e., a subnet for which an interface + # exists on the server. It is intended to be set as a default on installation, + # to allow unconfigured installations to be useful but also safe from being used + # for DNS amplification attacks if (accidentally) running public. + # - "SINGLE" + # Permit all origins, accept only on the specified interface. Respond only to + # queries arriving on the specified interface. The loopback (lo) interface is + # automatically added to the list of interfaces to use when this option is used. + # Make sure your Pi-hole is properly firewalled! + # - "BIND" + # By default, FTL binds the wildcard address. If this is not what you want, you + # can use this option as it forces FTL to really bind only the interfaces it is + # listening on. Note that this may result in issues when the interface may go + # down (cable unplugged, etc.). About the only time when this is useful is when + # running another nameserver on the same port on the same machine. This may also + # happen if you run a virtualization API such as libvirt. When this option is + # used, IP alias interface labels (e.g. enp2s0:0) are checked rather than + # interface names. + # - "ALL" + # Permit all origins, accept on all interfaces. Make sure your Pi-hole is + # properly firewalled! This truly allows any traffic to be replied to and is a + # dangerous thing to do as your Pi-hole could become an open resolver. You + # should always ask yourself if the first option doesn't work for you as well. + # - "NONE" + # Do not add any configuration concerning the listening mode to the dnsmasq + # configuration file. This is useful if you want to manually configure the + # listening mode in auxiliary configuration files. This option is really meant + # for advanced users only, support for this option may be limited. + listeningMode = "LOCAL" + + # Log DNS queries and replies to pihole.log + queryLogging = true + + # List of CNAME records which indicate that is really . 
If the TTL is + # given, it overwrites the value of local-ttl + # + # Possible values are: + # Array of CNAMEs each one in one of the following forms: ",[,]" + cnameRecords = [] + + # Port used by the DNS server + port = 53 + + # Reverse server (formerly also called "conditional forwarding") feature + # Array of reverse servers each one in one of the following forms: + # ",[/],[#][,]" + # + # Individual components: + # + # : either "true" or "false" + # + # [/]: Address range for the reverse server feature in CIDR + # notation. If the prefix length is omitted, either 32 (IPv4) or 128 (IPv6) are + # substituted (exact address match). This is almost certainly not what you want here. + # Example: "192.168.0.0/24" for the range 192.168.0.1 - 192.168.0.255 + # + # [#]: Target server to be used for the reverse server feature + # Example: "192.168.0.1#53" + # + # : Domain used for the reverse server feature (e.g., "fritz.box") + # Example: "fritz.box" + # + # Possible values are: + # array of reverse servers each one in one of the following forms: + # ",[/],[#][,]", e.g., + # "true,192.168.0.0/24,192.168.0.1,fritz.box" + revServers = [] + + [dns.cache] + # Cache size of the DNS server. Note that expiring cache entries naturally make room + # for new insertions over time. Setting this number too high will have an adverse + # effect as not only more space is needed, but also lookup speed gets degraded in the + # 10,000+ range. dnsmasq may issue a warning when you go beyond 10,000+ cache entries. + size = 10000 + + # Query cache optimizer: If a DNS name exists in the cache, but its time-to-live has + # expired only recently, the data will be used anyway (a refreshing from upstream is + # triggered). This can improve DNS query delays especially over unreliable Internet + # connections. This feature comes at the expense of possibly sometimes returning + # out-of-date data and less efficient cache utilization, since old data cannot be + # flushed when its TTL expires, so the cache becomes mostly least-recently-used. To + # mitigate issues caused by massively outdated DNS replies, the maximum overaging of + # cached records is limited. We strongly recommend staying below 86400 (1 day) with + # this option. + # Setting the TTL excess time to zero will serve stale cache data regardless how long + # it has expired. This is not recommended as it may lead to stale data being served + # for a long time. Setting this option to any negative value will disable this feature + # altogether. + optimizer = 3600 + + # This setting allows you to specify the TTL used for queries blocked upstream. Once + # the TTL expires, the query will be forwarded to the upstream server again to check + # if the block is still valid. Defaults to caching for one day (86400 seconds). + # Setting this value to zero disables caching of queries blocked upstream. + upstreamBlockedTTL = 86400 + + [dns.blocking] + # Should FTL block queries? + active = true + + # How should FTL reply to blocked queries? + # + # Possible values are: + # - "NULL" + # In NULL mode, which is both the default and recommended mode for Pi-hole + # FTLDNS, blocked queries will be answered with the "unspecified address" + # (0.0.0.0 or ::). The "unspecified address" is a reserved IP address specified + # by RFC 3513 - Internet Protocol Version 6 (IPv6) Addressing Architecture, + # section 2.5.2. + # - "IP_NODATA_AAAA" + # In IP-NODATA-AAAA mode, blocked queries will be answered with the local IPv4 + # addresses of your Pi-hole. 
Blocked AAAA queries will be answered with + # NODATA-IPV6 and clients will only try to reach your Pi-hole over its static + # IPv4 address. + # - "IP" + # In IP mode, blocked queries will be answered with the local IP addresses of + # your Pi-hole. + # - "NX" + # In NXDOMAIN mode, blocked queries will be answered with an empty response + # (i.e., there won't be an answer section) and status NXDOMAIN. A NXDOMAIN + # response should indicate that there is no such domain to the client making the + # query. + # - "NODATA" + # In NODATA mode, blocked queries will be answered with an empty response (no + # answer section) and status NODATA. A NODATA response indicates that the domain + # exists, but there is no record for the requested query type. + mode = "NULL" + + # Should FTL enrich blocked replies with EDNS0 information? + # + # Possible values are: + # - "NONE" + # In NONE mode, no additional EDNS information is added to blocked queries + # - "CODE" + # In CODE mode, blocked queries will be enriched with EDNS info-code BLOCKED (15) + # - "TEXT" + # In TEXT mode, blocked queries will be enriched with EDNS info-code BLOCKED (15) + # and a text message describing the reason for the block + edns = "TEXT" + + [dns.specialDomains] + # Should Pi-hole always reply with NXDOMAIN to A and AAAA queries of + # use-application-dns.net to disable Firefox automatic DNS-over-HTTP? This is + # following the recommendation on + # https://support.mozilla.org/en-US/kb/configuring-networks-disable-dns-over-https + mozillaCanary = true + + # Should Pi-hole always reply with NXDOMAIN to A and AAAA queries of mask.icloud.com + # and mask-h2.icloud.com to disable Apple's iCloud Private Relay to prevent Apple + # devices from bypassing Pi-hole? This is following the recommendation on + # https://developer.apple.com/support/prepare-your-network-for-icloud-private-relay + iCloudPrivateRelay = true + + # Should Pi-hole always reply with NODATA to all queries to zone resolver.arpa to + # prevent devices from bypassing Pi-hole using Discovery of Designated Resolvers? This + # is based on recommendations at the end of RFC 9462, section 4. + designatedResolver = true + + [dns.reply.host] + # Use a specific IPv4 address for the Pi-hole host? By default, FTL determines the + # address of the interface a query arrived on and uses this address for replying to A + # queries with the most suitable address for the requesting client. This setting can + # be used to use a fixed, rather than the dynamically obtained, address when Pi-hole + # responds to the following names: [ "pi.hole", "", + # "pi.hole.", "." ] + force4 = false + + # Custom IPv4 address for the Pi-hole host + # + # Possible values are: + # or empty string ("") + IPv4 = "" + + # Use a specific IPv6 address for the Pi-hole host? See description for the IPv4 + # variant above for further details. + force6 = false + + # Custom IPv6 address for the Pi-hole host + # + # Possible values are: + # or empty string ("") + IPv6 = "" + + [dns.reply.blocking] + # Use a specific IPv4 address in IP blocking mode? By default, FTL determines the + # address of the interface a query arrived on and uses this address for replying to A + # queries with the most suitable address for the requesting client. This setting can + # be used to use a fixed, rather than the dynamically obtained, address when Pi-hole + # responds in the following cases: IP blocking mode is used and this query is to be + # blocked, regular expressions with the ;reply=IP regex extension. 
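+ # A hypothetical illustration of the interplay with dns.blocking.mode above: with + # mode = "IP", setting force4 = true together with IPv4 = "192.168.0.4" (an assumed + # address) would answer every blocked A query with 192.168.0.4 instead of the + # interface-derived address. The defaults below keep the automatic behavior.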
+ force4 = false + + # Custom IPv4 address for IP blocking mode + # + # Possible values are: + # or empty string ("") + IPv4 = "" + + # Use a specific IPv6 address in IP blocking mode? See description for the IPv4 variant + # above for further details. + force6 = false + + # Custom IPv6 address for IP blocking mode + # + # Possible values are: + # or empty string ("") + IPv6 = "" + + [dns.rateLimit] + # Rate-limited queries are answered with a REFUSED reply and not further processed by + # FTL. + # The default settings for FTL's rate-limiting are to permit no more than 1000 queries + # in 60 seconds. Both numbers can be customized independently. It is important to note + # that rate-limiting is happening on a per-client basis. Other clients can continue to + # use FTL while rate-limited clients are short-circuited at the same time. + # For this setting, both numbers, the maximum number of queries within a given time, + # and the length of the time interval (seconds) have to be specified. For instance, if + # you want to set a rate limit of 1 query per hour, the option should look like + # dns.rateLimit.count=1 and dns.rateLimit.interval=3600. The time interval is relative + # to when FTL has finished starting (start of the daemon + possible delay by + # DELAY_STARTUP) then it will advance in steps of the rate-limiting interval. If a + # client reaches the maximum number of queries it will be blocked until the end of the + # current interval. This will be logged to /var/log/pihole/FTL.log, e.g. Rate-limiting + # 10.0.1.39 for at least 44 seconds. If the client continues to send queries while + # being blocked already and this number of queries during the blocking exceeds the + # limit the client will continue to be blocked until the end of the next interval + # (FTL.log will contain lines like Still rate-limiting 10.0.1.39 as it made additional + # 5007 queries). As soon as the client requests less than the set limit, it will be + # unblocked (Ending rate-limitation of 10.0.1.39). + # Rate-limiting may be disabled altogether by setting both values to zero (this + # results in the same behavior as before FTL v5.7). + # How many queries are permitted... + count = 1000 + + # ... in the set interval before rate-limiting? + interval = 60 + +[dhcp] + # Is the embedded DHCP server enabled? + active = false + + # Start address of the DHCP address pool + # + # Possible values are: + # or empty string (""), e.g., "192.168.0.10" + start = "" + + # End address of the DHCP address pool + # + # Possible values are: + # or empty string (""), e.g., "192.168.0.250" + end = "" + + # Address of the gateway to be used (typically the address of your router in a home + # installation) + # + # Possible values are: + # or empty string (""), e.g., "192.168.0.1" + router = "" + + # The netmask used by your Pi-hole. For directly connected networks (i.e., networks on + # which the machine running Pi-hole has an interface) the netmask is optional and may + # be set to an empty string (""): it will then be determined from the interface + # configuration itself. For networks which receive DHCP service via a relay agent, we + # cannot determine the netmask itself, so it should explicitly be specified, otherwise + # Pi-hole guesses based on the class (A, B or C) of the network address. + # + # Possible values are: + # (e.g., "255.255.255.0") or empty string ("") for + # auto-discovery + netmask = "" + + # If the lease time is given, then leases will be given for that length of time. 
If not + # given, the default lease time is one hour for IPv4 and one day for IPv6. + # + # Possible values are: + # The lease time can be in seconds, or minutes (e.g., "45m") or hours (e.g., "1h") + # or days (like "2d") or even weeks ("1w"). You may also use "infinite" as a string + # but be aware of the drawbacks + leaseTime = "" + + # Should Pi-hole make an attempt to also satisfy IPv6 address requests (be aware that + # IPv6 works a whole lot differently than IPv4) + ipv6 = false + + # Enable DHCPv4 Rapid Commit Option specified in RFC 4039. Should only be enabled if + # the server is the only DHCP server for the subnet, to avoid conflicts + rapidCommit = false + + # Advertise DNS server multiple times to clients. Some devices will add their own + # proprietary DNS servers to the list of DNS servers, which can cause issues with + # Pi-hole. This option will advertise the Pi-hole DNS server multiple times to + # clients, which should prevent this from happening. + multiDNS = false + + # Enable logging for DHCP. This will log all relevant DHCP-related activity, including, + # e.g., all the options sent to DHCP clients and the tags used to determine them (if + # any). This can be useful for debugging DHCP issues. The generated output is saved to + # the file specified by files.log.dnsmasq below. + logging = false + + # Ignore unknown DHCP clients. + # If this option is set, Pi-hole ignores all clients which are not explicitly + # configured through dhcp.hosts. This can be useful to prevent unauthorized clients + # from getting an IP address from the DHCP server. + # It should be noted that this option is not a security feature, as clients can still + # assign themselves an IP address and use the network. It is merely a convenience + # feature to prevent unknown clients from getting a valid IP configuration assigned + # automatically. + # Note that you will need to configure new clients manually in dhcp.hosts before they + # can use the network when this feature is enabled. + ignoreUnknownClients = false + + # Per-host parameters for the DHCP server. This allows a machine with a particular + # hardware address to be always allocated the same hostname, IP address and lease time + # or to specify static DHCP leases + # + # Possible values are: + # Array of static leases each one in one of the following forms: + # "[][,id:|*][,set:][,tag:][,][,][,][,ignore]" + hosts = [] + + [ntp.ipv4] + # Should FTL act as a network time protocol (NTP) server (IPv4)? + active = true + + # IPv4 address to listen on for NTP requests + # + # Possible values are: + # or empty string ("") for wildcard (0.0.0.0) + address = "" + + [ntp.ipv6] + # Should FTL act as a network time protocol (NTP) server (IPv6)? + active = true + + # IPv6 address to listen on for NTP requests + # + # Possible values are: + # or empty string ("") for wildcard (::) + address = "" + + [ntp.sync] + # Should FTL try to synchronize the system time with an upstream NTP server? + active = true + + # NTP upstream server to sync with, e.g., "pool.ntp.org". Note that the NTP server + # should be located as close as possible to you in order to minimize the time offset + # possibly introduced by different routing paths. 
+ # + # Possible values are: + # valid NTP upstream server + server = "pool.ntp.org" + + # Interval in seconds between successive synchronization attempts with the NTP server + interval = 3600 + + # Number of NTP syncs to perform and average before updating the system time + count = 8 + + [ntp.sync.rtc] + # Should FTL update a real-time clock (RTC) if available? + set = false + + # Path to the RTC device to update. Leave empty for auto-discovery + # + # Possible values are: + # Path to the RTC device, e.g., "/dev/rtc0" + device = "" + + # Should the RTC be set to UTC? + utc = true + +[resolver] + # Should FTL try to resolve IPv4 addresses to hostnames? + resolveIPv4 = true + + # Should FTL try to resolve IPv6 addresses to hostnames? + resolveIPv6 = true + + # Control whether FTL should use the fallback option to try to obtain client names from + # checking the network table. This behavior can be disabled with this option. + # Assume an IPv6 client without a host name. However, the network table knows - + # through the client's MAC address - that this is the same device where we have a host + # name for another IP address (e.g., a DHCP-server-managed IPv4 address). In this + # case, we use the host name associated with the other address as this is the same + # device. + networkNames = true + + # With this option, you can change how (and if) hourly PTR requests are made to check + # for changes in client and upstream server hostnames. + # + # Possible values are: + # - "IPV4_ONLY" + # Do hourly PTR lookups only for IPv4 addresses. This is the new default since + # Pi-hole FTL v5.3.2. It should resolve issues with more and more very + # short-lived PE IPv6 addresses coming up in a lot of networks. + # - "ALL" + # Do hourly PTR lookups for all addresses. This was the default until FTL + # v5.3(.1). It has been replaced as it can create a lot of PTR queries for those + # with many IPv6 addresses in their networks. + # - "UNKNOWN" + # Only resolve unknown hostnames. Already existing hostnames are never refreshed, + # i.e., there will be no PTR queries made for clients where hostnames are known. + # This also means that known hostnames will not be updated once known. + # - "NONE" + # Don't do any hourly PTR lookups. This means we look host names up exactly once + # (when we first see a client) and never again. You may miss future changes of + # host names. + refreshNames = "IPV4_ONLY" + +[database] + # Should FTL load information from the database on startup to be aware of the most + # recent history? + DBimport = true + + # How long should queries be stored in the database [days]? + # Setting this value to 0 will disable the database. + maxDBdays = 91 + + # How often do we store queries in FTL's database [seconds]? + DBinterval = 60 + + # Should FTL enable Write-Ahead Log (WAL) mode for the on-disk query database + # (configured via files.database)? + # It is recommended to leave this setting enabled for performance reasons. About the + # only reason to disable WAL mode is if you are experiencing specific issues with it, + # e.g., when using a database that is accessed from multiple hosts via a network + # share. When this setting is disabled, FTL will use SQLite3's default journal mode + # (rollback journal in DELETE mode). + useWAL = true + + [database.network] + # Should FTL analyze the local ARP cache? When disabled, client identification and the + # network table will stop working reliably. 
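+ # Hypothetical example: on a host where another tool already maintains the network + # overview, parseARPcache = false would skip the ARP scan, at the cost of the reduced + # client identification described above.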
+ parseARPcache = true + + # How long should IP addresses be kept in the network_addresses table [days]? IP + # addresses (and associated host names) older than the specified number of days are + # removed to avoid dead entries in the network overview table. + expire = 91 + +[webserver] + # On which domain is the web interface served? + # + # Possible values are: + # + domain = "pi.hole" + + # Webserver access control list (ACL) allowing for restrictions to be put on the list + # of IP addresses which have access to the web server. The ACL is a comma separated + # list of IP subnets, where each subnet is prepended by either a - or a + sign. A plus + # sign means allow, where a minus sign means deny. If a subnet mask is omitted, such + # as -1.2.3.4, this means to deny only that single IP address. If this value is not + # set (empty string), all accesses are allowed. Otherwise, the default setting is to + # deny all accesses. On each request the full list is traversed, and the last (!) + # match wins. IPv6 addresses may be specified in CIDR-form [a:b::c]/64. + # + # Example 1: acl = "+127.0.0.1,+[::1]" + # ---> deny all access, except from 127.0.0.1 and ::1, + # Example 2: acl = "+192.168.0.0/16" + # ---> deny all accesses, except from the 192.168.0.0/16 subnet, + # Example 3: acl = "+[::]/0" ---> allow only IPv6 access. + # + # Possible values are: + # + acl = "" + + # Ports to be used by the webserver. + # Comma-separated list of ports to listen on. It is possible to specify an IP address + # to bind to. In this case, an IP address and a colon must be prepended to the port + # number. For example, to bind to the loopback interface on port 80 (IPv4) and to all + # interfaces port 8080 (IPv4), use "127.0.0.1:80,8080". "[::]:80" can be used to + # listen to IPv6 connections to port 80. IPv6 addresses of network interfaces can be + # specified as well, e.g. "[::1]:80" for the IPv6 loopback interface. [::]:80 will + # bind to port 80 IPv6 only. + # In order to use port 80 for all interfaces, both IPv4 and IPv6, use either the + # configuration "80,[::]:80" (create one socket for IPv4 and one for IPv6 only), or + # "+80" (create one socket for both, IPv4 and IPv6). The '+' notation to use IPv4 and + # IPv6 will only work if no network interface is specified. Depending on your + # operating system version and IPv6 network environment, some configurations might not + # work as expected, so you have to test to find the configuration most suitable for + # your needs. In case "+80" does not work for your environment, you need to use + # "80,[::]:80". + # If the port is TLS/SSL, a letter 's' (secure) must be appended, for example, + # "80,443s" will open port 80 and port 443, and connections on port 443 will be + # encrypted. For non-encrypted ports, it is allowed to append letter 'r' (as in + # redirect). Redirected ports will redirect all their traffic to the first configured + # SSL port. For example, if webserver.port is "80r,443s", then all HTTP traffic coming + # at port 80 will be redirected to HTTPS port 443. + # When specifying 'o' (optional) behind a port, inability to use this port is not + # considered an error. For instance, specifying "80o,8080o" will allow the webserver + # to listen on either 80, 8080, both or even none of the two ports. This flag may be + # combined with 'r' and 's' like "80or,443os,8080,4443s" (80 redirecting to SSL if + # available, 443 encrypted if available, 8080 mandatory and unencrypted, 4443 + # mandatory and encrypted). 
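+ # Another, hypothetical example: "127.0.0.1:8080o,[::1]:8080o" would expose the web + # interface on the loopback addresses only, with both ports treated as optional.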
+ # If this value is not set (empty string), the web server will not be started and, + # hence, the API will not be available. + # + # Possible values are: + # comma-separated list of <[ip_address:]port> + port = "80o,443os,[::]:80o,[::]:443os" + + # Maximum number of worker threads allowed. + # The Pi-hole web server handles each incoming connection in a separate thread. + # Therefore, the value of this option is effectively the number of concurrent HTTP + # connections that can be handled. Any other connections are queued until they can be + # processed by an unoccupied thread. + # The total number of threads you see may be lower than the configured value as + # threads are only created when needed due to incoming connections. + # The value 0 means the number of threads is 50 (as per default settings of CivetWeb) + # for backwards-compatible behavior. + threads = 50 + + # Additional HTTP headers added to the web server responses. + # The headers are added to all responses, including those for the API. + # Note about the default additional headers: + # - X-DNS-Prefetch-Control: off: Usually browsers proactively perform domain name + # resolution on links that the user may choose to follow. We disable DNS prefetching + # here. + # - Content-Security-Policy: [...] 'unsafe-inline' is required both by Chart.js, + # which styles some elements directly, and by index.html, which contains some inlined + # JavaScript code. + # - X-Frame-Options: DENY: The page cannot be displayed in a frame, regardless of the + # site attempting to do so. + # - X-Xss-Protection: 0: Disables XSS filtering in browsers that support it. The + # filter is usually enabled by default in browsers, and is not recommended as it can + # hurt the security of the site. + # (https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/X-XSS-Protection). + # - X-Content-Type-Options: nosniff: Marker used by the server to indicate that the + # MIME types advertised in the Content-Type headers should not be changed and be + # followed. This allows opting out of MIME type sniffing; in other words, it is a + # way to say that the webmasters knew what they were doing. Site security testers + # usually expect this header to be set. + # - Referrer-Policy: strict-origin-when-cross-origin: A referrer will be sent for + # same-site origins, but cross-origin requests will send no referrer information. + # The latter four headers are set as expected by https://securityheaders.io + # + # Possible values are: + # array of HTTP headers + headers = [ + "X-DNS-Prefetch-Control: off", + "Content-Security-Policy: default-src 'self' 'unsafe-inline';", + "X-Frame-Options: DENY", + "X-XSS-Protection: 0", + "X-Content-Type-Options: nosniff", + "Referrer-Policy: strict-origin-when-cross-origin" + ] + + # Should the web server serve all files in the webserver.paths.webroot directory? If + # disabled, only files within the path defined through webserver.paths.webhome and + # /api will be served. + serve_all = false + + [webserver.session] + # Session timeout in seconds. If a session is inactive for more than this time, it will + # be terminated. Sessions are continuously refreshed by the web interface, preventing + # sessions from timing out while the web interface is open. + # This option may also be used to make logins persistent for long times, e.g. 86400 + # seconds (24 hours), 604800 seconds (7 days) or 2592000 seconds (30 days). 
Note that + # the total number of concurrent sessions is limited so setting this value too high + # may result in users being rejected and unable to log in if there are already too + # many sessions active. + timeout = 1800 + + # Should Pi-hole backup and restore sessions from the database? This is useful if you + # want to keep your sessions after a restart of the web interface. + restore = true + + [webserver.tls] + # Path to the TLS (SSL) certificate file. All directories along the path must be + # readable and accessible by the user running FTL (typically 'pihole'). This option is + # only required when at least one of webserver.port is TLS. The file must be in PEM + # format, and it must have both, private key and certificate (the *.pem file created + # must contain a 'CERTIFICATE' section as well as a 'RSA PRIVATE KEY' section). + # The *.pem file can be created using + # cp server.crt server.pem + # cat server.key >> server.pem + # if you have these files instead + # + # Possible values are: + # + cert = "/etc/pihole/tls.pem" + + [webserver.paths] + # Server root on the host + # + # Possible values are: + # + webroot = "/var/www/html" + + # Sub-directory of the root containing the web interface + # + # Possible values are: + # , both slashes are needed! + webhome = "/admin/" + + # Prefix where the web interface is served + # This is useful when you are using a reverse proxy serving the web interface, e.g., + # at http:///pihole/admin/ instead of http:///admin/. In this example, the + # prefix would be "/pihole". Note that the prefix has to be stripped away by the + # reverse proxy, e.g., for traefik: + # - traefik.http.routers.pihole.rule=PathPrefix(`/pihole`) + # - traefik.http.middlewares.piholehttp.stripprefix.prefixes=/pihole + # The prefix should start with a slash. If you don't use a prefix, leave this field + # empty. Setting this field to an incorrect value may result in the web interface not + # being accessible. + # Don't use this setting if you are not using a reverse proxy! + # + # Possible values are: + # valid URL prefix or empty + prefix = "" + + [webserver.interface] + # Should the web interface use the boxed layout? + boxed = true + + # Theme used by the Pi-hole web interface + # + # Possible values are: + # - "default-auto" + # Pi-hole auto + # - "default-light" + # Pi-hole day + # - "default-dark" + # Pi-hole midnight + # - "default-darker" + # Pi-hole deep-midnight + # - "high-contrast" + # High-contrast light + # - "high-contrast-dark" + # High-contrast dark + # - "lcars" + # Star Trek LCARS + theme = "default-dark" ### CHANGED, default = "default-auto" + + [webserver.api] + # Number of concurrent sessions allowed for the API. If the number of sessions exceeds + # this value, no new sessions will be allowed until the number of sessions drops due + # to session expiration or logout. Note that the number of concurrent sessions is + # irrelevant if authentication is disabled as no sessions are used in this case. + max_sessions = 16 + + # Should FTL prettify the API output (add extra spaces, newlines and indentation)? + prettyJSON = false + + # API password hash + # + # Possible values are: + # + pwhash = "$BALLOON-SHA256$v=1$s=1024,t=32$DNWoHtHsou3p6LN4SG/g0w==$g92GxEwxVc4nzeZzoZ1i/HSTULFGUk2FkPZHJQyd8EY=" ### CHANGED, default = "" + + # Pi-hole 2FA TOTP secret. When set to something different than "", 2FA authentication + # will be enforced for the API and the web interface. This setting is write-only, you + # can not read the secret back. 
+ # + # Possible values are: + # + totp_secret = "" + + # Pi-hole application password. + # After you turn on two-factor (2FA) verification and set up an Authenticator app, you + # may run into issues if you use apps or other services that don't support two-step + # verification. In this case, you can create and use an app password to sign in. An + # app password is a long, randomly generated password that can be used instead of your + # regular password + TOTP token when signing in to the API. The app password can be + # generated through the API and will be shown only once. You can revoke the app + # password at any time. If you revoke the app password, be sure to generate a new one + # and update your app with the new password. + # + # Possible values are: + # + app_pwhash = "" + + # Should application password API sessions be allowed to modify config settings? + # Setting this to true allows third-party applications using the application password + # to modify settings, e.g., the upstream DNS servers, DHCP server settings, or + # changing passwords. This setting should only be enabled if really needed and only if + # you trust the applications using the application password. + app_sudo = false + + # Should FTL create a temporary CLI password? This password is stored in clear in + # /etc/pihole and can be used by the CLI (pihole ... commands) to authenticate + # against the API. Note that the password is only valid for the current session and + # regenerated on each FTL restart. Sessions initiated with this password cannot modify + # the Pi-hole configuration (change passwords, etc.) for security reasons but can + # still use the API to query data and manage lists. + cli_pw = true + + # Array of clients to be excluded from certain API responses (regex): + # - Query Log (/api/queries) + # - Top Clients (/api/stats/top_clients) + # This setting accepts both IP addresses (IPv4 and IPv6) as well as hostnames. + # Note that backslashes "\" need to be escaped, i.e. "\\" in this setting + # + # Example: [ "^192\\.168\\.2\\.56$", "^fe80::341:[0-9a-f]*$", "^localhost$" ] + # + # Possible values are: + # array of regular expressions describing clients + excludeClients = [] + + # Array of domains to be excluded from certain API responses (regex): + # - Query Log (/api/queries) + # - Top Clients (/api/stats/top_domains) + # Note that backslashes "\" need to be escaped, i.e. "\\" in this setting + # + # Example: [ "(^|\\.)\\.google\\.de$", "\\.pi-hole\\.net$" ] + # + # Possible values are: + # array of regular expressions describing domains + excludeDomains = [] + + # How much history should be imported from the database and returned by the API + # [seconds]? (max 24*60*60 = 86400) + maxHistory = 86400 + + # Up to how many clients should be returned in the activity graph endpoint + # (/api/history/clients)? + # This setting can be overwritten at run-time using the parameter N. Setting this to 0 + # will always send all clients. Be aware that this may be challenging for the GUI if + # you have many (think > 1.000 clients) in your network + maxClients = 10 + + # How should the API compute the most active clients? If set to true, the API will + # return the clients with the most queries globally (within 24 hours). If set to + # false, the API will return the clients with the most queries per time slot + # individually. + client_history_global_max = true + + # Allow destructive API calls (e.g. restart DNS server, flush logs, ...) 
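+ # As a sketch only (the action endpoint and session header are assumptions about the + # v6 REST API; verify against your FTL version), a destructive call might look like: + # curl -s -X POST -H "X-FTL-SID: <session id>" http://pi.hole/api/action/restartdns + # and is only honored while this option is enabled.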
+ allow_destructive = true + + [webserver.api.temp] + # Which upper temperature limit should be used by Pi-hole? Temperatures above this + # limit will be shown as "hot". The number specified here is in the unit defined below + limit = 60.000000 + + # Which temperature unit should be used for temperatures processed by FTL? + # + # Possible values are: + # - "C" + # Celsius + # - "F" + # Fahrenheit + # - "K" + # Kelvin + unit = "C" + +[files] + # The file which contains the PID of FTL's main process. + # + # Possible values are: + # + pid = "/run/pihole-FTL.pid" + + # The location of FTL's long-term database + # + # Possible values are: + # + database = "/var/lib/pihole/pihole-FTL.db" + + # The location of Pi-hole's gravity database + # + # Possible values are: + # + gravity = "/var/lib/pihole/gravity.db" + + # A temporary directory where Pi-hole can store files during gravity updates. This + # directory must be writable by the user running gravity (typically pihole). + # + # Possible values are: + # + gravity_tmp = "/tmp" + + # The database containing MAC -> Vendor information for the network table + # + # Possible values are: + # + macvendor = "/var/lib/pihole/macvendor.db" + + # An optional file containing a pcap capture of the network traffic. This file is used + # for debugging purposes only. If you don't know what this is, you don't need it. + # Setting this to an empty string disables pcap recording. The file must be writable + # by the user running FTL (typically pihole). Failure to write to this file will + # prevent the DNS resolver from starting. The file is appended to if it already + # exists. + # + # Possible values are: + # + pcap = "" + + [files.log] + # The location of FTL's log file + # + # Possible values are: + # + ftl = "/var/log/pihole/FTL.log" + + # The log file used by the embedded dnsmasq DNS server + # + # Possible values are: + # + dnsmasq = "/var/log/pihole/pihole.log" + + # The log file used by the webserver + # + # Possible values are: + # + webserver = "/var/log/pihole/webserver.log" + +[misc] + # Using privacy levels you can specify which level of detail you want to see in your + # Pi-hole statistics. Changing this setting will trigger a restart of FTL + # + # Possible values are: + # - 0 + # Don't hide anything, all statistics are available. + # - 1 + # Hide domains. This setting disables Top Domains and Top Ads + # - 2 + # Hide domains and clients. This setting disables Top Domains, Top Ads, Top + # Clients and Clients over time. + # - 3 + # Anonymize everything. This setting disables almost any statistics and query + # analysis. There will be no long-term database logging and no Query Log. You + # will also lose most regex features. + privacylevel = 0 + + # During startup, in some configurations, network interfaces appear only late during + # system startup and are not ready when FTL tries to bind to them. Therefore, you may + # want FTL to wait a given amount of time before trying to start the DNS resolver. + # This setting takes any integer value between 0 and 300 seconds. To prevent delayed + # startup while the system is already running and FTL is restarted, the delay only + # takes place within the first 180 seconds (hard-coded) after booting. + delay_startup = 0 + + # Set niceness of pihole-FTL. Defaults to -10 and can be disabled altogether by setting + # a value of -999. The nice value is an attribute that can be used to influence the + # CPU scheduler to favor or disfavor a process in scheduling decisions. 
The range of + # the nice value varies across UNIX systems. On modern Linux, the range is -20 (high + # priority = not very nice to other processes) to +19 (low priority). + nice = -10 + + # Should FTL translate its own stack addresses into code lines during the bug + # backtrace? This improves the analysis of crashes significantly. It is recommended to + # leave the option enabled. This option should only be disabled when addr2line is + # known to not be working correctly on the machine because, in this case, the + # malfunctioning addr2line can prevent any backtrace from being generated at all. + addr2line = true + + # Should FTL load additional dnsmasq configuration files from /etc/dnsmasq.d/? + # Warning: This is an advanced setting and should only be used with care. + # Incorrectly formatted config files, or files specifying options which can only be defined + # once, can result in conflicts with the automatic configuration of Pi-hole (see + # /etc/pihole/dnsmasq.conf) and may stop DNS resolution from working. + etc_dnsmasq_d = false + + # Additional lines to inject into the generated dnsmasq configuration. + # Warning: This is an advanced setting and should only be used with care. Incorrectly + # formatted or duplicated lines as well as lines conflicting with the automatic + # configuration of Pi-hole can break the embedded dnsmasq and will stop DNS resolution + # from working. + # Use this option with extra care. + # + # Possible values are: + # array of valid dnsmasq config line options + dnsmasq_lines = [] + + # Log additional information about queries and replies to pihole.log + # When this setting is enabled, the log has extra information at the start of each + # line. This consists of a serial number which ties together the log lines associated + # with an individual query, and the IP address of the requestor. This setting is only + # effective if dns.queryLogging is enabled, too. This option is only useful for + # debugging and is not recommended for normal use. + extraLogging = false + + # Put configuration into read-only mode. This will prevent any changes to the + # configuration file via the API or CLI. This setting is useful when a configuration is + # to be forced/modified by some third-party application (like infrastructure-as-code + # providers) and should not be changed by any means. + readOnly = false + + [misc.check] + # Pi-hole is very lightweight on resources. Nevertheless, this does not mean that you + # should run Pi-hole on a server that is otherwise extremely busy, since queuing on the + # system can lead to unnecessary delays in DNS operation: the system becomes less + # and less usable as the load increases because all resources are permanently + # in use. To account for this, FTL regularly checks the system load. To bring this to + # your attention, FTL warns about excessive load when the 15-minute system load + # average exceeds the number of cores. + # This check can be disabled with this setting. + load = true + + # FTL stores history in shared memory to allow inter-process communication with forked + # dedicated TCP workers. If FTL runs out of memory, it cannot continue to work as + # queries cannot be analyzed any further. Hence, FTL checks if enough shared memory is + # available on your system and warns you if this is not the case. + # By default, FTL warns if the shared-memory usage exceeds 90%. You can set any + # integer limit between 0 and 100 (interpreted as percentages) where 0 means that + # checking of shared-memory usage is disabled. 
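+ # For example, shmem = 0 would turn this check off entirely, while a stricter value + # such as shmem = 75 (an arbitrary choice) would warn earlier.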
+ shmem = 90 + + # FTL stores its long-term history in a database file on disk. Furthermore, FTL stores + # log files. By default, FTL warns if usage of the disk holding any crucial file + # exceeds 90%. You can set any integer limit between 0 to 100 (interpreted as + # percentages) where 0 means that checking of disk usage is disabled. + disk = 90 + +[debug] + # Print debugging information about database actions. This prints performed SQL + # statements as well as some general information such as the time it took to store the + # queries and how many have been saved to the database. + database = false + + # Prints a list of the detected interfaces on the startup of pihole-FTL. Also, prints + # whether these interfaces are IPv4 or IPv6 interfaces. + networking = false + + # Print information about shared memory locks. Messages will be generated when waiting, + # obtaining, and releasing a lock. + locks = false + + # Print extensive query information (domains, types, replies, etc.). This has always + # been part of the legacy debug mode of pihole-FTL. + queries = false + + # Print flags of queries received by the DNS hooks. Only effective when DEBUG_QUERIES + # is enabled as well. + flags = false + + # Print information about shared memory buffers. Messages are either about creating or + # enlarging shmem objects or string injections. + shmem = false + + # Print information about garbage collection (GC): What is to be removed, how many have + # been removed and how long did GC take. + gc = false + + # Print information about ARP table processing: How long did parsing take, whether read + # MAC addresses are valid, and if the macvendor.db file exists. + arp = false + + # Controls if FTLDNS should print extended details about regex matching into FTL.log. + regex = false + + # Print extra debugging information concerning API calls. This includes the request, + # the request parameters, and the internal details about how the algorithms decide + # which data to present and in what form. This very verbose output should only be used + # when debugging specific API issues and can be helpful, e.g., when a client cannot + # connect due to an obscure API error. Furthermore, this setting enables logging of + # all API requests (auth log) and details about user authentication attempts. + api = false + + # Print extra debugging information about TLS connections. This includes the TLS + # version, the cipher suite, the certificate chain and much more. This very verbose + # output should only be used when debugging specific TLS issues and can be helpful, + # e.g., when a client cannot connect due to an obscure TLS error as modern browsers do + # not provide much information about the underlying TLS connection and most often give + # only very generic error messages without much/any underlying technical information. + tls = false + + # Print information about overTime memory operations, such as initializing or moving + # overTime slots. + overtime = false + + # Print information about status changes for individual queries. This can be useful to + # identify unexpected unknown queries. + status = false + + # Print information about capabilities granted to the pihole-FTL process. The current + # capabilities are printed on receipt of SIGHUP, i.e., the current set of capabilities + # can be queried without restarting pihole-FTL (by setting DEBUG_CAPS=true and + # thereafter sending killall -HUP pihole-FTL). 
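+ # A possible workflow, using the signal documented above and the log path from + # files.log.ftl: + # killall -HUP pihole-FTL + # grep -i cap /var/log/pihole/FTL.log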
+ caps = false + + # Print information about DNSSEC activity + dnssec = false + + # FTL uses dynamically allocated vectors for various tasks. This config option enables + # extensive debugging information such as information about allocation, referencing, + # deletion, and appending. + vectors = false + + # Extensive information about hostname resolution like which DNS servers are used in + # the first and second hostname resolving tries (only affecting internally generated + # PTR queries). + resolver = false + + # Print debugging information about received EDNS(0) data. + edns0 = false + + # Log various important client events such as change of interface (e.g., client + # switching from WiFi to wired or VPN connection), as well as extensive reporting + # about how clients were assigned to its groups. + clients = false + + # Log information related to alias-client processing. + aliasclients = false + + # Log information regarding FTL's embedded event handling queue. + events = false + + # Log information about script helpers, e.g., due to dhcp-script. + helper = false + + # Print config parsing details + config = false + + # Debug monitoring of /etc/pihole filesystem events + inotify = false + + # Debug monitoring of the webserver (CivetWeb) events + webserver = false + + # Temporary flag that may print additional information. This debug flag is meant to be + # used whenever needed for temporary investigations. The logged content may change + # without further notice at any time. + extra = false + + # Reserved debug flag + reserved = false + + # Print information about NTP synchronization + ntp = false + + # Print information about netlink communication and parsing + netlink = false + + # Set all debug flags at once. This is a convenience option to enable all debug flags + # at once. Note that this option is not persistent, setting it to true will enable all + # *remaining* debug flags but unsetting it will disable *all* debug flags. + all = false + +# Configuration statistics: +# 155 total entries out of which 148 entries are default +# --> 7 entries are modified + diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pihole/templates/pihole.toml.j2 b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pihole/templates/pihole.toml.j2 new file mode 100644 index 0000000..a459e5e --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pihole/templates/pihole.toml.j2 @@ -0,0 +1,1211 @@ +#jinja2: trim_blocks: True, lstrip_blocks: True + +# Pi-hole configuration file (v6.2.3) +# Encoding: UTF-8 +# This file is managed by pihole-FTL +# Last updated on 2025-06-18 17:35:43 UTC + +[dns] + # Array of upstream DNS servers used by Pi-hole + # Example: [ "8.8.8.8", "127.0.0.1#5335", "docker-resolver" ] + # + # Possible values are: + # array of IP addresses and/or hostnames, optionally with a port (#...) + upstreams = [ + {% for i in pihole_dns.upstream %} + "{{ i }}", + {% endfor %} + ] + + # Use this option to control deep CNAME inspection. Disabling it might be beneficial + # for very low-end devices + CNAMEdeepInspect = true + + # Should _esni. subdomains be blocked by default? Encrypted Server Name Indication + # (ESNI) is certainly a good step into the right direction to enhance privacy on the + # web. It prevents on-path observers, including ISPs, coffee shop owners and + # firewalls, from intercepting the TLS Server Name Indication (SNI) extension by + # encrypting it. 
This prevents the SNI from being used to determine which websites + # users are visiting. + # ESNI will obviously cause issues for pixelserv-tls which will be unable to generate + # matching certificates on-the-fly when it cannot read the SNI. Cloudflare and Firefox + # are already enabling ESNI. According to the IETF draft (link above), we can easily + # restore pixelserv-tls's operation by replying NXDOMAIN to _esni. subdomains of + # blocked domains as this mimics a "not configured for this domain" behavior. + blockESNI = true + + # Should we overwrite the query source when client information is provided through + # EDNS0 client subnet (ECS) information? This allows Pi-hole to obtain client IPs even + # if they are hidden behind the NAT of a router. This feature has been requested and + # discussed on Discourse where further information on how to use it can be found: + # https://discourse.pi-hole.net/t/support-for-add-subnet-option-from-dnsmasq-ecs-edns0-client-subnet/35940 + EDNS0ECS = true + + # Should FTL hide queries made by localhost? + ignoreLocalhost = false + + # Should FTL analyze and show internally generated DNSSEC queries? + showDNSSEC = true + + # Should FTL analyze *only* A and AAAA queries? + analyzeOnlyAandAAAA = false + + # Controls whether and how FTL will reply for addresses for which a local interface + # exists. Changing this setting causes FTL to restart. + # + # Possible values are: + # - "NONE" + # Pi-hole will not respond automatically on PTR requests to local interface + # addresses. Ensure pi.hole and/or hostname records exist elsewhere. + # - "HOSTNAME" + # Serve the machine's hostname. The hostname is queried from the kernel through + # uname(2)->nodename. If the machine has multiple network interfaces, it can + # also have multiple nodenames. In this case, it is unspecified and up to the + # kernel which one will be returned. On Linux, the returned string is what has + # been set using sethostname(2) which is typically what has been set in + # /etc/hostname. + # - "HOSTNAMEFQDN" + # Serve the machine's hostname (see limitations above) as fully qualified domain + # by adding the local domain. If no local domain has been defined (config option + # dns.domain), FTL tries to query the domain name from the kernel using + # getdomainname(2). If this fails, FTL appends ".no_fqdn_available" to the + # hostname. + # - "PI.HOLE" + # Respond with "pi.hole". + piholePTR = "PI.HOLE" + + # How should FTL handle queries when the gravity database is not available? + # + # Possible values are: + # - "BLOCK" + # Block all queries when the database is busy. + # - "ALLOW" + # Allow all queries when the database is busy. + # - "REFUSE" + # Refuse all queries which arrive while the database is busy. + # - "DROP" + # Just drop the queries, i.e., never reply to them at all. Despite "REFUSE" + # sounding similar to "DROP", it turned out that many clients will just + # immediately retry, causing up to several thousands of queries per second. This + # does not happen in "DROP" mode. + replyWhenBusy = "ALLOW" + + # FTL's internal TTL to be handed out for blocked queries in seconds. This setting + # allows users to select a value different from the dnsmasq config option local-ttl. + # This is useful in the context of locally used hostnames that are known to stay constant + # over long times (printers, etc.). + # Note that large values may render whitelisting ineffective due to client-side + # caching of blocked queries. 
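+ {# Hypothetical extension of this template: blockTTL could be taken from an Ansible + variable, e.g. pihole_dns.block_ttl | default(2), instead of the fixed value below; + the variable name is an assumption, not part of this role. #}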
+ blockTTL = 2 + + # Array of custom DNS records + # Example: hosts = [ "127.0.0.1 mylocal", "192.168.0.1 therouter" ] + # + # Possible values are: + # Array of custom DNS records each one in HOSTS form: "IP HOSTNAME" + {% if pihole_dns.custom_hosts is defined and + pihole_dns.custom_hosts | count > 0 %} + hosts = [ + {% for h in pihole_dns.custom_hosts %} + "{{ h.host }} {{h.domain }}", + {% endfor %} + ] ### CHANGED, default = [] + {% endif %} + + # If set, A and AAAA queries for plain names, without dots or domain parts, are never + # forwarded to upstream nameservers + domainNeeded = true ### CHANGED, default = false + + # If set, the domain is added to simple names (without a period) in /etc/hosts in the + # same way as for DHCP-derived names + expandHosts = true ### CHANGED, default = false + + # The DNS domain used by your Pi-hole. + # + # This DNS domain is purely local. FTL may answer queries from its local cache and + # configuration but *never* forwards any requests upstream *unless* you have + # configured a dns.revServer exactly for this domain. In the latter case, all queries + # for this domain are sent exclusively to this server (including reverse lookups). + # + # For DHCP, this has two effects; firstly it causes the DHCP server to return the + # domain to any hosts which request it, and secondly it sets the domain which it is + # legal for DHCP-configured hosts to claim. The intention is to constrain hostnames so + # that an untrusted host on the LAN cannot advertise its name via DHCP as e.g. + # "google.com" and capture traffic not meant for it. If no domain suffix is specified, + # then any DHCP hostname with a domain part (ie with a period) will be disallowed and + # logged. If a domain is specified, then hostnames with a domain part are allowed, + # provided the domain part matches the suffix. In addition, when a suffix is set then + # hostnames without a domain part have the suffix added as an optional domain part. + # For instance, we can set domain=mylab.com and have a machine whose DHCP hostname is + # "laptop". The IP address for that machine is available both as "laptop" and + # "laptop.mylab.com". + # + # You can disable setting a domain by setting this option to an empty string. + # + # Possible values are: + # + domain = "matrix.lan" ### CHANGED, default = "lan" + + # Should all reverse lookups for private IP ranges (i.e., 192.168.x.y, etc) which are + # not found in /etc/hosts or the DHCP leases file be answered with "no such domain" + # rather than being forwarded upstream? + bogusPriv = true + + # Validate DNS replies using DNSSEC? + dnssec = false + + # Interface to use for DNS (see also dnsmasq.listening.mode) and DHCP (if enabled) + # + # Possible values are: + # a valid interface name + interface = "eth0" + + # Add A, AAAA and PTR records to the DNS. This adds one or more names to the DNS with + # associated IPv4 (A) and IPv6 (AAAA) records + # + # Possible values are: + # [,....],[],[][,] + hostRecord = "" + + # Pi-hole interface listening modes + # + # Possible values are: + # - "LOCAL" + # Allow only local requests. This setting accepts DNS queries only from hosts + # whose address is on a local subnet, i.e., a subnet for which an interface + # exists on the server. It is intended to be set as a default on installation, + # to allow unconfigured installations to be useful but also safe from being used + # for DNS amplification attacks if (accidentally) running public. + # - "SINGLE" + # Permit all origins, accept only on the specified interface. 
+
+ # If set, A and AAAA queries for plain names, without dots or domain parts, are never
+ # forwarded to upstream nameservers
+ domainNeeded = true ### CHANGED, default = false
+
+ # If set, the domain is added to simple names (without a period) in /etc/hosts in the
+ # same way as for DHCP-derived names
+ expandHosts = true ### CHANGED, default = false
+
+ # The DNS domain used by your Pi-hole.
+ #
+ # This DNS domain is purely local. FTL may answer queries from its local cache and
+ # configuration but *never* forwards any requests upstream *unless* you have
+ # configured a dns.revServer exactly for this domain. In the latter case, all queries
+ # for this domain are sent exclusively to this server (including reverse lookups).
+ #
+ # For DHCP, this has two effects; firstly it causes the DHCP server to return the
+ # domain to any hosts which request it, and secondly it sets the domain which it is
+ # legal for DHCP-configured hosts to claim. The intention is to constrain hostnames so
+ # that an untrusted host on the LAN cannot advertise its name via DHCP as e.g.
+ # "google.com" and capture traffic not meant for it. If no domain suffix is specified,
+ # then any DHCP hostname with a domain part (i.e. with a period) will be disallowed and
+ # logged. If a domain is specified, then hostnames with a domain part are allowed,
+ # provided the domain part matches the suffix. In addition, when a suffix is set then
+ # hostnames without a domain part have the suffix added as an optional domain part.
+ # For instance, we can set domain=mylab.com and have a machine whose DHCP hostname is
+ # "laptop". The IP address for that machine is available both as "laptop" and
+ # "laptop.mylab.com".
+ #
+ # You can disable setting a domain by setting this option to an empty string.
+ #
+ # Possible values are:
+ # <any valid domain>
+ domain = "matrix.lan" ### CHANGED, default = "lan"
+
+ # Should all reverse lookups for private IP ranges (i.e., 192.168.x.y, etc) which are
+ # not found in /etc/hosts or the DHCP leases file be answered with "no such domain"
+ # rather than being forwarded upstream?
+ bogusPriv = true
+
+ # Validate DNS replies using DNSSEC?
+ dnssec = false
+
+ # Interface to use for DNS (see also dnsmasq.listening.mode) and DHCP (if enabled)
+ #
+ # Possible values are:
+ # a valid interface name
+ interface = "eth0"
+
+ # Add A, AAAA and PTR records to the DNS. This adds one or more names to the DNS with
+ # associated IPv4 (A) and IPv6 (AAAA) records
+ #
+ # Possible values are:
+ # <name>[,<name>....],[<IPv4-address>],[<IPv6-address>][,<TTL>]
+ hostRecord = ""
+
+ # Pi-hole interface listening modes
+ #
+ # Possible values are:
+ # - "LOCAL"
+ # Allow only local requests. This setting accepts DNS queries only from hosts
+ # whose address is on a local subnet, i.e., a subnet for which an interface
+ # exists on the server. It is intended to be set as a default on installation,
+ # to allow unconfigured installations to be useful but also safe from being used
+ # for DNS amplification attacks if (accidentally) running public.
+ # - "SINGLE"
+ # Permit all origins, accept only on the specified interface. Respond only to
+ # queries arriving on the specified interface. The loopback (lo) interface is
+ # automatically added to the list of interfaces to use when this option is used.
+ # Make sure your Pi-hole is properly firewalled!
+ # - "BIND"
+ # By default, FTL binds the wildcard address. If this is not what you want, you
+ # can use this option as it forces FTL to really bind only the interfaces it is
+ # listening on. Note that this may result in issues when the interface may go
+ # down (cable unplugged, etc.). About the only time when this is useful is when
+ # running another nameserver on the same port on the same machine. This may also
+ # happen if you run a virtualization API such as libvirt. When this option is
+ # used, IP alias interface labels (e.g. enp2s0:0) are checked rather than
+ # interface names.
+ # - "ALL"
+ # Permit all origins, accept on all interfaces. Make sure your Pi-hole is
+ # properly firewalled! This truly allows any traffic to be replied to and is a
+ # dangerous thing to do as your Pi-hole could become an open resolver. You
+ # should always ask yourself if the first option doesn't work for you as well.
+ # - "NONE"
+ # Do not add any configuration concerning the listening mode to the dnsmasq
+ # configuration file. This is useful if you want to manually configure the
+ # listening mode in auxiliary configuration files. This option is really meant
+ # for advanced users only, support for this option may be limited.
+ listeningMode = "LOCAL"
+
+ # Log DNS queries and replies to pihole.log
+ queryLogging = true
+
+ # List of CNAME records which indicate that <cname> is really <target>. If the <TTL> is
+ # given, it overwrites the value of local-ttl
+ #
+ # Possible values are:
+ # Array of CNAMEs each one in one of the following forms: "<cname>,<target>[,<TTL>]"
+ cnameRecords = []
+
+ # Port used by the DNS server
+ port = 53
+
+ # Reverse server (formerly called "conditional forwarding") feature
+ # Array of reverse servers each one in one of the following forms:
+ # "<enabled>,<ip-address>[/<prefix-len>],<server>[#<port>][,<domain>]"
+ #
+ # Individual components:
+ #
+ # <enabled>: either "true" or "false"
+ #
+ # <ip-address>[/<prefix-len>]: Address range for the reverse server feature in CIDR
+ # notation. If the prefix length is omitted, either 32 (IPv4) or 128 (IPv6) are
+ # substituted (exact address match). This is almost certainly not what you want here.
+ # Example: "192.168.0.0/24" for the range 192.168.0.1 - 192.168.0.255
+ #
+ # <server>[#<port>]: Target server to be used for the reverse server feature
+ # Example: "192.168.0.1#53"
+ #
+ # <domain>: Domain used for the reverse server feature (e.g., "fritz.box")
+ # Example: "fritz.box"
+ #
+ # Possible values are:
+ # array of reverse servers each one in one of the following forms:
+ # "<enabled>,<ip-address>[/<prefix-len>],<server>[#<port>][,<domain>]", e.g.,
+ # "true,192.168.0.0/24,192.168.0.1,fritz.box"
+ revServers = []
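+ # Illustrative sketch built from the example values in the comment above (not a
+ # shipped default): forward reverse lookups for 192.168.0.0/24 to a router at
+ # 192.168.0.1 serving the "fritz.box" domain:
+ # revServers = [ "true,192.168.0.0/24,192.168.0.1#53,fritz.box" ]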
+
+ [dns.cache]
+ # Cache size of the DNS server. Note that expiring cache entries naturally make room
+ # for new insertions over time. Setting this number too high will have an adverse
+ # effect as not only more space is needed, but also lookup speed gets degraded in the
+ # 10,000+ range. dnsmasq may issue a warning when you go beyond 10,000+ cache entries.
+ size = 10000
+
+ # Query cache optimizer: If a DNS name exists in the cache, but its time-to-live has
+ # expired only recently, the data will be used anyway (a refreshing from upstream is
+ # triggered). This can improve DNS query delays especially over unreliable Internet
+ # connections. This feature comes at the expense of possibly sometimes returning
+ # out-of-date data and less efficient cache utilization, since old data cannot be
+ # flushed when its TTL expires, so the cache becomes mostly least-recently-used. To
+ # mitigate issues caused by massively outdated DNS replies, the maximum overaging of
+ # cached records is limited. We strongly recommend staying below 86400 (1 day) with
+ # this option.
+ # Setting the TTL excess time to zero will serve stale cache data regardless of how
+ # long it has expired. This is not recommended as it may lead to stale data being
+ # served for a long time. Setting this option to any negative value will disable this
+ # feature altogether.
+ optimizer = 3600
+
+ # This setting allows you to specify the TTL used for queries blocked upstream. Once
+ # the TTL expires, the query will be forwarded to the upstream server again to check
+ # if the block is still valid. Defaults to caching for one day (86400 seconds).
+ # Setting this value to zero disables caching of queries blocked upstream.
+ upstreamBlockedTTL = 86400
+
+ [dns.blocking]
+ # Should FTL block queries?
+ active = true
+
+ # How should FTL reply to blocked queries?
+ #
+ # Possible values are:
+ # - "NULL"
+ # In NULL mode, which is both the default and recommended mode for Pi-hole
+ # FTLDNS, blocked queries will be answered with the "unspecified address"
+ # (0.0.0.0 or ::). The "unspecified address" is a reserved IP address specified
+ # by RFC 3513 - Internet Protocol Version 6 (IPv6) Addressing Architecture,
+ # section 2.5.2.
+ # - "IP_NODATA_AAAA"
+ # In IP-NODATA-AAAA mode, blocked queries will be answered with the local IPv4
+ # addresses of your Pi-hole. Blocked AAAA queries will be answered with
+ # NODATA-IPV6 and clients will only try to reach your Pi-hole over its static
+ # IPv4 address.
+ # - "IP"
+ # In IP mode, blocked queries will be answered with the local IP addresses of
+ # your Pi-hole.
+ # - "NX"
+ # In NXDOMAIN mode, blocked queries will be answered with an empty response
+ # (i.e., there won't be an answer section) and status NXDOMAIN. An NXDOMAIN
+ # response should indicate that there is no such domain to the client making the
+ # query.
+ # - "NODATA"
+ # In NODATA mode, blocked queries will be answered with an empty response (no
+ # answer section) and status NODATA. A NODATA response indicates that the domain
+ # exists, but there is no record for the requested query type.
+ mode = "NULL"
+
+ # Should FTL enrich blocked replies with EDNS0 information?
+ #
+ # Possible values are:
+ # - "NONE"
+ # In NONE mode, no additional EDNS information is added to blocked queries
+ # - "CODE"
+ # In CODE mode, blocked queries will be enriched with EDNS info-code BLOCKED (15)
+ # - "TEXT"
+ # In TEXT mode, blocked queries will be enriched with EDNS info-code BLOCKED (15)
+ # and a text message describing the reason for the block
+ edns = "TEXT"
+
+ [dns.specialDomains]
+ # Should Pi-hole always reply with NXDOMAIN to A and AAAA queries of
+ # use-application-dns.net to disable Firefox automatic DNS-over-HTTPS? This is
+ # following the recommendation on
+ # https://support.mozilla.org/en-US/kb/configuring-networks-disable-dns-over-https
+ mozillaCanary = true
+
+ # Should Pi-hole always reply with NXDOMAIN to A and AAAA queries of mask.icloud.com
+ # and mask-h2.icloud.com to disable Apple's iCloud Private Relay to prevent Apple
+ # devices from bypassing Pi-hole? This is following the recommendation on
+ # https://developer.apple.com/support/prepare-your-network-for-icloud-private-relay
+ iCloudPrivateRelay = true
+
+ # Should Pi-hole always reply with NODATA to all queries to zone resolver.arpa to
+ # prevent devices from bypassing Pi-hole using Discovery of Designated Resolvers? This
+ # is based on recommendations at the end of RFC 9462, section 4.
+ designatedResolver = true
+
+ [dns.reply.host]
+ # Use a specific IPv4 address for the Pi-hole host? By default, FTL determines the
+ # address of the interface a query arrived on and uses this address for replying to A
+ # queries with the most suitable address for the requesting client. This setting can
+ # be used to use a fixed, rather than the dynamically obtained, address when Pi-hole
+ # responds to the following names: [ "pi.hole", "<the device's hostname>",
+ # "pi.hole.<local domain>", "<the device's hostname>.<local domain>" ]
+ force4 = false
+
+ # Custom IPv4 address for the Pi-hole host
+ #
+ # Possible values are:
+ # <valid IPv4 address> or empty string ("")
+ IPv4 = ""
+
+ # Use a specific IPv6 address for the Pi-hole host? See description for the IPv4
+ # variant above for further details.
+ force6 = false
+
+ # Custom IPv6 address for the Pi-hole host
+ #
+ # Possible values are:
+ # <valid IPv6 address> or empty string ("")
+ IPv6 = ""
+
+ [dns.reply.blocking]
+ # Use a specific IPv4 address in IP blocking mode? By default, FTL determines the
+ # address of the interface a query arrived on and uses this address for replying to A
+ # queries with the most suitable address for the requesting client. This setting can
+ # be used to use a fixed, rather than the dynamically obtained, address when Pi-hole
+ # responds in the following cases: IP blocking mode is used and this query is to be
+ # blocked, regular expressions with the ;reply=IP regex extension.
+ force4 = false
+
+ # Custom IPv4 address for IP blocking mode
+ #
+ # Possible values are:
+ # <valid IPv4 address> or empty string ("")
+ IPv4 = ""
+
+ # Use a specific IPv6 address in IP blocking mode? See description for the IPv4 variant
+ # above for further details.
+ force6 = false
+
+ # Custom IPv6 address for IP blocking mode
+ #
+ # Possible values are:
+ # <valid IPv6 address> or empty string ("")
+ IPv6 = ""
+
+ [dns.rateLimit]
+ # Rate-limited queries are answered with a REFUSED reply and not further processed by
+ # FTL.
+ # The default settings for FTL's rate-limiting are to permit no more than 1000 queries
+ # in 60 seconds. Both numbers can be customized independently. It is important to note
+ # that rate-limiting is happening on a per-client basis. Other clients can continue to
+ # use FTL while rate-limited clients are short-circuited at the same time.
+ # For this setting, both numbers, the maximum number of queries within a given time,
+ # and the length of the time interval (seconds) have to be specified. For instance, if
+ # you want to set a rate limit of 1 query per hour, the option should look like
+ # dns.rateLimit.count=1 and dns.rateLimit.interval=3600. The time interval is relative
+ # to when FTL has finished starting (start of the daemon + possible delay by
+ # DELAY_STARTUP); then it will advance in steps of the rate-limiting interval. If a
+ # client reaches the maximum number of queries it will be blocked until the end of the
+ # current interval. This will be logged to /var/log/pihole/FTL.log, e.g. Rate-limiting
+ # 10.0.1.39 for at least 44 seconds. If the client continues to send queries while
+ # being blocked already and this number of queries during the blocking exceeds the
+ # limit the client will continue to be blocked until the end of the next interval
+ # (FTL.log will contain lines like Still rate-limiting 10.0.1.39 as it made additional
+ # 5007 queries). As soon as the client requests fewer than the set limit, it will be
+ # unblocked (Ending rate-limitation of 10.0.1.39).
+ # Rate-limiting may be disabled altogether by setting both values to zero (this
+ # results in the same behavior as before FTL v5.7).
+ # How many queries are permitted...
+ count = 1000
+
+ # ... in the set interval before rate-limiting?
+ interval = 60
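+
+ # Illustrative sketch, mirroring the 1-query-per-hour example from the comment
+ # above (not the shipped defaults):
+ # count = 1
+ # interval = 3600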
+
+[dhcp]
+ # Is the embedded DHCP server enabled?
+ active = false
+
+ # Start address of the DHCP address pool
+ #
+ # Possible values are:
+ # <valid IPv4 address> or empty string (""), e.g., "192.168.0.10"
+ start = ""
+
+ # End address of the DHCP address pool
+ #
+ # Possible values are:
+ # <valid IPv4 address> or empty string (""), e.g., "192.168.0.250"
+ end = ""
+
+ # Address of the gateway to be used (typically the address of your router in a home
+ # installation)
+ #
+ # Possible values are:
+ # <valid IPv4 address> or empty string (""), e.g., "192.168.0.1"
+ router = ""
+
+ # The netmask used by your Pi-hole. For directly connected networks (i.e., networks on
+ # which the machine running Pi-hole has an interface) the netmask is optional and may
+ # be set to an empty string (""): it will then be determined from the interface
+ # configuration itself. For networks which receive DHCP service via a relay agent, we
+ # cannot determine the netmask itself, so it should explicitly be specified, otherwise
+ # Pi-hole guesses based on the class (A, B or C) of the network address.
+ #
+ # Possible values are:
+ # <valid netmask> (e.g., "255.255.255.0") or empty string ("") for
+ # auto-discovery
+ netmask = ""
+
+ # If the lease time is given, then leases will be given for that length of time. If not
+ # given, the default lease time is one hour for IPv4 and one day for IPv6.
+ #
+ # Possible values are:
+ # The lease time can be in seconds, or minutes (e.g., "45m") or hours (e.g., "1h")
+ # or days (like "2d") or even weeks ("1w"). You may also use "infinite" as string
+ # but be aware of the drawbacks
+ leaseTime = ""
+
+ # Should Pi-hole make an attempt to also satisfy IPv6 address requests (be aware that
+ # IPv6 works a whole lot differently than IPv4)
+ ipv6 = false
+
+ # Enable DHCPv4 Rapid Commit Option specified in RFC 4039. Should only be enabled if
+ # the server is the only server for the subnet, to avoid conflicts
+ rapidCommit = false
+
+ # Advertise DNS server multiple times to clients. Some devices will add their own
+ # proprietary DNS servers to the list of DNS servers, which can cause issues with
+ # Pi-hole. This option will advertise the Pi-hole DNS server multiple times to
+ # clients, which should prevent this from happening.
+ multiDNS = false
+
+ # Enable logging for DHCP. This will log all relevant DHCP-related activity, including,
+ # e.g., all the options sent to DHCP clients and the tags used to determine them (if
+ # any). This can be useful for debugging DHCP issues. The generated output is saved to
+ # the file specified by files.log.dnsmasq below.
+ logging = false
+
+ # Ignore unknown DHCP clients.
+ # If this option is set, Pi-hole ignores all clients which are not explicitly
+ # configured through dhcp.hosts. This can be useful to prevent unauthorized clients
+ # from getting an IP address from the DHCP server.
+ # It should be noted that this option is not a security feature, as clients can still
+ # assign themselves an IP address and use the network. It is merely a convenience
+ # feature to prevent unknown clients from getting a valid IP configuration assigned
+ # automatically.
+ # Note that you will need to configure new clients manually in dhcp.hosts before they
+ # can use the network when this feature is enabled.
+ ignoreUnknownClients = false
+
+ # Per host parameters for the DHCP server. This allows a machine with a particular
+ # hardware address to be always allocated the same hostname, IP address and lease time
+ # or to specify static DHCP leases
+ #
+ # Possible values are:
+ # Array of static leases each one in one of the following forms:
+ # "[<hwaddr>][,id:<client_id>|*][,set:<tag>][,tag:<tag>][,<ipaddr>][,<hostname>][,<lease_time>][,ignore]"
+ hosts = []
+
+ [ntp.ipv4]
+ # Should FTL act as network time protocol (NTP) server (IPv4)?
+ active = true
+
+ # IPv4 address to listen on for NTP requests
+ #
+ # Possible values are:
+ # <valid IPv4 address> or empty string ("") for wildcard (0.0.0.0)
+ address = ""
+
+ [ntp.ipv6]
+ # Should FTL act as network time protocol (NTP) server (IPv6)?
+ active = true
+
+ # IPv6 address to listen on for NTP requests
+ #
+ # Possible values are:
+ # <valid IPv6 address> or empty string ("") for wildcard (::)
+ address = ""
+
+ [ntp.sync]
+ # Should FTL try to synchronize the system time with an upstream NTP server?
+ active = true
+
+ # NTP upstream server to sync with, e.g., "pool.ntp.org". Note that the NTP server
+ # should be located as close as possible to you in order to minimize the time offset
+ # possibly introduced by different routing paths.
+ #
+ # Possible values are:
+ # valid NTP upstream server
+ server = "pool.ntp.org"
+
+ # Interval in seconds between successive synchronization attempts with the NTP server
+ interval = 3600
+
+ # Number of NTP syncs to perform and average before updating the system time
+ count = 8
+
+ [ntp.sync.rtc]
+ # Should FTL update a real-time clock (RTC) if available?
+ set = false
+
+ # Path to the RTC device to update. Leave empty for auto-discovery
+ #
+ # Possible values are:
+ # Path to the RTC device, e.g., "/dev/rtc0"
+ device = ""
+
+ # Should the RTC be set to UTC?
+ utc = true
+
+[resolver]
+ # Should FTL try to resolve IPv4 addresses to hostnames?
+ resolveIPv4 = true
+
+ # Should FTL try to resolve IPv6 addresses to hostnames?
+ resolveIPv6 = true
+
+ # Control whether FTL should use the fallback option to try to obtain client names
+ # from checking the network table; this fallback can be disabled with this option.
+ # Assume an IPv6 client without a host name. However, the network table knows -
+ # through the client's MAC address - that this is the same device where we have a host
+ # name for another IP address (e.g., a DHCP server managed IPv4 address). In this
+ # case, we use the host name associated with the other address as this is the same
+ # device.
+ networkNames = true
+
+ # With this option, you can change how (and if) hourly PTR requests are made to check
+ # for changes in client and upstream server hostnames.
+ #
+ # Possible values are:
+ # - "IPV4_ONLY"
+ # Do hourly PTR lookups only for IPv4 addresses. This is the new default since
+ # Pi-hole FTL v5.3.2. It should resolve issues with more and more very
+ # short-lived PE IPv6 addresses coming up in a lot of networks.
+ # - "ALL"
+ # Do hourly PTR lookups for all addresses. This was the default until FTL
+ # v5.3(.1).
It has been replaced as it can create a lot of PTR queries for those + # with many IPv6 addresses in their networks. + # - "UNKNOWN" + # Only resolve unknown hostnames. Already existing hostnames are never refreshed, + # i.e., there will be no PTR queries made for clients where hostnames are known. + # This also means that known hostnames will not be updated once known. + # - "NONE" + # Don't do any hourly PTR lookups. This means we look host names up exactly once + # (when we first see a client) and never again. You may miss future changes of + # host names. + refreshNames = "IPV4_ONLY" + +[database] + # Should FTL load information from the database on startup to be aware of the most + # recent history? + DBimport = true + + # How long should queries be stored in the database [days]? + # Setting this value to 0 will disable the database. + maxDBdays = 91 + + # How often do we store queries in FTL's database [seconds]? + DBinterval = 60 + + # Should FTL enable Write-Ahead Log (WAL) mode for the on-disk query database + # (configured via files.database)? + # It is recommended to leave this setting enabled for performance reasons. About the + # only reason to disable WAL mode is if you are experiencing specific issues with it, + # e.g., when using a database that is accessed from multiple hosts via a network + # share. When this setting is disabled, FTL will use SQLite3's default journal mode + # (rollback journal in DELETE mode). + useWAL = true + + [database.network] + # Should FTL analyze the local ARP cache? When disabled, client identification and the + # network table will stop working reliably. + parseARPcache = true + + # How long should IP addresses be kept in the network_addresses table [days]? IP + # addresses (and associated host names) older than the specified number of days are + # removed to avoid dead entries in the network overview table. + expire = 91 + +[webserver] + # On which domain is the web interface served? + # + # Possible values are: + # + domain = "pi.hole" + + # Webserver access control list (ACL) allowing for restrictions to be put on the list + # of IP addresses which have access to the web server. The ACL is a comma separated + # list of IP subnets, where each subnet is prepended by either a - or a + sign. A plus + # sign means allow, where a minus sign means deny. If a subnet mask is omitted, such + # as -1.2.3.4, this means to deny only that single IP address. If this value is not + # set (empty string), all accesses are allowed. Otherwise, the default setting is to + # deny all accesses. On each request the full list is traversed, and the last (!) + # match wins. IPv6 addresses may be specified in CIDR-form [a:b::c]/64. + # + # Example 1: acl = "+127.0.0.1,+[::1]" + # ---> deny all access, except from 127.0.0.1 and ::1, + # Example 2: acl = "+192.168.0.0/16" + # ---> deny all accesses, except from the 192.168.0.0/16 subnet, + # Example 3: acl = "+[::]/0" ---> allow only IPv6 access. + # + # Possible values are: + # + acl = "" + + # Ports to be used by the webserver. + # Comma-separated list of ports to listen on. It is possible to specify an IP address + # to bind to. In this case, an IP address and a colon must be prepended to the port + # number. For example, to bind to the loopback interface on port 80 (IPv4) and to all + # interfaces port 8080 (IPv4), use "127.0.0.1:80,8080". "[::]:80" can be used to + # listen to IPv6 connections to port 80. IPv6 addresses of network interfaces can be + # specified as well, e.g. 
"[::1]:80" for the IPv6 loopback interface. [::]:80 will + # bind to port 80 IPv6 only. + # In order to use port 80 for all interfaces, both IPv4 and IPv6, use either the + # configuration "80,[::]:80" (create one socket for IPv4 and one for IPv6 only), or + # "+80" (create one socket for both, IPv4 and IPv6). The '+' notation to use IPv4 and + # IPv6 will only work if no network interface is specified. Depending on your + # operating system version and IPv6 network environment, some configurations might not + # work as expected, so you have to test to find the configuration most suitable for + # your needs. In case "+80" does not work for your environment, you need to use + # "80,[::]:80". + # If the port is TLS/SSL, a letter 's' (secure) must be appended, for example, + # "80,443s" will open port 80 and port 443, and connections on port 443 will be + # encrypted. For non-encrypted ports, it is allowed to append letter 'r' (as in + # redirect). Redirected ports will redirect all their traffic to the first configured + # SSL port. For example, if webserver.port is "80r,443s", then all HTTP traffic coming + # at port 80 will be redirected to HTTPS port 443. + # When specifying 'o' (optional) behind a port, inability to use this port is not + # considered an error. For instance, specifying "80o,8080o" will allow the webserver + # to listen on either 80, 8080, both or even none of the two ports. This flag may be + # combined with 'r' and 's' like "80or,443os,8080,4443s" (80 redirecting to SSL if + # available, 443 encrypted if available, 8080 mandatory and unencrypted, 4443 + # mandatory and encrypted). + # If this value is not set (empty string), the web server will not be started and, + # hence, the API will not be available. + # + # Possible values are: + # comma-separated list of <[ip_address:]port> + port = "80o,443os,[::]:80o,[::]:443os" + + # Maximum number of worker threads allowed. + # The Pi-hole web server handles each incoming connection in a separate thread. + # Therefore, the value of this option is effectively the number of concurrent HTTP + # connections that can be handled. Any other connections are queued until they can be + # processed by a unoccupied thread. + # The total number of threads you see may be lower than the configured value as + # threads are only created when needed due to incoming connections. + # The value 0 means the number of threads is 50 (as per default settings of CivetWeb) + # for backwards-compatible behavior. + threads = 50 + + # Additional HTTP headers added to the web server responses. + # The headers are added to all responses, including those for the API. + # Note about the default additional headers: + # - X-DNS-Prefetch-Control: off: Usually browsers proactively perform domain name + # resolution on links that the user may choose to follow. We disable DNS prefetching + # here. + # - Content-Security-Policy: [...] 'unsafe-inline' is both required by Chart.js + # styling some elements directly, and index.html containing some inlined Javascript + # code. + # - X-Frame-Options: DENY: The page can not be displayed in a frame, regardless of the + # site attempting to do so. + # - X-Xss-Protection: 0: Disables XSS filtering in browsers that support it. This + # header is usually enabled by default in browsers, and is not recommended as it can + # hurt the security of the site. + # (https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/X-XSS-Protection). 
+
+ # Maximum number of worker threads allowed.
+ # The Pi-hole web server handles each incoming connection in a separate thread.
+ # Therefore, the value of this option is effectively the number of concurrent HTTP
+ # connections that can be handled. Any other connections are queued until they can be
+ # processed by an unoccupied thread.
+ # The total number of threads you see may be lower than the configured value as
+ # threads are only created when needed due to incoming connections.
+ # The value 0 means the number of threads is 50 (as per default settings of CivetWeb)
+ # for backwards-compatible behavior.
+ threads = 50
+
+ # Additional HTTP headers added to the web server responses.
+ # The headers are added to all responses, including those for the API.
+ # Note about the default additional headers:
+ # - X-DNS-Prefetch-Control: off: Usually browsers proactively perform domain name
+ # resolution on links that the user may choose to follow. We disable DNS prefetching
+ # here.
+ # - Content-Security-Policy: [...] 'unsafe-inline' is both required by Chart.js
+ # styling some elements directly, and index.html containing some inlined Javascript
+ # code.
+ # - X-Frame-Options: DENY: The page cannot be displayed in a frame, regardless of the
+ # site attempting to do so.
+ # - X-XSS-Protection: 0: Disables XSS filtering in browsers that support it. This
+ # header is usually enabled by default in browsers, and is not recommended as it can
+ # hurt the security of the site.
+ # (https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/X-XSS-Protection).
+ # - X-Content-Type-Options: nosniff: Marker used by the server to indicate that the
+ # MIME types advertised in the Content-Type headers should not be changed and be
+ # followed. This allows opting out of MIME type sniffing, or, in other words, it is a
+ # way to say that the webmasters knew what they were doing. Site security testers
+ # usually expect this header to be set.
+ # - Referrer-Policy: strict-origin-when-cross-origin: A referrer will be sent for
+ # same-site origins, but cross-origin requests will send no referrer information.
+ # The latter four headers are set as expected by https://securityheaders.io
+ #
+ # Possible values are:
+ # array of HTTP headers
+ headers = [
+ "X-DNS-Prefetch-Control: off",
+ "Content-Security-Policy: default-src 'self' 'unsafe-inline';",
+ "X-Frame-Options: DENY",
+ "X-XSS-Protection: 0",
+ "X-Content-Type-Options: nosniff",
+ "Referrer-Policy: strict-origin-when-cross-origin"
+ ]
+
+ # Should the web server serve all files in webserver.paths.webroot directory? If
+ # disabled, only files within the path defined through webserver.paths.webhome and
+ # /api will be served.
+ serve_all = false
+
+ [webserver.session]
+ # Session timeout in seconds. If a session is inactive for more than this time, it will
+ # be terminated. Sessions are continuously refreshed by the web interface, preventing
+ # sessions from timing out while the web interface is open.
+ # This option may also be used to make logins persistent for long times, e.g. 86400
+ # seconds (24 hours), 604800 seconds (7 days) or 2592000 seconds (30 days). Note that
+ # the total number of concurrent sessions is limited so setting this value too high
+ # may result in users being rejected and unable to log in if there are already too
+ # many sessions active.
+ timeout = 1800
+
+ # Should Pi-hole backup and restore sessions from the database? This is useful if you
+ # want to keep your sessions after a restart of the web interface.
+ restore = true
+
+ [webserver.tls]
+ # Path to the TLS (SSL) certificate file. All directories along the path must be
+ # readable and accessible by the user running FTL (typically 'pihole'). This option is
+ # only required when at least one of webserver.port is TLS. The file must be in PEM
+ # format, and it must contain both the private key and the certificate (the *.pem file
+ # created must contain a 'CERTIFICATE' section as well as an 'RSA PRIVATE KEY'
+ # section).
+ # The *.pem file can be created using
+ # cp server.crt server.pem
+ # cat server.key >> server.pem
+ # if you have these files instead
+ #
+ # Possible values are:
+ # <valid path to a PEM file>
+ cert = "/etc/pihole/tls.pem"
+
+ [webserver.paths]
+ # Server root on the host
+ #
+ # Possible values are:
+ # <valid path>
+ webroot = "/var/www/html"
+
+ # Sub-directory of the root containing the web interface
+ #
+ # Possible values are:
+ # <valid path>, both slashes are needed!
+ webhome = "/admin/"
+
+ # Prefix where the web interface is served
+ # This is useful when you are using a reverse proxy serving the web interface, e.g.,
+ # at http://<address>/pihole/admin/ instead of http://<address>/admin/. In this
+ # example, the prefix would be "/pihole". Note that the prefix has to be stripped away
+ # by the reverse proxy, e.g., for traefik:
+ # - traefik.http.routers.pihole.rule=PathPrefix(`/pihole`)
+ # - traefik.http.middlewares.piholehttp.stripprefix.prefixes=/pihole
+ # The prefix should start with a slash. If you don't use a prefix, leave this field
+ # empty. Setting this field to an incorrect value may result in the web interface not
+ # being accessible.
+ # Don't use this setting if you are not using a reverse proxy!
+ #
+ # Possible values are:
+ # valid URL prefix or empty
+ prefix = ""
+
+ [webserver.interface]
+ # Should the web interface use the boxed layout?
+ boxed = true
+
+ # Theme used by the Pi-hole web interface
+ #
+ # Possible values are:
+ # - "default-auto"
+ # Pi-hole auto
+ # - "default-light"
+ # Pi-hole day
+ # - "default-dark"
+ # Pi-hole midnight
+ # - "default-darker"
+ # Pi-hole deep-midnight
+ # - "high-contrast"
+ # High-contrast light
+ # - "high-contrast-dark"
+ # High-contrast dark
+ # - "lcars"
+ # Star Trek LCARS
+ theme = "default-light" ### CHANGED, default = "default-auto"
+
+ [webserver.api]
+ # Number of concurrent sessions allowed for the API. If the number of sessions exceeds
+ # this value, no new sessions will be allowed until the number of sessions drops due
+ # to session expiration or logout. Note that the number of concurrent sessions is
+ # irrelevant if authentication is disabled as no sessions are used in this case.
+ max_sessions = 16
+
+ # Should FTL prettify the API output (add extra spaces, newlines and indentation)?
+ prettyJSON = false
+
+ # API password hash
+ #
+ # Possible values are:
+ # <valid Pi-hole password hash>
+ pwhash = ""
+
+ # Pi-hole 2FA TOTP secret. When set to something different than "", 2FA authentication
+ # will be enforced for the API and the web interface. This setting is write-only, you
+ # cannot read the secret back.
+ #
+ # Possible values are:
+ # <valid TOTP secret>
+ totp_secret = ""
+
+ # Pi-hole application password.
+ # After you turn on two-factor (2FA) verification and set up an Authenticator app, you
+ # may run into issues if you use apps or other services that don't support two-step
+ # verification. In this case, you can create and use an app password to sign in. An
+ # app password is a long, randomly generated password that can be used instead of your
+ # regular password + TOTP token when signing in to the API. The app password can be
+ # generated through the API and will be shown only once. You can revoke the app
+ # password at any time. If you revoke the app password, be sure to generate a new one
+ # and update your app with the new password.
+ #
+ # Possible values are:
+ # <valid Pi-hole password hash>
+ app_pwhash = ""
+
+ # Should application password API sessions be allowed to modify config settings?
+ # Setting this to true allows third-party applications using the application password
+ # to modify settings, e.g., the upstream DNS servers, DHCP server settings, or
+ # changing passwords. This setting should only be enabled if really needed and only if
+ # you trust the applications using the application password.
+ app_sudo = false
+
+ # Should FTL create a temporary CLI password? This password is stored in cleartext in
+ # /etc/pihole and can be used by the CLI (pihole ... commands) to authenticate
+ # against the API. Note that the password is only valid for the current session and
+ # regenerated on each FTL restart. Sessions initiated with this password cannot modify
+ # the Pi-hole configuration (change passwords, etc.) for security reasons but can
+ # still use the API to query data and manage lists.
+ cli_pw = true
+
+ # Array of clients to be excluded from certain API responses (regex):
+ # - Query Log (/api/queries)
+ # - Top Clients (/api/stats/top_clients)
+ # This setting accepts IP addresses (IPv4 and IPv6) as well as hostnames.
+ # Note that backslashes "\" need to be escaped, i.e., "\\" in this setting
+ #
+ # Example: [ "^192\\.168\\.2\\.56$", "^fe80::341:[0-9a-f]*$", "^localhost$" ]
+ #
+ # Possible values are:
+ # array of regular expressions describing clients
+ excludeClients = []
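+
+ # Illustrative sketch (an assumed address, reusing the first pattern from the
+ # example above): hide a single client from the Query Log and Top Clients:
+ # excludeClients = [ "^192\\.168\\.2\\.56$" ]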
"\\" in this setting + # + # Example: [ "^192\\.168\\.2\\.56$", "^fe80::341:[0-9a-f]*$", "^localhost$" ] + # + # Possible values are: + # array of regular expressions describing clients + excludeClients = [] + + # Array of domains to be excluded from certain API responses (regex): + # - Query Log (/api/queries) + # - Top Clients (/api/stats/top_domains) + # Note that backslashes "\" need to be escaped, i.e. "\\" in this setting + # + # Example: [ "(^|\\.)\\.google\\.de$", "\\.pi-hole\\.net$" ] + # + # Possible values are: + # array of regular expressions describing domains + excludeDomains = [] + + # How much history should be imported from the database and returned by the API + # [seconds]? (max 24*60*60 = 86400) + maxHistory = 86400 + + # Up to how many clients should be returned in the activity graph endpoint + # (/api/history/clients)? + # This setting can be overwritten at run-time using the parameter N. Setting this to 0 + # will always send all clients. Be aware that this may be challenging for the GUI if + # you have many (think > 1.000 clients) in your network + maxClients = 10 + + # How should the API compute the most active clients? If set to true, the API will + # return the clients with the most queries globally (within 24 hours). If set to + # false, the API will return the clients with the most queries per time slot + # individually. + client_history_global_max = true + + # Allow destructive API calls (e.g. restart DNS server, flush logs, ...) + allow_destructive = true + + [webserver.api.temp] + # Which upper temperature limit should be used by Pi-hole? Temperatures above this + # limit will be shown as "hot". The number specified here is in the unit defined below + limit = 60.000000 + + # Which temperature unit should be used for temperatures processed by FTL? + # + # Possible values are: + # - "C" + # Celsius + # - "F" + # Fahrenheit + # - "K" + # Kelvin + unit = "C" + +[files] + # The file which contains the PID of FTL's main process. + # + # Possible values are: + # + pid = "/run/pihole-FTL.pid" + + # The location of FTL's long-term database + # + # Possible values are: + # + database = "/var/lib/pihole/pihole-FTL.db" + + # The location of Pi-hole's gravity database + # + # Possible values are: + # + gravity = "/var/lib/pihole/gravity.db" + + # A temporary directory where Pi-hole can store files during gravity updates. This + # directory must be writable by the user running gravity (typically pihole). + # + # Possible values are: + # + gravity_tmp = "/var/tmp" + + # The database containing MAC -> Vendor information for the network table + # + # Possible values are: + # + macvendor = "/var/lib/pihole/macvendor.db" + + # An optional file containing a pcap capture of the network traffic. This file is used + # for debugging purposes only. If you don't know what this is, you don't need it. + # Setting this to an empty string disables pcap recording. The file must be writable + # by the user running FTL (typically pihole). Failure to write to this file will + # prevent the DNS resolver from starting. The file is appended to if it already + # exists. 
+
+ [files.log]
+ # The location of FTL's log file
+ #
+ # Possible values are:
+ # <valid file path>
+ ftl = "/var/log/pihole/FTL.log"
+
+ # The log file used by the embedded dnsmasq DNS server
+ #
+ # Possible values are:
+ # <valid file path>
+ dnsmasq = "/var/log/pihole/pihole.log"
+
+ # The log file used by the webserver
+ #
+ # Possible values are:
+ # <valid file path>
+ webserver = "/var/log/pihole/webserver.log"
+
+[misc]
+ # Using privacy levels you can specify which level of detail you want to see in your
+ # Pi-hole statistics. Changing this setting will trigger a restart of FTL
+ #
+ # Possible values are:
+ # - 0
+ # Don't hide anything, all statistics are available.
+ # - 1
+ # Hide domains. This setting disables Top Domains and Top Ads
+ # - 2
+ # Hide domains and clients. This setting disables Top Domains, Top Ads, Top
+ # Clients and Clients over time.
+ # - 3
+ # Anonymize everything. This setting disables almost any statistics and query
+ # analysis. There will be no long-term database logging and no Query Log. You
+ # will also lose most regex features.
+ privacylevel = 0
+
+ # During startup, in some configurations, network interfaces appear only late during
+ # system startup and are not ready when FTL tries to bind to them. Therefore, you may
+ # want FTL to wait a given amount of time before trying to start the DNS resolver.
+ # This setting takes any integer value between 0 and 300 seconds. To prevent delayed
+ # startup while the system is already running and FTL is restarted, the delay only
+ # takes place within the first 180 seconds (hard-coded) after booting.
+ delay_startup = 0
+
+ # Set niceness of pihole-FTL. Defaults to -10 and can be disabled altogether by setting
+ # a value of -999. The nice value is an attribute that can be used to influence the
+ # CPU scheduler to favor or disfavor a process in scheduling decisions. The range of
+ # the nice value varies across UNIX systems. On modern Linux, the range is -20 (high
+ # priority = not very nice to other processes) to +19 (low priority).
+ nice = -10
+
+ # Should FTL translate its own stack addresses into code lines during the bug
+ # backtrace? This improves the analysis of crashes significantly. It is recommended to
+ # leave the option enabled. This option should only be disabled when addr2line is
+ # known to not be working correctly on the machine because, in this case, the
+ # malfunctioning addr2line can prevent FTL from generating any backtrace at all.
+ addr2line = true
+
+ # Should FTL load additional dnsmasq configuration files from /etc/dnsmasq.d/?
+ # Warning: This is an advanced setting and should only be used with care.
+ # Incorrectly formatted config files, or config files specifying options which can
+ # only be defined once, can result in conflicts with the automatic configuration of
+ # Pi-hole (see /etc/pihole/dnsmasq.conf) and may stop DNS resolution from working.
+ etc_dnsmasq_d = false
+
+ # Additional lines to inject into the generated dnsmasq configuration.
+ # Warning: This is an advanced setting and should only be used with care. Incorrectly
+ # formatted or duplicated lines as well as lines conflicting with the automatic
+ # configuration of Pi-hole can break the embedded dnsmasq and will stop DNS resolution
+ # from working.
+ # Use this option with extra care.
+ #
+ # Possible values are:
+ # array of valid dnsmasq config line options
+ dnsmasq_lines = []
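+
+ # Illustrative sketch (assumed values, not a shipped default): injecting one raw
+ # dnsmasq option, here a static A record via dnsmasq's address option:
+ # dnsmasq_lines = [ "address=/printer.lan/192.168.0.40" ]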
+
+ # Log additional information about queries and replies to pihole.log
+ # When this setting is enabled, the log has extra information at the start of each
+ # line. This consists of a serial number which ties together the log lines associated
+ # with an individual query, and the IP address of the requestor. This setting is only
+ # effective if dns.queryLogging is enabled, too. This option is only useful for
+ # debugging and is not recommended for normal use.
+ extraLogging = false
+
+ # Put configuration into read-only mode. This will prevent any changes to the
+ # configuration file via the API or CLI. This setting is useful when a configuration is
+ # to be forced/modified by some third-party application (like infrastructure-as-code
+ # providers) and should not be changed by any means.
+ readOnly = false
+
+ [misc.check]
+ # Pi-hole is very lightweight on resources. Nevertheless, this does not mean that you
+ # should run Pi-hole on a server that is otherwise extremely busy as queuing on the
+ # system can lead to unnecessary delays in DNS operation as the system becomes less
+ # and less usable as the system load increases because all resources are permanently
+ # in use. To account for this, FTL regularly checks the system load. To bring this to
+ # your attention, FTL warns about excessive load when the 15 minute system load
+ # average exceeds the number of cores.
+ # This check can be disabled with this setting.
+ load = true
+
+ # FTL stores history in shared memory to allow inter-process communication with forked
+ # dedicated TCP workers. If FTL runs out of memory, it cannot continue to work as
+ # queries cannot be analyzed any further. Hence, FTL checks if enough shared memory is
+ # available on your system and warns you if this is not the case.
+ # By default, FTL warns if the shared-memory usage exceeds 90%. You can set any
+ # integer limit between 0 and 100 (interpreted as percentages) where 0 means that
+ # checking of shared-memory usage is disabled.
+ shmem = 90
+
+ # FTL stores its long-term history in a database file on disk. Furthermore, FTL stores
+ # log files. By default, FTL warns if usage of the disk holding any crucial file
+ # exceeds 90%. You can set any integer limit between 0 and 100 (interpreted as
+ # percentages) where 0 means that checking of disk usage is disabled.
+ disk = 90
+
+[debug]
+ # Print debugging information about database actions. This prints performed SQL
+ # statements as well as some general information such as the time it took to store the
+ # queries and how many have been saved to the database.
+ database = false
+
+ # Prints a list of the detected interfaces on the startup of pihole-FTL. Also, prints
+ # whether these interfaces are IPv4 or IPv6 interfaces.
+ networking = false
+
+ # Print information about shared memory locks. Messages will be generated when waiting,
+ # obtaining, and releasing a lock.
+ locks = false
+
+ # Print extensive query information (domains, types, replies, etc.). This has always
+ # been part of the legacy debug mode of pihole-FTL.
+ queries = false
+
+ # Print flags of queries received by the DNS hooks. Only effective when DEBUG_QUERIES
+ # is enabled as well.
+ flags = false
+
+ # Print information about shared memory buffers. Messages are either about creating or
+ # enlarging shmem objects or string injections.
+ shmem = false + + # Print information about garbage collection (GC): What is to be removed, how many have + # been removed and how long did GC take. + gc = false + + # Print information about ARP table processing: How long did parsing take, whether read + # MAC addresses are valid, and if the macvendor.db file exists. + arp = false + + # Controls if FTLDNS should print extended details about regex matching into FTL.log. + regex = false + + # Print extra debugging information concerning API calls. This includes the request, + # the request parameters, and the internal details about how the algorithms decide + # which data to present and in what form. This very verbose output should only be used + # when debugging specific API issues and can be helpful, e.g., when a client cannot + # connect due to an obscure API error. Furthermore, this setting enables logging of + # all API requests (auth log) and details about user authentication attempts. + api = false + + # Print extra debugging information about TLS connections. This includes the TLS + # version, the cipher suite, the certificate chain and much more. This very verbose + # output should only be used when debugging specific TLS issues and can be helpful, + # e.g., when a client cannot connect due to an obscure TLS error as modern browsers do + # not provide much information about the underlying TLS connection and most often give + # only very generic error messages without much/any underlying technical information. + tls = false + + # Print information about overTime memory operations, such as initializing or moving + # overTime slots. + overtime = false + + # Print information about status changes for individual queries. This can be useful to + # identify unexpected unknown queries. + status = false + + # Print information about capabilities granted to the pihole-FTL process. The current + # capabilities are printed on receipt of SIGHUP, i.e., the current set of capabilities + # can be queried without restarting pihole-FTL (by setting DEBUG_CAPS=true and + # thereafter sending killall -HUP pihole-FTL). + caps = false + + # Print information about DNSSEC activity + dnssec = false + + # FTL uses dynamically allocated vectors for various tasks. This config option enables + # extensive debugging information such as information about allocation, referencing, + # deletion, and appending. + vectors = false + + # Extensive information about hostname resolution like which DNS servers are used in + # the first and second hostname resolving tries (only affecting internally generated + # PTR queries). + resolver = false + + # Print debugging information about received EDNS(0) data. + edns0 = false + + # Log various important client events such as change of interface (e.g., client + # switching from WiFi to wired or VPN connection), as well as extensive reporting + # about how clients were assigned to its groups. + clients = false + + # Log information related to alias-client processing. + aliasclients = false + + # Log information regarding FTL's embedded event handling queue. + events = false + + # Log information about script helpers, e.g., due to dhcp-script. + helper = false + + # Print config parsing details + config = false + + # Debug monitoring of /etc/pihole filesystem events + inotify = false + + # Debug monitoring of the webserver (CivetWeb) events + webserver = false + + # Temporary flag that may print additional information. This debug flag is meant to be + # used whenever needed for temporary investigations. 
The logged content may change
+ # without further notice at any time.
+ extra = false
+
+ # Reserved debug flag
+ reserved = false
+
+ # Print information about NTP synchronization
+ ntp = false
+
+ # Print information about netlink communication and parsing
+ netlink = false
+
+ # Set all debug flags at once. This is a convenience option. Note that this option is
+ # not persistent: setting it to true will enable all *remaining* debug flags but
+ # unsetting it will disable *all* debug flags.
+ all = false
+
+# Configuration statistics:
+# 155 total entries out of which 148 entries are default
+# --> 7 entries are modified
+
diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pihole/templates/setupVars.conf.j2 b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pihole/templates/setupVars.conf.j2
new file mode 100644
index 0000000..f1a7182
--- /dev/null
+++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pihole/templates/setupVars.conf.j2
@@ -0,0 +1,55 @@
+#jinja2: trim_blocks: True, lstrip_blocks: True
+
+PIHOLE_INTERFACE={{ pihole_config.dns.interface | default('eth0') }}
+{#
+# IPV4_ADDRESS={{ pihole_config.ipv4_address }}
+# IPV6_ADDRESS={{ pihole_config.ipv6_address }}
+#}
+QUERY_LOGGING={{ pihole_config.query_logging | default('true') | lower }}
+{#
+# INSTALL_WEB_SERVER={{ pihole_config.install_web.server | lower }}
+# INSTALL_WEB_INTERFACE={{ pihole_config.install_web.interface | lower }}
+
+# BLOCKING_ENABLED={{ pihole_config.blocking_enabled | lower }}
+#}
+{% if pihole_config.dns.upstreams | default([]) | count > 0 %}
+ {% for i in pihole_config.dns.upstreams %}
+PIHOLE_DNS_{{ loop.index }}={{ i }}
+ {% endfor %}
+{% endif %}
+
+PIHOLE_DOMAIN={{ pihole_config.webserver.domain }}
+{#
+# DNS_FQDN_REQUIRED={{ pihole_config.dns.fqdn_required | lower }}
+#}
+DNS_BOGUS_PRIV={{ pihole_config.dns.bogusPriv | default('true') | lower }}
+DNSSEC={{ pihole_config.dns.dnssec | default('false') | lower }}
+{#
+# HOSTRECORD=
+
+# DNSMASQ_LISTENING={{ pihole_config.dnsmasq_listening }}
+
+# REV_SERVER=
+# DHCP_ACTIVE=
+# DHCP_START=
+# DHCP_END=
+# DHCP_ROUTER=
+# DHCP_LEASETIME=
+# DHCP_IPv6=
+# DHCP_RAPID_COMMIT=
+GRAVITY_TMPDIR={{ pihole_config.files.gravity_tmp }}
+
+TEMPERATURE_UNIT={{ pihole_config.webserver.api.temp.unit | upper }}
+TEMPERATURE_LIMIT={{ pihole_config.webserver.api.temp.limit }}
+
+WEBUIBOXEDLAYOUT={{ pihole_config.webserver.interface.boxed }}
+WEBTHEME={{ pihole_config.webserver.interface.theme }}
+#}
+# WEBPASSWORD={{ pihole_config.web.password | default('') }}
+WEB_PORTS={{ pihole_config.webserver.port }}
+{#
+#API_EXCLUDE_DOMAINS={{ pihole_config.api.exclude.domains | join(",") }}
+#API_EXCLUDE_CLIENTS={{ pihole_config.api.exclude.clients | join(",") }}
+#API_QUERY_LOG_SHOW={{ pihole_config.api.query_log_show | lower }}
+#API_PRIVACY_MODE={{ pihole_config.api.privacy_mode | lower }}
+#}
diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pihole/vars/archlinux.yml b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pihole/vars/archlinux.yml
new file mode 100644
index 0000000..174f7f3
--- /dev/null
+++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pihole/vars/archlinux.yml
@@ -0,0 +1,9 @@
+---
+
+pihole_dependencies:
+  - bind-tools # dnsutils equivalent
+  - net-tools
+  - python-toml
+  - jq
+
+...
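Illustrative note (not part of the diff): with the upstream list shipped in this role's vars/main.yml below ('1.1.1.1', '1.0.0.1', '9.9.9.9'), the PIHOLE_DNS loop in setupVars.conf.j2 above would render roughly as:

    PIHOLE_DNS_1=1.1.1.1
    PIHOLE_DNS_2=1.0.0.1
    PIHOLE_DNS_3=9.9.9.9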
diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pihole/vars/debian.yml b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pihole/vars/debian.yml new file mode 100644 index 0000000..ed97d53 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pihole/vars/debian.yml @@ -0,0 +1 @@ +--- diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pihole/vars/main.yml b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pihole/vars/main.yml new file mode 100644 index 0000000..1e182de --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/pihole/vars/main.yml @@ -0,0 +1,192 @@ +--- + +pihole_delegate_to: localhost + +pihole_local_tmp_directory: "{{ + lookup('env', 'CUSTOM_LOCAL_TMP_DIRECTORY') | + default(lookup('env', 'HOME') ~ '/.cache/ansible/pihole', true) }}/{{ pihole_version }}" + +pihole_cache_directory: /var/cache/ansible/pihole + +pihole_remote_tmp_directory: "{{ pihole_cache_directory }}/{{ pihole_version }}" + +pihole_dependencies: + - dnsutils + - net-tools + - python3-toml + - jq + +# config: https://github.com/pi-hole/FTL/blob/master/test/pihole.toml +pihole_defaults_config: + dns: + upstreams: + - '1.1.1.1' + - '1.0.0.1' + - '9.9.9.9' + CNAMEdeepInspect: "" # true + blockESNI: "" # true + EDNS0ECS: "" # true + ignoreLocalhost: "" # false + showDNSSEC: "" # true + analyzeOnlyAandAAAA: "" # false + piholePTR: "" # PI.HOLE + replyWhenBusy: "" # ALLOW + blockTTL: "" # 2 + domainNeeded: "" # false + expandHosts: "" # false + # changed: https://github.com/pi-hole/FTL/pull/2531 + domain: pi.hole + # name: pi.hole + # local: true + bogusPriv: "" # true + dnssec: "" # false + interface: "" # "{{ ansible_facts.default_ipv4.interface }}" + listeningMode: "" # LOCAL + queryLogging: "" # true + port: "" # 53 + cache: + size: "" # 10000 + optimizer: "" # 3600 + upstreamBlockedTTL: "" # 86400 + blocking: + active: "" # true + mode: "" # NULL + edns: "" # TEXT + specialDomains: + mozillaCanary: "" # true + iCloudPrivateRelay: "" # true + designatedResolver: "" # true + reply: + host: + force4: "" # false + force6: "" # false + blocking: + force4: "" # false + force6: "" # false + rateLimit: + count: "" # 1000 + interval: "" # 60 + dhcp: + active: "" # false + ipv6: "" # false + rapidCommit: "" # false + multiDNS: "" # false + logging: "" # false + ignoreUnknownClients: "" # false + ntp: + ipv4: + active: "" # true + ipv6: + active: "" # true + sync: + active: "" # true + server: "" # pool.ntp.org + interval: "" # 3600 + count: "" # 8 + rtc: + set: "" # false + utc: "" # true + resolver: + resolveIPv4: "" # true + resolveIPv6: "" # true + networkNames: "" # true + refreshNames: "" # IPV4_ONLY + database: + DBimport: "" # true + maxDBdays: "" # 91 + DBinterval: "" # 60 + useWAL: "" # true + network: + parseARPcache: "" # true + expire: "" # 91 + webserver: + domain: "" # pi.hole + port: "" # 80 + threads: "" # 50 + # headers: + # - 'X-DNS-Prefetch-Control: off' + # - "Content-Security-Policy: default-src 'self' 'unsafe-inline';" + # - 'X-Frame-Options: DENY' + # - 'X-XSS-Protection: 0' + # - 'X-Content-Type-Options: nosniff' + # - 'Referrer-Policy: strict-origin-when-cross-origin' + serve_all: "" # false + session: + timeout: 3200 + restore: true + # tls: + # cert: /etc/pihole/tls.pem + # paths: + # webroot: /var/www/html + # webhome: /admin/ + interface: + boxed: true + theme: default-light + api: + max_sessions: "" # 16 + prettyJSON: true + pwhash: 
"" + app_sudo: "" # false + cli_pw: "" # true + maxHistory: "" # 86400 + maxClients: "" # 10 + client_history_global_max: "" # true + allow_destructive: "" # true + temp: + limit: "" # 60.0 + unit: "" # C + files: + pid: "" # /run/pihole-FTL.pid + database: "" # /etc/pihole/pihole-FTL.db + gravity: "" # /etc/pihole/gravity.db + gravity_tmp: "" # /var/tmp + macvendor: "" # /etc/pihole/macvendor.db + log: + ftl: "" # /var/log/pihole/FTL.log + dnsmasq: "" # /var/log/pihole/pihole.log + webserver: "" # /var/log/pihole/webserver.log + misc: + privacylevel: "" # 0 + delay_startup: 10 + nice: "" # -10 + addr2line: "" # true + etc_dnsmasq_d: "" # false + extraLogging: "" # false + readOnly: "" # false + check: + load: "" # true + shmem: "" # 90 + disk: "" # 90 + debug: + database: false + networking: false + locks: false + queries: false + flags: false + shmem: false + gc: false + arp: false + regex: false + api: false + tls: false + overtime: false + status: false + caps: false + dnssec: false + vectors: false + resolver: false + edns0: false + clients: false + aliasclients: false + events: false + helper: false + config: false + inotify: false + webserver: false + extra: false + reserved: false + ntp: false + netlink: false + all: false + +... diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/resolv/LICENSE b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/resolv/LICENSE new file mode 100644 index 0000000..8c8472f --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/resolv/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. 
For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright 2020-2021 Bodo Schulz + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/resolv/README.md b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/resolv/README.md new file mode 100644 index 0000000..a6b7f16 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/resolv/README.md @@ -0,0 +1,13 @@ +# Ansible role `resolv` + +An Ansible role to configure /etc/resolv.conf + +## defaults + +| Variable Name | Required | Default Value | Type | Description | +| :--- | :---: | :---: | :---: | :--- | +| `resolv_nameservers`| **yes** | [] | list | A list of up to 3 nameserver IP addresses | +| `resolv_domain` | no | "" | string | Local domain name | +| `resolv_search` | no | [] | list | List of up to 6 domains to search for host-name lookup | +| `resolv_sortlist` | no | [] | list | List of IP-address and netmask pairs to sort addresses returned by gethostbyname. | +| `resolv_options` | no | [] | list | List of options to modify certain internal resolver variables. 
| diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/resolv/defaults/main.yml b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/resolv/defaults/main.yml new file mode 100644 index 0000000..73d8dff --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/resolv/defaults/main.yml @@ -0,0 +1,9 @@ +--- + +resolv_nameservers: [] +resolv_domain: "" +resolv_search: [] +resolv_sortlist: [] +resolv_options: [] + +... diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/resolv/meta/main.yml b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/resolv/meta/main.yml new file mode 100644 index 0000000..1b710f7 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/resolv/meta/main.yml @@ -0,0 +1,32 @@ +--- + +galaxy_info: + role_name: resolv + + author: Bodo Schulz + + description: ansible role to configure /etc/resolv.conf + + license: Apache + min_ansible_version: "2.9" + + platforms: + - name: ArchLinux + - name: Debian + versions: + # 10 + - buster + # 11 + - bullseye + - bookworm + - name: Ubuntu + versions: + # 20.04 + - focal + + galaxy_tags: + - system + - dns + - resolv + +dependencies: [] diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/resolv/molecule/configured/converge.yml b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/resolv/molecule/configured/converge.yml new file mode 100644 index 0000000..9dfdbcd --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/resolv/molecule/configured/converge.yml @@ -0,0 +1,10 @@ +--- +- name: converge + hosts: all + any_errors_fatal: false + + environment: + NETRC: '' + + roles: + - role: bodsch.dns.resolv diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/resolv/molecule/configured/group_vars/all/vars.yml b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/resolv/molecule/configured/group_vars/all/vars.yml new file mode 100644 index 0000000..9afd087 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/resolv/molecule/configured/group_vars/all/vars.yml @@ -0,0 +1,18 @@ +--- + +resolv_domain: molecule.local + +resolv_search: + - "{{ resolv_domain }}" + +resolv_nameservers: + # - 127.0.0.1 + - 46.38.225.230 + - 46.38.252.230 + +resolv_options: + - "timeout:2" + - no-inet6 + - single-request + +... 
diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/resolv/molecule/configured/molecule.yml b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/resolv/molecule/configured/molecule.yml new file mode 100644 index 0000000..fda92e3 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/resolv/molecule/configured/molecule.yml @@ -0,0 +1,55 @@ +--- + +role_name_check: 1 + +dependency: + name: galaxy + +driver: + name: docker + +platforms: + - name: instance + image: "ghcr.io/bodsch/docker-ansible/ansible-${DISTRIBUTION:-debian:12}" + command: ${MOLECULE_DOCKER_COMMAND:-""} + docker_host: "${DOCKER_HOST:-unix:///run/docker.sock}" + privileged: true + pre_build_image: true + cgroupns_mode: host + volumes: + - /sys/fs/cgroup:/sys/fs/cgroup:rw + - /var/lib/containerd + capabilities: + - SYS_ADMIN + tmpfs: + - /run + - /tmp + +provisioner: + name: ansible + ansible_args: + - --diff + - -v + config_options: + defaults: + deprecation_warnings: true + callback_result_format: yaml + callbacks_enabled: profile_tasks + gathering: smart + fact_caching: jsonfile + fact_caching_timeout: 8640 + fact_caching_connection: ansible_facts + +scenario: + test_sequence: + - destroy + - dependency + - syntax + - create + - prepare + - converge + - verify + - destroy + +verifier: + name: testinfra diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/resolv/molecule/configured/prepare.yml b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/resolv/molecule/configured/prepare.yml new file mode 100644 index 0000000..4c14c51 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/resolv/molecule/configured/prepare.yml @@ -0,0 +1,57 @@ +--- + +- name: information + hosts: all + gather_facts: true + + pre_tasks: + - name: arch- / artixlinux + when: + - ansible_facts.distribution | lower == 'archlinux' or + ansible_facts.os_family | lower == 'artix linux' + block: + - name: update pacman system + ansible.builtin.command: | + pacman --refresh --sync --sysupgrade --noconfirm + register: pacman + changed_when: pacman.rc != 0 + failed_when: pacman.rc != 0 + + - name: create depends service + ansible.builtin.copy: + mode: 0755 + dest: /etc/init.d/net + content: | + #!/usr/bin/openrc-run + true + when: + - ansible_facts.os_family | lower == 'artix linux' + + - name: make sure python3-apt is installed (only debian based) + ansible.builtin.package: + name: + - python3-apt + state: present + when: + - ansible_facts.os_family | lower == 'debian' + + - name: update package cache + become: true + ansible.builtin.package: + update_cache: true + + - name: install dependencies + ansible.builtin.package: + name: + - iproute2 + state: present + + - name: environment + ansible.builtin.debug: + msg: + - "os family : {{ ansible_facts.distribution }} ({{ ansible_facts.os_family }})" + - "distribution version : {{ ansible_facts.distribution_major_version }}" + - "ansible version : {{ ansible_version.full }}" + - "python version : {{ ansible_facts.python.version.major }}.{{ ansible_facts.python.version.minor }}" + +...
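Given the configured-scenario variables above, the etc/resolv.conf.j2 template shown earlier should render roughly as follows (the leading comment depends on the ansible_managed setting; sortlist is omitted because resolv_sortlist is empty). Note that glibc treats domain and search as mutually exclusive, with the later entry winning, so both lines appear here only because both variables are set:

```
# Ansible managed

search molecule.local
domain molecule.local
nameserver 46.38.225.230
nameserver 46.38.252.230
options timeout:2 no-inet6 single-request
```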
diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/resolv/molecule/default/converge.yml b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/resolv/molecule/default/converge.yml new file mode 100644 index 0000000..9dfdbcd --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/resolv/molecule/default/converge.yml @@ -0,0 +1,10 @@ +--- +- name: converge + hosts: all + any_errors_fatal: false + + environment: + NETRC: '' + + roles: + - role: bodsch.dns.resolv diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/resolv/molecule/default/group_vars/all/vars.yml b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/resolv/molecule/default/group_vars/all/vars.yml new file mode 100644 index 0000000..c81cf5b --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/resolv/molecule/default/group_vars/all/vars.yml @@ -0,0 +1,3 @@ +--- + +... diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/resolv/molecule/default/molecule.yml b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/resolv/molecule/default/molecule.yml new file mode 100644 index 0000000..fda92e3 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/resolv/molecule/default/molecule.yml @@ -0,0 +1,55 @@ +--- + +role_name_check: 1 + +dependency: + name: galaxy + +driver: + name: docker + +platforms: + - name: instance + image: "ghcr.io/bodsch/docker-ansible/ansible-${DISTRIBUTION:-debian:12}" + command: ${MOLECULE_DOCKER_COMMAND:-""} + docker_host: "${DOCKER_HOST:-unix:///run/docker.sock}" + privileged: true + pre_build_image: true + cgroupns_mode: host + volumes: + - /sys/fs/cgroup:/sys/fs/cgroup:rw + - /var/lib/containerd + capabilities: + - SYS_ADMIN + tmpfs: + - /run + - /tmp + +provisioner: + name: ansible + ansible_args: + - --diff + - -v + config_options: + defaults: + deprecation_warnings: true + callback_result_format: yaml + callbacks_enabled: profile_tasks + gathering: smart + fact_caching: jsonfile + fact_caching_timeout: 8640 + fact_caching_connection: ansible_facts + +scenario: + test_sequence: + - destroy + - dependency + - syntax + - create + - prepare + - converge + - verify + - destroy + +verifier: + name: testinfra diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/resolv/molecule/default/prepare.yml b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/resolv/molecule/default/prepare.yml new file mode 100644 index 0000000..4c14c51 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/resolv/molecule/default/prepare.yml @@ -0,0 +1,57 @@ +--- + +- name: information + hosts: all + gather_facts: true + + pre_tasks: + - name: arch- / artixlinux + when: + - ansible_facts.distribution | lower == 'archlinux' or + ansible_facts.os_family | lower == 'artix linux' + block: + - name: update pacman system + ansible.builtin.command: | + pacman --refresh --sync --sysupgrade --noconfirm + register: pacman + changed_when: pacman.rc != 0 + failed_when: pacman.rc != 0 + + - name: create depends service + ansible.builtin.copy: + mode: 0755 + dest: /etc/init.d/net + content: | + #!/usr/bin/openrc-run + true + when: + - ansible_facts.os_family | lower == 'artix linux' + + - name: make sure python3-apt is installed (only debian based) + ansible.builtin.package: + name: + - python3-apt + state: present + when: + - ansible_facts.os_family |
lower == 'debian' + + - name: update package cache + become: true + ansible.builtin.package: + update_cache: true + + - name: install dependencies + ansible.builtin.package: + name: + - iproute2 + state: present + + - name: environment + ansible.builtin.debug: + msg: + - "os family : {{ ansible_facts.distribution }} ({{ ansible_facts.os_family }})" + - "distribution version : {{ ansible_facts.distribution_major_version }}" + - "ansible version : {{ ansible_version.full }}" + - "python version : {{ ansible_facts.python.version.major }}.{{ ansible_facts.python.version.minor }}" + +... diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/resolv/tasks/main.yml b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/resolv/tasks/main.yml new file mode 100644 index 0000000..d25fc69 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/resolv/tasks/main.yml @@ -0,0 +1,34 @@ +--- +- name: install dependencies + ansible.builtin.package: + name: "{{ resolv_dependencies }}" + state: present + when: + - resolv_dependencies | default([]) | count > 0 + +- name: re-read ansible facts + ansible.builtin.setup: + +- name: detect docker environment + ansible.builtin.set_fact: + is_docker_guest: "{{ + ansible_facts.virtualization_role | default('host') == 'guest' and + ansible_facts.virtualization_type | default('none') == 'docker' }}" + +- name: configure resolv.conf + become: true + ansible.builtin.template: + src: "etc/resolv.conf.j2" + dest: "/etc/resolv.conf" + mode: "0644" + when: + - not is_docker_guest + +- name: configure resolv.conf (docker) + become: true + ansible.builtin.template: + src: "etc/resolv.conf.j2" + dest: "/etc/resolv.conf.docker" + mode: "0644" + when: + - is_docker_guest diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/resolv/templates/etc/resolv.conf.j2 b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/resolv/templates/etc/resolv.conf.j2 new file mode 100644 index 0000000..3ac5d80 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/resolv/templates/etc/resolv.conf.j2 @@ -0,0 +1,24 @@ +#jinja2: trim_blocks: True, lstrip_blocks: True +# {{ ansible_managed }} + +{% if resolv_search is defined and + resolv_search | length > 0 %} +search {{ resolv_search | join(' ') }} +{% endif %} +{% if resolv_domain is defined and + resolv_domain | string | length > 0 %} +domain {{ resolv_domain }} +{% endif %} +{% for ns in resolv_nameservers %} +nameserver {{ ns }} +{% endfor %} +{% if resolv_sortlist is defined and + resolv_sortlist | count > 0 %} + {% for sl in resolv_sortlist %} +sortlist {{ sl }} + {% endfor %} +{% endif %} +{% if resolv_options is defined and + resolv_options | count > 0 %} +options {{ resolv_options | join(' ') }} +{% endif %} diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/unbound/.ansible-lint b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/unbound/.ansible-lint new file mode 100644 index 0000000..48763f0 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/unbound/.ansible-lint @@ -0,0 +1,5 @@ +--- + +skip_list: + - name[casing] + - name[template] diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/unbound/.editorconfig b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/unbound/.editorconfig new file mode 100644 index 0000000..898cdbd --- /dev/null +++
b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/unbound/.editorconfig @@ -0,0 +1,23 @@ +# EditorConfig helps developers define and maintain consistent +# coding styles between different editors and IDEs +# https://editorconfig.org/ + + +root = true + +[*] +indent_style = space +indent_size = 2 + +end_of_line = lf +charset = utf-8 +trim_trailing_whitespace = true +insert_final_newline = true +max_line_length = 100 + +[*.py] +indent_style = space +indent_size = 4 + +[*.md] +trim_trailing_whitespace = false diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/unbound/.flake8 b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/unbound/.flake8 new file mode 100644 index 0000000..1962f7e --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/unbound/.flake8 @@ -0,0 +1,19 @@ +[flake8] + +# E221 multiple spaces before operator +# E251 unexpected spaces around keyword / parameter equals + +ignore = E221,E251 + +exclude = + # No need to traverse our git directory + .git, + # There's no value in checking cache directories + __pycache__, + .tox + +# E203: https://github.com/python/black/issues/315 +# ignore = D,E741,W503,W504,H,E501,E203 + +max-line-length = 195 + diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/unbound/.gitignore b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/unbound/.gitignore new file mode 100644 index 0000000..3d35e6a --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/unbound/.gitignore @@ -0,0 +1,6 @@ +.tox +.galaxy_install_info +*kate-swp +__pycache__ +.cache +.directory diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/unbound/.yamllint b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/unbound/.yamllint new file mode 100644 index 0000000..20fd7aa --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/unbound/.yamllint @@ -0,0 +1,40 @@ +--- +# Based on ansible-lint config +extends: default + +rules: + braces: + max-spaces-inside: 1 + level: error + brackets: + max-spaces-inside: 1 + level: error + colons: + max-spaces-after: -1 + level: error + commas: + max-spaces-after: -1 + level: error + comments: disable + comments-indentation: disable + document-start: disable + empty-lines: + max: 3 + level: error + hyphens: + level: error + indentation: + spaces: 2 + key-duplicates: enable + line-length: + max: 195 + level: warning + new-line-at-end-of-file: disable + new-lines: + type: unix + trailing-spaces: disable + truthy: disable + +ignore: | + molecule/ + .github diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/unbound/CONTRIBUTING.md b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/unbound/CONTRIBUTING.md new file mode 100644 index 0000000..e3cd4cc --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/unbound/CONTRIBUTING.md @@ -0,0 +1,31 @@ +Contributing +============ +If you want to contribute to a project and make it better, your help is very welcome. +Contributing is also a great way to learn more about social coding on Github, new technologies +and their ecosystems and how to make constructive, helpful bug reports, feature requests and the +noblest of all contributions: a good, clean pull request. + +### How to make a clean pull request + +Look for a project's contribution instructions. If there are any, follow them.
+ +- Create a personal fork of the project on Github. +- Clone the fork on your local machine. Your remote repo on Github is called `origin`. +- Add the original repository as a remote called `upstream`. +- If you created your fork a while ago be sure to pull upstream changes into your local repository. +- Create a new branch to work on! Branch from `develop` if it exists, else from `master`. +- Implement/fix your feature, comment your code. +- Follow the code style of the project, including indentation. +- If the project has tests run them! +- Write or adapt tests as needed. +- Add or change the documentation as needed. +- Squash your commits into a single commit. Create a new branch if necessary. +- Push your branch to your fork on Github, the remote `origin`. +- From your fork open a pull request in the correct branch. Target the project's `develop` branch if there is one, else go for `master`! +- If the maintainer requests further changes just push them to your branch. The PR will be updated automatically. +- Once the pull request is approved and merged you can pull the changes from `upstream` to your local repo and delete + your extra branch(es). + +And last but not least: Always write your commit messages in the present tense. +Your commit message should describe what the commit, when applied, does to the +code – not what you did to the code. diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/unbound/LICENSE b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/unbound/LICENSE new file mode 100644 index 0000000..8c8472f --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/unbound/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). 
+ + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright 2020-2021 Bodo Schulz + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/unbound/Makefile b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/unbound/Makefile new file mode 100644 index 0000000..40857c8 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/unbound/Makefile @@ -0,0 +1,20 @@ +# +export TOX_SCENARIO ?= default +# export TOX_PYTHON ?= py310 +export TOX_ANSIBLE ?= ansible_6.1 + +.PHONY: converge destroy verify lint + +default: converge + +converge: + @hooks/converge + +destroy: + @hooks/destroy + +verify: + @hooks/verify + +lint: + @hooks/lint diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/unbound/README.md b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/unbound/README.md new file mode 100644 index 0000000..5ff18c7 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/unbound/README.md @@ -0,0 +1,143 @@ + +# Ansible Role: `unbound` + + +install and configure [unbound](https://www.nlnetlabs.nl/projects/unbound/about/) + + +[![GitHub Workflow Status](https://img.shields.io/github/actions/workflow/status/bodsch/ansible-unbound/main.yml?branch=main)][ci] +[![GitHub issues](https://img.shields.io/github/issues/bodsch/ansible-unbound)][issues] +[![GitHub release (latest by date)](https://img.shields.io/github/v/release/bodsch/ansible-unbound)][releases] +[![Ansible Quality Score](https://img.shields.io/ansible/quality/50067?label=role%20quality)][quality] + +[ci]: https://github.com/bodsch/ansible-unbound/actions +[issues]: https://github.com/bodsch/ansible-unbound/issues?q=is%3Aopen+is%3Aissue +[releases]: https://github.com/bodsch/ansible-unbound/releases +[quality]: https://galaxy.ansible.com/bodsch/unbound + + +## Requirements & Dependencies + + +### Operating systems + +Tested on + +* Debian based + - Debian 10 / 11 + - Ubuntu 20.10 + +## Role Variables + +``` +unbound_user: unbound +unbound_group: unbound + +unbound_conf_dir: /etc/unbound +``` + +### server +``` +unbound_config: + server: {} +``` + +### forward zone +``` +unbound_config: + forward_zone: {} +``` + +### remote control +``` +unbound_config: + remote_control: {} +``` + +## cachedb +``` +unbound_config: + cachedb: {} +``` + +Role Variables will be merged with [defaults](vars/main.yml) (see below under *default vars*) + + + +## default vars + +``` +unbound_config_defaults: + server: + verbosity: 1 + statistics-interval: 240 + use-syslog: "no" + log-queries: "yes" + logfile: /var/log/unbound.log + num-threads: 1 + directory: "/etc/unbound" + interface: 0.0.0.0 + do-ip4: 'yes' + do-ip6: 'no' + do-udp: 'yes' + do-tcp: 'yes' + access-control: + - '127.0.0.0/8 allow' + cache-min-ttl: 5 + cache-max-negative-ttl: 60 + root-hints: "/etc/unbound/root.hints" + hide-identity: 'yes' + hide-version: 'yes' + prefetch: 'yes' + max-udp-size: 4096 + msg-buffer-size: 65552 + unwanted-reply-threshold: 10000 + ipsecmod-enabled: 'no' + + forward_zone: + name: "." 
+ # definitely censor free & log free with DNSSEC Support: + forward_addrs: + - 84.200.69.80 # DNS Watch + - 84.200.70.40 # DNS Watch + - 77.109.148.136 # Xiala.net + - 77.109.148.137 # Xiala.net + - 91.239.100.100 # censurfridns.dk + - 89.233.43.71 # censurfridns.dk + + remote_control: + server-key-file: "{{ unbound_conf_dir }}/unbound_server.key" + server-cert-file: "{{ unbound_conf_dir }}/unbound_server.pem" + control-key-file: "{{ unbound_conf_dir }}/unbound_control.key" + control-cert-file: "{{ unbound_conf_dir }}/unbound_control.pem" + + certs: + server: + key_file: "{{ unbound_conf_dir }}/unbound_server.key" + cert_file: "{{ unbound_conf_dir }}/unbound_server.pem" + control: + key_file: "{{ unbound_conf_dir }}/unbound_control.key" + cert_file: "{{ unbound_conf_dir }}/unbound_control.pem" + + cachedb: {} +``` + + +--- + +## Contribution + +Please read [Contribution](CONTRIBUTING.md) + +## Development, Branches (Git Tags) + + +## Author + +- Bodo Schulz + +## License + +[Apache](LICENSE) + +**FREE SOFTWARE, HELL YEAH!** diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/unbound/defaults/main.yml b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/unbound/defaults/main.yml new file mode 100644 index 0000000..6804ac1 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/unbound/defaults/main.yml @@ -0,0 +1,14 @@ +--- + +unbound_user: unbound +unbound_group: unbound + +unbound_conf_dir: /etc/unbound + +unbound_chroot_dir: "" + +unbound_config_server: {} +unbound_config_stub_zone: {} +unbound_config_forward_zone: {} +unbound_config_remote_control: {} +unbound_config_cachedb: {} diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/unbound/handlers/.keepme b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/unbound/handlers/.keepme new file mode 100644 index 0000000..e69de29 diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/unbound/handlers/main.yml b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/unbound/handlers/main.yml new file mode 100644 index 0000000..7b2f167 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/unbound/handlers/main.yml @@ -0,0 +1,18 @@ +--- + +- name: restart unbound + ansible.builtin.service: + name: unbound + state: restarted + +- name: disable systemd-resolved + ansible.builtin.service: + name: systemd-resolved + state: stopped + enabled: false + ignore_errors: true + register: systemd_resolved + failed_when: systemd_resolved.rc != 0 + when: + - systemd_resolved_unit_file is defined + - systemd_resolved_unit_file | string | length > 0 diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/unbound/hooks/_tox_base b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/unbound/hooks/_tox_base new file mode 100644 index 0000000..a15f7c3 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/unbound/hooks/_tox_base @@ -0,0 +1,9 @@ + +TOX_ARGS= + +if [ -n "${TOX_SCENARIO}" ] +then + TOX_ARGS="--scenario-name ${TOX_SCENARIO}" +fi + +TOX_OPTS="-e ${TOX_ANSIBLE}" diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/unbound/hooks/converge b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/unbound/hooks/converge new file mode 100755 index 0000000..5df5ad6 --- /dev/null +++ 
b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/unbound/hooks/converge @@ -0,0 +1,5 @@ +#!/usr/bin/env bash + +. hooks/_tox_base + +tox ${TOX_OPTS} -- molecule converge ${TOX_ARGS} diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/unbound/hooks/destroy b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/unbound/hooks/destroy new file mode 100755 index 0000000..98fcf16 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/unbound/hooks/destroy @@ -0,0 +1,5 @@ +#!/usr/bin/env bash + +. hooks/_tox_base + +tox ${TOX_OPTS} -- molecule destroy ${TOX_ARGS} diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/unbound/hooks/lint b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/unbound/hooks/lint new file mode 100755 index 0000000..6cf7ff3 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/unbound/hooks/lint @@ -0,0 +1,5 @@ +#!/usr/bin/env bash + +. hooks/_tox_base + +tox ${TOX_OPTS} -- molecule lint ${TOX_ARGS} diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/unbound/hooks/molecule.rc b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/unbound/hooks/molecule.rc new file mode 100644 index 0000000..78c8621 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/unbound/hooks/molecule.rc @@ -0,0 +1,74 @@ + +TOX_ARGS= + +if [ -n "${TOX_SCENARIO}" ] +then + TOX_ARGS="--scenario-name ${TOX_SCENARIO}" +fi + +TOX_OPTS="-e ${TOX_ANSIBLE}" + +vercomp() { + + [[ $1 == $2 ]] && return 0 + v1=$(echo "$1" | sed -e 's|-|.|g') + v2=$(echo "$2" | sed -e 's|-|.|g') + + local IFS=. + local i ver1=($1) ver2=($2) + # fill empty fields in ver1 with zeros + for ((i=${#ver1[@]}; i<${#ver2[@]}; i++)) + do + ver1[i]=0 + done + + for ((i=0; i<${#ver1[@]}; i++)) + do + if [[ -z ${ver2[i]} ]] + then + # fill empty fields in ver2 with zeros + ver2[i]=0 + fi + if ((10#${ver1[i]} > 10#${ver2[i]})) + then + return 1 + fi + if ((10#${ver1[i]} < 10#${ver2[i]})) + then + return 2 + fi + done + return 0 +} + +install_collection() { + local collection="${1}" + + echo "Install the required collection '${collection}'" + ansible-galaxy collection install ${collection} +} + +remove_collection() { + + local collection="${1}" + + namespace="$(echo "${collection}" | cut -d '.' -f1)" + name="$(echo "${collection}" | cut -d '.' -f2)" + + collection="${HOME}/.ansible/collections/ansible_collections/${namespace}/${name}" + + rm \ + --recursive \ + --force \ + "${collection}" +} + +publish() { + + TOKEN="${HOME}/.ansible/galaxy_token" + + if [ -e "${TOKEN}" ] + then + ansible-galaxy import --token=$(cat "${TOKEN}") bodsch # "???" + fi +} diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/unbound/hooks/tox.sh b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/unbound/hooks/tox.sh new file mode 100755 index 0000000..c93de29 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/unbound/hooks/tox.sh @@ -0,0 +1,52 @@ +#!/usr/bin/env bash + +. 
hooks/molecule.rc + +TOX_TEST="${1}" + +if [ -f "./collections.yml" ] +then + for collection in $(grep -v "#" collections.yml | grep "^ - name: " | awk -F ': ' '{print $2}') + do + collections_installed="$(ansible-galaxy collection list | grep ${collection} 2> /dev/null)" + + if [ -z "${collections_installed}" ] + then + install_collection ${collection} + else + collection_version=$(echo "${collections_installed}" | awk -F ' ' '{print $2}') + version="$(grep -v "#" collections.yml | grep -A1 "^ - name: ${collection}" | grep "version: " 2> /dev/null | awk -F ': ' '{print $2}' | sed -e 's|=||' -e 's|>||' -e 's|"||g')" + + echo "The required collection '${collection}' is installed in version ${collection_version}." + + if [ ! -z "${version}" ] + then + + vercomp "${version}" "${collection_version}" + + case $? in + 0) op='=' ;; + 1) op='>' ;; + 2) op='<' ;; + esac + + if [[ $op = ">" ]] + then + # the required version is newer than the installed one + echo "re-install for version ${version}" + + remove_collection ${collection} + install_collection ${collection} + else + : + # echo "Pass: '$1 $op $2'" + fi + else + : + fi + fi + done + echo "" +fi + +tox ${TOX_OPTS} -- molecule ${TOX_TEST} ${TOX_ARGS} diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/unbound/hooks/verify b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/unbound/hooks/verify new file mode 100755 index 0000000..79a38d4 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/unbound/hooks/verify @@ -0,0 +1,5 @@ +#!/usr/bin/env bash + +. hooks/_tox_base + +tox ${TOX_OPTS} -- molecule verify ${TOX_ARGS} diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/unbound/meta/main.yml b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/unbound/meta/main.yml new file mode 100644 index 0000000..17b0f56 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/unbound/meta/main.yml @@ -0,0 +1,31 @@ +--- +galaxy_info: + + role_name: unbound + + author: Bodo Schulz + description: ansible role to install unbound + + license: Apache + min_ansible_version: "2.9" + + platforms: + - name: Debian + versions: + # 10 + - buster + - bullseye + - bookworm + - name: Ubuntu + versions: + # 20.04 + - focal + + galaxy_tags: + - system + - dns + - unbound + +dependencies: [] + +...
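The hooks/tox.sh helper above parses an optional collections.yml next to the role, grepping for `- name:` entries and an immediately following `version:` line. No such file is part of this diff, so the following is only a sketch of the input format the script appears to expect (the collection names and version are assumptions for illustration):

```
---
collections:
  - name: bodsch.core
    version: ">=2.10.1"
  - name: bodsch.systemd
```

Because the version is read from the line directly after the matching `- name:` entry (grep -A1), the `version:` key has to stay on that next line for the check to work.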
diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/unbound/molecule/configured/converge.yml b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/unbound/molecule/configured/converge.yml new file mode 100644 index 0000000..4495f80 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/unbound/molecule/configured/converge.yml @@ -0,0 +1,10 @@ +--- +- name: converge + hosts: all + any_errors_fatal: false + + environment: + NETRC: '' + + roles: + - role: bodsch.dns.unbound diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/unbound/molecule/configured/group_vars/all/vars.yml b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/unbound/molecule/configured/group_vars/all/vars.yml new file mode 100644 index 0000000..fb782bf --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/unbound/molecule/configured/group_vars/all/vars.yml @@ -0,0 +1,30 @@ +--- + +unbound_config_server: + access-control: + - '127.0.0.0/8 allow' + - '172.16.0.0/12 allow' + - '192.168.0.0/16 allow' + - 'fc00::/7 allow' + - 'fe80::/10 allow' + local-zone: + - '"doubleclick.net" redirect' + local-data: + - '"doubleclick.net A 127.0.0.1"' + +unbound_config_remote_control: + control-enable: 'yes' + control-interface: 127.0.0.1 + control-port: 8953 + +# unbound_config_cachedb: +# backend: "redis" +# # secret seed string to calculate hashed keys +# secret-seed: "0Wa2NkKveI7Iof7FyTSvGv3uZM6bGSyw6pMBX2jvX6" +# # For "redis" backend: +# # redis server's IP address or host name +# redis-server-host: 127.0.0.1 +# # redis server's TCP port +# redis-server-port: 6379 +# # timeout (in ms) for communication with the redis server +# redis-timeout: 100 diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/unbound/molecule/configured/molecule.yml b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/unbound/molecule/configured/molecule.yml new file mode 100644 index 0000000..fda92e3 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/unbound/molecule/configured/molecule.yml @@ -0,0 +1,55 @@ +--- + +role_name_check: 1 + +dependency: + name: galaxy + +driver: + name: docker + +platforms: + - name: instance + image: "ghcr.io/bodsch/docker-ansible/ansible-${DISTRIBUTION:-debian:12}" + command: ${MOLECULE_DOCKER_COMMAND:-""} + docker_host: "${DOCKER_HOST:-unix:///run/docker.sock}" + privileged: true + pre_build_image: true + cgroupns_mode: host + volumes: + - /sys/fs/cgroup:/sys/fs/cgroup:rw + - /var/lib/containerd + capabilities: + - SYS_ADMIN + tmpfs: + - /run + - /tmp + +provisioner: + name: ansible + ansible_args: + - --diff + - -v + config_options: + defaults: + deprecation_warnings: true + callback_result_format: yaml + callbacks_enabled: profile_tasks + gathering: smart + fact_caching: jsonfile + fact_caching_timeout: 8640 + fact_caching_connection: ansible_facts + +scenario: + test_sequence: + - destroy + - dependency + - syntax + - create + - prepare + - converge + - verify + - destroy + +verifier: + name: testinfra diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/unbound/molecule/configured/prepare.yml b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/unbound/molecule/configured/prepare.yml new file mode 100644 index 0000000..437874d --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/unbound/molecule/configured/prepare.yml
@@ -0,0 +1,54 @@ +--- + +- name: information + hosts: all + gather_facts: true + + pre_tasks: + - name: arch- / artixlinux + when: + - ansible_facts.distribution | lower == 'archlinux' or + ansible_facts.os_family | lower == 'artix linux' + block: + - name: update pacman system + ansible.builtin.command: | + pacman --refresh --sync --sysupgrade --noconfirm + register: pacman + changed_when: pacman.rc != 0 + failed_when: pacman.rc != 0 + + - name: create depends service + ansible.builtin.copy: + mode: 0755 + dest: /etc/init.d/net + content: | + #!/usr/bin/openrc-run + true + when: + - ansible_facts.os_family | lower == 'artix linux' + + - name: make sure python3-apt is installed (only debian based) + ansible.builtin.package: + name: + - python3-apt + state: present + when: + - ansible_facts.os_family | lower == 'debian' + + - name: update package cache + become: true + ansible.builtin.package: + update_cache: true + + - name: install dependencies + ansible.builtin.package: + name: + - iproute2 + state: present + + - ansible.builtin.debug: + msg: + - "os family : {{ ansible_facts.distribution }} ({{ ansible_facts.os_family }})" + - "distribution version : {{ ansible_facts.distribution_major_version }}" + - "ansible version : {{ ansible_version.full }}" + - "python version : {{ ansible_facts.python.version.major }}.{{ ansible_facts.python.version.minor }}" diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/unbound/molecule/configured/tests/test_default.py b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/unbound/molecule/configured/tests/test_default.py new file mode 100644 index 0000000..da7abe6 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/unbound/molecule/configured/tests/test_default.py @@ -0,0 +1,186 @@ +import os +import pprint + +import pytest +import testinfra.utils.ansible_runner +from ansible.parsing.dataloader import DataLoader +from ansible.template import Templar + +pp = pprint.PrettyPrinter() + +testinfra_hosts = testinfra.utils.ansible_runner.AnsibleRunner( + os.environ["MOLECULE_INVENTORY_FILE"] +).get_hosts("all") + + +def base_directory(): + cwd = os.getcwd() + + if "group_vars" in os.listdir(cwd): + directory = "../.." + molecule_directory = "." + else: + directory = "."
+ molecule_directory = "molecule/{}".format( + os.environ.get("MOLECULE_SCENARIO_NAME") + ) + + return directory, molecule_directory + + +def read_ansible_yaml(file_name, role_name): + ext_arr = ["yml", "yaml"] + + read_file = None + + for e in ext_arr: + test_file = "{}.{}".format(file_name, e) + if os.path.isfile(test_file): + read_file = test_file + break + + return "file={} name={}".format(read_file, role_name) + + +@pytest.fixture() +def get_vars(host): + """ + parse ansible variables + - defaults/main.yml + - vars/main.yml + - vars/${DISTRIBUTION}.yaml + - molecule/${MOLECULE_SCENARIO_NAME}/group_vars/all/vars.yml + """ + base_dir, molecule_dir = base_directory() + distribution = host.system_info.distribution + + print(" -> {}".format(distribution)) + print(" -> {}".format(base_dir)) + + if distribution in ["debian", "ubuntu"]: + os = "debian" + elif distribution in ["redhat", "ol", "centos", "rocky", "almalinux"]: + os = "redhat" + elif distribution in ["arch"]: + os = "archlinux" + + print(" -> {} / {}".format(distribution, os)) + + file_defaults = read_ansible_yaml( + "{}/defaults/main".format(base_dir), "role_defaults" + ) + file_vars = read_ansible_yaml("{}/vars/main".format(base_dir), "role_vars") + file_distibution = read_ansible_yaml( + "{}/vars/{}".format(base_dir, os), "role_distibution" + ) + file_molecule = read_ansible_yaml( + "{}/group_vars/all/vars".format(molecule_dir), "test_vars" + ) + # file_host_molecule = read_ansible_yaml("{}/host_vars/{}/vars".format(base_dir, HOST), "host_vars") + + defaults_vars = ( + host.ansible("include_vars", file_defaults) + .get("ansible_facts") + .get("role_defaults") + ) + vars_vars = ( + host.ansible("include_vars", file_vars).get("ansible_facts").get("role_vars") + ) + distibution_vars = ( + host.ansible("include_vars", file_distibution) + .get("ansible_facts") + .get("role_distibution") + ) + molecule_vars = ( + host.ansible("include_vars", file_molecule) + .get("ansible_facts") + .get("test_vars") + ) + # host_vars = host.ansible("include_vars", file_host_molecule).get("ansible_facts").get("host_vars") + + ansible_vars = defaults_vars + ansible_vars.update(vars_vars) + ansible_vars.update(distibution_vars) + ansible_vars.update(molecule_vars) + # ansible_vars.update(host_vars) + + templar = Templar(loader=DataLoader(), variables=ansible_vars) + result = templar.template(ansible_vars, fail_on_undefined=False) + + return result + + +@pytest.mark.parametrize( + "dirs", + [ + "/etc/unbound", + "/etc/unbound/unbound.conf.d", + ], +) +def test_directories(host, dirs): + d = host.file(dirs) + assert d.is_directory + assert d.exists + + +@pytest.mark.parametrize( + "files", + [ + "/etc/unbound/unbound.conf", + "/etc/unbound/unbound.conf.d/server.conf", + "/etc/unbound/unbound.conf.d/forward_zone.conf", + "/etc/unbound/unbound.conf.d/remote_control.conf", + "/etc/unbound/unbound.conf.d/cache_db.conf", + ], +) +def test_files(host, files): + f = host.file(files) + assert f.exists + assert f.is_file + + +def test_user(host): + """ + test service user + """ + shell = "/usr/sbin/nologin" + home = "/var/lib/unbound" + + distribution = host.system_info.distribution + release = host.system_info.release + + print(distribution) + + if distribution == "debian" and release.startswith("9"): + shell = "/bin/false" + + if distribution in ["arch"]: + home = "/etc/unbound" + + assert host.group("unbound").exists + assert host.user("unbound").exists + assert "unbound" in host.user("unbound").groups + assert host.user("unbound").shell == shell + assert 
host.user("unbound").home == home + + +def test_service(host): + service = host.service("unbound") + assert service.is_enabled + assert service.is_running + + +@pytest.mark.parametrize( + "ports", + [ + "0.0.0.0:53", + "127.0.0.1:8953", + ], +) +def test_open_port(host, ports): + + for i in host.socket.get_listening_sockets(): + print(i) + + application = host.socket(f"tcp://{ports}") + assert application.is_listening diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/unbound/molecule/default/converge.yml b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/unbound/molecule/default/converge.yml new file mode 100644 index 0000000..4495f80 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/unbound/molecule/default/converge.yml @@ -0,0 +1,10 @@ +--- +- name: converge + hosts: all + any_errors_fatal: false + + environment: + NETRC: '' + + roles: + - role: bodsch.dns.unbound diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/unbound/molecule/default/group_vars/all/vars.yml b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/unbound/molecule/default/group_vars/all/vars.yml new file mode 100644 index 0000000..c81cf5b --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/unbound/molecule/default/group_vars/all/vars.yml @@ -0,0 +1,3 @@ +--- + +... diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/unbound/molecule/default/molecule.yml b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/unbound/molecule/default/molecule.yml new file mode 100644 index 0000000..fda92e3 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/unbound/molecule/default/molecule.yml @@ -0,0 +1,55 @@ +--- + +role_name_check: 1 + +dependency: + name: galaxy + +driver: + name: docker + +platforms: + - name: instance + image: "ghcr.io/bodsch/docker-ansible/ansible-${DISTRIBUTION:-debian:12}" + command: ${MOLECULE_DOCKER_COMMAND:-""} + docker_host: "${DOCKER_HOST:-unix:///run/docker.sock}" + privileged: true + pre_build_image: true + cgroupns_mode: host + volumes: + - /sys/fs/cgroup:/sys/fs/cgroup:rw + - /var/lib/containerd + capabilities: + - SYS_ADMIN + tmpfs: + - /run + - /tmp + +provisioner: + name: ansible + ansible_args: + - --diff + - -v + config_options: + defaults: + deprecation_warnings: true + callback_result_format: yaml + callbacks_enabled: profile_tasks + gathering: smart + fact_caching: jsonfile + fact_caching_timeout: 8640 + fact_caching_connection: ansible_facts + +scenario: + test_sequence: + - destroy + - dependency + - syntax + - create + - prepare + - converge + - verify + - destroy + +verifier: + name: testinfra diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/unbound/molecule/default/prepare.yml b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/unbound/molecule/default/prepare.yml new file mode 100644 index 0000000..7310f7c --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/unbound/molecule/default/prepare.yml @@ -0,0 +1,54 @@ +--- + +- name: information + hosts: all + gather_facts: true + + pre_tasks: + - name: arch- / artixlinux + when: + - ansible_facts.distribution | lower == 'archlinux' or + ansible_facts.os_family | lower == 'artix linux' + block: + - name: update pacman system + ansible.builtin.command: | + pacman --refresh --sync --sysupgrade --noconfirm + register: pacman + 
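+        # NOTE: changed_when and failed_when both test rc != 0, so this task only reports "changed" when it is also about to fail; a successful pacman upgrade is reported as "ok".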
changed_when: pacman.rc != 0 + failed_when: pacman.rc != 0 + + - name: create depends service + ansible.builtin.copy: + mode: 0755 + dest: /etc/init.d/net + content: | + #!/usr/bin/openrc-run + true + when: + - ansible_facts.os_family | lower == 'artix linux' + + - name: make sure python3-apt is installed (only debian based) + ansible.builtin.package: + name: + - python3-apt + state: present + when: + - ansible_facts.os_family | lower == 'debian' + + - name: update package cache + become: true + ansible.builtin.package: + update_cache: true + + - name: make sure iproute2 is installed + ansible.builtin.package: + name: + - iproute2 + state: present + + - debug: + msg: + - "os family : {{ ansible_facts.distribution }} ({{ ansible_facts.os_family }})" + - "distribution version : {{ ansible_facts.distribution_major_version }}" + - "ansible version : {{ ansible_version.full }}" + - "python version : {{ ansible_facts.python.version.major }}.{{ ansible_facts.python.version.minor }}" diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/unbound/molecule/default/tests/test_default.py b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/unbound/molecule/default/tests/test_default.py new file mode 100644 index 0000000..37155dd --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/unbound/molecule/default/tests/test_default.py @@ -0,0 +1,185 @@ +import os +import pprint + +import pytest +import testinfra.utils.ansible_runner +from ansible.parsing.dataloader import DataLoader +from ansible.template import Templar + +pp = pprint.PrettyPrinter() + +testinfra_hosts = testinfra.utils.ansible_runner.AnsibleRunner( + os.environ["MOLECULE_INVENTORY_FILE"] +).get_hosts("all") + + +def base_directory(): + cwd = os.getcwd() + + if "group_vars" in os.listdir(cwd): + directory = "../.." + molecule_directory = "." + else: + directory = "." 
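+            # MOLECULE_SCENARIO_NAME is exported by molecule while a scenario runs; outside of molecule, os.environ.get() returns None and this degrades to "molecule/None".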
+ molecule_directory = "molecule/{}".format( + os.environ.get("MOLECULE_SCENARIO_NAME") + ) + + return directory, molecule_directory + + +def read_ansible_yaml(file_name, role_name): + ext_arr = ["yml", "yaml"] + + read_file = None + + for e in ext_arr: + test_file = "{}.{}".format(file_name, e) + if os.path.isfile(test_file): + read_file = test_file + break + + return "file={} name={}".format(read_file, role_name) + + +@pytest.fixture() +def get_vars(host): + """ + parse ansible variables + - defaults/main.yml + - vars/main.yml + - vars/${DISTRIBUTION}.yaml + - molecule/${MOLECULE_SCENARIO_NAME}/group_vars/all/vars.yml + """ + base_dir, molecule_dir = base_directory() + distribution = host.system_info.distribution + + print(" -> {}".format(distribution)) + print(" -> {}".format(base_dir)) + + if distribution in ["debian", "ubuntu"]: + os = "debian" + elif distribution in ["redhat", "ol", "centos", "rocky", "almalinux"]: + os = "redhat" + elif distribution in ["arch"]: + os = "archlinux" + + print(" -> {} / {}".format(distribution, os)) + + file_defaults = read_ansible_yaml( + "{}/defaults/main".format(base_dir), "role_defaults" + ) + file_vars = read_ansible_yaml("{}/vars/main".format(base_dir), "role_vars") + file_distibution = read_ansible_yaml( + "{}/vars/{}".format(base_dir, os), "role_distibution" + ) + file_molecule = read_ansible_yaml( + "{}/group_vars/all/vars".format(molecule_dir), "test_vars" + ) + # file_host_molecule = read_ansible_yaml("{}/host_vars/{}/vars".format(base_dir, HOST), "host_vars") + + defaults_vars = ( + host.ansible("include_vars", file_defaults) + .get("ansible_facts") + .get("role_defaults") + ) + vars_vars = ( + host.ansible("include_vars", file_vars).get("ansible_facts").get("role_vars") + ) + distibution_vars = ( + host.ansible("include_vars", file_distibution) + .get("ansible_facts") + .get("role_distibution") + ) + molecule_vars = ( + host.ansible("include_vars", file_molecule) + .get("ansible_facts") + .get("test_vars") + ) + # host_vars = host.ansible("include_vars", file_host_molecule).get("ansible_facts").get("host_vars") + + ansible_vars = defaults_vars + ansible_vars.update(vars_vars) + ansible_vars.update(distibution_vars) + ansible_vars.update(molecule_vars) + # ansible_vars.update(host_vars) + + templar = Templar(loader=DataLoader(), variables=ansible_vars) + result = templar.template(ansible_vars, fail_on_undefined=False) + + return result + + +@pytest.mark.parametrize( + "dirs", + [ + "/etc/unbound", + "/etc/unbound/unbound.conf.d", + ], +) +def test_directories(host, dirs): + d = host.file(dirs) + assert d.is_directory + assert d.exists + + +@pytest.mark.parametrize( + "files", + [ + "/etc/unbound/unbound.conf", + "/etc/unbound/unbound.conf.d/server.conf", + "/etc/unbound/unbound.conf.d/forward_zone.conf", + "/etc/unbound/unbound.conf.d/remote_control.conf", + "/etc/unbound/unbound.conf.d/cache_db.conf", + ], +) +def test_files(host, files): + f = host.file(files) + assert f.exists + assert f.is_file + + +def test_user(host): + """ + test service user + """ + shell = "/usr/sbin/nologin" + home = "/var/lib/unbound" + + distribution = host.system_info.distribution + release = host.system_info.release + + print(distribution) + + if distribution == "debian" and release.startswith("9"): + shell = "/bin/false" + + if distribution in ["arch"]: + home = "/etc/unbound" + + assert host.group("unbound").exists + assert host.user("unbound").exists + assert "unbound" in host.user("unbound").groups + assert host.user("unbound").shell == shell + assert 
host.user("unbound").home == home + + +def test_service(host): + service = host.service("unbound") + assert service.is_enabled + assert service.is_running + + +@pytest.mark.parametrize( + "ports", + [ + "0.0.0.0:53", + ], +) +def test_open_port(host, ports): + + for i in host.socket.get_listening_sockets(): + print(i) + + application = host.socket(f"tcp://{ports}") + assert application.is_listening diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/unbound/molecule/podman/converge.yml b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/unbound/molecule/podman/converge.yml new file mode 100644 index 0000000..a6be3fe --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/unbound/molecule/podman/converge.yml @@ -0,0 +1,10 @@ +--- +- name: converge + hosts: all + any_errors_fatal: false + + environment: + NETRC: '' + + roles: + - role: ansible-unbound diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/unbound/molecule/podman/group_vars/all/vars.yml b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/unbound/molecule/podman/group_vars/all/vars.yml new file mode 100644 index 0000000..64e0cbe --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/unbound/molecule/podman/group_vars/all/vars.yml @@ -0,0 +1,31 @@ +--- + +unbound_config: + server: + access-control: + - '127.0.0.0/8 allow' + - '172.16.0.0/12 allow' + - '192.168.0.0/16 allow' + - 'fc00::/7 allow' + - 'fe80::/10 allow' + local-zone: + - '"doubleclick.net" redirect' + local-data: + - '"doubleclick.net A 127.0.0.1"' + + remote_control: + control-enable: 'yes' + control-interface: 127.0.0.1 + control-port: 8953 + + cachedb: + # backend: "testframe" + # secret seed string to calculate hashed keys + secret-seed: "default" + # For "redis" backend: + # redis server's IP address or host name + redis-server-host: 127.0.0.1 + # redis server's TCP port + redis-server-port: 6379 + # timeout (in ms) for communication with the redis server + redis-timeout: 100 diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/unbound/molecule/podman/molecule.yml b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/unbound/molecule/podman/molecule.yml new file mode 100644 index 0000000..d93805f --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/unbound/molecule/podman/molecule.yml @@ -0,0 +1,50 @@ +--- + +role_name_check: 1 + +dependency: + name: galaxy + +driver: + name: podman + +lint: | + set -e + yamllint . + ansible-lint . + flake8 . 
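+# the lint block above is executed as a shell script by the "lint" step of the test sequence; yamllint, ansible-lint and flake8 must be installed alongside molecule.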
+ +platforms: + - name: instance + image: "ghcr.io/bodsch/docker-ansible/ansible-${DISTRIBUTION:-debian:10}" + command: ${MOLECULE_DOCKER_COMMAND:-""} + # docker_host: "${DOCKER_HOST:-unix:///run/docker.sock}" + privileged: true + pre_build_image: true + volumes: + - /sys/fs/cgroup:/sys/fs/cgroup:ro + +provisioner: + name: ansible + ansible_args: + - --diff + # - -vvv + config_options: + defaults: + deprecation_warnings: true + callback_result_format: yaml + +scenario: + test_sequence: + - lint + - destroy + - dependency + - syntax + - create + - prepare + - converge + - verify + - destroy + +verifier: + name: testinfra diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/unbound/molecule/podman/prepare.yml b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/unbound/molecule/podman/prepare.yml new file mode 100644 index 0000000..562a88a --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/unbound/molecule/podman/prepare.yml @@ -0,0 +1,29 @@ + +- name: prepare container + hosts: all + gather_facts: true + + pre_tasks: + - name: update package cache + ansible.builtin.apt: + update_cache: true + when: + - ansible_facts.os_family | lower == 'debian' + + - name: install netstat + ansible.builtin.package: + name: [ net-tools, iproute2 ] + state: present + when: + - ansible_facts.os_family | lower in [ 'archlinux', 'debian' ] + + - ansible.builtin.debug: + msg: + - "os family : {{ ansible_facts.os_family }}" + - "distribution : {{ ansible_facts.distribution }}" + - "distribution version : {{ ansible_facts.distribution_major_version }}" + - "ansible version : {{ ansible_version.full }}" + - "python version : {{ ansible_facts.python.version.major }}" + + roles: + - role: redis diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/unbound/molecule/podman/requirements.yml b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/unbound/molecule/podman/requirements.yml new file mode 100644 index 0000000..bfd4f8f --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/unbound/molecule/podman/requirements.yml @@ -0,0 +1,5 @@ +--- + +- name: redis + src: https://github.com/geerlingguy/ansible-role-redis.git + version: 1.7.0 diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/unbound/molecule/podman/tests/test_default.py b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/unbound/molecule/podman/tests/test_default.py new file mode 100644 index 0000000..534b64a --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/unbound/molecule/podman/tests/test_default.py @@ -0,0 +1,146 @@ +import os +import pprint + +import pytest +import testinfra.utils.ansible_runner +from ansible.parsing.dataloader import DataLoader +from ansible.template import Templar + +pp = pprint.PrettyPrinter() + +testinfra_hosts = testinfra.utils.ansible_runner.AnsibleRunner( + os.environ["MOLECULE_INVENTORY_FILE"] +).get_hosts("all") + +""" + get molecule directories +""" + + +def base_directory(): + """...""" + cwd = os.getcwd() + + if "group_vars" in os.listdir(cwd): + directory = "../.." + molecule_directory = "." + else: + directory = "." 
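+        # same working-directory heuristic as in the default and configured scenario tests.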
+ molecule_directory = "molecule/{}".format( + os.environ.get("MOLECULE_SCENARIO_NAME") + ) + + return directory, molecule_directory + + +""" + parse ansible variables + - defaults/main.yml + - vars/main.yml + - molecule/${MOLECULE_SCENARIO_NAME}/group_vars/all/vars.yml +""" + + +@pytest.fixture() +def get_vars(host): + """...""" + base_dir, molecule_dir = base_directory() + + file_defaults = "file={}/defaults/main.yml name=role_defaults".format(base_dir) + file_vars = "file={}/vars/main.yml name=role_vars".format(base_dir) + file_molecule = "file={}/group_vars/all/vars.yml name=test_vars".format( + molecule_dir + ) + + defaults_vars = ( + host.ansible("include_vars", file_defaults) + .get("ansible_facts") + .get("role_defaults") + ) + vars_vars = ( + host.ansible("include_vars", file_vars).get("ansible_facts").get("role_vars") + ) + molecule_vars = ( + host.ansible("include_vars", file_molecule) + .get("ansible_facts") + .get("test_vars") + ) + + ansible_vars = defaults_vars + ansible_vars.update(vars_vars) + ansible_vars.update(molecule_vars) + + templar = Templar(loader=DataLoader(), variables=ansible_vars) + result = templar.template(ansible_vars, fail_on_undefined=False) + + return result + + +@pytest.mark.parametrize( + "dirs", + [ + "/etc/unbound", + "/etc/unbound/unbound.conf.d", + ], +) +def test_directories(host, dirs): + d = host.file(dirs) + assert d.is_directory + assert d.exists + + +@pytest.mark.parametrize( + "files", + [ + "/etc/unbound/unbound.conf", + "/etc/unbound/unbound.conf.d/server.conf", + "/etc/unbound/unbound.conf.d/forward_zone.conf", + "/etc/unbound/unbound.conf.d/remote_control.conf", + "/etc/unbound/unbound.conf.d/cache_db.conf", + ], +) +def test_files(host, files): + f = host.file(files) + assert f.exists + assert f.is_file + + +def test_user(host): + """ + test service user + """ + shell = "/usr/sbin/nologin" + + distribution = host.system_info.distribution + release = host.system_info.release + + if distribution == "debian" and release.startswith("9"): + shell = "/bin/false" + + assert host.group("unbound").exists + assert host.user("unbound").exists + assert "unbound" in host.user("unbound").groups + assert host.user("unbound").shell == shell + assert host.user("unbound").home == "/var/lib/unbound" + + +def test_service(host): + service = host.service("unbound") + assert service.is_enabled + assert service.is_running + + +@pytest.mark.parametrize( + "ports", + [ + "0.0.0.0:53", + "127.0.0.1:8953", + ], +) +def test_open_port(host, ports): + + for i in host.socket.get_listening_sockets(): + print(i) + + application = host.socket("tcp://%s" % (ports)) + assert application.is_listening diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/unbound/tasks/chroot.yml b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/unbound/tasks/chroot.yml new file mode 100644 index 0000000..d2a3be4 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/unbound/tasks/chroot.yml @@ -0,0 +1,17 @@ +--- + +- name: create chroot environment + ansible.builtin.file: + path: "{{ item }}" + state: directory + owner: "{{ unbound_user }}" + group: "{{ unbound_group }}" + mode: "0750" + loop: + - "{{ unbound_chroot_dir }}" + - "{{ unbound_chroot_dir }}/var/run" + - "{{ unbound_chroot_dir }}/var/log" + - "{{ unbound_chroot_dir }}/var/lib/unbound" + - "{{ unbound_chroot_dir }}{{ unbound_environment_file | dirname }}" + - "{{ unbound_chroot_dir }}/etc/unbound" + - "{{ unbound_chroot_dir 
}}/etc/unbound/unbound.conf.d" diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/unbound/tasks/configure.yml b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/unbound/tasks/configure.yml new file mode 100644 index 0000000..9a50c8f --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/unbound/tasks/configure.yml @@ -0,0 +1,134 @@ +--- + +# DNS_ROOT_KEY_FILE="/usr/share/dns/root.key" -> ROOT_TRUST_ANCHOR_FILE="/var/lib/unbound/root.key" + +- name: merge unbound config between defaults and custom for server.conf + ansible.builtin.set_fact: + unbound_config_server: "{{ unbound_config_server_defaults | combine(unbound_config_server, recursive=True) }}" + unbound_config_remote_control: "{{ unbound_config_remote_control_defaults | combine(unbound_config_remote_control, recursive=True) }}" + unbound_config_cachedb: "{{ unbound_config_cachedb_defaults | combine(unbound_config_cachedb, recursive=True) }}" + unbound_config_forward_zone: "{{ unbound_config_forward_zone_defaults | combine(unbound_config_forward_zone, recursive=True) }}" + unbound_config_stub_zone: "{{ unbound_config_stub_zone_defaults | combine(unbound_config_stub_zone, recursive=True) }}" + +- name: create key file + ansible.builtin.command: | + unbound-control-setup -d {{ unbound_conf_dir }} + args: + creates: "{{ unbound_certs.server.key_file }}" + +- name: force update with cert + ansible.builtin.command: | + unbound-anchor -v -F -C /etc/unbound/unbound.conf + register: anchor + ignore_errors: true + changed_when: anchor.rc != 0 + failed_when: anchor.rc != 0 + +- name: create {{ unbound_environment_file }} + ansible.builtin.template: + src: etc/default.j2 + dest: "{{ unbound_environment_file }}" + mode: "0644" + backup: true + validate: sh -n %s + notify: restart unbound + +- name: fix rights for unbound keys + ansible.builtin.file: + mode: "0664" + owner: "{{ unbound_user }}" + group: "{{ unbound_group }}" + path: "{{ item }}" + loop: + - "{{ unbound_certs.server.key_file }}" + - "{{ unbound_certs.server.cert_file }}" + - "{{ unbound_certs.control.key_file }}" + - "{{ unbound_certs.control.cert_file }}" + +- name: get root.hints + ansible.builtin.get_url: + url: https://www.internic.net/domain/named.cache + dest: "/etc/unbound/root.hints" + owner: "{{ unbound_user }}" + group: "{{ unbound_group }}" + mode: "0660" + register: _download_archive + until: _download_archive is succeeded + retries: 5 + delay: 2 + +- name: "create {{ unbound_conf_dir }}/unbound.conf.d directory" + ansible.builtin.file: + path: "{{ unbound_conf_dir }}/unbound.conf.d" + state: directory + owner: "{{ unbound_user }}" + group: "{{ unbound_group }}" + mode: "0755" + +- name: create server.conf + ansible.builtin.template: + src: etc/unbound.conf.d/server.conf.j2 + dest: "{{ unbound_conf_dir }}/unbound.conf.d/server.conf" + owner: "{{ unbound_user }}" + group: "{{ unbound_group }}" + mode: "0666" + backup: true + # validate: "unbound-checkconf %s" + notify: restart unbound + +- name: create remote_control.conf + ansible.builtin.template: + src: etc/unbound.conf.d/remote_control.conf.j2 + dest: "{{ unbound_conf_dir }}/unbound.conf.d/remote_control.conf" + owner: "{{ unbound_user }}" + group: "{{ unbound_group }}" + mode: "0666" + backup: true + # validate: "unbound-checkconf %s" + notify: restart unbound + +- name: create forward_zone.conf + ansible.builtin.template: + src: etc/unbound.conf.d/forward_zone.conf.j2 + dest: "{{ unbound_conf_dir }}/unbound.conf.d/forward_zone.conf" + 
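+    # no validate: on the fragment templates, presumably because a single fragment is not a complete unbound configuration; the assembled config is checked by the "check configuration" task at the end of this file.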
owner: "{{ unbound_user }}" + group: "{{ unbound_group }}" + mode: "0666" + backup: true + # validate: "unbound-checkconf %s" + notify: restart unbound + +- name: create cache_db.conf + ansible.builtin.template: + src: etc/unbound.conf.d/cache_db.conf.j2 + dest: "{{ unbound_conf_dir }}/unbound.conf.d/cache_db.conf" + owner: "{{ unbound_user }}" + group: "{{ unbound_group }}" + mode: "0666" + backup: true + # validate: "unbound-checkconf %s" + notify: restart unbound + +- name: create unbound configuration + ansible.builtin.template: + src: etc/unbound.conf.j2 + dest: "{{ unbound_conf_dir }}/unbound.conf" + owner: "{{ unbound_user }}" + group: "{{ unbound_group }}" + mode: "0666" + backup: true + # validate: "unbound-checkconf %s" + notify: restart unbound + +- name: check configuration # noqa no-changed-when + ansible.builtin.command: + unbound-checkconf {{ unbound_conf_dir }}/unbound.conf + register: __configuration_check + changed_when: __configuration_check.rc != 0 + failed_when: __configuration_check.rc != 0 + +- name: configuration error + ansible.builtin.debug: + msg: "{{ __configuration_check.stderr_lines }}" + when: + - not __configuration_check.rc | int == 0 diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/unbound/tasks/install.yml b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/unbound/tasks/install.yml new file mode 100644 index 0000000..ae15802 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/unbound/tasks/install.yml @@ -0,0 +1,117 @@ +--- + +- name: install unbound + ansible.builtin.package: + name: unbound + state: present + +- name: unbound root.key + when: + - ansible_facts.os_family | lower == 'debian' + block: + - name: test existing root.key + ansible.builtin.stat: + path: "/var/lib/unbound/root.key" + get_mime: true + register: __unbound_rootkey_exists + + - name: detect unbound helper + ansible.builtin.stat: + path: "/usr/lib/unbound/package-helper" + get_mime: false + register: _package_helper + + - name: detect unbound helper + ansible.builtin.stat: + path: "/usr/libexec/unbound-helper" + get_mime: false + register: _unbound_helper + + - name: define helper + ansible.builtin.set_fact: + unbound_helper: "{{ _package_helper | bodsch.dns.unbound_helper(_unbound_helper) }}" + + - name: create root.key # noqa no-changed-when + ansible.builtin.command: | + {{ unbound_helper }} root_trust_anchor_update + when: + - not __unbound_rootkey_exists.stat.exists + +- name: merge unbound config between defaults and custom for server.conf + ansible.builtin.set_fact: + unbound_config_server: "{{ unbound_config_server_defaults | combine(unbound_config_server, recursive=True) }}" + +- name: update unbound server confg + when: + - ansible_facts.os_family | lower == 'archlinux' + block: + - name: update dictionary + ansible.builtin.set_fact: + unbound_config_server: "{{ unbound_config_server | combine({'auto-trust-anchor-file': '/etc/unbound/trusted-key.key'}) }}" + + - name: fix rights for auto-trust-anchor-file + ansible.builtin.file: + path: "{{ unbound_config_server['auto-trust-anchor-file'] }}" + owner: "{{ unbound_user }}" + group: "{{ unbound_group }}" + mode: "0660" + when: + - unbound_config_server['auto-trust-anchor-file'] is defined + +- name: remove distribution config files + ansible.builtin.file: + path: "{{ unbound_conf_dir }}/unbound.conf.d/{{ item }}" + state: absent + loop: + - qname-minimisation.conf + - root-auto-trust-anchor-file.conf + +- name: unbound.log handling + when: + - 
unbound_config_server.logfile is defined + - unbound_config_server.logfile | length != 0 + block: + - name: test existing unbound.log + ansible.builtin.stat: + path: "/{{ unbound_config_server.logfile }}" + get_mime: true + register: __unbound_logfile_exists + + - name: create unbound.log + ansible.builtin.file: + name: "{{ unbound_config_server.logfile }}" + state: touch + owner: "{{ unbound_user }}" + group: "{{ unbound_group }}" + mode: "0666" + when: + - not __unbound_logfile_exists.stat.exists + +# ----------------------------------------------------------------- + +- name: systemd + when: + - ansible_facts.service_mgr == "systemd" + block: + - name: populate service facts + ansible.builtin.service_facts: + register: systemd_facts + no_log: true + tags: + - unbound + - install + + - name: set systemd unit name + ansible.builtin.set_fact: + resolved_unit_file: "{{ ansible_facts.services | bodsch.systemd.service('systemd-resolved', state='running') }}" + tags: + - unbound + - install + + - name: disable systemd-resolved + service: + name: "{{ resolved_unit_file }}" + state: stopped + enabled: false + when: + - resolved_unit_file | default('') | string | length > 0 diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/unbound/tasks/main.yml b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/unbound/tasks/main.yml new file mode 100644 index 0000000..d26d65d --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/unbound/tasks/main.yml @@ -0,0 +1,19 @@ +--- + +- name: prepare + ansible.builtin.include_tasks: prepare.yml + +- name: install + ansible.builtin.include_tasks: install.yml + +- name: chroot + ansible.builtin.include_tasks: chroot.yml + when: + - unbound_chroot_dir is defined + - unbound_chroot_dir | string | length > 0 + +- name: configure + ansible.builtin.include_tasks: configure.yml + +- name: service + ansible.builtin.include_tasks: service.yml diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/unbound/tasks/prepare.yml b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/unbound/tasks/prepare.yml new file mode 100644 index 0000000..36adad6 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/unbound/tasks/prepare.yml @@ -0,0 +1,26 @@ +--- + +- name: include OS specific configuration ({{ ansible_facts.distribution }} ({{ ansible_facts.os_family }}) {{ ansible_facts.distribution_major_version }}) + ansible.builtin.include_vars: "{{ lookup('first_found', params) }}" + vars: + params: + paths: + - "vars" + files: + # eg. debian-10 / ubuntu-20.04 / centos-8 / oraclelinux-8 + - "{{ ansible_facts.distribution | lower }}-{{ ansible_facts.distribution_major_version }}.yml" + # eg. archlinux-systemd / archlinux-openrc + - "{{ ansible_facts.distribution | lower }}-{{ ansible_facts.service_mgr | lower }}.yml" + # eg. artixlinux + - "{{ ansible_facts.distribution | lower | replace(' ', '') }}.yml" + # eg. debian / ubuntu / centos / oraclelinux + - "{{ ansible_facts.distribution | lower }}.yml" + # eg. 
redhat / debian / archlinux + - "{{ ansible_facts.os_family | lower }}.yml" + - default.yml + skip: true + +- name: install dependencies + ansible.builtin.package: + name: "{{ unbound_dependencies }}" + state: present diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/unbound/tasks/service.yml b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/unbound/tasks/service.yml new file mode 100644 index 0000000..6d05854 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/unbound/tasks/service.yml @@ -0,0 +1,32 @@ +--- + +- name: systemd + when: + - ansible_facts.service_mgr == "systemd" + block: + - name: populate service facts + ansible.builtin.service_facts: + register: systemd_facts + no_log: true + + - name: set systemd unit name + ansible.builtin.set_fact: + systemd_resolved_unit_file: "{{ ansible_facts.services | bodsch.core.get_service('systemd-resolved') }}" + + # set_fact never reports "changed", so a notify on it would never fire; + # stop and disable systemd-resolved directly instead. + - name: disable systemd-resolved + ansible.builtin.service: + name: systemd-resolved + state: stopped + enabled: false + ignore_errors: true + when: + - systemd_resolved_unit_file is defined + - systemd_resolved_unit_file | string | length > 0 + +- name: enable unbound service + ansible.builtin.service: + name: unbound + enabled: true + state: started diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/unbound/templates/etc/default.j2 b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/unbound/templates/etc/default.j2 new file mode 100644 index 0000000..4bd18d8 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/unbound/templates/etc/default.j2 @@ -0,0 +1,7 @@ +# {{ ansible_managed }} + +{% if ansible_facts.os_family | lower == 'redhat' %} +# for extra debug, add "-v -v" or change verbosity: in unbound.conf +UNBOUND_ENABLE=true +UNBOUND_OPTIONS="-c /etc/unbound/unbound.conf" +{% endif %} diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/unbound/templates/etc/unbound.conf.d/auth_zone.conf.j2 b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/unbound/templates/etc/unbound.conf.d/auth_zone.conf.j2 new file mode 100644 index 0000000..5c451da --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/unbound/templates/etc/unbound.conf.d/auth_zone.conf.j2 @@ -0,0 +1,32 @@ +#jinja2: trim_blocks: True, lstrip_blocks: True +# {{ ansible_managed }} +# auth-zone + +{# +# Authority zones +# The data for these zones is kept locally, from a file or downloaded. +# The data can be served to downstream clients, or used instead of the +# upstream (which saves a lookup to the upstream). The first example +# has a copy of the root for local usage. The second serves example.org +# authoritatively. zonefile: reads from file (and writes to it if you also +# download it), master: fetches with AXFR and IXFR, or url to zonefile. +#} + +auth-zone: + name: "." 
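+    # keep a local copy of the root zone for the resolver itself (for-upstream), without serving it to downstream clients; cf. RFC 8806 (hyperlocal root).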
+ for-downstream: no + for-upstream: yes + fallback-enabled: yes + master: b.root-servers.net + master: c.root-servers.net + master: e.root-servers.net + master: f.root-servers.net + master: g.root-servers.net + master: k.root-servers.net +{# +# auth-zone: +# name: "example.org" +# for-downstream: yes +# for-upstream: yes +# zonefile: "example.org.zone" +#} diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/unbound/templates/etc/unbound.conf.d/cache_db.conf.j2 b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/unbound/templates/etc/unbound.conf.d/cache_db.conf.j2 new file mode 100644 index 0000000..d63b97c --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/unbound/templates/etc/unbound.conf.d/cache_db.conf.j2 @@ -0,0 +1,29 @@ +#jinja2: trim_blocks: True, lstrip_blocks: True +# {{ ansible_managed }} +# cachedb + +{% if unbound_config_cachedb is defined and unbound_config_cachedb | count != 0 %} +cachedb: +{% for k, v in unbound_config_cachedb.items() %} + {{ k }}: {{ v }} +{% endfor %} +{% endif -%} + +{# +# CacheDB +# Enable external backend DB as auxiliary cache. Specify the backend name +# (default is "testframe", which has no use other than for debugging and +# testing) and backend-specific options. The 'cachedb' module must be +# included in module-config. +# cachedb: +# backend: "testframe" +# # secret seed string to calculate hashed keys +# secret-seed: "default" +# # For "redis" backend: +# # redis server's IP address or host name +# redis-server-host: 127.0.0.1 +# # redis server's TCP port +# redis-server-port: 6379 +# # timeout (in ms) for communication with the redis server +# redis-timeout: 100 +#} diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/unbound/templates/etc/unbound.conf.d/dnscrypt.conf.j2 b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/unbound/templates/etc/unbound.conf.d/dnscrypt.conf.j2 new file mode 100644 index 0000000..71a1c83 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/unbound/templates/etc/unbound.conf.d/dnscrypt.conf.j2 @@ -0,0 +1,25 @@ +#jinja2: trim_blocks: True, lstrip_blocks: True +# {{ ansible_managed }} +# dnscrypt + +{# +# DNSCrypt +# Caveats: +# 1. the keys/certs cannot be produced by unbound. You can use dnscrypt-wrapper +# for this: https://github.com/cofyc/dnscrypt-wrapper/blob/master/README.md#usage +# 2. dnscrypt channel attaches to an interface. you MUST set interfaces to +# listen on `dnscrypt-port` with the following snippet: +# server: +# interface: 0.0.0.0@443 +# interface: ::0@443 +# +# Finally, `dnscrypt` config has its own section. +# dnscrypt: +# dnscrypt-enable: yes +# dnscrypt-port: 443 +# dnscrypt-provider: 2.dnscrypt-cert.example.com. 
+# dnscrypt-secret-key: /path/unbound-conf/keys1/1.key +# dnscrypt-secret-key: /path/unbound-conf/keys2/1.key +# dnscrypt-provider-cert: /path/unbound-conf/keys1/1.cert +# dnscrypt-provider-cert: /path/unbound-conf/keys2/1.cert +#} diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/unbound/templates/etc/unbound.conf.d/forward_zone.conf.j2 b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/unbound/templates/etc/unbound.conf.d/forward_zone.conf.j2 new file mode 100644 index 0000000..594894b --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/unbound/templates/etc/unbound.conf.d/forward_zone.conf.j2 @@ -0,0 +1,35 @@ +#jinja2: trim_blocks: True, lstrip_blocks: True +# {{ ansible_managed }} +# forward_zone + +{% if unbound_config_forward_zone is defined and unbound_config_forward_zone | count != 0 %} +forward-zone: +{% if unbound_config_forward_zone.name is defined %} + name: "{{ unbound_config_forward_zone.name }}" +{% endif %} +{% for v in unbound_config_forward_zone.forward_addrs %} + forward-addr: {{ v }} +{% endfor %} +{% endif %} + +{# +# Forward zones +# Create entries like below, to make all queries for 'example.com' and +# 'example.org' go to the given list of servers. These servers have to handle +# recursion to other nameservers. List zero or more nameservers by hostname +# or by ipaddress. Use an entry with name "." to forward all queries. +# If you enable forward-first, it attempts without the forward if it fails. +# forward-zone: +# name: "example.com" +# forward-addr: 192.0.2.68 +# forward-addr: 192.0.2.73@5355 # forward to port 5355. +# forward-first: no +# forward-tls-upstream: no +# forward-zone: +# name: "example.org" +# forward-host: fwd.example.com +# +# You can now also dynamically create and delete forward-zone's using +# unbound-control forward_add domain.com 1.2.3.4 5.6.7.8 +# unbound-control forward_remove domain.com 1.2.3.4 5.6.7.8 +#} diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/unbound/templates/etc/unbound.conf.d/python.conf.j2 b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/unbound/templates/etc/unbound.conf.d/python.conf.j2 new file mode 100644 index 0000000..eda3e16 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/unbound/templates/etc/unbound.conf.d/python.conf.j2 @@ -0,0 +1,13 @@ +#jinja2: trim_blocks: True, lstrip_blocks: True +# {{ ansible_managed }} +# python + +{# +# Python config section. To enable: +# o use --with-pythonmodule to configure before compiling. +# o list python in the module-config string (above) to enable. +# o and give a python-script to run. 
+# python: +# # Script file to load +# # python-script: "/etc/unbound/ubmodule-tst.py" +#} diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/unbound/templates/etc/unbound.conf.d/remote_control.conf.j2 b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/unbound/templates/etc/unbound.conf.d/remote_control.conf.j2 new file mode 100644 index 0000000..3b530ee --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/unbound/templates/etc/unbound.conf.d/remote_control.conf.j2 @@ -0,0 +1,43 @@ +#jinja2: trim_blocks: True, lstrip_blocks: True +# {{ ansible_managed }} +# remote_control + +{% if unbound_config_remote_control is defined and unbound_config_remote_control | count != 0 %} +remote-control: +{% for k, v in unbound_config_remote_control.items() %} + {{ k }}: {{ v }} +{% endfor %} +{% endif -%} + +{# +# # Remote control config section. +# remote-control: +# # Enable remote control with unbound-control(8) here. +# # set up the keys and certificates with unbound-control-setup. +# # Note: required for unbound-munin package +# control-enable: yes +# +# # Set to no and use an absolute path as control-interface to use +# # a unix local named pipe for unbound-control. +# # control-use-cert: yes +# +# # what interfaces are listened to for remote control. +# # give 0.0.0.0 and ::0 to listen to all interfaces. +# # control-interface: 127.0.0.1 +# # control-interface: ::1 +# +# # port number for remote control operations. +# # control-port: 8953 +# +# # unbound server key file. +# server-key-file: "/etc/unbound/unbound_server.key" +# +# # unbound server certificate file. +# server-cert-file: "/etc/unbound/unbound_server.pem" +# +# # unbound-control key file. +# control-key-file: "/etc/unbound/unbound_control.key" +# +# # unbound-control certificate file. +# control-cert-file: "/etc/unbound/unbound_control.pem" +#} diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/unbound/templates/etc/unbound.conf.d/server.conf.j2 b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/unbound/templates/etc/unbound.conf.d/server.conf.j2 new file mode 100644 index 0000000..402710b --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/unbound/templates/etc/unbound.conf.d/server.conf.j2 @@ -0,0 +1,808 @@ +#jinja2: trim_blocks: True, lstrip_blocks: True +# {{ ansible_managed }} +# server + +{% if unbound_config_server is defined and unbound_config_server | count != 0 %} +server: +{% for k, v in unbound_config_server.items() %} +{% if k == 'access-control' %} +{% for vv in v %} + access-control: {{ vv }} +{% endfor %} +{% elif k == 'local-zone' %} +{% for vv in v %} + local-zone: {{ vv }} +{% endfor %} +{% elif k == 'local-data' %} +{% for vv in v %} + local-data: {{ vv }} +{% endfor %} +{% else %} + {% if v | string | length > 0 %} + {{ k }}: {{ v }} + {% endif %} +{% endif %} +{% endfor %} +{% endif -%} + +{# +# server: +# # whitespace is not necessary, but looks cleaner. +# +# # verbosity number, 0 is least verbose. 1 is default. +# verbosity: 1 +# +# # print statistics to the log (for every thread) every N seconds. +# # Set to "" or 0 to disable. Default is disabled. +# # Needs to be disabled for munin plugin +# statistics-interval: 0 +# +# # enable shm for stats, default no. if you enable also enable +# # statistics-interval, every time it also writes stats to the +# # shared memory segment keyed with shm-key. 
+# # shm-enable: no +# +# # shm for stats uses this key, and key+1 for the shared mem segment. +# # shm-key: 11777 +# +# # enable cumulative statistics, without clearing them after printing. +# # Needs to be disabled for munin plugin +# statistics-cumulative: no +# +# # enable extended statistics (query types, answer codes, status) +# # printed from unbound-control. default off, because of speed. +# # Needs to be enabled for munin plugin +# extended-statistics: yes +# +# # number of threads to create. 1 disables threading. +# num-threads: 4 +# +# # specify the interfaces to answer queries from by ip-address. +# # The default is to listen to localhost (127.0.0.1 and ::1). +# # specify 0.0.0.0 and ::0 to bind to all available interfaces. +# # specify every interface[@port] on a new 'interface:' labelled line. +# # The listen interfaces are not changed on reload, only on restart. +# # interface: 0.0.0.0 +# # interface: ::0 +# # interface: 192.0.2.153 +# # interface: 192.0.2.154 +# # interface: 192.0.2.154@5003 +# # interface: 2001:DB8::5 +# # +# # for dns over tls and raw dns over port 80 +# # interface: 0.0.0.0@443 +# # interface: ::0@443 +# # interface: 0.0.0.0@80 +# # interface: ::0@80 +# +# # enable this feature to copy the source address of queries to reply. +# # Socket options are not supported on all platforms. experimental. +# # interface-automatic: yes +# # +# # NOTE: Enable this option when specifying interface 0.0.0.0 or ::0 +# # NOTE: Disabled per Fedora policy not to listen to * on default install +# # NOTE: If deploying on non-default port, eg 80/443, this needs to be disabled +# interface-automatic: no +# +# # port to answer queries from +# # port: 53 +# +# # specify the interfaces to send outgoing queries to authoritative +# # server from by ip-address. If none, the default (all) interface +# # is used. Specify every interface on a 'outgoing-interface:' line. +# # outgoing-interface: 192.0.2.153 +# # outgoing-interface: 2001:DB8::5 +# # outgoing-interface: 2001:DB8::6 +# +# # Specify a netblock to use remainder 64 bits as random bits for +# # upstream queries. Uses freebind option (Linux). +# # outgoing-interface: 2001:DB8::/64 +# # Also (Linux:) ip -6 addr add 2001:db8::/64 dev lo +# # And: ip -6 route add local 2001:db8::/64 dev lo +# # And set prefer-ip6: yes to use the ip6 randomness from a netblock. +# # Set this to yes to prefer ipv6 upstream servers over ipv4. +# # prefer-ip6: no +# +# # number of ports to allocate per thread, determines the size of the +# # port range that can be open simultaneously. About double the +# # num-queries-per-thread, or, use as many as the OS will allow you. +# # outgoing-range: 4096 +# +# # permit unbound to use this port number or port range for +# # making outgoing queries, using an outgoing interface. +# # Only ephemeral ports are allowed by SElinux +# outgoing-port-permit: 32768-60999 +# +# # deny unbound the use this of port number or port range for +# # making outgoing queries, using an outgoing interface. +# # Use this to make sure unbound does not grab a UDP port that some +# # other server on this computer needs. The default is to avoid +# # IANA-assigned port numbers. +# # If multiple outgoing-port-permit and outgoing-port-avoid options +# # are present, they are processed in order. +# # Our SElinux policy does not allow non-ephemeral ports to be used +# outgoing-port-avoid: 0-32767 +# +# # number of outgoing simultaneous tcp buffers to hold per thread. 
+# # outgoing-num-tcp: 10 +# +# # number of incoming simultaneous tcp buffers to hold per thread. +# # incoming-num-tcp: 10 +# +# # buffer size for UDP port 53 incoming (SO_RCVBUF socket option). +# # 0 is system default. Use 4m to catch query spikes for busy servers. +# # so-rcvbuf: 0 +# +# # buffer size for UDP port 53 outgoing (SO_SNDBUF socket option). +# # 0 is system default. Use 4m to handle spikes on very busy servers. +# # so-sndbuf: 0 +# +# # use SO_REUSEPORT to distribute queries over threads. +# so-reuseport: yes +# +# # use IP_TRANSPARENT so the interface: addresses can be non-local +# # and you can config non-existing IPs that are going to work later on +# # (uses IP_BINDANY on FreeBSD). +# ip-transparent: yes +# +# # use IP_FREEBIND so the interface: addresses can be non-local +# # and you can bind to nonexisting IPs and interfaces that are down. +# # Linux only. On Linux you also have ip-transparent that is similar. +# # ip-freebind: no +# +# # EDNS reassembly buffer to advertise to UDP peers (the actual buffer +# # is set with msg-buffer-size). 1472 can solve fragmentation (timeouts). +# # edns-buffer-size: 4096 +# +# # Maximum UDP response size (not applied to TCP response). +# # Suggested values are 512 to 4096. Default is 4096. 65536 disables it. +# # 3072 causes +dnssec any isc.org queries to need TC=1. +# # Helps mitigating DDOS +# max-udp-size: 3072 +# +# # buffer size for handling DNS data. No messages larger than this +# # size can be sent or received, by UDP or TCP. In bytes. +# # msg-buffer-size: 65552 +# +# # the amount of memory to use for the message cache. +# # plain value in bytes or you can append k, m or G. default is "4Mb". +# # msg-cache-size: 4m +# +# # the number of slabs to use for the message cache. +# # the number of slabs must be a power of 2. +# # more slabs reduce lock contention, but fragment memory usage. +# # msg-cache-slabs: 4 +# +# # the number of queries that a thread gets to service. +# # num-queries-per-thread: 1024 +# +# # if very busy, 50% queries run to completion, 50% get timeout in msec +# # jostle-timeout: 200 +# +# # msec to wait before close of port on timeout UDP. 0 disables. +# # delay-close: 0 +# # the amount of memory to use for the RRset cache. +# # plain value in bytes or you can append k, m or G. default is "4Mb". +# # rrset-cache-size: 4m +# +# # the number of slabs to use for the RRset cache. +# # the number of slabs must be a power of 2. +# # more slabs reduce lock contention, but fragment memory usage. +# # rrset-cache-slabs: 4 +# +# # the time to live (TTL) value lower bound, in seconds. Default 0. +# # If more than an hour could easily give trouble due to stale data. +# # cache-min-ttl: 0 +# +# # the time to live (TTL) value cap for RRsets and messages in the +# # cache. Items are not cached for longer. In seconds. +# # cache-max-ttl: 86400 +# +# # the time to live (TTL) value cap for negative responses in the cache +# # cache-max-negative-ttl: 3600 +# +# # the time to live (TTL) value for cached roundtrip times, lameness and +# # EDNS version information for hosts. In seconds. +# # infra-host-ttl: 900 +# +# # minimum wait time for responses, increase if uplink is long. In msec. +# # infra-cache-min-rtt: 50 +# +# # the number of slabs to use for the Infrastructure cache. +# # the number of slabs must be a power of 2. +# # more slabs reduce lock contention, but fragment memory usage. +# # infra-cache-slabs: 4 +# +# # the maximum number of hosts that are cached (roundtrip, EDNS, lame). 
+# # infra-cache-numhosts: 10000 +# +# # define a number of tags here, use with local-zone, access-control. +# # repeat the define-tag statement to add additional tags. +# # define-tag: "tag1 tag2 tag3" +# +# # Enable IPv4, "yes" or "no". +# # do-ip4: yes +# +# # Enable IPv6, "yes" or "no". +# # do-ip6: yes +# +# # Enable UDP, "yes" or "no". +# # NOTE: if setting up an unbound on tls443 for public use, you might want to +# # disable UDP to avoid being used in DNS amplification attacks. +# # do-udp: yes +# +# # Enable TCP, "yes" or "no". +# # do-tcp: yes +# +# # upstream connections use TCP only (and no UDP), "yes" or "no" +# # useful for tunneling scenarios, default no. +# # tcp-upstream: no +# +# # upstream connections also use UDP (even if do-udp is no). +# # useful if if you want UDP upstream, but don't provide UDP downstream. +# # udp-upstream-without-downstream: no +# +# # Maximum segment size (MSS) of TCP socket on which the server +# # responds to queries. Default is 0, system default MSS. +# # tcp-mss: 0 +# +# # Maximum segment size (MSS) of TCP socket for outgoing queries. +# # Default is 0, system default MSS. +# # outgoing-tcp-mss: 0 +# +# # Fedora note: do not activate this - can cause a crash +# # Use systemd socket activation for UDP, TCP, and control sockets. +# # use-systemd: no +# +# # Detach from the terminal, run in background, "yes" or "no". +# # Set the value to "no" when unbound runs as systemd service. +# # do-daemonize: yes +# # control which clients are allowed to make (recursive) queries +# # to this server. Specify classless netblocks with /size and action. +# # By default everything is refused, except for localhost. +# # Choose deny (drop message), refuse (polite error reply), +# # allow (recursive ok), allow_setrd (recursive ok, rd bit is forced on), +# # allow_snoop (recursive and nonrecursive ok) +# # deny_non_local (drop queries unless can be answered from local-data) +# # refuse_non_local (like deny_non_local but polite error reply). +# # access-control: 0.0.0.0/0 refuse +# # access-control: 127.0.0.0/8 allow +# # access-control: ::0/0 refuse +# # access-control: ::1 allow +# # access-control: ::ffff:127.0.0.1 allow +# +# # tag access-control with list of tags (in "" with spaces between) +# # Clients using this access control element use localzones that +# # are tagged with one of these tags. +# # access-control-tag: 192.0.2.0/24 "tag2 tag3" +# +# # set action for particular tag for given access control element +# # if you have multiple tag values, the tag used to lookup the action +# # is the first tag match between access-control-tag and local-zone-tag +# # where "first" comes from the order of the define-tag values. +# # access-control-tag-action: 192.0.2.0/24 tag3 refuse +# +# # set redirect data for particular tag for access control element +# # access-control-tag-data: 192.0.2.0/24 tag2 "A 127.0.0.1" +# +# # Set view for access control element +# # access-control-view: 192.0.2.0/24 viewname +# +# # if given, a chroot(2) is done to the given directory. +# # i.e. you can chroot to the working directory, for example, +# # for extra security, but make sure all files are in that directory. +# # +# # If chroot is enabled, you should pass the configfile (from the +# # commandline) as a full path from the original root. After the +# # chroot has been performed the now defunct portion of the config +# # file path is removed to be able to reread the config after a reload. 
+# # +# # All other file paths (working dir, logfile, roothints, and +# # key files) can be specified in several ways: +# # o as an absolute path relative to the new root. +# # o as a relative path to the working directory. +# # o as an absolute path relative to the original root. +# # In the last case the path is adjusted to remove the unused portion. +# # +# # The pid file can be absolute and outside of the chroot, it is +# # written just prior to performing the chroot and dropping permissions. +# # +# # Additionally, unbound may need to access /dev/random (for entropy). +# # How to do this is specific to your OS. +# # +# # If you give "" no chroot is performed. The path must not end in a /. +# # chroot: "/var/lib/unbound" +# chroot: "" +# +# # if given, user privileges are dropped (after binding port), +# # and the given username is assumed. Default is user "unbound". +# # If you give "" no privileges are dropped. +# username: "unbound" +# +# # the working directory. The relative files in this config are +# # relative to this directory. If you give "" the working directory +# # is not changed. +# # If you give a server: directory: dir before include: file statements +# # then those includes can be relative to the working directory. +# directory: "/etc/unbound" +# +# # the log file, "" means log to stderr. +# # Use of this option sets use-syslog to "no". +# # logfile: "" +# +# # Log to syslog(3) if yes. The log facility LOG_DAEMON is used to +# # log to. If yes, it overrides the logfile. +# # use-syslog: yes +# +# # Log identity to report. if empty, defaults to the name of argv[0] +# # (usually "unbound"). +# # log-identity: "" +# +# # print UTC timestamp in ascii to logfile, default is epoch in seconds. +# log-time-ascii: yes +# +# # print one line with time, IP, name, type, class for every query. +# # log-queries: no +# +# # print one line per reply, with time, IP, name, type, class, rcode, +# # timetoresolve, fromcache and responsesize. +# # log-replies: no +# +# # the pid file. Can be an absolute path outside of chroot/work dir. +# pidfile: "/run/unbound/unbound.pid" +# +# # file to read root hints from. +# # get one from https://www.internic.net/domain/named.cache +# # root-hints: "" +# +# # enable to not answer id.server and hostname.bind queries. +# # hide-identity: no +# +# # enable to not answer version.server and version.bind queries. +# # hide-version: no +# +# # enable to not answer trustanchor.unbound queries. +# # hide-trustanchor: no +# +# # the identity to report. Leave "" or default to return hostname. +# # identity: "" +# +# # the version to report. Leave "" or default to return package version. +# # version: "" +# +# # the target fetch policy. +# # series of integers describing the policy per dependency depth. +# # The number of values in the list determines the maximum dependency +# # depth the recursor will pursue before giving up. Each integer means: +# # -1 : fetch all targets opportunistically, +# # 0: fetch on demand, +# # positive value: fetch that many targets opportunistically. +# # Enclose the list of numbers between quotes (""). +# # target-fetch-policy: "3 2 1 0 0" +# +# # Harden against very small EDNS buffer sizes. +# # harden-short-bufsize: no +# +# # Harden against unseemly large queries. +# # harden-large-queries: no +# +# # Harden against out of zone rrsets, to avoid spoofing attempts. +# harden-glue: yes +# +# # Harden against receiving dnssec-stripped data. 
If you turn it +# # off, failing to validate dnskey data for a trustanchor will +# # trigger insecure mode for that zone (like without a trustanchor). +# # Default on, which insists on dnssec data for trust-anchored zones. +# harden-dnssec-stripped: yes +# +# # Harden against queries that fall under dnssec-signed nxdomain names. +# harden-below-nxdomain: yes +# +# # Harden the referral path by performing additional queries for +# # infrastructure data. Validates the replies (if possible). +# # Default off, because the lookups burden the server. Experimental +# # implementation of draft-wijngaards-dnsext-resolver-side-mitigation. +# harden-referral-path: yes +# +# # Harden against algorithm downgrade when multiple algorithms are +# # advertised in the DS record. If no, allows the weakest algorithm +# # to validate the zone. +# # harden-algo-downgrade: no +# +# # Sent minimum amount of information to upstream servers to enhance +# # privacy. Only sent minimum required labels of the QNAME and set QTYPE +# # to NS when possible. +# qname-minimisation: yes +# +# # QNAME minimisation in strict mode. Do not fall-back to sending full +# # QNAME to potentially broken nameservers. A lot of domains will not be +# # resolvable when this option in enabled. +# # This option only has effect when qname-minimisation is enabled. +# # qname-minimisation-strict: no +# +# # Aggressive NSEC uses the DNSSEC NSEC chain to synthesize NXDOMAIN +# # and other denials, using information from previous NXDOMAINs answers. +# aggressive-nsec: yes +# +# # Use 0x20-encoded random bits in the query to foil spoof attempts. +# # This feature is an experimental implementation of draft dns-0x20. +# # use-caps-for-id: no +# +# # Domains (and domains in them) without support for dns-0x20 and +# # the fallback fails because they keep sending different answers. +# # caps-whitelist: "licdn.com" +# # caps-whitelist: "senderbase.org" +# +# # Enforce privacy of these addresses. Strips them away from answers. +# # It may cause DNSSEC validation to additionally mark it as bogus. +# # Protects against 'DNS Rebinding' (uses browser as network proxy). +# # Only 'private-domain' and 'local-data' names are allowed to have +# # these private addresses. No default. +# # private-address: 10.0.0.0/8 +# # private-address: 172.16.0.0/12 +# # private-address: 192.168.0.0/16 +# # private-address: 169.254.0.0/16 +# # private-address: fd00::/8 +# # private-address: fe80::/10 +# # private-address: ::ffff:0:0/96 +# +# # Allow the domain (and its subdomains) to contain private addresses. +# # local-data statements are allowed to contain private addresses too. +# # private-domain: "example.com" +# +# # If nonzero, unwanted replies are not only reported in statistics, +# # but also a running total is kept per thread. If it reaches the +# # threshold, a warning is printed and a defensive action is taken, +# # the cache is cleared to flush potential poison out of it. +# # A suggested value is 10000000, the default is 0 (turned off). +# unwanted-reply-threshold: 10000000 +# +# # Do not query the following addresses. No DNS queries are sent there. +# # List one address per entry. List classless netblocks with /size, +# # do-not-query-address: 127.0.0.1/8 +# # do-not-query-address: ::1 +# +# # if yes, the above default do-not-query-address entries are present. +# # if no, localhost can be queried (for testing and debugging). +# # do-not-query-localhost: yes +# +# # if yes, perform prefetching of almost expired message cache entries. 
+# prefetch: yes +# +# # if yes, perform key lookups adjacent to normal lookups. +# prefetch-key: yes +# +# # if yes, Unbound rotates RRSet order in response. +# rrset-roundrobin: yes +# +# # if yes, Unbound doesn't insert authority/additional sections +# # into response messages when those sections are not required. +# minimal-responses: yes +# +# # true to disable DNSSEC lameness check in iterator. +# # disable-dnssec-lame-check: no +# +# # module configuration of the server. A string with identifiers +# # separated by spaces. Syntax: "[dns64] [validator] iterator" +# module-config: "ipsecmod validator iterator" +# +# # File with trusted keys, kept uptodate using RFC5011 probes, +# # initial file like trust-anchor-file, then it stores metadata. +# # Use several entries, one per domain name, to track multiple zones. +# # +# # If you want to perform DNSSEC validation, run unbound-anchor before +# # you start unbound (i.e. in the system boot scripts). And enable: +# # Please note usage of unbound-anchor root anchor is at your own risk +# # and under the terms of our LICENSE (see that file in the source). +# # auto-trust-anchor-file: "/var/lib/unbound/root.key" +# +# # trust anchor signaling sends a RFC8145 key tag query after priming. +# trust-anchor-signaling: yes +# +# # Root key trust anchor sentinel (draft-ietf-dnsop-kskroll-sentinel) +# root-key-sentinel: yes +# +# # File with DLV trusted keys. Same format as trust-anchor-file. +# # There can be only one DLV configured, it is trusted from root down. +# # DLV is going to be decommissioned. Please do not use it any more. +# # dlv-anchor-file: "dlv.isc.org.key" +# +# # File with trusted keys for validation. Specify more than one file +# # with several entries, one file per entry. +# # Zone file format, with DS and DNSKEY entries. +# # Note this gets out of date, use auto-trust-anchor-file please. +# # trust-anchor-file: "" +# +# # Trusted key for validation. DS or DNSKEY. specify the RR on a +# # single line, surrounded by "". TTL is ignored. class is IN default. +# # Note this gets out of date, use auto-trust-anchor-file please. +# # (These examples are from August 2007 and may not be valid anymore). +# # trust-anchor: "nlnetlabs.nl. DNSKEY 257 3 5 AQPzzTWMz8qSWIQlfRnPckx2BiVmkVN6LPupO3mbz7FhLSnm26n6iG9N Lby97Ji453aWZY3M5/xJBSOS2vWtco2t8C0+xeO1bc/d6ZTy32DHchpW 6rDH1vp86Ll+ha0tmwyy9QP7y2bVw5zSbFCrefk8qCUBgfHm9bHzMG1U BYtEIQ==" +# # trust-anchor: "jelte.nlnetlabs.nl. DS 42860 5 1 14D739EB566D2B1A5E216A0BA4D17FA9B038BE4A" +# +# # File with trusted keys for validation. Specify more than one file +# # with several entries, one file per entry. Like trust-anchor-file +# # but has a different file format. Format is BIND-9 style format, +# # the trusted-keys { name flag proto algo "key"; }; clauses are read. +# # you need external update procedures to track changes in keys. +# # trusted-keys-file: "" +# # +# trusted-keys-file: /etc/unbound/keys.d/*.key +# auto-trust-anchor-file: "/var/lib/unbound/root.key" +# +# # Ignore chain of trust. Domain is treated as insecure. +# # domain-insecure: "example.com" +# +# # Override the date for validation with a specific fixed date. +# # Do not set this unless you are debugging signature inception +# # and expiration. "" or "0" turns the feature off. -1 ignores date. +# # val-override-date: "" +# +# # The time to live for bogus data, rrsets and messages. This avoids +# # some of the revalidation, until the time interval expires. in secs. 
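+# # (Bogus data here means data that failed DNSSEC validation.)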
+# # val-bogus-ttl: 60 +# +# # The signature inception and expiration dates are allowed to be off +# # by 10% of the signature lifetime (expir-incep) from our local clock. +# # This leeway is capped with a minimum and a maximum. In seconds. +# # val-sig-skew-min: 3600 +# # val-sig-skew-max: 86400 +# +# # Should additional section of secure message also be kept clean of +# # unsecure data. Useful to shield the users of this validator from +# # potential bogus data in the additional section. All unsigned data +# # in the additional section is removed from secure messages. +# val-clean-additional: yes +# +# # Turn permissive mode on to permit bogus messages. Thus, messages +# # for which security checks failed will be returned to clients, +# # instead of SERVFAIL. It still performs the security checks, which +# # result in interesting log files and possibly the AD bit in +# # replies if the message is found secure. The default is off. +# # NOTE: TURNING THIS ON DISABLES ALL DNSSEC SECURITY +# val-permissive-mode: no +# +# # Ignore the CD flag in incoming queries and refuse them bogus data. +# # Enable it if the only clients of unbound are legacy servers (w2008) +# # that set CD but cannot validate themselves. +# # ignore-cd-flag: no +# +# # Serve expired responses from cache, with TTL 0 in the response, +# # and then attempt to fetch the data afresh. +# serve-expired: yes +# +# # Have the validator log failed validations for your diagnosis. +# # 0: off. 1: A line per failed user query. 2: With reason and bad IP. +# val-log-level: 1 +# +# # It is possible to configure NSEC3 maximum iteration counts per +# # keysize. Keep this table very short, as linear search is done. +# # A message with an NSEC3 with larger count is marked insecure. +# # List in ascending order the keysize and count values. +# # val-nsec3-keysize-iterations: "1024 150 2048 500 4096 2500" +# +# # instruct the auto-trust-anchor-file probing to add anchors after ttl. +# # add-holddown: 2592000 # 30 days +# +# # instruct the auto-trust-anchor-file probing to del anchors after ttl. +# # del-holddown: 2592000 # 30 days +# +# # auto-trust-anchor-file probing removes missing anchors after ttl. +# # If the value 0 is given, missing anchors are not removed. +# # keep-missing: 31622400 # 366 days +# +# # debug option that allows very small holddown times for key rollover, +# # otherwise the RFC mandates probe intervals must be at least 1 hour. +# # permit-small-holddown: no +# +# # the amount of memory to use for the key cache. +# # plain value in bytes or you can append k, m or G. default is "4Mb". +# # key-cache-size: 4m +# +# # the number of slabs to use for the key cache. +# # the number of slabs must be a power of 2. +# # more slabs reduce lock contention, but fragment memory usage. +# # key-cache-slabs: 4 +# +# # the amount of memory to use for the negative cache (used for DLV). +# # plain value in bytes or you can append k, m or G. default is "1Mb". +# # neg-cache-size: 1m +# +# # By default, for a number of zones a small default 'nothing here' +# # reply is built-in. Query traffic is thus blocked. If you +# # wish to serve such zone you can unblock them by uncommenting one +# # of the nodefault statements below. +# # You may also have to use domain-insecure: zone to make DNSSEC work, +# # unless you have your own trust anchors for this zone. +# # local-zone: "localhost." nodefault +# # local-zone: "127.in-addr.arpa." nodefault +# # local-zone: "1.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.ip6.arpa." 
nodefault +# # local-zone: "onion." nodefault +# # local-zone: "test." nodefault +# # local-zone: "invalid." nodefault +# # local-zone: "10.in-addr.arpa." nodefault +# # local-zone: "16.172.in-addr.arpa." nodefault +# # local-zone: "17.172.in-addr.arpa." nodefault +# # local-zone: "18.172.in-addr.arpa." nodefault +# # local-zone: "19.172.in-addr.arpa." nodefault +# # local-zone: "20.172.in-addr.arpa." nodefault +# # local-zone: "21.172.in-addr.arpa." nodefault +# # local-zone: "22.172.in-addr.arpa." nodefault +# # local-zone: "23.172.in-addr.arpa." nodefault +# # local-zone: "24.172.in-addr.arpa." nodefault +# # local-zone: "25.172.in-addr.arpa." nodefault +# # local-zone: "26.172.in-addr.arpa." nodefault +# # local-zone: "27.172.in-addr.arpa." nodefault +# # local-zone: "28.172.in-addr.arpa." nodefault +# # local-zone: "29.172.in-addr.arpa." nodefault +# # local-zone: "30.172.in-addr.arpa." nodefault +# # local-zone: "31.172.in-addr.arpa." nodefault +# # local-zone: "168.192.in-addr.arpa." nodefault +# # local-zone: "0.in-addr.arpa." nodefault +# # local-zone: "254.169.in-addr.arpa." nodefault +# # local-zone: "2.0.192.in-addr.arpa." nodefault +# # local-zone: "100.51.198.in-addr.arpa." nodefault +# # local-zone: "113.0.203.in-addr.arpa." nodefault +# # local-zone: "255.255.255.255.in-addr.arpa." nodefault +# # local-zone: "0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.ip6.arpa." nodefault +# # local-zone: "d.f.ip6.arpa." nodefault +# # local-zone: "8.e.f.ip6.arpa." nodefault +# # local-zone: "9.e.f.ip6.arpa." nodefault +# # local-zone: "a.e.f.ip6.arpa." nodefault +# # local-zone: "b.e.f.ip6.arpa." nodefault +# # local-zone: "8.b.d.0.1.0.0.2.ip6.arpa." nodefault +# # And for 64.100.in-addr.arpa. to 127.100.in-addr.arpa. +# +# # If unbound is running service for the local host then it is useful +# # to perform lan-wide lookups to the upstream, and unblock the +# # long list of local-zones above. If this unbound is a dns server +# # for a network of computers, disabled is better and stops information +# # leakage of local lan information. +# # unblock-lan-zones: no +# +# # The insecure-lan-zones option disables validation for +# # these zones, as if they were all listed as domain-insecure. +# # insecure-lan-zones: no +# +# # a number of locally served zones can be configured. +# # local-zone: +# # local-data: "" +# # o deny serves local data (if any), else, drops queries. +# # o refuse serves local data (if any), else, replies with error. +# # o static serves local data, else, nxdomain or nodata answer. +# # o transparent gives local data, but resolves normally for other names +# # o redirect serves the zone data for any subdomain in the zone. +# # o nodefault can be used to normally resolve AS112 zones. +# # o typetransparent resolves normally for other types and other names +# # o inform acts like transparent, but logs client IP address +# # o inform_deny drops queries and logs client IP address +# # o always_transparent, always_refuse, always_nxdomain, resolve in +# # that way but ignore local data for that name +# # o noview breaks out of that view towards global local-zones. +# # +# # defaults are localhost address, reverse for 127.0.0.1 and ::1 +# # and nxdomain for AS112 zones. If you configure one of these zones +# # the default content is omitted, or you can omit it with 'nodefault'. +# # +# # If you configure local-data without specifying local-zone, by +# # default a transparent local-zone is created for the data. 
+# #
+# # You can add locally served data with
+# # local-zone: "local." static
+# # local-data: "mycomputer.local. IN A 192.0.2.51"
+# # local-data: 'mytext.local TXT "content of text record"'
+# #
+# # You can override certain queries with
+# # local-data: "adserver.example.com A 127.0.0.1"
+# #
+# # You can redirect a domain to a fixed address with
+# # (this makes example.com, www.example.com, etc, all go to 192.0.2.3)
+# # local-zone: "example.com" redirect
+# # local-data: "example.com A 192.0.2.3"
+# #
+# # Shorthand to make PTR records, "IPv4 name" or "IPv6 name".
+# # You can also add PTR records using local-data directly, but then
+# # you need to do the reverse notation yourself.
+# # local-data-ptr: "192.0.2.3 www.example.com"
+#
+# include: /etc/unbound/local.d/*.conf
+#
+# # tag a localzone with a list of tag names (in "" with spaces between)
+# # local-zone-tag: "example.com" "tag2 tag3"
+#
+# # add a netblock specific override to a localzone, with zone type
+# # local-zone-override: "example.com" 192.0.2.0/24 refuse
+#
+# # service clients over SSL (on the TCP sockets), with plain DNS inside
+# # the SSL stream. Give the certificate to use and private key.
+# # default is "" (disabled). requires restart to take effect.
+# # tls-service-key: "/etc/unbound/unbound_server.key"
+# # tls-service-pem: "/etc/unbound/unbound_server.pem"
+# # tls-port: 853
+# #
+# # request upstream over SSL (with plain DNS inside the SSL stream).
+# # Default is no. Can be turned on and off with unbound-control.
+# # tls-upstream: no
+#
+# # Certificates used to authenticate connections made upstream.
+# # tls-cert-bundle: ""
+#
+# # Add system certs to the cert bundle, from the Windows Cert Store
+# # tls-win-cert: no
+#
+# # Also serve tls on these port numbers (eg. 443, ...), by listing
+# # tls-additional-ports: portno for each of the port numbers.
+#
+# # DNS64 prefix. Must be specified when DNS64 is in use.
+# # Enable dns64 in module-config. Used to synthesize IPv6 from IPv4.
+# # dns64-prefix: 64:ff9b::0/96
+#
+# # ratelimit for uncached, new queries; this limits recursion effort.
+# # ratelimiting is experimental, and may help against randomqueryflood.
+# # if 0 (default) it is disabled, otherwise it states the qps allowed per zone.
+# # ratelimit: 0
+#
+# # ratelimits are tracked in a cache, size in bytes of cache (or k,m).
+# # ratelimit-size: 4m
+# # ratelimit cache slabs, reduces lock contention if equal to cpucount.
+# # ratelimit-slabs: 4
+#
+# # 0 blocks when ratelimited, otherwise let 1/xth traffic through
+# # ratelimit-factor: 10
+#
+# # what is considered a low rtt (ping time for upstream server), in msec
+# # low-rtt: 45
+# # select low rtt this many times out of 1000. 0 means the fast server
+# # select is disabled. prefetches are not sped up.
+# # low-rtt-permil: 0
+#
+# # override the ratelimit for a specific domain name.
+# # give this setting multiple times to have multiple overrides.
+# # ratelimit-for-domain: example.com 1000
+# # override the ratelimits for all domains below a domain name
+# # can give this multiple times, the name closest to the zone is used.
+# # ratelimit-below-domain: com 1000
+#
+# # global query ratelimit for all ip addresses.
+# # feature is experimental.
+# # if 0 (default) it is disabled, otherwise it states the qps allowed per ip address
+# # ip-ratelimit: 0
+#
+# # ip ratelimits are tracked in a cache, size in bytes of cache (or k,m).
+# # ip-ratelimit-size: 4m
+# # ip ratelimit cache slabs, reduces lock contention if equal to cpucount.
+# # ip-ratelimit-slabs: 4 +# +# # 0 blocks when ip is ratelimited, otherwise let 1/xth traffic through +# # ip-ratelimit-factor: 10 +# +# # Specific options for ipsecmod. unbound needs to be configured with +# # --enable-ipsecmod for these to take effect. +# # +# # Enable or disable ipsecmod (it still needs to be defined in +# # module-config above). Can be used when ipsecmod needs to be +# # enabled/disabled via remote-control(below). +# # Fedora: module will be enabled on-demand by libreswan +# ipsecmod-enabled: no +# +# # Path to executable external hook. It must be defined when ipsecmod is +# # listed in module-config (above). +# # ipsecmod-hook: "./my_executable" +# ipsecmod-hook:/usr/libexec/ipsec/_unbound-hook +# +# # When enabled unbound will reply with SERVFAIL if the return value of +# # the ipsecmod-hook is not 0. +# # ipsecmod-strict: no +# # +# # Maximum time to live (TTL) for cached A/AAAA records with IPSECKEY. +# # ipsecmod-max-ttl: 3600 +# # +# # Reply with A/AAAA even if the relevant IPSECKEY is bogus. Mainly used for +# # testing. +# # ipsecmod-ignore-bogus: no +# # +# # Domains for which ipsecmod will be triggered. If not defined (default) +# # all domains are treated as being whitelisted. +# # ipsecmod-whitelist: "libreswan.org" +# # ipsecmod-whitelist: "nlnetlabs.nl" +#} diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/unbound/templates/etc/unbound.conf.d/stub_zone.conf.j2 b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/unbound/templates/etc/unbound.conf.d/stub_zone.conf.j2 new file mode 100644 index 0000000..44d9e3f --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/unbound/templates/etc/unbound.conf.d/stub_zone.conf.j2 @@ -0,0 +1,41 @@ +#jinja2: trim_blocks: True, lstrip_blocks: True +# {{ ansible_managed }} +# stub_zone + +{% if unbound_config_stub_zone is defined and unbound_config_stub_zone | count != 0 %} +stub-zone: +{% if unbound_config_stub_zone.name is defined %} + name: "{{ unbound_config_stub_zone.name }}" +{% endif %} +{% if unbound_config_stub_zone.stub_addr is defined %} + stub-addr: {{ unbound_config_stub_zone.stub_addr }} +{% endif %} +{% if unbound_config_stub_zone.stub_host is defined %} + stub-host: {{ unbound_config_stub_zone.stub_host }} +{% endif %} + +{% endif %} + +{# +# Stub zones. +# Create entries like below, to make all queries for 'example.com' and +# 'example.org' go to the given list of nameservers. list zero or more +# nameservers by hostname or by ipaddress. If you set stub-prime to yes, +# the list is treated as priming hints (default is no). +# With stub-first yes, it attempts without the stub if it fails. +# Consider adding domain-insecure: name and local-zone: name nodefault +# to the server: section if the stub is a locally served zone. +# stub-zone: +# name: "example.com" +# stub-addr: 192.0.2.68 +# stub-prime: no +# stub-first: no +# stub-tls-upstream: no +# stub-zone: +# name: "example.org" +# stub-host: ns.example.com. 
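+#
+# As a minimal sketch (shape inferred from the template above, not taken
+# from the role documentation), the Ansible variable this template
+# consumes could look like this in group_vars:
+#
+# unbound_config_stub_zone:
+#   name: "example.com"
+#   stub_addr: 192.0.2.68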
+ +# You can now also dynamically create and delete stub-zone's using +# unbound-control stub_add domain.com 1.2.3.4 5.6.7.8 +# unbound-control stub_remove domain.com 1.2.3.4 5.6.7.8 +#} diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/unbound/templates/etc/unbound.conf.d/unbound.conf.j2 b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/unbound/templates/etc/unbound.conf.d/unbound.conf.j2 new file mode 100644 index 0000000..93049ec --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/unbound/templates/etc/unbound.conf.d/unbound.conf.j2 @@ -0,0 +1,10 @@ +# {{ ansible_managed }} + +# See the unbound.conf(5) man page. +# +# See /usr/share/doc/unbound/examples/unbound.conf for a commented +# reference config file. +# +# The following line includes additional configuration files from the +# /etc/unbound/unbound.conf.d directory. +include: "/etc/unbound/unbound.conf.d/*.conf" diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/unbound/templates/etc/unbound.conf.d/views.conf.j2 b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/unbound/templates/etc/unbound.conf.d/views.conf.j2 new file mode 100644 index 0000000..a07c3fb --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/unbound/templates/etc/unbound.conf.d/views.conf.j2 @@ -0,0 +1,22 @@ +#jinja2: trim_blocks: True, lstrip_blocks: True +# {{ ansible_managed }} +# views + +{# +# Views +# Create named views. Name must be unique. Map views to requests using +# the access-control-view option. Views can contain zero or more local-zone +# and local-data options. Options from matching views will override global +# options. Global options will be used if no matching view is found. +# With view-first yes, it will try to answer using the global local-zone and +# local-data elements if there is no view specific match. +# view: +# name: "viewname" +# local-zone: "example.com" redirect +# local-data: "example.com A 192.0.2.3" +# local-data-ptr: "192.0.2.3 www.example.com" +# view-first: no +# view: +# name: "anotherview" +# local-zone: "example.com" refuse +#} diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/unbound/templates/etc/unbound.conf.j2 b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/unbound/templates/etc/unbound.conf.j2 new file mode 100644 index 0000000..679e458 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/unbound/templates/etc/unbound.conf.j2 @@ -0,0 +1,1274 @@ +#jinja2: trim_blocks: True, lstrip_blocks: True +# {{ ansible_managed }} + +include: "/etc/unbound/unbound.conf.d/*.conf" + +{# +# Example configuration file. +# +# See unbound.conf(5) man page, version 1.17.0. +# +# this is a comment. + +# Use this anywhere in the file to include other text into this file. +#include: "otherfile.conf" + +# Use this anywhere in the file to include other text, that explicitly starts a +# clause, into this file. Text after this directive needs to start a clause. +#include-toplevel: "otherfile.conf" + +# The server clause sets the main parameters. +server: + # whitespace is not necessary, but looks cleaner. + + # verbosity number, 0 is least verbose. 1 is default. + # verbosity: 1 + + # print statistics to the log (for every thread) every N seconds. + # Set to "" or 0 to disable. Default is disabled. + # statistics-interval: 0 + + # enable shm for stats, default no. 
if you enable it, also enable
+ # statistics-interval, every time it also writes stats to the
+ # shared memory segment keyed with shm-key.
+ # shm-enable: no
+
+ # shm for stats uses this key, and key+1 for the shared mem segment.
+ # shm-key: 11777
+
+ # enable cumulative statistics, without clearing them after printing.
+ # statistics-cumulative: no
+
+ # enable extended statistics (query types, answer codes, status)
+ # printed from unbound-control. default off, because of speed.
+ # extended-statistics: no
+
+ # number of threads to create. 1 disables threading.
+ # num-threads: 1
+
+ # specify the interfaces to answer queries from by ip-address.
+ # The default is to listen to localhost (127.0.0.1 and ::1).
+ # specify 0.0.0.0 and ::0 to bind to all available interfaces.
+ # specify every interface[@port] on a new 'interface:' labelled line.
+ # The listen interfaces are not changed on reload, only on restart.
+ # interface: 192.0.2.153
+ # interface: 192.0.2.154
+ # interface: 192.0.2.154@5003
+ # interface: 2001:DB8::5
+ # interface: eth0@5003
+
+ # enable this feature to copy the source address of queries to reply.
+ # Socket options are not supported on all platforms. experimental.
+ # interface-automatic: no
+
+ # instead of the default port, open additional ports separated by
+ # spaces when interface-automatic is enabled, by listing them here.
+ # interface-automatic-ports: ""
+
+ # port to answer queries from
+ # port: 53
+
+ # specify the interfaces to send outgoing queries to authoritative
+ # server from by ip-address. If none, the default (all) interface
+ # is used. Specify every interface on an 'outgoing-interface:' line.
+ # outgoing-interface: 192.0.2.153
+ # outgoing-interface: 2001:DB8::5
+ # outgoing-interface: 2001:DB8::6
+
+ # Specify a netblock to use remainder 64 bits as random bits for
+ # upstream queries. Uses freebind option (Linux).
+ # outgoing-interface: 2001:DB8::/64
+ # Also (Linux:) ip -6 addr add 2001:db8::/64 dev lo
+ # And: ip -6 route add local 2001:db8::/64 dev lo
+ # And set prefer-ip6: yes to use the ip6 randomness from a netblock.
+ # Set this to yes to prefer ipv6 upstream servers over ipv4.
+ # prefer-ip6: no
+
+ # Prefer ipv4 upstream servers, even if ipv6 is available.
+ # prefer-ip4: no
+
+ # number of ports to allocate per thread, determines the size of the
+ # port range that can be open simultaneously. About double the
+ # num-queries-per-thread, or, use as many as the OS will allow you.
+ # outgoing-range: 4096
+
+ # permit Unbound to use this port number or port range for
+ # making outgoing queries, using an outgoing interface.
+ # outgoing-port-permit: 32768
+
+ # deny Unbound the use of this port number or port range for
+ # making outgoing queries, using an outgoing interface.
+ # Use this to make sure Unbound does not grab a UDP port that some
+ # other server on this computer needs. The default is to avoid
+ # IANA-assigned port numbers.
+ # If multiple outgoing-port-permit and outgoing-port-avoid options
+ # are present, they are processed in order.
+ # outgoing-port-avoid: "3200-3208"
+
+ # number of outgoing simultaneous tcp buffers to hold per thread.
+ # outgoing-num-tcp: 10
+
+ # number of incoming simultaneous tcp buffers to hold per thread.
+ # incoming-num-tcp: 10
+
+ # buffer size for UDP port 53 incoming (SO_RCVBUF socket option).
+ # 0 is system default. Use 4m to catch query spikes for busy servers.
+ # so-rcvbuf: 0
+
+ # buffer size for UDP port 53 outgoing (SO_SNDBUF socket option).
+ # 0 is system default.
Use 4m to handle spikes on very busy servers.
+ # so-sndbuf: 0
+
+ # use SO_REUSEPORT to distribute queries over threads.
+ # at extreme load it could be better to turn it off to distribute evenly.
+ # so-reuseport: yes
+
+ # use IP_TRANSPARENT so the interface: addresses can be non-local
+ # and you can config non-existing IPs that are going to work later on
+ # (uses IP_BINDANY on FreeBSD).
+ # ip-transparent: no
+
+ # use IP_FREEBIND so the interface: addresses can be non-local
+ # and you can bind to nonexisting IPs and interfaces that are down.
+ # Linux only. On Linux you also have ip-transparent that is similar.
+ # ip-freebind: no
+
+ # the value of the Differentiated Services Codepoint (DSCP)
+ # in the differentiated services field (DS) of the outgoing
+ # IP packets.
+ # ip-dscp: 0
+
+ # EDNS reassembly buffer to advertise to UDP peers (the actual buffer
+ # is set with msg-buffer-size).
+ # edns-buffer-size: 1232
+
+ # Maximum UDP response size (not applied to TCP response).
+ # Suggested values are 512 to 4096. Default is 4096. 65536 disables it.
+ # max-udp-size: 4096
+
+ # max memory to use for stream (tcp and tls) waiting result buffers.
+ # stream-wait-size: 4m
+
+ # buffer size for handling DNS data. No messages larger than this
+ # size can be sent or received, by UDP or TCP. In bytes.
+ # msg-buffer-size: 65552
+
+ # the amount of memory to use for the message cache.
+ # plain value in bytes or you can append k, m or G. default is "4Mb".
+ # msg-cache-size: 4m
+
+ # the number of slabs to use for the message cache.
+ # the number of slabs must be a power of 2.
+ # more slabs reduce lock contention, but fragment memory usage.
+ # msg-cache-slabs: 4
+
+ # the number of queries that a thread gets to service.
+ # num-queries-per-thread: 1024
+
+ # if very busy, 50% queries run to completion, 50% get timeout in msec
+ # jostle-timeout: 200
+
+ # msec to wait before close of port on timeout UDP. 0 disables.
+ # delay-close: 0
+
+ # perform connect for UDP sockets to mitigate ICMP side channel.
+ # udp-connect: yes
+
+ # The number of retries, per upstream nameserver in a delegation, when
+ # a throwaway response (also timeouts) is received.
+ # outbound-msg-retry: 5
+
+ # msec for waiting for an unknown server to reply. Increase if you
+ # are behind a slow satellite link, to eg. 1128.
+ # unknown-server-time-limit: 376
+
+ # the amount of memory to use for the RRset cache.
+ # plain value in bytes or you can append k, m or G. default is "4Mb".
+ # rrset-cache-size: 4m
+
+ # the number of slabs to use for the RRset cache.
+ # the number of slabs must be a power of 2.
+ # more slabs reduce lock contention, but fragment memory usage.
+ # rrset-cache-slabs: 4
+
+ # the time to live (TTL) value lower bound, in seconds. Default 0.
+ # If more than an hour could easily give trouble due to stale data.
+ # cache-min-ttl: 0
+
+ # the time to live (TTL) value cap for RRsets and messages in the
+ # cache. Items are not cached for longer. In seconds.
+ # cache-max-ttl: 86400
+
+ # the time to live (TTL) value cap for negative responses in the cache
+ # cache-max-negative-ttl: 3600
+
+ # the time to live (TTL) value for cached roundtrip times, lameness and
+ # EDNS version information for hosts. In seconds.
+ # infra-host-ttl: 900
+
+ # minimum wait time for responses, increase if uplink is long. In msec.
+ # infra-cache-min-rtt: 50
+
+ # maximum wait time for responses. In msec.
+ # infra-cache-max-rtt: 120000
+
+ # enable to make server probe down hosts more frequently.
+ # infra-keep-probing: no
+
+ # the number of slabs to use for the Infrastructure cache.
+ # the number of slabs must be a power of 2.
+ # more slabs reduce lock contention, but fragment memory usage.
+ # infra-cache-slabs: 4
+
+ # the maximum number of hosts that are cached (roundtrip, EDNS, lame).
+ # infra-cache-numhosts: 10000
+
+ # define a number of tags here, use with local-zone, access-control,
+ # interface-*.
+ # repeat the define-tag statement to add additional tags.
+ # define-tag: "tag1 tag2 tag3"
+
+ # Enable IPv4, "yes" or "no".
+ # do-ip4: yes
+
+ # Enable IPv6, "yes" or "no".
+ # do-ip6: yes
+
+ # Enable UDP, "yes" or "no".
+ # do-udp: yes
+
+ # Enable TCP, "yes" or "no".
+ # do-tcp: yes
+
+ # upstream connections use TCP only (and no UDP), "yes" or "no"
+ # useful for tunneling scenarios, default no.
+ # tcp-upstream: no
+
+ # upstream connections also use UDP (even if do-udp is no).
+ # useful if you want UDP upstream, but don't provide UDP downstream.
+ # udp-upstream-without-downstream: no
+
+ # Maximum segment size (MSS) of TCP socket on which the server
+ # responds to queries. Default is 0, system default MSS.
+ # tcp-mss: 0
+
+ # Maximum segment size (MSS) of TCP socket for outgoing queries.
+ # Default is 0, system default MSS.
+ # outgoing-tcp-mss: 0
+
+ # Idle TCP timeout, connection closed in milliseconds.
+ # tcp-idle-timeout: 30000
+
+ # Enable EDNS TCP keepalive option.
+ # edns-tcp-keepalive: no
+
+ # Timeout for EDNS TCP keepalive, in msec.
+ # edns-tcp-keepalive-timeout: 120000
+
+ # Use systemd socket activation for UDP, TCP, and control sockets.
+ # use-systemd: no
+
+ # Detach from the terminal, run in background, "yes" or "no".
+ # Set the value to "no" when Unbound runs as systemd service.
+ # do-daemonize: yes
+
+ # control which clients are allowed to make (recursive) queries
+ # to this server. Specify classless netblocks with /size and action.
+ # By default everything is refused, except for localhost.
+ # Choose deny (drop message), refuse (polite error reply),
+ # allow (recursive ok), allow_setrd (recursive ok, rd bit is forced on),
+ # allow_snoop (recursive and nonrecursive ok)
+ # deny_non_local (drop queries unless can be answered from local-data)
+ # refuse_non_local (like deny_non_local but polite error reply).
+ # access-control: 127.0.0.0/8 allow
+ # access-control: ::1 allow
+ # access-control: ::ffff:127.0.0.1 allow
+
+ # tag access-control with list of tags (in "" with spaces between)
+ # Clients using this access control element use localzones that
+ # are tagged with one of these tags.
+ # access-control-tag: 192.0.2.0/24 "tag2 tag3"
+
+ # set action for particular tag for given access control element.
+ # if you have multiple tag values, the tag used to lookup the action
+ # is the first tag match between access-control-tag and local-zone-tag
+ # where "first" comes from the order of the define-tag values.
+ # access-control-tag-action: 192.0.2.0/24 tag3 refuse
+
+ # set redirect data for particular tag for access control element
+ # access-control-tag-data: 192.0.2.0/24 tag2 "A 127.0.0.1"
+
+ # Set view for access control element
+ # access-control-view: 192.0.2.0/24 viewname
+
+ # Similar to 'access-control:' but for interfaces.
+ # Control which listening interfaces are allowed to accept (recursive)
+ # queries for this server.
+ # The specified interfaces should be the same as the ones specified in
+ # 'interface:' followed by the action.
+ # The actions are the same as 'access-control:' above.
+ # By default all the interfaces configured are refused. + # Note: any 'access-control*:' setting overrides all 'interface-*:' + # settings for targeted clients. + # interface-action: 192.0.2.153 allow + # interface-action: 192.0.2.154 allow + # interface-action: 192.0.2.154@5003 allow + # interface-action: 2001:DB8::5 allow + # interface-action: eth0@5003 allow + + # Similar to 'access-control-tag:' but for interfaces. + # Tag interfaces with a list of tags (in "" with spaces between). + # Interfaces using these tags use localzones that are tagged with one + # of these tags. + # The specified interfaces should be the same as the ones specified in + # 'interface:' followed by the list of tags. + # Note: any 'access-control*:' setting overrides all 'interface-*:' + # settings for targeted clients. + # interface-tag: eth0@5003 "tag2 tag3" + + # Similar to 'access-control-tag-action:' but for interfaces. + # Set action for particular tag for a given interface element. + # If you have multiple tag values, the tag used to lookup the action + # is the first tag match between interface-tag and local-zone-tag + # where "first" comes from the order of the define-tag values. + # The specified interfaces should be the same as the ones specified in + # 'interface:' followed by the tag and action. + # Note: any 'access-control*:' setting overrides all 'interface-*:' + # settings for targeted clients. + # interface-tag-action: eth0@5003 tag3 refuse + + # Similar to 'access-control-tag-data:' but for interfaces. + # Set redirect data for a particular tag for an interface element. + # The specified interfaces should be the same as the ones specified in + # 'interface:' followed by the tag and the redirect data. + # Note: any 'access-control*:' setting overrides all 'interface-*:' + # settings for targeted clients. + # interface-tag-data: eth0@5003 tag2 "A 127.0.0.1" + + # Similar to 'access-control-view:' but for interfaces. + # Set view for an interface element. + # The specified interfaces should be the same as the ones specified in + # 'interface:' followed by the view name. + # Note: any 'access-control*:' setting overrides all 'interface-*:' + # settings for targeted clients. + # interface-view: eth0@5003 viewname + + # if given, a chroot(2) is done to the given directory. + # i.e. you can chroot to the working directory, for example, + # for extra security, but make sure all files are in that directory. + # + # If chroot is enabled, you should pass the configfile (from the + # commandline) as a full path from the original root. After the + # chroot has been performed the now defunct portion of the config + # file path is removed to be able to reread the config after a reload. + # + # All other file paths (working dir, logfile, roothints, and + # key files) can be specified in several ways: + # o as an absolute path relative to the new root. + # o as a relative path to the working directory. + # o as an absolute path relative to the original root. + # In the last case the path is adjusted to remove the unused portion. + # + # The pid file can be absolute and outside of the chroot, it is + # written just prior to performing the chroot and dropping permissions. + # + # Additionally, Unbound may need to access /dev/urandom (for entropy). + # How to do this is specific to your OS. + # + # If you give "" no chroot is performed. The path must not end in a /. + # chroot: "/etc/unbound" + + # if given, user privileges are dropped (after binding port), + # and the given username is assumed. 
Default is user "unbound". + # If you give "" no privileges are dropped. + # username: "unbound" + + # the working directory. The relative files in this config are + # relative to this directory. If you give "" the working directory + # is not changed. + # If you give a server: directory: dir before include: file statements + # then those includes can be relative to the working directory. + # directory: "/etc/unbound" + + # the log file, "" means log to stderr. + # Use of this option sets use-syslog to "no". + # logfile: "" + + # Log to syslog(3) if yes. The log facility LOG_DAEMON is used to + # log to. If yes, it overrides the logfile. + # use-syslog: yes + + # Log identity to report. if empty, defaults to the name of argv[0] + # (usually "unbound"). + # log-identity: "" + + # print UTC timestamp in ascii to logfile, default is epoch in seconds. + # log-time-ascii: no + + # print one line with time, IP, name, type, class for every query. + # log-queries: no + + # print one line per reply, with time, IP, name, type, class, rcode, + # timetoresolve, fromcache and responsesize. + # log-replies: no + + # log with tag 'query' and 'reply' instead of 'info' for + # filtering log-queries and log-replies from the log. + # log-tag-queryreply: no + + # log the local-zone actions, like local-zone type inform is enabled + # also for the other local zone types. + # log-local-actions: no + + # print log lines that say why queries return SERVFAIL to clients. + # log-servfail: no + + # the pid file. Can be an absolute path outside of chroot/work dir. + # pidfile: "/run/unbound.pid" + + # file to read root hints from. + # get one from https://www.internic.net/domain/named.cache + # root-hints: "" + + # enable to not answer id.server and hostname.bind queries. + # hide-identity: no + + # enable to not answer version.server and version.bind queries. + # hide-version: no + + # enable to not answer trustanchor.unbound queries. + # hide-trustanchor: no + + # enable to not set the User-Agent HTTP header. + # hide-http-user-agent: no + + # the identity to report. Leave "" or default to return hostname. + # identity: "" + + # the version to report. Leave "" or default to return package version. + # version: "" + + # NSID identity (hex string, or "ascii_somestring"). default disabled. + # nsid: "aabbccdd" + + # User-Agent HTTP header to use. Leave "" or default to use package name + # and version. + # http-user-agent: "" + + # the target fetch policy. + # series of integers describing the policy per dependency depth. + # The number of values in the list determines the maximum dependency + # depth the recursor will pursue before giving up. Each integer means: + # -1 : fetch all targets opportunistically, + # 0: fetch on demand, + # positive value: fetch that many targets opportunistically. + # Enclose the list of numbers between quotes (""). + # target-fetch-policy: "3 2 1 0 0" + + # Harden against very small EDNS buffer sizes. + # harden-short-bufsize: yes + + # Harden against unseemly large queries. + # harden-large-queries: no + + # Harden against out of zone rrsets, to avoid spoofing attempts. + # harden-glue: yes + + # Harden against receiving dnssec-stripped data. If you turn it + # off, failing to validate dnskey data for a trustanchor will + # trigger insecure mode for that zone (like without a trustanchor). + # Default on, which insists on dnssec data for trust-anchored zones. + # harden-dnssec-stripped: yes + + # Harden against queries that fall under dnssec-signed nxdomain names. 
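+ # (This uses cached, DNSSEC-proven NXDOMAIN answers to deny names below
+ # them, in the spirit of RFC 8020.)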
+ # harden-below-nxdomain: yes
+
+ # Harden the referral path by performing additional queries for
+ # infrastructure data. Validates the replies (if possible).
+ # Default off, because the lookups burden the server. Experimental
+ # implementation of draft-wijngaards-dnsext-resolver-side-mitigation.
+ # harden-referral-path: no
+
+ # Harden against algorithm downgrade when multiple algorithms are
+ # advertised in the DS record. If no, allows the weakest algorithm
+ # to validate the zone.
+ # harden-algo-downgrade: no
+
+ # Send the minimum amount of information to upstream servers to enhance
+ # privacy. Only send the minimum required labels of the QNAME and set QTYPE
+ # to A when possible.
+ # qname-minimisation: yes
+
+ # QNAME minimisation in strict mode. Do not fall back to sending the full
+ # QNAME to potentially broken nameservers. A lot of domains will not be
+ # resolvable when this option is enabled.
+ # This option only has effect when qname-minimisation is enabled.
+ # qname-minimisation-strict: no
+
+ # Aggressive NSEC uses the DNSSEC NSEC chain to synthesize NXDOMAIN
+ # and other denials, using information from previous NXDOMAIN answers.
+ # aggressive-nsec: yes
+
+ # Use 0x20-encoded random bits in the query to foil spoof attempts.
+ # This feature is an experimental implementation of draft dns-0x20.
+ # use-caps-for-id: no
+
+ # Domains (and domains in them) without support for dns-0x20, for which
+ # the fallback fails because they keep sending different answers.
+ # caps-exempt: "licdn.com"
+ # caps-exempt: "senderbase.org"
+
+ # Enforce privacy of these addresses. Strips them away from answers.
+ # It may cause DNSSEC validation to additionally mark it as bogus.
+ # Protects against 'DNS Rebinding' (uses browser as network proxy).
+ # Only 'private-domain' and 'local-data' names are allowed to have
+ # these private addresses. No default.
+ # private-address: 10.0.0.0/8
+ # private-address: 172.16.0.0/12
+ # private-address: 192.168.0.0/16
+ # private-address: 169.254.0.0/16
+ # private-address: fd00::/8
+ # private-address: fe80::/10
+ # private-address: ::ffff:0:0/96
+
+ # Allow the domain (and its subdomains) to contain private addresses.
+ # local-data statements are allowed to contain private addresses too.
+ # private-domain: "example.com"
+
+ # If nonzero, unwanted replies are not only reported in statistics,
+ # but also a running total is kept per thread. If it reaches the
+ # threshold, a warning is printed and a defensive action is taken:
+ # the cache is cleared to flush potential poison out of it.
+ # A suggested value is 10000000, the default is 0 (turned off).
+ # unwanted-reply-threshold: 0
+
+ # Do not query the following addresses. No DNS queries are sent there.
+ # List one address per entry. List classless netblocks with /size,
+ # do-not-query-address: 127.0.0.1/8
+ # do-not-query-address: ::1
+
+ # if yes, the above default do-not-query-address entries are present.
+ # if no, localhost can be queried (for testing and debugging).
+ # do-not-query-localhost: yes
+
+ # if yes, perform prefetching of almost expired message cache entries.
+ # prefetch: no
+
+ # if yes, perform key lookups adjacent to normal lookups.
+ # prefetch-key: no
+
+ # deny queries of type ANY with an empty response.
+ # deny-any: no
+
+ # if yes, Unbound rotates RRSet order in response.
+ # rrset-roundrobin: yes
+
+ # if yes, Unbound doesn't insert authority/additional sections
+ # into response messages when those sections are not required.
+ # minimal-responses: yes
+
+ # true to disable DNSSEC lameness check in iterator.
+ # disable-dnssec-lame-check: no
+
+ # module configuration of the server. A string with identifiers
+ # separated by spaces. Syntax: "[dns64] [validator] iterator"
+ # most modules have to be listed at the beginning of the line,
+ # except cachedb (just before iterator), and python (at the beginning,
+ # or, just before the iterator).
+ # module-config: "validator iterator"
+
+ # File with trusted keys, kept up-to-date using RFC5011 probes,
+ # initial file like trust-anchor-file, then it stores metadata.
+ # Use several entries, one per domain name, to track multiple zones.
+ #
+ # If you want to perform DNSSEC validation, run unbound-anchor before
+ # you start Unbound (i.e. in the system boot scripts).
+ # And then enable the auto-trust-anchor-file config item.
+ # Please note usage of unbound-anchor root anchor is at your own risk
+ # and under the terms of our LICENSE (see that file in the source).
+ # auto-trust-anchor-file: "/etc/trusted-key.key"
+
+ # trust anchor signaling sends a RFC8145 key tag query after priming.
+ # trust-anchor-signaling: yes
+
+ # Root key trust anchor sentinel (draft-ietf-dnsop-kskroll-sentinel)
+ # root-key-sentinel: yes
+
+ # File with trusted keys for validation. Specify more than one file
+ # with several entries, one file per entry.
+ # Zone file format, with DS and DNSKEY entries.
+ # Note this gets out of date, use auto-trust-anchor-file please.
+ trust-anchor-file: "/etc/unbound/trusted-key.key"
+
+ # Trusted key for validation. DS or DNSKEY. specify the RR on a
+ # single line, surrounded by "". TTL is ignored. class is IN default.
+ # Note this gets out of date, use auto-trust-anchor-file please.
+ # (These examples are from August 2007 and may not be valid anymore).
+ # trust-anchor: "nlnetlabs.nl. DNSKEY 257 3 5 AQPzzTWMz8qSWIQlfRnPckx2BiVmkVN6LPupO3mbz7FhLSnm26n6iG9N Lby97Ji453aWZY3M5/xJBSOS2vWtco2t8C0+xeO1bc/d6ZTy32DHchpW 6rDH1vp86Ll+ha0tmwyy9QP7y2bVw5zSbFCrefk8qCUBgfHm9bHzMG1U BYtEIQ=="
+ # trust-anchor: "jelte.nlnetlabs.nl. DS 42860 5 1 14D739EB566D2B1A5E216A0BA4D17FA9B038BE4A"
+
+ # File with trusted keys for validation. Specify more than one file
+ # with several entries, one file per entry. Like trust-anchor-file
+ # but has a different file format. Format is BIND-9 style format,
+ # the trusted-keys { name flag proto algo "key"; }; clauses are read.
+ # you need external update procedures to track changes in keys.
+ # trusted-keys-file: ""
+
+ # Ignore chain of trust. Domain is treated as insecure.
+ # domain-insecure: "example.com"
+
+ # Override the date for validation with a specific fixed date.
+ # Do not set this unless you are debugging signature inception
+ # and expiration. "" or "0" turns the feature off. -1 ignores date.
+ # val-override-date: ""
+
+ # The time to live for bogus data, rrsets and messages. This avoids
+ # some of the revalidation, until the time interval expires. in secs.
+ # val-bogus-ttl: 60
+
+ # The signature inception and expiration dates are allowed to be off
+ # by 10% of the signature lifetime (expir-incep) from our local clock.
+ # This leeway is capped with a minimum and a maximum. In seconds.
+ # val-sig-skew-min: 3600
+ # val-sig-skew-max: 86400
+
+ # The maximum number of times the validator should restart validation
+ # with another authority in case of failed validation.
+ # val-max-restart: 5
+
+ # Should additional section of secure message also be kept clean of
+ # unsecure data.
Useful to shield the users of this validator from
+ # potential bogus data in the additional section. All unsigned data
+ # in the additional section is removed from secure messages.
+ # val-clean-additional: yes
+
+ # Turn permissive mode on to permit bogus messages. Thus, messages
+ # for which security checks failed will be returned to clients,
+ # instead of SERVFAIL. It still performs the security checks, which
+ # result in interesting log files and possibly the AD bit in
+ # replies if the message is found secure. The default is off.
+ # val-permissive-mode: no
+
+ # Ignore the CD flag in incoming queries and refuse to return bogus
+ # data to those clients.
+ # Enable it if the only clients of Unbound are legacy servers (w2008)
+ # that set CD but cannot validate themselves.
+ # ignore-cd-flag: no
+
+ # Serve expired responses from cache, with serve-expired-reply-ttl in
+ # the response, and then attempt to fetch the data afresh.
+ # serve-expired: no
+ #
+ # Limit serving of expired responses to the configured number of
+ # seconds after expiration. 0 disables the limit.
+ # serve-expired-ttl: 0
+ #
+ # Set the TTL of expired records to the serve-expired-ttl value after a
+ # failed attempt to retrieve the record from upstream. This makes sure
+ # that the expired records will be served as long as there are queries
+ # for them.
+ # serve-expired-ttl-reset: no
+ #
+ # TTL value to use when replying with expired data.
+ # serve-expired-reply-ttl: 30
+ #
+ # Time in milliseconds before replying to the client with expired data.
+ # This essentially enables the serve-stale behavior as specified in
+ # RFC 8767 that first tries to resolve before
+ # immediately responding with expired data. 0 disables this behavior.
+ # A recommended value is 1800.
+ # serve-expired-client-timeout: 0
+
+ # Return the original TTL as received from the upstream name server rather
+ # than the decrementing TTL as stored in the cache. Enabling this feature
+ # does not impact cache expiry; it only changes the TTL Unbound embeds in
+ # responses to queries. Note that enabling this feature implicitly disables
+ # enforcement of the configured minimum and maximum TTL.
+ # serve-original-ttl: no
+
+ # Have the validator log failed validations for your diagnosis.
+ # 0: off. 1: A line per failed user query. 2: With reason and bad IP.
+ # val-log-level: 0
+
+ # It is possible to configure NSEC3 maximum iteration counts per
+ # keysize. Keep this table very short, as linear search is done.
+ # A message with an NSEC3 with larger count is marked insecure.
+ # List in ascending order the keysize and count values.
+ # val-nsec3-keysize-iterations: "1024 150 2048 150 4096 150"
+
+ # if enabled, ZONEMD verification failures do not block the zone.
+ # zonemd-permissive-mode: no
+
+ # instruct the auto-trust-anchor-file probing to add anchors after ttl.
+ # add-holddown: 2592000 # 30 days
+
+ # instruct the auto-trust-anchor-file probing to del anchors after ttl.
+ # del-holddown: 2592000 # 30 days
+
+ # auto-trust-anchor-file probing removes missing anchors after ttl.
+ # If the value 0 is given, missing anchors are not removed.
+ # keep-missing: 31622400 # 366 days
+
+ # debug option that allows very small holddown times for key rollover,
+ # otherwise the RFC mandates probe intervals must be at least 1 hour.
+ # permit-small-holddown: no
+
+ # the amount of memory to use for the key cache.
+ # plain value in bytes or you can append k, m or G. default is "4Mb".
+ # key-cache-size: 4m
+
+ # the number of slabs to use for the key cache.
+ # the number of slabs must be a power of 2. + # more slabs reduce lock contention, but fragment memory usage. + # key-cache-slabs: 4 + + # the amount of memory to use for the negative cache. + # plain value in bytes or you can append k, m or G. default is "1Mb". + # neg-cache-size: 1m + + # By default, for a number of zones a small default 'nothing here' + # reply is built-in. Query traffic is thus blocked. If you + # wish to serve such zone you can unblock them by uncommenting one + # of the nodefault statements below. + # You may also have to use domain-insecure: zone to make DNSSEC work, + # unless you have your own trust anchors for this zone. + # local-zone: "localhost." nodefault + # local-zone: "127.in-addr.arpa." nodefault + # local-zone: "1.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.ip6.arpa." nodefault + # local-zone: "home.arpa." nodefault + # local-zone: "onion." nodefault + # local-zone: "test." nodefault + # local-zone: "invalid." nodefault + # local-zone: "10.in-addr.arpa." nodefault + # local-zone: "16.172.in-addr.arpa." nodefault + # local-zone: "17.172.in-addr.arpa." nodefault + # local-zone: "18.172.in-addr.arpa." nodefault + # local-zone: "19.172.in-addr.arpa." nodefault + # local-zone: "20.172.in-addr.arpa." nodefault + # local-zone: "21.172.in-addr.arpa." nodefault + # local-zone: "22.172.in-addr.arpa." nodefault + # local-zone: "23.172.in-addr.arpa." nodefault + # local-zone: "24.172.in-addr.arpa." nodefault + # local-zone: "25.172.in-addr.arpa." nodefault + # local-zone: "26.172.in-addr.arpa." nodefault + # local-zone: "27.172.in-addr.arpa." nodefault + # local-zone: "28.172.in-addr.arpa." nodefault + # local-zone: "29.172.in-addr.arpa." nodefault + # local-zone: "30.172.in-addr.arpa." nodefault + # local-zone: "31.172.in-addr.arpa." nodefault + # local-zone: "168.192.in-addr.arpa." nodefault + # local-zone: "0.in-addr.arpa." nodefault + # local-zone: "254.169.in-addr.arpa." nodefault + # local-zone: "2.0.192.in-addr.arpa." nodefault + # local-zone: "100.51.198.in-addr.arpa." nodefault + # local-zone: "113.0.203.in-addr.arpa." nodefault + # local-zone: "255.255.255.255.in-addr.arpa." nodefault + # local-zone: "0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.ip6.arpa." nodefault + # local-zone: "d.f.ip6.arpa." nodefault + # local-zone: "8.e.f.ip6.arpa." nodefault + # local-zone: "9.e.f.ip6.arpa." nodefault + # local-zone: "a.e.f.ip6.arpa." nodefault + # local-zone: "b.e.f.ip6.arpa." nodefault + # local-zone: "8.b.d.0.1.0.0.2.ip6.arpa." nodefault + # And for 64.100.in-addr.arpa. to 127.100.in-addr.arpa. + + # Add example.com into ipset + # local-zone: "example.com" ipset + + # If Unbound is running service for the local host then it is useful + # to perform lan-wide lookups to the upstream, and unblock the + # long list of local-zones above. If this Unbound is a dns server + # for a network of computers, disabled is better and stops information + # leakage of local lan information. + # unblock-lan-zones: no + + # The insecure-lan-zones option disables validation for + # these zones, as if they were all listed as domain-insecure. + # insecure-lan-zones: no + + # a number of locally served zones can be configured. + # local-zone: + # local-data: "" + # o deny serves local data (if any), else, drops queries. + # o refuse serves local data (if any), else, replies with error. + # o static serves local data, else, nxdomain or nodata answer. 
+ # o transparent gives local data, but resolves normally for other names
+ # o redirect serves the zone data for any subdomain in the zone.
+ # o nodefault can be used to normally resolve AS112 zones.
+ # o typetransparent resolves normally for other types and other names
+ # o inform acts like transparent, but logs client IP address
+ # o inform_deny drops queries and logs client IP address
+ # o inform_redirect redirects queries and logs client IP address
+ # o always_transparent, always_refuse, always_nxdomain, always_nodata,
+ # always_deny resolve in that way but ignore local data for
+ # that name
+ # o always_null returns 0.0.0.0 or ::0 for any name in the zone.
+ # o noview breaks out of that view towards global local-zones.
+ #
+ # defaults are localhost address, reverse for 127.0.0.1 and ::1
+ # and nxdomain for AS112 zones. If you configure one of these zones
+ # the default content is omitted, or you can omit it with 'nodefault'.
+ #
+ # If you configure local-data without specifying local-zone, by
+ # default a transparent local-zone is created for the data.
+ #
+ # You can add locally served data with
+ # local-zone: "local." static
+ # local-data: "mycomputer.local. IN A 192.0.2.51"
+ # local-data: 'mytext.local TXT "content of text record"'
+ #
+ # You can override certain queries with
+ # local-data: "adserver.example.com A 127.0.0.1"
+ #
+ # You can redirect a domain to a fixed address with
+ # (this makes example.com, www.example.com, etc, all go to 192.0.2.3)
+ # local-zone: "example.com" redirect
+ # local-data: "example.com A 192.0.2.3"
+ #
+ # Shorthand to make PTR records, "IPv4 name" or "IPv6 name".
+ # You can also add PTR records using local-data directly, but then
+ # you need to do the reverse notation yourself.
+ # local-data-ptr: "192.0.2.3 www.example.com"
+
+ # tag a localzone with a list of tag names (in "" with spaces between)
+ # local-zone-tag: "example.com" "tag2 tag3"
+
+ # add a netblock specific override to a localzone, with zone type
+ # local-zone-override: "example.com" 192.0.2.0/24 refuse
+
+ # service clients over TLS (on the TCP sockets) with plain DNS inside
+ # the TLS stream, and over HTTPS using HTTP/2 as specified in RFC8484.
+ # Give the certificate to use and private key.
+ # default is "" (disabled). requires restart to take effect.
+ # tls-service-key: "path/to/privatekeyfile.key"
+ # tls-service-pem: "path/to/publiccertfile.pem"
+ # tls-port: 853
+ # https-port: 443
+
+ # cipher setting for TLSv1.2
+ # tls-ciphers: "DHE-RSA-AES256-GCM-SHA384:DHE-RSA-AES128-GCM-SHA256:ECDHE-RSA-AES256-GCM-SHA384:ECDHE-RSA-AES128-GCM-SHA256:DHE-RSA-AES256-SHA256:DHE-RSA-AES128-SHA256:ECDHE-RSA-AES256-SHA384:ECDHE-RSA-AES128-SHA256"
+ # cipher setting for TLSv1.3
+ # tls-ciphersuites: "TLS_AES_128_GCM_SHA256:TLS_AES_128_CCM_8_SHA256:TLS_AES_128_CCM_SHA256:TLS_AES_256_GCM_SHA384:TLS_CHACHA20_POLY1305_SHA256"
+
+ # Pad responses to padded queries received over TLS
+ # pad-responses: yes
+
+ # Padded responses will be padded to the closest multiple of this size.
+ # pad-responses-block-size: 468
+
+ # Use the SNI extension for TLS connections. Default is yes.
+ # Changing the value requires a reload.
+ # tls-use-sni: yes
+
+ # Add the secret file for TLS Session Ticket.
+ # Secret file must be 80 bytes of random data.
+ # The first key is used to encrypt and decrypt TLS session tickets.
+ # The other keys are used to decrypt only.
+ # requires restart to take effect.
+ # tls-session-ticket-keys: "path/to/secret_file1"
+ # tls-session-ticket-keys: "path/to/secret_file2"
+
+ # request upstream over TLS (with plain DNS inside the TLS stream).
+ # Default is no. Can be turned on and off with unbound-control.
+ # tls-upstream: no
+
+ # Certificates used to authenticate connections made upstream.
+ # tls-cert-bundle: ""
+
+ # Add system certs to the cert bundle, from the Windows Cert Store
+ # tls-win-cert: no
+ # and on other systems, the default openssl certificates
+ # tls-system-cert: no
+
+ # Pad queries over TLS upstreams
+ # pad-queries: yes
+
+ # Padded queries will be padded to the closest multiple of this size.
+ # pad-queries-block-size: 128
+
+ # Also serve tls on these port numbers (eg. 443, ...), by listing
+ # tls-additional-port: portno for each of the port numbers.
+
+ # HTTP endpoint to provide DNS-over-HTTPS service on.
+ # http-endpoint: "/dns-query"
+
+ # HTTP/2 SETTINGS_MAX_CONCURRENT_STREAMS value to use.
+ # http-max-streams: 100
+
+ # Maximum number of bytes used for all HTTP/2 query buffers.
+ # http-query-buffer-size: 4m
+
+ # Maximum number of bytes used for all HTTP/2 response buffers.
+ # http-response-buffer-size: 4m
+
+ # Set TCP_NODELAY socket option on sockets used for DNS-over-HTTPS
+ # service.
+ # http-nodelay: yes
+
+ # Disable TLS for DNS-over-HTTP downstream service.
+ # http-notls-downstream: no
+
+ # The interfaces that use these listed port numbers will support and
+ # expect PROXYv2. For UDP and TCP/TLS interfaces.
+ # proxy-protocol-port: portno for each of the port numbers.
+
+ # DNS64 prefix. Must be specified when DNS64 is in use.
+ # Enable dns64 in module-config. Used to synthesize IPv6 from IPv4.
+ # dns64-prefix: 64:ff9b::0/96
+
+ # DNS64 ignore AAAA records for these domains and use A instead.
+ # dns64-ignore-aaaa: "example.com"
+
+ # ratelimit for uncached, new queries; this limits recursion effort.
+ # ratelimiting is experimental, and may help against randomqueryflood.
+ # if 0 (default) it is disabled, otherwise it states the qps allowed per zone.
+ # ratelimit: 0
+
+ # ratelimits are tracked in a cache, size in bytes of cache (or k,m).
+ # ratelimit-size: 4m
+ # ratelimit cache slabs, reduces lock contention if equal to cpucount.
+ # ratelimit-slabs: 4
+
+ # 0 blocks when ratelimited, otherwise let 1/xth traffic through
+ # ratelimit-factor: 10
+
+ # Aggressive rate limit when the limit is reached and until demand has
+ # decreased in a 2 second rate window.
+ # ratelimit-backoff: no
+
+ # override the ratelimit for a specific domain name.
+ # give this setting multiple times to have multiple overrides.
+ # ratelimit-for-domain: example.com 1000
+ # override the ratelimits for all domains below a domain name
+ # can give this multiple times, the name closest to the zone is used.
+ # ratelimit-below-domain: com 1000
+
+ # global query ratelimit for all ip addresses.
+ # feature is experimental.
+ # if 0 (default) it is disabled, otherwise it states the qps allowed per ip address
+ # ip-ratelimit: 0
+
+ # ip ratelimits are tracked in a cache, size in bytes of cache (or k,m).
+ # ip-ratelimit-size: 4m
+ # ip ratelimit cache slabs, reduces lock contention if equal to cpucount.
+ # ip-ratelimit-slabs: 4
+
+ # 0 blocks when ip is ratelimited, otherwise let 1/xth traffic through
+ # ip-ratelimit-factor: 10
+
+ # Aggressive rate limit when the limit is reached and until demand has
+ # decreased in a 2 second rate window.
+ # ip-ratelimit-backoff: no + + # Limit the number of simultaneous connections from a netblock + # tcp-connection-limit: 192.0.2.0/24 12 + + # select from the fastest servers this many times out of 1000. 0 means + # the fast server select is disabled. prefetches are not sped up. + # fast-server-permil: 0 + # the number of servers that will be used in the fast server selection. + # fast-server-num: 3 + + # Enable to attach Extended DNS Error codes (RFC8914) to responses. + # ede: no + + # Enable to attach an Extended DNS Error (RFC8914) Code 3 - Stale + # Answer as EDNS0 option to expired responses. + # Note that the ede option above needs to be enabled for this to work. + # ede-serve-expired: no + + # Specific options for ipsecmod. Unbound needs to be configured with + # --enable-ipsecmod for these to take effect. + # + # Enable or disable ipsecmod (it still needs to be defined in + # module-config above). Can be used when ipsecmod needs to be + # enabled/disabled via remote-control(below). + # ipsecmod-enabled: yes + # + # Path to executable external hook. It must be defined when ipsecmod is + # listed in module-config (above). + # ipsecmod-hook: "./my_executable" + # + # When enabled Unbound will reply with SERVFAIL if the return value of + # the ipsecmod-hook is not 0. + # ipsecmod-strict: no + # + # Maximum time to live (TTL) for cached A/AAAA records with IPSECKEY. + # ipsecmod-max-ttl: 3600 + # + # Reply with A/AAAA even if the relevant IPSECKEY is bogus. Mainly used for + # testing. + # ipsecmod-ignore-bogus: no + # + # Domains for which ipsecmod will be triggered. If not defined (default) + # all domains are treated as being allowed. + # ipsecmod-allow: "example.com" + # ipsecmod-allow: "nlnetlabs.nl" + + # Timeout for REUSE entries in milliseconds. + # tcp-reuse-timeout: 60000 + # Max number of queries on a reuse connection. + # max-reuse-tcp-queries: 200 + # Timeout in milliseconds for TCP queries to auth servers. + # tcp-auth-query-timeout: 3000 + + +# Python config section. To enable: +# o use --with-pythonmodule to configure before compiling. +# o list python in the module-config string (above) to enable. +# It can be at the start, where it gets validated results, or just before +# the iterator, to process before DNSSEC validation. +# o and give a python-script to run. +python: + # Script file to load + # python-script: "/etc/unbound/ubmodule-tst.py" + +# Dynamic library config section. To enable: +# o use --with-dynlibmodule to configure before compiling. +# o list dynlib in the module-config string (above) to enable. +# It can be placed anywhere; the dynlib module is only a very thin wrapper +# to load modules dynamically. +# o and give a dynlib-file to run. If more than one dynlib entry is listed in +# the module-config then you need one dynlib-file per instance. +dynlib: + # Script file to load + # dynlib-file: "/etc/unbound/dynlib.so" + +# Remote control config section. +remote-control: + # Enable remote control with unbound-control(8) here. + # set up the keys and certificates with unbound-control-setup. + # control-enable: no + + # what interfaces are listened to for remote control. + # give 0.0.0.0 and ::0 to listen to all interfaces. + # set to an absolute path to use a unix local name pipe, certificates + # are not used for that, so key and cert files need not be present. + # control-interface: 127.0.0.1 + # control-interface: ::1 + + # port number for remote control operations.
+ # control-port: 8953 + + # for localhost, you can disable use of TLS by setting this to "no" + # For local sockets this option is ignored, and TLS is not used. + # control-use-cert: "yes" + + # Unbound server key file. + # server-key-file: "/etc/unbound/unbound_server.key" + + # Unbound server certificate file. + # server-cert-file: "/etc/unbound/unbound_server.pem" + + # unbound-control key file. + # control-key-file: "/etc/unbound/unbound_control.key" + + # unbound-control certificate file. + # control-cert-file: "/etc/unbound/unbound_control.pem" + +# Stub zones. +# Create entries like below, to make all queries for 'example.com' and +# 'example.org' go to the given list of nameservers. list zero or more +# nameservers by hostname or by ipaddress. If you set stub-prime to yes, +# the list is treated as priming hints (default is no). +# With stub-first yes, it attempts without the stub if it fails. +# Consider adding domain-insecure: name and local-zone: name nodefault +# to the server: section if the stub is a locally served zone. +# stub-zone: +# name: "example.com" +# stub-addr: 192.0.2.68 +# stub-prime: no +# stub-first: no +# stub-tcp-upstream: no +# stub-tls-upstream: no +# stub-no-cache: no +# stub-zone: +# name: "example.org" +# stub-host: ns.example.com. + +# Forward zones +# Create entries like below, to make all queries for 'example.com' and +# 'example.org' go to the given list of servers. These servers have to handle +# recursion to other nameservers. List zero or more nameservers by hostname +# or by ipaddress. Use an entry with name "." to forward all queries. +# If you enable forward-first, it attempts without the forward if it fails. +# forward-zone: +# name: "example.com" +# forward-addr: 192.0.2.68 +# forward-addr: 192.0.2.73@5355 # forward to port 5355. +# forward-first: no +# forward-tcp-upstream: no +# forward-tls-upstream: no +# forward-no-cache: no +# forward-zone: +# name: "example.org" +# forward-host: fwd.example.com + +# Authority zones +# The data for these zones is kept locally, from a file or downloaded. +# The data can be served to downstream clients, or used instead of the +# upstream (which saves a lookup to the upstream). The first example +# has a copy of the root for local usage. The second serves example.org +# authoritatively. zonefile: reads from file (and writes to it if you also +# download it), primary: fetches with AXFR and IXFR, or url to zonefile. +# With allow-notify: you can give additional (apart from primaries and urls) +# sources of notifies. +# auth-zone: +# name: "." 
+# primary: 199.9.14.201 # b.root-servers.net +# primary: 192.33.4.12 # c.root-servers.net +# primary: 199.7.91.13 # d.root-servers.net +# primary: 192.5.5.241 # f.root-servers.net +# primary: 192.112.36.4 # g.root-servers.net +# primary: 193.0.14.129 # k.root-servers.net +# primary: 192.0.47.132 # xfr.cjr.dns.icann.org +# primary: 192.0.32.132 # xfr.lax.dns.icann.org +# primary: 2001:500:200::b # b.root-servers.net +# primary: 2001:500:2::c # c.root-servers.net +# primary: 2001:500:2d::d # d.root-servers.net +# primary: 2001:500:2f::f # f.root-servers.net +# primary: 2001:500:12::d0d # g.root-servers.net +# primary: 2001:7fd::1 # k.root-servers.net +# primary: 2620:0:2830:202::132 # xfr.cjr.dns.icann.org +# primary: 2620:0:2d0:202::132 # xfr.lax.dns.icann.org +# fallback-enabled: yes +# for-downstream: no +# for-upstream: yes +# auth-zone: +# name: "example.org" +# for-downstream: yes +# for-upstream: yes +# zonemd-check: no +# zonemd-reject-absence: no +# zonefile: "example.org.zone" + +# Views +# Create named views. Name must be unique. Map views to requests using +# the access-control-view option. Views can contain zero or more local-zone +# and local-data options. Options from matching views will override global +# options. Global options will be used if no matching view is found. +# With view-first yes, it will try to answer using the global local-zone and +# local-data elements if there is no view specific match. +# view: +# name: "viewname" +# local-zone: "example.com" redirect +# local-data: "example.com A 192.0.2.3" +# local-data-ptr: "192.0.2.3 www.example.com" +# view-first: no +# view: +# name: "anotherview" +# local-zone: "example.com" refuse + +# DNSCrypt +# To enable, use --enable-dnscrypt to configure before compiling. +# Caveats: +# 1. the keys/certs cannot be produced by Unbound. You can use dnscrypt-wrapper +# for this: https://github.com/cofyc/dnscrypt-wrapper/blob/master/README.md#usage +# 2. dnscrypt channel attaches to an interface. you MUST set interfaces to +# listen on `dnscrypt-port` with the following snippet: +# server: +# interface: 0.0.0.0@443 +# interface: ::0@443 +# +# Finally, `dnscrypt` config has its own section. +# dnscrypt: +# dnscrypt-enable: yes +# dnscrypt-port: 443 +# dnscrypt-provider: 2.dnscrypt-cert.example.com. +# dnscrypt-secret-key: /path/unbound-conf/keys1/1.key +# dnscrypt-secret-key: /path/unbound-conf/keys2/1.key +# dnscrypt-provider-cert: /path/unbound-conf/keys1/1.cert +# dnscrypt-provider-cert: /path/unbound-conf/keys2/1.cert + +# CacheDB +# External backend DB as auxiliary cache. +# To enable, use --enable-cachedb to configure before compiling. +# Specify the backend name +# (default is "testframe", which has no use other than for debugging and +# testing) and backend-specific options. The 'cachedb' module must be +# included in module-config, just before the iterator module. +# cachedb: +# backend: "testframe" +# # secret seed string to calculate hashed keys +# secret-seed: "default" +# +# # For "redis" backend: +# # (to enable, use --with-libhiredis to configure before compiling) +# # redis server's IP address or host name +# redis-server-host: 127.0.0.1 +# # redis server's TCP port +# redis-server-port: 6379 +# # timeout (in ms) for communication with the redis server +# redis-timeout: 100 +# # set timeout on redis records based on DNS response TTL +# redis-expire-records: no + +# IPSet +# Add specified domains into a set via ipset.
+# To enable: +# o use --enable-ipset to configure before compiling; +# o Unbound then needs to run as root user. +# ipset: +# # set name for ip v4 addresses +# name-v4: "list-v4" +# # set name for ip v6 addresses +# name-v6: "list-v6" +# + +# Dnstap logging support, if compiled in by using --enable-dnstap to configure. +# To enable, set the dnstap-enable to yes and also some of +# dnstap-log-..-messages to yes. And select an upstream log destination, by +# socket path, TCP or TLS destination. +# dnstap: +# dnstap-enable: no +# # if set to yes, frame streams will be used in bidirectional mode +# dnstap-bidirectional: yes +# dnstap-socket-path: "/etc/unbound/dnstap.sock" +# # if "" use the unix socket in dnstap-socket-path, otherwise, +# # set it to "IPaddress[@port]" of the destination. +# dnstap-ip: "" +# # set to yes if you want to use TLS to dnstap-ip, no for TCP. +# dnstap-tls: yes +# # name for authenticating the upstream server. or "" disabled. +# dnstap-tls-server-name: "" +# # if "", it uses the cert bundle from the main Unbound config. +# dnstap-tls-cert-bundle: "" +# # key file for client authentication, or "" disabled. +# dnstap-tls-client-key-file: "" +# # cert file for client authentication, or "" disabled. +# dnstap-tls-client-cert-file: "" +# dnstap-send-identity: no +# dnstap-send-version: no +# # if "" it uses the hostname. +# dnstap-identity: "" +# # if "" it uses the package version. +# dnstap-version: "" +# dnstap-log-resolver-query-messages: no +# dnstap-log-resolver-response-messages: no +# dnstap-log-client-query-messages: no +# dnstap-log-client-response-messages: no +# dnstap-log-forwarder-query-messages: no +# dnstap-log-forwarder-response-messages: no + +# Response Policy Zones +# RPZ policies. Applied in order of configuration. QNAME, Response IP +# Address, nsdname, nsip and clientip triggers are supported. Supported +# actions are: NXDOMAIN, NODATA, PASSTHRU, DROP, Local Data +# and tcp-only. Policies can be loaded from a file, or using zone +# transfer, or using HTTP. The respip module needs to be added +# to the module-config, e.g.: module-config: "respip validator iterator". +# rpz: +# name: "rpz.example.com" +# zonefile: "rpz.example.com" +# primary: 192.0.2.0 +# allow-notify: 192.0.2.0/32 +# url: http://www.example.com/rpz.example.org.zone +# rpz-action-override: cname +# rpz-cname-override: www.example.org +# rpz-log: yes +# rpz-log-name: "example policy" +# rpz-signal-nxdomain-ra: no +# for-downstream: no +# tags: "example" + + +#} diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/unbound/vars/archlinux.yml b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/unbound/vars/archlinux.yml new file mode 100644 index 0000000..6b3f669 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/unbound/vars/archlinux.yml @@ -0,0 +1,9 @@ +--- + +unbound_dependencies: + - dnsutils + # - iproute2 + +unbound_environment_file: /etc/default/unbound + +... diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/unbound/vars/debian.yml b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/unbound/vars/debian.yml new file mode 100644 index 0000000..639408d --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/unbound/vars/debian.yml @@ -0,0 +1,10 @@ +--- + +unbound_dependencies: + - dnsutils + # - iproute2 + - unbound-anchor + +unbound_environment_file: /etc/default/unbound + +...
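The archlinux.yml and debian.yml files above carry the distribution-specific bits (package list, environment-file path), while vars/main.yml in the next hunk holds the generic defaults. For orientation, here is a minimal sketch of how roles commonly select such a file at runtime using the first_found lookup; the task name, file order, and paths below are illustrative assumptions, not taken from this role's task files:

- name: include OS specific variables  # hypothetical task, not part of this diff
  ansible.builtin.include_vars: "{{ lookup('ansible.builtin.first_found', params) }}"
  vars:
    params:
      files:
        - "{{ ansible_distribution | lower }}.yml"  # e.g. debian.yml, archlinux.yml
        - main.yml                                  # generic fallback
      paths:
        - "{{ role_path }}/vars"

Variables loaded via include_vars take precedence over both role defaults and the auto-loaded vars/main.yml, which is why a per-OS file such as debian.yml can redefine unbound_dependencies (adding unbound-anchor) without touching the generic values.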
diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/unbound/vars/main.yml b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/unbound/vars/main.yml new file mode 100644 index 0000000..ff3d087 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/roles/unbound/vars/main.yml @@ -0,0 +1,66 @@ +--- + +unbound_dependencies: + - dnsutils + +unbound_environment_file: /etc/default/unbound + +unbound_config_server_defaults: + verbosity: 2 + statistics-interval: 240 + use-syslog: "yes" + log-queries: "yes" + logfile: "" # /var/log/unbound.log + num-threads: 2 + directory: "/etc/unbound" + username: "unbound" + interface: 0.0.0.0 + do-ip4: 'yes' + do-ip6: 'no' + do-udp: 'yes' + do-tcp: 'yes' + access-control: + - '127.0.0.0/8 allow' + cache-min-ttl: 5 + cache-max-negative-ttl: 60 + root-hints: "/etc/unbound/root.hints" + hide-identity: 'yes' + hide-version: 'yes' + prefetch: 'yes' + max-udp-size: 4096 + msg-buffer-size: 65552 + unwanted-reply-threshold: 10000 + ipsecmod-enabled: 'no' + # trust-anchor-signaling: 'yes' + # trust-anchor-file: "" + auto-trust-anchor-file: "/var/lib/unbound/root.key" + qname-minimisation: 'yes' + +unbound_config_stub_zone_defaults: {} + +unbound_config_forward_zone_defaults: + name: "." + # censorship-free and log-free resolvers with DNSSEC support: + forward_addrs: + - 84.200.69.80 # DNS Watch + - 84.200.70.40 # DNS Watch + - 77.109.148.136 # Xiala.net + - 77.109.148.137 # Xiala.net + - 91.239.100.100 # censurfridns.dk + - 89.233.43.71 # censurfridns.dk + +unbound_config_remote_control_defaults: + server-key-file: "{{ unbound_certs.server.key_file }}" + server-cert-file: "{{ unbound_certs.server.cert_file }}" + control-key-file: "{{ unbound_certs.control.key_file }}" + control-cert-file: "{{ unbound_certs.control.cert_file }}" + +unbound_config_cachedb_defaults: {} + +unbound_certs: + server: + key_file: "{{ unbound_conf_dir }}/unbound_server.key" + cert_file: "{{ unbound_conf_dir }}/unbound_server.pem" + control: + key_file: "{{ unbound_conf_dir }}/unbound_control.key" + cert_file: "{{ unbound_conf_dir }}/unbound_control.pem" diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/test-requirements.txt b/ansible/playbooks/collections/ansible_collections/bodsch/dns/test-requirements.txt new file mode 100644 index 0000000..4b5c719 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/test-requirements.txt @@ -0,0 +1,13 @@ +ansible-lint +docker +dnspython +flake8 +molecule +molecule-plugins[docker] +netaddr +pytest-testinfra +tox +tox-gh-actions +yamllint +# collection specific +dnspython diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/dns/tox.ini b/ansible/playbooks/collections/ansible_collections/bodsch/dns/tox.ini new file mode 100644 index 0000000..1dd6dfe --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/dns/tox.ini @@ -0,0 +1,46 @@ +[tox] +ignore_basepython_conflict = True +skip_missing_interpreters = True + +minversion = 3.25 +toxworkdir = /tmp/.tox/ + +skipsdist = true + +[testenv] +passenv = * + +# allowlist_externals = +# /usr/bin/find +# /bin/sh +# rm + +deps = + -r test-requirements.txt + ansible_4.10: ansible>=4.10,<4.11 + ansible_5.1: ansible>=5.1,<5.2 + ansible_5.2: ansible>=5.2,<5.3 + ansible_5.10: ansible>=5.10,<5.11 + ansible_6.1: ansible>=6.1,<6.2 + ansible_6.7: ansible>=6.7,<6.8 + ansible_7.0: ansible>=7.0,<7.1 + ansible_7.5: ansible>=7.5,<7.6 + ansible_8.0: ansible>=8.0,<8.1 + ansible_8.5: 
ansible>=8.5,<8.6 + ansible_9.0: ansible>=9.0,<9.1 + ansible_9.5: ansible>=9.5,<9.6 + ansible_10.0: ansible>=10.0,<10.1 + ansible_10.7: ansible>=10.7,<10.8 + ansible_11.0: ansible>=11.0,<11.1 + ansible_11.13: ansible>=11.13,<11.14 + ansible_12.0: ansible>=12.0,<12.1 + ansible_12.3: ansible>=12.3,<12.4 + ansible_13.0: ansible>=13.0,<13.1 + ansible_13.1: ansible>=13.1,<13.2 + +#commands_pre = +# /usr/bin/find {toxinidir} -type f -not -path '{toxworkdir}/*' -path '*/__pycache__/*' -name '*.py[c|o]' -delete +# /bin/sh -c '/usr/bin/find {homedir}/.cache -type d -path "*/molecule_*" -exec rm -rfv \{\} +;' + +commands = + {posargs:molecule test --all --destroy always} diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/systemd/.config/ansible-lint.yml b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/.config/ansible-lint.yml new file mode 100644 index 0000000..48763f0 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/.config/ansible-lint.yml @@ -0,0 +1,5 @@ +--- + +skip_list: + - name[casing] + - name[template] diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/systemd/.config/pycodestyle.cfg b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/.config/pycodestyle.cfg new file mode 100644 index 0000000..9dc28a9 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/.config/pycodestyle.cfg @@ -0,0 +1,63 @@ +[pycodestyle] + +ignore = + E203, + E251, + W504, + W605 + +exclude = + # No need to traverse our git directory + .git, + .github, + # There's no value in checking cache directories + __pycache__, + .tox, + molecule, + hooks, + tools, + test_*.py + +max-line-length = 150 + +# Optional extension: show absolute paths +show-source = true +count = true + +[flake8] + +# E203: https://github.com/python/black/issues/315 +# ignore = D,E741,W503,W504,H,E501,E203 +# E221 multiple spaces before operator +# E251 unexpected spaces around keyword / parameter equals +# E203 for Black compatibility +# W503 for the line-break operator + +ignore = + E203, + E251, + W504, + W605 + +exclude = + # No need to traverse our git directory + .git, + .github, + # There's no value in checking cache directories + __pycache__, + .tox, + molecule, + hooks, + tools, + test_*.py + +max-line-length = 150 + +# Optional extension: show absolute paths +show-source = true +count = true + +[flake8-length] +max-code-length = 120 +max-comment-length = 300 +max-docstring-length = 200 diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/systemd/CONTRIBUTING.md b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/CONTRIBUTING.md new file mode 100644 index 0000000..e3cd4cc --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/CONTRIBUTING.md @@ -0,0 +1,31 @@ +Contributing +============ +If you want to contribute to a project and make it better, your help is very welcome. +Contributing is also a great way to learn more about social coding on Github, new technologies +and their ecosystems and how to make constructive, helpful bug reports, feature requests and the +noblest of all contributions: a good, clean pull request. + +### How to make a clean pull request + +Look for a project's contribution instructions. If there are any, follow them. + +- Create a personal fork of the project on Github. +- Clone the fork on your local machine. Your remote repo on Github is called `origin`. 
+- Add the original repository as a remote called `upstream`. +- If you created your fork a while ago, be sure to pull upstream changes into your local repository. +- Create a new branch to work on! Branch from `develop` if it exists, else from `master`. +- Implement/fix your feature, comment your code. +- Follow the code style of the project, including indentation. +- If the project has tests, run them! +- Write or adapt tests as needed. +- Add or change the documentation as needed. +- Squash your commits into a single commit. Create a new branch if necessary. +- Push your branch to your fork on Github, the remote `origin`. +- From your fork, open a pull request in the correct branch. Target the project's `develop` branch if there is one, else go for `master`! +- If the maintainer requests further changes, just push them to your branch. The PR will be updated automatically. +- Once the pull request is approved and merged, you can pull the changes from `upstream` to your local repo and delete + your extra branch(es). + +And last but not least: Always write your commit messages in the present tense. +Your commit message should describe what the commit, when applied, does to the +code – not what you did to the code. diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/systemd/FILES.json b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/FILES.json new file mode 100644 index 0000000..323d6a0 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/FILES.json @@ -0,0 +1,4289 @@ +{ + "files": [ + { + "name": ".", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "Makefile", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "d5363979d63997d826e3660c0cc7c752f3ac76af25c708712236cb4c217c1535", + "format": 1 + }, + { + "name": "LICENSE", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "c71d239df91726fc519c6eb72d318ec65820627232b2f796219e87dcf35d0ab4", + "format": 1 + }, + { + "name": "test-requirements.txt", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "d9ae10dd85dbce8ca0e6f96d2f900b649eb5d97d4fbdf3df31a403714a0c2e79", + "format": 1 + }, + { + "name": "CONTRIBUTING.md", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "af957267f930f931b72fd2dc1d4127af66afe9aea4459075bc1c6b0051109df9", + "format": 1 + }, + { + "name": "plugins", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "plugins/filter", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "plugins/filter/systemd.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "c4e555a8d03a5f82d9fac60d7932cb0120e8da954aeb84d1ef616d63d37fd142", + "format": 1 + }, + { + "name": "plugins/filter/lists.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "0e13dda42de97842ed6bd4ba3f65a2754a8c2da0efb287169509c506f74af78c", + "format": 1 + }, + { + "name": "plugins/module_utils", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "plugins/module_utils/validator.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "966f90c6924b3e157807c25330467b62a72e5086aaf122d106b553f56aee3ae2", + "format": 1 + }, + { + "name": "plugins/module_utils/systemd.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "20959a797e08707915083f4661875bdb40875bed996cd5baf9e4ee6d00824c5c", + "format": 1 + }, + { + 
"name": "plugins/module_utils/static.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "b949700b995472bb79e191564ba99e8aa0785353652fcc2e7bee767a2e5a4f12", + "format": 1 + }, + { + "name": "plugins/module_utils/helper.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "4c8c651d896fcb1942eca5042324b67403107d988e8229d5c27af155c79d8469", + "format": 1 + }, + { + "name": "plugins/modules", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "plugins/modules/journalctl.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "481b6c46cdbe880c52f2e9a1ce32cf039fb2365d46ea758f0bef67a70ab223e7", + "format": 1 + }, + { + "name": "plugins/modules/systemd_timer.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "dc781c66bbb0a56a57846fcdc5ce5408174f2d8f3040f2d912ac8ea61ffb4512", + "format": 1 + }, + { + "name": "plugins/modules/unit_file.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "5f0d88326190b448d98aa704d7a8177ef09a4176a54a9febaf7a877f39ee5fc3", + "format": 1 + }, + { + "name": "README.md", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "ab8dae007b8c5d7b8fe8d09218ad0b05af20b16543cc267069132e571fd04aea", + "format": 1 + }, + { + "name": "meta", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "meta/runtime.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "28e2b839864f6f359ccf720be3abdf37cf457a7ab24eeb41bd71c5c615b39ce1", + "format": 1 + }, + { + "name": ".config", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": ".config/ansible-lint.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "3f79cdace5e0ac1512d6b3e35517aa678069b483ffb80ca63e5b51718be5bc31", + "format": 1 + }, + { + "name": ".config/pycodestyle.cfg", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "012394884de53906a55e587f0a1bc0942e260d2e2031e131dd3101d3f3fffd8b", + "format": 1 + }, + { + "name": "tox.ini", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "2ba55d73a97d300ccb3ba35bc0e4c84d534c14abe61bb6575ad95c55950ef951", + "format": 1 + }, + { + "name": "roles", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/systemd_unit", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/systemd_unit/.yamllint", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "c0b8af235d23db527385396b1b168b14f5604ad1f82b912f645919b1c95b76a0", + "format": 1 + }, + { + "name": "roles/systemd_unit/vars", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/systemd_unit/vars/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "615d0ffcfe0d26789e714ed1280741094c77aecf5a77e4276a8fcb35a1c8082e", + "format": 1 + }, + { + "name": "roles/systemd_unit/tasks", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/systemd_unit/tasks/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "2b3dc1234181e85f12e7ff28a250f910acd53f872e736257cdd2a84c6b13c1b5", + "format": 1 + }, + { + "name": "roles/systemd_unit/Makefile", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "22a0d6abe4d830d86b79c2ef366996400d6b634b9e11d0834c1bb0a9ce1bd12d", + "format": 1 + }, + { + "name": 
"roles/systemd_unit/hooks", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/systemd_unit/hooks/molecule.rc", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "03502d1cecefbfae0c44115de1394c29618b05a3c1488f2dc4bde321aaf782aa", + "format": 1 + }, + { + "name": "roles/systemd_unit/hooks/destroy", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "79c679a67141ef9cbfb56c8c7b948ca3df010c56d0c7af6c46535cde58bac8bb", + "format": 1 + }, + { + "name": "roles/systemd_unit/hooks/verify", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "d3ffa461db1e1516600a5de841029880521aff844c746e5dedf4bebfccdbf39b", + "format": 1 + }, + { + "name": "roles/systemd_unit/hooks/tox.sh", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "be21167cf31ef64dc0dac7d6fee0e2d2ac685874e4e0d455003a5a473b94e0b8", + "format": 1 + }, + { + "name": "roles/systemd_unit/hooks/converge", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "1ddaaac121ea494491ac3419f0be9725405644c2b31781030d236ebc3d930664", + "format": 1 + }, + { + "name": "roles/systemd_unit/hooks/lint", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "fd048a322d0d05e245101fdae1a26a3f6a23c13bbe4f1fc455448672eb59a0a7", + "format": 1 + }, + { + "name": "roles/systemd_unit/hooks/test", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "6137f41add5be2e1a9caa9abd1c1a483c8ce365370fd64eb85dee201a7074403", + "format": 1 + }, + { + "name": "roles/systemd_unit/LICENSE", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "c318b6f42ca9cd8f48502e05161a95a92b154ff5c0647209b208ee9ff9657a04", + "format": 1 + }, + { + "name": "roles/systemd_unit/defaults", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/systemd_unit/defaults/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "0c30e0d6fae491626c97a47cfcbf88ef425a07e0e687e8190f69e2bde9557f7e", + "format": 1 + }, + { + "name": "roles/systemd_unit/.ansible-lint", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "a095bbe94265141fb4a0d437fa17d3b8db52938925ad0e88dcaddd3d8a54978f", + "format": 1 + }, + { + "name": "roles/systemd_unit/CONTRIBUTING.md", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "af957267f930f931b72fd2dc1d4127af66afe9aea4459075bc1c6b0051109df9", + "format": 1 + }, + { + "name": "roles/systemd_unit/README.md", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", + "format": 1 + }, + { + "name": "roles/systemd_unit/molecule", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/systemd_unit/molecule/default", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/systemd_unit/molecule/default/converge.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "5939896652973c69baa0963a85bcfdcb6e158ce581a10652c899c4083019432f", + "format": 1 + }, + { + "name": "roles/systemd_unit/molecule/default/group_vars", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/systemd_unit/molecule/default/group_vars/all", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/systemd_unit/molecule/default/group_vars/all/vars.yml", + "ftype": "file", + 
"chksum_type": "sha256", + "chksum_sha256": "b0445395e9cc85107614c640d43ea0e551b4fd7e575059d5f6e8cdee17e2287d", + "format": 1 + }, + { + "name": "roles/systemd_unit/molecule/default/molecule.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "447e9141327a2a3723f8f99e2a3b6e322ae0964fb4825cdb54af8b3e1722b2d3", + "format": 1 + }, + { + "name": "roles/systemd_unit/molecule/default/prepare.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "8cf98bce11e045e3863190d5deed6c3e316eafb82276764b70d71906ad672567", + "format": 1 + }, + { + "name": "roles/systemd_unit/molecule/default/tests", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/systemd_unit/molecule/default/tests/test_default.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "b050a5eae6e7d4e20c124d61bb09975c777b8704f80e7fc42f60838099bf22dd", + "format": 1 + }, + { + "name": "roles/systemd_unit/.flake8", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "2fe45659318525388b0574a4104028589a94f17e5f6a1cd796efca0bb1a4f510", + "format": 1 + }, + { + "name": "roles/systemd_unit/.gitignore", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "88b2262e97da40d844c28af01fe4153605745a11e29348301bf82f9a1a5881dc", + "format": 1 + }, + { + "name": "roles/systemd_unit/.editorconfig", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "ed43c7886023384f96a2b69d1288065a2cdc651bb9ffc9d7f2bbda6550f2abd6", + "format": 1 + }, + { + "name": "roles/journald", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/journald/.yamllint", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "c0b8af235d23db527385396b1b168b14f5604ad1f82b912f645919b1c95b76a0", + "format": 1 + }, + { + "name": "roles/journald/vars", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/journald/vars/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "e8f191d8319c9f7a344576bbb45b3c697f108357841f6e692d1164b9a6318a8a", + "format": 1 + }, + { + "name": "roles/journald/tasks", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/journald/tasks/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "c01e2cf62ec83176866e71d0a23cb10b396df16f45657fc1370aec7dd44980b7", + "format": 1 + }, + { + "name": "roles/journald/Makefile", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "f80fec55dc046e6af709666d62fc5fd51f45afdf2e8efe978365fa5d0a069dbd", + "format": 1 + }, + { + "name": "roles/journald/hooks", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/journald/hooks/molecule.rc", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "03502d1cecefbfae0c44115de1394c29618b05a3c1488f2dc4bde321aaf782aa", + "format": 1 + }, + { + "name": "roles/journald/hooks/destroy", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "79c679a67141ef9cbfb56c8c7b948ca3df010c56d0c7af6c46535cde58bac8bb", + "format": 1 + }, + { + "name": "roles/journald/hooks/verify", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "d3ffa461db1e1516600a5de841029880521aff844c746e5dedf4bebfccdbf39b", + "format": 1 + }, + { + "name": "roles/journald/hooks/tox.sh", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": 
"be21167cf31ef64dc0dac7d6fee0e2d2ac685874e4e0d455003a5a473b94e0b8", + "format": 1 + }, + { + "name": "roles/journald/hooks/converge", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "1ddaaac121ea494491ac3419f0be9725405644c2b31781030d236ebc3d930664", + "format": 1 + }, + { + "name": "roles/journald/hooks/lint", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "fd048a322d0d05e245101fdae1a26a3f6a23c13bbe4f1fc455448672eb59a0a7", + "format": 1 + }, + { + "name": "roles/journald/hooks/test", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "6137f41add5be2e1a9caa9abd1c1a483c8ce365370fd64eb85dee201a7074403", + "format": 1 + }, + { + "name": "roles/journald/LICENSE", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "c318b6f42ca9cd8f48502e05161a95a92b154ff5c0647209b208ee9ff9657a04", + "format": 1 + }, + { + "name": "roles/journald/defaults", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/journald/defaults/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "e50ed04277072ae7164d1d484079dcc2fdbc1e722592f93a157fc8c52e8533c4", + "format": 1 + }, + { + "name": "roles/journald/.ansible-lint", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "a095bbe94265141fb4a0d437fa17d3b8db52938925ad0e88dcaddd3d8a54978f", + "format": 1 + }, + { + "name": "roles/journald/CONTRIBUTING.md", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "af957267f930f931b72fd2dc1d4127af66afe9aea4459075bc1c6b0051109df9", + "format": 1 + }, + { + "name": "roles/journald/templates", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/journald/templates/systemd", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/journald/templates/systemd/journald.conf.j2", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "853af37400b0249cdea216c8bf9a50b67d9a364b0345edd6091532993531eaea", + "format": 1 + }, + { + "name": "roles/journald/README.md", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "50801a8b2852a307a1c54b686c0bb0f2cdeb69518b277c9f9d6d278b9bba0bbc", + "format": 1 + }, + { + "name": "roles/journald/molecule", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/journald/molecule/configured", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/journald/molecule/configured/converge.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "cc0e0cdd9b483a861d63bc45542cee5111ce966d6129fb434b35521522ab61ca", + "format": 1 + }, + { + "name": "roles/journald/molecule/configured/group_vars", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/journald/molecule/configured/group_vars/all", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/journald/molecule/configured/group_vars/all/vars.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "70a6eadead5f81c78f1d5e8228c37653d1ac9596f164203f0b3bd20343c7336f", + "format": 1 + }, + { + "name": "roles/journald/molecule/configured/molecule.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "447e9141327a2a3723f8f99e2a3b6e322ae0964fb4825cdb54af8b3e1722b2d3", + "format": 1 + }, + { + "name": 
"roles/journald/molecule/configured/prepare.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "4654bd3e55b8b7355de010b11bf80e6fc9d651a782555e25b23d7b28c4d5cc57", + "format": 1 + }, + { + "name": "roles/journald/molecule/configured/tests", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/journald/molecule/configured/tests/test_default.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "54229217e888001d6a6ad221e0b65f7b1082c2d370fc89def32708da5d06709d", + "format": 1 + }, + { + "name": "roles/journald/molecule/default", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/journald/molecule/default/converge.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "cc0e0cdd9b483a861d63bc45542cee5111ce966d6129fb434b35521522ab61ca", + "format": 1 + }, + { + "name": "roles/journald/molecule/default/group_vars", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/journald/molecule/default/group_vars/all", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/journald/molecule/default/group_vars/all/vars.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "144e1381858db7e2636d8698b249684097a154c87f486b543020bfdf91c8681c", + "format": 1 + }, + { + "name": "roles/journald/molecule/default/molecule.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "447e9141327a2a3723f8f99e2a3b6e322ae0964fb4825cdb54af8b3e1722b2d3", + "format": 1 + }, + { + "name": "roles/journald/molecule/default/prepare.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "4654bd3e55b8b7355de010b11bf80e6fc9d651a782555e25b23d7b28c4d5cc57", + "format": 1 + }, + { + "name": "roles/journald/molecule/default/tests", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/journald/molecule/default/tests/test_default.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "54229217e888001d6a6ad221e0b65f7b1082c2d370fc89def32708da5d06709d", + "format": 1 + }, + { + "name": "roles/journald/.flake8", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "2fe45659318525388b0574a4104028589a94f17e5f6a1cd796efca0bb1a4f510", + "format": 1 + }, + { + "name": "roles/journald/.gitignore", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "88b2262e97da40d844c28af01fe4153605745a11e29348301bf82f9a1a5881dc", + "format": 1 + }, + { + "name": "roles/journald/.editorconfig", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "ed43c7886023384f96a2b69d1288065a2cdc651bb9ffc9d7f2bbda6550f2abd6", + "format": 1 + }, + { + "name": "roles/sleep", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/sleep/.yamllint", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "c0b8af235d23db527385396b1b168b14f5604ad1f82b912f645919b1c95b76a0", + "format": 1 + }, + { + "name": "roles/sleep/vars", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/sleep/vars/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "e03d0107e7013ac28f089bd703826800f6da7e1c8bd6211a9e91c7686f960a39", + "format": 1 + }, + { + "name": "roles/sleep/tasks", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + 
{ + "name": "roles/sleep/tasks/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "ce5745ce002bf69d11f7322850322c281650c3394ca78bd71d2a1be77501fa05", + "format": 1 + }, + { + "name": "roles/sleep/Makefile", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "f80fec55dc046e6af709666d62fc5fd51f45afdf2e8efe978365fa5d0a069dbd", + "format": 1 + }, + { + "name": "roles/sleep/hooks", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/sleep/hooks/molecule.rc", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "03502d1cecefbfae0c44115de1394c29618b05a3c1488f2dc4bde321aaf782aa", + "format": 1 + }, + { + "name": "roles/sleep/hooks/destroy", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "79c679a67141ef9cbfb56c8c7b948ca3df010c56d0c7af6c46535cde58bac8bb", + "format": 1 + }, + { + "name": "roles/sleep/hooks/verify", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "d3ffa461db1e1516600a5de841029880521aff844c746e5dedf4bebfccdbf39b", + "format": 1 + }, + { + "name": "roles/sleep/hooks/tox.sh", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "be21167cf31ef64dc0dac7d6fee0e2d2ac685874e4e0d455003a5a473b94e0b8", + "format": 1 + }, + { + "name": "roles/sleep/hooks/converge", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "1ddaaac121ea494491ac3419f0be9725405644c2b31781030d236ebc3d930664", + "format": 1 + }, + { + "name": "roles/sleep/hooks/lint", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "fd048a322d0d05e245101fdae1a26a3f6a23c13bbe4f1fc455448672eb59a0a7", + "format": 1 + }, + { + "name": "roles/sleep/hooks/test", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "6137f41add5be2e1a9caa9abd1c1a483c8ce365370fd64eb85dee201a7074403", + "format": 1 + }, + { + "name": "roles/sleep/LICENSE", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "c318b6f42ca9cd8f48502e05161a95a92b154ff5c0647209b208ee9ff9657a04", + "format": 1 + }, + { + "name": "roles/sleep/defaults", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/sleep/defaults/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "38f1b95db90900d50f286e5966e5bc42ae508b0a97399b87c0c7ff06d310a725", + "format": 1 + }, + { + "name": "roles/sleep/.ansible-lint", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "a095bbe94265141fb4a0d437fa17d3b8db52938925ad0e88dcaddd3d8a54978f", + "format": 1 + }, + { + "name": "roles/sleep/CONTRIBUTING.md", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "af957267f930f931b72fd2dc1d4127af66afe9aea4459075bc1c6b0051109df9", + "format": 1 + }, + { + "name": "roles/sleep/templates", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/sleep/templates/systemd", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/sleep/templates/systemd/sleep.conf.j2", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "7d6e8e2b42811448c9b1ae6bedbe630875c6d0bd346cd49c324f529f4208b4af", + "format": 1 + }, + { + "name": "roles/sleep/README.md", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "90dde98e6915ce065c52207b6f11972a76accf7d4b61094e1cc08d3990717db7", + "format": 1 + }, + { + "name": "roles/sleep/molecule", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + 
{ + "name": "roles/sleep/molecule/configured", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/sleep/molecule/configured/converge.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "9c76cf3a599e2cc079bc102e294ae155159befcf43763ff40ff46abd68bc9b5c", + "format": 1 + }, + { + "name": "roles/sleep/molecule/configured/group_vars", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/sleep/molecule/configured/group_vars/all", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/sleep/molecule/configured/group_vars/all/vars.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "317e46a6a1e0500f0dc3aa3b414f6e8d87a3a4a06cffb01e7857a924a7082f81", + "format": 1 + }, + { + "name": "roles/sleep/molecule/configured/molecule.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "447e9141327a2a3723f8f99e2a3b6e322ae0964fb4825cdb54af8b3e1722b2d3", + "format": 1 + }, + { + "name": "roles/sleep/molecule/configured/prepare.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "4654bd3e55b8b7355de010b11bf80e6fc9d651a782555e25b23d7b28c4d5cc57", + "format": 1 + }, + { + "name": "roles/sleep/molecule/configured/tests", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/sleep/molecule/configured/tests/test_default.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "e78c59f5956738f7b768e9240ebf63870d2a815b24acb85b73112a084a0df0b6", + "format": 1 + }, + { + "name": "roles/sleep/molecule/default", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/sleep/molecule/default/converge.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "9c76cf3a599e2cc079bc102e294ae155159befcf43763ff40ff46abd68bc9b5c", + "format": 1 + }, + { + "name": "roles/sleep/molecule/default/group_vars", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/sleep/molecule/default/group_vars/all", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/sleep/molecule/default/group_vars/all/vars.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "144e1381858db7e2636d8698b249684097a154c87f486b543020bfdf91c8681c", + "format": 1 + }, + { + "name": "roles/sleep/molecule/default/molecule.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "447e9141327a2a3723f8f99e2a3b6e322ae0964fb4825cdb54af8b3e1722b2d3", + "format": 1 + }, + { + "name": "roles/sleep/molecule/default/prepare.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "4654bd3e55b8b7355de010b11bf80e6fc9d651a782555e25b23d7b28c4d5cc57", + "format": 1 + }, + { + "name": "roles/sleep/molecule/default/tests", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/sleep/molecule/default/tests/test_default.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "e78c59f5956738f7b768e9240ebf63870d2a815b24acb85b73112a084a0df0b6", + "format": 1 + }, + { + "name": "roles/sleep/.flake8", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "2fe45659318525388b0574a4104028589a94f17e5f6a1cd796efca0bb1a4f510", + "format": 1 + }, + { + "name": "roles/sleep/.gitignore", + "ftype": "file", + "chksum_type": 
"sha256", + "chksum_sha256": "88b2262e97da40d844c28af01fe4153605745a11e29348301bf82f9a1a5881dc", + "format": 1 + }, + { + "name": "roles/sleep/.editorconfig", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "ed43c7886023384f96a2b69d1288065a2cdc651bb9ffc9d7f2bbda6550f2abd6", + "format": 1 + }, + { + "name": "roles/homed", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/homed/.yamllint", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "c0b8af235d23db527385396b1b168b14f5604ad1f82b912f645919b1c95b76a0", + "format": 1 + }, + { + "name": "roles/homed/vars", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/homed/vars/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "cce0c31e7ba5cb7ce967e6537781b622cafb9dff55184d5968f9ce62cb825973", + "format": 1 + }, + { + "name": "roles/homed/tasks", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/homed/tasks/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "5bbff51917aa6d673e932be0a8b71c1dc00cd552816078c37f8bbab7e7eb11fc", + "format": 1 + }, + { + "name": "roles/homed/Makefile", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "f80fec55dc046e6af709666d62fc5fd51f45afdf2e8efe978365fa5d0a069dbd", + "format": 1 + }, + { + "name": "roles/homed/hooks", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/homed/hooks/molecule.rc", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "03502d1cecefbfae0c44115de1394c29618b05a3c1488f2dc4bde321aaf782aa", + "format": 1 + }, + { + "name": "roles/homed/hooks/destroy", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "79c679a67141ef9cbfb56c8c7b948ca3df010c56d0c7af6c46535cde58bac8bb", + "format": 1 + }, + { + "name": "roles/homed/hooks/verify", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "d3ffa461db1e1516600a5de841029880521aff844c746e5dedf4bebfccdbf39b", + "format": 1 + }, + { + "name": "roles/homed/hooks/tox.sh", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "be21167cf31ef64dc0dac7d6fee0e2d2ac685874e4e0d455003a5a473b94e0b8", + "format": 1 + }, + { + "name": "roles/homed/hooks/converge", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "1ddaaac121ea494491ac3419f0be9725405644c2b31781030d236ebc3d930664", + "format": 1 + }, + { + "name": "roles/homed/hooks/lint", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "fd048a322d0d05e245101fdae1a26a3f6a23c13bbe4f1fc455448672eb59a0a7", + "format": 1 + }, + { + "name": "roles/homed/hooks/test", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "6137f41add5be2e1a9caa9abd1c1a483c8ce365370fd64eb85dee201a7074403", + "format": 1 + }, + { + "name": "roles/homed/LICENSE", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "c318b6f42ca9cd8f48502e05161a95a92b154ff5c0647209b208ee9ff9657a04", + "format": 1 + }, + { + "name": "roles/homed/defaults", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/homed/defaults/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "e25b553db904fd9cc17e4561e9a968b5110544f0802ffc2c3047c5d8e4135f9b", + "format": 1 + }, + { + "name": "roles/homed/.ansible-lint", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": 
"a095bbe94265141fb4a0d437fa17d3b8db52938925ad0e88dcaddd3d8a54978f", + "format": 1 + }, + { + "name": "roles/homed/CONTRIBUTING.md", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "af957267f930f931b72fd2dc1d4127af66afe9aea4459075bc1c6b0051109df9", + "format": 1 + }, + { + "name": "roles/homed/templates", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/homed/templates/systemd", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/homed/templates/systemd/homed.conf.j2", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "46eba15804ea1d86df9fa741b9a42ba3b54ed80bd906e5b37aa5ed847bf88a73", + "format": 1 + }, + { + "name": "roles/homed/README.md", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "ffd13df3d02362371112322e1f282ac711ef4cd6f6dd4baffdd88d738a3cd163", + "format": 1 + }, + { + "name": "roles/homed/molecule", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/homed/molecule/configured", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/homed/molecule/configured/converge.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "2ef8fc9e7bac001fe0932d17e2a4597d5e8e671df804a34e827e2bddaf5ac69d", + "format": 1 + }, + { + "name": "roles/homed/molecule/configured/group_vars", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/homed/molecule/configured/group_vars/all", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/homed/molecule/configured/group_vars/all/vars.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "df7247cdf8deee92b7c9485b0b7def09240922436bee1c83f6be295606ac65e5", + "format": 1 + }, + { + "name": "roles/homed/molecule/configured/molecule.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "447e9141327a2a3723f8f99e2a3b6e322ae0964fb4825cdb54af8b3e1722b2d3", + "format": 1 + }, + { + "name": "roles/homed/molecule/configured/prepare.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "4654bd3e55b8b7355de010b11bf80e6fc9d651a782555e25b23d7b28c4d5cc57", + "format": 1 + }, + { + "name": "roles/homed/molecule/configured/tests", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/homed/molecule/configured/tests/test_default.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "a16a8b244005f6cebb6276c86e5dbae32c76af7c4b93fbb5d978634d74ada959", + "format": 1 + }, + { + "name": "roles/homed/molecule/default", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/homed/molecule/default/converge.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "2ef8fc9e7bac001fe0932d17e2a4597d5e8e671df804a34e827e2bddaf5ac69d", + "format": 1 + }, + { + "name": "roles/homed/molecule/default/group_vars", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/homed/molecule/default/group_vars/all", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/homed/molecule/default/group_vars/all/vars.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": 
"144e1381858db7e2636d8698b249684097a154c87f486b543020bfdf91c8681c", + "format": 1 + }, + { + "name": "roles/homed/molecule/default/molecule.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "3946da1368f85c578d26cce769fccae58f2cb201e5ab5460b3b2d886baa75ac8", + "format": 1 + }, + { + "name": "roles/homed/molecule/default/prepare.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "4654bd3e55b8b7355de010b11bf80e6fc9d651a782555e25b23d7b28c4d5cc57", + "format": 1 + }, + { + "name": "roles/homed/molecule/default/tests", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/homed/molecule/default/tests/test_default.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "a16a8b244005f6cebb6276c86e5dbae32c76af7c4b93fbb5d978634d74ada959", + "format": 1 + }, + { + "name": "roles/homed/.flake8", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "2fe45659318525388b0574a4104028589a94f17e5f6a1cd796efca0bb1a4f510", + "format": 1 + }, + { + "name": "roles/homed/.gitignore", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "88b2262e97da40d844c28af01fe4153605745a11e29348301bf82f9a1a5881dc", + "format": 1 + }, + { + "name": "roles/homed/.editorconfig", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "ed43c7886023384f96a2b69d1288065a2cdc651bb9ffc9d7f2bbda6550f2abd6", + "format": 1 + }, + { + "name": "roles/resolved", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/resolved/.yamllint", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "c0b8af235d23db527385396b1b168b14f5604ad1f82b912f645919b1c95b76a0", + "format": 1 + }, + { + "name": "roles/resolved/vars", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/resolved/vars/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "aa8044840f815279fadb4513bebd911c1ee7cd2ee4987baf623d9a2320676cba", + "format": 1 + }, + { + "name": "roles/resolved/tasks", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/resolved/tasks/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "d7a13e8225c7c80fb60cfd3b3a7df5e13fa5a523aa51618edcb65cb3ae672018", + "format": 1 + }, + { + "name": "roles/resolved/Makefile", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "f80fec55dc046e6af709666d62fc5fd51f45afdf2e8efe978365fa5d0a069dbd", + "format": 1 + }, + { + "name": "roles/resolved/hooks", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/resolved/hooks/molecule.rc", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "03502d1cecefbfae0c44115de1394c29618b05a3c1488f2dc4bde321aaf782aa", + "format": 1 + }, + { + "name": "roles/resolved/hooks/destroy", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "79c679a67141ef9cbfb56c8c7b948ca3df010c56d0c7af6c46535cde58bac8bb", + "format": 1 + }, + { + "name": "roles/resolved/hooks/verify", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "d3ffa461db1e1516600a5de841029880521aff844c746e5dedf4bebfccdbf39b", + "format": 1 + }, + { + "name": "roles/resolved/hooks/tox.sh", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "be21167cf31ef64dc0dac7d6fee0e2d2ac685874e4e0d455003a5a473b94e0b8", + "format": 1 + }, + { + "name": "roles/resolved/hooks/converge", + 
"ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "1ddaaac121ea494491ac3419f0be9725405644c2b31781030d236ebc3d930664", + "format": 1 + }, + { + "name": "roles/resolved/hooks/lint", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "fd048a322d0d05e245101fdae1a26a3f6a23c13bbe4f1fc455448672eb59a0a7", + "format": 1 + }, + { + "name": "roles/resolved/hooks/test", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "6137f41add5be2e1a9caa9abd1c1a483c8ce365370fd64eb85dee201a7074403", + "format": 1 + }, + { + "name": "roles/resolved/LICENSE", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "c318b6f42ca9cd8f48502e05161a95a92b154ff5c0647209b208ee9ff9657a04", + "format": 1 + }, + { + "name": "roles/resolved/defaults", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/resolved/defaults/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "6af4e92087e8aa4652aaaf9c70dd112e736c800c87f6601c468950babe4694b2", + "format": 1 + }, + { + "name": "roles/resolved/.ansible-lint", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "a095bbe94265141fb4a0d437fa17d3b8db52938925ad0e88dcaddd3d8a54978f", + "format": 1 + }, + { + "name": "roles/resolved/CONTRIBUTING.md", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "af957267f930f931b72fd2dc1d4127af66afe9aea4459075bc1c6b0051109df9", + "format": 1 + }, + { + "name": "roles/resolved/templates", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/resolved/templates/systemd", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/resolved/templates/systemd/resolved.conf.j2", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "535dc25567fee04f78c27e8bb558269b970e1e8312675fa2d2a3f44c45741c8d", + "format": 1 + }, + { + "name": "roles/resolved/README.md", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "ac4ed6f6c3e59238a48c4c8832fad6be836cac53ebd85caf327fc9a6847c64d2", + "format": 1 + }, + { + "name": "roles/resolved/molecule", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/resolved/molecule/configured", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/resolved/molecule/configured/converge.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "20a7cd81b2d58a7fd6cf1c23be086792b3bd88ea218869dccf660ade5e74e9cc", + "format": 1 + }, + { + "name": "roles/resolved/molecule/configured/group_vars", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/resolved/molecule/configured/group_vars/all", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/resolved/molecule/configured/group_vars/all/vars.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "579377b34a785f81e38d8a4cb1bfbe0fe602fbd15e1a24860af91b5df325d8f9", + "format": 1 + }, + { + "name": "roles/resolved/molecule/configured/molecule.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "447e9141327a2a3723f8f99e2a3b6e322ae0964fb4825cdb54af8b3e1722b2d3", + "format": 1 + }, + { + "name": "roles/resolved/molecule/configured/prepare.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "4654bd3e55b8b7355de010b11bf80e6fc9d651a782555e25b23d7b28c4d5cc57", 
+ "format": 1 + }, + { + "name": "roles/resolved/molecule/configured/tests", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/resolved/molecule/configured/tests/test_default.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "b8d16c69385cc6b6f0da76ada2e718ca1156e52ed0e11fb55a1a9b145cee621c", + "format": 1 + }, + { + "name": "roles/resolved/molecule/default", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/resolved/molecule/default/converge.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "20a7cd81b2d58a7fd6cf1c23be086792b3bd88ea218869dccf660ade5e74e9cc", + "format": 1 + }, + { + "name": "roles/resolved/molecule/default/group_vars", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/resolved/molecule/default/group_vars/all", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/resolved/molecule/default/group_vars/all/vars.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "e617f10182aaaee74cffae57d129eaa2e17b5c9501863f0dc2de9cbe113c1152", + "format": 1 + }, + { + "name": "roles/resolved/molecule/default/molecule.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "447e9141327a2a3723f8f99e2a3b6e322ae0964fb4825cdb54af8b3e1722b2d3", + "format": 1 + }, + { + "name": "roles/resolved/molecule/default/prepare.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "4654bd3e55b8b7355de010b11bf80e6fc9d651a782555e25b23d7b28c4d5cc57", + "format": 1 + }, + { + "name": "roles/resolved/molecule/default/tests", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/resolved/molecule/default/tests/test_default.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "b8d16c69385cc6b6f0da76ada2e718ca1156e52ed0e11fb55a1a9b145cee621c", + "format": 1 + }, + { + "name": "roles/resolved/.flake8", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "2fe45659318525388b0574a4104028589a94f17e5f6a1cd796efca0bb1a4f510", + "format": 1 + }, + { + "name": "roles/resolved/.gitignore", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "88b2262e97da40d844c28af01fe4153605745a11e29348301bf82f9a1a5881dc", + "format": 1 + }, + { + "name": "roles/resolved/.editorconfig", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "ed43c7886023384f96a2b69d1288065a2cdc651bb9ffc9d7f2bbda6550f2abd6", + "format": 1 + }, + { + "name": "roles/system", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/system/.yamllint", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "c0b8af235d23db527385396b1b168b14f5604ad1f82b912f645919b1c95b76a0", + "format": 1 + }, + { + "name": "roles/system/vars", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/system/vars/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "f0a211ab9370cca71eeae026da0ca8c1f202899d7cb594189a16db85660c5fba", + "format": 1 + }, + { + "name": "roles/system/tasks", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/system/tasks/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "28b9ee140fab4b59924900ea75aa742fc231813e26842066ee405723a1cd6b73", + 
"format": 1 + }, + { + "name": "roles/system/Makefile", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "f80fec55dc046e6af709666d62fc5fd51f45afdf2e8efe978365fa5d0a069dbd", + "format": 1 + }, + { + "name": "roles/system/hooks", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/system/hooks/molecule.rc", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "03502d1cecefbfae0c44115de1394c29618b05a3c1488f2dc4bde321aaf782aa", + "format": 1 + }, + { + "name": "roles/system/hooks/destroy", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "79c679a67141ef9cbfb56c8c7b948ca3df010c56d0c7af6c46535cde58bac8bb", + "format": 1 + }, + { + "name": "roles/system/hooks/verify", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "d3ffa461db1e1516600a5de841029880521aff844c746e5dedf4bebfccdbf39b", + "format": 1 + }, + { + "name": "roles/system/hooks/tox.sh", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "be21167cf31ef64dc0dac7d6fee0e2d2ac685874e4e0d455003a5a473b94e0b8", + "format": 1 + }, + { + "name": "roles/system/hooks/converge", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "1ddaaac121ea494491ac3419f0be9725405644c2b31781030d236ebc3d930664", + "format": 1 + }, + { + "name": "roles/system/hooks/lint", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "fd048a322d0d05e245101fdae1a26a3f6a23c13bbe4f1fc455448672eb59a0a7", + "format": 1 + }, + { + "name": "roles/system/hooks/test", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "6137f41add5be2e1a9caa9abd1c1a483c8ce365370fd64eb85dee201a7074403", + "format": 1 + }, + { + "name": "roles/system/LICENSE", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "c318b6f42ca9cd8f48502e05161a95a92b154ff5c0647209b208ee9ff9657a04", + "format": 1 + }, + { + "name": "roles/system/defaults", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/system/defaults/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "55d090c62ad8c772f3c8136bfe9ff489d64fa5daf78ff75a6452db1cc2e45027", + "format": 1 + }, + { + "name": "roles/system/.ansible-lint", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "a095bbe94265141fb4a0d437fa17d3b8db52938925ad0e88dcaddd3d8a54978f", + "format": 1 + }, + { + "name": "roles/system/CONTRIBUTING.md", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "af957267f930f931b72fd2dc1d4127af66afe9aea4459075bc1c6b0051109df9", + "format": 1 + }, + { + "name": "roles/system/templates", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/system/templates/systemd", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/system/templates/systemd/system.conf.j2", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "63b69dac110f516b377521d4e96a60d502a29f1c6a31b78cd58437994ce9b83c", + "format": 1 + }, + { + "name": "roles/system/README.md", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", + "format": 1 + }, + { + "name": "roles/system/molecule", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/system/molecule/configured", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": 
"roles/system/molecule/configured/converge.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "eb98c2bd81dfd92fa45c5c4ff40971bbe99c33fb9a1839ca77754247f150ecfc", + "format": 1 + }, + { + "name": "roles/system/molecule/configured/group_vars", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/system/molecule/configured/group_vars/all", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/system/molecule/configured/group_vars/all/vars.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "8f6b012a85625dc6c645c6352c3db52d1bd2c085b50a76e3be566fd4bcea9875", + "format": 1 + }, + { + "name": "roles/system/molecule/configured/molecule.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "447e9141327a2a3723f8f99e2a3b6e322ae0964fb4825cdb54af8b3e1722b2d3", + "format": 1 + }, + { + "name": "roles/system/molecule/configured/prepare.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "4654bd3e55b8b7355de010b11bf80e6fc9d651a782555e25b23d7b28c4d5cc57", + "format": 1 + }, + { + "name": "roles/system/molecule/configured/tests", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/system/molecule/configured/tests/test_default.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "b050a5eae6e7d4e20c124d61bb09975c777b8704f80e7fc42f60838099bf22dd", + "format": 1 + }, + { + "name": "roles/system/molecule/default", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/system/molecule/default/converge.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "eb98c2bd81dfd92fa45c5c4ff40971bbe99c33fb9a1839ca77754247f150ecfc", + "format": 1 + }, + { + "name": "roles/system/molecule/default/group_vars", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/system/molecule/default/group_vars/all", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/system/molecule/default/group_vars/all/vars.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "144e1381858db7e2636d8698b249684097a154c87f486b543020bfdf91c8681c", + "format": 1 + }, + { + "name": "roles/system/molecule/default/molecule.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "447e9141327a2a3723f8f99e2a3b6e322ae0964fb4825cdb54af8b3e1722b2d3", + "format": 1 + }, + { + "name": "roles/system/molecule/default/prepare.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "4654bd3e55b8b7355de010b11bf80e6fc9d651a782555e25b23d7b28c4d5cc57", + "format": 1 + }, + { + "name": "roles/system/molecule/default/tests", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/system/molecule/default/tests/test_default.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "b050a5eae6e7d4e20c124d61bb09975c777b8704f80e7fc42f60838099bf22dd", + "format": 1 + }, + { + "name": "roles/system/.flake8", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "2fe45659318525388b0574a4104028589a94f17e5f6a1cd796efca0bb1a4f510", + "format": 1 + }, + { + "name": "roles/system/.gitignore", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "88b2262e97da40d844c28af01fe4153605745a11e29348301bf82f9a1a5881dc", + "format": 1 + }, + { + "name": 
"roles/system/.editorconfig", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "ed43c7886023384f96a2b69d1288065a2cdc651bb9ffc9d7f2bbda6550f2abd6", + "format": 1 + }, + { + "name": "roles/oomd", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/oomd/.yamllint", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "c0b8af235d23db527385396b1b168b14f5604ad1f82b912f645919b1c95b76a0", + "format": 1 + }, + { + "name": "roles/oomd/vars", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/oomd/vars/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "3a9ecc35cf1954561421ba5b3406f517cacb36efcc0cb01765f8823ff389df66", + "format": 1 + }, + { + "name": "roles/oomd/tasks", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/oomd/tasks/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "f98074ef8285c696c91a99631c9851f7aceb40a7362bc6ccfbd6fce49d3a5a34", + "format": 1 + }, + { + "name": "roles/oomd/Makefile", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "f80fec55dc046e6af709666d62fc5fd51f45afdf2e8efe978365fa5d0a069dbd", + "format": 1 + }, + { + "name": "roles/oomd/hooks", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/oomd/hooks/molecule.rc", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "03502d1cecefbfae0c44115de1394c29618b05a3c1488f2dc4bde321aaf782aa", + "format": 1 + }, + { + "name": "roles/oomd/hooks/destroy", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "79c679a67141ef9cbfb56c8c7b948ca3df010c56d0c7af6c46535cde58bac8bb", + "format": 1 + }, + { + "name": "roles/oomd/hooks/verify", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "d3ffa461db1e1516600a5de841029880521aff844c746e5dedf4bebfccdbf39b", + "format": 1 + }, + { + "name": "roles/oomd/hooks/tox.sh", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "be21167cf31ef64dc0dac7d6fee0e2d2ac685874e4e0d455003a5a473b94e0b8", + "format": 1 + }, + { + "name": "roles/oomd/hooks/converge", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "1ddaaac121ea494491ac3419f0be9725405644c2b31781030d236ebc3d930664", + "format": 1 + }, + { + "name": "roles/oomd/hooks/lint", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "fd048a322d0d05e245101fdae1a26a3f6a23c13bbe4f1fc455448672eb59a0a7", + "format": 1 + }, + { + "name": "roles/oomd/hooks/test", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "6137f41add5be2e1a9caa9abd1c1a483c8ce365370fd64eb85dee201a7074403", + "format": 1 + }, + { + "name": "roles/oomd/LICENSE", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "c318b6f42ca9cd8f48502e05161a95a92b154ff5c0647209b208ee9ff9657a04", + "format": 1 + }, + { + "name": "roles/oomd/defaults", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/oomd/defaults/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "4dbb558122f2c2fc57fd61a81230a46489bbfef51cebb22334cd76609771366d", + "format": 1 + }, + { + "name": "roles/oomd/.ansible-lint", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "a095bbe94265141fb4a0d437fa17d3b8db52938925ad0e88dcaddd3d8a54978f", + "format": 1 + }, + { + "name": "roles/oomd/CONTRIBUTING.md", + "ftype": "file", + 
"chksum_type": "sha256", + "chksum_sha256": "af957267f930f931b72fd2dc1d4127af66afe9aea4459075bc1c6b0051109df9", + "format": 1 + }, + { + "name": "roles/oomd/templates", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/oomd/templates/systemd", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/oomd/templates/systemd/oomd.conf.j2", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "c2a8da8d49a168ca71d62908ce895ed13e2e11f665ac8220495454fd94299d01", + "format": 1 + }, + { + "name": "roles/oomd/README.md", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", + "format": 1 + }, + { + "name": "roles/oomd/molecule", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/oomd/molecule/configured", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/oomd/molecule/configured/converge.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "34ffa9fd969a5b35d26815109e29961106f8160dc0484c3337ced924e83e797a", + "format": 1 + }, + { + "name": "roles/oomd/molecule/configured/group_vars", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/oomd/molecule/configured/group_vars/all", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/oomd/molecule/configured/group_vars/all/vars.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "8820a0176535fee5fc823332320cdd7502843e880257531b41da7a38706be329", + "format": 1 + }, + { + "name": "roles/oomd/molecule/configured/molecule.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "447e9141327a2a3723f8f99e2a3b6e322ae0964fb4825cdb54af8b3e1722b2d3", + "format": 1 + }, + { + "name": "roles/oomd/molecule/configured/prepare.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "4654bd3e55b8b7355de010b11bf80e6fc9d651a782555e25b23d7b28c4d5cc57", + "format": 1 + }, + { + "name": "roles/oomd/molecule/configured/tests", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/oomd/molecule/configured/tests/test_default.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "cac73b68a836e6a0e61deb2e99e3c85b7cc7b6daa6887f1ec767dd7917c96ed5", + "format": 1 + }, + { + "name": "roles/oomd/molecule/default", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/oomd/molecule/default/converge.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "34ffa9fd969a5b35d26815109e29961106f8160dc0484c3337ced924e83e797a", + "format": 1 + }, + { + "name": "roles/oomd/molecule/default/group_vars", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/oomd/molecule/default/group_vars/all", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/oomd/molecule/default/group_vars/all/vars.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "144e1381858db7e2636d8698b249684097a154c87f486b543020bfdf91c8681c", + "format": 1 + }, + { + "name": "roles/oomd/molecule/default/molecule.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": 
"447e9141327a2a3723f8f99e2a3b6e322ae0964fb4825cdb54af8b3e1722b2d3", + "format": 1 + }, + { + "name": "roles/oomd/molecule/default/prepare.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "4654bd3e55b8b7355de010b11bf80e6fc9d651a782555e25b23d7b28c4d5cc57", + "format": 1 + }, + { + "name": "roles/oomd/molecule/default/tests", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/oomd/molecule/default/tests/test_default.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "cac73b68a836e6a0e61deb2e99e3c85b7cc7b6daa6887f1ec767dd7917c96ed5", + "format": 1 + }, + { + "name": "roles/oomd/.flake8", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "2fe45659318525388b0574a4104028589a94f17e5f6a1cd796efca0bb1a4f510", + "format": 1 + }, + { + "name": "roles/oomd/.gitignore", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "88b2262e97da40d844c28af01fe4153605745a11e29348301bf82f9a1a5881dc", + "format": 1 + }, + { + "name": "roles/oomd/.editorconfig", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "ed43c7886023384f96a2b69d1288065a2cdc651bb9ffc9d7f2bbda6550f2abd6", + "format": 1 + }, + { + "name": "roles/timesyncd", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/timesyncd/.yamllint", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "c0b8af235d23db527385396b1b168b14f5604ad1f82b912f645919b1c95b76a0", + "format": 1 + }, + { + "name": "roles/timesyncd/vars", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/timesyncd/vars/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "f015528f63fb82aae158a6294c4337230e851df43c9fca24e0edc1214919da8c", + "format": 1 + }, + { + "name": "roles/timesyncd/tasks", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/timesyncd/tasks/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "7d39a65e9e5f43097f750ed80172fa18ced10023bf91c82a4b434558ec731c65", + "format": 1 + }, + { + "name": "roles/timesyncd/Makefile", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "f80fec55dc046e6af709666d62fc5fd51f45afdf2e8efe978365fa5d0a069dbd", + "format": 1 + }, + { + "name": "roles/timesyncd/hooks", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/timesyncd/hooks/molecule.rc", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "03502d1cecefbfae0c44115de1394c29618b05a3c1488f2dc4bde321aaf782aa", + "format": 1 + }, + { + "name": "roles/timesyncd/hooks/destroy", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "79c679a67141ef9cbfb56c8c7b948ca3df010c56d0c7af6c46535cde58bac8bb", + "format": 1 + }, + { + "name": "roles/timesyncd/hooks/verify", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "d3ffa461db1e1516600a5de841029880521aff844c746e5dedf4bebfccdbf39b", + "format": 1 + }, + { + "name": "roles/timesyncd/hooks/tox.sh", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "be21167cf31ef64dc0dac7d6fee0e2d2ac685874e4e0d455003a5a473b94e0b8", + "format": 1 + }, + { + "name": "roles/timesyncd/hooks/converge", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "1ddaaac121ea494491ac3419f0be9725405644c2b31781030d236ebc3d930664", + "format": 1 + }, + { + "name": "roles/timesyncd/hooks/lint", + "ftype": 
"file", + "chksum_type": "sha256", + "chksum_sha256": "fd048a322d0d05e245101fdae1a26a3f6a23c13bbe4f1fc455448672eb59a0a7", + "format": 1 + }, + { + "name": "roles/timesyncd/hooks/test", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "6137f41add5be2e1a9caa9abd1c1a483c8ce365370fd64eb85dee201a7074403", + "format": 1 + }, + { + "name": "roles/timesyncd/LICENSE", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "c318b6f42ca9cd8f48502e05161a95a92b154ff5c0647209b208ee9ff9657a04", + "format": 1 + }, + { + "name": "roles/timesyncd/defaults", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/timesyncd/defaults/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "70e22f9a856bea5bdf0674424390422ab6e3a163d39cbc2dc5b5fd1ccd0a220c", + "format": 1 + }, + { + "name": "roles/timesyncd/.ansible-lint", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "a095bbe94265141fb4a0d437fa17d3b8db52938925ad0e88dcaddd3d8a54978f", + "format": 1 + }, + { + "name": "roles/timesyncd/CONTRIBUTING.md", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "af957267f930f931b72fd2dc1d4127af66afe9aea4459075bc1c6b0051109df9", + "format": 1 + }, + { + "name": "roles/timesyncd/templates", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/timesyncd/templates/systemd", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/timesyncd/templates/systemd/timesyncd.conf.j2", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "a0c95ed21a30e01973b1a708befc36e3f36407e2c9a2ad7adbd95d6a09037f09", + "format": 1 + }, + { + "name": "roles/timesyncd/README.md", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "3f489bb477215fd7ad72160c3898c7217ce8128cfab779270cc1d83bf691d50b", + "format": 1 + }, + { + "name": "roles/timesyncd/molecule", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/timesyncd/molecule/configured", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/timesyncd/molecule/configured/converge.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "bfe5ce5451497e2c7a3fd0ccba363f6b95b152ac159aa3b42070c2199a78518d", + "format": 1 + }, + { + "name": "roles/timesyncd/molecule/configured/group_vars", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/timesyncd/molecule/configured/group_vars/all", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/timesyncd/molecule/configured/group_vars/all/vars.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "4c95c3c98336ab18ca62a26f6c0eb05b8d179da9b592415ba4f94b89b1dfe7bf", + "format": 1 + }, + { + "name": "roles/timesyncd/molecule/configured/molecule.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "447e9141327a2a3723f8f99e2a3b6e322ae0964fb4825cdb54af8b3e1722b2d3", + "format": 1 + }, + { + "name": "roles/timesyncd/molecule/configured/prepare.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "4654bd3e55b8b7355de010b11bf80e6fc9d651a782555e25b23d7b28c4d5cc57", + "format": 1 + }, + { + "name": "roles/timesyncd/molecule/configured/tests", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": 
"roles/timesyncd/molecule/configured/tests/test_default.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "4735bbbc370a453559a106403f72b8b6397fdc3dea8b8caf204dc1a4335be232", + "format": 1 + }, + { + "name": "roles/timesyncd/molecule/default", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/timesyncd/molecule/default/converge.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "bfe5ce5451497e2c7a3fd0ccba363f6b95b152ac159aa3b42070c2199a78518d", + "format": 1 + }, + { + "name": "roles/timesyncd/molecule/default/group_vars", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/timesyncd/molecule/default/group_vars/all", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/timesyncd/molecule/default/group_vars/all/vars.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "144e1381858db7e2636d8698b249684097a154c87f486b543020bfdf91c8681c", + "format": 1 + }, + { + "name": "roles/timesyncd/molecule/default/molecule.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "447e9141327a2a3723f8f99e2a3b6e322ae0964fb4825cdb54af8b3e1722b2d3", + "format": 1 + }, + { + "name": "roles/timesyncd/molecule/default/prepare.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "4654bd3e55b8b7355de010b11bf80e6fc9d651a782555e25b23d7b28c4d5cc57", + "format": 1 + }, + { + "name": "roles/timesyncd/molecule/default/tests", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/timesyncd/molecule/default/tests/test_default.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "4735bbbc370a453559a106403f72b8b6397fdc3dea8b8caf204dc1a4335be232", + "format": 1 + }, + { + "name": "roles/timesyncd/.flake8", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "2fe45659318525388b0574a4104028589a94f17e5f6a1cd796efca0bb1a4f510", + "format": 1 + }, + { + "name": "roles/timesyncd/.gitignore", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "88b2262e97da40d844c28af01fe4153605745a11e29348301bf82f9a1a5881dc", + "format": 1 + }, + { + "name": "roles/timesyncd/.editorconfig", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "ed43c7886023384f96a2b69d1288065a2cdc651bb9ffc9d7f2bbda6550f2abd6", + "format": 1 + }, + { + "name": "roles/coredump", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/coredump/.yamllint", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "c0b8af235d23db527385396b1b168b14f5604ad1f82b912f645919b1c95b76a0", + "format": 1 + }, + { + "name": "roles/coredump/vars", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/coredump/vars/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "e21ec7dcbbbf957bd2e3c27ba68ec90f21e29d1a56602b43bad9abea8d9013ef", + "format": 1 + }, + { + "name": "roles/coredump/tasks", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/coredump/tasks/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "36d2c8091d497164bd7b86614e31ecc7bb1f5d840e25f76ff8516ef9cded7183", + "format": 1 + }, + { + "name": "roles/coredump/Makefile", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": 
"f80fec55dc046e6af709666d62fc5fd51f45afdf2e8efe978365fa5d0a069dbd", + "format": 1 + }, + { + "name": "roles/coredump/hooks", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/coredump/hooks/molecule.rc", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "03502d1cecefbfae0c44115de1394c29618b05a3c1488f2dc4bde321aaf782aa", + "format": 1 + }, + { + "name": "roles/coredump/hooks/destroy", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "79c679a67141ef9cbfb56c8c7b948ca3df010c56d0c7af6c46535cde58bac8bb", + "format": 1 + }, + { + "name": "roles/coredump/hooks/verify", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "d3ffa461db1e1516600a5de841029880521aff844c746e5dedf4bebfccdbf39b", + "format": 1 + }, + { + "name": "roles/coredump/hooks/tox.sh", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "be21167cf31ef64dc0dac7d6fee0e2d2ac685874e4e0d455003a5a473b94e0b8", + "format": 1 + }, + { + "name": "roles/coredump/hooks/converge", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "1ddaaac121ea494491ac3419f0be9725405644c2b31781030d236ebc3d930664", + "format": 1 + }, + { + "name": "roles/coredump/hooks/lint", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "fd048a322d0d05e245101fdae1a26a3f6a23c13bbe4f1fc455448672eb59a0a7", + "format": 1 + }, + { + "name": "roles/coredump/hooks/test", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "6137f41add5be2e1a9caa9abd1c1a483c8ce365370fd64eb85dee201a7074403", + "format": 1 + }, + { + "name": "roles/coredump/LICENSE", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "c318b6f42ca9cd8f48502e05161a95a92b154ff5c0647209b208ee9ff9657a04", + "format": 1 + }, + { + "name": "roles/coredump/defaults", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/coredump/defaults/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "f59dd618c29838bf465a6c2607bc3c0535ba59c68c44a6cbd28efe7572222a62", + "format": 1 + }, + { + "name": "roles/coredump/.ansible-lint", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "a095bbe94265141fb4a0d437fa17d3b8db52938925ad0e88dcaddd3d8a54978f", + "format": 1 + }, + { + "name": "roles/coredump/CONTRIBUTING.md", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "af957267f930f931b72fd2dc1d4127af66afe9aea4459075bc1c6b0051109df9", + "format": 1 + }, + { + "name": "roles/coredump/templates", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/coredump/templates/systemd", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/coredump/templates/systemd/coredump.conf.j2", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "96d467215f13d3b76473d3e21a239e13d71df50fba93e48252ab1427d86ef293", + "format": 1 + }, + { + "name": "roles/coredump/README.md", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "add45dbfebdd54c0f8c626cf97ba7d547045865c4001e811ddc3c4c8af96a7c2", + "format": 1 + }, + { + "name": "roles/coredump/molecule", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/coredump/molecule/configured", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/coredump/molecule/configured/converge.yml", + "ftype": "file", + "chksum_type": 
"sha256", + "chksum_sha256": "fd47bbca76da86c85777a0fa5c8f7bb6de41588256d5abda63108501f84f2dd5", + "format": 1 + }, + { + "name": "roles/coredump/molecule/configured/group_vars", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/coredump/molecule/configured/group_vars/all", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/coredump/molecule/configured/group_vars/all/vars.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "edcf85dbd1603f49e5a3b9dadbd31bb1ea6df64e49bfabd3eb5a8e8ecbe273c5", + "format": 1 + }, + { + "name": "roles/coredump/molecule/configured/molecule.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "447e9141327a2a3723f8f99e2a3b6e322ae0964fb4825cdb54af8b3e1722b2d3", + "format": 1 + }, + { + "name": "roles/coredump/molecule/configured/prepare.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "4654bd3e55b8b7355de010b11bf80e6fc9d651a782555e25b23d7b28c4d5cc57", + "format": 1 + }, + { + "name": "roles/coredump/molecule/configured/tests", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/coredump/molecule/configured/tests/test_default.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "8f8c041b163db0d75fc4af739a22bed911eef91712e5d9d5a0bfd60fe5d62c84", + "format": 1 + }, + { + "name": "roles/coredump/molecule/default", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/coredump/molecule/default/converge.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "fd47bbca76da86c85777a0fa5c8f7bb6de41588256d5abda63108501f84f2dd5", + "format": 1 + }, + { + "name": "roles/coredump/molecule/default/group_vars", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/coredump/molecule/default/group_vars/all", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/coredump/molecule/default/group_vars/all/vars.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "144e1381858db7e2636d8698b249684097a154c87f486b543020bfdf91c8681c", + "format": 1 + }, + { + "name": "roles/coredump/molecule/default/molecule.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "447e9141327a2a3723f8f99e2a3b6e322ae0964fb4825cdb54af8b3e1722b2d3", + "format": 1 + }, + { + "name": "roles/coredump/molecule/default/prepare.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "4654bd3e55b8b7355de010b11bf80e6fc9d651a782555e25b23d7b28c4d5cc57", + "format": 1 + }, + { + "name": "roles/coredump/molecule/default/tests", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/coredump/molecule/default/tests/test_default.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "8f8c041b163db0d75fc4af739a22bed911eef91712e5d9d5a0bfd60fe5d62c84", + "format": 1 + }, + { + "name": "roles/coredump/.flake8", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "2fe45659318525388b0574a4104028589a94f17e5f6a1cd796efca0bb1a4f510", + "format": 1 + }, + { + "name": "roles/coredump/.gitignore", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "88b2262e97da40d844c28af01fe4153605745a11e29348301bf82f9a1a5881dc", + "format": 1 + }, + { + "name": "roles/coredump/.editorconfig", + "ftype": "file", 
+ "chksum_type": "sha256", + "chksum_sha256": "ed43c7886023384f96a2b69d1288065a2cdc651bb9ffc9d7f2bbda6550f2abd6", + "format": 1 + }, + { + "name": "roles/logind", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/logind/.yamllint", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "c0b8af235d23db527385396b1b168b14f5604ad1f82b912f645919b1c95b76a0", + "format": 1 + }, + { + "name": "roles/logind/vars", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/logind/vars/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "b6621b951659f12227f64e83a6816716e6d4f8883ea7de5d030c6466b2501fce", + "format": 1 + }, + { + "name": "roles/logind/tasks", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/logind/tasks/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "0bceaf0381c26bce36cd29792e5b69c980548c91fad7269229801fdecc07851f", + "format": 1 + }, + { + "name": "roles/logind/Makefile", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "f80fec55dc046e6af709666d62fc5fd51f45afdf2e8efe978365fa5d0a069dbd", + "format": 1 + }, + { + "name": "roles/logind/hooks", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/logind/hooks/molecule.rc", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "03502d1cecefbfae0c44115de1394c29618b05a3c1488f2dc4bde321aaf782aa", + "format": 1 + }, + { + "name": "roles/logind/hooks/destroy", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "79c679a67141ef9cbfb56c8c7b948ca3df010c56d0c7af6c46535cde58bac8bb", + "format": 1 + }, + { + "name": "roles/logind/hooks/verify", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "d3ffa461db1e1516600a5de841029880521aff844c746e5dedf4bebfccdbf39b", + "format": 1 + }, + { + "name": "roles/logind/hooks/tox.sh", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "be21167cf31ef64dc0dac7d6fee0e2d2ac685874e4e0d455003a5a473b94e0b8", + "format": 1 + }, + { + "name": "roles/logind/hooks/converge", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "1ddaaac121ea494491ac3419f0be9725405644c2b31781030d236ebc3d930664", + "format": 1 + }, + { + "name": "roles/logind/hooks/lint", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "fd048a322d0d05e245101fdae1a26a3f6a23c13bbe4f1fc455448672eb59a0a7", + "format": 1 + }, + { + "name": "roles/logind/hooks/test", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "6137f41add5be2e1a9caa9abd1c1a483c8ce365370fd64eb85dee201a7074403", + "format": 1 + }, + { + "name": "roles/logind/LICENSE", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "c318b6f42ca9cd8f48502e05161a95a92b154ff5c0647209b208ee9ff9657a04", + "format": 1 + }, + { + "name": "roles/logind/defaults", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/logind/defaults/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "517edab7cefdf47554618f855fdbb3e87e8e986bb0a971adffc75c7e8d267574", + "format": 1 + }, + { + "name": "roles/logind/.ansible-lint", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "a095bbe94265141fb4a0d437fa17d3b8db52938925ad0e88dcaddd3d8a54978f", + "format": 1 + }, + { + "name": "roles/logind/CONTRIBUTING.md", + "ftype": "file", + "chksum_type": 
"sha256", + "chksum_sha256": "af957267f930f931b72fd2dc1d4127af66afe9aea4459075bc1c6b0051109df9", + "format": 1 + }, + { + "name": "roles/logind/templates", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/logind/templates/systemd", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/logind/templates/systemd/logind.conf.j2", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "9a2184c19989fe28116d4ad09d8877001ecfa153b8ac624ac24ffc5a20ef7ffd", + "format": 1 + }, + { + "name": "roles/logind/README.md", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", + "format": 1 + }, + { + "name": "roles/logind/molecule", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/logind/molecule/configured", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/logind/molecule/configured/converge.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "7168f2d379c010583df33e1194f7d36844b668360c78175030f541763a2f2d80", + "format": 1 + }, + { + "name": "roles/logind/molecule/configured/group_vars", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/logind/molecule/configured/group_vars/all", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/logind/molecule/configured/group_vars/all/vars.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "7d8923fde8aaa7872f17905280335cd7106b91130e6ccb0cf4477421d916781d", + "format": 1 + }, + { + "name": "roles/logind/molecule/configured/molecule.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "447e9141327a2a3723f8f99e2a3b6e322ae0964fb4825cdb54af8b3e1722b2d3", + "format": 1 + }, + { + "name": "roles/logind/molecule/configured/prepare.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "4654bd3e55b8b7355de010b11bf80e6fc9d651a782555e25b23d7b28c4d5cc57", + "format": 1 + }, + { + "name": "roles/logind/molecule/configured/tests", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/logind/molecule/configured/tests/test_default.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "aafe2897f25733a4664e623dc4efd8287873910d3a277ed13b4e8ede972aebe0", + "format": 1 + }, + { + "name": "roles/logind/molecule/default", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/logind/molecule/default/converge.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "7168f2d379c010583df33e1194f7d36844b668360c78175030f541763a2f2d80", + "format": 1 + }, + { + "name": "roles/logind/molecule/default/group_vars", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/logind/molecule/default/group_vars/all", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/logind/molecule/default/group_vars/all/vars.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "144e1381858db7e2636d8698b249684097a154c87f486b543020bfdf91c8681c", + "format": 1 + }, + { + "name": "roles/logind/molecule/default/molecule.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": 
"447e9141327a2a3723f8f99e2a3b6e322ae0964fb4825cdb54af8b3e1722b2d3", + "format": 1 + }, + { + "name": "roles/logind/molecule/default/prepare.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "4654bd3e55b8b7355de010b11bf80e6fc9d651a782555e25b23d7b28c4d5cc57", + "format": 1 + }, + { + "name": "roles/logind/molecule/default/tests", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/logind/molecule/default/tests/test_default.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "aafe2897f25733a4664e623dc4efd8287873910d3a277ed13b4e8ede972aebe0", + "format": 1 + }, + { + "name": "roles/logind/.flake8", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "2fe45659318525388b0574a4104028589a94f17e5f6a1cd796efca0bb1a4f510", + "format": 1 + }, + { + "name": "roles/logind/.gitignore", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "88b2262e97da40d844c28af01fe4153605745a11e29348301bf82f9a1a5881dc", + "format": 1 + }, + { + "name": "roles/logind/.editorconfig", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "ed43c7886023384f96a2b69d1288065a2cdc651bb9ffc9d7f2bbda6550f2abd6", + "format": 1 + }, + { + "name": "roles/networkd", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/networkd/.yamllint", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "c0b8af235d23db527385396b1b168b14f5604ad1f82b912f645919b1c95b76a0", + "format": 1 + }, + { + "name": "roles/networkd/vars", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/networkd/vars/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "282d32ad567ded27ab3271cb25889fc2e00aa46d6fe7fde0546572127d4bf49b", + "format": 1 + }, + { + "name": "roles/networkd/tasks", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/networkd/tasks/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "716a7fcb7b9e42382e1303c1e91b9e55ff4f97113f1f8d7954805b77fb26417f", + "format": 1 + }, + { + "name": "roles/networkd/Makefile", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "f80fec55dc046e6af709666d62fc5fd51f45afdf2e8efe978365fa5d0a069dbd", + "format": 1 + }, + { + "name": "roles/networkd/hooks", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/networkd/hooks/molecule.rc", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "03502d1cecefbfae0c44115de1394c29618b05a3c1488f2dc4bde321aaf782aa", + "format": 1 + }, + { + "name": "roles/networkd/hooks/destroy", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "79c679a67141ef9cbfb56c8c7b948ca3df010c56d0c7af6c46535cde58bac8bb", + "format": 1 + }, + { + "name": "roles/networkd/hooks/verify", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "d3ffa461db1e1516600a5de841029880521aff844c746e5dedf4bebfccdbf39b", + "format": 1 + }, + { + "name": "roles/networkd/hooks/tox.sh", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "be21167cf31ef64dc0dac7d6fee0e2d2ac685874e4e0d455003a5a473b94e0b8", + "format": 1 + }, + { + "name": "roles/networkd/hooks/converge", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "1ddaaac121ea494491ac3419f0be9725405644c2b31781030d236ebc3d930664", + "format": 1 + }, + { + "name": "roles/networkd/hooks/lint", + "ftype": 
"file", + "chksum_type": "sha256", + "chksum_sha256": "fd048a322d0d05e245101fdae1a26a3f6a23c13bbe4f1fc455448672eb59a0a7", + "format": 1 + }, + { + "name": "roles/networkd/hooks/test", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "6137f41add5be2e1a9caa9abd1c1a483c8ce365370fd64eb85dee201a7074403", + "format": 1 + }, + { + "name": "roles/networkd/LICENSE", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "c318b6f42ca9cd8f48502e05161a95a92b154ff5c0647209b208ee9ff9657a04", + "format": 1 + }, + { + "name": "roles/networkd/defaults", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/networkd/defaults/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "8ccc4966eb5de350a1c481c9f45ee386738a6a3af8eb86535e40c7770b23913b", + "format": 1 + }, + { + "name": "roles/networkd/.ansible-lint", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "a095bbe94265141fb4a0d437fa17d3b8db52938925ad0e88dcaddd3d8a54978f", + "format": 1 + }, + { + "name": "roles/networkd/CONTRIBUTING.md", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "af957267f930f931b72fd2dc1d4127af66afe9aea4459075bc1c6b0051109df9", + "format": 1 + }, + { + "name": "roles/networkd/templates", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/networkd/templates/systemd", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/networkd/templates/systemd/networkd.conf.j2", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "3c3252f2ddce87de9b97263d3e89a9baf576ab1f524d06361fabd097aff1a9cb", + "format": 1 + }, + { + "name": "roles/networkd/README.md", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "74ab8e66c8f38e54a8f1d307d6a211acd5bfd015b5038daba6ea4dc5cefa3402", + "format": 1 + }, + { + "name": "roles/networkd/molecule", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/networkd/molecule/configured", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/networkd/molecule/configured/converge.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "6b4872d31d66f2f37156d638d53d3b66727d4b85ac829af0ee61e6230d83a24b", + "format": 1 + }, + { + "name": "roles/networkd/molecule/configured/group_vars", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/networkd/molecule/configured/group_vars/all", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/networkd/molecule/configured/group_vars/all/vars.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "cbe404f0d06fd6c6aafd5b8cfb68796094f4cf9c386d8df3a95509aa6b77a747", + "format": 1 + }, + { + "name": "roles/networkd/molecule/configured/molecule.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "447e9141327a2a3723f8f99e2a3b6e322ae0964fb4825cdb54af8b3e1722b2d3", + "format": 1 + }, + { + "name": "roles/networkd/molecule/configured/prepare.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "4654bd3e55b8b7355de010b11bf80e6fc9d651a782555e25b23d7b28c4d5cc57", + "format": 1 + }, + { + "name": "roles/networkd/molecule/configured/tests", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": 
"roles/networkd/molecule/configured/tests/test_default.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "91c06f1a68805b12ecf02a5396b7779eb7ed52f3ac74566c0782c7137a8c2cb1", + "format": 1 + }, + { + "name": "roles/networkd/molecule/default", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/networkd/molecule/default/converge.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "6b4872d31d66f2f37156d638d53d3b66727d4b85ac829af0ee61e6230d83a24b", + "format": 1 + }, + { + "name": "roles/networkd/molecule/default/group_vars", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/networkd/molecule/default/group_vars/all", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/networkd/molecule/default/group_vars/all/vars.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "144e1381858db7e2636d8698b249684097a154c87f486b543020bfdf91c8681c", + "format": 1 + }, + { + "name": "roles/networkd/molecule/default/molecule.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "447e9141327a2a3723f8f99e2a3b6e322ae0964fb4825cdb54af8b3e1722b2d3", + "format": 1 + }, + { + "name": "roles/networkd/molecule/default/prepare.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "4654bd3e55b8b7355de010b11bf80e6fc9d651a782555e25b23d7b28c4d5cc57", + "format": 1 + }, + { + "name": "roles/networkd/molecule/default/tests", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/networkd/molecule/default/tests/test_default.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "91c06f1a68805b12ecf02a5396b7779eb7ed52f3ac74566c0782c7137a8c2cb1", + "format": 1 + }, + { + "name": "roles/networkd/.flake8", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "2fe45659318525388b0574a4104028589a94f17e5f6a1cd796efca0bb1a4f510", + "format": 1 + }, + { + "name": "roles/networkd/.gitignore", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "88b2262e97da40d844c28af01fe4153605745a11e29348301bf82f9a1a5881dc", + "format": 1 + }, + { + "name": "roles/networkd/.editorconfig", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "ed43c7886023384f96a2b69d1288065a2cdc651bb9ffc9d7f2bbda6550f2abd6", + "format": 1 + }, + { + "name": "roles/pstore", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/pstore/.yamllint", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "c0b8af235d23db527385396b1b168b14f5604ad1f82b912f645919b1c95b76a0", + "format": 1 + }, + { + "name": "roles/pstore/vars", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/pstore/vars/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "5182b72713f033a38731804012e46b6f3e515d01b971bb64384f079113acf377", + "format": 1 + }, + { + "name": "roles/pstore/tasks", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/pstore/tasks/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "ca37e390303504dabfbb908072532fe33a2926581205b895857543ecc5f75a1a", + "format": 1 + }, + { + "name": "roles/pstore/Makefile", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": 
"f80fec55dc046e6af709666d62fc5fd51f45afdf2e8efe978365fa5d0a069dbd", + "format": 1 + }, + { + "name": "roles/pstore/hooks", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/pstore/hooks/molecule.rc", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "03502d1cecefbfae0c44115de1394c29618b05a3c1488f2dc4bde321aaf782aa", + "format": 1 + }, + { + "name": "roles/pstore/hooks/destroy", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "79c679a67141ef9cbfb56c8c7b948ca3df010c56d0c7af6c46535cde58bac8bb", + "format": 1 + }, + { + "name": "roles/pstore/hooks/verify", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "d3ffa461db1e1516600a5de841029880521aff844c746e5dedf4bebfccdbf39b", + "format": 1 + }, + { + "name": "roles/pstore/hooks/tox.sh", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "be21167cf31ef64dc0dac7d6fee0e2d2ac685874e4e0d455003a5a473b94e0b8", + "format": 1 + }, + { + "name": "roles/pstore/hooks/converge", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "1ddaaac121ea494491ac3419f0be9725405644c2b31781030d236ebc3d930664", + "format": 1 + }, + { + "name": "roles/pstore/hooks/lint", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "fd048a322d0d05e245101fdae1a26a3f6a23c13bbe4f1fc455448672eb59a0a7", + "format": 1 + }, + { + "name": "roles/pstore/hooks/test", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "6137f41add5be2e1a9caa9abd1c1a483c8ce365370fd64eb85dee201a7074403", + "format": 1 + }, + { + "name": "roles/pstore/LICENSE", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "c318b6f42ca9cd8f48502e05161a95a92b154ff5c0647209b208ee9ff9657a04", + "format": 1 + }, + { + "name": "roles/pstore/defaults", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/pstore/defaults/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "37997eefc9817bbf9336baa5b9cf7d8e6c60d4cde01a1ddc4a2c23173f0e757e", + "format": 1 + }, + { + "name": "roles/pstore/.ansible-lint", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "a095bbe94265141fb4a0d437fa17d3b8db52938925ad0e88dcaddd3d8a54978f", + "format": 1 + }, + { + "name": "roles/pstore/CONTRIBUTING.md", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "af957267f930f931b72fd2dc1d4127af66afe9aea4459075bc1c6b0051109df9", + "format": 1 + }, + { + "name": "roles/pstore/templates", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/pstore/templates/systemd", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/pstore/templates/systemd/pstore.conf.j2", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "403b8d706220c1c9ad20fec9e628cb48958861174b985730ba22c47503999057", + "format": 1 + }, + { + "name": "roles/pstore/README.md", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "75ecd48077861247894af6626d51451f0fb7ea32a4be9639e10f7c7345eadd44", + "format": 1 + }, + { + "name": "roles/pstore/molecule", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/pstore/molecule/configured", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/pstore/molecule/configured/converge.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": 
"44f90470bb9239c80a2cead5912ace0251cf8041b513b6d32988a7a1d1345844", + "format": 1 + }, + { + "name": "roles/pstore/molecule/configured/group_vars", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/pstore/molecule/configured/group_vars/all", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/pstore/molecule/configured/group_vars/all/vars.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "823fce07ac92ad770ec4cd55bd8b1bbeca6c669cc0a3354bb139b35f07a77cce", + "format": 1 + }, + { + "name": "roles/pstore/molecule/configured/molecule.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "447e9141327a2a3723f8f99e2a3b6e322ae0964fb4825cdb54af8b3e1722b2d3", + "format": 1 + }, + { + "name": "roles/pstore/molecule/configured/prepare.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "4654bd3e55b8b7355de010b11bf80e6fc9d651a782555e25b23d7b28c4d5cc57", + "format": 1 + }, + { + "name": "roles/pstore/molecule/configured/tests", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/pstore/molecule/configured/tests/test_default.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "acd9cc800442bee32584991e504152bf3995bf009c6b5562a339fdd86d52c4ff", + "format": 1 + }, + { + "name": "roles/pstore/molecule/default", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/pstore/molecule/default/converge.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "44f90470bb9239c80a2cead5912ace0251cf8041b513b6d32988a7a1d1345844", + "format": 1 + }, + { + "name": "roles/pstore/molecule/default/group_vars", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/pstore/molecule/default/group_vars/all", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/pstore/molecule/default/group_vars/all/vars.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "144e1381858db7e2636d8698b249684097a154c87f486b543020bfdf91c8681c", + "format": 1 + }, + { + "name": "roles/pstore/molecule/default/molecule.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "447e9141327a2a3723f8f99e2a3b6e322ae0964fb4825cdb54af8b3e1722b2d3", + "format": 1 + }, + { + "name": "roles/pstore/molecule/default/prepare.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "4654bd3e55b8b7355de010b11bf80e6fc9d651a782555e25b23d7b28c4d5cc57", + "format": 1 + }, + { + "name": "roles/pstore/molecule/default/tests", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/pstore/molecule/default/tests/test_default.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "acd9cc800442bee32584991e504152bf3995bf009c6b5562a339fdd86d52c4ff", + "format": 1 + }, + { + "name": "roles/pstore/.flake8", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "2fe45659318525388b0574a4104028589a94f17e5f6a1cd796efca0bb1a4f510", + "format": 1 + }, + { + "name": "roles/pstore/.gitignore", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "88b2262e97da40d844c28af01fe4153605745a11e29348301bf82f9a1a5881dc", + "format": 1 + }, + { + "name": "roles/pstore/.editorconfig", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": 
"ed43c7886023384f96a2b69d1288065a2cdc651bb9ffc9d7f2bbda6550f2abd6", + "format": 1 + }, + { + "name": "roles/user", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/user/.yamllint", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "c0b8af235d23db527385396b1b168b14f5604ad1f82b912f645919b1c95b76a0", + "format": 1 + }, + { + "name": "roles/user/vars", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/user/vars/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "d91624bfe0081b3238dbb965a7642a832a3b034ab7062d6f14f6e3e80cfc09ed", + "format": 1 + }, + { + "name": "roles/user/tasks", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/user/tasks/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "3a78de66afb132364698564c3812ba2eda3bc8125140951b39c6b76d3c60a449", + "format": 1 + }, + { + "name": "roles/user/Makefile", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "f80fec55dc046e6af709666d62fc5fd51f45afdf2e8efe978365fa5d0a069dbd", + "format": 1 + }, + { + "name": "roles/user/hooks", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/user/hooks/molecule.rc", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "03502d1cecefbfae0c44115de1394c29618b05a3c1488f2dc4bde321aaf782aa", + "format": 1 + }, + { + "name": "roles/user/hooks/destroy", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "79c679a67141ef9cbfb56c8c7b948ca3df010c56d0c7af6c46535cde58bac8bb", + "format": 1 + }, + { + "name": "roles/user/hooks/verify", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "d3ffa461db1e1516600a5de841029880521aff844c746e5dedf4bebfccdbf39b", + "format": 1 + }, + { + "name": "roles/user/hooks/tox.sh", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "be21167cf31ef64dc0dac7d6fee0e2d2ac685874e4e0d455003a5a473b94e0b8", + "format": 1 + }, + { + "name": "roles/user/hooks/converge", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "1ddaaac121ea494491ac3419f0be9725405644c2b31781030d236ebc3d930664", + "format": 1 + }, + { + "name": "roles/user/hooks/lint", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "fd048a322d0d05e245101fdae1a26a3f6a23c13bbe4f1fc455448672eb59a0a7", + "format": 1 + }, + { + "name": "roles/user/hooks/test", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "6137f41add5be2e1a9caa9abd1c1a483c8ce365370fd64eb85dee201a7074403", + "format": 1 + }, + { + "name": "roles/user/LICENSE", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "c318b6f42ca9cd8f48502e05161a95a92b154ff5c0647209b208ee9ff9657a04", + "format": 1 + }, + { + "name": "roles/user/defaults", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/user/defaults/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "79108f366f6c1ed8197ef3527bf9d9dac15fcd3e5d00cb2b838c15c7529ed2b9", + "format": 1 + }, + { + "name": "roles/user/.ansible-lint", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "a095bbe94265141fb4a0d437fa17d3b8db52938925ad0e88dcaddd3d8a54978f", + "format": 1 + }, + { + "name": "roles/user/CONTRIBUTING.md", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": 
"af957267f930f931b72fd2dc1d4127af66afe9aea4459075bc1c6b0051109df9", + "format": 1 + }, + { + "name": "roles/user/templates", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/user/templates/systemd", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/user/templates/systemd/user.conf.j2", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "95e6f5e9184c7141670337179f6ef3357df7d6c11ce60f3f0863b9bafb677195", + "format": 1 + }, + { + "name": "roles/user/README.md", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "b76c24b2010244b42d312010b48d8104b1d8b82d5b7d8e68ee50a97ffb94f936", + "format": 1 + }, + { + "name": "roles/user/molecule", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/user/molecule/configured", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/user/molecule/configured/converge.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "26787fd8d0ff677f59fd28d0bf20a22f4ddeb17ad27863fc54546efd911affea", + "format": 1 + }, + { + "name": "roles/user/molecule/configured/group_vars", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/user/molecule/configured/group_vars/all", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/user/molecule/configured/group_vars/all/vars.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "5ae14b547ff6fd76ed2e160a38890bdabd95137e189c56ee02cdba944365edd8", + "format": 1 + }, + { + "name": "roles/user/molecule/configured/molecule.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "447e9141327a2a3723f8f99e2a3b6e322ae0964fb4825cdb54af8b3e1722b2d3", + "format": 1 + }, + { + "name": "roles/user/molecule/configured/prepare.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "4654bd3e55b8b7355de010b11bf80e6fc9d651a782555e25b23d7b28c4d5cc57", + "format": 1 + }, + { + "name": "roles/user/molecule/configured/tests", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/user/molecule/configured/tests/test_default.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "b589fcb7dfcaa727632ce53dbde43ee573b3bb5fa4008d05f29a40a79218ab42", + "format": 1 + }, + { + "name": "roles/user/molecule/default", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/user/molecule/default/converge.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "26787fd8d0ff677f59fd28d0bf20a22f4ddeb17ad27863fc54546efd911affea", + "format": 1 + }, + { + "name": "roles/user/molecule/default/group_vars", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/user/molecule/default/group_vars/all", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/user/molecule/default/group_vars/all/vars.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "144e1381858db7e2636d8698b249684097a154c87f486b543020bfdf91c8681c", + "format": 1 + }, + { + "name": "roles/user/molecule/default/molecule.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "447e9141327a2a3723f8f99e2a3b6e322ae0964fb4825cdb54af8b3e1722b2d3", + 
"format": 1 + }, + { + "name": "roles/user/molecule/default/prepare.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "4654bd3e55b8b7355de010b11bf80e6fc9d651a782555e25b23d7b28c4d5cc57", + "format": 1 + }, + { + "name": "roles/user/molecule/default/tests", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "roles/user/molecule/default/tests/test_default.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "b589fcb7dfcaa727632ce53dbde43ee573b3bb5fa4008d05f29a40a79218ab42", + "format": 1 + }, + { + "name": "roles/user/.flake8", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "2fe45659318525388b0574a4104028589a94f17e5f6a1cd796efca0bb1a4f510", + "format": 1 + }, + { + "name": "roles/user/.gitignore", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "88b2262e97da40d844c28af01fe4153605745a11e29348301bf82f9a1a5881dc", + "format": 1 + }, + { + "name": "roles/user/.editorconfig", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "ed43c7886023384f96a2b69d1288065a2cdc651bb9ffc9d7f2bbda6550f2abd6", + "format": 1 + } + ], + "format": 1 +} \ No newline at end of file diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/systemd/LICENSE b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/LICENSE new file mode 100644 index 0000000..261eeb9 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. 
For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/systemd/MANIFEST.json b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/MANIFEST.json new file mode 100644 index 0000000..367ce2c --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/MANIFEST.json @@ -0,0 +1,36 @@ +{ + "collection_info": { + "namespace": "bodsch", + "name": "systemd", + "version": "1.4.0", + "authors": [ + "Bodo Schulz " + ], + "readme": "README.md", + "tags": [ + "linux", + "systemd", + "system" + ], + "description": "A collection of Ansible roles to manage systemd configurations.", + "license": [ + "Apache-2.0" + ], + "license_file": null, + "dependencies": { + "bodsch.core": ">=1.0.18" + }, + "repository": "https://github.com/bodsch/ansible-collection-systemd", + "documentation": "https://github.com/bodsch/ansible-collection-systemd/README.md", + "homepage": "https://github.com/bodsch/ansible-collection-systemd", + "issues": "https://github.com/bodsch/ansible-collection-systemd/issues" + }, + "file_manifest_file": { + "name": "FILES.json", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "81019d23a3238303bff7f19d2cf87c435345543ade0f21e11ba2eb87dfd1c597", + "format": 1 + }, + "format": 1 +} \ No newline at end of file diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/systemd/Makefile b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/Makefile new file mode 100644 index 0000000..883bc91 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/Makefile @@ -0,0 +1,43 @@ +# +export COLLECTION_ROLE ?= +export COLLECTION_SCENARIO ?= default +export TOX_ANSIBLE ?= ansible_9.5 +export TOX_SILENCE ?= true +# -------------------------------------------------------- + +LANG := C.UTF-8 +TEMP_REPO_URL := http://git.boone-schulz.de/ansible/ansible-hooks.git +TEMP_REPO_PATH := collections/hooks +TARGET_DIR := hooks +CACHE_DIR := $(HOME)/.cache/ansible/ansible-hooks + +# -------------------------------------------------------- + +# All targets that simply invoke a script in hooks/ +HOOKS := install uninstall doc prepare converge destroy verify idempotence test lint gh-clean +TARGET_DIR := hooks + +.SILENT: hooks-ready +.PHONY: $(HOOKS) +.ONESHELL: +.DEFAULT_GOAL := converge + +$(HOOKS): | hooks-ready + @hooks/$@ + +hooks-ready: + @if [ ! -d "hooks" ] || [ -z "$$(ls -A 'hooks' 2>/dev/null)" ]; then \ + $(MAKE) --no-print-directory fetch-hooks >/dev/null 2>&1; \ + fi + +fetch-hooks: + @if [ -d "$(CACHE_DIR)/.git" ]; then + git -C "$(CACHE_DIR)" fetch --depth=1 --prune origin + def=$$(git -C "$(CACHE_DIR)" remote show origin | awk '/HEAD branch/ {print "origin/"$$NF}') + git -C "$(CACHE_DIR)" reset --hard "$$def" + else + mkdir -p "$(dir $(CACHE_DIR))" + GIT_TERMINAL_PROMPT=0 git clone --depth 1 "$(TEMP_REPO_URL)" "$(CACHE_DIR)" + fi + @mkdir -p "$(TARGET_DIR)" + @rsync -a --delete "$(CACHE_DIR)/$(TEMP_REPO_PATH)/" "$(TARGET_DIR)/" diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/systemd/README.md b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/README.md new file mode 100644 index 0000000..7c15108 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/README.md @@ -0,0 +1,107 @@ +# Ansible Collection - bodsch.systemd + +Documentation for the collection.
+ +## Roles + +| Role | | Description | +| :---- | :---- | :---- | +| [bodsch.systemd.coredump](./roles/coredump/README.md) | [![GitHub Workflow Status](https://img.shields.io/github/actions/workflow/status/bodsch/ansible-collection-systemd/coredump.yml?branch=main)][coredump] | configure systemd-coredump | +| [bodsch.systemd.homed](./roles/homed/README.md) | [![GitHub Workflow Status](https://img.shields.io/github/actions/workflow/status/bodsch/ansible-collection-systemd/homed.yml?branch=main)][homed] | configure systemd-homed | +| [bodsch.systemd.journald](./roles/journald/README.md) | [![GitHub Workflow Status](https://img.shields.io/github/actions/workflow/status/bodsch/ansible-collection-systemd/journald.yml?branch=main)][journald] | configure systemd-journald | +| [bodsch.systemd.oomd](./roles/oomd/README.md) | [![GitHub Workflow Status](https://img.shields.io/github/actions/workflow/status/bodsch/ansible-collection-systemd/oomd.yml?branch=main)][oomd] | configure systemd-oomd | +| [bodsch.systemd.logind](./roles/logind/README.md) | [![GitHub Workflow Status](https://img.shields.io/github/actions/workflow/status/bodsch/ansible-collection-systemd/logind.yml?branch=main)][logind] | configure systemd-logind | +| [bodsch.systemd.networkd](./roles/networkd/README.md) | [![GitHub Workflow Status](https://img.shields.io/github/actions/workflow/status/bodsch/ansible-collection-systemd/networkd.yml?branch=main)][networkd] | configure systemd-networkd | +| [bodsch.systemd.resolved](./roles/resolved/README.md) | [![GitHub Workflow Status](https://img.shields.io/github/actions/workflow/status/bodsch/ansible-collection-systemd/resolved.yml?branch=main)][resolved] | configure systemd-resolved | +| [bodsch.systemd.system](./roles/system/README.md) | [![GitHub Workflow Status](https://img.shields.io/github/actions/workflow/status/bodsch/ansible-collection-systemd/system.yml?branch=main)][system] | configure systemd-system | +| [bodsch.systemd.timesyncd](./roles/timesyncd/README.md) | [![GitHub Workflow Status](https://img.shields.io/github/actions/workflow/status/bodsch/ansible-collection-systemd/timesyncd.yml?branch=main)][timesyncd] | configure systemd-timesyncd | +| [bodsch.systemd.user](./roles/user/README.md) | [![GitHub Workflow Status](https://img.shields.io/github/actions/workflow/status/bodsch/ansible-collection-systemd/user.yml?branch=main)][user] | configure systemd-user | + +[coredump]: https://github.com/bodsch/ansible-collection-systemd/actions/workflows/coredump.yml +[homed]: https://github.com/bodsch/ansible-collection-systemd/actions/workflows/homed.yml +[journald]: https://github.com/bodsch/ansible-collection-systemd/actions/workflows/journald.yml +[oomd]: https://github.com/bodsch/ansible-collection-systemd/actions/workflows/oomd.yml +[logind]: https://github.com/bodsch/ansible-collection-systemd/actions/workflows/logind.yml +[networkd]: https://github.com/bodsch/ansible-collection-systemd/actions/workflows/networkd.yml +[resolved]: https://github.com/bodsch/ansible-collection-systemd/actions/workflows/resolved.yml +[system]: https://github.com/bodsch/ansible-collection-systemd/actions/workflows/system.yml +[timesyncd]: https://github.com/bodsch/ansible-collection-systemd/actions/workflows/timesyncd.yml +[user]: https://github.com/bodsch/ansible-collection-systemd/actions/workflows/user.yml + + +## Included content + +### Modules + +| Name | Description | +|:--------------------------|:----| +| [bodsch.systemd.journalctl](./plugins/modules/journalctl.py) | Query the systemd
journal with a very limited number of possible parameters | +| [bodsch.systemd.unit_file](./plugins/modules/unit_file.py) | This can be used to create a systemd unit file. The `service`, `timer` and `socket` types are supported. | +| [bodsch.systemd.systemd_timer](./plugins/modules/systemd_timer.py) | This can be used to create a systemd timer file. | + + +## Installing this collection + +You can install the bodsch.systemd collection with the Ansible Galaxy CLI: + +```sh +#> ansible-galaxy collection install bodsch.systemd +``` + +To install directly from GitHub: + +```sh +#> ansible-galaxy collection install git@github.com:bodsch/ansible-collection-systemd.git +``` + + +You can also include it in a `requirements.yml` file and install it with `ansible-galaxy collection install -r requirements.yml`, using the format: + +```yaml +--- +collections: + - name: bodsch.systemd +``` + +The Python module dependencies are not installed by `ansible-galaxy`. They can +be manually installed using pip: + +```sh +#> pip install -r requirements.txt +``` + +## Using this collection + + +You can either call modules by their Fully Qualified Collection Name (FQCN), such as `bodsch.systemd.journalctl`, +or you can call modules by their short name if you list the `bodsch.systemd` collection in the playbook's `collections` keyword: + +```yaml +--- +- name: query the systemd journal + bodsch.systemd.journalctl: + # the module options are omitted here; see the module + # documentation for the supported parameters +``` + + +## Contribution + +Please read [Contribution](CONTRIBUTING.md) + +## Development, Branches (Git Tags) + +The `master` branch is my *working horse*; it includes the "latest, hot shit" and can be completely broken! + +If you want to use something stable, please use a [Tagged Version](https://github.com/bodsch/ansible-collection-systemd/tags)!
+ + +## Author + +- Bodo Schulz + +## License + +[Apache](LICENSE) + +**FREE SOFTWARE, HELL YEAH!** diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/systemd/meta/runtime.yml b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/meta/runtime.yml new file mode 100644 index 0000000..2e0d81e --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/meta/runtime.yml @@ -0,0 +1,3 @@ +--- + +requires_ansible: '>=2.9.0' diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/systemd/plugins/filter/lists.py b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/plugins/filter/lists.py new file mode 100644 index 0000000..19e4205 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/plugins/filter/lists.py @@ -0,0 +1,29 @@ +# python 3 headers, required if submitting to Ansible +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +from ansible.utils.display import Display + +display = Display() + + +class FilterModule(object): + """Filter plugin that validates list entries against a whitelist.""" + + def filters(self): + return { + "valid_list": self.valid_list, + } + + def valid_list(self, data, valid_entries): + """Return the sorted intersection of data and valid_entries.""" + # display.v(f"valid_list(self, {data}, {valid_entries})") + result = [] + if isinstance(data, list): + # the set intersection below ignores ordering, so the inputs + # are deliberately not sorted (or otherwise modified) in place + result = list(set(data).intersection(valid_entries)) + result.sort() + # display.v(f"=result: {result}") + return result diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/systemd/plugins/filter/systemd.py b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/plugins/filter/systemd.py new file mode 100644 index 0000000..1e80d42 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/plugins/filter/systemd.py @@ -0,0 +1,36 @@ +# python 3 headers, required if submitting to Ansible +from __future__ import absolute_import, division, print_function + +import re + +from ansible.utils.display import Display + +__metaclass__ = type + +display = Display() + + +class FilterModule(object): + """Filter plugin that looks up systemd units in service facts.""" + + def filters(self): + return { + "service": self.get_service, + } + + def get_service(self, data, search_for, unit_type="service", state="running"): + """Return the name of the first unit matching search_for that is in the given state, stripped of its unit-type suffix.""" + name = None + regex_list_compiled = re.compile(f"^{search_for}.*") + + match = {k: v for k, v in data.items() if re.match(regex_list_compiled, k)} + + # display.vv(f"found: {match} {type(match)} {len(match)}") + + if isinstance(match, dict) and len(match) > 0: + values = list(match.values())[0] + if values.get("state") == state: + name = values.get("name", search_for).replace(f".{unit_type}", "") + + # display.vv(f"= result {name}") + return name diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/systemd/plugins/module_utils/helper.py b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/plugins/module_utils/helper.py new file mode 100644 index 0000000..88e462e --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/plugins/module_utils/helper.py @@ -0,0 +1,185 @@ +from typing import Any, List, Optional + +from ansible_collections.bodsch.systemd.plugins.module_utils.static import ( + VALID_WEEKDAY_TOKENS, + WEEKDAY_ALIASES, +) + + +def snake_to_systemd(key: str) -> str: + """ + Convert a snake_case or mixed-style key into a systemd option name.
+ + Examples: + "randomized_delay_sec" -> "RandomizedDelaySec" + "WantedBy" -> "WantedBy" (unchanged) + + Keys that are already written in CamelCase / systemd style are returned as-is. + Falsy input is returned unchanged. + """ + if not key: + return key + + # already CamelCase/systemd-style: leave untouched + if "_" not in key and any(c.isupper() for c in key[1:]): + return key + + parts = str(key).split("_") + + return "".join(p.capitalize() for p in parts if p) + + +def bool_to_systemd(value: Any) -> str: + """ + Normalize a Python boolean to a systemd boolean string. + + Args: + value: Boolean value or any other type. + + Returns: + "true" or "false" if value is a bool, otherwise str(value). + """ + if isinstance(value, bool): + return "true" if value else "false" + + return str(value) + + +def normalize_list_or_scalar( + value: Any, default: Optional[str] = None +) -> Optional[str]: + """ + Normalize a scalar or iterable value into a comma-separated string. + + Args: + value: None, a scalar or an iterable (list/tuple/set) of scalars. + default: Value to return if the input is None. + + Returns: + default if value is None, otherwise a string representation: + - iterables are joined with commas + - scalars are converted via str(value) + """ + if value is None: + return default + + if isinstance(value, (list, tuple, set)): + return ",".join(str(v) for v in value) + + return str(value) + + +def timer_component( + value: Any, + default: str = "*", + pad_width: Optional[int] = None, +) -> str: + """ + Convert a single calendar component (year, month, day, hour, minute, second) + to a systemd-compatible string. + + Supports: + * None -> default (usually "*") + * str -> returned unchanged (e.g. "*/15") + * int -> optionally zero-padded according to pad_width + * list/tuple/set -> each element is converted individually and combined + as a comma-separated string + + Args: + value: Component value or iterable of component values. + default: Fallback value when value is None. + pad_width: Optional zero-padding width for integer values. + + Returns: + A normalized string representation usable in OnCalendar expressions. + """ + if value is None: + return default + + # already a complex expression like "*/15" etc. + if isinstance(value, str): + return value + + if isinstance(value, (list, tuple, set)): + # treat each element individually, keep inner strings untouched + parts: List[str] = [] + for v in value: + if isinstance(v, str): + parts.append(v) + elif isinstance(v, int): + if pad_width: + parts.append(f"{v:0{pad_width}d}") + else: + parts.append(str(v)) + else: + parts.append(str(v)) + + return ",".join(parts) + + if isinstance(value, int): + if pad_width: + return f"{value:0{pad_width}d}" + + return str(value) + + # fallback + return str(value) + + +def normalize_weekday_token(token: str, module: Any = None) -> str: + """ + Normalize a weekday token for use in systemd calendar expressions. + + Accepted forms: + * official tokens: "Mon" .. "Sun" + * digits: "0" .. "7" (systemd accepts 0/7 = Sunday) + * configured aliases (for example "monday" -> "Mon") + + On invalid input, either module.fail_json is called (if provided) or + a ValueError is raised. + + Args: + token: Weekday token as provided by the caller. + module: Optional AnsibleModule-like object used to report errors via + module.fail_json(msg=..., value=token). + + Returns: + A normalized weekday token suitable for systemd (e.g. "Mon", "Tue"). + + Raises: + ValueError: If the token cannot be normalized to a valid weekday. 
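+ + Examples (illustrative, derived from the digit and alias rules above): + >>> normalize_weekday_token("monday") + 'Mon' + >>> normalize_weekday_token("7") + '7'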
+ """ + raw = str(token).strip() + if not raw: + msg = "weekday token must not be empty" + if module is not None and hasattr(module, "fail_json"): + module.fail_json(msg=msg, value=token) + raise ValueError(msg) + + # numeric weekday (systemd: 1..7, 0/7 = Sunday) + if raw.isdigit(): + n = int(raw) + if 0 <= n <= 7: + return raw + msg = f"invalid numeric weekday '{raw}', expected 0..7" + if module is not None and hasattr(module, "fail_json"): + module.fail_json(msg=msg, value=token) + raise ValueError(msg) + + # official token directly allowed + if raw in VALID_WEEKDAY_TOKENS: + return raw + + # alias mapping (case-insensitive) + lower = raw.lower() + if lower in WEEKDAY_ALIASES: + return WEEKDAY_ALIASES[lower] + + msg = ( + f"unsupported weekday value '{raw}', " + f"expected one of {sorted(VALID_WEEKDAY_TOKENS)} or 1..7" + ) + if module is not None and hasattr(module, "fail_json"): + module.fail_json(msg=msg, value=token) + + raise ValueError(msg) diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/systemd/plugins/module_utils/static.py b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/plugins/module_utils/static.py new file mode 100644 index 0000000..371ba34 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/plugins/module_utils/static.py @@ -0,0 +1,131 @@ +# Whitelists für Strict-Mode (nicht vollständig, aber praxisnahe Basis) +VALID_UNIT_KEYS = { + "Description", + "Documentation", + "Requires", + "Wants", + "BindsTo", + "PartOf", + "Conflicts", + "Before", + "After", + "OnFailure", + "PropagatesReloadTo", + "RequiresMountsFor", + "ConditionPathExists", + "ConditionPathExistsGlob", + "ConditionPathIsDirectory", + "ConditionPathIsSymbolicLink", + "ConditionPathIsMountPoint", + "ConditionArchitecture", + "ConditionVirtualization", + "ConditionHost", + "ConditionKernelCommandLine", + "ConditionSecurity", + "ConditionFirstBoot", + "ConditionNeedsUpdate", + "ConditionACPower", + "AssertPathExists", + "AssertPathExistsGlob", + "AssertPathIsDirectory", + "AssertPathIsSymbolicLink", + "AssertPathIsMountPoint", + "AssertArchitecture", + "AssertVirtualization", + "AssertHost", + "AssertKernelCommandLine", + "AssertSecurity", + "AssertFirstBoot", + "AssertNeedsUpdate", + "AssertACPower", +} + +VALID_TIMER_KEYS = { + "OnActiveSec", + "OnBootSec", + "OnStartupSec", + "OnUnitActiveSec", + "OnUnitInactiveSec", + "OnCalendar", + "AccuracySec", + "RandomizedDelaySec", + "Unit", + "Persistent", + "WakeSystem", + "RemainAfterElapse", + "TimerSlackNSec", + "TimeZone", + "FixedRandomDelay", +} + +VALID_INSTALL_KEYS = { + "WantedBy", + "RequiredBy", + "Also", + "Alias", + "DefaultInstance", +} + +# Timer-Optionen, die Timespans sind +TIMER_TIMESPAN_KEYS = { + "RandomizedDelaySec", + "AccuracySec", + "OnActiveSec", + "OnBootSec", + "OnUnitActiveSec", + "OnUnitInactiveSec", +} + +TIMER_TIMESPAN_PARAM_KEYS = { + "randomized_delay_sec", + "accuracy_sec", + "on_active_sec", + "on_boot_sec", + "on_unit_active_sec", + "on_unit_inactive_sec", +} + +# Timer-Optionen, die boolsche Werte sind +TIMER_BOOL_KEYS = { + "Persistent", + "WakeSystem", + "RemainAfterElapse", +} + +TIMER_BOOL_PARAM_KEYS = { + "persistent", + "wake_system", + "remain_after_elapse", +} + +VALID_WEEKDAY_TOKENS = { + "Mon", + "Tue", + "Wed", + "Thu", + "Fri", + "Sat", + "Sun", +} + +# optionale Aliase +WEEKDAY_ALIASES = { + # Englisch + "mon": "Mon", + "monday": "Mon", + "tue": "Tue", + "tues": "Tue", + "tuesday": "Tue", + "wed": "Wed", + "wednesday": "Wed", + "thu": "Thu", + 
"thur": "Thu", + "thurs": "Thu", + "thursday": "Thu", + "fri": "Fri", + "friday": "Fri", + "sat": "Sat", + "saturday": "Sat", + "sun": "Sun", + "sunday": "Sun", +} diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/systemd/plugins/module_utils/systemd.py b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/plugins/module_utils/systemd.py new file mode 100644 index 0000000..271cb89 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/plugins/module_utils/systemd.py @@ -0,0 +1,1111 @@ +#!/usr/bin/env python3 + +# file: systemd.py +""" +High-level systemd D-Bus client with regex matching for services, sockets and +timers. + +The module is primarily intended to be imported from Ansible modules or other +Python code, but it also exposes a small CLI for ad-hoc use. + +Dependencies (Debian/Ubuntu): + sudo apt-get install python3-dbus python3-gi +""" + +from __future__ import annotations + +import os +import re +import time +from dataclasses import dataclass +from typing import Any, Callable, Dict, Iterable, List, Optional, Tuple + +import dbus +from dbus import Interface +from dbus.exceptions import DBusException + +try: + from dbus.mainloop.glib import DBusGMainLoop # type: ignore + from gi.repository import GLib # type: ignore +except Exception: # pragma: no cover + DBusGMainLoop = None # type: ignore + GLib = None # type: ignore + +SERVICE = "org.freedesktop.systemd1" +MANAGER_PATH = "/org/freedesktop/systemd1" +IFACE_MANAGER = "org.freedesktop.systemd1.Manager" +IFACE_PROPS = "org.freedesktop.DBus.Properties" +IFACE_UNIT = "org.freedesktop.systemd1.Unit" +IFACE_SERVICE = "org.freedesktop.systemd1.Service" + +ENABLED_STATES = {"enabled", "enabled-runtime", "linked", "linked-runtime", "alias"} +MASKED_STATES = {"masked", "masked-runtime"} + +# ---------- Exceptions ---------- + + +class SystemdError(Exception): + """Base exception type for all SystemdClient-related errors.""" + + +class UnitNotFoundError(SystemdError): + """Raised when a requested unit or unit file is not known to systemd.""" + + +class AccessDeniedError(SystemdError): + """Raised when access is denied (e.g. PolicyKit / missing root privileges).""" + + +class JobFailedError(SystemdError): + """Raised when a systemd job finishes or is reported with a failure state.""" + + +class DBusIOError(SystemdError): + """Raised for generic D-Bus or transport-level errors.""" + + +def _map_dbus_error(e: DBusException, ctx: str = "") -> SystemdError: + """ + Map a raw DBusException to a more specific SystemdError subclass. + + Args: + e: Original DBusException instance. + ctx: Optional context string to prefix the error message with. + + Returns: + One of UnitNotFoundError, AccessDeniedError, JobFailedError or DBusIOError. + """ + name = getattr(e, "get_dbus_name", lambda: "")() or "" + msg = f"{ctx}: {e}" if ctx else str(e) + if name in ( + "org.freedesktop.systemd1.NoSuchUnit", + "org.freedesktop.DBus.Error.UnknownObject", + ): + return UnitNotFoundError(msg) + + if name == "org.freedesktop.DBus.Error.AccessDenied": + return AccessDeniedError(msg) + + if name == "org.freedesktop.systemd1.JobFailed": + return JobFailedError(msg) + + return DBusIOError(msg) + + +# ---------- Helpers ---------- + + +def _py(v: Any) -> Any: + """ + Convert dbus.* types into plain Python types (recursively). + + This makes it easier to work with values returned from D-Bus calls by + normalising strings, integers, arrays and dictionaries. 
+ """ + if isinstance(v, (dbus.String, dbus.ObjectPath)): + return str(v) + + if isinstance( + v, (dbus.Int16, dbus.Int32, dbus.Int64, dbus.UInt16, dbus.UInt32, dbus.UInt64) + ): + return int(v) + + if isinstance(v, dbus.Boolean): + return bool(v) + + if isinstance(v, dbus.Double): + return float(v) + + if isinstance(v, (dbus.ByteArray, bytes, bytearray)): + return bytes(v) + + if isinstance(v, (list, tuple, dbus.Array)): + return type(v)(_py(x) for x in v) + + if isinstance(v, (dict, dbus.Dictionary)): + return {_py(k): _py(val) for k, val in v.items()} + + return v + + +def _basename_or_name(s: str) -> str: + """ + Return the basename of a path or the input string if it contains no slash. + """ + return os.path.basename(s) if "/" in s else s + + +def _kind_from_name(name: str) -> str: + """ + Extract the unit type (suffix) from a unit name, e.g. 'ssh.service' -> 'service'. + """ + return name.split(".")[-1] if "." in name else "" + + +# ---------- Data ---------- + + +@dataclass(frozen=True) +class Unit: + """Single row returned from Manager.ListUnits().""" + + name: str + description: str + load_state: str + active_state: str + sub_state: str + followed: str + object_path: str + job_id: int + job_type: str + job_path: str + + +@dataclass(frozen=True) +class UnitFile: + """Single row returned from Manager.ListUnitFiles().""" + + path: str + state: str # enabled, disabled, masked, ... + + +@dataclass(frozen=True) +class InstallChange: + """Install-time change as reported by (Un)Mask/(Dis|En)ableUnitFiles().""" + + type: str + file: str + destination: str + + +@dataclass(frozen=True) +class UnitStatus: + """ + Combined view of runtime unit status and install-time unit file state. + + This structure merges data from ListUnits() and ListUnitFiles() and is used + by match_units() to represent both active and inactive units. + """ + + name: str + kind: str # service|socket|timer|... + description: str + active_state: str # active|inactive|failed|... + sub_state: str # running|dead|... + unit_file_state: Optional[str] # enabled|disabled|masked|generated|transient|None + load_state: Optional[str] # loaded|not-found|... + is_enabled: bool # True falls "effectively enabled" + is_masked: bool # True falls masked/masked-runtime + + +# ---------- Client ---------- + + +class SystemdClient: + """ + High-level wrapper around the org.freedesktop.systemd1 D-Bus API. + + The client exposes convenience methods for common lifecycle actions + (start/stop/restart), querying unit state and matching units via regular + expressions. It can be used against the system or per-user systemd manager. + """ + + def __init__(self, *, user_manager: bool = False, use_glib: bool = False) -> None: + """ + Create a new SystemdClient instance and connect to the systemd manager. + + Args: + user_manager: + If True, connect to the per-user systemd manager on the session + bus. If False (default), connect to the system-wide manager on + the system bus. + use_glib: + If True, initialise a GLib main loop and prefer a signal-based + waiting strategy for jobs (wait_job). If False, a pure + polling-based approach (wait_job_poll) is used instead. + """ + self._glib_enabled = bool(use_glib) + + if use_glib: + if DBusGMainLoop is None: + raise RuntimeError( + "GLib is not available. Install 'python3-gi' and 'python3-dbus'." 
+ ) + DBusGMainLoop(set_as_default=True) + + self._bus = dbus.SessionBus() if user_manager else dbus.SystemBus() + self._mgr_obj = self._bus.get_object(SERVICE, MANAGER_PATH) + self._manager: Interface = Interface(self._mgr_obj, IFACE_MANAGER) + self._signals_enabled = False + self._signal_handlers: List[Tuple[Callable, Dict[str, Any]]] = [] + self._unit_props_cache: Dict[str, Interface] = {} + + def __enter__(self) -> "SystemdClient": + """Allow use as a context manager that auto-closes the D-Bus connection.""" + return self + + def __exit__(self, exc_type, exc, tb) -> None: + """Close the underlying D-Bus connection when leaving the context.""" + self.close() + + def close(self) -> None: + """ + Close the underlying D-Bus connection and unregister signal handlers. + + This is safe to call multiple times. + """ + try: + self._bus.close() + except Exception: + pass + + for func, kwargs in self._signal_handlers: + try: + self._bus.remove_signal_receiver(func, **kwargs) + except Exception: + pass + + self._signal_handlers.clear() + self._unit_props_cache.clear() + + # --------- Existence / Status --------- + + def exists(self, unit: str, *, installed_ok: bool = True) -> bool: + """ + Check whether a unit exists. + + A unit counts as existing if it is currently loaded or if a unit file + for it exists on disk. When installed_ok is False, only a loaded unit + counts as existing. + """ + try: + self._manager.GetUnit(unit) # loaded? + return True + except DBusException: + if not installed_ok: + return False + try: + self._manager.GetUnitFileState(unit) # unit file present? + return True + except DBusException: + return False + + def ensure_loaded(self, unit: str) -> str: + """ + Ensure that a unit is loaded and return its D-Bus object path. + + The method first tries GetUnit() and falls back to LoadUnit() if + the unit is not currently loaded. + + Raises: + SystemdError: If the unit cannot be loaded. + """ + try: + return str(self._manager.GetUnit(unit)) + except DBusException: + try: + return str(self._manager.LoadUnit(unit)) + except DBusException as e: + raise _map_dbus_error(e, f"LoadUnit({unit})") + + def is_active(self, unit: str) -> bool: + """ + Return True if the unit's ActiveState is 'active'. + + Unknown units are treated as inactive; because a default is passed to + active_state(), no UnitNotFoundError is raised here. + """ + return self.active_state(unit, default="inactive") == "active" + + def active_state(self, unit: str, *, default: Optional[str] = None) -> str: + """ + Return the unit's ActiveState. + + Args: + unit: Unit name, e.g. 'ssh.service'. + default: Optional value returned when the unit does not exist. + + Raises: + UnitNotFoundError: If the unit does not exist and no default is set. + """ + try: + return self._get_unit_prop_str(unit, "ActiveState") + except UnitNotFoundError: + if default is not None: + return default + raise + + def sub_state(self, unit: str, *, default: Optional[str] = None) -> str: + """ + Return the unit's SubState. + + Args: + unit: Unit name, e.g. 'ssh.service'. + default: Optional value returned when the unit does not exist. + + Raises: + UnitNotFoundError: If the unit does not exist and no default is set. + """ + try: + return self._get_unit_prop_str(unit, "SubState") + except UnitNotFoundError: + if default is not None: + return default + raise + + def get_unit_properties( + self, unit: str, keys: Optional[Iterable[str]] = None + ) -> Dict[str, Any]: + """ + Fetch selected properties of a unit object. + + Args: + unit: Unit name, e.g.
'ssh.service'. + keys: Iterable of property names to retrieve. If omitted, a useful + default set of properties is returned. + + Returns: + Mapping from property name to converted Python value. + """ + default = ( + "Id", + "Description", + "LoadState", + "ActiveState", + "SubState", + "FragmentPath", + "UnitFileState", + "InactiveEnterTimestamp", + "ActiveEnterTimestamp", + ) + wanted = list(keys) if keys else list(default) + props = self._props_iface_for_unit(unit) + try: + return {k: _py(props.Get(IFACE_UNIT, k)) for k in wanted} + except DBusException as e: + raise _map_dbus_error(e, f"Get({unit})") + + def get_service_properties( + self, unit: str, keys: Optional[Iterable[str]] = None + ) -> Dict[str, Any]: + """ + Fetch selected properties from the Service interface for a unit. + + Args: + unit: Unit name, e.g. 'ssh.service'. + keys: Iterable of property names on org.freedesktop.systemd1.Service. + If omitted, a small default subset is requested. + + Returns: + Mapping from property name to converted Python value. Properties that + cannot be retrieved are silently omitted from the result. + """ + path = self._get_unit_path(unit) + obj = self._bus.get_object(SERVICE, path) + props = Interface(obj, IFACE_PROPS) + default = ("ExecMainPID", "ExecMainStatus", "MainPID", "Type", "Restart") + wanted = list(keys) if keys else list(default) + out: Dict[str, Any] = {} + for k in wanted: + try: + out[k] = _py(props.Get(IFACE_SERVICE, k)) + except DBusException: + pass + return out + + # --------- Lifecycle --------- + + def start(self, unit: str, mode: str = "replace") -> str: + """ + Start a unit using Manager.StartUnit(). + + Returns: + The D-Bus job object path. + """ + try: + return str(self._manager.StartUnit(unit, mode)) + except DBusException as e: + raise _map_dbus_error(e, f"StartUnit({unit})") + + def stop(self, unit: str, mode: str = "replace") -> str: + """ + Stop a unit using Manager.StopUnit(). + + Returns: + The D-Bus job object path. + """ + try: + return str(self._manager.StopUnit(unit, mode)) + except DBusException as e: + raise _map_dbus_error(e, f"StopUnit({unit})") + + def restart(self, unit: str, mode: str = "replace") -> str: + """ + Restart a unit using Manager.RestartUnit(). + + Returns: + The D-Bus job object path. + """ + try: + return str(self._manager.RestartUnit(unit, mode)) + except DBusException as e: + raise _map_dbus_error(e, f"RestartUnit({unit})") + + def reload(self, unit: str, mode: str = "replace") -> str: + """ + Reload a unit using Manager.ReloadUnit(). + + Returns: + The D-Bus job object path. + """ + try: + return str(self._manager.ReloadUnit(unit, mode)) + except DBusException as e: + raise _map_dbus_error(e, f"ReloadUnit({unit})") + + def reload_or_restart(self, unit: str, mode: str = "replace") -> str: + """ + Reload or restart a unit using Manager.ReloadOrRestartUnit(). + + Returns: + The D-Bus job object path. + """ + try: + return str(self._manager.ReloadOrRestartUnit(unit, mode)) + except DBusException as e: + raise _map_dbus_error(e, f"ReloadOrRestartUnit({unit})") + + def reset_failed(self, unit: Optional[str] = None) -> None: + """ + Clear failed state for one unit or for all units. + + Args: + unit: Optional unit name. If None, ResetFailed() is called and all + failed states are cleared. 
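+ + # Example (illustrative; "client" is an existing SystemdClient and + # "nginx.service" a placeholder unit name): + # client.reset_failed("nginx.service") # clear a single unit + # client.reset_failed() # clear all failed units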
+ """ + try: + ( + self._manager.ResetFailed() + if unit is None + else self._manager.ResetFailedUnit(unit) + ) + except DBusException as e: + raise _map_dbus_error(e, f"ResetFailed({unit or ''})") + + # Polling-based variant used when GLib is not available or disabled. + def wait_job_poll( + self, + job_path: str, + *, + timeout_sec: Optional[float] = None, + raise_on_fail: bool = True, + poll_interval: float = 0.1, + ) -> str: + """ + Wait for completion of a systemd job by polling its state. + + Args: + job_path: D-Bus object path of the job. + timeout_sec: Optional timeout in seconds. If None, wait indefinitely. + raise_on_fail: If True, raise JobFailedError on failed or timed-out + jobs. If False, return "failed" or "timeout-wait" instead. + poll_interval: Time in seconds between poll iterations. + + Returns: + One of "done", "failed" or "timeout-wait". + + Note: + Without GLib signals, result details like "canceled", "dependency", + "timeout" or "skipped" cannot be distinguished precisely. The method + relies on unit state and, for oneshot services, ExecMainStatus. + """ + job_obj = self._bus.get_object(SERVICE, job_path) + props = Interface(job_obj, IFACE_PROPS) + try: + job_type = str(props.Get("org.freedesktop.systemd1.Job", "JobType")) + unit_name, _unit_path = props.Get( + "org.freedesktop.systemd1.Job", "Unit" + ) # (s,o) + unit_name = str(unit_name) + except DBusException as e: + raise _map_dbus_error(e, f"JobProps({job_path})") + + # Poll bis Job weg ist + deadline = time.monotonic() + timeout_sec if timeout_sec else None + while True: + if deadline is not None and time.monotonic() >= deadline: + if raise_on_fail: + raise JobFailedError(f"job {job_path} result=timeout-wait") + return "timeout-wait" + try: + # As long as property 'State' can be read, the job still exists. + _ = props.Get( + "org.freedesktop.systemd1.Job", "State" + ) # 'waiting'|'running' + except DBusException as e: + name = getattr(e, "get_dbus_name", lambda: "")() or "" + if name in ( + "org.freedesktop.DBus.Error.UnknownObject", + "org.freedesktop.systemd1.NoSuchJob", + ): + break # Job removed -> finished + raise _map_dbus_error(e, f"JobPoll({job_path})") + time.sleep(poll_interval) + + # Heuristic evaluation based on unit state. + try: + st = self.active_state(unit_name, default="inactive") + except SystemdError: + st = "failed" + + ok = False + if job_type == "stop": + # After a stop job the unit should definitely not be 'active'. + ok = st in ("inactive", "failed") + else: + if st == "active": + ok = True + else: + # oneshot / reload: inspect ExecMainStatus when available. + sp = self.get_service_properties( + unit_name, keys=("Type", "ExecMainStatus") + ) + if str(sp.get("Type", "")) == "oneshot": + ok = int(sp.get("ExecMainStatus", 0)) == 0 + elif st != "failed": + # Fallback: anything not explicitly failed counts as success. + ok = True + + if not ok: + if raise_on_fail: + raise JobFailedError(f"job {job_path} result=failed") + return "failed" + + return "done" + + def wait_job( + self, + job_path: str, + *, + timeout_sec: Optional[float] = None, + raise_on_fail: bool = True, + ) -> str: + """ + Wait for completion of a systemd job using GLib JobRemoved signals. + + This method is used when use_glib=True and GLib is available. It listens + for the Manager.JobRemoved signal and derives the job result from the + 'result' argument. + + Args: + job_path: D-Bus object path of the job. + timeout_sec: Optional timeout in seconds. If None, wait indefinitely. 
+ raise_on_fail: If True, raise JobFailedError for all results other + than "done" or a local timeout. + + Returns: + "done" on success, "failed" on error or "timeout-wait" if the local + wait timeout expires. + """ + if GLib is None: + # Defensive fallback; normally guarded by _wait_job_dispatch. + return self.wait_job_poll( + job_path, timeout_sec=timeout_sec, raise_on_fail=raise_on_fail + ) + + if not self._signals_enabled: + # If subscription is not possible, fall back to polling. + try: + self.subscribe() + except SystemdError: + return self.wait_job_poll( + job_path, timeout_sec=timeout_sec, raise_on_fail=raise_on_fail + ) + + result_holder: Dict[str, Optional[str]] = {"result": None} + loop = GLib.MainLoop() + + def _on_job_removed(job_id, job_path_signal, unit, result) -> None: + if str(job_path_signal) != job_path: + return + result_holder["result"] = str(result) + loop.quit() + + self._bus.add_signal_receiver( + _on_job_removed, + signal_name="JobRemoved", + dbus_interface=IFACE_MANAGER, + path=MANAGER_PATH, + ) + + def _on_timeout() -> bool: + if result_holder["result"] is not None: + return False + result_holder["result"] = "timeout-wait" + loop.quit() + return False + + if timeout_sec is not None: + GLib.timeout_add(int(timeout_sec * 1000), _on_timeout) + + try: + loop.run() + finally: + try: + self._bus.remove_signal_receiver( + _on_job_removed, + signal_name="JobRemoved", + dbus_interface=IFACE_MANAGER, + path=MANAGER_PATH, + ) + except Exception: + pass + + result = result_holder["result"] or "failed" + if result == "timeout-wait": + if raise_on_fail: + raise JobFailedError(f"job {job_path} result=timeout-wait") + return "timeout-wait" + + if result != "done": + if raise_on_fail: + raise JobFailedError(f"job {job_path} result={result}") + return "failed" + + return "done" + + # --------- Lifecycle (blocking auf Job-Resultat) --------- + + def start_wait( + self, + unit: str, + mode: str = "replace", + *, + timeout_sec: Optional[float] = None, + raise_on_fail: bool = True, + ) -> str: + """ + Start a unit and wait for the corresponding job to finish. + + See wait_job_poll() / wait_job() for the meaning of timeout_sec and + raise_on_fail. + """ + job = self.start(unit, mode) + return self._wait_job_dispatch( + job, timeout_sec=timeout_sec, raise_on_fail=raise_on_fail + ) + + def stop_wait( + self, + unit: str, + mode: str = "replace", + *, + timeout_sec: Optional[float] = None, + raise_on_fail: bool = True, + ) -> str: + """ + Stop a unit and wait for the corresponding job to finish. + """ + job = self.stop(unit, mode) + return self._wait_job_dispatch( + job, timeout_sec=timeout_sec, raise_on_fail=raise_on_fail + ) + + def restart_wait( + self, + unit: str, + mode: str = "replace", + *, + timeout_sec: Optional[float] = None, + raise_on_fail: bool = True, + ) -> str: + """ + Restart a unit and wait for the corresponding job to finish. + """ + job = self.restart(unit, mode) + return self._wait_job_dispatch( + job, timeout_sec=timeout_sec, raise_on_fail=raise_on_fail + ) + + def reload_wait( + self, + unit: str, + mode: str = "replace", + *, + timeout_sec: Optional[float] = None, + raise_on_fail: bool = True, + ) -> str: + """ + Reload a unit and wait for the corresponding job to finish. 
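+
+        Example (illustrative sketch; ``sd`` is an instance of this client):
+
+            sd.reload_wait("nginx.service", timeout_sec=30.0)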
+ """ + job = self.reload(unit, mode) + return self._wait_job_dispatch( + job, timeout_sec=timeout_sec, raise_on_fail=raise_on_fail + ) + + def reload_or_restart_wait( + self, + unit: str, + mode: str = "replace", + *, + timeout_sec: Optional[float] = None, + raise_on_fail: bool = True, + ) -> str: + """ + Reload or restart a unit and wait for the corresponding job to finish. + """ + job = self.reload_or_restart(unit, mode) + return self._wait_job_dispatch( + job, timeout_sec=timeout_sec, raise_on_fail=raise_on_fail + ) + + # --------- Unit files / Listings --------- + + def enable( + self, names: Iterable[str], *, runtime: bool = False, force: bool = True + ) -> Tuple[bool, List[InstallChange]]: + """ + Enable unit files via Manager.EnableUnitFiles(). + + Args: + names: Iterable of unit file names. + runtime: If True, only enable for the current runtime. + force: If True, overwrite existing symlinks. + + Returns: + Tuple (carries_install_info, changes) where carries_install_info + indicates whether enablement carries install information and changes + is the list of InstallChange objects. + """ + try: + carries, changes = self._manager.EnableUnitFiles( + list(names), runtime, force + ) + return bool(carries), [InstallChange(*map(str, c)) for c in changes] + except DBusException as e: + raise _map_dbus_error(e, f"EnableUnitFiles({','.join(names)})") + + def disable( + self, names: Iterable[str], *, runtime: bool = False + ) -> List[InstallChange]: + """ + Disable unit files via Manager.DisableUnitFiles(). + """ + try: + changes = self._manager.DisableUnitFiles(list(names), runtime) + return [InstallChange(*map(str, c)) for c in changes] + except DBusException as e: + raise _map_dbus_error(e, f"DisableUnitFiles({','.join(names)})") + + def mask( + self, names: Iterable[str], *, runtime: bool = False, force: bool = True + ) -> List[InstallChange]: + """ + Mask unit files via Manager.MaskUnitFiles(). + """ + try: + changes = self._manager.MaskUnitFiles(list(names), runtime, force) + return [InstallChange(*map(str, c)) for c in changes] + except DBusException as e: + raise _map_dbus_error(e, f"MaskUnitFiles({','.join(names)})") + + def unmask( + self, names: Iterable[str], *, runtime: bool = False + ) -> List[InstallChange]: + """ + Unmask unit files via Manager.UnmaskUnitFiles(). + """ + try: + changes = self._manager.UnmaskUnitFiles(list(names), runtime) + return [InstallChange(*map(str, c)) for c in changes] + except DBusException as e: + raise _map_dbus_error(e, f"UnmaskUnitFiles({','.join(names)})") + + def get_unit_file_state(self, file: str) -> str: + """ + Return the unit file state for a given file name. + + Examples: 'enabled', 'disabled', 'masked', ... + """ + try: + return str(self._manager.GetUnitFileState(file)) + except DBusException as e: + raise _map_dbus_error(e, f"GetUnitFileState({file})") + + def list_units(self) -> List[Unit]: + """ + List all currently loaded units. + + Returns: + List of Unit objects mirroring the fields returned by ListUnits(). + """ + try: + rows = self._manager.ListUnits() + except DBusException as e: + raise _map_dbus_error(e, "ListUnits") + + out: List[Unit] = [] + for r in rows: + out.append( + Unit( + str(r[0]), + str(r[1]), + str(r[2]), + str(r[3]), + str(r[4]), + str(r[5]), + str(r[6]), + int(r[7]), + str(r[8]), + str(r[9]), + ) + ) + return out + + def list_unit_files(self) -> List[UnitFile]: + """ + List all unit files known to systemd. + + Returns: + A list of UnitFile objects containing path and state for each unit. 
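+
+        Example (illustrative sketch; ``sd`` is an instance of this client):
+
+            for uf in sd.list_unit_files():
+                print(uf.path, uf.state)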
+ """ + try: + rows = self._manager.ListUnitFiles() + except DBusException as e: + raise _map_dbus_error(e, "ListUnitFiles") + + return [UnitFile(path=str(r[0]), state=str(r[1])) for r in rows] + + def daemon_reload(self) -> None: + """ + Trigger a systemd daemon reload (Manager.Reload()). + """ + try: + self._manager.Reload() + except DBusException as e: + raise _map_dbus_error(e, "Reload(daemon)") + + # --------- Signals --------- + + def subscribe(self) -> None: + """ + Subscribe to systemd manager events. + + This is required for receiving property change and JobRemoved signals. + """ + try: + self._manager.Subscribe() + self._signals_enabled = True + except DBusException as e: + raise _map_dbus_error(e, "Subscribe") + + def unsubscribe(self) -> None: + """ + Unsubscribe from systemd manager events. + """ + try: + self._manager.Unsubscribe() + self._signals_enabled = False + except DBusException as e: + raise _map_dbus_error(e, "Unsubscribe") + + def on_unit_properties_changed( + self, + unit: str, + callback: Callable[[Dict[str, Any]], None], + *, + only: Iterable[str] = ("ActiveState", "SubState"), + ) -> Callable[[], None]: + """ + Register a callback for unit property changes. + + Args: + unit: Unit name, e.g. 'ssh.service'. + callback: Callable receiving a dict of changed properties (already + converted with _py()). + only: Optional iterable of property names to filter for. + + Returns: + A callable that, when invoked, unregisters the signal handler. + """ + if not self._signals_enabled: + self.subscribe() + path = self._get_unit_path(unit) + wanted = set(only or ()) + + def _handler( + interface: str, changed: Dict[str, Any], invalidated: List[str] + ) -> None: + if interface != IFACE_UNIT: + return + payload = { + k: _py(v) for k, v in changed.items() if not wanted or k in wanted + } + if payload: + callback(payload) + + kwargs = dict( + signal_name="PropertiesChanged", dbus_interface=IFACE_PROPS, path=path + ) + self._bus.add_signal_receiver(_handler, **kwargs) + self._signal_handlers.append((_handler, kwargs)) + + def _off() -> None: + try: + self._bus.remove_signal_receiver(_handler, **kwargs) + except Exception: + pass + + return _off + + # --------- Regex matching across units --------- + + def match_units( + self, + patterns: Iterable[str], + *, + types: Iterable[str] = ("service", "socket", "timer"), + flags: int = re.IGNORECASE, + include_inactive_files: bool = True, + ) -> List[UnitStatus]: + """ + Find units whose names match any of the given regular expressions. + + The result merges runtime information from ListUnits() with install-time + information from ListUnitFiles(). + + Args: + patterns: Sequence of Python regular expressions. + types: Iterable of unit kinds to consider (e.g. {"service", "timer"}). + flags: Regex flags passed to re.compile(). + include_inactive_files: + If True, include unit files that are not currently loaded. + + Returns: + A list of UnitStatus objects, sorted by unit name. 
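+
+        Example (illustrative sketch; ``sd`` is an instance of this client):
+
+            hits = sd.match_units([r"^nginx", r"certbot"], types=("service", "timer"))
+            enabled = [u.name for u in hits if u.is_enabled]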
+        """
+        rx = [re.compile(p, flags) for p in patterns]
+        type_set = set(types)
+
+        # Live units currently loaded by systemd.
+        live = {
+            u.name: u
+            for u in self.list_units()
+            if _kind_from_name(u.name) in type_set and any(r.search(u.name) for r in rx)
+        }
+
+        # Unit files (install state), optional.
+        file_rows = self.list_unit_files() if include_inactive_files else []
+        file_state_by_name: Dict[str, str] = {}
+        candidates_from_files: set[str] = set()
+        for f in file_rows:
+            name = _basename_or_name(f.path)
+            if _kind_from_name(name) in type_set and any(r.search(name) for r in rx):
+                candidates_from_files.add(name)
+                file_state_by_name[name] = f.state
+
+        # Union of live units and matching unit files.
+        names = sorted(set(live.keys()) | candidates_from_files)
+
+        out: List[UnitStatus] = []
+        for name in names:
+            file_state = (file_state_by_name.get(name) or "").lower() or None
+            is_masked = file_state in MASKED_STATES
+            is_enabled = (file_state in ENABLED_STATES) and not is_masked
+
+            if name in live:
+                u = live[name]
+                out.append(
+                    UnitStatus(
+                        name=name,
+                        kind=_kind_from_name(name),
+                        description=u.description,
+                        active_state=u.active_state,
+                        sub_state=u.sub_state,
+                        unit_file_state=file_state,
+                        load_state=u.load_state,
+                        is_enabled=is_enabled,
+                        is_masked=is_masked,
+                    )
+                )
+            else:
+                # File exists, but the unit is not loaded.
+                out.append(
+                    UnitStatus(
+                        name=name,
+                        kind=_kind_from_name(name),
+                        description="",
+                        active_state="inactive",
+                        sub_state="dead",
+                        unit_file_state=file_state,
+                        load_state=None,
+                        is_enabled=is_enabled,
+                        is_masked=is_masked,
+                    )
+                )
+        return out
+
+    # --------- Low-level helpers ---------
+
+    def _get_unit_path(self, unit: str) -> str:
+        """
+        Resolve the D-Bus object path for a unit.
+
+        The method first tries GetUnit() and falls back to LoadUnit() so that
+        inactive/dead units can also be resolved.
+
+        Raises:
+            SystemdError: When the unit cannot be resolved.
+        """
+        try:
+            return str(self._manager.GetUnit(unit))
+        except DBusException as e1:
+            try:
+                return str(self._manager.LoadUnit(unit))
+            except DBusException as e2:
+                # Report a detailed error.
+                raise _map_dbus_error(e2, f"LoadUnit({unit})") from e1
+
+    def _props_iface_for_unit(self, unit: str) -> Interface:
+        """
+        Return a cached Properties interface proxy for the given unit.
+        """
+        path = self._get_unit_path(unit)
+        if path in self._unit_props_cache:
+            return self._unit_props_cache[path]
+
+        obj = self._bus.get_object(SERVICE, path)
+        props = Interface(obj, IFACE_PROPS)
+        self._unit_props_cache[path] = props
+
+        return props
+
+    def _get_unit_prop_str(self, unit: str, prop: str) -> str:
+        """
+        Helper to read a single unit property as string.
+
+        Raises:
+            UnitNotFoundError: If the unit does not exist.
+        """
+        props = self._props_iface_for_unit(unit)
+        try:
+            return str(props.Get(IFACE_UNIT, prop))
+        except DBusException as e:
+            raise _map_dbus_error(e, f"Get({unit},{prop})")
+
+    def _wait_job_dispatch(
+        self, job_path: str, *, timeout_sec: Optional[float], raise_on_fail: bool
+    ) -> str:
+        """
+        Dispatch job waiting to either the GLib or the polling implementation.
+
+        This is used internally by the *_wait() lifecycle helpers.
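+
+        A typical internal flow looks like (illustrative sketch; ``sd`` is an
+        instance of this client):
+
+            job = sd.restart("cron.service")
+            sd._wait_job_dispatch(job, timeout_sec=30.0, raise_on_fail=True)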
+        """
+        if self._glib_enabled and GLib is not None:
+            return self.wait_job(
+                job_path, timeout_sec=timeout_sec, raise_on_fail=raise_on_fail
+            )
+
+        return self.wait_job_poll(
+            job_path, timeout_sec=timeout_sec, raise_on_fail=raise_on_fail
+        )
diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/systemd/plugins/module_utils/validator.py b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/plugins/module_utils/validator.py
new file mode 100644
index 0000000..d5ddbee
--- /dev/null
+++ b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/plugins/module_utils/validator.py
@@ -0,0 +1,358 @@
+from __future__ import absolute_import, division, print_function
+
+import subprocess
+from typing import Any, Dict, List, Tuple
+
+from ansible_collections.bodsch.systemd.plugins.module_utils.helper import (
+    bool_to_systemd,
+    snake_to_systemd,
+)
+from ansible_collections.bodsch.systemd.plugins.module_utils.static import (
+    TIMER_BOOL_KEYS,
+    TIMER_BOOL_PARAM_KEYS,
+    TIMER_TIMESPAN_KEYS,
+    TIMER_TIMESPAN_PARAM_KEYS,
+    VALID_INSTALL_KEYS,
+    VALID_TIMER_KEYS,
+    VALID_UNIT_KEYS,
+)
+
+
+class SystemdValidator:
+    """
+    Validator and mapper for systemd [Unit], [Timer] and [Install] sections.
+
+    Can be used inside an Ansible module (with an AnsibleModule instance) as well
+    as from plain Python code. Errors are reported either via module.fail_json()
+    (if available) or by raising ValueError.
+    """
+
+    def __init__(
+        self,
+        module: Any,
+        *,
+        strict_unit: bool = False,
+        strict_timer: bool = False,
+        strict_install: bool = False,
+        validate_timespans: bool = True,
+        systemd_analyze_cmd: str = "systemd-analyze",
+    ) -> None:
+        """
+        Initialize a SystemdValidator.
+
+        Args:
+            module:
+                AnsibleModule-like object.
+            strict_unit:
+                If True, only keys from VALID_UNIT_KEYS are allowed in [Unit].
+            strict_timer:
+                If True, only keys from VALID_TIMER_KEYS are allowed in [Timer].
+            strict_install:
+                If True, only keys from VALID_INSTALL_KEYS are allowed in [Install].
+            validate_timespans:
+                If True, timespans are validated using "systemd-analyze timespan".
+            systemd_analyze_cmd:
+                Command name or path to the systemd-analyze binary.
+        """
+        self.module = module
+        self.strict_unit = strict_unit
+        self.strict_timer = strict_timer
+        self.strict_install = strict_install
+        self.validate_timespans = validate_timespans
+        self.systemd_analyze_cmd = systemd_analyze_cmd
+
+    # -------------------------------------------------------------------------
+    # public validation / mapping functions
+    # -------------------------------------------------------------------------
+
+    def validate_unit_options(self, options: Dict[str, Any]) -> Dict[str, Any]:
+        """
+        Validate and normalize options for the [Unit] section.
+
+        Behavior:
+          * snake_case keys are mapped to systemd-style keys where appropriate.
+          * known convenience keys are mapped via an explicit table.
+          * in strict mode, resulting keys are validated against VALID_UNIT_KEYS.
+
+        Args:
+            options: Raw option dictionary as provided by the caller.
+
+        Returns:
+            A new dictionary with normalized keys, e.g. 'Description', 'After', ...
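+
+        Example (illustrative):
+
+            validator.validate_unit_options({"description": "Demo", "after": "network.target"})
+            # -> {"Description": "Demo", "After": "network.target"}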
+        """
+        self._log(f"SystemdValidator::validate_unit_options(options={options})")
+
+        unit_section: Dict[str, Any] = {}
+
+        unit_map = {
+            "description": "Description",
+            "documentation": "Documentation",
+            "requires": "Requires",
+            "wants": "Wants",
+            "binds_to": "BindsTo",
+            "part_of": "PartOf",
+            "conflicts": "Conflicts",
+            "before": "Before",
+            "after": "After",
+            "on_failure": "OnFailure",
+        }
+
+        for key, value in options.items():
+            if value is None:
+                continue
+
+            # existing systemd name or convenience key?
+            if key in unit_map:
+                sd_key = unit_map[key]
+            elif key in VALID_UNIT_KEYS:
+                sd_key = key
+            elif "_" in key and key.lower() == key:
+                sd_key = snake_to_systemd(key)
+            else:
+                sd_key = key
+
+            unit_section[sd_key] = value
+
+        if self.strict_unit:
+            invalid = [k for k in unit_section.keys() if k not in VALID_UNIT_KEYS]
+            if invalid:
+                self._fail(
+                    "Unsupported [Unit] options",
+                    invalid_keys=invalid,
+                    section="Unit",
+                )
+
+        return unit_section
+
+    def validate_timer_options(self, options: Dict[str, Any]) -> Dict[str, Any]:
+        """
+        Validate and normalize options for the [Timer] section.
+
+        Behavior:
+          * convenience options (e.g. 'randomized_delay_sec', 'time_zone') are
+            mapped via an explicit table.
+          * snake_case keys are converted using snake_to_systemd().
+          * timespan-like options are validated via validate_timespan().
+          * boolean-like options are converted to 'true'/'false'.
+          * in strict mode, resulting keys are validated against VALID_TIMER_KEYS.
+
+        Args:
+            options: Raw option dictionary as provided by the caller.
+
+        Returns:
+            A new dictionary with normalized keys (e.g. 'RandomizedDelaySec',
+            'OnBootSec', ...).
+        """
+        self._log(f"SystemdValidator::validate_timer_options(options={options})")
+
+        timer_section: Dict[str, Any] = {}
+
+        # Map convenience options from `options` to their systemd counterparts.
+        option_map = {
+            "persistent": "Persistent",
+            "randomized_delay_sec": "RandomizedDelaySec",
+            "accuracy_sec": "AccuracySec",
+            "on_active_sec": "OnActiveSec",
+            "on_boot_sec": "OnBootSec",
+            "on_unit_active_sec": "OnUnitActiveSec",
+            "on_unit_inactive_sec": "OnUnitInactiveSec",
+            "unit": "Unit",
+            "wake_system": "WakeSystem",
+            "remain_after_elapse": "RemainAfterElapse",
+            "timezone": "TimeZone",
+            "time_zone": "TimeZone",  # alias
+        }
+
+        for key, value in options.items():
+            if value is None:
+                continue
+
+            orig_key = key
+
+            # 1) translate key to systemd key
+            if key in option_map:
+                sd_key = option_map[key]
+            elif key in VALID_TIMER_KEYS:
+                sd_key = key
+            elif "_" in key and key.lower() == key:
+                sd_key = snake_to_systemd(key)
+            else:
+                sd_key = key
+
+            # 2) validate timespans (use original parameter name for error messages)
+            if sd_key in TIMER_TIMESPAN_KEYS or orig_key in TIMER_TIMESPAN_PARAM_KEYS:
+                ok, normalized = self.validate_timespan(value, orig_key)
+                if not ok:
+                    self._fail(
+                        f"Invalid timespan for {orig_key}",
+                        value=value,
+                    )
+                value = normalized
+
+            # 3) map booleans
+            if sd_key in TIMER_BOOL_KEYS or orig_key in TIMER_BOOL_PARAM_KEYS:
+                if isinstance(value, bool):
+                    value = bool_to_systemd(value)
+                # strings like "true"/"false" are accepted as-is
+
+            timer_section[sd_key] = value
+
+        if self.strict_timer:
+            invalid = [k for k in timer_section.keys() if k not in VALID_TIMER_KEYS]
+            if invalid:
+                self._fail(
+                    "Unsupported [Timer] options",
+                    invalid_keys=invalid,
+                    section="Timer",
+                )
+
+        return timer_section
+
+    def validate_install_options(self, options: Dict[str, Any]) -> Dict[str, Any]:
+        """
+        Validate and normalize options for the [Install] section.
+
+        Behavior:
+          * snake_case keys are mapped to well-known systemd install keys.
+          * in strict mode, resulting keys are validated against VALID_INSTALL_KEYS.
+
+        Args:
+            options: Raw option dictionary as provided by the caller.
+
+        Returns:
+            A new dictionary with normalized keys (e.g. 'WantedBy', 'Alias', ...).
+        """
+        self._log(
+            f"SystemdValidator::validate_install_options(options={options})"
+        )
+
+        install_section: Dict[str, Any] = {}
+
+        install_map = {
+            "wanted_by": "WantedBy",
+            "required_by": "RequiredBy",
+            "also": "Also",
+            "alias": "Alias",
+            "default_instance": "DefaultInstance",
+        }
+
+        for key, value in options.items():
+            if value is None:
+                continue
+
+            if key in install_map:
+                sd_key = install_map[key]
+            elif key in VALID_INSTALL_KEYS:
+                sd_key = key
+            elif "_" in key and key.lower() == key:
+                sd_key = snake_to_systemd(key)
+            else:
+                sd_key = key
+
+            install_section[sd_key] = value
+
+        if self.strict_install:
+            invalid = [k for k in install_section.keys() if k not in VALID_INSTALL_KEYS]
+            if invalid:
+                self._fail(
+                    "Unsupported [Install] options",
+                    invalid_keys=invalid,
+                    section="Install",
+                )
+
+        return install_section
+
+    def validate_timespan(self, value: Any, param_name: str) -> Tuple[bool, str]:
+        """
+        Validate a systemd timespan value.
+
+        Accepted forms:
+          * int -> seconds (normalized to "s")
+          * str -> passed through and, optionally, validated via systemd-analyze
+
+        If validate_timespans is False, only a minimal check (non-empty string)
+        is performed and the value is returned unchanged.
+
+        Args:
+            value: Value to validate (int or str).
+            param_name: Original parameter name for error reporting.
+
+        Returns:
+            (True, normalized_string) on success or
+            (False, error_message) on failure.
+        """
+        self._log(
+            f"SystemdValidator::validate_timespan(value={value}, param_name={param_name})"
+        )
+
+        if value is None:
+            msg = f"{param_name} must not be None"
+            self._log(msg)
+            return (False, msg)
+
+        if isinstance(value, int):
+            return (True, f"{value}s")
+
+        if not isinstance(value, str):
+            msg = (
+                f"{param_name} must be int (seconds) or str (systemd timespan), "
+                f"got {type(value).__name__}"
+            )
+            self._log(msg)
+            return (False, msg)
+
+        val = value.strip()
+        if not val:
+            msg = f"{param_name} must not be empty"
+            self._log(msg)
+            return (False, msg)
+
+        if not self.validate_timespans:
+            # minimal check only
+            return (True, val)
+
+        # use systemd-analyze timespan for validation
+        rc, out, err = self._run_command([self.systemd_analyze_cmd, "timespan", val])
+        if rc != 0:
+            msg = f"Invalid systemd timespan for {param_name!r}: {val!r}"
+            self._log(f"{msg}; rc={rc}, stdout={out!r}, stderr={err!r}")
+            return (False, msg)
+
+        return (True, val)
+
+    # -------------------------------------------------------------------------
+    # internal helpers: logging / error signaling / command wrapper
+    # -------------------------------------------------------------------------
+
+    def _log(self, msg: str) -> None:
+        """
+        Log a message via the module logger when one is available.
+        """
+        if self.module is not None and hasattr(self.module, "log"):
+            self.module.log(msg)
+
+    def _fail(self, msg: str, **kwargs: Any) -> None:
+        """
+        Report a validation error.
+
+        If a module with fail_json() is available, fail_json() is called, otherwise
+        a ValueError is raised.
+        """
+        self._log(f"ERROR: {msg} ({kwargs})")
+        if self.module is not None and hasattr(self.module, "fail_json"):
+            self.module.fail_json(msg=msg, **kwargs)
+        raise ValueError(f"{msg}: {kwargs}")
+
+    def _run_command(self, args: List[str]) -> Tuple[int, str, str]:
+        """
+        Execute an external command.
+
+        Behavior:
+          * with an AnsibleModule: use module.run_command()
+          * otherwise: use subprocess.run()
+
+        Returns:
+            (returncode, stdout, stderr)
+        """
+        self._log(f"SystemdValidator::_run_command(args={args})")
+
+        if self.module is not None and hasattr(self.module, "run_command"):
+            rc, out, err = self.module.run_command(args, check_rc=False)
+            return rc, out, err
+
+        cp = subprocess.run(args, capture_output=True, text=True)
+
+        return cp.returncode, cp.stdout, cp.stderr
diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/systemd/plugins/modules/journalctl.py b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/plugins/modules/journalctl.py
new file mode 100644
index 0000000..08e9035
--- /dev/null
+++ b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/plugins/modules/journalctl.py
@@ -0,0 +1,198 @@
+#!/usr/bin/python3
+# -*- coding: utf-8 -*-
+
+# (c) 2020-2023, Bodo Schulz
+# Apache-2.0 (see LICENSE or https://opensource.org/license/apache-2-0)
+# SPDX-License-Identifier: Apache-2.0
+
+from __future__ import absolute_import, division, print_function
+
+from ansible.module_utils.basic import AnsibleModule
+
+# ---------------------------------------------------------------------------------------
+
+DOCUMENTATION = """
+module: journalctl
+author:
+  - Bodo 'bodsch' Schulz (@bodsch)
+short_description: Query the systemd journal with a very limited number of possible parameters.
+version_added: 1.1.0
+
+description:
+  - Query the systemd journal with a very limited number of possible parameters.
+  - In certain cases there are errors that are not clearly traceable but are logged in the journal.
+  - This module is intended to be a tool for error analysis.
+
+options:
+  identifier:
+    description:
+      - Show entries with the specified syslog identifier.
+    type: str
+    required: false
+  unit:
+    description:
+      - Show logs from the specified unit.
+    type: str
+    required: false
+  lines:
+    description:
+      - Number of journal entries to show.
+    type: int
+    required: false
+  reverse:
+    description:
+      - Show the newest entries first.
+    type: bool
+    required: false
+  arguments:
+    description:
+      - A list of additional raw journalctl arguments.
+    type: list
+    elements: str
+    required: false
+"""

+EXAMPLES = """
+- name: chrony entries from journalctl
+  bodsch.systemd.journalctl:
+    identifier: chrony
+    lines: 50
+  register: journalctl
+  when:
+    - ansible_service_mgr == 'systemd'
+
+- name: journalctl entries from this module
+  bodsch.systemd.journalctl:
+    identifier: ansible-journalctl
+    lines: 250
+  register: journalctl
+  when:
+    - ansible_service_mgr == 'systemd'
+"""
+
+RETURN = """
+rc:
+  description:
+    - Return code of the journalctl call.
+  type: int
+cmd:
+  description:
+    - journalctl with the called parameters
+  type: str
+stdout:
+  description:
+    - The standard output of the call.
+  type: str
+stderr:
+  description:
+    - The error output of the call.
+  type: str
+"""
+
+# ---------------------------------------------------------------------------------------
+
+
+class JournalCtl(object):
+    """
+    Thin wrapper around the journalctl command line tool.
+    """
+
+    module = None
+
+    def __init__(self, module):
+        """ """
+        self.module = module
+
+        self._journalctl = module.get_bin_path("journalctl", True)
+
+        self.unit = module.params.get("unit")
+        self.identifier = module.params.get("identifier")
+        self.lines = module.params.get("lines")
+        self.reverse = module.params.get("reverse")
+        self.arguments = module.params.get("arguments")
+
+    def run(self):
+        """ """
+        result = self.journalctl_lines()
+
+        return result
+
+    def journalctl_lines(self):
+        """
+        journalctl [OPTIONS...] [MATCHES...]
+
+        Query the journal.
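+
+        An assembled call looks like, for example:
+
+            journalctl --unit nginx.service --lines 50 --reverse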
+        """
+        args = []
+        args.append(self._journalctl)
+
+        if self.unit:
+            args.append("--unit")
+            args.append(self.unit)
+
+        if self.identifier:
+            args.append("--identifier")
+            args.append(self.identifier)
+
+        if self.lines:
+            args.append("--lines")
+            args.append(str(self.lines))
+
+        if self.reverse:
+            args.append("--reverse")
+
+        if self.arguments:
+            for arg in self.arguments:
+                args.append(arg)
+
+        rc, out, err = self._exec(args)
+
+        return dict(
+            rc=rc,
+            cmd=" ".join(args),
+            stdout=out,
+            stderr=err,
+        )
+
+    def _exec(self, args):
+        """ """
+        rc, out, err = self.module.run_command(args, check_rc=False)
+
+        if rc != 0:
+            self.module.log(msg=f"  rc : '{rc}'")
+            self.module.log(msg=f"  out: '{out}'")
+            self.module.log(msg=f"  err: '{err}'")
+
+        return rc, out, err
+
+
+def main():
+    """ """
+    args = dict(
+        identifier=dict(required=False, type="str"),
+        unit=dict(required=False, type="str"),
+        lines=dict(required=False, type="int"),
+        reverse=dict(required=False, default=False, type="bool"),
+        arguments=dict(required=False, default=[], type="list", elements="str"),
+    )
+
+    module = AnsibleModule(
+        argument_spec=args,
+        supports_check_mode=False,
+    )
+
+    k = JournalCtl(module)
+    result = k.run()
+
+    # module.log(msg=f"= result: {result}")
+
+    module.exit_json(**result)
+
+
+# import module snippets
+if __name__ == "__main__":
+    main()
diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/systemd/plugins/modules/systemd_timer.py b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/plugins/modules/systemd_timer.py
new file mode 100644
index 0000000..00e5118
--- /dev/null
+++ b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/plugins/modules/systemd_timer.py
@@ -0,0 +1,508 @@
+#!/usr/bin/python3
+# -*- coding: utf-8 -*-
+
+# (c) 2025, Bodo Schulz
+# Apache-2.0 (see LICENSE or https://opensource.org/license/apache-2-0)
+# SPDX-License-Identifier: Apache-2.0
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+import os
+from typing import Any, Dict, List, Optional, Tuple
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.bodsch.systemd.plugins.module_utils.helper import (
+    normalize_weekday_token,
+    timer_component,
+)
+from ansible_collections.bodsch.systemd.plugins.module_utils.validator import (
+    SystemdValidator,
+)
+
+# ---------------------------------------------------------------------------------------
+
+DOCUMENTATION = r"""
+---
+module: systemd_timer
+version_added: 1.4.0
+author: "Bodo Schulz (@bodsch)"
+
+short_description: Manage systemd timer unit files
+description:
+  - Create, update, or remove systemd timer units (.timer files).
+  - Support generic options for the [Unit], [Timer], and [Install] sections.
+  - Provide a structured, dynamic definition of OnCalendar via schedule/schedules.
+
+options:
+  name:
+    description:
+      - Base name of the timer unit (without ".timer").
+      - The file is written to I(path)/I(name).timer.
+    type: str
+    required: true
+  state:
+    description:
+      - Whether the timer unit file should be present.
+    type: str
+    choices: [present, absent]
+    default: present
+  path:
+    description:
+      - Base directory for the .timer file.
+    type: str
+    default: /lib/systemd/system
+  unit:
+    description:
+      - Options for the [Unit] section as a key/value mapping.
+      - Values can be scalars or lists; booleans are converted to C(true)/C(false).
+ type: dict + timer: + description: + - Options for the [Timer] section as a key/value mapping. + - If C(OnCalendar) is set here, it overrides schedule/schedules. + type: dict + install: + description: + - Options for the [Install] section as a key/value mapping. + type: dict + schedule: + description: + - Structured definition for a single OnCalendar specification. + - Ignored when C(timer.OnCalendar) is set. + type: dict + suboptions: + raw: + description: + - Raw systemd calendar pattern written as-is to OnCalendar. + type: str + special: + description: + - Shortcut like C(hourly), C(daily), C(weekly), C(monthly), C(yearly), C(quarterly), C(semiannually), etc. + type: str + year: + description: + - Year(s), for example C(2025) or list/range as string (C(2025,2026), C(2025..2030)). + type: raw + month: + description: + - Month(s), for example C(1), C(01), C(3,6,9) or C(*). + type: raw + day: + description: + - Day(s) of month, for example C(1), C(01), C(1,15), C(*/2). + type: raw + weekday: + description: + - Weekday(s), for example C(Mon), C(Mon,Fri) or numeric C(1..7). + type: raw + hour: + description: + - Hour(s), for example C(2), C(02), C(0..23), C(*/2). + type: raw + minute: + description: + - Minute(s), for example C(0), C(0,30), C(*/15). + type: raw + second: + description: + - Second(s), default is C(00). + type: raw + schedules: + description: + - List of multiple structured OnCalendar definitions. + - Each list item becomes its own C(OnCalendar=) entry. + type: list + elements: dict + enabled: + description: + - Whether the timer should be enabled or disabled using C(systemctl enable/disable). + - C(null) means the enable state is not changed. + type: bool + daemon_reload: + description: + - Whether to run C(systemctl daemon-reload) after changing the file. + type: bool + default: true + owner: + description: + - Owner of the .timer file. + type: str + default: root + group: + description: + - Group of the .timer file. + type: str + default: root + mode: + description: + - File mode of the .timer file. + type: str + default: '0644' +""" + +EXAMPLES = r""" +- name: Simple daily timer for Certbot + systemd_timer: + name: certbot + unit: + Description: Run Certbot daily + timer: + Persistent: true + RandomizedDelaySec: 12h + schedule: + special: daily + install: + WantedBy: timers.target + enabled: true + +- name: Timer twice a day at fixed times + systemd_timer: + name: certbot + unit: + Description: Run Certbot twice daily + timer: + Persistent: true + RandomizedDelaySec: 12h + schedules: + - hour: 2 + minute: 58 + - hour: 14 + minute: 58 + install: + WantedBy: timers.target + enabled: true + +- name: More complex pattern - Mon and Thu at 02:58 + systemd_timer: + name: certbot + unit: + Description: Run Certbot on specific weekdays + timer: + Persistent: true + schedule: + weekday: [Mon, Thu] + hour: 2 + minute: 58 + install: + WantedBy: timers.target + +- name: Use raw calendar pattern directly + systemd_timer: + name: custom + unit: + Description: Custom raw OnCalendar + timer: + Persistent: true + schedule: + raw: '*-*-* 00/12:00:00' + install: + WantedBy: timers.target + +- name: Remove timer + systemd_timer: + name: certbot + state: absent + enabled: false +""" + +RETURN = r""" +timer_path: + description: Path to the .timer file. + returned: always + type: str +on_calendar: + description: List of generated OnCalendar expressions. + returned: success + type: list + sample: + - Mon,Thu *-*-* 02:58:00 +changed: + description: Whether anything changed. 
+  returned: always
+  type: bool
+"""
+
+# ---------------------------------------------------------------------------------------
+
+
+class SystemdTimer:
+    """ """
+
+    module = None
+
+    def __init__(self, module):
+        """ """
+        self.module = module
+
+        self.name: str = module.params.get("name")
+        self.state: str = module.params.get("state")
+        self.enabled: Optional[bool] = module.params.get("enabled")
+        self.description: str = module.params.get("description")
+        self.base_path: str = module.params.get("path")
+
+        self.unit_options: Dict[str, Any] = module.params.get("unit")
+        self.timer_options: Dict[str, Any] = module.params.get("timer")
+        self.timer_validation: bool = module.params.get("timer_validation")
+        self.install_options: Dict[str, Any] = module.params.get("install")
+
+        self.schedule: Optional[Dict[str, Any]] = module.params.get("schedule")
+        self.schedules_param: Optional[List[Dict[str, Any]]] = module.params.get(
+            "schedules"
+        )
+
+        self.owner: str = module.params.get("owner")
+        self.group: str = module.params.get("group")
+        self.mode: str = module.params.get("mode")
+
+    def run(self):
+        """ """
+        self.module.log("SystemdTimer::run()")
+
+        timer_path = os.path.join(self.base_path, f"{self.name}.timer")
+
+        result: Dict[str, Any] = {
+            "changed": False,
+            "timer_path": timer_path,
+            "on_calendar": [],
+        }
+
+        # state == absent: remove the unit file
+        if self.state == "absent":
+            changed = self.remove_file(timer_path)
+            result["changed"] = changed
+
+            return result
+
+        # state == present: create or update the file
+        validator = SystemdValidator(module=self.module)
+
+        unit_options = validator.validate_unit_options(self.unit_options or {})
+        timer_options = validator.validate_timer_options(self.timer_options or {})
+        install_options = validator.validate_install_options(self.install_options or {})
+
+        # schedule / schedules -> OnCalendar
+        on_calendar_values: List[str] = []
+
+        if self.schedule:
+            spec = self.build_calendar_spec(self.schedule)
+
+            if spec:
+                on_calendar_values.append(spec)
+
+        if self.schedules_param:
+            for sch in self.schedules_param:
+                spec = self.build_calendar_spec(sch)
+                if spec:
+                    on_calendar_values.append(spec)
+
+        # Only set OnCalendar when it is not explicitly overridden via timer.OnCalendar.
+        if on_calendar_values and "OnCalendar" not in (self.timer_options or {}):
+            if len(on_calendar_values) == 1:
+                timer_options["OnCalendar"] = on_calendar_values[0]
+            else:
+                timer_options["OnCalendar"] = on_calendar_values
+
+        result["on_calendar"] = on_calendar_values
+
+        # render the sections
+        sections: List[str] = []
+
+        sections.append(self.render_section("Unit", unit_options or {}))
+        sections.append(self.render_section("Timer", timer_options or {}))
+
+        if install_options:
+            sections.append(self.render_section("Install", install_options))
+
+        self.module.log(f"  - sections: {sections}")
+
+        content = "\n\n".join(sections) + "\n"
+
+        # write the file if necessary
+        changed, diff = self.write_file(
+            timer_path, content, self.owner, self.group, self.mode
+        )
+        result["changed"] = changed
+        result["diff"] = diff
+
+        return result
+
+    def build_calendar_spec(self, schedule: Dict[str, Any]) -> Optional[str]:
+        """
+        Convert a schedule dict into a systemd calendar string.
+
+        Supported keys:
+          - raw: a complete, pre-built pattern
+          - special: shortcuts (daily, weekly, ...)
+          - year, month, day, weekday, hour, minute, second
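+
+        Example (illustrative): {"weekday": ["Mon", "Thu"], "hour": 2, "minute": 58}
+        results in "Mon,Thu *-*-* 02:58:00".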
+        """
+        self.module.log(f"SystemdTimer::build_calendar_spec(schedule={schedule})")
+
+        if not schedule:
+            return None
+
+        raw = schedule.get("raw")
+        if raw:
+            return str(raw)
+
+        special = schedule.get("special")
+        if special:
+            return str(special)
+
+        # weekday is handled separately because it forms an optional leading block
+        weekday = schedule.get("weekday")
+        weekday_str = ""
+
+        if weekday is not None:
+            if isinstance(weekday, (list, tuple, set)):
+                normalized = [normalize_weekday_token(w, self.module) for w in weekday]
+                weekday_str = ",".join(normalized)
+            else:
+                weekday_str = normalize_weekday_token(weekday, self.module)
+
+        year = timer_component(schedule.get("year"), default="*")
+        month = timer_component(schedule.get("month"), default="*", pad_width=2)
+        day = timer_component(schedule.get("day"), default="*", pad_width=2)
+
+        hour = timer_component(schedule.get("hour"), default="*", pad_width=2)
+        minute = timer_component(schedule.get("minute"), default="*", pad_width=2)
+        second = timer_component(schedule.get("second"), default="00", pad_width=2)
+
+        date_part = f"{year}-{month}-{day}"
+        time_part = f"{hour}:{minute}:{second}"
+
+        if weekday_str:
+            return f"{weekday_str} {date_part} {time_part}"
+
+        return f"{date_part} {time_part}"
+
+    def render_section(self, name, options):
+        """
+        Render a section in systemd unit file format.
+
+        options: dict[str, str|list[str]]
+        """
+        self.module.log(f"SystemdTimer::render_section(name={name}, options={options})")
+
+        lines = [f"[{name}]"]
+        for key, value in options.items():
+            if value is None:
+                continue
+            # multiple values -> multiple lines
+            if isinstance(value, (list, tuple, set)):
+                for v in value:
+                    lines.append(f"{key} = {v}")
+            else:
+                lines.append(f"{key} = {value}")
+
+        return "\n".join(lines)
+
+    def write_file(
+        self, path: str, content: str, owner: str, group: str, mode: str
+    ) -> Tuple[bool, Dict]:
+        """
+        Write a file only when its content has changed.
+
+        The returned diff dictionary carries before/after for Ansible's diff
+        mode. Owner, group and mode are applied via the Ansible helpers.
+        """
+        self.module.log(
+            f"SystemdTimer::write_file(path={path}, content, owner={owner}, group={group}, mode={mode})"
+        )
+
+        changed = False
+        before = ""
+        result = {}
+
+        if os.path.exists(path):
+            try:
+                with open(path, "r", encoding="utf-8") as f:
+                    before = f.read()
+            except OSError:
+                # ignore read problems, treat as changed
+                before = ""
+
+        self.module.log(f"  - before: {before}")
+        self.module.log(f"  - content: {content}")
+        self.module.log(f"  - check_mode: {self.module.check_mode}")
+
+        if before != content:
+            changed = True
+
+            if not self.module.check_mode:
+                os.makedirs(os.path.dirname(path), exist_ok=True)
+
+                with open(path, "w", encoding="utf-8") as f:
+                    f.write(content)
+
+                # apply file ownership and permissions
+                self.module.set_owner_if_different(path, owner, False)
+                self.module.set_group_if_different(path, group, False)
+                self.module.set_mode_if_different(path, mode, False)
+
+        result["diff"] = {}
+        result["diff"]["before"] = before
+        result["diff"]["after"] = content
+
+        return (changed, result)
+
+    def remove_file(self, path: str) -> bool:
+        """
+        Remove the given file if it exists.
+
+        Returns True when the file was removed, otherwise False.
+        If removal fails, the module aborts via fail_json.
+        """
+        self.module.log(f"SystemdTimer::remove_file(path={path})")
+
+        if os.path.exists(path):
+            if not self.module.check_mode:
+                try:
+                    os.remove(path)
+                except OSError as e:
+                    self.module.fail_json(msg=f"Failed to remove {path}: {e}")
+            return True
+        return False
+
+
+def main():
+
+    argument_spec = dict(
+        name=dict(type="str", required=True),
+        description=dict(type="str", required=False),
+        state=dict(type="str", default="present", choices=["present", "absent"]),
+        enabled=dict(type="bool", default=None),
+        path=dict(type="str", default="/lib/systemd/system"),
+        unit=dict(type="dict", default=None),
+        timer=dict(type="dict", default=None),
+        timer_validation=dict(type="bool", default=True),
+        install=dict(type="dict", default=None),
+        schedule=dict(type="dict", default=None),
+        schedules=dict(type="list", elements="dict", default=None),
+        daemon_reload=dict(type="bool", default=True),
+        owner=dict(type="str", default="root"),
+        group=dict(type="str", default="root"),
+        mode=dict(type="str", default="0644"),
+    )
+
+    module = AnsibleModule(
+        argument_spec=argument_spec,
+        supports_check_mode=True,
+    )
+
+    t = SystemdTimer(module)
+    result = t.run()
+
+    module.log(msg=f"= result: {result}")
+
+    module.exit_json(**result)
+
+
+# import module snippets
+if __name__ == "__main__":
+    main()
diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/systemd/plugins/modules/unit_file.py b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/plugins/modules/unit_file.py
new file mode 100644
index 0000000..b711a0a
--- /dev/null
+++ b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/plugins/modules/unit_file.py
@@ -0,0 +1,427 @@
+#!/usr/bin/python3
+# -*- coding: utf-8 -*-
+
+# (c) 2020-2023, Bodo Schulz
+# Apache-2.0 (see LICENSE or https://opensource.org/license/apache-2-0)
+# SPDX-License-Identifier: Apache-2.0
+
+from __future__ import absolute_import, division, print_function
+
+import os
+import shutil
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.bodsch.core.plugins.module_utils.checksum import Checksum
+from ansible_collections.bodsch.core.plugins.module_utils.directory import (
+    create_directory,
+)
+
+# ---------------------------------------------------------------------------------------
+
+DOCUMENTATION = """
+module: unit_file
+author:
+  - Bodo 'bodsch' Schulz (@bodsch)
+short_description: Creates a systemd unit file.
+version_added: 1.2.0
+
+description:
+  - Creates a systemd unit file.
+  - The C(service), C(timer) and C(socket) types are supported.
+
+options:
+  unit_type:
+    description:
+      - The unit type to be created.
+      - The C(service), C(timer) and C(socket) types are supported.
+      - Default is C(service).
+    type: str
+    choices: [ service, timer, socket ]
+  name:
+    description:
+      - The name of the unit file.
+    type: str
+  state:
+    description:
+      - Whether to install (C(present)) or remove (C(absent)) a unit file.
+      - Default is C(present).
+    type: str
+    choices: [ absent, present ]
+  drop_ins:
+    description:
+      - A list of possible systemd drop-ins.
+      - The structure corresponds exactly to that of a normal unit file.
+    type: list
+    elements: dict
+  unit_file:
+    description: The content of a systemd unit file.
+    type: dict
+
+"""
+
+EXAMPLES = """
+- name: create getty drop-ins
+  bodsch.systemd.unit_file:
+    name: "getty@tty1"
+    state: "present"
+    unit_type: "service"
+    drop_ins:
+      - name: autologin
+        state: present
+        service:
+          ExecStart:
+            - ""
+            - "{% raw %}-/sbin/agetty -o '-p -f -- \\\\u' --noclear --autologin username %I $TERM{% endraw %}"
+          Type: simple
+
+      - name: noclear
+        state: absent
+        service:
+          TTYVTDisallocate: false
+  when:
+    - ansible_service_mgr == 'systemd'
+
+- name: create systemd unit file
+  bodsch.systemd.unit_file:
+    name: "vaultwarden"
+    state: "present"
+    unit_type: "service"
+    unit_file:
+      description: |
+        #
+        #
+        #
+        #
+        #
+        #
+
+      Unit:
+        Description: Vaultwarden API server
+        Documentation: https://github.com/dani-garcia/vaultwarden
+        After: network.target
+      Service:
+        Type: simple
+        User: vaultwarden
+        Group: vaultwarden
+        LimitNOFILE: 1048576
+        UMask: "0077"
+
+        ExecStart: /usr/bin/vaultwarden
+        EnvironmentFile: /etc/vaultwarden/config.env
+      Install:
+        WantedBy: multi-user.target
+  when:
+    - ansible_service_mgr == 'systemd'
+
+- name: create systemd socket
+  bodsch.systemd.unit_file:
+    name: "systemd-initctl"
+    state: "present"
+    unit_type: "socket"
+    unit_file:
+      description: |
+        # SPDX-License-Identifier: LGPL-2.1-or-later
+        #
+        # This file is part of systemd.
+        #
+        # systemd is free software; you can redistribute it and/or modify it
+        # under the terms of the GNU Lesser General Public License as published by
+        # the Free Software Foundation; either version 2.1 of the License, or
+        # (at your option) any later version.
+      unit:
+        Description: initctl Compatibility Named Pipe
+        Documentation: man:systemd-initctl.socket(8)
+        DefaultDependencies: no
+        Before: sockets.target
+
+      Socket:
+        ListenFIFO: /run/initctl
+        Symlinks: /dev/initctl
+        SocketMode: "0600"
+  when:
+    - ansible_service_mgr == 'systemd'
+
+
+- name: create systemd timer
+  bodsch.systemd.unit_file:
+    name: "systemd-tmpfiles-clean"
+    state: "present"
+    unit_type: "timer"
+    unit_file:
+      unit:
+        Description: Daily Cleanup of Temporary Directories
+        Documentation: man:tmpfiles.d(5) man:systemd-tmpfiles(8)
+        ConditionPathExists: "!/etc/initrd-release"
+      timer:
+        OnBootSec: 15min
+        OnUnitActiveSec: 1d
+  when:
+    - ansible_service_mgr == 'systemd'
+"""
+
+RETURN = """
+changed:
+  type: bool
+  description: Status of the action
+msg:
+  type: str
+  description: Human-readable description of what has been done.
+"""
+
+# --------------------------------------------------------------------------------------------------
+
+UNIT_TPL = """# generated by ansible
+
+{% if item.description is defined %}
+{{ item.description }}
+{% set _ = item.pop('description') %}
+{% endif %}
+{% for section, options in item.items() %}
+[{{ section | capitalize }}]
+  {% for option, values in options.items() %}
+    {% if values is string or values is number %}
+{{ option.ljust(34) }} = {{ values }}
+    {% else %}
+      {% for value in values %}
+{{ option.ljust(34) }} = {{ value }}
+      {% endfor %}
+    {% endif %}
+  {% endfor %}
+
+{% endfor %}
+{#
+#}
+"""
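+# For illustration only: rendering UNIT_TPL with
+# item = {"Unit": {"Description": "Demo"}} yields roughly
+#
+#   # generated by ansible
+#
+#   [Unit]
+#   Description                        = Demo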
+
+
+class SystemdUnitFile(object):
+    """ """
+
+    module = None
+
+    def __init__(self, module):
+        """ """
+        self.module = module
+
+        self.unit_type = module.params.get("unit_type")
+        self.name = module.params.get("name")
+        self.state = module.params.get("state")
+        self.overwrite = module.params.get("overwrite")
+        self.drop_ins = module.params.get("drop_ins")
+        self.unit_file = module.params.get("unit_file")
+
+        self.tmp_directory = os.path.join(
+            "/run/.ansible", f"systemd_unit.{str(os.getpid())}"
+        )
+
+    def run(self):
+        """ """
+        result = dict(
+            rc=1,
+            failed=True,
+            changed=False,
+        )
+
+        self.checksum = Checksum(self.module)
+
+        if self.state == "absent":
+            result = self.clean_unit_files()
+
+        else:
+            result = self.create_unit_files()
+
+        if os.path.exists(self.tmp_directory):
+            shutil.rmtree(self.tmp_directory)
+
+        return result
+
+    def create_unit_files(self):
+        """ """
+        result = dict(changed=False, failed=False)
+
+        if isinstance(self.drop_ins, list) and len(self.drop_ins) > 0:
+
+            create_directory(directory=self.tmp_directory, mode="0750")
+            result = self.create_drop_in(self.drop_ins)
+
+        if isinstance(self.unit_file, dict) and len(self.unit_file) > 0:
+
+            create_directory(directory=self.tmp_directory, mode="0750")
+            result = self.create_unit_file(self.unit_file)
+
+        return result
+
+    def clean_unit_files(self):
+        """ """
+        result = dict(changed=False, failed=False)
+
+        if isinstance(self.drop_ins, list) and len(self.drop_ins) > 0:
+
+            service_name = f"/etc/systemd/system/{self.name}.d"
+
+            for drop_in in self.drop_ins:
+                name = drop_in.get("name")
+                state = drop_in.get("state", "present")
+
+                if state == "absent":
+                    unit_file = os.path.join(service_name, f"{name}.conf")
+                    result = self.__remove_unit(unit_file)
+
+        if isinstance(self.unit_file, dict) and len(self.unit_file) > 0:
+            unit_file = os.path.join(
+                "/lib/systemd/system", f"{self.name}.{self.unit_type}"
+            )
+            result = self.__remove_unit(unit_file)
+
+        return result
+
+    def create_drop_in(self, data):
+        """ """
+        service_name = f"/etc/systemd/system/{self.name}.d"
+
+        if not os.path.exists(service_name):
+            create_directory(service_name)
+
+        result = dict(changed=False, failed=False)
+
+        for drop_in in data:
+            name = drop_in.get("name")
+            state = drop_in.get("state", "present")
+
+            unit_file = os.path.join(service_name, f"{name}.conf")
+            file_temporary = os.path.join(self.tmp_directory, f"{name}.conf")
+
+            if state == "present":
+                content = self.__template(drop_in)
+
+                with open(file_temporary, "w") as f:
+                    f.write(content)
+
+                result = self.__changed(file_temporary, unit_file)
+
+            else:
+                result = self.__remove_unit(unit_file)
+
+        return result
+
+    def create_unit_file(self, data):
+        """ """
+        unit_file = os.path.join("/lib/systemd/system", f"{self.name}.{self.unit_type}")
+        file_temporary = os.path.join(
+            self.tmp_directory, f"{self.name}.{self.unit_type}"
+        )
+
+        if self.state == "present":
+            content = self.__template(data)
+
+            with open(file_temporary, "w") as f:
+                f.write(content)
+
+            result = self.__changed(file_temporary, unit_file)
+        else:
+            result = dict(changed=False, failed=False)
+
+        return result
+
+    def __changed(self, file_temporary, unit_file):
+        """ """
+
+        old_checksum = self.checksum.checksum_from_file(unit_file)
+        new_checksum = self.checksum.checksum_from_file(file_temporary)
+
+        changed = not (new_checksum == old_checksum)
+        new_file = False
+        msg = "The unit-file has not been changed"
+
+        if changed:
+            new_file = old_checksum is None
+            shutil.move(file_temporary, unit_file)
+            msg = "The unit-file was successfully changed"
+
+            if new_file:
+                msg = "The unit-file was successfully created"
+
+        result = dict(changed=changed, msg=msg)
+
+        return result
+
+    def __remove_unit(self, unit_file):
+        """ """
+        if os.path.exists(unit_file):
+            os.remove(unit_file)
+
+            result = dict(
+                changed=True,
+                failed=False,
+                msg="The unit-file was successfully removed.",
+            )
+        else:
+            result = dict(
+                changed=False,
+                failed=False,
+                msg="The unit-file has already been removed.",
+            )
+
+        return result
+
+    def __template(self, data):
+        """ """
+        if isinstance(data, dict):
+            from jinja2 import Template
+
+            if data.get("name"):
+                _ = data.pop("name")
+            if data.get("state"):
+                _ = data.pop("state")
+
+            tm = Template(UNIT_TPL, trim_blocks=True, lstrip_blocks=True)
+            d = tm.render(item=data)
+        else:
+            d = None
+
+        return d
+
+
+def main():
+    """ """
+    args = dict(
+        unit_type=dict(
+            choices=["service", "socket", "timer"], default="service", type="str"
+        ),
+        name=dict(required=True, type="str"),
+        state=dict(
+            choices=[
+                "absent",
+                "present",
+            ],
+            default="present",
+            type="str",
+        ),
+        overwrite=dict(required=False, default=False, type="bool"),
+        drop_ins=dict(required=False, default=[], type="list", elements="dict"),
+        unit_file=dict(required=False, default={}, type="dict"),
+    )
+
+    module = AnsibleModule(
+        argument_spec=args,
+        supports_check_mode=False,
+    )
+
+    k = SystemdUnitFile(module)
+    result = k.run()
+
+    # module.log(msg=f"= result: {result}")
+
+    module.exit_json(**result)
+
+
+# import module snippets
+if __name__ == "__main__":
+    main()
diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/coredump/.ansible-lint b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/coredump/.ansible-lint
new file mode 100644
index 0000000..5343e85
--- /dev/null
+++ b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/coredump/.ansible-lint
@@ -0,0 +1,6 @@
+---
+
+skip_list:
+  - name[casing]
+  - name[template]
+  - syntax-check[specific]
diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/coredump/.editorconfig b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/coredump/.editorconfig
new file mode 100644
index 0000000..898cdbd
--- /dev/null
+++ b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/coredump/.editorconfig
@@ -0,0 +1,23 @@
+# EditorConfig helps developers define and maintain consistent
+# coding styles between different editors and IDEs
+# https://editorconfig.org/
+
+
+root = true
+
+[*]
+indent_style = space
+indent_size = 2
+
+end_of_line = lf
+charset = utf-8
+trim_trailing_whitespace = true
+insert_final_newline = true
+max_line_length = 100
+
+[*.py]
+indent_style = space
+indent_size = 4
+
+[*.md]
+trim_trailing_whitespace = false
diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/coredump/.flake8 b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/coredump/.flake8
new file mode 100644
index 0000000..1962f7e
--- /dev/null
+++ b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/coredump/.flake8
@@ -0,0 +1,19 @@
+[flake8]
+
+# E221 multiple spaces before operator
+# E251 unexpected spaces around keyword / parameter equals
+
+ignore = E221,E251
+
+exclude =
+    # No need to traverse our git directory
+    .git,
+    # There's no value in checking cache directories
+    __pycache__,
+    .tox
+
+# E203: https://github.com/python/black/issues/315
+# ignore = D,E741,W503,W504,H,E501,E203
+
+max-line-length = 195
+
diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/coredump/.gitignore b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/coredump/.gitignore
new file mode 100644
index 0000000..3d35e6a
--- /dev/null
+++ b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/coredump/.gitignore
@@ -0,0 +1,6 @@
+.tox
+.galaxy_install_info
+*kate-swp
+__pycache__
+.cache
+.directory
diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/coredump/.yamllint b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/coredump/.yamllint
new file mode 100644
index 0000000..e3f52af
--- /dev/null
+++ b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/coredump/.yamllint
@@ -0,0 +1,36 @@
+---
+# Based on ansible-lint config
+extends: default
+
+rules:
+  braces:
+    max-spaces-inside: 1
+    level: error
+  brackets:
+    max-spaces-inside: 1
+    level: error
+  colons:
+    max-spaces-after: -1
+    level: error
+  commas:
+    max-spaces-after: -1
+    level: error
+  comments: disable
+  comments-indentation: disable
+  document-start: disable
+  empty-lines:
+    max: 3
+    level: error
+  hyphens:
+    level: error
+  indentation:
+    spaces: 2
+  key-duplicates: enable
+  line-length:
+    max: 195
+    level: warning
+  new-line-at-end-of-file: disable
+  new-lines:
+    type: unix
+  trailing-spaces: disable
+  truthy: disable
diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/coredump/CONTRIBUTING.md b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/coredump/CONTRIBUTING.md
new file mode 100644
index 0000000..e3cd4cc
--- /dev/null
+++ b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/coredump/CONTRIBUTING.md
@@ -0,0 +1,31 @@
+Contributing
+============
+If you want to contribute to a project and make it better, your help is very welcome.
+Contributing is also a great way to learn more about social coding on Github, new technologies
+and their ecosystems and how to make constructive, helpful bug reports, feature requests and the
+noblest of all contributions: a good, clean pull request.
+
+### How to make a clean pull request
+
+Look for a project's contribution instructions. If there are any, follow them.
+
+- Create a personal fork of the project on Github.
+- Clone the fork on your local machine. Your remote repo on Github is called `origin`.
+- Add the original repository as a remote called `upstream`.
+- If you created your fork a while ago be sure to pull upstream changes into your local repository.
+- Create a new branch to work on! Branch from `develop` if it exists, else from `master`. +- Implement/fix your feature, comment your code. +- Follow the code style of the project, including indentation. +- If the project has tests run them! +- Write or adapt tests as needed. +- Add or change the documentation as needed. +- Squash your commits into a single commit. Create a new branch if necessary. +- Push your branch to your fork on Github, the remote `origin`. +- From your fork open a pull request in the correct branch. Target the project's `develop` branch if there is one, else go for `master`! +- If the maintainer requests further changes just push them to your branch. The PR will be updated automatically. +- Once the pull request is approved and merged you can pull the changes from `upstream` to your local repo and delete + your extra branch(es). + +And last but not least: Always write your commit messages in the present tense. +Your commit message should describe what the commit, when applied, does to the +code – not what you did to the code. diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/coredump/LICENSE b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/coredump/LICENSE new file mode 100644 index 0000000..8c8472f --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/coredump/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. 
For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright 2020-2021 Bodo Schulz + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/coredump/Makefile b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/coredump/Makefile new file mode 100644 index 0000000..bfaab7c --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/coredump/Makefile @@ -0,0 +1,22 @@ +# +export TOX_SCENARIO ?= default +export TOX_ANSIBLE ?= ansible_8.5 + +.PHONY: converge destroy verify test lint + +default: converge + +converge: + @hooks/converge + +destroy: + @hooks/destroy + +verify: + @hooks/verify + +test: + @hooks/test + +lint: + @hooks/lint diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/coredump/README.md b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/coredump/README.md new file mode 100644 index 0000000..8c1982e --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/coredump/README.md @@ -0,0 +1,3 @@ + + +# https://www.freedesktop.org/software/systemd/man/coredump.conf.html diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/coredump/defaults/main.yml b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/coredump/defaults/main.yml new file mode 100644 index 0000000..4024611 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/coredump/defaults/main.yml @@ -0,0 +1,12 @@ +--- + +systemd_coredump: {} + # storage: "" + # compress: "" + # process_size_max: "" + # external_size_max: "" + # journal_size_max: "" + # max_use: "" + # keep_free: "" + +... diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/coredump/hooks/converge b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/coredump/hooks/converge new file mode 100755 index 0000000..0c50932 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/coredump/hooks/converge @@ -0,0 +1,3 @@ +#!/usr/bin/env bash + +hooks/tox.sh "converge" diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/coredump/hooks/destroy b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/coredump/hooks/destroy new file mode 100755 index 0000000..b4a3f8d --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/coredump/hooks/destroy @@ -0,0 +1,3 @@ +#!/usr/bin/env bash + +hooks/tox.sh "destroy" diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/coredump/hooks/lint b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/coredump/hooks/lint new file mode 100755 index 0000000..ef226a0 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/coredump/hooks/lint @@ -0,0 +1,3 @@ +#!/usr/bin/env bash + +hooks/tox.sh "lint" diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/coredump/hooks/molecule.rc b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/coredump/hooks/molecule.rc new file mode 100644 index 0000000..a15f7c3 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/coredump/hooks/molecule.rc @@ -0,0 +1,9 @@ + +TOX_ARGS= + +if [ -n "${TOX_SCENARIO}" ] +then + TOX_ARGS="--scenario-name ${TOX_SCENARIO}" +fi + +TOX_OPTS="-e ${TOX_ANSIBLE}" diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/coredump/hooks/test 
b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/coredump/hooks/test new file mode 100755 index 0000000..2869139 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/coredump/hooks/test @@ -0,0 +1,3 @@ +#!/usr/bin/env bash + +hooks/tox.sh "test" diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/coredump/hooks/tox.sh b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/coredump/hooks/tox.sh new file mode 100755 index 0000000..62bb777 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/coredump/hooks/tox.sh @@ -0,0 +1,26 @@ +#!/usr/bin/env bash + +. hooks/molecule.rc + +TOX_TEST="${1}" + +if [ -f "./collections.yml" ] +then + for collection in $(grep -v "#" collections.yml | grep "^ - name: " | awk -F ': ' '{print $2}') + do + collections_installed="$(ansible-galaxy collection list | grep ${collection} 2> /dev/null)" + + if [ -z "${collections_installed}" ] + then + echo "Install the required collection '${collection}'" + ansible-galaxy collection install ${collection} + else + collection_version=$(echo "${collections_installed}" | awk -F ' ' '{print $2}') + + echo "The required collection '${collection}' is installed in version ${collection_version}." + fi + done + echo "" +fi + +tox ${TOX_OPTS} -- molecule ${TOX_TEST} ${TOX_ARGS} diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/coredump/hooks/verify b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/coredump/hooks/verify new file mode 100755 index 0000000..5f436af --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/coredump/hooks/verify @@ -0,0 +1,3 @@ +#!/usr/bin/env bash + +hooks/tox.sh "verify" diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/coredump/molecule/configured/converge.yml b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/coredump/molecule/configured/converge.yml new file mode 100644 index 0000000..bfae97b --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/coredump/molecule/configured/converge.yml @@ -0,0 +1,7 @@ +--- +- name: converge + hosts: instance + any_errors_fatal: false + + roles: + - role: bodsch.systemd.coredump diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/coredump/molecule/configured/group_vars/all/vars.yml b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/coredump/molecule/configured/group_vars/all/vars.yml new file mode 100644 index 0000000..a0ef4c7 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/coredump/molecule/configured/group_vars/all/vars.yml @@ -0,0 +1,12 @@ +--- + +systemd_coredump: + storage: external + compress: true + process_size_max: 32G + external_size_max: 32G + journal_size_max: 767M + max_use: "" + keep_free: "" + +... 
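For reference, the "configured" scenario vars above can be applied outside Molecule with a short playbook. This is a minimal sketch, assuming the bodsch.systemd collection is already installed; the inventory group `coredump_hosts` is a hypothetical placeholder for real systemd hosts:

---
- name: configure systemd-coredump
  hosts: coredump_hosts            # hypothetical inventory group
  become: true

  vars:
    systemd_coredump:
      storage: external            # one of: none, external, journal
      compress: true
      process_size_max: 32G
      external_size_max: 32G
      journal_size_max: 767M

  roles:
    - role: bodsch.systemd.coredump

Within the repository itself the scenario is normally driven through the Makefile: `TOX_SCENARIO=configured make converge` runs hooks/converge, which sources hooks/molecule.rc via hooks/tox.sh and ends in `tox -e ansible_8.5 -- molecule converge --scenario-name configured`.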
diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/coredump/molecule/configured/molecule.yml b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/coredump/molecule/configured/molecule.yml new file mode 100644 index 0000000..67a0c0f --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/coredump/molecule/configured/molecule.yml @@ -0,0 +1,51 @@ +dependency: + name: galaxy + +driver: + name: docker + +platforms: + - name: instance + image: "ghcr.io/bodsch/docker-ansible/ansible-${DISTRIBUTION:-debian:12}" + command: ${MOLECULE_DOCKER_COMMAND:-""} + docker_host: "${DOCKER_HOST:-unix://run/docker.sock}" + privileged: true + pre_build_image: true + cgroupns_mode: host + mounts: + - source: /sys/fs/cgroup + target: /sys/fs/cgroup + type: bind + read_only: false + volumes: + - /var/lib/containerd + capabilities: + - SYS_ADMIN + tmpfs: + - /run + - /tmp + +provisioner: + name: ansible + ansible_args: + - --diff + - -v + config_options: + defaults: + deprecation_warnings: true + callback_result_format: yaml + callbacks_enabled: profile_tasks + gathering: smart + fact_caching_timeout: 320 +scenario: + test_sequence: + - destroy + - dependency + - create + - prepare + - converge + - idempotence + - verify + - destroy +verifier: + name: testinfra diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/coredump/molecule/configured/prepare.yml b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/coredump/molecule/configured/prepare.yml new file mode 100644 index 0000000..d680e48 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/coredump/molecule/configured/prepare.yml @@ -0,0 +1,49 @@ +--- + +- name: information + hosts: all + gather_facts: true + + pre_tasks: + - name: arch- / artixlinux + when: + - ansible_facts.distribution | lower == 'archlinux' or + ansible_facts.os_family | lower == 'artix linux' + block: + - name: update pacman system + ansible.builtin.command: | + pacman --refresh --sync --sysupgrade --noconfirm + register: pacman + changed_when: pacman.rc != 0 + failed_when: pacman.rc != 0 + + - name: create depends service + ansible.builtin.copy: + mode: 0755 + dest: /etc/init.d/net + content: | + #!/usr/bin/openrc-run + true + when: + - ansible_facts.os_family | lower == 'artix linux' + + - name: make sure python3-apt is installed (only debian based) + ansible.builtin.package: + name: + - python3-apt + state: present + when: + - ansible_facts.os_family | lower == 'debian' + + - name: update package cache + become: true + ansible.builtin.package: + update_cache: true + + - name: environment + ansible.builtin.debug: + msg: + - "os family : {{ ansible_facts.distribution }} ({{ ansible_facts.os_family }})" + - "distribution version : {{ ansible_facts.distribution_major_version }}" + - "ansible version : {{ ansible_version.full }}" + - "python version : {{ ansible_facts.python.version.major }}.{{ ansible_facts.python.version.minor }}" diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/coredump/molecule/configured/tests/test_default.py b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/coredump/molecule/configured/tests/test_default.py new file mode 100644 index 0000000..5180084 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/coredump/molecule/configured/tests/test_default.py @@ -0,0 +1,148 @@ +# coding: utf-8 +from __future__ 
import unicode_literals + +import json +import os + +import pytest +import testinfra.utils.ansible_runner +from ansible.parsing.dataloader import DataLoader +from ansible.template import Templar + +HOST = "instance" + +testinfra_hosts = testinfra.utils.ansible_runner.AnsibleRunner( + os.environ["MOLECULE_INVENTORY_FILE"] +).get_hosts(HOST) + + +def pp_json(json_thing, sort=True, indents=2): + if type(json_thing) is str: + print(json.dumps(json.loads(json_thing), sort_keys=sort, indent=indents)) + else: + print(json.dumps(json_thing, sort_keys=sort, indent=indents)) + return None + + +def base_directory(): + """ """ + cwd = os.getcwd() + + if "group_vars" in os.listdir(cwd): + directory = "../.." + molecule_directory = "." + else: + directory = "." + molecule_directory = f"molecule/{os.environ.get('MOLECULE_SCENARIO_NAME')}" + + return directory, molecule_directory + + +def read_ansible_yaml(file_name, role_name): + """ """ + read_file = None + + for e in ["yml", "yaml"]: + test_file = f"{file_name}.{e}" + if os.path.isfile(test_file): + read_file = test_file + break + + return f"file={read_file} name={role_name}" + + +@pytest.fixture() +def get_vars(host): + """ + parse ansible variables + - defaults/main.yml + - vars/main.yml + - vars/${DISTRIBUTION}.yaml + - molecule/${MOLECULE_SCENARIO_NAME}/group_vars/all/vars.yml + """ + base_dir, molecule_dir = base_directory() + distribution = host.system_info.distribution + operation_system = None + + if distribution in ["debian", "ubuntu"]: + operation_system = "debian" + elif distribution in ["redhat", "ol", "centos", "rocky", "almalinux"]: + operation_system = "redhat" + elif distribution in ["arch", "artix"]: + operation_system = f"{distribution}linux" + + # print(" -> {} / {}".format(distribution, os)) + # print(" -> {}".format(base_dir)) + + file_defaults = read_ansible_yaml(f"{base_dir}/defaults/main", "role_defaults") + file_vars = read_ansible_yaml(f"{base_dir}/vars/main", "role_vars") + file_distibution = read_ansible_yaml( + f"{base_dir}/vars/{operation_system}", "role_distibution" + ) + file_molecule = read_ansible_yaml( + f"{molecule_dir}/group_vars/all/vars", "test_vars" + ) + # file_host_molecule = read_ansible_yaml("{}/host_vars/{}/vars".format(base_dir, HOST), "host_vars") + + defaults_vars = ( + host.ansible("include_vars", file_defaults) + .get("ansible_facts") + .get("role_defaults") + ) + vars_vars = ( + host.ansible("include_vars", file_vars).get("ansible_facts").get("role_vars") + ) + distibution_vars = ( + host.ansible("include_vars", file_distibution) + .get("ansible_facts") + .get("role_distibution") + ) + molecule_vars = ( + host.ansible("include_vars", file_molecule) + .get("ansible_facts") + .get("test_vars") + ) + # host_vars = host.ansible("include_vars", file_host_molecule).get("ansible_facts").get("host_vars") + + ansible_vars = defaults_vars + ansible_vars.update(vars_vars) + ansible_vars.update(distibution_vars) + ansible_vars.update(molecule_vars) + # ansible_vars.update(host_vars) + + templar = Templar(loader=DataLoader(), variables=ansible_vars) + result = templar.template(ansible_vars, fail_on_undefined=False) + + return result + + +def local_facts(host): + """ + return local facts + """ + return ( + host.ansible("setup").get("ansible_facts").get("ansible_local").get("coredump") + ) + + +@pytest.mark.parametrize( + "directories", + [ + "/etc/systemd", + ], +) +def test_directories(host, directories): + d = host.file(directories) + assert d.is_directory + + +@pytest.mark.parametrize( + "files", + [ + 
"/etc/systemd/coredump.conf", + ], +) +def test_systemd_files(host, files): + """ """ + d = host.file(files) + assert d.is_file diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/coredump/molecule/default/converge.yml b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/coredump/molecule/default/converge.yml new file mode 100644 index 0000000..bfae97b --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/coredump/molecule/default/converge.yml @@ -0,0 +1,7 @@ +--- +- name: converge + hosts: instance + any_errors_fatal: false + + roles: + - role: bodsch.systemd.coredump diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/coredump/molecule/default/group_vars/all/vars.yml b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/coredump/molecule/default/group_vars/all/vars.yml new file mode 100644 index 0000000..c81cf5b --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/coredump/molecule/default/group_vars/all/vars.yml @@ -0,0 +1,3 @@ +--- + +... diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/coredump/molecule/default/molecule.yml b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/coredump/molecule/default/molecule.yml new file mode 100644 index 0000000..67a0c0f --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/coredump/molecule/default/molecule.yml @@ -0,0 +1,51 @@ +dependency: + name: galaxy + +driver: + name: docker + +platforms: + - name: instance + image: "ghcr.io/bodsch/docker-ansible/ansible-${DISTRIBUTION:-debian:12}" + command: ${MOLECULE_DOCKER_COMMAND:-""} + docker_host: "${DOCKER_HOST:-unix://run/docker.sock}" + privileged: true + pre_build_image: true + cgroupns_mode: host + mounts: + - source: /sys/fs/cgroup + target: /sys/fs/cgroup + type: bind + read_only: false + volumes: + - /var/lib/containerd + capabilities: + - SYS_ADMIN + tmpfs: + - /run + - /tmp + +provisioner: + name: ansible + ansible_args: + - --diff + - -v + config_options: + defaults: + deprecation_warnings: true + callback_result_format: yaml + callbacks_enabled: profile_tasks + gathering: smart + fact_caching_timeout: 320 +scenario: + test_sequence: + - destroy + - dependency + - create + - prepare + - converge + - idempotence + - verify + - destroy +verifier: + name: testinfra diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/coredump/molecule/default/prepare.yml b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/coredump/molecule/default/prepare.yml new file mode 100644 index 0000000..d680e48 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/coredump/molecule/default/prepare.yml @@ -0,0 +1,49 @@ +--- + +- name: information + hosts: all + gather_facts: true + + pre_tasks: + - name: arch- / artixlinux + when: + - ansible_facts.distribution | lower == 'archlinux' or + ansible_facts.os_family | lower == 'artix linux' + block: + - name: update pacman system + ansible.builtin.command: | + pacman --refresh --sync --sysupgrade --noconfirm + register: pacman + changed_when: pacman.rc != 0 + failed_when: pacman.rc != 0 + + - name: create depends service + ansible.builtin.copy: + mode: 0755 + dest: /etc/init.d/net + content: | + #!/usr/bin/openrc-run + true + when: + - ansible_facts.os_family | lower == 'artix linux' + + - name: make sure python3-apt is 
installed (only debian based) + ansible.builtin.package: + name: + - python3-apt + state: present + when: + - ansible_facts.os_family | lower == 'debian' + + - name: update package cache + become: true + ansible.builtin.package: + update_cache: true + + - name: environment + ansible.builtin.debug: + msg: + - "os family : {{ ansible_facts.distribution }} ({{ ansible_facts.os_family }})" + - "distribution version : {{ ansible_facts.distribution_major_version }}" + - "ansible version : {{ ansible_version.full }}" + - "python version : {{ ansible_facts.python.version.major }}.{{ ansible_facts.python.version.minor }}" diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/coredump/molecule/default/tests/test_default.py b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/coredump/molecule/default/tests/test_default.py new file mode 100644 index 0000000..5180084 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/coredump/molecule/default/tests/test_default.py @@ -0,0 +1,148 @@ +# coding: utf-8 +from __future__ import unicode_literals + +import json +import os + +import pytest +import testinfra.utils.ansible_runner +from ansible.parsing.dataloader import DataLoader +from ansible.template import Templar + +HOST = "instance" + +testinfra_hosts = testinfra.utils.ansible_runner.AnsibleRunner( + os.environ["MOLECULE_INVENTORY_FILE"] +).get_hosts(HOST) + + +def pp_json(json_thing, sort=True, indents=2): + if type(json_thing) is str: + print(json.dumps(json.loads(json_thing), sort_keys=sort, indent=indents)) + else: + print(json.dumps(json_thing, sort_keys=sort, indent=indents)) + return None + + +def base_directory(): + """ """ + cwd = os.getcwd() + + if "group_vars" in os.listdir(cwd): + directory = "../.." + molecule_directory = "." + else: + directory = "." 
+ molecule_directory = f"molecule/{os.environ.get('MOLECULE_SCENARIO_NAME')}" + + return directory, molecule_directory + + +def read_ansible_yaml(file_name, role_name): + """ """ + read_file = None + + for e in ["yml", "yaml"]: + test_file = f"{file_name}.{e}" + if os.path.isfile(test_file): + read_file = test_file + break + + return f"file={read_file} name={role_name}" + + +@pytest.fixture() +def get_vars(host): + """ + parse ansible variables + - defaults/main.yml + - vars/main.yml + - vars/${DISTRIBUTION}.yaml + - molecule/${MOLECULE_SCENARIO_NAME}/group_vars/all/vars.yml + """ + base_dir, molecule_dir = base_directory() + distribution = host.system_info.distribution + operation_system = None + + if distribution in ["debian", "ubuntu"]: + operation_system = "debian" + elif distribution in ["redhat", "ol", "centos", "rocky", "almalinux"]: + operation_system = "redhat" + elif distribution in ["arch", "artix"]: + operation_system = f"{distribution}linux" + + # print(" -> {} / {}".format(distribution, os)) + # print(" -> {}".format(base_dir)) + + file_defaults = read_ansible_yaml(f"{base_dir}/defaults/main", "role_defaults") + file_vars = read_ansible_yaml(f"{base_dir}/vars/main", "role_vars") + file_distibution = read_ansible_yaml( + f"{base_dir}/vars/{operation_system}", "role_distibution" + ) + file_molecule = read_ansible_yaml( + f"{molecule_dir}/group_vars/all/vars", "test_vars" + ) + # file_host_molecule = read_ansible_yaml("{}/host_vars/{}/vars".format(base_dir, HOST), "host_vars") + + defaults_vars = ( + host.ansible("include_vars", file_defaults) + .get("ansible_facts") + .get("role_defaults") + ) + vars_vars = ( + host.ansible("include_vars", file_vars).get("ansible_facts").get("role_vars") + ) + distibution_vars = ( + host.ansible("include_vars", file_distibution) + .get("ansible_facts") + .get("role_distibution") + ) + molecule_vars = ( + host.ansible("include_vars", file_molecule) + .get("ansible_facts") + .get("test_vars") + ) + # host_vars = host.ansible("include_vars", file_host_molecule).get("ansible_facts").get("host_vars") + + ansible_vars = defaults_vars + ansible_vars.update(vars_vars) + ansible_vars.update(distibution_vars) + ansible_vars.update(molecule_vars) + # ansible_vars.update(host_vars) + + templar = Templar(loader=DataLoader(), variables=ansible_vars) + result = templar.template(ansible_vars, fail_on_undefined=False) + + return result + + +def local_facts(host): + """ + return local facts + """ + return ( + host.ansible("setup").get("ansible_facts").get("ansible_local").get("coredump") + ) + + +@pytest.mark.parametrize( + "directories", + [ + "/etc/systemd", + ], +) +def test_directories(host, directories): + d = host.file(directories) + assert d.is_directory + + +@pytest.mark.parametrize( + "files", + [ + "/etc/systemd/coredump.conf", + ], +) +def test_systemd_files(host, files): + """ """ + d = host.file(files) + assert d.is_file diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/coredump/tasks/main.yml b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/coredump/tasks/main.yml new file mode 100644 index 0000000..52bed48 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/coredump/tasks/main.yml @@ -0,0 +1,15 @@ +--- + +- name: merge systemd coredump configuration between defaults and custom + ansible.builtin.set_fact: + systemd_coredump: "{{ systemd_defaults_coredump | combine(systemd_coredump, recursive=True) }}" + +- name: create systemd coredump configuration + 
ansible.builtin.template: + src: systemd/coredump.conf.j2 + dest: /etc/systemd/coredump.conf + mode: 0644 + owner: root + group: root + +... diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/coredump/templates/systemd/coredump.conf.j2 b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/coredump/templates/systemd/coredump.conf.j2 new file mode 100644 index 0000000..d18726a --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/coredump/templates/systemd/coredump.conf.j2 @@ -0,0 +1,50 @@ +#jinja2: trim_blocks: True, lstrip_blocks: True +# {{ ansible_managed }} +# +# This file is part of systemd. +# +# systemd is free software; you can redistribute it and/or modify it under the +# terms of the GNU Lesser General Public License as published by the Free +# Software Foundation; either version 2.1 of the License, or (at your option) +# any later version. +# +# Entries in this file show the compile time defaults. Local configuration +# should be created by either modifying this file, or by creating "drop-ins" in +# the coredump.conf.d/ subdirectory. The latter is generally recommended. +# Defaults can be restored by simply deleting this file and all drop-ins. +# +# Use 'systemd-analyze cat-config systemd/coredump.conf' to display the full config. +# +# See coredump.conf(5) for details. + +[Coredump] +{% if systemd_coredump.storage is defined and + systemd_coredump.storage | string | length > 0 and + systemd_coredump.storage in ["none", "external", "journal"] %} +Storage = {{ systemd_coredump.storage }} +{% endif %} +{% if systemd_coredump.compress is defined and + systemd_coredump.compress | string | length > 0 and + systemd_coredump.compress %} +Compress = yes +{% endif %} +{% if systemd_coredump.process_size_max is defined and + systemd_coredump.process_size_max | string | length > 0 %} +ProcessSizeMax = {{ systemd_coredump.process_size_max }} +{% endif %} +{% if systemd_coredump.external_size_max is defined and + systemd_coredump.external_size_max | string | length > 0 %} +ExternalSizeMax = {{ systemd_coredump.external_size_max }} +{% endif %} +{% if systemd_coredump.journal_size_max is defined and + systemd_coredump.journal_size_max | string | length > 0 %} +JournalSizeMax = {{ systemd_coredump.journal_size_max }} +{% endif %} +{% if systemd_coredump.max_use is defined and + systemd_coredump.max_use | string | length > 0 %} +MaxUse = {{ systemd_coredump.max_use }} +{% endif %} +{% if systemd_coredump.keep_free is defined and + systemd_coredump.keep_free | string | length > 0 %} +KeepFree = {{ systemd_coredump.keep_free }} +{% endif %} diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/coredump/vars/main.yml b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/coredump/vars/main.yml new file mode 100644 index 0000000..80dc8a6 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/coredump/vars/main.yml @@ -0,0 +1,12 @@ +--- + +systemd_defaults_coredump: + storage: "" # external + compress: "" # true + process_size_max: "" # 32G + external_size_max: "" # 32G + journal_size_max: "" # 767M + max_use: "" + keep_free: "" + +... 
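The three files above form one pattern: vars/main.yml declares every option as an empty string, tasks/main.yml folds the user's systemd_coredump on top of those defaults with combine(recursive=True), and the template emits a directive only when the merged value is non-empty (Storage additionally has to be one of none/external/journal). A minimal sketch of the merge step, runnable ad hoc against localhost:

---
- name: demonstrate the defaults/custom merge used by the coredump role
  hosts: localhost
  gather_facts: false

  vars:
    # role defaults from vars/main.yml: everything empty, so nothing is rendered
    systemd_defaults_coredump:
      storage: ""
      compress: ""
      process_size_max: ""
      external_size_max: ""
      journal_size_max: ""
      max_use: ""
      keep_free: ""
    # user configuration, e.g. from group_vars
    systemd_coredump:
      storage: external
      compress: true

  tasks:
    - name: merge exactly as tasks/main.yml does
      ansible.builtin.set_fact:
        systemd_coredump: "{{ systemd_defaults_coredump | combine(systemd_coredump, recursive=True) }}"

    - name: empty values survive the merge but are skipped by the template
      ansible.builtin.debug:
        var: systemd_coredump

With these values the rendered /etc/systemd/coredump.conf would contain only `Storage = external` and `Compress = yes`; every size limit stays at its compile-time default because its merged value is still an empty string.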
diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/homed/.ansible-lint b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/homed/.ansible-lint new file mode 100644 index 0000000..5343e85 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/homed/.ansible-lint @@ -0,0 +1,6 @@ +--- + +skip_list: + - name[casing] + - name[template] + - syntax-check[specific] diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/homed/.editorconfig b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/homed/.editorconfig new file mode 100644 index 0000000..898cdbd --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/homed/.editorconfig @@ -0,0 +1,23 @@ +# EditorConfig helps developers define and maintain consistent +# coding styles between different editors and IDEs +# https://editorconfig.org/ + + +root = true + +[*] +indent_style = space +indent_size = 2 + +end_of_line = lf +charset = utf-8 +trim_trailing_whitespace = true +insert_final_newline = true +max_line_length = 100 + +[*.py] +indent_style = space +indent_size = 4 + +[*.md] +trim_trailing_whitespace = false diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/homed/.flake8 b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/homed/.flake8 new file mode 100644 index 0000000..1962f7e --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/homed/.flake8 @@ -0,0 +1,19 @@ +[flake8] + +# E221 multiple spaces before operator +# E251 unexpected spaces around keyword / parameter equals + +ignore = E221,E251 + +exclude = + # No need to traverse our git directory + .git, + # There's no value in checking cache directories + __pycache__, + .tox + +# E203: https://github.com/python/black/issues/315 +# ignore = D,E741,W503,W504,H,E501,E203 + +max-line-length = 195 + diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/homed/.gitignore b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/homed/.gitignore new file mode 100644 index 0000000..3d35e6a --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/homed/.gitignore @@ -0,0 +1,6 @@ +.tox +.galaxy_install_info +*kate-swp +__pycache__ +.cache +.directory diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/homed/.yamllint b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/homed/.yamllint new file mode 100644 index 0000000..e3f52af --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/homed/.yamllint @@ -0,0 +1,36 @@ +--- +# Based on ansible-lint config +extends: default + +rules: + braces: + max-spaces-inside: 1 + level: error + brackets: + max-spaces-inside: 1 + level: error + colons: + max-spaces-after: -1 + level: error + commas: + max-spaces-after: -1 + level: error + comments: disable + comments-indentation: disable + document-start: disable + empty-lines: + max: 3 + level: error + hyphens: + level: error + indentation: + spaces: 2 + key-duplicates: enable + line-length: + max: 195 + level: warning + new-line-at-end-of-file: disable + new-lines: + type: unix + trailing-spaces: disable + truthy: disable diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/homed/CONTRIBUTING.md 
b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/homed/CONTRIBUTING.md new file mode 100644 index 0000000..e3cd4cc --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/homed/CONTRIBUTING.md @@ -0,0 +1,31 @@ +Contributing +============ +If you want to contribute to a project and make it better, your help is very welcome. +Contributing is also a great way to learn more about social coding on Github, new technologies and +their ecosystems and how to make constructive, helpful bug reports, feature requests and the +noblest of all contributions: a good, clean pull request. + +### How to make a clean pull request + +Look for a project's contribution instructions. If there are any, follow them. + +- Create a personal fork of the project on Github. +- Clone the fork on your local machine. Your remote repo on Github is called `origin`. +- Add the original repository as a remote called `upstream`. +- If you created your fork a while ago, be sure to pull upstream changes into your local repository. +- Create a new branch to work on! Branch from `develop` if it exists, else from `master`. +- Implement/fix your feature, comment your code. +- Follow the code style of the project, including indentation. +- If the project has tests, run them! +- Write or adapt tests as needed. +- Add or change the documentation as needed. +- Squash your commits into a single commit. Create a new branch if necessary. +- Push your branch to your fork on Github, the remote `origin`. +- From your fork open a pull request in the correct branch. Target the project's `develop` branch if there is one, else go for `master`! +- If the maintainer requests further changes, just push them to your branch. The PR will be updated automatically. +- Once the pull request is approved and merged, you can pull the changes from `upstream` to your local repo and delete + your extra branch(es). + +And last but not least: Always write your commit messages in the present tense. +Your commit message should describe what the commit, when applied, does to the +code – not what you did to the code. diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/homed/LICENSE b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/homed/LICENSE new file mode 100644 index 0000000..8c8472f --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/homed/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License.
+ + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright 2020-2021 Bodo Schulz + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/homed/Makefile b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/homed/Makefile new file mode 100644 index 0000000..bfaab7c --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/homed/Makefile @@ -0,0 +1,22 @@ +# +export TOX_SCENARIO ?= default +export TOX_ANSIBLE ?= ansible_8.5 + +.PHONY: converge destroy verify test lint + +default: converge + +converge: + @hooks/converge + +destroy: + @hooks/destroy + +verify: + @hooks/verify + +test: + @hooks/test + +lint: + @hooks/lint diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/homed/README.md b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/homed/README.md new file mode 100644 index 0000000..6efc439 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/homed/README.md @@ -0,0 +1,2 @@ + +# https://www.freedesktop.org/software/systemd/man/latest/homed.conf.html diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/homed/defaults/main.yml b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/homed/defaults/main.yml new file mode 100644 index 0000000..d761310 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/homed/defaults/main.yml @@ -0,0 +1,8 @@ +--- + +systemd_homed: {} + # default_storage: "" + # default_filesystem_type: "" # btrfs + +... + diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/homed/hooks/converge b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/homed/hooks/converge new file mode 100755 index 0000000..0c50932 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/homed/hooks/converge @@ -0,0 +1,3 @@ +#!/usr/bin/env bash + +hooks/tox.sh "converge" diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/homed/hooks/destroy b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/homed/hooks/destroy new file mode 100755 index 0000000..b4a3f8d --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/homed/hooks/destroy @@ -0,0 +1,3 @@ +#!/usr/bin/env bash + +hooks/tox.sh "destroy" diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/homed/hooks/lint b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/homed/hooks/lint new file mode 100755 index 0000000..ef226a0 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/homed/hooks/lint @@ -0,0 +1,3 @@ +#!/usr/bin/env bash + +hooks/tox.sh "lint" diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/homed/hooks/molecule.rc b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/homed/hooks/molecule.rc new file mode 100644 index 0000000..a15f7c3 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/homed/hooks/molecule.rc @@ -0,0 +1,9 @@ + +TOX_ARGS= + +if [ -n "${TOX_SCENARIO}" ] +then + TOX_ARGS="--scenario-name ${TOX_SCENARIO}" +fi + +TOX_OPTS="-e ${TOX_ANSIBLE}" diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/homed/hooks/test b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/homed/hooks/test new file mode 100755 index 0000000..2869139 --- /dev/null +++ 
b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/homed/hooks/test @@ -0,0 +1,3 @@ +#!/usr/bin/env bash + +hooks/tox.sh "test" diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/homed/hooks/tox.sh b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/homed/hooks/tox.sh new file mode 100755 index 0000000..62bb777 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/homed/hooks/tox.sh @@ -0,0 +1,26 @@ +#!/usr/bin/env bash + +. hooks/molecule.rc + +TOX_TEST="${1}" + +if [ -f "./collections.yml" ] +then + for collection in $(grep -v "#" collections.yml | grep "^ - name: " | awk -F ': ' '{print $2}') + do + collections_installed="$(ansible-galaxy collection list | grep ${collection} 2> /dev/null)" + + if [ -z "${collections_installed}" ] + then + echo "Install the required collection '${collection}'" + ansible-galaxy collection install ${collection} + else + collection_version=$(echo "${collections_installed}" | awk -F ' ' '{print $2}') + + echo "The required collection '${collection}' is installed in version ${collection_version}." + fi + done + echo "" +fi + +tox ${TOX_OPTS} -- molecule ${TOX_TEST} ${TOX_ARGS} diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/homed/hooks/verify b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/homed/hooks/verify new file mode 100755 index 0000000..5f436af --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/homed/hooks/verify @@ -0,0 +1,3 @@ +#!/usr/bin/env bash + +hooks/tox.sh "verify" diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/homed/molecule/configured/converge.yml b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/homed/molecule/configured/converge.yml new file mode 100644 index 0000000..4735ca1 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/homed/molecule/configured/converge.yml @@ -0,0 +1,7 @@ +--- +- name: converge + hosts: instance + any_errors_fatal: false + + roles: + - role: bodsch.systemd.homed diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/homed/molecule/configured/group_vars/all/vars.yml b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/homed/molecule/configured/group_vars/all/vars.yml new file mode 100644 index 0000000..d9ea723 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/homed/molecule/configured/group_vars/all/vars.yml @@ -0,0 +1,7 @@ +--- + +systemd_homed: + default_storage: "directory" + default_filesystem_type: "ext4" + +... 
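As with the coredump role, the "configured" scenario vars above translate directly into a playbook. A minimal sketch, assuming the collection is installed and the target hosts run systemd-homed; note that homed.conf(5) also allows other storage back-ends (luks, fscrypt, subvolume, cifs) besides the `directory` used here:

---
- name: configure systemd-homed
  hosts: all
  become: true

  vars:
    systemd_homed:
      default_storage: "directory"
      default_filesystem_type: "ext4"

  roles:
    - role: bodsch.systemd.homed

The testinfra verifier for this scenario (test_default.py below) then only asserts that /etc/systemd exists and that /etc/systemd/homed.conf was written.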
diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/homed/molecule/configured/molecule.yml b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/homed/molecule/configured/molecule.yml new file mode 100644 index 0000000..67a0c0f --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/homed/molecule/configured/molecule.yml @@ -0,0 +1,51 @@ +dependency: + name: galaxy + +driver: + name: docker + +platforms: + - name: instance + image: "ghcr.io/bodsch/docker-ansible/ansible-${DISTRIBUTION:-debian:12}" + command: ${MOLECULE_DOCKER_COMMAND:-""} + docker_host: "${DOCKER_HOST:-unix://run/docker.sock}" + privileged: true + pre_build_image: true + cgroupns_mode: host + mounts: + - source: /sys/fs/cgroup + target: /sys/fs/cgroup + type: bind + read_only: false + volumes: + - /var/lib/containerd + capabilities: + - SYS_ADMIN + tmpfs: + - /run + - /tmp + +provisioner: + name: ansible + ansible_args: + - --diff + - -v + config_options: + defaults: + deprecation_warnings: true + callback_result_format: yaml + callbacks_enabled: profile_tasks + gathering: smart + fact_caching_timeout: 320 +scenario: + test_sequence: + - destroy + - dependency + - create + - prepare + - converge + - idempotence + - verify + - destroy +verifier: + name: testinfra diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/homed/molecule/configured/prepare.yml b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/homed/molecule/configured/prepare.yml new file mode 100644 index 0000000..d680e48 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/homed/molecule/configured/prepare.yml @@ -0,0 +1,49 @@ +--- + +- name: information + hosts: all + gather_facts: true + + pre_tasks: + - name: arch- / artixlinux + when: + - ansible_facts.distribution | lower == 'archlinux' or + ansible_facts.os_family | lower == 'artix linux' + block: + - name: update pacman system + ansible.builtin.command: | + pacman --refresh --sync --sysupgrade --noconfirm + register: pacman + changed_when: pacman.rc != 0 + failed_when: pacman.rc != 0 + + - name: create depends service + ansible.builtin.copy: + mode: 0755 + dest: /etc/init.d/net + content: | + #!/usr/bin/openrc-run + true + when: + - ansible_facts.os_family | lower == 'artix linux' + + - name: make sure python3-apt is installed (only debian based) + ansible.builtin.package: + name: + - python3-apt + state: present + when: + - ansible_facts.os_family | lower == 'debian' + + - name: update package cache + become: true + ansible.builtin.package: + update_cache: true + + - name: environment + ansible.builtin.debug: + msg: + - "os family : {{ ansible_facts.distribution }} ({{ ansible_facts.os_family }})" + - "distribution version : {{ ansible_facts.distribution_major_version }}" + - "ansible version : {{ ansible_version.full }}" + - "python version : {{ ansible_facts.python.version.major }}.{{ ansible_facts.python.version.minor }}" diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/homed/molecule/configured/tests/test_default.py b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/homed/molecule/configured/tests/test_default.py new file mode 100644 index 0000000..cb2ef99 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/homed/molecule/configured/tests/test_default.py @@ -0,0 +1,146 @@ +# coding: utf-8 +from __future__ import unicode_literals + 
+import json +import os + +import pytest +import testinfra.utils.ansible_runner +from ansible.parsing.dataloader import DataLoader +from ansible.template import Templar + +HOST = "instance" + +testinfra_hosts = testinfra.utils.ansible_runner.AnsibleRunner( + os.environ["MOLECULE_INVENTORY_FILE"] +).get_hosts(HOST) + + +def pp_json(json_thing, sort=True, indents=2): + if type(json_thing) is str: + print(json.dumps(json.loads(json_thing), sort_keys=sort, indent=indents)) + else: + print(json.dumps(json_thing, sort_keys=sort, indent=indents)) + return None + + +def base_directory(): + """ """ + cwd = os.getcwd() + + if "group_vars" in os.listdir(cwd): + directory = "../.." + molecule_directory = "." + else: + directory = "." + molecule_directory = f"molecule/{os.environ.get('MOLECULE_SCENARIO_NAME')}" + + return directory, molecule_directory + + +def read_ansible_yaml(file_name, role_name): + """ """ + read_file = None + + for e in ["yml", "yaml"]: + test_file = f"{file_name}.{e}" + if os.path.isfile(test_file): + read_file = test_file + break + + return f"file={read_file} name={role_name}" + + +@pytest.fixture() +def get_vars(host): + """ + parse ansible variables + - defaults/main.yml + - vars/main.yml + - vars/${DISTRIBUTION}.yaml + - molecule/${MOLECULE_SCENARIO_NAME}/group_vars/all/vars.yml + """ + base_dir, molecule_dir = base_directory() + distribution = host.system_info.distribution + operating_system = None + + if distribution in ["debian", "ubuntu"]: + operating_system = "debian" + elif distribution in ["redhat", "ol", "centos", "rocky", "almalinux"]: + operating_system = "redhat" + elif distribution in ["arch", "artix"]: + operating_system = f"{distribution}linux" + + # print(" -> {} / {}".format(distribution, os)) + # print(" -> {}".format(base_dir)) + + file_defaults = read_ansible_yaml(f"{base_dir}/defaults/main", "role_defaults") + file_vars = read_ansible_yaml(f"{base_dir}/vars/main", "role_vars") + file_distribution = read_ansible_yaml( + f"{base_dir}/vars/{operating_system}", "role_distribution" + ) + file_molecule = read_ansible_yaml( + f"{molecule_dir}/group_vars/all/vars", "test_vars" + ) + # file_host_molecule = read_ansible_yaml("{}/host_vars/{}/vars".format(base_dir, HOST), "host_vars") + + defaults_vars = ( + host.ansible("include_vars", file_defaults) + .get("ansible_facts") + .get("role_defaults") + ) + vars_vars = ( + host.ansible("include_vars", file_vars).get("ansible_facts").get("role_vars") + ) + distribution_vars = ( + host.ansible("include_vars", file_distribution) + .get("ansible_facts") + .get("role_distribution") + ) + molecule_vars = ( + host.ansible("include_vars", file_molecule) + .get("ansible_facts") + .get("test_vars") + ) + # host_vars = host.ansible("include_vars", file_host_molecule).get("ansible_facts").get("host_vars") + + ansible_vars = defaults_vars + ansible_vars.update(vars_vars) + ansible_vars.update(distribution_vars) + ansible_vars.update(molecule_vars) + # ansible_vars.update(host_vars) + + templar = Templar(loader=DataLoader(), variables=ansible_vars) + result = templar.template(ansible_vars, fail_on_undefined=False) + + return result + + +def local_facts(host): + """ + return local facts + """ + return host.ansible("setup").get("ansible_facts").get("ansible_local").get("homed") + + +@pytest.mark.parametrize( + "directories", + [ + "/etc/systemd", + ], +) +def test_directories(host, directories): + d = host.file(directories) + assert d.is_directory + + +@pytest.mark.parametrize( + "files", + [ + "/etc/systemd/homed.conf", + ], +) +def 
test_systemd_files(host, files): + """ """ + d = host.file(files) + assert d.is_file diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/homed/molecule/default/converge.yml b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/homed/molecule/default/converge.yml new file mode 100644 index 0000000..4735ca1 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/homed/molecule/default/converge.yml @@ -0,0 +1,7 @@ +--- +- name: converge + hosts: instance + any_errors_fatal: false + + roles: + - role: bodsch.systemd.homed diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/homed/molecule/default/group_vars/all/vars.yml b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/homed/molecule/default/group_vars/all/vars.yml new file mode 100644 index 0000000..c81cf5b --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/homed/molecule/default/group_vars/all/vars.yml @@ -0,0 +1,3 @@ +--- + +... diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/homed/molecule/default/molecule.yml b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/homed/molecule/default/molecule.yml new file mode 100644 index 0000000..e43fcff --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/homed/molecule/default/molecule.yml @@ -0,0 +1,51 @@ +dependency: + name: galaxy + +driver: + name: docker + +platforms: + - name: instance + image: "ghcr.io/bodsch/docker-ansible/ansible-${DISTRIBUTION:-debian:13}" + command: ${MOLECULE_DOCKER_COMMAND:-""} + docker_host: "${DOCKER_HOST:-unix://run/docker.sock}" + privileged: true + pre_build_image: true + cgroupns_mode: host + mounts: + - source: /sys/fs/cgroup + target: /sys/fs/cgroup + type: bind + read_only: false + volumes: + - /var/lib/containerd + capabilities: + - SYS_ADMIN + tmpfs: + - /run + - /tmp + +provisioner: + name: ansible + ansible_args: + - --diff + - -v + config_options: + defaults: + deprecation_warnings: true + callback_result_format: yaml + callbacks_enabled: profile_tasks + gathering: smart + fact_caching_timeout: 320 +scenario: + test_sequence: + - destroy + - dependency + - create + - prepare + - converge + - idempotence + - verify + - destroy +verifier: + name: testinfra diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/homed/molecule/default/prepare.yml b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/homed/molecule/default/prepare.yml new file mode 100644 index 0000000..d680e48 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/homed/molecule/default/prepare.yml @@ -0,0 +1,49 @@ +--- + +- name: information + hosts: all + gather_facts: true + + pre_tasks: + - name: arch- / artixlinux + when: + - ansible_facts.distribution | lower == 'archlinux' or + ansible_facts.os_family | lower == 'artix linux' + block: + - name: update pacman system + ansible.builtin.command: | + pacman --refresh --sync --sysupgrade --noconfirm + register: pacman + changed_when: pacman.rc != 0 + failed_when: pacman.rc != 0 + + - name: create depends service + ansible.builtin.copy: + mode: 0755 + dest: /etc/init.d/net + content: | + #!/usr/bin/openrc-run + true + when: + - ansible_facts.os_family | lower == 'artix linux' + + - name: make sure python3-apt is installed (only debian based) + ansible.builtin.package: + name: + - python3-apt + 
state: present + when: + - ansible_facts.os_family | lower == 'debian' + + - name: update package cache + become: true + ansible.builtin.package: + update_cache: true + + - name: environment + ansible.builtin.debug: + msg: + - "os family : {{ ansible_facts.distribution }} ({{ ansible_facts.os_family }})" + - "distribution version : {{ ansible_facts.distribution_major_version }}" + - "ansible version : {{ ansible_version.full }}" + - "python version : {{ ansible_facts.python.version.major }}.{{ ansible_facts.python.version.minor }}" diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/homed/molecule/default/tests/test_default.py b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/homed/molecule/default/tests/test_default.py new file mode 100644 index 0000000..cb2ef99 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/homed/molecule/default/tests/test_default.py @@ -0,0 +1,146 @@ +# coding: utf-8 +from __future__ import unicode_literals + +import json +import os + +import pytest +import testinfra.utils.ansible_runner +from ansible.parsing.dataloader import DataLoader +from ansible.template import Templar + +HOST = "instance" + +testinfra_hosts = testinfra.utils.ansible_runner.AnsibleRunner( + os.environ["MOLECULE_INVENTORY_FILE"] +).get_hosts(HOST) + + +def pp_json(json_thing, sort=True, indents=2): + if type(json_thing) is str: + print(json.dumps(json.loads(json_thing), sort_keys=sort, indent=indents)) + else: + print(json.dumps(json_thing, sort_keys=sort, indent=indents)) + return None + + +def base_directory(): + """ """ + cwd = os.getcwd() + + if "group_vars" in os.listdir(cwd): + directory = "../.." + molecule_directory = "." + else: + directory = "." + molecule_directory = f"molecule/{os.environ.get('MOLECULE_SCENARIO_NAME')}" + + return directory, molecule_directory + + +def read_ansible_yaml(file_name, role_name): + """ """ + read_file = None + + for e in ["yml", "yaml"]: + test_file = f"{file_name}.{e}" + if os.path.isfile(test_file): + read_file = test_file + break + + return f"file={read_file} name={role_name}" + + +@pytest.fixture() +def get_vars(host): + """ + parse ansible variables + - defaults/main.yml + - vars/main.yml + - vars/${DISTRIBUTION}.yaml + - molecule/${MOLECULE_SCENARIO_NAME}/group_vars/all/vars.yml + """ + base_dir, molecule_dir = base_directory() + distribution = host.system_info.distribution + operating_system = None + + if distribution in ["debian", "ubuntu"]: + operating_system = "debian" + elif distribution in ["redhat", "ol", "centos", "rocky", "almalinux"]: + operating_system = "redhat" + elif distribution in ["arch", "artix"]: + operating_system = f"{distribution}linux" + + # print(" -> {} / {}".format(distribution, os)) + # print(" -> {}".format(base_dir)) + + file_defaults = read_ansible_yaml(f"{base_dir}/defaults/main", "role_defaults") + file_vars = read_ansible_yaml(f"{base_dir}/vars/main", "role_vars") + file_distribution = read_ansible_yaml( + f"{base_dir}/vars/{operating_system}", "role_distribution" + ) + file_molecule = read_ansible_yaml( + f"{molecule_dir}/group_vars/all/vars", "test_vars" + ) + # file_host_molecule = read_ansible_yaml("{}/host_vars/{}/vars".format(base_dir, HOST), "host_vars") + + defaults_vars = ( + host.ansible("include_vars", file_defaults) + .get("ansible_facts") + .get("role_defaults") + ) + vars_vars = ( + host.ansible("include_vars", file_vars).get("ansible_facts").get("role_vars") + ) + distribution_vars = ( + 
host.ansible("include_vars", file_distribution) + .get("ansible_facts") + .get("role_distribution") + ) + molecule_vars = ( + host.ansible("include_vars", file_molecule) + .get("ansible_facts") + .get("test_vars") + ) + # host_vars = host.ansible("include_vars", file_host_molecule).get("ansible_facts").get("host_vars") + + ansible_vars = defaults_vars + ansible_vars.update(vars_vars) + ansible_vars.update(distribution_vars) + ansible_vars.update(molecule_vars) + # ansible_vars.update(host_vars) + + templar = Templar(loader=DataLoader(), variables=ansible_vars) + result = templar.template(ansible_vars, fail_on_undefined=False) + + return result + + +def local_facts(host): + """ + return local facts + """ + return host.ansible("setup").get("ansible_facts").get("ansible_local").get("homed") + + +@pytest.mark.parametrize( + "directories", + [ + "/etc/systemd", + ], +) +def test_directories(host, directories): + d = host.file(directories) + assert d.is_directory + + +@pytest.mark.parametrize( + "files", + [ + "/etc/systemd/homed.conf", + ], +) +def test_systemd_files(host, files): + """ """ + d = host.file(files) + assert d.is_file diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/homed/tasks/main.yml b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/homed/tasks/main.yml new file mode 100644 index 0000000..0bec034 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/homed/tasks/main.yml @@ -0,0 +1,15 @@ +--- + +- name: merge systemd homed configuration between defaults and custom + ansible.builtin.set_fact: + systemd_homed: "{{ systemd_defaults_homed | combine(systemd_homed, recursive=True) }}" + +- name: create systemd homed configuration + ansible.builtin.template: + src: systemd/homed.conf.j2 + dest: /etc/systemd/homed.conf + mode: 0644 + owner: root + group: root + +... diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/homed/templates/systemd/homed.conf.j2 b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/homed/templates/systemd/homed.conf.j2 new file mode 100644 index 0000000..f548601 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/homed/templates/systemd/homed.conf.j2 @@ -0,0 +1,29 @@ +#jinja2: trim_blocks: True, lstrip_blocks: True +# {{ ansible_managed }} +# +# This file is part of systemd. +# +# systemd is free software; you can redistribute it and/or modify it under the +# terms of the GNU Lesser General Public License as published by the Free +# Software Foundation; either version 2.1 of the License, or (at your option) +# any later version. +# +# Entries in this file show the compile time defaults. Local configuration +# should be created by either modifying this file, or by creating "drop-ins" in +# the homed.conf.d/ subdirectory. The latter is generally recommended. +# Defaults can be restored by simply deleting this file and all drop-ins. +# +# Use 'systemd-analyze cat-config systemd/homed.conf' to display the full config. +# +# See homed.conf(5) for details. 
+ +[Home] +{% if systemd_homed.default_storage is defined and + systemd_homed.default_storage | string | length > 0 and + systemd_homed.default_storage in ["luks", "fscrypt", "directory", "subvolume", "cifs"] %} +DefaultStorage = {{ systemd_homed.default_storage }} +{% endif %} +{% if systemd_homed.default_filesystem_type is defined and + systemd_homed.default_filesystem_type | string | length > 0 %} +DefaultFileSystemType = {{ systemd_homed.default_filesystem_type }} +{% endif %} diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/homed/vars/main.yml b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/homed/vars/main.yml new file mode 100644 index 0000000..d8ff0c6 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/homed/vars/main.yml @@ -0,0 +1,7 @@ +--- + +systemd_defaults_homed: + default_storage: "" # one of "luks", "fscrypt", "directory", "subvolume", "cifs" + default_filesystem_type: "" # btrfs + +... diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/journald/.ansible-lint b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/journald/.ansible-lint new file mode 100644 index 0000000..5343e85 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/journald/.ansible-lint @@ -0,0 +1,6 @@ +--- + +skip_list: + - name[casing] + - name[template] + - syntax-check[specific] diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/journald/.editorconfig b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/journald/.editorconfig new file mode 100644 index 0000000..898cdbd --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/journald/.editorconfig @@ -0,0 +1,23 @@ +# EditorConfig helps developers define and maintain consistent +# coding styles between different editors and IDEs +# https://editorconfig.org/ + + +root = true + +[*] +indent_style = space +indent_size = 2 + +end_of_line = lf +charset = utf-8 +trim_trailing_whitespace = true +insert_final_newline = true +max_line_length = 100 + +[*.py] +indent_style = space +indent_size = 4 + +[*.md] +trim_trailing_whitespace = false diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/journald/.flake8 b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/journald/.flake8 new file mode 100644 index 0000000..1962f7e --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/journald/.flake8 @@ -0,0 +1,19 @@ +[flake8] + +# E221 multiple spaces before operator +# E251 unexpected spaces around keyword / parameter equals + +ignore = E221,E251 + +exclude = + # No need to traverse our git directory + .git, + # There's no value in checking cache directories + __pycache__, + .tox + +# E203: https://github.com/python/black/issues/315 +# ignore = D,E741,W503,W504,H,E501,E203 + +max-line-length = 195 + diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/journald/.gitignore b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/journald/.gitignore new file mode 100644 index 0000000..3d35e6a --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/journald/.gitignore @@ -0,0 +1,6 @@ +.tox +.galaxy_install_info +*kate-swp +__pycache__ +.cache +.directory diff --git 
a/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/journald/.yamllint b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/journald/.yamllint new file mode 100644 index 0000000..e3f52af --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/journald/.yamllint @@ -0,0 +1,36 @@ +--- +# Based on ansible-lint config +extends: default + +rules: + braces: + max-spaces-inside: 1 + level: error + brackets: + max-spaces-inside: 1 + level: error + colons: + max-spaces-after: -1 + level: error + commas: + max-spaces-after: -1 + level: error + comments: disable + comments-indentation: disable + document-start: disable + empty-lines: + max: 3 + level: error + hyphens: + level: error + indentation: + spaces: 2 + key-duplicates: enable + line-length: + max: 195 + level: warning + new-line-at-end-of-file: disable + new-lines: + type: unix + trailing-spaces: disable + truthy: disable diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/journald/CONTRIBUTING.md b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/journald/CONTRIBUTING.md new file mode 100644 index 0000000..e3cd4cc --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/journald/CONTRIBUTING.md @@ -0,0 +1,31 @@ +Contributing +============ +If you want to contribute to a project and make it better, your help is very welcome. +Contributing is also a great way to learn more about social coding on Github, new technologies +and their ecosystems and how to make constructive, helpful bug reports, feature requests and the +noblest of all contributions: a good, clean pull request. + +### How to make a clean pull request + +Look for a project's contribution instructions. If there are any, follow them. + +- Create a personal fork of the project on Github. +- Clone the fork on your local machine. Your remote repo on Github is called `origin`. +- Add the original repository as a remote called `upstream`. +- If you created your fork a while ago be sure to pull upstream changes into your local repository. +- Create a new branch to work on! Branch from `develop` if it exists, else from `master`. +- Implement/fix your feature, comment your code. +- Follow the code style of the project, including indentation. +- If the project has tests run them! +- Write or adapt tests as needed. +- Add or change the documentation as needed. +- Squash your commits into a single commit. Create a new branch if necessary. +- Push your branch to your fork on Github, the remote `origin`. +- From your fork open a pull request in the correct branch. Target the project's `develop` branch if there is one, else go for `master`! +- If the maintainer requests further changes just push them to your branch. The PR will be updated automatically. +- Once the pull request is approved and merged you can pull the changes from `upstream` to your local repo and delete + your extra branch(es). + +And last but not least: Always write your commit messages in the present tense. +Your commit message should describe what the commit, when applied, does to the +code – not what you did to the code. 
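A condensed sketch of the workflow this CONTRIBUTING file describes, following its steps in order; the repository, user, and branch names are placeholders:

```bash
# Fork the project on GitHub first, then:
git clone git@github.com:<your-user>/<project>.git
cd <project>
git remote add upstream https://github.com/<original-owner>/<project>.git
git fetch upstream

# Branch from develop if it exists, otherwise from master:
git checkout -b my-fix upstream/develop

# ...implement your change, run the tests, update the docs...

git rebase -i upstream/develop   # squash your commits into a single commit
git push origin my-fix           # then open the pull request against develop
```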
diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/journald/LICENSE b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/journald/LICENSE new file mode 100644 index 0000000..8c8472f --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/journald/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." 
+ + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. 
+ + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. 
We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright 2020-2021 Bodo Schulz + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/journald/Makefile b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/journald/Makefile new file mode 100644 index 0000000..bfaab7c --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/journald/Makefile @@ -0,0 +1,22 @@ +# +export TOX_SCENARIO ?= default +export TOX_ANSIBLE ?= ansible_8.5 + +.PHONY: converge destroy verify test lint + +default: converge + +converge: + @hooks/converge + +destroy: + @hooks/destroy + +verify: + @hooks/verify + +test: + @hooks/test + +lint: + @hooks/lint diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/journald/README.md b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/journald/README.md new file mode 100644 index 0000000..fc7828f --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/journald/README.md @@ -0,0 +1,2 @@ + +# https://www.freedesktop.org/software/systemd/man/journald.conf.html diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/journald/defaults/main.yml b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/journald/defaults/main.yml new file mode 100644 index 0000000..6fbd055 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/journald/defaults/main.yml @@ -0,0 +1,32 @@ +--- +systemd_journald: {} +# storage: "" +# compress: "" +# seal: "" +# split_mode: "" +# sync_interval_sec: "" +# rate_limit_interval_sec: "" +# rate_limit_burst: "" +# system_max_use: "" +# system_keep_free: "" +# system_max_file_size: "" +# system_max_files: "" +# runtime_max_use: "" +# runtime_keep_free: "" +# runtime_max_file_size: "" +# runtime_max_files: "" +# max_retention_sec: "" +# max_file_sec: "" +# forward_to_syslog: "" +# forward_to_kmsg: "" +# forward_to_console: "" +# forward_to_wall: "" +# tty_path: "" +# max_level_store: "" +# max_level_syslog: "" +# max_level_kmsg: "" +# max_level_console: "" +# max_level_wall: "" +# line_max: "" +# read_kmsg: "" +# audit: "" diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/journald/hooks/converge b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/journald/hooks/converge new file mode 100755 index 0000000..0c50932 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/journald/hooks/converge @@ -0,0 +1,3 @@ +#!/usr/bin/env bash + +hooks/tox.sh "converge" diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/journald/hooks/destroy b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/journald/hooks/destroy 
new file mode 100755 index 0000000..b4a3f8d --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/journald/hooks/destroy @@ -0,0 +1,3 @@ +#!/usr/bin/env bash + +hooks/tox.sh "destroy" diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/journald/hooks/lint b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/journald/hooks/lint new file mode 100755 index 0000000..ef226a0 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/journald/hooks/lint @@ -0,0 +1,3 @@ +#!/usr/bin/env bash + +hooks/tox.sh "lint" diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/journald/hooks/molecule.rc b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/journald/hooks/molecule.rc new file mode 100644 index 0000000..a15f7c3 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/journald/hooks/molecule.rc @@ -0,0 +1,9 @@ + +TOX_ARGS= + +if [ -n "${TOX_SCENARIO}" ] +then + TOX_ARGS="--scenario-name ${TOX_SCENARIO}" +fi + +TOX_OPTS="-e ${TOX_ANSIBLE}" diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/journald/hooks/test b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/journald/hooks/test new file mode 100755 index 0000000..2869139 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/journald/hooks/test @@ -0,0 +1,3 @@ +#!/usr/bin/env bash + +hooks/tox.sh "test" diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/journald/hooks/tox.sh b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/journald/hooks/tox.sh new file mode 100755 index 0000000..62bb777 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/journald/hooks/tox.sh @@ -0,0 +1,26 @@ +#!/usr/bin/env bash + +. hooks/molecule.rc + +TOX_TEST="${1}" + +if [ -f "./collections.yml" ] +then + for collection in $(grep -v "#" collections.yml | grep "^ - name: " | awk -F ': ' '{print $2}') + do + collections_installed="$(ansible-galaxy collection list | grep ${collection} 2> /dev/null)" + + if [ -z "${collections_installed}" ] + then + echo "Install the required collection '${collection}'" + ansible-galaxy collection install ${collection} + else + collection_version=$(echo "${collections_installed}" | awk -F ' ' '{print $2}') + + echo "The required collection '${collection}' is installed in version ${collection_version}." 
+ fi + done + echo "" +fi + +tox ${TOX_OPTS} -- molecule ${TOX_TEST} ${TOX_ARGS} diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/journald/hooks/verify b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/journald/hooks/verify new file mode 100755 index 0000000..5f436af --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/journald/hooks/verify @@ -0,0 +1,3 @@ +#!/usr/bin/env bash + +hooks/tox.sh "verify" diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/journald/molecule/configured/converge.yml b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/journald/molecule/configured/converge.yml new file mode 100644 index 0000000..2ad3d24 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/journald/molecule/configured/converge.yml @@ -0,0 +1,7 @@ +--- +- name: converge + hosts: instance + any_errors_fatal: false + + roles: + - role: bodsch.systemd.journald diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/journald/molecule/configured/group_vars/all/vars.yml b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/journald/molecule/configured/group_vars/all/vars.yml new file mode 100644 index 0000000..6f95ed0 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/journald/molecule/configured/group_vars/all/vars.yml @@ -0,0 +1,35 @@ +--- + +systemd_journald: + storage: "auto" # auto + compress: "true" # true + seal: "" # true + split_mode: "" # uid + sync_interval_sec: "2m" # 5m + rate_limit_interval_sec: "" # 30s + rate_limit_burst: "" # 10000 + system_max_use: "" + system_keep_free: "" + system_max_file_size: "" + system_max_files: "4" # 100 + runtime_max_use: "" + runtime_keep_free: "" + runtime_max_file_size: "" + runtime_max_files: "" # 100 + max_retention_sec: "" + max_file_sec: "" # 1month + forward_to_syslog: "" # true + forward_to_kmsg: "" # false + forward_to_console: "" # false + forward_to_wall: "" # true + tty_path: "" # /dev/console + max_level_store: "" # debug + max_level_syslog: "" # debug + max_level_kmsg: "" # notice + max_level_console: "" # info + max_level_wall: "" # emerg + line_max: "" # 48K + read_kmsg: "" # true + audit: "true" # false + +... 
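Given the group_vars above and the guards in the journald.conf.j2 template added further down in this diff, only non-empty values that pass the whitelist checks should end up in /etc/systemd/journald.conf. A hedged verification sketch; it assumes the molecule docker driver leaves the container running under its platform name 'instance' after converge:

```bash
# Inspect the rendered configuration inside the molecule instance.
docker exec instance grep -E '^(Storage|Compress|SyncIntervalSec|SystemMaxFiles)' /etc/systemd/journald.conf

# Expected output, derived from the vars above and the template conditions
# (empty values such as rate_limit_burst are skipped entirely; the template
# excerpt in this diff does not show how 'audit' is handled):
#   Storage = auto
#   Compress = yes
#   SyncIntervalSec = 2m
#   SystemMaxFiles = 4
```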
diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/journald/molecule/configured/molecule.yml b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/journald/molecule/configured/molecule.yml new file mode 100644 index 0000000..67a0c0f --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/journald/molecule/configured/molecule.yml @@ -0,0 +1,51 @@ +dependency: + name: galaxy + +driver: + name: docker + +platforms: + - name: instance + image: "ghcr.io/bodsch/docker-ansible/ansible-${DISTRIBUTION:-debian:12}" + command: ${MOLECULE_DOCKER_COMMAND:-""} + docker_host: "${DOCKER_HOST:-unix://run/docker.sock}" + privileged: true + pre_build_image: true + cgroupns_mode: host + mounts: + - source: /sys/fs/cgroup + target: /sys/fs/cgroup + type: bind + read_only: false + volumes: + - /var/lib/containerd + capabilities: + - SYS_ADMIN + tmpfs: + - /run + - /tmp + +provisioner: + name: ansible + ansible_args: + - --diff + - -v + config_options: + defaults: + deprecation_warnings: true + callback_result_format: yaml + callbacks_enabled: profile_tasks + gathering: smart + fact_caching_timeout: 320 +scenario: + test_sequence: + - destroy + - dependency + - create + - prepare + - converge + - idempotence + - verify + - destroy +verifier: + name: testinfra diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/journald/molecule/configured/prepare.yml b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/journald/molecule/configured/prepare.yml new file mode 100644 index 0000000..d680e48 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/journald/molecule/configured/prepare.yml @@ -0,0 +1,49 @@ +--- + +- name: information + hosts: all + gather_facts: true + + pre_tasks: + - name: arch- / artixlinux + when: + - ansible_facts.distribution | lower == 'archlinux' or + ansible_facts.os_family | lower == 'artix linux' + block: + - name: update pacman system + ansible.builtin.command: | + pacman --refresh --sync --sysupgrade --noconfirm + register: pacman + changed_when: pacman.rc != 0 + failed_when: pacman.rc != 0 + + - name: create depends service + ansible.builtin.copy: + mode: 0755 + dest: /etc/init.d/net + content: | + #!/usr/bin/openrc-run + true + when: + - ansible_facts.os_family | lower == 'artix linux' + + - name: make sure python3-apt is installed (only debian based) + ansible.builtin.package: + name: + - python3-apt + state: present + when: + - ansible_facts.os_family | lower == 'debian' + + - name: update package cache + become: true + ansible.builtin.package: + update_cache: true + + - name: environment + ansible.builtin.debug: + msg: + - "os family : {{ ansible_facts.distribution }} ({{ ansible_facts.os_family }})" + - "distribution version : {{ ansible_facts.distribution_major_version }}" + - "ansible version : {{ ansible_version.full }}" + - "python version : {{ ansible_facts.python.version.major }}.{{ ansible_facts.python.version.minor }}" diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/journald/molecule/configured/tests/test_default.py b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/journald/molecule/configured/tests/test_default.py new file mode 100644 index 0000000..0339a67 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/journald/molecule/configured/tests/test_default.py @@ -0,0 +1,148 @@ +# coding: utf-8 +from __future__ 
import unicode_literals + +import json +import os + +import pytest +import testinfra.utils.ansible_runner +from ansible.parsing.dataloader import DataLoader +from ansible.template import Templar + +HOST = "instance" + +testinfra_hosts = testinfra.utils.ansible_runner.AnsibleRunner( + os.environ["MOLECULE_INVENTORY_FILE"] +).get_hosts(HOST) + + +def pp_json(json_thing, sort=True, indents=2): + if type(json_thing) is str: + print(json.dumps(json.loads(json_thing), sort_keys=sort, indent=indents)) + else: + print(json.dumps(json_thing, sort_keys=sort, indent=indents)) + return None + + +def base_directory(): + """ """ + cwd = os.getcwd() + + if "group_vars" in os.listdir(cwd): + directory = "../.." + molecule_directory = "." + else: + directory = "." + molecule_directory = f"molecule/{os.environ.get('MOLECULE_SCENARIO_NAME')}" + + return directory, molecule_directory + + +def read_ansible_yaml(file_name, role_name): + """ """ + read_file = None + + for e in ["yml", "yaml"]: + test_file = f"{file_name}.{e}" + if os.path.isfile(test_file): + read_file = test_file + break + + return f"file={read_file} name={role_name}" + + +@pytest.fixture() +def get_vars(host): + """ + parse ansible variables + - defaults/main.yml + - vars/main.yml + - vars/${DISTRIBUTION}.yaml + - molecule/${MOLECULE_SCENARIO_NAME}/group_vars/all/vars.yml + """ + base_dir, molecule_dir = base_directory() + distribution = host.system_info.distribution + operating_system = None + + if distribution in ["debian", "ubuntu"]: + operating_system = "debian" + elif distribution in ["redhat", "ol", "centos", "rocky", "almalinux"]: + operating_system = "redhat" + elif distribution in ["arch", "artix"]: + operating_system = f"{distribution}linux" + + # print(" -> {} / {}".format(distribution, os)) + # print(" -> {}".format(base_dir)) + + file_defaults = read_ansible_yaml(f"{base_dir}/defaults/main", "role_defaults") + file_vars = read_ansible_yaml(f"{base_dir}/vars/main", "role_vars") + file_distribution = read_ansible_yaml( + f"{base_dir}/vars/{operating_system}", "role_distribution" + ) + file_molecule = read_ansible_yaml( + f"{molecule_dir}/group_vars/all/vars", "test_vars" + ) + # file_host_molecule = read_ansible_yaml("{}/host_vars/{}/vars".format(base_dir, HOST), "host_vars") + + defaults_vars = ( + host.ansible("include_vars", file_defaults) + .get("ansible_facts") + .get("role_defaults") + ) + vars_vars = ( + host.ansible("include_vars", file_vars).get("ansible_facts").get("role_vars") + ) + distribution_vars = ( + host.ansible("include_vars", file_distribution) + .get("ansible_facts") + .get("role_distribution") + ) + molecule_vars = ( + host.ansible("include_vars", file_molecule) + .get("ansible_facts") + .get("test_vars") + ) + # host_vars = host.ansible("include_vars", file_host_molecule).get("ansible_facts").get("host_vars") + + ansible_vars = defaults_vars + ansible_vars.update(vars_vars) + ansible_vars.update(distribution_vars) + ansible_vars.update(molecule_vars) + # ansible_vars.update(host_vars) + + templar = Templar(loader=DataLoader(), variables=ansible_vars) + result = templar.template(ansible_vars, fail_on_undefined=False) + + return result + + +def local_facts(host): + """ + return local facts + """ + return ( + host.ansible("setup").get("ansible_facts").get("ansible_local").get("journald") + ) + + +@pytest.mark.parametrize( + "directories", + [ + "/etc/systemd", + ], +) +def test_directories(host, directories): + d = host.file(directories) + assert d.is_directory + + +@pytest.mark.parametrize( + "files", + [ + 
"/etc/systemd/journald.conf", + ], +) +def test_systemd_files(host, files): + """ """ + d = host.file(files) + assert d.is_file diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/journald/molecule/default/converge.yml b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/journald/molecule/default/converge.yml new file mode 100644 index 0000000..2ad3d24 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/journald/molecule/default/converge.yml @@ -0,0 +1,7 @@ +--- +- name: converge + hosts: instance + any_errors_fatal: false + + roles: + - role: bodsch.systemd.journald diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/journald/molecule/default/group_vars/all/vars.yml b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/journald/molecule/default/group_vars/all/vars.yml new file mode 100644 index 0000000..c81cf5b --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/journald/molecule/default/group_vars/all/vars.yml @@ -0,0 +1,3 @@ +--- + +... diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/journald/molecule/default/molecule.yml b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/journald/molecule/default/molecule.yml new file mode 100644 index 0000000..67a0c0f --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/journald/molecule/default/molecule.yml @@ -0,0 +1,51 @@ +dependency: + name: galaxy + +driver: + name: docker + +platforms: + - name: instance + image: "ghcr.io/bodsch/docker-ansible/ansible-${DISTRIBUTION:-debian:12}" + command: ${MOLECULE_DOCKER_COMMAND:-""} + docker_host: "${DOCKER_HOST:-unix://run/docker.sock}" + privileged: true + pre_build_image: true + cgroupns_mode: host + mounts: + - source: /sys/fs/cgroup + target: /sys/fs/cgroup + type: bind + read_only: false + volumes: + - /var/lib/containerd + capabilities: + - SYS_ADMIN + tmpfs: + - /run + - /tmp + +provisioner: + name: ansible + ansible_args: + - --diff + - -v + config_options: + defaults: + deprecation_warnings: true + callback_result_format: yaml + callbacks_enabled: profile_tasks + gathering: smart + fact_caching_timeout: 320 +scenario: + test_sequence: + - destroy + - dependency + - create + - prepare + - converge + - idempotence + - verify + - destroy +verifier: + name: testinfra diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/journald/molecule/default/prepare.yml b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/journald/molecule/default/prepare.yml new file mode 100644 index 0000000..d680e48 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/journald/molecule/default/prepare.yml @@ -0,0 +1,49 @@ +--- + +- name: information + hosts: all + gather_facts: true + + pre_tasks: + - name: arch- / artixlinux + when: + - ansible_facts.distribution | lower == 'archlinux' or + ansible_facts.os_family | lower == 'artix linux' + block: + - name: update pacman system + ansible.builtin.command: | + pacman --refresh --sync --sysupgrade --noconfirm + register: pacman + changed_when: pacman.rc != 0 + failed_when: pacman.rc != 0 + + - name: create depends service + ansible.builtin.copy: + mode: 0755 + dest: /etc/init.d/net + content: | + #!/usr/bin/openrc-run + true + when: + - ansible_facts.os_family | lower == 'artix linux' + + - name: make sure python3-apt is 
installed (only debian based) + ansible.builtin.package: + name: + - python3-apt + state: present + when: + - ansible_facts.os_family | lower == 'debian' + + - name: update package cache + become: true + ansible.builtin.package: + update_cache: true + + - name: environment + ansible.builtin.debug: + msg: + - "os family : {{ ansible_facts.distribution }} ({{ ansible_facts.os_family }})" + - "distribution version : {{ ansible_facts.distribution_major_version }}" + - "ansible version : {{ ansible_version.full }}" + - "python version : {{ ansible_facts.python.version.major }}.{{ ansible_facts.python.version.minor }}" diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/journald/molecule/default/tests/test_default.py b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/journald/molecule/default/tests/test_default.py new file mode 100644 index 0000000..0339a67 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/journald/molecule/default/tests/test_default.py @@ -0,0 +1,148 @@ +# coding: utf-8 +from __future__ import unicode_literals + +import json +import os + +import pytest +import testinfra.utils.ansible_runner +from ansible.parsing.dataloader import DataLoader +from ansible.template import Templar + +HOST = "instance" + +testinfra_hosts = testinfra.utils.ansible_runner.AnsibleRunner( + os.environ["MOLECULE_INVENTORY_FILE"] +).get_hosts(HOST) + + +def pp_json(json_thing, sort=True, indents=2): + if type(json_thing) is str: + print(json.dumps(json.loads(json_thing), sort_keys=sort, indent=indents)) + else: + print(json.dumps(json_thing, sort_keys=sort, indent=indents)) + return None + + +def base_directory(): + """ """ + cwd = os.getcwd() + + if "group_vars" in os.listdir(cwd): + directory = "../.." + molecule_directory = "." + else: + directory = "." 
+ molecule_directory = f"molecule/{os.environ.get('MOLECULE_SCENARIO_NAME')}" + + return directory, molecule_directory + + +def read_ansible_yaml(file_name, role_name): + """ """ + read_file = None + + for e in ["yml", "yaml"]: + test_file = f"{file_name}.{e}" + if os.path.isfile(test_file): + read_file = test_file + break + + return f"file={read_file} name={role_name}" + + +@pytest.fixture() +def get_vars(host): + """ + parse ansible variables + - defaults/main.yml + - vars/main.yml + - vars/${DISTRIBUTION}.yaml + - molecule/${MOLECULE_SCENARIO_NAME}/group_vars/all/vars.yml + """ + base_dir, molecule_dir = base_directory() + distribution = host.system_info.distribution + operating_system = None + + if distribution in ["debian", "ubuntu"]: + operating_system = "debian" + elif distribution in ["redhat", "ol", "centos", "rocky", "almalinux"]: + operating_system = "redhat" + elif distribution in ["arch", "artix"]: + operating_system = f"{distribution}linux" + + # print(" -> {} / {}".format(distribution, os)) + # print(" -> {}".format(base_dir)) + + file_defaults = read_ansible_yaml(f"{base_dir}/defaults/main", "role_defaults") + file_vars = read_ansible_yaml(f"{base_dir}/vars/main", "role_vars") + file_distribution = read_ansible_yaml( + f"{base_dir}/vars/{operating_system}", "role_distribution" + ) + file_molecule = read_ansible_yaml( + f"{molecule_dir}/group_vars/all/vars", "test_vars" + ) + # file_host_molecule = read_ansible_yaml("{}/host_vars/{}/vars".format(base_dir, HOST), "host_vars") + + defaults_vars = ( + host.ansible("include_vars", file_defaults) + .get("ansible_facts") + .get("role_defaults") + ) + vars_vars = ( + host.ansible("include_vars", file_vars).get("ansible_facts").get("role_vars") + ) + distribution_vars = ( + host.ansible("include_vars", file_distribution) + .get("ansible_facts") + .get("role_distribution") + ) + molecule_vars = ( + host.ansible("include_vars", file_molecule) + .get("ansible_facts") + .get("test_vars") + ) + # host_vars = host.ansible("include_vars", file_host_molecule).get("ansible_facts").get("host_vars") + + ansible_vars = defaults_vars + ansible_vars.update(vars_vars) + ansible_vars.update(distribution_vars) + ansible_vars.update(molecule_vars) + # ansible_vars.update(host_vars) + + templar = Templar(loader=DataLoader(), variables=ansible_vars) + result = templar.template(ansible_vars, fail_on_undefined=False) + + return result + + +def local_facts(host): + """ + return local facts + """ + return ( + host.ansible("setup").get("ansible_facts").get("ansible_local").get("journald") + ) + + +@pytest.mark.parametrize( + "directories", + [ + "/etc/systemd", + ], +) +def test_directories(host, directories): + d = host.file(directories) + assert d.is_directory + + +@pytest.mark.parametrize( + "files", + [ + "/etc/systemd/journald.conf", + ], +) +def test_systemd_files(host, files): + """ """ + d = host.file(files) + assert d.is_file diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/journald/tasks/main.yml b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/journald/tasks/main.yml new file mode 100644 index 0000000..6fe538e --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/journald/tasks/main.yml @@ -0,0 +1,15 @@ +--- + +- name: merge systemd journald configuration between defaults and custom + ansible.builtin.set_fact: + systemd_journald: "{{ systemd_defaults_journald | combine(systemd_journald, recursive=True) }}" + +- name: create systemd journald configuration + 
ansible.builtin.template: + src: systemd/journald.conf.j2 + dest: /etc/systemd/journald.conf + mode: 0644 + owner: root + group: root + +... diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/journald/templates/systemd/journald.conf.j2 b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/journald/templates/systemd/journald.conf.j2 new file mode 100644 index 0000000..608318e --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/journald/templates/systemd/journald.conf.j2 @@ -0,0 +1,146 @@ +#jinja2: trim_blocks: True, lstrip_blocks: True +# {{ ansible_managed }} +# +# This file is part of systemd. +# +# systemd is free software; you can redistribute it and/or modify it +# under the terms of the GNU Lesser General Public License as published by +# the Free Software Foundation; either version 2.1 of the License, or +# (at your option) any later version. +# +# Entries in this file show the compile time defaults. +# You can change settings by editing this file. +# Defaults can be restored by simply deleting this file. +# +# See journald.conf(5) for details. + +[Journal] +{% if systemd_journald.storage is defined and + systemd_journald.storage | string | length > 0 and + systemd_journald.storage in ["volatile", "persistent", "auto", "none"] %} +Storage = {{ systemd_journald.storage }} +{% endif %} +{% if systemd_journald.compress is defined and + systemd_journald.compress | string | length > 0 and + systemd_journald.compress %} +Compress = yes +{% endif %} +{% if systemd_journald.seal is defined and + systemd_journald.seal | string | length > 0 and + systemd_journald.seal %} +Seal = yes +{% endif %} +{% if systemd_journald.split_mode is defined and + systemd_journald.split_mode | string | length > 0 and + systemd_journald.split_mode in ["uid", "none"] %} +SplitMode = {{ systemd_journald.split_mode }} +{% endif %} +{% if systemd_journald.sync_interval_sec is defined and + systemd_journald.sync_interval_sec | string | length > 0 %} +SyncIntervalSec = {{ systemd_journald.sync_interval_sec }} +{% endif %} +{% if systemd_journald.rate_limit_interval_sec is defined and + systemd_journald.rate_limit_interval_sec | string | length > 0 %} +RateLimitIntervalSec = {{ systemd_journald.rate_limit_interval_sec }} +{% endif %} +{% if systemd_journald.rate_limit_burst is defined and + systemd_journald.rate_limit_burst | string | length > 0 %} +RateLimitBurst = {{ systemd_journald.rate_limit_burst }} +{% endif %} +{% if systemd_journald.system_max_use is defined and + systemd_journald.system_max_use | string | length > 0 %} +SystemMaxUse = {{ systemd_journald.system_max_use }} +{% endif %} +{% if systemd_journald.system_keep_free is defined and + systemd_journald.system_keep_free | string | length > 0 %} +SystemKeepFree = {{ systemd_journald.system_keep_free }} +{% endif %} +{% if systemd_journald.system_max_file_size is defined and + systemd_journald.system_max_file_size | string | length > 0 %} +SystemMaxFileSize = {{ systemd_journald.system_max_file_size }} +{% endif %} +{% if systemd_journald.system_max_files is defined and + systemd_journald.system_max_files | string | length > 0 %} +SystemMaxFiles = {{ systemd_journald.system_max_files }} +{% endif %} +{% if systemd_journald.runtime_max_use is defined and + systemd_journald.runtime_max_use | string | length > 0 %} +RuntimeMaxUse = {{ systemd_journald.runtime_max_use }} +{% endif %} +{% if systemd_journald.runtime_keep_free is defined and + 
systemd_journald.runtime_keep_free | string | length > 0 %} +RuntimeKeepFree = {{ systemd_journald.runtime_keep_free }} +{% endif %} +{% if systemd_journald.runtime_max_file_size is defined and + systemd_journald.runtime_max_file_size | string | length > 0 %} +RuntimeMaxFileSize = {{ systemd_journald.runtime_max_file_size }} +{% endif %} +{% if systemd_journald.runtime_max_files is defined and + systemd_journald.runtime_max_files | string | length > 0 %} +RuntimeMaxFiles = {{ systemd_journald.runtime_max_files }} +{% endif %} +{% if systemd_journald.max_retention_sec is defined and + systemd_journald.max_retention_sec | string | length > 0 %} +MaxRetentionSec = {{ systemd_journald.max_retention_sec }} +{% endif %} +{% if systemd_journald.max_file_sec is defined and + systemd_journald.max_file_sec | string | length > 0 %} +MaxFileSec = {{ systemd_journald.max_file_sec }} +{% endif %} +{% if systemd_journald.forward_to_syslog is defined and + systemd_journald.forward_to_syslog | string | length > 0 %} +ForwardToSyslog = {{ systemd_journald.forward_to_syslog | bool | bodsch.core.config_bool(true_as='yes', false_as='no') }} +{% endif %} +{% if systemd_journald.forward_to_kmsg is defined and + systemd_journald.forward_to_kmsg | string | length > 0 %} +ForwardToKMsg = {{ systemd_journald.forward_to_kmsg | bool | bodsch.core.config_bool(true_as='yes', false_as='no') }} +{% endif %} +{% if systemd_journald.forward_to_console is defined and + systemd_journald.forward_to_console | string | length > 0 %} +ForwardToConsole = {{ systemd_journald.forward_to_console | bool | bodsch.core.config_bool(true_as='yes', false_as='no') }} +{% endif %} +{% if systemd_journald.forward_to_wall is defined and + systemd_journald.forward_to_wall | string | length > 0 %} +ForwardToWall = {{ systemd_journald.forward_to_wall | bool | bodsch.core.config_bool(true_as='yes', false_as='no') }} +{% endif %} +{% if systemd_journald.tty_path is defined and + systemd_journald.tty_path | string | length > 0 %} +TTYPath = {{ systemd_journald.tty_path }} +{% endif %} +{% if systemd_journald.max_level_store is defined and + systemd_journald.max_level_store | string | length > 0 and + systemd_journald.max_level_store in ["emerg", "alert", "crit", "err", "warning", "notice", "info", "debug"] %} +MaxLevelStore = {{ systemd_journald.max_level_store }} +{% endif %} +{% if systemd_journald.max_level_syslog is defined and + systemd_journald.max_level_syslog | string | length > 0 and + systemd_journald.max_level_syslog in ["emerg", "alert", "crit", "err", "warning", "notice", "info", "debug"] %} +MaxLevelSyslog = {{ systemd_journald.max_level_syslog }} +{% endif %} +{% if systemd_journald.max_level_kmsg is defined and + systemd_journald.max_level_kmsg | string | length > 0 and + systemd_journald.max_level_kmsg in ["emerg", "alert", "crit", "err", "warning", "notice", "info", "debug"] %} +MaxLevelKMsg = {{ systemd_journald.max_level_kmsg }} +{% endif %} +{% if systemd_journald.max_level_console is defined and + systemd_journald.max_level_console | string | length > 0 and + systemd_journald.max_level_console in ["emerg", "alert", "crit", "err", "warning", "notice", "info", "debug"] %} +MaxLevelConsole = {{ systemd_journald.max_level_console }} +{% endif %} +{% if systemd_journald.max_level_wall is defined and + systemd_journald.max_level_wall | string | length > 0 and + systemd_journald.max_level_wall in ["emerg", "alert", "crit", "err", "warning", "notice", "info", "debug"] %} +MaxLevelWall = {{ systemd_journald.max_level_wall }} +{% endif 
%} +{% if systemd_journald.line_max is defined and + systemd_journald.line_max | string | length > 0 %} +LineMax = {{ systemd_journald.line_max }} +{% endif %} +{% if systemd_journald.read_kmsg is defined and + systemd_journald.read_kmsg | string | length > 0 %} +ReadKMsg = {{ systemd_journald.read_kmsg | bool | bodsch.core.config_bool(true_as='yes', false_as='no') }} +{% endif %} +{% if systemd_journald.audit is defined and + systemd_journald.audit | string | length > 0 %} +Audit = {{ systemd_journald.audit | bool | bodsch.core.config_bool(true_as='yes', false_as='no') }} +{% endif %} diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/journald/vars/main.yml b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/journald/vars/main.yml new file mode 100644 index 0000000..72adbca --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/journald/vars/main.yml @@ -0,0 +1,35 @@ +--- + +systemd_defaults_journald: + storage: "" # auto + compress: "" # true + seal: "" # true + split_mode: "" # uid + sync_interval_sec: "" # 5m + rate_limit_interval_sec: "" # 30s + rate_limit_burst: "" # 10000 + system_max_use: "" + system_keep_free: "" + system_max_file_size: "" + system_max_files: "" # 100 + runtime_max_use: "" + runtime_keep_free: "" + runtime_max_file_size: "" + runtime_max_files: "" # 100 + max_retention_sec: "" + max_file_sec: "" # 1month + forward_to_syslog: "" # true + forward_to_kmsg: "" # false + forward_to_console: "" # false + forward_to_wall: "" # true + tty_path: "" # /dev/console + max_level_store: "" # debug + max_level_syslog: "" # debug + max_level_kmsg: "" # notice + max_level_console: "" # info + max_level_wall: "" # emerg + line_max: "" # 48K + read_kmsg: "" # true + audit: "" # false + +... 
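Taken together, the journald role above follows a single pattern: every key in systemd_defaults_journald is an empty string, user overrides are merged on top via combine(recursive=True), and the template only emits a journald.conf directive when the merged value is non-empty (and, where the key has one, inside its allowed-value list). A minimal consumer playbook as a sketch (host pattern, become, and the chosen values are illustrative assumptions, not taken from this diff):

- name: configure systemd-journald
  hosts: all
  become: true

  vars:
    systemd_journald:
      storage: "persistent"     # passes the template's allow-list check
      system_max_use: "500M"    # rendered as 'SystemMaxUse = 500M'
      forward_to_syslog: true   # rendered as 'ForwardToSyslog = yes' via bodsch.core.config_bool

  roles:
    - role: bodsch.systemd.journald

Keys left at their "" defaults never reach the rendered file, which is why the role can ship a complete defaults dictionary without forcing any setting.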
diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/logind/.ansible-lint b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/logind/.ansible-lint new file mode 100644 index 0000000..5343e85 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/logind/.ansible-lint @@ -0,0 +1,6 @@ +--- + +skip_list: + - name[casing] + - name[template] + - syntax-check[specific] diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/logind/.editorconfig b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/logind/.editorconfig new file mode 100644 index 0000000..898cdbd --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/logind/.editorconfig @@ -0,0 +1,23 @@ +# EditorConfig helps developers define and maintain consistent +# coding styles between different editors and IDEs +# https://editorconfig.org/ + + +root = true + +[*] +indent_style = space +indent_size = 2 + +end_of_line = lf +charset = utf-8 +trim_trailing_whitespace = true +insert_final_newline = true +max_line_length = 100 + +[*.py] +indent_style = space +indent_size = 4 + +[*.md] +trim_trailing_whitespace = false diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/logind/.flake8 b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/logind/.flake8 new file mode 100644 index 0000000..1962f7e --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/logind/.flake8 @@ -0,0 +1,19 @@ +[flake8] + +# E221 multiple spaces before operator +# E251 unexpected spaces around keyword / parameter equals + +ignore = E221,E251 + +exclude = + # No need to traverse our git directory + .git, + # There's no value in checking cache directories + __pycache__, + .tox + +# E203: https://github.com/python/black/issues/315 +# ignore = D,E741,W503,W504,H,E501,E203 + +max-line-length = 195 + diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/logind/.gitignore b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/logind/.gitignore new file mode 100644 index 0000000..3d35e6a --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/logind/.gitignore @@ -0,0 +1,6 @@ +.tox +.galaxy_install_info +*kate-swp +__pycache__ +.cache +.directory diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/logind/.yamllint b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/logind/.yamllint new file mode 100644 index 0000000..e3f52af --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/logind/.yamllint @@ -0,0 +1,36 @@ +--- +# Based on ansible-lint config +extends: default + +rules: + braces: + max-spaces-inside: 1 + level: error + brackets: + max-spaces-inside: 1 + level: error + colons: + max-spaces-after: -1 + level: error + commas: + max-spaces-after: -1 + level: error + comments: disable + comments-indentation: disable + document-start: disable + empty-lines: + max: 3 + level: error + hyphens: + level: error + indentation: + spaces: 2 + key-duplicates: enable + line-length: + max: 195 + level: warning + new-line-at-end-of-file: disable + new-lines: + type: unix + trailing-spaces: disable + truthy: disable diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/logind/CONTRIBUTING.md 
b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/logind/CONTRIBUTING.md new file mode 100644 index 0000000..e3cd4cc --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/logind/CONTRIBUTING.md @@ -0,0 +1,31 @@ +Contributing +============ +If you want to contribute to a project and make it better, your help is very welcome. +Contributing is also a great way to learn more about social coding on GitHub, new technologies +and their ecosystems and how to make constructive, helpful bug reports, feature requests and the +noblest of all contributions: a good, clean pull request. + +### How to make a clean pull request + +Look for a project's contribution instructions. If there are any, follow them. + +- Create a personal fork of the project on GitHub. +- Clone the fork on your local machine. Your remote repo on GitHub is called `origin`. +- Add the original repository as a remote called `upstream`. +- If you created your fork a while ago, be sure to pull upstream changes into your local repository. +- Create a new branch to work on! Branch from `develop` if it exists, else from `master`. +- Implement/fix your feature, comment your code. +- Follow the code style of the project, including indentation. +- If the project has tests, run them! +- Write or adapt tests as needed. +- Add or change the documentation as needed. +- Squash your commits into a single commit. Create a new branch if necessary. +- Push your branch to your fork on GitHub, the remote `origin`. +- From your fork, open a pull request against the correct branch. Target the project's `develop` branch if there is one, else go for `master`! +- If the maintainer requests further changes, just push them to your branch. The PR will be updated automatically. +- Once the pull request is approved and merged, you can pull the changes from `upstream` to your local repo and delete + your extra branch(es). + +And last but not least: Always write your commit messages in the present tense. +Your commit message should describe what the commit, when applied, does to the +code – not what you did to the code. diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/logind/LICENSE b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/logind/LICENSE new file mode 100644 index 0000000..8c8472f --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/logind/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License.
+ + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright 2020-2021 Bodo Schulz + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/logind/Makefile b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/logind/Makefile new file mode 100644 index 0000000..bfaab7c --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/logind/Makefile @@ -0,0 +1,22 @@ +# +export TOX_SCENARIO ?= default +export TOX_ANSIBLE ?= ansible_8.5 + +.PHONY: converge destroy verify test lint + +default: converge + +converge: + @hooks/converge + +destroy: + @hooks/destroy + +verify: + @hooks/verify + +test: + @hooks/test + +lint: + @hooks/lint diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/logind/README.md b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/logind/README.md new file mode 100644 index 0000000..e69de29 diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/logind/defaults/main.yml b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/logind/defaults/main.yml new file mode 100644 index 0000000..a4ae4ad --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/logind/defaults/main.yml @@ -0,0 +1,5 @@ +--- + +systemd_logind: {} + +... diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/logind/hooks/converge b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/logind/hooks/converge new file mode 100755 index 0000000..0c50932 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/logind/hooks/converge @@ -0,0 +1,3 @@ +#!/usr/bin/env bash + +hooks/tox.sh "converge" diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/logind/hooks/destroy b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/logind/hooks/destroy new file mode 100755 index 0000000..b4a3f8d --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/logind/hooks/destroy @@ -0,0 +1,3 @@ +#!/usr/bin/env bash + +hooks/tox.sh "destroy" diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/logind/hooks/lint b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/logind/hooks/lint new file mode 100755 index 0000000..ef226a0 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/logind/hooks/lint @@ -0,0 +1,3 @@ +#!/usr/bin/env bash + +hooks/tox.sh "lint" diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/logind/hooks/molecule.rc b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/logind/hooks/molecule.rc new file mode 100644 index 0000000..a15f7c3 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/logind/hooks/molecule.rc @@ -0,0 +1,9 @@ + +TOX_ARGS= + +if [ -n "${TOX_SCENARIO}" ] +then + TOX_ARGS="--scenario-name ${TOX_SCENARIO}" +fi + +TOX_OPTS="-e ${TOX_ANSIBLE}" diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/logind/hooks/test b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/logind/hooks/test new file mode 100755 index 0000000..2869139 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/logind/hooks/test @@ -0,0 +1,3 @@ +#!/usr/bin/env bash + +hooks/tox.sh "test" diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/logind/hooks/tox.sh 
b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/logind/hooks/tox.sh new file mode 100755 index 0000000..62bb777 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/logind/hooks/tox.sh @@ -0,0 +1,26 @@ +#!/usr/bin/env bash + +. hooks/molecule.rc + +TOX_TEST="${1}" + +if [ -f "./collections.yml" ] +then + for collection in $(grep -v "#" collections.yml | grep "^ - name: " | awk -F ': ' '{print $2}') + do + collections_installed="$(ansible-galaxy collection list | grep ${collection} 2> /dev/null)" + + if [ -z "${collections_installed}" ] + then + echo "Install the required collection '${collection}'" + ansible-galaxy collection install ${collection} + else + collection_version=$(echo "${collections_installed}" | awk -F ' ' '{print $2}') + + echo "The required collection '${collection}' is installed in version ${collection_version}." + fi + done + echo "" +fi + +tox ${TOX_OPTS} -- molecule ${TOX_TEST} ${TOX_ARGS} diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/logind/hooks/verify b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/logind/hooks/verify new file mode 100755 index 0000000..5f436af --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/logind/hooks/verify @@ -0,0 +1,3 @@ +#!/usr/bin/env bash + +hooks/tox.sh "verify" diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/logind/molecule/configured/converge.yml b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/logind/molecule/configured/converge.yml new file mode 100644 index 0000000..adc4d5f --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/logind/molecule/configured/converge.yml @@ -0,0 +1,7 @@ +--- +- name: converge + hosts: instance + any_errors_fatal: false + + roles: + - role: bodsch.systemd.logind diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/logind/molecule/configured/group_vars/all/vars.yml b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/logind/molecule/configured/group_vars/all/vars.yml new file mode 100644 index 0000000..811b650 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/logind/molecule/configured/group_vars/all/vars.yml @@ -0,0 +1,31 @@ +--- + +systemd_logind: + n_auto_vt: "12" # 6 + reserve_vt: "12" # 6 + kill_user_processes: "" # false + kill_only_users: "" # + kill_exclude_users: "" # root + inhibit_delay_max_sec: "" # 5 + user_stop_delay_sec: "" # 10 + handle_power_key: "" # poweroff + handle_suspend_key: "" # suspend + handle_hibernate_key: "" # hibernate + handle_lid_switch: "" # suspend + handle_lid_switch_external_power: "" # suspend + handle_lid_switch_docked: "" # ignore + handle_reboot_key: "" # reboot + power_key_ignore_inhibited: "" # false + suspend_key_ignore_inhibited: "" # false + hibernate_key_ignore_inhibited: "" # false + lid_switch_ignore_inhibited: "" # true + reboot_key_ignore_inhibited: "" # false + holdoff_timeout_sec: "" # 30s + idle_action: "" # ignore + idle_action_sec: "" # 30min + runtime_directory_size: "" # 10% + runtime_directory_inodes: "" # 400k + remove_ipc: "" # true + inhibitors_max: "" # 8192 + sessions_max: "" # 8192 +... 
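The configured scenario above feeds the logind role a dictionary in which only n_auto_vt and reserve_vt are non-empty, so the rendered /etc/systemd/logind.conf should contain exactly 'NAutoVTs = 12' and 'ReserveVT = 12' under [Login]; it is driven through the Makefile/tox hooks shown above, e.g. TOX_SCENARIO=configured make test. A sketch of an additional verification play one could append (not part of this diff; it assumes the 'key = value' spacing used by the template later in this diff):

- name: verify rendered logind.conf
  hosts: instance
  gather_facts: false

  tasks:
    - name: read the generated file
      ansible.builtin.slurp:
        src: /etc/systemd/logind.conf
      register: logind_conf

    - name: check the two configured keys
      ansible.builtin.assert:
        that:
          - "'NAutoVTs = 12' in logind_conf.content | b64decode"
          - "'ReserveVT = 12' in logind_conf.content | b64decode"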
diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/logind/molecule/configured/molecule.yml b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/logind/molecule/configured/molecule.yml new file mode 100644 index 0000000..67a0c0f --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/logind/molecule/configured/molecule.yml @@ -0,0 +1,51 @@ +dependency: + name: galaxy + +driver: + name: docker + +platforms: + - name: instance + image: "ghcr.io/bodsch/docker-ansible/ansible-${DISTRIBUTION:-debian:12}" + command: ${MOLECULE_DOCKER_COMMAND:-""} + docker_host: "${DOCKER_HOST:-unix://run/docker.sock}" + privileged: true + pre_build_image: true + cgroupns_mode: host + mounts: + - source: /sys/fs/cgroup + target: /sys/fs/cgroup + type: bind + read_only: false + volumes: + - /var/lib/containerd + capabilities: + - SYS_ADMIN + tmpfs: + - /run + - /tmp + +provisioner: + name: ansible + ansible_args: + - --diff + - -v + config_options: + defaults: + deprecation_warnings: true + callback_result_format: yaml + callbacks_enabled: profile_tasks + gathering: smart + fact_caching_timeout: 320 +scenario: + test_sequence: + - destroy + - dependency + - create + - prepare + - converge + - idempotence + - verify + - destroy +verifier: + name: testinfra diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/logind/molecule/configured/prepare.yml b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/logind/molecule/configured/prepare.yml new file mode 100644 index 0000000..d680e48 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/logind/molecule/configured/prepare.yml @@ -0,0 +1,49 @@ +--- + +- name: information + hosts: all + gather_facts: true + + pre_tasks: + - name: arch- / artixlinux + when: + - ansible_facts.distribution | lower == 'archlinux' or + ansible_facts.os_family | lower == 'artix linux' + block: + - name: update pacman system + ansible.builtin.command: | + pacman --refresh --sync --sysupgrade --noconfirm + register: pacman + changed_when: pacman.rc != 0 + failed_when: pacman.rc != 0 + + - name: create depends service + ansible.builtin.copy: + mode: 0755 + dest: /etc/init.d/net + content: | + #!/usr/bin/openrc-run + true + when: + - ansible_facts.os_family | lower == 'artix linux' + + - name: make sure python3-apt is installed (only debian based) + ansible.builtin.package: + name: + - python3-apt + state: present + when: + - ansible_facts.os_family | lower == 'debian' + + - name: update package cache + become: true + ansible.builtin.package: + update_cache: true + + - name: environment + ansible.builtin.debug: + msg: + - "os family : {{ ansible_facts.distribution }} ({{ ansible_facts.os_family }})" + - "distribution version : {{ ansible_facts.distribution_major_version }}" + - "ansible version : {{ ansible_version.full }}" + - "python version : {{ ansible_facts.python.version.major }}.{{ ansible_facts.python.version.minor }}" diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/logind/molecule/configured/tests/test_default.py b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/logind/molecule/configured/tests/test_default.py new file mode 100644 index 0000000..5733549 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/logind/molecule/configured/tests/test_default.py @@ -0,0 +1,146 @@ +# coding: utf-8 +from __future__ import 
unicode_literals + +import json +import os + +import pytest +import testinfra.utils.ansible_runner +from ansible.parsing.dataloader import DataLoader +from ansible.template import Templar + +HOST = "instance" + +testinfra_hosts = testinfra.utils.ansible_runner.AnsibleRunner( + os.environ["MOLECULE_INVENTORY_FILE"] +).get_hosts(HOST) + + +def pp_json(json_thing, sort=True, indents=2): + if type(json_thing) is str: + print(json.dumps(json.loads(json_thing), sort_keys=sort, indent=indents)) + else: + print(json.dumps(json_thing, sort_keys=sort, indent=indents)) + return None + + +def base_directory(): + """ """ + cwd = os.getcwd() + + if "group_vars" in os.listdir(cwd): + directory = "../.." + molecule_directory = "." + else: + directory = "." + molecule_directory = f"molecule/{os.environ.get('MOLECULE_SCENARIO_NAME')}" + + return directory, molecule_directory + + +def read_ansible_yaml(file_name, role_name): + """ """ + read_file = None + + for e in ["yml", "yaml"]: + test_file = f"{file_name}.{e}" + if os.path.isfile(test_file): + read_file = test_file + break + + return f"file={read_file} name={role_name}" + + +@pytest.fixture() +def get_vars(host): + """ + parse ansible variables + - defaults/main.yml + - vars/main.yml + - vars/${DISTRIBUTION}.yaml + - molecule/${MOLECULE_SCENARIO_NAME}/group_vars/all/vars.yml + """ + base_dir, molecule_dir = base_directory() + distribution = host.system_info.distribution + operation_system = None + + if distribution in ["debian", "ubuntu"]: + operation_system = "debian" + elif distribution in ["redhat", "ol", "centos", "rocky", "almalinux"]: + operation_system = "redhat" + elif distribution in ["arch", "artix"]: + operation_system = f"{distribution}linux" + + # print(" -> {} / {}".format(distribution, os)) + # print(" -> {}".format(base_dir)) + + file_defaults = read_ansible_yaml(f"{base_dir}/defaults/main", "role_defaults") + file_vars = read_ansible_yaml(f"{base_dir}/vars/main", "role_vars") + file_distibution = read_ansible_yaml( + f"{base_dir}/vars/{operation_system}", "role_distibution" + ) + file_molecule = read_ansible_yaml( + f"{molecule_dir}/group_vars/all/vars", "test_vars" + ) + # file_host_molecule = read_ansible_yaml("{}/host_vars/{}/vars".format(base_dir, HOST), "host_vars") + + defaults_vars = ( + host.ansible("include_vars", file_defaults) + .get("ansible_facts") + .get("role_defaults") + ) + vars_vars = ( + host.ansible("include_vars", file_vars).get("ansible_facts").get("role_vars") + ) + distibution_vars = ( + host.ansible("include_vars", file_distibution) + .get("ansible_facts") + .get("role_distibution") + ) + molecule_vars = ( + host.ansible("include_vars", file_molecule) + .get("ansible_facts") + .get("test_vars") + ) + # host_vars = host.ansible("include_vars", file_host_molecule).get("ansible_facts").get("host_vars") + + ansible_vars = defaults_vars + ansible_vars.update(vars_vars) + ansible_vars.update(distibution_vars) + ansible_vars.update(molecule_vars) + # ansible_vars.update(host_vars) + + templar = Templar(loader=DataLoader(), variables=ansible_vars) + result = templar.template(ansible_vars, fail_on_undefined=False) + + return result + + +def local_facts(host): + """ + return local facts + """ + return host.ansible("setup").get("ansible_facts").get("ansible_local").get("logind") + + +@pytest.mark.parametrize( + "directories", + [ + "/etc/systemd", + ], +) +def test_directories(host, directories): + d = host.file(directories) + assert d.is_directory + + +@pytest.mark.parametrize( + "files", + [ + "/etc/systemd/logind.conf", 
+ ], +) +def test_systemd_files(host, files): + """ """ + d = host.file(files) + assert d.is_file diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/logind/molecule/default/converge.yml b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/logind/molecule/default/converge.yml new file mode 100644 index 0000000..adc4d5f --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/logind/molecule/default/converge.yml @@ -0,0 +1,7 @@ +--- +- name: converge + hosts: instance + any_errors_fatal: false + + roles: + - role: bodsch.systemd.logind diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/logind/molecule/default/group_vars/all/vars.yml b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/logind/molecule/default/group_vars/all/vars.yml new file mode 100644 index 0000000..c81cf5b --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/logind/molecule/default/group_vars/all/vars.yml @@ -0,0 +1,3 @@ +--- + +... diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/logind/molecule/default/molecule.yml b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/logind/molecule/default/molecule.yml new file mode 100644 index 0000000..67a0c0f --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/logind/molecule/default/molecule.yml @@ -0,0 +1,51 @@ +dependency: + name: galaxy + +driver: + name: docker + +platforms: + - name: instance + image: "ghcr.io/bodsch/docker-ansible/ansible-${DISTRIBUTION:-debian:12}" + command: ${MOLECULE_DOCKER_COMMAND:-""} + docker_host: "${DOCKER_HOST:-unix://run/docker.sock}" + privileged: true + pre_build_image: true + cgroupns_mode: host + mounts: + - source: /sys/fs/cgroup + target: /sys/fs/cgroup + type: bind + read_only: false + volumes: + - /var/lib/containerd + capabilities: + - SYS_ADMIN + tmpfs: + - /run + - /tmp + +provisioner: + name: ansible + ansible_args: + - --diff + - -v + config_options: + defaults: + deprecation_warnings: true + callback_result_format: yaml + callbacks_enabled: profile_tasks + gathering: smart + fact_caching_timeout: 320 +scenario: + test_sequence: + - destroy + - dependency + - create + - prepare + - converge + - idempotence + - verify + - destroy +verifier: + name: testinfra diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/logind/molecule/default/prepare.yml b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/logind/molecule/default/prepare.yml new file mode 100644 index 0000000..d680e48 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/logind/molecule/default/prepare.yml @@ -0,0 +1,49 @@ +--- + +- name: information + hosts: all + gather_facts: true + + pre_tasks: + - name: arch- / artixlinux + when: + - ansible_facts.distribution | lower == 'archlinux' or + ansible_facts.os_family | lower == 'artix linux' + block: + - name: update pacman system + ansible.builtin.command: | + pacman --refresh --sync --sysupgrade --noconfirm + register: pacman + changed_when: pacman.rc != 0 + failed_when: pacman.rc != 0 + + - name: create depends service + ansible.builtin.copy: + mode: 0755 + dest: /etc/init.d/net + content: | + #!/usr/bin/openrc-run + true + when: + - ansible_facts.os_family | lower == 'artix linux' + + - name: make sure python3-apt is installed (only debian based) + ansible.builtin.package: 
+ name: + - python3-apt + state: present + when: + - ansible_facts.os_family | lower == 'debian' + + - name: update package cache + become: true + ansible.builtin.package: + update_cache: true + + - name: environment + ansible.builtin.debug: + msg: + - "os family : {{ ansible_facts.distribution }} ({{ ansible_facts.os_family }})" + - "distribution version : {{ ansible_facts.distribution_major_version }}" + - "ansible version : {{ ansible_version.full }}" + - "python version : {{ ansible_facts.python.version.major }}.{{ ansible_facts.python.version.minor }}" diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/logind/molecule/default/tests/test_default.py b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/logind/molecule/default/tests/test_default.py new file mode 100644 index 0000000..5733549 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/logind/molecule/default/tests/test_default.py @@ -0,0 +1,146 @@ +# coding: utf-8 +from __future__ import unicode_literals + +import json +import os + +import pytest +import testinfra.utils.ansible_runner +from ansible.parsing.dataloader import DataLoader +from ansible.template import Templar + +HOST = "instance" + +testinfra_hosts = testinfra.utils.ansible_runner.AnsibleRunner( + os.environ["MOLECULE_INVENTORY_FILE"] +).get_hosts(HOST) + + +def pp_json(json_thing, sort=True, indents=2): + if type(json_thing) is str: + print(json.dumps(json.loads(json_thing), sort_keys=sort, indent=indents)) + else: + print(json.dumps(json_thing, sort_keys=sort, indent=indents)) + return None + + +def base_directory(): + """ """ + cwd = os.getcwd() + + if "group_vars" in os.listdir(cwd): + directory = "../.." + molecule_directory = "." + else: + directory = "." 
+ molecule_directory = f"molecule/{os.environ.get('MOLECULE_SCENARIO_NAME')}" + + return directory, molecule_directory + + +def read_ansible_yaml(file_name, role_name): + """ """ + read_file = None + + for e in ["yml", "yaml"]: + test_file = f"{file_name}.{e}" + if os.path.isfile(test_file): + read_file = test_file + break + + return f"file={read_file} name={role_name}" + + +@pytest.fixture() +def get_vars(host): + """ + parse ansible variables + - defaults/main.yml + - vars/main.yml + - vars/${DISTRIBUTION}.yaml + - molecule/${MOLECULE_SCENARIO_NAME}/group_vars/all/vars.yml + """ + base_dir, molecule_dir = base_directory() + distribution = host.system_info.distribution + operation_system = None + + if distribution in ["debian", "ubuntu"]: + operation_system = "debian" + elif distribution in ["redhat", "ol", "centos", "rocky", "almalinux"]: + operation_system = "redhat" + elif distribution in ["arch", "artix"]: + operation_system = f"{distribution}linux" + + # print(" -> {} / {}".format(distribution, os)) + # print(" -> {}".format(base_dir)) + + file_defaults = read_ansible_yaml(f"{base_dir}/defaults/main", "role_defaults") + file_vars = read_ansible_yaml(f"{base_dir}/vars/main", "role_vars") + file_distibution = read_ansible_yaml( + f"{base_dir}/vars/{operation_system}", "role_distibution" + ) + file_molecule = read_ansible_yaml( + f"{molecule_dir}/group_vars/all/vars", "test_vars" + ) + # file_host_molecule = read_ansible_yaml("{}/host_vars/{}/vars".format(base_dir, HOST), "host_vars") + + defaults_vars = ( + host.ansible("include_vars", file_defaults) + .get("ansible_facts") + .get("role_defaults") + ) + vars_vars = ( + host.ansible("include_vars", file_vars).get("ansible_facts").get("role_vars") + ) + distibution_vars = ( + host.ansible("include_vars", file_distibution) + .get("ansible_facts") + .get("role_distibution") + ) + molecule_vars = ( + host.ansible("include_vars", file_molecule) + .get("ansible_facts") + .get("test_vars") + ) + # host_vars = host.ansible("include_vars", file_host_molecule).get("ansible_facts").get("host_vars") + + ansible_vars = defaults_vars + ansible_vars.update(vars_vars) + ansible_vars.update(distibution_vars) + ansible_vars.update(molecule_vars) + # ansible_vars.update(host_vars) + + templar = Templar(loader=DataLoader(), variables=ansible_vars) + result = templar.template(ansible_vars, fail_on_undefined=False) + + return result + + +def local_facts(host): + """ + return local facts + """ + return host.ansible("setup").get("ansible_facts").get("ansible_local").get("logind") + + +@pytest.mark.parametrize( + "directories", + [ + "/etc/systemd", + ], +) +def test_directories(host, directories): + d = host.file(directories) + assert d.is_directory + + +@pytest.mark.parametrize( + "files", + [ + "/etc/systemd/logind.conf", + ], +) +def test_systemd_files(host, files): + """ """ + d = host.file(files) + assert d.is_file diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/logind/tasks/main.yml b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/logind/tasks/main.yml new file mode 100644 index 0000000..6bc7dc5 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/logind/tasks/main.yml @@ -0,0 +1,15 @@ +--- + +- name: merge systemd logind configuration between defaults and custom + ansible.builtin.set_fact: + systemd_logind: "{{ systemd_defaults_logind | combine(systemd_logind, recursive=True) }}" + +- name: create systemd logind configuration + ansible.builtin.template: + 
src: systemd/logind.conf.j2 + dest: /etc/systemd/logind.conf + mode: 0644 + owner: root + group: root + +... diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/logind/templates/systemd/logind.conf.j2 b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/logind/templates/systemd/logind.conf.j2 new file mode 100644 index 0000000..6e7d6bc --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/logind/templates/systemd/logind.conf.j2 @@ -0,0 +1,132 @@ +#jinja2: trim_blocks: True, lstrip_blocks: True +# {{ ansible_managed }} +# +# This file is part of systemd. +# +# systemd is free software; you can redistribute it and/or modify it +# under the terms of the GNU Lesser General Public License as published by +# the Free Software Foundation; either version 2.1 of the License, or +# (at your option) any later version. +# +# Entries in this file show the compile time defaults. +# You can change settings by editing this file. +# Defaults can be restored by simply deleting this file. +# +# See logind.conf(5) for details. + +[Login] +{% if systemd_logind.n_auto_vt is defined and + systemd_logind.n_auto_vt | string | length > 0 %} +NAutoVTs = {{ systemd_logind.n_auto_vt }} +{% endif %} +{% if systemd_logind.reserve_vt is defined and + systemd_logind.reserve_vt | string | length > 0 %} +ReserveVT = {{ systemd_logind.reserve_vt }} +{% endif %} +{% if systemd_logind.kill_user_processes is defined and + systemd_logind.kill_user_processes | string | length > 0 %} +KillUserProcesses = {{ systemd_logind.kill_user_processes | bool | bodsch.core.config_bool(true_as='yes', false_as='no') }} +{% endif %} +{% if systemd_logind.kill_only_users is defined and + systemd_logind.kill_only_users | string | length > 0 %} +KillOnlyUsers = {{ systemd_logind.kill_only_users }} +{% endif %} +{% if systemd_logind.kill_exclude_users is defined and + systemd_logind.kill_exclude_users | string | length > 0 %} +KillExcludeUsers = {{ systemd_logind.kill_exclude_users }} +{% endif %} +{% if systemd_logind.inhibit_delay_max_sec is defined and + systemd_logind.inhibit_delay_max_sec | string | length > 0 %} +InhibitDelayMaxSec = {{ systemd_logind.inhibit_delay_max_sec }} +{% endif %} +{% if systemd_logind.user_stop_delay_sec is defined and + systemd_logind.user_stop_delay_sec | string | length > 0 %} +UserStopDelaySec = {{ systemd_logind.user_stop_delay_sec }} +{% endif %} +{% if systemd_logind.handle_power_key is defined and + systemd_logind.handle_power_key | string | length > 0 and + systemd_logind.handle_power_key in ["ignore", "poweroff", "reboot", "halt", "kexec", "suspend", "hibernate", "hybrid-sleep", "suspend-then-hibernate", "lock", "factory-reset"] %} +HandlePowerKey = {{ systemd_logind.handle_power_key }} +{% endif %} +{% if systemd_logind.handle_suspend_key is defined and + systemd_logind.handle_suspend_key | string | length > 0 and + systemd_logind.handle_suspend_key in ["ignore", "poweroff", "reboot", "halt", "kexec", "suspend", "hibernate", "hybrid-sleep", "suspend-then-hibernate", "lock", "factory-reset"] %} +HandleSuspendKey = {{ systemd_logind.handle_suspend_key }} +{% endif %} +{% if systemd_logind.handle_hibernate_key is defined and + systemd_logind.handle_hibernate_key | string | length > 0 and + systemd_logind.handle_hibernate_key in ["ignore", "poweroff", "reboot", "halt", "kexec", "suspend", "hibernate", "hybrid-sleep", "suspend-then-hibernate", "lock", "factory-reset"] %} +HandleHibernateKey = {{ 
systemd_logind.handle_hibernate_key }} +{% endif %} +{% if systemd_logind.handle_lid_switch is defined and + systemd_logind.handle_lid_switch | string | length > 0 and + systemd_logind.handle_lid_switch in ["ignore", "poweroff", "reboot", "halt", "kexec", "suspend", "hibernate", "hybrid-sleep", "suspend-then-hibernate", "lock", "factory-reset"] %} +HandleLidSwitch = {{ systemd_logind.handle_lid_switch }} +{% endif %} +{% if systemd_logind.handle_lid_switch_external_power is defined and + systemd_logind.handle_lid_switch_external_power | string | length > 0 %} +HandleLidSwitchExternalPower = {{ systemd_logind.handle_lid_switch_external_power }} +{% endif %} +{% if systemd_logind.handle_lid_switch_docked is defined and + systemd_logind.handle_lid_switch_docked | string | length > 0 and + systemd_logind.handle_lid_switch_docked in ["ignore", "poweroff", "reboot", "halt", "kexec", "suspend", "hibernate", "hybrid-sleep", "suspend-then-hibernate", "lock", "factory-reset"] %} +HandleLidSwitchDocked = {{ systemd_logind.handle_lid_switch_docked }} +{% endif %} +{% if systemd_logind.handle_reboot_key is defined and + systemd_logind.handle_reboot_key | string | length > 0 and + systemd_logind.handle_reboot_key in ["ignore", "poweroff", "reboot", "halt", "kexec", "suspend", "hibernate", "hybrid-sleep", "suspend-then-hibernate", "lock", "factory-reset"] %} +HandleRebootKey = {{ systemd_logind.handle_reboot_key }} +{% endif %} +{% if systemd_logind.power_key_ignore_inhibited is defined and + systemd_logind.power_key_ignore_inhibited | string | length > 0 %} +PowerKeyIgnoreInhibited = {{ systemd_logind.power_key_ignore_inhibited | bool | bodsch.core.config_bool(true_as='yes', false_as='no') }} +{% endif %} +{% if systemd_logind.suspend_key_ignore_inhibited is defined and + systemd_logind.suspend_key_ignore_inhibited | string | length > 0 %} +SuspendKeyIgnoreInhibited = {{ systemd_logind.suspend_key_ignore_inhibited | bool | bodsch.core.config_bool(true_as='yes', false_as='no') }} +{% endif %} +{% if systemd_logind.hibernate_key_ignore_inhibited is defined and + systemd_logind.hibernate_key_ignore_inhibited | string | length > 0 %} +HibernateKeyIgnoreInhibited = {{ systemd_logind.hibernate_key_ignore_inhibited | bool | bodsch.core.config_bool(true_as='yes', false_as='no') }} +{% endif %} +{% if systemd_logind.lid_switch_ignore_inhibited is defined and + systemd_logind.lid_switch_ignore_inhibited | string | length > 0 %} +LidSwitchIgnoreInhibited = {{ systemd_logind.lid_switch_ignore_inhibited | bool | bodsch.core.config_bool(true_as='yes', false_as='no') }} +{% endif %} +{% if systemd_logind.reboot_key_ignore_inhibited is defined and + systemd_logind.reboot_key_ignore_inhibited | string | length > 0 %} +RebootKeyIgnoreInhibited = {{ systemd_logind.reboot_key_ignore_inhibited | bool | bodsch.core.config_bool(true_as='yes', false_as='no') }} +{% endif %} +{% if systemd_logind.holdoff_timeout_sec is defined and + systemd_logind.holdoff_timeout_sec | string | length > 0 %} +HoldoffTimeoutSec = {{ systemd_logind.holdoff_timeout_sec }} +{% endif %} +{% if systemd_logind.idle_action is defined and + systemd_logind.idle_action | string | length > 0 and + systemd_logind.idle_action in ["ignore", "poweroff", "reboot", "halt", "kexec", "suspend", "hibernate", "hybrid-sleep", "suspend-then-hibernate", "lock"] %} +IdleAction = {{ systemd_logind.idle_action }} +{% endif %} +{% if systemd_logind.idle_action_sec is defined and + systemd_logind.idle_action_sec | string | length > 0 %} +IdleActionSec = {{ systemd_logind.idle_action_sec }} +{% endif %} +{% if
systemd_logind.runtime_directory_size is defined and + systemd_logind.runtime_directory_size | string | length > 0 %} +RuntimeDirectorySize = {{ systemd_logind.runtime_directory_size }} +{% endif %} +{% if systemd_logind.runtime_directory_inodes is defined and + systemd_logind.runtime_directory_inodes | string | length > 0 %} +RuntimeDirectoryInodes = {{ systemd_logind.runtime_directory_inodes }} +{% endif %} +{% if systemd_logind.remove_ipc is defined and + systemd_logind.remove_ipc | string | length > 0 %} +RemoveIPC = {{ systemd_logind.remove_ipc | bool | bodsch.core.config_bool(true_as='yes', false_as='no') }} +{% endif %} +{% if systemd_logind.inhibitors_max is defined and + systemd_logind.inhibitors_max | string | length > 0 %} +InhibitorsMax = {{ systemd_logind.inhibitors_max }} +{% endif %} +{% if systemd_logind.sessions_max is defined and + systemd_logind.sessions_max | string | length > 0 %} +SessionsMax = {{ systemd_logind.sessions_max }} +{% endif %} diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/logind/vars/main.yml b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/logind/vars/main.yml new file mode 100644 index 0000000..116702b --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/logind/vars/main.yml @@ -0,0 +1,32 @@ +--- + +systemd_defaults_logind: + n_auto_vt: "" # 6 + reserve_vt: "" # 6 + kill_user_processes: "" # false + kill_only_users: "" # + kill_exclude_users: "" # root + inhibit_delay_max_sec: "" # 5 + user_stop_delay_sec: "" # 10 + handle_power_key: "" # poweroff + handle_suspend_key: "" # suspend + handle_hibernate_key: "" # hibernate + handle_lid_switch: "" # suspend + handle_lid_switch_external_power: "" # suspend + handle_lid_switch_docked: "" # ignore + handle_reboot_key: "" # reboot + power_key_ignore_inhibited: "" # false + suspend_key_ignore_inhibited: "" # false + hibernate_key_ignore_inhibited: "" # false + lid_switch_ignore_inhibited: "" # true + reboot_key_ignore_inhibited: "" # false + holdoff_timeout_sec: "" # 30s + idle_action: "" # ignore + idle_action_sec: "" # 30min + runtime_directory_size: "" # 10% + runtime_directory_inodes: "" # 400k + remove_ipc: "" # true + inhibitors_max: "" # 8192 + sessions_max: "" # 8192 + +...
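For reference, this is all the merge step in both roles' tasks/main.yml does: combine(recursive=True) returns the defaults dictionary with any user-supplied keys overwriting their empty-string placeholders, so unset keys keep skipping their template blocks. A self-contained toy playbook (all values invented) that shows the behaviour:

- name: demonstrate the defaults/custom merge
  hosts: localhost
  gather_facts: false

  vars:
    systemd_defaults_logind:
      idle_action: ""         # stays empty -> its template block is skipped
      sessions_max: ""
    systemd_logind:
      sessions_max: "4096"    # user override wins

  tasks:
    - name: show the merged dictionary
      ansible.builtin.debug:
        msg: "{{ systemd_defaults_logind | combine(systemd_logind, recursive=True) }}"
      # prints: {"idle_action": "", "sessions_max": "4096"}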
diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/networkd/.ansible-lint b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/networkd/.ansible-lint new file mode 100644 index 0000000..5343e85 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/networkd/.ansible-lint @@ -0,0 +1,6 @@ +--- + +skip_list: + - name[casing] + - name[template] + - syntax-check[specific] diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/networkd/.editorconfig b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/networkd/.editorconfig new file mode 100644 index 0000000..898cdbd --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/networkd/.editorconfig @@ -0,0 +1,23 @@ +# EditorConfig helps developers define and maintain consistent +# coding styles between different editors and IDEs +# https://editorconfig.org/ + + +root = true + +[*] +indent_style = space +indent_size = 2 + +end_of_line = lf +charset = utf-8 +trim_trailing_whitespace = true +insert_final_newline = true +max_line_length = 100 + +[*.py] +indent_style = space +indent_size = 4 + +[*.md] +trim_trailing_whitespace = false diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/networkd/.flake8 b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/networkd/.flake8 new file mode 100644 index 0000000..1962f7e --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/networkd/.flake8 @@ -0,0 +1,19 @@ +[flake8] + +# E221 multiple spaces before operator +# E251 unexpected spaces around keyword / parameter equals + +ignore = E221,E251 + +exclude = + # No need to traverse our git directory + .git, + # There's no value in checking cache directories + __pycache__, + .tox + +# E203: https://github.com/python/black/issues/315 +# ignore = D,E741,W503,W504,H,E501,E203 + +max-line-length = 195 + diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/networkd/.gitignore b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/networkd/.gitignore new file mode 100644 index 0000000..3d35e6a --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/networkd/.gitignore @@ -0,0 +1,6 @@ +.tox +.galaxy_install_info +*kate-swp +__pycache__ +.cache +.directory diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/networkd/.yamllint b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/networkd/.yamllint new file mode 100644 index 0000000..e3f52af --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/networkd/.yamllint @@ -0,0 +1,36 @@ +--- +# Based on ansible-lint config +extends: default + +rules: + braces: + max-spaces-inside: 1 + level: error + brackets: + max-spaces-inside: 1 + level: error + colons: + max-spaces-after: -1 + level: error + commas: + max-spaces-after: -1 + level: error + comments: disable + comments-indentation: disable + document-start: disable + empty-lines: + max: 3 + level: error + hyphens: + level: error + indentation: + spaces: 2 + key-duplicates: enable + line-length: + max: 195 + level: warning + new-line-at-end-of-file: disable + new-lines: + type: unix + trailing-spaces: disable + truthy: disable diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/networkd/CONTRIBUTING.md 
b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/networkd/CONTRIBUTING.md new file mode 100644 index 0000000..e3cd4cc --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/networkd/CONTRIBUTING.md @@ -0,0 +1,31 @@ +Contributing +============ +If you want to contribute to a project and make it better, your help is very welcome. +Contributing is also a great way to learn more about social coding on Github, new technologies +and their ecosystems and how to make constructive, helpful bug reports, feature requests and the +noblest of all contributions: a good, clean pull request. + +### How to make a clean pull request + +Look for a project's contribution instructions. If there are any, follow them. + +- Create a personal fork of the project on Github. +- Clone the fork on your local machine. Your remote repo on Github is called `origin`. +- Add the original repository as a remote called `upstream`. +- If you created your fork a while ago be sure to pull upstream changes into your local repository. +- Create a new branch to work on! Branch from `develop` if it exists, else from `master`. +- Implement/fix your feature, comment your code. +- Follow the code style of the project, including indentation. +- If the project has tests run them! +- Write or adapt tests as needed. +- Add or change the documentation as needed. +- Squash your commits into a single commit. Create a new branch if necessary. +- Push your branch to your fork on Github, the remote `origin`. +- From your fork open a pull request in the correct branch. Target the project's `develop` branch if there is one, else go for `master`! +- If the maintainer requests further changes just push them to your branch. The PR will be updated automatically. +- Once the pull request is approved and merged you can pull the changes from `upstream` to your local repo and delete + your extra branch(es). + +And last but not least: Always write your commit messages in the present tense. +Your commit message should describe what the commit, when applied, does to the +code – not what you did to the code. diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/networkd/LICENSE b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/networkd/LICENSE new file mode 100644 index 0000000..8c8472f --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/networkd/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. 
+ + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright 2020-2021 Bodo Schulz + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/networkd/Makefile b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/networkd/Makefile new file mode 100644 index 0000000..bfaab7c --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/networkd/Makefile @@ -0,0 +1,22 @@ +# +export TOX_SCENARIO ?= default +export TOX_ANSIBLE ?= ansible_8.5 + +.PHONY: converge destroy verify test lint + +default: converge + +converge: + @hooks/converge + +destroy: + @hooks/destroy + +verify: + @hooks/verify + +test: + @hooks/test + +lint: + @hooks/lint diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/networkd/README.md b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/networkd/README.md new file mode 100644 index 0000000..b88305c --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/networkd/README.md @@ -0,0 +1,2 @@ + +# https://www.freedesktop.org/software/systemd/man/networkd.conf.html diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/networkd/defaults/main.yml b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/networkd/defaults/main.yml new file mode 100644 index 0000000..785e590 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/networkd/defaults/main.yml @@ -0,0 +1,18 @@ +--- + +systemd_networkd: {} + # speed_meter: false + # speed_meter_interval_sec: 10sec + # manage_foreign_routing_policy_rules: true + # manage_foreign_routes: true + # route_table: "" + # ipv6_privacy_extensions: false + # dhcp4: + # duid_type: vendor + # duid_raw_data: "" + # dhcp6: + # duid_type: vendor + # duid_raw_data: "" + +... 
+ diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/networkd/hooks/converge b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/networkd/hooks/converge new file mode 100755 index 0000000..0c50932 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/networkd/hooks/converge @@ -0,0 +1,3 @@ +#!/usr/bin/env bash + +hooks/tox.sh "converge" diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/networkd/hooks/destroy b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/networkd/hooks/destroy new file mode 100755 index 0000000..b4a3f8d --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/networkd/hooks/destroy @@ -0,0 +1,3 @@ +#!/usr/bin/env bash + +hooks/tox.sh "destroy" diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/networkd/hooks/lint b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/networkd/hooks/lint new file mode 100755 index 0000000..ef226a0 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/networkd/hooks/lint @@ -0,0 +1,3 @@ +#!/usr/bin/env bash + +hooks/tox.sh "lint" diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/networkd/hooks/molecule.rc b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/networkd/hooks/molecule.rc new file mode 100644 index 0000000..a15f7c3 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/networkd/hooks/molecule.rc @@ -0,0 +1,9 @@ + +TOX_ARGS= + +if [ -n "${TOX_SCENARIO}" ] +then + TOX_ARGS="--scenario-name ${TOX_SCENARIO}" +fi + +TOX_OPTS="-e ${TOX_ANSIBLE}" diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/networkd/hooks/test b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/networkd/hooks/test new file mode 100755 index 0000000..2869139 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/networkd/hooks/test @@ -0,0 +1,3 @@ +#!/usr/bin/env bash + +hooks/tox.sh "test" diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/networkd/hooks/tox.sh b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/networkd/hooks/tox.sh new file mode 100755 index 0000000..62bb777 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/networkd/hooks/tox.sh @@ -0,0 +1,26 @@ +#!/usr/bin/env bash + +. hooks/molecule.rc + +TOX_TEST="${1}" + +if [ -f "./collections.yml" ] +then + for collection in $(grep -v "#" collections.yml | grep "^ - name: " | awk -F ': ' '{print $2}') + do + collections_installed="$(ansible-galaxy collection list | grep ${collection} 2> /dev/null)" + + if [ -z "${collections_installed}" ] + then + echo "Install the required collection '${collection}'" + ansible-galaxy collection install ${collection} + else + collection_version=$(echo "${collections_installed}" | awk -F ' ' '{print $2}') + + echo "The required collection '${collection}' is installed in version ${collection_version}." 
+ fi + done + echo "" +fi + +tox ${TOX_OPTS} -- molecule ${TOX_TEST} ${TOX_ARGS} diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/networkd/hooks/verify b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/networkd/hooks/verify new file mode 100755 index 0000000..5f436af --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/networkd/hooks/verify @@ -0,0 +1,3 @@ +#!/usr/bin/env bash + +hooks/tox.sh "verify" diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/networkd/molecule/configured/converge.yml b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/networkd/molecule/configured/converge.yml new file mode 100644 index 0000000..a45cadb --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/networkd/molecule/configured/converge.yml @@ -0,0 +1,7 @@ +--- +- name: converge + hosts: instance + any_errors_fatal: false + + roles: + - role: bodsch.systemd.networkd diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/networkd/molecule/configured/group_vars/all/vars.yml b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/networkd/molecule/configured/group_vars/all/vars.yml new file mode 100644 index 0000000..fee51fa --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/networkd/molecule/configured/group_vars/all/vars.yml @@ -0,0 +1,17 @@ +--- + +systemd_networkd: + speed_meter: "true" # false + speed_meter_interval_sec: "20sec" # 10sec + manage_foreign_routing_policy_rules: "" # true + manage_foreign_routes: "" # true + route_table: "" + ipv6_privacy_extensions: "" # false + dhcp4: + duid_type: "" # vendor + duid_raw_data: "" + dhcp6: + duid_type: "" # vendor + duid_raw_data: "" + +... 
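Before delegating to tox/molecule, the hooks/tox.sh script above installs any Ansible collections it finds in an optional collections.yml next to the role: it strips comment lines, greps for entries matching '^  - name: ', and takes the value after ': ' as the collection name. A sketch of the file shape that grep implies (the surrounding collections: key and the example entries are assumptions; no collections.yml is included in this patch):

    ---
    collections:
      - name: bodsch.core          # matched by grep "^  - name: " in hooks/tox.sh
      - name: community.general    # hypothetical second entry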
diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/networkd/molecule/configured/molecule.yml b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/networkd/molecule/configured/molecule.yml new file mode 100644 index 0000000..67a0c0f --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/networkd/molecule/configured/molecule.yml @@ -0,0 +1,51 @@ +dependency: + name: galaxy + +driver: + name: docker + +platforms: + - name: instance + image: "ghcr.io/bodsch/docker-ansible/ansible-${DISTRIBUTION:-debian:12}" + command: ${MOLECULE_DOCKER_COMMAND:-""} + docker_host: "${DOCKER_HOST:-unix://run/docker.sock}" + privileged: true + pre_build_image: true + cgroupns_mode: host + mounts: + - source: /sys/fs/cgroup + target: /sys/fs/cgroup + type: bind + read_only: false + volumes: + - /var/lib/containerd + capabilities: + - SYS_ADMIN + tmpfs: + - /run + - /tmp + +provisioner: + name: ansible + ansible_args: + - --diff + - -v + config_options: + defaults: + deprecation_warnings: true + callback_result_format: yaml + callbacks_enabled: profile_tasks + gathering: smart + fact_caching_timeout: 320 +scenario: + test_sequence: + - destroy + - dependency + - create + - prepare + - converge + - idempotence + - verify + - destroy +verifier: + name: testinfra diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/networkd/molecule/configured/prepare.yml b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/networkd/molecule/configured/prepare.yml new file mode 100644 index 0000000..d680e48 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/networkd/molecule/configured/prepare.yml @@ -0,0 +1,49 @@ +--- + +- name: information + hosts: all + gather_facts: true + + pre_tasks: + - name: arch- / artixlinux + when: + - ansible_facts.distribution | lower == 'archlinux' or + ansible_facts.os_family | lower == 'artix linux' + block: + - name: update pacman system + ansible.builtin.command: | + pacman --refresh --sync --sysupgrade --noconfirm + register: pacman + changed_when: pacman.rc != 0 + failed_when: pacman.rc != 0 + + - name: create depends service + ansible.builtin.copy: + mode: 0755 + dest: /etc/init.d/net + content: | + #!/usr/bin/openrc-run + true + when: + - ansible_facts.os_family | lower == 'artix linux' + + - name: make sure python3-apt is installed (only debian based) + ansible.builtin.package: + name: + - python3-apt + state: present + when: + - ansible_facts.os_family | lower == 'debian' + + - name: update package cache + become: true + ansible.builtin.package: + update_cache: true + + - name: environment + ansible.builtin.debug: + msg: + - "os family : {{ ansible_facts.distribution }} ({{ ansible_facts.os_family }})" + - "distribution version : {{ ansible_facts.distribution_major_version }}" + - "ansible version : {{ ansible_version.full }}" + - "python version : {{ ansible_facts.python.version.major }}.{{ ansible_facts.python.version.minor }}" diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/networkd/molecule/configured/tests/test_default.py b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/networkd/molecule/configured/tests/test_default.py new file mode 100644 index 0000000..537af86 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/networkd/molecule/configured/tests/test_default.py @@ -0,0 +1,148 @@ +# coding: utf-8 +from __future__ 
import unicode_literals + +import json +import os + +import pytest +import testinfra.utils.ansible_runner +from ansible.parsing.dataloader import DataLoader +from ansible.template import Templar + +HOST = "instance" + +testinfra_hosts = testinfra.utils.ansible_runner.AnsibleRunner( + os.environ["MOLECULE_INVENTORY_FILE"] +).get_hosts(HOST) + + +def pp_json(json_thing, sort=True, indents=2): + if type(json_thing) is str: + print(json.dumps(json.loads(json_thing), sort_keys=sort, indent=indents)) + else: + print(json.dumps(json_thing, sort_keys=sort, indent=indents)) + return None + + +def base_directory(): + """ """ + cwd = os.getcwd() + + if "group_vars" in os.listdir(cwd): + directory = "../.." + molecule_directory = "." + else: + directory = "." + molecule_directory = f"molecule/{os.environ.get('MOLECULE_SCENARIO_NAME')}" + + return directory, molecule_directory + + +def read_ansible_yaml(file_name, role_name): + """ """ + read_file = None + + for e in ["yml", "yaml"]: + test_file = f"{file_name}.{e}" + if os.path.isfile(test_file): + read_file = test_file + break + + return f"file={read_file} name={role_name}" + + +@pytest.fixture() +def get_vars(host): + """ + parse ansible variables + - defaults/main.yml + - vars/main.yml + - vars/${DISTRIBUTION}.yaml + - molecule/${MOLECULE_SCENARIO_NAME}/group_vars/all/vars.yml + """ + base_dir, molecule_dir = base_directory() + distribution = host.system_info.distribution + operation_system = None + + if distribution in ["debian", "ubuntu"]: + operation_system = "debian" + elif distribution in ["redhat", "ol", "centos", "rocky", "almalinux"]: + operation_system = "redhat" + elif distribution in ["arch", "artix"]: + operation_system = f"{distribution}linux" + + # print(" -> {} / {}".format(distribution, os)) + # print(" -> {}".format(base_dir)) + + file_defaults = read_ansible_yaml(f"{base_dir}/defaults/main", "role_defaults") + file_vars = read_ansible_yaml(f"{base_dir}/vars/main", "role_vars") + file_distibution = read_ansible_yaml( + f"{base_dir}/vars/{operation_system}", "role_distibution" + ) + file_molecule = read_ansible_yaml( + f"{molecule_dir}/group_vars/all/vars", "test_vars" + ) + # file_host_molecule = read_ansible_yaml("{}/host_vars/{}/vars".format(base_dir, HOST), "host_vars") + + defaults_vars = ( + host.ansible("include_vars", file_defaults) + .get("ansible_facts") + .get("role_defaults") + ) + vars_vars = ( + host.ansible("include_vars", file_vars).get("ansible_facts").get("role_vars") + ) + distibution_vars = ( + host.ansible("include_vars", file_distibution) + .get("ansible_facts") + .get("role_distibution") + ) + molecule_vars = ( + host.ansible("include_vars", file_molecule) + .get("ansible_facts") + .get("test_vars") + ) + # host_vars = host.ansible("include_vars", file_host_molecule).get("ansible_facts").get("host_vars") + + ansible_vars = defaults_vars + ansible_vars.update(vars_vars) + ansible_vars.update(distibution_vars) + ansible_vars.update(molecule_vars) + # ansible_vars.update(host_vars) + + templar = Templar(loader=DataLoader(), variables=ansible_vars) + result = templar.template(ansible_vars, fail_on_undefined=False) + + return result + + +def local_facts(host): + """ + return local facts + """ + return ( + host.ansible("setup").get("ansible_facts").get("ansible_local").get("networkd") + ) + + +@pytest.mark.parametrize( + "directories", + [ + "/etc/systemd", + ], +) +def test_directories(host, directories): + d = host.file(directories) + assert d.is_directory + + +@pytest.mark.parametrize( + "files", + [ + 
"/etc/systemd/networkd.conf", + ], +) +def test_systemd_files(host, files): + """ """ + d = host.file(files) + assert d.is_file diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/networkd/molecule/default/converge.yml b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/networkd/molecule/default/converge.yml new file mode 100644 index 0000000..a45cadb --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/networkd/molecule/default/converge.yml @@ -0,0 +1,7 @@ +--- +- name: converge + hosts: instance + any_errors_fatal: false + + roles: + - role: bodsch.systemd.networkd diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/networkd/molecule/default/group_vars/all/vars.yml b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/networkd/molecule/default/group_vars/all/vars.yml new file mode 100644 index 0000000..c81cf5b --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/networkd/molecule/default/group_vars/all/vars.yml @@ -0,0 +1,3 @@ +--- + +... diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/networkd/molecule/default/molecule.yml b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/networkd/molecule/default/molecule.yml new file mode 100644 index 0000000..67a0c0f --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/networkd/molecule/default/molecule.yml @@ -0,0 +1,51 @@ +dependency: + name: galaxy + +driver: + name: docker + +platforms: + - name: instance + image: "ghcr.io/bodsch/docker-ansible/ansible-${DISTRIBUTION:-debian:12}" + command: ${MOLECULE_DOCKER_COMMAND:-""} + docker_host: "${DOCKER_HOST:-unix://run/docker.sock}" + privileged: true + pre_build_image: true + cgroupns_mode: host + mounts: + - source: /sys/fs/cgroup + target: /sys/fs/cgroup + type: bind + read_only: false + volumes: + - /var/lib/containerd + capabilities: + - SYS_ADMIN + tmpfs: + - /run + - /tmp + +provisioner: + name: ansible + ansible_args: + - --diff + - -v + config_options: + defaults: + deprecation_warnings: true + callback_result_format: yaml + callbacks_enabled: profile_tasks + gathering: smart + fact_caching_timeout: 320 +scenario: + test_sequence: + - destroy + - dependency + - create + - prepare + - converge + - idempotence + - verify + - destroy +verifier: + name: testinfra diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/networkd/molecule/default/prepare.yml b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/networkd/molecule/default/prepare.yml new file mode 100644 index 0000000..d680e48 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/networkd/molecule/default/prepare.yml @@ -0,0 +1,49 @@ +--- + +- name: information + hosts: all + gather_facts: true + + pre_tasks: + - name: arch- / artixlinux + when: + - ansible_facts.distribution | lower == 'archlinux' or + ansible_facts.os_family | lower == 'artix linux' + block: + - name: update pacman system + ansible.builtin.command: | + pacman --refresh --sync --sysupgrade --noconfirm + register: pacman + changed_when: pacman.rc != 0 + failed_when: pacman.rc != 0 + + - name: create depends service + ansible.builtin.copy: + mode: 0755 + dest: /etc/init.d/net + content: | + #!/usr/bin/openrc-run + true + when: + - ansible_facts.os_family | lower == 'artix linux' + + - name: make sure python3-apt is 
installed (only debian based) + ansible.builtin.package: + name: + - python3-apt + state: present + when: + - ansible_facts.os_family | lower == 'debian' + + - name: update package cache + become: true + ansible.builtin.package: + update_cache: true + + - name: environment + ansible.builtin.debug: + msg: + - "os family : {{ ansible_facts.distribution }} ({{ ansible_facts.os_family }})" + - "distribution version : {{ ansible_facts.distribution_major_version }}" + - "ansible version : {{ ansible_version.full }}" + - "python version : {{ ansible_facts.python.version.major }}.{{ ansible_facts.python.version.minor }}" diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/networkd/molecule/default/tests/test_default.py b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/networkd/molecule/default/tests/test_default.py new file mode 100644 index 0000000..537af86 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/networkd/molecule/default/tests/test_default.py @@ -0,0 +1,148 @@ +# coding: utf-8 +from __future__ import unicode_literals + +import json +import os + +import pytest +import testinfra.utils.ansible_runner +from ansible.parsing.dataloader import DataLoader +from ansible.template import Templar + +HOST = "instance" + +testinfra_hosts = testinfra.utils.ansible_runner.AnsibleRunner( + os.environ["MOLECULE_INVENTORY_FILE"] +).get_hosts(HOST) + + +def pp_json(json_thing, sort=True, indents=2): + if type(json_thing) is str: + print(json.dumps(json.loads(json_thing), sort_keys=sort, indent=indents)) + else: + print(json.dumps(json_thing, sort_keys=sort, indent=indents)) + return None + + +def base_directory(): + """ """ + cwd = os.getcwd() + + if "group_vars" in os.listdir(cwd): + directory = "../.." + molecule_directory = "." + else: + directory = "." 
+ molecule_directory = f"molecule/{os.environ.get('MOLECULE_SCENARIO_NAME')}" + + return directory, molecule_directory + + +def read_ansible_yaml(file_name, role_name): + """ """ + read_file = None + + for e in ["yml", "yaml"]: + test_file = f"{file_name}.{e}" + if os.path.isfile(test_file): + read_file = test_file + break + + return f"file={read_file} name={role_name}" + + +@pytest.fixture() +def get_vars(host): + """ + parse ansible variables + - defaults/main.yml + - vars/main.yml + - vars/${DISTRIBUTION}.yaml + - molecule/${MOLECULE_SCENARIO_NAME}/group_vars/all/vars.yml + """ + base_dir, molecule_dir = base_directory() + distribution = host.system_info.distribution + operation_system = None + + if distribution in ["debian", "ubuntu"]: + operation_system = "debian" + elif distribution in ["redhat", "ol", "centos", "rocky", "almalinux"]: + operation_system = "redhat" + elif distribution in ["arch", "artix"]: + operation_system = f"{distribution}linux" + + # print(" -> {} / {}".format(distribution, os)) + # print(" -> {}".format(base_dir)) + + file_defaults = read_ansible_yaml(f"{base_dir}/defaults/main", "role_defaults") + file_vars = read_ansible_yaml(f"{base_dir}/vars/main", "role_vars") + file_distibution = read_ansible_yaml( + f"{base_dir}/vars/{operation_system}", "role_distibution" + ) + file_molecule = read_ansible_yaml( + f"{molecule_dir}/group_vars/all/vars", "test_vars" + ) + # file_host_molecule = read_ansible_yaml("{}/host_vars/{}/vars".format(base_dir, HOST), "host_vars") + + defaults_vars = ( + host.ansible("include_vars", file_defaults) + .get("ansible_facts") + .get("role_defaults") + ) + vars_vars = ( + host.ansible("include_vars", file_vars).get("ansible_facts").get("role_vars") + ) + distibution_vars = ( + host.ansible("include_vars", file_distibution) + .get("ansible_facts") + .get("role_distibution") + ) + molecule_vars = ( + host.ansible("include_vars", file_molecule) + .get("ansible_facts") + .get("test_vars") + ) + # host_vars = host.ansible("include_vars", file_host_molecule).get("ansible_facts").get("host_vars") + + ansible_vars = defaults_vars + ansible_vars.update(vars_vars) + ansible_vars.update(distibution_vars) + ansible_vars.update(molecule_vars) + # ansible_vars.update(host_vars) + + templar = Templar(loader=DataLoader(), variables=ansible_vars) + result = templar.template(ansible_vars, fail_on_undefined=False) + + return result + + +def local_facts(host): + """ + return local facts + """ + return ( + host.ansible("setup").get("ansible_facts").get("ansible_local").get("networkd") + ) + + +@pytest.mark.parametrize( + "directories", + [ + "/etc/systemd", + ], +) +def test_directories(host, directories): + d = host.file(directories) + assert d.is_directory + + +@pytest.mark.parametrize( + "files", + [ + "/etc/systemd/networkd.conf", + ], +) +def test_systemd_files(host, files): + """ """ + d = host.file(files) + assert d.is_file diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/networkd/tasks/main.yml b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/networkd/tasks/main.yml new file mode 100644 index 0000000..6feeac3 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/networkd/tasks/main.yml @@ -0,0 +1,15 @@ +--- + +- name: merge systemd networkd configuration between defaults and custom + ansible.builtin.set_fact: + systemd_networkd: "{{ systemd_defaults_networkd | combine(systemd_networkd, recursive=True) }}" + +- name: create systemd networkd configuration + 
ansible.builtin.template: + src: systemd/networkd.conf.j2 + dest: /etc/systemd/networkd.conf + mode: 0644 + owner: root + group: root + +... diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/networkd/templates/systemd/networkd.conf.j2 b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/networkd/templates/systemd/networkd.conf.j2 new file mode 100644 index 0000000..acb84a4 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/networkd/templates/systemd/networkd.conf.j2 @@ -0,0 +1,68 @@ +#jinja2: trim_blocks: True, lstrip_blocks: True +# {{ ansible_managed }} +# +# This file is part of systemd. +# +# systemd is free software; you can redistribute it and/or modify it under the +# terms of the GNU Lesser General Public License as published by the Free +# Software Foundation; either version 2.1 of the License, or (at your option) +# any later version. +# +# Entries in this file show the compile time defaults. Local configuration +# should be created by either modifying this file, or by creating "drop-ins" in +# the networkd.conf.d/ subdirectory. The latter is generally recommended. +# Defaults can be restored by simply deleting this file and all drop-ins. +# +# See networkd.conf(5) for details. + +[Network] +{% if systemd_networkd.speed_meter is defined and + systemd_networkd.speed_meter | string | length > 0 %} +SpeedMeter = {{ systemd_networkd.speed_meter | bool | bodsch.core.config_bool(true_as='yes', false_as='no') }} +{% endif %} +{% if systemd_networkd.speed_meter_interval_sec is defined and + systemd_networkd.speed_meter_interval_sec | string | length > 0 %} +SpeedMeterIntervalSec = {{ systemd_networkd.speed_meter_interval_sec }} +{% endif %} +{% if systemd_networkd.manage_foreign_routing_policy_rules is defined and + systemd_networkd.manage_foreign_routing_policy_rules | string | length > 0 %} +ManageForeignRoutingPolicyRules= {{ systemd_networkd.manage_foreign_routing_policy_rules | bool | bodsch.core.config_bool(true_as='yes', false_as='no') }} +{% endif %} +{% if systemd_networkd.manage_foreign_routes is defined and + systemd_networkd.manage_foreign_routes | string | length > 0 %} +ManageForeignRoutes = {{ systemd_networkd.manage_foreign_routes | bool | bodsch.core.config_bool(true_as='yes', false_as='no') }} +{% endif %} +{% if systemd_networkd.route_table is defined and + systemd_networkd.route_table | string | length > 0 %} +RouteTable = {{ systemd_networkd.route_table }} +{% endif %} +{% if systemd_networkd.ipv6_privacy_extensions is defined and + systemd_networkd.ipv6_privacy_extensions | string | length > 0 %} +IPv6PrivacyExtensions = {{ systemd_networkd.ipv6_privacy_extensions | bool | bodsch.core.config_bool(true_as='yes', false_as='no') }} +{% endif %} + +[DHCPv4] +{% if systemd_networkd.dhcp4 is defined and + systemd_networkd.dhcp4 | count > 0 %} + {% if systemd_networkd.dhcp4.duid_type is defined and + systemd_networkd.dhcp4.duid_type | string | length > 0 %} +DUIDType = {{ systemd_networkd.dhcp4.duid_type }} + {% endif %} + {% if systemd_networkd.dhcp4.duid_raw_data is defined and + systemd_networkd.dhcp4.duid_raw_data | string | length > 0 %} +DUIDRawData = {{ systemd_networkd.dhcp4.duid_raw_data }} + {% endif %} +{% endif %} + +[DHCPv6] +{% if systemd_networkd.dhcp6 is defined and + systemd_networkd.dhcp6 | count > 0 %} + {% if systemd_networkd.dhcp6.duid_type is defined and + systemd_networkd.dhcp6.duid_type | string | length > 0 %} +DUIDType = {{ 
systemd_networkd.dhcp6.duid_type }} + {% endif %} + {% if systemd_networkd.dhcp6.duid_raw_data is defined and + systemd_networkd.dhcp6.duid_raw_data | string | length > 0 %} +DUIDRawData = {{ systemd_networkd.dhcp6.duid_raw_data }} + {% endif %} +{% endif %} diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/networkd/vars/main.yml b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/networkd/vars/main.yml new file mode 100644 index 0000000..54975f7 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/networkd/vars/main.yml @@ -0,0 +1,17 @@ +--- + +systemd_defaults_networkd: + speed_meter: "" # false + speed_meter_interval_sec: "" # 10sec + manage_foreign_routing_policy_rules: "" # true + manage_foreign_routes: "" # true + route_table: "" + ipv6_privacy_extensions: "" # false + dhcp4: + duid_type: "" # vendor + duid_raw_data: "" + dhcp6: + duid_type: "" # vendor + duid_raw_data: "" + +... diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/oomd/.ansible-lint b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/oomd/.ansible-lint new file mode 100644 index 0000000..5343e85 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/oomd/.ansible-lint @@ -0,0 +1,6 @@ +--- + +skip_list: + - name[casing] + - name[template] + - syntax-check[specific] diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/oomd/.editorconfig b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/oomd/.editorconfig new file mode 100644 index 0000000..898cdbd --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/oomd/.editorconfig @@ -0,0 +1,23 @@ +# EditorConfig helps developers define and maintain consistent +# coding styles between different editors and IDEs +# https://editorconfig.org/ + + +root = true + +[*] +indent_style = space +indent_size = 2 + +end_of_line = lf +charset = utf-8 +trim_trailing_whitespace = true +insert_final_newline = true +max_line_length = 100 + +[*.py] +indent_style = space +indent_size = 4 + +[*.md] +trim_trailing_whitespace = false diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/oomd/.flake8 b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/oomd/.flake8 new file mode 100644 index 0000000..1962f7e --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/oomd/.flake8 @@ -0,0 +1,19 @@ +[flake8] + +# E221 multiple spaces before operator +# E251 unexpected spaces around keyword / parameter equals + +ignore = E221,E251 + +exclude = + # No need to traverse our git directory + .git, + # There's no value in checking cache directories + __pycache__, + .tox + +# E203: https://github.com/python/black/issues/315 +# ignore = D,E741,W503,W504,H,E501,E203 + +max-line-length = 195 + diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/oomd/.gitignore b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/oomd/.gitignore new file mode 100644 index 0000000..3d35e6a --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/oomd/.gitignore @@ -0,0 +1,6 @@ +.tox +.galaxy_install_info +*kate-swp +__pycache__ +.cache +.directory diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/oomd/.yamllint 
b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/oomd/.yamllint new file mode 100644 index 0000000..e3f52af --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/oomd/.yamllint @@ -0,0 +1,36 @@ +--- +# Based on ansible-lint config +extends: default + +rules: + braces: + max-spaces-inside: 1 + level: error + brackets: + max-spaces-inside: 1 + level: error + colons: + max-spaces-after: -1 + level: error + commas: + max-spaces-after: -1 + level: error + comments: disable + comments-indentation: disable + document-start: disable + empty-lines: + max: 3 + level: error + hyphens: + level: error + indentation: + spaces: 2 + key-duplicates: enable + line-length: + max: 195 + level: warning + new-line-at-end-of-file: disable + new-lines: + type: unix + trailing-spaces: disable + truthy: disable diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/oomd/CONTRIBUTING.md b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/oomd/CONTRIBUTING.md new file mode 100644 index 0000000..e3cd4cc --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/oomd/CONTRIBUTING.md @@ -0,0 +1,31 @@ +Contributing +============ +If you want to contribute to a project and make it better, your help is very welcome. +Contributing is also a great way to learn more about social coding on Github, new technologies +and their ecosystems and how to make constructive, helpful bug reports, feature requests and the +noblest of all contributions: a good, clean pull request. + +### How to make a clean pull request + +Look for a project's contribution instructions. If there are any, follow them. + +- Create a personal fork of the project on Github. +- Clone the fork on your local machine. Your remote repo on Github is called `origin`. +- Add the original repository as a remote called `upstream`. +- If you created your fork a while ago be sure to pull upstream changes into your local repository. +- Create a new branch to work on! Branch from `develop` if it exists, else from `master`. +- Implement/fix your feature, comment your code. +- Follow the code style of the project, including indentation. +- If the project has tests run them! +- Write or adapt tests as needed. +- Add or change the documentation as needed. +- Squash your commits into a single commit. Create a new branch if necessary. +- Push your branch to your fork on Github, the remote `origin`. +- From your fork open a pull request in the correct branch. Target the project's `develop` branch if there is one, else go for `master`! +- If the maintainer requests further changes just push them to your branch. The PR will be updated automatically. +- Once the pull request is approved and merged you can pull the changes from `upstream` to your local repo and delete + your extra branch(es). + +And last but not least: Always write your commit messages in the present tense. +Your commit message should describe what the commit, when applied, does to the +code – not what you did to the code. 
diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/oomd/LICENSE b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/oomd/LICENSE new file mode 100644 index 0000000..8c8472f --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/oomd/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." 
+ + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. 
+ + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. 
We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright 2020-2021 Bodo Schulz + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/oomd/Makefile b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/oomd/Makefile new file mode 100644 index 0000000..bfaab7c --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/oomd/Makefile @@ -0,0 +1,22 @@ +# +export TOX_SCENARIO ?= default +export TOX_ANSIBLE ?= ansible_8.5 + +.PHONY: converge destroy verify test lint + +default: converge + +converge: + @hooks/converge + +destroy: + @hooks/destroy + +verify: + @hooks/verify + +test: + @hooks/test + +lint: + @hooks/lint diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/oomd/README.md b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/oomd/README.md new file mode 100644 index 0000000..e69de29 diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/oomd/defaults/main.yml b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/oomd/defaults/main.yml new file mode 100644 index 0000000..13b3bdd --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/oomd/defaults/main.yml @@ -0,0 +1,7 @@ +--- + +systemd_oomd: {} + # swap_used_limit: "" # 90% + # default_memory_pressure_limit: "" # 60% + # default_memory_pressure_durationSec: "" # 30s +... 
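Note: the role defaults above deliberately leave `systemd_oomd` empty; the commented keys show which options the template understands. As a minimal usage sketch (the playbook and inventory names are placeholders, the values illustrative), the same keys can be passed as extra vars:

    # hypothetical invocation; site.yml and 'inventory' are placeholders
    ansible-playbook -i inventory site.yml \
      -e '{"systemd_oomd": {"swap_used_limit": "90%", "default_memory_pressure_limit": "60%"}}'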
diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/oomd/hooks/converge b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/oomd/hooks/converge new file mode 100755 index 0000000..0c50932 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/oomd/hooks/converge @@ -0,0 +1,3 @@ +#!/usr/bin/env bash + +hooks/tox.sh "converge" diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/oomd/hooks/destroy b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/oomd/hooks/destroy new file mode 100755 index 0000000..b4a3f8d --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/oomd/hooks/destroy @@ -0,0 +1,3 @@ +#!/usr/bin/env bash + +hooks/tox.sh "destroy" diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/oomd/hooks/lint b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/oomd/hooks/lint new file mode 100755 index 0000000..ef226a0 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/oomd/hooks/lint @@ -0,0 +1,3 @@ +#!/usr/bin/env bash + +hooks/tox.sh "lint" diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/oomd/hooks/molecule.rc b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/oomd/hooks/molecule.rc new file mode 100644 index 0000000..a15f7c3 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/oomd/hooks/molecule.rc @@ -0,0 +1,9 @@ + +TOX_ARGS= + +if [ -n "${TOX_SCENARIO}" ] +then + TOX_ARGS="--scenario-name ${TOX_SCENARIO}" +fi + +TOX_OPTS="-e ${TOX_ANSIBLE}" diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/oomd/hooks/test b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/oomd/hooks/test new file mode 100755 index 0000000..2869139 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/oomd/hooks/test @@ -0,0 +1,3 @@ +#!/usr/bin/env bash + +hooks/tox.sh "test" diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/oomd/hooks/tox.sh b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/oomd/hooks/tox.sh new file mode 100755 index 0000000..62bb777 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/oomd/hooks/tox.sh @@ -0,0 +1,26 @@ +#!/usr/bin/env bash + +. hooks/molecule.rc + +TOX_TEST="${1}" + +if [ -f "./collections.yml" ] +then + for collection in $(grep -v "#" collections.yml | grep "^ - name: " | awk -F ': ' '{print $2}') + do + collections_installed="$(ansible-galaxy collection list | grep ${collection} 2> /dev/null)" + + if [ -z "${collections_installed}" ] + then + echo "Install the required collection '${collection}'" + ansible-galaxy collection install ${collection} + else + collection_version=$(echo "${collections_installed}" | awk -F ' ' '{print $2}') + + echo "The required collection '${collection}' is installed in version ${collection_version}." 
+ fi + done + echo "" +fi + +tox ${TOX_OPTS} -- molecule ${TOX_TEST} ${TOX_ARGS} diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/oomd/hooks/verify b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/oomd/hooks/verify new file mode 100755 index 0000000..5f436af --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/oomd/hooks/verify @@ -0,0 +1,3 @@ +#!/usr/bin/env bash + +hooks/tox.sh "verify" diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/oomd/molecule/configured/converge.yml b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/oomd/molecule/configured/converge.yml new file mode 100644 index 0000000..2cbdb1c --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/oomd/molecule/configured/converge.yml @@ -0,0 +1,7 @@ +--- +- name: converge + hosts: instance + any_errors_fatal: false + + roles: + - role: bodsch.systemd.oomd diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/oomd/molecule/configured/group_vars/all/vars.yml b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/oomd/molecule/configured/group_vars/all/vars.yml new file mode 100644 index 0000000..89c6f96 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/oomd/molecule/configured/group_vars/all/vars.yml @@ -0,0 +1,8 @@ +--- + +systemd_oomd: + swap_used_limit: "95%" # 90% + default_memory_pressure_limit: "" # 60% + default_memory_pressure_durationSec: "" # 30s + +... diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/oomd/molecule/configured/molecule.yml b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/oomd/molecule/configured/molecule.yml new file mode 100644 index 0000000..67a0c0f --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/oomd/molecule/configured/molecule.yml @@ -0,0 +1,51 @@ +dependency: + name: galaxy + +driver: + name: docker + +platforms: + - name: instance + image: "ghcr.io/bodsch/docker-ansible/ansible-${DISTRIBUTION:-debian:12}" + command: ${MOLECULE_DOCKER_COMMAND:-""} + docker_host: "${DOCKER_HOST:-unix://run/docker.sock}" + privileged: true + pre_build_image: true + cgroupns_mode: host + mounts: + - source: /sys/fs/cgroup + target: /sys/fs/cgroup + type: bind + read_only: false + volumes: + - /var/lib/containerd + capabilities: + - SYS_ADMIN + tmpfs: + - /run + - /tmp + +provisioner: + name: ansible + ansible_args: + - --diff + - -v + config_options: + defaults: + deprecation_warnings: true + callback_result_format: yaml + callbacks_enabled: profile_tasks + gathering: smart + fact_caching_timeout: 320 +scenario: + test_sequence: + - destroy + - dependency + - create + - prepare + - converge + - idempotence + - verify + - destroy +verifier: + name: testinfra diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/oomd/molecule/configured/prepare.yml b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/oomd/molecule/configured/prepare.yml new file mode 100644 index 0000000..d680e48 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/oomd/molecule/configured/prepare.yml @@ -0,0 +1,49 @@ +--- + +- name: information + hosts: all + gather_facts: true + + pre_tasks: + - name: arch- / artixlinux + when: + - ansible_facts.distribution | lower == 'archlinux' or + 
ansible_facts.os_family | lower == 'artix linux' + block: + - name: update pacman system + ansible.builtin.command: | + pacman --refresh --sync --sysupgrade --noconfirm + register: pacman + changed_when: pacman.rc != 0 + failed_when: pacman.rc != 0 + + - name: create depends service + ansible.builtin.copy: + mode: 0755 + dest: /etc/init.d/net + content: | + #!/usr/bin/openrc-run + true + when: + - ansible_facts.os_family | lower == 'artix linux' + + - name: make sure python3-apt is installed (only debian based) + ansible.builtin.package: + name: + - python3-apt + state: present + when: + - ansible_facts.os_family | lower == 'debian' + + - name: update package cache + become: true + ansible.builtin.package: + update_cache: true + + - name: environment + ansible.builtin.debug: + msg: + - "os family : {{ ansible_facts.distribution }} ({{ ansible_facts.os_family }})" + - "distribution version : {{ ansible_facts.distribution_major_version }}" + - "ansible version : {{ ansible_version.full }}" + - "python version : {{ ansible_facts.python.version.major }}.{{ ansible_facts.python.version.minor }}" diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/oomd/molecule/configured/tests/test_default.py b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/oomd/molecule/configured/tests/test_default.py new file mode 100644 index 0000000..c3879c2 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/oomd/molecule/configured/tests/test_default.py @@ -0,0 +1,146 @@ +# coding: utf-8 +from __future__ import unicode_literals + +import json +import os + +import pytest +import testinfra.utils.ansible_runner +from ansible.parsing.dataloader import DataLoader +from ansible.template import Templar + +HOST = "instance" + +testinfra_hosts = testinfra.utils.ansible_runner.AnsibleRunner( + os.environ["MOLECULE_INVENTORY_FILE"] +).get_hosts(HOST) + + +def pp_json(json_thing, sort=True, indents=2): + if type(json_thing) is str: + print(json.dumps(json.loads(json_thing), sort_keys=sort, indent=indents)) + else: + print(json.dumps(json_thing, sort_keys=sort, indent=indents)) + return None + + +def base_directory(): + """ """ + cwd = os.getcwd() + + if "group_vars" in os.listdir(cwd): + directory = "../.." + molecule_directory = "." + else: + directory = "." 
+ molecule_directory = f"molecule/{os.environ.get('MOLECULE_SCENARIO_NAME')}" + + return directory, molecule_directory + + +def read_ansible_yaml(file_name, role_name): + """ """ + read_file = None + + for e in ["yml", "yaml"]: + test_file = f"{file_name}.{e}" + if os.path.isfile(test_file): + read_file = test_file + break + + return f"file={read_file} name={role_name}" + + +@pytest.fixture() +def get_vars(host): + """ + parse ansible variables + - defaults/main.yml + - vars/main.yml + - vars/${DISTRIBUTION}.yaml + - molecule/${MOLECULE_SCENARIO_NAME}/group_vars/all/vars.yml + """ + base_dir, molecule_dir = base_directory() + distribution = host.system_info.distribution + operation_system = None + + if distribution in ["debian", "ubuntu"]: + operation_system = "debian" + elif distribution in ["redhat", "ol", "centos", "rocky", "almalinux"]: + operation_system = "redhat" + elif distribution in ["arch", "artix"]: + operation_system = f"{distribution}linux" + + # print(" -> {} / {}".format(distribution, os)) + # print(" -> {}".format(base_dir)) + + file_defaults = read_ansible_yaml(f"{base_dir}/defaults/main", "role_defaults") + file_vars = read_ansible_yaml(f"{base_dir}/vars/main", "role_vars") + file_distibution = read_ansible_yaml( + f"{base_dir}/vars/{operation_system}", "role_distibution" + ) + file_molecule = read_ansible_yaml( + f"{molecule_dir}/group_vars/all/vars", "test_vars" + ) + # file_host_molecule = read_ansible_yaml("{}/host_vars/{}/vars".format(base_dir, HOST), "host_vars") + + defaults_vars = ( + host.ansible("include_vars", file_defaults) + .get("ansible_facts") + .get("role_defaults") + ) + vars_vars = ( + host.ansible("include_vars", file_vars).get("ansible_facts").get("role_vars") + ) + distibution_vars = ( + host.ansible("include_vars", file_distibution) + .get("ansible_facts") + .get("role_distibution") + ) + molecule_vars = ( + host.ansible("include_vars", file_molecule) + .get("ansible_facts") + .get("test_vars") + ) + # host_vars = host.ansible("include_vars", file_host_molecule).get("ansible_facts").get("host_vars") + + ansible_vars = defaults_vars + ansible_vars.update(vars_vars) + ansible_vars.update(distibution_vars) + ansible_vars.update(molecule_vars) + # ansible_vars.update(host_vars) + + templar = Templar(loader=DataLoader(), variables=ansible_vars) + result = templar.template(ansible_vars, fail_on_undefined=False) + + return result + + +def local_facts(host): + """ + return local facts + """ + return host.ansible("setup").get("ansible_facts").get("ansible_local").get("oomd") + + +@pytest.mark.parametrize( + "directories", + [ + "/etc/systemd", + ], +) +def test_directories(host, directories): + d = host.file(directories) + assert d.is_directory + + +@pytest.mark.parametrize( + "files", + [ + "/etc/systemd/oomd.conf", + ], +) +def test_systemd_files(host, files): + """ """ + d = host.file(files) + assert d.is_file diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/oomd/molecule/default/converge.yml b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/oomd/molecule/default/converge.yml new file mode 100644 index 0000000..2cbdb1c --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/oomd/molecule/default/converge.yml @@ -0,0 +1,7 @@ +--- +- name: converge + hosts: instance + any_errors_fatal: false + + roles: + - role: bodsch.systemd.oomd diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/oomd/molecule/default/group_vars/all/vars.yml 
b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/oomd/molecule/default/group_vars/all/vars.yml new file mode 100644 index 0000000..c81cf5b --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/oomd/molecule/default/group_vars/all/vars.yml @@ -0,0 +1,3 @@ +--- + +... diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/oomd/molecule/default/molecule.yml b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/oomd/molecule/default/molecule.yml new file mode 100644 index 0000000..67a0c0f --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/oomd/molecule/default/molecule.yml @@ -0,0 +1,51 @@ +dependency: + name: galaxy + +driver: + name: docker + +platforms: + - name: instance + image: "ghcr.io/bodsch/docker-ansible/ansible-${DISTRIBUTION:-debian:12}" + command: ${MOLECULE_DOCKER_COMMAND:-""} + docker_host: "${DOCKER_HOST:-unix://run/docker.sock}" + privileged: true + pre_build_image: true + cgroupns_mode: host + mounts: + - source: /sys/fs/cgroup + target: /sys/fs/cgroup + type: bind + read_only: false + volumes: + - /var/lib/containerd + capabilities: + - SYS_ADMIN + tmpfs: + - /run + - /tmp + +provisioner: + name: ansible + ansible_args: + - --diff + - -v + config_options: + defaults: + deprecation_warnings: true + callback_result_format: yaml + callbacks_enabled: profile_tasks + gathering: smart + fact_caching_timeout: 320 +scenario: + test_sequence: + - destroy + - dependency + - create + - prepare + - converge + - idempotence + - verify + - destroy +verifier: + name: testinfra diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/oomd/molecule/default/prepare.yml b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/oomd/molecule/default/prepare.yml new file mode 100644 index 0000000..d680e48 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/oomd/molecule/default/prepare.yml @@ -0,0 +1,49 @@ +--- + +- name: information + hosts: all + gather_facts: true + + pre_tasks: + - name: arch- / artixlinux + when: + - ansible_facts.distribution | lower == 'archlinux' or + ansible_facts.os_family | lower == 'artix linux' + block: + - name: update pacman system + ansible.builtin.command: | + pacman --refresh --sync --sysupgrade --noconfirm + register: pacman + changed_when: pacman.rc != 0 + failed_when: pacman.rc != 0 + + - name: create depends service + ansible.builtin.copy: + mode: 0755 + dest: /etc/init.d/net + content: | + #!/usr/bin/openrc-run + true + when: + - ansible_facts.os_family | lower == 'artix linux' + + - name: make sure python3-apt is installed (only debian based) + ansible.builtin.package: + name: + - python3-apt + state: present + when: + - ansible_facts.os_family | lower == 'debian' + + - name: update package cache + become: true + ansible.builtin.package: + update_cache: true + + - name: environment + ansible.builtin.debug: + msg: + - "os family : {{ ansible_facts.distribution }} ({{ ansible_facts.os_family }})" + - "distribution version : {{ ansible_facts.distribution_major_version }}" + - "ansible version : {{ ansible_version.full }}" + - "python version : {{ ansible_facts.python.version.major }}.{{ ansible_facts.python.version.minor }}" diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/oomd/molecule/default/tests/test_default.py 
b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/oomd/molecule/default/tests/test_default.py new file mode 100644 index 0000000..c3879c2 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/oomd/molecule/default/tests/test_default.py @@ -0,0 +1,146 @@ +# coding: utf-8 +from __future__ import unicode_literals + +import json +import os + +import pytest +import testinfra.utils.ansible_runner +from ansible.parsing.dataloader import DataLoader +from ansible.template import Templar + +HOST = "instance" + +testinfra_hosts = testinfra.utils.ansible_runner.AnsibleRunner( + os.environ["MOLECULE_INVENTORY_FILE"] +).get_hosts(HOST) + + +def pp_json(json_thing, sort=True, indents=2): + if type(json_thing) is str: + print(json.dumps(json.loads(json_thing), sort_keys=sort, indent=indents)) + else: + print(json.dumps(json_thing, sort_keys=sort, indent=indents)) + return None + + +def base_directory(): + """ """ + cwd = os.getcwd() + + if "group_vars" in os.listdir(cwd): + directory = "../.." + molecule_directory = "." + else: + directory = "." + molecule_directory = f"molecule/{os.environ.get('MOLECULE_SCENARIO_NAME')}" + + return directory, molecule_directory + + +def read_ansible_yaml(file_name, role_name): + """ """ + read_file = None + + for e in ["yml", "yaml"]: + test_file = f"{file_name}.{e}" + if os.path.isfile(test_file): + read_file = test_file + break + + return f"file={read_file} name={role_name}" + + +@pytest.fixture() +def get_vars(host): + """ + parse ansible variables + - defaults/main.yml + - vars/main.yml + - vars/${DISTRIBUTION}.yaml + - molecule/${MOLECULE_SCENARIO_NAME}/group_vars/all/vars.yml + """ + base_dir, molecule_dir = base_directory() + distribution = host.system_info.distribution + operation_system = None + + if distribution in ["debian", "ubuntu"]: + operation_system = "debian" + elif distribution in ["redhat", "ol", "centos", "rocky", "almalinux"]: + operation_system = "redhat" + elif distribution in ["arch", "artix"]: + operation_system = f"{distribution}linux" + + # print(" -> {} / {}".format(distribution, os)) + # print(" -> {}".format(base_dir)) + + file_defaults = read_ansible_yaml(f"{base_dir}/defaults/main", "role_defaults") + file_vars = read_ansible_yaml(f"{base_dir}/vars/main", "role_vars") + file_distibution = read_ansible_yaml( + f"{base_dir}/vars/{operation_system}", "role_distibution" + ) + file_molecule = read_ansible_yaml( + f"{molecule_dir}/group_vars/all/vars", "test_vars" + ) + # file_host_molecule = read_ansible_yaml("{}/host_vars/{}/vars".format(base_dir, HOST), "host_vars") + + defaults_vars = ( + host.ansible("include_vars", file_defaults) + .get("ansible_facts") + .get("role_defaults") + ) + vars_vars = ( + host.ansible("include_vars", file_vars).get("ansible_facts").get("role_vars") + ) + distibution_vars = ( + host.ansible("include_vars", file_distibution) + .get("ansible_facts") + .get("role_distibution") + ) + molecule_vars = ( + host.ansible("include_vars", file_molecule) + .get("ansible_facts") + .get("test_vars") + ) + # host_vars = host.ansible("include_vars", file_host_molecule).get("ansible_facts").get("host_vars") + + ansible_vars = defaults_vars + ansible_vars.update(vars_vars) + ansible_vars.update(distibution_vars) + ansible_vars.update(molecule_vars) + # ansible_vars.update(host_vars) + + templar = Templar(loader=DataLoader(), variables=ansible_vars) + result = templar.template(ansible_vars, fail_on_undefined=False) + + return result + + +def local_facts(host): + """ + 
return local facts + """ + return host.ansible("setup").get("ansible_facts").get("ansible_local").get("oomd") + + +@pytest.mark.parametrize( + "directories", + [ + "/etc/systemd", + ], +) +def test_directories(host, directories): + d = host.file(directories) + assert d.is_directory + + +@pytest.mark.parametrize( + "files", + [ + "/etc/systemd/oomd.conf", + ], +) +def test_systemd_files(host, files): + """ """ + d = host.file(files) + assert d.is_file diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/oomd/tasks/main.yml b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/oomd/tasks/main.yml new file mode 100644 index 0000000..688a34c --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/oomd/tasks/main.yml @@ -0,0 +1,15 @@ +--- + +- name: merge systemd oomd configuration between defaults and custom + ansible.builtin.set_fact: + systemd_oomd: "{{ systemd_defaults_oomd | combine(systemd_oomd, recursive=True) }}" + +- name: create systemd oomd configuration + ansible.builtin.template: + src: systemd/oomd.conf.j2 + dest: /etc/systemd/oomd.conf + mode: 0644 + owner: root + group: root + +... diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/oomd/templates/systemd/oomd.conf.j2 b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/oomd/templates/systemd/oomd.conf.j2 new file mode 100644 index 0000000..6bbc385 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/oomd/templates/systemd/oomd.conf.j2 @@ -0,0 +1,32 @@ +#jinja2: trim_blocks: True, lstrip_blocks: True +# {{ ansible_managed }} +# +# This file is part of systemd. +# +# systemd is free software; you can redistribute it and/or modify it under the +# terms of the GNU Lesser General Public License as published by the Free +# Software Foundation; either version 2.1 of the License, or (at your option) +# any later version. +# +# Entries in this file show the compile time defaults. Local configuration +# should be created by either modifying this file, or by creating "drop-ins" in +# the oomd.conf.d/ subdirectory. The latter is generally recommended. +# Defaults can be restored by simply deleting this file and all drop-ins. +# +# Use 'systemd-analyze cat-config systemd/oomd.conf' to display the full config. 
+# +# See oomd.conf(5) for details + +[OOM] +{% if systemd_oomd.swap_used_limit is defined and + systemd_oomd.swap_used_limit | string | length > 0 %} +SwapUsedLimit = {{ systemd_oomd.swap_used_limit }} +{% endif %} +{% if systemd_oomd.default_memory_pressure_limit is defined and + systemd_oomd.default_memory_pressure_limit | string | length > 0 %} +DefaultMemoryPressureLimit = {{ systemd_oomd.default_memory_pressure_limit }} +{% endif %} +{% if systemd_oomd.default_memory_pressure_durationSec is defined and + systemd_oomd.default_memory_pressure_durationSec | string | length > 0 %} +DefaultMemoryPressureDurationSec = {{ systemd_oomd.default_memory_pressure_durationSec }} +{% endif %} diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/oomd/vars/main.yml b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/oomd/vars/main.yml new file mode 100644 index 0000000..ff7ca21 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/oomd/vars/main.yml @@ -0,0 +1,8 @@ +--- + +systemd_defaults_oomd: + swap_used_limit: "" # 90% + default_memory_pressure_limit: "" # 60% + default_memory_pressure_durationSec: "" # 30s + +... diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/pstore/.ansible-lint b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/pstore/.ansible-lint new file mode 100644 index 0000000..5343e85 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/pstore/.ansible-lint @@ -0,0 +1,6 @@ +--- + +skip_list: + - name[casing] + - name[template] + - syntax-check[specific] diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/pstore/.editorconfig b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/pstore/.editorconfig new file mode 100644 index 0000000..898cdbd --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/pstore/.editorconfig @@ -0,0 +1,23 @@ +# EditorConfig helps developers define and maintain consistent +# coding styles between different editors and IDEs +# https://editorconfig.org/ + + +root = true + +[*] +indent_style = space +indent_size = 2 + +end_of_line = lf +charset = utf-8 +trim_trailing_whitespace = true +insert_final_newline = true +max_line_length = 100 + +[*.py] +indent_style = space +indent_size = 4 + +[*.md] +trim_trailing_whitespace = false diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/pstore/.flake8 b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/pstore/.flake8 new file mode 100644 index 0000000..1962f7e --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/pstore/.flake8 @@ -0,0 +1,19 @@ +[flake8] + +# E221 multiple spaces before operator +# E251 unexpected spaces around keyword / parameter equals + +ignore = E221,E251 + +exclude = + # No need to traverse our git directory + .git, + # There's no value in checking cache directories + __pycache__, + .tox + +# E203: https://github.com/python/black/issues/315 +# ignore = D,E741,W503,W504,H,E501,E203 + +max-line-length = 195 + diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/pstore/.gitignore b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/pstore/.gitignore new file mode 100644 index 0000000..3d35e6a --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/pstore/.gitignore 
@@ -0,0 +1,6 @@ +.tox +.galaxy_install_info +*kate-swp +__pycache__ +.cache +.directory diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/pstore/.yamllint b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/pstore/.yamllint new file mode 100644 index 0000000..e3f52af --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/pstore/.yamllint @@ -0,0 +1,36 @@ +--- +# Based on ansible-lint config +extends: default + +rules: + braces: + max-spaces-inside: 1 + level: error + brackets: + max-spaces-inside: 1 + level: error + colons: + max-spaces-after: -1 + level: error + commas: + max-spaces-after: -1 + level: error + comments: disable + comments-indentation: disable + document-start: disable + empty-lines: + max: 3 + level: error + hyphens: + level: error + indentation: + spaces: 2 + key-duplicates: enable + line-length: + max: 195 + level: warning + new-line-at-end-of-file: disable + new-lines: + type: unix + trailing-spaces: disable + truthy: disable diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/pstore/CONTRIBUTING.md b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/pstore/CONTRIBUTING.md new file mode 100644 index 0000000..e3cd4cc --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/pstore/CONTRIBUTING.md @@ -0,0 +1,31 @@ +Contributing +============ +If you want to contribute to a project and make it better, your help is very welcome. +Contributing is also a great way to learn more about social coding on GitHub, new technologies +and their ecosystems and how to make constructive, helpful bug reports, feature requests and the +noblest of all contributions: a good, clean pull request. + +### How to make a clean pull request + +Look for a project's contribution instructions. If there are any, follow them. + +- Create a personal fork of the project on GitHub. +- Clone the fork on your local machine. Your remote repo on GitHub is called `origin`. +- Add the original repository as a remote called `upstream`. +- If you created your fork a while ago, be sure to pull upstream changes into your local repository. +- Create a new branch to work on! Branch from `develop` if it exists, else from `master`. +- Implement/fix your feature, comment your code. +- Follow the code style of the project, including indentation. +- If the project has tests, run them! +- Write or adapt tests as needed. +- Add or change the documentation as needed. +- Squash your commits into a single commit. Create a new branch if necessary. +- Push your branch to your fork on GitHub, the remote `origin`. +- From your fork, open a pull request against the correct branch. Target the project's `develop` branch if there is one, else go for `master`! +- If the maintainer requests further changes, just push them to your branch. The PR will be updated automatically. +- Once the pull request is approved and merged, you can pull the changes from `upstream` to your local repo and delete + your extra branch(es). + +And last but not least: Always write your commit messages in the present tense. +Your commit message should describe what the commit, when applied, does to the +code – not what you did to the code.
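For illustration, the workflow described above corresponds roughly to the following git commands (repository URLs and branch names are placeholders):

    git clone git@github.com:<you>/<project>.git
    cd <project>
    git remote add upstream https://github.com/<owner>/<project>.git
    git fetch upstream
    git checkout -b my-feature upstream/develop   # or upstream/master
    # ... implement, commit, run the tests ...
    git rebase -i upstream/develop                # squash into a single commit
    git push origin my-feature                    # then open the pull request from the fork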
diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/pstore/LICENSE b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/pstore/LICENSE new file mode 100644 index 0000000..8c8472f --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/pstore/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." 
+ + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. 
+ + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. 
We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright 2020-2021 Bodo Schulz + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/pstore/Makefile b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/pstore/Makefile new file mode 100644 index 0000000..bfaab7c --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/pstore/Makefile @@ -0,0 +1,22 @@ +# +export TOX_SCENARIO ?= default +export TOX_ANSIBLE ?= ansible_8.5 + +.PHONY: converge destroy verify test lint + +default: converge + +converge: + @hooks/converge + +destroy: + @hooks/destroy + +verify: + @hooks/verify + +test: + @hooks/test + +lint: + @hooks/lint diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/pstore/README.md b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/pstore/README.md new file mode 100644 index 0000000..f5aeee4 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/pstore/README.md @@ -0,0 +1,2 @@ + +# https://www.freedesktop.org/software/systemd/man/latest/pstore.conf.html diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/pstore/defaults/main.yml b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/pstore/defaults/main.yml new file mode 100644 index 0000000..be51583 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/pstore/defaults/main.yml @@ -0,0 +1,6 @@ +--- + +systemd_pstore: {} + # storage: external + # unlink: true +... 
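As with the oomd role, the empty `systemd_pstore` dictionary can be overridden at run time. A sketch under the same assumptions (the playbook name is a placeholder, the values illustrative), followed by inspecting the rendered file on the target host:

    # hypothetical invocation; playbook.yml is a placeholder
    ansible-playbook playbook.yml \
      -e '{"systemd_pstore": {"storage": "external", "unlink": true}}'
    # afterwards, on the target host, show the effective configuration
    systemd-analyze cat-config systemd/pstore.conf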
diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/pstore/hooks/converge b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/pstore/hooks/converge new file mode 100755 index 0000000..0c50932 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/pstore/hooks/converge @@ -0,0 +1,3 @@ +#!/usr/bin/env bash + +hooks/tox.sh "converge" diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/pstore/hooks/destroy b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/pstore/hooks/destroy new file mode 100755 index 0000000..b4a3f8d --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/pstore/hooks/destroy @@ -0,0 +1,3 @@ +#!/usr/bin/env bash + +hooks/tox.sh "destroy" diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/pstore/hooks/lint b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/pstore/hooks/lint new file mode 100755 index 0000000..ef226a0 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/pstore/hooks/lint @@ -0,0 +1,3 @@ +#!/usr/bin/env bash + +hooks/tox.sh "lint" diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/pstore/hooks/molecule.rc b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/pstore/hooks/molecule.rc new file mode 100644 index 0000000..a15f7c3 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/pstore/hooks/molecule.rc @@ -0,0 +1,9 @@ + +TOX_ARGS= + +if [ -n "${TOX_SCENARIO}" ] +then + TOX_ARGS="--scenario-name ${TOX_SCENARIO}" +fi + +TOX_OPTS="-e ${TOX_ANSIBLE}" diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/pstore/hooks/test b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/pstore/hooks/test new file mode 100755 index 0000000..2869139 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/pstore/hooks/test @@ -0,0 +1,3 @@ +#!/usr/bin/env bash + +hooks/tox.sh "test" diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/pstore/hooks/tox.sh b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/pstore/hooks/tox.sh new file mode 100755 index 0000000..62bb777 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/pstore/hooks/tox.sh @@ -0,0 +1,26 @@ +#!/usr/bin/env bash + +. hooks/molecule.rc + +TOX_TEST="${1}" + +if [ -f "./collections.yml" ] +then + for collection in $(grep -v "#" collections.yml | grep "^ - name: " | awk -F ': ' '{print $2}') + do + collections_installed="$(ansible-galaxy collection list | grep ${collection} 2> /dev/null)" + + if [ -z "${collections_installed}" ] + then + echo "Install the required collection '${collection}'" + ansible-galaxy collection install ${collection} + else + collection_version=$(echo "${collections_installed}" | awk -F ' ' '{print $2}') + + echo "The required collection '${collection}' is installed in version ${collection_version}." 
+ fi + done + echo "" +fi + +tox ${TOX_OPTS} -- molecule ${TOX_TEST} ${TOX_ARGS} diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/pstore/hooks/verify b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/pstore/hooks/verify new file mode 100755 index 0000000..5f436af --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/pstore/hooks/verify @@ -0,0 +1,3 @@ +#!/usr/bin/env bash + +hooks/tox.sh "verify" diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/pstore/molecule/configured/converge.yml b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/pstore/molecule/configured/converge.yml new file mode 100644 index 0000000..ed99dd5 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/pstore/molecule/configured/converge.yml @@ -0,0 +1,7 @@ +--- +- name: converge + hosts: instance + any_errors_fatal: false + + roles: + - role: bodsch.systemd.pstore diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/pstore/molecule/configured/group_vars/all/vars.yml b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/pstore/molecule/configured/group_vars/all/vars.yml new file mode 100644 index 0000000..49accb9 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/pstore/molecule/configured/group_vars/all/vars.yml @@ -0,0 +1,7 @@ +--- + +systemd_pstore: + storage: "journal" # external + unlink: "" # true + +... diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/pstore/molecule/configured/molecule.yml b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/pstore/molecule/configured/molecule.yml new file mode 100644 index 0000000..67a0c0f --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/pstore/molecule/configured/molecule.yml @@ -0,0 +1,51 @@ +dependency: + name: galaxy + +driver: + name: docker + +platforms: + - name: instance + image: "ghcr.io/bodsch/docker-ansible/ansible-${DISTRIBUTION:-debian:12}" + command: ${MOLECULE_DOCKER_COMMAND:-""} + docker_host: "${DOCKER_HOST:-unix://run/docker.sock}" + privileged: true + pre_build_image: true + cgroupns_mode: host + mounts: + - source: /sys/fs/cgroup + target: /sys/fs/cgroup + type: bind + read_only: false + volumes: + - /var/lib/containerd + capabilities: + - SYS_ADMIN + tmpfs: + - /run + - /tmp + +provisioner: + name: ansible + ansible_args: + - --diff + - -v + config_options: + defaults: + deprecation_warnings: true + callback_result_format: yaml + callbacks_enabled: profile_tasks + gathering: smart + fact_caching_timeout: 320 +scenario: + test_sequence: + - destroy + - dependency + - create + - prepare + - converge + - idempotence + - verify + - destroy +verifier: + name: testinfra diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/pstore/molecule/configured/prepare.yml b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/pstore/molecule/configured/prepare.yml new file mode 100644 index 0000000..d680e48 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/pstore/molecule/configured/prepare.yml @@ -0,0 +1,49 @@ +--- + +- name: information + hosts: all + gather_facts: true + + pre_tasks: + - name: arch- / artixlinux + when: + - ansible_facts.distribution | lower == 'archlinux' or + ansible_facts.os_family | lower == 
'artix linux' + block: + - name: update pacman system + ansible.builtin.command: | + pacman --refresh --sync --sysupgrade --noconfirm + register: pacman + changed_when: pacman.rc != 0 + failed_when: pacman.rc != 0 + + - name: create depends service + ansible.builtin.copy: + mode: 0755 + dest: /etc/init.d/net + content: | + #!/usr/bin/openrc-run + true + when: + - ansible_facts.os_family | lower == 'artix linux' + + - name: make sure python3-apt is installed (only debian based) + ansible.builtin.package: + name: + - python3-apt + state: present + when: + - ansible_facts.os_family | lower == 'debian' + + - name: update package cache + become: true + ansible.builtin.package: + update_cache: true + + - name: environment + ansible.builtin.debug: + msg: + - "os family : {{ ansible_facts.distribution }} ({{ ansible_facts.os_family }})" + - "distribution version : {{ ansible_facts.distribution_major_version }}" + - "ansible version : {{ ansible_version.full }}" + - "python version : {{ ansible_facts.python.version.major }}.{{ ansible_facts.python.version.minor }}" diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/pstore/molecule/configured/tests/test_default.py b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/pstore/molecule/configured/tests/test_default.py new file mode 100644 index 0000000..e2ebe58 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/pstore/molecule/configured/tests/test_default.py @@ -0,0 +1,146 @@ +# coding: utf-8 +from __future__ import unicode_literals + +import json +import os + +import pytest +import testinfra.utils.ansible_runner +from ansible.parsing.dataloader import DataLoader +from ansible.template import Templar + +HOST = "instance" + +testinfra_hosts = testinfra.utils.ansible_runner.AnsibleRunner( + os.environ["MOLECULE_INVENTORY_FILE"] +).get_hosts(HOST) + + +def pp_json(json_thing, sort=True, indents=2): + if type(json_thing) is str: + print(json.dumps(json.loads(json_thing), sort_keys=sort, indent=indents)) + else: + print(json.dumps(json_thing, sort_keys=sort, indent=indents)) + return None + + +def base_directory(): + """ """ + cwd = os.getcwd() + + if "group_vars" in os.listdir(cwd): + directory = "../.." + molecule_directory = "." + else: + directory = "." 
+ molecule_directory = f"molecule/{os.environ.get('MOLECULE_SCENARIO_NAME')}" + + return directory, molecule_directory + + +def read_ansible_yaml(file_name, role_name): + """ """ + read_file = None + + for e in ["yml", "yaml"]: + test_file = f"{file_name}.{e}" + if os.path.isfile(test_file): + read_file = test_file + break + + return f"file={read_file} name={role_name}" + + +@pytest.fixture() +def get_vars(host): + """ + parse ansible variables + - defaults/main.yml + - vars/main.yml + - vars/${DISTRIBUTION}.yaml + - molecule/${MOLECULE_SCENARIO_NAME}/group_vars/all/vars.yml + """ + base_dir, molecule_dir = base_directory() + distribution = host.system_info.distribution + operation_system = None + + if distribution in ["debian", "ubuntu"]: + operation_system = "debian" + elif distribution in ["redhat", "ol", "centos", "rocky", "almalinux"]: + operation_system = "redhat" + elif distribution in ["arch", "artix"]: + operation_system = f"{distribution}linux" + + # print(" -> {} / {}".format(distribution, os)) + # print(" -> {}".format(base_dir)) + + file_defaults = read_ansible_yaml(f"{base_dir}/defaults/main", "role_defaults") + file_vars = read_ansible_yaml(f"{base_dir}/vars/main", "role_vars") + file_distibution = read_ansible_yaml( + f"{base_dir}/vars/{operation_system}", "role_distibution" + ) + file_molecule = read_ansible_yaml( + f"{molecule_dir}/group_vars/all/vars", "test_vars" + ) + # file_host_molecule = read_ansible_yaml("{}/host_vars/{}/vars".format(base_dir, HOST), "host_vars") + + defaults_vars = ( + host.ansible("include_vars", file_defaults) + .get("ansible_facts") + .get("role_defaults") + ) + vars_vars = ( + host.ansible("include_vars", file_vars).get("ansible_facts").get("role_vars") + ) + distibution_vars = ( + host.ansible("include_vars", file_distibution) + .get("ansible_facts") + .get("role_distibution") + ) + molecule_vars = ( + host.ansible("include_vars", file_molecule) + .get("ansible_facts") + .get("test_vars") + ) + # host_vars = host.ansible("include_vars", file_host_molecule).get("ansible_facts").get("host_vars") + + ansible_vars = defaults_vars + ansible_vars.update(vars_vars) + ansible_vars.update(distibution_vars) + ansible_vars.update(molecule_vars) + # ansible_vars.update(host_vars) + + templar = Templar(loader=DataLoader(), variables=ansible_vars) + result = templar.template(ansible_vars, fail_on_undefined=False) + + return result + + +def local_facts(host): + """ + return local facts + """ + return host.ansible("setup").get("ansible_facts").get("ansible_local").get("pstore") + + +@pytest.mark.parametrize( + "directories", + [ + "/etc/systemd", + ], +) +def test_directories(host, directories): + d = host.file(directories) + assert d.is_directory + + +@pytest.mark.parametrize( + "files", + [ + "/etc/systemd/pstore.conf", + ], +) +def test_systemd_files(host, files): + """ """ + d = host.file(files) + assert d.is_file diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/pstore/molecule/default/converge.yml b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/pstore/molecule/default/converge.yml new file mode 100644 index 0000000..ed99dd5 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/pstore/molecule/default/converge.yml @@ -0,0 +1,7 @@ +--- +- name: converge + hosts: instance + any_errors_fatal: false + + roles: + - role: bodsch.systemd.pstore diff --git 
a/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/pstore/molecule/default/group_vars/all/vars.yml b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/pstore/molecule/default/group_vars/all/vars.yml new file mode 100644 index 0000000..c81cf5b --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/pstore/molecule/default/group_vars/all/vars.yml @@ -0,0 +1,3 @@ +--- + +... diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/pstore/molecule/default/molecule.yml b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/pstore/molecule/default/molecule.yml new file mode 100644 index 0000000..67a0c0f --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/pstore/molecule/default/molecule.yml @@ -0,0 +1,51 @@ +dependency: + name: galaxy + +driver: + name: docker + +platforms: + - name: instance + image: "ghcr.io/bodsch/docker-ansible/ansible-${DISTRIBUTION:-debian:12}" + command: ${MOLECULE_DOCKER_COMMAND:-""} + docker_host: "${DOCKER_HOST:-unix://run/docker.sock}" + privileged: true + pre_build_image: true + cgroupns_mode: host + mounts: + - source: /sys/fs/cgroup + target: /sys/fs/cgroup + type: bind + read_only: false + volumes: + - /var/lib/containerd + capabilities: + - SYS_ADMIN + tmpfs: + - /run + - /tmp + +provisioner: + name: ansible + ansible_args: + - --diff + - -v + config_options: + defaults: + deprecation_warnings: true + callback_result_format: yaml + callbacks_enabled: profile_tasks + gathering: smart + fact_caching_timeout: 320 +scenario: + test_sequence: + - destroy + - dependency + - create + - prepare + - converge + - idempotence + - verify + - destroy +verifier: + name: testinfra diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/pstore/molecule/default/prepare.yml b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/pstore/molecule/default/prepare.yml new file mode 100644 index 0000000..d680e48 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/pstore/molecule/default/prepare.yml @@ -0,0 +1,49 @@ +--- + +- name: information + hosts: all + gather_facts: true + + pre_tasks: + - name: arch- / artixlinux + when: + - ansible_facts.distribution | lower == 'archlinux' or + ansible_facts.os_family | lower == 'artix linux' + block: + - name: update pacman system + ansible.builtin.command: | + pacman --refresh --sync --sysupgrade --noconfirm + register: pacman + changed_when: pacman.rc != 0 + failed_when: pacman.rc != 0 + + - name: create depends service + ansible.builtin.copy: + mode: 0755 + dest: /etc/init.d/net + content: | + #!/usr/bin/openrc-run + true + when: + - ansible_facts.os_family | lower == 'artix linux' + + - name: make sure python3-apt is installed (only debian based) + ansible.builtin.package: + name: + - python3-apt + state: present + when: + - ansible_facts.os_family | lower == 'debian' + + - name: update package cache + become: true + ansible.builtin.package: + update_cache: true + + - name: environment + ansible.builtin.debug: + msg: + - "os family : {{ ansible_facts.distribution }} ({{ ansible_facts.os_family }})" + - "distribution version : {{ ansible_facts.distribution_major_version }}" + - "ansible version : {{ ansible_version.full }}" + - "python version : {{ ansible_facts.python.version.major }}.{{ ansible_facts.python.version.minor }}" diff --git 
a/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/pstore/molecule/default/tests/test_default.py b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/pstore/molecule/default/tests/test_default.py
new file mode 100644
index 0000000..e2ebe58
--- /dev/null
+++ b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/pstore/molecule/default/tests/test_default.py
@@ -0,0 +1,146 @@
+# coding: utf-8
+from __future__ import unicode_literals
+
+import json
+import os
+
+import pytest
+import testinfra.utils.ansible_runner
+from ansible.parsing.dataloader import DataLoader
+from ansible.template import Templar
+
+HOST = "instance"
+
+testinfra_hosts = testinfra.utils.ansible_runner.AnsibleRunner(
+    os.environ["MOLECULE_INVENTORY_FILE"]
+).get_hosts(HOST)
+
+
+def pp_json(json_thing, sort=True, indents=2):
+    if type(json_thing) is str:
+        print(json.dumps(json.loads(json_thing), sort_keys=sort, indent=indents))
+    else:
+        print(json.dumps(json_thing, sort_keys=sort, indent=indents))
+    return None
+
+
+def base_directory():
+    """ """
+    cwd = os.getcwd()
+
+    if "group_vars" in os.listdir(cwd):
+        directory = "../.."
+        molecule_directory = "."
+    else:
+        directory = "."
+        molecule_directory = f"molecule/{os.environ.get('MOLECULE_SCENARIO_NAME')}"
+
+    return directory, molecule_directory
+
+
+def read_ansible_yaml(file_name, role_name):
+    """ """
+    read_file = None
+
+    for e in ["yml", "yaml"]:
+        test_file = f"{file_name}.{e}"
+        if os.path.isfile(test_file):
+            read_file = test_file
+            break
+
+    return f"file={read_file} name={role_name}"
+
+
+@pytest.fixture()
+def get_vars(host):
+    """
+    parse ansible variables
+    - defaults/main.yml
+    - vars/main.yml
+    - vars/${DISTRIBUTION}.yaml
+    - molecule/${MOLECULE_SCENARIO_NAME}/group_vars/all/vars.yml
+    """
+    base_dir, molecule_dir = base_directory()
+    distribution = host.system_info.distribution
+    operating_system = None
+
+    if distribution in ["debian", "ubuntu"]:
+        operating_system = "debian"
+    elif distribution in ["redhat", "ol", "centos", "rocky", "almalinux"]:
+        operating_system = "redhat"
+    elif distribution in ["arch", "artix"]:
+        operating_system = f"{distribution}linux"
+
+    # print(" -> {} / {}".format(distribution, operating_system))
+    # print(" -> {}".format(base_dir))
+
+    file_defaults = read_ansible_yaml(f"{base_dir}/defaults/main", "role_defaults")
+    file_vars = read_ansible_yaml(f"{base_dir}/vars/main", "role_vars")
+    file_distribution = read_ansible_yaml(
+        f"{base_dir}/vars/{operating_system}", "role_distribution"
+    )
+    file_molecule = read_ansible_yaml(
+        f"{molecule_dir}/group_vars/all/vars", "test_vars"
+    )
+    # file_host_molecule = read_ansible_yaml("{}/host_vars/{}/vars".format(base_dir, HOST), "host_vars")
+
+    defaults_vars = (
+        host.ansible("include_vars", file_defaults)
+        .get("ansible_facts")
+        .get("role_defaults")
+    )
+    vars_vars = (
+        host.ansible("include_vars", file_vars).get("ansible_facts").get("role_vars")
+    )
+    distribution_vars = (
+        host.ansible("include_vars", file_distribution)
+        .get("ansible_facts")
+        .get("role_distribution")
+    )
+    molecule_vars = (
+        host.ansible("include_vars", file_molecule)
+        .get("ansible_facts")
+        .get("test_vars")
+    )
+    # host_vars = host.ansible("include_vars", file_host_molecule).get("ansible_facts").get("host_vars")
+
+    ansible_vars = defaults_vars
+    ansible_vars.update(vars_vars)
+    ansible_vars.update(distribution_vars)
+    ansible_vars.update(molecule_vars)
+    # ansible_vars.update(host_vars)
+
+    templar = Templar(loader=DataLoader(), variables=ansible_vars)
+
result = templar.template(ansible_vars, fail_on_undefined=False) + + return result + + +def local_facts(host): + """ + return local facts + """ + return host.ansible("setup").get("ansible_facts").get("ansible_local").get("pstore") + + +@pytest.mark.parametrize( + "directories", + [ + "/etc/systemd", + ], +) +def test_directories(host, directories): + d = host.file(directories) + assert d.is_directory + + +@pytest.mark.parametrize( + "files", + [ + "/etc/systemd/pstore.conf", + ], +) +def test_systemd_files(host, files): + """ """ + d = host.file(files) + assert d.is_file diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/pstore/tasks/main.yml b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/pstore/tasks/main.yml new file mode 100644 index 0000000..276434c --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/pstore/tasks/main.yml @@ -0,0 +1,15 @@ +--- + +- name: merge systemd pstore configuration between defaults and custom + ansible.builtin.set_fact: + systemd_pstore: "{{ systemd_defaults_pstore | combine(systemd_pstore, recursive=True) }}" + +- name: create systemd pstore configuration + ansible.builtin.template: + src: systemd/pstore.conf.j2 + dest: /etc/systemd/pstore.conf + mode: 0644 + owner: root + group: root + +... diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/pstore/templates/systemd/pstore.conf.j2 b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/pstore/templates/systemd/pstore.conf.j2 new file mode 100644 index 0000000..bd9a235 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/pstore/templates/systemd/pstore.conf.j2 @@ -0,0 +1,27 @@ +#jinja2: trim_blocks: True, lstrip_blocks: True +# {{ ansible_managed }} +# +# This file is part of systemd. +# +# systemd is free software; you can redistribute it and/or modify it under the +# terms of the GNU Lesser General Public License as published by the Free +# Software Foundation; either version 2.1 of the License, or (at your option) +# any later version. +# +# Entries in this file show the compile time defaults. Local configuration +# should be created by either modifying this file, or by creating "drop-ins" in +# the pstore.conf.d/ subdirectory. The latter is generally recommended. +# Defaults can be restored by simply deleting this file and all drop-ins. +# +# See pstore.conf(5) for details. + +[PStore] +{% if systemd_pstore.storage is defined and + systemd_pstore.storage | string | length > 0 and + systemd_pstore.storage in ["none", "external", "journal"] %} +Storage = {{ systemd_pstore.storage }} +{% endif %} +{% if systemd_pstore.unlink is defined and + systemd_pstore.unlink | string | length > 0 %} +Unlink = {{ systemd_pstore.unlink }} +{% endif %} diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/pstore/vars/main.yml b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/pstore/vars/main.yml new file mode 100644 index 0000000..00404f4 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/pstore/vars/main.yml @@ -0,0 +1,7 @@ +--- + +systemd_defaults_pstore: + storage: "" # external | One of "none", "external", and "journal" + unlink: "" # true + +... 
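The pstore role above is a small, self-contained pattern: tasks/main.yml merges the caller's systemd_pstore dictionary over systemd_defaults_pstore with combine(recursive=True), and the Jinja2 template only emits a directive when its key is non-empty (and, for Storage, one of the whitelisted values). A minimal usage sketch, assuming the bodsch.systemd collection is installed; the values shown are illustrative, not shipped defaults:

    ---
    # hypothetical playbook: enable persistent pstore archiving
    - name: configure systemd-pstore
      hosts: all
      become: true
      vars:
        systemd_pstore:
          storage: "external"   # accepted values: "none", "external", "journal"
          unlink: "yes"         # rendered verbatim, so use systemd's yes/no form
      roles:
        - role: bodsch.systemd.pstore

With these vars the template renders Storage = external and Unlink = yes into /etc/systemd/pstore.conf; a key left at its empty-string default fails the template's length > 0 guard and is omitted, leaving systemd's compile-time default in effect.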
diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/resolved/.ansible-lint b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/resolved/.ansible-lint new file mode 100644 index 0000000..5343e85 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/resolved/.ansible-lint @@ -0,0 +1,6 @@ +--- + +skip_list: + - name[casing] + - name[template] + - syntax-check[specific] diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/resolved/.editorconfig b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/resolved/.editorconfig new file mode 100644 index 0000000..898cdbd --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/resolved/.editorconfig @@ -0,0 +1,23 @@ +# EditorConfig helps developers define and maintain consistent +# coding styles between different editors and IDEs +# https://editorconfig.org/ + + +root = true + +[*] +indent_style = space +indent_size = 2 + +end_of_line = lf +charset = utf-8 +trim_trailing_whitespace = true +insert_final_newline = true +max_line_length = 100 + +[*.py] +indent_style = space +indent_size = 4 + +[*.md] +trim_trailing_whitespace = false diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/resolved/.flake8 b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/resolved/.flake8 new file mode 100644 index 0000000..1962f7e --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/resolved/.flake8 @@ -0,0 +1,19 @@ +[flake8] + +# E221 multiple spaces before operator +# E251 unexpected spaces around keyword / parameter equals + +ignore = E221,E251 + +exclude = + # No need to traverse our git directory + .git, + # There's no value in checking cache directories + __pycache__, + .tox + +# E203: https://github.com/python/black/issues/315 +# ignore = D,E741,W503,W504,H,E501,E203 + +max-line-length = 195 + diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/resolved/.gitignore b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/resolved/.gitignore new file mode 100644 index 0000000..3d35e6a --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/resolved/.gitignore @@ -0,0 +1,6 @@ +.tox +.galaxy_install_info +*kate-swp +__pycache__ +.cache +.directory diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/resolved/.yamllint b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/resolved/.yamllint new file mode 100644 index 0000000..e3f52af --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/resolved/.yamllint @@ -0,0 +1,36 @@ +--- +# Based on ansible-lint config +extends: default + +rules: + braces: + max-spaces-inside: 1 + level: error + brackets: + max-spaces-inside: 1 + level: error + colons: + max-spaces-after: -1 + level: error + commas: + max-spaces-after: -1 + level: error + comments: disable + comments-indentation: disable + document-start: disable + empty-lines: + max: 3 + level: error + hyphens: + level: error + indentation: + spaces: 2 + key-duplicates: enable + line-length: + max: 195 + level: warning + new-line-at-end-of-file: disable + new-lines: + type: unix + trailing-spaces: disable + truthy: disable diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/resolved/CONTRIBUTING.md 
b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/resolved/CONTRIBUTING.md
new file mode 100644
index 0000000..e3cd4cc
--- /dev/null
+++ b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/resolved/CONTRIBUTING.md
@@ -0,0 +1,31 @@
+Contributing
+============
+If you want to contribute to a project and make it better, your help is very welcome.
+Contributing is also a great way to learn more about social coding on Github, new technologies
+and their ecosystems and how to make constructive, helpful bug reports, feature requests and the
+noblest of all contributions: a good, clean pull request.
+
+### How to make a clean pull request
+
+Look for a project's contribution instructions. If there are any, follow them.
+
+- Create a personal fork of the project on Github.
+- Clone the fork on your local machine. Your remote repo on Github is called `origin`.
+- Add the original repository as a remote called `upstream`.
+- If you created your fork a while ago be sure to pull upstream changes into your local repository.
+- Create a new branch to work on! Branch from `develop` if it exists, else from `master`.
+- Implement/fix your feature, comment your code.
+- Follow the code style of the project, including indentation.
+- If the project has tests run them!
+- Write or adapt tests as needed.
+- Add or change the documentation as needed.
+- Squash your commits into a single commit. Create a new branch if necessary.
+- Push your branch to your fork on Github, the remote `origin`.
+- From your fork open a pull request in the correct branch. Target the project's `develop` branch if there is one, else go for `master`!
+- If the maintainer requests further changes just push them to your branch. The PR will be updated automatically.
+- Once the pull request is approved and merged you can pull the changes from `upstream` to your local repo and delete
+  your extra branch(es).
+
+And last but not least: Always write your commit messages in the present tense.
+Your commit message should describe what the commit, when applied, does to the
+code – not what you did to the code.
diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/resolved/LICENSE b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/resolved/LICENSE
new file mode 100644
index 0000000..8c8472f
--- /dev/null
+++ b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/resolved/LICENSE
@@ -0,0 +1,201 @@
+                                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity. For the purposes of this definition,
+      "control" means (i) the power, direct or indirect, to cause the
+      direction or management of such entity, whether by contract or
+      otherwise, or (ii) ownership of fifty percent (50%) or more of the
+      outstanding shares, or (iii) beneficial ownership of such entity.
+
+      "You" (or "Your") shall mean an individual or Legal Entity
+      exercising permissions granted by this License.
+ + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright 2020-2021 Bodo Schulz + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/resolved/Makefile b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/resolved/Makefile new file mode 100644 index 0000000..bfaab7c --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/resolved/Makefile @@ -0,0 +1,22 @@ +# +export TOX_SCENARIO ?= default +export TOX_ANSIBLE ?= ansible_8.5 + +.PHONY: converge destroy verify test lint + +default: converge + +converge: + @hooks/converge + +destroy: + @hooks/destroy + +verify: + @hooks/verify + +test: + @hooks/test + +lint: + @hooks/lint diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/resolved/README.md b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/resolved/README.md new file mode 100644 index 0000000..a8a630b --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/resolved/README.md @@ -0,0 +1,2 @@ + +# https://www.freedesktop.org/software/systemd/man/resolved.conf.html diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/resolved/defaults/main.yml b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/resolved/defaults/main.yml new file mode 100644 index 0000000..b986da9 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/resolved/defaults/main.yml @@ -0,0 +1,24 @@ +--- + +systemd_resolved: {} + # # Some examples of DNS servers which may be used for DNS= and FallbackDNS=: + # # Cloudflare: 1.1.1.1#cloudflare-dns.com 1.0.0.1#cloudflare-dns.com 2606:4700:4700::1111#cloudflare-dns.com 2606:4700:4700::1001#cloudflare-dns.com + # # Google: 8.8.8.8#dns.google 8.8.4.4#dns.google 2001:4860:4860::8888#dns.google 2001:4860:4860::8844#dns.google + # # Quad9: 9.9.9.9#dns.quad9.net 149.112.112.112#dns.quad9.net 2620:fe::fe#dns.quad9.net 2620:fe::9#dns.quad9.net + # dns: [] + # fallback_dns: [] + # #FallbackDNS=1.1.1.1#cloudflare-dns.com 9.9.9.9#dns.quad9.net 8.8.8.8#dns.google 2606:4700:4700::1111#cloudflare-dns.com 2620:fe::9#dns.quad9.net 2001:4860:4860::8888#dns.google + # domains: [] + # dns_sec: false + # dns_over_tls: false + # multicast_dns: true + # llmnr: true + # cache: true + # cache_from_localhost: false + # dns_stub_listener: true + # dns_stub_listener_extra: "" + # read_etc_hosts: true + # resolve_unicast_single_label: false + # stale_retention_sec: 0 + +... 
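As with pstore, the resolved role is driven entirely by one dictionary, and the commented block above documents the recognised keys. A sketch of a group_vars entry that pins encrypted upstream resolvers (illustrative values; the #hostname suffix is the TLS server name systemd-resolved validates for DNS-over-TLS):

    ---
    systemd_resolved:
      dns:
        - "1.1.1.1#cloudflare-dns.com"
        - "2606:4700:4700::1111#cloudflare-dns.com"
      fallback_dns:
        - "9.9.9.9#dns.quad9.net"
      dns_over_tls: true
    ...

Unset keys simply never reach the rendered resolved.conf, so a bare systemd_resolved: {} leaves the system at its compile-time defaults.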
diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/resolved/hooks/converge b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/resolved/hooks/converge new file mode 100755 index 0000000..0c50932 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/resolved/hooks/converge @@ -0,0 +1,3 @@ +#!/usr/bin/env bash + +hooks/tox.sh "converge" diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/resolved/hooks/destroy b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/resolved/hooks/destroy new file mode 100755 index 0000000..b4a3f8d --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/resolved/hooks/destroy @@ -0,0 +1,3 @@ +#!/usr/bin/env bash + +hooks/tox.sh "destroy" diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/resolved/hooks/lint b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/resolved/hooks/lint new file mode 100755 index 0000000..ef226a0 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/resolved/hooks/lint @@ -0,0 +1,3 @@ +#!/usr/bin/env bash + +hooks/tox.sh "lint" diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/resolved/hooks/molecule.rc b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/resolved/hooks/molecule.rc new file mode 100644 index 0000000..a15f7c3 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/resolved/hooks/molecule.rc @@ -0,0 +1,9 @@ + +TOX_ARGS= + +if [ -n "${TOX_SCENARIO}" ] +then + TOX_ARGS="--scenario-name ${TOX_SCENARIO}" +fi + +TOX_OPTS="-e ${TOX_ANSIBLE}" diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/resolved/hooks/test b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/resolved/hooks/test new file mode 100755 index 0000000..2869139 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/resolved/hooks/test @@ -0,0 +1,3 @@ +#!/usr/bin/env bash + +hooks/tox.sh "test" diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/resolved/hooks/tox.sh b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/resolved/hooks/tox.sh new file mode 100755 index 0000000..62bb777 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/resolved/hooks/tox.sh @@ -0,0 +1,26 @@ +#!/usr/bin/env bash + +. hooks/molecule.rc + +TOX_TEST="${1}" + +if [ -f "./collections.yml" ] +then + for collection in $(grep -v "#" collections.yml | grep "^ - name: " | awk -F ': ' '{print $2}') + do + collections_installed="$(ansible-galaxy collection list | grep ${collection} 2> /dev/null)" + + if [ -z "${collections_installed}" ] + then + echo "Install the required collection '${collection}'" + ansible-galaxy collection install ${collection} + else + collection_version=$(echo "${collections_installed}" | awk -F ' ' '{print $2}') + + echo "The required collection '${collection}' is installed in version ${collection_version}." 
+    fi
+  done
+  echo ""
+fi
+
+tox ${TOX_OPTS} -- molecule ${TOX_TEST} ${TOX_ARGS}
diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/resolved/hooks/verify b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/resolved/hooks/verify
new file mode 100755
index 0000000..5f436af
--- /dev/null
+++ b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/resolved/hooks/verify
@@ -0,0 +1,3 @@
+#!/usr/bin/env bash
+
+hooks/tox.sh "verify"
diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/resolved/molecule/configured/converge.yml b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/resolved/molecule/configured/converge.yml
new file mode 100644
index 0000000..2770580
--- /dev/null
+++ b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/resolved/molecule/configured/converge.yml
@@ -0,0 +1,7 @@
+---
+- name: converge
+  hosts: instance
+  any_errors_fatal: false
+
+  roles:
+    - role: bodsch.systemd.resolved
diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/resolved/molecule/configured/group_vars/all/vars.yml b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/resolved/molecule/configured/group_vars/all/vars.yml
new file mode 100644
index 0000000..9ac822a
--- /dev/null
+++ b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/resolved/molecule/configured/group_vars/all/vars.yml
@@ -0,0 +1,12 @@
+---
+
+# illustrative test values for the 'configured' scenario; keys mirror defaults/main.yml
+systemd_resolved:
+  dns:
+    - "1.1.1.1#cloudflare-dns.com"
+    - "9.9.9.9#dns.quad9.net"
+  fallback_dns:
+    - "8.8.8.8#dns.google"
+  dns_over_tls: true
+
+...
diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/resolved/molecule/configured/molecule.yml b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/resolved/molecule/configured/molecule.yml
new file mode 100644
index 0000000..67a0c0f
--- /dev/null
+++ b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/resolved/molecule/configured/molecule.yml
@@ -0,0 +1,51 @@
+dependency:
+  name: galaxy
+
+driver:
+  name: docker
+
+platforms:
+  - name: instance
+    image: "ghcr.io/bodsch/docker-ansible/ansible-${DISTRIBUTION:-debian:12}"
+    command: ${MOLECULE_DOCKER_COMMAND:-""}
+    docker_host: "${DOCKER_HOST:-unix://run/docker.sock}"
+    privileged: true
+    pre_build_image: true
+    cgroupns_mode: host
+    mounts:
+      - source: /sys/fs/cgroup
+        target: /sys/fs/cgroup
+        type: bind
+        read_only: false
+    volumes:
+      - /var/lib/containerd
+    capabilities:
+      - SYS_ADMIN
+    tmpfs:
+      - /run
+      - /tmp
+
+provisioner:
+  name: ansible
+  ansible_args:
+    - --diff
+    - -v
+  config_options:
+    defaults:
+      deprecation_warnings: true
+      callback_result_format: yaml
+      callbacks_enabled: profile_tasks
+      gathering: smart
+      fact_caching_timeout: 320
+scenario:
+  test_sequence:
+    - destroy
+    - dependency
+    - create
+    - prepare
+    - converge
+    - idempotence
+    - verify
+    - destroy
+verifier:
+  name: testinfra
diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/resolved/molecule/configured/prepare.yml b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/resolved/molecule/configured/prepare.yml
new file mode 100644
index 0000000..d680e48
--- /dev/null
+++ b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/resolved/molecule/configured/prepare.yml
@@ -0,0 +1,49 @@
+---
+
+- name: information
+  hosts: all
+  gather_facts: true
+
+  pre_tasks:
+    - name: arch- / artixlinux
+      when:
+        - ansible_facts.distribution | lower == 'archlinux' or
+          ansible_facts.os_family | lower == 'artix linux'
+      block:
+        - name: update pacman system
+          ansible.builtin.command: |
+            pacman --refresh --sync --sysupgrade --noconfirm
+          register: pacman
+          changed_when: pacman.rc != 0
+          failed_when: pacman.rc != 0
+
+    - name: create depends service
+      ansible.builtin.copy:
+        mode: 0755
+        dest: /etc/init.d/net
+        content: |
+          #!/usr/bin/openrc-run
+          true
+      when:
+        - ansible_facts.os_family | lower == 'artix linux'
+
+    - name: make sure python3-apt is installed (only debian based)
+      ansible.builtin.package:
+        name:
+          - python3-apt
+        state: present
+      when:
+        - ansible_facts.os_family | lower == 'debian'
+
+    - name: update package cache
+      become: true
+      ansible.builtin.package:
+        update_cache: true
+
+    - name: environment
+      ansible.builtin.debug:
+        msg:
+          - "os family : {{ ansible_facts.distribution }} ({{ ansible_facts.os_family }})"
+          - "distribution version : {{ ansible_facts.distribution_major_version }}"
+          - "ansible version : {{ ansible_version.full }}"
+          - "python version : {{ ansible_facts.python.version.major }}.{{ ansible_facts.python.version.minor }}"
diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/resolved/molecule/configured/tests/test_default.py b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/resolved/molecule/configured/tests/test_default.py
new file mode 100644
index 0000000..49de2bc
--- /dev/null
+++ b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/resolved/molecule/configured/tests/test_default.py
@@ -0,0 +1,148 @@
+# coding: utf-8
+from __future__ import unicode_literals
+
+import json
+import os
+
+import pytest
+import testinfra.utils.ansible_runner
+from ansible.parsing.dataloader import DataLoader
+from ansible.template import Templar
+
+HOST = "instance"
+
+testinfra_hosts = testinfra.utils.ansible_runner.AnsibleRunner(
+    os.environ["MOLECULE_INVENTORY_FILE"]
+).get_hosts(HOST)
+
+
+def pp_json(json_thing, sort=True, indents=2):
+    if type(json_thing) is str:
+        print(json.dumps(json.loads(json_thing), sort_keys=sort, indent=indents))
+    else:
+        print(json.dumps(json_thing, sort_keys=sort, indent=indents))
+    return None
+
+
+def base_directory():
+    """ """
+    cwd = os.getcwd()
+
+    if "group_vars" in os.listdir(cwd):
+        directory = "../.."
+        molecule_directory = "."
+    else:
+        directory = "."
+        molecule_directory = f"molecule/{os.environ.get('MOLECULE_SCENARIO_NAME')}"
+
+    return directory, molecule_directory
+
+
+def read_ansible_yaml(file_name, role_name):
+    """ """
+    read_file = None
+
+    for e in ["yml", "yaml"]:
+        test_file = f"{file_name}.{e}"
+        if os.path.isfile(test_file):
+            read_file = test_file
+            break
+
+    return f"file={read_file} name={role_name}"
+
+
+@pytest.fixture()
+def get_vars(host):
+    """
+    parse ansible variables
+    - defaults/main.yml
+    - vars/main.yml
+    - vars/${DISTRIBUTION}.yaml
+    - molecule/${MOLECULE_SCENARIO_NAME}/group_vars/all/vars.yml
+    """
+    base_dir, molecule_dir = base_directory()
+    distribution = host.system_info.distribution
+    operating_system = None
+
+    if distribution in ["debian", "ubuntu"]:
+        operating_system = "debian"
+    elif distribution in ["redhat", "ol", "centos", "rocky", "almalinux"]:
+        operating_system = "redhat"
+    elif distribution in ["arch", "artix"]:
+        operating_system = f"{distribution}linux"
+
+    # print(" -> {} / {}".format(distribution, operating_system))
+    # print(" -> {}".format(base_dir))
+
+    file_defaults = read_ansible_yaml(f"{base_dir}/defaults/main", "role_defaults")
+    file_vars = read_ansible_yaml(f"{base_dir}/vars/main", "role_vars")
+    file_distribution = read_ansible_yaml(
+        f"{base_dir}/vars/{operating_system}", "role_distribution"
+    )
+    file_molecule = read_ansible_yaml(
+        f"{molecule_dir}/group_vars/all/vars", "test_vars"
+    )
+    # file_host_molecule = read_ansible_yaml("{}/host_vars/{}/vars".format(base_dir, HOST), "host_vars")
+
+    defaults_vars = (
+        host.ansible("include_vars", file_defaults)
+        .get("ansible_facts")
+        .get("role_defaults")
+    )
+    vars_vars = (
+        host.ansible("include_vars", file_vars).get("ansible_facts").get("role_vars")
+    )
+    distribution_vars = (
+        host.ansible("include_vars", file_distribution)
+        .get("ansible_facts")
+        .get("role_distribution")
+    )
+    molecule_vars = (
+        host.ansible("include_vars", file_molecule)
+        .get("ansible_facts")
+        .get("test_vars")
+    )
+    # host_vars = host.ansible("include_vars", file_host_molecule).get("ansible_facts").get("host_vars")
+
+    ansible_vars = defaults_vars
+    ansible_vars.update(vars_vars)
+    ansible_vars.update(distribution_vars)
+    ansible_vars.update(molecule_vars)
+    # ansible_vars.update(host_vars)
+
+    templar = Templar(loader=DataLoader(), variables=ansible_vars)
+    result = templar.template(ansible_vars, fail_on_undefined=False)
+
+    return result
+
+
+def local_facts(host):
+    """
+    return local facts
+    """
+    return (
+        host.ansible("setup").get("ansible_facts").get("ansible_local").get("resolved")
+    )
+
+
+@pytest.mark.parametrize(
+    "directories",
+    [
+        "/etc/systemd",
+    ],
+)
+def test_directories(host, directories):
+    d = host.file(directories)
+    assert d.is_directory
+
+
+@pytest.mark.parametrize(
+    "files",
+    [
+        "/etc/systemd/resolved.conf",
+    ],
)
+def test_systemd_files(host, files):
+    """ """
+    d = host.file(files)
+    assert d.is_file
diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/resolved/molecule/default/converge.yml b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/resolved/molecule/default/converge.yml
new file mode 100644
index 0000000..2770580
--- /dev/null
+++ b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/resolved/molecule/default/converge.yml
@@ -0,0 +1,7 @@
+---
+- name: converge
+  hosts: instance
+  any_errors_fatal: false
+
+  roles:
+    - role: bodsch.systemd.resolved
diff --git
a/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/resolved/molecule/default/group_vars/all/vars.yml b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/resolved/molecule/default/group_vars/all/vars.yml new file mode 100644 index 0000000..5a99a2e --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/resolved/molecule/default/group_vars/all/vars.yml @@ -0,0 +1,9 @@ +--- + +systemd_resolved: + dns: + - "1.1.1.1" + - "9.9.9.9" + + +... diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/resolved/molecule/default/molecule.yml b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/resolved/molecule/default/molecule.yml new file mode 100644 index 0000000..67a0c0f --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/resolved/molecule/default/molecule.yml @@ -0,0 +1,51 @@ +dependency: + name: galaxy + +driver: + name: docker + +platforms: + - name: instance + image: "ghcr.io/bodsch/docker-ansible/ansible-${DISTRIBUTION:-debian:12}" + command: ${MOLECULE_DOCKER_COMMAND:-""} + docker_host: "${DOCKER_HOST:-unix://run/docker.sock}" + privileged: true + pre_build_image: true + cgroupns_mode: host + mounts: + - source: /sys/fs/cgroup + target: /sys/fs/cgroup + type: bind + read_only: false + volumes: + - /var/lib/containerd + capabilities: + - SYS_ADMIN + tmpfs: + - /run + - /tmp + +provisioner: + name: ansible + ansible_args: + - --diff + - -v + config_options: + defaults: + deprecation_warnings: true + callback_result_format: yaml + callbacks_enabled: profile_tasks + gathering: smart + fact_caching_timeout: 320 +scenario: + test_sequence: + - destroy + - dependency + - create + - prepare + - converge + - idempotence + - verify + - destroy +verifier: + name: testinfra diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/resolved/molecule/default/prepare.yml b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/resolved/molecule/default/prepare.yml new file mode 100644 index 0000000..d680e48 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/resolved/molecule/default/prepare.yml @@ -0,0 +1,49 @@ +--- + +- name: information + hosts: all + gather_facts: true + + pre_tasks: + - name: arch- / artixlinux + when: + - ansible_facts.distribution | lower == 'archlinux' or + ansible_facts.os_family | lower == 'artix linux' + block: + - name: update pacman system + ansible.builtin.command: | + pacman --refresh --sync --sysupgrade --noconfirm + register: pacman + changed_when: pacman.rc != 0 + failed_when: pacman.rc != 0 + + - name: create depends service + ansible.builtin.copy: + mode: 0755 + dest: /etc/init.d/net + content: | + #!/usr/bin/openrc-run + true + when: + - ansible_facts.os_family | lower == 'artix linux' + + - name: make sure python3-apt is installed (only debian based) + ansible.builtin.package: + name: + - python3-apt + state: present + when: + - ansible_facts.os_family | lower == 'debian' + + - name: update package cache + become: true + ansible.builtin.package: + update_cache: true + + - name: environment + ansible.builtin.debug: + msg: + - "os family : {{ ansible_facts.distribution }} ({{ ansible_facts.os_family }})" + - "distribution version : {{ ansible_facts.distribution_major_version }}" + - "ansible version : {{ ansible_version.full }}" + - "python version : {{ ansible_facts.python.version.major }}.{{ ansible_facts.python.version.minor 
}}"
diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/resolved/molecule/default/tests/test_default.py b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/resolved/molecule/default/tests/test_default.py
new file mode 100644
index 0000000..49de2bc
--- /dev/null
+++ b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/resolved/molecule/default/tests/test_default.py
@@ -0,0 +1,148 @@
+# coding: utf-8
+from __future__ import unicode_literals
+
+import json
+import os
+
+import pytest
+import testinfra.utils.ansible_runner
+from ansible.parsing.dataloader import DataLoader
+from ansible.template import Templar
+
+HOST = "instance"
+
+testinfra_hosts = testinfra.utils.ansible_runner.AnsibleRunner(
+    os.environ["MOLECULE_INVENTORY_FILE"]
+).get_hosts(HOST)
+
+
+def pp_json(json_thing, sort=True, indents=2):
+    if type(json_thing) is str:
+        print(json.dumps(json.loads(json_thing), sort_keys=sort, indent=indents))
+    else:
+        print(json.dumps(json_thing, sort_keys=sort, indent=indents))
+    return None
+
+
+def base_directory():
+    """ """
+    cwd = os.getcwd()
+
+    if "group_vars" in os.listdir(cwd):
+        directory = "../.."
+        molecule_directory = "."
+    else:
+        directory = "."
+        molecule_directory = f"molecule/{os.environ.get('MOLECULE_SCENARIO_NAME')}"
+
+    return directory, molecule_directory
+
+
+def read_ansible_yaml(file_name, role_name):
+    """ """
+    read_file = None
+
+    for e in ["yml", "yaml"]:
+        test_file = f"{file_name}.{e}"
+        if os.path.isfile(test_file):
+            read_file = test_file
+            break
+
+    return f"file={read_file} name={role_name}"
+
+
+@pytest.fixture()
+def get_vars(host):
+    """
+    parse ansible variables
+    - defaults/main.yml
+    - vars/main.yml
+    - vars/${DISTRIBUTION}.yaml
+    - molecule/${MOLECULE_SCENARIO_NAME}/group_vars/all/vars.yml
+    """
+    base_dir, molecule_dir = base_directory()
+    distribution = host.system_info.distribution
+    operating_system = None
+
+    if distribution in ["debian", "ubuntu"]:
+        operating_system = "debian"
+    elif distribution in ["redhat", "ol", "centos", "rocky", "almalinux"]:
+        operating_system = "redhat"
+    elif distribution in ["arch", "artix"]:
+        operating_system = f"{distribution}linux"
+
+    # print(" -> {} / {}".format(distribution, operating_system))
+    # print(" -> {}".format(base_dir))
+
+    file_defaults = read_ansible_yaml(f"{base_dir}/defaults/main", "role_defaults")
+    file_vars = read_ansible_yaml(f"{base_dir}/vars/main", "role_vars")
+    file_distribution = read_ansible_yaml(
+        f"{base_dir}/vars/{operating_system}", "role_distribution"
+    )
+    file_molecule = read_ansible_yaml(
+        f"{molecule_dir}/group_vars/all/vars", "test_vars"
+    )
+    # file_host_molecule = read_ansible_yaml("{}/host_vars/{}/vars".format(base_dir, HOST), "host_vars")
+
+    defaults_vars = (
+        host.ansible("include_vars", file_defaults)
+        .get("ansible_facts")
+        .get("role_defaults")
+    )
+    vars_vars = (
+        host.ansible("include_vars", file_vars).get("ansible_facts").get("role_vars")
+    )
+    distribution_vars = (
+        host.ansible("include_vars", file_distribution)
+        .get("ansible_facts")
+        .get("role_distribution")
+    )
+    molecule_vars = (
+        host.ansible("include_vars", file_molecule)
+        .get("ansible_facts")
+        .get("test_vars")
+    )
+    # host_vars = host.ansible("include_vars", file_host_molecule).get("ansible_facts").get("host_vars")
+
+    ansible_vars = defaults_vars
+    ansible_vars.update(vars_vars)
+    ansible_vars.update(distribution_vars)
+    ansible_vars.update(molecule_vars)
+    # ansible_vars.update(host_vars)
+
+    templar = Templar(loader=DataLoader(),
variables=ansible_vars) + result = templar.template(ansible_vars, fail_on_undefined=False) + + return result + + +def local_facts(host): + """ + return local facts + """ + return ( + host.ansible("setup").get("ansible_facts").get("ansible_local").get("resolved") + ) + + +@pytest.mark.parametrize( + "directories", + [ + "/etc/systemd", + ], +) +def test_directories(host, directories): + d = host.file(directories) + assert d.is_directory + + +@pytest.mark.parametrize( + "files", + [ + "/etc/systemd/resolved.conf", + ], +) +def test_systemd_files(host, files): + """ """ + d = host.file(files) + assert d.is_file diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/resolved/tasks/main.yml b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/resolved/tasks/main.yml new file mode 100644 index 0000000..4be7588 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/resolved/tasks/main.yml @@ -0,0 +1,15 @@ +--- + +- name: merge systemd resolved configuration between defaults and custom + ansible.builtin.set_fact: + systemd_resolved: "{{ systemd_defaults_resolved | combine(systemd_resolved, recursive=True) }}" + +- name: create systemd resolved configuration + ansible.builtin.template: + src: systemd/resolved.conf.j2 + dest: /etc/systemd/resolved.conf + mode: 0644 + owner: root + group: root + +... diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/resolved/templates/systemd/resolved.conf.j2 b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/resolved/templates/systemd/resolved.conf.j2 new file mode 100644 index 0000000..054202e --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/resolved/templates/systemd/resolved.conf.j2 @@ -0,0 +1,98 @@ +#jinja2: trim_blocks: True, lstrip_blocks: True +# {{ ansible_managed }} +# +# This file is part of systemd. +# +# systemd is free software; you can redistribute it and/or modify it under the +# terms of the GNU Lesser General Public License as published by the Free +# Software Foundation; either version 2.1 of the License, or (at your option) +# any later version. +# +# Entries in this file show the compile time defaults. Local configuration +# should be created by either modifying this file, or by creating "drop-ins" in +# the resolved.conf.d/ subdirectory. The latter is generally recommended. +# Defaults can be restored by simply deleting this file and all drop-ins. +# +# Use 'systemd-analyze cat-config systemd/resolved.conf' to display the full config. +# +# See resolved.conf(5) for details. 
+ +[Resolve] +{% if systemd_resolved.dns is defined and + systemd_resolved.dns | count > 0 %} +DNS = {{ systemd_resolved.dns | join(' ') }} +{% endif %} +{% if systemd_resolved.fallback_dns is defined and + systemd_resolved.fallback_dns | count > 0 %} +FallbackDNS = {{ systemd_resolved.fallback_dns | join(' ') }} +{% endif %} +{% if systemd_resolved.domains is defined and + systemd_resolved.domains | count > 0 %} +Domains = {{ systemd_resolved.domains | join(' ') }} +{% endif %} +{% if systemd_resolved.dns_sec is defined and + systemd_resolved.dns_sec | string | length > 0 %} + {% if systemd_resolved.dns_sec | bodsch.core.type == "bool" %} +DNSSEC = {{ systemd_resolved.dns_sec | bool | bodsch.core.config_bool(true_as='yes', false_as='no') }} + {% elif systemd_resolved.dns_sec in ["allow-downgrade"] %} +DNSSEC = {{ systemd_resolved.dns_sec }} + {% endif %} +{% endif %} +{% if systemd_resolved.dns_over_tls is defined and + systemd_resolved.dns_over_tls | string | length > 0 %} + {% if systemd_resolved.dns_over_tls | bodsch.core.type == "bool" %} +DNSOverTLS = {{ systemd_resolved.dns_over_tls | bool | bodsch.core.config_bool(true_as='yes', false_as='no') }} + {% elif systemd_resolved.dns_over_tls in ["opportunistic"] %} +DNSOverTLS = {{ systemd_resolved.dns_over_tls }} + {% endif %} +{% endif %} +{% if systemd_resolved.multicast_dns is defined and + systemd_resolved.multicast_dns | string | length > 0 %} + {% if systemd_resolved.multicast_dns | bodsch.core.type == "bool" %} +MulticastDNS = {{ systemd_resolved.multicast_dns | bool | bodsch.core.config_bool(true_as='yes', false_as='no') }} + {% elif systemd_resolved.multicast_dns in ["resolve"] %} +MulticastDNS = {{ systemd_resolved.multicast_dns }} + {% endif %} +{% endif %} +{% if systemd_resolved.llmnr is defined and + systemd_resolved.llmnr | string | length > 0 %} + {% if systemd_resolved.llmnr | bodsch.core.type == "bool" %} +LLMNR = {{ systemd_resolved.llmnr | bool | bodsch.core.config_bool(true_as='yes', false_as='no') }} + {% elif systemd_resolved.llmnr in ["resolve"] %} +LLMNR = {{ systemd_resolved.llmnr }} + {% endif %} +{% endif %} +{% if systemd_resolved.cache is defined and + systemd_resolved.cache | string | length > 0 %} + {% if systemd_resolved.cache | bodsch.core.type == "bool" %} +Cache = {{ systemd_resolved.cache | bool | bodsch.core.config_bool(true_as='yes', false_as='no') }} + {% elif systemd_resolved.cache in ["no-negative"] %} +Cache = {{ systemd_resolved.cache }} + {% endif %} +{% endif %} +{% if systemd_resolved.cache_from_localhost is defined and + systemd_resolved.cache_from_localhost | string | length > 0 %} +CacheFromLocalhost = {{ systemd_resolved.cache_from_localhost | bool | bodsch.core.config_bool(true_as='yes', false_as='no') }} +{% endif %} +{% if systemd_resolved.dns_stub_listener is defined and + systemd_resolved.dns_stub_listener | string | length > 0 %} +DNSStubListener = {{ systemd_resolved.dns_stub_listener | bool | bodsch.core.config_bool(true_as='yes', false_as='no') }} +{% endif %} +{% if systemd_resolved.dns_stub_listener_extra is defined and + systemd_resolved.dns_stub_listener_extra | count > 0 %} + {% for stub in systemd_resolved.dns_stub_listener_extra %} +DNSStubListenerExtra = {{ stub }} + {% endfor %} +{% endif %} +{% if systemd_resolved.read_etc_hosts is defined and + systemd_resolved.read_etc_hosts | string | length > 0 %} +ReadEtcHosts = {{ systemd_resolved.read_etc_hosts | bool | bodsch.core.config_bool(true_as='yes', false_as='no') }} +{% endif %} +{% if 
systemd_resolved.resolve_unicast_single_label is defined and + systemd_resolved.resolve_unicast_single_label | string | length > 0 %} +ResolveUnicastSingleLabel = {{ systemd_resolved.resolve_unicast_single_label | bool | bodsch.core.config_bool(true_as='yes', false_as='no') }} +{% endif %} +{% if systemd_resolved.stale_retention_sec is defined and + systemd_resolved.stale_retention_sec | string | length > 0 %} +StaleRetentionSec = {{ systemd_resolved.stale_retention_sec }} +{% endif %} diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/resolved/vars/main.yml b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/resolved/vars/main.yml new file mode 100644 index 0000000..f1b1987 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/resolved/vars/main.yml @@ -0,0 +1,29 @@ +--- + +systemd_defaults_resolved: + # Some examples of DNS servers which may be used for DNS= and FallbackDNS=: + # Cloudflare: 1.1.1.1#cloudflare-dns.com 1.0.0.1#cloudflare-dns.com 2606:4700:4700::1111#cloudflare-dns.com 2606:4700:4700::1001#cloudflare-dns.com + # Google: 8.8.8.8#dns.google 8.8.4.4#dns.google 2001:4860:4860::8888#dns.google 2001:4860:4860::8844#dns.google + # Quad9: 9.9.9.9#dns.quad9.net 149.112.112.112#dns.quad9.net 2620:fe::fe#dns.quad9.net 2620:fe::9#dns.quad9.net + dns: [] + fallback_dns: [] + # - 1.1.1.1#cloudflare-dns.com + # - 9.9.9.9#dns.quad9.net + # - 8.8.8.8#dns.google + # - 2606:4700:4700::1111#cloudflare-dns.com + # - 2620:fe::9#dns.quad9.net + # - 2001:4860:4860::8888#dns.google + domains: [] + dns_sec: "" # false + dns_over_tls: "" # false + multicast_dns: "" # true + llmnr: "" # true + cache: "" # true + cache_from_localhost: "" # false + dns_stub_listener: "" # true + dns_stub_listener_extra: [] + read_etc_hosts: "" # true + resolve_unicast_single_label: "" # false + stale_retention_sec: "" # 0 + +... 
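Several resolved.conf directives accept either a boolean or a keyword, and the template above separates the two cases with bodsch.core.type: booleans are normalised to yes/no through bodsch.core.config_bool, while the whitelisted keyword strings ("allow-downgrade", "opportunistic", "resolve", "no-negative") pass through verbatim. A sketch of mixed values, building on the sentinel defaults just listed (the values are illustrative):

    ---
    systemd_resolved:
      dns_sec: "allow-downgrade"   # keyword -> DNSSEC = allow-downgrade
      multicast_dns: "resolve"     # keyword -> MulticastDNS = resolve
      llmnr: false                 # boolean -> LLMNR = no
      cache: "no-negative"         # keyword -> Cache = no-negative
    ...

Keys left at the empty-string sentinel from vars/main.yml fail the template's length > 0 guard and are skipped, so an untouched deployment keeps systemd's compile-time defaults.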
diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/sleep/.ansible-lint b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/sleep/.ansible-lint new file mode 100644 index 0000000..5343e85 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/sleep/.ansible-lint @@ -0,0 +1,6 @@ +--- + +skip_list: + - name[casing] + - name[template] + - syntax-check[specific] diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/sleep/.editorconfig b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/sleep/.editorconfig new file mode 100644 index 0000000..898cdbd --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/sleep/.editorconfig @@ -0,0 +1,23 @@ +# EditorConfig helps developers define and maintain consistent +# coding styles between different editors and IDEs +# https://editorconfig.org/ + + +root = true + +[*] +indent_style = space +indent_size = 2 + +end_of_line = lf +charset = utf-8 +trim_trailing_whitespace = true +insert_final_newline = true +max_line_length = 100 + +[*.py] +indent_style = space +indent_size = 4 + +[*.md] +trim_trailing_whitespace = false diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/sleep/.flake8 b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/sleep/.flake8 new file mode 100644 index 0000000..1962f7e --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/sleep/.flake8 @@ -0,0 +1,19 @@ +[flake8] + +# E221 multiple spaces before operator +# E251 unexpected spaces around keyword / parameter equals + +ignore = E221,E251 + +exclude = + # No need to traverse our git directory + .git, + # There's no value in checking cache directories + __pycache__, + .tox + +# E203: https://github.com/python/black/issues/315 +# ignore = D,E741,W503,W504,H,E501,E203 + +max-line-length = 195 + diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/sleep/.gitignore b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/sleep/.gitignore new file mode 100644 index 0000000..3d35e6a --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/sleep/.gitignore @@ -0,0 +1,6 @@ +.tox +.galaxy_install_info +*kate-swp +__pycache__ +.cache +.directory diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/sleep/.yamllint b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/sleep/.yamllint new file mode 100644 index 0000000..e3f52af --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/sleep/.yamllint @@ -0,0 +1,36 @@ +--- +# Based on ansible-lint config +extends: default + +rules: + braces: + max-spaces-inside: 1 + level: error + brackets: + max-spaces-inside: 1 + level: error + colons: + max-spaces-after: -1 + level: error + commas: + max-spaces-after: -1 + level: error + comments: disable + comments-indentation: disable + document-start: disable + empty-lines: + max: 3 + level: error + hyphens: + level: error + indentation: + spaces: 2 + key-duplicates: enable + line-length: + max: 195 + level: warning + new-line-at-end-of-file: disable + new-lines: + type: unix + trailing-spaces: disable + truthy: disable diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/sleep/CONTRIBUTING.md 
b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/sleep/CONTRIBUTING.md new file mode 100644 index 0000000..e3cd4cc --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/sleep/CONTRIBUTING.md @@ -0,0 +1,31 @@ +Contributing +============ +If you want to contribute to a project and make it better, your help is very welcome. +Contributing is also a great way to learn more about social coding on Github, new technologies and +their ecosystems and how to make constructive, helpful bug reports, feature requests and the +noblest of all contributions: a good, clean pull request. + +### How to make a clean pull request + +Look for a project's contribution instructions. If there are any, follow them. + +- Create a personal fork of the project on Github. +- Clone the fork on your local machine. Your remote repo on Github is called `origin`. +- Add the original repository as a remote called `upstream`. +- If you created your fork a while ago be sure to pull upstream changes into your local repository. +- Create a new branch to work on! Branch from `develop` if it exists, else from `master`. +- Implement/fix your feature, comment your code. +- Follow the code style of the project, including indentation. +- If the project has tests run them! +- Write or adapt tests as needed. +- Add or change the documentation as needed. +- Squash your commits into a single commit. Create a new branch if necessary. +- Push your branch to your fork on Github, the remote `origin`. +- From your fork open a pull request in the correct branch. Target the project's `develop` branch if there is one, else go for `master`! +- If the maintainer requests further changes just push them to your branch. The PR will be updated automatically. +- Once the pull request is approved and merged you can pull the changes from `upstream` to your local repo and delete + your extra branch(es). + +And last but not least: Always write your commit messages in the present tense. +Your commit message should describe what the commit, when applied, does to the +code – not what you did to the code. diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/sleep/LICENSE b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/sleep/LICENSE new file mode 100644 index 0000000..8c8472f --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/sleep/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License.
+ + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright 2020-2021 Bodo Schulz + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/sleep/Makefile b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/sleep/Makefile new file mode 100644 index 0000000..bfaab7c --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/sleep/Makefile @@ -0,0 +1,22 @@ +# +export TOX_SCENARIO ?= default +export TOX_ANSIBLE ?= ansible_8.5 + +.PHONY: converge destroy verify test lint + +default: converge + +converge: + @hooks/converge + +destroy: + @hooks/destroy + +verify: + @hooks/verify + +test: + @hooks/test + +lint: + @hooks/lint diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/sleep/README.md b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/sleep/README.md new file mode 100644 index 0000000..4904e0f --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/sleep/README.md @@ -0,0 +1,2 @@ + +https://www.freedesktop.org/software/systemd/man/systemd-sleep.conf.html diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/sleep/defaults/main.yml b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/sleep/defaults/main.yml new file mode 100644 index 0000000..962992c --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/sleep/defaults/main.yml @@ -0,0 +1,17 @@ +--- + +systemd_sleep: {} + # allow_suspend: "" # true + # allow_hibernation: "" # true + # allow_suspend_then_hibernate: "" # true + # allow_hybrid_sleep: "" # true + # suspend_mode: [] # [] # [platform] shutdown reboot suspend test_resume + # suspend_state: [] # [mem, standby, freeze] + # hibernate_mode: [] # [platform, shutdown] + # hibernate_state: [] # [disk] + # hibernate_delay_sec: "" + # hybrid_sleep_mode: [] # [suspend, platform, shutdown] + # hybrid_sleep_state: [] # [disk] + # suspend_estimation_sec: "" # 60min + +... 
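As with the resolved role, every option in `systemd_sleep` defaults to an empty string or empty list, and the role's `templates/systemd/sleep.conf.j2` (further down in this diff) only emits a directive when the value is defined and non-empty, so untouched options keep systemd's compile-time defaults. A small standalone demonstration of that guard pattern, using plain Jinja2; the inline `'yes'`/`'no'` mapping stands in for Ansible's `bool` and `config_bool` filters, which are not available outside Ansible:

```python
# Standalone demonstration (an assumed sketch, not role code) of the
# "defined and non-empty" guard used in sleep.conf.j2.
from jinja2 import Template

template = Template(
    "[Sleep]\n"
    "{% if allow_suspend is defined and allow_suspend | string | length > 0 %}"
    "AllowSuspend = {{ 'yes' if allow_suspend | string | lower in ('true', 'yes', '1') else 'no' }}\n"
    "{% endif %}",
    trim_blocks=True,
    lstrip_blocks=True,
)

print(template.render(allow_suspend="false"))  # emits: AllowSuspend = no
print(template.render(allow_suspend=""))       # empty string: directive omitted
print(template.render())                       # undefined: directive omitted
```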
diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/sleep/hooks/converge b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/sleep/hooks/converge new file mode 100755 index 0000000..0c50932 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/sleep/hooks/converge @@ -0,0 +1,3 @@ +#!/usr/bin/env bash + +hooks/tox.sh "converge" diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/sleep/hooks/destroy b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/sleep/hooks/destroy new file mode 100755 index 0000000..b4a3f8d --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/sleep/hooks/destroy @@ -0,0 +1,3 @@ +#!/usr/bin/env bash + +hooks/tox.sh "destroy" diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/sleep/hooks/lint b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/sleep/hooks/lint new file mode 100755 index 0000000..ef226a0 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/sleep/hooks/lint @@ -0,0 +1,3 @@ +#!/usr/bin/env bash + +hooks/tox.sh "lint" diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/sleep/hooks/molecule.rc b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/sleep/hooks/molecule.rc new file mode 100644 index 0000000..a15f7c3 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/sleep/hooks/molecule.rc @@ -0,0 +1,9 @@ + +TOX_ARGS= + +if [ -n "${TOX_SCENARIO}" ] +then + TOX_ARGS="--scenario-name ${TOX_SCENARIO}" +fi + +TOX_OPTS="-e ${TOX_ANSIBLE}" diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/sleep/hooks/test b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/sleep/hooks/test new file mode 100755 index 0000000..2869139 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/sleep/hooks/test @@ -0,0 +1,3 @@ +#!/usr/bin/env bash + +hooks/tox.sh "test" diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/sleep/hooks/tox.sh b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/sleep/hooks/tox.sh new file mode 100755 index 0000000..62bb777 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/sleep/hooks/tox.sh @@ -0,0 +1,26 @@ +#!/usr/bin/env bash + +. hooks/molecule.rc + +TOX_TEST="${1}" + +if [ -f "./collections.yml" ] +then + for collection in $(grep -v "#" collections.yml | grep "^ - name: " | awk -F ': ' '{print $2}') + do + collections_installed="$(ansible-galaxy collection list | grep ${collection} 2> /dev/null)" + + if [ -z "${collections_installed}" ] + then + echo "Install the required collection '${collection}'" + ansible-galaxy collection install ${collection} + else + collection_version=$(echo "${collections_installed}" | awk -F ' ' '{print $2}') + + echo "The required collection '${collection}' is installed in version ${collection_version}." 
+ fi + done + echo "" +fi + +tox ${TOX_OPTS} -- molecule ${TOX_TEST} ${TOX_ARGS} diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/sleep/hooks/verify b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/sleep/hooks/verify new file mode 100755 index 0000000..5f436af --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/sleep/hooks/verify @@ -0,0 +1,3 @@ +#!/usr/bin/env bash + +hooks/tox.sh "verify" diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/sleep/molecule/configured/converge.yml b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/sleep/molecule/configured/converge.yml new file mode 100644 index 0000000..83f4153 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/sleep/molecule/configured/converge.yml @@ -0,0 +1,7 @@ +--- +- name: converge + hosts: instance + any_errors_fatal: false + + roles: + - role: bodsch.systemd.sleep diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/sleep/molecule/configured/group_vars/all/vars.yml b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/sleep/molecule/configured/group_vars/all/vars.yml new file mode 100644 index 0000000..fdb4964 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/sleep/molecule/configured/group_vars/all/vars.yml @@ -0,0 +1,18 @@ +--- + +systemd_sleep: + allow_suspend: "false" # true + allow_hibernation: "false" # true + allow_suspend_then_hibernate: "" # true + allow_hybrid_sleep: "" # true + # [platform] shutdown reboot suspend test_resume + suspend_mode: [] + # freeze mem disk + suspend_state: [] # [mem, standby, freeze] + hibernate_mode: [] # [platform, shutdown] + hibernate_state: [] # [disk] + hibernate_delay_sec: "" + hybrid_sleep_mode: [] # [suspend, platform, shutdown] + hybrid_sleep_state: [] # [disk] + suspend_estimation_sec: "" # 60min +... 
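The `hooks/tox.sh` script above first makes sure every collection listed in a `collections.yml` is installed before delegating to tox and molecule. A rough Python equivalent of that pre-flight check, assuming a requirements-style file with a top-level `collections:` list of `name:` entries (inferred from the grep pattern, not confirmed by this diff):

```python
# Assumed Python rendition of the collection pre-flight check in hooks/tox.sh.
import subprocess

import yaml  # PyYAML


def ensure_collections(path="collections.yml"):
    """Install every collection named in collections.yml that is missing."""
    with open(path) as fh:
        wanted = [entry["name"] for entry in yaml.safe_load(fh).get("collections", [])]

    # `ansible-galaxy collection list` prints all installed collections.
    installed = subprocess.run(
        ["ansible-galaxy", "collection", "list"],
        capture_output=True, text=True, check=True,
    ).stdout

    for name in wanted:
        if name in installed:
            print(f"The required collection '{name}' is already installed.")
        else:
            print(f"Install the required collection '{name}'")
            subprocess.run(["ansible-galaxy", "collection", "install", name], check=True)


if __name__ == "__main__":
    ensure_collections()
```

Like the shell version, this matches by substring against the list output, so it only guarantees presence, not a particular version.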
diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/sleep/molecule/configured/molecule.yml b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/sleep/molecule/configured/molecule.yml new file mode 100644 index 0000000..67a0c0f --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/sleep/molecule/configured/molecule.yml @@ -0,0 +1,51 @@ +dependency: + name: galaxy + +driver: + name: docker + +platforms: + - name: instance + image: "ghcr.io/bodsch/docker-ansible/ansible-${DISTRIBUTION:-debian:12}" + command: ${MOLECULE_DOCKER_COMMAND:-""} + docker_host: "${DOCKER_HOST:-unix://run/docker.sock}" + privileged: true + pre_build_image: true + cgroupns_mode: host + mounts: + - source: /sys/fs/cgroup + target: /sys/fs/cgroup + type: bind + read_only: false + volumes: + - /var/lib/containerd + capabilities: + - SYS_ADMIN + tmpfs: + - /run + - /tmp + +provisioner: + name: ansible + ansible_args: + - --diff + - -v + config_options: + defaults: + deprecation_warnings: true + callback_result_format: yaml + callbacks_enabled: profile_tasks + gathering: smart + fact_caching_timeout: 320 +scenario: + test_sequence: + - destroy + - dependency + - create + - prepare + - converge + - idempotence + - verify + - destroy +verifier: + name: testinfra diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/sleep/molecule/configured/prepare.yml b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/sleep/molecule/configured/prepare.yml new file mode 100644 index 0000000..d680e48 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/sleep/molecule/configured/prepare.yml @@ -0,0 +1,49 @@ +--- + +- name: information + hosts: all + gather_facts: true + + pre_tasks: + - name: arch- / artixlinux + when: + - ansible_facts.distribution | lower == 'archlinux' or + ansible_facts.os_family | lower == 'artix linux' + block: + - name: update pacman system + ansible.builtin.command: | + pacman --refresh --sync --sysupgrade --noconfirm + register: pacman + changed_when: pacman.rc != 0 + failed_when: pacman.rc != 0 + + - name: create depends service + ansible.builtin.copy: + mode: 0755 + dest: /etc/init.d/net + content: | + #!/usr/bin/openrc-run + true + when: + - ansible_facts.os_family | lower == 'artix linux' + + - name: make sure python3-apt is installed (only debian based) + ansible.builtin.package: + name: + - python3-apt + state: present + when: + - ansible_facts.os_family | lower == 'debian' + + - name: update package cache + become: true + ansible.builtin.package: + update_cache: true + + - name: environment + ansible.builtin.debug: + msg: + - "os family : {{ ansible_facts.distribution }} ({{ ansible_facts.os_family }})" + - "distribution version : {{ ansible_facts.distribution_major_version }}" + - "ansible version : {{ ansible_version.full }}" + - "python version : {{ ansible_facts.python.version.major }}.{{ ansible_facts.python.version.minor }}" diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/sleep/molecule/configured/tests/test_default.py b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/sleep/molecule/configured/tests/test_default.py new file mode 100644 index 0000000..e31398b --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/sleep/molecule/configured/tests/test_default.py @@ -0,0 +1,146 @@ +# coding: utf-8 +from __future__ import unicode_literals + 
+import json +import os + +import pytest +import testinfra.utils.ansible_runner +from ansible.parsing.dataloader import DataLoader +from ansible.template import Templar + +HOST = "instance" + +testinfra_hosts = testinfra.utils.ansible_runner.AnsibleRunner( + os.environ["MOLECULE_INVENTORY_FILE"] +).get_hosts(HOST) + + +def pp_json(json_thing, sort=True, indents=2): + if type(json_thing) is str: + print(json.dumps(json.loads(json_thing), sort_keys=sort, indent=indents)) + else: + print(json.dumps(json_thing, sort_keys=sort, indent=indents)) + return None + + +def base_directory(): + """ """ + cwd = os.getcwd() + + if "group_vars" in os.listdir(cwd): + directory = "../.." + molecule_directory = "." + else: + directory = "." + molecule_directory = f"molecule/{os.environ.get('MOLECULE_SCENARIO_NAME')}" + + return directory, molecule_directory + + +def read_ansible_yaml(file_name, role_name): + """ """ + read_file = None + + for e in ["yml", "yaml"]: + test_file = f"{file_name}.{e}" + if os.path.isfile(test_file): + read_file = test_file + break + + return f"file={read_file} name={role_name}" + + +@pytest.fixture() +def get_vars(host): + """ + parse ansible variables + - defaults/main.yml + - vars/main.yml + - vars/${DISTRIBUTION}.yaml + - molecule/${MOLECULE_SCENARIO_NAME}/group_vars/all/vars.yml + """ + base_dir, molecule_dir = base_directory() + distribution = host.system_info.distribution + operation_system = None + + if distribution in ["debian", "ubuntu"]: + operation_system = "debian" + elif distribution in ["redhat", "ol", "centos", "rocky", "almalinux"]: + operation_system = "redhat" + elif distribution in ["arch", "artix"]: + operation_system = f"{distribution}linux" + + # print(" -> {} / {}".format(distribution, os)) + # print(" -> {}".format(base_dir)) + + file_defaults = read_ansible_yaml(f"{base_dir}/defaults/main", "role_defaults") + file_vars = read_ansible_yaml(f"{base_dir}/vars/main", "role_vars") + file_distibution = read_ansible_yaml( + f"{base_dir}/vars/{operation_system}", "role_distibution" + ) + file_molecule = read_ansible_yaml( + f"{molecule_dir}/group_vars/all/vars", "test_vars" + ) + # file_host_molecule = read_ansible_yaml("{}/host_vars/{}/vars".format(base_dir, HOST), "host_vars") + + defaults_vars = ( + host.ansible("include_vars", file_defaults) + .get("ansible_facts") + .get("role_defaults") + ) + vars_vars = ( + host.ansible("include_vars", file_vars).get("ansible_facts").get("role_vars") + ) + distibution_vars = ( + host.ansible("include_vars", file_distibution) + .get("ansible_facts") + .get("role_distibution") + ) + molecule_vars = ( + host.ansible("include_vars", file_molecule) + .get("ansible_facts") + .get("test_vars") + ) + # host_vars = host.ansible("include_vars", file_host_molecule).get("ansible_facts").get("host_vars") + + ansible_vars = defaults_vars + ansible_vars.update(vars_vars) + ansible_vars.update(distibution_vars) + ansible_vars.update(molecule_vars) + # ansible_vars.update(host_vars) + + templar = Templar(loader=DataLoader(), variables=ansible_vars) + result = templar.template(ansible_vars, fail_on_undefined=False) + + return result + + +def local_facts(host): + """ + return local facts + """ + return host.ansible("setup").get("ansible_facts").get("ansible_local").get("sleep") + + +@pytest.mark.parametrize( + "directories", + [ + "/etc/systemd", + ], +) +def test_directories(host, directories): + d = host.file(directories) + assert d.is_directory + + +@pytest.mark.parametrize( + "files", + [ + "/etc/systemd/sleep.conf", + ], +) +def 
test_systemd_files(host, files): + """ """ + d = host.file(files) + assert d.is_file diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/sleep/molecule/default/converge.yml b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/sleep/molecule/default/converge.yml new file mode 100644 index 0000000..83f4153 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/sleep/molecule/default/converge.yml @@ -0,0 +1,7 @@ +--- +- name: converge + hosts: instance + any_errors_fatal: false + + roles: + - role: bodsch.systemd.sleep diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/sleep/molecule/default/group_vars/all/vars.yml b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/sleep/molecule/default/group_vars/all/vars.yml new file mode 100644 index 0000000..c81cf5b --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/sleep/molecule/default/group_vars/all/vars.yml @@ -0,0 +1,3 @@ +--- + +... diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/sleep/molecule/default/molecule.yml b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/sleep/molecule/default/molecule.yml new file mode 100644 index 0000000..67a0c0f --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/sleep/molecule/default/molecule.yml @@ -0,0 +1,51 @@ +dependency: + name: galaxy + +driver: + name: docker + +platforms: + - name: instance + image: "ghcr.io/bodsch/docker-ansible/ansible-${DISTRIBUTION:-debian:12}" + command: ${MOLECULE_DOCKER_COMMAND:-""} + docker_host: "${DOCKER_HOST:-unix://run/docker.sock}" + privileged: true + pre_build_image: true + cgroupns_mode: host + mounts: + - source: /sys/fs/cgroup + target: /sys/fs/cgroup + type: bind + read_only: false + volumes: + - /var/lib/containerd + capabilities: + - SYS_ADMIN + tmpfs: + - /run + - /tmp + +provisioner: + name: ansible + ansible_args: + - --diff + - -v + config_options: + defaults: + deprecation_warnings: true + callback_result_format: yaml + callbacks_enabled: profile_tasks + gathering: smart + fact_caching_timeout: 320 +scenario: + test_sequence: + - destroy + - dependency + - create + - prepare + - converge + - idempotence + - verify + - destroy +verifier: + name: testinfra diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/sleep/molecule/default/prepare.yml b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/sleep/molecule/default/prepare.yml new file mode 100644 index 0000000..d680e48 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/sleep/molecule/default/prepare.yml @@ -0,0 +1,49 @@ +--- + +- name: information + hosts: all + gather_facts: true + + pre_tasks: + - name: arch- / artixlinux + when: + - ansible_facts.distribution | lower == 'archlinux' or + ansible_facts.os_family | lower == 'artix linux' + block: + - name: update pacman system + ansible.builtin.command: | + pacman --refresh --sync --sysupgrade --noconfirm + register: pacman + changed_when: pacman.rc != 0 + failed_when: pacman.rc != 0 + + - name: create depends service + ansible.builtin.copy: + mode: 0755 + dest: /etc/init.d/net + content: | + #!/usr/bin/openrc-run + true + when: + - ansible_facts.os_family | lower == 'artix linux' + + - name: make sure python3-apt is installed (only debian based) + ansible.builtin.package: + name: + - python3-apt + 
state: present + when: + - ansible_facts.os_family | lower == 'debian' + + - name: update package cache + become: true + ansible.builtin.package: + update_cache: true + + - name: environment + ansible.builtin.debug: + msg: + - "os family : {{ ansible_facts.distribution }} ({{ ansible_facts.os_family }})" + - "distribution version : {{ ansible_facts.distribution_major_version }}" + - "ansible version : {{ ansible_version.full }}" + - "python version : {{ ansible_facts.python.version.major }}.{{ ansible_facts.python.version.minor }}" diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/sleep/molecule/default/tests/test_default.py b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/sleep/molecule/default/tests/test_default.py new file mode 100644 index 0000000..e31398b --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/sleep/molecule/default/tests/test_default.py @@ -0,0 +1,146 @@ +# coding: utf-8 +from __future__ import unicode_literals + +import json +import os + +import pytest +import testinfra.utils.ansible_runner +from ansible.parsing.dataloader import DataLoader +from ansible.template import Templar + +HOST = "instance" + +testinfra_hosts = testinfra.utils.ansible_runner.AnsibleRunner( + os.environ["MOLECULE_INVENTORY_FILE"] +).get_hosts(HOST) + + +def pp_json(json_thing, sort=True, indents=2): + if type(json_thing) is str: + print(json.dumps(json.loads(json_thing), sort_keys=sort, indent=indents)) + else: + print(json.dumps(json_thing, sort_keys=sort, indent=indents)) + return None + + +def base_directory(): + """ """ + cwd = os.getcwd() + + if "group_vars" in os.listdir(cwd): + directory = "../.." + molecule_directory = "." + else: + directory = "." + molecule_directory = f"molecule/{os.environ.get('MOLECULE_SCENARIO_NAME')}" + + return directory, molecule_directory + + +def read_ansible_yaml(file_name, role_name): + """ """ + read_file = None + + for e in ["yml", "yaml"]: + test_file = f"{file_name}.{e}" + if os.path.isfile(test_file): + read_file = test_file + break + + return f"file={read_file} name={role_name}" + + +@pytest.fixture() +def get_vars(host): + """ + parse ansible variables + - defaults/main.yml + - vars/main.yml + - vars/${DISTRIBUTION}.yaml + - molecule/${MOLECULE_SCENARIO_NAME}/group_vars/all/vars.yml + """ + base_dir, molecule_dir = base_directory() + distribution = host.system_info.distribution + operation_system = None + + if distribution in ["debian", "ubuntu"]: + operation_system = "debian" + elif distribution in ["redhat", "ol", "centos", "rocky", "almalinux"]: + operation_system = "redhat" + elif distribution in ["arch", "artix"]: + operation_system = f"{distribution}linux" + + # print(" -> {} / {}".format(distribution, os)) + # print(" -> {}".format(base_dir)) + + file_defaults = read_ansible_yaml(f"{base_dir}/defaults/main", "role_defaults") + file_vars = read_ansible_yaml(f"{base_dir}/vars/main", "role_vars") + file_distibution = read_ansible_yaml( + f"{base_dir}/vars/{operation_system}", "role_distibution" + ) + file_molecule = read_ansible_yaml( + f"{molecule_dir}/group_vars/all/vars", "test_vars" + ) + # file_host_molecule = read_ansible_yaml("{}/host_vars/{}/vars".format(base_dir, HOST), "host_vars") + + defaults_vars = ( + host.ansible("include_vars", file_defaults) + .get("ansible_facts") + .get("role_defaults") + ) + vars_vars = ( + host.ansible("include_vars", file_vars).get("ansible_facts").get("role_vars") + ) + distibution_vars = ( + 
host.ansible("include_vars", file_distibution) + .get("ansible_facts") + .get("role_distibution") + ) + molecule_vars = ( + host.ansible("include_vars", file_molecule) + .get("ansible_facts") + .get("test_vars") + ) + # host_vars = host.ansible("include_vars", file_host_molecule).get("ansible_facts").get("host_vars") + + ansible_vars = defaults_vars + ansible_vars.update(vars_vars) + ansible_vars.update(distibution_vars) + ansible_vars.update(molecule_vars) + # ansible_vars.update(host_vars) + + templar = Templar(loader=DataLoader(), variables=ansible_vars) + result = templar.template(ansible_vars, fail_on_undefined=False) + + return result + + +def local_facts(host): + """ + return local facts + """ + return host.ansible("setup").get("ansible_facts").get("ansible_local").get("sleep") + + +@pytest.mark.parametrize( + "directories", + [ + "/etc/systemd", + ], +) +def test_directories(host, directories): + d = host.file(directories) + assert d.is_directory + + +@pytest.mark.parametrize( + "files", + [ + "/etc/systemd/sleep.conf", + ], +) +def test_systemd_files(host, files): + """ """ + d = host.file(files) + assert d.is_file diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/sleep/tasks/main.yml b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/sleep/tasks/main.yml new file mode 100644 index 0000000..c51ec54 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/sleep/tasks/main.yml @@ -0,0 +1,15 @@ +--- + +- name: merge systemd sleep configuration between defaults and custom + ansible.builtin.set_fact: + systemd_sleep: "{{ systemd_defaults_sleep | combine(systemd_sleep, recursive=True) }}" + +- name: create systemd sleep configuration + ansible.builtin.template: + src: systemd/sleep.conf.j2 + dest: /etc/systemd/sleep.conf + mode: 0644 + owner: root + group: root + +... diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/sleep/templates/systemd/sleep.conf.j2 b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/sleep/templates/systemd/sleep.conf.j2 new file mode 100644 index 0000000..0c9553d --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/sleep/templates/systemd/sleep.conf.j2 @@ -0,0 +1,72 @@ +#jinja2: trim_blocks: True, lstrip_blocks: True +# {{ ansible_managed }} +# +# This file is part of systemd. +# +# systemd is free software; you can redistribute it and/or modify it under the +# terms of the GNU Lesser General Public License as published by the Free +# Software Foundation; either version 2.1 of the License, or (at your option) +# any later version. +# +# Entries in this file show the compile time defaults. Local configuration +# should be created by either modifying this file, or by creating "drop-ins" in +# the sleep.conf.d/ subdirectory. The latter is generally recommended. +# Defaults can be restored by simply deleting this file and all drop-ins. +# +# See systemd-sleep.conf(5) for details. 
+ +[Sleep] +{% if systemd_sleep.allow_suspend is defined and + systemd_sleep.allow_suspend | string | length > 0 %} +AllowSuspend = {{ systemd_sleep.allow_suspend | bool | bodsch.core.config_bool(true_as='yes', false_as='no') }} +{% endif %} +{% if systemd_sleep.allow_hibernation is defined and + systemd_sleep.allow_hibernation | string | length > 0 %} +AllowHibernation = {{ systemd_sleep.allow_hibernation | bool | bodsch.core.config_bool(true_as='yes', false_as='no') }} +{% endif %} +{% if systemd_sleep.allow_suspend_then_hibernate is defined and + systemd_sleep.allow_suspend_then_hibernate | string | length > 0 %} +AllowSuspendThenHibernate = {{ systemd_sleep.allow_suspend_then_hibernate | bool | bodsch.core.config_bool(true_as='yes', false_as='no') }} +{% endif %} +{% if systemd_sleep.allow_hybrid_sleep is defined and + systemd_sleep.allow_hybrid_sleep | string | length > 0 %} +AllowHybridSleep = {{ systemd_sleep.allow_hybrid_sleep | bool | bodsch.core.config_bool(true_as='yes', false_as='no') }} +{% endif %} +{% if systemd_sleep.suspend_mode is defined and + systemd_sleep.suspend_mode | count > 0 %} + {% set _valid_list = systemd_sleep.suspend_mode | bodsch.systemd.valid_list(["disk", "platform", "shutdown", "reboot", "suspend", "test_resume"]) %} +SuspendMode = {{ _valid_list | join(' ') }} +{% endif %} +{% if systemd_sleep.suspend_state is defined and + systemd_sleep.suspend_state | count > 0 %} + {% set _valid_list = systemd_sleep.suspend_state | bodsch.systemd.valid_list(["mem", "disk", "standby", "freeze"]) %} +SuspendState = {{ _valid_list | join(' ') }} +{% endif %} +{% if systemd_sleep.hibernate_mode is defined and + systemd_sleep.hibernate_mode | count > 0 %} + {% set _valid_list = systemd_sleep.hibernate_mode | bodsch.systemd.valid_list(["disk", "platform", "shutdown", "reboot", "suspend", "test_resume"]) %} +HibernateMode = {{ _valid_list | join(' ') }} +{% endif %} +{% if systemd_sleep.hibernate_state is defined and + systemd_sleep.hibernate_state | count > 0 %} + {% set _valid_list = systemd_sleep.hibernate_state | bodsch.systemd.valid_list(["mem", "disk", "standby", "freeze"]) %} +HibernateState = {{ _valid_list | join(' ') }} +{% endif %} +{% if systemd_sleep.hybrid_sleep_mode is defined and + systemd_sleep.hybrid_sleep_mode | count > 0 %} + {% set _valid_list = systemd_sleep.hybrid_sleep_mode | bodsch.systemd.valid_list(["disk", "platform", "shutdown", "reboot", "suspend", "test_resume"]) %} +HybridSleepMode = {{ _valid_list | join(' ') }} +{% endif %} +{% if systemd_sleep.hybrid_sleep_state is defined and + systemd_sleep.hybrid_sleep_state | count > 0 %} + {% set _valid_list = systemd_sleep.hybrid_sleep_state | bodsch.systemd.valid_list(["mem", "disk", "standby", "freeze"]) %} +HybridSleepState = {{ _valid_list | join(' ') }} +{% endif %} +{% if systemd_sleep.hibernate_delay_sec is defined and + systemd_sleep.hibernate_delay_sec | string | length > 0 %} +HibernateDelaySec = {{ systemd_sleep.hibernate_delay_sec }} +{% endif %} +{% if systemd_sleep.suspend_estimation_sec is defined and + systemd_sleep.suspend_estimation_sec | string | length > 0 %} +SuspendEstimationSec = {{ systemd_sleep.suspend_estimation_sec }} +{% endif %} diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/sleep/vars/main.yml b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/sleep/vars/main.yml new file mode 100644 index 0000000..511bb42 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/sleep/vars/main.yml
@@ -0,0 +1,19 @@ +--- + +systemd_defaults_sleep: + allow_suspend: "" # true + allow_hibernation: "" # true + allow_suspend_then_hibernate: "" # true + allow_hybrid_sleep: "" # true + # [platform] shutdown reboot suspend test_resume + suspend_mode: [] + # freeze mem disk + suspend_state: [] # [mem, standby, freeze] + hibernate_mode: [] # [platform, shutdown] + hibernate_state: [] # [disk] + hibernate_delay_sec: "" + hybrid_sleep_mode: [] # [suspend, platform, shutdown] + hybrid_sleep_state: [] # [disk] + suspend_estimation_sec: "" # 60min + +... diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/system/.ansible-lint b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/system/.ansible-lint new file mode 100644 index 0000000..5343e85 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/system/.ansible-lint @@ -0,0 +1,6 @@ +--- + +skip_list: + - name[casing] + - name[template] + - syntax-check[specific] diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/system/.editorconfig b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/system/.editorconfig new file mode 100644 index 0000000..898cdbd --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/system/.editorconfig @@ -0,0 +1,23 @@ +# EditorConfig helps developers define and maintain consistent +# coding styles between different editors and IDEs +# https://editorconfig.org/ + + +root = true + +[*] +indent_style = space +indent_size = 2 + +end_of_line = lf +charset = utf-8 +trim_trailing_whitespace = true +insert_final_newline = true +max_line_length = 100 + +[*.py] +indent_style = space +indent_size = 4 + +[*.md] +trim_trailing_whitespace = false diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/system/.flake8 b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/system/.flake8 new file mode 100644 index 0000000..1962f7e --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/system/.flake8 @@ -0,0 +1,19 @@ +[flake8] + +# E221 multiple spaces before operator +# E251 unexpected spaces around keyword / parameter equals + +ignore = E221,E251 + +exclude = + # No need to traverse our git directory + .git, + # There's no value in checking cache directories + __pycache__, + .tox + +# E203: https://github.com/python/black/issues/315 +# ignore = D,E741,W503,W504,H,E501,E203 + +max-line-length = 195 + diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/system/.gitignore b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/system/.gitignore new file mode 100644 index 0000000..3d35e6a --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/system/.gitignore @@ -0,0 +1,6 @@ +.tox +.galaxy_install_info +*kate-swp +__pycache__ +.cache +.directory diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/system/.yamllint b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/system/.yamllint new file mode 100644 index 0000000..e3f52af --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/system/.yamllint @@ -0,0 +1,36 @@ +--- +# Based on ansible-lint config +extends: default + +rules: + braces: + max-spaces-inside: 1 + level: error + brackets: + max-spaces-inside: 1 + level: error + colons: + max-spaces-after: -1 + level: error + 
commas: + max-spaces-after: -1 + level: error + comments: disable + comments-indentation: disable + document-start: disable + empty-lines: + max: 3 + level: error + hyphens: + level: error + indentation: + spaces: 2 + key-duplicates: enable + line-length: + max: 195 + level: warning + new-line-at-end-of-file: disable + new-lines: + type: unix + trailing-spaces: disable + truthy: disable diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/system/CONTRIBUTING.md b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/system/CONTRIBUTING.md new file mode 100644 index 0000000..e3cd4cc --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/system/CONTRIBUTING.md @@ -0,0 +1,31 @@ +Contributing +============ +If you want to contribute to a project and make it better, your help is very welcome. +Contributing is also a great way to learn more about social coding on Github, new technologies and +their ecosystems and how to make constructive, helpful bug reports, feature requests and the +noblest of all contributions: a good, clean pull request. + +### How to make a clean pull request + +Look for a project's contribution instructions. If there are any, follow them. + +- Create a personal fork of the project on Github. +- Clone the fork on your local machine. Your remote repo on Github is called `origin`. +- Add the original repository as a remote called `upstream`. +- If you created your fork a while ago be sure to pull upstream changes into your local repository. +- Create a new branch to work on! Branch from `develop` if it exists, else from `master`. +- Implement/fix your feature, comment your code. +- Follow the code style of the project, including indentation. +- If the project has tests run them! +- Write or adapt tests as needed. +- Add or change the documentation as needed. +- Squash your commits into a single commit. Create a new branch if necessary. +- Push your branch to your fork on Github, the remote `origin`. +- From your fork open a pull request in the correct branch. Target the project's `develop` branch if there is one, else go for `master`! +- If the maintainer requests further changes just push them to your branch. The PR will be updated automatically. +- Once the pull request is approved and merged you can pull the changes from `upstream` to your local repo and delete + your extra branch(es). + +And last but not least: Always write your commit messages in the present tense. +Your commit message should describe what the commit, when applied, does to the +code – not what you did to the code. diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/system/LICENSE b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/system/LICENSE new file mode 100644 index 0000000..8c8472f --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/system/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License.
+ + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright 2020-2021 Bodo Schulz + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/system/Makefile b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/system/Makefile new file mode 100644 index 0000000..bfaab7c --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/system/Makefile @@ -0,0 +1,22 @@ +# +export TOX_SCENARIO ?= default +export TOX_ANSIBLE ?= ansible_8.5 + +.PHONY: converge destroy verify test lint + +default: converge + +converge: + @hooks/converge + +destroy: + @hooks/destroy + +verify: + @hooks/verify + +test: + @hooks/test + +lint: + @hooks/lint diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/system/README.md b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/system/README.md new file mode 100644 index 0000000..e69de29 diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/system/defaults/main.yml b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/system/defaults/main.yml new file mode 100644 index 0000000..210e531 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/system/defaults/main.yml @@ -0,0 +1,5 @@ +--- + +systemd_system: {} + +... diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/system/hooks/converge b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/system/hooks/converge new file mode 100755 index 0000000..0c50932 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/system/hooks/converge @@ -0,0 +1,3 @@ +#!/usr/bin/env bash + +hooks/tox.sh "converge" diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/system/hooks/destroy b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/system/hooks/destroy new file mode 100755 index 0000000..b4a3f8d --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/system/hooks/destroy @@ -0,0 +1,3 @@ +#!/usr/bin/env bash + +hooks/tox.sh "destroy" diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/system/hooks/lint b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/system/hooks/lint new file mode 100755 index 0000000..ef226a0 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/system/hooks/lint @@ -0,0 +1,3 @@ +#!/usr/bin/env bash + +hooks/tox.sh "lint" diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/system/hooks/molecule.rc b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/system/hooks/molecule.rc new file mode 100644 index 0000000..a15f7c3 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/system/hooks/molecule.rc @@ -0,0 +1,9 @@ + +TOX_ARGS= + +if [ -n "${TOX_SCENARIO}" ] +then + TOX_ARGS="--scenario-name ${TOX_SCENARIO}" +fi + +TOX_OPTS="-e ${TOX_ANSIBLE}" diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/system/hooks/test b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/system/hooks/test new file mode 100755 index 0000000..2869139 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/system/hooks/test @@ -0,0 +1,3 @@ +#!/usr/bin/env bash + +hooks/tox.sh "test" diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/system/hooks/tox.sh 
b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/system/hooks/tox.sh new file mode 100755 index 0000000..62bb777 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/system/hooks/tox.sh @@ -0,0 +1,26 @@ +#!/usr/bin/env bash + +. hooks/molecule.rc + +TOX_TEST="${1}" + +if [ -f "./collections.yml" ] +then + for collection in $(grep -v "#" collections.yml | grep "^ - name: " | awk -F ': ' '{print $2}') + do + collections_installed="$(ansible-galaxy collection list | grep ${collection} 2> /dev/null)" + + if [ -z "${collections_installed}" ] + then + echo "Install the required collection '${collection}'" + ansible-galaxy collection install ${collection} + else + collection_version=$(echo "${collections_installed}" | awk -F ' ' '{print $2}') + + echo "The required collection '${collection}' is installed in version ${collection_version}." + fi + done + echo "" +fi + +tox ${TOX_OPTS} -- molecule ${TOX_TEST} ${TOX_ARGS} diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/system/hooks/verify b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/system/hooks/verify new file mode 100755 index 0000000..5f436af --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/system/hooks/verify @@ -0,0 +1,3 @@ +#!/usr/bin/env bash + +hooks/tox.sh "verify" diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/system/molecule/configured/converge.yml b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/system/molecule/configured/converge.yml new file mode 100644 index 0000000..1384e18 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/system/molecule/configured/converge.yml @@ -0,0 +1,7 @@ +--- +- name: converge + hosts: instance + any_errors_fatal: false + + roles: + - role: bodsch.systemd.system diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/system/molecule/configured/group_vars/all/vars.yml b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/system/molecule/configured/group_vars/all/vars.yml new file mode 100644 index 0000000..fc1fa13 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/system/molecule/configured/group_vars/all/vars.yml @@ -0,0 +1,63 @@ +--- + +systemd_system: + log_level: "debug" # info + log_target: "" # auto + log_color: "false" # true + log_location: "" # false + log_time: "true" # false + dump_core: "false" # true + show_status: "false" # true + crash_change_vt: "" # false + crash_shell: "" # false + crash_reboot: "" # false + ctrl_alt_del_burst_action: "" # reboot-force + cpu_affinity: "" # + numa_policy: "" # default + numa_mask: "" # + runtime_watchdog_sec: "" # off + runtime_watchdog_pre_sec: "" # off + runtime_watchdog_pre_governor: "" # + reboot_watchdog_sec: "" # 10min + kexec_watchdog_sec: "" # off + watchdog_device: "" # + capability_bounding_set: "" # + no_new_privileges: "" # false + system_call_architectures: "" + timer_slack_nsec: "" + status_unit_format: "" # description + default_timer_accuracy_sec: "" # 1min + default_standard_output: "" # inherit + default_standard_error: "" # inherit + default_timeout_start_sec: "" # 90s + default_timeout_stop_sec: "" # 90s + default_timeout_abort_sec: "" + default_device_timeout_sec: "" # 90s + default_restart_sec: "" # 100ms + default_start_limit_interval_sec: "" # 10s + default_start_limit_burst: "" # 5 +
default_environment: "" + default_limit_cpu: "" + default_limit_fsize: "" + default_limit_data: "" + default_limit_stack: "" + default_limit_core: "" + default_limit_rss: "" + default_limit_nofile: "" + default_limit_as: "" + default_limit_nproc: "" + default_limit_memlock: "" + default_limit_locks: "" + default_limit_sigpending: "" + default_limit_msgqueue: "" + default_limit_nice: "" + default_limit_rtprio: "" + default_limit_rttime: "" + default_memory_pressure_threshold_sec: "" # 200ms + default_memory_pressure_watch: "" # auto + default_oom_policy: "" # stop + default_smack_process_label: "" + reload_limit_interval_sec: "" + reload_limit_burst: "" + +... diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/system/molecule/configured/molecule.yml b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/system/molecule/configured/molecule.yml new file mode 100644 index 0000000..67a0c0f --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/system/molecule/configured/molecule.yml @@ -0,0 +1,51 @@ +dependency: + name: galaxy + +driver: + name: docker + +platforms: + - name: instance + image: "ghcr.io/bodsch/docker-ansible/ansible-${DISTRIBUTION:-debian:12}" + command: ${MOLECULE_DOCKER_COMMAND:-""} + docker_host: "${DOCKER_HOST:-unix://run/docker.sock}" + privileged: true + pre_build_image: true + cgroupns_mode: host + mounts: + - source: /sys/fs/cgroup + target: /sys/fs/cgroup + type: bind + read_only: false + volumes: + - /var/lib/containerd + capabilities: + - SYS_ADMIN + tmpfs: + - /run + - /tmp + +provisioner: + name: ansible + ansible_args: + - --diff + - -v + config_options: + defaults: + deprecation_warnings: true + callback_result_format: yaml + callbacks_enabled: profile_tasks + gathering: smart + fact_caching_timeout: 320 +scenario: + test_sequence: + - destroy + - dependency + - create + - prepare + - converge + - idempotence + - verify + - destroy +verifier: + name: testinfra diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/system/molecule/configured/prepare.yml b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/system/molecule/configured/prepare.yml new file mode 100644 index 0000000..d680e48 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/system/molecule/configured/prepare.yml @@ -0,0 +1,49 @@ +--- + +- name: information + hosts: all + gather_facts: true + + pre_tasks: + - name: arch- / artixlinux + when: + - ansible_facts.distribution | lower == 'archlinux' or + ansible_facts.os_family | lower == 'artix linux' + block: + - name: update pacman system + ansible.builtin.command: | + pacman --refresh --sync --sysupgrade --noconfirm + register: pacman + changed_when: pacman.rc != 0 + failed_when: pacman.rc != 0 + + - name: create depends service + ansible.builtin.copy: + mode: 0755 + dest: /etc/init.d/net + content: | + #!/usr/bin/openrc-run + true + when: + - ansible_facts.os_family | lower == 'artix linux' + + - name: make sure python3-apt is installed (only debian based) + ansible.builtin.package: + name: + - python3-apt + state: present + when: + - ansible_facts.os_family | lower == 'debian' + + - name: update package cache + become: true + ansible.builtin.package: + update_cache: true + + - name: environment + ansible.builtin.debug: + msg: + - "os family : {{ ansible_facts.distribution }} ({{ ansible_facts.os_family }})" + - "distribution version : {{ ansible_facts.distribution_major_version 
}}" + - "ansible version : {{ ansible_version.full }}" + - "python version : {{ ansible_facts.python.version.major }}.{{ ansible_facts.python.version.minor }}" diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/system/molecule/configured/tests/test_default.py b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/system/molecule/configured/tests/test_default.py new file mode 100644 index 0000000..c0cfc33 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/system/molecule/configured/tests/test_default.py @@ -0,0 +1,146 @@ +# coding: utf-8 +from __future__ import unicode_literals + +import json +import os + +import pytest +import testinfra.utils.ansible_runner +from ansible.parsing.dataloader import DataLoader +from ansible.template import Templar + +HOST = "instance" + +testinfra_hosts = testinfra.utils.ansible_runner.AnsibleRunner( + os.environ["MOLECULE_INVENTORY_FILE"] +).get_hosts(HOST) + + +def pp_json(json_thing, sort=True, indents=2): + if type(json_thing) is str: + print(json.dumps(json.loads(json_thing), sort_keys=sort, indent=indents)) + else: + print(json.dumps(json_thing, sort_keys=sort, indent=indents)) + return None + + +def base_directory(): + """ """ + cwd = os.getcwd() + + if "group_vars" in os.listdir(cwd): + directory = "../.." + molecule_directory = "." + else: + directory = "." + molecule_directory = f"molecule/{os.environ.get('MOLECULE_SCENARIO_NAME')}" + + return directory, molecule_directory + + +def read_ansible_yaml(file_name, role_name): + """ """ + read_file = None + + for e in ["yml", "yaml"]: + test_file = f"{file_name}.{e}" + if os.path.isfile(test_file): + read_file = test_file + break + + return f"file={read_file} name={role_name}" + + +@pytest.fixture() +def get_vars(host): + """ + parse ansible variables + - defaults/main.yml + - vars/main.yml + - vars/${DISTRIBUTION}.yaml + - molecule/${MOLECULE_SCENARIO_NAME}/group_vars/all/vars.yml + """ + base_dir, molecule_dir = base_directory() + distribution = host.system_info.distribution + operation_system = None + + if distribution in ["debian", "ubuntu"]: + operation_system = "debian" + elif distribution in ["redhat", "ol", "centos", "rocky", "almalinux"]: + operation_system = "redhat" + elif distribution in ["arch", "artix"]: + operation_system = f"{distribution}linux" + + # print(" -> {} / {}".format(distribution, os)) + # print(" -> {}".format(base_dir)) + + file_defaults = read_ansible_yaml(f"{base_dir}/defaults/main", "role_defaults") + file_vars = read_ansible_yaml(f"{base_dir}/vars/main", "role_vars") + file_distibution = read_ansible_yaml( + f"{base_dir}/vars/{operation_system}", "role_distibution" + ) + file_molecule = read_ansible_yaml( + f"{molecule_dir}/group_vars/all/vars", "test_vars" + ) + # file_host_molecule = read_ansible_yaml("{}/host_vars/{}/vars".format(base_dir, HOST), "host_vars") + + defaults_vars = ( + host.ansible("include_vars", file_defaults) + .get("ansible_facts") + .get("role_defaults") + ) + vars_vars = ( + host.ansible("include_vars", file_vars).get("ansible_facts").get("role_vars") + ) + distibution_vars = ( + host.ansible("include_vars", file_distibution) + .get("ansible_facts") + .get("role_distibution") + ) + molecule_vars = ( + host.ansible("include_vars", file_molecule) + .get("ansible_facts") + .get("test_vars") + ) + # host_vars = host.ansible("include_vars", file_host_molecule).get("ansible_facts").get("host_vars") + + ansible_vars = defaults_vars + ansible_vars.update(vars_vars) + 
ansible_vars.update(distibution_vars) + ansible_vars.update(molecule_vars) + # ansible_vars.update(host_vars) + + templar = Templar(loader=DataLoader(), variables=ansible_vars) + result = templar.template(ansible_vars, fail_on_undefined=False) + + return result + + +def local_facts(host): + """ + return local facts + """ + return host.ansible("setup").get("ansible_facts").get("ansible_local").get("system") + + +@pytest.mark.parametrize( + "directories", + [ + "/etc/systemd", + ], +) +def test_directories(host, directories): + d = host.file(directories) + assert d.is_directory + + +@pytest.mark.parametrize( + "files", + [ + "/etc/systemd/system.conf", + ], +) +def test_systemd_files(host, files): + """ """ + d = host.file(files) + assert d.is_file diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/system/molecule/default/converge.yml b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/system/molecule/default/converge.yml new file mode 100644 index 0000000..1384e18 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/system/molecule/default/converge.yml @@ -0,0 +1,7 @@ +--- +- name: converge + hosts: instance + any_errors_fatal: false + + roles: + - role: bodsch.systemd.system diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/system/molecule/default/group_vars/all/vars.yml b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/system/molecule/default/group_vars/all/vars.yml new file mode 100644 index 0000000..c81cf5b --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/system/molecule/default/group_vars/all/vars.yml @@ -0,0 +1,3 @@ +--- + +... diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/system/molecule/default/molecule.yml b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/system/molecule/default/molecule.yml new file mode 100644 index 0000000..67a0c0f --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/system/molecule/default/molecule.yml @@ -0,0 +1,51 @@ +dependency: + name: galaxy + +driver: + name: docker + +platforms: + - name: instance + image: "ghcr.io/bodsch/docker-ansible/ansible-${DISTRIBUTION:-debian:12}" + command: ${MOLECULE_DOCKER_COMMAND:-""} + docker_host: "${DOCKER_HOST:-unix://run/docker.sock}" + privileged: true + pre_build_image: true + cgroupns_mode: host + mounts: + - source: /sys/fs/cgroup + target: /sys/fs/cgroup + type: bind + read_only: false + volumes: + - /var/lib/containerd + capabilities: + - SYS_ADMIN + tmpfs: + - /run + - /tmp + +provisioner: + name: ansible + ansible_args: + - --diff + - -v + config_options: + defaults: + deprecation_warnings: true + callback_result_format: yaml + callbacks_enabled: profile_tasks + gathering: smart + fact_caching_timeout: 320 +scenario: + test_sequence: + - destroy + - dependency + - create + - prepare + - converge + - idempotence + - verify + - destroy +verifier: + name: testinfra diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/system/molecule/default/prepare.yml b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/system/molecule/default/prepare.yml new file mode 100644 index 0000000..d680e48 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/system/molecule/default/prepare.yml @@ -0,0 +1,49 @@ +--- + +- name: information + hosts: all + gather_facts: 
true + + pre_tasks: + - name: arch- / artixlinux + when: + - ansible_facts.distribution | lower == 'archlinux' or + ansible_facts.os_family | lower == 'artix linux' + block: + - name: update pacman system + ansible.builtin.command: | + pacman --refresh --sync --sysupgrade --noconfirm + register: pacman + changed_when: pacman.rc != 0 + failed_when: pacman.rc != 0 + + - name: create depends service + ansible.builtin.copy: + mode: 0755 + dest: /etc/init.d/net + content: | + #!/usr/bin/openrc-run + true + when: + - ansible_facts.os_family | lower == 'artix linux' + + - name: make sure python3-apt is installed (only debian based) + ansible.builtin.package: + name: + - python3-apt + state: present + when: + - ansible_facts.os_family | lower == 'debian' + + - name: update package cache + become: true + ansible.builtin.package: + update_cache: true + + - name: environment + ansible.builtin.debug: + msg: + - "os family : {{ ansible_facts.distribution }} ({{ ansible_facts.os_family }})" + - "distribution version : {{ ansible_facts.distribution_major_version }}" + - "ansible version : {{ ansible_version.full }}" + - "python version : {{ ansible_facts.python.version.major }}.{{ ansible_facts.python.version.minor }}" diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/system/molecule/default/tests/test_default.py b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/system/molecule/default/tests/test_default.py new file mode 100644 index 0000000..c0cfc33 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/system/molecule/default/tests/test_default.py @@ -0,0 +1,146 @@ +# coding: utf-8 +from __future__ import unicode_literals + +import json +import os + +import pytest +import testinfra.utils.ansible_runner +from ansible.parsing.dataloader import DataLoader +from ansible.template import Templar + +HOST = "instance" + +testinfra_hosts = testinfra.utils.ansible_runner.AnsibleRunner( + os.environ["MOLECULE_INVENTORY_FILE"] +).get_hosts(HOST) + + +def pp_json(json_thing, sort=True, indents=2): + if type(json_thing) is str: + print(json.dumps(json.loads(json_thing), sort_keys=sort, indent=indents)) + else: + print(json.dumps(json_thing, sort_keys=sort, indent=indents)) + return None + + +def base_directory(): + """ """ + cwd = os.getcwd() + + if "group_vars" in os.listdir(cwd): + directory = "../.." + molecule_directory = "." + else: + directory = "." 
+ molecule_directory = f"molecule/{os.environ.get('MOLECULE_SCENARIO_NAME')}" + + return directory, molecule_directory + + +def read_ansible_yaml(file_name, role_name): + """ """ + read_file = None + + for e in ["yml", "yaml"]: + test_file = f"{file_name}.{e}" + if os.path.isfile(test_file): + read_file = test_file + break + + return f"file={read_file} name={role_name}" + + +@pytest.fixture() +def get_vars(host): + """ + parse ansible variables + - defaults/main.yml + - vars/main.yml + - vars/${DISTRIBUTION}.yaml + - molecule/${MOLECULE_SCENARIO_NAME}/group_vars/all/vars.yml + """ + base_dir, molecule_dir = base_directory() + distribution = host.system_info.distribution + operation_system = None + + if distribution in ["debian", "ubuntu"]: + operation_system = "debian" + elif distribution in ["redhat", "ol", "centos", "rocky", "almalinux"]: + operation_system = "redhat" + elif distribution in ["arch", "artix"]: + operation_system = f"{distribution}linux" + + # print(" -> {} / {}".format(distribution, os)) + # print(" -> {}".format(base_dir)) + + file_defaults = read_ansible_yaml(f"{base_dir}/defaults/main", "role_defaults") + file_vars = read_ansible_yaml(f"{base_dir}/vars/main", "role_vars") + file_distibution = read_ansible_yaml( + f"{base_dir}/vars/{operation_system}", "role_distibution" + ) + file_molecule = read_ansible_yaml( + f"{molecule_dir}/group_vars/all/vars", "test_vars" + ) + # file_host_molecule = read_ansible_yaml("{}/host_vars/{}/vars".format(base_dir, HOST), "host_vars") + + defaults_vars = ( + host.ansible("include_vars", file_defaults) + .get("ansible_facts") + .get("role_defaults") + ) + vars_vars = ( + host.ansible("include_vars", file_vars).get("ansible_facts").get("role_vars") + ) + distibution_vars = ( + host.ansible("include_vars", file_distibution) + .get("ansible_facts") + .get("role_distibution") + ) + molecule_vars = ( + host.ansible("include_vars", file_molecule) + .get("ansible_facts") + .get("test_vars") + ) + # host_vars = host.ansible("include_vars", file_host_molecule).get("ansible_facts").get("host_vars") + + ansible_vars = defaults_vars + ansible_vars.update(vars_vars) + ansible_vars.update(distibution_vars) + ansible_vars.update(molecule_vars) + # ansible_vars.update(host_vars) + + templar = Templar(loader=DataLoader(), variables=ansible_vars) + result = templar.template(ansible_vars, fail_on_undefined=False) + + return result + + +def local_facts(host): + """ + return local facts + """ + return host.ansible("setup").get("ansible_facts").get("ansible_local").get("system") + + +@pytest.mark.parametrize( + "directories", + [ + "/etc/systemd", + ], +) +def test_directories(host, directories): + d = host.file(directories) + assert d.is_directory + + +@pytest.mark.parametrize( + "files", + [ + "/etc/systemd/system.conf", + ], +) +def test_systemd_files(host, files): + """ """ + d = host.file(files) + assert d.is_file diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/system/tasks/main.yml b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/system/tasks/main.yml new file mode 100644 index 0000000..5d67cb7 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/system/tasks/main.yml @@ -0,0 +1,15 @@ +--- + +- name: merge systemd system configuration between defaults and custom + ansible.builtin.set_fact: + systemd_system: "{{ systemd_defaults_system | combine(systemd_system, recursive=True) }}" + +- name: create systemd system configuration + ansible.builtin.template: + 
src: systemd/system.conf.j2 + dest: /etc/systemd/system.conf + mode: 0644 + owner: root + group: root + +... diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/system/templates/systemd/system.conf.j2 b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/system/templates/systemd/system.conf.j2 new file mode 100644 index 0000000..c32aff7 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/system/templates/systemd/system.conf.j2 @@ -0,0 +1,270 @@ +#jinja2: trim_blocks: True, lstrip_blocks: True +# {{ ansible_managed }} +# This file is part of systemd. +# +# systemd is free software; you can redistribute it and/or modify it under the +# terms of the GNU Lesser General Public License as published by the Free +# Software Foundation; either version 2.1 of the License, or (at your option) +# any later version. +# +# Entries in this file show the compile time defaults. Local configuration +# should be created by either modifying this file, or by creating "drop-ins" in +# the system.conf.d/ subdirectory. The latter is generally recommended. +# Defaults can be restored by simply deleting this file and all drop-ins. +# +# Use 'systemd-analyze cat-config systemd/system.conf' to display the full config. +# +# See systemd-system.conf(5) for details. + +[Manager] +{% if systemd_system.log_level is defined and + systemd_system.log_level | string | length > 0 and + systemd_system.log_level in ["emerg", "alert", "crit", "err", "warning", "notice", "info", "debug"] %} +LogLevel = {{ systemd_system.log_level }} +{% endif %} +{% if systemd_system.log_target is defined and + systemd_system.log_target | string | length > 0 %} +LogTarget = {{ systemd_system.log_target }} +{% endif %} +{% if systemd_system.log_color is defined and + systemd_system.log_color | string | length > 0 %} + {% if systemd_system.log_color | bodsch.core.type == "bool" %} +LogColor = {{ systemd_system.log_color | bool | bodsch.core.config_bool(true_as='yes', false_as='no') }} + {% endif %} +{% endif %} +{% if systemd_system.log_location is defined and + systemd_system.log_location | string | length > 0 %} + {% if systemd_system.log_location | bodsch.core.type == "bool" %} +LogLocation = {{ systemd_system.log_location | bool | bodsch.core.config_bool(true_as='yes', false_as='no') }} + {% endif %} +{% endif %} +{% if systemd_system.log_time is defined and + systemd_system.log_time | string | length > 0 %} + {% if systemd_system.log_time | bodsch.core.type == "bool" %} +LogTime = {{ systemd_system.log_time | bool | bodsch.core.config_bool(true_as='yes', false_as='no') }} + {% endif %} +{% endif %} +{% if systemd_system.dump_core is defined and + systemd_system.dump_core | string | length > 0 %} +DumpCore = {{ systemd_system.dump_core | bool | bodsch.core.config_bool(true_as='yes', false_as='no') }} +{% endif %} +{% if systemd_system.show_status is defined and + systemd_system.show_status | string | length > 0 %} +ShowStatus = {{ systemd_system.show_status | bool | bodsch.core.config_bool(true_as='yes', false_as='no') }} +{% endif %} +{% if systemd_system.crash_change_vt is defined and + systemd_system.crash_change_vt | string | length > 0 %} +CrashChangeVT = {{ systemd_system.crash_change_vt | bool | bodsch.core.config_bool(true_as='yes', false_as='no') }} +{% endif %} +{% if systemd_system.crash_shell is defined and + systemd_system.crash_shell | string | length > 0 %} +CrashShell = {{ systemd_system.crash_shell | bool | 
bodsch.core.config_bool(true_as='yes', false_as='no') }} +{% endif %} +{% if systemd_system.crash_reboot is defined and + systemd_system.crash_reboot | string | length > 0 %} +CrashReboot = {{ systemd_system.crash_reboot | bool | bodsch.core.config_bool(true_as='yes', false_as='no') }} +{% endif %} +{% if systemd_system.ctrl_alt_del_burst_action is defined and + systemd_system.ctrl_alt_del_burst_action | string | length > 0 %} +CtrlAltDelBurstAction = {{ systemd_system.ctrl_alt_del_burst_action }} +{% endif %} +{% if systemd_system.cpu_affinity is defined and + systemd_system.cpu_affinity | string | length > 0 %} +CPUAffinity = {{ systemd_system.cpu_affinity }} +{% endif %} +{% if systemd_system.numa_policy is defined and + systemd_system.numa_policy | string | length > 0 %} +NUMAPolicy = {{ systemd_system.numa_policy }} +{% endif %} +{% if systemd_system.numa_mask is defined and + systemd_system.numa_mask | string | length > 0 %} +NUMAMask = {{ systemd_system.numa_mask }} +{% endif %} +{% if systemd_system.runtime_watchdog_sec is defined and + systemd_system.runtime_watchdog_sec | string | length > 0 %} +RuntimeWatchdogSec = {{ systemd_system.runtime_watchdog_sec }} +{% endif %} +{% if systemd_system.runtime_watchdog_pre_sec is defined and + systemd_system.runtime_watchdog_pre_sec | string | length > 0 %} +RuntimeWatchdogPreSec = {{ systemd_system.runtime_watchdog_pre_sec }} +{% endif %} +{% if systemd_system.runtime_watchdog_pre_governor is defined and + systemd_system.runtime_watchdog_pre_governor | string | length > 0 %} +RuntimeWatchdogPreGovernor = {{ systemd_system.runtime_watchdog_pre_governor }} +{% endif %} +{% if systemd_system.reboot_watchdog_sec is defined and + systemd_system.reboot_watchdog_sec | string | length > 0 %} +RebootWatchdogSec = {{ systemd_system.reboot_watchdog_sec }} +{% endif %} +{% if systemd_system.kexec_watchdog_sec is defined and + systemd_system.kexec_watchdog_sec | string | length > 0 %} +KExecWatchdogSec = {{ systemd_system.kexec_watchdog_sec }} +{% endif %} +{% if systemd_system.watchdog_device is defined and + systemd_system.watchdog_device | string | length > 0 %} +WatchdogDevice = {{ systemd_system.watchdog_device }} +{% endif %} +{% if systemd_system.capability_bounding_set is defined and + systemd_system.capability_bounding_set | string | length > 0 %} +CapabilityBoundingSet = {{ systemd_system.capability_bounding_set }} +{% endif %} +{% if systemd_system.no_new_privileges is defined and + systemd_system.no_new_privileges | string | length > 0 %} +NoNewPrivileges = {{ systemd_system.no_new_privileges | bool | bodsch.core.config_bool(true_as='yes', false_as='no') }} +{% endif %} +{% if systemd_system.system_call_architectures is defined and + systemd_system.system_call_architectures | string | length > 0 %} +SystemCallArchitectures = {{ systemd_system.system_call_architectures }} +{% endif %} +{% if systemd_system.timer_slack_nsec is defined and + systemd_system.timer_slack_nsec | string | length > 0 %} +TimerSlackNSec = {{ systemd_system.timer_slack_nsec }} +{% endif %} +{% if systemd_system.status_unit_format is defined and + systemd_system.status_unit_format | string | length > 0 %} +StatusUnitFormat = {{ systemd_system.status_unit_format }} +{% endif %} +{% if systemd_system.default_timer_accuracy_sec is defined and + systemd_system.default_timer_accuracy_sec | string | length > 0 %} +DefaultTimerAccuracySec = {{ systemd_system.default_timer_accuracy_sec }} +{% endif %} +{% if systemd_system.default_standard_output is defined and + 
systemd_system.default_standard_output | string | length > 0 %} +DefaultStandardOutput = {{ systemd_system.default_standard_output }} +{% endif %} +{% if systemd_system.default_standard_error is defined and + systemd_system.default_standard_error | string | length > 0 %} +DefaultStandardError = {{ systemd_system.default_standard_error }} +{% endif %} +{% if systemd_system.default_timeout_start_sec is defined and + systemd_system.default_timeout_start_sec | string | length > 0 %} +DefaultTimeoutStartSec = {{ systemd_system.default_timeout_start_sec }} +{% endif %} +{% if systemd_system.default_timeout_stop_sec is defined and + systemd_system.default_timeout_stop_sec | string | length > 0 %} +DefaultTimeoutStopSec = {{ systemd_system.default_timeout_stop_sec }} +{% endif %} +{% if systemd_system.default_timeout_abort_sec is defined and + systemd_system.default_timeout_abort_sec | string | length > 0 %} +DefaultTimeoutAbortSec = {{ systemd_system.default_timeout_abort_sec }} +{% endif %} +{% if systemd_system.default_device_timeout_sec is defined and + systemd_system.default_device_timeout_sec | string | length > 0 %} +DefaultDeviceTimeoutSec = {{ systemd_system.default_device_timeout_sec }} +{% endif %} +{% if systemd_system.default_restart_sec is defined and + systemd_system.default_restart_sec | string | length > 0 %} +DefaultRestartSec = {{ systemd_system.default_restart_sec }} +{% endif %} +{% if systemd_system.default_start_limit_interval_sec is defined and + systemd_system.default_start_limit_interval_sec | string | length > 0 %} +DefaultStartLimitIntervalSec = {{ systemd_system.default_start_limit_interval_sec }} +{% endif %} +{% if systemd_system.default_start_limit_burst is defined and + systemd_system.default_start_limit_burst | string | length > 0 %} +DefaultStartLimitBurst = {{ systemd_system.default_start_limit_burst }} +{% endif %} +{% if systemd_system.default_environment is defined and + systemd_system.default_environment | string | length > 0 %} +DefaultEnvironment = {{ systemd_system.default_environment }} +{% endif %} +{% if systemd_system.default_limit_cpu is defined and + systemd_system.default_limit_cpu | string | length > 0 %} +DefaultLimitCPU = {{ systemd_system.default_limit_cpu }} +{% endif %} +{% if systemd_system.default_limit_fsize is defined and + systemd_system.default_limit_fsize | string | length > 0 %} +DefaultLimitFSIZE = {{ systemd_system.default_limit_fsize }} +{% endif %} +{% if systemd_system.default_limit_data is defined and + systemd_system.default_limit_data | string | length > 0 %} +DefaultLimitDATA = {{ systemd_system.default_limit_data }} +{% endif %} +{% if systemd_system.default_limit_stack is defined and + systemd_system.default_limit_stack | string | length > 0 %} +DefaultLimitSTACK = {{ systemd_system.default_limit_stack }} +{% endif %} +{% if systemd_system.default_limit_core is defined and + systemd_system.default_limit_core | string | length > 0 %} +DefaultLimitCORE = {{ systemd_system.default_limit_core }} +{% endif %} +{% if systemd_system.default_limit_rss is defined and + systemd_system.default_limit_rss | string | length > 0 %} +DefaultLimitRSS = {{ systemd_system.default_limit_rss }} +{% endif %} +{% if systemd_system.default_limit_nofile is defined and + systemd_system.default_limit_nofile | string | length > 0 %} +DefaultLimitNOFILE = {{ systemd_system.default_limit_nofile }} +{% endif %} +{% if systemd_system.default_limit_as is defined and + systemd_system.default_limit_as | string | length > 0 %} +DefaultLimitAS = {{ 
systemd_system.default_limit_as }} +{% endif %} +{% if systemd_system.default_limit_nproc is defined and + systemd_system.default_limit_nproc | string | length > 0 %} +DefaultLimitNPROC = {{ systemd_system.default_limit_nproc }} +{% endif %} +{% if systemd_system.default_limit_memlock is defined and + systemd_system.default_limit_memlock | string | length > 0 %} +DefaultLimitMEMLOCK = {{ systemd_system.default_limit_memlock }} +{% endif %} +{% if systemd_system.default_limit_locks is defined and + systemd_system.default_limit_locks | string | length > 0 %} +DefaultLimitLOCKS = {{ systemd_system.default_limit_locks }} +{% endif %} +{% if systemd_system.default_limit_sigpending is defined and + systemd_system.default_limit_sigpending | string | length > 0 %} +DefaultLimitSIGPENDING = {{ systemd_system.default_limit_sigpending }} +{% endif %} +{% if systemd_system.default_limit_msgqueue is defined and + systemd_system.default_limit_msgqueue | string | length > 0 %} +DefaultLimitMSGQUEUE = {{ systemd_system.default_limit_msgqueue }} +{% endif %} +{% if systemd_system.default_limit_nice is defined and + systemd_system.default_limit_nice | string | length > 0 %} +DefaultLimitNICE = {{ systemd_system.default_limit_nice }} +{% endif %} +{% if systemd_system.default_limit_rtprio is defined and + systemd_system.default_limit_rtprio | string | length > 0 %} +DefaultLimitRTPRIO = {{ systemd_system.default_limit_rtprio }} +{% endif %} +{% if systemd_system.default_limit_rttime is defined and + systemd_system.default_limit_rttime | string | length > 0 %} +DefaultLimitRTTIME = {{ systemd_system.default_limit_rttime }} +{% endif %} +{% if systemd_system.default_memory_pressure_threshold_sec is defined and + systemd_system.default_memory_pressure_threshold_sec | string | length > 0 %} +DefaultMemoryPressureThresholdSec = {{ systemd_system.default_memory_pressure_threshold_sec }} +{% endif %} +{% if systemd_system.default_memory_pressure_watch is defined and + systemd_system.default_memory_pressure_watch | string | length > 0 %} +DefaultMemoryPressureWatch = {{ systemd_system.default_memory_pressure_watch }} +{% endif %} +{% if systemd_system.default_oom_policy is defined and + systemd_system.default_oom_policy | string | length > 0 %} +DefaultOOMPolicy = {{ systemd_system.default_oom_policy }} +{% endif %} +{% if systemd_system.default_smack_process_label is defined and + systemd_system.default_smack_process_label | string | length > 0 %} +DefaultSmackProcessLabel = {{ systemd_system.default_smack_process_label }} +{% endif %} +{% if systemd_system.reload_limit_interval_sec is defined and + systemd_system.reload_limit_interval_sec | string | length > 0 %} +ReloadLimitIntervalSec = {{ systemd_system.reload_limit_interval_sec }} +{% endif %} +{% if systemd_system.reload_limit_burst is defined and + systemd_system.reload_limit_burst | string | length > 0 %} +ReloadLimitBurst = {{ systemd_system.reload_limit_burst }} +{% endif
%} diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/system/vars/main.yml b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/system/vars/main.yml new file mode 100644 index 0000000..e8bc510 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/system/vars/main.yml @@ -0,0 +1,63 @@ +--- + +systemd_defaults_system: + log_level: "" # info + log_target: "" # auto + log_color: "" # true + log_location: "" # false + log_time: "" # false + dump_core: "" # true + show_status: "" # true + crash_change_vt: "" # false + crash_shell: "" # false + crash_reboot: "" # false + ctrl_alt_del_burst_action: "" # reboot-force + cpu_affinity: "" # + numa_policy: "" # default + numa_mask: "" # + runtime_watchdog_sec: "" # off + runtime_watchdog_pre_sec: "" # off + runtime_watchdog_pre_governor: "" # + reboot_watchdog_sec: "" # 10min + kexec_watchdog_sec: "" # off + watchdog_device: "" # + capability_bounding_set: "" # + no_new_privileges: "" # false + system_call_architectures: "" + timer_slack_nsec: "" + status_unit_format: "" # description + default_timer_accuracy_sec: "" # 1min + default_standard_output: "" # inherit + default_standard_error: "" # inherit + default_timeout_start_sec: "" # 90s + default_timeout_stop_sec: "" # 90s + default_timeout_abort_sec: "" + default_device_timeout_sec: "" # 90s + default_restart_sec: "" # 100ms + default_start_limit_interval_sec: "" # 10s + default_start_limit_burst: "" # 5 + default_environment: "" + default_limit_cpu: "" + default_limit_fsize: "" + default_limit_data: "" + default_limit_stack: "" + default_limit_core: "" + default_limit_rss: "" + default_limit_nofile: "" + default_limit_as: "" + default_limit_nproc: "" + default_limit_memlock: "" + default_limit_locks: "" + default_limit_sigpending: "" + default_limit_msgqueue: "" + default_limit_nice: "" + default_limit_rtprio: "" + default_limit_rttime: "" + default_memory_pressure_threshold_sec: "" # 200ms + default_memory_pressure_watch: "" # auto + default_oom_policy: "" # stop + default_smack_process_label: "" + reload_limit_interval_sec: "" + reload_limit_burst: "" + +...
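The system role above is driven entirely by the `systemd_system` dictionary: `tasks/main.yml` merges it over `systemd_defaults_system`, and the template only emits a `[Manager]` directive when the corresponding key carries a non-empty value. A minimal sketch of a consuming playbook (the host pattern and the two override values are assumptions for illustration, not part of this change set):

```yaml
---
# Hypothetical playbook: every key left at its default "" is skipped by the
# template's `is defined and ... | string | length > 0` guards, so the
# rendered /etc/systemd/system.conf gains exactly two directives.
- name: configure the systemd manager
  hosts: all          # assumed host pattern
  become: true

  vars:
    systemd_system:
      log_level: "info"                  # rendered as: LogLevel = info
      default_timeout_stop_sec: "30s"    # rendered as: DefaultTimeoutStopSec = 30s

  roles:
    - role: bodsch.systemd.system
```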
diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/systemd_unit/.ansible-lint b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/systemd_unit/.ansible-lint new file mode 100644 index 0000000..5343e85 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/systemd_unit/.ansible-lint @@ -0,0 +1,6 @@ +--- + +skip_list: + - name[casing] + - name[template] + - syntax-check[specific] diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/systemd_unit/.editorconfig b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/systemd_unit/.editorconfig new file mode 100644 index 0000000..898cdbd --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/systemd_unit/.editorconfig @@ -0,0 +1,23 @@ +# EditorConfig helps developers define and maintain consistent +# coding styles between different editors and IDEs +# https://editorconfig.org/ + + +root = true + +[*] +indent_style = space +indent_size = 2 + +end_of_line = lf +charset = utf-8 +trim_trailing_whitespace = true +insert_final_newline = true +max_line_length = 100 + +[*.py] +indent_style = space +indent_size = 4 + +[*.md] +trim_trailing_whitespace = false diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/systemd_unit/.flake8 b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/systemd_unit/.flake8 new file mode 100644 index 0000000..1962f7e --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/systemd_unit/.flake8 @@ -0,0 +1,19 @@ +[flake8] + +# E221 multiple spaces before operator +# E251 unexpected spaces around keyword / parameter equals + +ignore = E221,E251 + +exclude = + # No need to traverse our git directory + .git, + # There's no value in checking cache directories + __pycache__, + .tox + +# E203: https://github.com/python/black/issues/315 +# ignore = D,E741,W503,W504,H,E501,E203 + +max-line-length = 195 + diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/systemd_unit/.gitignore b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/systemd_unit/.gitignore new file mode 100644 index 0000000..3d35e6a --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/systemd_unit/.gitignore @@ -0,0 +1,6 @@ +.tox +.galaxy_install_info +*kate-swp +__pycache__ +.cache +.directory diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/systemd_unit/.yamllint b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/systemd_unit/.yamllint new file mode 100644 index 0000000..e3f52af --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/systemd_unit/.yamllint @@ -0,0 +1,36 @@ +--- +# Based on ansible-lint config +extends: default + +rules: + braces: + max-spaces-inside: 1 + level: error + brackets: + max-spaces-inside: 1 + level: error + colons: + max-spaces-after: -1 + level: error + commas: + max-spaces-after: -1 + level: error + comments: disable + comments-indentation: disable + document-start: disable + empty-lines: + max: 3 + level: error + hyphens: + level: error + indentation: + spaces: 2 + key-duplicates: enable + line-length: + max: 195 + level: warning + new-line-at-end-of-file: disable + new-lines: + type: unix + trailing-spaces: disable + truthy: disable diff --git 
a/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/systemd_unit/CONTRIBUTING.md b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/systemd_unit/CONTRIBUTING.md new file mode 100644 index 0000000..e3cd4cc --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/systemd_unit/CONTRIBUTING.md @@ -0,0 +1,31 @@ +Contributing +============ +If you want to contribute to a project and make it better, your help is very welcome. +Contributing is also a great way to learn more about social coding on Github, new technologies +and their ecosystems and how to make constructive, helpful bug reports, feature requests and the +noblest of all contributions: a good, clean pull request. + +### How to make a clean pull request + +Look for a project's contribution instructions. If there are any, follow them. + +- Create a personal fork of the project on Github. +- Clone the fork on your local machine. Your remote repo on Github is called `origin`. +- Add the original repository as a remote called `upstream`. +- If you created your fork a while ago be sure to pull upstream changes into your local repository. +- Create a new branch to work on! Branch from `develop` if it exists, else from `master`. +- Implement/fix your feature, comment your code. +- Follow the code style of the project, including indentation. +- If the project has tests run them! +- Write or adapt tests as needed. +- Add or change the documentation as needed. +- Squash your commits into a single commit. Create a new branch if necessary. +- Push your branch to your fork on Github, the remote `origin`. +- From your fork open a pull request in the correct branch. Target the project's `develop` branch if there is one, else go for `master`! +- If the maintainer requests further changes just push them to your branch. The PR will be updated automatically. +- Once the pull request is approved and merged you can pull the changes from `upstream` to your local repo and delete + your extra branch(es). + +And last but not least: Always write your commit messages in the present tense. +Your commit message should describe what the commit, when applied, does to the +code – not what you did to the code. diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/systemd_unit/LICENSE b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/systemd_unit/LICENSE new file mode 100644 index 0000000..8c8472f --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/systemd_unit/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity.
+ + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright 2020-2021 Bodo Schulz + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
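The `hooks/tox.sh` script added earlier for the `system` role, and repeated below for `systemd_unit`, resolves collection dependencies before each molecule run: it greps an optional `collections.yml` beside the Makefile for indented `- name:` entries and installs anything that `ansible-galaxy collection list` does not already report. A sketch of the file shape that loop expects (the listed collections are assumptions):

```yaml
---
# Hypothetical collections.yml consumed by hooks/tox.sh. Comment lines are
# dropped by `grep -v "#"`; the loop then matches the "- name: " entries and
# takes everything after ": ", so each requirement sits on its own line.
collections:
  - name: bodsch.core
  - name: bodsch.systemd
```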
diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/systemd_unit/Makefile b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/systemd_unit/Makefile new file mode 100644 index 0000000..3abaf48 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/systemd_unit/Makefile @@ -0,0 +1,22 @@ +# +export TOX_SCENARIO ?= default +export TOX_ANSIBLE ?= ansible_6.1 + +.PHONY: converge destroy verify test lint + +default: converge + +converge: + @hooks/converge + +destroy: + @hooks/destroy + +verify: + @hooks/verify + +test: + @hooks/test + +lint: + @hooks/lint diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/systemd_unit/README.md b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/systemd_unit/README.md new file mode 100644 index 0000000..e69de29 diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/systemd_unit/defaults/main.yml b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/systemd_unit/defaults/main.yml new file mode 100644 index 0000000..83b94ed --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/systemd_unit/defaults/main.yml @@ -0,0 +1,5 @@ +--- + +systemd_unit: [] + +... diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/systemd_unit/hooks/converge b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/systemd_unit/hooks/converge new file mode 100755 index 0000000..0c50932 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/systemd_unit/hooks/converge @@ -0,0 +1,3 @@ +#!/usr/bin/env bash + +hooks/tox.sh "converge" diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/systemd_unit/hooks/destroy b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/systemd_unit/hooks/destroy new file mode 100755 index 0000000..b4a3f8d --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/systemd_unit/hooks/destroy @@ -0,0 +1,3 @@ +#!/usr/bin/env bash + +hooks/tox.sh "destroy" diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/systemd_unit/hooks/lint b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/systemd_unit/hooks/lint new file mode 100755 index 0000000..ef226a0 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/systemd_unit/hooks/lint @@ -0,0 +1,3 @@ +#!/usr/bin/env bash + +hooks/tox.sh "lint" diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/systemd_unit/hooks/molecule.rc b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/systemd_unit/hooks/molecule.rc new file mode 100644 index 0000000..a15f7c3 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/systemd_unit/hooks/molecule.rc @@ -0,0 +1,9 @@ + +TOX_ARGS= + +if [ -n "${TOX_SCENARIO}" ] +then + TOX_ARGS="--scenario-name ${TOX_SCENARIO}" +fi + +TOX_OPTS="-e ${TOX_ANSIBLE}" diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/systemd_unit/hooks/test b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/systemd_unit/hooks/test new file mode 100755 index 0000000..2869139 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/systemd_unit/hooks/test @@ -0,0 +1,3 @@ +#!/usr/bin/env 
bash + +hooks/tox.sh "test" diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/systemd_unit/hooks/tox.sh b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/systemd_unit/hooks/tox.sh new file mode 100755 index 0000000..62bb777 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/systemd_unit/hooks/tox.sh @@ -0,0 +1,26 @@ +#!/usr/bin/env bash + +. hooks/molecule.rc + +TOX_TEST="${1}" + +if [ -f "./collections.yml" ] +then + for collection in $(grep -v "#" collections.yml | grep "^ - name: " | awk -F ': ' '{print $2}') + do + collections_installed="$(ansible-galaxy collection list | grep ${collection} 2> /dev/null)" + + if [ -z "${collections_installed}" ] + then + echo "Install the required collection '${collection}'" + ansible-galaxy collection install ${collection} + else + collection_version=$(echo "${collections_installed}" | awk -F ' ' '{print $2}') + + echo "The required collection '${collection}' is installed in version ${collection_version}." + fi + done + echo "" +fi + +tox ${TOX_OPTS} -- molecule ${TOX_TEST} ${TOX_ARGS} diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/systemd_unit/hooks/verify b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/systemd_unit/hooks/verify new file mode 100755 index 0000000..5f436af --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/systemd_unit/hooks/verify @@ -0,0 +1,3 @@ +#!/usr/bin/env bash + +hooks/tox.sh "verify" diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/systemd_unit/molecule/default/converge.yml b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/systemd_unit/molecule/default/converge.yml new file mode 100644 index 0000000..fa8cdb5 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/systemd_unit/molecule/default/converge.yml @@ -0,0 +1,8 @@ +--- + +- name: converge + hosts: instance + any_errors_fatal: false + + roles: + - role: bodsch.systemd.systemd_unit diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/systemd_unit/molecule/default/group_vars/all/vars.yml b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/systemd_unit/molecule/default/group_vars/all/vars.yml new file mode 100644 index 0000000..e13386b --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/systemd_unit/molecule/default/group_vars/all/vars.yml @@ -0,0 +1,75 @@ +--- + +systemd_unit: + - name: vaultwarden + state: absent + unit_type: service + overwrite: false + unit_file: + description: | + # + # + # + # + # + # + + Unit: + Description: Vaultwarden API server + Documentation: https://github.com/dani-garcia/vaultwarden + After: network.target + + Service: + Type: simple + User: vaultwarden + Group: vaultwarden + LimitNOFILE: 1048576 + UMask: "0077" + + ExecStart: /usr/bin/vaultwarden + EnvironmentFile: /etc/vaultwarden/config.env + + Restart: on-failure + RestartSec: 15s + + CapabilityBoundingSet: CAP_NET_BIND_SERVICE + AmbientCapabilities: CAP_NET_BIND_SERVICE + + LockPersonality: true + MemoryDenyWriteExecute: true + PrivateDevices: true + PrivateTmp: true + ProtectClock: true + ProtectControlGroups: true + ProtectHome: true + ProtectHostname: true + ProtectKernelLogs: true + ProtectKernelModules: true + ProtectKernelTunables: true + ProtectSystem: strict + RemoveIPC: true + RestrictAddressFamilies: 
AF_UNIX AF_INET AF_INET6 + RestrictNamespaces: true + RestrictRealtime: true + RestrictSUIDSGID: true + + NoNewPrivileges: true + + SystemCallFilter: + - "@system-service" + - "~@privileged @resources" + SystemCallArchitectures: native + + WorkingDirectory: /var/lib/vaultwarden + ReadWriteDirectories: /var/lib/vaultwarden + # ReadWriteDirectories: {{ vaultwarden_config.logging.log_file | dirname }} + # {% if vaultwarden_config.web_vault.enabled is defined and + # vaultwarden_config.web_vault.enabled | string | length > 0 and + # vaultwarden_config.web_vault.enabled | bool == True %} + # ReadWriteDirectories: {{ vaultwarden_config.directories.web_vault }}/web-vault + # {% endif %} + + Install: + WantedBy: multi-user.target + +... diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/systemd_unit/molecule/default/molecule.yml b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/systemd_unit/molecule/default/molecule.yml new file mode 100644 index 0000000..67a0c0f --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/systemd_unit/molecule/default/molecule.yml @@ -0,0 +1,51 @@ +dependency: + name: galaxy + +driver: + name: docker + +platforms: + - name: instance + image: "ghcr.io/bodsch/docker-ansible/ansible-${DISTRIBUTION:-debian:12}" + command: ${MOLECULE_DOCKER_COMMAND:-""} + docker_host: "${DOCKER_HOST:-unix://run/docker.sock}" + privileged: true + pre_build_image: true + cgroupns_mode: host + mounts: + - source: /sys/fs/cgroup + target: /sys/fs/cgroup + type: bind + read_only: false + volumes: + - /var/lib/containerd + capabilities: + - SYS_ADMIN + tmpfs: + - /run + - /tmp + +provisioner: + name: ansible + ansible_args: + - --diff + - -v + config_options: + defaults: + deprecation_warnings: true + callback_result_format: yaml + callbacks_enabled: profile_tasks + gathering: smart + fact_caching_timeout: 320 +scenario: + test_sequence: + - destroy + - dependency + - create + - prepare + - converge + - idempotence + - verify + - destroy +verifier: + name: testinfra diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/systemd_unit/molecule/default/prepare.yml b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/systemd_unit/molecule/default/prepare.yml new file mode 100644 index 0000000..85ceacd --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/systemd_unit/molecule/default/prepare.yml @@ -0,0 +1,48 @@ +--- + +- name: information + hosts: all + gather_facts: true + + pre_tasks: + - name: arch- / artixlinux + when: + - ansible_facts.distribution | lower == 'archlinux' or + ansible_facts.os_family | lower == 'artix linux' + block: + - name: update pacman system + ansible.builtin.command: | + pacman --refresh --sync --sysupgrade --noconfirm + register: pacman + changed_when: pacman.rc != 0 + failed_when: pacman.rc != 0 + + - name: create depends service + ansible.builtin.copy: + mode: 0755 + dest: /etc/init.d/net + content: | + #!/usr/bin/openrc-run + true + when: + - ansible_facts.os_family | lower == 'artix linux' + + - name: update package cache + become: true + ansible.builtin.package: + update_cache: true + + - name: environment + ansible.builtin.debug: + msg: + - "os family : {{ ansible_facts.distribution }} ({{ ansible_facts.os_family }})" + - "distribution version : {{ ansible_facts.distribution_major_version }}" + - "ansible version : {{ ansible_version.full }}" + - "python version : {{ 
ansible_facts.python.version.major }}.{{ ansible_facts.python.version.minor }}" + +- name: converge + hosts: instance + any_errors_fatal: false + + roles: + - role: bodsch.systemd.system diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/systemd_unit/molecule/default/tests/test_default.py b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/systemd_unit/molecule/default/tests/test_default.py new file mode 100644 index 0000000..c0cfc33 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/systemd_unit/molecule/default/tests/test_default.py @@ -0,0 +1,146 @@ +# coding: utf-8 +from __future__ import unicode_literals + +import json +import os + +import pytest +import testinfra.utils.ansible_runner +from ansible.parsing.dataloader import DataLoader +from ansible.template import Templar + +HOST = "instance" + +testinfra_hosts = testinfra.utils.ansible_runner.AnsibleRunner( + os.environ["MOLECULE_INVENTORY_FILE"] +).get_hosts(HOST) + + +def pp_json(json_thing, sort=True, indents=2): + if type(json_thing) is str: + print(json.dumps(json.loads(json_thing), sort_keys=sort, indent=indents)) + else: + print(json.dumps(json_thing, sort_keys=sort, indent=indents)) + return None + + +def base_directory(): + """ """ + cwd = os.getcwd() + + if "group_vars" in os.listdir(cwd): + directory = "../.." + molecule_directory = "." + else: + directory = "." + molecule_directory = f"molecule/{os.environ.get('MOLECULE_SCENARIO_NAME')}" + + return directory, molecule_directory + + +def read_ansible_yaml(file_name, role_name): + """ """ + read_file = None + + for e in ["yml", "yaml"]: + test_file = f"{file_name}.{e}" + if os.path.isfile(test_file): + read_file = test_file + break + + return f"file={read_file} name={role_name}" + + +@pytest.fixture() +def get_vars(host): + """ + parse ansible variables + - defaults/main.yml + - vars/main.yml + - vars/${DISTRIBUTION}.yaml + - molecule/${MOLECULE_SCENARIO_NAME}/group_vars/all/vars.yml + """ + base_dir, molecule_dir = base_directory() + distribution = host.system_info.distribution + operation_system = None + + if distribution in ["debian", "ubuntu"]: + operation_system = "debian" + elif distribution in ["redhat", "ol", "centos", "rocky", "almalinux"]: + operation_system = "redhat" + elif distribution in ["arch", "artix"]: + operation_system = f"{distribution}linux" + + # print(" -> {} / {}".format(distribution, os)) + # print(" -> {}".format(base_dir)) + + file_defaults = read_ansible_yaml(f"{base_dir}/defaults/main", "role_defaults") + file_vars = read_ansible_yaml(f"{base_dir}/vars/main", "role_vars") + file_distibution = read_ansible_yaml( + f"{base_dir}/vars/{operation_system}", "role_distibution" + ) + file_molecule = read_ansible_yaml( + f"{molecule_dir}/group_vars/all/vars", "test_vars" + ) + # file_host_molecule = read_ansible_yaml("{}/host_vars/{}/vars".format(base_dir, HOST), "host_vars") + + defaults_vars = ( + host.ansible("include_vars", file_defaults) + .get("ansible_facts") + .get("role_defaults") + ) + vars_vars = ( + host.ansible("include_vars", file_vars).get("ansible_facts").get("role_vars") + ) + distibution_vars = ( + host.ansible("include_vars", file_distibution) + .get("ansible_facts") + .get("role_distibution") + ) + molecule_vars = ( + host.ansible("include_vars", file_molecule) + .get("ansible_facts") + .get("test_vars") + ) + # host_vars = host.ansible("include_vars", file_host_molecule).get("ansible_facts").get("host_vars") + + ansible_vars = defaults_vars 
+ ansible_vars.update(vars_vars) + ansible_vars.update(distibution_vars) + ansible_vars.update(molecule_vars) + # ansible_vars.update(host_vars) + + templar = Templar(loader=DataLoader(), variables=ansible_vars) + result = templar.template(ansible_vars, fail_on_undefined=False) + + return result + + +def local_facts(host): + """ + return local facts + """ + return host.ansible("setup").get("ansible_facts").get("ansible_local").get("system") + + +@pytest.mark.parametrize( + "directories", + [ + "/etc/systemd", + ], +) +def test_directories(host, directories): + d = host.file(directories) + assert d.is_directory + + +@pytest.mark.parametrize( + "files", + [ + "/etc/systemd/system.conf", + ], +) +def test_systemd_files(host, files): + """ """ + d = host.file(files) + assert d.is_file diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/systemd_unit/tasks/main.yml b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/systemd_unit/tasks/main.yml new file mode 100644 index 0000000..d7b1a42 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/systemd_unit/tasks/main.yml @@ -0,0 +1,78 @@ +--- + +- name: create getty drop-ins + bodsch.systemd.unit_file: + name: "getty@tty1" + state: "present" + unit_type: "service" + drop_ins: + - name: autologin + state: present + service: + ExecStart: + - "" + - "{% raw %}-/sbin/agetty -o '-p -f -- \\\\u' --noclear --autologin username %I $TERM{% endraw %}" + Type: simple + + - name: noclear + state: absent + service: + TTYVTDisallocate: false + when: + - ansible_facts.service_mgr == 'systemd' + +- name: create systemd unit + bodsch.systemd.unit_file: + name: "nextcloud-cron" + state: "present" + unit_type: "service" + unit_file: + unit: + Description: Nextcloud cron.php job + service: + User: www-data + ExecCondition: php -f /var/www/nextcloud/server/occ status --exit-code + ExecStart: /usr/bin/php -f /var/www/nextcloud/server/cron.php + KillMode: process + when: + - ansible_facts.service_mgr == 'systemd' + +- name: create systemd timer + bodsch.systemd.unit_file: + name: "nextcloud-cron" + state: "present" + unit_type: "timer" + unit_file: + unit: + Description: Run Nextcloud cron.php every 5 minutes + timer: + OnBootSec: 5min + OnUnitActiveSec: 5min + Unit: nextcloud-cron.service + install: + WantedBy: timers.target + when: + - ansible_facts.service_mgr == 'systemd' + +- name: create systemd unit files + bodsch.systemd.unit_file: + name: "{{ item.name }}" + state: "{{ item.state }}" + unit_type: "{{ item.unit_type }}" + overwrite: "{{ item.overwrite | default(omit) }}" + drop_ins: "{{ item.drop_ins | default(omit) }}" + unit_file: "{{ item.unit_file | default(omit) }}" + loop: + "{{ systemd_unit }}" + loop_control: + label: "{{ item.name }}" + register: systemd_unit_file + ignore_errors: true + when: + - systemd_unit | count > 0 + +# - name: d +# debug: +# msg: "{{ systemd_unit_file }}" + +... diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/systemd_unit/vars/main.yml b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/systemd_unit/vars/main.yml new file mode 100644 index 0000000..6a63a96 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/systemd_unit/vars/main.yml @@ -0,0 +1,5 @@ +--- + +systemd_defaults_unit: [] + +... 
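The systemd_unit role above is a thin wrapper: defaults/main.yml ships an empty `systemd_unit` list, and the final task in tasks/main.yml loops every entry through the collection's `bodsch.systemd.unit_file` module. As a minimal sketch of how a playbook or inventory might feed it, the unit name and paths below are purely illustrative, modeled on the nextcloud-cron task above:

    systemd_unit:
      - name: my-app                            # illustrative unit name
        state: present
        unit_type: service
        unit_file:
          unit:
            Description: Example application service
          service:
            User: www-data
            ExecStart: /usr/local/bin/my-app    # hypothetical binary path
          install:
            WantedBy: multi-user.target

Each entry mirrors the module's parameters one-to-one (`state`, `unit_type`, optional `overwrite` and `drop_ins`, and a `unit_file` mapping with `unit`, `service` or `timer`, and `install` sections), since the loop passes them through unchanged.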
diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/timesyncd/.ansible-lint b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/timesyncd/.ansible-lint new file mode 100644 index 0000000..5343e85 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/timesyncd/.ansible-lint @@ -0,0 +1,6 @@ +--- + +skip_list: + - name[casing] + - name[template] + - syntax-check[specific] diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/timesyncd/.editorconfig b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/timesyncd/.editorconfig new file mode 100644 index 0000000..898cdbd --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/timesyncd/.editorconfig @@ -0,0 +1,23 @@ +# EditorConfig helps developers define and maintain consistent +# coding styles between different editors and IDEs +# https://editorconfig.org/ + + +root = true + +[*] +indent_style = space +indent_size = 2 + +end_of_line = lf +charset = utf-8 +trim_trailing_whitespace = true +insert_final_newline = true +max_line_length = 100 + +[*.py] +indent_style = space +indent_size = 4 + +[*.md] +trim_trailing_whitespace = false diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/timesyncd/.flake8 b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/timesyncd/.flake8 new file mode 100644 index 0000000..1962f7e --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/timesyncd/.flake8 @@ -0,0 +1,19 @@ +[flake8] + +# E221 multiple spaces before operator +# E251 unexpected spaces around keyword / parameter equals + +ignore = E221,E251 + +exclude = + # No need to traverse our git directory + .git, + # There's no value in checking cache directories + __pycache__, + .tox + +# E203: https://github.com/python/black/issues/315 +# ignore = D,E741,W503,W504,H,E501,E203 + +max-line-length = 195 + diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/timesyncd/.gitignore b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/timesyncd/.gitignore new file mode 100644 index 0000000..3d35e6a --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/timesyncd/.gitignore @@ -0,0 +1,6 @@ +.tox +.galaxy_install_info +*kate-swp +__pycache__ +.cache +.directory diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/timesyncd/.yamllint b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/timesyncd/.yamllint new file mode 100644 index 0000000..e3f52af --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/timesyncd/.yamllint @@ -0,0 +1,36 @@ +--- +# Based on ansible-lint config +extends: default + +rules: + braces: + max-spaces-inside: 1 + level: error + brackets: + max-spaces-inside: 1 + level: error + colons: + max-spaces-after: -1 + level: error + commas: + max-spaces-after: -1 + level: error + comments: disable + comments-indentation: disable + document-start: disable + empty-lines: + max: 3 + level: error + hyphens: + level: error + indentation: + spaces: 2 + key-duplicates: enable + line-length: + max: 195 + level: warning + new-line-at-end-of-file: disable + new-lines: + type: unix + trailing-spaces: disable + truthy: disable diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/timesyncd/CONTRIBUTING.md 
b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/timesyncd/CONTRIBUTING.md new file mode 100644 index 0000000..e3cd4cc --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/timesyncd/CONTRIBUTING.md @@ -0,0 +1,31 @@ +Contributing +============ +If you want to contribute to a project and make it better, your help is very welcome. +Contributing is also a great way to learn more about social coding on Github, new technologies +and their ecosystems, and how to make constructive, helpful bug reports, feature requests and the +noblest of all contributions: a good, clean pull request. + +### How to make a clean pull request + +Look for a project's contribution instructions. If there are any, follow them. + +- Create a personal fork of the project on Github. +- Clone the fork on your local machine. Your remote repo on Github is called `origin`. +- Add the original repository as a remote called `upstream`. +- If you created your fork a while ago, be sure to pull upstream changes into your local repository. +- Create a new branch to work on! Branch from `develop` if it exists, else from `master`. +- Implement/fix your feature, comment your code. +- Follow the code style of the project, including indentation. +- If the project has tests, run them! +- Write or adapt tests as needed. +- Add or change the documentation as needed. +- Squash your commits into a single commit. Create a new branch if necessary. +- Push your branch to your fork on Github, the remote `origin`. +- From your fork open a pull request in the correct branch. Target the project's `develop` branch if there is one, else go for `master`! +- If the maintainer requests further changes, just push them to your branch. The PR will be updated automatically. +- Once the pull request is approved and merged, you can pull the changes from `upstream` to your local repo and delete + your extra branch(es). + +And last but not least: Always write your commit messages in the present tense. +Your commit message should describe what the commit, when applied, does to the +code – not what you did to the code. diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/timesyncd/LICENSE b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/timesyncd/LICENSE new file mode 100644 index 0000000..8c8472f --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/timesyncd/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. 
+ + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright 2020-2021 Bodo Schulz + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/timesyncd/Makefile b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/timesyncd/Makefile new file mode 100644 index 0000000..bfaab7c --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/timesyncd/Makefile @@ -0,0 +1,22 @@ +# +export TOX_SCENARIO ?= default +export TOX_ANSIBLE ?= ansible_8.5 + +.PHONY: converge destroy verify test lint + +default: converge + +converge: + @hooks/converge + +destroy: + @hooks/destroy + +verify: + @hooks/verify + +test: + @hooks/test + +lint: + @hooks/lint diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/timesyncd/README.md b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/timesyncd/README.md new file mode 100644 index 0000000..e8a5742 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/timesyncd/README.md @@ -0,0 +1,2 @@ + +# https://www.freedesktop.org/software/systemd/man/latest/timesyncd.conf.html diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/timesyncd/defaults/main.yml b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/timesyncd/defaults/main.yml new file mode 100644 index 0000000..c09d96b --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/timesyncd/defaults/main.yml @@ -0,0 +1,12 @@ +--- + +systemd_timesyncd: {} + # ntp: [] + # fallback_ntp: [] # 0.arch.pool.ntp.org 1.arch.pool.ntp.org 2.arch.pool.ntp.org 3.arch.pool.ntp.org + # root_distance_max_sec: "" # 5 + # poll_interval_min_sec: "" # 32 + # poll_interval_max_sec: "" # 2048 + # connection_retry_sec: "" # 30 + # save_interval_sec: "" # 60 + +... 
diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/timesyncd/hooks/converge b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/timesyncd/hooks/converge new file mode 100755 index 0000000..0c50932 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/timesyncd/hooks/converge @@ -0,0 +1,3 @@ +#!/usr/bin/env bash + +hooks/tox.sh "converge" diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/timesyncd/hooks/destroy b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/timesyncd/hooks/destroy new file mode 100755 index 0000000..b4a3f8d --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/timesyncd/hooks/destroy @@ -0,0 +1,3 @@ +#!/usr/bin/env bash + +hooks/tox.sh "destroy" diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/timesyncd/hooks/lint b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/timesyncd/hooks/lint new file mode 100755 index 0000000..ef226a0 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/timesyncd/hooks/lint @@ -0,0 +1,3 @@ +#!/usr/bin/env bash + +hooks/tox.sh "lint" diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/timesyncd/hooks/molecule.rc b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/timesyncd/hooks/molecule.rc new file mode 100644 index 0000000..a15f7c3 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/timesyncd/hooks/molecule.rc @@ -0,0 +1,9 @@ + +TOX_ARGS= + +if [ -n "${TOX_SCENARIO}" ] +then + TOX_ARGS="--scenario-name ${TOX_SCENARIO}" +fi + +TOX_OPTS="-e ${TOX_ANSIBLE}" diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/timesyncd/hooks/test b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/timesyncd/hooks/test new file mode 100755 index 0000000..2869139 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/timesyncd/hooks/test @@ -0,0 +1,3 @@ +#!/usr/bin/env bash + +hooks/tox.sh "test" diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/timesyncd/hooks/tox.sh b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/timesyncd/hooks/tox.sh new file mode 100755 index 0000000..62bb777 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/timesyncd/hooks/tox.sh @@ -0,0 +1,26 @@ +#!/usr/bin/env bash + +. hooks/molecule.rc + +TOX_TEST="${1}" + +if [ -f "./collections.yml" ] +then + for collection in $(grep -v "#" collections.yml | grep "^ - name: " | awk -F ': ' '{print $2}') + do + collections_installed="$(ansible-galaxy collection list | grep ${collection} 2> /dev/null)" + + if [ -z "${collections_installed}" ] + then + echo "Install the required collection '${collection}'" + ansible-galaxy collection install ${collection} + else + collection_version=$(echo "${collections_installed}" | awk -F ' ' '{print $2}') + + echo "The required collection '${collection}' is installed in version ${collection_version}." 
+ fi + done + echo "" +fi + +tox ${TOX_OPTS} -- molecule ${TOX_TEST} ${TOX_ARGS} diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/timesyncd/hooks/verify b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/timesyncd/hooks/verify new file mode 100755 index 0000000..5f436af --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/timesyncd/hooks/verify @@ -0,0 +1,3 @@ +#!/usr/bin/env bash + +hooks/tox.sh "verify" diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/timesyncd/molecule/configured/converge.yml b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/timesyncd/molecule/configured/converge.yml new file mode 100644 index 0000000..b6ff0d2 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/timesyncd/molecule/configured/converge.yml @@ -0,0 +1,7 @@ +--- +- name: converge + hosts: instance + any_errors_fatal: false + + roles: + - role: bodsch.systemd.timesyncd diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/timesyncd/molecule/configured/group_vars/all/vars.yml b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/timesyncd/molecule/configured/group_vars/all/vars.yml new file mode 100644 index 0000000..6aa36ba --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/timesyncd/molecule/configured/group_vars/all/vars.yml @@ -0,0 +1,14 @@ +--- + +systemd_timesyncd: + ntp: [] + fallback_ntp: # 0.arch.pool.ntp.org 1.arch.pool.ntp.org 2.arch.pool.ntp.org 3.arch.pool.ntp.org + - 0.arch.pool.ntp.org + - 1.arch.pool.ntp.org + root_distance_max_sec: "2" # 5 + poll_interval_min_sec: "60" # 32 + poll_interval_max_sec: "" # 2048 + connection_retry_sec: "10" # 30 + save_interval_sec: "" # 60 + +... 
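The Makefile and hooks above chain together as follows: `make test` calls hooks/test, which calls hooks/tox.sh; that script sources hooks/molecule.rc to build the tox and molecule arguments from TOX_ANSIBLE and TOX_SCENARIO, installs any collections listed in a collections.yml if one exists, and finally runs tox. Assuming the `ansible_8.5` environment is defined in the role's tox.ini (which is not part of this diff), the `configured` scenario would be run like this from the role directory:

    # via the Makefile wrapper
    TOX_SCENARIO=configured make test

    # or the command hooks/tox.sh ultimately composes
    tox -e ansible_8.5 -- molecule test --scenario-name configured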
diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/timesyncd/molecule/configured/molecule.yml b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/timesyncd/molecule/configured/molecule.yml new file mode 100644 index 0000000..67a0c0f --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/timesyncd/molecule/configured/molecule.yml @@ -0,0 +1,51 @@ +dependency: + name: galaxy + +driver: + name: docker + +platforms: + - name: instance + image: "ghcr.io/bodsch/docker-ansible/ansible-${DISTRIBUTION:-debian:12}" + command: ${MOLECULE_DOCKER_COMMAND:-""} + docker_host: "${DOCKER_HOST:-unix://run/docker.sock}" + privileged: true + pre_build_image: true + cgroupns_mode: host + mounts: + - source: /sys/fs/cgroup + target: /sys/fs/cgroup + type: bind + read_only: false + volumes: + - /var/lib/containerd + capabilities: + - SYS_ADMIN + tmpfs: + - /run + - /tmp + +provisioner: + name: ansible + ansible_args: + - --diff + - -v + config_options: + defaults: + deprecation_warnings: true + callback_result_format: yaml + callbacks_enabled: profile_tasks + gathering: smart + fact_caching_timeout: 320 +scenario: + test_sequence: + - destroy + - dependency + - create + - prepare + - converge + - idempotence + - verify + - destroy +verifier: + name: testinfra diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/timesyncd/molecule/configured/prepare.yml b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/timesyncd/molecule/configured/prepare.yml new file mode 100644 index 0000000..d680e48 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/timesyncd/molecule/configured/prepare.yml @@ -0,0 +1,49 @@ +--- + +- name: information + hosts: all + gather_facts: true + + pre_tasks: + - name: arch- / artixlinux + when: + - ansible_facts.distribution | lower == 'archlinux' or + ansible_facts.os_family | lower == 'artix linux' + block: + - name: update pacman system + ansible.builtin.command: | + pacman --refresh --sync --sysupgrade --noconfirm + register: pacman + changed_when: pacman.rc != 0 + failed_when: pacman.rc != 0 + + - name: create depends service + ansible.builtin.copy: + mode: 0755 + dest: /etc/init.d/net + content: | + #!/usr/bin/openrc-run + true + when: + - ansible_facts.os_family | lower == 'artix linux' + + - name: make sure python3-apt is installed (only debian based) + ansible.builtin.package: + name: + - python3-apt + state: present + when: + - ansible_facts.os_family | lower == 'debian' + + - name: update package cache + become: true + ansible.builtin.package: + update_cache: true + + - name: environment + ansible.builtin.debug: + msg: + - "os family : {{ ansible_facts.distribution }} ({{ ansible_facts.os_family }})" + - "distribution version : {{ ansible_facts.distribution_major_version }}" + - "ansible version : {{ ansible_version.full }}" + - "python version : {{ ansible_facts.python.version.major }}.{{ ansible_facts.python.version.minor }}" diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/timesyncd/molecule/configured/tests/test_default.py b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/timesyncd/molecule/configured/tests/test_default.py new file mode 100644 index 0000000..206aeba --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/timesyncd/molecule/configured/tests/test_default.py @@ -0,0 +1,148 @@ +# coding: utf-8 +from 
__future__ import unicode_literals + +import json +import os + +import pytest +import testinfra.utils.ansible_runner +from ansible.parsing.dataloader import DataLoader +from ansible.template import Templar + +HOST = "instance" + +testinfra_hosts = testinfra.utils.ansible_runner.AnsibleRunner( + os.environ["MOLECULE_INVENTORY_FILE"] +).get_hosts(HOST) + + +def pp_json(json_thing, sort=True, indents=2): + if type(json_thing) is str: + print(json.dumps(json.loads(json_thing), sort_keys=sort, indent=indents)) + else: + print(json.dumps(json_thing, sort_keys=sort, indent=indents)) + return None + + +def base_directory(): + """ """ + cwd = os.getcwd() + + if "group_vars" in os.listdir(cwd): + directory = "../.." + molecule_directory = "." + else: + directory = "." + molecule_directory = f"molecule/{os.environ.get('MOLECULE_SCENARIO_NAME')}" + + return directory, molecule_directory + + +def read_ansible_yaml(file_name, role_name): + """ """ + read_file = None + + for e in ["yml", "yaml"]: + test_file = f"{file_name}.{e}" + if os.path.isfile(test_file): + read_file = test_file + break + + return f"file={read_file} name={role_name}" + + +@pytest.fixture() +def get_vars(host): + """ + parse ansible variables + - defaults/main.yml + - vars/main.yml + - vars/${DISTRIBUTION}.yaml + - molecule/${MOLECULE_SCENARIO_NAME}/group_vars/all/vars.yml + """ + base_dir, molecule_dir = base_directory() + distribution = host.system_info.distribution + operation_system = None + + if distribution in ["debian", "ubuntu"]: + operation_system = "debian" + elif distribution in ["redhat", "ol", "centos", "rocky", "almalinux"]: + operation_system = "redhat" + elif distribution in ["arch", "artix"]: + operation_system = f"{distribution}linux" + + # print(" -> {} / {}".format(distribution, os)) + # print(" -> {}".format(base_dir)) + + file_defaults = read_ansible_yaml(f"{base_dir}/defaults/main", "role_defaults") + file_vars = read_ansible_yaml(f"{base_dir}/vars/main", "role_vars") + file_distibution = read_ansible_yaml( + f"{base_dir}/vars/{operation_system}", "role_distibution" + ) + file_molecule = read_ansible_yaml( + f"{molecule_dir}/group_vars/all/vars", "test_vars" + ) + # file_host_molecule = read_ansible_yaml("{}/host_vars/{}/vars".format(base_dir, HOST), "host_vars") + + defaults_vars = ( + host.ansible("include_vars", file_defaults) + .get("ansible_facts") + .get("role_defaults") + ) + vars_vars = ( + host.ansible("include_vars", file_vars).get("ansible_facts").get("role_vars") + ) + distibution_vars = ( + host.ansible("include_vars", file_distibution) + .get("ansible_facts") + .get("role_distibution") + ) + molecule_vars = ( + host.ansible("include_vars", file_molecule) + .get("ansible_facts") + .get("test_vars") + ) + # host_vars = host.ansible("include_vars", file_host_molecule).get("ansible_facts").get("host_vars") + + ansible_vars = defaults_vars + ansible_vars.update(vars_vars) + ansible_vars.update(distibution_vars) + ansible_vars.update(molecule_vars) + # ansible_vars.update(host_vars) + + templar = Templar(loader=DataLoader(), variables=ansible_vars) + result = templar.template(ansible_vars, fail_on_undefined=False) + + return result + + +def local_facts(host): + """ + return local facts + """ + return ( + host.ansible("setup").get("ansible_facts").get("ansible_local").get("timesyncd") + ) + + +@pytest.mark.parametrize( + "directories", + [ + "/etc/systemd", + ], +) +def test_directories(host, directories): + d = host.file(directories) + assert d.is_directory + + +@pytest.mark.parametrize( + "files", + [ 
+ "/etc/systemd/timesyncd.conf", + ], +) +def test_systemd_files(host, files): + """ """ + d = host.file(files) + assert d.is_file diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/timesyncd/molecule/default/converge.yml b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/timesyncd/molecule/default/converge.yml new file mode 100644 index 0000000..b6ff0d2 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/timesyncd/molecule/default/converge.yml @@ -0,0 +1,7 @@ +--- +- name: converge + hosts: instance + any_errors_fatal: false + + roles: + - role: bodsch.systemd.timesyncd diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/timesyncd/molecule/default/group_vars/all/vars.yml b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/timesyncd/molecule/default/group_vars/all/vars.yml new file mode 100644 index 0000000..c81cf5b --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/timesyncd/molecule/default/group_vars/all/vars.yml @@ -0,0 +1,3 @@ +--- + +... diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/timesyncd/molecule/default/molecule.yml b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/timesyncd/molecule/default/molecule.yml new file mode 100644 index 0000000..67a0c0f --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/timesyncd/molecule/default/molecule.yml @@ -0,0 +1,51 @@ +dependency: + name: galaxy + +driver: + name: docker + +platforms: + - name: instance + image: "ghcr.io/bodsch/docker-ansible/ansible-${DISTRIBUTION:-debian:12}" + command: ${MOLECULE_DOCKER_COMMAND:-""} + docker_host: "${DOCKER_HOST:-unix://run/docker.sock}" + privileged: true + pre_build_image: true + cgroupns_mode: host + mounts: + - source: /sys/fs/cgroup + target: /sys/fs/cgroup + type: bind + read_only: false + volumes: + - /var/lib/containerd + capabilities: + - SYS_ADMIN + tmpfs: + - /run + - /tmp + +provisioner: + name: ansible + ansible_args: + - --diff + - -v + config_options: + defaults: + deprecation_warnings: true + callback_result_format: yaml + callbacks_enabled: profile_tasks + gathering: smart + fact_caching_timeout: 320 +scenario: + test_sequence: + - destroy + - dependency + - create + - prepare + - converge + - idempotence + - verify + - destroy +verifier: + name: testinfra diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/timesyncd/molecule/default/prepare.yml b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/timesyncd/molecule/default/prepare.yml new file mode 100644 index 0000000..d680e48 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/timesyncd/molecule/default/prepare.yml @@ -0,0 +1,49 @@ +--- + +- name: information + hosts: all + gather_facts: true + + pre_tasks: + - name: arch- / artixlinux + when: + - ansible_facts.distribution | lower == 'archlinux' or + ansible_facts.os_family | lower == 'artix linux' + block: + - name: update pacman system + ansible.builtin.command: | + pacman --refresh --sync --sysupgrade --noconfirm + register: pacman + changed_when: pacman.rc != 0 + failed_when: pacman.rc != 0 + + - name: create depends service + ansible.builtin.copy: + mode: 0755 + dest: /etc/init.d/net + content: | + #!/usr/bin/openrc-run + true + when: + - ansible_facts.os_family | lower == 'artix linux' + + - name: make sure 
python3-apt is installed (only debian based) + ansible.builtin.package: + name: + - python3-apt + state: present + when: + - ansible_facts.os_family | lower == 'debian' + + - name: update package cache + become: true + ansible.builtin.package: + update_cache: true + + - name: environment + ansible.builtin.debug: + msg: + - "os family : {{ ansible_facts.distribution }} ({{ ansible_facts.os_family }})" + - "distribution version : {{ ansible_facts.distribution_major_version }}" + - "ansible version : {{ ansible_version.full }}" + - "python version : {{ ansible_facts.python.version.major }}.{{ ansible_facts.python.version.minor }}" diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/timesyncd/molecule/default/tests/test_default.py b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/timesyncd/molecule/default/tests/test_default.py new file mode 100644 index 0000000..206aeba --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/timesyncd/molecule/default/tests/test_default.py @@ -0,0 +1,148 @@ +# coding: utf-8 +from __future__ import unicode_literals + +import json +import os + +import pytest +import testinfra.utils.ansible_runner +from ansible.parsing.dataloader import DataLoader +from ansible.template import Templar + +HOST = "instance" + +testinfra_hosts = testinfra.utils.ansible_runner.AnsibleRunner( + os.environ["MOLECULE_INVENTORY_FILE"] +).get_hosts(HOST) + + +def pp_json(json_thing, sort=True, indents=2): + if type(json_thing) is str: + print(json.dumps(json.loads(json_thing), sort_keys=sort, indent=indents)) + else: + print(json.dumps(json_thing, sort_keys=sort, indent=indents)) + return None + + +def base_directory(): + """ """ + cwd = os.getcwd() + + if "group_vars" in os.listdir(cwd): + directory = "../.." + molecule_directory = "." + else: + directory = "." 
+ molecule_directory = f"molecule/{os.environ.get('MOLECULE_SCENARIO_NAME')}" + + return directory, molecule_directory + + +def read_ansible_yaml(file_name, role_name): + """ """ + read_file = None + + for e in ["yml", "yaml"]: + test_file = f"{file_name}.{e}" + if os.path.isfile(test_file): + read_file = test_file + break + + return f"file={read_file} name={role_name}" + + +@pytest.fixture() +def get_vars(host): + """ + parse ansible variables + - defaults/main.yml + - vars/main.yml + - vars/${DISTRIBUTION}.yaml + - molecule/${MOLECULE_SCENARIO_NAME}/group_vars/all/vars.yml + """ + base_dir, molecule_dir = base_directory() + distribution = host.system_info.distribution + operation_system = None + + if distribution in ["debian", "ubuntu"]: + operation_system = "debian" + elif distribution in ["redhat", "ol", "centos", "rocky", "almalinux"]: + operation_system = "redhat" + elif distribution in ["arch", "artix"]: + operation_system = f"{distribution}linux" + + # print(" -> {} / {}".format(distribution, os)) + # print(" -> {}".format(base_dir)) + + file_defaults = read_ansible_yaml(f"{base_dir}/defaults/main", "role_defaults") + file_vars = read_ansible_yaml(f"{base_dir}/vars/main", "role_vars") + file_distibution = read_ansible_yaml( + f"{base_dir}/vars/{operation_system}", "role_distibution" + ) + file_molecule = read_ansible_yaml( + f"{molecule_dir}/group_vars/all/vars", "test_vars" + ) + # file_host_molecule = read_ansible_yaml("{}/host_vars/{}/vars".format(base_dir, HOST), "host_vars") + + defaults_vars = ( + host.ansible("include_vars", file_defaults) + .get("ansible_facts") + .get("role_defaults") + ) + vars_vars = ( + host.ansible("include_vars", file_vars).get("ansible_facts").get("role_vars") + ) + distibution_vars = ( + host.ansible("include_vars", file_distibution) + .get("ansible_facts") + .get("role_distibution") + ) + molecule_vars = ( + host.ansible("include_vars", file_molecule) + .get("ansible_facts") + .get("test_vars") + ) + # host_vars = host.ansible("include_vars", file_host_molecule).get("ansible_facts").get("host_vars") + + ansible_vars = defaults_vars + ansible_vars.update(vars_vars) + ansible_vars.update(distibution_vars) + ansible_vars.update(molecule_vars) + # ansible_vars.update(host_vars) + + templar = Templar(loader=DataLoader(), variables=ansible_vars) + result = templar.template(ansible_vars, fail_on_undefined=False) + + return result + + +def local_facts(host): + """ + return local facts + """ + return ( + host.ansible("setup").get("ansible_facts").get("ansible_local").get("timesyncd") + ) + + +@pytest.mark.parametrize( + "directories", + [ + "/etc/systemd", + ], +) +def test_directories(host, directories): + d = host.file(directories) + assert d.is_directory + + +@pytest.mark.parametrize( + "files", + [ + "/etc/systemd/timesyncd.conf", + ], +) +def test_systemd_files(host, files): + """ """ + d = host.file(files) + assert d.is_file diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/timesyncd/tasks/main.yml b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/timesyncd/tasks/main.yml new file mode 100644 index 0000000..0b928c6 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/timesyncd/tasks/main.yml @@ -0,0 +1,15 @@ +--- + +- name: merge systemd timesyncd configuration between defaults and custom + ansible.builtin.set_fact: + systemd_timesyncd: "{{ systemd_defaults_timesyncd | combine(systemd_timesyncd, recursive=True) }}" + +- name: create systemd timesyncd 
configuration + ansible.builtin.template: + src: systemd/timesyncd.conf.j2 + dest: /etc/systemd/timesyncd.conf + mode: 0644 + owner: root + group: root + +... diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/timesyncd/templates/systemd/timesyncd.conf.j2 b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/timesyncd/templates/systemd/timesyncd.conf.j2 new file mode 100644 index 0000000..9197251 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/timesyncd/templates/systemd/timesyncd.conf.j2 @@ -0,0 +1,46 @@ +#jinja2: trim_blocks: True, lstrip_blocks: True +# {{ ansible_managed }} +# +# This file is part of systemd. +# +# systemd is free software; you can redistribute it and/or modify it under the +# terms of the GNU Lesser General Public License as published by the Free +# Software Foundation; either version 2.1 of the License, or (at your option) +# any later version. +# +# Entries in this file show the compile time defaults. Local configuration +# should be created by either modifying this file, or by creating "drop-ins" in +# the timesyncd.conf.d/ subdirectory. The latter is generally recommended. +# Defaults can be restored by simply deleting this file and all drop-ins. +# +# See timesyncd.conf(5) for details. + +[Time] +{% if systemd_timesyncd.ntp is defined and + systemd_timesyncd.ntp | count > 0 %} +NTP = {{ systemd_timesyncd.ntp | join(' ') }} +{% endif %} +{% if systemd_timesyncd.fallback_ntp is defined and + systemd_timesyncd.fallback_ntp | count > 0 %} +FallbackNTP = {{ systemd_timesyncd.fallback_ntp | join(' ') }} +{% endif %} +{% if systemd_timesyncd.root_distance_max_sec is defined and + systemd_timesyncd.root_distance_max_sec | string | length > 0 %} +RootDistanceMaxSec = {{ systemd_timesyncd.root_distance_max_sec }} +{% endif %} +{% if systemd_timesyncd.poll_interval_min_sec is defined and + systemd_timesyncd.poll_interval_min_sec | string | length > 0 %} +PollIntervalMinSec = {{ systemd_timesyncd.poll_interval_min_sec }} +{% endif %} +{% if systemd_timesyncd.poll_interval_max_sec is defined and + systemd_timesyncd.poll_interval_max_sec | string | length > 0 %} +PollIntervalMaxSec = {{ systemd_timesyncd.poll_interval_max_sec }} +{% endif %} +{% if systemd_timesyncd.connection_retry_sec is defined and + systemd_timesyncd.connection_retry_sec | string | length > 0 %} +ConnectionRetrySec = {{ systemd_timesyncd.connection_retry_sec }} +{% endif %} +{% if systemd_timesyncd.save_interval_sec is defined and + systemd_timesyncd.save_interval_sec | string | length > 0 %} +SaveIntervalSec = {{ systemd_timesyncd.save_interval_sec }} +{% endif %} diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/timesyncd/vars/main.yml b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/timesyncd/vars/main.yml new file mode 100644 index 0000000..0610365 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/timesyncd/vars/main.yml @@ -0,0 +1,12 @@ +--- + +systemd_defaults_timesyncd: + ntp: [] + fallback_ntp: [] # 0.arch.pool.ntp.org 1.arch.pool.ntp.org 2.arch.pool.ntp.org 3.arch.pool.ntp.org + root_distance_max_sec: "" # 5 + poll_interval_min_sec: "" # 32 + poll_interval_max_sec: "" # 2048 + connection_retry_sec: "" # 30 + save_interval_sec: "" # 60 + +... 
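For reference, combining the role's systemd_defaults_timesyncd with the `configured` scenario's group_vars above, the `count > 0` and `length > 0` guards in timesyncd.conf.j2 drop the empty `ntp` list and the empty-string settings, so the rendered /etc/systemd/timesyncd.conf should reduce to roughly the following (managed-file header comments omitted):

    [Time]
    FallbackNTP = 0.arch.pool.ntp.org 1.arch.pool.ntp.org
    RootDistanceMaxSec = 2
    PollIntervalMinSec = 60
    ConnectionRetrySec = 10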
diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/user/.ansible-lint b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/user/.ansible-lint new file mode 100644 index 0000000..5343e85 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/user/.ansible-lint @@ -0,0 +1,6 @@ +--- + +skip_list: + - name[casing] + - name[template] + - syntax-check[specific] diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/user/.editorconfig b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/user/.editorconfig new file mode 100644 index 0000000..898cdbd --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/user/.editorconfig @@ -0,0 +1,23 @@ +# EditorConfig helps developers define and maintain consistent +# coding styles between different editors and IDEs +# https://editorconfig.org/ + + +root = true + +[*] +indent_style = space +indent_size = 2 + +end_of_line = lf +charset = utf-8 +trim_trailing_whitespace = true +insert_final_newline = true +max_line_length = 100 + +[*.py] +indent_style = space +indent_size = 4 + +[*.md] +trim_trailing_whitespace = false diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/user/.flake8 b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/user/.flake8 new file mode 100644 index 0000000..1962f7e --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/user/.flake8 @@ -0,0 +1,19 @@ +[flake8] + +# E221 multiple spaces before operator +# E251 unexpected spaces around keyword / parameter equals + +ignore = E221,E251 + +exclude = + # No need to traverse our git directory + .git, + # There's no value in checking cache directories + __pycache__, + .tox + +# E203: https://github.com/python/black/issues/315 +# ignore = D,E741,W503,W504,H,E501,E203 + +max-line-length = 195 + diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/user/.gitignore b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/user/.gitignore new file mode 100644 index 0000000..3d35e6a --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/user/.gitignore @@ -0,0 +1,6 @@ +.tox +.galaxy_install_info +*kate-swp +__pycache__ +.cache +.directory diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/user/.yamllint b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/user/.yamllint new file mode 100644 index 0000000..e3f52af --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/user/.yamllint @@ -0,0 +1,36 @@ +--- +# Based on ansible-lint config +extends: default + +rules: + braces: + max-spaces-inside: 1 + level: error + brackets: + max-spaces-inside: 1 + level: error + colons: + max-spaces-after: -1 + level: error + commas: + max-spaces-after: -1 + level: error + comments: disable + comments-indentation: disable + document-start: disable + empty-lines: + max: 3 + level: error + hyphens: + level: error + indentation: + spaces: 2 + key-duplicates: enable + line-length: + max: 195 + level: warning + new-line-at-end-of-file: disable + new-lines: + type: unix + trailing-spaces: disable + truthy: disable diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/user/CONTRIBUTING.md 
b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/user/CONTRIBUTING.md new file mode 100644 index 0000000..e3cd4cc --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/user/CONTRIBUTING.md @@ -0,0 +1,31 @@ +Contributing +============ +If you want to contribute to a project and make it better, your help is very welcome. +Contributing is also a great way to learn more about social coding on GitHub, new technologies +and their ecosystems, and how to make constructive, helpful bug reports, feature requests and the +noblest of all contributions: a good, clean pull request. + +### How to make a clean pull request + +Look for a project's contribution instructions. If there are any, follow them. + +- Create a personal fork of the project on GitHub. +- Clone the fork on your local machine. Your remote repo on GitHub is called `origin`. +- Add the original repository as a remote called `upstream`. +- If you created your fork a while ago, be sure to pull upstream changes into your local repository. +- Create a new branch to work on! Branch from `develop` if it exists, else from `master`. +- Implement/fix your feature, comment your code. +- Follow the code style of the project, including indentation. +- If the project has tests, run them! +- Write or adapt tests as needed. +- Add or change the documentation as needed. +- Squash your commits into a single commit. Create a new branch if necessary. +- Push your branch to your fork on GitHub, the remote `origin`. +- From your fork open a pull request in the correct branch. Target the project's `develop` branch if there is one, else go for `master`! +- If the maintainer requests further changes, just push them to your branch. The PR will be updated automatically. +- Once the pull request is approved and merged, you can pull the changes from `upstream` to your local repo and delete + your extra branch(es). + +And last but not least: Always write your commit messages in the present tense. +Your commit message should describe what the commit, when applied, does to the +code – not what you did to the code. diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/user/LICENSE b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/user/LICENSE new file mode 100644 index 0000000..8c8472f --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/user/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License.
+ + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright 2020-2021 Bodo Schulz + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/user/Makefile b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/user/Makefile new file mode 100644 index 0000000..bfaab7c --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/user/Makefile @@ -0,0 +1,22 @@ +# +export TOX_SCENARIO ?= default +export TOX_ANSIBLE ?= ansible_8.5 + +.PHONY: converge destroy verify test lint + +default: converge + +converge: + @hooks/converge + +destroy: + @hooks/destroy + +verify: + @hooks/verify + +test: + @hooks/test + +lint: + @hooks/lint diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/user/README.md b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/user/README.md new file mode 100644 index 0000000..1734676 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/user/README.md @@ -0,0 +1,2 @@ + +https://www.freedesktop.org/software/systemd/man/systemd-user.conf.html diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/user/defaults/main.yml b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/user/defaults/main.yml new file mode 100644 index 0000000..b384eed --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/user/defaults/main.yml @@ -0,0 +1,45 @@ +--- + +systemd_user: {} + # log_level: "" # info + # log_target: "" # auto + # log_color: "" # yes + # log_location: "" # no + # log_time: "" # no + # system_call_architectures: "" + # timer_slack_nsec: "" + # status_unit_format: "" # description + # default_timer_accuracy_sec: "" # 1min + # default_standard_output: "" # inherit + # default_standard_error: "" # inherit + # default_timeout_start_sec: "" # 90s + # default_timeout_stop_sec: "" # 90s + # default_timeout_abort_sec: "" + # default_device_timeout_sec: "" # 90s + # default_restart_sec: "" # 100ms + # default_start_limit_interval_sec: "" # 10s + # default_start_limit_burst: "" # 5 + # default_environment: "" + # default_limit_cpu: "" + # default_limit_fsize: "" + # default_limit_data: "" + # default_limit_stack: "" + # default_limit_core: "" + # default_limit_rss: "" + # default_limit_nofile: "" + # default_limit_as: "" + # default_limit_nproc: "" + # default_limit_memlock: "" + # default_limit_locks: "" + # default_limit_sigpending: "" + # default_limit_msgqueue: "" + # default_limit_nice: "" + # default_limit_rtprio: "" + # default_limit_rttime: "" + # default_memory_pressure_threshold_sec: "" # 200ms + # default_memory_pressure_watch: "" # auto + # default_smack_process_label: "" + # reload_limit_interval_sec: "" + # reload_limit_burst: "" + +... 
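The `user` role's defaults deliberately ship `systemd_user` as an empty dictionary, with every supported key listed only as a comment; consumers opt in key by key. A hedged sketch of such an override (values purely illustrative), which the same combine-then-template flow turns into a sparse `/etc/systemd/user.conf`:

```yaml
---
# Illustrative override: because the role's systemd_defaults_user holds
# empty strings, only the keys set here produce [Manager] entries.
systemd_user:
  log_level: "debug"               # -> LogLevel = debug
  default_limit_nofile: "65535"    # -> DefaultLimitNOFILE = 65535
```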
diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/user/hooks/converge b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/user/hooks/converge new file mode 100755 index 0000000..0c50932 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/user/hooks/converge @@ -0,0 +1,3 @@ +#!/usr/bin/env bash + +hooks/tox.sh "converge" diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/user/hooks/destroy b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/user/hooks/destroy new file mode 100755 index 0000000..b4a3f8d --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/user/hooks/destroy @@ -0,0 +1,3 @@ +#!/usr/bin/env bash + +hooks/tox.sh "destroy" diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/user/hooks/lint b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/user/hooks/lint new file mode 100755 index 0000000..ef226a0 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/user/hooks/lint @@ -0,0 +1,3 @@ +#!/usr/bin/env bash + +hooks/tox.sh "lint" diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/user/hooks/molecule.rc b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/user/hooks/molecule.rc new file mode 100644 index 0000000..a15f7c3 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/user/hooks/molecule.rc @@ -0,0 +1,9 @@ + +TOX_ARGS= + +if [ -n "${TOX_SCENARIO}" ] +then + TOX_ARGS="--scenario-name ${TOX_SCENARIO}" +fi + +TOX_OPTS="-e ${TOX_ANSIBLE}" diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/user/hooks/test b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/user/hooks/test new file mode 100755 index 0000000..2869139 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/user/hooks/test @@ -0,0 +1,3 @@ +#!/usr/bin/env bash + +hooks/tox.sh "test" diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/user/hooks/tox.sh b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/user/hooks/tox.sh new file mode 100755 index 0000000..62bb777 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/user/hooks/tox.sh @@ -0,0 +1,26 @@ +#!/usr/bin/env bash + +. hooks/molecule.rc + +TOX_TEST="${1}" + +if [ -f "./collections.yml" ] +then + for collection in $(grep -v "#" collections.yml | grep "^ - name: " | awk -F ': ' '{print $2}') + do + # silence ansible-galaxy warnings so they don't pollute the grep result + collections_installed="$(ansible-galaxy collection list 2> /dev/null | grep "${collection}")" + + if [ -z "${collections_installed}" ] + then + echo "Install the required collection '${collection}'" + ansible-galaxy collection install "${collection}" + else + collection_version=$(echo "${collections_installed}" | awk -F ' ' '{print $2}') + + echo "The required collection '${collection}' is installed in version ${collection_version}."
+ fi + done + echo "" +fi + +tox ${TOX_OPTS} -- molecule ${TOX_TEST} ${TOX_ARGS} diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/user/hooks/verify b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/user/hooks/verify new file mode 100755 index 0000000..5f436af --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/user/hooks/verify @@ -0,0 +1,3 @@ +#!/usr/bin/env bash + +hooks/tox.sh "verify" diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/user/molecule/configured/converge.yml b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/user/molecule/configured/converge.yml new file mode 100644 index 0000000..25e4f09 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/user/molecule/configured/converge.yml @@ -0,0 +1,7 @@ +--- +- name: converge + hosts: instance + any_errors_fatal: false + + roles: + - role: bodsch.systemd.user diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/user/molecule/configured/group_vars/all/vars.yml b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/user/molecule/configured/group_vars/all/vars.yml new file mode 100644 index 0000000..5ae73ba --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/user/molecule/configured/group_vars/all/vars.yml @@ -0,0 +1,45 @@ +--- + +systemd_user: + log_level: "warning" # info + log_target: "" # auto + log_color: false # yes + log_location: true # no + log_time: true # no + system_call_architectures: "" + timer_slack_nsec: "" + status_unit_format: "" # description + default_timer_accuracy_sec: "" # 1min + default_standard_output: "" # inherit + default_standard_error: "" # inherit + default_timeout_start_sec: "" # 90s + default_timeout_stop_sec: "" # 90s + default_timeout_abort_sec: "" + default_device_timeout_sec: "" # 90s + default_restart_sec: "" # 100ms + default_start_limit_interval_sec: "" # 10s + default_start_limit_burst: "" # 5 + default_environment: "" + default_limit_cpu: "" + default_limit_fsize: "" + default_limit_data: "" + default_limit_stack: "" + default_limit_core: "" + default_limit_rss: "" + default_limit_nofile: "" + default_limit_as: "" + default_limit_nproc: "" + default_limit_memlock: "" + default_limit_locks: "" + default_limit_sigpending: "" + default_limit_msgqueue: "" + default_limit_nice: "" + default_limit_rtprio: "" + default_limit_rttime: "" + default_memory_pressure_threshold_sec: "" # 200ms + default_memory_pressure_watch: "" # auto + default_smack_process_label: "" + reload_limit_interval_sec: "" + reload_limit_burst: "" + +... 
diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/user/molecule/configured/molecule.yml b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/user/molecule/configured/molecule.yml new file mode 100644 index 0000000..67a0c0f --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/user/molecule/configured/molecule.yml @@ -0,0 +1,51 @@ +dependency: + name: galaxy + +driver: + name: docker + +platforms: + - name: instance + image: "ghcr.io/bodsch/docker-ansible/ansible-${DISTRIBUTION:-debian:12}" + command: ${MOLECULE_DOCKER_COMMAND:-""} + docker_host: "${DOCKER_HOST:-unix://run/docker.sock}" + privileged: true + pre_build_image: true + cgroupns_mode: host + mounts: + - source: /sys/fs/cgroup + target: /sys/fs/cgroup + type: bind + read_only: false + volumes: + - /var/lib/containerd + capabilities: + - SYS_ADMIN + tmpfs: + - /run + - /tmp + +provisioner: + name: ansible + ansible_args: + - --diff + - -v + config_options: + defaults: + deprecation_warnings: true + callback_result_format: yaml + callbacks_enabled: profile_tasks + gathering: smart + fact_caching_timeout: 320 +scenario: + test_sequence: + - destroy + - dependency + - create + - prepare + - converge + - idempotence + - verify + - destroy +verifier: + name: testinfra diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/user/molecule/configured/prepare.yml b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/user/molecule/configured/prepare.yml new file mode 100644 index 0000000..d680e48 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/user/molecule/configured/prepare.yml @@ -0,0 +1,49 @@ +--- + +- name: information + hosts: all + gather_facts: true + + pre_tasks: + - name: arch- / artixlinux + when: + - ansible_facts.distribution | lower == 'archlinux' or + ansible_facts.os_family | lower == 'artix linux' + block: + - name: update pacman system + ansible.builtin.command: | + pacman --refresh --sync --sysupgrade --noconfirm + register: pacman + changed_when: pacman.rc != 0 + failed_when: pacman.rc != 0 + + - name: create depends service + ansible.builtin.copy: + mode: 0755 + dest: /etc/init.d/net + content: | + #!/usr/bin/openrc-run + true + when: + - ansible_facts.os_family | lower == 'artix linux' + + - name: make sure python3-apt is installed (only debian based) + ansible.builtin.package: + name: + - python3-apt + state: present + when: + - ansible_facts.os_family | lower == 'debian' + + - name: update package cache + become: true + ansible.builtin.package: + update_cache: true + + - name: environment + ansible.builtin.debug: + msg: + - "os family : {{ ansible_facts.distribution }} ({{ ansible_facts.os_family }})" + - "distribution version : {{ ansible_facts.distribution_major_version }}" + - "ansible version : {{ ansible_version.full }}" + - "python version : {{ ansible_facts.python.version.major }}.{{ ansible_facts.python.version.minor }}" diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/user/molecule/configured/tests/test_default.py b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/user/molecule/configured/tests/test_default.py new file mode 100644 index 0000000..827e095 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/user/molecule/configured/tests/test_default.py @@ -0,0 +1,146 @@ +# coding: utf-8 +from __future__ import unicode_literals + +import 
json +import os + +import pytest +import testinfra.utils.ansible_runner +from ansible.parsing.dataloader import DataLoader +from ansible.template import Templar + +HOST = "instance" + +testinfra_hosts = testinfra.utils.ansible_runner.AnsibleRunner( + os.environ["MOLECULE_INVENTORY_FILE"] +).get_hosts(HOST) + + +def pp_json(json_thing, sort=True, indents=2): + if type(json_thing) is str: + print(json.dumps(json.loads(json_thing), sort_keys=sort, indent=indents)) + else: + print(json.dumps(json_thing, sort_keys=sort, indent=indents)) + return None + + +def base_directory(): + """ """ + cwd = os.getcwd() + + if "group_vars" in os.listdir(cwd): + directory = "../.." + molecule_directory = "." + else: + directory = "." + molecule_directory = f"molecule/{os.environ.get('MOLECULE_SCENARIO_NAME')}" + + return directory, molecule_directory + + +def read_ansible_yaml(file_name, role_name): + """ """ + read_file = None + + for e in ["yml", "yaml"]: + test_file = f"{file_name}.{e}" + if os.path.isfile(test_file): + read_file = test_file + break + + return f"file={read_file} name={role_name}" + + +@pytest.fixture() +def get_vars(host): + """ + parse ansible variables + - defaults/main.yml + - vars/main.yml + - vars/${DISTRIBUTION}.yaml + - molecule/${MOLECULE_SCENARIO_NAME}/group_vars/all/vars.yml + """ + base_dir, molecule_dir = base_directory() + distribution = host.system_info.distribution + operating_system = None + + if distribution in ["debian", "ubuntu"]: + operating_system = "debian" + elif distribution in ["redhat", "ol", "centos", "rocky", "almalinux"]: + operating_system = "redhat" + elif distribution in ["arch", "artix"]: + operating_system = f"{distribution}linux" + + # print(" -> {} / {}".format(distribution, operating_system)) + # print(" -> {}".format(base_dir)) + + file_defaults = read_ansible_yaml(f"{base_dir}/defaults/main", "role_defaults") + file_vars = read_ansible_yaml(f"{base_dir}/vars/main", "role_vars") + file_distribution = read_ansible_yaml( + f"{base_dir}/vars/{operating_system}", "role_distribution" + ) + file_molecule = read_ansible_yaml( + f"{molecule_dir}/group_vars/all/vars", "test_vars" + ) + # file_host_molecule = read_ansible_yaml("{}/host_vars/{}/vars".format(base_dir, HOST), "host_vars") + + defaults_vars = ( + host.ansible("include_vars", file_defaults) + .get("ansible_facts") + .get("role_defaults") + ) + vars_vars = ( + host.ansible("include_vars", file_vars).get("ansible_facts").get("role_vars") + ) + distribution_vars = ( + host.ansible("include_vars", file_distribution) + .get("ansible_facts") + .get("role_distribution") + ) + molecule_vars = ( + host.ansible("include_vars", file_molecule) + .get("ansible_facts") + .get("test_vars") + ) + # host_vars = host.ansible("include_vars", file_host_molecule).get("ansible_facts").get("host_vars") + + ansible_vars = defaults_vars + ansible_vars.update(vars_vars) + ansible_vars.update(distribution_vars) + ansible_vars.update(molecule_vars) + # ansible_vars.update(host_vars) + + templar = Templar(loader=DataLoader(), variables=ansible_vars) + result = templar.template(ansible_vars, fail_on_undefined=False) + + return result + + +def local_facts(host): + """ + return local facts + """ + return host.ansible("setup").get("ansible_facts").get("ansible_local").get("user") + + +@pytest.mark.parametrize( + "directories", + [ + "/etc/systemd", + ], +) +def test_directories(host, directories): + d = host.file(directories) + assert d.is_directory + + +@pytest.mark.parametrize( + "files", + [ + "/etc/systemd/user.conf", + ], +) +def 
test_systemd_files(host, files): + """ """ + d = host.file(files) + assert d.is_file diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/user/molecule/default/converge.yml b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/user/molecule/default/converge.yml new file mode 100644 index 0000000..25e4f09 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/user/molecule/default/converge.yml @@ -0,0 +1,7 @@ +--- +- name: converge + hosts: instance + any_errors_fatal: false + + roles: + - role: bodsch.systemd.user diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/user/molecule/default/group_vars/all/vars.yml b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/user/molecule/default/group_vars/all/vars.yml new file mode 100644 index 0000000..c81cf5b --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/user/molecule/default/group_vars/all/vars.yml @@ -0,0 +1,3 @@ +--- + +... diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/user/molecule/default/molecule.yml b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/user/molecule/default/molecule.yml new file mode 100644 index 0000000..67a0c0f --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/user/molecule/default/molecule.yml @@ -0,0 +1,51 @@ +dependency: + name: galaxy + +driver: + name: docker + +platforms: + - name: instance + image: "ghcr.io/bodsch/docker-ansible/ansible-${DISTRIBUTION:-debian:12}" + command: ${MOLECULE_DOCKER_COMMAND:-""} + docker_host: "${DOCKER_HOST:-unix://run/docker.sock}" + privileged: true + pre_build_image: true + cgroupns_mode: host + mounts: + - source: /sys/fs/cgroup + target: /sys/fs/cgroup + type: bind + read_only: false + volumes: + - /var/lib/containerd + capabilities: + - SYS_ADMIN + tmpfs: + - /run + - /tmp + +provisioner: + name: ansible + ansible_args: + - --diff + - -v + config_options: + defaults: + deprecation_warnings: true + callback_result_format: yaml + callbacks_enabled: profile_tasks + gathering: smart + fact_caching_timeout: 320 +scenario: + test_sequence: + - destroy + - dependency + - create + - prepare + - converge + - idempotence + - verify + - destroy +verifier: + name: testinfra diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/user/molecule/default/prepare.yml b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/user/molecule/default/prepare.yml new file mode 100644 index 0000000..d680e48 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/user/molecule/default/prepare.yml @@ -0,0 +1,49 @@ +--- + +- name: information + hosts: all + gather_facts: true + + pre_tasks: + - name: arch- / artixlinux + when: + - ansible_facts.distribution | lower == 'archlinux' or + ansible_facts.os_family | lower == 'artix linux' + block: + - name: update pacman system + ansible.builtin.command: | + pacman --refresh --sync --sysupgrade --noconfirm + register: pacman + changed_when: pacman.rc != 0 + failed_when: pacman.rc != 0 + + - name: create depends service + ansible.builtin.copy: + mode: 0755 + dest: /etc/init.d/net + content: | + #!/usr/bin/openrc-run + true + when: + - ansible_facts.os_family | lower == 'artix linux' + + - name: make sure python3-apt is installed (only debian based) + ansible.builtin.package: + name: + - python3-apt + state: 
present + when: + - ansible_facts.os_family | lower == 'debian' + + - name: update package cache + become: true + ansible.builtin.package: + update_cache: true + + - name: environment + ansible.builtin.debug: + msg: + - "os family : {{ ansible_facts.distribution }} ({{ ansible_facts.os_family }})" + - "distribution version : {{ ansible_facts.distribution_major_version }}" + - "ansible version : {{ ansible_version.full }}" + - "python version : {{ ansible_facts.python.version.major }}.{{ ansible_facts.python.version.minor }}" diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/user/molecule/default/tests/test_default.py b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/user/molecule/default/tests/test_default.py new file mode 100644 index 0000000..827e095 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/user/molecule/default/tests/test_default.py @@ -0,0 +1,146 @@ +# coding: utf-8 +from __future__ import unicode_literals + +import json +import os + +import pytest +import testinfra.utils.ansible_runner +from ansible.parsing.dataloader import DataLoader +from ansible.template import Templar + +HOST = "instance" + +testinfra_hosts = testinfra.utils.ansible_runner.AnsibleRunner( + os.environ["MOLECULE_INVENTORY_FILE"] +).get_hosts(HOST) + + +def pp_json(json_thing, sort=True, indents=2): + if type(json_thing) is str: + print(json.dumps(json.loads(json_thing), sort_keys=sort, indent=indents)) + else: + print(json.dumps(json_thing, sort_keys=sort, indent=indents)) + return None + + +def base_directory(): + """ """ + cwd = os.getcwd() + + if "group_vars" in os.listdir(cwd): + directory = "../.." + molecule_directory = "." + else: + directory = "." + molecule_directory = f"molecule/{os.environ.get('MOLECULE_SCENARIO_NAME')}" + + return directory, molecule_directory + + +def read_ansible_yaml(file_name, role_name): + """ """ + read_file = None + + for e in ["yml", "yaml"]: + test_file = f"{file_name}.{e}" + if os.path.isfile(test_file): + read_file = test_file + break + + return f"file={read_file} name={role_name}" + + +@pytest.fixture() +def get_vars(host): + """ + parse ansible variables + - defaults/main.yml + - vars/main.yml + - vars/${DISTRIBUTION}.yaml + - molecule/${MOLECULE_SCENARIO_NAME}/group_vars/all/vars.yml + """ + base_dir, molecule_dir = base_directory() + distribution = host.system_info.distribution + operating_system = None + + if distribution in ["debian", "ubuntu"]: + operating_system = "debian" + elif distribution in ["redhat", "ol", "centos", "rocky", "almalinux"]: + operating_system = "redhat" + elif distribution in ["arch", "artix"]: + operating_system = f"{distribution}linux" + + # print(" -> {} / {}".format(distribution, operating_system)) + # print(" -> {}".format(base_dir)) + + file_defaults = read_ansible_yaml(f"{base_dir}/defaults/main", "role_defaults") + file_vars = read_ansible_yaml(f"{base_dir}/vars/main", "role_vars") + file_distribution = read_ansible_yaml( + f"{base_dir}/vars/{operating_system}", "role_distribution" + ) + file_molecule = read_ansible_yaml( + f"{molecule_dir}/group_vars/all/vars", "test_vars" + ) + # file_host_molecule = read_ansible_yaml("{}/host_vars/{}/vars".format(base_dir, HOST), "host_vars") + + defaults_vars = ( + host.ansible("include_vars", file_defaults) + .get("ansible_facts") + .get("role_defaults") + ) + vars_vars = ( + host.ansible("include_vars", file_vars).get("ansible_facts").get("role_vars") + ) + distribution_vars = ( + host.ansible("include_vars", file_distribution) + .get("ansible_facts") + .get("role_distribution") + ) + molecule_vars = ( + host.ansible("include_vars", file_molecule) + .get("ansible_facts") + .get("test_vars") + ) + # host_vars = host.ansible("include_vars", file_host_molecule).get("ansible_facts").get("host_vars") + + ansible_vars = defaults_vars + ansible_vars.update(vars_vars) + ansible_vars.update(distribution_vars) + ansible_vars.update(molecule_vars) + # ansible_vars.update(host_vars) + + templar = Templar(loader=DataLoader(), variables=ansible_vars) + result = templar.template(ansible_vars, fail_on_undefined=False) + + return result + + +def local_facts(host): + """ + return local facts + """ + return host.ansible("setup").get("ansible_facts").get("ansible_local").get("user") + + +@pytest.mark.parametrize( + "directories", + [ + "/etc/systemd", + ], +) +def test_directories(host, directories): + d = host.file(directories) + assert d.is_directory + + +@pytest.mark.parametrize( + "files", + [ + "/etc/systemd/user.conf", + ], +) +def test_systemd_files(host, files): + """ """ + d = host.file(files) + assert d.is_file diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/user/tasks/main.yml b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/user/tasks/main.yml new file mode 100644 index 0000000..a098f5c --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/user/tasks/main.yml @@ -0,0 +1,15 @@ +--- + +- name: merge systemd user configuration between defaults and custom + ansible.builtin.set_fact: + systemd_user: "{{ systemd_defaults_user | combine(systemd_user, recursive=True) }}" + +- name: create systemd user configuration + ansible.builtin.template: + src: systemd/user.conf.j2 + dest: /etc/systemd/user.conf + mode: 0644 + owner: root + group: root + +... diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/user/templates/systemd/user.conf.j2 b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/user/templates/systemd/user.conf.j2 new file mode 100644 index 0000000..06d50ab --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/user/templates/systemd/user.conf.j2 @@ -0,0 +1,185 @@ +#jinja2: trim_blocks: True, lstrip_blocks: True +# {{ ansible_managed }} +# +# This file is part of systemd. +# +# systemd is free software; you can redistribute it and/or modify it under the +# terms of the GNU Lesser General Public License as published by the Free +# Software Foundation; either version 2.1 of the License, or (at your option) +# any later version. +# +# Entries in this file show the compile time defaults. Local configuration +# should be created by either modifying this file, or by creating "drop-ins" in +# the user.conf.d/ subdirectory. The latter is generally recommended. +# Defaults can be restored by simply deleting this file and all drop-ins. +# +# See systemd-user.conf(5) for details. 
+ +[Manager] +{% if systemd_user.log_level is defined and + systemd_user.log_level | string | length > 0 and + systemd_user.log_level in ["emerg", "alert", "crit", "err", "warning", "notice", "info", "debug"] %} +LogLevel = {{ systemd_user.log_level }} +{% endif %} +{% if systemd_user.log_target is defined and + systemd_user.log_target | string | length > 0 %} +LogTarget = {{ systemd_user.log_target }} +{% endif %} +{% if systemd_user.log_color is defined and + systemd_user.log_color | string | length > 0 %} + {% if systemd_user.log_color | bodsch.core.type == "bool" %} +LogColor = {{ systemd_user.log_color | bool | bodsch.core.config_bool(true_as='yes', false_as='no') }} + {% endif %} +{% endif %} +{% if systemd_user.log_location is defined and + systemd_user.log_location | string | length > 0 %} + {% if systemd_user.log_location | bodsch.core.type == "bool" %} +LogLocation = {{ systemd_user.log_location | bool | bodsch.core.config_bool(true_as='yes', false_as='no') }} + {% endif %} +{% endif %} +{% if systemd_user.log_time is defined and + systemd_user.log_time | string | length > 0 %} + {% if systemd_user.log_time | bodsch.core.type == "bool" %} +LogTime = {{ systemd_user.log_time | bool | bodsch.core.config_bool(true_as='yes', false_as='no') }} + {% endif %} +{% endif %} +{% if systemd_user.system_call_architectures is defined and + systemd_user.system_call_architectures | string | length > 0 %} +SystemCallArchitectures = {{ systemd_user.system_call_architectures }} +{% endif %} +{% if systemd_user.timer_slack_nsec is defined and + systemd_user.timer_slack_nsec | string | length > 0 %} +TimerSlackNSec = {{ systemd_user.timer_slack_nsec }} +{% endif %} +{% if systemd_user.status_unit_format is defined and + systemd_user.status_unit_format | string | length > 0 %} +StatusUnitFormat = {{ systemd_user.status_unit_format }} +{% endif %} +{% if systemd_user.default_timer_accuracy_sec is defined and + systemd_user.default_timer_accuracy_sec | string | length > 0 %} +DefaultTimerAccuracySec = {{ systemd_user.default_timer_accuracy_sec }} +{% endif %} +{% if systemd_user.default_standard_output is defined and + systemd_user.default_standard_output | string | length > 0 %} +DefaultStandardOutput = {{ systemd_user.default_standard_output }} +{% endif %} +{% if systemd_user.default_standard_error is defined and + systemd_user.default_standard_error | string | length > 0 %} +DefaultStandardError = {{ systemd_user.default_standard_error }} +{% endif %} +{% if systemd_user.default_timeout_start_sec is defined and + systemd_user.default_timeout_start_sec | string | length > 0 %} +DefaultTimeoutStartSec = {{ systemd_user.default_timeout_start_sec }} +{% endif %} +{% if systemd_user.default_timeout_stop_sec is defined and + systemd_user.default_timeout_stop_sec | string | length > 0 %} +DefaultTimeoutStopSec = {{ systemd_user.default_timeout_stop_sec }} +{% endif %} +{% if systemd_user.default_timeout_abort_sec is defined and + systemd_user.default_timeout_abort_sec | string | length > 0 %} +DefaultTimeoutAbortSec = {{ systemd_user.default_timeout_abort_sec }} +{% endif %} +{% if systemd_user.default_device_timeout_sec is defined and + systemd_user.default_device_timeout_sec | string | length > 0 %} +DefaultDeviceTimeoutSec = {{ systemd_user.default_device_timeout_sec }} +{% endif %} +{% if systemd_user.default_restart_sec is defined and + systemd_user.default_restart_sec | string | length > 0 %} +DefaultRestartSec = {{ systemd_user.default_restart_sec }} +{% endif %} +{% if 
systemd_user.default_start_limit_interval_sec is defined and + systemd_user.default_start_limit_interval_sec | string | length > 0 %} +DefaultStartLimitIntervalSec = {{ systemd_user.default_start_limit_interval_sec }} +{% endif %} +{% if systemd_user.default_start_limit_burst is defined and + systemd_user.default_start_limit_burst | string | length > 0 %} +DefaultStartLimitBurst = {{ systemd_user.default_start_limit_burst }} +{% endif %} +{% if systemd_user.default_environment is defined and + systemd_user.default_environment | string | length > 0 %} +DefaultEnvironment = {{ systemd_user.default_environment }} +{% endif %} +{% if systemd_user.default_limit_cpu is defined and + systemd_user.default_limit_cpu | string | length > 0 %} +DefaultLimitCPU = {{ systemd_user.default_limit_cpu }} +{% endif %} +{% if systemd_user.default_limit_fsize is defined and + systemd_user.default_limit_fsize | string | length > 0 %} +DefaultLimitFSIZE = {{ systemd_user.default_limit_fsize }} +{% endif %} +{% if systemd_user.default_limit_data is defined and + systemd_user.default_limit_data | string | length > 0 %} +DefaultLimitDATA = {{ systemd_user.default_limit_data }} +{% endif %} +{% if systemd_user.default_limit_stack is defined and + systemd_user.default_limit_stack | string | length > 0 %} +DefaultLimitSTACK = {{ systemd_user.default_limit_stack }} +{% endif %} +{% if systemd_user.default_limit_core is defined and + systemd_user.default_limit_core | string | length > 0 %} +DefaultLimitCORE = {{ systemd_user.default_limit_core }} +{% endif %} +{% if systemd_user.default_limit_rss is defined and + systemd_user.default_limit_rss | string | length > 0 %} +DefaultLimitRSS = {{ systemd_user.default_limit_rss }} +{% endif %} +{% if systemd_user.default_limit_nofile is defined and + systemd_user.default_limit_nofile | string | length > 0 %} +DefaultLimitNOFILE = {{ systemd_user.default_limit_nofile }} +{% endif %} +{% if systemd_user.default_limit_as is defined and + systemd_user.default_limit_as | string | length > 0 %} +DefaultLimitAS = {{ systemd_user.default_limit_as }} +{% endif %} +{% if systemd_user.default_limit_nproc is defined and + systemd_user.default_limit_nproc | string | length > 0 %} +DefaultLimitNPROC = {{ systemd_user.default_limit_nproc }} +{% endif %} +{% if systemd_user.default_limit_memlock is defined and + systemd_user.default_limit_memlock | string | length > 0 %} +DefaultLimitMEMLOCK = {{ systemd_user.default_limit_memlock }} +{% endif %} +{% if systemd_user.default_limit_locks is defined and + systemd_user.default_limit_locks | string | length > 0 %} +DefaultLimitLOCKS = {{ systemd_user.default_limit_locks }} +{% endif %} +{% if systemd_user.default_limit_sigpending is defined and + systemd_user.default_limit_sigpending | string | length > 0 %} +DefaultLimitSIGPENDING = {{ systemd_user.default_limit_sigpending }} +{% endif %} +{% if systemd_user.default_limit_msgqueue is defined and + systemd_user.default_limit_msgqueue | string | length > 0 %} +DefaultLimitMSGQUEUE = {{ systemd_user.default_limit_msgqueue }} +{% endif %} +{% if systemd_user.default_limit_nice is defined and + systemd_user.default_limit_nice | string | length > 0 %} +DefaultLimitNICE = {{ systemd_user.default_limit_nice }} +{% endif %} +{% if systemd_user.default_limit_rtprio is defined and + systemd_user.default_limit_rtprio | string | length > 0 %} +DefaultLimitRTPRIO = {{ systemd_user.default_limit_rtprio }} +{% endif %} +{% if systemd_user.default_limit_rttime is defined and + systemd_user.default_limit_rttime | 
string | length > 0 %} +DefaultLimitRTTIME = {{ systemd_user.default_limit_rttime }} +{% endif %} +{% if systemd_user.default_memory_pressure_threshold_sec is defined and + systemd_user.default_memory_pressure_threshold_sec | string | length > 0 %} +DefaultMemoryPressureThresholdSec = {{ systemd_user.default_memory_pressure_threshold_sec }} +{% endif %} +{% if systemd_user.default_memory_pressure_watch is defined and + systemd_user.default_memory_pressure_watch | string | length > 0 %} +DefaultMemoryPressureWatch = {{ systemd_user.default_memory_pressure_watch }} +{% endif %} +{% if systemd_user.default_smack_process_label is defined and + systemd_user.default_smack_process_label | string | length > 0 %} +DefaultSmackProcessLabel = {{ systemd_user.default_smack_process_label }} +{% endif %} +{% if systemd_user.reload_limit_interval_sec is defined and + systemd_user.reload_limit_interval_sec | string | length > 0 %} +ReloadLimitIntervalSec = {{ systemd_user.reload_limit_interval_sec }} +{% endif %} +{% if systemd_user.reload_limit_burst is defined and + systemd_user.reload_limit_burst | string | length > 0 %} +ReloadLimitBurst = {{ systemd_user.reload_limit_burst }} +{% endif %} diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/user/vars/main.yml b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/user/vars/main.yml new file mode 100644 index 0000000..e2d1bcc --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/roles/user/vars/main.yml @@ -0,0 +1,45 @@ +--- + +systemd_defaults_user: + log_level: "" # info + log_target: "" # auto + log_color: "" # yes + log_location: "" # no + log_time: "" # no + system_call_architectures: "" + timer_slack_nsec: "" + status_unit_format: "" # description + default_timer_accuracy_sec: "" # 1min + default_standard_output: "" # inherit + default_standard_error: "" # inherit + default_timeout_start_sec: "" # 90s + default_timeout_stop_sec: "" # 90s + default_timeout_abort_sec: "" + default_device_timeout_sec: "" # 90s + default_restart_sec: "" # 100ms + default_start_limit_interval_sec: "" # 10s + default_start_limit_burst: "" # 5 + default_environment: "" + default_limit_cpu: "" + default_limit_fsize: "" + default_limit_data: "" + default_limit_stack: "" + default_limit_core: "" + default_limit_rss: "" + default_limit_nofile: "" + default_limit_as: "" + default_limit_nproc: "" + default_limit_memlock: "" + default_limit_locks: "" + default_limit_sigpending: "" + default_limit_msgqueue: "" + default_limit_nice: "" + default_limit_rtprio: "" + default_limit_rttime: "" + default_memory_pressure_threshold_sec: "" # 200ms + default_memory_pressure_watch: "" # auto + default_smack_process_label: "" + reload_limit_interval_sec: "" + reload_limit_burst: "" + +... 
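Note the extra guard around the three log toggles in `user.conf.j2`: a value is only rendered when the `bodsch.core.type` filter classifies it as a boolean, and `bodsch.core.config_bool` then maps it to systemd's `yes`/`no`. A small sketch of the resulting behaviour, assuming the filter reports native YAML booleans as `bool` (consistent with the configured-scenario vars earlier in this diff):

```yaml
---
# Assumed filter behaviour for the boolean-guarded options:
systemd_user:
  log_color: false     # boolean          -> rendered as: LogColor = no
  log_location: true   # boolean          -> rendered as: LogLocation = yes
  log_time: "yes"      # string, not bool -> the LogTime line is omitted
```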
diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/systemd/test-requirements.txt b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/test-requirements.txt new file mode 100644 index 0000000..dc5c9a1 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/test-requirements.txt @@ -0,0 +1,11 @@ +ansible-lint +docker +dnspython +flake8 +molecule +molecule-plugins[docker] +netaddr +pytest-testinfra +tox +tox-gh-actions +yamllint diff --git a/ansible/playbooks/collections/ansible_collections/bodsch/systemd/tox.ini b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/tox.ini new file mode 100644 index 0000000..c3099d3 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/bodsch/systemd/tox.ini @@ -0,0 +1,39 @@ +[tox] +ignore_basepython_conflict = True +skip_missing_interpreters = True + +minversion = 3.25 +toxworkdir = /tmp/.tox/ + +skipsdist = true + +[testenv] +passenv = * + +# allowlist_externals = +# /usr/bin/find +# /bin/sh +# rm + +deps = + -r test-requirements.txt + ansible_4.10: ansible>=4.10,<4.11 + ansible_5.1: ansible>=5.1,<5.2 + ansible_5.2: ansible>=5.2,<5.3 + ansible_5.10: ansible>=5.10,<5.11 + ansible_6.1: ansible>=6.1,<6.2 + ansible_6.7: ansible>=6.7,<6.8 + ansible_7.0: ansible>=7.0,<7.1 + ansible_7.5: ansible>=7.5,<7.6 + ansible_8.0: ansible>=8.0,<8.1 + ansible_8.5: ansible>=8.5,<8.6 + ansible_9.0: ansible>=9.0,<9.1 + ansible_9.5: ansible>=9.5,<9.6 + ansible_10.0: ansible>=10.0,<10.1 + +#commands_pre = +# /usr/bin/find {toxinidir} -type f -not -path '{toxworkdir}/*' -path '*/__pycache__/*' -name '*.py[c|o]' -delete +# /bin/sh -c '/usr/bin/find {homedir}/.cache -type d -path "*/molecule_*" -exec rm -rfv \{\} +;' + +commands = + {posargs:molecule test --all --destroy always} diff --git a/ansible/playbooks/collections/ansible_collections/community.general-12.4.0.info/GALAXY.yml b/ansible/playbooks/collections/ansible_collections/community.general-12.4.0.info/GALAXY.yml new file mode 100644 index 0000000..f2867a8 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/community.general-12.4.0.info/GALAXY.yml @@ -0,0 +1,8 @@ +download_url: https://galaxy.ansible.com/api/v3/plugin/ansible/content/published/collections/artifacts/community-general-12.4.0.tar.gz +format_version: 1.0.0 +name: general +namespace: community +server: https://galaxy.ansible.com/api/ +signatures: [] +version: 12.4.0 +version_url: /api/v3/plugin/ansible/content/published/collections/index/community/general/versions/12.4.0/ diff --git a/ansible/playbooks/collections/ansible_collections/community/general/.azure-pipelines/README.md b/ansible/playbooks/collections/ansible_collections/community/general/.azure-pipelines/README.md new file mode 100644 index 0000000..9e8ad74 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/community/general/.azure-pipelines/README.md @@ -0,0 +1,9 @@ + + +## Azure Pipelines Configuration + +Please see the [Documentation](https://github.com/ansible/community/wiki/Testing:-Azure-Pipelines) for more information. 
diff --git a/ansible/playbooks/collections/ansible_collections/community/general/.azure-pipelines/azure-pipelines.yml b/ansible/playbooks/collections/ansible_collections/community/general/.azure-pipelines/azure-pipelines.yml new file mode 100644 index 0000000..00717b9 --- /dev/null +++ b/ansible/playbooks/collections/ansible_collections/community/general/.azure-pipelines/azure-pipelines.yml @@ -0,0 +1,425 @@ +--- +# Copyright (c) Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +trigger: + batch: true + branches: + include: + - main + - stable-* + +pr: + autoCancel: true + branches: + include: + - main + - stable-* + +schedules: + - cron: 0 8 * * * + displayName: Nightly (main) + always: true + branches: + include: + - main + - cron: 0 10 * * * + displayName: Nightly (active stable branches) + always: true + branches: + include: + - stable-12 + - stable-11 + - cron: 0 11 * * 0 + displayName: Weekly (old stable branches) + always: true + branches: + include: + - stable-10 + +variables: + - name: checkoutPath + value: ansible_collections/community/general + - name: coverageBranches + value: main + - name: entryPoint + value: tests/utils/shippable/shippable.sh + - name: fetchDepth + value: 0 + +resources: + containers: + - container: default + image: quay.io/ansible/azure-pipelines-test-container:7.0.0 + +pool: Standard + +stages: +### Sanity + - stage: Sanity_devel + displayName: Sanity devel + dependsOn: [] + jobs: + - template: templates/matrix.yml + parameters: + nameFormat: Test {0} + testFormat: devel/sanity/{0} + targets: + - test: 1 + - test: 2 + - test: 3 + - test: 4 + - stage: Sanity_2_20 + displayName: Sanity 2.20 + dependsOn: [] + jobs: + - template: templates/matrix.yml + parameters: + nameFormat: Test {0} + testFormat: 2.20/sanity/{0} + targets: + - test: 1 + - test: 2 + - test: 3 + - test: 4 + - stage: Sanity_2_19 + displayName: Sanity 2.19 + dependsOn: [] + jobs: + - template: templates/matrix.yml + parameters: + nameFormat: Test {0} + testFormat: 2.19/sanity/{0} + targets: + - test: 1 + - test: 2 + - test: 3 + - test: 4 + - stage: Sanity_2_18 + displayName: Sanity 2.18 + dependsOn: [] + jobs: + - template: templates/matrix.yml + parameters: + nameFormat: Test {0} + testFormat: 2.18/sanity/{0} + targets: + - test: 1 + - test: 2 + - test: 3 + - test: 4 +### Units + - stage: Units_devel + displayName: Units devel + dependsOn: [] + jobs: + - template: templates/matrix.yml + parameters: + nameFormat: Python {0} + testFormat: devel/units/{0}/1 + targets: + - test: 3.9 + - test: '3.10' + - test: '3.11' + - test: '3.12' + - test: '3.13' + - test: '3.14' + - stage: Units_2_20 + displayName: Units 2.20 + dependsOn: [] + jobs: + - template: templates/matrix.yml + parameters: + nameFormat: Python {0} + testFormat: 2.20/units/{0}/1 + targets: + - test: 3.9 + - test: "3.12" + - test: "3.14" + - stage: Units_2_19 + displayName: Units 2.19 + dependsOn: [] + jobs: + - template: templates/matrix.yml + parameters: + nameFormat: Python {0} + testFormat: 2.19/units/{0}/1 + targets: + - test: 3.8 + - test: "3.11" + - test: "3.13" + - stage: Units_2_18 + displayName: Units 2.18 + dependsOn: [] + jobs: + - template: templates/matrix.yml + parameters: + nameFormat: Python {0} + testFormat: 2.18/units/{0}/1 + targets: + - test: 3.8 + - test: "3.11" + - test: "3.13" + +## Remote + - stage: Remote_devel_extra_vms + displayName: Remote devel extra VMs + dependsOn: [] + jobs: + 
+      - template: templates/matrix.yml
+        parameters:
+          testFormat: devel/{0}
+          targets:
+            - name: Alpine 3.23
+              test: alpine/3.23
+            # - name: Fedora 43
+            #   test: fedora/43
+            - name: Ubuntu 22.04
+              test: ubuntu/22.04
+            - name: Ubuntu 24.04
+              test: ubuntu/24.04
+          groups:
+            - vm
+  - stage: Remote_devel
+    displayName: Remote devel
+    dependsOn: []
+    jobs:
+      - template: templates/matrix.yml
+        parameters:
+          testFormat: devel/{0}
+          targets:
+            - name: macOS 15.3
+              test: macos/15.3
+            - name: RHEL 10.1
+              test: rhel/10.1
+            - name: RHEL 9.7
+              test: rhel/9.7
+            # TODO: enable this ASAP!
+            # - name: FreeBSD 15.0
+            #   test: freebsd/15.0
+            - name: FreeBSD 14.3
+              test: freebsd/14.3
+          groups:
+            - 1
+            - 2
+            - 3
+  - stage: Remote_2_20
+    displayName: Remote 2.20
+    dependsOn: []
+    jobs:
+      - template: templates/matrix.yml
+        parameters:
+          testFormat: 2.20/{0}
+          targets:
+            - name: RHEL 10.1
+              test: rhel/10.1
+            - name: FreeBSD 14.3
+              test: freebsd/14.3
+          groups:
+            - 1
+            - 2
+            - 3
+  - stage: Remote_2_19
+    displayName: Remote 2.19
+    dependsOn: []
+    jobs:
+      - template: templates/matrix.yml
+        parameters:
+          testFormat: 2.19/{0}
+          targets:
+            - name: RHEL 10.1
+              test: rhel/10.1
+            - name: FreeBSD 14.2
+              test: freebsd/14.2
+          groups:
+            - 1
+            - 2
+            - 3
+  - stage: Remote_2_18
+    displayName: Remote 2.18
+    dependsOn: []
+    jobs:
+      - template: templates/matrix.yml
+        parameters:
+          testFormat: 2.18/{0}
+          targets:
+            - name: macOS 14.3
+              test: macos/14.3
+            - name: FreeBSD 14.1
+              test: freebsd/14.1
+          groups:
+            - 1
+            - 2
+            - 3
+
+### Docker
+  - stage: Docker_devel
+    displayName: Docker devel
+    dependsOn: []
+    jobs:
+      - template: templates/matrix.yml
+        parameters:
+          testFormat: devel/linux/{0}
+          targets:
+            - name: Fedora 43
+              test: fedora43
+            - name: Alpine 3.23
+              test: alpine323
+            - name: Ubuntu 22.04
+              test: ubuntu2204
+            - name: Ubuntu 24.04
+              test: ubuntu2404
+          groups:
+            - 1
+            - 2
+            - 3
+  - stage: Docker_2_20
+    displayName: Docker 2.20
+    dependsOn: []
+    jobs:
+      - template: templates/matrix.yml
+        parameters:
+          testFormat: 2.20/linux/{0}
+          targets:
+            - name: Fedora 42
+              test: fedora42
+            - name: Alpine 3.22
+              test: alpine322
+          groups:
+            - 1
+            - 2
+            - 3
+  - stage: Docker_2_19
+    displayName: Docker 2.19
+    dependsOn: []
+    jobs:
+      - template: templates/matrix.yml
+        parameters:
+          testFormat: 2.19/linux/{0}
+          targets:
+            - name: Fedora 41
+              test: fedora41
+            - name: Alpine 3.21
+              test: alpine321
+          groups:
+            - 1
+            - 2
+            - 3
+  - stage: Docker_2_18
+    displayName: Docker 2.18
+    dependsOn: []
+    jobs:
+      - template: templates/matrix.yml
+        parameters:
+          testFormat: 2.18/linux/{0}
+          targets:
+            - name: Fedora 40
+              test: fedora40
+            - name: Alpine 3.20
+              test: alpine320
+            - name: Ubuntu 24.04
+              test: ubuntu2404
+          groups:
+            - 1
+            - 2
+            - 3
+
+### Community Docker
+  - stage: Docker_community_devel
+    displayName: Docker (community images) devel
+    dependsOn: []
+    jobs:
+      - template: templates/matrix.yml
+        parameters:
+          testFormat: devel/linux-community/{0}
+          targets:
+            - name: Debian 11 Bullseye
+              test: debian-bullseye/3.9
+            - name: Debian 12 Bookworm
+              test: debian-bookworm/3.11
+            - name: Debian 13 Trixie
+              test: debian-13-trixie/3.13
+            - name: ArchLinux
+              test: archlinux/3.14
+          groups:
+            - 1
+            - 2
+            - 3
+
+### Generic
+# Right now all generic tests are disabled. Uncomment when at least one of them is re-enabled.
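+# Note: the testFormat values above appear to share the pattern
+# <ansible branch>/<test type>/<target>[/<group>]; the configured entry point
+# (tests/utils/shippable/shippable.sh) is presumably what splits this string and
+# maps it onto the matching ansible-test invocation.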
+#  - stage: Generic_devel
+#    displayName: Generic devel
+#    dependsOn: []
+#    jobs:
+#      - template: templates/matrix.yml
+#        parameters:
+#          nameFormat: Python {0}
+#          testFormat: devel/generic/{0}/1
+#          targets:
+#            - test: '3.9'
+#            - test: '3.12'
+#            - test: '3.14'
+#  - stage: Generic_2_20
+#    displayName: Generic 2.20
+#    dependsOn: []
+#    jobs:
+#      - template: templates/matrix.yml
+#        parameters:
+#          nameFormat: Python {0}
+#          testFormat: 2.20/generic/{0}/1
+#          targets:
+#            - test: '3.10'
+#            - test: '3.14'
+#  - stage: Generic_2_19
+#    displayName: Generic 2.19
+#    dependsOn: []
+#    jobs:
+#      - template: templates/matrix.yml
+#        parameters:
+#          nameFormat: Python {0}
+#          testFormat: 2.19/generic/{0}/1
+#          targets:
+#            - test: '3.9'
+#            - test: '3.13'
+#  - stage: Generic_2_18
+#    displayName: Generic 2.18
+#    dependsOn: []
+#    jobs:
+#      - template: templates/matrix.yml
+#        parameters:
+#          nameFormat: Python {0}
+#          testFormat: 2.18/generic/{0}/1
+#          targets:
+#            - test: '3.8'
+#            - test: '3.13'
+
+  - stage: Summary
+    condition: succeededOrFailed()
+    dependsOn:
+      - Sanity_devel
+      - Sanity_2_20
+      - Sanity_2_19
+      - Sanity_2_18
+      - Units_devel
+      - Units_2_20
+      - Units_2_19
+      - Units_2_18
+      - Remote_devel_extra_vms
+      - Remote_devel
+      - Remote_2_20
+      - Remote_2_19
+      - Remote_2_18
+      - Docker_devel
+      - Docker_2_20
+      - Docker_2_19
+      - Docker_2_18
+      - Docker_community_devel
+# Right now all generic tests are disabled. Uncomment when at least one of them is re-enabled.
+#      - Generic_devel
+#      - Generic_2_20
+#      - Generic_2_19
+#      - Generic_2_18
+    jobs:
+      - template: templates/coverage.yml
diff --git a/ansible/playbooks/collections/ansible_collections/community/general/.azure-pipelines/scripts/aggregate-coverage.sh b/ansible/playbooks/collections/ansible_collections/community/general/.azure-pipelines/scripts/aggregate-coverage.sh
new file mode 100755
index 0000000..ca2b19d
--- /dev/null
+++ b/ansible/playbooks/collections/ansible_collections/community/general/.azure-pipelines/scripts/aggregate-coverage.sh
@@ -0,0 +1,24 @@
+#!/usr/bin/env bash
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# Aggregate code coverage results for later processing.
+
+set -o pipefail -eu
+
+agent_temp_directory="$1"
+
+PATH="${PWD}/bin:${PATH}"
+
+mkdir "${agent_temp_directory}/coverage/"
+
+options=(--venv --venv-system-site-packages --color -v)
+
+ansible-test coverage combine --group-by command --export "${agent_temp_directory}/coverage/" "${options[@]}"
+
+if ansible-test coverage analyze targets generate --help >/dev/null 2>&1; then
+    # Only analyze coverage if the installed version of ansible-test supports it.
+    # Doing so allows this script to work unmodified for multiple Ansible versions.
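+    # (Probing with `--help` doubles as a feature test: the help call exits zero when
+    # the `coverage analyze targets generate` subcommand exists, while older
+    # ansible-test releases reject the unknown subcommand with a nonzero status, so
+    # the step below is skipped for them.)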
+    ansible-test coverage analyze targets generate "${agent_temp_directory}/coverage/coverage-analyze-targets.json" "${options[@]}"
+fi
diff --git a/ansible/playbooks/collections/ansible_collections/community/general/.azure-pipelines/scripts/combine-coverage.py b/ansible/playbooks/collections/ansible_collections/community/general/.azure-pipelines/scripts/combine-coverage.py
new file mode 100755
index 0000000..bf1592a
--- /dev/null
+++ b/ansible/playbooks/collections/ansible_collections/community/general/.azure-pipelines/scripts/combine-coverage.py
@@ -0,0 +1,63 @@
+#!/usr/bin/env python
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+"""
+Combine coverage data from multiple jobs, keeping the data only from the most recent attempt from each job.
+Coverage artifacts must be named using the format: "Coverage $(System.JobAttempt) {StableUniqueNameForEachJob}"
+The recommended coverage artifact name format is: Coverage $(System.JobAttempt) $(System.StageDisplayName) $(System.JobDisplayName)
+Keep in mind that Azure Pipelines does not enforce unique job display names (only names).
+It is up to pipeline authors to avoid name collisions when deviating from the recommended format.
+"""
+
+from __future__ import annotations
+
+import os
+import re
+import shutil
+import sys
+
+
+def main():
+    """Main program entry point."""
+    source_directory = sys.argv[1]
+
+    if "/ansible_collections/" in os.getcwd():
+        output_path = "tests/output"
+    else:
+        output_path = "test/results"
+
+    destination_directory = os.path.join(output_path, "coverage")
+
+    if not os.path.exists(destination_directory):
+        os.makedirs(destination_directory)
+
+    jobs = {}
+    count = 0
+
+    for name in os.listdir(source_directory):
+        match = re.search("^Coverage (?P<attempt>[0-9]+) (?P