Compare commits
6 Commits
Author | SHA1 | Date
---|---|---
Franca Debole | 02e4dd8c39 |
Franca Debole | eb874d0a32 |
Franca Debole | be9feb1253 |
Franca Debole | ccaf0bfff6 |
Franca Debole | 03bce51546 |
Franca Debole | c37c893883 |
.vscode/settings.json
@@ -1 +0,0 @@
@@ -1,29 +1,13 @@
---
dependencies:
  - role: '../../library/centos/roles/centos-bootstrap'
  - { role: '../../library/roles/cloud-init', when: ansible_product_name == "oVirt Node" }
  - role: '../../library/roles/sshd_config'
  - role: '../../library/centos/roles/fail2ban'
  - src: git+https://gitea-s2i2s.isti.cnr.it/ISTI-ansible-roles/ansible-role-rsyslog.git
    version: master
    name: rsyslog
    state: latest
  - role: '../../library/roles/rsyslog'
  - role: '../../library/roles/dell-server-utilities'
  - src: git+https://gitea-s2i2s.isti.cnr.it/ISTI-ansible-roles/ansible-role-linux-firewall.git
    version: master
    name: linux-firewall
    state: latest
  - src: git+https://gitea-s2i2s.isti.cnr.it/ISTI-ansible-roles/ansible-role-letsencrypt-acme-sh-client.git
    version: master
    name: letsencrypt-acme-sh-client
    state: latest
  - src: git+https://gitea-s2i2s.isti.cnr.it/ISTI-ansible-roles/ansible-role-zabbix-agent.git
    version: master
    name: zabbix-agent
    state: latest
    when: zabbix_agent_install is defined and zabbix_agent_install
  - src: git+https://gitea-s2i2s.isti.cnr.it/ISTI-ansible-roles/ansible-role-prometheus-node-exporter.git
    version: master
    name: prometheus-node-exporter
    state: latest
    when: prometheus_enabled is defined and prometheus_enabled
  - role: '../../library/roles/sshd_config'
  - { role: '../../library/roles/data_disk', when: additional_disks is defined and additional_disks }
  - { role: '../../library/roles/postfix-relay', when: postfix_relay_client is defined and postfix_relay_client }
  - role: '../../library/centos/roles/firewalld'
  - role: '../../library/centos/roles/fail2ban'
  - { role: '../../library/roles/cloud-init', when: ansible_product_name == "oVirt Node" }
  - { role: '../../library/roles/letsencrypt-acme-sh-client', when: letsencrypt_acme_sh_install is defined and letsencrypt_acme_sh_install }
  - { role: '../../library/centos/roles/prometheus-node-exporter', when: prometheus_enabled }

@@ -1,28 +1,13 @@
---
dependencies:
  - role: '../../library/roles/ubuntu-deb-general'
  - role: '../../library/roles/rsyslog'
  - { role: '../../library/roles/cloud-init', when: ansible_product_name == "oVirt Node" }
  - role: '../../library/roles/tmpreaper'
  - role: '../../library/roles/iptables'
  - { role: '../../library/roles/data_disk', when: additional_disks is defined and additional_disks }
  - role: '../../library/roles/sshd_config'
  - src: git+https://gitea-s2i2s.isti.cnr.it/ISTI-ansible-roles/ansible-role-rsyslog.git
    version: master
    name: rsyslog
  - src: git+https://gitea-s2i2s.isti.cnr.it/ISTI-ansible-roles/ansible-role-linux-firewall.git
    version: master
    name: linux-firewall
    state: latest
  - src: git+https://gitea-s2i2s.isti.cnr.it/ISTI-ansible-roles/ansible-role-letsencrypt-acme-sh-client.git
    version: master
    name: letsencrypt-acme-sh-client
    state: latest
    when: letsencrypt_acme_install is defined and letsencrypt_acme_install
  - { role: '../library/roles/letsencrypt-acme-sh-client', when: letsencrypt_acme_sh_install is defined and letsencrypt_acme_sh_install }
  - { role: '../../library/roles/nagios', when: nagios_enabled is defined and nagios_enabled }
  - src: git+https://gitea-s2i2s.isti.cnr.it/ISTI-ansible-roles/ansible-role-zabbix-agent.git
    version: master
    name: zabbix-agent
    state: latest
    when: zabbix_agent_install is defined and zabbix_agent_install
  - src: git+https://gitea-s2i2s.isti.cnr.it/ISTI-ansible-roles/ansible-role-prometheus-node-exporter.git
    version: master
    name: prometheus-node-exporter
    state: latest
    when: prometheus_enabled is defined and prometheus_enabled
  - { role: '../../library/roles/prometheus-node-exporter', when: prometheus_enabled is defined and prometheus_enabled }

@@ -9,29 +9,20 @@ dns1: 208.67.220.220
dns2: 208.67.222.222
configure_domain_name_in_interface: False

el_yum_automation: True
centos7_packages_automation:
  - yum-cron
  - yum-plugin-fastestmirror

el_dnf_automation: False
centos8_packages_automation:
  - dnf-automatic

centos7_packages_to_install:
  - policycoreutils-python
centos8_packages_to_install:
  - policycoreutils-python-utils
centos_packages_to_install:
  - dstat
  - lsof
  - strace
  - traceroute
  - bind-utils
  - yum-cron
  - yum-plugin-fastestmirror
  - whois
  - iotop
  - policycoreutils-python
  - firewalld
  - ipset
  - ntp
  - psmisc
  - tcpdump
  - tuned
@@ -46,8 +37,10 @@ centos_packages_from_epel:
  - htop
  - lbzip2

centos_ntpd_enabled: True

centos_packages_cleanup: True
centos_remove_avahi: False
centos_remove_avahi: True
centos_remove_networkmanager: False
centos_disable_avahi: True
centos_disable_networkmanager: False

@@ -0,0 +1,3 @@
---
dependencies:
  - role: '../../library/roles/ntp'
@@ -1,42 +1,8 @@
---
- name: Install the basic packages common to CentOS 7 and 8+
- name: Install the basic packages
  yum: name={{ centos_packages_to_install }} state={{ centos_pkg_state }}
  tags: [ 'centos', 'bootstrap', 'packages' ]

- name: Install CentOS 7 packages
  yum:
    pkg: '{{ centos7_packages_to_install }}'
    state: present
  when:
    - ansible_distribution_file_variety == "RedHat"
    - ansible_distribution_major_version is version_compare('7', '<=')

- name: Install CentOS 8 packages
  dnf:
    pkg: '{{ centos8_packages_to_install }}'
    state: present
  when:
    - ansible_distribution_file_variety == "RedHat"
    - ansible_distribution_major_version is version_compare('8', '>=')

- name: Install the packages to automate some yum tasks on CentOS 7
  yum:
    pkg: '{{ centos7_packages_automation }}'
    state: present
  when:
    - ansible_distribution_file_variety == "RedHat"
    - ansible_distribution_major_version is version_compare('7', '<=')
    - el_yum_automation

- name: Install the packages to automate some dnf tasks on CentOS 8
  yum:
    pkg: '{{ centos8_packages_automation }}'
    state: present
  when:
    - ansible_distribution_file_variety == "RedHat"
    - ansible_distribution_major_version is version_compare('8', '>=')
    - el_dnf_automation

- name: Install the basic packages from the EPEL repository
  yum: name={{ centos_packages_from_epel }} state={{ centos_pkg_state }}
  when: centos_install_epel
@@ -1,11 +1,6 @@
---
dependencies:
  - role: '../../library/centos/roles/external-repos'
  - role: 'basic-system-setup'
  - src: git+https://code-repo.d4science.org/InfraScience/ansible-role-ntp.git
    version: master
    name: ntp
    state: latest
  - role: '../../library/centos/roles/basic-setup'
  - role: '../../library/roles/motd'
  - role: '../../library/roles/linux-kernel-sysctl'

@@ -5,7 +5,6 @@ DUPLY=/usr/bin/duply
D_PROFILE={{ duply_default_profile }}
LOG_FILE={{ duplicity_cron_job_logfile }}
LOCK_FILE={{ duplicity_temp_dir }}/.duply-backup.lock
ulimit -n 32000

if [ ! -f $LOCK_FILE ] ; then
    echo $$ > $LOCK_FILE

@@ -121,8 +121,8 @@ MAX_FULLS_WITH_INCRS={{ duplicity_max_full_with_incrs }}
# forces a full backup if last full backup reaches a specified age, for the
# format of MAX_FULLBKP_AGE see duplicity man page, chapter TIME_FORMATS
# Uncomment the following two lines to enable this setting.
MAX_FULLBKP_AGE={{ duplicity_max_backup_age }}
DUPL_PARAMS="$DUPL_PARAMS --full-if-older-than $MAX_FULLBKP_AGE "
#MAX_FULLBKP_AGE=1M
#DUPL_PARAMS="$DUPL_PARAMS --full-if-older-than $MAX_FULLBKP_AGE "

# sets duplicity --volsize option (available since v0.4.3.RC7)
# set the size of backup chunks to VOLSIZE MB instead of the default 25MB.
@@ -0,0 +1,19 @@
---
firewalld_enabled: True
firewalld_default_zone: public
firewalld_ssh_enabled_on_default_zone: True

firewalld_rules:
# - { service: 'http', zone: 'public', permanent: 'true', state: 'enabled' }
# - { port: '9001', protocol: 'tcp', zone: 'public', permanent: 'true', state: 'enabled' }
# - { rich_rule: 'rule service name="ftp" audit limit value="1/m" accept', zone: 'public', permanent: 'true', state: 'enabled' }

#firewalld_new_services:
# - { name: 'mosh', zone: 'public', permanent: 'true', state: 'enabled' }

# We execute direct rules as they are written
# firewalld_direct_rules:
# - { action: '--add-rule', parameters: 'ipv4 filter FORWARD 0 -s 136.243.21.126 --in-interface br0 -d 0/0 -j ACCEPT' }

# firewalld_zones_interfaces:
# - { interface: 'eth1', zone: 'internal' }
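
Note: the firewalld_rules tasks added later in this changeset dispatch each entry to a service, port, or rich_rule task depending on which keys it defines, so enabling a rule is just a matter of uncommenting and adapting the examples above. A minimal, illustrative group_vars sketch (the values are examples, not part of this changeset):

    firewalld_rules:
      - { service: 'http', zone: 'public', permanent: 'true', state: 'enabled' }
      - { port: '8080', protocol: 'tcp', zone: 'public', permanent: 'true', state: 'enabled' }
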
@@ -0,0 +1,16 @@
<?xml version="1.0" encoding="utf-8"?>
<service>
  <short>Mosh SSH service</short>
  <description>This allows mosh to send and receive datagram connections.</description>
  <port protocol="udp" port="60000"/>
  <port protocol="udp" port="60001"/>
  <port protocol="udp" port="60002"/>
  <port protocol="udp" port="60003"/>
  <port protocol="udp" port="60004"/>
  <port protocol="udp" port="60005"/>
  <port protocol="udp" port="60006"/>
  <port protocol="udp" port="60007"/>
  <port protocol="udp" port="60008"/>
  <port protocol="udp" port="60009"/>
  <port protocol="udp" port="60010"/>
</service>

@@ -0,0 +1,7 @@
<?xml version="1.0" encoding="utf-8"?>
<service>
  <short>ports needed by traceroute</short>
  <description>This allows the host to be reached by traceroute.</description>
  <port protocol="udp" port="33434"/>
  <port protocol="udp" port="33523"/>
</service>
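
These two XML files use the standard firewalld service definition format; the role copies them to /etc/firewalld/services/ and registers them with firewall-cmd --new-service, after which they can be opened like any built-in service. A hedged sketch, assuming the service names match the file names:

    firewalld_new_services:
      - { name: 'mosh', zone: 'public', permanent: 'true', state: 'enabled' }
      - { name: 'traceroute', zone: 'public', permanent: 'true', state: 'enabled' }
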
@@ -0,0 +1,16 @@
---
- name: Enable and start firewalld
  service: name=firewalld state=started enabled=yes
  when: firewalld_enabled

- name: Reload firewall config
  command: firewall-cmd --reload
  notify: Restart fail2ban
  when: firewalld_enabled

- name: Restart fail2ban
  service: name=fail2ban state=restarted
  when:
    - fail2ban_enabled is defined and fail2ban_enabled
    - centos_install_epel

@@ -0,0 +1,5 @@
---
- name: Ensure that the firewalld service is stopped and disabled if we do not want it
  service: name=firewalld state=stopped enabled=no
  when: not firewalld_enabled | bool
  tags: [ 'iptables', 'firewall', 'firewalld' ]
@@ -0,0 +1,91 @@
---
- block:
    - name: Ensure that the service is enabled and started
      service: name=firewalld state=started enabled=yes
      notify: Restart fail2ban

    - name: Open the ssh service to the world. We rely on fail2ban to stop unauthorized access
      firewalld: service=ssh zone={{ firewalld_default_zone }} permanent=True state=enabled immediate=True
      when: firewalld_ssh_enabled_on_default_zone | bool

    - name: Set the firewalld default zone.
      command: firewall-cmd --set-default-zone={{ firewalld_default_zone }}

    - name: Add sources to the availability zones, if any
      firewalld: source={{ item.cidr }} zone={{ item.zone }} permanent={{ item.permanent }} state={{ item.state }} immediate=True
      with_items: '{{ firewalld_src_rules | default([]) }}'

    - name: Assign interfaces to firewalld zones if needed
      firewalld: zone={{ item.zone }} interface={{ item.interface }} permanent={{ item.permanent | default(True) }} state={{ item.state | default('enabled') }} immediate=True
      with_items: '{{ firewalld_zones_interfaces | default([]) }}'
      when:
        - firewalld_zones_interfaces is defined
        - item.interface is defined
        - item.zone is defined

    - name: Manage the services firewalld rules. Service names must be the known ones. Save the services that are meant to be permanent
      firewalld: service={{ item.service }} zone={{ item.zone }} permanent={{ item.permanent | default(False) }} state={{ item.state }} immediate=True
      with_items: '{{ firewalld_rules }}'
      when:
        - firewalld_rules is defined
        - item.service is defined

    - name: Save the ports firewalld rules that need to be permanent
      firewalld: port={{ item.port }}/{{ item.protocol }} zone={{ item.zone }} permanent={{ item.permanent | default(False) }} state={{ item.state }} immediate=True
      with_items: '{{ firewalld_rules }}'
      when:
        - firewalld_rules is defined
        - item.port is defined
        - item.protocol is defined

    - name: Save the rich_rules firewalld rules that need to be permanent
      firewalld: rich_rule='{{ item.rich_rule }}' zone={{ item.zone }} permanent={{ item.permanent | default(False) }} state={{ item.state }} immediate=True
      with_items: '{{ firewalld_rules }}'
      when:
        - firewalld_rules is defined
        - item.rich_rule is defined
      notify: Reload firewall config

    - name: Enable the firewall-cmd direct passthrough rules
      shell: touch /etc/firewalld/.{{ item.label }} ; firewall-cmd --direct --passthrough {{ item.action }}
      with_items: '{{ firewalld_direct_rules }}'
      args:
        creates: /etc/firewalld/.{{ item.label }}
      when:
        - firewalld_direct_rules is defined
        - item.action is defined

    - name: Set the firewall-cmd direct passthrough rules as permanent ones
      command: firewall-cmd --direct --permanent --passthrough {{ item.action }}
      with_items: '{{ firewalld_direct_rules }}'
      when:
        - firewalld_direct_rules is defined
        - item.action is defined

    - name: Add new not yet defined services, if any. They need an additional task to really install a meaningful service config file
      command: firewall-cmd --new-service={{ item.name }} --permanent
      args:
        creates: '/etc/firewalld/services/{{ item.name }}.xml'
      with_items: '{{ firewalld_new_services }}'
      when: firewalld_new_services is defined
      notify: Reload firewall config

    - name: Install the custom firewall services
      copy: src={{ item.name }}.xml dest=/etc/firewalld/services/{{ item.name }}.xml
      with_items: '{{ firewalld_new_services }}'
      when: firewalld_new_services is defined
      notify: Reload firewall config

    - name: Manage the custom services firewalld rules.
      firewalld: service={{ item.name }} zone={{ item.zone }} permanent={{ item.permanent }} state={{ item.state }} immediate=True
      with_items: '{{ firewalld_new_services }}'
      when:
        - firewalld_new_services is defined
        - item.name is defined
      notify: Reload firewall config

    # Last one to not take ourselves out
    - name: Set the firewalld default zone.
      command: firewall-cmd --set-default-zone={{ firewalld_default_zone }}

  tags: [ 'iptables', 'firewall', 'firewalld' ]
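
Note that the direct passthrough tasks reference an item.label key that the defaults file does not show: the marker file /etc/firewalld/.{{ item.label }} is what makes the shell task idempotent through its creates argument. A hedged sketch of a complete entry (the label value is illustrative):

    firewalld_direct_rules:
      - label: 'forward-br0'
        action: '--add-rule'
        parameters: 'ipv4 filter FORWARD 0 -s 136.243.21.126 --in-interface br0 -d 0/0 -j ACCEPT'
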
@@ -0,0 +1,7 @@
---
- import_tasks: firewalld_rules.yml
  when: firewalld_enabled | bool

- import_tasks: disable_firewalld.yml
  when: not firewalld_enabled | bool

@@ -7,7 +7,6 @@ virtualization_packages:
  - bridge-utils
  - virt-install
  - cloud-utils
  - libguestfs-tools

virtualization_centos6_packages:
  - python-virtinst

@@ -22,13 +22,3 @@ vsftpd_chroot_list_enable: 'YES'
vsftpd_text_userdb_names: 'YES'
vsftpd_pasv_min_port: 19000
vsftpd_pasv_max_port: 19999

# The first listens on ipv4 only. The second on both, despite the name
vsftpd_listen: "NO"
vsftpd_listen_ipv6: "YES"
vsftpd_tls_enabled: True
vsftpd_force_tls: True
vsftpd_tls_letsencrypt: True
vsftpd_ssl_ca_certificate: '{{ letsencrypt_acme_certs_dir }}/fullchain'
vsftpd_ssl_certificate: '{{ letsencrypt_acme_certs_dir }}/cert'
vsftpd_ssl_certificate_key: '{{ letsencrypt_acme_certs_dir }}/privkey'
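
Since passive-mode FTP negotiates a data port inside the vsftpd_pasv_min_port/vsftpd_pasv_max_port window, that range has to be reachable as well; on a host managed by the firewalld role added in this same changeset it could be expressed as follows (an illustrative sketch, not taken from this diff):

    firewalld_rules:
      - { port: '19000-19999', protocol: 'tcp', zone: 'public', permanent: 'true', state: 'enabled' }
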
@@ -1,6 +1,13 @@
---
- name: Install the ftp server packages
  yum: pkg={{ vsftpd_pkgs }} state={{ pkg_state }}
  yum: pkg={{ item }} state={{ pkg_state }}
  with_items: vsftpd_pkgs
  tags:
    - ftp
    - vsftpd

- name: Ensure that the vsftpd service is enabled
  service: name=vsftpd enabled=yes
  tags:
    - ftp
    - vsftpd

@@ -19,14 +26,13 @@
  tags:
    - ftp
    - vsftpd
    - vsftpd_config

- name: Set the needed SELinux booleans when local users are enabled
  seboolean: name={{ item }} state=yes persistent=yes
  with_items:
    - ftp_home_dir
    - ftpd_full_access
  when: vsftpd_local | bool
  when: vsftpd_local
  tags:
    - ftp
    - vsftpd

@@ -36,24 +42,7 @@
  with_items:
    - allow_ftpd_full_access
    - allow_ftpd_anon_write
  when: vsftpd_anonymous_upload | bool
  when: vsftpd_anonymous_upload
  tags:
    - ftp
    - vsftpd

- name: Ensure that the vsftpd service is started and enabled
  service: name=vsftpd enabled=yes
  tags:
    - ftp
    - vsftpd

- name: Manage the letsencrypt hook
  block:
    - name: Create the acme hooks directory if it does not yet exist
      file: dest={{ letsencrypt_acme_sh_services_scripts_dir }} state=directory owner=root group=root

    - name: Install the vsftp hook for letsencrypt
      template: src=vsftpd-letsencrypt-hook.sh.j2 dest=/usr/lib/acme/hooks/vsftpd owner=root group=root mode=0550

  when: vsftpd_tls_letsencrypt | bool
  tags: [ 'ftp', 'vsftpd', 'vsftpd_config', 'letsencrypt' ]
@@ -1,5 +0,0 @@
#!/bin/bash

systemctl restart vsftpd

exit 0

@@ -10,13 +10,6 @@
#
# Allow anonymous FTP? (Beware - allowed by default if you comment this out).
anonymous_enable={{ vsftpd_anonymous_enable }}
{% if vsftpd_anonymous and vsftpd_tls_enabled %}
allow_anon_ssl=YES
{% if vsftpd_force_tls %}
force_anon_data_ssl=YES
force_anon_logins_ssl=YES
{% endif %}
{% endif %}
#
# Uncomment this to allow local users to log in.
local_enable={{ vsftpd_local_enable }}

@@ -119,9 +112,9 @@ chroot_list_file=/etc/vsftpd/chroot_list
pam_service_name=vsftpd
userlist_enable=YES
#enable for standalone mode
listen={{ vsftpd_listen }}
listen=NO
# This one listens on both ipv4 and ipv6 sockets
listen_ipv6={{ vsftpd_listen_ipv6 }}
listen_ipv6=YES
# maximum number of clients which may be connected.
max_clients=50
max_per_ip=10
@@ -138,20 +131,3 @@ pasv_min_port={{ vsftpd_pasv_min_port }}
pasv_max_port={{ vsftpd_pasv_max_port }}
#
use_localtime=YES

{% if vsftpd_tls_enabled %}
# SSL/TLS
ssl_enable=YES
ssl_sslv2=NO
ssl_sslv3=NO
ssl_tlsv1=NO
ssl_tlsv1_1=NO
ssl_tlsv1_2=YES
ca_certs_file={{ vsftpd_ssl_ca_certificate }}
rsa_cert_file={{ vsftpd_ssl_certificate }}
rsa_private_key_file={{ vsftpd_ssl_certificate_key }}
{% if vsftpd_force_tls %}
force_local_logins_ssl=YES
force_local_data_ssl=YES
{% endif %}
{% endif %}
@@ -1,3 +1,3 @@
---
dependencies:
  - { role: '../../../library/roles/nginx', when: elastic_hq_use_nginx_proxy | bool }
  - { role: '../../library/roles/nginx', when: elastic_hq_use_nginx_proxy }

@@ -1,5 +1,4 @@
---
elasticsearch_install: True
elasticsearch_repo_key: https://packages.elastic.co/GPG-KEY-elasticsearch
elasticsearch_repo_version: '{{ elasticsearch_major_version }}.x'
#elasticsearch_major_version: 2
@@ -12,28 +11,10 @@ elasticsearch_repo: 'deb https://artifacts.elastic.co/packages/{{ elasticsearch_
elasticsearch_packages:
  - elasticsearch

elasticsearch_kibana_install: False
elasticsearch_kibana_enabled: True
elasticsearch_kibana_proxy: False
elasticsearch_kibana_nginx_proxy: True
elasticsearch_kibana_packages:
  - kibana

elasticsearch_kibana_http_port: 5601
elasticsearch_kibana_bind_ip: 127.0.0.1
elasticsearch_kibana_serverpath: ''
elasticsearch_kibana_servername: '{{ ansible_fqdn }}'
elasticsearch_kibana_elasticsearch_url: 'http://localhost:9200'
elasticsearch_kibana_preserve_host: 'false'
elasticsearch_kibana_ssl_enabled: False
elasticsearch_kibana_rundir: /run/kibana

elasticsearch_package_state: 'present'
elasticsearch_cluster_name: 'Elasticsearch Cluster'
elasticsearch_enabled: True
elasticsearch_http_port: 9200
elasticsearch_transport_min_port: 9300
elasticsearch_transport_max_port: 9400
elasticsearch_data_dir: /var/lib/elasticsearch
elasticsearch_log_dir: /var/log/elasticsearch
elasticsearch_bind_ip: 0.0.0.0
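
With elasticsearch_kibana_proxy enabled, the elasticsearch.yml template further down in this changeset configures a coordinating-only node (node.master, node.data and node.ingest all false) bound to localhost for kibana to query. A hedged group_vars sketch for that setup (values are illustrative):

    elasticsearch_install: True
    elasticsearch_kibana_install: True
    elasticsearch_kibana_proxy: True
    elasticsearch_cluster_name: 'my-es-cluster'
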
@@ -1,11 +1,5 @@
---
- name: Restart elasticsearch
  service: name=elasticsearch state=restarted enabled=yes
  when: elasticsearch_enabled | bool
  ignore_errors: True

- name: Restart kibana
  service: name=kibana state=restarted enabled=yes
  when: elasticsearch_kibana_enabled | bool
  when: elasticsearch_enabled

@@ -1,4 +1,3 @@
---
dependencies:
  - { role: '../../../library/roles/openjdk' }
  - { role: '../../../library/roles/nginx', when: elasticsearch_kibana_nginx_proxy | bool }
@@ -1,23 +1,25 @@
---
- name: Elasticsearch installation
  block:
- block:
    - name: Install the elasticsearch repo key
      apt_key: url={{ elasticsearch_repo_key }} state=present

    - name: Install the elasticsearch deb repository
      apt_repository: repo='{{ elasticsearch_repo }}' state=present update_cache=yes

    - name: Install the elasticsearch deb packages
      apt: name='{{ elasticsearch_packages }}' state={{ elasticsearch_package_state }} update_cache=yes cache_valid_time=1800

    - name: Install the elasticsearch startup default
      template: src=elasticsearch-default.j2 dest=/etc/default/elasticsearch owner=root group=elasticsearch mode=0640
      register: elasticsearch_default
      notify: Restart elasticsearch

    - name: Install the elasticsearch JVM options
      template: src=jvm.options.j2 dest=/etc/elasticsearch/jvm.options owner=root group=elasticsearch mode=0640
      register: elasticsearch_jvm_opts
      notify: Restart elasticsearch
      tags: [ 'ELK', 'elasticsearch', 'elk', 'elasticsearch_conf' ]

    - name: Install the elasticsearch configuration
      template: src=elasticsearch.yml.j2 dest=/etc/elasticsearch/elasticsearch.yml owner=root group=elasticsearch mode=0640
      register: elasticsearch_configuration
      notify: Restart elasticsearch
      tags: [ 'ELK', 'elasticsearch', 'elk', 'elasticsearch_conf' ]
@@ -1,11 +0,0 @@
---
- name: Ensure that elasticsearch is enabled and running
  service: name=elasticsearch state=started enabled=yes
  when: elasticsearch_enabled | bool
  tags: [ 'ELK', 'elasticsearch', 'elk' ]

- name: Ensure that elasticsearch is disabled and stopped
  service: name=elasticsearch state=stopped enabled=no
  when: not elasticsearch_enabled | bool
  tags: [ 'ELK', 'elasticsearch', 'elk' ]

@@ -1,10 +0,0 @@
---
- name: ELK repository
  block:
    - name: Install the elasticsearch repo key
      apt_key: url={{ elasticsearch_repo_key }} state=present

    - name: Install the elasticsearch deb repository
      apt_repository: repo='{{ elasticsearch_repo }}' state=present update_cache=yes

  tags: [ 'ELK', 'elasticsearch', 'elk' ]

@@ -1,50 +0,0 @@
---
- name: Kibana x509 certificate management
  block:
    - name: Create the acme hooks directory if it does not yet exist
      file: dest={{ letsencrypt_acme_sh_services_scripts_dir }} state=directory owner=root group=root

    - name: Create the kibana pki subdir
      file: dest={{ pki_dir }}/kibana state=directory owner=root group=kibana mode=0750

    - name: Check if the global certificate private key exists
      stat: path={{ letsencrypt_acme_certs_dir }}/privkey
      register: kibana_privkey

    - name: Check if the kibana certificate private key exists under the pki directory
      stat: path={{ pki_dir }}/kibana/privkey
      register: kibana_pki_privkey

    - name: Copy the private key into the expected place if it is not already there
      copy: src={{ letsencrypt_acme_certs_dir }}/privkey dest={{ pki_dir }}/kibana/privkey remote_src=yes owner=root group=kibana mode=0440
      when:
        - kibana_privkey.stat.exists
        - not kibana_pki_privkey.stat.exists

    - name: Install the kibana hook for letsencrypt
      template: src=kibana-letsencrypt-hook.sh.j2 dest=/usr/lib/acme/hooks/kibana owner=root group=root mode=0550

  when:
    - elasticsearch_kibana_ssl_enabled | bool
    - letsencrypt_acme_install is defined and letsencrypt_acme_install | bool
  tags: [ 'ELK', 'elasticsearch', 'elk', 'kibana' ]

- name: Kibana installation
  block:
    - name: Install the Kibana packages
      apt: name='{{ elasticsearch_kibana_packages }}' state={{ elasticsearch_package_state }} update_cache=yes cache_valid_time=1800

    - name: Install the kibana systemd configuration to manage the rundir directory
      template: src=kibana_rundir.conf.j2 dest=/usr/lib/tmpfiles.d/kibana.conf owner=root group=root mode=0644
      register: reconfigure_systemd

    - name: Reload the systemd configuration
      systemd: daemon_reload=yes

    - name: Install the Kibana configuration
      template: src=kibana.yml.j2 dest=/etc/kibana/kibana.yml owner=root group=kibana mode=0640
      register: kibana_configuration
      notify: Restart kibana
      tags: [ 'ELK', 'elasticsearch', 'elk', 'kibana', 'kibana_conf' ]

  tags: [ 'ELK', 'elasticsearch', 'elk', 'kibana' ]

@@ -1,11 +0,0 @@
---
- name: Ensure that kibana is enabled and running
  service: name=kibana state=started enabled=yes
  when: elasticsearch_kibana_enabled | bool
  tags: [ 'ELK', 'elasticsearch', 'elk', 'kibana' ]

- name: Ensure that kibana is disabled and stopped
  service: name=kibana state=stopped enabled=no
  when: not elasticsearch_kibana_enabled | bool
  tags: [ 'ELK', 'elasticsearch', 'elk', 'kibana' ]
@@ -1,13 +1,14 @@
---
- import_tasks: elk_repo.yml
- import_tasks: elasticsearch.yml
  when: elasticsearch_install | bool
- import_tasks: elasticsearch_plugins.yml
  when: elasticsearch_install | bool
- import_tasks: elasticsearch_service.yml
  when: elasticsearch_install | bool
- import_tasks: kibana.yml
  when: elasticsearch_kibana_install | bool
- import_tasks: kibana_service.yml
  when: elasticsearch_kibana_install | bool
- import_tasks: plugins.yml

- name: Ensure that elasticsearch is enabled and running
  service: name=elasticsearch state=started enabled=yes
  when: elasticsearch_enabled
  tags: [ 'ELK', 'elasticsearch', 'elk' ]

- name: Ensure that elasticsearch is disabled and stopped
  service: name=elasticsearch state=stopped enabled=no
  when: not elasticsearch_enabled
  tags: [ 'ELK', 'elasticsearch', 'elk' ]
@@ -22,13 +22,6 @@ cluster.name: {{ elasticsearch_cluster_name }}
#
node.name: {{ ansible_fqdn }}

{% if elasticsearch_kibana_proxy %}
# This node is being used by kibana as a proxy to a cluster
node.master: false
node.data: false
node.ingest: false
{% endif %}

{% if elasticsearch_major_version >= 7 %}
cluster.initial_master_nodes:
{% for n in elasticsearch_bootstrap_known_masters %}

@@ -67,21 +60,11 @@ bootstrap.mlockall: true
#
# Set the bind address to a specific IP (IPv4 or IPv6):
#
{% if elasticsearch_kibana_proxy %}
network.host: localhost
{% else %}
network.host: {{ elasticsearch_bind_ip }}
{% endif %}
#
# Set a custom port for HTTP:
#
http.port: {{ elasticsearch_http_port }}

# by default transport.host refers to network.host
transport.host: {{ elasticsearch_bind_ip }}
{% if elasticsearch_major_version >= 6 %}
transport.tcp.port: {{ elasticsearch_transport_min_port }}-{{ elasticsearch_transport_max_port }}
{% endif %}
#
# For more information, see the documentation at:
# <http://www.elastic.co/guide/en/elasticsearch/reference/current/modules-network.html>
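
For elasticsearch 7+ the template above emits cluster.initial_master_nodes from elasticsearch_bootstrap_known_masters, so that list has to be defined when bootstrapping a cluster. An illustrative sketch (the host names are placeholders, not from this changeset):

    elasticsearch_major_version: 7
    elasticsearch_bootstrap_known_masters:
      - es1.example.org
      - es2.example.org
      - es3.example.org
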
@@ -1,38 +0,0 @@
#!/bin/bash

H_NAME=$( hostname -f )
LE_SERVICES_SCRIPT_DIR=/usr/lib/acme/hooks
LE_CERTS_DIR="/var/lib/acme/live/$H_NAME"
LE_LOG_DIR=/var/log/letsencrypt
KIBANA_CERTDIR=/etc/pki/kibana
KIBANA_KEYFILE="$KIBANA_CERTDIR/privkey"
DATE=$( date )

[ ! -d $KIBANA_CERTDIR ] && mkdir -p $KIBANA_CERTDIR
[ ! -d $LE_LOG_DIR ] && mkdir $LE_LOG_DIR
echo "$DATE" >> $LE_LOG_DIR/kibana.log

{% if letsencrypt_acme_install %}
LE_ENV_FILE=/etc/default/letsencrypt
{% endif %}
{% if letsencrypt_acme_sh_install %}
LE_ENV_FILE=/etc/default/acme_sh_request_env
{% endif %}
if [ -f "$LE_ENV_FILE" ] ; then
    . "$LE_ENV_FILE"
else
    echo "No letsencrypt default file" >> $LE_LOG_DIR/kibana.log
fi

echo "Building the new certificate file" >> $LE_LOG_DIR/kibana.log
cp -f ${LE_CERTS_DIR}/privkey ${KIBANA_KEYFILE}
chmod 440 ${KIBANA_KEYFILE}
chgrp kibana ${KIBANA_KEYFILE}

echo "Reload the kibana service" >> $LE_LOG_DIR/kibana.log
systemctl restart kibana >> $LE_LOG_DIR/kibana.log 2>&1

echo "Done." >> $LE_LOG_DIR/kibana.log

exit 0
@@ -1,108 +0,0 @@
# Kibana is served by a back end server. This setting specifies the port to use.
server.port: {{ elasticsearch_kibana_http_port }}

# Specifies the address to which the Kibana server will bind. IP addresses and host names are both valid values.
# The default is 'localhost', which usually means remote machines will not be able to connect.
# To allow connections from remote users, set this parameter to a non-loopback address.
server.host: "{{ elasticsearch_kibana_bind_ip }}"

# Enables you to specify a path to mount Kibana at if you are running behind a proxy. This only affects
# the URLs generated by Kibana, your proxy is expected to remove the basePath value before forwarding requests
# to Kibana. This setting cannot end in a slash.
server.basePath: "{{ elasticsearch_kibana_serverpath }}"

# The maximum payload size in bytes for incoming server requests.
#server.maxPayloadBytes: 1048576

# The Kibana server's name. This is used for display purposes.
server.name: "{{ elasticsearch_kibana_servername }}"

# The URL of the Elasticsearch instance to use for all your queries.
elasticsearch.url: "{{ elasticsearch_kibana_elasticsearch_url }}"

# When this setting's value is true Kibana uses the hostname specified in the server.host
# setting. When the value of this setting is false, Kibana uses the hostname of the host
# that connects to this Kibana instance.
elasticsearch.preserveHost: {{ elasticsearch_kibana_preserve_host }}

# Kibana uses an index in Elasticsearch to store saved searches, visualizations and
# dashboards. Kibana creates a new index if the index doesn't already exist.
#kibana.index: ".kibana"

# The default application to load.
#kibana.defaultAppId: "discover"

# If your Elasticsearch is protected with basic authentication, these settings provide
# the username and password that the Kibana server uses to perform maintenance on the Kibana
# index at startup. Your Kibana users still need to authenticate with Elasticsearch, which
# is proxied through the Kibana server.
#elasticsearch.username: "user"
#elasticsearch.password: "pass"

{% if letsencrypt_acme_install is defined and letsencrypt_acme_install %}
{% if elasticsearch_kibana_ssl_enabled %}
# Enables SSL and paths to the PEM-format SSL certificate and SSL key files, respectively.
# These settings enable SSL for outgoing requests from the Kibana server to the browser.
server.ssl.enabled: true
server.ssl.certificate: {{ letsencrypt_acme_certs_dir }}/fullchain
server.ssl.key: {{ pki_dir }}/kibana/privkey
{% endif %}
{% endif %}

# Optional settings that provide the paths to the PEM-format SSL certificate and key files.
# These files validate that your Elasticsearch backend uses the same key files.
#elasticsearch.ssl.certificate: /path/to/your/client.crt
#elasticsearch.ssl.key: /path/to/your/client.key

# Optional setting that enables you to specify a path to the PEM file for the certificate
# authority for your Elasticsearch instance.
#elasticsearch.ssl.certificateAuthorities: [ "/path/to/your/CA.pem" ]

# To disregard the validity of SSL certificates, change this setting's value to 'none'.
#elasticsearch.ssl.verificationMode: full

# Time in milliseconds to wait for Elasticsearch to respond to pings. Defaults to the value of
# the elasticsearch.requestTimeout setting.
#elasticsearch.pingTimeout: 1500

# Time in milliseconds to wait for responses from the back end or Elasticsearch. This value
# must be a positive integer.
#elasticsearch.requestTimeout: 30000

# List of Kibana client-side headers to send to Elasticsearch. To send *no* client-side
# headers, set this value to [] (an empty list).
#elasticsearch.requestHeadersWhitelist: [ authorization ]

# Header names and values that are sent to Elasticsearch. Any custom headers cannot be overwritten
# by client-side headers, regardless of the elasticsearch.requestHeadersWhitelist configuration.
#elasticsearch.customHeaders: {}

# Time in milliseconds for Elasticsearch to wait for responses from shards. Set to 0 to disable.
#elasticsearch.shardTimeout: 0

# Time in milliseconds to wait for Elasticsearch at Kibana startup before retrying.
#elasticsearch.startupTimeout: 5000

# Specifies the path where Kibana creates the process ID file.
pid.file: {{ elasticsearch_kibana_rundir }}/kibana.pid

# Enables you specify a file where Kibana stores log output.
#logging.dest: stdout

# Set the value of this setting to true to suppress all logging output.
#logging.silent: false

# Set the value of this setting to true to suppress all logging output other than error messages.
#logging.quiet: false

# Set the value of this setting to true to log all events, including system usage information
# and all requests.
#logging.verbose: false

# Set the interval in milliseconds to sample system and process performance
# metrics. Minimum is 100ms. Defaults to 5000.
#ops.interval: 5000

# The default locale. This locale can be used in certain circumstances to substitute any missing
# translations.
#i18n.defaultLocale: "en"
@@ -1 +0,0 @@
d {{ elasticsearch_kibana_rundir }} 0775 kibana kibana

@@ -10,8 +10,8 @@
# The install/remove script has been taken from here: http://adamj.eu/tech/2014/07/19/installing-and-removing-r-packages-with-ansible/
#

# Set to True if you want to install from the CRAN deb repository
r_install_cran_repo: False
# Set to present if you want to install from CRAN
r_install_cran_repo: absent
#r_cran_mirror_site: http://cran.rstudio.com
r_cran_set_default_mirror: True
r_cran_mirror_site: https://cran.mirror.garr.it/mirrors/CRAN/
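
The diff above replaces the boolean r_install_cran_repo with an apt state string that the tasks in the next hunk pass straight to apt_key and apt_repository, so a single variable both adds and removes the repository. Illustrative usage:

    r_install_cran_repo: present   # 'absent' removes the CRAN repository and its key
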
@@ -1,20 +1,10 @@
---
- block:
    - name: Add the cran repository key
      apt_key: id=E084DAB9 keyserver=keyserver.ubuntu.com state=present
      tags: [ 'r_software', 'r_repo', 'r_repo_key' ]
- name: Manage the cran repository key
  apt_key: id=E084DAB9 keyserver=keyserver.ubuntu.com state={{ r_install_cran_repo }}
  tags: [ 'r_software', 'r_repo', 'r_repo_key' ]

    - name: Add the CRAN repository
      apt_repository: repo='deb http://cran.rstudio.com/bin/linux/ubuntu {{ ansible_distribution_release }}/' state=present update_cache=yes

  when: r_install_cran_repo | bool
  tags: [ 'r_software', 'r_repo' ]

- block:
    - name: Remove the CRAN repository
      apt_repository: repo='deb http://cran.rstudio.com/bin/linux/ubuntu {{ ansible_distribution_release }}/' state=absent update_cache=yes

  when: not r_install_cran_repo | bool
- name: Manage the cran repository definition
  apt_repository: repo='deb http://cran.rstudio.com/bin/linux/ubuntu {{ ansible_distribution_release }}/' state={{ r_install_cran_repo }} update_cache=yes
  tags: [ 'r_software', 'r_repo' ]

- name: Remove the hold state from the debian R packages
@@ -0,0 +1,95 @@
---
apache_service_enabled: True
apache_user: www-data
apache_pkg_state: latest
apache_group: '{{ apache_user }}'
apache_from_ppa: False
apache_ppa_repo: 'ppa:ondrej/apache2'

apache_listen_ports:
  - 80
  - '{{ apache_ssl_port }}'

# Possible choices: event, prefork (the old ones), worker (the threaded version), itk
apache_mpm_mode: worker

apache_packages:
  - apache2
  - apache2-utils
  - libapache2-mod-xsendfile
  - unzip
  - zip

apache_modules_packages:
  - 'apache2-mpm-{{ apache_mpm_mode }}'

# Only one can be present at the same time. It needs to be listed as the last one
apache_worker_modules:
# - { name: 'mpm_itk', state: 'absent' }
  - { name: 'mpm_event', state: 'absent' }
  - { name: 'mpm_prefork', state: 'absent' }
  - { name: 'mpm_{{ apache_mpm_mode }}', state: 'present' }

# apache RPAF is needed to obtain the real client addresses when behind a reverse proxy
apache_rpaf_install: False

apache_default_modules:
  - headers
  - rewrite
  - expires
  - xsendfile

apache_ssl_modules_enabled: True
apache_ssl_port: 443
apache_ssl_modules:
  - ssl
  - socache_shmcb
apache_http_proxy_modules_enabled: False
apache_http_proxy_modules:
  - proxy
  - proxy_ajp
  - proxy_http

apache_status_module: True
apache_status_location: '/server-status'
apache_status_allowed_hosts:
  - 127.0.0.1/8

apache_info_module: True
apache_info_location: '/server-info'
apache_info_allowed_hosts:
  - 127.0.0.1/8

apache_basic_auth: False
apache_basic_auth_single_file: True
apache_basic_auth_dir: /etc/apache2/auth
apache_basic_auth_file: '{{ apache_basic_auth_dir }}/htpasswd'

apache_basic_auth_modules:
  - auth_basic
  - authn_file
  - authz_user

# Put them in a vault file. auth_file is optional. Not used when apache_basic_auth_single_file is true
# apache_basic_users:
# - { username:'', password:'', state:'present,absent', auth_file:'path_to_file' }

#
apache_additional_packages: False
apache_additional_packages_list:
# - libapache2-mod-uwsgi
# - ...
#
# Set this variable to load the modules you need
apache_additional_modules: False
apache_additional_modules_list:
# -
# -

apache_letsencrypt_managed: True
apache_letsencrypt_proxy_modules:
  - proxy
  - proxy_http

apache_letsencrypt_proxy_conf:
  - letsencrypt-proxy.conf
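
apache_basic_auth gates the htpasswd tasks later in this changeset; the users themselves are expected in a vault file with the shape sketched in the comment above. An illustrative (non-vaulted) example for the single-file mode, where auth_file is not needed:

    apache_basic_auth: True
    apache_basic_users:
      - { username: 'monitor', password: 'changeme', state: 'present' }
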
@@ -0,0 +1,25 @@
#!/bin/bash

LE_SERVICES_SCRIPT_DIR=/usr/lib/acme/hooks
LE_LOG_DIR=/var/log/letsencrypt
DATE=$( date )

[ ! -d $LE_LOG_DIR ] && mkdir $LE_LOG_DIR
echo "$DATE" >> $LE_LOG_DIR/apache.log

if [ -f /etc/default/letsencrypt ] ; then
    . /etc/default/letsencrypt
else
    echo "No letsencrypt default file" >> $LE_LOG_DIR/apache.log
fi

echo "Reload the apache service" >> $LE_LOG_DIR/apache.log
if [ -x /bin/systemctl ] ; then
    systemctl reload apache2 >> $LE_LOG_DIR/apache.log 2>&1
else
    service apache2 reload >> $LE_LOG_DIR/apache.log 2>&1
fi

echo "Done." >> $LE_LOG_DIR/apache.log

exit 0

@@ -0,0 +1,7 @@
---
- name: apache2 reload
  service: name=apache2 state=reloaded

- name: apache2 restart
  service: name=apache2 state=restarted
@@ -0,0 +1,37 @@
---
- name: Load the basic auth modules
  apache2_module: name={{ item }} state=present
  with_items: '{{ apache_basic_auth_modules }}'
  notify: apache2 reload
  tags:
    - apache
    - apache_basic_auth

- name: Create the authentication directory
  file: path={{ apache_basic_auth_dir }} mode=0750 owner=root group={{ apache_group }} state=directory
  tags:
    - apache
    - apache_basic_auth

- name: Install the python-passlib library
  apt: pkg=python-passlib state=present
  tags:
    - apache
    - apache_basic_auth

- name: Create the basic auth file when it is unique to all the virtualhosts
  htpasswd: path={{ apache_basic_auth_file }} name={{ item.username }} password={{ item.password }} create=yes state={{ item.state }} owner=root group={{ apache_group }} mode=0640
  when: apache_basic_users is defined and apache_basic_auth_single_file
  with_items: '{{ apache_basic_users }}'
  tags:
    - apache
    - apache_basic_auth

- name: Create the basic auth files
  htpasswd: path={{ item.auth_file }} name={{ item.username }} password={{ item.password }} create=yes state={{ item.state }} owner=root group={{ apache_group }} mode=0640
  with_items: '{{ apache_basic_users | default([]) }}'
  when: apache_basic_users is defined and not apache_basic_auth_single_file
  tags:
    - apache
    - apache_basic_auth

@@ -0,0 +1,39 @@
---
- block:
    - name: Enable the proxy modules needed by letsencrypt
      apache2_module: name={{ item }} state=present
      with_items: '{{ apache_letsencrypt_proxy_modules }}'
      notify: apache2 reload

    - name: Install the apache letsencrypt directives on trusty
      template: src={{ item }}.j2 dest=/etc/apache2/conf-available/{{ item }} owner=root group=root mode=0644
      with_items: '{{ apache_letsencrypt_proxy_conf }}'
      notify: apache2 reload

    - name: Enable the apache letsencrypt directives on trusty
      file: src=/etc/apache2/conf-available/{{ item }} dest=/etc/apache2/conf-enabled/{{ item }} state=link
      with_items: '{{ apache_letsencrypt_proxy_conf }}'
      notify: apache2 reload

    - name: Create the acme hooks directory if it does not yet exist
      file: dest={{ letsencrypt_acme_services_scripts_dir }} state=directory owner=root group=root

    - name: Install a letsencrypt hook for apache
      copy: src=apache-letsencrypt-acme.sh dest={{ letsencrypt_acme_services_scripts_dir }}/apache2 owner=root group=root mode=4555

  when:
    - letsencrypt_acme_install is defined and letsencrypt_acme_install
    - apache_letsencrypt_managed
  tags: [ 'apache', 'letsencrypt' ]

- block:
    - name: Disable the letsencrypt conf
      file: dest=/etc/apache2/conf-enabled/letsencrypt-proxy.conf state=absent
      notify: apache2 reload

    - name: Remove the letsencrypt hook for apache
      file: path={{ letsencrypt_acme_services_scripts_dir }}/apache2 state=absent

  when: not apache_letsencrypt_managed
  tags: [ 'apache', 'letsencrypt' ]
@@ -0,0 +1,74 @@
---
- name: Load the required modules
  apache2_module: name={{ item }} state=present force=yes
  with_items: '{{ apache_default_modules }}'
  notify: apache2 reload
  ignore_errors: True
  tags: [ 'apache', 'apache_modules' ]

- name: Install the libapache2-mod-rpaf module
  apt: pkg=libapache2-mod-rpaf state=present
  when: apache_rpaf_install | bool
  tags: [ 'apache', 'apache_mods', 'apache_rpaf' ]

- name: Enable the apache rpaf module
  apache2_module: name=rpaf state=present
  when: apache_rpaf_install | bool
  notify: apache2 reload
  tags: [ 'apache', 'apache_mods', 'apache_rpaf' ]

- name: Load the apache ssl modules
  apache2_module: name={{ item }} state=present
  with_items: '{{ apache_ssl_modules }}'
  when: apache_ssl_modules_enabled | bool
  notify: apache2 reload
  tags: [ 'apache', 'apache_mods' ]

- name: Load some apache proxy modules
  apache2_module: name={{ item }} state=present
  with_items: '{{ apache_http_proxy_modules }}'
  when: apache_http_proxy_modules_enabled | bool
  notify: apache2 reload
  tags: [ 'apache', 'apache_mods' ]

- name: Load additional apache modules if any
  apache2_module: name={{ item }} state=present
  with_items: '{{ apache_additional_modules_list | default ([]) }}'
  when: apache_additional_modules_list is defined
  notify: apache2 reload
  tags: [ 'apache', 'apache_mods' ]

- name: Disable apache modules if any
  apache2_module: name={{ item }} state=absent
  with_items: '{{ apache_modules_to_be_removed | default ([]) }}'
  notify: apache2 reload
  tags: [ 'apache', 'apache_mods' ]

- name: Load the apache status module
  apache2_module: name={{ item }} state=present
  with_items: status
  when: apache_status_module | bool
  notify: apache2 reload
  tags: [ 'apache', 'apache_mods', 'apache_status' ]

- name: Configure the apache status module
  template: src={{ item }}.j2 dest=/etc/apache2/mods-available/{{ item }} owner=root group=root mode=0644
  with_items: status.conf
  when: apache_status_module | bool
  notify: apache2 reload
  tags: [ 'apache', 'apache_mods', 'apache_status' ]

- name: Load the apache info module
  apache2_module: name={{ item }} state=present
  with_items: info
  when: apache_info_module | bool
  notify: apache2 reload
  tags: [ 'apache', 'apache_mods', 'apache_info' ]

- name: Configure the apache info module
  template: src={{ item }}.j2 dest=/etc/apache2/mods-available/{{ item }} owner=root group=root mode=0644
  with_items: info.conf
  when: apache_info_module | bool
  notify: apache2 reload
  tags: [ 'apache', 'apache_mods', 'apache_info' ]
@@ -0,0 +1,14 @@
---
- block:
    - name: Install the Ubuntu apache PPA
      apt_repository: repo='{{ apache_ppa_repo }}' update_cache=yes

  when: apache_from_ppa
  tags: [ 'apache', 'apache_ppa' ]

- block:
    - name: Remove the Ubuntu apache PPA
      apt_repository: repo='{{ apache_ppa_repo }}' update_cache=yes state=absent

  when: not apache_from_ppa
  tags: [ 'apache', 'apache_ppa' ]

@@ -0,0 +1,44 @@
---
- name: Install the apache packages
  apt: pkg={{ item }} state={{ apache_pkg_state }} update_cache=yes cache_valid_time=3600
  with_items: '{{ apache_packages }}'
  tags: [ 'apache', 'apache_main_packages' ]

- name: Install the apache modules packages
  apt: pkg={{ item }} state={{ apache_pkg_state }} update_cache=yes cache_valid_time=3600
  with_items: '{{ apache_modules_packages }}'
  when:
    - not apache_from_ppa
    - is_trusty
  tags: [ 'apache', 'apache_additional_packages' ]

- name: Install the apache additional packages, if any
  apt: pkg={{ item }} state={{ apache_pkg_state }} update_cache=yes cache_valid_time=3600
  with_items: '{{ apache_additional_packages_list }}'
  when: apache_additional_packages
  tags: [ 'apache', 'apache_additional_packages' ]

- name: Install the ports conf file
  template: src=ports.conf dest=/etc/apache2/ports.conf
  notify: apache2 reload
  tags: [ 'apache', 'apache_conf' ]

- name: Remove the default virtualhost file
  file: dest=/etc/apache2/sites-enabled/{{ item }} state=absent
  with_items:
    - 000-default
    - 000-default.conf
  notify: apache2 reload
  tags: apache

- name: Ensure that the apache service is enabled and started
  service: name=apache2 state=started enabled=yes
  when: apache_service_enabled
  ignore_errors: True
  tags: apache

- name: Ensure that the apache service is disabled and stopped if we do not want it running
  service: name=apache2 state=stopped enabled=no
  when: not apache_service_enabled
  ignore_errors: True
  tags: apache
@@ -0,0 +1,8 @@
---
- import_tasks: apache-ppa.yml
- import_tasks: apache.yml
- import_tasks: apache-modules.yml
- import_tasks: apache-basic-auth.yml
  when: apache_basic_auth
- import_tasks: apache-letsencrypt.yml
  when: letsencrypt_acme_install is defined and letsencrypt_acme_install

@@ -0,0 +1,20 @@
<IfModule mod_info.c>

  # Allow remote server configuration reports, with the URL of
  # http://servername/server-info (requires that mod_info.c be loaded).
  # Uncomment and change the "192.0.2.0/24" to allow access from other hosts.
  #
  <Location {{ apache_info_location }}>
    SetHandler server-info
    Require local
{% if nagios_monitoring_server_ip is defined %}
{% for addr in nagios_monitoring_server_ip %}
    Require ip {{ addr }}
{% endfor %}
{% endif %}
{% for addr in apache_info_allowed_hosts %}
    Require ip {{ addr }}
{% endfor %}
  </Location>

</IfModule>

@@ -0,0 +1 @@
ProxyPass "/.well-known/acme-challenge" "http://127.0.0.1:{{ letsencrypt_acme_standalone_port }}/.well-known/acme-challenge"

@@ -0,0 +1,3 @@
{% for port in apache_listen_ports %}
Listen {{ port }}
{% endfor %}
@@ -0,0 +1,32 @@
<IfModule mod_status.c>
  # Allow server status reports generated by mod_status,
  # with the URL of http://servername/server-status
  # Uncomment and change the "192.0.2.0/24" to allow access from other hosts.

  <Location {{ apache_status_location }}>
    SetHandler server-status
    Require local
{% if nagios_monitoring_server_ip is defined %}
{% for addr in nagios_monitoring_server_ip %}
    Require ip {{ addr }}
{% endfor %}
{% endif %}
{% for addr in apache_status_allowed_hosts %}
    Require ip {{ addr }}
{% endfor %}
  </Location>

  # Keep track of extended status information for each request
  ExtendedStatus On

  # Determine if mod_status displays the first 63 characters of a request or
  # the last 63, assuming the request itself is greater than 63 chars.
  # Default: Off
  #SeeRequestTail On

  <IfModule mod_proxy.c>
    # Show Proxy LoadBalancer status in mod_status
    ProxyStatus On
  </IfModule>

</IfModule>
@ -42,7 +42,6 @@ ckan_geoview_url: ckanext-geoview
|
|||
ckan_geoview_name: resource_proxy
|
||||
ckan_dcat: False
|
||||
ckan_dcat_url: 'git+https://github.com/ckan/ckanext-dcat.git#egg=ckanext-dcat'
|
||||
ckan_dcat_1_0_0_url: 'git+https://github.com/ckan/ckanext-dcat.git@v1.0.0#egg=ckanext-dcat'
|
||||
# dcat implement harvesters too.
|
||||
# ckan_dcat_name: 'dcat dcat_rdf_harvester dcat_json_harvester dcat_json_interface'
|
||||
ckan_dcat_name: 'dcat dcat_json_interface'
|
||||
|
@ -75,40 +74,23 @@ ckan_ldap_fallback: True
|
|||
ckan_ckanext_lire: False
|
||||
ckan_ckanext_lire_n: lire
|
||||
ckan_ckanext_lire_url: 'https://github.com/milicp/ckanext-lire.git'
|
||||
# Kata OAI-PMH
|
||||
ckan_kata_oai_pmh: False
|
||||
# OAI-PMH
|
||||
ckan_oai_pmh: False
|
||||
ckan_oai_pmh_name: oaipmh
|
||||
ckan_oai_pmh_state: absent
|
||||
ckan_oai_pmh_url: 'git+https://github.com/kata-csc/ckanext-oaipmh#egg=ckanext-oaipmh'
|
||||
ckan_oai_pmh_kata_plugin_url: 'git+https://github.com/kata-csc/ckanext-kata.git#egg=ckanext-kata'
|
||||
ckan_oai_pmh_kata_ini_state: 'present'
|
||||
ckan_oai_pmh_kata_ini_options:
|
||||
- { section: 'app:main', option: 'kata.storage.malware_scan', value: 'false', state: '{{ ckan_oai_pmh_kata_ini_state }}' }
|
||||
- { section: 'app:main', option: 'kata.ldap.enabled', value: 'false', state: '{{ ckan_oai_pmh_kata_ini_state }}' }
|
||||
- { section: 'app:main', option: 'kata.disable_contact', value: 'true', state: '{{ ckan_oai_pmh_kata_ini_state }}' }
|
||||
|
||||
# OLD OAI-PMH
|
||||
ckan_oai_pm: False
|
||||
ckan_oai_pm_name: oaipmh
|
||||
ckan_oai_pm_state: absent
|
||||
ckan_oai_pm_url: 'git+https://github.com/florenthemmi/ckanext-oaipmh#egg=ckanext-oaipm'
|
||||
ckan_oai_pmh_url: 'git+https://github.com/florenthemmi/ckanext-oaipmh#egg=ckanext-oaipm'
|
||||
# Google analytics
|
||||
ckan_google_analytics: False
|
||||
ckan_ga_plugin_state: '{{ ckan_plugins_state }}'
|
||||
ckan_google_analytics_name: googleanalytics
|
||||
ckan_google_analytics_url: 'git+https://github.com/ckan/ckanext-googleanalytics.git#egg=ckanext-googleanalytics'
|
||||
ckan_google_analytics_fixed_file: 'http://svn.research-infrastructures.eu/public/d4science/gcube/trunk/data-catalogue/ckan-d4science-extension/{{ ckan_version }}/ckan-default/plugins/googleanalytics/plugin.py'
|
||||
#CKANEXT-RATING
|
||||
ckan_star_ratings: False
|
||||
ckan_star_ratings_state: present
|
||||
ckan_star_ratings_name: rating
|
||||
ckan_star_ratings_url: 'git+https://github.com/6aika/ckanext-rating.git#egg=ckanext-rating'
|
||||
|
||||
ckan_memcache_sessions: False
|
||||
ckan_memcache_deb_pkgs:
|
||||
- libmemcached10
|
||||
- libmemcached-dev
|
||||
|
||||
ckan_memcache_ini_opts:
  - { section: 'app:main', option: 'beaker.session.type', value: 'ext:memcached', state: 'present' }
  - { section: 'app:main', option: 'beaker.session.url', value: "{{ mc_ipaddress | default('127.0.0.1') }}:{{ mc_port | default('11211') }}", state: 'present' }
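For reference, a minimal sketch of the host variables that would activate memcached-backed Beaker sessions via the options above; the file name and address are hypothetical and not part of this changeset:

---
# Hypothetical host_vars/ckan-host.yml: enable memcached sessions.
# mc_ipaddress and mc_port are the names consumed by the defaults above.
ckan_memcache_sessions: True
mc_ipaddress: '10.0.0.10'
mc_port: '11211'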
@ -149,18 +131,15 @@ ckan_pip_dependencies:
- 'urllib3[secure]'
- bleach
- pyOpenSSL
- cryptography
- idna
- certifi
- xmltodict
- ndg-httpsclient
- pyasn1
- enum
- ipaddress
- x509

ckan_pip_versioned_dependencies:
  - { name: 'SQLAlchemy', version: '0.9.6', state: 'present' }
  - { name: 'cryptography', version: '2.8', state: 'present' }

#
apache_additional_packages:
@ -1,17 +1,9 @@
---
- name: Configure the CKAN plugins list into the configuration file
  ini_file: dest={{ ckan_config_file }} section={{ item.section }} option={{ item.option }} value={{ item.value }} state={{ item.state }} backup=no
  ini_file: dest={{ ckan_config_file }} section={{ item.section }} option={{ item.option }} value={{ item.value }} state={{ item.state }} backup=yes
  with_items: '{{ ckan_production_ini_plugins_opts }}'
  notify:
    - Restart CKAN
    - Restart fetch and gather consumers
  tags: [ 'ckan', 'ckan_ini', 'ckan_plugins' ]

- name: Configure the CKAN options used by the KATA plugin
  ini_file: dest={{ ckan_config_file }} section={{ item.section }} option={{ item.option }} value={{ item.value }} state={{ item.state }} backup=no
  with_items: '{{ ckan_oai_pmh_kata_ini_options }}'
  notify:
    - Restart CKAN
    - Restart fetch and gather consumers
  tags: [ 'ckan', 'ckan_ini', 'ckan_plugins', 'ckan_oai_pmh' ]
@ -1,11 +1,4 @@
---
- block:
    - name: Install the memcache library deb package
      apt: pkg={{ ckan_memcache_deb_pkgs }} state=present cache_valid_time=1800

  when: ckan_memcache_sessions is defined and ckan_memcache_sessions
  tags: [ 'ckan', 'ckan_sessions', 'ckan_memcache' ]

- block:
    - name: Install the memcache library
      pip: name=pylibmc virtualenv={{ ckan_virtenv }} state=present
@ -16,6 +9,9 @@
  tags: [ 'ckan', 'ckan_sessions', 'ckan_memcache' ]

- block:
    - name: Install the memcache library deb package
      apt: pkg=libmemcached10 state=present update_cache=yes cache_valid_time=1800

    - name: Configure CKAN so that it uses memcache for its sessions
      ini_file: dest={{ ckan_config_file }} section={{ item.section }} option={{ item.option }} value={{ item.value }} state={{ item.state }}
      with_items: '{{ ckan_memcache_ini_opts }}'
@ -6,10 +6,6 @@
  tags: [ 'ckan', 'geonetwork', 'ckan_plugins', 'ckan_pip_deps' ]

- block:
    - name: Upgrade pip inside the virtualenv
      pip: name=pip virtualenv={{ ckan_virtenv }} state=latest
  tags: [ 'ckan', 'geonetwork', 'ckan_plugins', 'ckan_pip_deps' ]

- name: Install some python versioned plugins dependencies inside the CKAN virtualenv
  pip: name={{ item.name }} virtualenv={{ ckan_virtenv }} version={{ item.version }} state={{ item.state }}
  with_items: '{{ ckan_pip_versioned_dependencies }}'
@ -21,7 +17,7 @@
- name: Download the CKAN ckanext-harvest plugin
  pip: name='{{ ckan_ckanext_harvester_url }}' virtualenv={{ ckan_virtenv }} state={{ ckan_plugins_state }} editable=True
  when: ckan_geonetwork_harvester | bool
  when: ckan_geonetwork_harvester
  register: ckanext_harvest_install
  notify:
    - Restart CKAN
@ -30,7 +26,7 @@
- name: Download the CKAN ckanext-harvest requirements
  pip: requirements={{ ckan_virtenv }}/src/ckanext-harvest/pip-requirements.txt virtualenv={{ ckan_virtenv }} state={{ ckan_plugins_state }}
  when: ckan_geonetwork_harvester | bool
  when: ckan_geonetwork_harvester
  notify: Restart fetch and gather consumers
  tags: [ 'ckan', 'geonetwork', 'ckan_plugins' ]
@ -38,60 +34,50 @@
  shell: . /usr/lib/ckan/default/bin/activate ; paster --plugin=ckanext-harvest harvester initdb --config={{ ckan_config_file }}
  when:
    - ckanext_harvest_install is changed
    - ckan_init_db_and_solr | bool
    - ckan_init_db_and_solr
  notify: Restart fetch and gather consumers
  tags: [ 'ckan', 'geonetwork', 'ckan_plugins' ]

- name: Download the CKAN ckanext-spatial plugin
  pip: name='{{ ckan_ckanext_spatial_url }}' virtualenv={{ ckan_virtenv }} state={{ ckan_plugins_state }} editable=True
  notify: Restart CKAN
  when: ckan_geonetwork_harvester | bool
  when: ckan_geonetwork_harvester
  register: ckanext_spatial_install
  tags: [ 'ckan', 'ckan_spatial', 'ckan_plugins' ]

- name: Download the CKAN ckanext-spatial requirements
  pip: requirements={{ ckan_virtenv }}/src/ckanext-spatial/pip-requirements.txt virtualenv={{ ckan_virtenv }} state=present
  when: ckan_geonetwork_harvester | bool
  when: ckan_geonetwork_harvester
  tags: [ 'ckan', 'ckan_spatial', 'ckan_plugins' ]

- name: Initialize the CKAN ckanext-spatial plugin
  shell: . /usr/lib/ckan/default/bin/activate ; paster --plugin=ckanext-spatial spatial initdb --config={{ ckan_config_file }}
  when:
    - ckanext_spatial_install is changed
    - ckan_init_db_and_solr | bool
    - ckan_init_db_and_solr
  tags: [ 'ckan', 'ckan_spatial', 'ckan_plugins' ]

- name: Download the CKAN ckanext-geoview plugin
  pip: name='{{ ckan_geoview_url }}' virtualenv={{ ckan_virtenv }} state={{ ckan_plugins_state }}
  notify: Restart CKAN
  when: ckan_geoview | bool
  when: ckan_geoview
  tags: [ 'ckan', 'ckan_geoview', 'ckan_plugins' ]

- name: Download the latest version of the CKAN ckanext-dcat plugin code on CKAN version >= 2.8
- name: Download the CKAN ckanext-dcat plugin code
  pip: name={{ ckan_dcat_url }} virtualenv={{ ckan_virtenv }} state={{ ckan_plugins_state }} editable=True
  when:
    - ckan_dcat | bool
    - ckan_version is version_compare('2.8', '>=')
  notify: Restart CKAN
  tags: [ 'ckan', 'ckan_dcat', 'ckan_plugins' ]

- name: Download the CKAN ckanext-dcat plugin code. Stick to version 1.0.0 on CKAN < 2.8
  pip: name={{ ckan_dcat_1_0_0_url }} virtualenv={{ ckan_virtenv }} state={{ ckan_plugins_state }} editable=True
  when:
    - ckan_dcat | bool
    - ckan_version is version_compare('2.8', '<')
  when: ckan_dcat
  notify: Restart CKAN
  tags: [ 'ckan', 'ckan_dcat', 'ckan_plugins' ]

- name: Download the CKAN ckanext-dcat requirements
  pip: requirements={{ ckan_virtenv }}/src/ckanext-dcat/requirements.txt virtualenv={{ ckan_virtenv }} state=present
  when: ckan_dcat | bool
  when: ckan_dcat
  notify: Restart CKAN
  tags: [ 'ckan', 'ckan_dcat', 'ckan_plugins' ]

- name: Download the CKAN Geonetwork plugin code
  git: repo={{ ckan_geonetwork_harvester_url }} dest=/usr/lib/ckan/default/src/ckanext-geonetwork force=yes update={{ ckan_git_plugins_state }}
  when: ckan_geonetwork_harvester | bool
  when: ckan_geonetwork_harvester
  register: install_geonetwork_harvester
  tags: [ 'ckan', 'ckan_geonetwork', 'ckan_plugins' ]
@ -103,51 +89,41 @@
- name: Install the script that updates the tracking data
  template: src=tracker_update.sh.j2 dest={{ ckan_virtenv }}/bin/tracker_update owner={{ ckan_shell_user }} group={{ ckan_shell_user }} mode=0555
  when: ckan_geonetwork_harvester | bool
  when: ckan_geonetwork_harvester
  tags: [ 'ckan', 'ckan_geonetwork', 'ckan_plugins', 'tracker' ]

- name: Install the cron job that runs the tracker update script
  cron: name="tracker update" minute="0" hour="3" job="{{ ckan_virtenv }}/bin/tracker_update > {{ ckan_logdir }}/tracker_update.log 2>&1" user={{ ckan_shell_user }}
  when: ckan_geonetwork_harvester | bool
  when: ckan_geonetwork_harvester
  tags: [ 'ckan', 'ckan_geonetwork', 'ckan_plugins', 'tracker' ]

- name: Download the CKAN PDF viewer plugin
  pip: name='{{ ckan_ckanext_pdfview_url }}' virtualenv={{ ckan_virtenv }} state={{ ckan_plugins_state }}
  when: ckan_pdfview | bool
  when: ckan_pdfview
  notify: Restart CKAN
  tags: [ 'ckan', 'ckan_pdfview', 'ckan_plugins' ]

- name: Download the CKAN Privatedatasets extension for CKAN 2.8
- name: Download the CKAN Privatedatasets extension
  pip: name='{{ ckan_privatedatasets_url }}' virtualenv={{ ckan_virtenv }} state={{ ckan_plugins_state }}
  when:
    - ckan_privatedatasets | bool
    - ckan_version is version_compare('2.8', '>=')
  notify: Restart CKAN
  tags: [ 'ckan', 'ckan_privdatasets', 'ckan_plugins' ]

- name: Download the CKAN Privatedatasets extension for CKAN 2.6
  pip: name='{{ ckan_privatedatasets_url }}' virtualenv={{ ckan_virtenv }} state={{ ckan_plugins_state }} version=0.2.19
  when:
    - ckan_privatedatasets | bool
    - ckan_version is version_compare('2.8', '<')
  when: ckan_privatedatasets
  notify: Restart CKAN
  tags: [ 'ckan', 'ckan_privdatasets', 'ckan_plugins' ]

- name: Download the CKAN hierarchy plugin code
  pip: name='{{ ckan_hierarchy_url }}' virtualenv={{ ckan_virtenv }} state={{ ckan_plugins_state }} editable=True
  when: ckan_hierarchy | bool
  when: ckan_hierarchy
  notify: Restart CKAN
  tags: [ 'ckan', 'ckan_hierarchy', 'ckan_plugins' ]

- name: Download the CKAN pages plugin code
  pip: name='{{ ckan_pages_url }}' virtualenv={{ ckan_virtenv }} state={{ ckan_plugins_state }} editable=True
  when: ckan_pages | bool
  when: ckan_pages
  notify: Restart CKAN
  tags: [ 'ckan', 'ckan_pages', 'ckan_plugins' ]

- name: Download the CKAN LDAP plugin code
  git: repo={{ ckan_ldap_url }} dest=/usr/lib/ckan/default/src/ckanext-ldap force=yes update={{ ckan_git_plugins_state }}
  when: ckan_ldap | bool
  when: ckan_ldap
  register: install_ldap_plugin
  tags: [ 'ckan', 'ckan_ldap', 'ckan_plugins' ]
@ -159,7 +135,7 @@
- name: Download the CKAN LIRE plugin code
  git: repo={{ ckan_ckanext_lire_url }} dest={{ ckan_virtenv }}/src/ckanext-lire force=yes update={{ ckan_git_plugins_state }}
  when: ckan_ckanext_lire | bool
  when: ckan_ckanext_lire
  register: install_lire_plugin
  tags: [ 'ckan', 'ckan_lire', 'ckan_plugins' ]
@ -169,45 +145,21 @@
  notify: Restart CKAN
  tags: [ 'ckan', 'ckan_lire', 'ckan_plugins' ]

- name: Download the KATA CKAN OAI-PMH plugin
- name: Download the CKAN OAI-PMH plugin
  pip: name='{{ ckan_oai_pmh_url }}' virtualenv={{ ckan_virtenv }} state={{ ckan_plugins_state }} editable=True
  when: ckan_kata_oai_pmh | bool
  when: ckan_oai_pmh
  notify: Restart CKAN
  tags: [ 'ckan', 'ckan_oai_pmh', 'ckan_plugins' ]

- name: Download the KATA CKAN ckanext-oaipmh requirements
  pip: requirements={{ ckan_virtenv }}/src/ckanext-oaipmh/requirements.txt virtualenv={{ ckan_virtenv }} state={{ ckan_plugins_state }}
  when: ckan_kata_oai_pmh | bool
  notify: Restart CKAN
  tags: [ 'ckan', 'ckan_oai_pmh', 'ckan_plugins' ]

- name: Download the KATA CKAN plugin
  pip: name='{{ ckan_oai_pmh_kata_plugin_url }}' virtualenv={{ ckan_virtenv }} state={{ ckan_plugins_state }} editable=True
  when: ckan_kata_oai_pmh | bool
  notify: Restart CKAN
  tags: [ 'ckan', 'ckan_oai_pmh', 'ckan_plugins' ]

- name: Download the KATA CKAN requirements
  pip: requirements={{ ckan_virtenv }}/src/ckanext-kata/requirements.txt virtualenv={{ ckan_virtenv }} state={{ ckan_plugins_state }}
  when: ckan_kata_oai_pmh | bool
  notify: Restart CKAN
  tags: [ 'ckan', 'ckan_oai_pmh', 'ckan_plugins' ]

- name: Download the opendatasoft CKAN OAI-PMH plugin
  pip: name='{{ ckan_oai_pm_url }}' virtualenv={{ ckan_virtenv }} state={{ ckan_plugins_state }} editable=True
  when: ckan_oai_pm | bool
  notify: Restart CKAN
  tags: [ 'ckan', 'ckan_oai_pm', 'ckan_plugins' ]

- name: Download the CKAN google analytics plugin python requirements
  pip: name='genshi' virtualenv={{ ckan_virtenv }} state={{ ckan_plugins_state }}
  when: ckan_google_analytics | bool
  when: ckan_google_analytics
  notify: Restart CKAN
  tags: [ 'ckan', 'ckan_google_analytics', 'ckan_plugins' ]

- name: Download the CKAN google analytics plugin
  pip: name='{{ ckan_google_analytics_url }}' virtualenv={{ ckan_virtenv }} editable=true state={{ ckan_ga_plugin_state }}
  when: ckan_google_analytics | bool
  when: ckan_google_analytics
  register: install_ckan_google_analytics
  notify: Restart CKAN
  tags: [ 'ckan', 'ckan_google_analytics', 'ckan_plugins' ]
@ -220,7 +172,7 @@
- name: Download the CKAN google analytics reports plugin
  pip: name='{{ ckan_ga_reports_url }}' virtualenv={{ ckan_virtenv }} state={{ ckan_plugins_state }} editable=True
  when: ckan_ga_reports | bool
  when: ckan_ga_reports
  register: install_ckan_ga_reports
  tags: [ 'ckan', 'ckan_google_analytics', 'ckan_ga_reports', 'ckan_plugins' ]
@ -228,14 +180,14 @@
  shell: . /usr/lib/ckan/default/bin/activate ; cd /usr/lib/ckan/default/src/ckanext-ga-report ; paster initdb --config={{ ckan_config_file }}
  when:
    - install_ckan_ga_reports is changed
    - ckan_init_db_and_solr | bool
    - ckan_init_db_and_solr
  notify: Restart CKAN
  tags: [ 'ckan', 'ckan_google_analytics', 'ckan_ga_reports', 'ckan_plugins' ]

- name: Download the CKAN star ratings plugin
  pip: name='{{ ckan_star_ratings_url }}' virtualenv={{ ckan_virtenv }} editable=true state={{ ckan_star_ratings_state }}
  notify: Restart CKAN
  when: ckan_star_ratings | bool
  when: ckan_star_ratings
  register: install_ckan_star_ratings
  tags: [ 'ckan', 'ckan_star_ratings', 'ckan_plugins' ]
@ -244,24 +196,24 @@
  notify: Restart CKAN
  when:
    - install_ckan_star_ratings is changed
    - ckan_star_ratings | bool
    - ckan_star_ratings
  tags: [ 'ckan', 'ckan_star_ratings', 'ckan_plugins' ]

- name: Install the CKAN profiler plugin
  pip: name='{{ ckan_profiler_url }}' virtualenv={{ ckan_virtenv }} state={{ ckan_plugins_state }} editable=True
  when: ckan_profiler | bool
  when: ckan_profiler
  notify: Restart CKAN
  tags: [ 'ckan', 'ckan_profiler', 'ckan_plugins' ]

- name: Create the profiler plugin log directory
  become_user: root
  file: dest=/var/log/ckan-profiler owner=www-data group=www-data state=directory
  when: ckan_profiler | bool
  when: ckan_profiler
  tags: [ 'ckan', 'ckan_profiler', 'ckan_plugins' ]

- name: Download the CKAN-DATESEARCH plugin code
  git: repo={{ ckan_datesearch_url }} dest=/usr/lib/ckan/default/src/ckanext-datesearch force=yes update={{ ckan_git_plugins_state }}
  when: ckan_datesearch | bool
  when: ckan_datesearch
  register: install_datesearch_plugin
  tags: [ 'ckan', 'ckan_datesearch', 'ckan_plugins' ]
@ -19,11 +19,6 @@ clamav_milter_rh_pkgs:
clamav_unofficial_sigs_rh_pkgs:
  - clamav-unofficial-sigs
  - perl

clamav_signatures_db_dir: '/var/lib/clamav'
clamav_signatures_dbs_to_wipe:
  - 'scamnailer.ndb'

clamav_clamd_user: clamscan
clamav_clamd_conf_file: '/etc/clamd.d/scan.conf'
@ -59,9 +54,6 @@ clamav_additional_signatures_proxy_host: ''
clamav_additional_signatures_proxy_port: 3128
clamav_additional_signatures_proxy_user: ''
clamav_additional_signatures_proxy_pwd: ''
clamav_additional_signatures_db_dir: '/var/lib/clamav-unofficial-sigs'
clamav_additional_signatures_dbs_to_wipe:
  - 'dbs-ss/scamnailer.ndb'

# Freshclam
clamav_freshclam_check_frequency: 12
@ -34,15 +34,6 @@
- name: Ensure that the clamd service is running and enabled
  service: name=clamd@scan state=started enabled=yes

- name: Remove some signature files that are broken
  file: dest={{ clamav_signatures_db_dir }}/{{ item }} state=absent
  with_items: '{{ clamav_signatures_dbs_to_wipe }}'
  tags: [ 'clamav', 'clamav_clamd', 'clamav_config', 'clamav_signatures' ]

- name: Install a cron job that wipes the broken signature files, in case they reappear after an update
  cron: name="Wipe some clamav signature dbs that are broken" user=root special_time=hourly job="{% for db in clamav_signatures_dbs_to_wipe %}/bin/rm -f {{ clamav_signatures_db_dir }}/{{ db }}{% if not loop.last %}; {% endif %}{% endfor %}"
  tags: [ 'clamav', 'clamav_clamd', 'clamav_config', 'clamav_signatures' ]

  when: clamav_install | bool
  tags: [ 'clamav', 'clamav_clamd', 'clamav_config' ]
@ -54,16 +45,8 @@
  - os.conf
  - user.conf

- name: Wipe some unofficial signature files that are broken. Disabling them seems not enough
  copy: content="" dest={{ clamav_additional_signatures_db_dir }}/{{ item }} force=yes
  with_items: '{{ clamav_additional_signatures_dbs_to_wipe }}'
  notify: Restart clamd

- name: Install a cron job that wipes the broken unofficial signature files, in case they reappear after an update
  cron: name="Wipe some unofficial clamav signature dbs that are broken" user=root special_time=hourly job="{% for db in clamav_additional_signatures_dbs_to_wipe %}echo '' > {{ clamav_additional_signatures_db_dir }}/{{ db }}{% if not loop.last %}; {% endif %}{% endfor %}"

  when: clamav_unofficial_sigs_install | bool
  tags: [ 'clamav', 'clamav_clamd', 'clamav_config', 'clamav_unofficial_sigs' ]
  tags: [ 'clamav', 'clamav_clamd', 'clamav_config' ]

- name: Configure clamav milter
  block:
@ -30,7 +30,7 @@
# Default dbs rating (Default: MEDIUM)
# valid rating: LOW, MEDIUM, HIGH
default_dbs_rating="LOW"
#default_dbs_rating="HIGH"

# Per Database
# These ratings will override the global rating for the specific database
@ -48,9 +48,6 @@ default_dbs_rating="LOW"
# http://www.example.org/sigs.ldb
#) #END ADDITIONAL DATABASES

# master.conf single signature overrides.
scamnailer.ndb|DISABLE

# Uncomment the following line to enable the script
user_configuration_complete="yes"
@ -0,0 +1,7 @@
---
#
# Define the following variables to manage additional disks and mount points
additional_disks: False
#disks_and_mountpoints_list:
# - { mountpoint: '/data', device: 'xvda3', fstype: 'xfs', opts: 'noatime', state: 'mounted', create_filesystem: True }
@ -0,0 +1,25 @@
---
- block:
    - name: Create a file system on the new disks
      filesystem: dev=/dev/{{ item.device }} fstype={{ item.fstype }} force=no
      with_items: '{{ disks_and_mountpoints_list | default([]) }}'
      when:
        - additional_disks
        - item.create_filesystem

    - name: Install the NFS client utilities when we are going to mount a NFS file system
      apt: pkg=nfs-common state=present update_cache=yes cache_valid_time=1800
      with_items: '{{ disks_and_mountpoints_list | default([]) }}'
      when: item.fstype == 'nfs'

    - name: Install the NFS 4 acl tools when we are going to mount a NFS file system
      apt: pkg=nfs4-acl-tools state=present update_cache=yes cache_valid_time=1800
      with_items: '{{ disks_and_mountpoints_list | default([]) }}'
      when: item.fstype == 'nfs'

    - name: Manage the additional file systems
      mount: name={{ item.mountpoint }} src={{ item.root_device | default('/dev') }}/{{ item.device }} fstype={{ item.fstype }} opts={{ item.opts }} state={{ item.state }}
      with_items: '{{ disks_and_mountpoints_list | default([]) }}'

  when: additional_disks
  tags: [ 'data_disk', 'mountpoint' ]
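As a sketch, the host variables that would drive the tasks above could look like this; the device name, server name, and mount points are hypothetical:

---
# Hypothetical host_vars: one extra XFS data disk plus an NFS mount.
# root_device overrides the '/dev' default used to build the mount src.
additional_disks: True
disks_and_mountpoints_list:
  - { mountpoint: '/data', device: 'xvdb', fstype: 'xfs', opts: 'noatime', state: 'mounted', create_filesystem: True }
  - { mountpoint: '/shared', root_device: 'nfs-server.example.org:', device: 'export', fstype: 'nfs', opts: 'defaults', state: 'mounted', create_filesystem: False }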
@ -6,8 +6,6 @@
- name: Install python-software-properties
  apt: pkg=python-software-properties state=present update_cache=yes cache_valid_time=3600
  when:
    - ansible_distribution == 'Ubuntu'
    - ansible_distribution_version is version_compare('14.04', '==')
  when: is_trusty
  tags: pythonapt
@ -0,0 +1,17 @@
---
- name: Set the hostname when different from the inventory one.
  hostname: name={{ hostname }}
  when: hostname is defined
  tags: [ 'bootstrap', 'set_hostname' ]

- name: Set the hostname as defined in the inventory
  hostname: name={{ inventory_hostname }}
  when: hostname is not defined
  tags: [ 'bootstrap', 'set_hostname' ]

- name: Add the hostname to /etc/hosts
  shell: grep -v {{ ansible_default_ipv4.address }} /etc/hosts > /etc/hosts.tmp ; echo "{{ ansible_default_ipv4.address }} {{ hostname }} {{ ansible_hostname }}" >> /etc/hosts.tmp ; /bin/mv /etc/hosts.tmp /etc/hosts
  when:
    - hostname is defined
    - ansible_virtualization_type == 'xen'
  tags: [ 'bootstrap', 'set_hostname' ]
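The grep/mv pipeline above rewrites /etc/hosts on every run; purely as an illustrative alternative (not part of this changeset), an idempotent variant using lineinfile and the same facts could look like:

- name: Add the hostname to /etc/hosts (hypothetical lineinfile variant)
  lineinfile: path=/etc/hosts regexp='^{{ ansible_default_ipv4.address }}\s' line="{{ ansible_default_ipv4.address }} {{ hostname }} {{ ansible_hostname }}"
  when: hostname is defined
  tags: [ 'bootstrap', 'set_hostname' ]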
@ -0,0 +1,8 @@
---
deb_default_locale: "en_US.UTF-8"
deb_locales_list:
  - { name: '{{ deb_default_locale }}' }
  - { name: 'en_US' }
  - { name: 'it_IT.UTF-8' }
  - { name: 'it_IT' }
@ -0,0 +1,10 @@
---
- block:
    - name: Add/remove a list of locales
      locale_gen: name={{ item.name }} state={{ item.state | default('present') }}
      with_items: '{{ deb_locales_list }}'

    - name: Set the default locale
      shell: update-locale LANG={{ deb_default_locale }}

  tags: locale
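Since locale_gen honours an optional per-item state, a host can also drop a locale; a minimal sketch (the override file and the choice of locale are hypothetical):

# Hypothetical group_vars override: keep the default locale, remove it_IT.
deb_locales_list:
  - { name: 'en_US.UTF-8' }
  - { name: 'it_IT', state: 'absent' }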
@ -0,0 +1,19 @@
---
dovecot_service_enabled: True
dovecot_rh_pkgs:
  - dovecot
  - dovecot-pigeonhole

dovecot_firewalld_services:
  - { service: 'pop3', state: 'enabled', zone: '{{ firewalld_default_zone }}' }
  - { service: 'pop3s', state: 'enabled', zone: '{{ firewalld_default_zone }}' }
  - { service: 'imap', state: 'enabled', zone: '{{ firewalld_default_zone }}' }
  - { service: 'imaps', state: 'enabled', zone: '{{ firewalld_default_zone }}' }

# 24 is LMTP
# 4190 is ManageSieve
dovecot_firewalld_ports:
  - { port: 24, protocol: 'tcp', state: 'disabled', zone: '{{ firewalld_default_zone }}' }
  - { port: 4190, protocol: 'tcp', state: 'disabled', zone: '{{ firewalld_default_zone }}' }
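As a sketch, a host that serves Sieve clients would override the whole list in its own vars, flipping only the 4190 entry (hypothetical override, LMTP stays closed):

# Hypothetical host_vars: open ManageSieve, keep LMTP disabled.
dovecot_firewalld_ports:
  - { port: 24, protocol: 'tcp', state: 'disabled', zone: '{{ firewalld_default_zone }}' }
  - { port: 4190, protocol: 'tcp', state: 'enabled', zone: '{{ firewalld_default_zone }}' }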
@ -0,0 +1,12 @@
---
- name: Manage the firewalld rules
  block:
    - name: Manage the dovecot related services
      firewalld: service={{ item.service }} zone={{ item.zone }} permanent={{ item.permanent | default(True) }} state={{ item.state }} immediate=True
      with_items: '{{ dovecot_firewalld_services }}'

    - name: Manage the dovecot related tcp/udp ports
      firewalld: port={{ item.port }}/{{ item.protocol }} zone={{ item.zone }} permanent={{ item.permanent | default(False) }} state={{ item.state }} immediate=True
      with_items: '{{ dovecot_firewalld_ports }}'

  tags: [ 'dovecot', 'firewall', 'firewalld', 'iptables', 'iptables_rules' ]
@ -0,0 +1,10 @@
---
- name: Manage the letsencrypt handler
  block:
    - name: Create the letsencrypt hooks directory if it is not present
      file: dest={{ letsencrypt_acme_services_scripts_dir }} state=directory owner=root group=root mode=0755

    - name: Install the dovecot letsencrypt hook
      template: src=dovecot_letsencrypt_hook.sh.j2 dest={{ letsencrypt_acme_services_scripts_dir }}/dovecot owner=root group=root mode=0750

  tags: [ 'dovecot', 'imap', 'letsencrypt' ]
@ -0,0 +1,15 @@
---
- name: Install the dovecot packages and start the service
  block:
    - name: Install the dovecot packages
      yum: pkg={{ dovecot_rh_pkgs }}

    - name: Ensure that the service is started and enabled
      service: name=dovecot state=started enabled=yes
      when: dovecot_service_enabled | bool

    - name: Stop and disable the dovecot service
      service: name=dovecot state=stopped enabled=no
      when: not dovecot_service_enabled | bool

  tags: [ 'dovecot', 'imap' ]
@ -0,0 +1,8 @@
---
- import_tasks: dovecot_rh.yml
  when: ansible_distribution_file_variety == "RedHat"
- import_tasks: dovecot_firewalld.yml
  when: firewalld_enabled is defined and firewalld_enabled | bool
- import_tasks: dovecot_letsencrypt.yml
  when: letsencrypt_acme_install is defined and letsencrypt_acme_install | bool
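A minimal sketch of the variables that would activate all three conditional imports above (values are illustrative, and the vars file name is hypothetical):

---
# Hypothetical group_vars for a CentOS mail host.
firewalld_enabled: True
letsencrypt_acme_install: True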
@ -0,0 +1,5 @@
#!/bin/bash

/bin/systemctl reload dovecot > {{ letsencrypt_acme_sh_log_dir }}/dovecot.log 2>&1

exit $?
@ -2,11 +2,8 @@
drupal_install: False
drupal_install_deb: False
drupal_maintenance_cron: False
drupal_major_ver: 8
drupal_major: '{{ drupal_major_ver }}'
drupal_minor_ver: 8
drupal_point_ver: 3
drupal_version: '{{ drupal_major_ver }}.{{ drupal_minor_ver }}.{{ drupal_point_ver }}'
drupal_major: 8
drupal_version: '{{ drupal_major_ver }}.1.7'
drupal_dist_name: 'drupal-{{ drupal_version }}'
drupal_dist_file: '{{ drupal_dist_name }}.tar.gz'
drupal_tar_url: 'http://ftp.drupal.org/files/projects/{{ drupal_dist_file }}'
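Since drupal_version is now assembled from the three *_ver components, pinning a different release is a per-component override; a sketch (the release numbers here are illustrative only):

# Hypothetical host_vars: deploy a different Drupal 8 point release.
drupal_major_ver: 8
drupal_minor_ver: 9
drupal_point_ver: 1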
@ -22,7 +19,6 @@ http_group: '{{ http_user }}'
# - 'php{{ php_version }}-mbstring'
# - php-ssh2
drupal_php_prereq:
  - 'php{{ php_version }}-fpm'
  - 'php{{ php_version }}-json'
  - 'php{{ php_version }}-intl'
  - 'php{{ php_version }}-cli'
@ -30,11 +26,10 @@ drupal_php_prereq:
  - 'php{{ php_version }}-gd'
  - 'php{{ php_version }}-json'
  - 'php{{ php_version }}-curl'
  - 'php{{ php_version }}-mbstring'
  - php-apcu
  - php-apc
  - php-pear
  - php-date
  - php-memcached
  - php-xml-serializer
  - imagemagick

drupal_install_drush: False
@ -1,5 +1,4 @@
---
dependencies:
  - { role: 'php-fpm' }
  - { role: '../../library/roles/composer', when: drupal_install_drush }
@ -11,8 +11,8 @@
  tags: [ 'drupal', 'drush', 'global_drush' ]

- name: Install drush locally as part of drupal
  #become: True
  #become_user: '{{ item.user }}'
  become: True
  become_user: '{{ item.user }}'
  composer: command=require arguments=drush/drush:{{ drupal_drush_version }} prefer_dist=yes working_dir={{ item.doc_root }}
  with_items: '{{ phpfpm_pools }}'
@ -1,2 +0,0 @@
---
php_fpm_packages: '{{ drupal_php_prereq }}'
@ -1,32 +0,0 @@
---
exist_db_major: 5
exist_db_minor: 2
exist_db_patch: 0
exist_db_version: '{{ exist_db_major }}.{{ exist_db_minor }}.{{ exist_db_patch }}'
exist_db_distribution_dir: 'exist-distribution-{{ exist_db_version }}'
exist_db_distribution: 'https://bintray.com/existdb/releases/download_file?file_path={{ exist_db_distribution_dir }}-unix.tar.bz2'
exist_db_http_port: 8080

exist_db_home: '/srv/existdb'
exist_db_data_dir: '/srv/existdb/data'
exist_db_journal_dir: '/srv/existdb/data-journal'
exist_db_backup_dir: '/srv/existdb/data-backups'
exist_db_base_dir: '{{ exist_db_home }}/distribution/{{ exist_db_distribution_dir }}'
exist_db_logdir: '/var/log/exist-db'
#exist_db_conf_dir: '/srv/existdb/etc'
exist_db_conf_dir: '{{ exist_db_home }}/distribution/{{ exist_db_distribution_dir }}/etc'

# Always express it in 'm' (MegaBytes)
exist_db_min_java_heap: '512'
exist_db_max_java_heap: '{{ exist_db_min_java_heap }}'
# exist_db_max_java_heap / 3
exist_db_cache_size: '170'
exist_db_file_encoding: 'UTF-8'
exist_db_java_opts: "-Xms{{ exist_db_min_java_heap }}m -Xmx{{ exist_db_max_java_heap }}m -server -Djava.awt.headless=true -Dfile.encoding={{ exist_db_file_encoding }}"

exist_db_consistency_enabled: True
exist_db_check_cron: "0 0 0/3 * * ?"
exist_db_max_backups_enabled: 6
exist_db_backups_enabled: True
exist_db_incremental_backups_enabled: "yes"
exist_db_backup_cron: "0 0 0/12 * * ?"
@ -1,3 +0,0 @@
---
- name: Restart existdb
  service: name=exist-db state=started enabled=yes
@ -1,4 +0,0 @@
---
dependencies:
  - { role: '../../library/roles/nginx', when: nginx_enabled is defined and nginx_enabled }
  - { role: '../../library/roles/openjdk' }
@ -1,81 +0,0 @@
---
- name: Create the exist-db user and the directory tree
  block:
    - name: Create the exist-db user
      user: name={{ exist_db_user }} home={{ exist_db_home }} comment="eXist-db Service Account" createhome=no shell=/usr/sbin/nologin system=yes

    - name: Create the exist-db base path
      file: dest={{ item }} state=directory mode=0750 owner=root group={{ exist_db_group }}
      with_items:
        - '{{ exist_db_home }}'
        - '{{ exist_db_home }}/distribution'
        - '{{ exist_db_conf_dir }}'

    - name: Create the exist-db directory tree
      file: dest={{ item }} state=directory mode=0750 owner={{ exist_db_user }} group={{ exist_db_group }}
      with_items:
        - '{{ exist_db_data_dir }}'
        - '{{ exist_db_journal_dir }}'
        - '{{ exist_db_backup_dir }}'
        - '{{ exist_db_logdir }}'

    - name: Remove the log and data directories shipped with the exist distribution directory
      file: dest={{ exist_db_home }}/distribution/{{ exist_db_distribution_dir }}/{{ item }} state=absent
      with_items:
        - 'data'
        - 'logs'

    - name: Link the log directory into the exist distribution directory
      file: src={{ exist_db_logdir }} dest={{ exist_db_home }}/distribution/{{ exist_db_distribution_dir }}/logs state=link

    - name: Link the data directory into the exist distribution directory
      file: src={{ exist_db_data_dir }} dest={{ exist_db_home }}/distribution/{{ exist_db_distribution_dir }}/data state=link

  tags: [ 'exist-db', 'exist_db' ]

- name: Download and unpack the eXist DB distribution
  block:
    - name: Download the eXist DB archive
      get_url: url={{ exist_db_distribution }} dest=/srv/exist-distribution-{{ exist_db_version }}-unix.tar.bz2

    - name: Unarchive the eXist DB distribution
      unarchive: src=/srv/exist-distribution-{{ exist_db_version }}-unix.tar.bz2 dest={{ exist_db_home }}/distribution remote_src=yes owner=root group=root
      args:
        creates: '{{ exist_db_home }}/distribution/{{ exist_db_distribution_dir }}/lib'

  tags: [ 'exist-db', 'exist_db' ]

- name: Configure the eXistDB service
  block:
    - name: Install the changes to the configuration files in the custom etc/ directory
      template: src={{ item }}.j2 dest={{ exist_db_conf_dir }}/{{ item }}
      with_items:
        - 'conf.xml'
        - 'log4j2.xml'
      notify: Restart existdb

    - name: Install the startup scripts
      template: src={{ item }}.j2 dest={{ exist_db_home }}/distribution/{{ exist_db_distribution_dir }}/bin/{{ item }} owner=root group=root mode=0755
      with_items:
        - 'startup.sh'
        - 'shutdown.sh'
        - 'backup.sh'
        - 'client.sh'
        - 'export.sh'

    - name: Install the exist-db systemd unit
      template: src=exist-db.service.j2 dest=/lib/systemd/system/exist-db.service owner=root group=root mode=0644
      register: existdb_unit_install

    - name: Reload the systemd configuration
      systemd: daemon_reload=yes
      when: existdb_unit_install is changed

    - name: Ensure that the eXistDB service is running and enabled
      service: name=exist-db state=started enabled=yes

  tags: [ 'exist-db', 'exist_db', 'exist_db_conf' ]
@ -1,86 +0,0 @@
#!/usr/bin/env sh
# eXist Open Source Native XML Database
# Copyright (C) 2019 The eXist-db Project
# info@exist-db.org
# http://www.exist-db.org
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program; if not, write to the Free Software Foundation
# Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.


# resolve links - $0 may be a softlink
PRG="$0"

while [ -h "$PRG" ]; do
  ls=`ls -ld "$PRG"`
  link=`expr "$ls" : '.*-> \(.*\)$'`
  if expr "$link" : '/.*' > /dev/null; then
    PRG="$link"
  else
    PRG=`dirname "$PRG"`/"$link"
  fi
done

PRGDIR=`dirname "$PRG"`
#BASEDIR=`cd "$PRGDIR/.." >/dev/null; pwd`
BASEDIR="{{ exist_db_base_dir }}"

# Reset the REPO variable. If you need to influence this use the environment setup file.
REPO=

# If a specific java binary isn't specified search for the standard 'java' binary
if [ -z "$JAVACMD" ] ; then
  if [ -n "$JAVA_HOME" ] ; then
    if [ -x "$JAVA_HOME/jre/sh/java" ] ; then
      # IBM's JDK on AIX uses strange locations for the executables
      JAVACMD="$JAVA_HOME/jre/sh/java"
    else
      JAVACMD="$JAVA_HOME/bin/java"
    fi
  else
    JAVACMD=`which java`
  fi
fi

if [ ! -x "$JAVACMD" ] ; then
  echo "Error: JAVA_HOME is not defined correctly." 1>&2
  echo " We cannot execute $JAVACMD" 1>&2
  exit 1
fi

if [ -z "$REPO" ]
then
  REPO="$BASEDIR"/lib
fi

CLASSPATH="$BASEDIR"/etc:"$REPO"/appassembler-booter-2.1.0.jar:"$REPO"/appassembler-model-2.1.0.jar:"$REPO"/plexus-utils-3.2.0.jar:"$REPO"/stax-api-1.0.1.jar:"$REPO"/stax-1.1.1-dev.jar

ENDORSED_DIR=
if [ -n "$ENDORSED_DIR" ] ; then
  CLASSPATH=$BASEDIR/$ENDORSED_DIR/*:$CLASSPATH
fi

if [ -n "$CLASSPATH_PREFIX" ] ; then
  CLASSPATH=$CLASSPATH_PREFIX:$CLASSPATH
fi

exec "$JAVACMD" $JAVA_OPTS -Xms128m -Dfile.encoding=UTF-8 -Dlog4j.configurationFile={{ exist_db_conf_dir }}/log4j2.xml -Dexist.home="$BASEDIR" -Dexist.configurationFile={{ exist_db_conf_dir }}/conf.xml -Djetty.home="$BASEDIR" -Dexist.jetty.config="$BASEDIR"/etc/jetty/standard.enabled-jetty-configs \
  -classpath "$CLASSPATH" \
  -Dapp.name="backup" \
  -Dapp.pid="$$" \
  -Dapp.repo="$REPO" \
  -Dapp.home="$BASEDIR" \
  -Dbasedir="$BASEDIR" \
  org.codehaus.mojo.appassembler.booter.AppassemblerBooter \
  "$@"
@ -1,85 +0,0 @@
#!/usr/bin/env sh
# eXist Open Source Native XML Database
# Copyright (C) 2019 The eXist-db Project
# info@exist-db.org
# http://www.exist-db.org
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program; if not, write to the Free Software Foundation
# Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.


# resolve links - $0 may be a softlink
PRG="$0"

while [ -h "$PRG" ]; do
  ls=`ls -ld "$PRG"`
  link=`expr "$ls" : '.*-> \(.*\)$'`
  if expr "$link" : '/.*' > /dev/null; then
    PRG="$link"
  else
    PRG=`dirname "$PRG"`/"$link"
  fi
done

PRGDIR=`dirname "$PRG"`
BASEDIR="{{ exist_db_base_dir }}"

# Reset the REPO variable. If you need to influence this use the environment setup file.
REPO=

# If a specific java binary isn't specified search for the standard 'java' binary
if [ -z "$JAVACMD" ] ; then
  if [ -n "$JAVA_HOME" ] ; then
    if [ -x "$JAVA_HOME/jre/sh/java" ] ; then
      # IBM's JDK on AIX uses strange locations for the executables
      JAVACMD="$JAVA_HOME/jre/sh/java"
    else
      JAVACMD="$JAVA_HOME/bin/java"
    fi
  else
    JAVACMD=`which java`
  fi
fi

if [ ! -x "$JAVACMD" ] ; then
  echo "Error: JAVA_HOME is not defined correctly." 1>&2
  echo " We cannot execute $JAVACMD" 1>&2
  exit 1
fi

if [ -z "$REPO" ]
then
  REPO="$BASEDIR"/lib
fi

CLASSPATH="$BASEDIR"/etc:"$REPO"/appassembler-booter-2.1.0.jar:"$REPO"/appassembler-model-2.1.0.jar:"$REPO"/plexus-utils-3.2.0.jar:"$REPO"/stax-api-1.0.1.jar:"$REPO"/stax-1.1.1-dev.jar

ENDORSED_DIR=
if [ -n "$ENDORSED_DIR" ] ; then
  CLASSPATH=$BASEDIR/$ENDORSED_DIR/*:$CLASSPATH
fi

if [ -n "$CLASSPATH_PREFIX" ] ; then
  CLASSPATH=$CLASSPATH_PREFIX:$CLASSPATH
fi

exec "$JAVACMD" $JAVA_OPTS -Xms128m -Dfile.encoding=UTF-8 -Dlog4j.configurationFile={{ exist_db_conf_dir }}/log4j2.xml -Dexist.home="$BASEDIR" -Dexist.configurationFile={{ exist_db_conf_dir }}/conf.xml -Djetty.home="$BASEDIR" -Dexist.jetty.config="$BASEDIR"/etc/jetty/standard.enabled-jetty-configs \
  -classpath "$CLASSPATH" \
  -Dapp.name="client" \
  -Dapp.pid="$$" \
  -Dapp.repo="$REPO" \
  -Dapp.home="$BASEDIR" \
  -Dbasedir="$BASEDIR" \
  org.codehaus.mojo.appassembler.booter.AppassemblerBooter \
  "$@"
File diff suppressed because it is too large
@ -1,20 +0,0 @@
[Unit]
Description=eXist-db {{ exist_db_version }} Server
Documentation=http://www.exist-db.org/exist/apps/doc/documentation
After=syslog.target network.target

[Service]
#Type=forking
Type=simple
SyslogIdentifier=existdb
User={{ exist_db_user }}
Group={{ exist_db_group }}
ExecStart={{ exist_db_base_dir }}/bin/startup.sh
ExecStop={{ exist_db_base_dir }}/bin/shutdown.sh
Restart=on-failure
RestartSec=30s
SuccessExitStatus=143

[Install]
WantedBy=multi-user.target
@ -1,85 +0,0 @@
#!/usr/bin/env sh
# eXist Open Source Native XML Database
# Copyright (C) 2019 The eXist-db Project
# info@exist-db.org
# http://www.exist-db.org
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program; if not, write to the Free Software Foundation
# Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.


# resolve links - $0 may be a softlink
PRG="$0"

while [ -h "$PRG" ]; do
  ls=`ls -ld "$PRG"`
  link=`expr "$ls" : '.*-> \(.*\)$'`
  if expr "$link" : '/.*' > /dev/null; then
    PRG="$link"
  else
    PRG=`dirname "$PRG"`/"$link"
  fi
done

PRGDIR=`dirname "$PRG"`
BASEDIR="{{ exist_db_base_dir }}"

# Reset the REPO variable. If you need to influence this use the environment setup file.
REPO=

# If a specific java binary isn't specified search for the standard 'java' binary
if [ -z "$JAVACMD" ] ; then
  if [ -n "$JAVA_HOME" ] ; then
    if [ -x "$JAVA_HOME/jre/sh/java" ] ; then
      # IBM's JDK on AIX uses strange locations for the executables
      JAVACMD="$JAVA_HOME/jre/sh/java"
    else
      JAVACMD="$JAVA_HOME/bin/java"
    fi
  else
    JAVACMD=`which java`
  fi
fi

if [ ! -x "$JAVACMD" ] ; then
  echo "Error: JAVA_HOME is not defined correctly." 1>&2
  echo " We cannot execute $JAVACMD" 1>&2
  exit 1
fi

if [ -z "$REPO" ]
then
  REPO="$BASEDIR"/lib
fi

CLASSPATH="$BASEDIR"/etc:"$REPO"/appassembler-booter-2.1.0.jar:"$REPO"/appassembler-model-2.1.0.jar:"$REPO"/plexus-utils-3.2.0.jar:"$REPO"/stax-api-1.0.1.jar:"$REPO"/stax-1.1.1-dev.jar

ENDORSED_DIR=
if [ -n "$ENDORSED_DIR" ] ; then
  CLASSPATH=$BASEDIR/$ENDORSED_DIR/*:$CLASSPATH
fi

if [ -n "$CLASSPATH_PREFIX" ] ; then
  CLASSPATH=$CLASSPATH_PREFIX:$CLASSPATH
fi

exec "$JAVACMD" $JAVA_OPTS -Xms128m -Dfile.encoding=UTF-8 -Dlog4j.configurationFile={{ exist_db_conf_dir }}/log4j2.xml -Dexist.home="$BASEDIR" -Dexist.configurationFile={{ exist_db_conf_dir }}/conf.xml -Djetty.home="$BASEDIR" -Dexist.jetty.config="$BASEDIR"/etc/jetty/standard.enabled-jetty-configs \
  -classpath "$CLASSPATH" \
  -Dapp.name="export" \
  -Dapp.pid="$$" \
  -Dapp.repo="$REPO" \
  -Dapp.home="$BASEDIR" \
  -Dbasedir="$BASEDIR" \
  org.codehaus.mojo.appassembler.booter.AppassemblerBooter \
  "$@"
@ -1,252 +0,0 @@
<?xml version="1.0" encoding="UTF-8"?>
<Configuration xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance">
  <Properties>
    <Property name="logs">{{ exist_db_logdir }}</Property>
{% raw %}
    <Property name="rollover.max.size">10MB</Property>
    <Property name="rollover.max">14</Property>
    <Property name="rollover.file.pattern">%d{yyyyMMddHHmmss}</Property>
    <Property name="exist.file.pattern">%d [%t] %-5p (%F [%M]:%L) - %m %n</Property>
  </Properties>

  <Appenders>

    <Console name="STDOUT">
      <PatternLayout pattern="%d{DATE} [%t] %-5p (%F [%M]:%L) - %m %n"/>
    </Console>

    <RollingRandomAccessFile name="exist.core" filePattern="${logs}/exist.${rollover.file.pattern}.log.gz" fileName="${logs}/exist.log">
      <Policies>
        <SizeBasedTriggeringPolicy size="${rollover.max.size}"/>
      </Policies>
      <DefaultRolloverStrategy max="${rollover.max}"/>
      <PatternLayout pattern="${exist.file.pattern}"/>
    </RollingRandomAccessFile>

    <RollingRandomAccessFile name="exist.locks" filePattern="${logs}/locks.${rollover.file.pattern}.log.gz" fileName="${logs}/locks.log">
      <Policies>
        <SizeBasedTriggeringPolicy size="${rollover.max.size}"/>
      </Policies>
      <DefaultRolloverStrategy max="${rollover.max}"/>
      <PatternLayout pattern="%d %-5p - %m %n"/>
    </RollingRandomAccessFile>

    <RollingRandomAccessFile name="exist.ensurelocking" filePattern="${logs}/locks.${rollover.file.pattern}.log.gz" fileName="${logs}/ensure-locking.log">
      <Policies>
        <SizeBasedTriggeringPolicy size="${rollover.max.size}"/>
      </Policies>
      <DefaultRolloverStrategy max="${rollover.max}"/>
      <PatternLayout pattern="%d %-5p - %m %n"/>
    </RollingRandomAccessFile>

    <RollingRandomAccessFile name="exist.xmldb" filePattern="${logs}/xmldb.${rollover.file.pattern}.log.gz" fileName="${logs}/xmldb.log">
      <Policies>
        <SizeBasedTriggeringPolicy size="${rollover.max.size}"/>
      </Policies>
      <DefaultRolloverStrategy max="${rollover.max}"/>
      <PatternLayout pattern="${exist.file.pattern}"/>
    </RollingRandomAccessFile>

    <RollingRandomAccessFile name="exist.xmlrpc" filePattern="${logs}/xmlrpc.${rollover.file.pattern}.log.gz" fileName="${logs}/xmlrpc.log">
      <Policies>
        <SizeBasedTriggeringPolicy size="${rollover.max.size}"/>
      </Policies>
      <DefaultRolloverStrategy max="${rollover.max}"/>
      <PatternLayout pattern="${exist.file.pattern}"/>
    </RollingRandomAccessFile>

    <RollingRandomAccessFile name="exist.urlrewrite" filePattern="${logs}/urlrewrite.${rollover.file.pattern}.log.gz" fileName="${logs}/urlrewrite.log">
      <Policies>
        <SizeBasedTriggeringPolicy size="${rollover.max.size}"/>
      </Policies>
      <DefaultRolloverStrategy max="${rollover.max}"/>
      <PatternLayout pattern="${exist.file.pattern}"/>
    </RollingRandomAccessFile>

    <RollingRandomAccessFile name="exist.profiling" filePattern="${logs}/profile.${rollover.file.pattern}.log.gz" fileName="${logs}/profile.log">
      <Policies>
        <SizeBasedTriggeringPolicy size="${rollover.max.size}"/>
      </Policies>
      <DefaultRolloverStrategy max="${rollover.max}"/>
      <PatternLayout pattern="${exist.file.pattern}"/>
    </RollingRandomAccessFile>

    <RollingRandomAccessFile name="exist.scheduler" filePattern="${logs}/scheduler.${rollover.file.pattern}.log.gz" fileName="${logs}/scheduler.log">
      <Policies>
        <SizeBasedTriggeringPolicy size="${rollover.max.size}"/>
      </Policies>
      <DefaultRolloverStrategy max="${rollover.max}"/>
      <PatternLayout pattern="${exist.file.pattern}"/>
    </RollingRandomAccessFile>

    <RollingRandomAccessFile name="exist.ehcache" filePattern="${logs}/ehcache.${rollover.file.pattern}.log.gz" fileName="${logs}/ehcache.log">
      <Policies>
        <SizeBasedTriggeringPolicy size="${rollover.max.size}"/>
      </Policies>
      <DefaultRolloverStrategy max="${rollover.max}"/>
      <PatternLayout pattern="${exist.file.pattern}"/>
    </RollingRandomAccessFile>

    <RollingRandomAccessFile name="exist.betterform" filePattern="${logs}/betterform.${rollover.file.pattern}.log.gz" fileName="${logs}/betterform.log">
      <Policies>
        <SizeBasedTriggeringPolicy size="${rollover.max.size}"/>
      </Policies>
      <DefaultRolloverStrategy max="${rollover.max}"/>
      <PatternLayout pattern="${exist.file.pattern}"/>
    </RollingRandomAccessFile>

    <RollingRandomAccessFile name="exist.restxq" filePattern="${logs}/restxq.${rollover.file.pattern}.log.gz" fileName="${logs}/restxq.log">
      <Policies>
        <SizeBasedTriggeringPolicy size="${rollover.max.size}"/>
      </Policies>
      <DefaultRolloverStrategy max="${rollover.max}"/>
      <PatternLayout pattern="${exist.file.pattern}"/>
    </RollingRandomAccessFile>

    <RollingRandomAccessFile name="exist.backup" filePattern="${logs}/backup.${rollover.file.pattern}.log.gz" fileName="${logs}/backup.log">
      <Policies>
        <SizeBasedTriggeringPolicy size="${rollover.max.size}"/>
      </Policies>
      <DefaultRolloverStrategy max="${rollover.max}"/>
      <PatternLayout pattern="${exist.file.pattern}"/>
    </RollingRandomAccessFile>

    <RollingRandomAccessFile name="exist.statistics" filePattern="${logs}/statistics.${rollover.file.pattern}.log.gz" fileName="${logs}/statistics.log">
      <Policies>
        <SizeBasedTriggeringPolicy size="${rollover.max.size}"/>
      </Policies>
      <DefaultRolloverStrategy max="${rollover.max}"/>
      <PatternLayout pattern="${exist.file.pattern}"/>
    </RollingRandomAccessFile>

    <RollingRandomAccessFile name="expath.repo" filePattern="${logs}/expath-repo.${rollover.file.pattern}.log.gz" fileName="${logs}/expath-repo.log">
      <Policies>
        <SizeBasedTriggeringPolicy size="${rollover.max.size}"/>
      </Policies>
      <DefaultRolloverStrategy max="${rollover.max}"/>
      <PatternLayout pattern="${exist.file.pattern}"/>
    </RollingRandomAccessFile>

    <RollingRandomAccessFile name="exist.launcher" filePattern="${logs}/launcher.${rollover.file.pattern}.log.gz" fileName="${logs}/launcher.log">
      <Policies>
        <SizeBasedTriggeringPolicy size="${rollover.max.size}"/>
      </Policies>
      <DefaultRolloverStrategy max="${rollover.max}"/>
      <PatternLayout pattern="${exist.file.pattern}"/>
    </RollingRandomAccessFile>

  </Appenders>

  <Loggers>

    <Logger name="org.exist.storage.lock.LockTable" additivity="false" level="info"> <!-- set to level="trace" to see lock activity -->
      <AppenderRef ref="exist.locks"/>
    </Logger>

    <Logger name="org.exist.storage.lock.EnsureLockingAspect" additivity="false" level="trace">
      <AppenderRef ref="exist.ensurelocking"/>
    </Logger>

    <Logger name="org.exist.util.sanity.SanityCheck" additivity="false" level="info"> <!-- set to level="trace" to enable all sanity check logging -->
      <AppenderRef ref="exist.core"/>
    </Logger>

    <Logger name="org.exist.jetty.JettyStart" additivity="false" level="trace">
      <AppenderRef ref="STDOUT"/>
      <AppenderRef ref="exist.core"/>
    </Logger>

    <Logger name="org.exist.jetty.StandaloneServer" additivity="false" level="trace">
      <AppenderRef ref="STDOUT"/>
      <AppenderRef ref="exist.core"/>
    </Logger>

    <Logger name="xquery.profiling" additivity="false" level="trace">
      <AppenderRef ref="exist.profiling"/>
    </Logger>

    <Logger name="org.exist.statistics" additivity="false" level="debug">
      <AppenderRef ref="exist.core"/>
      <AppenderRef ref="exist.statistics"/>
    </Logger>

    <Logger name="org.exist.backup.SystemExport" additivity="false" level="trace">
      <AppenderRef ref="exist.backup"/>
    </Logger>

    <Logger name="org.exist.xmldb" additivity="false" level="info">
      <AppenderRef ref="exist.xmldb"/>
    </Logger>

    <Logger name="org.exist.xmlrpc" additivity="false" level="info">
      <AppenderRef ref="exist.xmlrpc"/>
    </Logger>

    <Logger name="org.apache.xmlrpc" additivity="false" level="info">
      <AppenderRef ref="exist.xmlrpc"/>
    </Logger>

    <Logger name="org.exist.http.urlrewrite" additivity="false" level="info">
      <!-- set to "trace" to get detailed info on URL rewriting -->
      <AppenderRef ref="exist.urlrewrite"/>
    </Logger>

    <Logger name="org.exist.extensions.exquery.restxq" additivity="false" level="info">
      <AppenderRef ref="exist.restxq"/>
    </Logger>

    <Logger name="org.eclipse.jetty" additivity="false" level="info">
      <AppenderRef ref="exist.core"/>
    </Logger>

    <Logger name="httpclient" additivity="false" level="info">
      <AppenderRef ref="exist.core"/>
    </Logger>

    <Logger name="net.sf.ehcache" additivity="false" level="info">
      <AppenderRef ref="exist.ehcache"/>
    </Logger>

    <!-- Milton WebDAV framework -->
    <Logger name="com.bradmcevoy" additivity="false" level="info">
      <AppenderRef ref="exist.core"/>
    </Logger>

    <!-- Apache Axis: 'debug' produces a large amount of debugging output -->
    <Logger name="org.apache" additivity="false" level="off">
      <AppenderRef ref="exist.core"/>
    </Logger>

    <!-- Quartz scheduler -->
    <Logger name="org.quartz" additivity="false" level="info">
      <AppenderRef ref="exist.scheduler"/>
    </Logger>

    <Logger name="org.exist.storage.SystemTask" additivity="false" level="debug">
      <AppenderRef ref="exist.scheduler"/>
    </Logger>

    <!-- betterFORM extension -->
    <Logger name="de.betterform" additivity="false" level="warn">
      <AppenderRef ref="exist.betterform"/>
    </Logger>

    <!-- expath pkg repo -->
    <Logger name="org.expath.pkg" additivity="false" level="info">
      <AppenderRef ref="expath.repo"/>
    </Logger>
    <Logger name="org.exist.repo" additivity="false" level="info">
      <AppenderRef ref="expath.repo"/>
    </Logger>
    <Logger name="org.exist.launcher" additivity="false" level="warn">
      <AppenderRef ref="exist.launcher"/>
    </Logger>

    <Root level="info">
      <AppenderRef ref="exist.core"></AppenderRef>
    </Root>

  </Loggers>

</Configuration>
{% endraw %}
@ -1,86 +0,0 @@
#!/usr/bin/env sh
# eXist Open Source Native XML Database
# Copyright (C) 2019 The eXist-db Project
# info@exist-db.org
# http://www.exist-db.org
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program; if not, write to the Free Software Foundation
# Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.


# resolve links - $0 may be a softlink
PRG="$0"

while [ -h "$PRG" ]; do
  ls=`ls -ld "$PRG"`
  link=`expr "$ls" : '.*-> \(.*\)$'`
  if expr "$link" : '/.*' > /dev/null; then
    PRG="$link"
  else
    PRG=`dirname "$PRG"`/"$link"
  fi
done

PRGDIR=`dirname "$PRG"`
#BASEDIR=`cd "$PRGDIR/.." >/dev/null; pwd`
BASEDIR="{{ exist_db_base_dir }}"

# Reset the REPO variable. If you need to influence this use the environment setup file.
REPO=

# If a specific java binary isn't specified search for the standard 'java' binary
if [ -z "$JAVACMD" ] ; then
  if [ -n "$JAVA_HOME" ] ; then
    if [ -x "$JAVA_HOME/jre/sh/java" ] ; then
      # IBM's JDK on AIX uses strange locations for the executables
      JAVACMD="$JAVA_HOME/jre/sh/java"
    else
      JAVACMD="$JAVA_HOME/bin/java"
    fi
  else
    JAVACMD=`which java`
  fi
fi

if [ ! -x "$JAVACMD" ] ; then
  echo "Error: JAVA_HOME is not defined correctly." 1>&2
  echo " We cannot execute $JAVACMD" 1>&2
  exit 1
fi

if [ -z "$REPO" ]
then
  REPO="$BASEDIR"/lib
fi

CLASSPATH="$BASEDIR"/etc:"$REPO"/appassembler-booter-2.1.0.jar:"$REPO"/appassembler-model-2.1.0.jar:"$REPO"/plexus-utils-3.2.0.jar:"$REPO"/stax-api-1.0.1.jar:"$REPO"/stax-1.1.1-dev.jar

ENDORSED_DIR=
if [ -n "$ENDORSED_DIR" ] ; then
  CLASSPATH=$BASEDIR/$ENDORSED_DIR/*:$CLASSPATH
fi

if [ -n "$CLASSPATH_PREFIX" ] ; then
  CLASSPATH=$CLASSPATH_PREFIX:$CLASSPATH
fi

exec "$JAVACMD" $JAVA_OPTS {{ exist_db_java_opts }} -Dlog4j.configurationFile={{ exist_db_conf_dir }}/log4j2.xml -Dexist.home="$BASEDIR" -Dexist.configurationFile={{ exist_db_conf_dir }}/conf.xml -Djetty.home="$BASEDIR" -Dexist.jetty.config="$BASEDIR"/etc/jetty/standard.enabled-jetty-configs \
  -classpath "$CLASSPATH" \
  -Dapp.name="shutdown" \
  -Dapp.pid="$$" \
  -Dapp.repo="$REPO" \
  -Dapp.home="$BASEDIR" \
  -Dbasedir="$BASEDIR" \
  org.codehaus.mojo.appassembler.booter.AppassemblerBooter \
  "$@"
@ -1,86 +0,0 @@
#!/usr/bin/env sh
# eXist Open Source Native XML Database
# Copyright (C) 2019 The eXist-db Project
# info@exist-db.org
# http://www.exist-db.org
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program; if not, write to the Free Software Foundation
# Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.

# resolve links - $0 may be a softlink
PRG="$0"

while [ -h "$PRG" ]; do
  ls=`ls -ld "$PRG"`
  link=`expr "$ls" : '.*-> \(.*\)$'`
  if expr "$link" : '/.*' > /dev/null; then
    PRG="$link"
  else
    PRG=`dirname "$PRG"`/"$link"
  fi
done

PRGDIR=`dirname "$PRG"`
#BASEDIR=`cd "$PRGDIR/.." >/dev/null; pwd`
BASEDIR="{{ exist_db_base_dir }}"

# Reset the REPO variable. If you need to influence this use the environment setup file.
REPO=

# If a specific java binary isn't specified search for the standard 'java' binary
if [ -z "$JAVACMD" ] ; then
  if [ -n "$JAVA_HOME" ] ; then
    if [ -x "$JAVA_HOME/jre/sh/java" ] ; then
      # IBM's JDK on AIX uses strange locations for the executables
      JAVACMD="$JAVA_HOME/jre/sh/java"
    else
      JAVACMD="$JAVA_HOME/bin/java"
    fi
  else
    JAVACMD=`which java`
  fi
fi

if [ ! -x "$JAVACMD" ] ; then
  echo "Error: JAVA_HOME is not defined correctly." 1>&2
  echo "  We cannot execute $JAVACMD" 1>&2
  exit 1
fi

if [ -z "$REPO" ]
then
  REPO="$BASEDIR"/lib
fi

CLASSPATH="$BASEDIR"/etc:"$REPO"/appassembler-booter-2.1.0.jar:"$REPO"/appassembler-model-2.1.0.jar:"$REPO"/plexus-utils-3.2.0.jar:"$REPO"/stax-api-1.0.1.jar:"$REPO"/stax-1.1.1-dev.jar

ENDORSED_DIR=
if [ -n "$ENDORSED_DIR" ] ; then
  CLASSPATH=$BASEDIR/$ENDORSED_DIR/*:$CLASSPATH
fi

if [ -n "$CLASSPATH_PREFIX" ] ; then
  CLASSPATH=$CLASSPATH_PREFIX:$CLASSPATH
fi

exec "$JAVACMD" $JAVA_OPTS {{ exist_db_java_opts }} -Dlog4j.configurationFile={{ exist_db_conf_dir }}/log4j2.xml -Dexist.home="$BASEDIR" -Dexist.configurationFile={{ exist_db_conf_dir }}/conf.xml -Djetty.home="$BASEDIR" -Dexist.jetty.config="$BASEDIR"/etc/jetty/standard.enabled-jetty-configs \
  -classpath "$CLASSPATH" \
  -Dapp.name="startup" \
  -Dapp.pid="$$" \
  -Dapp.repo="$REPO" \
  -Dapp.home="$BASEDIR" \
  -Dbasedir="$BASEDIR" \
  org.codehaus.mojo.appassembler.booter.AppassemblerBooter \
  "$@"
@ -1,3 +0,0 @@
---
exist_db_user: existdb
exist_db_group: '{{ exist_db_user }}'
@ -1,10 +1,5 @@
---
dependencies:
  - src: git+https://gitea-s2i2s.isti.cnr.it/ISTI-ansible-roles/ansible-role-memcached.git
    version: master
    name: memcached
    state: latest
  - src: git+https://gitea-s2i2s.isti.cnr.it/ISTI-ansible-roles/ansible-role-redis.git
    version: master
    name: redis
    state: latest
  - { role: '../../library/roles/memcached', when: freeradius_local_memcache_support }
  - { role: '../../library/roles/redis', when: freeradius_local_redis_support }
@ -330,13 +330,11 @@
</init-param>

<!--
Specifies what geonetwork data directory to use. -->
Specifies what geonetwork data directory to use.
<init-param>
    <param-name>geonetwork.dir</param-name>
    <param-value>{{ geonetwork_data_directory }}</param-value>
</init-param>

<param-value>/app/geonetwork_data_dir</param-value>
</init-param>-->
<load-on-startup>1</load-on-startup>
</servlet>
@ -0,0 +1,79 @@
---
#
# https://gitea.io
#
# By default we use the server's ssh daemon and put nginx in front of the service,
# so we do not start in http mode and do not use the embedded letsencrypt support.
#
gitea_version: 1.8
gitea_download_url: 'https://dl.gitea.io/gitea/{{ gitea_version }}/gitea-{{ gitea_version }}-linux-amd64'
gitea_bin_path: /usr/local/bin/gitea

gitea_conf_dir: /etc/gitea
gitea_data_dir: /var/lib/gitea
gitea_data_subdirs:
  - custom
  - data
  - log
  - sock

gitea_repository_data: '{{ gitea_data_dir }}/repositories'

gitea_server_protocol: unix
gitea_http_addr: '{{ gitea_data_dir }}/sock/gitea.sock'
gitea_http_port: 3000
gitea_root_url: https://{{ ansible_fqdn }}

# home, explore
gitea_landing_page: explore
gitea_user: gitea
gitea_group: '{{ gitea_user }}'
gitea_run_mode: prod
gitea_db: postgres
gitea_local_postgresql: True
gitea_local_mysql: False
gitea_local_mariadb: False
gitea_nginx_frontend: True
gitea_local_redis: True
gitea_local_memcache: True

gitea_start_lfs: 'true'
gitea_lfs_content_path: '{{ gitea_data_dir }}/data/lfs'
#gitea_lfs_jwt_secret: put it into a vault file
gitea_lfs_http_auth_expiry: 20m

gitea_required_packages:
  - git

gitea_db_name: gitea
gitea_db_user: gitea_u
#gitea_db_pwd: put it into a vault file
gitea_db_host: localhost
gitea_db_port: 5432
gitea_db_ssl_mode: 'disable'

gitea_app_name: "Gitea"
gitea_disable_registration: 'false'
gitea_install_lock: 'false'
gitea_mailer_enabled: False
gitea_mail_from: gitea@localhost
gitea_mailer_type: sendmail
gitea_sendmail_path: /usr/sbin/sendmail
gitea_cache_provider: memcache
gitea_cache_host: '127.0.0.1:11211'
gitea_session_provider: redis
gitea_session_config: 'network=tcp,addr=127.0.0.1:6379,db=0,pool_size=100,idle_timeout=180'

gitea_prometheus_metrics: False
#gitea_prometheus_bearer_token: put it into a vault file
gitea_prometheus_bearer_token: ''
gitea_log_level: Info

# gitea_app_configurations:
#   - { section: 'mailer', option: 'ENABLED', value: 'true', state: 'present' }
#   - { section: 'mailer', option: 'FROM', value: '{{ gitea_mail_from }}', state: 'present' }
#   - { section: 'mailer', option: 'MAILER_TYPE', value: '{{ gitea_mailer_type }}', state: 'present' }
#   - { section: 'mailer', option: 'SENDMAIL_PATH', value: '{{ gitea_sendmail_path }}', state: 'present' }
#   - { section: 'metrics', option: 'ENABLED', value: 'true', state: 'present' }
#   - { section: 'metrics', option: 'TOKEN', value: '{{ gitea_prometheus_bearer_token }}', state: 'present' }
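The defaults above drive the app.ini template and the systemd unit further down. As a hedged sketch, a host that needs the mailer and the Prometheus metrics endpoint could override the relevant toggles in its group_vars; the variable names come from the defaults file, while the vault_* values are placeholders that belong in a vault file:

---
# Hypothetical group_vars override; the secrets are placeholders.
gitea_mailer_enabled: True
gitea_mail_from: gitea@example.org
gitea_prometheus_metrics: True
gitea_prometheus_bearer_token: '{{ vault_gitea_prometheus_bearer_token }}'
gitea_db_pwd: '{{ vault_gitea_db_pwd }}'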
@ -0,0 +1,6 @@
---
- name: reload gitea
  service: name=gitea state=reloaded

- name: restart gitea
  service: name=gitea state=restarted
@ -0,0 +1,7 @@
---
dependencies:
  - { role: '../../library/roles/postgresql', when: gitea_local_postgresql }
  - { role: '../../library/roles/mysql', when: gitea_local_mysql }
  - { role: '../../library/roles/nginx', when: gitea_nginx_frontend }
  - { role: '../../library/roles/redis', when: gitea_local_redis }
  - { role: '../../library/roles/memcached', when: gitea_local_memcache }
@ -0,0 +1,46 @@
---
- block:
    - name: Create the gitea service user
      user: name={{ gitea_user }} home=/srv/gitea createhome=yes shell=/bin/bash system=yes

    - name: Create the gitea directory tree
      file: dest={{ gitea_data_dir }}/{{ item }} state=directory owner={{ gitea_user }} group={{ gitea_group }}
      with_items: '{{ gitea_data_subdirs }}'

    - name: Create the gitea conf directory
      file: dest={{ gitea_conf_dir }} state=directory owner=root group={{ gitea_group }} mode=0750

    - name: Download the gitea binary
      get_url: url={{ gitea_download_url }} dest={{ gitea_bin_path }} owner=root group={{ gitea_group }} mode=0750

    - name: Install the required packages
      package: state=present use=auto name={{ gitea_required_packages }}

    - name: Check if the gitea configuration file exists
      stat: path={{ gitea_conf_dir }}/app.ini
      register: gitea_app_ini

    - name: Change the gitea configuration after the installation
      ini_file: path={{ gitea_conf_dir }}/app.ini section={{ item.section }} option={{ item.option }} value={{ item.value }} state={{ item.state }} owner={{ gitea_user }} group={{ gitea_group }} mode=0640 create=no
      with_items: '{{ gitea_app_configurations }}'
      when:
        - gitea_app_ini.stat.exists
        - gitea_app_configurations is defined
      notify: restart gitea

    - name: Install the gitea configuration file, at install time only
      template: src=app.ini.j2 dest={{ gitea_conf_dir }}/app.ini owner={{ gitea_user }} group={{ gitea_group }} mode=0640 force=no
      notify: restart gitea

    - name: Install the gitea systemd unit
      template: src=gitea.service.systemd dest=/etc/systemd/system/gitea.service
      register: gitea_systemd_unit

    - name: Reload the systemd configuration
      command: systemctl daemon-reload
      when: gitea_systemd_unit is changed

    - name: Ensure that the gitea service is running and enabled
      service: name=gitea state=started enabled=yes

  tags: [ 'git', 'gitea' ]
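With the meta dependencies above pulling in the database, cache, and nginx roles, the role is self-contained; a minimal playbook sketch applying it could look like the following. The inventory group name is hypothetical, and the role path follows the layout used elsewhere in this repository:

---
- hosts: gitea_servers  # hypothetical inventory group
  become: yes
  roles:
    - role: '../../library/roles/gitea'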
@ -0,0 +1,65 @@
APP_NAME = {{ gitea_app_name }}
RUN_USER = {{ gitea_user }}
RUN_MODE = {{ gitea_run_mode }}

[repository]
ROOT = {{ gitea_repository_data }}

[server]
PROTOCOL = {{ gitea_server_protocol }}
HTTP_ADDR = {{ gitea_http_addr }}
LANDING_PAGE = {{ gitea_landing_page }}
LFS_START_SERVER = {{ gitea_start_lfs }}
LFS_CONTENT_PATH = {{ gitea_lfs_content_path }}
LFS_HTTP_AUTH_EXPIRY = {{ gitea_lfs_http_auth_expiry }}
SSH_DOMAIN = localhost
DOMAIN = localhost
HTTP_PORT = {{ gitea_http_port }}
ROOT_URL = {{ gitea_root_url }}
DISABLE_SSH = false
SSH_PORT = 22
OFFLINE_MODE = false

[database]
DB_TYPE = {{ gitea_db }}
HOST = {{ gitea_db_host }}:{{ gitea_db_port }}
NAME = {{ gitea_db_name }}
USER = {{ gitea_db_user }}
PASSWD = {{ gitea_db_pwd }}
SSL_MODE = {{ gitea_db_ssl_mode }}

[security]
INSTALL_LOCK = {{ gitea_install_lock }}

[service]
DISABLE_REGISTRATION = {{ gitea_disable_registration }}
NO_REPLY_ADDRESS = {{ gitea_mail_from }}

{% if gitea_mailer_enabled %}
[mailer]
ENABLED = true
FROM = {{ gitea_mail_from }}
MAILER_TYPE = {{ gitea_mailer_type }}
SENDMAIL_PATH = {{ gitea_sendmail_path }}
{% endif %}

[cache]
ADAPTER = {{ gitea_cache_provider }}
HOST = {{ gitea_cache_host }}

[session]
PROVIDER = {{ gitea_session_provider }}
PROVIDER_CONFIG = {{ gitea_session_config }}

{% if gitea_prometheus_metrics %}
[metrics]
ENABLED = true
TOKEN = '{{ gitea_prometheus_bearer_token }}'
{% endif %}

[other]
SHOW_FOOTER_VERSION = false
SHOW_FOOTER_TEMPLATE_LOAD_TIME = false

[log]
LEVEL = {{ gitea_log_level }}
@ -0,0 +1,42 @@
[Unit]
Description=Gitea (Git with a cup of tea)
After=syslog.target
After=network.target
{% if gitea_local_mysql %}
Requires=mysql.service
{% endif %}
#Requires=mariadb.service
{% if gitea_local_postgresql %}
Requires=postgresql.service
{% endif %}
{% if gitea_local_redis %}
Requires=redis.service
{% endif %}
{% if gitea_local_memcache %}
Requires=memcached.service
{% endif %}

[Service]
# Uncomment and adjust the two values below if you have
# repositories with lots of files and get an HTTP error 500
# because of that
###
#LimitMEMLOCK=infinity
#LimitNOFILE=65535
RestartSec=2s
Type=simple
User={{ gitea_user }}
Group={{ gitea_user }}
WorkingDirectory={{ gitea_data_dir }}
ExecStart=/usr/local/bin/gitea web -c {{ gitea_conf_dir }}/app.ini
Restart=always
Environment=USER={{ gitea_user }} HOME=/srv/gitea GITEA_WORK_DIR={{ gitea_data_dir }}
# If you want to bind Gitea to a port below 1024, uncomment
# the two values below
###
#CapabilityBoundingSet=CAP_NET_BIND_SERVICE
#AmbientCapabilities=CAP_NET_BIND_SERVICE

[Install]
WantedBy=multi-user.target
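If the HTTP error 500 situation mentioned in the unit comments shows up, the limits could also be raised without editing the template, via a systemd drop-in. The following Ansible sketch is a hedged alternative, not part of the role; the drop-in path and limit values are illustrative, and it assumes the "restart gitea" handler defined above:

---
# Hypothetical tasks: raise the limits that the unit template keeps commented out.
- name: Create the gitea systemd drop-in directory
  file: dest=/etc/systemd/system/gitea.service.d state=directory

- name: Raise the gitea resource limits via a systemd drop-in
  copy:
    dest: /etc/systemd/system/gitea.service.d/limits.conf
    content: |
      [Service]
      LimitMEMLOCK=infinity
      LimitNOFILE=65535
  # a systemctl daemon-reload is needed before the restart, as in the tasks above
  notify: restart gitea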
@ -0,0 +1,6 @@
---
nginx_use_common_virthost: True
redis_install: True
http_port: 80
https_port: 443
@ -0,0 +1,82 @@
---
cdh_manager_install: False
cdh_version: 5.9.3
#cdh_manager_repo: 'deb https://archive.cloudera.com/cm5/ubuntu/trusty/amd64/cm/dists/trusty-cm{{ cdh_version }}/'
cdh_packages_repo: 'deb [arch=amd64] http://archive.cloudera.com/cm5/ubuntu/trusty/amd64/cm trusty-cm5 contrib'
cdh_manager_repo_url: 'https://archive.cloudera.com/cdh5/ubuntu/trusty/amd64/cdh/cloudera.list'
#cdh_manager_download_url: 'http://archive.cloudera.com/cm5/installer/5.9.3/cloudera-manager-installer.bin'
cdh_csd_directory: /opt/cloudera/csd

# Set it to true on the oozie server nodes
cdh_oozie_server: False
cdh_oozie_ext_libs_url: 'https://archive.cloudera.com/gplextras/misc/ext-2.2.zip'

cdh_use_separate_postgres_db: True
cdh_postgres_db_host: localhost
cdh_postgres_db_name: cdh
cdh_postgres_db_user: cdh
cdh_postgres_db_schema: cdh
cdh_postgresql_version: 9.4
postgresql_jdbc_driver_version: 42.1.4

###
# Spark2: the installation is completely manual, see
# https://www.cloudera.com/documentation/spark2/latest/topics/spark2_installing.html
#
# To integrate spark2 and oozie:
# https://docs.hortonworks.com/HDPDocuments/HDP2/HDP-2.6.0/bk_spark-component-guide/content/ch_oozie-spark-action.html#spark-config-oozie-spark2
# https://community.cloudera.com/t5/Advanced-Analytics-Apache-Spark/Spark2-classpath-issues-with-Oozie/td-p/59782

cdh_spark2_enabled: True
cdh_spark2_jar: SPARK2_ON_YARN-2.2.0.cloudera2.jar
cdh_spark2_csd_url: 'http://archive.cloudera.com/spark2/csd/{{ cdh_spark2_jar }}'

cdh_zeppelin_node: False
cdh_zeppelin_version: 0.7.3
cdh_zeppelin_dir: 'zeppelin-{{ cdh_zeppelin_version }}-bin-all'
chd_zeppelin_archive: '{{ cdh_zeppelin_dir }}.tgz'
cdh_zeppelin_download_url: 'http://mirror.nohup.it/apache/zeppelin/zeppelin-{{ cdh_zeppelin_version }}/{{ chd_zeppelin_archive }}'
cdh_zeppelin_user: zeppelin
cdh_zeppelin_group: '{{ cdh_zeppelin_user }}'
cdh_zeppelin_http_port: 8080
cdh_zeppelin_home: /srv/zeppelin
cdh_zeppelin_work_dirs:
  - '{{ cdh_zeppelin_home }}/notebook'
  - '{{ cdh_zeppelin_home }}/log'
  - '{{ cdh_zeppelin_home }}/run'
  - '{{ cdh_zeppelin_home }}/base_tmp/tmp'

cdh_zeppelin_conf_files:
  - zeppelin-env.sh
  - shiro.ini
  - zeppelin-site.xml
cdh_zeppelin_ldap_auth: True
cdh_zeppelin_notebook_public: 'false'
cdh_zeppelin_dedicated_node: False
cdh_zeppelin_use_spark2: '{{ cdh_spark2_enabled }}'

cdh_impala_load_balancer: False

cdh_zeppelin_ldap_enabled: False
cdh_zeppelin_ldap_advanced_config: True
cdh_zeppelin_ldap_starttls: 'true'
cdh_zeppelin_search_bind_authentication: 'false'
cdh_zeppelin_username_pattern: "uid={0},ou=People,dc=mycompany,dc=com"
cdh_zeppelin_ldap_search_base: "dc=mycompany,dc=com"
cdh_zeppelin_ldap_users_base: "ou=People,dc=mycompany,dc=com"
cdh_zeppelin_user_objectclass: posixUser
cdh_zeppelin_ldap_group_base: "ou=Groups,dc=mycompany,dc=com"
cdh_zeppelin_ldap_group_obj_class: groupofnames
cdh_zeppelin_group_template: "cn={0},ou=Groups,dc=mycompany,dc=com"
cdh_zeppelin_group_search_filter: '(memberUid={0})'
cdh_zeppelin_ldap_nested_groups: 'false'
cdh_zeppelin_ldap_roles_by_group: 'cdh_hadoop: userrole, cdh_admin: adminrole'
cdh_zeppelin_ldap_bind_u: zeppelin
#cdh_zeppelin_ldap_bind_pwd: "use a vault file"

cdh_zeppelin_ldap_user_attr: uid
cdh_zeppelin_ldap_member_attr: member
cdh_zeppelin_ldap_group: zeppelin_hadoop
cdh_zeppelin_ldap_url: 'ldap://ldap.test.com:389'
cdh_zeppelin_secure_urls: True
cdh_zeppelin_secure_roles: 'authc, roles[adminrole]'
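Since the Zeppelin and LDAP pieces are all gated behind booleans, a dedicated Zeppelin node could flip them in its host_vars. A hedged sketch reusing the variable names above; the LDAP URL and the vault-protected bind password are placeholders:

---
# Hypothetical host_vars for a dedicated Zeppelin node with LDAP auth.
cdh_zeppelin_node: True
cdh_zeppelin_dedicated_node: True
cdh_zeppelin_ldap_enabled: True
cdh_zeppelin_ldap_url: 'ldaps://ldap.example.org:636'
cdh_zeppelin_ldap_bind_pwd: '{{ vault_cdh_zeppelin_ldap_bind_pwd }}'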