Merge branch 'master' of gitorious.research-infrastructures.eu:infrastructure-management/ansible-playbooks

This commit is contained in:
Tommaso Piccioli 2015-08-31 11:50:07 +02:00
commit 3390920d0f
61 changed files with 504 additions and 345 deletions

View File

@ -1,3 +1,12 @@
This role assumes that only one tomcat instance is defined and running on the system.
Important note: the variable 'http_port' needs to be defined earlier in the calling playbook.
What the role does:
- Install the sudoers config that permits the tomcat user to restart
the service
- Install the script that allows the tomcat user to start and stop the
service without using the full path
- Install the README file that explains where the options files are
placed and how to start/stop the service
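As a concrete illustration of the first two bullets, a minimal sketch of how such a sudoers fragment could be shipped; the template name, destination file and rendered rule are assumptions for illustration, not the role's actual files.

- name: Install the sudoers config that permits the tomcat user to restart the service
  # Hypothetical template; it would render a rule along the lines of:
  #   <instance user> ALL=(root) NOPASSWD: /etc/init.d/tomcat-instance-<http_port>
  template: src=tomcat-sudoers.j2 dest=/etc/sudoers.d/tomcat-instance owner=root group=root mode=0440
  tags: [ 'tomcat', 'd4science', 'sudo' ]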

View File

@ -1,3 +1,7 @@
---
d4science_user: gcube
d4science_user_home: '/home/{{ d4science_user }}'
d4science_tomcat_options_files:
- '/etc/default/tomcat-instance-{{ item.0.http_port }}'
- '/etc/default/tomcat-instance-{{ item.0.http_port }}.local'

View File

@ -9,3 +9,29 @@
- '{{ tomcat_m_instances }}'
- [ 'startContainer.sh', 'stopContainer.sh' ]
tags: [ 'tomcat', 'd4science', 'sudo' ]
- name: Install the README file that explains where the options files are placed and how to start/stop the service
template: src={{ item.1 }}.j2 dest={{ item.0.user_home }}/{{ item.1 }} owner={{ item.0.user }} group={{ item.0.user }} mode=0444
with_nested:
- '{{ tomcat_m_instances }}'
- [ 'README' ]
tags: [ 'tomcat', 'd4science', 'd4s_readme' ]
# - name: Set the read/write permissions on the tomcat default options files
# acl: name={{ item.1 }} entity={{ item.0.user }} etype=user permissions=rw state=present
# with_nested:
# - '{{ tomcat_m_instances }}'
# - '{{ d4science_tomcat_options_files }}'
# tags: [ 'tomcat', 'd4science', 'acl' ]
- name: Set the read/write permissions on the tomcat default options files
acl: name=/etc/default/tomcat-instance-{{ item.http_port }} entity={{ item.user }} etype=user permissions=rw state=present
with_items: tomcat_m_instances
tags: [ 'tomcat', 'd4science', 'acl' ]
- name: Set the read/write permissions on the tomcat default local options files
acl: name=/etc/default/tomcat-instance-{{ item.http_port }}.local entity={{ item.user }} etype=user permissions=rw state=present
with_items: tomcat_m_instances
tags: [ 'tomcat', 'd4science', 'acl' ]
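The loops above reference item.http_port, item.user and item.user_home (or item.0.* when nested), so each tomcat_m_instances entry is expected to provide at least those keys; a hedged sketch of one entry, with illustrative values:

tomcat_m_instances:
  - http_port: 8080
    user: gcube
    user_home: '/home/gcube'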

View File

@ -0,0 +1,8 @@
The java options are set inside /etc/default/tomcat-instance-{{ item.0.http_port }}
The GHN environment variables are set inside /etc/default/tomcat-instance-{{ item.0.http_port }}.local
The commands that start and stop the containers are:
/home/gcube/startContainer.sh
/home/gcube/stopContainer.sh
The log files live inside /home/gcube/tomcat/logs (it's a symbolic link to {{ tomcat_m_instances_logdir_base }}/{{ item.0.http_port }})

View File

@ -1,3 +1,3 @@
This role sets acls that permit unprivileged users to:
- write inside a list of directories
- restart the tomcat instances
- restart the tomcat instances (default), or manage other services
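The 'other services' case is driven by the dnet_sudo_commands list documented in the dnet defaults further down; a hedged example of enabling it from a playbook, reusing the commands shown there:

dnet_sudo_commands:
  - /etc/init.d/virtuoso-opensource-7
  - /sbin/reboot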

View File

@ -1,6 +1,8 @@
---
dnet_standard_installation: True
dnet_user: tomcat7
dnet_group: dnet
dnet_sudoers_group: dnetsu
dnet_data_directories:
- /var/lib/dnet
@ -8,3 +10,14 @@ dnet_data_directories:
dnet_log_directories:
- /var/log/dnet
- /var/log/dnet/search
# Define the following if you want some directories readable and writable by the dnet group but outside the dnet app data dirs
#dnet_users_data_directories:
# - { name: '/data/1', create: True }
# - { name: '/data/2', create: False, file: False }
# - { name: '/data/bah', create: False, file: True }
# Define the following array when you want to add commands to the sudoers file
#dnet_sudo_commands:
# - /etc/init.d/virtuoso-opensource-7
# - /sbin/reboot

View File

@ -0,0 +1,13 @@
---
- name: Install additional packages, if needed
apt: pkg={{ item }} state=installed
with_items: dnet_additional_packages
when: dnet_additional_packages is defined
tags: ['dnet', 'pkgs']
- name: Install additional python modules, if needed
pip: name={{ item }} state=present
with_items: dnet_additional_python_modules
when: dnet_additional_python_modules is defined
tags: ['dnet', 'pkgs']
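Both tasks are skipped unless the corresponding variables exist, so a calling playbook opts in by defining them; a hedged example (the package and module names are illustrative):

dnet_additional_packages:
  - python-requests
dnet_additional_python_modules:
  - requests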

View File

@ -0,0 +1,30 @@
---
- name: Create the dnet data dirs
file: name={{ item }} state=directory owner={{ dnet_user }} group={{ dnet_group }} mode=0750
with_items: dnet_data_directories
tags: [ 'tomcat', 'dnet', 'users' ]
- name: Create the dnet log dirs
file: name={{ item }} state=directory owner={{ tomcat_user }} group={{ dnet_group }} mode=0750
with_items: dnet_log_directories
tags: [ 'tomcat', 'dnet', 'users' ]
- name: Set the read/write permissions on the dnet data dirs
acl: name={{ item }} entity={{ dnet_group }} etype=group permissions=rwx state=present
with_items: dnet_data_directories
tags: [ 'tomcat', 'dnet', 'users' ]
- name: Set the default read/write permissions on the dnet data dirs
acl: name={{ item }} entity={{ dnet_group }} etype=group permissions=rwx state=present default=yes
with_items: dnet_data_directories
tags: [ 'tomcat', 'dnet', 'users' ]
- name: Set the read permissions on the dnet log dirs
acl: name={{ item }} entity={{ dnet_group }} etype=group permissions=rx state=present
with_items: dnet_log_directories
tags: [ 'tomcat', 'dnet', 'users' ]
- name: Set the default read permissions on the dnet log dirs
acl: name={{ item }} entity={{ dnet_group }} etype=group permissions=rx state=present default=yes
with_items: dnet_log_directories
tags: [ 'tomcat', 'dnet', 'users' ]

View File

@ -0,0 +1,25 @@
---
- name: Add the dnet groups, if they do not exist already
group: name={{ item }} state=present
with_items:
- '{{ dnet_group }}'
- '{{ dnet_sudoers_group }}'
tags: [ 'dnet', 'users' ]
- name: Add all the users to the dnet group
user: name={{ item.login }} groups={{ dnet_group }} append=yes
with_items: users_system_users
tags: [ 'dnet', 'users' ]
- name: Add selected users to the dnet sudoers group
user: name={{ item.login }} groups={{ dnet_sudoers_group }} append=yes
with_items: users_system_users
when: item.dnet_sudoers_user
tags: [ 'dnet', 'users' ]
- name: Remove selected users from the dnet sudoers group
user: name={{ item.login }} groups={{ dnet_group }}
with_items: users_system_users
when: not item.dnet_sudoers_user
tags: [ 'dnet', 'users' ]
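These tasks assume every users_system_users entry carries a login and a dnet_sudoers_user boolean (the last two tasks fail on entries that omit the flag); a hedged sketch of the expected shape, with made-up logins:

users_system_users:
  - { login: alice, dnet_sudoers_user: True }
  - { login: bob, dnet_sudoers_user: False }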

View File

@ -0,0 +1,68 @@
---
#
# Acls for the single tomcat instance
#
# Note: the default is a default only. We need two commands to add acl effectively on the root dir and set the default
- name: Set the read/write permissions on the tomcat webapps and common/classes directories. single tomcat instance
acl: name={{ item }} entity={{ dnet_group }} etype=group permissions=rwx state=present
when: tomcat_m_instances is not defined
with_items:
- [ '{{ tomcat_webapps_dir }}', '{{ tomcat_common_classes_dir }}', '{{ tomcat_common_dir }}' ]
tags: [ 'tomcat', 'dnet', 'users' ]
- name: Set the default read/write permissions on the tomcat webapps and common/classes directories. single tomcat instance
acl: name={{ item }} entity={{ dnet_group }} etype=group permissions=rwx state=present default=yes
when: tomcat_m_instances is not defined
with_items:
- [ '{{ tomcat_webapps_dir }}', '{{ tomcat_common_classes_dir }}', '{{ tomcat_common_dir }}' ]
tags: [ 'tomcat', 'dnet', 'users' ]
# Note: the default is a default only. We need two commands to add acl effectively on the root dir and set the default
- name: Set the read permissions on the tomcat log directory. single tomcat instance
acl: name={{ item }} entity={{ dnet_group }} etype=group permissions=rx state=present
when: tomcat_m_instances is not defined
with_items:
- [ '{{ tomcat_logdir }}' ]
tags: [ 'tomcat', 'dnet', 'users' ]
- name: Set the default read permissions on the tomcat log directory. single tomcat instance
acl: name={{ item }} entity={{ dnet_group }} etype=group permissions=rx state=present default=yes
when: tomcat_m_instances is not defined
with_items:
- [ '{{ tomcat_logdir }}' ]
tags: [ 'tomcat', 'dnet', 'users' ]
#
# Same steps, but when we are using multiple tomcat instances
#
# Note: the default is a default only. We need two commands to add acl effectively on the root dir and set the default
- name: Set the read/write permissions on the tomcat webapps and common/classes directories. multiple tomcat instances
acl: name={{ item.0.instance_path }}/{{ item.1 }} entity={{ dnet_group }} etype=group permissions=rwx state=present
when: tomcat_m_instances is defined
with_nested:
- '{{ tomcat_m_instances }}'
- [ 'webapps', 'common', 'common/classes' ]
tags: [ 'tomcat', 'dnet', 'users' ]
- name: Set the default read/write permissions on the tomcat webapps and common/classes directories. multiple tomcat instances
acl: name={{ item.0.instance_path }}/{{ item.1 }} entity={{ dnet_group }} etype=group permissions=rwx state=present default=yes
when: tomcat_m_instances is defined
with_nested:
- '{{ tomcat_m_instances }}'
- [ 'webapps', 'common', 'common/classes' ]
tags: [ 'tomcat', 'dnet', 'users' ]
# Note: the default is a default only. We need two commands to add acl effectively on the root dir and set the default
- name: Set the read permissions on the tomcat log directory. multiple tomcat instances
acl: name={{ tomcat_m_instances_logdir_base }}/{{ item.http_port }} entity={{ dnet_group }} etype=group permissions=rx state=present
when: tomcat_m_instances is defined
with_items: tomcat_m_instances
tags: [ 'tomcat', 'dnet', 'users' ]
- name: Set the default read permissions on the tomcat log directory. multiple tomcat instances
acl: name={{ tomcat_m_instances_logdir_base }}/{{ item.http_port }} entity={{ dnet_group }} etype=group permissions=rx state=present default=yes
when: tomcat_m_instances is defined
with_items: tomcat_m_instances
tags: [ 'tomcat', 'dnet', 'users' ]

View File

@ -0,0 +1,25 @@
---
- name: Create the users dnet data dirs
file: name={{ item.name }} state=directory owner=root group={{ dnet_group }} mode=0750
with_items: dnet_users_data_directories
when: item.create and not item.file
tags: [ 'dnet', 'users' ]
- name: Set the read/write/access permissions on the users dnet data dirs
acl: name={{ item.name }} entity={{ dnet_group }} etype=group permissions=rwx state=present
with_items: dnet_users_data_directories
when: not item.file
tags: [ 'dnet', 'users' ]
- name: Set the default read/write/access permissions on the users dnet data dirs
acl: name={{ item.name }} entity={{ dnet_group }} etype=group permissions=rwx state=present default=yes
with_items: dnet_users_data_directories
when: not item.file
tags: [ 'dnet', 'users' ]
- name: Set the read/write permissions on pre-existing files inside the users dnet data dirs
acl: name={{ item.name }} entity={{ dnet_group }} etype=group permissions=rw state=present
with_items: dnet_users_data_directories
when: item.file
tags: [ 'dnet', 'users' ]

View File

@ -1,107 +1,10 @@
---
- include: dnet-groups.yml
- include: sudo-config.yml
- include: dnet-data-dirs.yml
when: dnet_standard_installation
- include: dnet-users-data-dirs.yml
when: dnet_users_data_directories is defined
- include: dnet-additional-packages.yml
- include: dnet-tomcat-acls.yml
when: dnet_standard_installation
- name: Add the all the users to the dnet group
user: name={{ item.login }} groups={{ dnet_group }}
with_items: users_system_users
tags: [ 'dnet', 'users' ]
- name: Install the sudoers config that permits the dnet users to restart tomcat
template: src=dnet-sudoers.j2 dest=/etc/sudoers.d/dnet-group owner=root group=root mode=0440
tags: [ 'tomcat', 'dnet', 'sudo', 'users' ]
- name: Create the dnet data dirs
file: name={{ item }} state=directory owner={{ dnet_user }} group={{ dnet_group }} mode=0750
with_items: dnet_data_directories
tags: [ 'tomcat', 'dnet', 'users' ]
- name: Create the dnet log dirs
file: name={{ item }} state=directory owner={{ tomcat_user }} group={{ dnet_group }} mode=0750
with_items: dnet_log_directories
tags: [ 'tomcat', 'dnet', 'users' ]
- name: Set the read/write permissions on the dnet data dirs
acl: name={{ item }} entity={{ dnet_group }} etype=group permissions=rwx state=present
with_items: dnet_data_directories
tags: [ 'tomcat', 'dnet', 'users' ]
- name: Set the default read/write permissions on the dnet data dirs
acl: name={{ item }} entity={{ dnet_group }} etype=group permissions=rwx state=present default=yes
with_items: dnet_data_directories
tags: [ 'tomcat', 'dnet', 'users' ]
- name: Set the read permissions on the dnet log dirs
acl: name={{ item }} entity={{ dnet_group }} etype=group permissions=rx state=present
with_items: dnet_log_directories
tags: [ 'tomcat', 'dnet', 'users' ]
- name: Set the default read permissions on the dnet log dirs
acl: name={{ item }} entity={{ dnet_group }} etype=group permissions=rx state=present default=yes
with_items: dnet_log_directories
tags: [ 'tomcat', 'dnet', 'users' ]
#
# Acls for the single tomcat instance
#
# Note: the default is a default only. We need two commands to add acl effectively on the root dir and set the default
- name: Set the read/write permissions on the tomcat webapps and common/classes directories. single tomcat instance
acl: name={{ item }} entity={{ dnet_group }} etype=group permissions=rwx state=present
when: tomcat_m_instances is not defined
with_items:
- [ '{{ tomcat_webapps_dir }}', '{{ tomcat_common_classes_dir }}', '{{ tomcat_common_dir }}' ]
tags: [ 'tomcat', 'dnet', 'users' ]
- name: Set the default read/write permissions on the tomcat webapps and common/classes directories. single tomcat instance
acl: name={{ item }} entity={{ dnet_group }} etype=group permissions=rwx state=present default=yes
when: tomcat_m_instances is not defined
with_items:
- [ '{{ tomcat_webapps_dir }}', '{{ tomcat_common_classes_dir }}', '{{ tomcat_common_dir }}' ]
tags: [ 'tomcat', 'dnet', 'users' ]
# Note: the default is a default only. We need two commands to add acl effectively on the root dir and set the default
- name: Set the read permissions on the tomcat log directory. single tomcat instance
acl: name={{ item }} entity={{ dnet_group }} etype=group permissions=rx state=present
when: tomcat_m_instances is not defined
with_items:
- [ '{{ tomcat_logdir }}' ]
tags: [ 'tomcat', 'dnet', 'users' ]
- name: Set the default read permissions on the tomcat log directory. single tomcat instance
acl: name={{ item }} entity={{ dnet_group }} etype=group permissions=rx state=present default=yes
when: tomcat_m_instances is not defined
with_items:
- [ '{{ tomcat_logdir }}' ]
tags: [ 'tomcat', 'dnet', 'users' ]
#
# Same steps, but when we are using multiple tomcat instances
#
# Note: the default is a default only. We need two commands to add acl effectively on the root dir and set the default
- name: Set the read/write permissions on the tomcat webapps and common/classes directories. multiple tomcat instances
acl: name={{ item.0.instance_path }}/{{ item.1 }} entity={{ dnet_group }} etype=group permissions=rwx state=present
when: tomcat_m_instances is defined
with_nested:
- ' {{ tomcat_m_instances }}'
- [ 'webapps', 'common', 'common/classes' ]
tags: [ 'tomcat', 'dnet', 'users' ]
- name: Set the default read/write permissions on the tomcat webapps and common/classes directories. multiple tomcat instances
acl: name={{ item.0.instance_path }}/{{ item.1 }} entity={{ dnet_group }} etype=group permissions=rwx state=present default=yes
when: tomcat_m_instances is not defined
when: tomcat_m_instances is defined
with_nested:
- ' {{ tomcat_m_instances }}'
- [ 'webapps', 'common', 'common/classes' ]
tags: [ 'tomcat', 'dnet', 'users' ]
# Note: the default is a default only. We need two commands to add acl effectively on the root dir and set the default
- name: Set the read permissions on the tomcat log directory. multiple tomcat instances
acl: name={{ tomcat_m_instances_logdir_base }}/{{ item.http_port }} entity={{ dnet_group }} etype=group permissions=rx state=present
when: tomcat_m_instances is defined
with_items: tomcat_m_instances
tags: [ 'tomcat', 'dnet', 'users' ]
- name: Set the default read permissions on the tomcat log directory. multiple tomcat instances
acl: name={{ tomcat_m_instances_logdir_base }}/{{ item.http_port }} entity={{ dnet_group }} etype=group permissions=rx state=present default=yes
when: tomcat_m_instances is defined
with_items: tomcat_m_instances
tags: [ 'tomcat', 'dnet', 'users' ]

View File

@ -0,0 +1,5 @@
---
- name: Install the sudoers config that permits the dnet users to execute some privileged commands
template: src=dnet-sudoers.j2 dest=/etc/sudoers.d/dnet-group owner=root group=root mode=0440
tags: [ 'tomcat', 'dnet', 'sudo', 'users' ]

View File

@ -1,3 +1,3 @@
%{{ dnet_group }} ALL=(ALL) NOPASSWD: /etc/init.d/tomcat7, /etc/init.d/tomcat-instance-*
%{{ dnet_sudoers_group }} ALL=(ALL) NOPASSWD: {% if tomcat_m_instances is defined %}/etc/init.d/tomcat7, /etc/init.d/tomcat-instance-*{% if dnet_sudo_commands is defined %}, {% endif %}{% endif %}{% if dnet_sudo_commands is defined %}{% for cmd in dnet_sudo_commands %}{{ cmd }}{% if not loop.last %},{% endif %}{% endfor %}{% endif %}

View File

@ -2,16 +2,14 @@
# Fail2ban
# Needed by the fail2ban template
cm_ip: 146.48.123.18
monitoring_ip: 146.48.123.23
# ban time in seconds. 86400 == 1 day
f2b_ban_time: 86400
f2b_findtime: 600
f2b_maxretry: 5
f2b_default_backend: auto
f2b_usedns: warn
f2b_dest_email: sysadmin@research-infrastructures.eu
f2b_dest_email: 'sysadmin@{{ domain_name }}'
f2b_sender_email: denyhosts@research-infrastructures.eu
f2b_sender_email: 'denyhosts@{{ domain_name }}'
f2b_default_banaction: iptables-multiport
# Default action: ban. Do not send email
f2b_default_action: action_
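With cm_ip and monitoring_ip removed from the defaults and the mail addresses now built from domain_name, those values are expected to come from group_vars or the calling playbook; a hedged example with placeholder values:

domain_name: example.org
cm_ip: 192.0.2.10
monitoring_ip: 192.0.2.20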

View File

@ -1,4 +1,6 @@
---
- name: Restart fail2ban
service: name=fail2ban state=restarted enabled=yes
when: has_fail2ban

View File

@ -1,5 +1,3 @@
---
- include: fail2ban.yml
when: ( is_trusty ) or ( is_debian8 )
when: has_fail2ban

View File

@ -18,7 +18,7 @@
# "ignoreip" can be an IP address, a CIDR mask or a DNS host. Fail2ban will not # "ignoreip" can be an IP address, a CIDR mask or a DNS host. Fail2ban will not
# ban a host which matches an address in this list. Several addresses can be # ban a host which matches an address in this list. Several addresses can be
# defined using space separator. # defined using space separator.
ignoreip = 127.0.0.1/8 {{ cm_ip }} {{ monitoring_ip }} ignoreip = 127.0.0.1/8 {% if cm_ip is defined %}{{ cm_ip }}{% endif %} {% if monitoring_ip is defined %}{{ monitoring_ip }}{% endif %}
# "bantime" is the number of seconds that a host is banned. # "bantime" is the number of seconds that a host is banned.
bantime = {{ f2b_ban_time }} bantime = {{ f2b_ban_time }}

View File

@ -12,18 +12,16 @@
with_items:
- ganglia-modules-linux
- ganglia-monitor-python
notify:
Restart ganglia monitor
when: is_trusty_or_debian7
notify: Restart ganglia monitor
when: ( is_trusty_or_debian7 ) or ( is_debian8 )
tags:
- monitoring
- ganglia
- name: Distribute the ganglia configuration file for Ubuntu >= 12.04
template: src=gmond.j2 dest=/etc/ganglia/gmond.conf owner=root group=root mode=444
when: is_not_ubuntu_less_than_precise
notify:
Restart ganglia monitor
when: ( is_not_ubuntu_less_than_precise ) or ( is_debian8 )
notify: Restart ganglia monitor
tags:
- monitoring
- ganglia
@ -31,8 +29,7 @@
- name: Distribute the ganglia configuration file for Debian 7
template: src=gmond.j2 dest=/etc/ganglia/gmond.conf owner=root group=root mode=444
when: is_debian7
notify:
Restart ganglia monitor
notify: Restart ganglia monitor
tags:
- monitoring
- ganglia
@ -40,8 +37,7 @@
- name: Distribute the ganglia configuration file for Ubuntu < 12.04 and >= 10.04 and Debian 6
template: src=gmond-3.1.j2 dest=/etc/ganglia/gmond.conf owner=root group=root mode=444
when: is_ubuntu_between_10_04_and_11_04_and_is_debian_6
notify:
Restart ganglia monitor
notify: Restart ganglia monitor
tags:
- monitoring
- ganglia
@ -50,8 +46,7 @@
template: src=gmond-2.5.j2 dest=/etc/gmond.conf owner=root group=root mode=444
when:
- is_ubuntu_between_8_and_9_and_is_debian_4
notify:
Restart ganglia monitor
notify: Restart ganglia monitor
tags:
- monitoring
- ganglia
@ -60,8 +55,7 @@
template: src=gmond-2.5.j2 dest=/etc/gmond.conf owner=root group=root mode=444
when:
- is_broken_hardy_lts
notify:
Restart ganglia monitor
notify: Restart ganglia monitor
tags:
- monitoring
- ganglia
@ -82,8 +76,7 @@
- name: Setup the ganglia configuration for python modules
copy: src=modpython.conf dest=/etc/ganglia/conf.d/modpython.conf owner=root group=root mode=0644
notify:
- Restart ganglia monitor
notify: Restart ganglia monitor
when: is_precise
tags:
- monitoring

View File

@ -22,5 +22,5 @@
- name: Restart fail2ban
service: name=fail2ban state=restarted enabled=yes
when: is_trusty
when: has_fail2ban

View File

@ -1,4 +1,4 @@
---
nemis_ldap_uri: "ldap://ldap.sub.research-infrastructures.eu"
nemis_ldap_base_dn: "dc=research-infrastructures,dc=eu"
ldap_uri: "ldap://ldap.sub.research-infrastructures.eu"
ldap_base_dn: "dc=research-infrastructures,dc=eu"
ldap_tls_cacert: /etc/ssl/certs/ca-certificates.crt

View File

@ -4,36 +4,30 @@
with_items:
- ldapscripts
- libpam-ldap
tags:
- ldap-client
tags: ldap-client
- name: Write the ldap client configuration file
template: src=ldap.conf.j2 dest=/etc/ldap.conf mode=444 owner=root group=root
template: src=ldap.conf-old.j2 dest=/etc/ldap.conf mode=444 owner=root group=root
when: is_ubuntu_less_than_trusty
tags:
- ldap-client
tags: ldap-client
- name: Write the ldap client configuration file
template: src=ldap.conf.j2 dest=/etc/ldap/ldap.conf mode=444 owner=root group=root
when: is_trusty
tags:
- ldap-client
tags: ldap-client
- name: set the ldapscripts.conf uri
action: configfile path=/etc/ldapscripts/ldapscripts.conf key=SERVER value='{{ nemis_ldap_uri }}' syntax=shell
action: configfile path=/etc/ldapscripts/ldapscripts.conf key=SERVER value='{{ ldap_uri }}' syntax=shell
when: is_trusty
tags:
- ldap-client
tags: ldap-client
- name: set the ldapscripts.conf bind dn
action: configfile path=/etc/ldapscripts/ldapscripts.conf key=BINDDN value='cn=admin,{{ nemis_ldap_base_dn }}' syntax=shell
action: configfile path=/etc/ldapscripts/ldapscripts.conf key=BINDDN value='cn=admin,{{ ldap_base_dn }}' syntax=shell
when: is_trusty
tags:
- ldap-client
tags: ldap-client
- name: set the ldapscripts.conf dn suffix
action: configfile path=/etc/ldapscripts/ldapscripts.conf key=SUFFIX value='{{ nemis_ldap_base_dn }}' syntax=shell
action: configfile path=/etc/ldapscripts/ldapscripts.conf key=SUFFIX value='{{ ldap_base_dn }}' syntax=shell
when: is_trusty
tags:
- ldap-client
tags: ldap-client

View File

@ -0,0 +1,11 @@
# The distinguished name of the search base.
BASE {{ ldap_base_dn }}
# Another way to specify your LDAP server is to provide an
URI {{ ldap_uri }}
# The LDAP version to use (defaults to 3
# if supported by client library)
ldap_version 3
nss_initgroups_ignoreusers avahi,backup,bin,daemon,games,gnats,irc,libuuid,list,lp,mail,man,messagebus,munin,news,nslcd,proxy,root,rstudio-server,sshd,sync,sys,syslog,uucp,www-data

View File

@ -1,11 +1,14 @@
# The distinguished name of the search base.
BASE {{ nemis_ldap_base_dn }}
BASE {{ ldap_base_dn }}
# Another way to specify your LDAP server is to provide an
URI {{ nemis_ldap_uri }}
URI {{ ldap_uri }}
# The LDAP version to use (defaults to 3
# if supported by client library)
ldap_version 3
nss_initgroups_ignoreusers avahi,backup,bin,daemon,games,gnats,irc,libuuid,list,lp,mail,man,messagebus,munin,news,nslcd,proxy,root,rstudio-server,sshd,sync,sys,syslog,uucp,www-data
# TLS certificates (needed for GnuTLS)
TLS_CACERT {{ ldap_tls_cacert }}

View File

@ -16,6 +16,7 @@ mw_php_prereq:
- php5-mysqlnd
- php-apc
- php-pear
- php5-ldap
- imagemagick
# This choice is not recommended. The package has a poor list of dependencies. We do not want to deal with those

View File

@ -43,4 +43,5 @@ mysql_backup_logdir: '{{ mysql_log_dir }}'
mysql_backup_logfile: '{{ mysql_backup_logdir }}/my_backup.log'
mysql_backup_retain_copies: 15
mysql_backup_destdir: /var/lib/mysql-backup
mysql_backup_exclude_list: "performance_schema"
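The value is used by the backup script as a single grep -v pattern (see the loop and the /etc/default template below), so excluding several databases needs a combined pattern; one hedged possibility, assuming GNU grep's basic-regex alternation:

mysql_backup_exclude_list: 'performance_schema\|information_schema'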

View File

@ -6,6 +6,8 @@ MY_BACKUP_USE_NAGIOS="False"
MY_BACKUP_DIR=/var/lib/mysql-backup
MY_DATA_DIR=/var/lib/mysql
N_DAYS_TO_SPARE=7
# Exclude list
EXCLUDE_LIST='performance_schema'
if [ -f /etc/default/mysql_backup ] ; then
. /etc/default/mysql_backup
@ -33,33 +35,28 @@ fi
chmod 700 $MY_BACKUP_DIR
LOCKFILE=$MY_DATA_DIR/.mysqldump.lock
NAGIOS_LOG=$MY_BACKUP_DIR/.nagios-status
# Exclude list
EXCLUDE_LIST='performance_schema'
if [ ! -f $LOCKFILE ] ; then
touch $LOCKFILE
if [ "${MY_BACKUP_USE_NAGIOS}" == "True" ] ; then
> $NAGIOS_LOG
fi
for db in $( /bin/ls -1 /var/lib/mysql/ | grep -v $EXCLUDE_LIST ) ; do
if [ -d /var/lib/mysql/$db ] ; then
#mysqldump -uroot -f --opt -p$MYSQLPASS $db > $MY_BACKUP_DIR/$db.sql 2> $MY_BACKUP_DIR/log/$db.log
mysqldump -f --opt $db > $MY_BACKUP_DIR/history/${db}.sql.${SAVE_TIME} 2> $MY_BACKUP_LOG_DIR/$db.log
DUMP_RESULT=$?
chmod 600 $MY_BACKUP_DIR/history/${db}.sql.${SAVE_TIME}
if [ "${MY_BACKUP_USE_NAGIOS}" == "True" ] ; then
if [ $DUMP_RESULT -ne 0 ] ; then
echo "$db:FAILED" >> $NAGIOS_LOG
RETVAL=$DUMP_RESULT
else
echo "$db:OK" >> $NAGIOS_LOG
fi
fi
pushd ${MY_BACKUP_DIR}/ >/dev/null 2>&1
rm -f $db.sql
ln -s $MY_BACKUP_DIR/history/${db}.sql.${SAVE_TIME} ./$db.sql
popd >/dev/null 2>&1
fi
for db in $( mysql -Bse "show databases;" | grep -v $EXCLUDE_LIST ) ; do
mysqldump -f --flush-privileges --opt $db > $MY_BACKUP_DIR/history/${db}.sql.${SAVE_TIME} 2> $MY_BACKUP_LOG_DIR/$db.log
DUMP_RESULT=$?
chmod 600 $MY_BACKUP_DIR/history/${db}.sql.${SAVE_TIME}
if [ "${MY_BACKUP_USE_NAGIOS}" == "True" ] ; then
if [ $DUMP_RESULT -ne 0 ] ; then
echo "$db:FAILED" >> $NAGIOS_LOG
RETVAL=$DUMP_RESULT
else
echo "$db:OK" >> $NAGIOS_LOG
fi
fi
pushd ${MY_BACKUP_DIR}/ >/dev/null 2>&1
rm -f $db.sql
ln -s $MY_BACKUP_DIR/history/${db}.sql.${SAVE_TIME} ./$db.sql
popd >/dev/null 2>&1
done
# Do a "flush-hosts" after the backup
mysqladmin flush-hosts 2> $MY_BACKUP_LOG_DIR/flush-hosts.log

View File

@ -5,9 +5,7 @@
when:
- mysql_db_data is defined
- item.name is defined
tags:
- mysql
- mysql_db
tags: [ 'mysql', 'mysql_db' ]
- name: Add a user for the databases
mysql_user: name={{ item.0.user }} password={{ item.0.pwd }} host={{ item.1 }} priv={{ item.0.name }}.*:"{{ item.0.user_grant }}" state=present
@ -17,7 +15,4 @@
when:
- mysql_db_data is defined
- item.0.name is defined
tags:
- mysql
- mysql_db
tags: [ 'mysql', 'mysql_db' ]

View File

@ -4,3 +4,5 @@ MY_BACKUP_LOG_FILE='{{ mysql_backup_logfile}}'
N_DAYS_TO_SPARE='{{ mysql_backup_retain_copies }}'
MY_BACKUP_DIR='{{ mysql_backup_destdir }}'
MY_DATA_DIR='{{ mysql_data_dir }}'
# Exclude list
EXCLUDE_LIST='{{ mysql_backup_exclude_list }}'

View File

@ -61,8 +61,11 @@ nagios_dell_omsa_pkgs:
- srvadmin-base
- srvadmin-idrac
- srvadmin-storageservices
- srvadmin-omcommon
# We need a more recent version of the check_openmanage executable
nagios_dell_standalone_checks:
- check_dell_warranty.py
- check_openmanage
nagios_openmanage_additional_opts: ''

View File

@ -7,14 +7,28 @@
register: update_apt_cache
tags: [ 'dell', 'nagios' ]
- name: Install the NeMIS internal repository apt key
apt_key: url=http://ppa.research-infrastructures.eu/system/keys/system-archive.asc state=present
tags: [ 'dell', 'nagios' ]
- name: research infrastructures system repository on debian
copy: src={{ item }} dest=/etc/apt/sources.list.d/{{ item }}
with_items:
- research-infrastructures.eu.system.list
when: is_debian6
when: is_debian
register: update_apt_cache
tags: [ 'dell', 'nagios' ]
- name: Update apt cache
apt: update_cache=yes
when: ( update_apt_cache | changed )
tags: [ 'dell', 'nagios' ]
#- action: apt_key id=1285491434D8786F state=present
- name: Install the Dell OMSA repository apt key
apt_key: keyserver=pool.sks-keyservers.net id=1285491434D8786F
tags: [ 'dell', 'nagios' ]
- name: Install the Dell apt repository
template: src={{ item }}.j2 dest=/etc/apt/sources.list.d/{{ item }}
with_items:
@ -23,18 +37,9 @@
register: update_apt_cache
tags: [ 'dell', 'nagios' ]
- name: Install the NeMIS internal repository apt key
apt_key: url=http://ppa.research-infrastructures.eu/system/keys/system-archive.asc state=present
tags: [ 'dell', 'nagios' ]
#- action: apt_key id=1285491434D8786F state=present
- name: Install the Dell OMSA repository apt key
apt_key: keyserver=pool.sks-keyservers.net id=1285491434D8786F
tags: [ 'dell', 'nagios' ]
- name: Update apt cache
apt: update_cache=yes
when: update_apt_cache.changed
when: ( update_apt_cache | changed )
tags: [ 'dell', 'nagios' ]
- name: Install the Dell OMSA packages dependencies
@ -42,7 +47,7 @@
with_items: nagios_dell_omsa_deps
tags: [ 'dell', 'nagios' ]
- name: Install the Dell OMSA packages dependencies
- name: Install other Dell OMSA packages dependencies
apt: pkg={{ item }} state=installed
with_items:
- python-requests
@ -75,8 +80,8 @@
when: ( libssl_legacy | changed )
tags: [ 'dell', 'nagios' ]
- name: Install the Dell OMSA packages
apt: pkg={{ item }} state=installed force=yes
- name: Install the main Dell OMSA package
apt: pkg={{ item }} state={{ nagios_dell_omsa_pkg_state }} force=yes
with_items:
- syscfg
when: is_not_debian6

View File

@ -25,6 +25,7 @@
- name: Ensure that the smart server is enabled and running
service: name=smartmontools state=started enabled=yes
when: not is_debian8
tags:
- nagios-hw
- nagios

View File

@ -5,6 +5,14 @@ nginx_ldap_uri: "ldap://ldap.sub.research-infrastructures.eu"
nginx_ldap_base_dn: "dc=research-infrastructures,dc=eu"
nginx_enabled: "Yes"
nginx_enable_compression: True
nginx_gzip_vary: "on"
nginx_gzip_proxied: any
nginx_gzip_comp_level: 6
nginx_gzip_buffers: 16 8k
nginx_gzip_http_version: 1.1
nginx_gzip_types: "text/plain text/css application/json application/x-javascript text/xml application/xml application/xml+rss text/javascript application/javascript"
nginx_proxy_buffering: "on"
nginx_proxy_redirect: "off"
nginx_proxy_buffer_size: 128k

View File

@ -4,25 +4,26 @@
with_items:
- nginx-full
when: not nginx_use_ldap_pam_auth
tags:
- nginx
tags: nginx
- name: Install the nginx web server if we need ldap auth via pam
apt: pkg={{ item }} state=installed
with_items:
- nginx-extras
when: nginx_use_ldap_pam_auth
tags:
- nginx
tags: nginx
- name: remove nginx default config
file: dest=/etc/nginx/sites-enabled/default state=absent
notify:
Reload nginx
tags:
- nginx
notify: Reload nginx
tags: [ 'nginx', 'nginx_conf', 'nginx_virtualhost' ]
- name: Install the gzip compression configuration if enabled
template: src=nginx-compression.conf.j2 dest=/etc/nginx/conf.d/compression.conf owner=root group=root mode=0444
when: nginx_enable_compression
notify: Reload nginx
tags: [ 'nginx', 'nginx_conf' ]
- name: Ensure that the webserver is running
service: name=nginx state=started enabled={{ nginx_enabled }}
tags:
- nginx
tags: nginx

View File

@ -0,0 +1,6 @@
gzip_vary {{ nginx_gzip_vary }};
gzip_proxied {{ nginx_gzip_proxied }};
gzip_comp_level {{ nginx_gzip_comp_level }};
gzip_buffers {{ nginx_gzip_buffers }};
gzip_http_version {{ nginx_gzip_http_version }};
gzip_types {{ nginx_gzip_types }};
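The compression snippet is only installed when nginx_enable_compression is true (see the task above), so a host can opt out or tune the defaults from group_vars; a hedged example:

nginx_enable_compression: False
# or keep it enabled and only adjust a knob, e.g.:
# nginx_gzip_comp_level: 4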

View File

@ -6,8 +6,12 @@ jdk_version:
- '{{ jdk_default }}'
jdk_java_home: '/usr/lib/jvm/java-{{ jdk_default }}-oracle'
jdk_pkg_state: installed
jdk_install_strong_encryption_policy: False
oracle_jdk_packages:
- 'oracle-java{{ jdk_default }}-installer'
- 'oracle-java{{ jdk_default }}-set-default'
jdk_install_strong_encryption_policy: True
# If we want a different oracle jdk set the following variables in the local playbook:
jdk_use_tarfile: False
# jdk_java_home: /usr/lib/jvm/java-7-0-25
# jdk_use_tarfile: True
# jdk_tarfile: oracle-jdk-7.0.25.tar.gz
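Putting the commented hints together, a hedged example of switching a host to the tar file installation (the paths are the ones quoted above, kept purely as an illustration):

jdk_use_tarfile: True
jdk_tarfile: oracle-jdk-7.0.25.tar.gz
jdk_java_home: /usr/lib/jvm/java-7-0-25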

View File

@ -15,33 +15,35 @@
tags: jdk
- name: Install the latest version of Oracle JDK
apt: pkg=oracle-java{{ item }}-installer state={{ jdk_pkg_state }} force=yes
apt: pkg={{ item }} state={{ jdk_pkg_state }} force=yes
when: not jdk_use_tarfile
with_items: oracle_jdk_packages
tags: jdk
- name: Install the extended security JCE Oracle JDK package
apt: pkg=oracle-java{{ item }}-unlimited-jce-policy state={{ jdk_pkg_state }} force=yes
when: jdk_use_tarfile is not defined or not jdk_use_tarfile
with_items: jdk_version
when:
- not jdk_use_tarfile
- jdk_install_strong_encryption_policy
tags: jdk
- name: Set the JDK default via update-alternatives
apt: pkg=oracle-java{{ item }}-set-default state={{ jdk_pkg_state }} force=yes
with_items: jdk_default
when: jdk_use_tarfile is not defined or not jdk_use_tarfile
notify:
Set the default Oracle JDK
when: jdk_default is defined
when:
- not jdk_use_tarfile
- jdk_default is defined
notify: Set the default Oracle JDK
tags: jdk
- name: Install a custom version of Oracle JDK from a tar file
unarchive: src={{ jdk_tarfile }} dest={{ jdk_java_home_prefix }}
when: jdk_use_tarfile is defined and jdk_use_tarfile
when: jdk_use_tarfile
tags: jdk
- name: Set fact jdk_installed
set_fact: jdk_installed=True
tags: [ 'jdk', 'jdk_security' ]
tags: jdk
- name: Install the strong encryption policy files
copy: src=jdk-{{ item.0 }}-{{ item.1 }} dest={{ jdk_java_home }}/jre/lib/security/{{ item.1}} mode=0444 owner=root group=root
with_nested:
- '{{ jdk_version }}'
- [ 'US_export_policy.jar', 'local_policy.jar' ]
when: jdk_install_strong_encryption_policy
tags: [ 'jdk', 'jdk_security' ]

View File

@ -1,3 +1,6 @@
---
# Set it to true when you want configure your machine to send email to a relay
postfix_relay_client: False
postfix_biff: "no"
postfix_append_dot_mydomain: "no"
postfix_use_relay_host: True
@ -6,7 +9,7 @@ postfix_use_sasl_auth: True
postfix_smtp_sasl_auth_enable: "yes"
postfix_smtp_create_relay_user: True
# See vars/isti-global.yml
postfix_relay_host: smtp-relay.research-infrastructures.eu
postfix_relay_host: smtp-relay.example.com
postfix_relay_port: 587
postfix_default_destination_concurrency_limit: 20
#postfix_smtp_relay_user: smtp-user
@ -15,7 +18,6 @@ postfix_default_destination_concurrency_limit: 20
# The following options are used only when postfix_relay_server is set to True
postfix_relay_server: False
#postfix_mynetworks: '{{ network.nmis }}, hash:/etc/postfix/network_table'
postfix_mynetworks: hash:/etc/postfix/network_table
postfix_interfaces: all
postfix_inet_protocols: all

View File

@ -1,6 +1,9 @@
---
- include: smtp-common-packages.yml
when: postfix_relay_client
- include: smtp-sasl-auth.yml
when: postfix_use_sasl_auth
when:
- postfix_use_sasl_auth
- postfix_relay_client
- include: postfix-relay-server.yml
when: postfix_relay_server

View File

@ -1,7 +1,6 @@
---
# solr
solr_http_port: 8983
tomcat_http_port: '{{ solr_http_port }}'
tomcat_load_additional_default_conf: True
tomcat_version: 7
# solr needs a lot of time to start if it needs to rebuild its indices
@ -13,7 +12,7 @@ solr_config_name: hindex
solr_shards: 1
solr_instance: '{{ solr_service }}'
solr_log_level: INFO
solr_http_port_1: '{{ tomcat_http_port }}'
solr_http_port_1: '{{ solr_http_port }}'
solr_zoo_port: 9983
solr_zoo_port_1: 9984
solr_zoo_port_2: 9985

View File

@ -1,3 +0,0 @@
---
dependencies:
- role: '../../library/roles/tomcat-multiple-instances'

View File

@ -1,41 +1,5 @@
---
manage_root_ssh_keys: True
#
# Example:
# user_ssh_key: [ '{{ sandro_labruzzo }}','{{ michele_artini }}', '{{ claudio_atzori }}' ]
#
cm_pubkey: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDJN8XR/N4p6FfymWJy7mwR3vbUboC4P+7CgZalflhK5iH0P7c24/zZDY9Y5QIq58IViY7napqZuRkNHnHcvm9mxtSxQ16qe03NulABN5V/ljgR0sQAWz8pwv68LDpR9uBSCbXDdDCUUlS+zOxCHA6s7O7PSFavX4An1Vd/mjwoeR4eLRQXNcKsK2Pu/BZ3TCLmWyi2otnxFiJ8IoKW1CvjxKWmt5BvAvys0dfsdnTSVz9yiUMwN5Oj8cw/jhKqadnkvqTGfGl1ELm9L2V7hT6LM0cIom9oRsQf+JJ6loBe3UUZGaAhY2jmARmZdX3qV9Wh+UtxaWMEAXB9mf/2cK9f jenkins@cm
ci_pubkey: ssh-dss AAAAB3NzaC1kc3MAAACBAPwK/P1MAOksk1vT8YQd4/d9apwx2Npbs1ynNq3jZloDClbR9bOyNQ41SA5HcSHvgRYHTDySw2nCDWew+FB5VqoEqmTecpy7MoPYyxOuRByx26LwgBIt7f3Dj1hrepwiWtrvY16dw7SYEs6+Bm8VGXRmlvGPORzuyP8plagI2641AAAAFQCknhxNYiauoYAfjcx1LONccKwjZQAAAIAJ097QfL/ehWEiaEI710t8wksckio1fhS9zLckNDyBaqMYYBQUSru/orWy6hkoF1hpCiRuhyKj5HyzIZmHRk0oPg6F6Kiq/9AKZAxH/mKD5Dsw0FVANQMuOq5DH2O3NYxlBEh/8tEqSg3BoNsv563i48FJ1DJeOd8/Ldi4tBcxswAAAIBu/R99IT3aOYkoC9z5I7qg0nL5duth4gMsJRJZbwoTtdY4ABF94GBHeb8RlQ+o7dxiUyBp0P5ME0p9Mc0OsTZPsLzYsnZfpzPIWmlNGaPPQExKFhpXkAwJ0zuDAatf9Tc7eT7bhf/vDsZXS4GKJ4HtRIVb6z5jvjq9Y27/HNC/6Q== jenkins@ci
claudio_atzori: ssh-dss AAAAB3NzaC1kc3MAAACBAPMUiX2cCrDItmblQgA2sRZ5SixdDvwmVG0yPk67wb2oZF8MCGCGwwt9eWI8EecMKIevoWF63pn8poUveqvnRRFfGCjly8Rl6cNM3QZRmc5hjU3HcG/eFDCs92+vGdYfN/UV1qi2xIKU8204VfpnpWfsPlBqion/mR/kfLgCD0RTAAAAFQD6xPbDfMl0mkPGNL591eYHlKbtwQAAAIBJjhez8Gy8WGMJdcd/0B8rgEuHhDA9SQTknc/V88OMMthe3T5dWwwesT0DU4fPbn9Be6QWU+SNrBESmB64UpreCeodvh9pnfe0xerYWMplELlHM1yRtCCDQp2iDXK/oRTZne3IX8+OPx1OSKkWzQAVls4PV92CDSS8h1B9yvutiAAAAIAd8tasvTEmFpjaqszB6gkCdTlRHuVshRdrvAE8NBg9n0EzN3GdIyzJMmMAtTb0oJZZ3KGnKZic/gGGbqEY48PMbd9/WpWTf8SJz1ccpt3EQMbvLBJUwsJQ8ObBYhVe3SIwucwsZguIiPNdHIje+g1fc1DQHd5ALt3ljAYCPW+Yug== claudio@claudio-desktop
michele_artini: ssh-dss AAAAB3NzaC1kc3MAAAEBAKYA5eODSPDAcAhTqXQQP5mzPmLfS7J929Ncl5eqTj6KsjayfNnKsKDPzXGD/YGEGTP82VQuBzk42c1WLoUi5GnB5kZUWfdrKJLbr4JXcVUYnNNIwWIc1L9YunmbFN2zllHUXHrKn6EeKjR6H5xT0KPOX17MUa462jA2FLvesaqOoomm/AeNBF1UkCx0mRJEAMGq+I3xSBQJVhUOFmRJ7n6b4X0E3GXxtpkAwHiBiHX1GtNh2gMeTkIBHeZrS90l7DOumM8Y5KOP+fBd5scxodHKG2p+t3gwzwU2RoF5Hq8OvT4B3Dr5qPZKBIrB6kh6/5rLv8O0Lbky2aeoiYaIR0sAAAAVAO1EMaAsE93IDppRLlV+EjNn/4HbAAABAQCDPZHdR+uG0jfsed/ONPzecBDAJ4qS99D50hqrmQIRtsuhwo9KFsJ1cVjgSYjToqg8XuPZaO26E1riHnFAGoExQFNdev++kGtMfT3sxHOLwDd19fA3KNftFY0oqzDkLuD4D1+8gWk7WmTk8M5O5McFuuAr5TmXFdFNT49/6Z+XOuIQxyuEq9kJxSbO+dag4699lm3ZadSq6SEC2u0WAgyaIYUorYPJyYETSvUpsBtv37+oGbbz7dfbZ5pnmYi70BFiC2G7fA79shn0X/+Gk2Wp7RTDP/OB++RZFcrjHFQtvETdGSviq2Lxl1C7zp61qAmd0TZJBZ19k29nXIrILEnJAAABAFRqkJyVwZerL+E2jbF5LP89NW9HsjrBOEBekohR5zQY1KUPDirbReaGdf5hM6tvRxQCjlD+VMNq2VFRDC+RqOot5+KIyCaom4sXeYZiJBWa1Zx5YLbUZnYBGIpsa+IICA4drYwInGUN2EhClPwfDvZzFhd422kZFjiLYNM1HQ9f5TbKf1cPLSE/OitxU6+/NrCbMaRO2QPrjAB2EQG2s3DB9qfMBPg/Re7DyGJgMBn6KUXZ6JRvVssASvF7WsRaf5zRpug335CymndSS4fvQY74XJiVtB4vqDZle+WhXut8jvZ1Zl525fZZg9smZ2anqVWGGRxael7hjvlwYXbazkw= michele@pc-artini
andrea_dellamico: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDZ9n6B+J5S7NPnwjejPC2WrvcRzC07WPnAoQ7ZHZ0Mv9JakyWItswzI3Drz/zI0mCamyuye+9dWz9v/ZRwUfBobVyXuptRaZIwxlMC/KsTZofpp3RHOBTteZ4/VM0VhEeiOHu+GuzNE0fRB2gsusWeMMae2cq4TjVAOMcQmJX496L703Smc14gFrP8y/P9jbC5HquuVnPR29PsW4mHidPmjdKkO7QmDfFAj44pEUGeInYOJe708C03NCpsjHw8AVdAJ6Pf16EOdDH+z8D6CByVO3s8UT0HJ85BRoIy6254/hmYLzyd/eRnCXHS/dke+ivrlA3XxG4+DmqjuJR/Jpfx adellam@semovente
sandro_labruzzo: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQC+PFOSF+U9pvWTH/9TYZer3oDvTU2q6wVPs0dvgYc9Ak1Wdzmq4Dj9nyeLBW3G1i5ddqFrr/QSjIroX2/y8Z8Dq+OZLRpBhSyLF9bV0jKbytJJYhkzIJHgE/ITTdbNQVZstjPZ0D4c/0lrbMwiiwsKWRqphmvMKFmgkO4M4w1qm8B3UYPHF3lZfw+vm+rgVv+FiOltgsRm+LU0IszeiiOd1WgPWUVYixFnNUVzDkXRDatO5//M1XMHM/PoontgnsCP2j9kxIptYgguiNZUIeMUFljw3SbV84NrVUSpL6/fzmvsEv05rkRT0+P8oPYIhxO1alKr99H9ADg7pU36rWaN sandro@sandro-pc
hadoop_test_cluster: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDi7O89HLqa3HMEkmCVF6/V/IWw8G8eaKWOOzDsLtQAFFti9rWHckyCSxNhtYuuiGLhn5Mad0E7JaguexU5j+Rm9Vu30ducF6DefJsOqQ5TfQhzN60w5f+y59BqWDSHBBawEhfuS2B5qj9iL76w8ZgMsqS+6WXiT792F9DoelYfKBODQi8/AE5C93iQiYyyFIrvy37KUfvBlzjSkNNHb5A36PlHmQBZD3WhROaZfjUfXifFzOSs9bERazttXG8HeElt7zbE40OSse2HG3y34gB+TvGIYbd3scQUiL5dEWt4cDSDBrEU6b1rG04uZgkscxCFwTDxPrHUVXS0ou03N4nr Hadoop test
tommaso_piccioli: ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAQEAzcHuDU7PgJwz34AsVG0E2+ZRx17ZKW1uDEGABNk3Z60/c9LTwWKPj6kcIRy6RzFJI5X+IgPJnYouXVmJsIWjVL8IRk8fP1ffJC6Fyf6H7+fCxu/Wwed5OoOCvKeZ0bEmJ1tlXFM6+EnxKqLCvz3fsNy8e4WKMnpS1hT8K6YB7PMjt60S3wOaxds1Lv4NmmgnfGM5uZFYrZCx1/GJCzNSh7AEEEUIVQ1B8xmXbet7whNiwDmiOnXSlt38dkIYT8kNMuRCj/r9wPr7FmoUCOFzUVXTcnuYagKyURrZ8QDyHbK6XQLYXgvCz/lWoErGFbDqpmBHHyvKSeLPxYfJpWJ70w== tom@tom
backup_agent: ssh-dss AAAAB3NzaC1kc3MAAACBANBn5i7oJd12+GAeDVSAiPqCxcCDzWe41g3Vy/LhbYKwG0smPNJRfvyf7lKWkgolJfMJZrk7bBVhJoApkV7vkFkrSPueyRC+/ohjafpOsmxRYiOaSrDZ2c9TbGFVZTh23pUXoDPp2Z0N8l471b9Mx/nqgtflCV+IVICcDZbUhcCTAAAAFQC+fmfljTFllCMKsgrSJcQAtiIT/QAAAIEAvrsLfmQzHQjt4G5FhcPVbvP87KUsDh0xksCfMRP6bQBz/3mcnt7V5/MLll/CZMiOWjRK3ww9zCYHprUwQtAZSllFWiGUKw1tDvf1ZQGESYP/vvWwcpPZpVsRHlhRtuMsQchSRxw03yYOqEEa2akWzQlvaZ4CWWym931mZg6zY4AAAACAG/l8dU/QEMK1JP3rDV0kZYvcxjUC9Mxw5ScTyVqVnxDL75ssX9HiQamsiTk0dYNyl8qkB38FfkB4LhEb8FkHs4toN+nTNPPlLqhpYMs+anwyNy32LnXAVP02VJ2+3exwGe0b5vtIFpj+j8s7YZMHN5x6d4xhZ9oq5M2pJN6M48E= root@dlibbackup
monja_dariva: ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAQEAuQJvgDc8lQB+EArajGPEirRuYxGcInfiM3uRS0P5Dhqch6cuNdMFFjCoQVFL2Dvs7QNSRm8mvnPLWOCYLEFPBdXlA63w+n3VWoVOs0lUgQM77/axetd/K8BCkJlcA/exvVxLtzc5k8hN1k3OJY/Npi2Xa4WyEMV6t7+vYK3MXPjFBy4Y/aLWZvHcCn0zUbeB8T8PJ2S8taCIOMzemUzjGs3c0f4y6oaJx1gPw31PCahkaVS4ZLSt+0y3DRaGiXjyzgbQPf1whBOT4SSiX3SgdMvxA/Fzz2sSAn9PNfKq+/vygn7qDB79qzBhOXs36dPuwmsqggxIZasGUT/YfRp5Cw== monja@pc-monja
andrea_manzi: ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAQEAoCquwjgvRQXrHJ7sjY7/mFv0hEev4dljYKYz3Rf9r1rExQ6zku4tCvLkwmc+1U4ui2GCMQ70Hp1BbVdU01WVdAb6ESLAqk4m2NFiNxSsxerEyyOgnCvTA+Pcb1beVHgEm1/IA+6MgVPg71nE2OETpaoDNBGn+AmCdLqC67lXM9KlEaoLFFGY8ZbwJifWdidH/fk3rQojnGhxnFOidVu8QeV+b5kNTyVA2CUbCZCFZANIs/ZrDOmP5nmtA35vkIRU0OV6iBeJmcYsMwXmh8kiR6KoKVcH7gMMxTpBr/wjvdak7BeiZirP9poKE7XBiyHeatqQgEUOALsolkCYk8YJUw== andrea.manzi@shell.research-infrastructures.eu
antonis_lempesis: ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAQEA8nr14q0s/8V9Nv3bz7xCk9FwKbtN21qx33PDTUS/NjwHX/AQIE1ZFbepPOnzLuPy8LUtzrEI+cEMDjn37CLiZWjnZkPOaIV7ELUBvwIk6JBe6iXSq93atYJWxQQsPuc1uoAFWLayxExMRl+P0UQCP7pQmTg4v8U4VflCp0LLBglgBl5glIiw2fLAfc+JawefWGnp92djuvqii8zm5nUmgJ+5DjbSD0rMO+vYXme5ig6v6b2YFG0cUHiNk8evM6M+OWmtz1uzP6kfQ4SjCNpzib6Rub8hgPlkJH/z4S+7lF1e6uwohQyicwu6hfTfIL+IRRCrNTGtzcDmk405/nIETQ== antonislebesis@ekton.lan
alessia_bardi: ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAQEAvfQoH4uRROhIUY5VTthAiY0Ga0cbg3smsT366C4Nd3TtU5ciBterRQv0YkvdQ4zS+e3D47PFRAuEyJEAJMp9+odhmjT6WPLhMYmE42b0qk+WC4uXG7V2rTX+wNvX4HaVHnlPai/6Of85rZ1AKbeMB2LLKMvj0n1HovVg6VbLUfrrxfkcTfgE27mukoRQy4RuZjQRjdJ1o7g4geA05CrFjDOriqwl4WDXWUNSkx2MwtOZ58ZLAVu84ce+RYvzxHC/wZptOx6U35fsoAaK7NPIiwbRbSbQqlAMnQauCLYTvfFKFkqY2JXp9q6lSsW4S5VnEeJjWvO/e7rxOmdbxGzx9w== lexis@lexis02
andrea_mannocci: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCtTV2pjWXgTmX5h9J7VtQbYZ2NoQyZmLKl5gHvBKcX4pgBNYR+OA0620l3I3bTLPzqx93y6N/GIi2ewutyk7n2a5qFAIZxhrQYR5rSQn07apTDSh9CKyAyy6baM/jQmZN4ba6ObHIFdtIPHyY0Z/2ni6ohWXuOPIC+me+/x4R6P5s7y6x4IoMOGcEtn+puJ1gAdMBhkn7IqMAbdMj3WbsBjDAJ2lT8Dbyet8fkW4TENxd0teRW9jGeSP8rtuapnAF6rgcvPn/gk3/0wnBsXjtlBe5VEJTsNXY50RoB+PdkLgT4h6613v2WtR6ZoCEVNLXbsJ2BabrCmntyEEJVdbMJ andrea@pc-andrea
marek_horst: ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAQEA0tbauAEn91q209ek50lv6jeBGsYy+N25XPVE9e173L3oW/NR1DuIXdn/zpHy5sLKpWk2nLkGJxNBdAFlKKxDKzRRZ7aX8qB490o5H4GTGgdxIQtp8x66CvIjMyM4kYLExVb4WVV7yMxCxuClMk6/m0vo3h77VzL08e3uyLoa5FZ3RPbOFb6QvnH4QEoFp/6Hos9mJF2bY/w2DqUrUVgUeAO9k9uilqhv+rwHdsq20g9OXHNlWOOtNtrWq0pn1FU1jCooZsbqLeBcEGlvD/I1FxqLi7x5llpNVfHTmEHoczTmuo0sqAGmSxHWnz3C4KtVTHVqxLS6hSUp55j6DQwPnw==
eri_katsari: ssh-dss AAAAB3NzaC1kc3MAAACBALsZPjtXRreOknW7KAoBCiJ/QqFfHjz2JD0hTl/MQOPTNfn8532F1tpQDqMKz0H1XIljJas7EvrVDMNDO7iX5CbTmfLh2ds8ssPQ/LwH8ArNfsWaWyELVWJExXA9Xizcb8PApWScUEeRgP22ZTgnHYSX7zCQGhSn1Kb4vQ3H8MxbAAAAFQC8PJWnks+PWgqtO7Gb8SOV8oP3YQAAAIEArfQ69en2ZHku2T4FONfhjFLy7AKpq6Rh40KCGTSgowtbyyYVk0aRupqMlVolYwlbeY+/o21EFb1+Wy9nFDNsu1uY/1mESdLs256rRy6VJx8/VvuYg4r7TdSqypOa0QsqzCbExwZR4witez5yMQKZji8kmRKWKRsByFVk2OR0IxkAAACARQlw/skGy5wfkWd41YqsoDhfMLVNLAS2dKnUkLdifUKjymcSHC2WrYq2LfxVxrd9CFFp/yurlQ01v/818GX7nE9zBiRhhFS944Lk05CmInmcDt/J2iGq65bA/7iem9EhXkU+5Up1uYFgdubPEL7Za+Pk+Z9NMdqqjtco9Q0A6v8= eri@eri-duffy
marko_mikulicic: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCYNjCquDDIpGqJgr8DTkRd0Y1ngmrq+FFMb+UnALdm3I1Uch07Z+TAkcrkpr9RdyTjP3mNIUUyI18Z6NgUC2TR4x7wVA9eV0uGWP8BiocXWPjVhQJhtDkXldkP93ylyYlLJ4VQ+xGinKdg7ZA4KTpG6rnjL999AA4W0utj5B8Dj0l/wvp96ONq1ZOTzOc3h0t9NGVQLbXstNakQkPcb5E2hyt4QOOahpZ6TG2is460G5yEgV3xHT/VRJQn0OjKeHnXlDwXs53qwjeNrESMEv4wD2qufgAXKbPGK7+3GReE8VkkhwnEY1/ET4LaTyqg6eIp0mIiScDvBV0/UCNX8c49 mkm
jochen_schirrwagen: ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAQEAqVJeLtXaqseUP3cHSIQw+6Piv6s0PmezFbj34oqcN81/JlzmTtpOd8GBX6N8Weo40HbKhlghOl08+3WP2fW3eg9vaST6xCy8BvzLcqb8LPBSlTXa8imAK9AWkR4peFi1zYpIciZpkAwaFtfpdSR/zJip2s61EgWhinUPHs/0PzCCM32P4Yc0qYygb+htv4AthZWChEbHSY7eNrXIOOvyQtUSbpGJ78VCEdlKuy+ehhTxlMOBxcKca1PSWU3jSmzkSxnUotr2IXiRK1bUVZYpXXd7K89EZfPpb3DG1z8UBf9n0obLdI0yvaka8z8l1KxbwuAhN9MyzHITALbniYIHOw== jochen@jochen-laptop ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAQEAo5A+f0wdqoXCGEFBpePV892cq9MswIgK9vmDJ22TdHKQrN5h1sIHeXjxO3vnaktb62evFqZw1kueA0dwQhEA+Kvpc5qN1s+GfIxs4PbNjiNWNVgwrfGK11vlW/LP2GgbfZ7pl+Gxj6Qu65/A2eMf4c9ZjAOnHck6RQSttrfIjR0kLpqEB3o2x8s89vu/P5PG7mN+IsfW9Ow/612m+8ZG84qnVAo36lK9mgEFUToozIHfON14uC8VGTnsN9ff9S98GJkW8Ga3ha9voPwkp794LBHZlQj01Pwm4ZOx+tdOfTNXx06szjswacWXsW4zaTyH9MZP9LumubGG7eOse0y0bw== jochen@jochen-desktop ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAQEAo5A+f0wdqoXCGEFBpePV892cq9MswIgK9vmDJ22TdHKQrN5h1sIHeXjxO3vnaktb62evFqZw1kueA0dwQhEA+Kvpc5qN1s+GfIxs4PbNjiNWNVgwrfGK11vlW/LP2GgbfZ7pl+Gxj6Qu65/A2eMf4c9ZjAOnHck6RQSttrfIjR0kLpqEB3o2x8s89vu/P5PG7mN+IsfW9Ow/612m+8ZG84qnVAo36lK9mgEFUToozIHfON14uC8VGTnsN9ff9S98GJkW8Ga3ha9voPwkp794LBHZlQj01Pwm4ZOx+tdOfTNXx06szjswacWXsW4zaTyH9MZP9LumubGG7eOse0y0bw== jochen@jochen-desktop
old_nikon_gasparis: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCpwiKTTbiaRtuloEgvTRwjDjzrYSjUOUfjZ/o7FlfvtkApA09bSbbtVpMid60TYzf2tK1ie0Y0rCnaQ0wiaSQFqGkw47VsewBOpyJC+pWXz6GLMMJUEY6viDSuUDbn7ADJqak4YscVi2vZCSwWwslA+jBqWimDdE+8hIKNqQQA3klZ1zp84HayUdJY4jt3nbpQkOpVUdE/1cggVdq523hF2u+mjyR3ctILVyyPArxPInYILZxhaS8AvX8ZPADIE5Ki0zowC2UsvbZZzauJzJQ/KuK1tvZVD2AaEg+06Kj1RWWxIlYgXpO+XYGoYEViPMHUdf1h+zt+t6UxXshWPeWd nikonas@di.uoa.gr
nikon_gasparis: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQC3b+t/2RQjw8d07zV30tD0qysEFNTeeAsFqazdrvPa+bbm6wZ75Gkka4+wWmVZdd56gIh4yx4L4avnnzeQfUTREgrhNmlHRPdVB5rpJNa/3bQ+J/O3SpyRcGawPKNJWlhwCWaILag0lm3O+4ukuzN2WXFxHGyiiz0FLPXS7Yps2k3OZVHPx7GhGkr+K26c3oELR/yTCCgQxrZwMpy9xOLhXgPZRlzj4Y/KQBgRojbhhrFmmKe3k7g8u2Kb/oSDl5+kSOWzV7qrvHkHDUc2K1bp+lrG6L8QNLivZzOVQ/VeBBGGRhSL5D2JdC4T7+q89dsmPQM6Zu3lWBKQk/Jw/1gZ nikonas@mpagasas-I2
roberto_cirillo: ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAQEAvkwppFE+K5MjKqtkGJN63wkcwaqZG4HkgPqMSWrXmCfDPJ3FxjDHV9aQRJYVKZObc9+SsFc9IYXwB2A8FI0XwPkCH2hfFKDVNO4TktO/SrM+4tXbEfEDWX/PduBQLootYaMEVj++p2+s/mxVnxTAMzsR4txC9tkWR4JO4VJ2cpZfM8po4p1wA4YteW6Oiv0PqUEsLtPtBHGuCgovo8WS+qxcxpeBBnewEssgis2dzDSqx5HUmaOETAxxEHflapHWQLum0JjvXsG5jlf9jL44XJPkcHXAYk3gnhtyM0moJpUya+GX7+ttfWWvwxs0tYNDXNMRn91r1hMLWmas4D+T/Q== rcirillo@rcirillo-cnr
fabio_sinibaldi: ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAQEArNhKFcJ6T08sn7kTTLf+rO9HEvgOvqfhv5HQ2sRf2tFYfjfCb0zHKnMkgW+sy5gMU10Lyx1r7juXCvqRC955uIM97m1B1Xc6sVqASVKuGPhCKfhxEaMAyBcWFdE+HYbCOPYVN+JMrcwWfbblwiZTtK1OCqaEUvDDI7cFeU68noXwggEp46T48eqMUdi541D9Y+BVx9HYAo6OCQz0+6eXwxJL+tpRcAAXIMMWv362CYHoOgIU45R7xVSMLY1k/HLrcEAblwxEaSpduCH5cWUXZE/56IyxpvP44BxZkVhNdqJLmg4hxBQWhoMNYiTZxbLay3W2TwBCM111cAtUx4M/jQ== fabio@pc-fabio
gianpaolo_coro: ssh-rsa AAAAB3NzaC1yc2EAAAABJQAAAIEAkLUsStIPUVZVWiHyiI2poDnB70CjOJttbFLc5hBd6ViomiFil9u9q5Q0M1JBFFSv8Yfl1Rmc9zOh/52lJolxPGn8r22uGgDHVv71IJ04nS5KaRGIbv2WoZbYBc85oyZk5Fv/emY9Ace/t8icgDl5xJddeLfK6rTU64MZ7NGycIc= coro@coro-PC
katerina_iatropoulou_old: ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAIEA29WTITAKDhIE4lYt41hEtL3TnE+bIrlZAdAzSKySHOXPI8Q1vxanvprnL8BU0okgfZJDx3qxcTWLbwpcdWvGbO2SIA8JSKl2viQqfYDc5VtWFd4xo5z9y5BRrNDOOel+XAZjamx8lv8c44Au0ACV+jCAhnzwJA4Iso1KuNsuj2M= kiatrop@rudie
katerina_iatropoulou: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQC/gQ8huY5CSl7cGPiNE8OdNvlE4A0lXe08gSiEKIYVh9qz57ALoZLSP3To4cKIhfmFssSAewu/0A0IX9llOGlsOVkC4aGOlO03l0mAiVS7bVQd+5S51Gh+ijWsJjSg4bLoRINn1NkNNZ8J8GK2+vBGqxB25LcG6giRdPs2/jb5UHd9tqqrPdO/rJWV4OrTDkevYb2qfnubuvZgrf+C9bD1l0Xnklr2zY0R6RCkSpmVhQfwpXU0KGb9pW7oJS897XB7GCawKfufOdmYqyjG3o9nMi5+cIVNKfhT14wSv6D1FUQIIQnzJPE22SBmWIzkS4ovGP2cRObVTcIRwO5U4H8x kiatrop@rudie
farah_karim: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCzKSQSk3ntKGUW2Cy8lt/44BTK2+UxMM4W2XO4CrcwgUxxlgIfpL4UjyuSKIygRdU/lL/4xHJdRNzA7PSEiHnBhIeLiF9QWw1mO2GVdJ4/1G5J/XEZ3sL7zyEdwwks7FsnT4U9PO9drNDZ1AmIK8eDKtX9EJcOFflulOknbIHjIq29gXcXbrhQaV3rNHS8vGDkv3fkpJT9Wi8BEUMeMFYsa3k3pc3nPysCQR+xsVJ1Ht+1gpU71W7fACaI1ltYaCToPAJasU19Tz6xE3edl9/Dz6HIL5FcVNSbLFEiyQhd5oL1ITCXJOwzyqobrUUdRK/30iIBRRFW00AIGQCDV0S3 hadoop@karim-ThinkPad-S1-Yoga
luca_frosini: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDlTQulSJFayTJyOOecgsct35u7uvVQGX/Da11UZVxvJzw2sQKOMSCMBBGF9zUlcMoP/qvF425jVMM71S8kamCcqgSN528fp9W/Nhw7s15NbCE3H9tJ3B+u5ESOYsRfgogeTIyL26aIY/2rke0DoKDIMU3YlOtN/1ipt5cY9uV3ootxTM126y2WChICGo0h77M/Ta1pIccUE0XbuaA1HwlJBkfDzQ2kh5tkaC7mjeETstOQzpEoPFoVr0qwSPz1Y6l8uiedpDZejrq64Z2zRcSxjEQ1wuA9r8uO7TJQttUKK8m/dHMe6q3WAiFc9sOYe4tf/GEmziB8VloMTNCPJQiz lucafrosini@pc-frosini
francesco_mangiacrapa: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDa0NzwaCcauxAFlsupU2xG2eff9nzep9bnb8pISbX2lk+K4yoJvJOAz9W9klJtpPX/IUJx18YR4jjDNcdiYWNh4Y+5jKT2EhSPNkj7Vw2MhA/ZeOrfHx7JNtL8gdxa8XxYB0ZoZqutRppmaRwWmGGwdVh0wyUzWR/v0OT01IuQGYVneLKIjUtx+BcWGsosWISaOQzVbv9iTFbSwgjbkKFHzHasxwKsrK4t1wvbzuxwhVC+5/VKghBJWN219m/PO+itww/fSes0KpI5X/7q8jrYzUgYwrKwt290U41Fx8syDQ6101YnRzMXZRyZwuVNh2S7WosGWebg5nPS4IjKho/F francesco-mangiacrapa@ubuntu-francesco-i24
lucia_vadicamo: ssh-rsa AAAAB3NzaC1yc2EAAAABJQAAAgEAqR/WChJL0M/EOQ1Jg7x7H5dmgb9jb/Rs+ZsWFdEvIlbNzZPUhTofvazHHLObR/RtLl4+9jjG83bNJHOTysrtY0c4BOcgBRFm3HRAr6TTU7bpoC0bleycKWmgXbP3nVfRgwd0N0tlmsDMBopdfZ7BwUH3SARQ8ssWbi1ahTP6IiYE6oCOxzDhXHRRGIdHhcRbE6vFaN+BTQKX/1zfFimPnuhiQUyZwX1KtBNnjxbxaNTIUbyDUxmyLo3S66EnNqXD6n4jDDWXJb0HkI4eDTtGFxaF02PSzXr8X3ZTWDBX+/YkzaWeesvtubYa0QlZ19D2+WJZJi0SmPDf7XifPtq0iu2UB4DFPQeRGAIctxmxTSMQMhngblSdSY6R+ZXG8yxtd/b3iNQaD3s8xkjYeXouno6djnJyIA/Wu2Asn6zLJC5qyVTTNUq5QNMIoNoX4Eq74eri2yzieQGRegDNie5NxjiRyo4kdHV3gihRDm53MCUL1aKz0SijmSfNeaP8weO77qKXaYNbuiEJYtU2io14mOHYHfMlvFf059QcqxQUAEOWl1YrELExHJmNJKjl/2/nCq6Ns4JMDEHicisjTfE5GAZHJVFgKRVroRzySHfxRTBXXyPDPzjN9aZy7HKSFiUXnKKS3jrVsbQ5xKdtOOzo6Uy3mIakTQ/JWDbzQ0fMwoE= lucia.vadicamo@isti.cnr.it
sahar_vahdati_old: ssh-rsa AAAAB3NzaC1yc2EAAAABJQAAAIB38nRuOy6g0UEkYLZ5v+VGQIbZAFjylEtbmZJAN3OMm+wcgoCTIBvytZ6Ajp8ZTT1tTqo2rsAVb8O5pv08Qaunl5VBfvEUyqNdYX9SY1kB5PzKtBZBbkkUI4AE7BNJKKuki0nYvOHP5p07FdobC2OjILGxci4zn37X+CGEykNrXQ== rsa-key-20150605
sahar_vahdati: ssh-dss AAAAB3NzaC1kc3MAAAEBAMAfb7STRygwnvobeoYs+znGDFSauwFJ53SGiqxWvws7VO84JLCGPrInrnFYhU6eJAWd7W24ebQhBLqEKprJ85+j068F8kBL3EoR3yyS47jHeM9nZtQEoPuPJIdQotKEUcsEB0qXsdxrK/g2xOwEE/QTvxHHoHdrlrV5i8nL2iRJZTLn1OdoyHTUJMX778RJuqsApY9duyi6Sx7YshF4uFqNiarrEUu0ldG2K8akBwQEvDBJuXsDKD5GJmRzBbqDX8xswTORelvcVtDk/TD0wMMKudBNQfktTPXATBCx6oPQ3gzBlLDF4KrnwKZ+I75c6/Q+AIz3OMM8vrcB6JMLk0MAAAAVAPKLs+YuP5ulRX484PevayNHavKJAAABAFcjNAQ1KxUKaNBeDMtNj8WWkMyx02HUPWf8ztKetTyvavK4ILTrQAwsgvH3dmOMSnm4ckWMSxQ/v+zbU/mKNddyNo7BJqRT4rKbQUvp5Mg5E+PkZNZaiTu9C8rLIa1JbUoEyssqLAlFbIviJlwpLgaf+jY7ZCJso7kCYRWkcXMaEnNvqCd5u8IAGBZijI/L9TtAIyjgYoh4pYdAPWjYTjH+nH9xpIuN7KQEVq1ba/WyAe9xVNPta+fnuHiUHbUpNaExhIs4pskfCI5EuBBgxtixkSPssZaNFlWXx2rwFLnfvnLxeG9t7qbXs5LPoo0x0miq/eo+jgIHel9uEvN/BNYAAAEAQ/qXwtXcw1aA7PoKOTOmwaproFmcnu/7unEEu16/G4F2t76kz4CwehGgq19MnbgfzBL64qfs9A5UxI4HRJ6e5/Ik1a1dv/tSVSgA+rKJWeCZr1cTg5Y/u7OAk/mik0nL7r7TraofYvGAWl7ckYeN/28wv5TWSNB6CkPix69DgLvapjU5RG+7DPhzINc5MF75MjFRTnc5eAeC2wv2+3MzGzm78+i6UPpwd7Jj/BKTvtj0XinHJj+QNhkVtH6lAYDnAJNrQXpiGCKScVs1YCNbF9xHtBN1wlU99k+FdjLVsef3L348c3QWTVloXoh+HC0eNwt8QvLUyZLGyaCAy0ifvw== dsa-key-20150709
christoph_lange: ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAQEAvFxHqgmIkBfdyxRCMGhj2R+Bj05EBB7DlBrlKy6eM3K3EnPP+0dlMW+KhGwcu5sHFjyPtdngEO8AX1TQCUgifhd9++fBVAfUfKU5+dUqqyFFeQjQMqbf7pzWCJ9JjQ5tk1If9IzgBe/50ro0SCqIbod3FogSe4RZqQV1P0znxaHt4ngJSRYnRK+6gniMuT+SlcKgjDM8v8RP4ELWvE0ibduUGoyCEzmmroXgymcL7tpqHTdfo8o3mbcwqRGmCHEplQttFG57PwkJlcQvhKuJHo/Sgcyx2WuEFL/vZMFnuXhaNFg7I1UIO9bNwsLjsbnR9FEK9rjwwl8dKQHDh5R1zQ== clange@BACH
# Use the list when you want to give access to non root users
ssh_users_list:
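
A minimal sketch of how a calling playbook might populate the list, assuming the role accepts a plain list of the key variables defined above (the exact structure expected by the pubkeys tasks is not shown in this hunk, so treat the layout as an assumption):

# Hypothetical group_vars: grant two of the keys defined above to the
# non-root users managed by the role.
ssh_users_list:
  - '{{ roberto_cirillo }}'
  - '{{ gianpaolo_coro }}'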

View File

@ -1,8 +1,8 @@
 ---
-dependencies:
-  - role: '../../library/roles/oracle-jdk'
-  - role: '../../library/roles/apache'
-  - role: '../../library/roles/tomcat'
-    when: tomcat_m_instances is not defined
+#dependencies:
+#  - role: '../../library/roles/oracle-jdk'
+#  - role: '../../library/roles/apache'
+#  - role: '../../library/roles/tomcat'
+#    when: tomcat_m_instances is not defined
 # - role: '../../library/roles/tomcat-multiple-instances'
 #   when: tomcat_m_instances
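
With the meta dependencies commented out, the calling playbook now has to pull in the library roles explicitly. A minimal sketch, assuming a placeholder host group name:

# Hypothetical playbook: list the roles that meta/main.yml no longer declares.
- hosts: dnet_servers
  roles:
    - '../../library/roles/oracle-jdk'
    - '../../library/roles/apache'
    - { role: '../../library/roles/tomcat', when: tomcat_m_instances is not defined }
    - { role: '../../library/roles/tomcat-multiple-instances', when: tomcat_m_instances is defined }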

View File

@ -1,23 +1,8 @@
 ---
-- name: Install the apache proxy modules needed for tomcat
-  file: src=/etc/apache2/mods-available/{{ item }} dest=/etc/apache2/mods-enabled/{{ item }} state=link
-  with_items:
-    - proxy.load
-    - proxy_http.load
-    - proxy_ajp.load
-  notify: apache2 reload
-  tags:
-    - apache
-    - dnet
 - name: Ensure that the jre/lib/endorsed exists
   file: dest={{ jdk_java_home }}/jre/lib/endorsed state=directory owner=root group=root mode=0755
-  tags:
-    - apache
-    - dnet
+  tags: apache
 - name: Install the xercesImpl.jar needed by the dnet applications
   copy: src=xercesImpl.jar dest={{ jdk_java_home }}/jre/lib/endorsed/xercesImpl.jar owner=root group=root mode=0644
-  tags:
-    - apache
-    - dnet
+  tags: apache

View File

@ -49,5 +49,5 @@ tomcat_m_jmx_localhost_only: False
 # This is only an example. Insert a line for each tomcat instance. 'app_contexts' can be used to automatically configure apache or nginx virtualhost http/ajp proxy
 #
 #tomcat_m_instances:
-# - { http_enabled: True, http_port: '8180', http_address: '0.0.0.0', ajp_enabled: False, ajp_port: '8109', ajp_address: '127.0.0.1', restart_timeout: '{{ tomcat_m_restart_timeout }}', shutdown_port: '8105', java_home: '{{ jdk_java_home }}', user: '{{ tomcat_m_default_user }}', user_home: '{{ tomcat_m_instances_base_path }}', user_shell: '{{ tomcat_m_default_user_shell }}', instance_path: '{{ tomcat_m_instances_base_path }}/8180', max_threads: '{{ tomcat_m_max_threads }}', autodeploy: '{{ tomcat_m_webapps_autodeploy }}', unpack: '{{ tomcat_m_webapps_unpack }}',default_conf: True, java_opts: '{{ tomcat_m_java_opts }}', java_gc_opts: '{{ tomcat_m_java_gc_opts }}', other_java_opts: '{{ tomcat_m_other_java_opts }}', jmx_enabled: '{{ tomcat_m_jmx_enabled }}', jmx_auth_enabled: '{{ tomcat_m_jmx_auth_enabled }}', jmx_auth_dir: '{{ tomcat_m_instances_base_path }}/8180/conf', jmx_port: '8182', jmx_monitorpass: '{{ set_in_a_vault_file }}', jmx_controlpass: '{{ set_in_a_vault_file }}', remote_debugging: '{{ tomcat_m_enable_remote_debugging }}', remote_debugging_port: '8100', access_log_enabled: True, log_rotation_freq: daily, log_retain: 30, allowed_hosts: [ 'xxx.xxx.xxx.xxx/32', 'yyy.yyy.yyy.yyy/32' ], app_contexts: [ 'app1', 'app2' ] }
+# - { http_enabled: True, http_port: '8180', http_address: '0.0.0.0', ajp_enabled: False, ajp_port: '8109', ajp_address: '127.0.0.1', restart_timeout: '{{ tomcat_m_restart_timeout }}', shutdown_port: '8105', java_home: '{{ jdk_java_home }}', user: '{{ tomcat_m_default_user }}', user_home: '{{ tomcat_m_instances_base_path }}', user_shell: '{{ tomcat_m_default_user_shell }}', instance_path: '{{ tomcat_m_instances_base_path }}/8180', max_threads: '{{ tomcat_m_max_threads }}', autodeploy: '{{ tomcat_m_webapps_autodeploy }}', unpack: '{{ tomcat_m_webapps_unpack }}', install_server_xml: True, default_conf: True, java_opts: '{{ tomcat_m_java_opts }}', java_gc_opts: '{{ tomcat_m_java_gc_opts }}', other_java_opts: '{{ tomcat_m_other_java_opts }}', jmx_enabled: '{{ tomcat_m_jmx_enabled }}', jmx_auth_enabled: '{{ tomcat_m_jmx_auth_enabled }}', jmx_auth_dir: '{{ tomcat_m_instances_base_path }}/8180/conf', jmx_port: '8182', jmx_monitorpass: '{{ set_in_a_vault_file }}', jmx_controlpass: '{{ set_in_a_vault_file }}', remote_debugging: '{{ tomcat_m_enable_remote_debugging }}', remote_debugging_port: '8100', access_log_enabled: True, log_rotation_freq: daily, log_retain: 30, allowed_hosts: [ 'xxx.xxx.xxx.xxx/32', 'yyy.yyy.yyy.yyy/32' ], app_contexts: [ 'app1', 'app2' ] }

View File

@ -16,7 +16,11 @@
 limitations under the License.
 -->
+{% if item.shutdown_port == '-1' %}
+<Server port="{{ item.shutdown_port }}" shutdown="SHUTDOWN_PORT_DISABLED">
+{% else %}
 <Server port="{{ item.shutdown_port }}" shutdown="{{ tomcat_m_shutdown_pwd }}">
+{% endif %}
 <Listener className="org.apache.catalina.core.JasperListener" />
 <!-- Prevent memory leaks due to use of particular java/javax APIs-->
 <Listener className="org.apache.catalina.core.JreMemoryLeakPreventionListener" />
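
Tomcat treats a Server port of -1 as "shutdown command port disabled", which is what the new branch covers. A minimal sketch of an instance entry using it, showing only the relevant keys (a real entry also needs the other keys from the commented example in the defaults):

# Hypothetical fragment of a tomcat_m_instances item with the shutdown listener disabled.
tomcat_m_instances:
  - { http_enabled: True, http_port: '8180', shutdown_port: '-1', user: '{{ tomcat_m_default_user }}' }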

View File

@ -16,6 +16,7 @@ tomcat_java_opts: "-Xms{{ tomcat_min_heap_size }} -Xmx{{ tomcat_heap_size }} -XX
 tomcat_java_gc_opts: "-XX:+UseConcMarkSweepGC"
 #tomcat_other_java_opts: "-Djsse.enableSNIExtension=false"
 tomcat_other_java_opts: ""
+tomcat_install_server_xml: True
 tomcat_install_default_conf: True
 tomcat_load_additional_default_conf: True
 tomcat_http_enabled: True
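
Splitting tomcat_install_server_xml from tomcat_install_default_conf means a host that ships its own server.xml can still receive the other default configuration files. A minimal sketch of such an override in host or group vars:

# Hypothetical host_vars: keep the default conf files but never overwrite
# a hand-maintained server.xml.
tomcat_install_default_conf: True
tomcat_install_server_xml: False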

View File

@ -17,7 +17,7 @@
 - name: Configure tomcat server.xml
   template: src=tomcat-server.xml.j2 dest={{ tomcat_conf_dir }}/server.xml
-  when: tomcat_install_default_conf
+  when: tomcat_install_server_xml
   notify: tomcat restart
   tags: tomcat

View File

@ -5,6 +5,7 @@
 use_apt_proxy: False
 apt_proxy_url: "http://apt.research-infrastructures.eu:9999"
+pkg_state: installed
 common_packages:
   - acl
   - zile
@ -23,6 +24,12 @@ common_packages:
   - tree
   - bind9-host
   - bash-completion
+  - sudo
+# Set this variable in your playbook
+# additional_packages:
+#  - pkg1
+#  - pkg2
 # Unattended upgrades
 unatt_allowed_origins:
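
Together with pkg_state, the commented additional_packages hook lets a playbook extend the common package set without touching this role. A minimal sketch of group vars using both (package names are placeholders):

# Hypothetical group_vars: track the latest package versions and add a
# couple of extra packages on top of common_packages.
pkg_state: latest
additional_packages:
  - htop
  - git
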
@ -81,6 +88,12 @@ configure_munin: False
 # Manage the root ssh keys
 manage_root_ssh_keys: False
+install_additional_ca_certs: False
+additional_ca_dest_dir: /usr/local/share/ca-certificates
+# IMPORTANT: the destination file extension must be .crt
+#x509_additional_ca_certs:
+# - { url: "https://security.fi.infn.it/CA/mgt/INFNCA.pem", dest_file: '{{ additional_ca_dest_dir }}/infn-ca.crt' }
 #
 # debian/ubuntu distributions controllers
 #
@ -90,6 +103,8 @@ has_htop: "'{{ ansible_distribution }}' == 'Ubuntu' and ({{ ansible_distribution
 has_apt: "('{{ ansible_distribution }}' == 'Debian' or '{{ ansible_distribution }}' == 'Ubuntu') and '{{ ansible_distribution_version }}' != 'lenny/sid' and '{{ ansible_lsb['major_release'] }}' >= 5"
+has_fail2ban: "(('{{ ansible_distribution }}' == 'Ubuntu') and ({{ ansible_distribution_major_version }} >= 14)) or (('{{ ansible_distribution }}' == 'Debian') and ({{ ansible_lsb['major_release'] }} >= 8))"
 is_debian: "'{{ ansible_distribution }}' == 'Debian'"
 is_debian8: "'{{ ansible_distribution_release }}' == 'jessie'"
 is_debian7: "'{{ ansible_distribution_release }}' == 'wheezy'"
@ -97,8 +112,8 @@ is_debian6: "('{{ ansible_distribution }}' == 'Debian' and {{ ansible_lsb['major
 is_debian5: "'{{ ansible_distribution }}' == 'Debian' and '{{ ansible_distribution_version }}' != 'lenny/sid' and {{ ansible_lsb['major_release'] }} == 5"
 is_debian4: "'{{ ansible_distribution }}' == 'Debian' and '{{ ansible_distribution_version }}' != 'lenny/sid' and {{ ansible_lsb['major_release'] }} == 4"
 is_not_debian6: "'{{ ansible_distribution }}' == 'Debian' and '{{ ansible_distribution_version }}' != 'lenny/sid' and {{ ansible_lsb['major_release'] }} != 6"
-is_debian_7_or_older: "'{{ ansible_distribution }}' == 'Debian' and '{{ ansible_distribution_version }}' != 'lenny/sid' and {{ ansible_distribution_major_version }} <= 7"
+is_debian_7_or_older: "'{{ ansible_distribution }}' == 'Debian' and {{ ansible_distribution_major_version }} <= 7"
-is_debian_less_than6: "'{{ ansible_distribution }}' == 'Debian' and '{{ ansible_distribution_version }}' != 'lenny/sid' and {{ ansible_lsb['major_release'] }} < 6"
+is_debian_less_than6: "'{{ ansible_distribution }}' == 'Debian' and '{{ ansible_distribution_version }}' != 'lenny/sid' and {{ ansible_distribution_major_version }} < 6"
 is_not_debian_less_than_6: "('{{ ansible_distribution }}' != 'Debian') or (('{{ ansible_distribution }}' == 'Debian' or '{{ ansible_distribution }}' == 'Ubuntu') and '{{ ansible_distribution_version }}' != 'lenny/sid' and {{ ansible_lsb['major_release'] }} >= 6)"
 is_hardy: "'{{ ansible_distribution_release }}' == 'hardy'"
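
The new has_fail2ban controller added above can then gate fail2ban handling elsewhere; the task below is only a sketch and is not part of this commit:

# Hypothetical task guarded by the new controller variable.
- name: Install fail2ban on distributions that ship a recent enough version
  apt: pkg=fail2ban state={{ pkg_state }}
  when: has_fail2ban
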

View File

@ -18,3 +18,7 @@
 - name: Restart rsyslog
   service: name=rsyslog state=restarted
+- name: Update the CA bundle list
+  shell: update-ca-certificates
+  tags: ca

View File

@ -3,33 +3,21 @@
   apt: pkg={{ item }} state=installed
   with_items:
     - denyhosts
-  when:
-    - is_debian_7_or_older
-    - is_ubuntu_less_than_trusty
   tags: denyhosts
 - name: ensure CM can access the VMs
   action: |
     lineinfile name=/etc/hosts.allow regexp="sshd: 146.48.123.18$" line="sshd: 146.48.123.18"
-  when:
-    - is_debian_7_or_older
-    - is_ubuntu_less_than_trusty
   tags: denyhosts
 - name: ensure Monitoring can connect via ssh
   action: |
     lineinfile name=/etc/hosts.allow regexp="sshd: 146.48.123.23$" line="sshd: 146.48.123.23"
-  when:
-    - is_debian_7_or_older
-    - is_ubuntu_less_than_trusty
   tags: denyhosts
 - name: Set the treshold for root on the denyhosts config file
   lineinfile: |
     name=/etc/denyhosts.conf regexp="^DENY_THRESHOLD_ROOT = " line="DENY_THRESHOLD_ROOT = 5"
-  when:
-    - is_debian_7_or_older
-    - is_ubuntu_less_than_trusty
   notify: Restart denyhosts
   tags: denyhosts

View File

@ -1,6 +1,8 @@
 ---
-- name: Install the INFN CA certificate
-  get_url: url=https://security.fi.infn.it/CA/mgt/INFNCA.pem dest=/etc/ssl/certs/infn-ca.pem
-  tags:
-    - ca
+- name: Install the additional CA certificates
+  get_url: url={{ item.url }} dest={{ item.dest_file }}
+  with_items: x509_additional_ca_certs
+  when: install_additional_ca_certs
+  notify: Update the CA bundle list
+  tags: ca
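
To actually deploy extra CA certificates, a playbook has to enable the feature and provide entries whose dest_file ends in .crt, so that the notified handler's update-ca-certificates run picks them up. A sketch that reuses the commented example from the common defaults:

# Hypothetical playbook vars; the INFN CA URL is the one already referenced in the defaults.
install_additional_ca_certs: True
x509_additional_ca_certs:
  - { url: "https://security.fi.infn.it/CA/mgt/INFNCA.pem", dest_file: '{{ additional_ca_dest_dir }}/infn-ca.crt' }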

View File

@ -3,15 +3,16 @@
 - include: resolvconf.yml
   when: install_resolvconf
 - include: packages.yml
+- include: ntp.yml
 - include: remove-unneeded-pkgs.yml
 - include: manage-ipv6-status.yml
   when: is_not_debian_less_than_6
 - include: disable-ipv6-old-servers.yml
   when: disable_ipv6
 - include: denyhost.yml
-  when:
-    - is_debian_7_or_older
-    - is_ubuntu_less_than_trusty
+  when: is_debian_7_or_older
+- include: denyhost.yml
+  when: is_ubuntu_less_than_trusty
 - include: munin.yml
   when: configure_munin
 - include: pubkeys.yml

View File

@ -0,0 +1,9 @@
---
- name: Install the ntp server
apt: pkg=ntp state={{ pkg_state }}
tags: [ 'packages', 'ntp' ]
- name: Ensure that the ntp server is running
service: name=ntp state=started enabled=yes
tags: [ 'packages', 'ntp' ]

View File

@ -28,66 +28,60 @@
   apt_repository: repo='deb http://http.debian.net/debian-backports squeeze-backports main' state=present
   register: update_apt_cache
   when: is_debian6
-  tags:
-    - squeeze-backports
+  tags: squeeze-backports
-- name: Install the squeeze-lts repository on debian 6
-  apt_repository: repo='deb http://http.debian.net/debian squeeze-lts main contrib non-free' state=present
-  register: update_apt_cache
-  when: is_debian6
-  tags:
-    - squeeze-lts
 - name: Install the backports repository on debian 7
   apt_repository: repo='deb http://http.debian.net/debian wheezy-backports main' state=present
   register: update_apt_cache
   when: is_debian7
-  tags:
-    - wheezy-backports
+  tags: wheezy-backports
 - name: Install the backports repository on debian 8
   apt_repository: repo='deb http://http.debian.net/debian jessie-backports main' state=present
   register: update_apt_cache
   when: is_debian8
-  tags:
-    - wheezy-backports
+  tags: jessie-backports
+# Debian 7 “Wheezy” from February 2016 to May 2018
+# Debian 8 “Jessie“ from May 2018 to April/May 2020
+- name: Install the squeeze-lts repository on debian 6
+  apt_repository: repo='deb http://http.debian.net/debian squeeze-lts main contrib non-free' state=present
+  register: update_apt_cache
+  when: is_debian6
+  tags: squeeze-lts
+# - name: Install the wheezy-lts repository on debian 7
+#   apt_repository: repo='deb http://http.debian.net/debian wheezy-lts main contrib non-free' state=present
+#   register: update_apt_cache
+#   when: is_debian7
+#   tags: wheeze-lts
 - name: apt key for the internal ppa repository
   apt_key: url=http://ppa.research-infrastructures.eu/system/keys/system-archive.asc state=present
   when: is_ubuntu
-  tags:
-    - packages
+  tags: packages
 - name: setup system apt repository
   apt_repository: repo='deb http://ppa.research-infrastructures.eu/system stable main'
   register: update_apt_cache
   when: is_ubuntu
-  tags:
-    - packages
+  tags: packages
 - name: Update the apt cache
   apt: update_cache=yes
   when: update_apt_cache.changed
   ignore_errors: True
-  tags:
-    - packages
+  tags: packages
 - name: install common packages
-  apt: pkg={{ item }} state=installed
+  apt: pkg={{ item }} state={{ pkg_state }}
   when: has_apt
   with_items: common_packages
-  tags:
-    - packages
+  tags: [ 'packages', 'common_pkgs' ]
-- name: Install the ntp server
-  apt: pkg=ntp state=installed
-  tags:
-    - packages
-    - ntp
-- name: Ensure that the ntp server is running
-  service: name=ntp state=started
-  tags:
-    - packages
-    - ntp
+- name: Install additional packages, if any
+  apt: pkg={{ item }} state={{ pkg_state }}
+  with_items: additional_packages
+  when: additional_packages is defined
+  tags: [ 'packages', 'common_pkgs', 'additional_packages' ]

vagrant/defaults/main.yml (new file, 11 lines)
View File

@ -0,0 +1,11 @@
---
vagrant_install: False
vagrant_package_from_site: False
vagrant_site_version: 1.7.4
vagrant_url: 'https://dl.bintray.com/mitchellh/vagrant/vagrant_{{ vagrant_site_version }}_x86_64.deb'
virtualbox_version: 5.0
vagrant_package_list:
- 'linux-headers-{{ ansible_kernel }}'
- 'virtualbox-{{ virtualbox_version }}'

vagrant/tasks/main.yml (new file, 26 lines)
View File

@ -0,0 +1,26 @@
---
- name: Get the package from the vagrant site
get_url: url='{{ vagrant_url }}' dest=/opt/vagrant_{{ vagrant_site_version }}_x86_64.deb
when: vagrant_package_from_site
tags: [ 'vagrant', 'virtualbox' ]
- name: Install the virtualbox repository key
apt_key: url=https://www.virtualbox.org/download/oracle_vbox.asc state=present
when: vagrant_package_from_site
tags: [ 'vagrant', 'virtualbox' ]
- name: Install the virtualbox repository
apt_repository: repo='deb http://download.virtualbox.org/virtualbox/debian {{ ansible_distribution_release }} contrib' state=present update_cache=yes
when: vagrant_package_from_site
tags: [ 'vagrant', 'virtualbox' ]
- name: Install the virtualbox package and vagrant requirements
apt: name={{ item }} state={{ pkg_state }}
with_items: vagrant_package_list
tags: [ 'vagrant', 'virtualbox' ]
- name: Install the package from the vagrant site
apt: deb=/opt/vagrant_{{ vagrant_site_version }}_x86_64.deb
when: vagrant_package_from_site
tags: [ 'vagrant', 'virtualbox' ]
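
The vagrant role is off by default, so a playbook has to opt in. A minimal sketch, assuming a placeholder host group and that vagrant_install is the switch the site playbook checks before applying the role (that check is not visible in this hunk):

# Hypothetical playbook: install VirtualBox from the Oracle repository and
# vagrant from the upstream .deb, as driven by the defaults above.
- hosts: vagrant_hosts
  vars:
    vagrant_install: True
    vagrant_package_from_site: True
  roles:
    - vagrant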