Merge branch 'master' of gitorious.research-infrastructures.eu:infrastructure-management/ansible-playbooks
Merge with files from ckan-d-d4s
commit 939169002a
@@ -1,7 +1,7 @@
---
- name: Manage the cran repository key
  apt_key: id=E084DAB9 keyserver=keyserver.ubuntu.com state={{ r_install_cran_repo }}
  tags: [ 'r_software', 'r_repo' ]
  tags: [ 'r_software', 'r_repo', 'r_repo_key' ]

- name: Manage the cran repository definition
  apt_repository: repo='deb http://cran.rstudio.com/bin/linux/ubuntu {{ ansible_distribution_release }}/' state={{ r_install_cran_repo }} update_cache=yes

@@ -41,7 +41,7 @@
  apt: pkg={{ item }} state={{ r_packages_state }} update_cache=yes force=yes
  with_items: '{{ r_distribution_required_packages | default([]) }}'
  when: r_needs_additional_distro_pkgs
  tags: [ 'r_software', 'r_pkg' ]
  tags: [ 'r_software', 'r_pkg', 'r_deps' ]

- name: Ensure that the R packages sources directory exists
  file: dest={{ r_source_plugins_dest_dir }} state=directory owner=root group=root

@@ -1,2 +1,6 @@
---
ckan_solr_port: 8983
solr_multicore: True
solr_cores:
  - collection1

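These new defaults let the CKAN Solr tasks iterate over an arbitrary list of cores instead of the hardcoded collection1. A minimal group_vars sketch of how a playbook might override them (the core names below are illustrative assumptions, not values from the repository):

# group_vars sketch - core names are illustrative
ckan_solr_port: 8983
solr_multicore: True
solr_cores:
  - ckan_main
  - ckan_harvest
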
@@ -1,12 +1,14 @@
---
- name: Install the solr schema used by CKAN
  file: src=/usr/lib/ckan/default/src/ckan/ckan/config/solr/schema.xml dest={{ tomcat_m_instances_base_path }}/{{ ckan_solr_port }}/solr/data/solr/collection1/conf/schema.xml state=link force=yes
  file: src=/usr/lib/ckan/default/src/ckan/ckan/config/solr/schema.xml dest={{ solr_collections_base_dir }}/{{ item }}/conf/schema.xml state=link force=yes
  with_items: '{{ solr_cores }}'
  when: not ckan_geonetwork_harvester
  notify: Solr Restart
  tags: [ 'ckan', 'solr', 'solr_schema' ]

- name: Install the solr schema used by CKAN, modified with the spatial fields
  copy: src=schema.xml dest={{ tomcat_m_instances_base_path }}/{{ ckan_solr_port }}/solr/data/solr/collection1/conf/schema.xml force=yes
  copy: src=schema.xml dest={{ solr_collections_base_dir }}/{{ item }}/conf/schema.xml force=yes
  with_items: '{{ solr_cores }}'
  when: ckan_geonetwork_harvester
  notify: Solr Restart
  tags: [ 'ckan', 'solr', 'solr_schema' ]

@ -60,7 +60,7 @@ ckanext.spatial.common_map.mapbox.map_id: mapbox.satellite
|
|||
ckanext.spatial.common_map.mapbox.access_token: pk.eyJ1IjoiZDRzY2llbmNlIiwiYSI6ImNpcW1nZjE4MDAwMXNod25rdHJsemRoNTQifQ.YPNkNLb8EzjThpvJl1tg4w
|
||||
|
||||
# Needed to install some CKAN plugins
|
||||
additional_packages:
|
||||
ckan_additional_packages:
|
||||
- git
|
||||
- libxslt1-dev
|
||||
- gcc
|
||||
|
|
|
@ -1,4 +1,10 @@
|
|||
---
|
||||
- name: Install some packages dependencies
|
||||
apt: name={{ item }} state=latest update_cache=yes
|
||||
with_items: '{{ ckan_additional_packages }}'
|
||||
when: ckan_geonetwork_harvester
|
||||
tags: [ 'ckan', 'geonetwork', 'ckan_plugins', 'ckan_pip_deps' ]
|
||||
|
||||
- name: Install some plugins dependencies inside the CKAN virtualenv
|
||||
become: True
|
||||
become_user: '{{ ckan_shell_user }}'
|
||||
|
|
|
@@ -4,6 +4,19 @@ d4science_user_create_home: True
d4science_user_home: '/home/{{ d4science_user }}'
d4science_user_shell: /bin/bash

d4science_sudoers_commands:
  - /etc/init.d/tomcat-instance-*

d4science_tomcat_options_files:
  - '/etc/default/tomcat-instance-{{ item.0.http_port }}'
  - '/etc/default/tomcat-instance-{{ item.0.http_port }}.local'

d4science_manual_tomcat_inst_dir: '{{ d4science_user_home }}/tomcat'
d4science_manual_tomcat_log_dir: '{{ d4science_manual_tomcat_inst_dir }}/logs'
d4science_manual_tomcat_rotate_copies: 15
d4science_manual_tomcat_rotate_access_log: False
d4science_manual_tomcat_access_log: localhost_access.log

d4science_tomcat_start_command:

d4science_tomcat_stop_command:

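The empty d4science_tomcat_start_command and d4science_tomcat_stop_command defaults are consumed by the startContainer.sh/stopContainer.sh templates further down when tomcat_m_instances is not defined. A hedged sketch of how a playbook for a manual tomcat installation might fill them (the paths are illustrative assumptions):

# vars sketch - paths are illustrative
d4science_tomcat_start_command: '{{ d4science_manual_tomcat_inst_dir }}/bin/startup.sh'
d4science_tomcat_stop_command: '{{ d4science_manual_tomcat_inst_dir }}/bin/shutdown.sh'
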
@ -1,19 +1,41 @@
|
|||
---
|
||||
- name: Install the sudoers config that permits the tomcat user to restart the service
|
||||
template: src=tomcat-sudoers.j2 dest=/etc/sudoers.d/tomcat-d4science owner=root group=root mode=0440
|
||||
tags: [ 'tomcat', 'd4science', 'sudo' ]
|
||||
|
||||
- name: Install the script that allows the tomcat user to start and stop the service without using the full path
|
||||
template: src={{ item.1 }}.j2 dest={{ item.0.user_home }}/{{ item.1 }} owner={{ item.0.user }} group={{ item.0.user }} mode=0755
|
||||
with_nested:
|
||||
- '{{ tomcat_m_instances }}'
|
||||
- '{{ tomcat_m_instances | default ([]) }}'
|
||||
- [ 'startContainer.sh', 'stopContainer.sh' ]
|
||||
tags: [ 'tomcat', 'd4science', 'sudo' ]
|
||||
when: tomcat_m_instances is defined
|
||||
tags: [ 'tomcat', 'd4science', 'sudo', 'startup_cmd' ]
|
||||
|
||||
- name: Install the README file that explains where the options files are placed and how start/stop the service
|
||||
template: src={{ item.1 }}.j2 dest={{ item.0.user_home }}/{{ item.1 }} owner={{ item.0.user }} group={{ item.0.user }} mode=0444
|
||||
with_nested:
|
||||
- '{{ tomcat_m_instances }}'
|
||||
- [ 'README-tomcat' ]
|
||||
when: tomcat_m_instances is defined
|
||||
tags: [ 'tomcat', 'd4science', 'd4s_readme' ]
|
||||
|
||||
# A manual tomcat installation. We try to fix it in some way
|
||||
- name: Create the d4science tomcat user
|
||||
user: name={{ d4science_user }} home={{ d4science_user_home }} createhome={{ d4science_user_create_home }} shell={{ d4science_user_shell }}
|
||||
when: tomcat_m_instances is not defined
|
||||
tags: [ 'tomcat', 'd4science', 'users' ]
|
||||
|
||||
- name: Install the script that allows the tomcat user to start and stop the service without using the full path
|
||||
template: src={{ item }}.j2 dest=/home/{{ d4science_user }}/{{ item }} owner={{ d4science_user }} group={{ d4science_user }} mode=0755
|
||||
with_items:
|
||||
- 'startContainer.sh'
|
||||
- 'stopContainer.sh'
|
||||
when: tomcat_m_instances is not defined
|
||||
tags: [ 'tomcat', 'd4science', 'sudo', 'startup_cmd' ]
|
||||
|
||||
- name: Install a logrotate rule for catalina.out and access_log
|
||||
template: src=catalina-logrotate.j2 dest=/etc/logrotate.d/catalina_access owner=root group=root mode=0644
|
||||
when: tomcat_m_instances is not defined
|
||||
tags: [ 'tomcat', 'd4science', 'startup_cmd' ]
|
||||
|
||||
# We always install the sudoers file
|
||||
- name: Install the sudoers config that permits the tomcat user to restart the service
|
||||
template: src=tomcat-sudoers.j2 dest=/etc/sudoers.d/tomcat-d4science owner=root group=root mode=0440
|
||||
tags: [ 'tomcat', 'd4science', 'sudo', 'startup_cmd' ]
|
||||
|
||||
|
|
|
@ -0,0 +1,20 @@
|
|||
{{ d4science_manual_tomcat_log_dir }}/catalina.out {
|
||||
copytruncate
|
||||
daily
|
||||
rotate {{ d4science_manual_tomcat_rotate_copies }}
|
||||
compress
|
||||
missingok
|
||||
create 640 {{ d4science_user }} {{ d4science_user }}
|
||||
}
|
||||
|
||||
{% if d4science_manual_tomcat_rotate_access_log %}
|
||||
{{ d4science_manual_tomcat_log_dir }}/localhost_access.log {
|
||||
copytruncate
|
||||
daily
|
||||
rotate {{ d4science_manual_tomcat_rotate_copies }}
|
||||
compress
|
||||
missingok
|
||||
create 640 {{ d4science_user }} {{ d4science_user }}
|
||||
}
|
||||
{% endif %}
|
||||
|
|
@ -1,5 +1,9 @@
|
|||
#!/bin/bash
|
||||
|
||||
{% if tomcat_m_instances is defined %}
|
||||
sudo /etc/init.d/tomcat-instance-{{ item.0.http_port }} start
|
||||
{% else %}
|
||||
sudo {{ d4science_tomcat_start_command }}
|
||||
{% endif %}
|
||||
|
||||
exit $?
|
||||
|
|
|
@ -1,5 +1,9 @@
|
|||
#!/bin/bash
|
||||
|
||||
{% if tomcat_m_instances is defined %}
|
||||
sudo /etc/init.d/tomcat-instance-{{ item.0.http_port }} stop
|
||||
{% else %}
|
||||
sudo {{ d4science_tomcat_stop_command }}
|
||||
{% endif %}
|
||||
|
||||
exit $?
|
||||
|
|
|
@@ -1,2 +1,2 @@
{{ d4science_user }} ALL=(ALL) NOPASSWD: /etc/init.d/tomcat-instance-*
{{ d4science_user }} ALL=(ALL) NOPASSWD: {% for cmd in d4science_sudoers_commands %}{{ cmd }}{% if not loop.last %},{% endif %}{% endfor %}

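With the sudoers line now built from d4science_sudoers_commands, additional commands can be whitelisted from the vars alone and are rendered as a single comma-separated NOPASSWD list. An illustrative sketch (the second entry is an assumption, not part of the role defaults):

# vars sketch - second command is illustrative
d4science_sudoers_commands:
  - /etc/init.d/tomcat-instance-*
  - /usr/sbin/service tomcat-instance-* *
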
@ -2,8 +2,8 @@
|
|||
# foreman PKG state: latest, installed, absent
|
||||
foreman_pkg_state: latest
|
||||
foreman_repos:
|
||||
- 'deb http://deb.theforeman.org/ trusty 1.10'
|
||||
- 'deb http://deb.theforeman.org/ plugins 1.10'
|
||||
- 'deb http://deb.theforeman.org/ {{ ansible_distribution }} stable'
|
||||
- 'deb http://deb.theforeman.org/ plugins stable'
|
||||
|
||||
foreman_repo_key: 'http://deb.theforeman.org/pubkey.gpg'
|
||||
|
||||
|
|
|
@@ -0,0 +1,27 @@
---
fusiondirectory_install: False
fusiondirectory_schemas_install: True
# We use the debian wheezy repo for Ubuntu precise and Ubuntu trusty
fusiond_repo: 'deb http://repos.fusiondirectory.org/debian-wheezy wheezy main'
fusiond_extra_repo: 'deb http://repos.fusiondirectory.org/debian-extra wheezy main'
fusiond_repo_key: 62B4981F
fusiond_gpg_repo: keys.gnupg.net

fusiondirectory_main_pkgs:
  - fusiondirectory-archive-keyring
  - fusiondirectory

fusiondirectory_main_plugins:
  - fusiondirectory-plugin-nagios
  - fusiondirectory-plugin-ldapmanager
  - fusiondirectory-plugin-ssh

fusiondirectory_main_schemas:
  - fusiondirectory-plugin-nagios-schema
  - fusiondirectory-schema
  - fusiondirectory-plugin-ssh-schema

#fusiondirectory_plugins:

#fusiondirectory_schemas:

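The commented-out fusiondirectory_plugins and fusiondirectory_schemas variables are read through default([]) in the tasks below, so extra plugins only need to be listed when wanted. A hedged sketch (the plugin names are illustrative assumptions):

# vars sketch - plugin names are illustrative
fusiondirectory_install: True
fusiondirectory_plugins:
  - fusiondirectory-plugin-systems
fusiondirectory_schemas:
  - fusiondirectory-plugin-systems-schema
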
@ -0,0 +1,6 @@
|
|||
---
|
||||
- include: manage-fd-repos.yml
|
||||
- include: manage-fd-packages.yml
|
||||
- include: manage-fd-schemas.yml
|
||||
when: fusiondirectory_schemas_install
|
||||
|
|
@ -0,0 +1,35 @@
|
|||
---
|
||||
- block:
|
||||
|
||||
- name: Install the fusiondirectory main packages
|
||||
apt: name={{ item }} state=present update_cache=yes
|
||||
with_items: '{{ fusiondirectory_main_pkgs }}'
|
||||
|
||||
- name: Install the fusiondirectory main plugins
|
||||
apt: name={{ item }} state=present
|
||||
with_items: '{{ fusiondirectory_main_plugins }}'
|
||||
|
||||
- name: Install the fusiondirectory additional plugins
|
||||
apt: name={{ item }} state=present
|
||||
with_items: '{{ fusiondirectory_plugins | default ([]) }}'
|
||||
|
||||
when: fusiondirectory_install
|
||||
tags: fusiondirectory
|
||||
|
||||
- block:
|
||||
|
||||
- name: Remove the fusiondirectory main plugins
|
||||
apt: name={{ item }} state=absent
|
||||
with_items: '{{ fusiondirectory_main_plugins }}'
|
||||
|
||||
- name: Remove the fusiondirectory additional plugins
|
||||
apt: name={{ item }} state=absent
|
||||
with_items: '{{ fusiondirectory_plugins | default ([]) }}'
|
||||
|
||||
- name: Remove the fusiondirectory main packages
|
||||
apt: name={{ item }} state=absent
|
||||
with_items: '{{ fusiondirectory_main_pkgs }}'
|
||||
|
||||
when: not fusiondirectory_install
|
||||
tags: fusiondirectory
|
||||
|
|
@ -0,0 +1,29 @@
|
|||
---
|
||||
- block:
|
||||
|
||||
- name: Get the Fusion Directory repo key
|
||||
apt_key: id={{ fusiond_repo_key }} keyserver={{ fusiond_gpg_repo }}
|
||||
|
||||
- name: Install the Fusion Directory repo
|
||||
apt_repository: repo={{ fusiond_repo }}
|
||||
|
||||
- name: Install the Fusion Directory debian extras repo
|
||||
apt_repository: repo={{ fusiond_extra_repo }}
|
||||
|
||||
when: fusiondirectory_install
|
||||
tags: fusiondirectory
|
||||
|
||||
- block:
|
||||
|
||||
- name: Remove the Fusion Directory repo key
|
||||
apt_key: id={{ fusiond_repo_key }} keyserver={{ fusiond_gpg_repo }} state=absent
|
||||
|
||||
- name: Remove the Fusion Directory repo
|
||||
apt_repository: repo={{ fusiond_repo }} state=absent
|
||||
|
||||
- name: Remove the Fusion Directory debian extras repo
|
||||
apt_repository: repo={{ fusiond_extra_repo }} state=absent update_cache=yes
|
||||
|
||||
when: not fusiondirectory_install
|
||||
tags: fusiondirectory
|
||||
|
|
@ -0,0 +1,27 @@
|
|||
---
|
||||
- block:
|
||||
|
||||
- name: Install the fusiondirectory main schemas
|
||||
apt: name={{ item }} state=present update_cache=yes
|
||||
with_items: '{{ fusiondirectory_main_schemas }}'
|
||||
|
||||
- name: Install the fusiondirectory additional schemas
|
||||
apt: name={{ item }} state=present
|
||||
with_items: '{{ fusiondirectory_schemas | default ([]) }}'
|
||||
|
||||
when: fusiondirectory_schemas_install
|
||||
tags: [ 'fusiondirectory', 'fd_schemas' ]
|
||||
|
||||
- block:
|
||||
|
||||
- name: Remove the fusiondirectory main schemas
|
||||
apt: name={{ item }} state=absent
|
||||
with_items: '{{ fusiondirectory_main_schemas }}'
|
||||
|
||||
- name: Remove the fusiondirectory additional schemas
|
||||
apt: name={{ item }} state=absent
|
||||
with_items: '{{ fusiondirectory_schemas | default ([]) }}'
|
||||
|
||||
when: not fusiondirectory_schemas_install
|
||||
tags: [ 'fusiondirectory', 'fd_schemas' ]
|
||||
|
|
@@ -5,8 +5,12 @@
#ganglia_gmond_cluster_port: 8649
#ganglia_gmond_mcast_addr: 239.2.11.71
#ganglia_gmetad_host: ganglia-gmetad
ganglia_gmond_send_metadata_interval: 60
ganglia_gmond_send_metadata_interval: 30
# Needed to build the correct firewall rules when jmxtrans is in use
ganglia_gmond_use_jmxtrans: False
# Used by other roles to install specific ganglia iptables rules or some specific ganglia plugins. Or not.
ganglia_enabled: False
ganglia_unicast_mode: False
ganglia_unicast_networks:
  - 0.0.0.0/0

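ganglia_unicast_mode switches the gmond template from the multicast udp_send_channel to one per-host channel (looping over ganglia_gmetad_sources) and drives the unicast iptables rules. A hedged group_vars sketch (the host name and network below are illustrative assumptions):

# group_vars sketch - host and network are illustrative
ganglia_enabled: True
ganglia_unicast_mode: True
ganglia_unicast_networks:
  - 146.48.0.0/16
ganglia_gmetad_sources:
  - gmetad.example.org
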
@@ -45,7 +45,7 @@
  tags: [ 'monitoring', 'ganglia' ]

- name: Distribute the ganglia configuration file for Ubuntu < 12.04 and >= 10.04 and Debian 6
  template: src=gmond-3.1.j2 dest=/etc/ganglia/gmond.conf owner=root group=root mode=444
  template: src=gmond.j2 dest=/etc/ganglia/gmond.conf owner=root group=root mode=444
  when: is_ubuntu_between_10_04_and_11_04_and_is_debian_6
  notify: Restart ganglia monitor
  tags: [ 'monitoring', 'ganglia' ]

@ -32,8 +32,9 @@ host {
|
|||
|
||||
/* Feel free to specify as many udp_send_channels as you like. Gmond
|
||||
used to only support having a single channel */
|
||||
{% if not ganglia_unicast_mode %}
|
||||
udp_send_channel {
|
||||
bind_hostname = yes
|
||||
#bind_hostname = yes
|
||||
mcast_join = {{ ganglia_gmond_mcast_addr }}
|
||||
port = {{ ganglia_gmond_cluster_port }}
|
||||
ttl = 1
|
||||
|
@ -45,10 +46,19 @@ udp_recv_channel {
|
|||
port = {{ ganglia_gmond_cluster_port }}
|
||||
}
|
||||
|
||||
udp_recv_channel {
|
||||
bind = {{ ansible_fqdn }}
|
||||
{% else %}
|
||||
{% for host in ganglia_gmetad_sources %}
|
||||
udp_send_channel {
|
||||
host = {{ host }}
|
||||
port = {{ ganglia_gmond_cluster_port }}
|
||||
ttl = 1
|
||||
}
|
||||
{% endfor %}
|
||||
|
||||
{% endif %}
|
||||
udp_recv_channel {
|
||||
port = {{ ganglia_gmond_cluster_port }}
|
||||
}
|
||||
|
||||
/* You can specify as many tcp_accept_channels as you like to share
|
||||
an xml description of the state of the cluster */
|
||||
|
|
|
@ -25,7 +25,7 @@
|
|||
- rules.v4
|
||||
- rules.v6
|
||||
when: is_precise
|
||||
notify: Start the iptables service
|
||||
register: install_iptables_rules_precise
|
||||
tags: [ 'iptables', 'iptables_rules' ]
|
||||
|
||||
- name: Install the IPv4 and IPv6 iptables rules. The IPv6 ones are not used. On trusty
|
||||
|
@ -34,7 +34,7 @@
|
|||
- rules.v4
|
||||
- rules.v6
|
||||
when: is_trusty
|
||||
notify: Start the iptables service
|
||||
register: install_iptables_rules_trusty
|
||||
tags: [ 'iptables', 'iptables_rules' ]
|
||||
|
||||
- name: Install the IPv4 and IPv6 iptables rules. The IPv6 ones are not used. On debian 7
|
||||
|
@ -43,7 +43,7 @@
|
|||
- rules.v4
|
||||
- rules.v6
|
||||
when: is_debian7
|
||||
notify: Start the iptables service
|
||||
register: install_iptables_rules_deb7
|
||||
tags: [ 'iptables', 'iptables_rules' ]
|
||||
|
||||
- name: Install the IPv4 and IPv6 iptables rules. The IPv6 ones are not used. On debian 8
|
||||
|
@ -52,6 +52,29 @@
|
|||
- rules.v4
|
||||
- rules.v6
|
||||
when: is_debian8
|
||||
notify: Start the netfilter service
|
||||
register: install_netfilter_rules
|
||||
tags: [ 'iptables', 'iptables_rules' ]
|
||||
|
||||
- name: Start the iptables service immediately after the new rules have been installed, on Ubuntu precise. This can have an impact on other tasks
|
||||
service: name=iptables-persistent state=restarted enabled=yes
|
||||
notify: Restart fail2ban
|
||||
when: ( install_iptables_rules_precise | changed )
|
||||
tags: [ 'iptables', 'iptables_rules' ]
|
||||
|
||||
- name: Start the iptables service immediately after the new rules have been installed, on Ubuntu Trusty. This can have an impact on other tasks
|
||||
service: name=iptables-persistent state=restarted enabled=yes
|
||||
notify: Restart fail2ban
|
||||
when: ( install_iptables_rules_trusty | changed )
|
||||
tags: [ 'iptables', 'iptables_rules' ]
|
||||
|
||||
- name: Start the iptables service immediately after the new rules have been installed, on Debian 7. This can have an impact on other tasks
|
||||
service: name=iptables-persistent state=restarted enabled=yes
|
||||
notify: Restart fail2ban
|
||||
when: ( install_iptables_rules_deb7 | changed )
|
||||
tags: [ 'iptables', 'iptables_rules' ]
|
||||
|
||||
- name: Start the netfilter service immediately after the new rules have been installed. This can have an impact on other tasks
|
||||
service: name=netfilter-persistent state=restarted enabled=yes
|
||||
notify: Restart fail2ban
|
||||
when: ( install_netfilter_rules | changed )
|
||||
tags: [ 'iptables', 'iptables_rules' ]
|
||||
|
|
|
@ -41,6 +41,9 @@
|
|||
-A INPUT -s {{ network.nmis }} -j ACCEPT
|
||||
-A INPUT -s {{ network.eduroam }} -j ACCEPT
|
||||
{% endif %}
|
||||
{% if letsencrypt_acme_install is defined and letsencrypt_acme_install %}
|
||||
-A INPUT -m state --state NEW -m tcp -p tcp --dport 80 -j ACCEPT
|
||||
{% endif %}
|
||||
{% if http_port is defined %}
|
||||
# http
|
||||
{% if http_allowed_hosts is defined %}
|
||||
|
@ -175,12 +178,18 @@
|
|||
{% if ganglia_enabled %}
|
||||
{% if ganglia_gmond_cluster_port is defined %}
|
||||
# Ganglia
|
||||
{% if not ganglia_unicast_mode %}
|
||||
{% if ganglia_gmond_use_jmxtrans is not defined or not ganglia_gmond_use_jmxtrans %}
|
||||
-A INPUT -m pkttype --pkt-type multicast -d {{ ganglia_gmond_mcast_addr }} -j ACCEPT
|
||||
{% else %}
|
||||
-A INPUT -m pkttype --pkt-type multicast -j ACCEPT
|
||||
-A INPUT -p udp -m udp -d {{ ganglia_gmond_mcast_addr }} --dport {{ ganglia_gmond_cluster_port }} -j ACCEPT
|
||||
{% endif %}
|
||||
{% else %}
|
||||
{% for net in ganglia_unicast_networks %}
|
||||
-A INPUT -p udp -m udp -s {{ net }} --dport {{ ganglia_gmond_cluster_port }} -j ACCEPT
|
||||
{% endfor %}
|
||||
{% endif %}
|
||||
-A INPUT -m state --state NEW -s {{ ganglia_gmetad_host }} -p tcp -m tcp --dport {{ ganglia_gmond_cluster_port }} -j ACCEPT
|
||||
-A INPUT -s {{ ganglia_gmetad_host }} -p udp -m udp --dport {{ ganglia_gmond_cluster_port }} -j ACCEPT
|
||||
{% endif %}
|
||||
|
|
|
@ -69,5 +69,51 @@ nginx_letsencrypt_managed: True
|
|||
# php, rewrite rules, acls, ldap auth
|
||||
# More robust rules
|
||||
# log format personalization (global, inside conf.d)
|
||||
# CORS
|
||||
#
|
||||
# Special cases: mediawiki,...
|
||||
#
|
||||
# CORS example 1
|
||||
# set $cors '';
|
||||
# if ($http_origin ~* 'https?://(localhost|.*\.example\.org)') {
|
||||
# set $cors 'true';
|
||||
# }
|
||||
|
||||
# if ($cors = 'true') {
|
||||
# add_header 'Access-Control-Allow-Origin' "$http_origin";
|
||||
# add_header 'Access-Control-Allow-Credentials' 'true';
|
||||
# add_header 'Access-Control-Allow-Methods' 'GET, POST, PUT, DELETE, OPTIONS';
|
||||
# add_header 'Access-Control-Allow-Headers' 'Accept,Authorization,Cache-Control,Content-Type,DNT,If-Modified-Since,Keep-Alive,Origin,User-Agent,X-Mx-ReqToken,X-Requested-With';
|
||||
# }
|
||||
|
||||
# if ($request_method = 'OPTIONS') {
|
||||
# return 204;
|
||||
# }
|
||||
|
||||
# CORS example 2
|
||||
# location / {
|
||||
# if ($request_method = 'OPTIONS') {
|
||||
# add_header 'Access-Control-Allow-Origin' '*';
|
||||
# add_header 'Access-Control-Allow-Methods' 'GET, POST, OPTIONS';
|
||||
# #
|
||||
# # Custom headers and headers various browsers *should* be OK with but aren't
|
||||
# #
|
||||
# add_header 'Access-Control-Allow-Headers' 'DNT,X-CustomHeader,Keep-Alive,User-Agent,X-Requested-With,If-Modified-Since,Cache-Control,Content-Type';
|
||||
# #
|
||||
# # Tell client that this pre-flight info is valid for 20 days
|
||||
# #
|
||||
# add_header 'Access-Control-Max-Age' 1728000;
|
||||
# add_header 'Content-Type' 'text/plain charset=UTF-8';
|
||||
# add_header 'Content-Length' 0;
|
||||
# return 204;
|
||||
# }
|
||||
# if ($request_method = 'POST') {
|
||||
# add_header 'Access-Control-Allow-Origin' '*';
|
||||
# add_header 'Access-Control-Allow-Methods' 'GET, POST, OPTIONS';
|
||||
# add_header 'Access-Control-Allow-Headers' 'DNT,X-CustomHeader,Keep-Alive,User-Agent,X-Requested-With,If-Modified-Since,Cache-Control,Content-Type';
|
||||
# }
|
||||
# if ($request_method = 'GET') {
|
||||
# add_header 'Access-Control-Allow-Origin' '*';
|
||||
# add_header 'Access-Control-Allow-Methods' 'GET, POST, OPTIONS';
|
||||
# add_header 'Access-Control-Allow-Headers' 'DNT,X-CustomHeader,Keep-Alive,User-Agent,X-Requested-With,If-Modified-Since,Cache-Control,Content-Type';
|
||||
# }
|
||||
|
|
|
@ -33,6 +33,7 @@ psql_conf_parameters:
|
|||
|
||||
# SSL as a special case
|
||||
psql_enable_ssl: False
|
||||
psql_force_ssl_client_connection: False
|
||||
postgresql_letsencrypt_managed: True
|
||||
psql_conf_ssl_parameters:
|
||||
- { name: 'ssl', value: 'true' }
|
||||
|
@ -125,6 +126,14 @@ pgpool_memqcache_memcached_port: 11211
|
|||
pgpool_memqcache_expire: 0
|
||||
pgpool_memqcache_auto_cache_invalidation: 'on'
|
||||
|
||||
# SSL as a special case
|
||||
pgpool_enable_ssl: False
|
||||
pgpool_letsencrypt_managed: True
|
||||
pgpool_ssl_key: /etc/pki/pgpool2/pgpool2.key
|
||||
pgpool_ssl_cert: '/var/lib/acme/live/{{ ansible_fqdn }}/cert'
|
||||
pgpool_ssl_ca: '/var/lib/acme/live/{{ ansible_fqdn }}/chain'
|
||||
pgpool_ssl_ca_dir: /etc/ssl/certs
|
||||
|
||||
# WAL files archiving is mandatory for pgpool recovery
|
||||
psql_wal_files_archiving_enabled: '{{ psql_pgpool_install }}'
|
||||
psql_restart_after_wal_enabling: True
|
||||
|
|
|
@ -0,0 +1,36 @@
|
|||
#!/bin/bash
|
||||
|
||||
H_NAME=$( hostname -f )
|
||||
LE_SERVICES_SCRIPT_DIR=/usr/lib/acme/hooks
|
||||
LE_CERTS_DIR=/var/lib/acme/live/$H_NAME
|
||||
LE_LOG_DIR=/var/log/letsencrypt
|
||||
PGPOOL2_CERTDIR=/etc/pki/pgpool2
|
||||
PGPOOL2_KEYFILE=$PGPOOL2_CERTDIR/pgpool2.key
|
||||
DATE=$( date )
|
||||
|
||||
[ ! -d $PGPOOL2_CERTDIR ] && mkdir -p $PGPOOL2_CERTDIR
|
||||
[ ! -d $LE_LOG_DIR ] && mkdir $LE_LOG_DIR
|
||||
echo "$DATE" >> $LE_LOG_DIR/pgpool2.log
|
||||
|
||||
if [ -f /etc/default/letsencrypt ] ; then
|
||||
. /etc/default/letsencrypt
|
||||
else
|
||||
echo "No letsencrypt default file" >> $LE_LOG_DIR/pgpool2.log
|
||||
fi
|
||||
|
||||
echo "Copy the key file" >> $LE_LOG_DIR/pgpool2.log
|
||||
cp ${LE_CERTS_DIR}/privkey ${PGPOOL2_KEYFILE}
|
||||
chmod 440 ${PGPOOL2_KEYFILE}
|
||||
chgrp postgres ${PGPOOL2_KEYFILE}
|
||||
|
||||
echo "Reload the pgpool2 service" >> $LE_LOG_DIR/pgpool2.log
|
||||
if [ -x /bin/systemctl ] ; then
|
||||
systemctl reload pgpool2 >> $LE_LOG_DIR/pgpool2.log 2>&1
|
||||
else
|
||||
service pgpool2 reload >> $LE_LOG_DIR/pgpool2.log 2>&1
|
||||
fi
|
||||
|
||||
echo "Done." >> $LE_LOG_DIR/pgpool2.log
|
||||
|
||||
exit 0
|
||||
|
|
@@ -6,7 +6,7 @@
# - { name: 'db_name', user: 'db_user', pwd: 'db_pwd', allowed_hosts: [ '146.48.123.17/32', '146.48.122.110/32' ] }
#
- name: Give access to the remote postgresql client
  lineinfile: name=/etc/postgresql/{{ psql_version }}/main/pg_hba.conf regexp="^host {{ item.0.name }} {{ item.0.user }} {{ item.1 }}.*$" line="host {{ item.0.name }} {{ item.0.user }} {{ item.1 }} md5"
  lineinfile: name=/etc/postgresql/{{ psql_version }}/main/pg_hba.conf regexp="^host.* {{ item.0.name }} {{ item.0.user }} {{ item.1 }}.*$" line="host {{ item.0.name }} {{ item.0.user }} {{ item.1 }} md5"
  with_subelements:
    - '{{ psql_db_data | default([]) }}'
    - allowed_hosts
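The structure expected in psql_db_data is the one documented in the comment above; together with the new psql_force_ssl_client_connection switch, the hostssl variant of this task is selected instead. A hedged vars sketch (database name, user and network are illustrative assumptions):

# vars sketch - names and network are illustrative
psql_force_ssl_client_connection: True
psql_db_data:
  - { name: 'ckan_db', user: 'ckan', pwd: 'set-me-from-a-vault', allowed_hosts: [ '146.48.123.0/24' ] }
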
@ -14,6 +14,20 @@
|
|||
- psql_listen_on_ext_int
|
||||
- psql_db_data is defined
|
||||
- item.1 is defined
|
||||
- not psql_force_ssl_client_connection
|
||||
notify: Reload postgresql
|
||||
tags: [ 'postgresql', 'postgres', 'pg_hba' ]
|
||||
|
||||
- name: Give access to the remote postgresql client, force ssl
|
||||
lineinfile: name=/etc/postgresql/{{ psql_version }}/main/pg_hba.conf regexp="^host.* {{ item.0.name }} {{ item.0.user }} {{ item.1 }}.*$" line="hostssl {{ item.0.name }} {{ item.0.user }} {{ item.1 }} md5"
|
||||
with_subelements:
|
||||
- '{{ psql_db_data | default([]) }}'
|
||||
- allowed_hosts
|
||||
when:
|
||||
- psql_listen_on_ext_int
|
||||
- psql_db_data is defined
|
||||
- item.1 is defined
|
||||
- psql_force_ssl_client_connection
|
||||
notify: Reload postgresql
|
||||
tags: [ 'postgresql', 'postgres', 'pg_hba' ]
|
||||
|
||||
|
|
|
@ -33,5 +33,9 @@
|
|||
when:
|
||||
- postgresql_letsencrypt_managed
|
||||
- letsencrypt_acme_install is defined
|
||||
- include: pgpool-letsencrypt-acmetool.yml
|
||||
when:
|
||||
- pgpool_letsencrypt_managed
|
||||
- letsencrypt_acme_install is defined
|
||||
|
||||
|
||||
|
|
|
@ -0,0 +1,15 @@
|
|||
---
|
||||
- name: Create the acme hooks directory if it does not yet exist
|
||||
file: dest={{ letsencrypt_acme_services_scripts_dir }} state=directory owner=root group=root
|
||||
when:
|
||||
- pgpool_letsencrypt_managed
|
||||
- letsencrypt_acme_install
|
||||
tags: [ 'postgresql', 'postgres', 'pgpool', 'letsencrypt' ]
|
||||
|
||||
- name: Install a script that fixes the letsencrypt certificate for pgpool and then reloads the service
|
||||
copy: src=pgpool-letsencrypt-acme.sh dest={{ letsencrypt_acme_services_scripts_dir }}/pgpool owner=root group=root mode=4555
|
||||
when:
|
||||
- pgpool_letsencrypt_managed
|
||||
- letsencrypt_acme_install
|
||||
tags: [ 'postgresql', 'postgres', 'pgpool', 'letsencrypt' ]
|
||||
|
|
@ -77,25 +77,14 @@ authentication_timeout = 60
|
|||
# Delay in seconds to complete client authentication
|
||||
# 0 means no timeout.
|
||||
|
||||
{% if pgpool_enable_ssl %}
|
||||
# - SSL Connections -
|
||||
|
||||
ssl = off
|
||||
# Enable SSL support
|
||||
# (change requires restart)
|
||||
#ssl_key = './server.key'
|
||||
# Path to the SSL private key file
|
||||
# (change requires restart)
|
||||
#ssl_cert = './server.cert'
|
||||
# Path to the SSL public certificate file
|
||||
# (change requires restart)
|
||||
#ssl_ca_cert = ''
|
||||
# Path to a single PEM format file
|
||||
# containing CA root certificate(s)
|
||||
# (change requires restart)
|
||||
#ssl_ca_cert_dir = ''
|
||||
# Directory containing CA root certificate(s)
|
||||
# (change requires restart)
|
||||
|
||||
ssl = on
|
||||
ssl_key = '{{ pgpool_ssl_key }}'
|
||||
ssl_cert = '{{ pgpool_ssl_cert }}'
|
||||
ssl_ca_cert = '{{ pgpool_ssl_ca }}'
|
||||
ssl_ca_cert_dir = '{{ pgpool_ssl_ca_dir }}'
|
||||
{% endif %}
|
||||
|
||||
#------------------------------------------------------------------------------
|
||||
# POOLS
|
||||
|
|
|
@@ -0,0 +1,14 @@
---
py_env_pkgs_state: installed
py_env_site: False
py_env_basic_pkgs:
  - python-pip

py_env_dpkg:
  -

py_pip_deps:
  -

py_env_pip_pkgs:
  -
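The empty placeholders above are filtered through default([]) in the tasks, so a playbook only sets the lists it actually needs. A minimal sketch (the package names are illustrative assumptions):

# vars sketch - package names are illustrative
py_env_dpkg:
  - git
py_pip_deps:
  - python-dev
  - libpq-dev
py_env_pip_pkgs:
  - psycopg2
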
@ -0,0 +1,20 @@
|
|||
---
|
||||
- block:
|
||||
|
||||
- name: Install python pip
|
||||
apt: name={{ item }} state={{ py_env_pkgs_state }} update_cache=yes cache_valid_time=600
|
||||
with_items: '{{ py_env_basic_pkgs }}'
|
||||
|
||||
- name: Install python deb packages
|
||||
apt: name={{ item }} state={{ py_env_pkgs_state }} update_cache=yes cache_valid_time=600
|
||||
with_items: '{{ py_env_dpkg | default([]) }}'
|
||||
|
||||
- name: Install deb packages needed to compile the pip modules
|
||||
apt: name={{ item }} state={{ py_env_pkgs_state }} update_cache=yes cache_valid_time=600
|
||||
with_items: '{{ py_pip_deps | default([]) }}'
|
||||
|
||||
- name: Install a list of pip packages
|
||||
pip: name={{ item }}
|
||||
with_items: '{{ py_env_pip_pkgs | default ([]) }}'
|
||||
|
||||
tags: [ "python", "py_env" ]
|
|
@@ -5,10 +5,10 @@ py_virtenv_pkgs:
  - python-pip
  - python-virtualenv

# py_virtenv_pip_pkgs:
# - pip_pkg_foo
# - pip_pkg_bar
py_virtenv_pip_pkgs:
  -

# py_virtenv_pip_requirements: "/tmp/foo/requirements.txt"

py_virtenv_env_base_dir: "/tmp/foo"
#
# py_virtenv_env_base_dir: "/tmp/foo"
py_virtenv_env_base_dir:

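A playbook normally overrides the now-empty py_virtenv_env_base_dir and either a package list or a requirements file; the tasks below skip the pip steps when nothing is defined. A hedged sketch (paths and package names are illustrative assumptions):

# vars sketch - paths and packages are illustrative
py_virtenv_env_base_dir: '/usr/lib/ckan/default'
py_virtenv_site: False
py_virtenv_pip_pkgs:
  - requests
# or, from a requirements file:
# py_virtenv_pip_requirements: '/usr/lib/ckan/default/src/ckan/requirements.txt'
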
@ -1,40 +1,34 @@
|
|||
---
|
||||
- name: Install the python virtualenv packages
|
||||
apt: name={{ item }} state={{ py_virtenv_pkgs_state }}
|
||||
with_items: py_virtenv_pkgs
|
||||
tags: [ "python", "py_virtenv" ]
|
||||
- block:
|
||||
|
||||
- name: Install the python virtualenv packages
|
||||
apt: name={{ item }} state={{ py_virtenv_pkgs_state }}
|
||||
with_items: '{{ py_virtenv_pkgs }}'
|
||||
|
||||
- name: Create the virtenv environments.
|
||||
command: virtualenv {{ py_virtenv_env_base_dir }}
|
||||
tags: [ "python", "py_virtenv" ]
|
||||
- name: Create the virtenv environments.
|
||||
command: virtualenv {{ py_virtenv_env_base_dir }}
|
||||
when: py_virtenv_env_base_dir != ""
|
||||
|
||||
- name: Install a list of pip packages inside the virtualenv, inherit the global site-packages
|
||||
pip: name={{ item }} virtualenv={{ py_virtenv_env_base_dir }} virtualenv_site_packages=yes
|
||||
with_items: py_virtenv_pip_pkgs
|
||||
when:
|
||||
- py_virtenv_pip_pkgs is defined
|
||||
- py_virtenv_site
|
||||
tags: [ "python", "py_virtenv" ]
|
||||
- name: Install a list of pip packages inside the virtualenv, inherit the global site-packages
|
||||
pip: name={{ item }} virtualenv={{ py_virtenv_env_base_dir }} virtualenv_site_packages=yes
|
||||
with_items: '{{ py_virtenv_pip_pkgs | default ([]) }}'
|
||||
when: py_virtenv_site
|
||||
|
||||
- name: Install a list of pip packages inside the virtualenv
|
||||
pip: name={{ item }} virtualenv={{ py_virtenv_env_base_dir }}
|
||||
with_items: py_virtenv_pip_pkgs
|
||||
when:
|
||||
- py_virtenv_pip_pkgs is defined
|
||||
- not py_virtenv_site
|
||||
tags: [ "python", "py_virtenv" ]
|
||||
- name: Install a list of pip packages inside the virtualenv
|
||||
pip: name={{ item }} virtualenv={{ py_virtenv_env_base_dir }}
|
||||
with_items: '{{ py_virtenv_pip_pkgs | default ([]) }}'
|
||||
when: not py_virtenv_site
|
||||
|
||||
- name: Install a list of pip packages inside the virtualenv from a requirements.txt file, inherit the global site-packages
|
||||
pip: requirements={{ py_virtenv_pip_requirements }} virtualenv={{ py_virtenv_env_base_dir }} virtualenv_site_packages=yes
|
||||
when:
|
||||
- py_virtenv_pip_requirements is defined
|
||||
- py_virtenv_site
|
||||
tags: [ "python", "py_virtenv" ]
|
||||
- name: Install a list of pip packages inside the virtualenv from a requirements.txt file, inherit the global site-packages
|
||||
pip: requirements={{ py_virtenv_pip_requirements }} virtualenv={{ py_virtenv_env_base_dir }} virtualenv_site_packages=yes
|
||||
when:
|
||||
- py_virtenv_pip_requirements is defined
|
||||
- py_virtenv_site
|
||||
|
||||
- name: Install a list of pip packages inside the virtualenv from a requirements.txt file
|
||||
pip: requirements={{ py_virtenv_pip_requirements }} virtualenv={{ py_virtenv_env_base_dir }}
|
||||
when:
|
||||
- py_virtenv_pip_requirements is defined
|
||||
- not py_virtenv_site
|
||||
tags: [ "python", "py_virtenv" ]
|
||||
- name: Install a list of pip packages inside the virtualenv from a requirements.txt file
|
||||
pip: requirements={{ py_virtenv_pip_requirements }} virtualenv={{ py_virtenv_env_base_dir }}
|
||||
when:
|
||||
- py_virtenv_pip_requirements is defined
|
||||
- not py_virtenv_site
|
||||
|
||||
tags: [ "python", "py_virtenv" ]
|
||||
|
|
|
@ -1,8 +1,9 @@
|
|||
---
|
||||
gcube_repository: 'gcube-snapshots'
|
||||
gcube_repository: 'gcube-staging'
|
||||
ckan_connector_plugin_install: False
|
||||
ckan_connector_ver: 1.0.0
|
||||
ckan_connector_name: 'ckan_connector-{{ ckan_connector_ver }}-20160627.090904-1.war'
|
||||
ckan_connector_url: 'http://maven.research-infrastructures.eu/nexus/content/repositories/{{ gcube_repository }}/org/gcube/data/access/ckan_connector/{{ ckan_connector_ver}}-SNAPSHOT/{{ ckan_connector_name }}'
|
||||
ckan_connector_war_file: ckan-connector.war
|
||||
ckan_connector_ver: 1.0.0-4.0.0-129609
|
||||
ckan_connector_name: ckan-connector
|
||||
ckan_connector_filename: '{{ ckan_connector_name }}-{{ ckan_connector_ver }}.war'
|
||||
ckan_connector_url: 'http://maven.research-infrastructures.eu/nexus/content/repositories/{{ gcube_repository }}/org/gcube/data/access/{{ ckan_connector_name }}/{{ ckan_connector_ver}}/{{ ckan_connector_filename }}'
|
||||
ckan_connector_war_file: '{{ ckan_connector_name }}.war'
|
||||
ckan_connector_user: ckan_connector
|
||||
|
|
|
@ -1,6 +1,13 @@
|
|||
---
|
||||
- block:
|
||||
|
||||
|
||||
- name: Remove the installed CKAN connector before upgrading
|
||||
file: dest={{ item }} state=absent
|
||||
with_items:
|
||||
- '{{ smartgears_instance_path }}/webapps/ckan-connector'
|
||||
- '{{ smartgears_instance_path }}/webapps/ckan-connector.war'
|
||||
when: smartgears_upgrade
|
||||
|
||||
- name: Get the CKAN connector war file
|
||||
get_url: url={{ ckan_connector_url }} dest={{ smartgears_instance_path }}/webapps/{{ ckan_connector_war_file }}
|
||||
|
||||
|
|
|
@ -1,9 +1,7 @@
|
|||
---
|
||||
generic_worker_install: False
|
||||
|
||||
#smart_generic_worker_plugin_ver: 1.0.1-3.9.0
|
||||
#smart_generic_worker_plugin_ver: 1.0.2-3.10.1
|
||||
smart_generic_worker_plugin_ver: 1.0.3-3.11.0-128830
|
||||
smart_generic_worker_plugin_ver: 1.0.3-4.0.0-128830
|
||||
smart_generic_worker_plugin_name: 'smart-generic-worker-{{ smart_generic_worker_plugin_ver }}-jar-with-dependencies.jar'
|
||||
smart_generic_worker_plugin_url: 'http://maven.research-infrastructures.eu/nexus/content/repositories/{{ gcube_repository }}/org/gcube/dataanalysis/smart-generic-worker/{{ smart_generic_worker_plugin_ver }}/{{ smart_generic_worker_plugin_name }}'
|
||||
|
||||
|
|
|
@ -1,24 +1,8 @@
|
|||
---
|
||||
smart_executor_install: False
|
||||
|
||||
#smart_executor_version: 1.2.0-3.9.0
|
||||
#smart_executor_version: 1.3.0-3.10.1
|
||||
smart_executor_version: 1.3.0-3.11.0-128844
|
||||
smart_executor_version: 1.3.0-4.0.0-128844
|
||||
smart_executor_name: smart-executor
|
||||
smart_executor_file: '{{ smart_executor_name }}-{{ smart_executor_version }}.war'
|
||||
smart_executor_url: 'http://maven.research-infrastructures.eu/nexus/content/repositories/{{ gcube_repository }}/org/gcube/vremanagement/smart-executor/{{ smart_executor_version }}/{{ smart_executor_file }}'
|
||||
|
||||
smart_executor_context: '/{{ smart_executor_name }}'
|
||||
smart_executor_contexts_list:
|
||||
- ''
|
||||
- '/BiodiversityLab'
|
||||
- '/BiOnym'
|
||||
- '/ScalableDataMining'
|
||||
|
||||
# dev has two different contexts
|
||||
#smart_executor_contexts_list:
|
||||
# - ''
|
||||
# - '/devVRE'
|
||||
|
||||
# - ''
|
||||
# - '/NextNext'
|
||||
|
|
|
@ -1,41 +1,24 @@
|
|||
---
|
||||
- name: Remove the old smart executor files
|
||||
file: path={{ smartgears_instance_path }}/webapps/{{ smart_executor_name }} state=absent
|
||||
when:
|
||||
- smart_executor_install
|
||||
- smartgears_upgrade
|
||||
tags: [ 'smartgears', 'tomcat' ]
|
||||
|
||||
# NOTE: Install as the smartgears user so we do not mess with the permissions
|
||||
- name: Get the smart executor plugin
|
||||
get_url: url={{ smart_executor_url }} dest={{ smartgears_user_home }}/{{ smart_executor_file }}
|
||||
when: smart_executor_install
|
||||
tags: [ 'smartgears', 'smart_executor', 'tomcat' ]
|
||||
|
||||
- name: Create the smart executor working directory
|
||||
file: path={{ smartgears_instance_path }}/webapps/{{ smart_executor_name }} state=directory owner={{ smartgears_user }} group={{ smartgears_user }}
|
||||
when: smart_executor_install
|
||||
tags: [ 'smartgears', 'smart_executor', 'tomcat' ]
|
||||
|
||||
- name: Unarchive the smartexecutor distribution
|
||||
become: True
|
||||
become_user: '{{ smartgears_user }}'
|
||||
unarchive: copy=no src={{ smartgears_user_home }}/{{ smart_executor_file }} dest={{ smartgears_instance_path }}/webapps/{{ smart_executor_name }} creates={{ smartgears_instance_path }}/webapps/{{ smart_executor_name }}/WEB-INF/lib
|
||||
when: smart_executor_install
|
||||
notify: Restart smartgears
|
||||
tags: [ 'smartgears', 'smart_executor', 'tomcat' ]
|
||||
|
||||
- name: Install the smartgears configuration file
|
||||
template: src=smart_executor-container.xml.j2 dest={{ smartgears_install_path }}/container.xml owner={{ item.user }} group={{ item.user }}
|
||||
with_items: '{{ tomcat_m_instances }}'
|
||||
notify: Restart smartgears
|
||||
when: smart_executor_install
|
||||
register: smartexec_containerxml_state
|
||||
tags: [ 'smartgears', 'smart_executor', 'smart_executor_conf', 'tomcat' ]
|
||||
- block:
|
||||
|
||||
- name: Remove the smartgears application state if the configuration changed
|
||||
file: dest={{ smartgears_install_path }}/state state=absent
|
||||
notify: Restart smartgears
|
||||
when: ( smartexec_containerxml_state | changed )
|
||||
tags: [ 'smartgears', 'smart_executor', 'smart_executor_conf', 'tomcat' ]
|
||||
- name: Remove the old smart executor files
|
||||
file: path={{ smartgears_instance_path }}/webapps/{{ smart_executor_name }} state=absent
|
||||
when: ( not smart_executor_install ) or ( smartgears_upgrade )
|
||||
|
||||
# NOTE: Install as the smartgears user so we do not mess with the permissions
|
||||
- name: Get the smart executor plugin
|
||||
get_url: url={{ smart_executor_url }} dest={{ smartgears_user_home }}/{{ smart_executor_file }}
|
||||
when: smart_executor_install
|
||||
|
||||
- name: Create the smart executor working directory
|
||||
file: path={{ smartgears_instance_path }}/webapps/{{ smart_executor_name }} state=directory owner={{ smartgears_user }} group={{ smartgears_user }}
|
||||
when: smart_executor_install
|
||||
|
||||
- name: Unarchive the smartexecutor distribution
|
||||
unarchive: copy=no src={{ smartgears_user_home }}/{{ smart_executor_file }} dest={{ smartgears_instance_path }}/webapps/{{ smart_executor_name }} creates={{ smartgears_instance_path }}/webapps/{{ smart_executor_name }}/WEB-INF/lib
|
||||
when: smart_executor_install
|
||||
notify: Restart smartgears
|
||||
|
||||
become: True
|
||||
become_user: '{{ d4science_user }}'
|
||||
tags: [ 'smartgears', 'smart_executor', 'tomcat' ]
|
||||
|
|
|
@ -1,34 +0,0 @@
|
|||
<container mode='{{ smartgears_mode }}'>
|
||||
<hostname>{{ smartgears_hostname }}</hostname>
|
||||
{%if setup_nginx %}
|
||||
{%if https_port is defined %}
|
||||
<port>{{ https_port }}</port>
|
||||
{% else %}
|
||||
<port>{{ http_port }}</port>
|
||||
{% endif %}
|
||||
{% else %}
|
||||
<port>{{ item.http_port }}</port>
|
||||
{% endif %}
|
||||
<infrastructure>{{ smartgears_infrastructure_name }}</infrastructure>
|
||||
<vo>{{ smartgears_vo_name }}</vo>
|
||||
|
||||
<site>
|
||||
<country>{{ smartgears_country }}</country>
|
||||
<location>{{ smartgears_location }}</location>
|
||||
<latitude>41.9000</latitude>
|
||||
<longitude>12.5000</longitude>
|
||||
</site>
|
||||
|
||||
<property name='SmartGearsDistribution' value='{{ smartgears_distribution_version }}' />
|
||||
<property name='SmartGearsDistributionBundle' value='UnBundled' />
|
||||
<publication-frequency>60</publication-frequency>
|
||||
|
||||
{% if smart_executor_install %}
|
||||
<application mode="{{ smartgears_mode }}" context="{{ smart_executor_context }}">
|
||||
{% for context in smart_executor_contexts_list %}
|
||||
<scope>/{{ smartgears_infrastructure_name }}/{{ smartgears_vo_name }}{{ context }}</scope>
|
||||
{% endfor %}
|
||||
</application>
|
||||
{% endif %}
|
||||
|
||||
</container>
|
|
@ -15,14 +15,13 @@ smartgears_user: '{{ d4science_user }}'
|
|||
smartgears_user_home: '{{ d4science_user_home }}'
|
||||
smartgears_instance_path: '{{ smartgears_user_home }}/tomcat'
|
||||
smartgears_install_path: '{{ smartgears_user_home }}/SmartGears'
|
||||
#smartgears_distribution_version: 1.2.6-3.10.0
|
||||
#smartgears_distribution_version: 1.2.7-3.10.1
|
||||
smartgears_distribution_version: 1.2.7-3.11.0-125799
|
||||
smartgears_distribution_version: 1.2.8-4.0.0-129615
|
||||
smartgears_file: 'smartgears-distribution-{{ smartgears_distribution_version }}.tar.gz'
|
||||
smartgears_url: 'http://maven.research-infrastructures.eu/nexus/content/repositories/{{ gcube_repository }}/org/gcube/distribution/smartgears-distribution/{{ smartgears_distribution_version }}/{{ smartgears_file }}'
|
||||
smartgears_mode: online
|
||||
# Production infra and VO
|
||||
smartgears_infrastructure_name: "d4science.research-infrastructures.eu"
|
||||
smartgears_vo: True
|
||||
smartgears_vo_name: gCubeApps
|
||||
smartgears_hostname: '{{ ansible_fqdn }}'
|
||||
smartgears_country: it
|
||||
|
@ -31,12 +30,31 @@ smartgears_location: pisa
|
|||
smartgears_http_port: 9000
|
||||
smartgears_service_name: 'tomcat-instance-{{ smartgears_http_port }}'
|
||||
|
||||
smartgears_loglevel: WARN
|
||||
|
||||
smartgears_tomcat_contexts: [ 'whn-manager' ]
|
||||
smartgears_define_context_vo: False
|
||||
smartgears_context: '/smart-executor'
|
||||
smartgears_contexts_list:
|
||||
- ''
|
||||
- '/BiodiversityLab'
|
||||
- '/BiOnym'
|
||||
- '/ScalableDataMining'
|
||||
|
||||
# dev has two different contexts
|
||||
#smart_executor_contexts_list:
|
||||
# - ''
|
||||
# - '/devVRE'
|
||||
|
||||
# - ''
|
||||
# - '/NextNext'
|
||||
|
||||
# The iptables rules use this
|
||||
http_port: '{{ smartgears_http_port }}'
|
||||
|
||||
tomcat_m_webapps_unpack: True
|
||||
tomcat_m_instances:
|
||||
- { http_enabled: True, http_port: '{{ smartgears_http_port }}', http_address: '0.0.0.0', ajp_enabled: False, ajp_port: '8109', ajp_address: '127.0.0.1', restart_timeout: '{{ tomcat_m_restart_timeout }}', shutdown_port: '-1', java_home: '{{ jdk_java_home }}', user: '{{ smartgears_user }}', user_home: '{{ smartgears_user_home }}', user_shell: '{{ tomcat_m_default_user_shell }}', instance_path: '{{ smartgears_instance_path }}', max_threads: '{{ tomcat_m_max_threads }}', autodeploy: '{{ tomcat_m_webapps_autodeploy }}', unpack: '{{ tomcat_m_webapps_unpack }}', default_conf: True, java_opts: '{{ tomcat_m_java_opts }}', java_gc_opts: '{{ tomcat_m_java_gc_opts }}', other_java_opts: '{{ tomcat_m_other_java_opts }}', remote_debugging: '{{ tomcat_m_enable_remote_debugging }}', remote_debugging_port: '{{ smartgears_debugging_port }}' , access_log_enabled: True, log_rotation_freq: daily, log_retain: 30, allowed_hosts: [ '0.0.0.0/0' ], app_contexts: [ 'whn-manager' ], servername: '{{ ansible_fqdn }}' }
|
||||
- { http_enabled: True, http_port: '{{ smartgears_http_port }}', http_address: '0.0.0.0', ajp_enabled: False, ajp_port: '8109', ajp_address: '127.0.0.1', restart_timeout: '{{ tomcat_m_restart_timeout }}', shutdown_port: '-1', java_home: '{{ jdk_java_home }}', user: '{{ smartgears_user }}', user_home: '{{ smartgears_user_home }}', user_shell: '{{ tomcat_m_default_user_shell }}', instance_path: '{{ smartgears_instance_path }}', max_threads: '{{ tomcat_m_max_threads }}', autodeploy: '{{ tomcat_m_webapps_autodeploy }}', unpack: '{{ tomcat_m_webapps_unpack }}', default_conf: True, java_opts: '{{ tomcat_m_java_opts }}', java_gc_opts: '{{ tomcat_m_java_gc_opts }}', other_java_opts: '{{ tomcat_m_other_java_opts }}', remote_debugging: '{{ tomcat_m_enable_remote_debugging }}', remote_debugging_port: '{{ smartgears_debugging_port }}' , access_log_enabled: True, log_rotation_freq: daily, log_retain: 30, allowed_hosts: [ '0.0.0.0/0' ], app_contexts: '{{ smartgears_tomcat_contexts }}' , servername: '{{ ansible_fqdn }}' }
|
||||
|
||||
# To enable debugging:
|
||||
# - Set
|
||||
|
|
|
@ -50,7 +50,6 @@
|
|||
- name: Install the smartgears configuration file
|
||||
template: src=container.xml.j2 dest={{ smartgears_install_path }}/container.xml owner={{ item.user }} group={{ item.user }}
|
||||
with_items: '{{ tomcat_m_instances }}'
|
||||
when: ( generic_worker_install is not defined ) or ( not generic_worker_install )
|
||||
register: containerxml_state
|
||||
notify: Restart smartgears
|
||||
tags: [ 'smartgears', 'smartgears_conf', 'tomcat' ]
|
||||
|
@ -61,6 +60,17 @@
|
|||
notify: Restart smartgears
|
||||
tags: [ 'smartgears', 'smartgears_conf', 'tomcat' ]
|
||||
|
||||
- name: Install the script that manages the smartgears loglevel
|
||||
template: src=change-logback-loglevel.sh.j2 dest=/usr/local/bin/change-logback-loglevel owner=root group=root mode=0755
|
||||
with_items: '{{ tomcat_m_instances }}'
|
||||
tags: [ 'smartgears', 'smartgears_loglevel', 'tomcat' ]
|
||||
|
||||
- name: Change the smartgears log level
|
||||
become: True
|
||||
become_user: '{{ d4science_user }}'
|
||||
shell: /usr/local/bin/change-logback-loglevel
|
||||
tags: [ 'smartgears', 'tomcat', 'smartgears_loglevel' ]
|
||||
|
||||
- name: Remove some wrong symbolic links created by the install/upgrade script
|
||||
file: dest={{ smartgears_install_path }}/state state=absent
|
||||
with_items:
|
||||
|
|
|
@ -0,0 +1,13 @@
|
|||
#!/bin/bash
|
||||
|
||||
RETVAL=
|
||||
LOGLEVEL=$( xmlstarlet sel -t -v "/configuration/logger/@level" {{ item.user_home }}/tomcat/lib/logback.xml | grep {{ smartgears_loglevel }} )
|
||||
RETVAL=$?
|
||||
|
||||
if [ $RETVAL -ne 0 ] ; then
|
||||
xmlstarlet ed -u "/configuration/logger[@level]/@level" -v {{ smartgears_loglevel }} {{ item.user_home }}/tomcat/lib/logback.xml > {{ item.user_home }}/tomcat/lib/logback.xml.new
|
||||
/bin/mv {{ item.user_home }}/tomcat/lib/logback.xml.new {{ item.user_home }}/tomcat/lib/logback.xml
|
||||
fi
|
||||
|
||||
exit 0
|
||||
|
|
@ -11,8 +11,10 @@
|
|||
<port>{{ item.http_port }}</port>
|
||||
{% endif %}
|
||||
<infrastructure>{{ smartgears_infrastructure_name }}</infrastructure>
|
||||
{% if smartgears_vo %}
|
||||
<vo>{{ smartgears_vo_name }}</vo>
|
||||
|
||||
{% endif %}
|
||||
|
||||
<site>
|
||||
<country>{{ smartgears_country }}</country>
|
||||
<location>{{ smartgears_location }}</location>
|
||||
|
@ -23,5 +25,13 @@
|
|||
<property name='SmartGearsDistributionBundle' value='UnBundled' />
|
||||
<property name='SmartGearsDistribution' value='{{ smartgears_distribution_version }}' />
|
||||
<publication-frequency>60</publication-frequency>
|
||||
{% if smartgears_define_context_vo %}
|
||||
|
||||
<application mode="{{ smartgears_mode }}" context="{{ smartgears_context }}">
|
||||
{% for context in smartgears_contexts_list %}
|
||||
<scope>/{{ smartgears_infrastructure_name }}/{{ smartgears_vo_name }}{{ context }}</scope>
|
||||
{% endfor %}
|
||||
</application>
|
||||
{% endif %}
|
||||
|
||||
</container>
|
||||
|
|
|
@ -23,11 +23,17 @@ solr_outside_tomcat_dir: False
|
|||
# We need to define this one because we are using the tomcat multiple instances role
|
||||
solr_tomcat_instance_dir: '{{ tomcat_m_instances_base_path }}/{{ solr_http_port }}'
|
||||
solr_data_dir: '{{ solr_tomcat_instance_dir }}/solr'
|
||||
solr_collections_base_dir: '{{ solr_data_dir }}/data/solr'
|
||||
solr_zookeeper_data_dir: '{{ solr_data_dir }}/zoo_data'
|
||||
solr_install_collection1: False
|
||||
# Stand alone
|
||||
solr_opts: "-DzkRun -DnumShards={{ solr_shards }}"
|
||||
# This is for the replica/sharded version
|
||||
# We need to pass a lot of options to the jdk for zookeeper and the solr shard configuration
|
||||
#solr_opts: "-DzkRun={{ ansible_fqdn}}:{{ solr_zoo_port }} -DnumShards={{ solr_shards }} -DzkHost=index1.t.hadoop.research-infrastructures.eu:{{ solr_zoo_port }},index2.t.hadoop.research-infrastructures.eu:{{ solr_zoo_port }},index3.t.hadoop.research-infrastructures.eu:{{ solr_zoo_port }} -Dcom.sun.management.jmxremote -Dcom.sun.management.jmxremote.ssl=false -Dcom.sun.management.jmxremote.port={{ solr_jmx_port_1 }} -Dcom.sun.management.jmxremote.password.file={{ tomcat_conf_dir }}/jmxremote.passwd -Dcom.sun.management.jmxremote.access.file={{ tomcat_conf_dir }}/jmxremote.access"
|
||||
#solr_opts: "-DzkRun={{ ansible_fqdn}}:{{ solr_zoo_port }} -DnumShards={{ solr_shards }} -DzkHost=index1:{{ solr_zoo_port }},index2:{{ solr_zoo_port }},index3:{{ solr_zoo_port }}"
|
||||
|
||||
# Define the following if you want a multicore installation
|
||||
#solr_multicore: True
|
||||
solr_cores:
|
||||
- collection1
|
||||
|
||||
|
|
Binary file not shown.
|
@ -5,23 +5,20 @@
|
|||
- data/solr
|
||||
- webapps
|
||||
- zoo_data
|
||||
tags:
|
||||
- solr
|
||||
- collection_data
|
||||
tags: solr
|
||||
|
||||
- name: Create the link from the tomcat instance for solr to the solr data directory
|
||||
file: src={{ solr_data_dir }}/data/solr dest={{ solr_tomcat_instance_dir }}/solr state=link
|
||||
when: solr_outside_tomcat_dir
|
||||
tags:
|
||||
- solr
|
||||
tags: solr
|
||||
|
||||
- name: Solr needs some additional packages
|
||||
apt: pkg={{ item }} state={{ pkg_state }}
|
||||
with_items:
|
||||
- libslf4j-java
|
||||
- libcommons-logging-java
|
||||
tags:
|
||||
- solr
|
||||
- tomcat
|
||||
tags: [ solr, tomcat ]
|
||||
|
||||
- name: Let the additional packages jar files visible to tomcat
|
||||
file: src=/usr/share/java/{{ item }} dest={{ tomcat_catalina_home_dir }}/lib/{{ item }} state=link
|
||||
|
@ -32,51 +29,61 @@
|
|||
- jcl-over-slf4j.jar
|
||||
- commons-logging.jar
|
||||
notify: tomcat solr restart
|
||||
tags:
|
||||
- solr
|
||||
- tomcat
|
||||
tags: [ solr, tomcat ]
|
||||
|
||||
- name: Install the solr webapp under <solr_data_mountpoint>/webapps
|
||||
copy: src=solr-{{ solr_version }}.war dest={{ solr_data_dir }}/webapps/solr-{{ solr_version }}.war owner={{ solr_user }} group={{ solr_user }} mode=0644
|
||||
register: solr_war_installed
|
||||
notify: tomcat solr restart
|
||||
tags:
|
||||
- solr
|
||||
- tomcat
|
||||
tags: [ solr, tomcat ]
|
||||
|
||||
- name: Install the solr catalina definition
|
||||
template: src=catalina-{{ item }}.j2 dest={{ solr_tomcat_instance_dir }}/conf/Catalina/localhost/{{ item }} owner=root group=root mode=0444
|
||||
with_items:
|
||||
- solr.xml
|
||||
notify: tomcat solr restart
|
||||
tags:
|
||||
- solr
|
||||
tags: [ solr, tomcat ]
|
||||
|
||||
- name: Copy the solr collection1 and solr_core_base archives on the target machine
|
||||
copy: src={{ item }} dest={{ solr_data_dir }}/collection_data owner={{ solr_user }} group={{ solr_user }}
|
||||
with_items:
|
||||
- collection1.tar.gz
|
||||
- solr_core_base.tar.gz
|
||||
tags: [ solr, tomcat ]
|
||||
|
||||
- name: Install the solr collection1 example
|
||||
unarchive: src=collection1.tar.gz dest={{ solr_data_dir }}/data/solr/
|
||||
unarchive: src={{ solr_data_dir }}/collection_data/collection1.tar.gz dest={{ solr_collections_base_dir }} copy=no
|
||||
args:
|
||||
creates: '{{ solr_data_dir }}/data/solr/collection1'
|
||||
creates: '{{ solr_data_dir }}/data/solr/collection1/conf/solrconfig.xml'
|
||||
when: solr_install_collection1
|
||||
notify: tomcat solr restart
|
||||
tags:
|
||||
- solr
|
||||
- tomcat
|
||||
tags: [ solr, tomcat ]
|
||||
|
||||
- name: Fix the collection1 permissions
|
||||
file: path={{ solr_data_dir }}/data/solr/ owner={{ solr_user }} group={{ solr_user }} recurse=yes
|
||||
when: solr_install_collection1
|
||||
tags:
|
||||
- solr
|
||||
- tomcat
|
||||
- name: Create the solr cores data directories
|
||||
file: dest={{ solr_collections_base_dir }}/{{ item }} state=directory owner={{ solr_user }} group={{ solr_user }}
|
||||
with_items: '{{ solr_cores }}'
|
||||
when: solr_multicore is defined and solr_multicore
|
||||
tags: [ solr, tomcat ]
|
||||
|
||||
- name: Install the solr cores data on a multicore system
|
||||
unarchive: src={{ solr_data_dir }}/collection_data/solr_core_base.tar.gz dest={{ solr_data_dir }}/data/solr/{{ item }} copy=no
|
||||
args:
|
||||
creates: '{{ solr_data_dir }}/data/solr/{{ item }}/conf/solrconfig.xml'
|
||||
with_items: '{{ solr_cores }}'
|
||||
when: solr_multicore is defined and solr_multicore
|
||||
notify: tomcat solr restart
|
||||
tags: [ solr, tomcat ]
|
||||
|
||||
- name: Fix the cores permissions
|
||||
file: path={{ solr_collections_base_dir }} owner={{ solr_user }} group={{ solr_user }} recurse=yes
|
||||
tags: [ solr, tomcat ]
|
||||
|
||||
- name: Install the tomcat.local default file
|
||||
template: src={{ item }}.j2 dest=/etc/default/tomcat-instance-{{ solr_http_port }}.local owner=root group={{ solr_user }} mode=0440
|
||||
with_items:
|
||||
- tomcat.local
|
||||
notify: tomcat solr restart
|
||||
tags:
|
||||
- solr
|
||||
- tomcat
|
||||
tags: [ solr, tomcat ]
|
||||
|
||||
- name: Install the solr.xml and zookeeper conf files
|
||||
template: src={{ item }}.j2 dest={{ solr_data_dir }}/data/solr/{{ item }} owner=root group={{ solr_user }} mode=0440
|
||||
|
@ -84,7 +91,5 @@
|
|||
- solr.xml
|
||||
- zoo.cfg
|
||||
notify: tomcat solr restart
|
||||
tags:
|
||||
- solr
|
||||
- tomcat
|
||||
tags: [ solr, tomcat ]
|
||||
|
||||
|
|
|
@ -29,11 +29,13 @@
|
|||
<solr>
|
||||
|
||||
<solrcloud>
|
||||
{% if solr_multicore is not defined or not solr_multicore %}
|
||||
<str name="host">{{ ansible_fqdn }}</str>
|
||||
<int name="hostPort">{{ solr_http_port_1 }}</int>
|
||||
<str name="hostContext">${hostContext:solr}</str>
|
||||
<int name="zkClientTimeout">${zkClientTimeout:30000}</int>
|
||||
<bool name="genericCoreNodeNames">${genericCoreNodeNames:true}</bool>
|
||||
{% endif %}
|
||||
</solrcloud>
|
||||
|
||||
<shardHandlerFactory name="shardHandlerFactory" class="HttpShardHandlerFactory">
|
||||
|
@ -41,4 +43,17 @@
|
|||
<int name="connTimeout">${connTimeout:0}</int>
|
||||
</shardHandlerFactory>
|
||||
|
||||
{% if solr_multicore is defined and solr_multicore %}
|
||||
<cores adminPath="/admin/cores">
|
||||
{% for core in solr_cores %}
|
||||
|
||||
<core name="{{ core }}" instanceDir="{{ core }}">
|
||||
<property name="dataDir" value="{{ solr_collections_base_dir }}/{{ core }}" />
|
||||
</core>
|
||||
|
||||
{% endfor %}
|
||||
</cores>
|
||||
{% endif %}
|
||||
|
||||
|
||||
</solr>
|
||||
|
|
|
@ -31,8 +31,9 @@ tomcat_ajp_address: 127.0.0.1
|
|||
# Disable the shutdown port by default
|
||||
#tomcat_shutdown_port: 8005
|
||||
tomcat_shutdown_port: -1
|
||||
tomcat_shutdown_pwd: "{{ lookup('password', '/tmp/passwordfile chars=ascii_letters,digits,hexdigits') }}"
|
||||
tomcat_shutdown_pwd: "{{ lookup('password', '/tmp/passwordfile chars=ascii_letters,digits') }}"
|
||||
tomcat_restart_timeout: 300
|
||||
tomcat_max_post_size: 1000000
|
||||
tomcat_catalina_home_dir: '/usr/share/tomcat{{ tomcat_version }}'
|
||||
tomcat_catalina_base_dir: '/var/lib/tomcat{{ tomcat_version }}'
|
||||
tomcat_conf_dir: '/etc/tomcat{{ tomcat_version }}'
|
||||
|
@ -102,3 +103,4 @@ tomcat_install_jdbc: False
|
|||
tomcat_install_pg_jdbc: '{{ tomcat_install_jdbc }}'
|
||||
# Not used yet
|
||||
tomcat_install_mysql_jdbc: False
|
||||
|
||||
|
|
|
@ -76,7 +76,7 @@
|
|||
{% if tomcat_http_enabled %}
|
||||
<!-- A http "Connector" using the shared thread pool-->
|
||||
<Connector executor="tomcatThreadPool"
|
||||
enableLookups="false"
|
||||
enableLookups="false" maxPostSize="{{ tomcat_max_post_size }}"
|
||||
maxThreads="{{ tomcat_max_threads }}" connectionTimeout="60000"
|
||||
URIEncoding="UTF-8" bindOnInit="false" address="{{ tomcat_http_address }}"
|
||||
port="{{ tomcat_http_port }}" protocol="HTTP/1.1"
|
||||
|
@ -95,7 +95,7 @@
|
|||
{% if tomcat_ajp_enabled %}
|
||||
<!-- Define an AJP 1.3 Connector on port {{ tomcat_ajp_port }} -->
|
||||
<Connector port="{{ tomcat_ajp_port }}" protocol="AJP/1.3" redirectPort="8443"
|
||||
enableLookups="false" address="{{ tomcat_ajp_address }}"
|
||||
enableLookups="false" address="{{ tomcat_ajp_address }}" maxPostSize="{{ tomcat_max_post_size }}"
|
||||
maxThreads="{{ tomcat_max_threads }}" connectionTimeout="60000"
|
||||
URIEncoding="UTF-8" bindOnInit="false" />
|
||||
{% endif %}
|
||||
|
|
|
@@ -27,6 +27,7 @@ common_packages:
  - sudo
  - apt-transport-https
  - nano
  - xmlstarlet

# Set this variable in your playbook
# additional_packages:

@@ -77,6 +78,7 @@ exim_email_server_pkgs:
  - exim4-config
  - exim4-daemon-light

disable_some_not_needed_services: False
services_to_be_disabled:
  - rpcbind
  - atd

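As the "# Set this variable in your playbook" comment above indicates, additional_packages and the service-disabling switch are meant to be provided per playbook or host group. A hedged sketch (the package and service names are illustrative assumptions):

# playbook vars sketch - package and service names are illustrative
additional_packages:
  - vim
  - htop
disable_some_not_needed_services: True
services_to_be_disabled:
  - rpcbind
  - atd
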
@@ -2,5 +2,5 @@
- name: Disable some unneeded services
  service: name={{ item }} state=stopped enabled=no
  with_items: '{{ services_to_be_disabled }}'
  ignore_errors: True
  when: disable_some_not_needed_services
  tags: [ 'bootstrap', 'disable_services' ]