Merge branch 'master' of gitorious.research-infrastructures.eu:infrastructure-management/ansible-playbooks

Roberto Cirillo 2016-10-24 10:07:59 +02:00
commit 7ff43272d9
35 changed files with 585 additions and 140 deletions

View File

@@ -21,6 +21,15 @@ r_packages_main_state: present
r_packages_state: '{{ r_packages_main_state }}'
r_plugins_from_deb: True
r_packages_cleanup: False
r_packages_updater: False
# They need to be flat text files available via http
# 1 package per line
#r_debian_packages_list_url
# package[:cran mirror]
# The CRAN mirror URL is optional
#r_cran_packages_list_url
# user/package_name
#r_github_packages_list_url
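# Hypothetical examples of the list contents (the package names below are
# placeholders, not shipped with this role):
#   debian list:  r-cran-foo            (one package per line)
#   CRAN list:    foo                   (default mirror)
#                 bar:http://cran.example.org
#   github list:  someuser/somepackage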
r_source_plugins_dest_dir: /var/cache/R

View File

@@ -2,3 +2,4 @@
- include: r-packages_cleanup.yml
when: r_packages_cleanup
- include: r-installation.yml
- include: r-packages-updater.yml

View File

@@ -102,6 +102,26 @@
tags: [ 'r_software', 'r_pkg', 'r_plugins', 'r_plugins_github' ]
ignore_errors: True
- name: Install R packages from the cran sources, specific versions. First round
command: >
Rscript --slave --no-save --no-restore-history -e "if (! ('{{ item.name }}' %in% installed.packages()[,'Package'])) { install.packages('{{ r_source_plugins_dest_dir }}/{{ item.source }}', repos = NULL, type='source'); print('Added'); } else if (packageVersion('{{ item.name }}') != '{{ item.version }}') { install.packages('{{ r_source_plugins_dest_dir }}/{{ item.source }}', repos = NULL, type='source'); print('Added'); } else { print('Already Installed'); }"
register: install_s_plugins_result
failed_when: "install_s_plugins_result.rc != 0 or 'had non-zero exit status' in install_s_plugins_result.stderr"
changed_when: '"Added" in install_s_plugins_result.stdout'
with_items: '{{ r_plugins_from_sources | default([]) }}'
ignore_errors: True
tags: [ 'r_software', 'r_pkg', 'r_plugins', 'r_plugins_version' ]
- name: Install R packages from the cran sources, specific versions. Second round, to avoid circular dependencies
command: >
Rscript --slave --no-save --no-restore-history -e "if (! ('{{ item.name }}' %in% installed.packages()[,'Package'])) { install.packages('{{ r_source_plugins_dest_dir }}/{{ item.source }}', repos = NULL, type='source'); print('Added'); } else if (packageVersion('{{ item.name }}') != '{{ item.version }}') { install.packages('{{ r_source_plugins_dest_dir }}/{{ item.source }}', repos = NULL, type='source'); print('Added'); } else { print('Already Installed'); }"
register: install_s_plugins_result
failed_when: "install_s_plugins_result.rc != 0 or 'had non-zero exit status' in install_s_plugins_result.stderr"
changed_when: '"Added" in install_s_plugins_result.stdout'
with_items: '{{ r_plugins_from_sources | default([]) }}'
when: ( install_s_plugins_result | failed )
tags: [ 'r_software', 'r_pkg', 'r_plugins', 'r_plugins_version' ]
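# A hypothetical sketch of the r_plugins_from_sources entries the two tasks
# above iterate over; the 'source' tarball is expected to already sit under
# r_source_plugins_dest_dir:
# r_plugins_from_sources:
#   - { name: 'foo', version: '1.0.2', source: 'foo_1.0.2.tar.gz' }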
- name: Remove R unwanted packages
command: >
Rscript --slave --no-save --no-restore-history -e "if (! ('{{ item }}' %in% installed.packages()[,'Package'])) { print('Not installed'); } else { remove.packages(pkgs='{{ item }}'); print('Removed'); }"
@@ -111,3 +131,4 @@
with_items: '{{ r_plugins_list_to_remove | default([]) }}'
when: r_plugins_list_to_remove is defined
tags: [ 'r_software', 'r_pkg', 'r_plugins' ]

View File

@@ -0,0 +1,27 @@
---
- block:
- name: Install the R packages updater script
template: src=update_r_packages.sh.j2 dest=/usr/local/bin/update_r_packages owner=root group=root mode=0755
- name: Cron job that installs new R packages, if any
cron: name="install new R packages" user=root cron_file=install-r-packages minute="*/10" job="/usr/local/bin/update_r_packages install" state=present
- name: Cron job that upgrades existing R packages and installs new ones, if any
cron: name="install new R packages" user=root cron_file=upgrade-r-packages hour="3" job="/usr/local/bin/update_r_packages upgrade" state=present
when: r_packages_updater
tags: [ 'r_software', 'r_pkg', 'r_plugins', 'r_plugins_github', 'r_cran_pkgs', 'r_github_pkgs' ]
- block:
- name: Remove the R packages updater script
file: dest=/usr/local/bin/update_r_packages state=absent
- name: Remove the cron job that installs new R packages
cron: name="install new R packages" user=root minute="*/10" cron_file=install-r-packages job="/usr/local/bin/update_r_packages install" state=absent
- name: Remove the cron job that upgrades existing R packages and installs new ones
cron: name="install new R packages" user=root cron_file=upgrade-r-packages hour="3" job="/usr/local/bin/update_r_packages upgrade" state=absent
when: not r_packages_updater
tags: [ 'r_software', 'r_pkg', 'r_plugins', 'r_plugins_github', 'r_cran_pkgs', 'r_github_pkgs' ]

View File

@@ -4,26 +4,9 @@
## Copyright (C) 2008 Dirk Eddelbuettel and GPL'ed
##
## see help(Startup) for documentation on ~/.Rprofile and Rprofile.site
#
# NOTE: managed by ansible
#
# ## Example of .Rprofile
# options(width=65, digits=5)
# options(show.signif.stars=FALSE)
# setHook(packageEvent("grDevices", "onLoad"),
# function(...) grDevices::ps.options(horizontal=FALSE))
# set.seed(1234)
# .First <- function() cat("\n Welcome to R!\n\n")
# .Last <- function() cat("\n Goodbye!\n\n")
# ## Example of Rprofile.site
# local({
# # add MASS to the default packages, set a CRAN mirror
# old <- getOption("defaultPackages"); r <- getOption("repos")
# r["CRAN"] <- "http://my.local.cran"
# options(defaultPackages = c(old, "MASS"), repos = r)
#})
local({r <- getOption("repos")
r["CRAN"] <- "{{ r_cran_mirror_site }}"
options(repos=r)

View File

@@ -0,0 +1,177 @@
#!/bin/bash
RETVAL=
PARAMS=$#
ACTION=$1
PROCNUM=$$
OLDPROC=
OLDPROC_RUNNING=
LOCKDIR=/var/run
LOCK_FILE=$LOCKDIR/.update_r_pkgs.lock
TMP_FILES_DIR=/var/tmp/r_pkgs_update
# We cannot answer questions
DEBIAN_FRONTEND=noninteractive
R_CRAN_MIRROR={{ r_cran_mirror_site }}
# - debian packages list format:
# one package per line
DEB_PKGS_SKIP=0
DEBIAN_PKGS_LIST_URL={{ r_debian_packages_list_url | default('') }}
PKGS_LIST=
# - R packages list format:
# name[:mirror]
CRAN_PKGS_SKIP=0
R_PKGS_LIST_URL={{ r_cran_packages_list_url | default('') }}
R_PKGS_LIST=
# - R packages from github list format:
# - owner/package
GITHUB_PKGS_SKIP=0
R_PKGS_FROM_GITHUB_LIST_URL={{ r_github_packages_list_url | default('') }}
R_PKGS_GITHUB=
trap "{ logger 'update_r_packages: trap intercepted, exiting.' ; cleanup ; exit 15 }" SIGHUP SIGINT SIGTERM
function cleanup() {
logger "update_r_packages: cleaning up"
rm -f $LOCK_FILE
rm -fr $TMP_FILES_DIR
}
function usage() {
if [ $PARAMS -ne 1 ] ; then
echo "Need at least an argument: 'upgrade' or 'install'."
echo "- 'upgrade' installs new packages and upgrades the existin ones when needed."
echo "- 'install' installs new packages."
cleanup
exit 1
fi
}
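# Example invocations, assuming the /usr/local/bin/update_r_packages path used
# by the cron jobs in r-packages-updater.yml:
#   update_r_packages install   # only add packages that are not yet installed
#   update_r_packages upgrade   # add new packages and upgrade the existing ones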
function get_args() {
if [ "$ACTION" != "upgrade" -a "$ACTION" != "install" ] ; then
usage
fi
}
function fail() {
logger "Something went wrong, exiting."
cleanup
exit 1
}
function init_env() {
if [ -f $LOCK_FILE ] ; then
OLDPROC=$( cat $LOCK_FILE )
OLDPROC_RUNNING=$( ps auwwx | grep -v grep | grep $OLDPROC )
RETVAL=$?
if [ $RETVAL -eq 0 ] ; then
logger "update_r_packages: $OLDPROC_RUNNING"
logger "update_r_packages: another process is running, exiting."
exit 0
else
logger "update_r_packages: lock file exist but the process not. Continuing."
rm -fr $TMP_FILES_DIR
fi
fi
RETVAL=
echo "$PROCNUM" > $LOCK_FILE
mkdir -p $TMP_FILES_DIR
}
function get_data_files() {
# Get the packages list
if [ -z $DEBIAN_PKGS_LIST_URL ] ; then
DEB_PKGS_SKIP=1
logger "update_r_packages: the debian packages list is not available."
else
PKGS_LIST=$( mktemp $TMP_FILES_DIR/rdebs.XXXXXXX )
logger "update_r_packages: getting the debian packages list."
wget -q -o /dev/null -O $PKGS_LIST $DEBIAN_PKGS_LIST_URL
fi
if [ -z $R_PKGS_LIST_URL ] ; then
CRAN_PKGS_SKIP=1
logger "update_r_packages: the CRAN packages list is not available."
else
R_PKGS_LIST=$( mktemp $TMP_FILES_DIR/rpkgs.XXXXXXX )
logger "update_r_packages: getting the R packages list that will be installed from CRAN"
wget -q -o /dev/null -O $R_PKGS_LIST $R_PKGS_LIST_URL
fi
if [ -z $R_PKGS_FROM_GITHUB_LIST_URL ] ; then
GITHUB_PKGS_SKIP=1
logger "update_r_packages: the Github packages list is not available."
else
R_PKGS_GITHUB=$( mktemp $TMP_FILES_DIR/rpkgsgithub.XXXXXXX )
logger "update_r_packages: getting the R packages list that will be installed from github"
wget -q -o /dev/null -O $R_PKGS_GITHUB $R_PKGS_FROM_GITHUB_LIST_URL
fi
}
function debian_pkgs() {
if [ $DEB_PKGS_SKIP -eq 0 ] ; then
# Update the apt cache and install the packages in non interactive mode
logger "update_r_packages: Installing the debian dependencies"
if [ -z "$(find /var/cache/apt/pkgcache.bin -mmin -360)" ]; then
apt-get update -q >/dev/null 2>&1
else
logger "update_r_packages: APT cache not updated"
fi
xargs -a <(awk '/^\s*[^#]/' "$PKGS_LIST") -r -- apt-get install -q -y -o Dpkg::Options::="--force-confdef" -o Dpkg::Options::="--force-confold"
else
logger "update_r_packages: skipping the debian packages installation"
fi
}
function r_cran_pkgs() {
if [ $CRAN_PKGS_SKIP -eq 0 ] ; then
logger "update_r_packages: Installing R packages from CRAN"
for l in $( cat $R_PKGS_LIST ) ; do
pkg=$( echo $l | cut -d : -f 1 )
is_mirror_ret=
is_mirror=$( echo $l | grep ':' )
is_mirror_ret=$?
if [ $is_mirror_ret -eq 0 ] ; then
mirror=$( echo $l | cut -d : -f 2 )
else
mirror=$R_CRAN_MIRROR
fi
if [ "$ACTION" == "upgrade" ] ; then
Rscript --slave --no-save --no-restore-history -e "install.packages(pkgs='$pkg', repos=c('$mirror/'));"
else
Rscript --slave --no-save --no-restore-history -e "if (! ('$pkg' %in% installed.packages()[,'Package'])) { install.packages(pkgs='$pkg', repos=c('$mirror/')); }"
fi
done
else
logger "update_r_packages: skipping the R CRAN packages installation"
fi
}
function r_github_pkgs() {
if [ $GITHUB_PKGS_SKIP -eq 0 ] ; then
logger "update_r_packages: Installing R packages from Github"
for l in $( cat $R_PKGS_GITHUB ) ; do
pkg=$( echo $l | cut -d "/" -f 2 )
user=$( echo $l | cut -d "/" -f 1 )
if [ "$ACTION" == "upgrade" ] ; then
Rscript --slave --no-save --no-restore-history -e "require(devtools); require(methods); install_github('$l');"
else
Rscript --slave --no-save --no-restore-history -e "if (! ('$pkg' %in% installed.packages()[,'Package'])) { require(devtools); require(methods) ; install_github('$l'); }"
fi
done
else
logger "update_r_packages: skipping the R GitHub packages installation"
fi
}
#########
# Main
#
usage
get_args
init_env
get_data_files
debian_pkgs
r_cran_pkgs
r_github_pkgs
cleanup
exit 0

View File

@@ -55,7 +55,7 @@ ckan_ldap_username: uid
ckan_ldap_email: mail
ckan_ldap_prevent_edits: True
ckan_ldap_fallback: True
ckan_ckanext_lire: True
ckan_ckanext_lire: False
ckan_ckanext_lire_n: lire
ckan_ckanext_lire_url: 'https://github.com/milicp/ckanext-lire.git'

View File

@@ -14,6 +14,11 @@
notify: Restart CKAN
tags: [ 'ckan', 'ckan_ini' ]
- name: Install the apache.wsgi
template: src=apache.wsgi.j2 dest={{ ckan_confdir }}/apache.wsgi
notify: Restart CKAN
tags: [ 'ckan', 'apache' ]
- name: Create the base directory for the CKAN file storage
file: dest={{ ckan_file_storage_dir }} state=directory owner={{ apache_user }} group={{ ckan_shell_user }} mode=2770
tags: ckan

View File

@@ -0,0 +1,14 @@
import os
activate_this = os.path.join('/usr/lib/ckan/default/bin/activate_this.py')
execfile(activate_this, dict(__file__=activate_this))
from paste.deploy import loadapp
config_filepath = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'production.ini')
from paste.script.util.logging_config import fileConfig
fileConfig(config_filepath)
_application = loadapp('config:%s' % config_filepath)
def application(environ, start_response):
environ['wsgi.url_scheme'] = environ.get('HTTP_X_URL_SCHEME', 'http')
return _application(environ, start_response)
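# A minimal Apache vhost fragment that would load this file through mod_wsgi
# (a sketch, not part of the role; the 'ckan_default' daemon process name is
# a placeholder):
#   WSGIScriptAlias / {{ ckan_confdir }}/apache.wsgi
#   WSGIDaemonProcess ckan_default processes=2 threads=15
#   WSGIProcessGroup ckan_default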

View File

@@ -11,11 +11,19 @@
- '{{ auth_instance_path }}/webapps/authorization-service.war'
when: authorization_service_upgrade or not authorization_service_install
- name: Get the authorization service war file
get_url: url={{ authorization_service_url }} dest={{ auth_instance_path }}/webapps/{{ authorization_service_file }}
- name: Unpack the authorization service war file
shell: mkdir {{ auth_instance_path }}/webapps/authorization-service ; cd {{ auth_instance_path }}/webapps/authorization-service ; jar xf {{ auth_instance_path }}/webapps/{{ authorization_service_file }}
# - name: Get the authorization service war file
#   get_url: url={{ authorization_service_url }} dest={{ auth_instance_path }}/webapps/{{ authorization_service_file }}
# - name: Unpack the authorization service war file
#   shell: mkdir {{ auth_instance_path }}/webapps/authorization-service ; cd {{ auth_instance_path }}/webapps/authorization-service ; jar xf {{ auth_instance_path }}/webapps/{{ authorization_service_file }}
# args:
# creates: '{{ auth_instance_path }}/webapps/authorization-service/WEB-INF/AuthorizationConfiguration.xml'
- name: Create the authorization service webapp directory
file: dest={{ auth_instance_path }}/webapps/authorization-service state=directory
- name: Get and unpack the authorization war file
unarchive: copy=no src={{ authorization_service_url }} dest={{ auth_instance_path }}/webapps/authorization-service
args:
creates: '{{ auth_instance_path }}/webapps/authorization-service/WEB-INF/AuthorizationConfiguration.xml'

View File

@@ -21,3 +21,7 @@ haproxy_nagios_check: False
# It's a percentage
haproxy_nagios_check_w: 70
haproxy_nagios_check_c: 90
haproxy_check_interval: 3s
haproxy_backend_maxconn: 2048

View File

@@ -112,7 +112,9 @@
tags: letsencrypt
- name: Install a daily cron job to renew the certificates when needed
cron: name="Letsencrypt certificate renewal" special_time=daily job="/usr/local/bin/acme-cert-request > {{ letsencrypt_acme_log_dir }}/acme-cron.log 2>&1" user={{ letsencrypt_acme_user }}
become: True
become_user: '{{ letsencrypt_acme_user }}'
cron: name="Letsencrypt certificate renewal" special_time=daily job="/usr/local/bin/acme-cert-request > {{ letsencrypt_acme_log_dir }}/acme-cron.log 2>&1"
when: letsencrypt_acme_install
tags: letsencrypt

View File

@@ -1,10 +1,14 @@
# Proxy stuff
# include /etc/nginx/snippets/nginx-proxy-params.conf;
proxy_http_version 1.1;
{% if haproxy_ips is defined %}
proxy_set_header Host $http_host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-Host $remote_addr;
proxy_set_header X-Forwarded-Server $host;
{% else %}
proxy_set_header Host $host;
{% endif %}
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Proto $scheme;
proxy_buffering {{ nginx_proxy_buffering }};

View File

@@ -47,10 +47,15 @@ openvpn_ca: '/var/lib/acme/live/{{ ansible_fqdn }}/chain'
openvpn_cert: '/var/lib/acme/live/{{ ansible_fqdn }}/cert'
openvpn_key: '/var/lib/acme/live/{{ ansible_fqdn }}/privkey'
openvpn_ha: False
# Not a real master. It is only the host where the dh.pem and ta.key are generated
openvpn_master_host: 'localhost'
openvpn_is_master_host: False
openvpn_compression_enabled: False
openvpn_keepalive: '10 120'
openvpn_max_clients: 50
openvpn_max_clients: 100
openvpn_run_unprivileged: True
openvpn_unprivileged_user: nobody
openvpn_unprivileged_group: nogroup

View File

@@ -71,6 +71,10 @@
template: src=openvpn.conf.j2 dest={{ openvpn_conf_dir }}/{{ openvpn_conf_name }} owner=root group={{ openvpn_unprivileged_group }} mode=0440
notify: Reload OpenVPN
tags: [ 'openvpn', 'openvpn_conf' ]
- block:
- name: Create the dh file
shell: openssl dhparam -out {{ openvpn_conf_dir }}/dh2048.pem 2048 ; chmod 444 {{ openvpn_conf_dir }}/dh2048.pem
args:
@@ -81,8 +85,23 @@
args:
creates: '{{ openvpn_conf_dir }}/ta.key'
when: openvpn_is_master_host or not openvpn_ha
tags: [ 'openvpn', 'openvpn_conf' ]
# Does not work right now. The error is
# fatal: [gw2.d4science.org -> gw1.d4science.org]: FAILED! => {"changed": false, "failed": true, "msg": "Boolean root not in either boolean list"}
# - block:
# - name: Get the dh file from the master host
# synchronize: src={{ openvpn_conf_dir }}/dh2048.pem dest=rsync://root@{{ ansible_fqdn }}/{{ openvpn_conf_dir }}/dh2048.pem
# delegate_to: '{{ openvpn_master_host }}'
# - name: Get the ta key from the master host
# synchronize: src={{ openvpn_conf_dir }}/ta.key dest=rsync://root@{{ ansible_fqdn }}/{{ openvpn_conf_dir }}/ta.key
# delegate_to: '{{ openvpn_master_host }}'
# when: openvpn_ha and not openvpn_is_master_host
# tags: [ 'openvpn', 'openvpn_conf', 'openvpn_shared_secrets' ]
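# An untested alternative sketch: fetch the generated files to the controller
# from the master host, then copy them to the other nodes. Module choice and
# the /tmp staging directory are assumptions, not part of this role.
# - block:
#   - name: Fetch the dh file and the ta key from the master host
#     fetch: src={{ openvpn_conf_dir }}/{{ item }} dest=/tmp/openvpn-{{ openvpn_master_host }}/ flat=yes
#     delegate_to: '{{ openvpn_master_host }}'
#     with_items: [ 'dh2048.pem', 'ta.key' ]
#   - name: Copy the dh file and the ta key to the non master hosts
#     copy: src=/tmp/openvpn-{{ openvpn_master_host }}/{{ item }} dest={{ openvpn_conf_dir }}/{{ item }} owner=root group={{ openvpn_unprivileged_group }} mode=0440
#     with_items: [ 'dh2048.pem', 'ta.key' ]
#   when: openvpn_ha and not openvpn_is_master_host
#   tags: [ 'openvpn', 'openvpn_conf', 'openvpn_shared_secrets' ]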
- block:
- name: Enable kernel forwarding

View File

@@ -31,7 +31,7 @@ $ldap = Net::LDAP->new($opt_uri) or die("LDAP connect to $opt_uri failed!");
{% endif %}
{% if openvpn_ldap_nonanon_bind %}
$result = $ldap->bind('{{ openvpn_ldap_binddn }}', password => '{{ openvpn_ldap_bindpwd }}');
$result = $ldap->bind('{{ openvpn_ldap_binddn }}', password => '{{ openvpn_ldap_bindpwd | default('') }}');
{% else %}
$result = $ldap->bind($opt_binddn, password => $opt_passwd);
{% endif %}

View File

@@ -84,7 +84,7 @@
</network>
<storages/>
<users>
<user resources="*" password="{{ orientdb_root_pwd }}" name="root"/>
<user resources="*" password="{{ orientdb_root_pwd }}" name="{{ orientdb_root_username }}"/>
</users>
<properties>
<entry value="1" name="db.pool.min"/>

View File

@@ -12,7 +12,7 @@ psql_pgpool_pkg_state: installed
#
# See the features matrix here: http://www.postgresql.org/about/featurematrix/
#
psql_version: 9.4
psql_version: 9.5
psql_db_host: localhost
psql_db_port: 5432
psql_db_size_w: 150000000
@@ -54,7 +54,7 @@ postgresql_pkgs:
psql_ansible_needed_pkgs:
- python-psycopg2
# - libpq-dev
psql_db_name: db_name
psql_db_user: db_user
psql_db_pwd: "We cannot save the password into the repository. Use another variable and change pgpass.j2 accordingly. Encrypt the file that contains the variable with ansible-vault"
@@ -75,6 +75,7 @@ postgresql_pgpool_pkgs:
# pgpool-II
pgpool_pkgs:
- pgpool2
- iputils-arping
pgpool_enabled: True
pgpool_listen_addresses: 'localhost'
@@ -126,6 +127,20 @@ pgpool_memqcache_memcached_host: localhost
pgpool_memqcache_memcached_port: 11211
pgpool_memqcache_expire: 0
pgpool_memqcache_auto_cache_invalidation: 'on'
# HA and watchdog
pgpool_use_watchdog: 'off'
pgpool_wd_trusted_servers: 'localhost,localhost'
pgpool_wd_port: 9000
pgpool_wd_priority: 1
# Warning: setting pgpool_wd_heartbeat_mode to False enables
# 'query mode', which is untested and does not work without manual intervention
pgpool_wd_heartbeat_mode: True
pgpool_wd_heartbeat_port: 9694
pgpool_wd_heartbeat_keepalive_int: 3
pgpool_wd_heartbeat_deadtime: 30
pgpool_wd_heartbeat_dest0: 'localhost'
pgpool_wd_heartbeat_dest0_port: '{{ pgpool_wd_heartbeat_port }}'
#pgpool_wd_authkey: 'set it inside a vault file'
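# Example override for a two node watchdog setup (hostnames and the address
# below are placeholders):
# pgpool_use_watchdog: 'on'
# pgpool_wd_trusted_servers: 'router.example.org,dns.example.org'
# pgpool_wd_heartbeat_dest0: 'pgpool-peer.example.org'
# pgpool_virtual_ip: 192.168.1.10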
# SSL as a special case
pgpool_enable_ssl: False
@@ -134,6 +149,8 @@ pgpool_ssl_key: /etc/pki/pgpool2/pgpool2.key
pgpool_ssl_cert: '/var/lib/acme/live/{{ ansible_fqdn }}/cert'
pgpool_ssl_ca: '/var/lib/acme/live/{{ ansible_fqdn }}/chain'
pgpool_ssl_ca_dir: /etc/ssl/certs
pgpool_virtual_ip: 127.0.0.1
pgpool_virtual_netmask: 24
# WAL files archiving is mandatory for pgpool recovery
psql_wal_files_archiving_enabled: '{{ psql_pgpool_install }}'

View File

@@ -0,0 +1,12 @@
#!/bin/bash
#
# Exec /usr/bin/arping as root via sudo
RETVAL=
CMD=/usr/bin/arping
sudo $CMD "$@"
RETVAL=$?
exit $RETVAL

View File

@@ -0,0 +1,12 @@
#!/bin/bash
#
# Exec /sbin/ip as root via sudo
RETVAL=
CMD=/sbin/ip
sudo $CMD "$@"
RETVAL=$?
exit $RETVAL

View File

@@ -5,8 +5,6 @@
when: psql_postgresql_install
- include: postgis.yml
when: postgres_install_gis_extensions
- include: postgres_pgpool.yml
when: psql_pgpool_install
- include: postgresql-config.yml
when: psql_postgresql_install
- include: postgresql-ssl-config.yml
@@ -19,6 +17,10 @@
when:
- psql_postgresql_install
- psql_db_data is defined
- include: postgresql-service-status.yml
when: psql_postgresql_install
- include: postgres_pgpool.yml
when: psql_pgpool_install
- include: manage_pg_db.yml
when:
- psql_postgresql_install

View File

@@ -12,7 +12,7 @@
become_user: postgres
postgresql_db: db={{ item.name }} port={{ psql_db_port }} encoding={{ item.encoding }} owner={{ item.user }} template=template0 state={{ item.state | default('present') }}
with_items: '{{ psql_db_data | default(omit) }}'
when: item.managedb
when: item.managedb | default(True)
tags: [ 'postgresql', 'postgres', 'pg_db' ]
- name: Only set a db user password. Mostly for the postgresql user

View File

@@ -10,13 +10,3 @@
with_items: '{{ psql_ansible_needed_pkgs }}'
tags: [ 'postgresql', 'postgres' ]
- name: Ensure that the postgresql server is started
service: name=postgresql state=started enabled=yes
when: postgresql_enabled
tags: [ 'postgresql', 'postgres' ]
- name: Ensure that the postgresql server is stopped and disabled
service: name=postgresql state=stopped enabled=no
when: not postgresql_enabled
tags: [ 'postgresql', 'postgres' ]

View File

@@ -1,41 +1,80 @@
---
- name: Install the pgpool package
apt: name={{ item }} state={{ psql_pgpool_pkg_state }}
with_items: '{{ pgpool_pkgs }}'
tags: [ 'postgresql', 'postgres', 'pgpool' ]
- block:
- name: Install the pgpool package
apt: name={{ item }} state={{ psql_pgpool_pkg_state }}
with_items: '{{ pgpool_pkgs }}'
- name: Configure pcp
#template: src=pcp.conf.j2 dest=/etc/pgpool2/pcp.conf owner=root group=postgres mode=0640
shell: pwd=`pg_md5 {{ pcp_pwd }}` ; echo "{{ pgpool_pcp_user }}:${pwd}" > /etc/pgpool2/pcp.conf ; chmod 640 /etc/pgpool2/pcp.conf; chown root:postgres /etc/pgpool2/pcp.conf
- name: Install the pgpool configuration file
template: src=pgpool.conf.j2 dest=/etc/pgpool2/pgpool.conf owner=root group=postgres mode=0640
notify: Restart pgpool2
- name: Give access to the remote postgresql clients
lineinfile: name=/etc/pgpool2/pool_hba.conf regexp="^host {{ item.0.name }} {{ item.0.user }} {{ item.1 }}.*$" line="host {{ item.0.name }} {{ item.0.user }} {{ item.1 }} md5"
with_subelements:
- '{{ psql_db_data | default([]) }}'
- allowed_hosts
when:
- psql_db_data is defined
- item.1 is defined
notify: Reload pgpool2
- name: Create the pki directory to store the pgpool key
file: dest=/etc/pki/pgpool2 state=directory owner=postgres group=postgres mode=0750
when: pgpool_enable_ssl
- name: Create a pgpool accessible ssl key file if it does not exist
copy: src=/var/lib/acme/live/{{ ansible_fqdn }}/privkey dest=/etc/pki/pgpool2/pgpool2.key owner=postgres group=postgres mode=0400 remote_src=True
when: pgpool_enable_ssl
- name: Install the pool_passwd configuration file
shell: cd /etc/pgpool2 ; pg_md5 -m -u {{ item.user }} {{ item.pwd }} ; chown root:postgres /etc/pgpool2/pool_passwd ; chmod 660 /etc/pgpool2/pool_passwd
with_items: '{{ psql_db_data | default([]) }}'
- name: Install the sudoers config that permits pgpool change the network configuration during a failover
template: src=pgpool-wd-sudoers.j2 dest=/etc/sudoers.d/pgpool-wd owner=root group=root mode=0440
- name: Install the ip script that manage the network configuration during a failover
copy: src={{ item }} dest=/sbin/{{ item }} owner=root group=root mode=0755
with_items:
- ip_script
- name: Install the arping scripts that manage the network configuration during a failover
copy: src={{ item }} dest=/usr/local/bin/{{ item }} owner=root group=root mode=0755
with_items:
- arping_script
- name: Start and enable pgpool2
service: name=pgpool2 state=started enabled=yes
when: pgpool_enabled
tags: [ 'postgresql', 'postgres', 'pgpool', 'pcp_conf', 'pgpool_conf' ]
- name: Install the pgpool configuration file
template: src=pgpool.conf.j2 dest=/etc/pgpool2/pgpool.conf owner=root group=postgres mode=0640
notify: Restart pgpool2
tags: [ 'postgresql', 'postgres', 'pgpool', 'pgpool_conf' ]
- name: Give access to the remote postgresql clients
lineinfile: name=/etc/pgpool2/pool_hba.conf regexp="^host {{ item.0.name }} {{ item.0.user }} {{ item.1 }}.*$" line="host {{ item.0.name }} {{ item.0.user }} {{ item.1 }} md5"
with_subelements:
- '{{ psql_db_data | default([]) }}'
- allowed_hosts
when:
- psql_db_data is defined
- item.1 is defined
notify: Reload pgpool2
tags: [ 'postgresql', 'postgres', 'pgpool', 'pgpool_conf' ]
- name: Install the pool_passwd configuration file
shell: cd /etc/pgpool2 ; pg_md5 -m -u {{ item.user }} {{ item.pwd }} ; chown root:postgres /etc/pgpool2/pool_passwd ; chmod 660 /etc/pgpool2/pool_passwd
with_items: '{{ psql_db_data | default([]) }}'
tags: [ 'postgresql', 'postgres', 'pgpool', 'pgpool_conf' ]
- name: Start and enable pgpool2
service: name=pgpool2 state=started enabled=yes
when: pgpool_enabled
tags: [ 'postgresql', 'postgres', 'pgpool' ]
- name: Stop and disable pgpool2
service: name=pgpool2 state=stopped enabled=no
- block:
- name: Stop and disable pgpool2
service: name=pgpool2 state=stopped enabled=no
- name: Install the pgpool packages
apt: name={{ item }} state=absent
with_items: '{{ pgpool_pkgs }}'
- name: Remove the pgpool failover sudoers file
file: dest=/etc/sudoers.d/pgpool-wd state=absent
- name: Remove the pgpool configuration directory
file: dest=/etc/pgpool2 state=absent
- name: Remove the scripts that manage the network configuration during a failover
file: dest={{ item }} state=absent
with_items:
- /sbin/ip_script
- /usr/local/bin/arping_script
when: not pgpool_enabled
tags: [ 'postgresql', 'postgres', 'pgpool' ]

View File

@@ -0,0 +1,11 @@
---
- name: Ensure that the postgresql server is started
service: name=postgresql state=started enabled=yes
when: postgresql_enabled
tags: [ 'postgresql', 'postgres' ]
- name: Ensure that the postgresql server is stopped and disabled
service: name=postgresql state=stopped enabled=no
when: not postgresql_enabled
tags: [ 'postgresql', 'postgres' ]

View File

@@ -1,8 +1,15 @@
---
- name: Setup ssl in the postgresql configuration
action: configfile path=/etc/postgresql/{{ psql_version }}/main/postgresql.conf key={{ item.name }} value="'{{ item.value }}'"
with_items: '{{ psql_conf_ssl_parameters }}'
when: psql_enable_ssl
notify: Restart postgresql
tags: [ 'postgresql', 'postgres', 'pg_conf' ]
- block:
- name: Setup ssl in the postgresql configuration
action: configfile path=/etc/postgresql/{{ psql_version }}/main/postgresql.conf key={{ item.name }} value="'{{ item.value }}'"
with_items: '{{ psql_conf_ssl_parameters }}'
notify: Restart postgresql
- name: Create the pki directory to store the postgresql key
file: dest=/etc/pki/postgresql state=directory owner=postgres group=postgres mode=0750
- name: Create a postgres accessible ssl key file if it does not exist
copy: src=/var/lib/acme/live/{{ ansible_fqdn }}/privkey dest=/etc/pki/postgresql/postgresql.key owner=postgres group=postgres mode=0400 remote_src=True
when: psql_enable_ssl
tags: [ 'postgresql', 'postgres', 'pg_conf' ]

View File

@@ -0,0 +1,3 @@
{{ pgpool_recovery_user }} ALL=(ALL) NOPASSWD: /bin/ip
{{ pgpool_recovery_user }} ALL=(ALL) NOPASSWD: /usr/bin/arping

View File

@@ -437,13 +437,13 @@ client_idle_limit_in_recovery = {{ pgpool_client_idle_limit_in_recovery }}
# - Enabling -
use_watchdog = off
use_watchdog = {{ pgpool_use_watchdog }}
# Activates watchdog
# (change requires restart)
# -Connection to up stream servers -
trusted_servers = ''
trusted_servers = '{{ pgpool_wd_trusted_servers }}'
# trusted server list which are used
# to confirm network connection
# (hostA,hostB,hostC,...)
@@ -454,36 +454,36 @@ ping_path = '/bin'
# - Watchdog communication Settings -
wd_hostname = ''
wd_hostname = '{{ ansible_default_ipv4.address }}'
# Host name or IP address of this watchdog
# (change requires restart)
wd_port = 9000
# port number for watchdog service
# (change requires restart)
wd_authkey = ''
wd_authkey = '{{ pgpool_wd_authkey }}'
# Authentication key for watchdog communication
# (change requires restart)
# - Virtual IP control Setting -
delegate_IP = ''
delegate_IP = '{{ pgpool_virtual_ip }}'
# delegate IP address
# If this is empty, virtual IP never bring up.
# (change requires restart)
ifconfig_path = '/sbin'
# ifconfig command path
# (change requires restart)
if_up_cmd = 'ifconfig eth0:0 inet $_IP_$ netmask 255.255.255.0'
if_up_cmd = 'ip_script addr add {{ pgpool_virtual_ip }}/{{ pgpool_virtual_netmask }} dev {{ ansible_default_ipv4.alias }}'
# startup delegate IP command
# (change requires restart)
if_down_cmd = 'ifconfig eth0:0 down'
if_down_cmd = 'ip_script addr del {{ pgpool_virtual_ip }}/{{ pgpool_virtual_netmask }} dev {{ ansible_default_ipv4.alias }}'
# shutdown delegate IP command
# (change requires restart)
arping_path = '/usr/sbin' # arping command path
arping_path = '/usr/local/bin' # arping command path
# (change requires restart)
arping_cmd = 'arping -U $_IP_$ -w 1'
arping_cmd = 'arping_script -U $_IP_$ -w 1'
# arping command
# (change requires restart)
@@ -511,22 +511,25 @@ wd_interval = 10
# lifecheck interval (sec) > 0
# (change requires restart)
wd_priority = {{ pgpool_wd_priority }}
{% if pgpool_wd_heartbeat_mode %}
# -- heartbeat mode --
wd_heartbeat_port = 9694
wd_heartbeat_port = {{ pgpool_wd_heartbeat_port }}
# Port number for receiving heartbeat signal
# (change requires restart)
wd_heartbeat_keepalive = 2
wd_heartbeat_keepalive = {{ pgpool_wd_heartbeat_keepalive_int }}
# Interval time of sending heartbeat signal (sec)
# (change requires restart)
wd_heartbeat_deadtime = 30
wd_heartbeat_deadtime = {{ pgpool_wd_heartbeat_deadtime }}
# Deadtime interval for heartbeat signal (sec)
# (change requires restart)
heartbeat_destination0 = 'host0_ip1'
heartbeat_destination0 = '{{ pgpool_wd_heartbeat_dest0 }}'
# Host name or IP address of destination 0
# for sending heartbeat signal.
# (change requires restart)
heartbeat_destination_port0 = 9694
heartbeat_destination_port0 = {{ pgpool_wd_heartbeat_dest0_port }}
# Port number of destination 0 for sending
# heartbeat signal. Usually this is the
# same as wd_heartbeat_port.
@@ -543,6 +546,7 @@ heartbeat_device0 = ''
#heartbeat_destination_port1 = 9694
#heartbeat_device1 = ''
{% else %}
# -- query mode --
wd_life_point = 3
@@ -561,21 +565,19 @@ wd_lifecheck_password = ''
# Password for watchdog user in lifecheck
# (change requires restart)
{% endif %}
# - Other pgpool Connection Settings -
#other_pgpool_hostname0 = 'host0'
other_pgpool_hostname0 = '{{ pgpool_wd_heartbeat_dest0 }}'
# Host name or IP address to connect to for other pgpool 0
# (change requires restart)
#other_pgpool_port0 = 5432
other_pgpool_port0 = {{ pgpool_port }}
# Port number for othet pgpool 0
# (change requires restart)
#other_wd_port0 = 9000
other_wd_port0 = {{ pgpool_wd_port }}
# Port number for othet watchdog 0
# (change requires restart)
#other_pgpool_hostname1 = 'host1'
#other_pgpool_port1 = 5432
#other_wd_port1 = 9000
#------------------------------------------------------------------------------
# OTHERS

View File

@@ -10,6 +10,10 @@ r_connector_usershome: /home/
r_connector_userconfig: userconfig.csv
r_connector_adduserscript: /usr/local/bin/rusersadd
r_connector_rstudio_cookie_key: /var/lib/rstudio-server/secure-cookie-key
r_connector_rprofile_svn_url: 'http://svn.research-infrastructures.eu/public/d4science/gcube/trunk/data-analysis/RConfiguration/RStudioConfiguration'
r_connector_rprofile_filename: '.Rprofile'
r_connector_rprofile_base_dir: '/srv/d4science'
r_connector_rprofile_path: '{{ r_connector_rprofile_base_dir }}/RStudioConfiguration'
r_connector_deb_pkgs:
- ldap-utils
- nslcd

View File

@@ -1,21 +1,20 @@
---
- block:
- name: Remove the installed R connector before upgrading
file: dest={{ item }} state=absent
with_items:
- '{{ smartgears_instance_path }}/webapps/r-connector'
- '{{ smartgears_instance_path }}/webapps/r-connector.war'
when: smartgears_upgrade or not r_connector_install
- name: Get the R connector war file
get_url: url={{ r_connector_url }} dest={{ smartgears_instance_path }}/webapps/{{ r_connector_war_file }}
- name: Unpack the R connector war file
shell: mkdir {{ smartgears_instance_path }}/webapps/r-connector ; cd {{ smartgears_instance_path }}/webapps/r-connector ; jar xf {{ smartgears_instance_path }}/webapps/{{ r_connector_war_file }}
when: smartgears_upgrade
- name: Create the R connector webapp directory
file: dest={{ smartgears_instance_path }}/webapps/r-connector state=directory
- name: Get and unpack the R connector war file
unarchive: copy=no src={{ r_connector_url }} dest={{ smartgears_instance_path }}/webapps/r-connector
args:
creates: '{{ smartgears_instance_path }}/webapps/r-connector/WEB-INF/web.xml'
- name: Obtain the permission to read the Rstudio secure cookie key
become: False
file: dest={{ r_connector_rstudio_cookie_key }} mode=640 group={{ smartgears_user }}
@@ -24,15 +23,55 @@
template: src=r-web.xml.j2 dest={{ smartgears_instance_path }}/webapps/r-connector/WEB-INF/web.xml mode=0440
notify: Restart smartgears
- name: Install the R add users script
become: False
template: src=rusersadd.j2 dest={{ r_connector_adduserscript }} owner=root group=root mode=0555
- name: Install the packages required to enable the LDAP PAM authentication
apt: pkg={{ item }} state=present update_cache=yes cache_valid_time=1800
with_items: '{{ r_connector_deb_pkgs }}'
- name: Install the R add users script
become: False
template: src=rusersadd.j2 dest={{ r_connector_adduserscript }} owner=root group=root mode=0555
tags: [ 'smartgears', 'r_connector', 'tomcat', 'rusersadd' ]
- name: Create the directory that will host the RConfiguration stuff
become: False
file: dest={{ r_connector_rprofile_base_dir }} owner={{ d4science_user }} group={{ d4science_user }} state=directory
tags: [ 'smartgears', 'r_connector', 'tomcat', 'rusersadd' ]
- name: Get the svn repository that provides the .Rprofile
subversion: repo={{ r_connector_rprofile_svn_url }} dest={{ r_connector_rprofile_path }}
tags: [ 'smartgears', 'r_connector', 'tomcat', 'rusersadd' ]
- name: Install the cron job that regularly updates the Rprofile
cron: name="Update the RStudioConfiguration repo" special_time=daily job="cd {{ r_connector_rprofile_path }} ; svn update >/dev/null 2>&1"
tags: [ 'smartgears', 'r_connector', 'tomcat', 'rusersadd' ]
become: True
become_user: '{{ smartgears_user }}'
when: r_connector_install
tags: [ 'smartgears', 'r_connector', 'tomcat' ]
- block:
- name: Remove the installed R connector before upgrading
file: dest={{ item }} state=absent
with_items:
- '{{ smartgears_instance_path }}/webapps/r-connector'
- '{{ smartgears_instance_path }}/webapps/r-connector.war'
- name: Remove the packages required to enable the LDAP PAM authentication
apt: pkg={{ item }} state=absent
with_items: '{{ r_connector_deb_pkgs }}'
- name: Remove the connector rusersadd script
become: False
file: dest={{ r_connector_adduserscript }} state=absent
- name: Remove the RConfiguration repo
file: dest={{ r_connector_rprofile_path }} state=absent
- name: Remove the cron job that regularly updates the Rprofile
cron: name="Update the RStudioConfiguration repo" job="cd {{ r_connector_rprofile_path }} ; svn update >/dev/null 2>&1" state=absent
become: True
become_user: '{{ smartgears_user }}'
when: not r_connector_install
tags: [ 'smartgears', 'r_connector', 'tomcat' ]

View File

@@ -2,29 +2,44 @@
USER="$1"
HDIR="{{ r_connector_usershome }}$USER"
logfile={{ smartgears_user_home }}/tomcat/logs/runuseradd.log
exec > $logfile 2>&1
# We use logger to log directly to syslog
LOG_PREFIX="r-connector rusersadd:"
# 0: allowed
# 1: not allowed
ALLOW_LOCAL_USERS=1
RPROFILE_FILE='{{ r_connector_rprofile_path }}/{{ r_connector_rprofile_filename }}'
if [ -d $HDIR ] ; then
#echo "user dir exist"
logger "$LOG_PREFIX user $HDIR directory exists"
exit 0
else
if id -u $USER >/dev/null 2>&1
then
#"echo "ldap user first login" logger "$LOG_PREFIX ldap user $USER first login"
sudo /bin/su - $USER /bin/ls sudo /bin/mkdir -p $HDIR
sudo /bin/chmod g+ws $HDIR sudo /bin/chown -R $USER $HDIR
sudo /bin/chmod g-wr,o-rwx $HDIR
sudo /usr/bin/touch $HDIR/{{ r_connector_userconfig }} sudo /usr/bin/touch $HDIR/{{ r_connector_userconfig }}
sudo /bin/chmod 660 $HDIR/{{ r_connector_userconfig }} sudo /bin/chmod 660 $HDIR/{{ r_connector_userconfig }}
sudo /bin/chgrp -R {{ smartgears_user }} $HDIR #sudo /bin/chgrp -R {{ smartgears_user }} $HDIR
sudo /bin/chgrp {{ smartgears_user }} $HDIR
sudo /bin/chown {{ smartgears_user }}:{{ smartgears_user }} $HDIR/{{ r_connector_userconfig }}
sudo /bin/ln -s $RPROFILE_FILE $HDIR/{{ r_connector_rprofile_filename }}
exit 0
else
#echo "user does not exist and is not ldap"
logger "$LOG_PREFIX user $USER does not exist locally and is not an ldap user"
sudo /usr/sbin/useradd -m -s /bin/false -g {{ smartgears_user }} $USER
sudo /bin/chmod g+ws $HDIR
sudo /usr/bin/touch $HDIR/{{ r_connector_userconfig }}
sudo /bin/chmod 660 $HDIR/{{ r_connector_userconfig }}
sudo /bin/chgrp -R {{ smartgears_user }} $HDIR
if [ $ALLOW_LOCAL_USERS -eq 1 ] ; then
logger "$LOG_PREFIX non ldap users not allowed, refusing to create the user."
exit 1
else
logger "$LOG_PREFIX non ldap users allowed, adding the user $USER locally"
sudo /usr/sbin/useradd -m -s /bin/false -g {{ smartgears_user }} $USER
sudo /bin/chmod g+ws $HDIR
sudo /usr/bin/touch $HDIR/{{ r_connector_userconfig }}
sudo /bin/chmod 660 $HDIR/{{ r_connector_userconfig }}
sudo /bin/chgrp -R {{ smartgears_user }} $HDIR
sudo /bin/ln -s $RPROFILE_FILE $HDIR/{{ r_connector_rprofile_filename }}
exit 0
fi
fi
fi

View File

@@ -3,7 +3,7 @@ REMOTE_PROTOCOL={{ orientdb_remote_protocol }}
HTTP_PROTOCOL={{ orientdb_http_protocol }}
HTTP_PORT={{ orientdb_http_port }}
DB={{ orientdb_db_name }}
USERNAME={{ orientdb_username }}
USERNAME={{ orientdb_root_username }}
PASSWORD={{ orientdb_root_pwd }}
DEFAULT_CREATED_WRITER_USER_PASSWORD={{ orientdb_writer_password }}
DEFAULT_CREATED_READER_USER_PASSWORD={{ orientdb_reader_password }}

View File

@@ -28,6 +28,7 @@ common_packages:
- apt-transport-https
- nano
- xmlstarlet
- bsdutils
# Set this variable in your playbook
# additional_packages:
@@ -110,6 +111,9 @@ default_security_limits:
- { domain: 'root', l_item: 'nofile', type: 'soft', value: '8192' }
- { domain: 'root', l_item: 'nofile', type: 'hard', value: '8192' }
# default_rsyslog_custom_rules:
# - ':msg, contains, "icmp6_send: no reply to icmp error" ~'
# - ':msg, contains, "[PYTHON] Can\'t call the metric handler function for" ~'
#
# debian/ubuntu distributions controllers

View File

@@ -4,17 +4,20 @@
when:
- is_precise and ansible_kernel != "3.2.0-4-amd64"
- is_not_trusty
notify:
Restart rsyslog
tags:
- rsyslog
notify: Restart rsyslog
tags: rsyslog
- name: Enable the kernel logger on ubuntu 12.04 and kernel major version >= 3
lineinfile: dest=/etc/rsyslog.conf line="$ModLoad imklog" insertafter="^#$ModLoad imklog" backup=yes
when:
- is_precise and ansible_kernel == "3.2.0-4-amd64"
- is_trusty
notify:
Restart rsyslog
tags:
- rsyslog
notify: Restart rsyslog
tags: rsyslog
- name: Install custom rsyslog rules
template: src=10-custom_rules.conf.j2 dest=/etc/rsyslog.d/10-custom_rules.conf owner=root group=root mode=0444
when: default_rsyslog_custom_rules is defined
notify: Restart rsyslog
tags: rsyslog

View File

@@ -0,0 +1,6 @@
{% if default_rsyslog_custom_rules is defined %}
{% for entry in default_rsyslog_custom_rules %}
{{ entry }}
{% endfor %}
{% endif %}