forked from ISTI-ansible-roles/ansible-roles
Merge branch 'master' of gitorious.research-infrastructures.eu:infrastructure-management/ansible-playbooks
commit 5c30b79437
@@ -13,3 +13,7 @@ docker_packages:
docker_run_as_docker_user: True
docker_user_home: /home/docker
docker_defaults_file: /etc/default/docker
docker_enable_tcp_socket: False
docker_tcp_socket_port: 2375
docker_tcp_socket_host: 127.0.0.1
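For illustration only (not part of this commit), a playbook or group_vars file would switch on the new TCP socket simply by overriding these defaults; the host value below is a hypothetical choice:

docker_enable_tcp_socket: True
# bind on all interfaces instead of the loopback default (hypothetical)
docker_tcp_socket_host: 0.0.0.0
docker_tcp_socket_port: 2375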
@@ -14,6 +14,10 @@
  apt: pkg={{ item }} state={{ docker_pkg_status }} update_cache=yes cache_valid_time=3600
  with_items: '{{ docker_packages }}'

- name: Install the Docker default options
  template: src=docker-default.j2 dest={{ docker_defaults_file }} owner=root group=root mode=0644
  notify: Restart docker

- name: Ensure that the service is started and enabled
  service: name=docker state=started enabled=yes
@@ -0,0 +1,15 @@
# Customize location of Docker binary (especially for development testing).
#DOCKERD="/usr/local/bin/dockerd"


CUSTOM_DOCKER_SOCKET="-H tcp://{{ docker_tcp_socket_host }}:{{ docker_tcp_socket_port }} -H unix:///var/run/docker.sock"
#CUSTOM_DOCKER_DNS_SERVERS="--dns 8.8.8.8 --dns 8.8.4.4"

# Use DOCKER_OPTS to modify the daemon startup options.
DOCKER_OPTS="{% if docker_enable_tcp_socket %}$CUSTOM_DOCKER_SOCKET {% endif %}"

# If you need Docker to use an HTTP proxy, it can also be specified here.
#export http_proxy="http://127.0.0.1:3128/"

# This is also a handy place to tweak where Docker's temporary files go.
#export DOCKER_TMPDIR="/mnt/bigdrive/docker-tmp"
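As an illustration (using the defaults shown earlier and assuming docker_enable_tcp_socket: True), the rendered /etc/default/docker would contain roughly:

CUSTOM_DOCKER_SOCKET="-H tcp://127.0.0.1:2375 -H unix:///var/run/docker.sock"
DOCKER_OPTS="$CUSTOM_DOCKER_SOCKET "

so the daemon listens on both the TCP socket and the usual Unix socket once the init script sources this file.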
@@ -0,0 +1,12 @@
---
# There are some duplicates here
jenkins_dest: "/var/lib/jenkins"
jenkins_username: jenkins
jenkins_group: jenkins
jenkins_shell: /bin/bash

jenkins_maven_config: True
jenkins_maven_settings_dirs:
  - .m2

jenkins_maven_settings_url: http://localhost/settings.xml
@@ -0,0 +1,20 @@
---
- block:
    - name: Create the maven setting directory
      file: dest={{ jenkins_dest }}/{{ item }} state=directory
      with_items: '{{ jenkins_maven_settings_dirs }}'

    - name: Fetch the maven settings template file
      become: False
      become_user: root
      run_once: True
      get_url: url={{ jenkins_maven_settings_url }} dest=/tmp/settings.xml.j2 force=yes
      delegate_to: localhost

    - name: Install the maven settings
      template: src=/tmp/settings.xml.j2 dest={{ jenkins_dest }}/.m2/settings.xml

  become: True
  become_user: '{{ jenkins_username }}'
  when: jenkins_maven_config
  tags: [ 'jenkins', 'jenkins_common', 'jenkins_master', 'jenkins_slave' ]
@@ -74,3 +74,5 @@ jenkins_plugins:
    enabled: True
  build-pipeline-plugin:
    enabled: True
  build-timeout-plugin:
    enabled: True
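For reference, and purely as an illustrative sketch: the plugin tasks changed further down look up optional 'version' and 'pinned' keys in each jenkins_plugins entry, so a fully qualified entry would look like this (plugin name and version are hypothetical):

jenkins_plugins:
  git:
    enabled: True
    version: 3.3.0
    pinned: True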
@@ -32,14 +32,13 @@
    service: name=jenkins state=started enabled=yes

  when: jenkins_install
  tags: jenkins
  tags: [ 'jenkins', 'jenkins_master' ]

- block:
    # Handle plugins
    # If Jenkins is installed or updated, wait for pulling the Jenkins CLI, assuming 10s should be sufficient
    - name: 120 seconds delay while starting Jenkins
      wait_for: port={{ jenkins_http_port }} delay={{ jenkins_restart_delay }}
      when: jenkins_install | changed

    # Create Jenkins CLI destination directory
    - name: "Create Jenkins CLI destination directory"
@@ -55,14 +54,14 @@
    - name: Install plugins without a specific version
      jenkins_plugin: name="{{ item.key }}" params='{{ jenkins_access_params }}'
      register: my_jenkins_plugin_unversioned
      when: 'version' not in item.value
      with_dict: jenkins_plugins
      when: '"version" not in item.value'
      with_dict: '{{ jenkins_plugins }}'

    - name: Install plugins with a specific version
      jenkins_plugin: name="{{ item.key }}" version="{{ item.value['version'] }}" params='{{ jenkins_access_params }}'
      register: my_jenkins_plugin_versioned
      when: 'version' in item.value
      with_dict: jenkins_plugins
      when: '"version" in item.value'
      with_dict: '{{ jenkins_plugins }}'

    - name: Initiate the jenkins_restart_required fact
      set_fact:
@@ -72,16 +71,17 @@
      set_fact:
        jenkins_restart_required: yes
      when: item.changed
      with_items: my_jenkins_plugin_versioned.results
      with_items: '{{ my_jenkins_plugin_versioned.results }}'

    - name: Check if restart is required by any of the unversioned plugins
      set_fact:
        jenkins_restart_required: yes
      when: item.changed
      with_items: my_jenkins_plugin_unversioned.results
      with_items: '{{ my_jenkins_plugin_unversioned.results }}'

    - name: Restart Jenkins if required
      service: name=jenkins state= restarted
      become_user: root
      service: name=jenkins state=restarted
      when: jenkins_restart_required

    - name: Wait for Jenkins to start up
@@ -105,18 +105,18 @@
    - name: Plugin pinning
      jenkins_plugin: name="{{ item.key }}" state="{{ 'pinned' if item.value['pinned'] else 'unpinned'}}" params='{{ jenkins_access_params }}'
      when: 'pinned' in item.value
      with_dict: jenkins_plugins
      when: '"pinned" in item.value'
      with_dict: '{{ jenkins_plugins }}'

    - name: Plugin enabling
      jenkins_plugin: name="{{ item.key }}" state="{{ 'enabled' if item.value['enabled'] else 'disabled'}}" params='{{ jenkins_access_params }}'
      when: 'enabled' in item.value
      with_dict: jenkins_plugins
      when: '"enabled" in item.value'
      with_dict: '{{ jenkins_plugins }}'

  become: True
  become_user: '{{ jenkins_username }}'
  when: jenkins_install
  tags: [ 'jenkins', 'jenkins_plugins' ]
  tags: [ 'jenkins', 'jenkins_plugins', 'jenkins_master' ]

- block:
    - name: Ensure that jenkins is stopped and disabled
@@ -140,4 +140,4 @@
      apt_key: url='{{ jenkins_repo_key }}' state=absent

  when: not jenkins_install
  tags: jenkins
  tags: [ 'jenkins', 'jenkins_master' ]
@@ -0,0 +1,10 @@
---
jenkins_slave: False
# There are some duplicates here
jenkins_dest: "/var/lib/jenkins"
jenkins_username: jenkins
jenkins_group: jenkins
jenkins_shell: /bin/bash
jenkins_tmp_retain_days: 5
# TODO: fetch the public key from the master
#jenkins_master_pubkey:
@@ -0,0 +1,17 @@
---
- block:
    - name: add the user that will run the jenkins jobs
      user: name={{ jenkins_username }} home={{ jenkins_dest }} shell={{ jenkins_shell }} generate_ssh_key=yes

    - name: ensure the jenkins master has ssh access on each slave, as jenkins user
      authorized_key: user={{ jenkins_username }} key="{{ item }}" state=present
      with_items:
        - '{{ jenkins_master_pubkey }}'

    - name: Daily cron job to cleanup the /tmp junk
      template: src={{ item }}.j2 dest=/etc/cron.daily/{{ item }} owner=root group=root mode=0755
      with_items:
        - tmp-cleaner

  when: jenkins_slave
  tags: [ 'jenkins', 'jenkins_slave' ]
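jenkins_master_pubkey is still commented out in the defaults (see the TODO there). One possible sketch for that TODO, illustrative only and assuming the master is the first host of a hypothetical jenkins_master inventory group with its key in {{ jenkins_dest }}/.ssh/id_rsa.pub, would read the key at runtime instead of hard-coding it:

- name: Read the jenkins master public key (hypothetical helper, not part of this commit)
  slurp: src={{ jenkins_dest }}/.ssh/id_rsa.pub
  delegate_to: "{{ groups['jenkins_master'][0] }}"
  register: jenkins_master_key_raw

- name: Expose it as jenkins_master_pubkey
  set_fact:
    jenkins_master_pubkey: "{{ jenkins_master_key_raw['content'] | b64decode }}"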
@@ -0,0 +1,8 @@
#!/bin/bash

RETAIN_DAYS={{ jenkins_tmp_retain_days }}
LOG_FILE=/var/log/tmp-cleaner.log
find /tmp/ -ctime +${RETAIN_DAYS} -exec rm -fr {} \; >>$LOG_FILE 2>&1

exit 0
@@ -0,0 +1,20 @@
---
shinyproxy_install: False
shinyproxy_version: 0.8.6
shinyproxy_file_name: 'shinyproxy-{{ shinyproxy_version }}.jar'
shinyproxy_url: 'https://www.shinyproxy.io/downloads/{{ shinyproxy_file_name }}'
shinyproxy_app_name: shinyproxy.jar
shinyproxy_user: shinyproxy
shinyproxy_install_dir: /opt/shinyproxy
shinyproxy_http_port: 8080

shinyproxy_app_title: 'Open Analytics Shiny Proxy'
shinyproxy_logo_url: 'http://www.openanalytics.eu/sites/www.openanalytics.eu/themes/oa/logo.png'
shinyproxy_authentication: ldap
shinyproxy_admin_group: ''
shinyproxy_ldap_server: 'ldap://ldap.forumsys.com:389/dc=example,dc=com'
shinyproxy_ldap_admin: cn=read-only-admin,dc=example,dc=com
shinyproxy_ldap_admin_pwd: password
shinyproxy_user_dn_pattern: 'uid={0}'
shinyproxy_group_search_base: ''
shinyproxy_group_search_filter: '(uniqueMember={0})'
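As a sketch only (all values below are placeholders, not part of this commit), a real deployment would typically enable the role and point it at its own LDAP tree from group_vars:

shinyproxy_install: True
shinyproxy_admin_group: shinyproxy-admins
shinyproxy_ldap_server: 'ldap://ldap.example.org:389/dc=example,dc=org'
shinyproxy_ldap_admin: cn=proxy-bind,dc=example,dc=org
# keep the bind password in an ansible-vault file rather than in clear text
shinyproxy_ldap_admin_pwd: '{{ vault_shinyproxy_ldap_admin_pwd }}'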
@@ -0,0 +1,3 @@
---
- name: Restart shinyproxy
  service: name=shinyproxy state=restarted
@@ -0,0 +1,30 @@
---
- block:
    - name: Create the shinyproxy user
      user: name={{ shinyproxy_user }} home={{ shinyproxy_install_dir }} createhome=yes system=yes shell=/usr/sbin/nologin

    - name: Download the shinyproxy jar
      become: True
      become_user: '{{ shinyproxy_user }}'
      get_url: url={{ shinyproxy_url }} dest={{ shinyproxy_install_dir }}

    - name: Set up a symlink to an unversioned app name
      become: True
      become_user: '{{ shinyproxy_user }}'
      file: src={{ shinyproxy_install_dir }}/{{ shinyproxy_file_name }} dest={{ shinyproxy_install_dir }}/{{ shinyproxy_app_name }} state=link

    - name: Install the upstart init file
      template: src=upstart-shinyproxy.conf.j2 dest=/etc/init/shinyproxy.conf owner=root group=root mode=0644
      when: is_trusty

    - name: Install the shinyproxy configuration file
      template: src=shinyproxy-conf.yml.j2 dest={{ shinyproxy_install_dir }}/application.yml owner=root group={{ shinyproxy_user }} mode=0640
      notify: Restart shinyproxy
      tags: [ 'shinyproxy', 'shinyproxy_conf' ]

    - name: Ensure that the shinyproxy service is enabled and running
      service: name=shinyproxy state=started enabled=yes

  when: shinyproxy_install
  tags: shinyproxy
@@ -0,0 +1,40 @@
shiny:
  proxy:
    title: {{ shinyproxy_app_title }}
    logo-url: {{ shinyproxy_logo_url }}
    landing-page: /
    heartbeat-rate: 10000
    heartbeat-timeout: 60000
    port: 8080
    authentication: {{ shinyproxy_authentication }}
    admin-groups: {{ shinyproxy_admin_group }}
    # LDAP configuration
    ldap:
      url: {{ shinyproxy_ldap_server }}
      user-dn-pattern: {{ shinyproxy_user_dn_pattern }}
      group-search-base: {{ shinyproxy_group_search_base }}
      group-search-filter: {{ shinyproxy_group_search_filter }}
      manager-dn: {{ shinyproxy_ldap_admin }}
      manager-password: {{ shinyproxy_ldap_admin_pwd }}
    # Docker configuration
    docker:
      cert-path: /home/none
      url: http://localhost:2375
      host: 127.0.0.1
      port-range-start: 20000
  apps:
  - name: 01_hello
    display-name: Hello Application
    description: Application which demonstrates the basics of a Shiny app
    docker-cmd: ["R", "-e shinyproxy::run_01_hello()"]
    docker-image: openanalytics/shinyproxy-demo
    groups: scientists, mathematicians
  - name: 06_tabsets
    docker-cmd: ["R", "-e shinyproxy::run_06_tabsets()"]
    docker-image: openanalytics/shinyproxy-demo
    groups: scientists

logging:
  file:
    shinyproxy.log
@@ -0,0 +1,17 @@
# shinyproxy - springboot based shiny executor

description "shinyproxy service"

start on runlevel [2345]
stop on runlevel [!2345]
respawn
respawn limit 10 5

setuid {{ shinyproxy_user }}
setgid {{ shinyproxy_user }}

script
    cd {{ shinyproxy_install_dir }}
    exec java -jar ./{{ shinyproxy_app_name }}
end script
@@ -0,0 +1,10 @@
---

accounting_service_gcube_repository: 'gcube-snapshots'
accounting_service_install: False
accounting_service_ver: 1.0.0-20170323.102652-1
accounting_service_snap: 1.0.0-SNAPSHOT
accounting_service_name: accounting-service
accounting_service_filename: '{{ accounting_service_name }}-{{ accounting_service_ver }}.war'
accounting_service_url: 'http://maven.research-infrastructures.eu/nexus/service/local/repositories/{{ accounting_service_gcube_repository }}/content/org/gcube/data/publishing/{{ accounting_service_name }}/{{ accounting_service_snap}}/{{ accounting_service_filename }}'
accounting_service_war_file: '{{ accounting_service_name }}.war'
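Purely as a worked example of how these variables compose, the defaults above make accounting_service_url resolve to:

http://maven.research-infrastructures.eu/nexus/service/local/repositories/gcube-snapshots/content/org/gcube/data/publishing/accounting-service/1.0.0-SNAPSHOT/accounting-service-1.0.0-20170323.102652-1.war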
@@ -0,0 +1,17 @@
---
- block:

    - name: Remove the installed accounting-service before upgrading
      file: dest={{ item }} state=absent
      with_items:
        - '{{ smartgears_instance_path }}/webapps/accounting-service'
        - '{{ smartgears_instance_path }}/webapps/accounting-service.war'
      when: smartgears_upgrade

    - name: Get the accounting-service war file
      get_url: url={{ accounting_service_url }} dest={{ smartgears_instance_path }}/webapps/{{ accounting_service_war_file }}

  become: True
  become_user: '{{ smartgears_user }}'
  when: accounting_service_install
  tags: [ 'smartgears', 'accounting_service', 'tomcat' ]
@@ -7,6 +7,7 @@
    - '{{ gcube_prod_key_2 }}'
    - '{{ gcube_prod_key_3 }}'
    - '{{ gcube_prod_key_4 }}'
    - '{{ gcube_prod_key_5 }}'
  notify: Restart smartgears
  when: install_gcube_prod_key
@@ -17,6 +18,7 @@
    - '{{ gcube_prod_key_2 }}'
    - '{{ gcube_prod_key_3 }}'
    - '{{ gcube_prod_key_4 }}'
    - '{{ gcube_prod_key_5 }}'
  notify: Restart smartgears
  when: not install_gcube_prod_key
@@ -0,0 +1,8 @@
---
home_library_gcube_repository: 'gcube-staging'
home_library_install: False
home_library_ver: 1.7.0-4.3.0-144852
home_library_name: home-library-webapp
home_library_filename: '{{ home_library_name }}-{{ home_library_ver }}.war'
home_library_url: 'http://maven.research-infrastructures.eu/nexus/content/repositories/{{ home_library_gcube_repository }}/org/gcube/data/access/{{ home_library_name }}/{{ home_library_ver}}/{{ home_library_filename }}'
home_library_war_file: '{{ home_library_name }}.war'
@@ -0,0 +1,22 @@
---
- block:

    - name: Remove the installed HOME LIBRARY connector before upgrading
      file: dest={{ item }} state=absent
      with_items:
        - '{{ smartgears_instance_path }}/webapps/home-library-webapp'
        - '{{ smartgears_instance_path }}/webapps/home-library-webapp.war'
      when: smartgears_upgrade

    - name: Get the HOME LIBRARY connector war file
      get_url: url={{ home_library_url }} dest={{ smartgears_instance_path }}/webapps/{{ home_library_war_file }}

    # - name: Unpack the HOME LIBRARY connector war file
    #   shell: mkdir {{ smartgears_instance_path }}/webapps/home-library-webapp ; cd {{ smartgears_instance_path }}/webapps/home-library-webapp ; jar xf {{ smartgears_instance_path }}/webapps/{{ home_library_war_file }}
    #   args:
    #     creates: '{{ smartgears_instance_path }}/webapps/home-library-webapp/WEB-INF/web.xml'

  become: True
  become_user: '{{ smartgears_user }}'
  when: home_library_install
  tags: [ 'smartgears', 'home_library', 'tomcat' ]
@@ -1,7 +1,7 @@
---
gcube_repository: 'gcube-staging'
r_connector_install: False
r_connector_ver: 2.1.2-4.3.0-144071
r_connector_ver: 2.1.3-4.4.0-146364
r_connector_name: r-connector
r_connector_group_id: org.gcube.data.analysis
r_connector_extension: war
@@ -10,11 +10,18 @@ ALLOW_LOCAL_USERS=1
RPROFILE_FILE='{{ r_connector_rprofile_path }}/{{ r_connector_rprofile_filename }}'

if [ -d $HDIR ] ; then
    logger "$LOG_PREFIX user $HDIR directory exists"
    sudo /usr/bin/touch ${HDIR}/{{ r_connector_userconfig }}
    sudo /bin/chown ${USER}:{{ smartgears_user }} $HDIR/{{ r_connector_userconfig }}
    sudo /usr/bin/setfacl -m u:${USER}:rw,g:{{ smartgears_user }}:rw $HDIR/{{ r_connector_userconfig }}
    exit 0
    if id -u $USER >/dev/null 2>&1
    then
        logger "$LOG_PREFIX user $HDIR directory exists. Touching the userconfig.csv file to ensure that it exists with the correct permissions"
        sudo /usr/bin/touch ${HDIR}/{{ r_connector_userconfig }}
        sudo /bin/chown ${USER}:{{ smartgears_user }} $HDIR/{{ r_connector_userconfig }}
        sudo /usr/bin/setfacl -m u:${USER}:rw,g:{{ smartgears_user }}:rw $HDIR/{{ r_connector_userconfig }}
        sudo /bin/ln -s $RPROFILE_FILE $HDIR/{{ r_connector_rprofile_filename }}
        exit 0
    else
        logger "$LOG_PREFIX user $HDIR directory exists but the user does not. Aborting."
        exit 1
    fi
else
    if id -u $USER >/dev/null 2>&1
    then
@@ -16,19 +16,32 @@ smartgears_user: '{{ d4science_user }}'
smartgears_user_home: '{{ d4science_user_home }}'
smartgears_instance_path: '{{ smartgears_user_home }}/tomcat'
smartgears_install_path: '{{ smartgears_user_home }}/SmartGears'
smartgears_distribution_version: 2.1.0-4.3.0-142337
#smartgears_distribution_version: 2.1.0-4.3.0-142337
smartgears_distribution_version: 2.1.2-4.4.0-146408
smartgears_file: 'smartgears-distribution-{{ smartgears_distribution_version }}.tar.gz'
smartgears_url: 'http://maven.research-infrastructures.eu/nexus/content/repositories/{{ gcube_repository }}/org/gcube/distribution/smartgears-distribution/{{ smartgears_distribution_version }}/{{ smartgears_file }}'
smartgears_mode: online
smartgears_application_mode: online
# Production infra
smartgears_infrastructure_name: "d4science.research-infrastructures.eu"
# Production VOs
smartgears_production_vo:
  - '/{{ smartgears_infrastructure_name }}'
  - '/{{ smartgears_infrastructure_name }}/FARM'
  - '/{{ smartgears_infrastructure_name }}/SoBigData'
  - '/{{ smartgears_infrastructure_name }}/SmartArea'
  - '/{{ smartgears_infrastructure_name }}/gCubeApps'
  - '/{{ smartgears_infrastructure_name }}/D4Research'
# Set to 'true' or 'false'. Pay attention to the case
smartgears_authorized_on_all_scopes: 'false'
smartgears_scopes:
  - '/{{ smartgears_infrastructure_name }}'
smartgears_hostname: '{{ ansible_fqdn }}'
smartgears_country: it
smartgears_location: pisa
smartgears_latitude: 41.9000
smartgears_longitude: 12.5000
smartgears_publication_frequency: 180
smartgears_publication_frequency: 300
smartgears_http_port: 9000
smartgears_service_name: 'tomcat-instance-{{ smartgears_http_port }}'
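As an aside (illustrative only, not part of the commit): a node that must also join one of the production VOs listed in smartgears_production_vo would override smartgears_scopes accordingly, for example:

smartgears_scopes:
  - '/{{ smartgears_infrastructure_name }}'
  - '/{{ smartgears_infrastructure_name }}/gCubeApps'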
@@ -36,8 +49,6 @@ smartgears_loglevel: WARN

smartgears_tomcat_contexts: [ 'whn-manager' ]

smartgears_scopes:
  - '/{{ smartgears_infrastructure_name }}'

# The iptables rules use this
http_port: '{{ smartgears_http_port }}'
@@ -7,6 +7,7 @@
    - '{{ gcube_prod_key_2 }}'
    - '{{ gcube_prod_key_3 }}'
    - '{{ gcube_prod_key_4 }}'
    - '{{ gcube_prod_key_5 }}'
  notify: Restart smartgears
  when: install_gcube_prod_key
@@ -17,6 +18,7 @@
    - '{{ gcube_prod_key_2 }}'
    - '{{ gcube_prod_key_3 }}'
    - '{{ gcube_prod_key_4 }}'
    - '{{ gcube_prod_key_5 }}'
  notify: Restart smartgears
  when: not install_gcube_prod_key
@@ -58,6 +58,26 @@
  copy: src=TokenGenerator.class dest=/usr/local/lib/TokenGenerator.class owner=root group=root mode=0644
  tags: [ 'smartgears', 'smartgears_conf', 'tomcat' ]

- name: Install the script that manages the smartgears loglevel
  template: src=change-logback-loglevel.sh.j2 dest=/usr/local/bin/change-logback-loglevel owner=root group=root mode=0755
  with_items: '{{ tomcat_m_instances }}'
  when: not item.skip_smartgears
  tags: [ 'smartgears', 'smartgears_loglevel', 'tomcat' ]

- name: Change the smartgears log level
  become: True
  become_user: '{{ smartgears_user }}'
  shell: /usr/local/bin/change-logback-loglevel
  tags: [ 'smartgears', 'tomcat', 'smartgears_loglevel' ]

- name: Remove some wrong symbolic links created by the install/upgrade script
  file: dest={{ item }} state=absent
  with_items:
    - '{{ smartgears_install_path }}/lib/lib'
    - '{{ smartgears_install_path }}/apps/webapps'
  when: smartgears_upgrade
  tags: [ 'smartgears', 'smartgears_conf', 'tomcat' ]

- name: Install the script that fetches the scope tokens
  template: src=get-scopes.j2 dest=/usr/local/bin/get-scopes owner=root group={{ smartgears_user }} mode=0750
  with_items: '{{ tomcat_m_instances }}'
@@ -81,38 +101,20 @@
  with_items: '{{ tomcat_m_instances }}'
  tags: [ 'smartgears', 'smartgears_conf', 'tomcat' ]

- name: Get the scope tokens from the authorization service and assemble the container.xml file
- name: Get the scope tokens from the authorization service and assemble the container.xml file when we have an authorization token or we are upgrading
  become: True
  become_user: '{{ smartgears_user }}'
  shell: /usr/local/bin/get-scopes {{ gcube_admin_token | default(omit) }}
  when: gcube_admin_token is defined
  shell: /usr/local/bin/get-scopes {{ gcube_admin_token | default('') }}
  when: gcube_admin_token is defined or smartgears_upgrade
  notify: Restart smartgears
  tags: [ 'smartgears', 'smartgears_conf', 'tomcat' ]

- name: Remove the smartgears application state if requested
  #file: dest={{ smartgears_install_path }}/state state=absent
  file: dest=/home/gcube/SmartGears/state state=absent
  when: smartgears_remove_state
  notify: Restart smartgears
  tags: [ 'smartgears', 'smartgears_conf', 'tomcat' ]

- name: Install the script that manages the smartgears loglevel
  template: src=change-logback-loglevel.sh.j2 dest=/usr/local/bin/change-logback-loglevel owner=root group=root mode=0755
  with_items: '{{ tomcat_m_instances }}'
  when: not item.skip_smartgears
  tags: [ 'smartgears', 'smartgears_loglevel', 'tomcat' ]

- name: Change the smartgears log level
  become: True
  become_user: '{{ smartgears_user }}'
  shell: /usr/local/bin/change-logback-loglevel
  tags: [ 'smartgears', 'tomcat', 'smartgears_loglevel' ]

- name: Remove some wrong symbolic links created by the install/upgrade script
  file: dest={{ item }} state=absent
  with_items:
    - '{{ smartgears_install_path }}/lib/lib'
    - '{{ smartgears_install_path }}/apps/webapps'
  when: smartgears_upgrade
  shell: . {{ smartgears_user_home }}/.bashrc ; cd {{ smartgears_user_home }}/SmartGears/scripts ; ./clean-container-state -s old_saved_scopes.xml
  when: smartgears_remove_state and not smartgears_upgrade
  notify: Restart smartgears
  tags: [ 'smartgears', 'smartgears_conf', 'tomcat' ]

- name: Force a smartgears restart
@@ -24,6 +24,9 @@
    <longitude>{{ smartgears_longitude }}</longitude>
  </site>


  <authorizeChildrenContext>{{ smartgears_authorized_on_all_scopes }}</authorizeChildrenContext>

  <property name='SmartGearsDistributionBundle' value='UnBundled' />
  <property name='SmartGearsDistribution' value='{{ smartgears_distribution_version }}' />
  <publication-frequency>{{ smartgears_publication_frequency }}</publication-frequency>
@@ -6,11 +6,37 @@ CONTAINER_XML_TAIL={{ smartgears_user_home }}/.containerxml/3-container.xml
LOCAL_LIB=/usr/local/lib
LOCAL_ETC=/usr/local/etc
LOG_PREFIX="get-scopes: "
GHN_ENV_FILE=/etc/default/tomcat-instance-{{ item.http_port }}.local
SMARTGEARS_VO_AUTH={{ smartgears_authorized_on_all_scopes }}
SMARTGEARS_UPGRADE={{ smartgears_upgrade }}
SMARTGEARS_SAVED_STATE_F=saved_scopes_list.xml
SMARTGEARS_SAVED_STATE_PATH={{ smartgears_user_home }}/SmartGears/$SMARTGEARS_SAVED_STATE_F
SMARTGEARS_SCRIPTS_DIR={{ smartgears_user_home }}/SmartGears/scripts


CONTAINER_XML_FILE={{ smartgears_install_path }}/container.xml

# 0: True, 1: False
USE_SAVED_STATE=1

RETVAL=

# Scenario:
# - First installation, no upgrade.
# - The node must run on all VOs
# In these cases we use our scopes list

if [ "$SMARTGEARS_VO_AUTH" == 'false' ] ; then
    if [ "$SMARTGEARS_UPGRADE" == 'True' ] ; then
        USE_SAVED_STATE=0
        logger "$LOG_PREFIX setting the correct variables so that we are going to use the local scopes"
    else
        logger "$LOG_PREFIX We are going to use our scopes list. A valid token is mandatory"
    fi
else
    logger "$LOG_PREFIX We are going to use our scopes list. A valid token is mandatory"
fi

SCOPES_LIST=""
if [ -f $LOCAL_ETC/scopes.list ] ; then
    . $LOCAL_ETC/scopes.list
@@ -18,14 +44,16 @@ else
    logger "$LOG_PREFIX There is no token list, aborting"
    exit 1
fi


if [ $# -ne 1 ] ; then
    echo "The token must be passed as the sole argument"
    logger "$LOG_PREFIX The token must be passed as the sole argument"

if [ $# -ne 1 -a $USE_SAVED_STATE -ne 0 ] ; then
    echo "The token must be passed as the sole argument when we are not using the local state"
    logger "$LOG_PREFIX The token must be passed as the sole argument when we are not using the local state"
    exit 1
elif [ $# -eq 1 ] ; then
    logger "$LOG_PREFIX We have an authorization token"
    TOKEN=$1
fi
TOKEN=$1

{%if setup_nginx %}
{%if https_port is defined %}
@@ -43,21 +71,33 @@ HTTP_PORT={{ http_port }}
HTTP_PORT={{ item.http_port }}
{% endif %}

for jar in $( ls -1 /home/gcube/tomcat/lib/ ) ; do
    export CLASSPATH="/home/gcube/SmartGears/lib/${jar}:$CLASSPATH"
done
if [ $USE_SAVED_STATE -ne 0 ] ; then
    logger "$LOG_PREFIX First installation or moving away to a configuration that needs to be present on all the VREs. Using our scopes list and not the one memorized in the state"
    for jar in $( ls -1 /home/gcube/tomcat/lib/ ) ; do
        export CLASSPATH="/home/gcube/SmartGears/lib/${jar}:$CLASSPATH"
    done

    cd $LOCAL_LIB
    cd $LOCAL_LIB

    java TokenGenerator {{ smartgears_hostname }} $TOKEN $HTTP_PORT $SCOPES_FILE $SCOPES_LIST
    RETVAL=$?
    if [ $RETVAL -eq 0 ] ; then
        logger "$LOG_PREFIX We got the scope tokens"
    java TokenGenerator {{ smartgears_hostname }} $TOKEN $HTTP_PORT $SCOPES_FILE $SCOPES_LIST >/dev/null 2>&1
    RETVAL=$?
    if [ $RETVAL -eq 0 ] ; then
        logger "$LOG_PREFIX We got the scope tokens"
    else
        logger "$LOG_PREFIX Unable to obtain the scope tokens, aborting"
        exit 1
    fi
else
    logger "$LOG_PREFIX Unable to obtain the scope tokens, aborting"
    exit 1
    logger "$LOG_PREFIX We are going to use the scopes memorized into the state"
    SCOPES_FILE=$SMARTGEARS_SAVED_STATE_PATH
fi

# We always remove the current state
cd $SMARTGEARS_SCRIPTS_DIR
. $GHN_ENV_FILE
./clean-container-state -s $SMARTGEARS_SAVED_STATE_F


# Now that we have the tokens, we can assemble the container.xml file
chmod 640 $CONTAINER_XML_FILE
CREATE_CONTAINER_XML_RES=0