Compare commits

...

5 Commits

61 changed files with 38 additions and 2657 deletions

View File

@@ -76,7 +76,6 @@ centos_locate_package:
centos_hw_packages:
- smartmontools
- system-storage-manager
centos_selinux_daemons_dump_core: False
selinux_policy_type: targeted

View File

@@ -8,5 +8,4 @@ dependencies:
state: latest
- role: '../../library/centos/roles/basic-setup'
- role: '../../library/roles/motd'
- role: '../../library/roles/linux-kernel-sysctl'
- role: '../../library/centos/roles/tuned-setup'

View File

@@ -1,52 +0,0 @@
---
virtualization_pkg_state: latest
virtualization_packages:
- qemu-kvm
- libvirt
- bridge-utils
- virt-install
- cloud-utils
virtualization_centos6_packages:
- python-virtinst
virtualization_centos_netinst_url: "http://mi.mirror.garr.it/mirrors/CentOS/7/os/x86_64/"
virtualization_os_boot_dir: /var/lib/libvirt/boot
virtualization_os_boot_images:
- "http://centos.mirror.garr.it/centos/7.7.1908/isos/x86_64/CentOS-7-x86_64-Minimal-1908.iso"
- "http://releases.ubuntu.com/bionic/ubuntu-18.04.3-live-server-amd64.iso"
- "https://cloud.centos.org/centos/7/images/CentOS-7-x86_64-GenericCloud.qcow2"
- "https://cloud-images.ubuntu.com/bionic/current/bionic-server-cloudimg-amd64.img"
virtualization_activate_forwarding: True
virtualization_disable_nfs: True
virtualization_nfs_services_to_be_disabled:
- nfslock
- rpcbind
- gssproxy
virtualization_disable_iscsi: True
virtualization_iscsi_services_to_be_disabled:
- iprupdate
- iprinit
- iprdump
- iscsid
# Set this to false if ganeti is used for VM management
virtualization_enable_libvirtd: True
virtualization_services_to_be_enabled:
- libvirtd
virtualization_sysctl_tuning:
- { name: 'net.ipv4.ip_forward', value: '1', state: 'present' }
virtualization_kvm_create_lvm_pv: False
virtualization_kvm_create_lvm_vg: False
virtualization_kvm_lvm_pv:
- /dev/fake_disk_1
virtualization_kvm_lvm_vg: vgxen
# Disable tuned on the host
centos_tuned_enabled: False
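A hypothetical host_vars override (host name, device path, and values below are placeholders, not values shipped with this role) that enables the optional LVM storage pool these defaults leave turned off:

# host_vars/kvm-host.yml (hypothetical)
virtualization_kvm_create_lvm_pv: True
virtualization_kvm_create_lvm_vg: True
virtualization_kvm_lvm_pv:
  - /dev/sdb   # placeholder device: replace with the real disk
virtualization_kvm_lvm_vg: vgxen
# Set to False if ganeti manages the VMs instead of libvirt
virtualization_enable_libvirtd: True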

View File

@@ -1,48 +0,0 @@
---
- name: Install the virtualization packages
yum: name={{ virtualization_packages }} state={{ virtualization_pkg_state }}
tags: kvm
- name: Enable libvirtd when needed
service: name={{ item }} state=started enabled=yes
with_items: '{{ virtualization_services_to_be_enabled }}'
when: virtualization_enable_libvirtd | bool
tags: [ 'kvm', 'libvirt' ]
- name: Disable nfs
service: name={{ item }} state=stopped enabled=no
with_items: '{{ virtualization_nfs_services_to_be_disabled }}'
when: virtualization_disable_nfs | bool
tags: [ 'kvm', 'nfs' ]
- name: Disable iscsi
service: name={{ item }} state=stopped enabled=no
with_items: '{{ virtualization_iscsi_services_to_be_disabled }}'
when: virtualization_disable_iscsi | bool
tags: [ 'kvm' , 'iscsi' ]
- name: Set some kernel parameters needed by virtualization (e.g. IP forwarding, if we need NAT)
sysctl: name={{ item.name }} state={{ item.state }} value={{ item.value }} sysctl_file=/etc/sysctl.d/90-virtualization.conf reload=yes sysctl_set=yes
with_items: '{{ virtualization_sysctl_tuning }}'
tags: kvm
- name: Collect the ISO boot images
get_url: url="{{ item }}" dest={{ virtualization_os_boot_dir }}/
with_items: '{{ virtualization_os_boot_images }}'
tags: [ 'kvm', 'iso_images' ]
- name: Create the LVM PV
command: pvcreate {{ item }}
with_items: '{{ virtualization_kvm_lvm_pv }}'
when: virtualization_kvm_create_lvm_pv | bool
tags: [ 'kvm', 'lvm_pv' ]
- name: Create the LVM VG to be used by the virtual guests
lvg: vg={{ virtualization_kvm_lvm_vg }} pvs={{ item }}
with_items: '{{ virtualization_kvm_lvm_pv }}'
when: virtualization_kvm_create_lvm_vg | bool
tags: [ 'kvm', 'lvm_vg' ]
- name: Fix the /dev/kvm permissions
file: dest=/dev/kvm owner=root group=kvm mode=0660
tags: kvm

View File

@@ -1,12 +0,0 @@
---
elastic_hq_repo_url: https://github.com/ElasticHQ/elasticsearch-HQ.git
elastic_hq_user: elastichq
elastic_hq_home: /srv/elastichq
elastic_hq_http_port: 5000
elastic_hq_use_nginx_proxy: True
elastic_hq_python_pkgs:
- python3
- python3-pip
elastic_hq_start_cmd: '/usr/bin/python3 {{ elastic_hq_home }}/application.py'

View File

@@ -1,4 +0,0 @@
---
- name: systemd reload
command: systemctl daemon-reload

View File

@@ -1,3 +0,0 @@
---
dependencies:
- { role: '../../../library/roles/nginx', when: elastic_hq_use_nginx_proxy | bool }

View File

@@ -1,35 +0,0 @@
---
- block:
- name: Install git
apt: pkg=git update_cache=yes cache_valid_time=1800
- name: Install the minimal python 3 env
apt: pkg={{ item }} update_cache=yes cache_valid_time=1800
with_items: '{{ elastic_hq_python_pkgs }}'
- name: Create a user to run the elasticHQ service
user: name={{ elastic_hq_user }} home={{ elastic_hq_home }} createhome=no shell=/usr/sbin/nologin system=yes
- name: Create the user home directory
file: dest={{ elastic_hq_home }} state=directory owner={{ elastic_hq_user }} group={{ elastic_hq_user }}
- name: Get the elasticHQ distribution
git: repo={{ elastic_hq_repo_url }} dest={{ elastic_hq_home }}
- name: Get the elasticHQ dependencies
pip: chdir={{ elastic_hq_home }} executable=pip3 requirements=requirements.txt
- name: Install the elasticHQ systemd startup unit
template: src=elastichq.systemd.j2 dest=/etc/systemd/system/elastichq.service mode=0644 owner=root group=root
when: ansible_service_mgr == 'systemd'
register: elastichq_systemd_unit
- name: systemd reload
command: systemctl daemon-reload
when: elastichq_systemd_unit is changed
- name: Ensure that elasticHQ is started and enabled
service: name=elastichq state=started enabled=yes
tags: [ 'elasticsearch', 'elastic_hq' ]

View File

@@ -1,18 +0,0 @@
[Unit]
Description=ElasticHQ - Monitoring and Management Web Application for ElasticSearch instances and clusters.
After=network.target
[Service]
Type=simple
WorkingDirectory={{ elastic_hq_home }}
StandardOutput=syslog
StandardError=syslog
Restart=on-failure
RemainAfterExit=yes
User={{ elastic_hq_user }}
Group={{ elastic_hq_user }}
ExecStart={{ elastic_hq_start_cmd }}
ExecStop=
[Install]
WantedBy=multi-user.target

View File

@@ -1,68 +0,0 @@
---
elasticsearch_install: True
elasticsearch_repo_key: https://packages.elastic.co/GPG-KEY-elasticsearch
elasticsearch_repo_version: '{{ elasticsearch_major_version }}.x'
#elasticsearch_major_version: 2
#elasticsearch_repo: 'deb http://packages.elastic.co/elasticsearch/{{ elasticsearch_repo_version }}/debian stable main'
elasticsearch_major_version: 5
elasticsearch_minor_version: 4
elasticsearch_patch_version: 0
elasticsearch_version: '{{ elasticsearch_major_version }}.{{ elasticsearch_minor_version }}.{{ elasticsearch_patch_version }}'
elasticsearch_repo: 'deb https://artifacts.elastic.co/packages/{{ elasticsearch_repo_version }}/apt stable main'
elasticsearch_packages:
- elasticsearch
elasticsearch_kibana_install: False
elasticsearch_kibana_enabled: True
elasticsearch_kibana_proxy: False
elasticsearch_kibana_nginx_proxy: True
elasticsearch_kibana_packages:
- kibana
elasticsearch_kibana_http_port: 5601
elasticsearch_kibana_bind_ip: 127.0.0.1
elasticsearch_kibana_serverpath: ''
elasticsearch_kibana_servername: '{{ ansible_fqdn }}'
elasticsearch_kibana_elasticsearch_url: 'http://localhost:9200'
elasticsearch_kibana_preserve_host: 'false'
elasticsearch_kibana_ssl_enabled: False
elasticsearch_kibana_rundir: /run/kibana
elasticsearch_package_state: 'present'
elasticsearch_cluster_name: 'Elasticsearch Cluster'
elasticsearch_enabled: True
elasticsearch_http_port: 9200
elasticsearch_transport_min_port: 9300
elasticsearch_transport_max_port: 9400
elasticsearch_data_dir: /var/lib/elasticsearch
elasticsearch_log_dir: /var/log/elasticsearch
elasticsearch_bind_ip: 0.0.0.0
elasticsearch_discovery_host_list: '["127.0.0.1", "[::1]"]'
elasticsearch_define_majority_of_nodes: True
elasticsearch_majority_of_nodes: 1
elasticsearch_bootstrap_known_masters:
- '{{ ansible_fqdn }}'
elasticsearch_real_cluster: False
elasticsearch_recover_after_nodes: 3
elasticsearch_max_local_storage_nodes: 1
elasticsearch_destructive_requires_name: 'true'
elasticsearch_define_heap_size: False
elasticsearch_heap_size: 2g
elasticsearch_additional_java_opts: '-server -Djava.awt.headless=true -Dfile.encoding=UTF-8'
elasticsearch_max_open_files: 65536
elasticsearch_cluster_routing_allocation_disk_threshold_enabled: 'true'
elasticsearch_cluster_routing_allocation_disk_watermark_low: '85%'
elasticsearch_cluster_routing_allocation_disk_watermark_high: '90%'
# Compatibility with kernels <= 3.5. Set to False if you are using a newer kernel
elasticsearch_disable_bootstrap_syscall_filter: True
# bin/plugin for 2.x, bin/elasticsearch-plugin for 5.x
#elasticsearch_plugin_bin: /usr/share/elasticsearch/bin/plugin
#elasticsearch_plugins:
# - { name: 'royrusso/elasticsearch-HQ', state: 'present' }
elasticsearch_plugin_bin: /usr/share/elasticsearch/bin/elasticsearch-plugin
# elasticsearch 5 not supported yet, so set it to False when installing 5.x
elasticsearch_hq_install: False
elasticsearch_hq_plugin:
- { name: 'royrusso/elasticsearch-HQ', state: 'present' }
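A minimal sketch of group_vars for a real three-node cluster (host names and values are illustrative, not defaults of this role); with three master-eligible nodes the split-brain majority is 3 / 2 + 1 = 2:

# group_vars/es_cluster.yml (hypothetical)
elasticsearch_real_cluster: True
elasticsearch_cluster_name: 'es-prod'
elasticsearch_discovery_host_list: '["es1.example.org", "es2.example.org", "es3.example.org"]'
elasticsearch_define_majority_of_nodes: True
# total number of nodes / 2 + 1 = 3 / 2 + 1 = 2 (integer division)
elasticsearch_majority_of_nodes: 2
elasticsearch_recover_after_nodes: 2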

View File

@@ -1,11 +0,0 @@
---
- name: Restart elasticsearch
service: name=elasticsearch state=restarted enabled=yes
when: elasticsearch_enabled | bool
ignore_errors: True
- name: Restart kibana
service: name=kibana state=restarted enabled=yes
when: elasticsearch_kibana_enabled | bool

View File

@@ -1,4 +0,0 @@
---
dependencies:
- { role: '../../../library/roles/openjdk' }
- { role: '../../../library/roles/nginx', when: elasticsearch_kibana_nginx_proxy | bool }

View File

@@ -1,24 +0,0 @@
---
- name: Elasticsearch installation
block:
- name: Install the elasticsearch deb packages
apt: name='{{ elasticsearch_packages }}' state={{ elasticsearch_package_state }} update_cache=yes cache_valid_time=1800
- name: Install the elasticsearch startup default
template: src=elasticsearch-default.j2 dest=/etc/default/elasticsearch owner=root group=elasticsearch mode=0640
register: elasticsearch_default
notify: Restart elasticsearch
- name: Install the elasticsearch JVM options
template: src=jvm.options.j2 dest=/etc/elasticsearch/jvm.options owner=root group=elasticsearch mode=0640
register: elasticsearch_jvm_opts
notify: Restart elasticsearch
tags: [ 'ELK', 'elasticsearch', 'elk', 'elasticsearch_conf' ]
- name: Install the elasticsearch configuration
template: src=elasticsearch.yml.j2 dest=/etc/elasticsearch/elasticsearch.yml owner=root group=elasticsearch mode=0640
register: elasticsearch_configuration
notify: Restart elasticsearch
tags: [ 'ELK', 'elasticsearch', 'elk', 'elasticsearch_conf' ]
tags: [ 'ELK', 'elasticsearch', 'elk' ]

View File

@@ -1,16 +0,0 @@
---
- block:
- name: Manage a list of elasticsearch plugins
elasticsearch_plugin: name="{{ item.name }}" state={{ item.state }} plugin_bin={{ elasticsearch_plugin_bin }} url={{ item.url|default(omit) }} version={{ item.version|default(omit) }}
with_items: '{{ elasticsearch_plugins | default ([]) }}'
- name: Install the elasticsearch HQ plugin
elasticsearch_plugin: name="{{ item.name }}" state={{ item.state }} plugin_bin={{ elasticsearch_plugin_bin }} url={{ item.url|default(omit) }} version={{ item.version|default(omit) }}
with_items: '{{ elasticsearch_hq_plugin | default ([]) }}'
when:
- elasticsearch_major_version <= 2
- elasticsearch_hq_install
when: elasticsearch_plugins is defined
tags: [ 'elasticsearch', 'es_plugins' ]

View File

@@ -1,11 +0,0 @@
---
- name: Ensure that elasticsearch is enabled and running
service: name=elasticsearch state=started enabled=yes
when: elasticsearch_enabled | bool
tags: [ 'ELK', 'elasticsearch', 'elk' ]
- name: Ensure that elasticsearch is disabled and stopped
service: name=elasticsearch state=stopped enabled=no
when: not elasticsearch_enabled | bool
tags: [ 'ELK', 'elasticsearch', 'elk' ]

View File

@@ -1,10 +0,0 @@
---
- name: ELK repository
block:
- name: Install the elasticsearch repo key
apt_key: url={{ elasticsearch_repo_key }} state=present
- name: Install the elasticsearch deb repository
apt_repository: repo='{{ elasticsearch_repo }}' state=present update_cache=yes
tags: [ 'ELK', 'elasticsearch', 'elk' ]

View File

@@ -1,50 +0,0 @@
---
- name: Kibana x509 certificate management
block:
- name: Create the acme hooks directory if it does not yet exist
file: dest={{ letsencrypt_acme_sh_services_scripts_dir }} state=directory owner=root group=root
- name: Create the kibana pki subdir
file: dest={{ pki_dir }}/kibana state=directory owner=root group=kibana mode=0750
- name: Check if the global certificate private key exists
stat: path={{ letsencrypt_acme_certs_dir }}/privkey
register: kibana_privkey
- name: Check if the kibana certificate private key exists under the pki directory
stat: path={{ pki_dir }}/kibana/privkey
register: kibana_pki_privkey
- name: Copy the private key into the expected place if it is not already there
copy: src={{ letsencrypt_acme_certs_dir }}/privkey dest={{ pki_dir }}/kibana/privkey remote_src=yes owner=root group=kibana mode=0440
when:
- kibana_privkey.stat.exists
- not kibana_pki_privkey.stat.exists
- name: Install the kibana hook for letsencrypt
template: src=kibana-letsencrypt-hook.sh.j2 dest=/usr/lib/acme/hooks/kibana owner=root group=root mode=0550
when:
- elasticsearch_kibana_ssl_enabled | bool
- letsencrypt_acme_install is defined and letsencrypt_acme_install | bool
tags: [ 'ELK', 'elasticsearch', 'elk', 'kibana' ]
- name: Kibana installation
block:
- name: Install the Kibana packages
apt: name='{{ elasticsearch_kibana_packages }}' state={{ elasticsearch_package_state }} update_cache=yes cache_valid_time=1800
- name: Install the kibana systemd configuration to manage the rundir directory
template: src=kibana_rundir.conf.j2 dest=/usr/lib/tmpfiles.d/kibana.conf owner=root group=root mode=0644
register: reconfigure_systemd
- name: Reload the systemd configuration
systemd: daemon_reload=yes
- name: Install the Kibana configuration
template: src=kibana.yml.j2 dest=/etc/kibana/kibana.yml owner=root group=kibana mode=0640
register: kibana_configuration
notify: Restart kibana
tags: [ 'ELK', 'elasticsearch', 'elk', 'kibana', 'kibana_conf' ]
tags: [ 'ELK', 'elasticsearch', 'elk', 'kibana' ]

View File

@@ -1,11 +0,0 @@
---
- name: Ensure that kibana is enabled and running
service: name=kibana state=started enabled=yes
when: elasticsearch_kibana_enabled | bool
tags: [ 'ELK', 'elasticsearch', 'elk', 'kibana' ]
- name: Ensure that kibana is disabled and stopped
service: name=kibana state=stopped enabled=no
when: not elasticsearch_kibana_enabled | bool
tags: [ 'ELK', 'elasticsearch', 'elk', 'kibana' ]

View File

@@ -1,13 +0,0 @@
---
- import_tasks: elk_repo.yml
- import_tasks: elasticsearch.yml
when: elasticsearch_install | bool
- import_tasks: elasticsearch_plugins.yml
when: elasticsearch_install | bool
- import_tasks: elasticsearch_service.yml
when: elasticsearch_install | bool
- import_tasks: kibana.yml
when: elasticsearch_kibana_install | bool
- import_tasks: kibana_service.yml
when: elasticsearch_kibana_install | bool

View File

@@ -1,76 +0,0 @@
################################
# Elasticsearch
################################
# Elasticsearch home directory
#ES_HOME=/usr/share/elasticsearch
# Elasticsearch configuration directory
#CONF_DIR=/etc/elasticsearch
# Elasticsearch data directory
DATA_DIR={{ elasticsearch_data_dir }}
# Elasticsearch logs directory
LOG_DIR={{ elasticsearch_log_dir }}
# Elasticsearch PID directory
#PID_DIR=/var/run/elasticsearch
{% if elasticsearch_major_version <= 2 %}
# Set ES_HEAP_SIZE to 50% of available RAM, but no more than 31g
ES_HEAP_SIZE={{ elasticsearch_heap_size }}
# Heap new generation
#ES_HEAP_NEWSIZE=
# Maximum direct memory
#ES_DIRECT_SIZE=
# Additional Java OPTS
ES_JAVA_OPTS="{{ elasticsearch_additional_java_opts }}"
{% endif %}
# Configure restart on package upgrade (true, every other setting will lead to not restarting)
#ES_RESTART_ON_UPGRADE=true
# Path to the GC log file
#ES_GC_LOG_FILE=/var/log/elasticsearch/gc.log
################################
# Elasticsearch service
################################
# SysV init.d
#
# When executing the init script, this user will be used to run the elasticsearch service.
# The default value is 'elasticsearch' and is declared in the init.d file.
# Note that this setting is only used by the init script. If changed, make sure that
# the configured user can read and write into the data, work, plugins and log directories.
# For systemd service, the user is usually configured in file /usr/lib/systemd/system/elasticsearch.service
{% if elasticsearch_major_version < 6 %}
ES_USER=elasticsearch
ES_GROUP=elasticsearch
{% endif %}
# The number of seconds to wait before checking if Elasticsearch started successfully as a daemon process
ES_STARTUP_SLEEP_TIME=5
################################
# System properties
################################
# Specifies the maximum file descriptor number that can be opened by this process
# When using Systemd, this setting is ignored and the LimitNOFILE defined in
# /usr/lib/systemd/system/elasticsearch.service takes precedence
MAX_OPEN_FILES={{ elasticsearch_max_open_files }}
# The maximum number of bytes of memory that may be locked into RAM
# Set to "unlimited" if you use the 'bootstrap.mlockall: true' option
# in elasticsearch.yml (ES_HEAP_SIZE must also be set).
# When using Systemd, the LimitMEMLOCK property must be set
# in /usr/lib/systemd/system/elasticsearch.service
MAX_LOCKED_MEMORY=unlimited
# Maximum number of VMA (Virtual Memory Areas) a process can own
# When using Systemd, this setting is ignored and the 'vm.max_map_count'
# property is set at boot time in /usr/lib/sysctl.d/elasticsearch.conf
#MAX_MAP_COUNT=262144
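For example (an assumed 16 GB host, not a value from this role), the 50%-of-RAM rule above would translate into these overrides, which also feed the jvm.options template below:

# Hypothetical overrides for a host with 16 GB of RAM
elasticsearch_define_heap_size: True
elasticsearch_heap_size: 8g   # half the RAM, well under the 31g ceiling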

View File

@@ -1,136 +0,0 @@
# ======================== Elasticsearch Configuration =========================
#
# NOTE: Elasticsearch comes with reasonable defaults for most settings.
# Before you set out to tweak and tune the configuration, make sure you
# understand what are you trying to accomplish and the consequences.
#
# The primary way of configuring a node is via this file. This template lists
# the most important settings you may want to configure for a production cluster.
#
# Please see the documentation for further information on configuration options:
# <http://www.elastic.co/guide/en/elasticsearch/reference/current/setup-configuration.html>
#
# ---------------------------------- Cluster -----------------------------------
#
# Use a descriptive name for your cluster:
#
cluster.name: {{ elasticsearch_cluster_name }}
#
# ------------------------------------ Node ------------------------------------
#
# Use a descriptive name for the node:
#
node.name: {{ ansible_fqdn }}
{% if elasticsearch_kibana_proxy %}
# This node is being used by kibana as a proxy to a cluster
node.master: false
node.data: false
node.ingest: false
{% endif %}
{% if elasticsearch_major_version >= 7 %}
cluster.initial_master_nodes:
{% for n in elasticsearch_bootstrap_known_masters %}
- {{ n }}
{% endfor %}
{% endif %}
#
# Add custom attributes to the node:
#
# node.rack: r1
#
# ----------------------------------- Paths ------------------------------------
#
# Path to directory where to store the data (separate multiple locations by comma):
#
path.data: {{ elasticsearch_data_dir }}
#
# Path to log files:
#
path.logs: {{ elasticsearch_log_dir }}
#
{% if elasticsearch_major_version <= 2 %}
# ----------------------------------- Memory -----------------------------------
#
# Lock the memory on startup:
#
bootstrap.mlockall: true
#
# Make sure that the `ES_HEAP_SIZE` environment variable is set to about half the memory
# available on the system and that the owner of the process is allowed to use this limit.
#
# Elasticsearch performs poorly when the system is swapping the memory.
{% endif %}
#
# ---------------------------------- Network -----------------------------------
#
# Set the bind address to a specific IP (IPv4 or IPv6):
#
{% if elasticsearch_kibana_proxy %}
network.host: localhost
{% else %}
network.host: {{ elasticsearch_bind_ip }}
{% endif %}
#
# Set a custom port for HTTP:
#
http.port: {{ elasticsearch_http_port }}
# by default transport.host refers to network.host
transport.host: {{ elasticsearch_bind_ip }}
{% if elasticsearch_major_version >= 6 %}
transport.tcp.port: {{ elasticsearch_transport_min_port }}-{{ elasticsearch_transport_max_port }}
{% endif %}
#
# For more information, see the documentation at:
# <http://www.elastic.co/guide/en/elasticsearch/reference/current/modules-network.html>
#
# --------------------------------- Discovery ----------------------------------
#
# Pass an initial list of hosts to perform discovery when new node is started:
# The default list of hosts is ["127.0.0.1", "[::1]"]
#
discovery.zen.ping.unicast.hosts: {{ elasticsearch_discovery_host_list }}
#
{% if elasticsearch_define_majority_of_nodes %}
# Prevent the "split brain" by configuring the majority of nodes (total number of nodes / 2 + 1):
#
discovery.zen.minimum_master_nodes: {{ elasticsearch_majority_of_nodes }}
#
# For more information, see the documentation at:
# <http://www.elastic.co/guide/en/elasticsearch/reference/current/modules-discovery.html>
#
{% if elasticsearch_real_cluster %}
# ---------------------------------- Gateway -----------------------------------
#
# Block initial recovery after a full cluster restart until N nodes are started:
#
gateway.recover_after_nodes: {{ elasticsearch_recover_after_nodes }}
#
# For more information, see the documentation at:
# <http://www.elastic.co/guide/en/elasticsearch/reference/current/modules-gateway.html>
#
{% endif %}
{% endif %}
# ---------------------------------- Various -----------------------------------
#
# Disable starting multiple nodes on a single system:
#
node.max_local_storage_nodes: {{ elasticsearch_max_local_storage_nodes }}
#
# Require explicit names when deleting indices:
#
action.destructive_requires_name: {{ elasticsearch_destructive_requires_name }}
#
cluster.routing.allocation.disk.threshold_enabled: {{ elasticsearch_cluster_routing_allocation_disk_threshold_enabled }}
cluster.routing.allocation.disk.watermark.low: {{ elasticsearch_cluster_routing_allocation_disk_watermark_low }}
cluster.routing.allocation.disk.watermark.high: {{ elasticsearch_cluster_routing_allocation_disk_watermark_high }}
{% if elasticsearch_disable_bootstrap_syscall_filter %}
# When using an old kernel
bootstrap.system_call_filter: false
{% endif %}

View File

@@ -1,129 +0,0 @@
## JVM configuration
################################################################
## IMPORTANT: JVM heap size
################################################################
##
## You should always set the min and max JVM heap
## size to the same value. For example, to set
## the heap to 4 GB, set:
##
## -Xms4g
## -Xmx4g
##
## See https://www.elastic.co/guide/en/elasticsearch/reference/current/heap-size.html
## for more information
##
################################################################
# Xms represents the initial size of total heap space
# Xmx represents the maximum size of total heap space
-Xms{{ elasticsearch_heap_size }}
-Xmx{{ elasticsearch_heap_size }}
################################################################
## Expert settings
################################################################
##
## All settings below this section are considered
## expert settings. Don't tamper with them unless
## you understand what you are doing
##
################################################################
## GC configuration
-XX:+UseConcMarkSweepGC
-XX:CMSInitiatingOccupancyFraction=75
-XX:+UseCMSInitiatingOccupancyOnly
## optimizations
# disable calls to System#gc
-XX:+DisableExplicitGC
# pre-touch memory pages used by the JVM during initialization
-XX:+AlwaysPreTouch
## basic
# force the server VM
-server
# set to headless, just in case
-Djava.awt.headless=true
# ensure UTF-8 encoding by default (e.g. filenames)
-Dfile.encoding=UTF-8
# use our provided JNA always versus the system one
-Djna.nosys=true
# turn off a JDK optimization that throws away stack traces for common
# exceptions because stack traces are important for debugging
-XX:-OmitStackTraceInFastThrow
# flag to explicitly tell Netty to not use unsafe
-Dio.netty.noUnsafe=true
-Dio.netty.noKeySetOptimization=true
-Dio.netty.recycler.maxCapacityPerThread=0
-Dlog4j.shutdownHookEnabled=false
-Dlog4j2.disable.jmx=true
{% if elasticsearch_major_version >= 6 %}
-Djava.io.tmpdir=${ES_TMPDIR}
{% endif %}
## heap dumps
# generate a heap dump when an allocation from the Java heap fails
# heap dumps are created in the working directory of the JVM
-XX:+HeapDumpOnOutOfMemoryError
# specify an alternative path for heap dumps
# ensure the directory exists and has sufficient space
#-XX:HeapDumpPath=${heap.dump.path}
## GC logging
#-XX:+PrintGCDetails
#-XX:+PrintGCTimeStamps
#-XX:+PrintGCDateStamps
#-XX:+PrintClassHistogram
#-XX:+PrintTenuringDistribution
#-XX:+PrintGCApplicationStoppedTime
# log GC status to a file with time stamps
# ensure the directory exists
#-Xloggc:${loggc}
# Elasticsearch 5.0.0 will throw an exception on unquoted field names in JSON.
# If documents were already indexed with unquoted fields in a previous version
# of Elasticsearch, some operations may throw errors.
#
# WARNING: This option will be removed in Elasticsearch 6.0.0 and is provided
# only for migration purposes.
#-Delasticsearch.json.allow_unquoted_field_names=true
# specify an alternative path for heap dumps
# ensure the directory exists and has sufficient space
-XX:HeapDumpPath={{ elasticsearch_data_dir }}
## JDK 8 GC logging
8:-XX:+PrintGCDetails
8:-XX:+PrintGCDateStamps
8:-XX:+PrintTenuringDistribution
8:-XX:+PrintGCApplicationStoppedTime
8:-Xloggc:/var/log/elasticsearch/gc.log
8:-XX:+UseGCLogFileRotation
8:-XX:NumberOfGCLogFiles=32
8:-XX:GCLogFileSize=64m
# JDK 9+ GC logging
9-:-Xlog:gc*,gc+age=trace,safepoint:file=/var/log/elasticsearch/gc.log:utctime,pid,tags:filecount=32,filesize=64m
# due to internationalization enhancements in JDK 9 Elasticsearch needs to set the provider to COMPAT, otherwise
# time/date parsing will break in an incompatible way for some date patterns and locales
9-:-Djava.locale.providers=COMPAT

View File

@@ -1,38 +0,0 @@
#!/bin/bash
H_NAME=$( hostname -f )
LE_SERVICES_SCRIPT_DIR=/usr/lib/acme/hooks
LE_CERTS_DIR="/var/lib/acme/live/$H_NAME"
LE_LOG_DIR=/var/log/letsencrypt
KIBANA_CERTDIR=/etc/pki/kibana
KIBANA_KEYFILE="$KIBANA_CERTDIR/privkey"
DATE=$( date )
[ ! -d $KIBANA_CERTDIR ] && mkdir -p $KIBANA_CERTDIR
[ ! -d $LE_LOG_DIR ] && mkdir $LE_LOG_DIR
echo "$DATE" >> $LE_LOG_DIR/kibana.log
{% if letsencrypt_acme_install %}
LE_ENV_FILE=/etc/default/letsencrypt
{% endif %}
{% if letsencrypt_acme_sh_install %}
LE_ENV_FILE=/etc/default/acme_sh_request_env
{% endif %}
if [ -f "$LE_ENV_FILE" ] ; then
. "$LE_ENV_FILE"
else
echo "No letsencrypt default file" >> $LE_LOG_DIR/kibana.log
fi
echo "Building the new certificate file" >> $LE_LOG_DIR/kibana.log
cp -f ${LE_CERTS_DIR}/privkey ${KIBANA_KEYFILE}
chmod 440 ${KIBANA_KEYFILE}
chgrp kibana ${KIBANA_KEYFILE}
echo "Reload the kibana service" >> $LE_LOG_DIR/kibana.log
systemctl restart kibana >> $LE_LOG_DIR/kibana.log 2>&1
echo "Done." >> $LE_LOG_DIR/kibana.log
exit 0

View File

@@ -1,108 +0,0 @@
# Kibana is served by a back end server. This setting specifies the port to use.
server.port: {{ elasticsearch_kibana_http_port }}
# Specifies the address to which the Kibana server will bind. IP addresses and host names are both valid values.
# The default is 'localhost', which usually means remote machines will not be able to connect.
# To allow connections from remote users, set this parameter to a non-loopback address.
server.host: "{{ elasticsearch_kibana_bind_ip }}"
# Enables you to specify a path to mount Kibana at if you are running behind a proxy. This only affects
# the URLs generated by Kibana, your proxy is expected to remove the basePath value before forwarding requests
# to Kibana. This setting cannot end in a slash.
server.basePath: "{{ elasticsearch_kibana_serverpath }}"
# The maximum payload size in bytes for incoming server requests.
#server.maxPayloadBytes: 1048576
# The Kibana server's name. This is used for display purposes.
server.name: "{{ elasticsearch_kibana_servername }}"
# The URL of the Elasticsearch instance to use for all your queries.
elasticsearch.url: "{{ elasticsearch_kibana_elasticsearch_url }}"
# When this setting's value is true Kibana uses the hostname specified in the server.host
# setting. When the value of this setting is false, Kibana uses the hostname of the host
# that connects to this Kibana instance.
elasticsearch.preserveHost: {{ elasticsearch_kibana_preserve_host }}
# Kibana uses an index in Elasticsearch to store saved searches, visualizations and
# dashboards. Kibana creates a new index if the index doesn't already exist.
#kibana.index: ".kibana"
# The default application to load.
#kibana.defaultAppId: "discover"
# If your Elasticsearch is protected with basic authentication, these settings provide
# the username and password that the Kibana server uses to perform maintenance on the Kibana
# index at startup. Your Kibana users still need to authenticate with Elasticsearch, which
# is proxied through the Kibana server.
#elasticsearch.username: "user"
#elasticsearch.password: "pass"
{% if letsencrypt_acme_install is defined and letsencrypt_acme_install %}
{% if elasticsearch_kibana_ssl_enabled %}
# Enables SSL and paths to the PEM-format SSL certificate and SSL key files, respectively.
# These settings enable SSL for outgoing requests from the Kibana server to the browser.
server.ssl.enabled: true
server.ssl.certificate: {{ letsencrypt_acme_certs_dir }}/fullchain
server.ssl.key: {{ pki_dir }}/kibana/privkey
{% endif %}
{% endif %}
# Optional settings that provide the paths to the PEM-format SSL certificate and key files.
# These files validate that your Elasticsearch backend uses the same key files.
#elasticsearch.ssl.certificate: /path/to/your/client.crt
#elasticsearch.ssl.key: /path/to/your/client.key
# Optional setting that enables you to specify a path to the PEM file for the certificate
# authority for your Elasticsearch instance.
#elasticsearch.ssl.certificateAuthorities: [ "/path/to/your/CA.pem" ]
# To disregard the validity of SSL certificates, change this setting's value to 'none'.
#elasticsearch.ssl.verificationMode: full
# Time in milliseconds to wait for Elasticsearch to respond to pings. Defaults to the value of
# the elasticsearch.requestTimeout setting.
#elasticsearch.pingTimeout: 1500
# Time in milliseconds to wait for responses from the back end or Elasticsearch. This value
# must be a positive integer.
#elasticsearch.requestTimeout: 30000
# List of Kibana client-side headers to send to Elasticsearch. To send *no* client-side
# headers, set this value to [] (an empty list).
#elasticsearch.requestHeadersWhitelist: [ authorization ]
# Header names and values that are sent to Elasticsearch. Any custom headers cannot be overwritten
# by client-side headers, regardless of the elasticsearch.requestHeadersWhitelist configuration.
#elasticsearch.customHeaders: {}
# Time in milliseconds for Elasticsearch to wait for responses from shards. Set to 0 to disable.
#elasticsearch.shardTimeout: 0
# Time in milliseconds to wait for Elasticsearch at Kibana startup before retrying.
#elasticsearch.startupTimeout: 5000
# Specifies the path where Kibana creates the process ID file.
pid.file: {{ elasticsearch_kibana_rundir }}/kibana.pid
# Enables you to specify a file where Kibana stores log output.
#logging.dest: stdout
# Set the value of this setting to true to suppress all logging output.
#logging.silent: false
# Set the value of this setting to true to suppress all logging output other than error messages.
#logging.quiet: false
# Set the value of this setting to true to log all events, including system usage information
# and all requests.
#logging.verbose: false
# Set the interval in milliseconds to sample system and process performance
# metrics. Minimum is 100ms. Defaults to 5000.
#ops.interval: 5000
# The default locale. This locale can be used in certain circumstances to substitute any missing
# translations.
#i18n.defaultLocale: "en"
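As a sketch (the /kibana path is an assumption for illustration), the role variables that drive the basePath and proxy settings above could be set like this to serve Kibana behind the bundled nginx proxy:

# Hypothetical overrides for Kibana behind the nginx proxy
elasticsearch_kibana_install: True
elasticsearch_kibana_nginx_proxy: True
elasticsearch_kibana_bind_ip: 127.0.0.1    # keep Kibana on loopback, nginx in front
elasticsearch_kibana_serverpath: '/kibana' # basePath the proxy must strip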

View File

@@ -1 +0,0 @@
d {{ elasticsearch_kibana_rundir }} 0775 kibana kibana

View File

@@ -1,116 +0,0 @@
---
#
# There's an untrusted Ubuntu PPA repository with gobs of packages and dependencies: https://launchpad.net/~marutter/+archive/ubuntu/c2d4u NOT TESTED
#
# To list the installed R packages
# Run R, then execute
# packinfo <- installed.packages (fields = c ("Package", "Version"))
# packinfo[,c("Package", "Version")]
#
# The install/remove script has been taken from here: http://adamj.eu/tech/2014/07/19/installing-and-removing-r-packages-with-ansible/
#
# Set to True if you want to install from the CRAN deb repository
r_install_cran_repo: False
#r_cran_mirror_site: http://cran.rstudio.com
r_cran_set_default_mirror: True
r_cran_mirror_site: https://cran.mirror.garr.it/mirrors/CRAN/
r_base_specific_version: False
r_base_pkg_version: 3.4.3
r_packages_main_state: present
r_packages_state: '{{ r_packages_main_state }}'
r_sitelib_path: '/usr/local/lib/R/site-library'
r_plugins_from_deb: True
r_packages_cleanup: False
#
r_packages_updater: False
r_package_updater_via_subversion: True
r_package_updater_subversion_repo: http://svn.research-infrastructures.eu/public/d4science/gcube/trunk/data-analysis/RConfiguration/RPackagesManagement/
r_packages_svn_base_dir: /srv/r_updater
r_packages_svn_files_prefix: ''
# They need to be flat text files
# 1 package per line
#r_debian_packages_list_url
# package[:cran mirror]
# The CRAN mirror URL is optional
#r_cran_packages_list_url
# user/package_name
#r_github_packages_list_url
r_source_plugins_dest_dir: /var/cache/R
# r_distribution_required_packages:
# - gdal-bin
# - dans-gdal-scripts
# - libgdal1-dev
# - libgeos-dev
# - libspatialite-dev
# - proj
# - proj-bin
# - proj-data
# - libproj-dev
r_base_packages_list:
- r-base
# Same list as above, but without version numbers.
#r_base_packages_hold_list:
r_plugins_packages_list:
- jags
- r-cran-rjags
- r-cran-abind
- r-cran-boot
- r-cran-class
- r-cran-cluster
- r-cran-coda
- r-cran-codetools
- r-cran-foreign
- r-cran-lattice
- r-cran-maptools
- r-cran-mass
- r-cran-matrix
- r-cran-mgcv
- r-cran-nlme
- r-cran-nnet
- r-cran-rpart
- r-cran-sp
- r-cran-spatial
- r-cran-survival
r_apt_additional_repos:
- 'ppa:opencpu/jq'
# r_plugins_list_to_install:
# - { name: 'R2WinBUGS', repo: '{{ r_cran_mirror_site }}' }
# - { name: 'R2jags', repo: '{{ r_cran_mirror_site }}' }
# - { name: 'bayesmix', repo: '{{ r_cran_mirror_site }}' }
# - { name: 'coda', repo: '{{ r_cran_mirror_site }}' }
# - { name: 'rjags', repo: '{{ r_cran_mirror_site }}' }
# - { name: 'runjags', repo: '{{ r_cran_mirror_site }}' }
# - { name: 'base', repo: '{{ r_cran_mirror_site }}' }
# - { name: 'compiler', repo: '{{ r_cran_mirror_site }}' }
# - { name: 'datasets', repo: '{{ r_cran_mirror_site }}' }
# - { name: 'grDevices', repo: '{{ r_cran_mirror_site }}' }
# - { name: 'graphics', repo: '{{ r_cran_mirror_site }}' }
# - { name: 'grid', repo: '{{ r_cran_mirror_site }}' }
# - { name: 'methods', repo: '{{ r_cran_mirror_site }}' }
# - { name: 'parallel', repo: '{{ r_cran_mirror_site }}' }
# - { name: 'reshape', repo: '{{ r_cran_mirror_site }}' }
# - { name: 'splines', repo: '{{ r_cran_mirror_site }}' }
# - { name: 'stats', repo: '{{ r_cran_mirror_site }}' }
# - { name: 'stats4', repo: '{{ r_cran_mirror_site }}' }
# - { name: 'tcltk', repo: '{{ r_cran_mirror_site }}' }
# - { name: 'tools', repo: '{{ r_cran_mirror_site }}' }
# - { name: 'utils', repo: '{{ r_cran_mirror_site }}' }
# r_plugins_from_sources:
# - { name: 'plyr', version: '1.7.1', source: 'plyr_1.7.1.tar.gz', url: 'http://cran.r-project.org/src/contrib/Archive/plyr/plyr_1.7.1.tar.gz' }
# r_plugins_from_github:
# - { plugin_name: 'RFigisGeo', github_user: 'openfigis' }
# - { plugin_name: 'rsdmx', github_user: 'opensdmx' }
#
#r_plugins_list_to_remove:
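A hypothetical set of the three flat list files described in the comments above (entries are examples, partly borrowed from the commented samples in this file):

r_deb_pkgs.txt (one debian package per line):
jags
libgdal1-dev

r_cran_pkgs.txt (package[:cran mirror], the mirror part is optional):
coda
rjags:https://cran.mirror.garr.it/mirrors/CRAN/

r_github_pkgs.txt (user/package_name):
openfigis/RFigisGeo
opensdmx/rsdmx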

View File

@@ -1,15 +0,0 @@
#!/bin/bash
# Put the base packages to unhold
for package in r-base r-base-core r-base-dev r-base-html r-cran-boot r-cran-class r-cran-cluster r-cran-codetools r-cran-foreign r-cran-kernsmooth r-cran-lattice r-cran-mass r-cran-matrix r-cran-mgcv r-cran-nlme r-cran-nnet r-cran-rpart r-cran-spatial r-cran-survival r-doc-html r-recommended ; do apt-mark unhold $package; done
# Remove the old r packages
apt-get purge r-base-* r-cran-* -y --force-yes
apt-get autoremove -y
apt-get update
# Remove the CRAN packages
rm -fr /usr/lib/R/site-library /usr/local/lib/R/site-library
rm -fr /var/cache/R/*
exit 0

View File

@@ -1,5 +0,0 @@
---
- import_tasks: r-packages_cleanup.yml
when: r_packages_cleanup
- import_tasks: r-installation.yml
- import_tasks: r-packages-updater.yml

View File

@@ -1,156 +0,0 @@
---
- block:
- name: Add the cran repository key
apt_key: id=E084DAB9 keyserver=keyserver.ubuntu.com state=present
tags: [ 'r_software', 'r_repo', 'r_repo_key' ]
- name: Add the CRAN repository
apt_repository: repo='deb http://cran.rstudio.com/bin/linux/ubuntu {{ ansible_distribution_release }}/' state=present update_cache=yes
when: r_install_cran_repo | bool
tags: [ 'r_software', 'r_repo' ]
- block:
- name: Remove the CRAN repository
apt_repository: repo='deb http://cran.rstudio.com/bin/linux/ubuntu {{ ansible_distribution_release }}/' state=absent update_cache=yes
when: not r_install_cran_repo | bool
tags: [ 'r_software', 'r_repo' ]
- name: Remove the hold state from the debian R packages
shell: apt-mark unhold {{ item }}
with_items: '{{ r_base_packages_hold_list | default([]) }}'
when: r_base_packages_hold_list is defined
ignore_errors: True
tags: [ 'r_software', 'r_pkg', 'r_pkg_hold' ]
- name: Install the R base packages.
apt: pkg={{ r_base_packages_list }} state={{ r_packages_main_state }} force=yes update_cache=yes cache_valid_time=3600
tags: [ 'r_software', 'r_pkg' ]
- name: When we install specific R deb packages, put them on hold
shell: apt-mark hold {{ item }}
with_items: '{{ r_base_packages_hold_list| default([]) }}'
when: r_base_specific_version
ignore_errors: True
tags: [ 'r_software', 'r_pkg', 'r_pkg_hold' ]
- name: Install the R additional modules from the deb repo
apt: pkg={{ r_plugins_packages_list | default([]) }} state={{ r_packages_state }} force=yes
when: r_plugins_from_deb
tags: [ 'r_software', 'r_pkg' ]
- name: Configure the default CRAN mirror
template: src=Rprofile.site.j2 dest=/etc/R/Rprofile.site owner=root group=root mode=0444
when: r_cran_set_default_mirror | bool
tags: [ 'r_software', 'r_profile', 'r_pkg' ]
- name: Configure the JDK environment
shell: export JAVA_HOME={{ jdk_java_home }} ; export J2SDKDIR={{ jdk_java_home }} ; export J2REDIR={{ jdk_java_home }}/jre ; R CMD javareconf ; touch /etc/R/.java{{ jdk_default }}.env_conf
args:
creates: '/etc/R/.java{{ jdk_default }}.env_conf'
when:
- jdk_java_home is defined
- jdk_default is defined
tags: [ 'r_software', 'r_profile', 'r_pkg', 'r_java' ]
- name: Install some additional repositories. They provide dependencies for some R packages
apt_repository: repo={{ item }} state=present update_cache=yes
with_items: '{{ r_apt_additional_repos }}'
when: ansible_distribution_version is version_compare('18.04', '<')
tags: [ 'r_software', 'r_apt_repo', 'r_deps' ]
- name: Install some packages needed by R packages when installed from source
apt: pkg={{ r_distribution_required_packages | default([]) }} state={{ r_packages_state }} update_cache=yes force=yes cache_valid_time=3600
tags: [ 'r_software', 'r_pkg', 'r_deps' ]
- name: Ensure that the R packages sources directory exists
file: dest={{ r_source_plugins_dest_dir }} state=directory owner=root group=root
when: r_plugins_from_sources is defined
tags: [ 'r_software', 'r_pkg', 'r_plugins', 'r_plugins_version' ]
- name: Install R packages from the cran sources repo or from an alternative repository, latest available version. First try
command: >
Rscript --slave --no-save --no-restore-history -e "if (! ('{{ item.name }}' %in% installed.packages()[,'Package'])) { install.packages(pkgs='{{ item.name }}', repos=c('{{ item.repo | default ('https://cloud.r-project.org') }}/')); print('Added'); } else { print('Already installed'); }"
register: install_plugins_result
failed_when: "install_plugins_result.rc != 0 or 'had non-zero exit status' in install_plugins_result.stderr"
changed_when: "'Added' in install_plugins_result.stdout"
with_items: '{{ r_plugins_list_to_install | default([]) }}'
ignore_errors: True
tags: [ 'r_software', 'r_pkg', 'r_plugins' ]
- name: Get the R packages sources that need to be installed
get_url: url={{ item.url }} dest={{ r_source_plugins_dest_dir }}
with_items: '{{ r_plugins_from_sources | default([]) }}'
tags: [ 'r_software', 'r_pkg', 'r_plugins', 'r_plugins_version' ]
- name: Install R packages from the cran sources, specific versions. First round
command: >
Rscript --slave --no-save --no-restore-history -e "if (! ('{{ item.name }}' %in% installed.packages()[,'Package'])) { install.packages('{{ r_source_plugins_dest_dir }}/{{ item.source }}', repos = NULL, type='source'); print('Added'); } else if (packageVersion('{{ item.name }}') != '{{ item.version }}') { install.packages('{{ r_source_plugins_dest_dir }}/{{ item.source }}', repos = NULL, type='source'); print('Added'); } else { print('Already Installed'); }"
register: install_s_plugins_result
failed_when: "install_s_plugins_result.rc != 0 or 'had non-zero exit status' in install_s_plugins_result.stderr"
changed_when: '"Added" in install_s_plugins_result.stdout'
with_items: '{{ r_plugins_from_sources | default([]) }}'
ignore_errors: True
tags: [ 'r_software', 'r_pkg', 'r_plugins', 'r_plugins_version' ]
- name: Install R packages from the cran sources, specific versions. Second round, to avoid circular dependencies
command: >
Rscript --slave --no-save --no-restore-history -e "if (! ('{{ item.name }}' %in% installed.packages()[,'Package'])) { install.packages('{{ r_source_plugins_dest_dir }}/{{ item.source }}', repos = NULL, type='source'); print('Added'); } else if (packageVersion('{{ item.name }}') != '{{ item.version }}') { install.packages('{{ r_source_plugins_dest_dir }}/{{ item.source }}', repos = NULL, type='source'); print('Added'); } else { print('Already Installed'); }"
register: install_s_plugins_result
failed_when: "install_s_plugins_result.rc != 0 or 'had non-zero exit status' in install_s_plugins_result.stderr"
changed_when: '"Added" in install_s_plugins_result.stdout'
with_items: '{{ r_plugins_from_sources | default([]) }}'
when: ( install_s_plugins_result | failed )
tags: [ 'r_software', 'r_pkg', 'r_plugins', 'r_plugins_version' ]
- name: Install R packages from the cran sources repo or from an alternative repository, latest available version. Second try
command: >
Rscript --slave --no-save --no-restore-history -e "if (! ('{{ item.name }}' %in% installed.packages()[,'Package'])) { install.packages(pkgs='{{ item.name }}', repos=c('{{ item.repo | default ('https://cloud.r-project.org') }}/')); print('Added'); } else { print('Already installed'); }"
register: install_plugins_result
failed_when: "install_plugins_result.rc != 0 or 'had non-zero exit status' in install_plugins_result.stderr"
changed_when: "'Added' in install_plugins_result.stdout"
with_items: '{{ r_plugins_list_to_install | default([]) }}'
when: ( install_plugins_result | failed )
tags: [ 'r_software', 'r_pkg', 'r_plugins' ]
- name: Install R packages from github
command: >
Rscript --slave --no-save --no-restore-history -e "if (! ('{{ item.plugin_name }}' %in% installed.packages()[,'Package'])) { require(devtools); require(methods) ; options(repos='{{ r_cran_mirror_site }}/') ; install_github('{{ item.github_user }}/{{ item.plugin_name }}'); print('Added'); } else { print('Already Installed'); }"
register: install_github_plugins_result
failed_when: "install_github_plugins_result.rc != 0 or 'had non-zero exit status' in install_github_plugins_result.stderr"
changed_when: "'Added' in install_github_plugins_result.stdout"
with_items: '{{ r_plugins_from_github | default([]) }}'
tags: [ 'r_software', 'r_pkg', 'r_plugins', 'r_plugins_github' ]
ignore_errors: True
- name: Install R packages from the cran sources, specific versions. First round
command: >
Rscript --slave --no-save --no-restore-history -e "if (! ('{{ item.name }}' %in% installed.packages()[,'Package'])) { install.packages('{{ r_source_plugins_dest_dir }}/{{ item.source }}', repos = NULL, type='source'); print('Added'); } else if (packageVersion('{{ item.name }}') != '{{ item.version }}') { install.packages('{{ r_source_plugins_dest_dir }}/{{ item.source }}', repos = NULL, type='source'); print('Added'); } else { print('Already Installed'); }"
register: install_s_plugins_result
failed_when: "install_s_plugins_result.rc != 0 or 'had non-zero exit status' in install_s_plugins_result.stderr"
changed_when: '"Added" in install_s_plugins_result.stdout'
with_items: '{{ r_plugins_from_sources | default([]) }}'
ignore_errors: True
tags: [ 'r_software', 'r_pkg', 'r_plugins', 'r_plugins_version' ]
- name: Install R packages from the cran sources, specific versions. Second round, to avoid circular dependencies
command: >
Rscript --slave --no-save --no-restore-history -e "if (! ('{{ item.name }}' %in% installed.packages()[,'Package'])) { install.packages('{{ r_source_plugins_dest_dir }}/{{ item.source }}', repos = NULL, type='source'); print('Added'); } else if (packageVersion('{{ item.name }}') != '{{ item.version }}') { install.packages('{{ r_source_plugins_dest_dir }}/{{ item.source }}', repos = NULL, type='source'); print('Added'); } else { print('Already Installed'); }"
register: install_s_plugins_result
failed_when: "install_s_plugins_result.rc != 0 or 'had non-zero exit status' in install_s_plugins_result.stderr"
changed_when: '"Added" in install_s_plugins_result.stdout'
with_items: '{{ r_plugins_from_sources | default([]) }}'
when: ( install_s_plugins_result | failed )
tags: [ 'r_software', 'r_pkg', 'r_plugins', 'r_plugins_version' ]
- name: Remove R unwanted packages
command: >
Rscript --slave --no-save --no-restore-history -e "if (! ('{{ item }}' %in% installed.packages()[,'Package'])) { print('Not installed'); } else { remove.packages(pkgs='{{ item }}'); print('Removed'); }"
register: remove_plugins_result
failed_when: remove_plugins_result.rc != 0
changed_when: "'Removed' in remove_plugins_result.stdout"
with_items: '{{ r_plugins_list_to_remove | default([]) }}'
when: r_plugins_list_to_remove is defined
tags: [ 'r_software', 'r_pkg', 'r_plugins' ]

View File

@@ -1,33 +0,0 @@
---
- block:
- name: Install the R packages updater script
template: src=update_r_packages.sh.j2 dest=/usr/local/bin/update_r_packages owner=root group=root mode=0755
- name: Create the R packages updater SVN base directory
file: dest={{ r_packages_svn_base_dir }} state=directory
- name: Cron job that installs new R packages, if any
cron: name="install new R packages" user=root cron_file=install-r-packages minute="*/10" hour="5-23,1-2" job="/usr/local/bin/update_r_packages install >/var/log/install_r_packages 2>&1" state=present
- name: Cron job that upgrades existing R packages and installs new ones, if any
cron: name="install new R packages" user=root cron_file=upgrade-r-packages minute="7" hour="3" job="/usr/local/bin/update_r_packages upgrade >/var/log/update_r_packages 2>&1" state=present
when: r_packages_updater
tags: [ 'r_software', 'r_pkg', 'r_plugins', 'r_plugins_github', 'r_cran_pkgs', 'r_github_pkgs', 'r_packages_updater' ]
- block:
- name: Remove the R packages updater script
file: dest=/usr/local/bin/update_r_packages state=absent
- name: Remove the R packages updater SVN base directory
file: dest={{ r_packages_svn_base_dir }} state=absent
- name: Remove the cron job that installs new R packages
cron: name="install new R packages" user=root minute="*/10" cron_file=install-r-packages job="/usr/local/bin/update_r_packages install >/var/log/install_r_packages 2>&1" state=absent
- name: Remove the cron job that upgrades existing R packages and installs new ones
cron: name="install new R packages" user=root cron_file=upgrade-r-packages hour="3" job="/usr/local/bin/update_r_packages upgrade >/var/log/update_r_packages 2>&1" state=absent
when: not r_packages_updater
tags: [ 'r_software', 'r_pkg', 'r_plugins', 'r_plugins_github', 'r_cran_pkgs', 'r_github_pkgs', 'r_packages_updater' ]

View File

@@ -1,8 +0,0 @@
---
- name: Install the script that cleans up the R deb and cran packages
copy: src=r_packages_cleanup.sh dest=/usr/local/bin/r_packages_cleanup owner=root group=root mode=0500
tags: [ 'r_software', 'r_pkgs', 'r_cleanup' ]
- name: Remove all the old R deb and cran packages. Otherwise the upgrade will fail miserably
shell: /usr/local/bin/r_packages_cleanup
tags: [ 'r_software', 'r_pkgs', 'r_cleanup' ]

View File

@@ -1,22 +0,0 @@
## Emacs please make this -*- R -*-
## empty Rprofile.site for R on Debian
##
## Copyright (C) 2008 Dirk Eddelbuettel and GPL'ed
##
## see help(Startup) for documentation on ~/.Rprofile and Rprofile.site
#
# NOTE: managed by ansible
#
local({r <- getOption("repos")
r["CRAN"] <- "{{ r_cran_mirror_site }}"
options(repos=r)
})
# Explicitly set the R encoding using the system one, if it exists
if (file.exists("/etc/default/locale")) {
readRenviron("/etc/default/locale")
LANG <- Sys.getenv("LANG")
if(nchar(LANG))
Sys.setlocale("LC_ALL", LANG)
}

View File

@@ -1,239 +0,0 @@
#!/bin/bash
#
# TODO: kill an old process if it has been running for too long (12 hours?)
# using something like ps -o etimes= -p "$PROCNUM"
#
export PATH="$PATH:/bin:/sbin:/usr/bin:/usr/sbin:/usr/local/bin:/usr/local/sbin"
RETVAL=
PARAMS=$#
ACTION=$1
PROCNUM=$$
OLDPROC=
OLDPROC_RUNNING=
LOCKDIR=/var/run
LOCK_FILE=$LOCKDIR/.update_r_pkgs.lock
TMP_FILES_DIR=/var/tmp/r_pkgs_update
# We cannot answer questions
DEBIAN_FRONTEND=noninteractive
R_CRAN_MIRROR={{ r_cran_mirror_site }}
R_PKGS_FROM_SVN={{ r_package_updater_via_subversion }}
R_PKGS_SVN_DIR=RPackagesManagement
R_PKGS_SVN_URL={{ r_package_updater_subversion_repo }}
R_PKGS_SVN_BASE_DIR={{ r_packages_svn_base_dir }}
R_PKGS_FILES_PREFIX={{ r_packages_svn_files_prefix }}
SVN_UPDATE_STATUS=
# In seconds. 60*60*6=21600s (6h)
UPDATER_PROCESS_MAX_RUNTIME=21600
# - debian packages list format:
# one package per line
DEB_PKGS_SKIP=0
DEBIAN_PKGS_LIST_URL={{ r_debian_packages_list_url | default('') }}
PKGS_LIST=
# - R packages list format:
# name[:mirror]
CRAN_PKGS_SKIP=0
R_PKGS_LIST_URL={{ r_cran_packages_list_url | default('') }}
R_PKGS_LIST=
# - R packages from github list format:
# - owner/package
GITHUB_PKGS_SKIP=0
R_PKGS_FROM_GITHUB_LIST_URL={{ r_github_packages_list_url | default('') }}
R_PKGS_GITHUB=
trap "logger 'update_r_packages: trap intercepted, exiting.' ; cleanup" SIGHUP SIGINT SIGTERM
function cleanup() {
logger "update_r_packages: cleaning up"
rm -f $LOCK_FILE
rm -fr $TMP_FILES_DIR
}
function usage() {
if [ $PARAMS -ne 1 ] ; then
echo "Need at least an argument: 'upgrade' or 'install'."
echo "- 'upgrade' installs new packages and upgrades the existin ones when needed."
echo "- 'install' installs new packages."
cleanup
exit 1
fi
}
function get_args() {
if [ "$ACTION" != "upgrade" -a "$ACTION" != "install" ] ; then
usage
fi
}
function fail() {
logger "update_r_packages: Something went wrong, exiting."
cleanup
exit 1
}
function init_env() {
if [ -f $LOCK_FILE ] ; then
OLDPROC=$( cat $LOCK_FILE )
OLDPROC_RUNNING=$( ps auwwx | grep -v grep | grep $OLDPROC | awk '{ print $2 }' )
RETVAL=$?
if [ ! -z "$OLDPROC_RUNNING" ] ; then
logger "update_r_packages: pid of the already running process: $OLDPROC_RUNNING"
OLDPROC_RUNNING_TIME=$( ps -o etimes= -p ${OLDPROC_RUNNING} )
if [ $OLDPROC_RUNNING_TIME -gt $UPDATER_PROCESS_MAX_RUNTIME ] ; then
logger "update_r_packages: process $OLDPROC_RUNNING was running for $OLDPROC_RUNNING_TIME seconds. Got stuck, killing it"
kill -9 $OLDPROC_RUNNING
cleanup
else
logger "update_r_packages: another process is running, exiting."
exit 0
fi
else
logger "update_r_packages: lock file exist but the process not. Continuing."
rm -fr $TMP_FILES_DIR
fi
else
logger 'update_r_packages: no other jobs running, proceeding.'
fi
RETVAL=
echo "$PROCNUM" > $LOCK_FILE
mkdir -p $TMP_FILES_DIR
}
function get_data_files() {
logger "update_r_packages: get the single files from http."
# Get the packages list
if [ -z $DEBIAN_PKGS_LIST_URL ] ; then
DEB_PKGS_SKIP=1
logger "update_r_packages: the debian packages list is not available."
else
PKGS_LIST=$( mktemp $TMP_FILES_DIR/rdebs.XXXXXXX )
logger "update_r_packages: getting the debian packages list."
wget -q -o /dev/null -O $PKGS_LIST $DEBIAN_PKGS_LIST_URL
fi
if [ -z $R_PKGS_LIST_URL ] ; then
CRAN_PKGS_SKIP=1
logger "update_r_packages: the CRAN packages list is not available."
else
R_PKGS_LIST=$( mktemp $TMP_FILES_DIR/rpkgs.XXXXXXX )
logger "update_r_packages: getting the R packages list that will be installed from CRAN"
wget -q -o /dev/null -O $R_PKGS_LIST $R_PKGS_LIST_URL
fi
if [ -z $R_PKGS_FROM_GITHUB_LIST_URL ] ; then
GITHUB_PKGS_SKIP=1
logger "update_r_packages: the Github packages list is not available."
else
R_PKGS_GITHUB=$( mktemp $TMP_FILES_DIR/rpkgsgithub.XXXXXXX )
logger "update_r_packages: getting the R packages list that will be installed from github"
wget -q -o /dev/null -O $R_PKGS_GITHUB $R_PKGS_FROM_GITHUB_LIST_URL
fi
}
function get_data_files_from_svn() {
logger "update_r_packages: files from a SVN repo."
if [ -d $R_PKGS_SVN_BASE_DIR/$R_PKGS_SVN_DIR ] ; then
logger "update_r_packages: SVN update"
cd $R_PKGS_SVN_BASE_DIR/$R_PKGS_SVN_DIR
SVN_CLEANUP_OP=$( svn cleanup )
SVN_UPDATE_OP=$( svn update | tail -1 | grep Updated >/dev/null 2>&1 )
SVN_UPDATE_STATUS=$?
else
cd $R_PKGS_SVN_BASE_DIR
logger "update_r_packages: first SVN checkout."
svn co $R_PKGS_SVN_URL >/dev/null 2>&1
fi
PKGS_LIST=$R_PKGS_SVN_BASE_DIR/$R_PKGS_SVN_DIR/${R_PKGS_FILES_PREFIX}r_deb_pkgs.txt
R_PKGS_LIST=$R_PKGS_SVN_BASE_DIR/$R_PKGS_SVN_DIR/${R_PKGS_FILES_PREFIX}r_cran_pkgs.txt
R_PKGS_GITHUB=$R_PKGS_SVN_BASE_DIR/$R_PKGS_SVN_DIR/${R_PKGS_FILES_PREFIX}r_github_pkgs.txt
}
function debian_pkgs() {
if [ $DEB_PKGS_SKIP -eq 0 ] ; then
# Update the apt cache and install the packages in non interactive mode
logger "update_r_packages: Installing the debian dependencies"
if [ -z "$(find /var/cache/apt/pkgcache.bin -mmin -360)" ]; then
apt-get update -q >/dev/null 2>&1
else
logger "update_r_packages: APT cache not updated"
fi
>/var/log/update_r_debs.log
while read deb_pkg ; do
apt-get install ${deb_pkg} -q -y -o Dpkg::Options::="--force-confdef" -o Dpkg::Options::="--force-confold" >>/var/log/update_r_debs.log 2>&1
done < $PKGS_LIST
apt-get autoremove -y -o Dpkg::Options::="--force-confdef" -o Dpkg::Options::="--force-confold" >> /var/log/update_r_debs.log 2>&1
else
logger "update_r_packages: skipping the debian packages installation"
fi
}
function remove_r_install_packages_lock_files() {
# install.packages leaves lock files around if the process crashes
rm -fr {{ r_sitelib_path }}/00LOCK-*
}
function r_cran_pkgs() {
if [ $CRAN_PKGS_SKIP -eq 0 ] ; then
logger "update_r_packages: Installing R packages from CRAN"
for l in $( cat $R_PKGS_LIST ) ; do
pkg=$( echo $l | cut -d : -f 1 )
is_mirror_ret=
is_mirror=$( echo $l | grep ':' )
is_mirror_ret=$?
if [ $is_mirror_ret -eq 0 ] ; then
mirror=$( echo $l | cut -d : -f 2- )
else
mirror=$R_CRAN_MIRROR
fi
if [ "$ACTION" == "upgrade" ] ; then
Rscript --slave --no-save --no-restore-history -e "install.packages(pkgs='$pkg', repos=c('$mirror/'));"
else
Rscript --slave --no-save --no-restore-history -e "if (! ('$pkg' %in% installed.packages()[,'Package'])) { install.packages(pkgs='$pkg', repos=c('$mirror/')); }"
fi
done
else
logger "update_r_packages: skipping the R CRAN packages installation"
fi
}
function r_github_pkgs() {
if [ $GITHUB_PKGS_SKIP -eq 0 ] ; then
logger "update_r_packages: Installing R packages from Github"
for l in $( cat $R_PKGS_GITHUB ) ; do
pkg=$( echo $l | cut -d "/" -f 2 )
if [ "$ACTION" == "upgrade" ] ; then
#Rscript --slave --no-save --no-restore-history -e "require(devtools); require(methods); install_github('$l');"
Rscript --slave --no-save --no-restore-history -e "require(devtools); require(methods); require(jsonlite) ; package_to_install <- '$l' ; refs <- jsonlite::read_json(sprintf('https://api.github.com/repos/%s/releases', package_to_install)) ; ref_to_install <- 'master'; if(length(refs)>0) { ref_to_install <- refs[[1]][['tag_name']] } ; devtools::install_github(package_to_install, ref = ref_to_install, upgrade='always')"
else
#Rscript --slave --no-save --no-restore-history -e "if (! ('$pkg' %in% installed.packages()[,'Package'])) { require(devtools); require(methods) ; install_github('$l'); }"
Rscript --slave --no-save --no-restore-history -e "if (! ('$pkg' %in% installed.packages()[,'Package'])) { require(devtools); require(methods); require(jsonlite) ; package_to_install <- '$l' ; refs <- jsonlite::read_json(sprintf('https://api.github.com/repos/%s/releases', package_to_install)) ; ref_to_install <- 'master'; if(length(refs)>0) { ref_to_install <- refs[[1]][['tag_name']] } ; devtools::install_github(package_to_install, ref = ref_to_install, upgrade='always') }"
fi
done
else
logger "update_r_packages: skipping the R GitHub packages installation"
fi
}
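# The long Rscript one-liners above resolve the newest GitHub release tag
# before installing, falling back to 'master' when a repository has no
# releases. A hypothetical shell equivalent of that lookup (jsonlite swapped
# for grep/cut purely for illustration):
repo="tidyverse/ggplot2"   # one owner/repo line from the list
tag=$(curl -s "https://api.github.com/repos/${repo}/releases" | grep -m 1 '"tag_name"' | cut -d '"' -f 4)
echo "would install ref: ${tag:-master}"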
#########
# Main
#
usage
get_args
init_env
if [ "$R_PKGS_FROM_SVN" == 'True' ] ; then
    get_data_files_from_svn
    if [ $SVN_UPDATE_STATUS -ne 0 ] && [ "$ACTION" == "install" ] ; then
logger "update_r_packages: nothing new to install from SVN, exiting"
cleanup
exit 0
fi
else
get_data_files
fi
debian_pkgs
remove_r_install_packages_lock_files
r_cran_pkgs
r_github_pkgs
cleanup
exit 0

View File

@ -1,6 +0,0 @@
---
ckan_solr_port: 8983
solr_multicore: True
solr_cores:
- collection1

View File

@ -1,187 +0,0 @@
<?xml version="1.0" encoding="UTF-8" ?>
<!--
Licensed to the Apache Software Foundation (ASF) under one or more
contributor license agreements. See the NOTICE file distributed with
this work for additional information regarding copyright ownership.
The ASF licenses this file to You under the Apache License, Version 2.0
(the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
-->
<!--
NB Please copy changes to this file into the multilingual schema:
ckanext/multilingual/solr/schema.xml
-->
<!-- We update the version when there is a backward-incompatible change to this
schema. In this case the version should be set to the next CKAN version number.
(x.y but not x.y.z since it needs to be a float) -->
<schema name="ckan" version="2.3">
<types>
<fieldType name="string" class="solr.StrField" sortMissingLast="true" omitNorms="true"/>
<fieldType name="boolean" class="solr.BoolField" sortMissingLast="true" omitNorms="true"/>
<fieldtype name="binary" class="solr.BinaryField"/>
<fieldType name="int" class="solr.TrieIntField" precisionStep="0" omitNorms="true" positionIncrementGap="0"/>
<fieldType name="float" class="solr.TrieFloatField" precisionStep="0" omitNorms="true" positionIncrementGap="0"/>
<fieldType name="long" class="solr.TrieLongField" precisionStep="0" omitNorms="true" positionIncrementGap="0"/>
<fieldType name="double" class="solr.TrieDoubleField" precisionStep="0" omitNorms="true" positionIncrementGap="0"/>
<fieldType name="tint" class="solr.TrieIntField" precisionStep="8" omitNorms="true" positionIncrementGap="0"/>
<fieldType name="tfloat" class="solr.TrieFloatField" precisionStep="8" omitNorms="true" positionIncrementGap="0"/>
<fieldType name="tlong" class="solr.TrieLongField" precisionStep="8" omitNorms="true" positionIncrementGap="0"/>
<fieldType name="tdouble" class="solr.TrieDoubleField" precisionStep="8" omitNorms="true" positionIncrementGap="0"/>
<fieldType name="date" class="solr.TrieDateField" omitNorms="true" precisionStep="0" positionIncrementGap="0"/>
<fieldType name="tdate" class="solr.TrieDateField" omitNorms="true" precisionStep="6" positionIncrementGap="0"/>
<fieldType name="text" class="solr.TextField" positionIncrementGap="100">
<analyzer type="index">
<tokenizer class="solr.WhitespaceTokenizerFactory"/>
<filter class="solr.WordDelimiterFilterFactory" generateWordParts="1" generateNumberParts="1" catenateWords="1" catenateNumbers="1" catenateAll="0" splitOnCaseChange="1"/>
<filter class="solr.LowerCaseFilterFactory"/>
<filter class="solr.SnowballPorterFilterFactory" language="English" protected="protwords.txt"/>
<filter class="solr.ASCIIFoldingFilterFactory"/>
</analyzer>
<analyzer type="query">
<tokenizer class="solr.WhitespaceTokenizerFactory"/>
<filter class="solr.SynonymFilterFactory" synonyms="synonyms.txt" ignoreCase="true" expand="true"/>
<filter class="solr.WordDelimiterFilterFactory" generateWordParts="1" generateNumberParts="1" catenateWords="0" catenateNumbers="0" catenateAll="0" splitOnCaseChange="1"/>
<filter class="solr.LowerCaseFilterFactory"/>
<filter class="solr.SnowballPorterFilterFactory" language="English" protected="protwords.txt"/>
<filter class="solr.ASCIIFoldingFilterFactory"/>
</analyzer>
</fieldType>
<!-- A general unstemmed text field - good if one does not know the language of the field -->
<fieldType name="textgen" class="solr.TextField" positionIncrementGap="100">
<analyzer type="index">
<tokenizer class="solr.WhitespaceTokenizerFactory"/>
<filter class="solr.WordDelimiterFilterFactory" generateWordParts="1" generateNumberParts="1" catenateWords="1" catenateNumbers="1" catenateAll="0" splitOnCaseChange="0"/>
<filter class="solr.LowerCaseFilterFactory"/>
</analyzer>
<analyzer type="query">
<tokenizer class="solr.WhitespaceTokenizerFactory"/>
<filter class="solr.SynonymFilterFactory" synonyms="synonyms.txt" ignoreCase="true" expand="true"/>
<filter class="solr.WordDelimiterFilterFactory" generateWordParts="1" generateNumberParts="1" catenateWords="0" catenateNumbers="0" catenateAll="0" splitOnCaseChange="0"/>
<filter class="solr.LowerCaseFilterFactory"/>
</analyzer>
</fieldType>
</types>
<fields>
<field name="index_id" type="string" indexed="true" stored="true" required="true" />
<field name="id" type="string" indexed="true" stored="true" required="true" />
<field name="site_id" type="string" indexed="true" stored="true" required="true" />
<field name="title" type="text" indexed="true" stored="true" />
<field name="entity_type" type="string" indexed="true" stored="true" omitNorms="true" />
<field name="dataset_type" type="string" indexed="true" stored="true" />
<field name="state" type="string" indexed="true" stored="true" omitNorms="true" />
<field name="name" type="string" indexed="true" stored="true" omitNorms="true" />
<field name="revision_id" type="string" indexed="true" stored="true" omitNorms="true" />
<field name="version" type="string" indexed="true" stored="true" />
<field name="url" type="string" indexed="true" stored="true" omitNorms="true" />
<field name="ckan_url" type="string" indexed="true" stored="true" omitNorms="true" />
<field name="download_url" type="string" indexed="true" stored="true" omitNorms="true" />
<field name="notes" type="text" indexed="true" stored="true"/>
<field name="author" type="textgen" indexed="true" stored="true" />
<field name="author_email" type="textgen" indexed="true" stored="true" />
<field name="maintainer" type="textgen" indexed="true" stored="true" />
<field name="maintainer_email" type="textgen" indexed="true" stored="true" />
<field name="license" type="string" indexed="true" stored="true" />
<field name="license_id" type="string" indexed="true" stored="true" />
<field name="ratings_count" type="int" indexed="true" stored="false" />
<field name="ratings_average" type="float" indexed="true" stored="false" />
<field name="tags" type="string" indexed="true" stored="true" multiValued="true"/>
<field name="groups" type="string" indexed="true" stored="true" multiValued="true"/>
<field name="organization" type="string" indexed="true" stored="true" multiValued="false"/>
<field name="capacity" type="string" indexed="true" stored="true" multiValued="false"/>
<field name="res_name" type="textgen" indexed="true" stored="true" multiValued="true" />
<field name="res_description" type="textgen" indexed="true" stored="true" multiValued="true"/>
<field name="res_format" type="string" indexed="true" stored="true" multiValued="true"/>
<field name="res_url" type="string" indexed="true" stored="true" multiValued="true"/>
<field name="res_type" type="string" indexed="true" stored="true" multiValued="true"/>
<!-- Fields needed by the spatial extension-->
<field name="bbox_area" type="float" indexed="true" stored="true" />
<field name="maxx" type="float" indexed="true" stored="true" />
<field name="maxy" type="float" indexed="true" stored="true" />
<field name="minx" type="float" indexed="true" stored="true" />
<field name="miny" type="float" indexed="true" stored="true" />
<!-- catchall field, containing all other searchable text fields (implemented
via copyField further on in this schema -->
<field name="text" type="text" indexed="true" stored="false" multiValued="true"/>
<field name="urls" type="text" indexed="true" stored="false" multiValued="true"/>
<field name="depends_on" type="text" indexed="true" stored="false" multiValued="true"/>
<field name="dependency_of" type="text" indexed="true" stored="false" multiValued="true"/>
<field name="derives_from" type="text" indexed="true" stored="false" multiValued="true"/>
<field name="has_derivation" type="text" indexed="true" stored="false" multiValued="true"/>
<field name="links_to" type="text" indexed="true" stored="false" multiValued="true"/>
<field name="linked_from" type="text" indexed="true" stored="false" multiValued="true"/>
<field name="child_of" type="text" indexed="true" stored="false" multiValued="true"/>
<field name="parent_of" type="text" indexed="true" stored="false" multiValued="true"/>
<field name="views_total" type="int" indexed="true" stored="false"/>
<field name="views_recent" type="int" indexed="true" stored="false"/>
<field name="resources_accessed_total" type="int" indexed="true" stored="false"/>
<field name="resources_accessed_recent" type="int" indexed="true" stored="false"/>
<field name="metadata_created" type="date" indexed="true" stored="true" multiValued="false"/>
<field name="metadata_modified" type="date" indexed="true" stored="true" multiValued="false"/>
<field name="indexed_ts" type="date" indexed="true" stored="true" default="NOW" multiValued="false"/>
<!-- Copy the title field into titleString, and treat as a string
(rather than text type). This allows us to sort on the titleString -->
<field name="title_string" type="string" indexed="true" stored="false" />
<field name="data_dict" type="string" indexed="false" stored="true" />
<field name="validated_data_dict" type="string" indexed="false" stored="true" />
<field name="_version_" type="string" indexed="true" stored="true"/>
<dynamicField name="*_date" type="date" indexed="true" stored="true" multiValued="false"/>
<dynamicField name="extras_*" type="text" indexed="true" stored="true" multiValued="false"/>
<dynamicField name="res_extras_*" type="text" indexed="true" stored="true" multiValued="true"/>
<dynamicField name="vocab_*" type="string" indexed="true" stored="true" multiValued="true"/>
<dynamicField name="*" type="string" indexed="true" stored="false"/>
</fields>
<uniqueKey>index_id</uniqueKey>
<defaultSearchField>text</defaultSearchField>
<solrQueryParser defaultOperator="AND"/>
<copyField source="url" dest="urls"/>
<copyField source="ckan_url" dest="urls"/>
<copyField source="download_url" dest="urls"/>
<copyField source="res_url" dest="urls"/>
<copyField source="extras_*" dest="text"/>
<copyField source="res_extras_*" dest="text"/>
<copyField source="vocab_*" dest="text"/>
<copyField source="urls" dest="text"/>
<copyField source="name" dest="text"/>
<copyField source="title" dest="text"/>
<copyField source="text" dest="text"/>
<copyField source="license" dest="text"/>
<copyField source="notes" dest="text"/>
<copyField source="tags" dest="text"/>
<copyField source="groups" dest="text"/>
<copyField source="organization" dest="text"/>
<copyField source="res_name" dest="text"/>
<copyField source="res_description" dest="text"/>
<copyField source="maintainer" dest="text"/>
<copyField source="author" dest="text"/>
</schema>
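Given this schema, unqualified query terms hit the catch-all "text" field and are ANDed together. A hypothetical smoke test, assuming the collection1 core and port 8983 from the defaults above and a /solr context path:

curl -s "http://localhost:8983/solr/collection1/select?q=climate+dataset&wt=json"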

View File

@ -1,3 +0,0 @@
---
- name: Solr Restart
service: name=tomcat-instance-{{ ckan_solr_port }} state=restarted

View File

@ -1,14 +0,0 @@
---
- name: Install the solr schema used by CKAN
file: src=/usr/lib/ckan/default/src/ckan/ckan/config/solr/schema.xml dest={{ solr_collections_base_dir }}/{{ item }}/conf/schema.xml state=link force=yes
with_items: '{{ solr_cores }}'
when: not ckan_geonetwork_harvester
notify: Solr Restart
tags: [ 'ckan', 'solr', 'solr_schema', 'solr_core' ]
- name: Install the solr schema used by CKAN, modified with the spatial fields
copy: src=schema.xml dest={{ solr_collections_base_dir }}/{{ item }}/conf/schema.xml force=yes
with_items: '{{ solr_cores }}'
when: ckan_geonetwork_harvester
notify: Solr Restart
tags: [ 'ckan', 'solr', 'solr_schema', 'solr_core' ]
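A quick verification sketch (the conf path is hypothetical, rooted at solr_collections_base_dir): the stock CKAN schema shows up as a symlink into the CKAN source tree, while the spatial variant is a regular copied file:

ls -l /var/solr/data/collection1/conf/schema.xml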

View File

@ -1,2 +0,0 @@
---
remote_user: root

View File

@ -1,209 +0,0 @@
---
# To create the first sysadmin user:
# . /usr/lib/ckan/default/bin/activate
# cd /usr/lib/ckan/default/src/ckan
# You have to create your first CKAN sysadmin user from the command line. For example, to create a user called seanh and make him a sysadmin:
# paster sysadmin add seanh -c /etc/ckan/default/production.ini
#
# To create some test data:
# paster create-test-data -c /etc/ckan/default/production.ini
ckan_version: 2.6
ckan_deb_file: 'python-ckan_{{ ckan_version }}-{{ ansible_distribution_release }}_amd64.deb'
ckan_package_url: 'http://packaging.ckan.org/{{ ckan_deb_file }}'
ckan_libdir: /usr/lib/ckan
ckan_confdir: /etc/ckan/default
ckan_virtenv: '{{ ckan_libdir }}/default'
ckan_file_harvesting_dir: /var/lib/ckan
ckan_file_storage_dir: '{{ ckan_file_harvesting_dir }}/dev'
ckan_config_file: '{{ ckan_confdir }}/production.ini'
ckan_webapp_port: 8080
ckan_solr_port: 8983
ckan_shell_user: ckan
ckan_logdir: /var/log/ckan
ckan_db_name: ckan
ckan_db_user: ckan
# By default, initialize the db and solr. Disable it if you are reinstalling and want to keep the existing data
ckan_init_db_and_solr: True
# CKAN plugins
ckan_plugins_state: present
# yes: update the repository; no: do not update
ckan_git_plugins_state: 'no'
# Order is important
ckan_geonetwork_harvester: False
ckan_ckanext_harvester_url: 'git+https://github.com/ckan/ckanext-harvest.git#egg=ckanext-harvest'
ckan_ckanext_spatial_url: 'git+https://github.com/okfn/ckanext-spatial.git#egg=ckanext-spatial'
ckan_geonetwork_harvester_url: 'https://github.com/geosolutions-it/ckanext-geonetwork.git'
ckan_geoview: False
ckan_geoview_url: ckanext-geoview
ckan_geoview_name: resource_proxy
ckan_dcat: False
ckan_dcat_url: 'git+https://github.com/ckan/ckanext-dcat.git#egg=ckanext-dcat'
ckan_dcat_1_0_0_url: 'git+https://github.com/ckan/ckanext-dcat.git@v1.0.0#egg=ckanext-dcat'
# dcat implements harvesters too.
# ckan_dcat_name: 'dcat dcat_rdf_harvester dcat_json_harvester dcat_json_interface'
ckan_dcat_name: 'dcat dcat_json_interface'
# Set this to true to install a cron job that regularly runs the harvesters
ckan_harvester_run: False
ckan_pdfview: False
ckan_ckanext_pdfview_url: ckanext-pdfview
ckan_privatedatasets: False
ckan_privatedatasets_url: ckanext-privatedatasets
ckan_privatedatasets_name: privatedatasets
ckan_hierarchy: False
ckan_hierarchy_url: 'git+https://github.com/datagovuk/ckanext-hierarchy.git#egg=ckanext-hierarchy'
ckan_hierarchy_name: hierarchy_display hierarchy_form
ckan_pages: False
ckan_pages_url: 'git+https://github.com/ckan/ckanext-pages.git#egg=ckanext-pages'
ckan_pages_name: pages
ckan_ldap: False
#ckan_ldap_url: 'git+https://github.com/NaturalHistoryMuseum/ckanext-ldap'
ckan_ldap_url: 'https://github.com/NaturalHistoryMuseum/ckanext-ldap'
ckan_ldap_name: ldap
ckan_ldap_uri: 'ldap://ldap.example.org'
ckan_ldap_base_dn: ''
ckan_ldap_search_filter: 'uid={login}'
ckan_ldap_user_fullname: 'cn'
ckan_ldap_username: uid
ckan_ldap_email: mail
ckan_ldap_prevent_edits: True
ckan_ldap_fallback: True
ckan_ckanext_lire: False
ckan_ckanext_lire_n: lire
ckan_ckanext_lire_url: 'https://github.com/milicp/ckanext-lire.git'
# Kata OAI-PMH
ckan_kata_oai_pmh: False
ckan_oai_pmh_name: oaipmh
ckan_oai_pmh_state: absent
ckan_oai_pmh_url: 'git+https://github.com/kata-csc/ckanext-oaipmh#egg=ckanext-oaipmh'
ckan_oai_pmh_kata_plugin_url: 'git+https://github.com/kata-csc/ckanext-kata.git#egg=ckanext-kata'
ckan_oai_pmh_kata_ini_state: 'present'
ckan_oai_pmh_kata_ini_options:
- { section: 'app:main', option: 'kata.storage.malware_scan', value: 'false', state: '{{ ckan_oai_pmh_kata_ini_state }}' }
- { section: 'app:main', option: 'kata.ldap.enabled', value: 'false', state: '{{ ckan_oai_pmh_kata_ini_state }}' }
- { section: 'app:main', option: 'kata.disable_contact', value: 'true', state: '{{ ckan_oai_pmh_kata_ini_state }}' }
# OLD OAI-PMH
ckan_oai_pm: False
ckan_oai_pm_name: oaipmh
ckan_oai_pm_state: absent
ckan_oai_pm_url: 'git+https://github.com/florenthemmi/ckanext-oaipmh#egg=ckanext-oaipm'
# Google analytics
ckan_google_analytics: False
ckan_ga_plugin_state: '{{ ckan_plugins_state }}'
ckan_google_analytics_name: googleanalytics
ckan_google_analytics_url: 'git+https://github.com/ckan/ckanext-googleanalytics.git#egg=ckanext-googleanalytics'
ckan_google_analytics_fixed_file: 'http://svn.research-infrastructures.eu/public/d4science/gcube/trunk/data-catalogue/ckan-d4science-extension/{{ ckan_version }}/ckan-default/plugins/googleanalytics/plugin.py'
#CKANEXT-RATING
ckan_star_ratings: False
ckan_star_ratings_state: present
ckan_star_ratings_name: rating
ckan_star_ratings_url: 'git+https://github.com/6aika/ckanext-rating.git#egg=ckanext-rating'
ckan_memcache_sessions: False
ckan_memcache_deb_pkgs:
- libmemcached10
- libmemcached-dev
ckan_memcache_ini_opts:
- { section: 'app:main', option: 'beaker.session.type', value: 'ext:memcached', state: 'present' }
- { section: 'app:main', option: 'beaker.session.url', value: "{{ mc_ipaddress | default('127.0.0.1') }}:{{ mc_port | default('11211') }}", state: 'present' }
# Google analytics reports
ckan_ga_reports: False
ckan_ga_reports_name: ga-report
ckan_ga_reports_url: 'git+https://github.com/datagovuk/ckanext-ga-report.git#egg=ckanext-ga-report'
ckan_profiler: False
ckan_profiler_url: 'git+https://github.com/morty/ckanext-profile.git#egg=ckanext-profile'
# CKAN-DATESEARCH
ckan_datesearch: False
ckan_datesearch_name: datesearch
ckan_datesearch_state: present
ckan_datesearch_url: 'https://github.com/EUDAT-B2FIND/ckanext-datesearch'
# Needed to install some CKAN plugins
ckan_additional_packages:
- git
- libxslt1-dev
- gcc
- python-dev
- libffi-dev
- libxml2-dev
- zlib1g-dev
- libxslt1-dev
- libgeos-c1
- libldap2-dev
- libsasl2-dev
- libssl-dev
ckan_pip_dependencies:
- lxml
- factory
- python-ldap
- rdflib
- 'urllib3[secure]'
- bleach
- pyOpenSSL
- idna
- certifi
- xmltodict
- ndg-httpsclient
- pyasn1
- enum
- ipaddress
- x509
ckan_pip_versioned_dependencies:
- { name: 'SQLAlchemy', version: '0.9.6', state: 'present' }
- { name: 'cryptography', version: '2.8', state: 'present' }
#
apache_additional_packages:
- libapache2-mod-uwsgi
- libpq5
apache_additional_modules:
- uwsgi
ckan_production_ini_opts:
- { section: 'app:main', option: 'ckan.site_id', value: 'ckan_installation', state: 'present' }
- { section: 'app:main', option: 'sqlalchemy.url', value: 'postgresql://{{ ckan_db_user }}:{{ ckan_db_pwd }}@{{ psql_db_host }}/{{ ckan_db_name }}', state: 'present' }
- { section: 'app:main', option: 'ckan.site_url', value: 'http://{{ ansible_fqdn }}', state: 'present' }
- { section: 'app:main', option: 'solr_url', value: 'http://127.0.0.1:{{ ckan_solr_port }}/solr', state: 'present' }
- { section: 'app:main', option: 'ckan.datastore.write_url', value: 'postgresql://{{ ckan_db_user }}:{{ ckan_db_pwd }}@{{ psql_db_host }}/{{ ckan_datastore_db_name }}', state: 'present' }
- { section: 'app:main', option: 'ckan.datastore.read_url', value: 'postgresql://{{ ckan_datastore_db_reader }}:{{ ckan_db_pwd }}@{{ psql_db_host }}/{{ ckan_datastore_db_name }}', state: 'present' }
- { section: 'app:main', option: 'ckan.site_title', value: 'D4Science CKAN development installation', state: 'present' }
- { section: 'app:main', option: 'ckan.site_logo', value: '/base/images/ckan-logo.png', state: 'present' }
- { section: 'app:main', option: 'ckan.max_resource_size', value: '10', state: 'present' }
- { section: 'app:main', option: 'ckan.max_image_size', value: '2', state: 'present' }
- { section: 'app:main', option: 'ckan.tracking_enabled', value: 'true', state: 'present' }
- { section: 'app:main', option: 'ckan.privatedatasets.show_acquire_url_on_create', value: 'true', state: 'present' }
- { section: 'app:main', option: 'ckan.privatedatasets.show_acquire_url_on_edit', value: 'true', state: 'present' }
ckan_production_ini_plugins_opts:
- { section: 'app:main', option: 'ckan.plugins', value: 'stats text_view image_view recline_view datastore datapusher harvest', state: 'present' }
- { section: 'app:main', option: 'ckan.datapusher.url', value: 'http://127.0.0.1:8800', state: 'present' }
- { section: 'app:main', option: 'ckan.datapusher.formats', value: 'csv xls xlsx tsv application/csv application/vnd.ms-excel application/vnd.openxmlformats-officedocument.spreadsheetml.sheet', state: 'present' }
- { section: 'app:main', option: 'ckan.storage_path', value: '{{ ckan_file_storage_dir }}', state: 'present' }
- { section: 'app:main', option: 'ckan.harvest.mq.type', value: 'redis', state: 'present' }
- { section: 'app:main', option: 'ckan.harvest.mq.hostname', value: 'localhost', state: 'present' }
- { section: 'app:main', option: 'ckan.harvest.mq.port', value: '6379', state: 'present' }
- { section: 'app:main', option: 'ckan.harvest.mq.db', value: '0', state: 'present' }
- { section: 'app:main', option: 'ckanext.spatial.search_backend', value: 'solr', state: 'present' }
- { section: 'app:main', option: 'ckanext.pages.organization', value: 'true', state: 'present' }
- { section: 'app:main', option: 'ckanext.pages.group', value: 'true', state: 'present' }
- { section: 'app:main', option: 'ckanext.pages.about_menu', value: 'false', state: 'absent' }
- { section: 'app:main', option: 'ckanext.pages.group_menu', value: 'false', state: 'absent' }
- { section: 'app:main', option: 'ckanext.pages.organization_menu', value: 'false', state: 'absent' }
ckan_gather_fetch_pkgs:
- supervisor
ckan_gather_fetch_apps:
- ckan_gather_consumer
- ckan_fetch_consumer
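A hypothetical group_vars snippet (file path assumed) enabling a few of the optional plugins declared above:

cat >> group_vars/ckan.yml <<'EOF'
ckan_pages: True
ckan_geoview: True
ckan_harvester_run: True
EOF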

View File

@ -1,187 +0,0 @@
<?xml version="1.0" encoding="UTF-8" ?>
<!--
Licensed to the Apache Software Foundation (ASF) under one or more
contributor license agreements. See the NOTICE file distributed with
this work for additional information regarding copyright ownership.
The ASF licenses this file to You under the Apache License, Version 2.0
(the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
-->
<!--
NB Please copy changes to this file into the multilingual schema:
ckanext/multilingual/solr/schema.xml
-->
<!-- We update the version when there is a backward-incompatible change to this
schema. In this case the version should be set to the next CKAN version number.
(x.y but not x.y.z since it needs to be a float) -->
<schema name="ckan" version="2.3">
<types>
<fieldType name="string" class="solr.StrField" sortMissingLast="true" omitNorms="true"/>
<fieldType name="boolean" class="solr.BoolField" sortMissingLast="true" omitNorms="true"/>
<fieldtype name="binary" class="solr.BinaryField"/>
<fieldType name="int" class="solr.TrieIntField" precisionStep="0" omitNorms="true" positionIncrementGap="0"/>
<fieldType name="float" class="solr.TrieFloatField" precisionStep="0" omitNorms="true" positionIncrementGap="0"/>
<fieldType name="long" class="solr.TrieLongField" precisionStep="0" omitNorms="true" positionIncrementGap="0"/>
<fieldType name="double" class="solr.TrieDoubleField" precisionStep="0" omitNorms="true" positionIncrementGap="0"/>
<fieldType name="tint" class="solr.TrieIntField" precisionStep="8" omitNorms="true" positionIncrementGap="0"/>
<fieldType name="tfloat" class="solr.TrieFloatField" precisionStep="8" omitNorms="true" positionIncrementGap="0"/>
<fieldType name="tlong" class="solr.TrieLongField" precisionStep="8" omitNorms="true" positionIncrementGap="0"/>
<fieldType name="tdouble" class="solr.TrieDoubleField" precisionStep="8" omitNorms="true" positionIncrementGap="0"/>
<fieldType name="date" class="solr.TrieDateField" omitNorms="true" precisionStep="0" positionIncrementGap="0"/>
<fieldType name="tdate" class="solr.TrieDateField" omitNorms="true" precisionStep="6" positionIncrementGap="0"/>
<fieldType name="text" class="solr.TextField" positionIncrementGap="100">
<analyzer type="index">
<tokenizer class="solr.WhitespaceTokenizerFactory"/>
<filter class="solr.WordDelimiterFilterFactory" generateWordParts="1" generateNumberParts="1" catenateWords="1" catenateNumbers="1" catenateAll="0" splitOnCaseChange="1"/>
<filter class="solr.LowerCaseFilterFactory"/>
<filter class="solr.SnowballPorterFilterFactory" language="English" protected="protwords.txt"/>
<filter class="solr.ASCIIFoldingFilterFactory"/>
</analyzer>
<analyzer type="query">
<tokenizer class="solr.WhitespaceTokenizerFactory"/>
<filter class="solr.SynonymFilterFactory" synonyms="synonyms.txt" ignoreCase="true" expand="true"/>
<filter class="solr.WordDelimiterFilterFactory" generateWordParts="1" generateNumberParts="1" catenateWords="0" catenateNumbers="0" catenateAll="0" splitOnCaseChange="1"/>
<filter class="solr.LowerCaseFilterFactory"/>
<filter class="solr.SnowballPorterFilterFactory" language="English" protected="protwords.txt"/>
<filter class="solr.ASCIIFoldingFilterFactory"/>
</analyzer>
</fieldType>
<!-- A general unstemmed text field - good if one does not know the language of the field -->
<fieldType name="textgen" class="solr.TextField" positionIncrementGap="100">
<analyzer type="index">
<tokenizer class="solr.WhitespaceTokenizerFactory"/>
<filter class="solr.WordDelimiterFilterFactory" generateWordParts="1" generateNumberParts="1" catenateWords="1" catenateNumbers="1" catenateAll="0" splitOnCaseChange="0"/>
<filter class="solr.LowerCaseFilterFactory"/>
</analyzer>
<analyzer type="query">
<tokenizer class="solr.WhitespaceTokenizerFactory"/>
<filter class="solr.SynonymFilterFactory" synonyms="synonyms.txt" ignoreCase="true" expand="true"/>
<filter class="solr.WordDelimiterFilterFactory" generateWordParts="1" generateNumberParts="1" catenateWords="0" catenateNumbers="0" catenateAll="0" splitOnCaseChange="0"/>
<filter class="solr.LowerCaseFilterFactory"/>
</analyzer>
</fieldType>
</types>
<fields>
<field name="index_id" type="string" indexed="true" stored="true" required="true" />
<field name="id" type="string" indexed="true" stored="true" required="true" />
<field name="site_id" type="string" indexed="true" stored="true" required="true" />
<field name="title" type="text" indexed="true" stored="true" />
<field name="entity_type" type="string" indexed="true" stored="true" omitNorms="true" />
<field name="dataset_type" type="string" indexed="true" stored="true" />
<field name="state" type="string" indexed="true" stored="true" omitNorms="true" />
<field name="name" type="string" indexed="true" stored="true" omitNorms="true" />
<field name="revision_id" type="string" indexed="true" stored="true" omitNorms="true" />
<field name="version" type="string" indexed="true" stored="true" />
<field name="url" type="string" indexed="true" stored="true" omitNorms="true" />
<field name="ckan_url" type="string" indexed="true" stored="true" omitNorms="true" />
<field name="download_url" type="string" indexed="true" stored="true" omitNorms="true" />
<field name="notes" type="text" indexed="true" stored="true"/>
<field name="author" type="textgen" indexed="true" stored="true" />
<field name="author_email" type="textgen" indexed="true" stored="true" />
<field name="maintainer" type="textgen" indexed="true" stored="true" />
<field name="maintainer_email" type="textgen" indexed="true" stored="true" />
<field name="license" type="string" indexed="true" stored="true" />
<field name="license_id" type="string" indexed="true" stored="true" />
<field name="ratings_count" type="int" indexed="true" stored="false" />
<field name="ratings_average" type="float" indexed="true" stored="false" />
<field name="tags" type="string" indexed="true" stored="true" multiValued="true"/>
<field name="groups" type="string" indexed="true" stored="true" multiValued="true"/>
<field name="organization" type="string" indexed="true" stored="true" multiValued="false"/>
<field name="capacity" type="string" indexed="true" stored="true" multiValued="false"/>
<field name="res_name" type="textgen" indexed="true" stored="true" multiValued="true" />
<field name="res_description" type="textgen" indexed="true" stored="true" multiValued="true"/>
<field name="res_format" type="string" indexed="true" stored="true" multiValued="true"/>
<field name="res_url" type="string" indexed="true" stored="true" multiValued="true"/>
<field name="res_type" type="string" indexed="true" stored="true" multiValued="true"/>
<!-- Fields needed by the spatial extension-->
<field name="bbox_area" type="float" indexed="true" stored="true" />
<field name="maxx" type="float" indexed="true" stored="true" />
<field name="maxy" type="float" indexed="true" stored="true" />
<field name="minx" type="float" indexed="true" stored="true" />
<field name="miny" type="float" indexed="true" stored="true" />
<!-- catchall field, containing all other searchable text fields (implemented
via copyField further on in this schema -->
<field name="text" type="text" indexed="true" stored="false" multiValued="true"/>
<field name="urls" type="text" indexed="true" stored="false" multiValued="true"/>
<field name="depends_on" type="text" indexed="true" stored="false" multiValued="true"/>
<field name="dependency_of" type="text" indexed="true" stored="false" multiValued="true"/>
<field name="derives_from" type="text" indexed="true" stored="false" multiValued="true"/>
<field name="has_derivation" type="text" indexed="true" stored="false" multiValued="true"/>
<field name="links_to" type="text" indexed="true" stored="false" multiValued="true"/>
<field name="linked_from" type="text" indexed="true" stored="false" multiValued="true"/>
<field name="child_of" type="text" indexed="true" stored="false" multiValued="true"/>
<field name="parent_of" type="text" indexed="true" stored="false" multiValued="true"/>
<field name="views_total" type="int" indexed="true" stored="false"/>
<field name="views_recent" type="int" indexed="true" stored="false"/>
<field name="resources_accessed_total" type="int" indexed="true" stored="false"/>
<field name="resources_accessed_recent" type="int" indexed="true" stored="false"/>
<field name="metadata_created" type="date" indexed="true" stored="true" multiValued="false"/>
<field name="metadata_modified" type="date" indexed="true" stored="true" multiValued="false"/>
<field name="indexed_ts" type="date" indexed="true" stored="true" default="NOW" multiValued="false"/>
<!-- Copy the title field into titleString, and treat as a string
(rather than text type). This allows us to sort on the titleString -->
<field name="title_string" type="string" indexed="true" stored="false" />
<field name="data_dict" type="string" indexed="false" stored="true" />
<field name="validated_data_dict" type="string" indexed="false" stored="true" />
<field name="_version_" type="string" indexed="true" stored="true"/>
<dynamicField name="*_date" type="date" indexed="true" stored="true" multiValued="false"/>
<dynamicField name="extras_*" type="text" indexed="true" stored="true" multiValued="false"/>
<dynamicField name="res_extras_*" type="text" indexed="true" stored="true" multiValued="true"/>
<dynamicField name="vocab_*" type="string" indexed="true" stored="true" multiValued="true"/>
<dynamicField name="*" type="string" indexed="true" stored="false"/>
</fields>
<uniqueKey>index_id</uniqueKey>
<defaultSearchField>text</defaultSearchField>
<solrQueryParser defaultOperator="AND"/>
<copyField source="url" dest="urls"/>
<copyField source="ckan_url" dest="urls"/>
<copyField source="download_url" dest="urls"/>
<copyField source="res_url" dest="urls"/>
<copyField source="extras_*" dest="text"/>
<copyField source="res_extras_*" dest="text"/>
<copyField source="vocab_*" dest="text"/>
<copyField source="urls" dest="text"/>
<copyField source="name" dest="text"/>
<copyField source="title" dest="text"/>
<copyField source="text" dest="text"/>
<copyField source="license" dest="text"/>
<copyField source="notes" dest="text"/>
<copyField source="tags" dest="text"/>
<copyField source="groups" dest="text"/>
<copyField source="organization" dest="text"/>
<copyField source="res_name" dest="text"/>
<copyField source="res_description" dest="text"/>
<copyField source="maintainer" dest="text"/>
<copyField source="author" dest="text"/>
</schema>

View File

@ -1,10 +0,0 @@
---
- name: Restart CKAN
service: name=apache2 state=restarted sleep=10
- name: Reconfigure the supervisor daemon
shell: supervisorctl reread ; supervisorctl add ckan_gather_consumer ; supervisorctl add ckan_fetch_consumer ; supervisorctl start ckan_gather_consumer ; supervisorctl start ckan_fetch_consumer
- name: Restart fetch and gather consumers
supervisorctl: name={{ item }} state=restarted
with_items: '{{ ckan_gather_fetch_apps }}'
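On current supervisor releases, a single subcommand is a terser equivalent of the reread/add/start chain in the handler above; a hypothetical alternative:

supervisorctl update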

View File

@ -1,17 +0,0 @@
---
- name: Configure the CKAN plugins list into the configuration file
ini_file: dest={{ ckan_config_file }} section={{ item.section }} option={{ item.option }} value={{ item.value }} state={{ item.state }} backup=no
with_items: '{{ ckan_production_ini_plugins_opts }}'
notify:
- Restart CKAN
- Restart fetch and gather consumers
tags: [ 'ckan', 'ckan_ini', 'ckan_plugins' ]
- name: Configure the CKAN options used by the KATA plugin
ini_file: dest={{ ckan_config_file }} section={{ item.section }} option={{ item.option }} value={{ item.value }} state={{ item.state }} backup=no
with_items: '{{ ckan_oai_pmh_kata_ini_options }}'
notify:
- Restart CKAN
- Restart fetch and gather consumers
tags: [ 'ckan', 'ckan_ini', 'ckan_plugins', 'ckan_oai_pmh' ]

View File

@ -1,50 +0,0 @@
---
- block:
- name: Install the memcache library deb package
apt: pkg={{ ckan_memcache_deb_pkgs }} state=present cache_valid_time=1800
when: ckan_memcache_sessions is defined and ckan_memcache_sessions
tags: [ 'ckan', 'ckan_sessions', 'ckan_memcache' ]
- block:
- name: Install the memcache library
pip: name=pylibmc virtualenv={{ ckan_virtenv }} state=present
become: True
become_user: '{{ ckan_shell_user }}'
when: ckan_memcache_sessions is defined and ckan_memcache_sessions
tags: [ 'ckan', 'ckan_sessions', 'ckan_memcache' ]
- block:
- name: Configure CKAN so that it uses memcache for its sessions
ini_file: dest={{ ckan_config_file }} section={{ item.section }} option={{ item.option }} value={{ item.value }} state={{ item.state }}
with_items: '{{ ckan_memcache_ini_opts }}'
notify: Restart fetch and gather consumers
register: ckan_use_memcache
tags: [ 'ckan', 'ckan_ini', 'ckan_plugins', 'ckan_sessions', 'ckan_memcache' ]
- name: Restart CKAN after enabling the memcache sessions configuration
service: name=apache2 state=reloaded
when: ckan_use_memcache is changed
- name: Remove the CKAN session files
file: dest=/tmp/{{ ckan_site_id }}/sessions state=absent
ignore_errors: True
when: ckan_memcache_sessions is defined and ckan_memcache_sessions
tags: [ 'ckan', 'ckan_sessions', 'ckan_memcache' ]
- block:
- name: Configure CKAN to not use memcache for its sessions
ini_file: dest={{ ckan_config_file }} section={{ item.section }} option={{ item.option }} value={{ item.value }} state=absent
with_items: '{{ ckan_memcache_ini_opts }}'
notify:
- Restart CKAN
- Restart fetch and gather consumers
tags: [ 'ckan', 'ckan_ini', 'ckan_plugins', 'ckan_sessions', 'ckan_memcache' ]
when:
- ckan_memcache_sessions is defined
- not ckan_memcache_sessions
tags: [ 'ckan', 'ckan_sessions', 'ckan_memcache' ]

View File

@ -1,276 +0,0 @@
---
- block:
- name: Install some packages dependencies
apt: name={{ ckan_additional_packages }} state=latest update_cache=yes cache_valid_time=3600
tags: [ 'ckan', 'geonetwork', 'ckan_plugins', 'ckan_pip_deps' ]
- block:
- name: Upgrade pip inside the virtualenv
pip: name=pip virtualenv={{ ckan_virtenv }} state=latest
tags: [ 'ckan', 'geonetwork', 'ckan_plugins', 'ckan_pip_deps' ]
- name: Install some python versioned plugins dependencies inside the CKAN virtualenv
pip: name={{ item.name }} virtualenv={{ ckan_virtenv }} version={{ item.version }} state={{ item.state }}
with_items: '{{ ckan_pip_versioned_dependencies }}'
tags: [ 'ckan', 'geonetwork', 'ckan_plugins', 'ckan_pip_deps' ]
- name: Install some python plugins dependencies inside the CKAN virtualenv
pip: name={{ ckan_pip_dependencies }} virtualenv={{ ckan_virtenv }} state=present
tags: [ 'ckan', 'geonetwork', 'ckan_plugins', 'ckan_pip_deps' ]
- name: Download the CKAN ckanext-harvest plugin
pip: name='{{ ckan_ckanext_harvester_url }}' virtualenv={{ ckan_virtenv }} state={{ ckan_plugins_state }} editable=True
when: ckan_geonetwork_harvester | bool
register: ckanext_harvest_install
notify:
- Restart CKAN
- Restart fetch and gather consumers
tags: [ 'ckan', 'geonetwork', 'ckan_plugins' ]
- name: Download the CKAN ckanext-harvest requirements
pip: requirements={{ ckan_virtenv }}/src/ckanext-harvest/pip-requirements.txt virtualenv={{ ckan_virtenv }} state={{ ckan_plugins_state }}
when: ckan_geonetwork_harvester | bool
notify: Restart fetch and gather consumers
tags: [ 'ckan', 'geonetwork', 'ckan_plugins' ]
- name: Initialize the CKAN ckanext-harvest plugin
shell: . /usr/lib/ckan/default/bin/activate ; paster --plugin=ckanext-harvest harvester initdb --config={{ ckan_config_file }}
when:
- ckanext_harvest_install is changed
- ckan_init_db_and_solr | bool
notify: Restart fetch and gather consumers
tags: [ 'ckan', 'geonetwork', 'ckan_plugins' ]
- name: Download the CKAN ckanext-spatial plugin
pip: name='{{ ckan_ckanext_spatial_url }}' virtualenv={{ ckan_virtenv }} state={{ ckan_plugins_state }} editable=True
notify: Restart CKAN
when: ckan_geonetwork_harvester | bool
register: ckanext_spatial_install
tags: [ 'ckan', 'ckan_spatial', 'ckan_plugins' ]
- name: Download the CKAN ckanext-spatial requirements
pip: requirements={{ ckan_virtenv }}/src/ckanext-spatial/pip-requirements.txt virtualenv={{ ckan_virtenv }} state=present
when: ckan_geonetwork_harvester | bool
tags: [ 'ckan', 'ckan_spatial', 'ckan_plugins' ]
- name: Initialize the CKAN ckanext-spatial plugin
shell: . /usr/lib/ckan/default/bin/activate ; paster --plugin=ckanext-spatial spatial initdb --config={{ ckan_config_file }}
when:
- ckanext_spatial_install is changed
- ckan_init_db_and_solr | bool
tags: [ 'ckan', 'ckan_spatial', 'ckan_plugins' ]
- name: Download the CKAN ckanext-geoview plugin
pip: name='{{ ckan_geoview_url }}' virtualenv={{ ckan_virtenv }} state={{ ckan_plugins_state }}
notify: Restart CKAN
when: ckan_geoview | bool
tags: [ 'ckan', 'ckan_geoview', 'ckan_plugins' ]
- name: Download the latest version of the CKAN ckanext-dcat plugin code on CKAN version >= 2.8
pip: name={{ ckan_dcat_url }} virtualenv={{ ckan_virtenv }} state={{ ckan_plugins_state }} editable=True
when:
- ckan_dcat | bool
- ckan_version is version_compare('2.8', '>=')
notify: Restart CKAN
tags: [ 'ckan', 'ckan_dcat', 'ckan_plugins' ]
- name: Download the CKAN ckanext-dcat plugin code. Stick to version 1.0.0 on CKAN < 2.8
pip: name={{ ckan_dcat_1_0_0_url }} virtualenv={{ ckan_virtenv }} state={{ ckan_plugins_state }} editable=True
when:
- ckan_dcat | bool
- ckan_version is version_compare('2.8', '<')
notify: Restart CKAN
tags: [ 'ckan', 'ckan_dcat', 'ckan_plugins' ]
- name: Download the CKAN ckanext-dcat requirements
pip: requirements={{ ckan_virtenv }}/src/ckanext-dcat/requirements.txt virtualenv={{ ckan_virtenv }} state=present
when: ckan_dcat | bool
notify: Restart CKAN
tags: [ 'ckan', 'ckan_dcat', 'ckan_plugins' ]
- name: Download the CKAN Geonetwork plugin code
git: repo={{ ckan_geonetwork_harvester_url }} dest=/usr/lib/ckan/default/src/ckanext-geonetwork force=yes update={{ ckan_git_plugins_state }}
when: ckan_geonetwork_harvester | bool
register: install_geonetwork_harvester
tags: [ 'ckan', 'ckan_geonetwork', 'ckan_plugins' ]
- name: Install the CKAN Geonetwork plugin code
shell: . /usr/lib/ckan/default/bin/activate ; cd /usr/lib/ckan/default/src/ckanext-geonetwork ; python setup.py develop
when: install_geonetwork_harvester is changed
notify: Restart CKAN
tags: [ 'ckan', 'ckan_geonetwork', 'ckan_plugins' ]
- name: Install the script that updates the tracking data
template: src=tracker_update.sh.j2 dest={{ ckan_virtenv }}/bin/tracker_update owner={{ ckan_shell_user }} group={{ ckan_shell_user }} mode=0555
when: ckan_geonetwork_harvester | bool
tags: [ 'ckan', 'ckan_geonetwork', 'ckan_plugins', 'tracker' ]
- name: Install the cron job that runs the tracker update script
cron: name="tracker update" minute="0" hour="3" job="{{ ckan_virtenv }}/bin/tracker_update > {{ ckan_logdir }}/tracker_update.log 2>&1" user={{ ckan_shell_user }}
when: ckan_geonetwork_harvester | bool
tags: [ 'ckan', 'ckan_geonetwork', 'ckan_plugins', 'tracker' ]
- name: Download the CKAN PDF viewer plugin
pip: name='{{ ckan_ckanext_pdfview_url }}' virtualenv={{ ckan_virtenv }} state={{ ckan_plugins_state }}
when: ckan_pdfview | bool
notify: Restart CKAN
tags: [ 'ckan', 'ckan_pdfview', 'ckan_plugins' ]
- name: Download the CKAN Privatedatasets extension for CKAN 2.8
pip: name='{{ ckan_privatedatasets_url }}' virtualenv={{ ckan_virtenv }} state={{ ckan_plugins_state }}
when:
- ckan_privatedatasets | bool
- ckan_version is version_compare('2.8', '>=')
notify: Restart CKAN
tags: [ 'ckan', 'ckan_privdatasets', 'ckan_plugins' ]
- name: Download the CKAN Privatedatasets extension for CKAN 2.6
pip: name='{{ ckan_privatedatasets_url }}' virtualenv={{ ckan_virtenv }} state={{ ckan_plugins_state }} version=0.2.19
when:
- ckan_privatedatasets | bool
- ckan_version is version_compare('2.8', '<')
notify: Restart CKAN
tags: [ 'ckan', 'ckan_privdatasets', 'ckan_plugins' ]
- name: Download the CKAN hierarchy plugin code
pip: name='{{ ckan_hierarchy_url }}' virtualenv={{ ckan_virtenv }} state={{ ckan_plugins_state }} editable=True
when: ckan_hierarchy | bool
notify: Restart CKAN
tags: [ 'ckan', 'ckan_hierarchy', 'ckan_plugins' ]
- name: Download the CKAN pages plugin code
pip: name='{{ ckan_pages_url }}' virtualenv={{ ckan_virtenv }} state={{ ckan_plugins_state }} editable=True
when: ckan_pages | bool
notify: Restart CKAN
tags: [ 'ckan', 'ckan_pages', 'ckan_plugins' ]
- name: Download the CKAN LDAP plugin code
git: repo={{ ckan_ldap_url }} dest=/usr/lib/ckan/default/src/ckanext-ldap force=yes update={{ ckan_git_plugins_state }}
when: ckan_ldap | bool
register: install_ldap_plugin
tags: [ 'ckan', 'ckan_ldap', 'ckan_plugins' ]
- name: Enable the CKAN ldap plugin code
shell: . /usr/lib/ckan/default/bin/activate ; cd /usr/lib/ckan/default/src/ckanext-ldap ; python setup.py develop
when: install_ldap_plugin is changed
notify: Restart CKAN
tags: [ 'ckan', 'ckan_ldap', 'ckan_plugins' ]
- name: Download the CKAN LIRE plugin code
git: repo={{ ckan_ckanext_lire_url }} dest={{ ckan_virtenv }}/src/ckanext-lire force=yes update={{ ckan_git_plugins_state }}
when: ckan_ckanext_lire | bool
register: install_lire_plugin
tags: [ 'ckan', 'ckan_lire', 'ckan_plugins' ]
- name: Activate the CKAN Lire plugin code
shell: . /usr/lib/ckan/default/bin/activate ; cd {{ ckan_virtenv }}/src/ckanext-lire ; pip install -e ./
when: install_lire_plugin is changed
notify: Restart CKAN
tags: [ 'ckan', 'ckan_lire', 'ckan_plugins' ]
- name: Download the KATA CKAN OAI-PMH plugin
pip: name='{{ ckan_oai_pmh_url }}' virtualenv={{ ckan_virtenv }} state={{ ckan_plugins_state }} editable=True
when: ckan_kata_oai_pmh | bool
notify: Restart CKAN
tags: [ 'ckan', 'ckan_oai_pmh', 'ckan_plugins' ]
- name: Download the KATA CKAN ckanext-oaiphm requirements
pip: requirements={{ ckan_virtenv }}/src/ckanext-oaipmh/requirements.txt virtualenv={{ ckan_virtenv }} state={{ ckan_plugins_state }}
when: ckan_kata_oai_pmh | bool
notify: Restart CKAN
tags: [ 'ckan', 'ckan_oai_pmh', 'ckan_plugins' ]
- name: Download the KATA CKAN plugin
pip: name='{{ ckan_oai_pmh_kata_plugin_url }}' virtualenv={{ ckan_virtenv }} state={{ ckan_plugins_state }} editable=True
when: ckan_kata_oai_pmh | bool
notify: Restart CKAN
tags: [ 'ckan', 'ckan_oai_pmh', 'ckan_plugins' ]
- name: Download the KATA CKAN requirements
pip: requirements={{ ckan_virtenv }}/src/ckanext-kata/requirements.txt virtualenv={{ ckan_virtenv }} state={{ ckan_plugins_state }}
when: ckan_kata_oai_pmh | bool
notify: Restart CKAN
tags: [ 'ckan', 'ckan_oai_pmh', 'ckan_plugins' ]
- name: Download the opendatasoft CKAN OAI-PMH plugin
pip: name='{{ ckan_oai_pm_url }}' virtualenv={{ ckan_virtenv }} state={{ ckan_plugins_state }} editable=True
when: ckan_oai_pm | bool
notify: Restart CKAN
tags: [ 'ckan', 'ckan_oai_pm', 'ckan_plugins' ]
- name: Download the CKAN google analytics plugin python requirements
pip: name='genshi' virtualenv={{ ckan_virtenv }} state={{ ckan_plugins_state }}
when: ckan_google_analytics | bool
notify: Restart CKAN
tags: [ 'ckan', 'ckan_google_analytics', 'ckan_plugins' ]
- name: Download the CKAN google analytics plugin
pip: name='{{ ckan_google_analytics_url }}' virtualenv={{ ckan_virtenv }} editable=true state={{ ckan_ga_plugin_state }}
when: ckan_google_analytics | bool
register: install_ckan_google_analytics
notify: Restart CKAN
tags: [ 'ckan', 'ckan_google_analytics', 'ckan_plugins' ]
- name: Setup the CKAN google analytics plugin
shell: . /usr/lib/ckan/default/bin/activate ; cd /usr/lib/ckan/default/src/ckanext-googleanalytics ; python setup.py develop
when: install_ckan_google_analytics is changed
notify: Restart CKAN
tags: [ 'ckan', 'ckan_google_analytics', 'ckan_plugins' ]
- name: Download the CKAN google analytics reports plugin
pip: name='{{ ckan_ga_reports_url }}' virtualenv={{ ckan_virtenv }} state={{ ckan_plugins_state }} editable=True
when: ckan_ga_reports | bool
register: install_ckan_ga_reports
tags: [ 'ckan', 'ckan_google_analytics', 'ckan_ga_reports', 'ckan_plugins' ]
- name: Setup the CKAN google analytics reports plugin
shell: . /usr/lib/ckan/default/bin/activate ; cd /usr/lib/ckan/default/src/ckanext-ga-report ; paster initdb --config={{ ckan_config_file }}
when:
- install_ckan_ga_reports is changed
- ckan_init_db_and_solr | bool
notify: Restart CKAN
tags: [ 'ckan', 'ckan_google_analytics', 'ckan_ga_reports', 'ckan_plugins' ]
- name: Download the CKAN star ratings plugin
pip: name='{{ ckan_star_ratings_url }}' virtualenv={{ ckan_virtenv }} editable=true state={{ ckan_star_ratings_state }}
notify: Restart CKAN
when: ckan_star_ratings | bool
register: install_ckan_star_ratings
tags: [ 'ckan', 'ckan_star_ratings', 'ckan_plugins' ]
- name: Setup the CKAN star ratings plugin
shell: . /usr/lib/ckan/default/bin/activate ; paster --plugin=ckanext-rating rating init --config={{ ckan_config_file }}
notify: Restart CKAN
when:
- install_ckan_star_ratings is changed
- ckan_star_ratings | bool
tags: [ 'ckan', 'ckan_star_ratings', 'ckan_plugins' ]
- name: Install the CKAN profiler plugin
pip: name='{{ ckan_profiler_url }}' virtualenv={{ ckan_virtenv }} state={{ ckan_plugins_state }} editable=True
when: ckan_profiler | bool
notify: Restart CKAN
tags: [ 'ckan', 'ckan_profiler', 'ckan_plugins' ]
- name: Create the profiler plugin log directory
become_user: root
file: dest=/var/log/ckan-profiler owner=www-data group=www-data state=directory
when: ckan_profiler | bool
tags: [ 'ckan', 'ckan_profiler', 'ckan_plugins' ]
- name: Download the CKAN-DATESEARCH plugin code
git: repo={{ ckan_datesearch_url }} dest=/usr/lib/ckan/default/src/ckanext-datesearch force=yes update={{ ckan_git_plugins_state }}
when: ckan_datesearch | bool
register: install_datesearch_plugin
tags: [ 'ckan', 'ckan_datesearch', 'ckan_plugins' ]
- name: Enable the CKAN-DATESEARCH plugin code
shell: . /usr/lib/ckan/default/bin/activate ; cd /usr/lib/ckan/default/src/ckanext-datesearch ; python setup.py develop
when: install_datesearch_plugin is changed
notify: Restart CKAN
tags: [ 'ckan', 'ckan_datesearch', 'ckan_plugins' ]
become: True
become_user: '{{ ckan_shell_user }}'
tags: [ 'ckan', 'ckan_plugins' ]

View File

@ -1,63 +0,0 @@
---
- name: Download the CKAN distribution
get_url: url='{{ ckan_package_url }}' dest=/srv/{{ ckan_deb_file }} force=yes
tags: [ 'ckan', 'ckan_pkg' ]
- name: Install the CKAN deb package
apt: deb=/srv/{{ ckan_deb_file }}
register: ckan_install
tags: [ 'ckan', 'ckan_pkg' ]
- name: Create the CKAN user
user: name={{ ckan_shell_user }} home={{ ckan_libdir }} createhome=no shell=/usr/sbin/nologin system=yes
- name: Configure the CKAN production configuration file excluding the plugins list
ini_file: dest={{ ckan_config_file }} section={{ item.section }} option={{ item.option }} value={{ item.value }} state={{ item.state }} backup=yes
with_items: '{{ ckan_production_ini_opts }}'
notify: Restart CKAN
tags: [ 'ckan', 'ckan_ini' ]
- name: Install the apache.wsgi
template: src=apache.wsgi.j2 dest={{ ckan_confdir }}/apache.wsgi
notify: Restart CKAN
tags: [ 'ckan', 'apache', 'ckan_pkg' ]
- name: Create the base directory for the CKAN file storage
file: dest={{ ckan_file_storage_dir }} state=directory owner={{ apache_user }} group={{ ckan_shell_user }} mode=2770
tags: ckan
- name: Fix the CKAN harvesting storage permissions
file: dest={{ ckan_file_harvesting_dir }} state=directory owner={{ apache_user }} group={{ ckan_shell_user }} mode=2770 recurse=yes
tags: ckan
- name: Install the authorization file for the psql command, used when the database is on a remote server
template: src=pgpass.j2 dest=/root/.pgpass owner=root mode=0600
when: psql_db_host != 'localhost'
tags: [ 'pg_backup', 'postgresql', 'postgres' ]
- name: Initialize the CKAN database
shell: ckan db init && touch {{ ckan_libdir }}/.ckan_db_initialized
args:
creates: '{{ ckan_libdir }}/.ckan_db_initialized'
when: ckan_init_db_and_solr
tags: ckan
- name: Initialize the CKAN datastore database
shell: ckan datastore set-permissions | psql --set ON_ERROR_STOP=1 -h {{ psql_db_host }} -U {{ ckan_db_user }} -w {{ ckan_datastore_db_name }} && touch {{ ckan_libdir }}/.ckan_datastore_db_initialized
args:
creates: '{{ ckan_libdir }}/.ckan_datastore_db_initialized'
when: ckan_init_db_and_solr
tags: ckan
- name: Create the pip cache directory with the right permissions
file: dest={{ ckan_libdir }}/.cache owner={{ ckan_shell_user }} group={{ ckan_shell_user }} state=directory
tags: [ 'ckan', 'ckan_user' ]
- name: Assign the CKAN virtenv dir to the ckan user
file: dest={{ ckan_virtenv }} recurse=yes owner={{ ckan_shell_user }} group={{ ckan_shell_user }}
tags: [ 'ckan', 'ckan_user', 'ckan_permissions' ]
- name: Create a log directory for the jobs run by the ckan user
file: dest={{ ckan_logdir }} state=directory owner={{ ckan_shell_user }} group={{ ckan_shell_user }}
tags: [ 'ckan', 'ckan_user' ]

View File

@ -1,11 +0,0 @@
---
- name: Restart apache
service: name=apache2 state=restarted enabled=yes
when: ckan_install is changed
tags: ckan
- name: Restart nginx
service: name=nginx state=restarted enabled=yes
when: ckan_install is changed
tags: ckan

View File

@ -1,7 +0,0 @@
---
- import_tasks: ckan.yml
- import_tasks: ckan-plugins.yml
- import_tasks: ckan-memcache.yml
- import_tasks: ckan-config.yml
- import_tasks: enable-ckan.yml
- import_tasks: supervisor.yml

View File

@ -1,18 +0,0 @@
---
- name: Install the supervisor daemon needed to automate the gather and fetch operations
apt: pkg={{ ckan_gather_fetch_pkgs }} state=present
tags: [ 'ckan', 'ckan_harvest' ]
- name: Install the gather and fetch supervisor configuration
template: src=ckan_harvesting.conf.j2 dest=/etc/supervisor/conf.d/ckan_harvesting.conf owner=root group=root mode=0644
notify: Reconfigure the supervisor daemon
tags: [ 'ckan', 'ckan_harvest' ]
- name: Install a cron job that runs the harvesters
cron: name="CKAN harvester" minute="0" job="{{ ckan_virtenv }}/bin/paster --plugin=ckanext-harvest harvester run --config={{ ckan_config_file }} > {{ ckan_logdir }}/harvester_run.log 2>&1" user={{ ckan_shell_user }}
when: ckan_harvester_run
tags: [ 'ckan', 'ckan_harvest', 'ckan_harvest_cron' ]
- name: Ensure that supervisord is running and enabled
service: name=supervisor state=started enabled=yes
tags: [ 'ckan', 'ckan_harvest' ]

View File

@ -1,14 +0,0 @@
import os
activate_this = os.path.join('/usr/lib/ckan/default/bin/activate_this.py')
execfile(activate_this, dict(__file__=activate_this))
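# Note: execfile() exists only on Python 2, so this WSGI entry point
# assumes the CKAN virtualenv runs a Python 2 interpreter.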
from paste.deploy import loadapp
config_filepath = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'production.ini')
from paste.script.util.logging_config import fileConfig
fileConfig(config_filepath)
_application = loadapp('config:%s' % config_filepath)
def application(environ, start_response):
environ['wsgi.url_scheme'] = environ.get('HTTP_X_URL_SCHEME', 'http')
return _application(environ, start_response)

View File

@ -1,31 +0,0 @@
; ===============================
; ckan harvester
; ===============================
[program:ckan_gather_consumer]
command={{ ckan_virtenv }}/bin/paster --plugin=ckanext-harvest harvester gather_consumer --config={{ ckan_config_file }}
; user that owns virtual environment.
user={{ ckan_shell_user }}
numprocs=1
stdout_logfile={{ ckan_logdir }}/gather_consumer.log
stderr_logfile={{ ckan_logdir }}/gather_consumer.log
autostart=true
autorestart=true
startsecs=10
[program:ckan_fetch_consumer]
command={{ ckan_virtenv }}/bin/paster --plugin=ckanext-harvest harvester fetch_consumer --config={{ ckan_config_file }}
; user that owns virtual environment.
user={{ ckan_shell_user }}
numprocs=1
stdout_logfile={{ ckan_logdir }}/fetch_consumer.log
stderr_logfile={{ ckan_logdir }}/fetch_consumer.log
autostart=true
autorestart=true
startsecs=10

View File

@ -1,8 +0,0 @@
# Loop psql_db_data to add multiple databases
{% if psql_db_data is defined %}
{% for db in psql_db_data %}
{%if db.pwd is defined %}
{{ psql_db_host }}:{{ psql_db_port }}:{{ db.name }}:{{ db.user }}:{{ db.pwd }}
{% endif %}
{% endfor %}
{% endif %}
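For one database entry, the template renders the standard libpq format host:port:dbname:user:password. A hypothetical rendered result:

echo 'db.example.org:5432:ckan:ckan:s3cret' > /root/.pgpass
chmod 0600 /root/.pgpass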

View File

@ -1,17 +0,0 @@
#!/bin/bash
LOCK_DIR={{ ckan_logdir }}
LOCK_FILE=$LOCK_DIR/.index_rebuild.lock
. {{ ckan_virtenv }}/bin/activate
if [ -f $LOCK_FILE ] ; then
echo 'A lock file is present, exiting'
exit 2
fi
echo "cron pid: ${$}" > $LOCK_FILE
paster --plugin=ckan tracking update -c {{ ckan_config_file }}
paster --plugin=ckan search-index rebuild -r -c {{ ckan_config_file }}
rm -f $LOCK_FILE
exit 0
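An optional hardening sketch for the lock handling above: an EXIT trap would drop the lock even if one of the paster runs is interrupted, instead of relying on reaching the final rm:

trap 'rm -f "$LOCK_FILE"' EXIT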

View File

@ -2,6 +2,9 @@
# First things first: install the basic requirements with a raw command
- name: Install python 2 and python-apt
raw: "apt-get update; apt-get install -y python python-apt lsb-release"
when:
- ansible_distribution == 'Ubuntu'
- ansible_distribution_version is version_compare('20.04', '<=')
tags: [ 'python', 'ansible_setup' ]
- name: Install python-software-properties

View File

@ -4,7 +4,7 @@ dell_utilities_base_dir: /opt/dell_dsu
dell_utilities_packages:
- dell-system-update
- srvadmin-all
- syscfg
#- syscfg
dell_utilities_raid_packages:
- raidcfg

View File

@ -3,11 +3,20 @@
- name: Create the Dell utilities directory
file: dest={{ dell_utilities_base_dir }} state=directory
- name: Check if the Dell utility installer is already present
stat:
path: /opt/dell_dsu/dsu_installer
register: dell_dsu_installer_bin
- name: Download the Dell utility installer
get_url: url={{ dell_utilities_installer_url }} dest={{ dell_utilities_base_dir }}/dsu_installer mode=0700
when: not dell_dsu_installer_bin.stat.exists
- name: Run the installer
command: '{{ dell_utilities_base_dir }}/dsu_installer'
shell: >
{{ dell_utilities_base_dir }}/dsu_installer && touch {{ dell_utilities_base_dir }}/.dsu_installer_run
args:
creates: '{{ dell_utilities_base_dir }}/.dsu_installer_run'
when:
- "'Dell' in ansible_system_vendor"

View File

@ -21,7 +21,7 @@ joomla_php_prereq:
- 'php{{ php_version }}-curl'
- php-pear
- php-date
- php-xml-serializer
# - php-xml-serializer
- imagemagick
joomla_use_postgresql: True

View File

@ -1,12 +0,0 @@
---
sysctl_custom_file: /etc/sysctl.d/90-custom-values.conf
sysctl_opts_reload: yes
sysctl_custom_file_state: present
# Only name and value are mandatory. The others have defaults
#systemctl_custom_options:
# - { name: 'net.nf_conntrack_max', value: '32768', sysctlfile: '{{ sysctl_custom_file }}', sysctl_reload: '{{ sysctl_opts_reload }}', sysctlfile_state: '{{ sysctl_custom_file_state }}' }
disable_ipv6: True
ipv6_sysctl_value: 1
ipv6_sysctl_file: /etc/sysctl.d/10-ipv6-disable.conf

View File

@ -1,29 +0,0 @@
---
- block:
- name: Ensure that the /etc/sysctl.d directory exists
file: path=/etc/sysctl.d state=directory owner=root group=root
- name: Disable the in kernel ipv6 support
sysctl: name={{ item }} value=1 sysctl_file={{ ipv6_sysctl_file }} reload=yes state=present
with_items:
- net.ipv6.conf.all.disable_ipv6
- net.ipv6.conf.default.disable_ipv6
- net.ipv6.conf.lo.disable_ipv6
- net.ipv6.conf.{{ ansible_default_ipv4.interface }}.disable_ipv6
when: disable_ipv6
- name: enable the in kernel ipv6 support
sysctl: name={{ item }} value=0 sysctl_file={{ ipv6_sysctl_file }} reload=yes state=present
with_items:
- net.ipv6.conf.all.disable_ipv6
- net.ipv6.conf.default.disable_ipv6
- net.ipv6.conf.lo.disable_ipv6
- net.ipv6.conf.{{ ansible_default_ipv4.interface }}.disable_ipv6
when: not disable_ipv6
- name: Set the custom sysctl values
sysctl: name={{ item.name }} value={{ item.value }} sysctl_file={{ item.sysctlfile | default('/etc/sysctl.d/90-custom-values.conf') }} reload={{ item.sysctl_reload | default(true) }} state={{ item.sysctlfile_state | default('present') }}
with_items: '{{ systemctl_custom_options | default([]) }}'
when: systemctl_custom_options is defined
tags: [ 'sysctl', 'kernel' ]
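A minimal hypothetical vars entry feeding the custom-values task above (only name and value are required, as noted in the role defaults; file path assumed):

cat >> group_vars/all.yml <<'EOF'
systemctl_custom_options:
  - { name: 'vm.swappiness', value: '10' }
EOF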

View File

@ -37,7 +37,7 @@ default_python_packages_trusty:
- python-lxml
- python-boto
default_python_packages:
default_python_packages_bionic:
- python-lxml
- python3-lxml
- python-boto
@ -45,6 +45,14 @@ default_python_packages:
- python-setuptools
- python3-setuptools
default_python_packages_focal:
- python-lxml
- python3-lxml
- python-ipaddress
- python3-ipaddr
- python-setuptools
- python3-setuptools
# Set this variable in your playbook
# additional_packages:
# - pkg1

View File

@ -8,6 +8,5 @@ dependencies:
state: latest
- role: '../../library/roles/motd'
- role: '../../library/roles/ntp'
- role: '../../library/roles/linux-kernel-sysctl'
- role: '../../library/roles/sshd_config'
- role: '../../library/roles/fail2ban'

View File

@ -3,13 +3,6 @@
apt: pkg=aptitude state=present cache_valid_time=1800
tags: packages
- name: Install the basic python packages. Not Trusty
apt: pkg={{ default_python_packages }} state=present cache_valid_time=1800
when:
- ansible_distribution == "Ubuntu"
- ansible_distribution_release != "trusty"
tags: packages
- name: Install the basic python packages on trusty
apt: pkg={{ default_python_packages_trusty }} state=present update_cache=yes cache_valid_time=1800
when:
@ -17,6 +10,20 @@
- ansible_distribution_release == "trusty"
tags: packages
- name: Install the basic python packages on Ubuntu Bionic
apt: pkg={{ default_python_packages_bionic }} state=present cache_valid_time=1800
when:
- ansible_distribution == "Ubuntu"
- ansible_distribution_release == "bionic"
tags: packages
- name: Install the basic python packages on Ubuntu Focal
apt: pkg={{ default_python_packages_focal }} state=present cache_valid_time=1800
when:
- ansible_distribution == "Ubuntu"
- ansible_distribution_release == "focal"
tags: packages
- name: Install software-properties-common if needed
apt: pkg=software-properties-common state=present update_cache=yes cache_valid_time=1800
when: