isti-puma: New separate playbook to install and configure the puma dev server.

dnet-openaire/group_vars/puma_dev: Remove from there.
library/roles/joomla-org: Role to install the joomla distribution and dependencies.
Andrea Dell'Amico 2016-02-02 17:40:01 +01:00
parent 200677f66a
commit bf9c783f9b
94 changed files with 291 additions and 8 deletions


@@ -52,11 +52,13 @@ apache_basic_auth_modules:
 # - { username:'', password:'', state:'present,absent', auth_file:'path_to_file' }
 #
-# apache_additional_packages:
+apache_additional_packages: False
+apache_additional_packages_list:
 # - libapache2-mod-uwsgi
 # - ...
 #
 # Set this variable to load the modules you need
-#apache_additional_modules:
+apache_additional_modules: False
+apache_additional_modules_list:
 # -
 # -
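The new defaults separate whether a feature is enabled (a boolean flag) from what gets installed or loaded (a list); the tasks consume the flag in when: and the list in with_items:. As a minimal sketch, a group_vars file enabling both features might look like this (the group name, package, and module names are placeholders, not part of this commit):

---
# group_vars/my_web_servers (hypothetical)
apache_additional_packages: True
apache_additional_packages_list:
  - libapache2-mod-uwsgi
apache_additional_modules: True
apache_additional_modules_list:
  - rewrite
  - headers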


@@ -17,7 +17,7 @@
 - name: Load additional apache modules if any
   apache2_module: name={{ item }} state=present
-  with_items: apache_additional_modules
-  when: apache_additional_modules is defined
+  with_items: apache_additional_modules_list
+  when: apache_additional_modules
   notify: apache2 reload
   tags: [ 'apache', 'apache_mods' ]
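The notify: apache2 reload assumes a matching handler defined elsewhere in the role; the handler file is not part of this diff, but a typical definition would look like this:

---
# handlers/main.yml (not shown in this diff; a typical definition)
- name: apache2 reload
  service: name=apache2 state=reloaded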


@@ -1,13 +1,13 @@
 ---
 - name: Install the apache packages
-  apt: pkg={{ item }} state=installed force=yes
+  apt: pkg={{ item }} state=installed
   with_items: '{{ apache_packages }}'
   tags: [ 'apache', 'apache_main_packages' ]

 - name: Install the apache additional packages, if any
-  apt: pkg={{ item }} state=installed force=yes
-  with_items: '{{ apache_additional_packages }}'
-  when: apache_additional_packages is defined
+  apt: pkg={{ item }} state=installed
+  with_items: '{{ apache_additional_packages_list }}'
+  when: apache_additional_packages
   tags: [ 'apache', 'apache_additional_packages' ]

 - name: Load the required worker module
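Dropping force=yes is the substantive change here: on the apt module it corresponds to apt-get --force-yes, which permits unauthenticated packages and downgrades. If an unauthenticated install is ever genuinely required, later Ansible versions expose that choice explicitly; a hedged sketch with a placeholder package name:

- name: Install a package from an unsigned repository
  apt:
    pkg: some-package            # placeholder name, not part of this commit
    state: present
    allow_unauthenticated: yes   # explicit, unlike the blanket force=yes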


@@ -1,6 +1,8 @@
 ---
+# Ignore errors because it fails if the user is already present and used to run a service.
 - name: Create users needed to operate services other than the dnet ones
   user: name={{ item.user }} comment="{{ item.user }}" home={{ item.home }} createhome={{ item.createhome }} shell={{ item.shell }}
   with_items: dnet_other_services_users
   when: dnet_other_services_users is defined
+  ignore_errors: True
   tags: [ 'users', 'dnet' ]
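ignore_errors: True keeps the play going, but it also masks unrelated failures such as typos in the item data. A stricter variant, sketched here as an assumption rather than as part of this commit, registers the result and fails only when the error is not the busy-user case:

- name: Create users needed to operate services other than the dnet ones
  user: name={{ item.user }} comment="{{ item.user }}" home={{ item.home }} createhome={{ item.createhome }} shell={{ item.shell }}
  with_items: dnet_other_services_users
  when: dnet_other_services_users is defined
  register: dnet_users_result
  # 'currently used' matches the usermod busy-user error message;
  # adjust the substring if the actual message differs (assumption).
  failed_when: (dnet_users_result | failed) and ('currently used' not in (dnet_users_result.msg | default('')))
  tags: [ 'users', 'dnet' ]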


@@ -0,0 +1,20 @@
---
joomla_dist_name: Joomla
joomla_dist_version: 3.4.8
joomla_dist_file: '{{ joomla_dist_name }}_{{ joomla_dist_version }}-Stable-Full_Package.zip'
joomla_tar_url: 'https://github.com/joomla/joomla-cms/releases/download/{{ joomla_dist_version }}/{{ joomla_dist_file }}'
joomla_download_dir: /srv/joomla
joomla_install_dir: /var/www
joomla_php_prereq:
- php5-json
- php5-intl
- php5-cli
- php5-pgsql
- php5-gd
- php5-memcached
- php-pear
- php-date
- php-xml-serializer
- imagemagick
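Because joomla_dist_file and joomla_tar_url are derived from the name and version, overriding the version is enough to pin a different release. A minimal sketch of a hypothetical group_vars override:

---
# Hypothetical group_vars override; only versions that follow the same
# release naming scheme will resolve to a valid download URL.
joomla_dist_version: 3.4.8      # replace with the release to pin
joomla_install_dir: /var/www/joomla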

joomla-org/tasks/main.yml

@@ -0,0 +1,43 @@
---
- name: Install the joomla php prerequisites
  apt: name={{ item }} state=present
  with_items: joomla_php_prereq
  tags: joomla

- name: Ensure that the download and install dirs exist
  file: path={{ item }} state=directory
  with_items:
    - '{{ joomla_download_dir }}/joomla-unpacked'
    - '{{ joomla_install_dir }}'
  tags: joomla

- name: Download the joomla distribution file
  get_url: url={{ joomla_tar_url }} dest={{ joomla_download_dir }}
  register: joomla_download
  tags: joomla

- name: Unpack the joomla distribution file
  unarchive: copy=no src={{ joomla_download_dir }}/{{ joomla_dist_file }} dest={{ joomla_download_dir }}/joomla-unpacked
  when: ( joomla_download | changed )
  tags: joomla

- name: Move the joomla files to the right place
  shell: cp -a {{ joomla_download_dir }}/joomla-unpacked/* {{ joomla_install_dir }}/
  args:
    creates: '{{ joomla_install_dir }}/index.php'
  with_items: phpfpm_pools
  when: ( joomla_download | changed )
  register: unpack_joomla
  tags: joomla

- name: Set the correct ownership of the joomla files
  file: dest={{ joomla_install_dir }} owner={{ item.user }} group={{ item.group }} recurse=yes state=directory
  with_items: phpfpm_pools
  when: ( unpack_joomla | changed )
  tags: joomla

- name: Remove the original joomla unpacked distribution
  command: rm -fr {{ joomla_download_dir }}/{{ joomla_dist_name }}
  when: ( unpack_joomla | changed )
  tags: joomla
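The role expects a phpfpm_pools variable (it reads item.user and item.group from each entry), presumably defined by the php-fpm setup used alongside it. A minimal usage sketch under that assumption, with placeholder host group and pool values:

---
# Hypothetical playbook applying the new role; the host group and pool
# values are placeholders, and phpfpm_pools would normally come from
# the php-fpm configuration.
- hosts: joomla_servers
  remote_user: root
  vars:
    phpfpm_pools:
      - { user: 'www-data', group: 'www-data' }
  roles:
    - joomla-org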


@@ -0,0 +1,16 @@
The analysis-extras plugin provides additional analyzers that rely
upon large dependencies/dictionaries.
It includes integration with ICU for multilingual support, and
analyzers for Chinese and Polish.
ICU relies upon lucene-libs/lucene-analyzers-icu-X.Y.jar and lib/icu4j-X.Y.jar.
Smartcn relies upon lucene-libs/lucene-analyzers-smartcn-X.Y.jar.
Stempel relies on lucene-libs/lucene-analyzers-stempel-X.Y.jar.
Morfologik relies on lucene-libs/lucene-analyzers-morfologik-X.Y.jar and lib/morfologik-*.jar.


@@ -0,0 +1,4 @@
The Clustering contrib plugin for Solr provides a generic mechanism for plugging in third party clustering implementations.
It currently provides clustering support for search results using the Carrot2 project.
See http://wiki.apache.org/solr/ClusteringComponent for how to get started.


@@ -0,0 +1,16 @@
Apache Solr - DataImportHandler
Introduction
------------
DataImportHandler is a data import tool for Solr which makes importing data from databases, XML files, and
HTTP data sources quick and easy.
Important Note
--------------
Although Solr strives to be agnostic of the Locale where the server is
running, some code paths in DataImportHandler are known to depend on the
System default Locale, Timezone, or Charset. It is recommended that when
running Solr you set the following system properties:
-Duser.language=xx -Duser.country=YY -Duser.timezone=ZZZ
where xx, YY, and ZZZ are consistent with any database server's configuration.


@@ -0,0 +1,16 @@
Apache Solr Content Extraction Library (Solr Cell)
Introduction
------------
Apache Solr Extraction provides a means for extracting and indexing content contained in "rich" documents such
as Microsoft Word and Adobe PDF (each name is a trademark of its respective owner). This contrib module
uses Apache Tika to extract content and metadata from the files, which can then be indexed. For more information,
see http://wiki.apache.org/solr/ExtractingRequestHandler
Getting Started
---------------
You will need Solr up and running. Then, simply add the extraction JAR file, plus the Tika dependencies (in the ./lib folder)
to your Solr Home lib directory. See http://wiki.apache.org/solr/ExtractingRequestHandler for more details on hooking it in
and configuring.


@@ -0,0 +1,21 @@
Apache Solr Language Identifier
Introduction
------------
This module is intended to be used while indexing documents.
It is implemented as an UpdateProcessor to be placed in an UpdateChain.
Its purpose is to identify the language of documents and tag each document with a language code.
The module can optionally map field names to their language-specific counterpart,
e.g. if the input is "title" and the language is detected as "en", map to "title_en".
Language may be detected globally for the document, and/or individually per field.
Language detector implementations are pluggable.
Getting Started
---------------
Please refer to the module documentation at http://wiki.apache.org/solr/LanguageDetection
Dependencies
------------
The Tika detector depends on Tika Core (which is part of the extraction contrib).
The Langdetect detector depends on the LangDetect library.


@@ -0,0 +1,20 @@
Apache Solr MapReduce
*Experimental* - This contrib is currently subject to change in ways that may
break back compatibility.
The Solr MapReduce contrib provides a MapReduce job that allows you to build
Solr indexes and optionally merge them into a live Solr cluster.
Example:
# Build an index with map-reduce and deploy it to SolrCloud
source $solr_distrib/example/scripts/map-reduce/set-map-reduce-classpath.sh
$hadoop_distrib/bin/hadoop --config $hadoop_conf_dir jar \
$solr_distrib/dist/solr-map-reduce-*.jar -D 'mapred.child.java.opts=-Xmx500m' \
-libjars "$HADOOP_LIBJAR" --morphline-file readAvroContainer.conf \
--zk-host 127.0.0.1:9983 --output-dir hdfs://127.0.0.1:8020/outdir \
--collection $collection --log4j log4j.properties --go-live \
--verbose "hdfs://127.0.0.1:8020/indir"


@@ -0,0 +1 @@
The test files used by this module are located in the morphlines-core module.


@@ -0,0 +1,6 @@
Apache Solr Morphlines-Cell
*Experimental* - This contrib is currently subject to change in ways that may
break back compatibility.
This contrib provides a variety of Kite Morphlines features for Solr Cell type functionality.


@@ -0,0 +1 @@
The test files used by this module are located in the morphlines-core module.


@@ -0,0 +1,6 @@
Apache Solr Morphlines-Core
*Experimental* - This contrib is currently subject to change in ways that may
break back compatibility.
This contrib provides a variety of Kite Morphlines features for Solr.


@@ -0,0 +1,109 @@
Apache Solr UIMA Metadata Extraction Library
Introduction
------------
This module is intended to be used both as an UpdateRequestProcessor while indexing documents and as a set of tokenizers/filters
to be configured inside the schema.xml for use during the analysis phase.
The purpose of UIMAUpdateRequestProcessor is to provide additional, automatically generated fields to the Solr index on the fly.
Such fields could be language, concepts, keywords, sentences, named entities, etc.
UIMA-based tokenizers/filters can be used either inside plain Lucene or as index/query analyzers to be defined
inside the schema.xml of a Solr core to create/filter tokens using specific UIMA annotations.
Getting Started
---------------
To start using the Solr UIMA Metadata Extraction Library, you should go through the following configuration steps:
1. Copy the generated solr-uima jar and its libs (under contrib/uima/lib) into a Solr libraries directory,
or set <lib/> tags in solrconfig.xml appropriately to point to those jar files:
<lib dir="../../contrib/uima/lib" />
<lib dir="../../contrib/uima/lucene-libs" />
<lib dir="../../dist/" regex="solr-uima-\d.*\.jar" />
2. Modify your schema.xml, adding the fields you want to hold the metadata and specifying proper values for the type, indexed, stored, and multiValued options;
for example, you could specify the following:
<field name="language" type="string" indexed="true" stored="true" required="false"/>
<field name="concept" type="string" indexed="true" stored="true" multiValued="true" required="false"/>
<field name="sentence" type="text" indexed="true" stored="true" multiValued="true" required="false" />
3. Modify your solrconfig.xml, adding the following snippet:
<updateRequestProcessorChain name="uima">
<processor class="org.apache.solr.uima.processor.UIMAUpdateRequestProcessorFactory">
<lst name="uimaConfig">
<lst name="runtimeParameters">
<str name="keyword_apikey">VALID_ALCHEMYAPI_KEY</str>
<str name="concept_apikey">VALID_ALCHEMYAPI_KEY</str>
<str name="lang_apikey">VALID_ALCHEMYAPI_KEY</str>
<str name="cat_apikey">VALID_ALCHEMYAPI_KEY</str>
<str name="entities_apikey">VALID_ALCHEMYAPI_KEY</str>
<str name="oc_licenseID">VALID_OPENCALAIS_KEY</str>
</lst>
<str name="analysisEngine">/org/apache/uima/desc/OverridingParamsExtServicesAE.xml</str>
<!-- Set to true if you want to continue indexing even if text processing fails.
     Default is false: in that case Solr throws a RuntimeException and the
     documents in your session are never indexed. -->
<bool name="ignoreErrors">true</bool>
<!-- This is optional. It is used for logging when text processing fails.
If logField is not specified, uniqueKey will be used as logField.
<str name="logField">id</str>
-->
<lst name="analyzeFields">
<bool name="merge">false</bool>
<arr name="fields">
<str>text</str>
</arr>
</lst>
<lst name="fieldMappings">
<lst name="type">
<str name="name">org.apache.uima.alchemy.ts.concept.ConceptFS</str>
<lst name="mapping">
<str name="feature">text</str>
<str name="field">concept</str>
</lst>
</lst>
<lst name="type">
<str name="name">org.apache.uima.alchemy.ts.language.LanguageFS</str>
<lst name="mapping">
<str name="feature">language</str>
<str name="field">language</str>
</lst>
</lst>
<lst name="type">
<str name="name">org.apache.uima.SentenceAnnotation</str>
<lst name="mapping">
<str name="feature">coveredText</str>
<str name="field">sentence</str>
</lst>
</lst>
</lst>
</lst>
</processor>
<processor class="solr.LogUpdateProcessorFactory" />
<processor class="solr.RunUpdateProcessorFactory" />
</updateRequestProcessorChain>
where VALID_ALCHEMYAPI_KEY is your AlchemyAPI Access Key. You need to register for an AlchemyAPI Access
Key to use the AlchemyAPI services: http://www.alchemyapi.com/api/register.html
where VALID_OPENCALAIS_KEY is your Calais Service Key. You need to register for a Calais Service
Key to use the Calais services: http://www.opencalais.com/apikey
The analysisEngine parameter must point to an AE descriptor at the specified path in the classpath.
The analyzeFields element must contain the input fields that need to be analyzed by UIMA;
if merge=true, their content will be merged and analyzed only once.
The fieldMappings element describes which features of which types should go in which field.
4. In your solrconfig.xml, replace the existing default update handler (<requestHandler name="/update"...) or create a new UpdateRequestHandler with the following:
<requestHandler name="/update" class="solr.XmlUpdateRequestHandler">
<lst name="defaults">
<str name="update.processor">uima</str>
</lst>
</requestHandler>
Once you're done with the configuration, you can index documents, which will be automatically enriched with the specified fields.