Merge pull request 'The roles do not have their own repository.' (#225) from adellam/ansible-roles:master into master
This commit is contained in:
commit 93b3f38993
@@ -1,16 +0,0 @@
---
prometheus_n_e_install: True
prometheus_n_e_version: 0.15.2
prometheus_n_e_dir: 'node_exporter-{{ prometheus_n_e_version }}.linux-amd64'
prometheus_n_e_file: '{{ prometheus_n_e_dir }}.tar.gz'
prometheus_n_e_download_url: 'https://github.com/prometheus/node_exporter/releases/download/v{{ prometheus_n_e_version }}/{{ prometheus_n_e_file }}'
prometheus_n_e_user: prometheus
prometheus_n_e_home: /opt/prometheus
prometheus_n_e_dist_dir: '{{ prometheus_n_e_home }}/dist'
prometheus_n_e_logdir: '/var/log/prometheus-node-exporter'
prometheus_n_e_cmd: '{{ prometheus_n_e_dist_dir }}/{{ prometheus_n_e_dir }}/node_exporter'
prometheus_n_e_port: 9100
prometheus_n_e_loglevel: info
prometheus_n_e_opts: '--web.listen-address=":{{ prometheus_n_e_port }}" --log.level={{ prometheus_n_e_loglevel }}'
# List the additional options here
prometheus_n_e_additional_opts: ''
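A minimal sketch of how these defaults could be overridden from a playbook; the role name and the extra collector flag are assumptions for illustration, not part of this diff:

# Hypothetical playbook snippet: pin a different exporter release and
# pass extra flags through prometheus_n_e_additional_opts.
- hosts: monitored
  roles:
    - role: prometheus-node-exporter        # assumed role name
      prometheus_n_e_version: 0.16.0        # example value
      prometheus_n_e_additional_opts: '--collector.textfile.directory=/var/lib/node_exporter'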
@@ -1,7 +0,0 @@
---
- name: systemd reload
  command: systemctl daemon-reload

- name: Restart node exporter
  service: name=node_exporter state=restarted
@@ -1,53 +0,0 @@
---
- block:
    - name: Create the user under which the node exporter will run
      user: name={{ prometheus_n_e_user }} home={{ prometheus_n_e_home }} createhome=no shell=/usr/sbin/nologin system=yes

    - name: Create the prometheus node exporter base directory
      file: dest={{ item }} state=directory owner=root group=root
      with_items:
        - '{{ prometheus_n_e_home }}'
        - '{{ prometheus_n_e_dist_dir }}'

    - name: Create the prometheus node exporter log directory
      file: dest={{ prometheus_n_e_logdir }} state=directory owner={{ prometheus_n_e_user }} group={{ prometheus_n_e_user }}

    - name: Download the prometheus node exporter
      get_url: url={{ prometheus_n_e_download_url }} dest=/srv/

    - name: Unarchive the prometheus distribution
      unarchive: src=/srv/{{ prometheus_n_e_file }} dest={{ prometheus_n_e_dist_dir }} remote_src=yes owner=root group=root
      args:
        creates: '{{ prometheus_n_e_dist_dir }}/{{ prometheus_n_e_dir }}/node_exporter'
      notify: Restart node exporter

    - name: Install the prometheus node exporter upstart script
      template: src=node_exporter.upstart.j2 dest=/etc/init/node_exporter.conf mode=0644 owner=root group=root
      when: ansible_service_mgr != 'systemd'

    - name: Install the prometheus node exporter systemd unit
      template: src=node_exporter.systemd.j2 dest=/etc/systemd/system/node_exporter.service mode=0644 owner=root group=root
      when: ansible_service_mgr == 'systemd'
      notify: systemd reload

    - name: Ensure that the prometheus node_exporter is started and enabled
      service: name=node_exporter state=started enabled=yes

  tags: [ 'prometheus', 'node_exporter' ]
  when: prometheus_n_e_install

- block:
    - name: Ensure that the prometheus node_exporter is stopped and disabled
      service: name=node_exporter state=stopped enabled=no

    - name: Remove the prometheus node exporter upstart script
      file: dest=/etc/init/node_exporter.conf state=absent
      when: ansible_service_mgr != 'systemd'

    - name: Remove the prometheus node exporter systemd unit
      file: dest=/etc/systemd/system/node_exporter.service state=absent
      when: ansible_service_mgr == 'systemd'
      notify: systemd reload

  tags: [ 'prometheus', 'node_exporter' ]
  when: not prometheus_n_e_install
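The two blocks above are mutually exclusive on prometheus_n_e_install: the first installs and starts the exporter, the second tears it down. A hedged sketch of driving the removal path (the role name is assumed):

# Hypothetical: re-run the role with the flag flipped to stop, disable
# and deconfigure the exporter on a host being decommissioned.
- hosts: decommissioned
  roles:
    - role: prometheus-node-exporter        # assumed role name
      prometheus_n_e_install: False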
@@ -1,17 +0,0 @@
[Unit]
Description=node_exporter - Prometheus exporter for machine metrics.
After=network.target

[Service]
Type=simple
Restart=on-failure

User={{ prometheus_n_e_user }}
Group={{ prometheus_n_e_user }}

ExecStart={{ prometheus_n_e_cmd }} {{ prometheus_n_e_opts }} {{ prometheus_n_e_additional_opts }} --collector.systemd

[Install]
WantedBy=multi-user.target
Alias=prometheus_node_exporter.service
@@ -1,12 +0,0 @@
description "Prometheus node exporter"
start on (local-filesystems and net-device-up IFACE!=lo)
stop on runlevel [016]

respawn
respawn limit 10 5
setuid {{ prometheus_n_e_user }}
setgid {{ prometheus_n_e_user }}

script
  exec {{ prometheus_n_e_cmd }} {{ prometheus_n_e_opts }} {{ prometheus_n_e_additional_opts }} > {{ prometheus_n_e_logdir }}/node_exporter.log 2>&1
end script
@@ -1,14 +0,0 @@
---
prometheus_install: True
prometheus_version: 2.2.1
prometheus_dir: 'prometheus-{{ prometheus_version }}.linux-amd64'
prometheus_file: '{{ prometheus_dir }}.tar.gz'
prometheus_download_url: 'https://github.com/prometheus/prometheus/releases/download/v{{ prometheus_version }}/{{ prometheus_file }}'
prometheus_user: prometheus
prometheus_home: /opt/prometheus
prometheus_dist_dir: '{{ prometheus_home }}/dist'
prometheus_confdir: '/opt/prometheus/conf'
prometheus_cmd: '{{ prometheus_dist_dir }}/{{ prometheus_dir }}/prometheus'
prometheus_loglevel: info
prometheus_http_port: 9090
prometheus_opts: '--storage.tsdb.retention=360d'
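These defaults keep 360 days of TSDB data; a sketch of shortening the retention and pinning a newer release via group_vars (the file name and values are illustrative only):

# group_vars/prometheus_servers.yml (hypothetical file)
prometheus_version: 2.3.2                      # example newer release
prometheus_opts: '--storage.tsdb.retention=90d'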
@@ -1,21 +0,0 @@
description "Prometheus"
start on (local-filesystems and net-device-up IFACE!=lo)
stop on runlevel [016]

respawn
respawn limit 10 5
setuid prometheus
setgid prometheus

script
  . /etc/default/prometheus
  export GOMAXPROCS
  export PROMETHEUS_CMD
  export PROMETHEUS_LOGDIR
  export PROMETHEUS_DATADIR
  export PROMETHEUS_LOGLEVEL
  export PROMETHEUS_CONF
  export PROMETHEUS_OPTS
  exec $PROMETHEUS_CMD --config.file=$PROMETHEUS_CONF --storage.tsdb.path="$PROMETHEUS_DATADIR" --log.level=$PROMETHEUS_LOGLEVEL $PROMETHEUS_OPTS > $PROMETHEUS_LOGDIR/prometheus.log 2>&1
end script
@@ -1,6 +0,0 @@
---
- name: Restart prometheus
  service: name=prometheus state=restarted

- name: Reload prometheus
  service: name=prometheus state=reloaded
@@ -1,3 +0,0 @@
---
dependencies:
  - role: '../../library/roles/nginx'
@@ -1,61 +0,0 @@
---
- block:
    - name: Create the user under which prometheus will run
      user: name={{ prometheus_user }} home={{ prometheus_home }} createhome=no shell=/usr/sbin/nologin system=yes

    - name: Create the prometheus server base and conf directories
      file: dest={{ item }} state=directory owner=root group=root
      with_items:
        - '{{ prometheus_home }}'
        - '{{ prometheus_confdir }}'
        - '{{ prometheus_dist_dir }}'

    - name: Create the prometheus directory structure
      file: dest={{ prometheus_home }}/{{ item }} state=directory owner={{ prometheus_user }} group={{ prometheus_user }}
      with_items:
        - data
        - logs

    - name: Download prometheus
      get_url: url={{ prometheus_download_url }} dest=/srv/

    - name: Unarchive the prometheus distribution
      unarchive: src=/srv/{{ prometheus_file }} dest={{ prometheus_dist_dir }} remote_src=yes
      args:
        creates: '{{ prometheus_dist_dir }}/{{ prometheus_dir }}/prometheus'
      notify: Restart prometheus

    - name: Install the prometheus configuration
      template: src=prometheus.yml.j2 dest={{ prometheus_confdir }}/prometheus.yml force=no
      notify: Reload prometheus

    - name: Install the prometheus defaults
      template: src=prometheus.default.j2 dest=/etc/default/prometheus mode=0644 owner=root group=root

    - name: Install the prometheus upstart script
      copy: src=prometheus.upstart dest=/etc/init/prometheus.conf mode=0644 owner=root group=root
      when: ansible_service_mgr != 'systemd'

    - name: Install the prometheus server systemd unit
      template: src=prometheus.systemd dest=/etc/systemd/system/prometheus.service mode=0644 owner=root group=root
      when: ansible_service_mgr == 'systemd'
      notify: systemd reload

    - name: Ensure that prometheus is started and enabled
      service: name=prometheus state=started enabled=yes

  tags: prometheus
  when: prometheus_install

- block:
    - name: Ensure that prometheus is stopped and disabled
      service: name=prometheus state=stopped enabled=no

    - name: Remove the prometheus init script
      file: dest=/etc/init/prometheus.conf state=absent

    - name: Remove all the prometheus files
      file: dest={{ prometheus_home }} state=absent

  tags: prometheus
  when: not prometheus_install
@@ -1,9 +0,0 @@
GOMAXPROCS={{ ansible_processor_vcpus }}
PROMETHEUS_CMD={{ prometheus_cmd }}
PROMETHEUS_LOGDIR={{ prometheus_home }}/logs
PROMETHEUS_DATADIR={{ prometheus_home }}/data
PROMETHEUS_LOGLEVEL={{ prometheus_loglevel }}
PROMETHEUS_CONF={{ prometheus_confdir }}/prometheus.yml
PROMETHEUS_OPTS="{{ prometheus_opts }}"
PROMETHEUS_STARTUP_OPTS="--config.file={{ prometheus_confdir }}/prometheus.yml --storage.tsdb.path={{ prometheus_home }}/data {{ prometheus_opts }} --log.level={{ prometheus_loglevel }}"
@@ -1,17 +0,0 @@
[Unit]
Description=Prometheus - Prometheus metrics collector.
Documentation=https://prometheus.io/docs/introduction/overview/
After=network.target

[Service]
Type=simple
User={{ prometheus_user }}
Group={{ prometheus_user }}
EnvironmentFile=/etc/default/prometheus
ExecStart={{ prometheus_cmd }} $PROMETHEUS_STARTUP_OPTS
ExecReload=/bin/kill -HUP $MAINPID
Restart=on-failure

[Install]
WantedBy=multi-user.target
@@ -1,28 +0,0 @@
global:
  scrape_interval: 15s     # Set the scrape interval to every 15 seconds. Default is every 1 minute.
  evaluation_interval: 15s # Evaluate rules every 15 seconds. The default is every 1 minute.
  # scrape_timeout is set to the global default (10s).

# Alertmanager configuration
alerting:
  alertmanagers:
    - static_configs:
        - targets:
          # - alertmanager:9093

# Load rules once and periodically evaluate them according to the global 'evaluation_interval'.
rule_files:
  # - "first_rules.yml"
  # - "second_rules.yml"

# A scrape configuration containing exactly one endpoint to scrape:
# Here it's Prometheus itself.
scrape_configs:
  # The job name is added as a label `job=<job_name>` to any timeseries scraped from this config.
  - job_name: 'prometheus'

    # metrics_path defaults to '/metrics'
    # scheme defaults to 'http'.

    static_configs:
      - targets: ['localhost:9090']
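The shipped configuration only scrapes Prometheus itself, and the task above installs it with force=no, so a job for the node exporters (port 9100 from the exporter defaults) would have to be added by hand under scrape_configs, along these lines (target hostnames are placeholders):

  - job_name: 'node'
    static_configs:
      - targets: ['host1.example.org:9100', 'host2.example.org:9100']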
@@ -1,20 +0,0 @@
---
nginx_letsencrypt_managed: True
nginx_use_common_virthost: True
nginx_virthosts:
  - virthost_name: '{{ ansible_fqdn }}'
    listen: '{{ http_port }}'
    server_name: '{{ ansible_fqdn }}'
    server_aliases: ''
    index: index.html
    ssl_enabled: True
    ssl_only: True
    ssl_letsencrypt_certs: '{{ nginx_letsencrypt_managed }}'
    root: '{{ nginx_webroot }}'
    server_tokens: 'off'
    proxy_standard_setup: True
    locations:
      - location: /
        target: http://localhost:{{ prometheus_http_port }}
@@ -1,42 +0,0 @@
---
#
rsyslog_repo_install: True
rsyslog_ppa: "ppa:adiscon/v8-stable"
rsyslog_debian_repo: "deb http://debian.adiscon.com/v8-stable wheezy/"
rsyslog_repo_key: "AEF0CF8E"
rsyslog_pkg_status: "latest"
rsyslog_send_to_elasticsearch: True

rsyslog_use_inotify: True
# Not used when inotify is enabled
rsyslog_file_polling_interval: 10

# We use logstash if the elasticsearch module is not enabled
#rsys_logstash_collector_host: logstash.t.hadoop.research-infrastructures.eu
rsys_logstash_collector_host: logstash
rsys_logstash_collector_port: 5544

# IMPORTANT: the log_state_file names must be unique
#rsys_logfiles:
#  - { logfile: '/var/log/tomcat7/catalina.log', log_tag: 'solr-state', log_state_file: 'solr-state'}
#  - { logfile: '/var/log/tomcat7/localhost_access.log', log_tag: 'solr-access', log_state_file: 'solr-access'}

#
# IMPORTANT NOTE: the following settings only work if rsyslog_install_newer_package is set to True
#
rsyslog_use_queues: True
rsyslog_main_queue_size: 1000000
rsyslog_main_queue_debatchsize: 256
rsyslog_main_queue_workerthreads: 2
rsyslog_action_queue_debatchsize: 1024
rsyslog_action_queue_size: 100000
rsyslog_action_queue_workerthreads: 5
# -1 means retry indefinitely if ES is unreachable
rsyslog_action_resumeretrycount: -1

# The elasticsearch module bypasses logstash and talks directly to elasticsearch
rsyslog_use_elasticsearch_module: True
#rsys_elasticsearch_collector_host: logstash.t.hadoop.research-infrastructures.eu
rsys_elasticsearch_collector_host: logstash
rsys_elasticsearch_collector_port: 9200
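A hedged group_vars sketch that monitors an extra logfile and ships to the logstash collector instead of talking to Elasticsearch directly; the hostname and logfile path are examples only:

rsyslog_use_elasticsearch_module: False
rsys_logstash_collector_host: logstash.example.org
rsys_logfiles:
  - { logfile: '/var/log/tomcat7/catalina.log', log_tag: 'solr-state', log_state_file: 'solr-state' }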
@@ -1,6 +0,0 @@
---
- name: Restart rsyslog
  #service: name=rsyslog state=restarted
  shell: /usr/sbin/service rsyslog stop ; /usr/sbin/service rsyslog start
@@ -1,61 +0,0 @@
---
- name: Install the rsyslog ppa on ubuntu precise or later
  apt_repository: repo='{{ rsyslog_ppa }}' update_cache=yes
  when:
    - is_ubuntu
    - rsyslog_repo_install
  tags: [ 'rsyslog', 'logstash' ]

- name: Remove the rsyslog ppa on ubuntu precise or later
  apt_repository: repo='{{ rsyslog_ppa }}' update_cache=yes state=absent
  when:
    - is_ubuntu
    - not rsyslog_repo_install
  tags: [ 'rsyslog', 'logstash' ]

- name: Install the rsyslog repo key on debian wheezy
  apt_key: keyserver=keys.gnupg.net id=AEF0CF8E state=present
  when:
    - is_debian7
    - rsyslog_repo_install
  tags: [ 'rsyslog', 'logstash' ]

- name: Install the rsyslog repository on debian wheezy
  apt_repository: repo="{{ rsyslog_debian_repo }}" state=present update_cache=yes
  when:
    - is_debian7
    - rsyslog_repo_install
  tags: [ 'rsyslog', 'logstash' ]

- name: Remove the rsyslog repository on debian wheezy
  apt_repository: repo="{{ rsyslog_debian_repo }}" state=absent update_cache=yes
  when:
    - is_debian7
    - not rsyslog_repo_install
  tags: [ 'rsyslog', 'logstash' ]

- name: Add the syslog user to the adm group so it can read all the log files
  user: name=syslog groups=adm
  tags: [ 'rsyslog', 'logstash' ]

- name: Upgrade rsyslog and install the elasticsearch module
  apt: pkg={{ item }} state={{ rsyslog_pkg_status }} update_cache=yes cache_valid_time=1800
  with_items:
    - rsyslog
    - rsyslog-elasticsearch
  tags: [ 'rsyslog', 'logstash' ]

- name: Add a rsyslog configuration to send logfiles data to a logstash collector or directly to elasticsearch
  template: src=rsyslog-logstash.conf.j2 dest=/etc/rsyslog.d/90-rsyslog-logstash.conf owner=root group=root mode=0444
  when:
    - rsyslog_repo_install
    - rsyslog_send_to_elasticsearch
  notify: Restart rsyslog
  tags: [ 'rsyslog', 'logstash' ]

- name: Remove the rsyslog configuration to send logfiles data to a logstash collector or directly to elasticsearch
  file: dest=/etc/rsyslog.d/90-rsyslog-logstash.conf state=absent
  when: not rsyslog_send_to_elasticsearch
  notify: Restart rsyslog
  tags: [ 'rsyslog', 'logstash' ]
@@ -1,13 +0,0 @@
$ModLoad imfile

{% for log in rsys_logfiles %}
$InputFileName {{ log.logfile }}
$InputFileTag {{ log.log_tag }}
$InputFileStateFile {{ log.log_state_file }}
$InputRunFileMonitor

{% endfor %}

# Send all to the logstash server
*.* @@{{ rsys_logstash_collector_host }}:{{ rsys_logstash_collector_port }}
@@ -1,70 +0,0 @@
{% if rsys_logfiles is defined %}
{% if rsyslog_use_inotify %}
module(load="imfile" mode="inotify" )
{% else %}
module(load="imfile" mode="polling" PollingInterval="10" )
{% endif %}
{% for log in rsys_logfiles %}
input(
  Type="imfile"
  File="{{ log.logfile }}"
  Tag="{{ log.log_tag }}"
)

{% endfor %}
{% endif %}
{% if rsyslog_use_elasticsearch_module %}
module(load="omelasticsearch")

{% if rsyslog_use_queues %}
main_queue(
  queue.size="{{ rsyslog_main_queue_size }}" # capacity of the main queue
  queue.debatchsize="{{ rsyslog_main_queue_debatchsize }}" # process messages in batches and move them to the action queues
  queue.workerthreads="{{ rsyslog_main_queue_workerthreads }}" # threads for the main queue
)
{% endif %}

template(name="logstash-index"
  type="list") {
    constant(value="logstash-")
    property(name="timereported" dateFormat="rfc3339" position.from="1" position.to="4")
    constant(value=".")
    property(name="timereported" dateFormat="rfc3339" position.from="6" position.to="7")
    constant(value=".")
    property(name="timereported" dateFormat="rfc3339" position.from="9" position.to="10")
}

# this is for formatting our syslog in JSON with @timestamp
template(name="plain-syslog"
  type="list") {
    constant(value="{")
    constant(value="\"@timestamp\":\"")       property(name="timereported" dateFormat="rfc3339")
    constant(value="\",\"received_at\":\"")   property(name="timereported" dateFormat="rfc3339")
    constant(value="\",\"host\":\"")          property(name="hostname")
    constant(value="\",\"received_from\":\"") property(name="hostname")
    constant(value="\",\"severity\":\"")      property(name="syslogseverity-text")
    constant(value="\",\"facility\":\"")      property(name="syslogfacility-text")
    constant(value="\",\"tag\":\"")           property(name="syslogtag" format="json")
    constant(value="\",\"message\":\"")       property(name="msg" format="json")
    constant(value="\"}")
}
# this is where we actually send the logs to Elasticsearch ({{ rsys_elasticsearch_collector_host }}:{{ rsys_elasticsearch_collector_port }})
*.* action(type="omelasticsearch"
  template="plain-syslog"
  searchIndex="logstash-index"
  dynSearchIndex="on"
{% if rsyslog_use_queues %}
  bulkmode="on"
  queue.dequeuebatchsize="{{ rsyslog_action_queue_debatchsize }}" # ES bulk size
  queue.size="{{ rsyslog_action_queue_size }}" # capacity of the action queue
  queue.workerthreads="{{ rsyslog_action_queue_workerthreads }}" # workers for the action
  action.resumeretrycount="{{ rsyslog_action_resumeretrycount }}"
{% endif %}
  server="{{ rsys_elasticsearch_collector_host }}"
  serverport="{{ rsys_elasticsearch_collector_port }}"
)
{% else %}
# Send all to the logstash server
*.* @@{{ rsys_logstash_collector_host }}:{{ rsys_logstash_collector_port }}
{% endif %}
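If the direct Elasticsearch path above drops messages under load, the queue variables from the defaults can be raised; an illustrative override, with numbers that are guesses to be validated against the host's memory:

rsyslog_main_queue_size: 2000000
rsyslog_action_queue_size: 200000
rsyslog_action_queue_workerthreads: 8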
@@ -1,25 +0,0 @@
---
rsyslog_enable_remote_socket: False
rsyslog_enable_remote_udp: 'enabled'
rsyslog_enable_remote_tcp: 'disabled'

rsyslog_remote_path: /var/log/remote
rsyslog_tls_status: 'disabled'
rsyslog_tls_deb_pkgs:
  - 'rsyslog-gnutls'

rsyslog_tls_rh_pkgs:
  - 'rsyslog-gnutls'

rsyslog_udp_port: 514
rsyslog_tcp_port: 514

rsyslog_send_to_remote: False

rsyslog_firewalld_services:
  - { service: 'syslog', state: '{{ rsyslog_enable_remote_udp }}', zone: '{{ firewalld_default_zone }}' }
  - { service: 'syslog-tls', state: '{{ rsyslog_tls_status }}', zone: '{{ firewalld_default_zone }}' }

rsyslog_firewalld_ports:
  - { port: '{{ rsyslog_tcp_port }}', protocol: 'tcp', state: '{{ rsyslog_enable_remote_tcp }}', zone: '{{ firewalld_default_zone }}' }
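Remote reception is off by default; a minimal sketch of turning on UDP and TCP listeners for a central log host (firewalld_default_zone is assumed to be defined elsewhere, e.g. by a firewalld role, and the path is an example):

rsyslog_enable_remote_socket: True
rsyslog_enable_remote_udp: 'enabled'
rsyslog_enable_remote_tcp: 'enabled'
rsyslog_remote_path: /data/log/remote   # example alternative path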
@@ -1,5 +0,0 @@
---
- name: Restart rsyslog
  service: name=rsyslog state=restarted
@@ -1,70 +0,0 @@
---
- name: Configure rsyslog so that it accepts logs from remote services
  block:
    - name: Ensure that the rsyslog package is installed. deb/ubuntu
      apt: pkg=rsyslog state=present cache_valid_time=1800
      when: ansible_distribution_file_variety == "Debian"

    - name: Ensure that the rsyslog package is installed. centos/rhel
      yum: pkg=rsyslog state=present
      when: ansible_distribution_file_variety == "RedHat"

    - name: Create the additional rsyslog directory
      file: dest={{ rsyslog_remote_path }} state=directory owner=syslog group=adm

    - name: Install the rsyslog configuration
      template: src=rsyslog-remote-socket.conf.j2 dest=/etc/rsyslog.d/10-rsyslog-remote-socket.conf
      notify: Restart rsyslog

    - name: Ensure that rsyslog is running and enabled
      service: name=rsyslog state=started enabled=yes

  when: rsyslog_enable_remote_socket | bool
  tags: [ 'syslog', 'rsyslog', 'remote_syslog' ]

- name: Install the rsyslog TLS package on deb/ubuntu
  block:
    - name: Install the rsyslog TLS support
      apt: pkg={{ rsyslog_tls_deb_pkgs }} state=present cache_valid_time=1800
      notify: Restart rsyslog

  when:
    - rsyslog_enable_remote_socket | bool
    - rsyslog_tls_status == 'enabled'
    - ansible_distribution_file_variety == "Debian"
  tags: [ 'syslog', 'rsyslog', 'remote_syslog' ]

- name: Install the rsyslog TLS package on RHEL/CentOS
  block:
    - name: Install the rsyslog TLS support
      yum: pkg={{ rsyslog_tls_rh_pkgs }} state=present
      notify: Restart rsyslog

  when:
    - rsyslog_enable_remote_socket | bool
    - rsyslog_tls_status == 'enabled'
    - ansible_distribution_file_variety == "RedHat"
  tags: [ 'syslog', 'rsyslog', 'remote_syslog' ]

- name: Configure SELinux and firewalld on RHEL/CentOS
  block:
    - name: SELinux udp port
      seport: ignore_selinux_state=yes ports=514 proto=udp setype=syslogd_port_t state=present
      when: rsyslog_enable_remote_udp == 'enabled'

    - name: SELinux tcp port
      seport: ignore_selinux_state=yes ports=514 proto=tcp setype=syslogd_port_t state=present
      when: rsyslog_enable_remote_tcp == 'enabled'

    - name: rsyslog firewalld services
      firewalld: service={{ item.service }} zone={{ item.zone }} permanent={{ item.permanent | default(True) }} state={{ item.state }} immediate=True
      with_items: '{{ rsyslog_firewalld_services }}'

    - name: rsyslog firewalld ports
      firewalld: port={{ item.port }}/{{ item.protocol }} zone={{ item.zone }} permanent={{ item.permanent | default(False) }} state={{ item.state }} immediate=True
      with_items: '{{ rsyslog_firewalld_ports }}'

  when:
    - rsyslog_enable_remote_socket | bool
    - ansible_distribution_file_variety == "RedHat"
  tags: [ 'syslog', 'rsyslog', 'remote_syslog', 'selinux', 'firewalld' ]
@@ -1,34 +0,0 @@
#
# The order counts
#
{% if rsyslog_enable_remote_udp == 'enabled' %}
# Provides UDP syslog reception
module(load="imudp") # needs to be done just once
# input(type="imudp" port="{{ rsyslog_udp_port }}")
{% endif %}

{% if rsyslog_enable_remote_tcp == 'enabled' %}
# Provides TCP syslog reception
module(load="imtcp") # needs to be done just once
# input(type="imtcp" port="{{ rsyslog_tcp_port }}")
{% endif %}

# log every host in its own directory
$template RemoteHost,"{{ rsyslog_remote_path }}/%HOSTNAME%/syslog.log"
$RuleSet remote
*.* ?RemoteHost

{% if rsyslog_enable_remote_udp == 'enabled' %}
# bind the ruleset to the udp listener
$InputUDPServerBindRuleset remote
# and activate it:
$UDPServerRun {{ rsyslog_udp_port }}
{% endif %}

{% if rsyslog_enable_remote_tcp == 'enabled' %}
# bind the ruleset to the tcp listener
$InputTCPServerBindRuleset remote
# and activate it:
$InputTCPServerRun {{ rsyslog_tcp_port }}
{% endif %}