Update to 2020-04-14

master
Daniel Berteaud 4 years ago
parent 359b7b5c4c
commit 460a3fc863
  1. 1087
      library/iptables_raw.py
  2. 9
      playbooks/update_all.yml
  3. 42
      playbooks/update_zabbix.yml
  4. 94
      roles/ampache/defaults/main.yml
  5. 4
      roles/ampache/handlers/main.yml
  6. 6
      roles/ampache/meta/main.yml
  7. 218
      roles/ampache/tasks/main.yml
  8. 134
      roles/ampache/templates/ampache.cfg.php.j2
  9. 31
      roles/ampache/templates/cron.sh.j2
  10. 7
      roles/ampache/templates/dump_db.j2
  11. 27
      roles/ampache/templates/httpd.conf.j2
  12. 3
      roles/ampache/templates/motd.php.j2
  13. 15
      roles/ampache/templates/perms.sh.j2
  14. 37
      roles/ampache/templates/php.conf.j2
  15. 3
      roles/ampache/templates/rm_dump.j2
  16. 6
      roles/ampache/templates/sso.php.j2
  17. 36
      roles/backup/defaults/main.yml
  18. 53
      roles/backup/files/dump-megaraid-cfg
  19. 3
      roles/backup/files/dump-rpms-list
  20. 9
      roles/backup/files/post-backup
  21. 29
      roles/backup/files/pre-backup
  22. 3
      roles/backup/files/rm-megaraid-cfg
  23. 84
      roles/backup/tasks/main.yml
  24. 2
      roles/backup/templates/sudo.j2
  25. 19
      roles/backuppc/defaults/main.yml
  26. 5
      roles/backuppc/handlers/main.yml
  27. 3
      roles/backuppc/meta/main.yml
  28. 48
      roles/backuppc/tasks/main.yml
  29. 25
      roles/backuppc/templates/httpd.conf.j2
  30. 3
      roles/backuppc/templates/sudoers.j2
  31. 45
      roles/bitwarden_rs/defaults/main.yml
  32. 5
      roles/bitwarden_rs/handlers/main.yml
  33. 7
      roles/bitwarden_rs/meta/main.yml
  34. 12
      roles/bitwarden_rs/tasks/archive_post.yml
  35. 23
      roles/bitwarden_rs/tasks/archive_pre.yml
  36. 8
      roles/bitwarden_rs/tasks/cleanup.yml
  37. 11
      roles/bitwarden_rs/tasks/conf.yml
  38. 24
      roles/bitwarden_rs/tasks/directories.yml
  39. 67
      roles/bitwarden_rs/tasks/facts.yml
  40. 97
      roles/bitwarden_rs/tasks/install.yml
  41. 9
      roles/bitwarden_rs/tasks/iptables.yml
  42. 14
      roles/bitwarden_rs/tasks/main.yml
  43. 6
      roles/bitwarden_rs/tasks/service.yml
  44. 5
      roles/bitwarden_rs/tasks/user.yml
  45. 10
      roles/bitwarden_rs/tasks/write_version.yml
  46. 25
      roles/bitwarden_rs/templates/bitwarden_rs.conf.j2
  47. 27
      roles/bitwarden_rs/templates/bitwarden_rs.service.j2
  48. 71
      roles/bitwarden_rs/templates/nginx.conf.j2
  49. 4
      roles/bitwarden_rs/templates/post-backup.sh.j2
  50. 17
      roles/bitwarden_rs/templates/pre-backup.sh.j2
  51. 117
      roles/bluemind/defaults/main.yml
  52. 4
      roles/bluemind/handlers/main.yml
  53. 118
      roles/bluemind/tasks/main.yml
  54. 53
      roles/bluemind/templates/bm-core.log.xml.j2
  55. 59
      roles/bluemind/templates/bm-eas.log.xml.j2
  56. 12
      roles/bluemind/templates/bm-hps.log.xml.j2
  57. 12
      roles/bluemind/templates/bm-ips.log.xml.j2
  58. 12
      roles/bluemind/templates/bm-lmtp.log.xml.j2
  59. 13
      roles/bluemind/templates/bm-locator.log.xml.j2
  60. 12
      roles/bluemind/templates/bm-milter.log.xml.j2
  61. 13
      roles/bluemind/templates/bm-node.log.xml.j2
  62. 19
      roles/bluemind/templates/bm-syslog.service.j2
  63. 12
      roles/bluemind/templates/bm-tika.log.xml.j2
  64. 43
      roles/bluemind/templates/bm-webserver.log.xml.j2
  65. 12
      roles/bluemind/templates/bm-xmpp.log.xml.j2
  66. 14
      roles/bluemind/templates/bm-ysnp.log.xml.j2
  67. 12
      roles/bluemind/templates/dehydrated_deploy_hook.j2
  68. 5
      roles/bluemind/templates/post-backup.j2
  69. 17
      roles/bluemind/templates/pre-backup.j2
  70. 11
      roles/bluemind/templates/rules.json.j2
  71. 19
      roles/bounca/defaults/main.yml
  72. 5
      roles/bounca/handlers/main.yml
  73. 2
      roles/bounca/meta/main.yml
  74. 323
      roles/bounca/tasks/main.yml
  75. 17
      roles/bounca/templates/bounca.service.j2
  76. 14
      roles/bounca/templates/main.ini.j2
  77. 17
      roles/bounca/templates/uwsgi.ini.j2
  78. 16
      roles/clamav/defaults/main.yml
  79. 9
      roles/clamav/handlers/main.yml
  80. 57
      roles/clamav/tasks/main.yml
  81. 12
      roles/clamav/templates/clamd.conf.j2
  82. 13
      roles/clamav/templates/clamd.service.j2
  83. 13
      roles/clamav/templates/freshclam.conf.j2
  84. 15
      roles/clamav/templates/freshclam.service.j2
  85. 120
      roles/common/defaults/main.yml
  86. BIN
      roles/common/files/MegaCli-8.07.14-1.noarch.rpm
  87. 10
      roles/common/files/bash_aliases.sh
  88. 1
      roles/common/files/crond
  89. 10
      roles/common/files/fstrim_all
  90. BIN
      roles/common/files/megacli_8.07.14-1_all.deb
  91. 4
      roles/common/files/vimrc.local_Debian
  92. 33
      roles/common/handlers/main.yml
  93. 28
      roles/common/meta/main.yml
  94. 16
      roles/common/tasks/guest.yml
  95. 4
      roles/common/tasks/guest_Debian.yml
  96. 5
      roles/common/tasks/guest_RedHat.yml
  97. 18
      roles/common/tasks/hardware.yml
  98. 30
      roles/common/tasks/hardware_Debian.yml
  99. 24
      roles/common/tasks/hardware_RedHat.yml
  100. 11
      roles/common/tasks/hostname.yml
  101. Some files were not shown because too many files have changed in this diff Show More

File diff suppressed because it is too large Load Diff

@ -0,0 +1,9 @@
---
# Upgrade every installed package on every host.
# yum handles the RedHat family, apt the Debian family.
- name: Update everything
  hosts: '*'
  tasks:
    # '*' must stay quoted: an unquoted * is a YAML alias indicator
    - name: Upgrade all packages (RedHat)
      yum:
        name: '*'
        state: latest
      when: ansible_os_family == 'RedHat'
    - name: Upgrade all packages (Debian)
      apt:
        name: '*'
        state: latest
        # refresh the package index first, otherwise a stale cache
        # can silently leave packages un-upgraded
        update_cache: true
      when: ansible_os_family == 'Debian'

@ -0,0 +1,42 @@
---
# Upgrade the Zabbix agent (and its addons) everywhere.
# On RedHat the addons come from a package; on Debian they are
# deployed from git and copied into place.
- name: Update Zabbix
  hosts: '*'
  tasks:
    - name: Upgrade zabbix agent packages (RedHat)
      yum:
        name:
          - zabbix-agent
          - zabbix-agent-addons
        state: latest
      when: ansible_os_family == 'RedHat'
      notify: restart zabbix-agent

    - name: Upgrade zabbix agent package (Debian)
      apt:
        name:
          - zabbix-agent
        update_cache: true
        state: latest
      when: ansible_os_family == 'Debian'
      notify: restart zabbix-agent

    - name: Fetch zabbix-agent-addons from git (Debian)
      git:
        repo: https://git.fws.fr/fws/zabbix-agent-addons.git
        dest: /var/lib/zabbix/addons
      register: zabbix_agent_addons_git
      when: ansible_os_family == 'Debian'
      notify: restart zabbix-agent

    - name: Install addon files (Debian)
      shell: cp -af /var/lib/zabbix/addons/{{ item.src }}/* {{ item.dest }}/
      with_items:
        - { src: zabbix_conf, dest: /etc/zabbix/zabbix_agentd.conf.d }
        - { src: zabbix_scripts, dest: /var/lib/zabbix/bin }
        - { src: lib, dest: /usr/local/lib/site_perl }
      when:
        - zabbix_agent_addons_git.changed
        - ansible_os_family == 'Debian'

    - name: Make addon scripts executable (Debian)
      shell: chmod +x /var/lib/zabbix/bin/*
      args:
        warn: false
      when:
        - zabbix_agent_addons_git.changed
        - ansible_os_family == 'Debian'

  handlers:
    - name: restart zabbix-agent
      service:
        name: zabbix-agent
        state: restarted

@ -0,0 +1,94 @@
---
# Instance id: allows several ampache instances on the same host
ampache_id: "1"
# Set to false to pin the installed version (ansible won't upgrade)
ampache_manage_upgrade: true
ampache_version: '4.1.1'
ampache_config_version: 40
ampache_zip_url: https://github.com/ampache/ampache/archive/{{ ampache_version }}.zip
ampache_zip_sha1: 744ff90039a268579551d50650ce1502ec89daf1
ampache_root_dir: /opt/ampache_{{ ampache_id }}
ampache_php_user: php-ampache_{{ ampache_id }}
ampache_php_version: 74
# If you prefer using a custom PHP FPM pool, set it's name.
# You might need to adjust ampache_php_user
# ampache_php_fpm_pool: php56
ampache_mysql_server: "{{ mysql_server | default('localhost') }}"
# ampache_mysql_port: 3306
ampache_mysql_db: ampache_{{ ampache_id }}
ampache_mysql_user: ampache_{{ ampache_id }}
# If not defined, a random pass will be generated and stored in the meta directory
# ampache_mysql_pass: ampache
# ampache_alias: ampache
# ampache_allowed_ip:
#   - 192.168.7.0/24
#   - 10.2.0.0/24
ampache_local_web_path: "http://ampache.{{ ansible_domain }}/"
ampache_auth_methods:
  - mysql
ampache_ldap_url: "{{ ad_auth | default(False) | ternary('ldap://' + ad_realm | default(samba_realm) | lower,ldap_uri) }}"
ampache_ldap_starttls: true
ampache_ldap_search_dn: "{{ ad_auth | default(False) | ternary((ad_ldap_user_search_base is defined) | ternary(ad_ldap_user_search_base,'DC=' + ad_realm | default(samba_realm) | regex_replace('\\.',',DC=')), ldap_base) }}"
ampache_ldap_username: ""
ampache_ldap_password: ""
ampache_ldap_objectclass: "{{ ad_auth | default(False) | ternary('user','inetOrgPerson') }}"
ampache_ldap_filter: "{{ ad_auth | default(False) | ternary('(&(objectCategory=person)(objectClass=user)(primaryGroupId=513)(sAMAccountName=%v))','(uid=%v)') }}"
ampache_ldap_email_field: mail
ampache_ldap_name_field: cn
ampache_admin_users:
  - admin
#ampache_logout_redirect: https://sso.domain.org
ampache_metadata_order: 'getID3,filename'
# NOTE(review): shared last.fm API credentials committed as defaults —
# override per deployment if a dedicated key is wanted
ampache_lastfm_api_key: 697bad201ee93391630d845c7b3f9610
ampache_lastfm_api_secret: 5f5fe59aa2f9c60220f04e94aa59c209
ampache_max_bit_rate: 192
ampache_min_bit_rate: 64
# allowed, required or false
ampache_transcode_m4a: required
ampache_transcode_flac: required
ampache_transcode_mpc: required
ampache_transcode_ogg: required
ampache_transcode_oga: required
ampache_transcode_wav: required
ampache_transcode_wma: required
ampache_transcode_aif: required
ampache_transcode_aiff: required
ampache_transcode_ape: required
ampache_transcode_shn: required
ampache_transcode_mp3: allowed
ampache_transcode_avi: required
ampache_transcode_mkv: required
ampache_transcode_mpg: required
ampache_transcode_mpeg: required
ampache_transcode_m4v: required
ampache_transcode_mp4: required
ampache_transcode_mov: required
ampache_transcode_wmv: required
ampache_transcode_ogv: required
ampache_transcode_divx: required
ampache_transcode_m2ts: required
ampache_transcode_webm: required
ampache_transcode_player_api_mp3: required
ampache_encode_player_api_target: mp3
ampache_encode_player_webplayer: mp3
ampache_encode_target: mp3
ampache_encode_video_target: webm
# If defined, will be printed on the login page. HTML can be used, eg
# ampache_motd: '<a href="/sso.php">Use central authentication</a>'
...

@ -0,0 +1,4 @@
---
# Reuse the handlers of the shared httpd roles (reload httpd,
# restart php-fpm, ...) so tasks of this role can notify them.
- include: ../httpd_common/handlers/main.yml
- include: ../httpd_php/handlers/main.yml
...

@ -0,0 +1,6 @@
---
# allow_duplicates lets the role be applied several times on one host
# (one run per ampache_id instance)
allow_duplicates: true
dependencies:
  # repo providing ffmpeg on EL
- role: repo_nux_dextop
- role: httpd_php
...

@ -0,0 +1,218 @@
---
# Install or upgrade an Ampache instance under {{ ampache_root_dir }}.
# Shared plumbing (system user, vhost, DB creation, pre-upgrade archive)
# is delegated to the ../includes task files.
- name: Install needed tools
  yum:
    name:
      - unzip
      - MySQL-python
      - mariadb
      - acl
      - git
      - composer
      - patch
      - ffmpeg
  tags: ampache

- import_tasks: ../includes/create_system_user.yml
  vars:
    user: "{{ ampache_php_user }}"
    comment: "PHP FPM for ampache {{ ampache_id }}"
  tags: ampache

# Sets the install_mode (install/upgrade/none) and current_version facts
- import_tasks: ../includes/webapps_set_install_mode.yml
  vars:
    root_dir: "{{ ampache_root_dir }}"
    version: "{{ ampache_version }}"
  tags: ampache

# Downgrade 'upgrade' to 'none' when upgrades are not managed by ansible
- set_fact: ampache_install_mode={{ (install_mode == 'upgrade' and not ampache_manage_upgrade) | ternary('none',install_mode) }}
  tags: ampache

- set_fact: ampache_current_version={{ current_version | default('') }}
  tags: ampache

# Archive current code + database before touching anything
- import_tasks: ../includes/webapps_archive.yml
  vars:
    root_dir: "{{ ampache_root_dir }}"
    version: "{{ ampache_current_version }}"
    db_name: "{{ ampache_mysql_db }}"
  when: ampache_install_mode == 'upgrade'
  tags: ampache

- name: Download Ampache
  get_url:
    url: "{{ ampache_zip_url }}"
    dest: "{{ ampache_root_dir }}/tmp/"
    checksum: "sha1:{{ ampache_zip_sha1 }}"
  when: ampache_install_mode != 'none'
  tags: ampache

- name: Extract ampache archive
  unarchive:
    src: "{{ ampache_root_dir }}/tmp/ampache-{{ ampache_version }}.zip"
    dest: "{{ ampache_root_dir }}/tmp"
    remote_src: true
  when: ampache_install_mode != 'none'
  tags: ampache

- name: Create directory structure
  file: path={{ item }} state=directory
  with_items:
    - "{{ ampache_root_dir }}"
    - "{{ ampache_root_dir }}/web"
    - "{{ ampache_root_dir }}/tmp"
    - "{{ ampache_root_dir }}/sessions"
    - "{{ ampache_root_dir }}/meta"
    - "{{ ampache_root_dir }}/logs"
    - "{{ ampache_root_dir }}/data"
    - "{{ ampache_root_dir }}/data/metadata"
    - "{{ ampache_root_dir }}/data/music"
    - "{{ ampache_root_dir }}/data/video"
    - "{{ ampache_root_dir }}/db_dumps"
  tags: ampache

# synchronize + delegate_to the same host = local rsync with --delete
- name: Move files to the correct directory
  synchronize:
    src: "{{ ampache_root_dir }}/tmp/ampache-{{ ampache_version }}/"
    dest: "{{ ampache_root_dir }}/web/"
    recursive: true
    delete: true
  delegate_to: "{{ inventory_hostname }}"
  when: ampache_install_mode != 'none'
  tags: ampache

- name: Check if htaccess files needs to be moved
  stat: path={{ ampache_root_dir }}/web/{{ item }}/.htaccess.dist
  with_items:
    - channel
    - play
    - rest
  register: htaccess
  tags: ampache

- name: Rename htaccess files
  command: mv -f {{ ampache_root_dir }}/web/{{ item.item }}/.htaccess.dist {{ ampache_root_dir }}/web/{{ item.item }}/.htaccess
  with_items: "{{ htaccess.results }}"
  when: item.stat.exists
  tags: ampache

- name: Install libs using composer
  composer: command=install working_dir={{ ampache_root_dir }}/web executable={{ (ampache_php_version == '54') | ternary('/bin/php','/bin/php' ~ ampache_php_version ) }}
  tags: ampache

- name: Remove temp files
  file: path={{ item }} state=absent
  with_items:
    - "{{ ampache_root_dir }}/tmp/ampache-{{ ampache_version }}.zip"
    - "{{ ampache_root_dir }}/tmp/ampache-{{ ampache_version }}"
  tags: ampache

# Random secret key for the web app, generated once and kept in meta/
- import_tasks: ../includes/get_rand_pass.yml
  vars:
    pass_file: "{{ ampache_root_dir }}/meta/key.txt"
  tags: ampache

- set_fact: ampache_key={{ rand_pass }}
  tags: ampache

# Same for the DB password, unless one was provided explicitly
- import_tasks: ../includes/get_rand_pass.yml
  vars:
    pass_file: "{{ ampache_root_dir }}/meta/ansible_dbpass"
  when: ampache_mysql_pass is not defined
  tags: ampache

- set_fact: ampache_mysql_pass={{ rand_pass }}
  when: ampache_mysql_pass is not defined
  tags: ampache

- import_tasks: ../includes/webapps_create_mysql_db.yml
  vars:
    db_name: "{{ ampache_mysql_db }}"
    db_user: "{{ ampache_mysql_user }}"
    db_server: "{{ ampache_mysql_server }}"
    db_pass: "{{ ampache_mysql_pass }}"
  tags: ampache

- name: Inject SQL structure
  mysql_db:
    name: "{{ ampache_mysql_db }}"
    state: import
    target: "{{ ampache_root_dir }}/web/sql/ampache.sql"
    login_host: "{{ ampache_mysql_server }}"
    login_user: sqladmin
    login_password: "{{ mysql_admin_pass }}"
  when: ampache_install_mode == 'install'
  tags: ampache

- name: Upgrade SQL database
  command: php{{ (ampache_php_version == '54') | ternary('', ampache_php_version) }} {{ ampache_root_dir }}/web/bin/install/update_db.inc
  become_user: "{{ ampache_php_user }}"
  when: ampache_install_mode == 'upgrade'
  tags: ampache

# command module (no shell): the backquotes are passed literally to mysql
- name: Grant admin privileges
  command: mysql --host={{ ampache_mysql_server }} --user=sqladmin --password={{ mysql_admin_pass }} {{ ampache_mysql_db }} -e "UPDATE `user` SET `access`='100' WHERE `username`='{{ item }}'"
  changed_when: false
  become_user: "{{ ampache_php_user }}"
  with_items: "{{ ampache_admin_users }}"
  tags: ampache

- import_tasks: ../includes/webapps_webconf.yml
  vars:
    app_id: ampache_{{ ampache_id }}
    php_version: "{{ ampache_php_version }}"
    php_fpm_pool: "{{ ampache_php_fpm_pool | default('') }}"
  tags: ampache

- name: Deploy ampache configuration
  template: src=ampache.cfg.php.j2 dest={{ ampache_root_dir }}/web/config/ampache.cfg.php group={{ ampache_php_user }} mode=640
  tags: ampache

- name: Deploy motd
  template: src=motd.php.j2 dest={{ ampache_root_dir }}/web/config/motd.php
  when: ampache_motd is defined
  tags: ampache

- name: Remove motd
  file: path={{ ampache_root_dir }}/web/config/motd.php state=absent
  when: ampache_motd is not defined
  tags: ampache

- name: Deploy cron scripts
  template: src={{ item }}.j2 dest={{ ampache_root_dir }}/web/bin/{{ item }}
  with_items:
    - cron.sh
  tags: ampache

- name: Enable cronjob
  cron:
    name: ampache_{{ ampache_id }}
    special_time: daily
    user: "{{ ampache_php_user }}"
    job: "/bin/sh {{ ampache_root_dir }}/web/bin/cron.sh"
    cron_file: ampache_{{ ampache_id }}
  tags: ampache

- name: Deploy sso script
  template: src=sso.php.j2 dest={{ ampache_root_dir }}/web/sso.php
  tags: ampache

- name: Deploy backup scripts
  template: src={{ item.script }}.j2 dest=/etc/backup/{{ item.type }}.d/ampache_{{ ampache_id }}_{{ item.script }} mode=750
  with_items:
    - script: dump_db
      type: pre
    - script: rm_dump
      type: post
  tags: ampache

- import_tasks: ../includes/webapps_compress_archive.yml
  vars:
    root_dir: "{{ ampache_root_dir }}"
    version: "{{ ampache_current_version }}"
  when: ampache_install_mode == 'upgrade'
  tags: ampache

- import_tasks: ../includes/webapps_post.yml
  vars:
    root_dir: "{{ ampache_root_dir }}"
    version: "{{ ampache_version }}"
  tags: ampache
...

@ -0,0 +1,134 @@
config_version = {{ ampache_config_version }}
{% if ampache_local_web_path is defined %}
local_web_path = "{{ ampache_local_web_path }}"
{% endif %}
database_hostname = {{ ampache_mysql_server }}
{% if ampache_mysql_port is defined %}
database_port = "{{ ampache_mysql_port }}"
{% endif %}
database_name = "{{ ampache_mysql_db }}"
database_username = "{{ ampache_mysql_user }}"
database_password = "{{ ampache_mysql_pass }}"
secret_key = "{{ ampache_key }}"
session_length = 3600
stream_length = 7200
remember_length = 604800
session_name = ampache
session_cookielife = 0
auth_methods = "{{ ampache_auth_methods | join(',') }}"
{% if 'ldap' in ampache_auth_methods %}
ldap_url = "{{ ampache_ldap_url }}"
ldap_username = "{{ ampache_ldap_username }}"
ldap_password = "{{ ampache_ldap_password }}"
ldap_start_tls = "{{ ampache_ldap_starttls | ternary('true','false') }}"
ldap_search_dn = "{{ ampache_ldap_search_dn }}"
ldap_objectclass = "{{ ampache_ldap_objectclass }}"
ldap_filter = "{{ ampache_ldap_filter }}"
ldap_email_field = "{{ ampache_ldap_email_field }}"
ldap_name_field = "{{ ampache_ldap_name_field }}"
external_auto_update = "true"
{% endif %}
{% if ampache_logout_redirect is defined %}
logout_redirect = "{{ ampache_logout_redirect }}"
{% endif %}
access_control = "true"
require_session = "true"
require_localnet_session = "true"
metadata_order = "{{ ampache_metadata_order }}"
getid3_tag_order = "id3v2,id3v1,vorbiscomment,quicktime,matroska,ape,asf,avi,mpeg,riff"
deferred_ext_metadata = "false"
additional_genre_delimiters = "[/]{2}|[/|\\\\|\|,|;]"
catalog_file_pattern = "mp3|mpc|m4p|m4a|aac|ogg|oga|wav|aif|aiff|rm|wma|asf|flac|opus|spx|ra|ape|shn|wv"
catalog_video_pattern = "avi|mpg|mpeg|flv|m4v|mp4|webm|mkv|wmv|ogv|mov|divx|m2ts"
catalog_playlist_pattern = "m3u|m3u8|pls|asx|xspf"
catalog_prefix_pattern = "The|An|A|Das|Ein|Eine|Les|Le|La"
track_user_ip = "true"
allow_zip_download = "true"
allow_zip_types = "album"
use_auth = "true"
ratings = "false"
userflags = "true"
directplay = "true"
sociable = "false"
licensing = "false"
memory_cache = "true"
album_art_store_disk = "true"
local_metadata_dir = "{{ ampache_root_dir }}/data/metadata"
max_upload_size = 1048576
resize_images = "false"
art_order = "db,tags,folder,musicbrainz,lastfm,google"
lastfm_api_key = "{{ ampache_lastfm_api_key }}"
lastfm_api_secret = "{{ ampache_lastfm_api_secret }}"
channel = "false"
live_stream = "false"
refresh_limit = "60"
show_footer_statistics = "false"
debug = "true"
debug_level = 5
log_path = "{{ ampache_root_dir }}/logs/"
log_filename = "%name.%Y%m%d.log"
site_charset = "UTF-8"
{% if 'ldap' in ampache_auth_methods or 'http' in ampache_auth_methods %}
auto_create = "true"
auto_user = "user"
{% endif %}
allow_public_registration = "false"
generate_video_preview = "true"
max_bit_rate = {{ ampache_max_bit_rate }}
min_bit_rate = {{ ampache_min_bit_rate }}
transcode_m4a = {{ ampache_transcode_m4a }}
transcode_flac = {{ ampache_transcode_flac }}
transcode_mpc = {{ ampache_transcode_mpc }}
transcode_ogg = {{ ampache_transcode_ogg }}
transcode_oga = {{ ampache_transcode_oga }}
transcode_wav = {{ ampache_transcode_wav }}
transcode_wma = {{ ampache_transcode_wma }}
transcode_aif = {{ ampache_transcode_aif }}
transcode_aiff = {{ ampache_transcode_aiff }}
transcode_ape = {{ ampache_transcode_ape }}
transcode_shn = {{ ampache_transcode_shn }}
transcode_mp3 = {{ ampache_transcode_mp3 }}
transcode_avi = {{ ampache_transcode_avi }}
transcode_mkv = {{ ampache_transcode_mkv }}
transcode_mpg = {{ ampache_transcode_mpg }}
transcode_mpeg = {{ ampache_transcode_mpeg }}
transcode_m4v = {{ ampache_transcode_m4v }}
transcode_mp4 = {{ ampache_transcode_mp4 }}
transcode_mov = {{ ampache_transcode_mov }}
transcode_wmv = {{ ampache_transcode_wmv }}
transcode_ogv = {{ ampache_transcode_ogv }}
transcode_divx = {{ ampache_transcode_divx }}
transcode_m2ts = {{ ampache_transcode_m2ts }}
transcode_webm = {{ ampache_transcode_webm }}
encode_target = {{ ampache_encode_target }}
encode_player_webplayer_target = {{ ampache_encode_player_webplayer }}
transcode_player_api_mp3 = {{ ampache_transcode_player_api_mp3 }}
encode_video_target = {{ ampache_encode_video_target }}
transcode_player_customize = "true"
transcode_cmd = "/bin/ffmpeg"
transcode_input = "-i %FILE%"
encode_args_mp3 = "-vn -b:a %BITRATE%K -c:a libmp3lame -f mp3 pipe:1"
encode_args_ogg = "-vn -b:a %BITRATE%K -c:a libvorbis -f ogg pipe:1"
encode_args_m4a = "-vn -b:a %BITRATE%K -c:a libfdk_aac -f adts pipe:1"
encode_args_wav = "-vn -b:a %BITRATE%K -c:a pcm_s16le -f wav pipe:1"
encode_args_opus = "-vn -b:a %BITRATE%K -c:a libopus -compression_level 10 -vsync 2 -f ogg pipe:1"
encode_args_flv = "-b:a %BITRATE%K -ar 44100 -ac 2 -v 0 -f flv -c:v libx264 -preset superfast -threads 0 pipe:1"
encode_args_webm = "-q %QUALITY% -f webm -c:v libvpx -maxrate %MAXBITRATE%k -preset superfast -threads 0 pipe:1"
encode_args_ts = "-q %QUALITY% -s %RESOLUTION% -f mpegts -c:v libx264 -c:a libmp3lame -maxrate %MAXBITRATE%k -preset superfast -threads 0 pipe:1"
encode_get_image = "-ss %TIME% -f image2 -vframes 1 pipe:1"
encode_srt = "-vf \"subtitles='%SRTFILE%'\""
encode_ss_frame = "-ss %TIME%"
encode_ss_duration = "-t %DURATION%"
common_abbr = "divx,xvid,dvdrip,hdtv,lol,axxo,repack,xor,pdtv,real,vtv,caph,2hd,proper,fqm,uncut,topaz,tvt,notv,fpn,fov,orenji,0tv,omicron,dsr,ws,sys,crimson,wat,hiqt,internal,brrip,boheme,vost,vostfr,fastsub,addiction,x264,LOL,720p,1080p,YIFY,evolve,fihtv,first,bokutox,bluray,tvboom,info"
force_ssl = "true"
; mail settings (the duplicate mail_type/mail_domain pair was removed)
mail_enable = "true"
mail_type = "sendmail"
mail_domain = "{{ ansible_domain }}"
{% if system_proxy is defined and system_proxy != '' %}
proxy_host = "{{ system_proxy | urlsplit('hostname') }}"
proxy_port = "{{ system_proxy | urlsplit('port') }}"
proxy_user = "{{ system_proxy | urlsplit('username') }}"
proxy_pass = "{{ system_proxy | urlsplit('password') }}"
{% endif %}

@ -0,0 +1,31 @@
#!/bin/sh
# Daily ampache maintenance: rotate logs, then refresh the media catalog
# when the files under data/{music,video} changed since the last run.
# NOTE(review): brace expansion in the find paths needs a bash-like /bin/sh —
# confirm if this role is ever used on dash-based systems.
# Rotate logs
find {{ ampache_root_dir }}/logs -type f -mtime +7 -exec rm -f "{}" \;
# Skip already-compressed files: without the filter, xz fails on the
# .xz suffix at every run after the first one
find {{ ampache_root_dir }}/logs -type f ! -name '*.xz' -mtime +1 -exec xz -T0 "{}" \;
# Do we have a previous filelist to compare against ?
# (silence cat on the very first run, when the hash file doesn't exist yet)
PREV_HASH=$(cat {{ ampache_root_dir }}/tmp/data_hash.txt 2>/dev/null || echo 'none')
# Now, compute a hash of the filelist
NEW_HASH=$(find {{ ampache_root_dir }}/data/{music,video} | sha1sum | cut -d' ' -f1)
# Write new hash so we can compare next time
echo -n $NEW_HASH > {{ ampache_root_dir }}/tmp/data_hash.txt
# If file list has changed since last time, then update the catalog
if [ "$PREV_HASH" != "$NEW_HASH" ]; then
  # Clean (remove files which doesn't exists anymore)
  /bin/php{{ (ampache_php_version == '54') | ternary('',ampache_php_version) }} {{ ampache_root_dir }}/web/bin/catalog_update.inc -c > /dev/null 2>&1
  # Add (files added)
  /bin/php{{ (ampache_php_version == '54') | ternary('',ampache_php_version) }} {{ ampache_root_dir }}/web/bin/catalog_update.inc -a > /dev/null 2>&1
  # Update graphics
  /bin/php{{ (ampache_php_version == '54') | ternary('',ampache_php_version) }} {{ ampache_root_dir }}/web/bin/catalog_update.inc -g > /dev/null 2>&1
fi
# Now check if files have changed recently. We can have the same file list, but metadata updates
NEW_FILES=$(find {{ ampache_root_dir }}/data/{music,video} -type f -mtime -1 | wc -l)
if [ "$NEW_FILES" -gt "0" ]; then
  # Verify (update metadata)
  /bin/php{{ (ampache_php_version == '54') | ternary('',ampache_php_version) }} {{ ampache_root_dir }}/web/bin/catalog_update.inc -v > /dev/null 2>&1
fi

@ -0,0 +1,7 @@
#!/bin/sh
# Pre-backup hook: dump the ampache database (lz4-compressed) into
# db_dumps/ so the file-level backup gets a consistent copy of the data.
# NOTE(review): the password is passed on the command line and is briefly
# visible in the process list — consider an option file if that matters here.
/usr/bin/mysqldump --user={{ ampache_mysql_user }} \
--password={{ ampache_mysql_pass }} \
--host={{ ampache_mysql_server }} \
--quick --single-transaction \
--add-drop-table {{ ampache_mysql_db }} | lz4 -c > {{ ampache_root_dir }}/db_dumps/{{ ampache_mysql_db }}.sql.lz4

@ -0,0 +1,27 @@
{% if ampache_alias is defined %}
# Serve this instance under /{{ ampache_alias }} on the main vhost
Alias /{{ ampache_alias }} {{ ampache_root_dir }}/web
{% else %}
# No alias defined, create a vhost to access it
{% endif %}
RewriteEngine On
<Directory {{ ampache_root_dir }}/web>
  AllowOverride All
  Options FollowSymLinks
{% if ampache_allowed_ip is defined %}
  # Fixed: the restriction list is ampache_allowed_ip (as in defaults);
  # the previous ampache_src_ip variable is never defined anywhere
  Require ip {{ ampache_allowed_ip | join(' ') }}
{% else %}
  Require all granted
{% endif %}
  # Hand PHP requests to this instance's FPM pool
  <FilesMatch \.php$>
    SetHandler "proxy:unix:/run/php-fpm/{{ ampache_php_fpm_pool | default('ampache_' + ampache_id | string) }}.sock|fcgi://localhost"
  </FilesMatch>
  # Never serve maintenance/CI/VCS leftovers
  <FilesMatch "(.maintenance.*|.ansible.*|.t?git.*|.php_cs|.travis.*)">
    Require all denied
  </FilesMatch>
</Directory>
# config/ holds credentials: always denied
<Directory {{ ampache_root_dir }}/web/config>
  Require all denied
</Directory>

@ -0,0 +1,3 @@
<?php
// Login-page MOTD, only deployed when ampache_motd is defined.
// NOTE(review): the motd text is wrapped in a link to /sso.php here, but the
// defaults suggest ampache_motd may itself contain HTML (even an anchor) —
// confirm nested markup is intended.
echo '<a href="/sso.php">{{ ampache_motd }}</a>';

@ -0,0 +1,15 @@
#!/bin/sh
# Reset SELinux labels, ownership and permissions of the whole ampache tree.
restorecon -R {{ ampache_root_dir }}
chown root:root {{ ampache_root_dir }}
chmod 700 {{ ampache_root_dir }}
setfacl -k -b {{ ampache_root_dir }}
# Let only the FPM user and the web server traverse the root dir
setfacl -m u:{{ ampache_php_user | default('apache') }}:rx,u:{{ httpd_user | default('apache') }}:rx {{ ampache_root_dir }}
chown -R root:root {{ ampache_root_dir }}/web
# Use the per-instance PHP user: the previous hard-coded "apache-ampache"
# does not match the php-ampache_<id> account created by this role
chown {{ ampache_php_user }} {{ ampache_root_dir }}/data
chown -R {{ ampache_php_user }} {{ ampache_root_dir }}/{tmp,sessions,logs,data/metadata}
chmod 700 {{ ampache_root_dir }}/{tmp,sessions,logs,data}
find {{ ampache_root_dir }}/web -type f -exec chmod 644 "{}" \;
find {{ ampache_root_dir }}/web -type d -exec chmod 755 "{}" \;
# The app config contains the DB password: group-readable by FPM only
chown :{{ ampache_php_user }} {{ ampache_root_dir }}/web/config/ampache.cfg.php
chmod 640 {{ ampache_root_dir }}/web/config/ampache.cfg.php

@ -0,0 +1,37 @@
; {{ ansible_managed }}
; Dedicated PHP-FPM pool for this ampache instance.
[ampache_{{ ampache_id }}]
; Socket owned by root, group-readable by the web server only
listen.owner = root
listen.group = {{ httpd_user | default('apache') }}
listen.mode = 0660
listen = /run/php-fpm/ampache_{{ ampache_id }}.sock
; Workers run as the per-instance account
user = {{ ampache_php_user }}
group = {{ ampache_php_user }}
catch_workers_output = yes
; Process manager sizing
pm = dynamic
pm.max_children = 15
pm.start_servers = 3
pm.min_spare_servers = 3
pm.max_spare_servers = 6
pm.max_requests = 5000
; Long timeout: catalog/transcode requests can run for a while
request_terminate_timeout = 60m
php_flag[display_errors] = off
php_admin_flag[log_errors] = on
php_admin_value[error_log] = syslog
php_admin_value[memory_limit] = 512M
; Keep sessions/uploads/tmp inside the instance tree
php_admin_value[session.save_path] = {{ ampache_root_dir }}/sessions
php_admin_value[upload_tmp_dir] = {{ ampache_root_dir }}/tmp
php_admin_value[sys_temp_dir] = {{ ampache_root_dir }}/tmp
php_admin_value[post_max_size] = 5M
php_admin_value[upload_max_filesize] = 5M
; Hardening: no shell access, confined to the instance tree
php_admin_value[disable_functions] = system, show_source, symlink, exec, dl, shell_exec, passthru, phpinfo, escapeshellarg, escapeshellcmd
php_admin_value[open_basedir] = {{ ampache_root_dir }}
php_admin_value[max_execution_time] = 1800
php_admin_value[max_input_time] = 60
php_admin_flag[allow_url_include] = off
php_admin_flag[allow_url_fopen] = on
php_admin_flag[file_uploads] = on
php_admin_flag[session.cookie_httponly] = on

@ -0,0 +1,3 @@
#!/bin/sh
# Post-backup hook: remove the SQL dump created by the pre-backup script.
# Fixed path: dumps are written to db_dumps/ (see dump_db.j2 and the
# directory-structure task), not db_dump/ — the old path never matched,
# so dumps were kept forever.
rm -f {{ ampache_root_dir }}/db_dumps/*

@ -0,0 +1,6 @@
<?php
# Just a dummy redirection so we can protect /sso.php with Lemonldap::NG
# (the handler authenticates the request before PHP runs; this script
# only bounces the now-authenticated user back to the app root)
header('Location: /');
?>

@ -0,0 +1,36 @@
---
# Defaults for the backup role: a restricted local account (lbkp)
# that a central backup server logs into over SSH.
# The shell of the lbkp account
backup_shell: '/bin/bash'
# List of commands lbkp will be allowed to run as root, with sudo
backup_sudo_base_commands:
- /usr/bin/rsync
- /usr/local/bin/pre-backup
- /usr/local/bin/post-backup
- /bin/tar
- /bin/gtar
backup_sudo_extra_commands: []
backup_sudo_commands: "{{ backup_sudo_base_commands + backup_sudo_extra_commands }}"
# List of ssh public keys to deploy
backup_ssh_keys: []
# Options to set for the ssh keys, to restrict what they can do
backup_ssh_keys_options:
- no-X11-forwarding
- no-agent-forwarding
- no-pty
# List of IP address allowed to use the ssh keys
# Empty list means no restriction
backup_src_ip: []
# Custom pre / post script
backup_pre_script: |
  #!/bin/bash -e
  # Nothing to do
backup_post_script: |
  #!/bin/bash -e
  # Nothing to do
...

@ -0,0 +1,53 @@
#!/usr/bin/perl -w
# This script will backup the config of MegaRAID based
# RAID controllers. The saved config can be restored with
# MegaCli -CfgRestore -f /home/lbkp/mega_0.bin for example
# It also create a backup of the config as text, so you can
# manually check how things were configured at a certain point in time
# If MegaCli is not installed, then the script does nothing
use strict;

# Locate the MegaCli binary (64 bit flavour preferred)
my $megacli = undef;
if (-x '/opt/MegaRAID/MegaCli/MegaCli64'){
  $megacli = '/opt/MegaRAID/MegaCli/MegaCli64';
} elsif (-x '/opt/MegaRAID/MegaCli/MegaCli'){
  $megacli = '/opt/MegaRAID/MegaCli/MegaCli';
}
exit (0) unless($megacli);

# Number of adapters, as reported by MegaCli
my $adapters = 0;
foreach (qx($megacli -adpCount -NoLog)) {
  if ( m/Controller Count:\s*(\d+)/ ) {
    $adapters = $1;
    last;
  }
}

foreach my $adp (0..$adapters-1){
  my $hba = 0;
  my $failgrouplist = 0;
  foreach my $line (qx($megacli -CfgDsply -a$adp -NoLog)) {
    if ( $line =~ m/Failed to get Disk Group list/ ) {
      $failgrouplist = 1;
    } elsif ( $line =~ m/Product Name:.*(JBOD|HBA)/ ) {
      $hba = 1;
    }
  }
  # Skip adapter if in HBA mode (nothing to save)
  next if ($hba && $failgrouplist);
  # Save the config in binary format
  qx($megacli -CfgSave -f /home/lbkp/megaraid/cfg_$adp.bin -a$adp -NoLog);
  die "Failed to backup conf for adapter $adp\n" unless ($? == 0);
  # Now also save a text representation. Capture the output *before*
  # opening the file so $? still holds the MegaCli exit status when checked
  # (the old code checked $? only after printing).
  my @cfg = qx($megacli -CfgDsply -a$adp -NoLog);
  die "Failed to backup Cfg text description for adapter $adp\n" unless ($? == 0);
  # Lexical filehandle + explicit error check instead of a silent bareword open
  open(my $txt, '>', "/home/lbkp/megaraid/cfg_$adp.txt")
    or die "Failed to open /home/lbkp/megaraid/cfg_$adp.txt: $!\n";
  print $txt @cfg;
  close($txt);
}

@ -0,0 +1,3 @@
#!/bin/sh
# Pre-backup hook: save the list of installed RPMs (name/version/release,
# gpg-pubkey pseudo-packages filtered out) so the package set can be
# restored from a backup.
/bin/rpm -qa --qf "%{NAME}\t%{VERSION}\t%{RELEASE}\n" | grep -v gpg-pubkey | sort > /home/lbkp/rpms.list

@ -0,0 +1,9 @@
#!/bin/bash
# Run every post-backup hook, then release the global lock taken
# by the pre-backup script.
if [ -d "/etc/backup/post.d" ]; then
  for H in $(find /etc/backup/post.d -type f -o -type l | sort); do
    # Quote the hook path; only run it if executable, forwarding our args
    [ -x "$H" ] && "$H" "$@"
  done
fi
# Remove the lock
rm -f /var/lock/bkp.lock

@ -0,0 +1,29 @@
#!/bin/bash
set -e
# 2 locks are needed. The first one ensure we don't run
# The pre-backup script twice. It's an atomic lock.
# Then we need a second lock which will last until the post-backup ran
# This one doesn't need to be atomic (as we already checked this)
PRELOCKFILE="/var/lock/pre-bkp.lock"
exec 200>$PRELOCKFILE
# Use a { } command group, not a ( ) subshell: 'exit' inside a subshell
# only terminates the subshell, and the script would previously carry on
# without holding the lock
flock -n 200 || { echo "Couldn't acquire pre-backup lock"; exit 1; }
PID=$$
echo $PID 1>&200
if [ -e /var/lock/bkp.lock ]; then
  # Consider the lock to be stale if it's older than 8 hours
  if [ "$(( $(date +"%s") - $(stat -c "%Y" /var/lock/bkp.lock) ))" -gt "28800" ]; then
    rm /var/lock/bkp.lock
  else
    echo "Another backup is running"
    exit 1
  fi
fi
touch /var/lock/bkp.lock
if [ -d "/etc/backup/pre.d" ]; then
  for H in $(find /etc/backup/pre.d -type f -o -type l | sort); do
    # Quote the hook path; only run it if executable, forwarding our args
    [ -x "$H" ] && "$H" "$@"
  done
fi

@ -0,0 +1,3 @@
#!/bin/bash -e
# Post-backup hook: remove the MegaRAID config dumps created by
# dump-megaraid-cfg once they have been backed up.
rm -f /home/lbkp/megaraid/*

@ -0,0 +1,84 @@
---
# Set up the local backup account (lbkp), its sudo rights, SSH keys
# and the pre/post backup hook directories.
- name: Install backup tools
  yum: name=rsync
  when: ansible_os_family == 'RedHat'
  tags: backup

- name: Install backup tools
  apt: name=rsync
  when: ansible_os_family == 'Debian'
  tags: backup

- name: Create a local backup user account
  user: name=lbkp comment="Local backup account" system=yes shell={{ backup_shell }}
  tags: backup

- name: Deploy sudo configuration
  template: src=sudo.j2 dest=/etc/sudoers.d/backup mode=400
  tags: backup

- name: Deploy SSH keys for the backup account
  authorized_key:
    user: lbkp
    key: "{{ backup_ssh_keys | join(\"\n\") }}"
    key_options: "{{ backup_ssh_keys_options | join(',') }}"
    exclusive: yes
  when: backup_src_ip is not defined or backup_src_ip | length < 1
  tags: backup

- name: Deploy SSH keys for the backup account (with source IP restriction)
  authorized_key:
    user: lbkp
    key: "{{ backup_ssh_keys | join(\"\n\") }}"
    key_options: "from=\"{{ backup_src_ip | join(',') }}\",{{ backup_ssh_keys_options | join(',') }}"
    exclusive: yes
  when:
    - backup_src_ip is defined
    - backup_src_ip | length > 0
  tags: backup

- name: Create pre and post backup hook dir
  file: path={{ item }} state=directory mode=750
  with_items:
    - /etc/backup/pre.d
    - /etc/backup/post.d
  tags: backup

- name: Deploy default pre/post backup hooks
  copy:
    content: "{{ item.content }}"
    dest: /etc/backup/{{ item.type }}.d/default
    # Must be a quoted octal string: a bare YAML integer 755 is read as
    # decimal 755 (= 0o1363) and yields wrong permissions
    mode: "0755"
  loop:
    - type: pre
      content: "{{ backup_pre_script }}"
    - type: post
      content: "{{ backup_post_script }}"
  tags: backup

- name: Copy pre-backup script
  copy: src={{ item }} dest=/usr/local/bin/{{ item }} mode=750 group=lbkp
  with_items:
    - pre-backup
    - post-backup
  tags: backup

- name: Deploy rpm dump list script
  copy: src=dump-rpms-list dest=/etc/backup/pre.d/dump-rpms-list mode=755
  when: ansible_os_family == 'RedHat'
  tags: backup

- name: Create megaraid dump dir
  file: path=/home/lbkp/megaraid state=directory
  tags: backup

- name: Deploy MegaCli backup scripts
  copy: src={{ item.script }} dest=/etc/backup/{{ item.type }}.d/{{ item.script }} mode=750
  with_items:
    - script: dump-megaraid-cfg
      type: pre
    - script: rm-megaraid-cfg
      type: post
  when: lsi_controllers | default([]) | length > 0
  tags: backup
...

@ -0,0 +1,2 @@
Defaults:lbkp !requiretty
lbkp ALL=(root) NOPASSWD: {{ backup_sudo_commands | join(',') }}

@ -0,0 +1,19 @@
---
# You can choose either 3 or 4
bpc_major_version: 3
# Auth to access BackupPC. Can be basic, lemonldap, lemonldap2 or none
bpc_auth: basic
# List of allowed IP addresses
bpc_src_ip: []
# Should backuppc be started on boot ?
# You might want to turn this off if for example you must unlock
# the device on which you have your backup, and manually start backuppc after that
bpc_enabled: true
# Should /BackupPC aliases be added on the main vhost ?
# You might want to, but you can also disable this and grant access only through a dedicated vhost
bpc_alias_on_main_vhost: true

@ -0,0 +1,5 @@
---
- include: ../httpd_common/handlers/main.yml
...

@ -0,0 +1,3 @@
---
dependencies:
- { role: httpd_front }

@ -0,0 +1,48 @@
---
- name: Install BackupPC 4
yum:
name:
- BackupPC4
- fuse-backuppcfs4
when: bpc_major_version == 4
tags: bpc
- name: Install BackupPC 3
yum:
name:
- BackupPC
- fuse-backuppcfs
when: bpc_major_version != 4
tags: bpc
- name: Install tools
yum:
name:
- rsync
- tar
- samba-client
- openssh-clients
- BackupPC-server-scripts
- fuse-chunkfs
tags: bpc
- name: Deploy httpd conf
template: src=httpd.conf.j2 dest=/etc/httpd/ansible_conf.d/40-BackupPC.conf
notify: reload httpd
tags: bpc
- name: Deploy sudo config
template: src=sudoers.j2 dest=/etc/sudoers.d/backuppc mode=0400
tags: bpc
- name: Create SSH Key
user:
name: backuppc
generate_ssh_key: yes
ssh_key_bits: 4096
tags: bpc
- name: Start and enable the service
service: name=backuppc state=started enabled={{ bpc_enabled }}
tags: bpc

@ -0,0 +1,25 @@
<Directory /usr/share/BackupPC/>
SSLRequireSSL on
{% if bpc_auth == "lemonldap" %}
PerlHeaderParserHandler Lemonldap::NG::Handler
{% elif bpc_auth == "lemonldap2" %}
PerlHeaderParserHandler Lemonldap::NG::Handler::ApacheMP2
{% elif bpc_auth == "basic" %}
AuthType Basic
AuthUserFile /etc/BackupPC/apache.users
AuthName "BackupPC"
Require valid-user
{% endif %}
{% if bpc_src_ip | length < 1 %}
Require all denied
{% else %}
Require ip {{ bpc_src_ip | join(' ') }}
{% endif %}
</Directory>
{% if bpc_auth != False and bpc_auth != 'none' and bpc_alias_on_main_vhost == True %}
Alias /BackupPC/images /usr/share/BackupPC/html/
ScriptAlias /BackupPC /usr/share/BackupPC/sbin/BackupPC_Admin
ScriptAlias /backuppc /usr/share/BackupPC/sbin/BackupPC_Admin
{% endif %}

@ -0,0 +1,3 @@
Defaults:backuppc !requiretty
Cmnd_Alias BACKUPPC = /usr/bin/rsync, /bin/tar, /bin/gtar, /usr/local/bin/pre-backup, /usr/local/bin/post-backup, /usr/bin/virt-backup
backuppc ALL=(root) NOPASSWD: BACKUPPC

@ -0,0 +1,45 @@
---
bitwarden_version: 1.14.2
bitwarden_archive_url: https://github.com/dani-garcia/bitwarden_rs/archive/{{ bitwarden_version }}.tar.gz
bitwarden_archive_sha1: 1bb75b6ab11371ab60380ef19151ebd9410de4ef
bitwarden_web_version: 2.13.2b
bitwarden_web_archive_url: https://github.com/dani-garcia/bw_web_builds/releases/download/v{{ bitwarden_web_version }}/bw_web_v{{ bitwarden_web_version }}.tar.gz
bitwarden_web_archive_sha1: df6f280731b852b31c3d938bfa1733140be9abb5
bitwarden_root_dir: /opt/bitwarden_rs
bitwarden_user: bitwarden_rs
# Database : can be sqlite or mysql
bitwarden_db_engine: sqlite
bitwarden_db_server: "{{ mysql_server | default('localhost') }}"
bitwarden_db_port: 3306
bitwarden_db_name: bitwardenrs
bitwarden_db_user: bitwardenrs
# A random one will be created if not defined
# bitwaren_db_pass: S3cr3t.
# Port on which bitwarden will bind
bitwarden_http_port: 8000
bitwarden_ws_port: 8001
# List of IP addresses (can be CIDR notation) which will be able to
# access bitwarden ports
bitwarden_src_ip: []
bitwarden_web_src_ip: []
# Public URL on which bitwarden will be accessible
bitwarden_public_url: http://{{ inventory_hostname }}:{{ bitwarden_http_port }}
# Should registration be enabled
bitwarden_registration: False
# List of domain names for which registration will be accepted
# Those domains will be accepted for registration even if bitwarden_registration is set to False
bitwarden_domains_whitelist:
- "{{ ansible_domain }}"
# Admin Token to access /admin. A random one is created if not defined
# bitwarden_admin_token: S3cr3t.
# Or you can just disable the admin token. But you have to protect /admin yourself (eg, on a reverse proxy)
bitwarden_disable_admin_token: False

@ -0,0 +1,5 @@
---
# Skip the restart when service.yml just (re)started the unit in this run,
# to avoid a needless double restart. Guard against bitwarden_started being
# undefined (e.g. a tag-limited run where service.yml was skipped), which
# would otherwise make the handler itself fail.
- name: restart bitwarden_rs
  service: name=bitwarden_rs state=restarted
  when: bitwarden_started is not defined or not bitwarden_started.changed

@ -0,0 +1,7 @@
---
dependencies:
- role: rust
- role: nginx
- role: mysql_server
when: bitwarden_db_engine == 'mysql' and (bitwarden_db_server == 'localhost' or bitwarden_db_server == '127.0.0.1')

@ -0,0 +1,12 @@
---
- name: Compress previous version
command: tar cJf {{ bitwarden_root_dir }}/archives/{{ bitwarden_current_version }}+{{ bitwarden_web_current_version }}.txz ./
args:
warn: False
chdir: "{{ bitwarden_root_dir }}/archives/{{ bitwarden_current_version }}+{{ bitwarden_web_current_version }}"
tags: bitwarden
- name: Remove archive dir
file: path={{ bitwarden_root_dir }}/archives/{{ bitwarden_current_version }}+{{ bitwarden_web_current_version }} state=absent
tags: bitwarden

@ -0,0 +1,23 @@
---
- name: Create archive dir
file: path={{ bitwarden_root_dir }}/archives/{{ bitwarden_current_version }}+{{ bitwarden_web_current_version }} state=directory
tags: bitwarden
- name: Stop bitwarden during upgrade
service: name=bitwarden_rs state=stopped
tags: bitwarden
- name: Archive current version
synchronize:
src: "{{ bitwarden_root_dir }}/{{ item }}"
dest: "{{ bitwarden_root_dir }}/archives/{{ bitwarden_current_version }}+{{ bitwarden_web_current_version }}/"
recursive: True
delete: True
delegate_to: "{{ inventory_hostname }}"
loop:
- bitwarden_rs
- data
- etc
- web-vault
tags: bitwarden

@ -0,0 +1,8 @@
---
# Remove build leftovers once the new version is installed
- name: Remove temp files
  # Module name fixed: 'files' is not a valid Ansible module and the task
  # would fail at runtime; 'file' with state=absent is intended
  file: path={{ item }} state=absent
  loop:
    - "{{ bitwarden_root_dir }}/tmp/bitwarden_rs-{{ bitwarden_version }}"
    - "{{ bitwarden_root_dir }}/tmp/bitwarden_rs-{{ bitwarden_version }}.tar.gz"
  tags: bitwarden

@ -0,0 +1,11 @@
---
- name: Deploy configuration
template: src=bitwarden_rs.conf.j2 dest={{ bitwarden_root_dir }}/etc/bitwarden_rs.conf group={{ bitwarden_user }} mode=640
notify: restart bitwarden_rs
tags: bitwarden
- name: Deploy nginx configuration
template: src=nginx.conf.j2 dest=/etc/nginx/ansible_conf.d/31-bitwarden.conf
notify: reload nginx
tags: bitwarden

@ -0,0 +1,24 @@
---
- name: Create directories
  # Modes are quoted octal strings: bare YAML integers such as 755 are read
  # as decimal by the YAML parser and would produce wrong permissions
  file: path={{ bitwarden_root_dir }}/{{ item.dir }} state=directory owner={{ item.owner | default(omit) }} group={{ item.group | default(omit) }} mode={{ item.mode | default(omit) }}
  loop:
    - dir: /
      mode: "0755"
    - dir: etc
      group: "{{ bitwarden_user }}"
      mode: "0750"
    - dir: tmp
      mode: "0700"
    - dir: meta
      mode: "0700"
    - dir: archives
      mode: "0700"
    - dir: data
      owner: "{{ bitwarden_user }}"
      group: "{{ bitwarden_user }}"
      mode: "0700"
    - dir: web-vault
    - dir: backup
      mode: "0700"
  tags: bitwarden

@ -0,0 +1,67 @@
---
- name: Set initial install modes
block:
- set_fact: bitwarden_install_mode='none'
- set_fact: bitwarden_current_version=''
- set_fact: bitwarden_web_install_mode='none'
- set_fact: bitwarden_web_current_version=''
tags: bitwarden
- name: Check if server is installed
stat: path={{ bitwarden_root_dir }}/meta/ansible_version
register: bitwarden_version_file
tags: bitwarden
- when: bitwarden_version_file.stat.exists
block:
- name: Check installed version
slurp: src={{ bitwarden_root_dir }}/meta/ansible_version
register: bitwarden_current_version
- set_fact: bitwarden_current_version={{ bitwarden_current_version.content | b64decode | trim }}
- set_fact: bitwarden_install_mode='upgrade'
when: bitwarden_current_version != bitwarden_version
tags: bitwarden
- when: not bitwarden_version_file.stat.exists
block:
- set_fact: bitwarden_install_mode='install'
tags: bitwarden
- name: Check if web vault is installed
stat: path={{ bitwarden_root_dir }}/meta/ansible_web_version
register: bitwarden_web_version_file
tags: bitwarden
- when: bitwarden_web_version_file.stat.exists
block:
- name: Check installed version
slurp: src={{ bitwarden_root_dir }}/meta/ansible_web_version
register: bitwarden_web_current_version
- set_fact: bitwarden_web_current_version={{ bitwarden_web_current_version.content | b64decode | trim }}
- set_fact: bitwarden_web_install_mode='upgrade'
when: bitwarden_web_current_version != bitwarden_web_version
tags: bitwarden
- when: not bitwarden_web_version_file.stat.exists
block:
- set_fact: bitwarden_web_install_mode='install'
tags: bitwarden
- when: bitwarden_admin_token is not defined
name: Generate a random admin token
block:
- import_tasks: ../includes/get_rand_pass.yml
vars:
- pass_file: "{{ bitwarden_root_dir }}/meta/ansible_admin_token"
- set_fact: bitwarden_admin_token={{ rand_pass }}
tags: bitwarden
- when: bitwarden_db_pass is not defined
tags: bitwarden
block:
- import_tasks: ../includes/get_rand_pass.yml
vars:
- pass_file: "{{ bitwarden_root_dir }}/meta/ansible_dbpass"
- set_fact: bitwarden_db_pass={{ rand_pass }}

@ -0,0 +1,97 @@
---
- name: Install needed packages
yum:
name:
- openssl-devel
- gcc
- sqlite
tags: bitwarden
- name: Check if MariaDB version is set
fail: msg="Need to define mysql_mariadb_version"
when:
- bitwarden_db_engine == 'mysql'
- mysql_mariadb_version is not defined or mysql_mariadb_version == 'default'
tags: bitwarden
- name: Install MariaDB devel package
yum:
name:
- MariaDB-devel
- /usr/lib64/libmariadb.so
when: bitwarden_db_engine == 'mysql'
tags: bitwarden
- when: bitwarden_install_mode != 'none'
tags: bitwarden
block:
- name: Download bitwarden
get_url:
url: "{{ bitwarden_archive_url }}"
dest: "{{ bitwarden_root_dir }}/tmp"
checksum: sha1:{{ bitwarden_archive_sha1 }}
- name: Extract bitwarden archive
unarchive:
src: "{{ bitwarden_root_dir }}/tmp/bitwarden_rs-{{ bitwarden_version }}.tar.gz"
dest: "{{ bitwarden_root_dir }}/tmp"
remote_src: True
- name: Build bitwarden
command: bash -lc 'cargo build --features={{ (bitwarden_db_engine == "mysql") | ternary("mysql","sqlite") }} --release'
args:
chdir: "{{ bitwarden_root_dir }}/tmp/bitwarden_rs-{{ bitwarden_version }}"
- name: Install binary
copy: src={{ bitwarden_root_dir }}/tmp/bitwarden_rs-{{ bitwarden_version }}/target/release/bitwarden_rs dest="{{ bitwarden_root_dir }}/" mode=755 remote_src=True
notify: restart bitwarden_rs
- when: bitwarden_web_install_mode != 'none'
tags: bitwarden
block:
- name: Download bitwarden web vault
get_url:
url: "{{ bitwarden_web_archive_url }}"
dest: "{{ bitwarden_root_dir }}/tmp"
checksum: sha1:{{ bitwarden_web_archive_sha1 }}
- name: Extract the archive
unarchive:
src: "{{ bitwarden_root_dir }}/tmp/bw_web_v{{ bitwarden_web_version }}.tar.gz"
dest: "{{ bitwarden_root_dir }}/tmp"
remote_src: True
- name: Move files to their final location
synchronize:
src: "{{ bitwarden_root_dir }}/tmp/web-vault/"
dest: "{{ bitwarden_root_dir }}/web-vault/"
recursive: True
delete: True
delegate_to: "{{ inventory_hostname }}"
- name: Install systemd unit
template: src=bitwarden_rs.service.j2 dest=/etc/systemd/system/bitwarden_rs.service
register: bitwarden_unit
tags: bitwarden
- name: Reload systemd
systemd: daemon_reload=True
when: bitwarden_unit.changed
tags: bitwarden
- name: Install pre/post backup hooks
template: src={{ item }}-backup.sh.j2 dest=/etc/backup/{{ item }}.d/bitwarden_rs.sh mode=755
loop:
- pre
- post
tags: bitwarden
- import_tasks: ../includes/webapps_create_mysql_db.yml
vars:
- db_name: "{{ bitwarden_db_name }}"
- db_user: "{{ bitwarden_db_user }}"
- db_server: "{{ bitwarden_db_server }}"
- db_pass: "{{ bitwarden_db_pass }}"
when: bitwarden_db_engine == 'mysql'
tags: bitwarden

@ -0,0 +1,9 @@
---
- name: Handle bitwarden_rs ports in the firewall
iptables_raw:
name: bitwarden_rs
state: "{{ (bitwarden_src_ip | length > 0) | ternary('present','absent') }}"
rules: "-A INPUT -m state --state NEW -m multiport -p tcp --dports {{ bitwarden_http_port }},{{ bitwarden_ws_port }} -s {{ bitwarden_src_ip | join(',') }} -j ACCEPT"
when: iptables_manage | default(True)
tags: firewall,bitwarden

@ -0,0 +1,14 @@
---
- include: user.yml
- include: directories.yml
- include: facts.yml
- include: archive_pre.yml
when: bitwarden_install_mode == 'upgrade' or bitwarden_web_install_mode == 'upgrade'
- include: install.yml
- include: conf.yml
- include: iptables.yml
- include: service.yml
- include: write_version.yml
- include: archive_post.yml
when: bitwarden_install_mode == 'upgrade' or bitwarden_web_install_mode == 'upgrade'

@ -0,0 +1,6 @@
---
- name: Start and enable the service
service: name=bitwarden_rs state=started enabled=True
register: bitwarden_started
tags: bitwarden

@ -0,0 +1,5 @@
---
- name: Create bitwarden_rs user
user: name={{ bitwarden_user }} home={{ bitwarden_root_dir }} system=True
tags: bitwarden

@ -0,0 +1,10 @@
---
- name: Write versions
copy: content={{ item.version }} dest={{ bitwarden_root_dir }}/meta/{{ item.file }}
loop:
- version: "{{ bitwarden_version }}"
file: ansible_version
- version: "{{ bitwarden_web_version }}"
file: ansible_web_version
tags: bitwarden

@ -0,0 +1,25 @@
IP_HEADER=X-Forwarded-For
SIGNUPS_VERIFY=true
SIGNUPS_ALLOWED={{ bitwarden_registration | ternary('true','false') }}
{% if bitwarden_domains_whitelist | length > 0 %}
SIGNUPS_DOMAINS_WHITELIST={{ bitwarden_domains_whitelist | join(',') }}
{% endif %}
ADMIN_TOKEN={{ bitwarden_admin_token }}
DISABLE_ADMIN_TOKEN={{ bitwarden_disable_admin_token | ternary('true','false') }}
DOMAIN={{ bitwarden_public_url }}
ROCKET_ENV=prod
ROCKET_ADDRESS=0.0.0.0
ROCKET_PORT={{ bitwarden_http_port }}
WEBSOCKET_ENABLED=true
WEBSOCKET_PORT={{ bitwarden_ws_port }}
SMTP_HOST=localhost
SMTP_PORT=25
SMTP_SSL=false
SMTP_FROM=bitwarden-rs-noreply@{{ ansible_domain }}
{% if bitwarden_db_engine == 'mysql' %}
DATABASE_URL=mysql://{{ bitwarden_db_user }}:{{ bitwarden_db_pass | urlencode | regex_replace('/','%2F') }}@{{ bitwarden_db_server }}:{{ bitwarden_db_port }}/{{ bitwarden_db_name }}
ENABLE_DB_WAL=false
{% else %}
DATABASE_URL=data/db.sqlite3
{% endif %}
# vim: syntax=ini

@ -0,0 +1,27 @@
[Unit]
Description=Bitwarden Server (Rust Edition)
Documentation=https://github.com/dani-garcia/bitwarden_rs
After=network.target
{% if bitwarden_db_engine == 'mysql' and (bitwarden_db_server == 'localhost' or bitwarden_db_server == '127.0.0.1') %}
After=mariadb.service
Requires=mariadb.service
{% endif %}
[Service]
User={{ bitwarden_user }}
Group={{ bitwarden_user }}
EnvironmentFile={{ bitwarden_root_dir }}/etc/bitwarden_rs.conf
ExecStart={{ bitwarden_root_dir }}/bitwarden_rs
PrivateTmp=true
PrivateDevices=true
ProtectHome=true
ProtectSystem=full
WorkingDirectory={{ bitwarden_root_dir }}
ReadWriteDirectories={{ bitwarden_root_dir }}/data
ReadOnlyDirectories={{ bitwarden_root_dir }}/etc {{ bitwarden_root_dir }}/web-vault
Restart=on-failure
StartLimitInterval=0
RestartSec=30
[Install]
WantedBy=multi-user.target

@ -0,0 +1,71 @@
server {
listen 443 ssl http2;
server_name {{ bitwarden_public_url | urlsplit('hostname') }};
include /etc/nginx/ansible_conf.d/acme.inc;
{% if bitwarden_cert_path is defined and bitwarden_key_path is defined %}
ssl_certificate {{ bitwarden_cert_path }};
ssl_certificate_key {{ bitwarden_key_path }};
{% elif bitwarden_letsencrypt_cert is defined and bitwarden_letsencrypt_cert == True %}
ssl_certificate /var/lib/dehydrated/certificates/certs/{{ bitwarden_public_url | urlsplit('hostname') }}/fullchain.pem;
ssl_certificate_key /var/lib/dehydrated/certificates/certs/{{ bitwarden_public_url | urlsplit('hostname') }}/privkey.pem;
{% elif bitwarden_letsencrypt_cert is string %}
ssl_certificate /var/lib/dehydrated/certificates/certs/{{ bitwarden_letsencrypt_cert }}/fullchain.pem;
ssl_certificate_key /var/lib/dehydrated/certificates/certs/{{ bitwarden_letsencrypt_cert }}/privkey.pem;
{% endif %}
server_name {{ bitwarden_public_url | urlsplit('hostname') }};
root {{ bitwarden_root_dir }}/web-vault;
client_max_body_size 512M;
if ($request_method !~ ^(GET|POST|HEAD|PUT|DELETE)$ ) {
return 405;
}
location /notifications/hub {
proxy_pass http://localhost:{{ bitwarden_ws_port }};
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection "upgrade";
}
location /notifications/hub/negotiate {
proxy_pass http://localhost:{{ bitwarden_http_port }};
}
location @proxy {
proxy_pass http://localhost:{{ bitwarden_http_port }};
}
location / {
try_files $uri $uri/index.html @proxy;
}
add_header X-Frame-Options "DENY";
add_header X-Content-Type-Options "nosniff";
add_header X-XSS-Protection "1; mode=block";
add_header Strict-Transport-Security "$hsts_header";
# Send info about the original request to the backend
proxy_set_header X-Forwarded-For "$proxy_add_x_forwarded_for";
proxy_set_header X-Real-IP "$remote_addr";
proxy_set_header X-Forwarded-Proto "$scheme";
proxy_set_header X-Forwarded-Host "$host";
proxy_set_header Host "$host";
# Set the timeout to read responses from the backend
proxy_read_timeout 60s;
# Enable Keep Alive to the backend
proxy_socket_keepalive on;
# Disable buffering large files
proxy_max_temp_file_size 5m;
allow 127.0.0.1;
{% for ip in bitwarden_web_src_ip %}
allow {{ ip }};
{% endfor %}
deny all;
}

@ -0,0 +1,4 @@
#!/bin/bash -e
# Unmount first, then clean the staging dir. The mount point must match the
# one created by the pre-backup hook (/home/lbkp/bitwarden_rs, not
# /home/lbkp/bitwarden): with the wrong path, umount fails, the script aborts
# (bash -e) and the read-only bind mount is left behind.
umount /home/lbkp/bitwarden_rs
rm -f {{ bitwarden_root_dir }}/backup/*

@ -0,0 +1,17 @@
#!/bin/bash -e
mkdir -p /home/lbkp/bitwarden_rs/
cp {{ bitwarden_root_dir }}/data/rsa* {{ bitwarden_root_dir }}/backup/
{% if bitwarden_db_engine == 'mysql' %}
/usr/bin/mysqldump \
{% if bitwarden_db_server != 'localhost' and bitwarden_db_server != '127.0.0.1' %}
--user='{{ bitwarden_db_user }}' \
--password='{{ bitwarden_db_pass }}' \
--host='{{ bitwarden_db_server }}' \
{% endif %}
--quick --single-transaction \
--add-drop-table {{ bitwarden_db_name }} | zstd -T0 -c > {{ bitwarden_root_dir }}/backup/{{ bitwarden_db_name }}.sql.zstd
{% else %}
sqlite3 {{ bitwarden_root_dir }}/data/db.sqlite3 ".backup '{{ bitwarden_root_dir }}/backup/db.sqlite3'"
{% endif %}
mount -o bind,ro {{ bitwarden_root_dir }}/backup/ /home/lbkp/bitwarden_rs/

@ -0,0 +1,117 @@
---
bm_http_ports:
- 80
- 443
bm_http_src_ip:
- 0.0.0.0/0
bm_imap_ports:
- 143
- 993
bm_imap_src_ip:
- 0.0.0.0/0
bm_pop_ports:
- 110
- 995
bm_pop_src_ip:
- 0.0.0.0/0
bm_smtp_ports:
- 25
- 465
- 587
bm_smtp_src_ip:
- 0.0.0.0/0
bm_milter_ports:
- 2500
# Key name fixed: 'bm_milter_src:ip' defined a key that the firewall task
# never reads; tasks/main.yml looks up bm_milter_src_ip, which was undefined
bm_milter_src_ip: []
bm_int_ports:
- 24
- 144
- 1110
- 1143
- 2000
- 2400
- 2500
- 4444
- 5280
- 5290
- 5432
- '5701:5715'
- 8021
- 8022
- 8079
- 8080
- 8082
- 8084
- 8087
- 9083
- 9086
- 9090
- 9099
- 9200
- 9300
bm_int_src_ip: []
# bm_letsencrypt_cert: bluemind.domain.tld
bm_mem_alloc_base:
bm-core:
heap: 512
direct: 512
spare: 20
bm-node:
heap: 128
direct: 128
spare: 0
bm-eas:
heap: 256
direct: 128
spare: 2
bm-mapi:
heap: 512
direct: 256
spare: 10
bm-ips:
heap: 64
direct: 64
spare: 0
bm-hps:
heap: 128
direct: 128
spare: 0
bm-lmtpd:
heap: 128
direct: 128
spare: 0
bm-locator:
heap: 64
direct: 64
spare: 0
bm-milter:
heap: 64
direct: 64
spare: 0
bm-tika:
heap: 128
direct: 128
spare: 0
bm-xmpp:
heap: 32
direct: 32
spare: 0
bm-ysnp:
heap: 64
direct: 64
spare: 0
bm-elasticsearch:
heap: 512
direct: 512
spare: 20
bm_mem_alloc: {}
bm_mem_alloc_rules: "{{ bm_mem_alloc_base | combine(bm_mem_alloc, recursive=True) }}"

@ -0,0 +1,4 @@
---
- name: restart bluemind
command: bmctl restart

@ -0,0 +1,118 @@
---
- name: Install tools
yum:
name:
- socat
tags: bm
- name: Create dehydrated hook dir
file: path=/etc/dehydrated/hooks_deploy_cert.d state=directory
tags: bm
- name: Deploy dehydrated hook
template: src=dehydrated_deploy_hook.j2 dest=/etc/dehydrated/hooks_deploy_cert.d/bluemind mode=755
tags: bm
- name: Create local conf directory
file: path=/etc/bm/local state=directory
tags: bm
- name: Configure proxy
lineinfile:
regex: '^PROXY_OPTS=.*'
line: "PROXY_OPTS=\"{{ (system_proxy is defined and system_proxy != '') | ternary('-Dhttps.proxyHost=' ~ system_proxy | urlsplit('hostname') ~ ' -Dhttps.proxyPort=' ~ system_proxy | urlsplit('port') ~ ' -Dhttp.proxyHost=' ~ system_proxy | urlsplit('hostname') ~ ' -Dhttp.proxyPort=' ~ system_proxy | urlsplit('port'),'') }}\""
path: /etc/bm/local/{{ item }}.ini
create: True
loop:
- bm-core
- bm-webserver
notify: restart bluemind
tags: bm
- name: Configure JVM options
lineinfile:
regex: '^JVM_OPTS=.*'
line: "JVM_OPTS=\"${PROXY_OPTS}\""
path: /etc/bm/local/{{ item }}.ini
insertafter: '^PROXY_OPTS=.*'
loop:
- bm-core
- bm-webserver
notify: restart bluemind
tags: bm
- name: Configure memory allocation rules
template: src=rules.json.j2 dest=/etc/bm/local/rules.json
notify: restart bluemind
tags: bm
- set_fact:
bm_restart_services: "[ 'bm-elasticsearch', 'bm-mapi' ]"
tags: bm
- name: Create systemd unit snippet dirs
file: path=/etc/systemd/system/{{ item }}.service.d state=directory
loop: "{{ bm_restart_services }}"
tags: bm
- name: Configure systemd to restart services on failure
copy:
content: |
[Service]
TimeoutSec=60
StartLimitInterval=0
RestartSec=1
Restart=on-failure
dest: /etc/systemd/system/{{ item }}.service.d/restart.conf
loop: "{{ bm_restart_services }}"
register: bm_units
notify: restart bluemind
tags: bm
- name: Reload systemd
systemd: daemon_reload=True
when: bm_units.results | selectattr('changed','equalto',True) | list | length > 0
tags: bm
- name: Handle firewall ports
iptables_raw:
name: "{{ item.name }}"
state: "{{ (item.src | length > 0) | ternary('present','absent') }}"
rules: "{% if 'tcp' in item.proto | default(['tcp']) or item.proto | default('tcp') == 'tcp' %}-A INPUT -m state --state NEW -p tcp -m multiport --dports {{ item.ports | join(',') }} -s {{ item.src | join(',') }} -j ACCEPT\n{% endif %}
{% if 'udp' in item.proto | default(['tcp']) or item.proto | default('tcp') == 'udp' %}-A INPUT -m state --state NEW -p udp -m multiport --dports {{ item.ports | join(',') }} -s {{ item.src | join(',') }} -j ACCEPT{% endif %}"
when: iptables_manage | default(True)
with_items:
- ports: "{{ bm_http_ports }}"
name: bm_http_ports
src: "{{ bm_http_src_ip }}"
- ports: "{{ bm_imap_ports }}"
name: bm_imap_ports
src: "{{ bm_imap_src_ip }}"
- ports: "{{ bm_pop_ports }}"
name: bm_pop_ports
src: "{{ bm_pop_src_ip }}"
- ports: "{{ bm_smtp_ports }}"
name: bm_smtp_ports
src: "{{ bm_smtp_src_ip }}"
- ports: "{{ bm_milter_ports }}"
name: bm_milter_ports
src: "{{ bm_milter_src_ip }}"
- ports: "{{ bm_int_ports }}"
name: bm_int_ports
src: "{{ bm_int_src_ip }}"
tags: bm,firewall
- name: Create pre/post backup hook dir
file: path=/etc/backup/{{ item }}.d state=directory mode=750
loop:
- pre
- post
tags: bm
- name: Deploy pre and post backup script
template: src={{ item }}-backup.j2 dest=/etc/backup/{{ item }}.d/bluemind mode=755
loop:
- pre
- post
tags: bm

@ -0,0 +1,53 @@
<?xml version="1.0" encoding="UTF-8"?>
<configuration>
<appender name="CORE" class="ch.qos.logback.classic.net.SyslogAppender">
<syslogHost>localhost</syslogHost>
<port>10514</port>
<facility>DAEMON</facility>
<suffixPattern>bm-core - [%thread] %c{1} %p - %m\n</suffixPattern>
</appender>
<logger name="org.apache.directory.shared" level="ERROR" />
<root level="INFO">
<appender-ref ref="CORE" />
</root>
<appender name="XMPP" class="ch.qos.logback.classic.net.SyslogAppender">
<syslogHost>localhost</syslogHost>
<port>10514</port>
<facility>DAEMON</facility>
<suffixPattern>bm-xmpp - [%thread] %c{1} %p - %m\n</suffixPattern>
</appender>
<logger name="net.bluemind.xmpp" level="INFO" additivity="false" />
<appender-ref ref="XMPP" />
</logger>
<appender name="MAILINDEX" class="ch.qos.logback.classic.net.SyslogAppender">
<syslogHost>localhost</syslogHost>
<port>10514</port>
<facility>DAEMON</facility>
<suffixPattern>bm-mailindex - [%thread] %c{1} %p - %m\n</suffixPattern>
</appender>
<logger name="net.bluemind.index.mail" level="INFO" additivity="false" />
<appender-ref ref="MAILINDEX" />
</logger>
<appender name="SLOWRESTCALL" class="ch.qos.logback.classic.net.SyslogAppender">
<syslogHost>localhost</syslogHost>
<port>10514</port>
<facility>DAEMON</facility>
<suffixPattern>bm-slowrestcall - [%thread] %c{1} %p - %m\n</suffixPattern>
</appender>
<logger name="net.bluemind.core.rest.log.CallLogger" level="WARN" additivity="false" />
<appender-ref ref="SLOWRESTCALL" />
</logger>
<appender name="RESTSOCKJSPROXYHANDLER" class="ch.qos.logback.classic.net.SyslogAppender">
<syslogHost>localhost</syslogHost>
<port>10514</port>
<facility>DAEMON</facility>
<suffixPattern>bm-js - [%thread] %c{1} %p - %m\n</suffixPattern>
</appender>
<logger name="net.bluemind.core.rest.sockjs.vertx.RestSockJsProxyHandler" level="INFO" additivity="false" />
<appender-ref ref="RESTSOCKJSPROXYHANDLER" />
</logger>
</configuration>

@ -0,0 +1,59 @@
<?xml version="1.0" encoding="UTF-8"?>
<configuration>
<appender name="ALL" class="ch.qos.logback.classic.net.SyslogAppender">
<syslogHost>localhost</syslogHost>
<port>10514</port>
<facility>DAEMON</facility>
<suffixPattern>bm-eas - [%thread] %c{1} %p - %m\n</suffixPattern>
</appender>
<appender name="REQUESTS" class="ch.qos.logback.classic.net.SyslogAppender">
<syslogHost>localhost</syslogHost>
<port>10514</port>
<facility>DAEMON</facility>
<suffixPattern>bm-eas-requests - [%thread] %c{1} %p - %m\n</suffixPattern>
</appender>
<appender name="SIFT" class="ch.qos.logback.classic.sift.SiftingAppender">
<!-- in the absence of the class attribute, it is assumed that the
desired discriminator type is
ch.qos.logback.classic.sift.MDCBasedDiscriminator -->
<discriminator>
<key>user</key>
<defaultValue>anonymous</defaultValue>
</discriminator>
<sift>
<appender name="FILE-${user}" class="ch.qos.logback.core.rolling.RollingFileAppender">
<file>/var/log/bm-eas/user-eas-${user}.log</file>
<rollingPolicy class="ch.qos.logback.core.rolling.FixedWindowRollingPolicy">
<maxIndex>10</maxIndex>
<FileNamePattern>/var/log/bm-eas/user-eas-${user}.log.%i.gz</FileNamePattern>
</rollingPolicy>
<triggeringPolicy class="ch.qos.logback.core.rolling.SizeBasedTriggeringPolicy">
<MaxFileSize>5000KB</MaxFileSize>
</triggeringPolicy>
<encoder>
<pattern>%d [%thread] %c{1} %p - %m\n</pattern>
</encoder>
</appender>
</sift>
</appender>
<appender name="ASYNC_SIFT" class="ch.qos.logback.classic.AsyncAppender">
<queueSize>500</queueSize>
<discardingThreshold>0</discardingThreshold>
<appender-ref ref="SIFT" />
</appender>
<logger name="org.apache.directory.shared.asn1.ber" level="ERROR">
<appender-ref ref="ALL"/>
</logger>
<logger name="net.bluemind.vertx.common.request.impl.WrappedResponse" level="INFO" additivity="true">
<appender-ref ref="REQUESTS"/>
</logger>
<root level="INFO">
<appender-ref ref="ALL"/>
<appender-ref ref="ASYNC_SIFT"/>
</root>
</configuration>

@ -0,0 +1,12 @@
<?xml version="1.0" encoding="UTF-8"?>
<configuration>
<appender name="SYSLOG" class="ch.qos.logback.classic.net.SyslogAppender">
<syslogHost>localhost</syslogHost>
<port>10514</port>
<facility>DAEMON</facility>
<suffixPattern>bm-hps - [%thread] %c{1} %p - %m\n</suffixPattern>
</appender>
<root level="INFO">
<appender-ref ref="SYSLOG" />
</root>
</configuration>

@ -0,0 +1,12 @@
<?xml version="1.0" encoding="UTF-8"?>
<configuration>
<appender name="SYSLOG" class="ch.qos.logback.classic.net.SyslogAppender">
<syslogHost>localhost</syslogHost>
<port>10514</port>
<facility>DAEMON</facility>
<suffixPattern>bm-ips - [%thread] %c{1} %p - %m\n</suffixPattern>
</appender>
<root level="INFO">
<appender-ref ref="SYSLOG" />
</root>
</configuration>

@ -0,0 +1,12 @@
<?xml version="1.0" encoding="UTF-8"?>
<configuration>
<appender name="SYSLOG" class="ch.qos.logback.classic.net.SyslogAppender">
<syslogHost>localhost</syslogHost>
<port>10514</port>
<facility>DAEMON</facility>
<suffixPattern>bm-lmtp - [%thread] %c{1} %p - %m\n</suffixPattern>
</appender>
<root level="INFO">
<appender-ref ref="SYSLOG" />
</root>
</configuration>

@ -0,0 +1,13 @@
<?xml version="1.0" encoding="UTF-8"?>
<configuration>
<appender name="SYSLOG" class="ch.qos.logback.classic.net.SyslogAppender">
<syslogHost>localhost</syslogHost>
<port>10514</port>
<facility>DAEMON</facility>
<suffixPattern>bm-locator - [%thread] %c{1} %p - %m\n</suffixPattern>
</appender>
<logger name="org.apache.directory.shared.asn1.ber" level="ERROR" />
<root level="INFO">
<appender-ref ref="SYSLOG" />
</root>
</configuration>

@ -0,0 +1,12 @@
<?xml version="1.0" encoding="UTF-8"?>
<configuration>
<appender name="SYSLOG" class="ch.qos.logback.classic.net.SyslogAppender">
<syslogHost>localhost</syslogHost>
<port>10514</port>
<facility>DAEMON</facility>
<suffixPattern>bm-milter - [%thread] %c{1} %p - %m\n</suffixPattern>
</appender>
<root level="INFO">
<appender-ref ref="SYSLOG" />
</root>
</configuration>

@ -0,0 +1,13 @@
<?xml version="1.0" encoding="UTF-8"?>
<configuration>
<appender name="SYSLOG" class="ch.qos.logback.classic.net.SyslogAppender">
<syslogHost>localhost</syslogHost>
<port>10514</port>
<facility>DAEMON</facility>
<suffixPattern>bm-node - [%thread] %c{1} %p - %m\n</suffixPattern>
</appender>
<logger name="org.apache.sshd.server" level="WARN" />
<root level="INFO">
<appender-ref ref="SYSLOG" />
</root>
</configuration>

@ -0,0 +1,19 @@
[Unit]
Description=Bluemind syslog daemon
After=syslog.target
[Service]
Type=simple
ExecStart=/bin/socat -t0 -T0 -u -s udp4-recv:10514 stdout
User=bm-syslog
Group=bm-syslog
Restart=always
PrivateTmp=yes
PrivateDevices=yes
ProtectSystem=full
ProtectHome=yes
NoNewPrivileges=yes
[Install]
WantedBy=multi-user.target

@ -0,0 +1,12 @@
<?xml version="1.0" encoding="UTF-8"?>
<configuration>
<appender name="SYSLOG" class="ch.qos.logback.classic.net.SyslogAppender">
<syslogHost>localhost</syslogHost>
<port>10514</port>
<facility>DAEMON</facility>
<suffixPattern>bm-locator - [%thread] %c{1} %p - %m\n</suffixPattern>
</appender>
<root level="INFO">
<appender-ref ref="SYSLOG" />
</root>
</configuration>

@ -0,0 +1,43 @@
<?xml version="1.0" encoding="UTF-8"?>
<configuration>
<appender name="SYSLOG" class="ch.qos.logback.classic.net.SyslogAppender">
<syslogHost>localhost</syslogHost>
<port>10514</port>
<facility>DAEMON</facility>
<suffixPattern>bm-webserver - [%thread] %c{1} %p - %m\n</suffixPattern>
</appender>
<root level="INFO">
<appender-ref ref="SYSLOG" />
</root>
<appender name="DAV" class="ch.qos.logback.classic.net.SyslogAppender">
<syslogHost>localhost</syslogHost>
<port>10514</port>
<facility>DAEMON</facility>
<suffixPattern>bm-dav - [%thread] %c{1} %p - %m\n</suffixPattern>
</appender>
<logger name="net.bluemind.dav.server" level="INFO" additivity="false">
<appender-ref ref="DAV" />
</logger>
<appender name="SETUP" class="ch.qos.logback.classic.net.SyslogAppender">
<syslogHost>localhost</syslogHost>
<port>10514</port>
<facility>DAEMON</facility>
<suffixPattern>bm-setup - [%thread] %c{1} %p - %m\n</suffixPattern>
</appender>
<logger name="net.bluemind.sw.server" level="INFO" additivity="false">
<appender-ref ref="SETUP" />
</logger>
<appender name="JSLOG" class="ch.qos.logback.classic.net.SyslogAppender">
<syslogHost>localhost</syslogHost>
<port>10514</port>
<facility>DAEMON</facility>
<suffixPattern>bm-js-errors - [%thread] %c{1} %p - %m\n</suffixPattern>
</appender>
<logger name="net.bluemind.webmodule.server.handlers.LogHandler" level="INFO" additivity="false">
<appender-ref ref="JSLOG" />
</logger>
</configuration>

@ -0,0 +1,12 @@
<?xml version="1.0" encoding="UTF-8"?>
<configuration>
<appender name="SYSLOG" class="ch.qos.logback.classic.net.SyslogAppender">
<syslogHost>localhost</syslogHost>
<port>10514</port>
<facility>DAEMON</facility>
<suffixPattern>bm-xmpp - [%thread] %c{1} %p - %m\n</suffixPattern>
</appender>
<root level="INFO">
<appender-ref ref="SYSLOG" />
</root>
</configuration>

@ -0,0 +1,14 @@
<?xml version="1.0" encoding="UTF-8"?>
<configuration>
<appender name="SYSLOG" class="ch.qos.logback.classic.net.SyslogAppender">
<syslogHost>localhost</syslogHost>
<port>10514</port>
<facility>DAEMON</facility>
<suffixPattern>bm-ysnp - [%thread] %c{1} %p - %m\n</suffixPattern>
</appender>
<logger name="org.apache.directory.shared.asn1.ber" level="ERROR" />
<root level="INFO">
<appender-ref ref="SYSLOG" />
</root>
</configuration>

@ -0,0 +1,12 @@
#!/bin/bash -e
# Dehydrated deploy hook: when the certificate named in bm_letsencrypt_cert
# is (re)issued, rebuild the single PEM bundle BlueMind expects (private key
# followed by the full chain) and reload the services that use it.
# $1: name of the certificate that was just deployed.
{% if bm_letsencrypt_cert is defined %}
# "$1" must be quoted: with an unset/empty argument an unquoted expansion
# makes the test a syntax error. "=" is the portable string comparison.
if [ "$1" = "{{ bm_letsencrypt_cert }}" ]; then
    cat /var/lib/dehydrated/certificates/certs/{{ bm_letsencrypt_cert }}/privkey.pem > /etc/ssl/certs/bm_cert.pem
    cat /var/lib/dehydrated/certificates/certs/{{ bm_letsencrypt_cert }}/fullchain.pem >> /etc/ssl/certs/bm_cert.pem
    chown root:root /etc/ssl/certs/bm_cert.pem
    chmod 644 /etc/ssl/certs/bm_cert.pem
    /bin/systemctl reload postfix
    /bin/systemctl reload bm-nginx
fi
{% endif %}

@ -0,0 +1,5 @@
#!/bin/sh
# Post-backup hook: remove the temporary BlueMind dumps produced by the
# pre-backup script once the backup has copied them away.
set -e
rm -rf /home/lbkp/bm/*

@ -0,0 +1,17 @@
#!/bin/bash
# Pre-backup hook: dump every BlueMind PostgreSQL database (lz4-compressed),
# plus globals-only and schema-only dumps for partial restores, and archive
# the local configuration.
# pipefail is required (hence bash): with a plain pipe under `set -e` a
# failing pg_dump is masked by a successful lz4 and the backup is silently
# truncated or empty.
set -e -o pipefail
DEST=/home/lbkp/bm/pgsql
mkdir -p "$DEST"
chown postgres:postgres "$DEST"
chmod 700 "$DEST"
# Every database except template0/template1
for DB in $(su - postgres -c "/bin/psql -d postgres -qtc 'SELECT datname from pg_database' | grep -vP '^\s+?template[01]$'")
do
    su - postgres -c "/bin/pg_dump -Fp -Cc $DB" | /bin/nice -n 10 lz4 -c > "$DEST/$DB.sql.lz4"
done
# Roles/tablespaces and bare schema, dumped separately
su - postgres -c "/bin/pg_dumpall --globals-only" | /bin/nice -n 10 lz4 -c > "$DEST/pg_globals.sql.lz4"
su - postgres -c "/bin/pg_dumpall --schema-only" | /bin/nice -n 10 lz4 -c > "$DEST/pg_schema.sql.lz4"
cp -a /etc/bm/local /home/lbkp/bm/conf

@ -0,0 +1,11 @@
{# BlueMind per-product JVM memory allocation rules, rendered as JSON from
   bm_mem_alloc_rules: product name -> {heap, direct, spare}. #}
[
{% for product in bm_mem_alloc_rules.keys() | list %}
{
"product":"{{ product }}",
"defaultHeap":"{{ bm_mem_alloc_rules[product].heap }}",
"defaultDirect":"{{ bm_mem_alloc_rules[product].direct }}",
"sparePercent":{{ bm_mem_alloc_rules[product].spare }}
}{% if not loop.last %},{% endif %}
{% endfor %}
]

@ -0,0 +1,19 @@
---
# Defaults for the bounca role (BounCA, a web based PKI manager)

# Release to deploy (used to build the download URL below)
bounca_version: 0.1.1
#bounca_version: master
#bounca_git_url: https://github.com/repleo/bounca.git
bounca_archive_url: https://github.com/repleo/bounca/archive/v{{ bounca_version }}.tar.gz
bounca_root_dir: /opt/bounca
# TCP port of the uwsgi HTTP socket
bounca_port: 8084
# Source IPs allowed to reach bounca_port (empty list = port closed)
bounca_src_ip: []
bounca_user: bounca
bounca_db_server: "{{ pg_server | default('localhost') }}"
bounca_db_name: bounca
bounca_db_user: bounca
# Will be generated if not defined
# bounca_db_pass:
# bounca_secret_key:
bounca_admin_mail: "{{ system_admin_email }}"
bounca_from_mail: bounca@{{ ansible_domain }}

@ -0,0 +1,5 @@
---
# Handlers for the bounca role; also pulls in the shared common handlers.
- include: ../common/handlers/main.yml
- name: restart bounca
  service: name=bounca state=restarted

@ -0,0 +1,323 @@
---
# Install or upgrade BounCA. The version deployed by Ansible is recorded in
# {{ bounca_root_dir }}/meta/ansible_version so each run can tell whether it
# is a fresh install ('install'), an update ('upgrade'), or nothing to do
# ('none').

- name: Set default install mode to none
  set_fact: bounca_install_mode="none"
  tags: bounca

- name: Check if bounca is installed
  stat: path={{ bounca_root_dir }}/meta/ansible_version
  register: bounca_version_file
  tags: bounca

- name: Check installed version
  command: cat {{ bounca_root_dir }}/meta/ansible_version
  register: bounca_current_version
  changed_when: False
  when: bounca_version_file.stat.exists
  tags: bounca

- name: Set install mode to install
  set_fact: bounca_install_mode='install'
  when: not bounca_version_file.stat.exists
  tags: bounca

- name: Set install mode to upgrade
  set_fact: bounca_install_mode='upgrade'
  when:
    - bounca_version_file.stat.exists
    - bounca_current_version is defined
    - bounca_current_version.stdout != bounca_version
#    - bounca_manage_upgrade
  tags: bounca

- name: Install dependencies
  # BounCA runs in a python3 virtualenv behind uwsgi with a PostgreSQL
  # backend; gcc/openssl-devel/postgresql-devel are needed to build the pip
  # requirements.
  yum:
    name:
      - python34-virtualenv
      - python34-pip
      - uwsgi-plugin-python3
      - uwsgi-logger-systemd
      - python-psycopg2
      - openssl-devel
      - postgresql-devel
      - postgresql
      - gcc
      - git
  tags: bounca

- name: Create user account for bounca
  user:
    name: bounca
    system: True
    shell: /sbin/nologin
    home: "{{ bounca_root_dir }}"
  tags: bounca
- name: Create directories
  # File modes must be quoted octal strings ("0700"): a bare YAML integer
  # like 700 is interpreted by Ansible as the *decimal* value 700 and sets
  # unexpected permissions.
  file: path={{ item.dir }} state=directory owner={{ item.owner | default(omit) }} group={{ item.group | default(omit) }} mode={{ item.mode | default(omit) }}
  with_items:
    - dir: "{{ bounca_root_dir }}/tmp"
    - dir: "{{ bounca_root_dir }}/app"
    - dir: "{{ bounca_root_dir }}/data"
      mode: "0700"
      group: "{{ bounca_user }}"
      owner: "{{ bounca_user }}"
    - dir: "{{ bounca_root_dir }}/meta"
      mode: "0700"
    - dir: "{{ bounca_root_dir }}/archives"
      mode: "0700"
    - dir: /etc/bounca
      mode: "0750"
      group: "{{ bounca_user }}"
  tags: bounca
# Before an upgrade: archive the current code and a database dump, compress
# the archive directory, then remove the uncompressed copy.

- name: Create archive dir
  file: path={{ bounca_root_dir }}/archives/{{ bounca_current_version.stdout }} state=directory mode=700
  when: bounca_install_mode == "upgrade"
  tags: bounca

- name: Archive current BounCA install
  # synchronize delegated to the same host = local rsync copy
  synchronize:
    src: "{{ bounca_root_dir }}/app"
    dest: "{{ bounca_root_dir }}/archives/{{ bounca_current_version.stdout }}/app"
    recursive: True
  delegate_to: "{{ inventory_hostname }}"
  when: bounca_install_mode == "upgrade"
  tags: bounca

- name: Dump database
  postgresql_db:
    name: "{{ bounca_db_name }}"
    state: dump
    login_host: "{{ bounca_db_server }}"
    login_user: sqladmin
    login_password: "{{ pg_admin_pass }}"
    # the .gz suffix makes postgresql_db compress the dump
    target: "{{ bounca_root_dir }}/archives/{{ bounca_current_version.stdout }}/{{ bounca_db_name }}.sql.gz"
  when: bounca_install_mode == "upgrade"
  tags: bounca

- name: Compress previous version
  command: tar cJf {{ bounca_root_dir }}/archives/{{ bounca_current_version.stdout }}.txz ./
  environment:
    # multi-threaded xz compression
    XZ_OPT: -T0
  args:
    chdir: "{{ bounca_root_dir }}/archives/{{ bounca_current_version.stdout }}"
  when: bounca_install_mode == 'upgrade'
  tags: bounca

- name: Remove the archive directory
  file: path={{ bounca_root_dir }}/archives/{{ bounca_current_version.stdout }} state=absent
  when: bounca_install_mode == 'upgrade'
  tags: bounca
# Fetch the release tarball and deploy it into {{ bounca_root_dir }}/app

- name: Download BounCA
  get_url:
    url: "{{ bounca_archive_url }}"
    dest: "{{ bounca_root_dir }}/tmp"
  when: bounca_install_mode != 'none'
  tags: bounca

- name: Extract BounCA
  unarchive:
    src: "{{ bounca_root_dir }}/tmp/bounca-{{ bounca_version }}.tar.gz"
    dest: "{{ bounca_root_dir }}/tmp"
    remote_src: yes
  when: bounca_install_mode != "none"
  tags: bounca

- name: Move BounCA to it's directory
  # delete: True keeps app/ an exact mirror of the extracted release
  synchronize:
    src: "{{ bounca_root_dir }}/tmp/bounca-{{ bounca_version }}/"
    dest: "{{ bounca_root_dir }}/app/"
    recursive: True
    delete: True
  when: bounca_install_mode != "none"
  delegate_to: "{{ inventory_hostname }}"
  tags: bounca

# Alternative deployment from git, kept for reference:
#- name: Clone GIT repo
#  git:
#    repo: "{{ bounca_git_url }}"
#    dest: "{{ bounca_root_dir }}/app"
#    version: "{{ bounca_version }}"
#    force: True
#  register: bounca_git
#  tags: bounca
#
#- name: Get new git commit
#  command: git rev-parse HEAD
#  args:
#    chdir: "{{ bounca_root_dir }}/app"
#  register: bounca_git_commit
#  changed_when: False
#  tags: bounca
#
#- name: Set install mode to upgrade
#  set_fact: bounca_install_mode='upgrade'
#  when:
#    - bounca_install_mode == 'none'
#    - bounca_git_commit.stdout != bounca_current_version.stdout
#  tags: bounca
# NOTE: a second "Create archive dir" and "Dump database" pair used to live
# here. They were exact duplicates of the tasks run earlier in this file and
# re-dumped the database into an archive directory that had already been
# compressed and removed, so they have been dropped.
- name: Create the virtualenv
  # Install/update the release's python requirements inside the venv
  pip:
    state: latest
    virtualenv: "{{ bounca_root_dir }}"
    virtualenv_command: /usr/bin/virtualenv-3
    requirements: "{{ bounca_root_dir }}/app/requirements.txt"
  tags: bounca

- name: Link pki to the data dir
  # Keep the generated PKI outside of app/ so release syncs don't wipe it
  file: src={{ bounca_root_dir }}/data dest={{ bounca_root_dir }}/app/pki state=link
  tags: bounca

- name: Handle bounca ports
  # Open the web port only when at least one source IP is allowed
  iptables_raw:
    name: bounca_ports
    state: "{{ (bounca_src_ip | length > 0) | ternary('present','absent') }}"
    rules: "-A INPUT -m state --state NEW -p tcp -m multiport --dports {{ bounca_port }} -s {{ bounca_src_ip | join(',') }} -j ACCEPT"
  tags: [firewall,bounca]

#- name: Install additional python module
#  pip:
#    state: latest
#    virtualenv: "{{ bounca_root_dir }}"
#    name: "{{ item }}"
#  with_items:
#    - django-lemonldap
#  tags: bounca
# Generate persistent random credentials when none are provided. umask 077
# is set before the redirection: without it the files are created with the
# default umask and the secrets end up world-readable.

- name: Generate a random pass for the database
  shell: umask 077 && openssl rand -base64 45 > {{ bounca_root_dir }}/meta/ansible_dbpass
  args:
    creates: "{{ bounca_root_dir }}/meta/ansible_dbpass"
  when: bounca_db_pass is not defined
  tags: bounca

- name: Read database password
  command: cat {{ bounca_root_dir }}/meta/ansible_dbpass
  register: bounca_rand_pass
  when: bounca_db_pass is not defined
  changed_when: False
  tags: bounca

- name: Set database pass
  set_fact: bounca_db_pass="{{ bounca_rand_pass.stdout }}"
  when: bounca_db_pass is not defined
  tags: bounca

- name: Generate a random secret
  # Django SECRET_KEY, same umask protection as the DB password
  shell: umask 077 && openssl rand -base64 45 > {{ bounca_root_dir }}/meta/ansible_secret
  args:
    creates: "{{ bounca_root_dir }}/meta/ansible_secret"
  when: bounca_secret_key is not defined
  tags: bounca

- name: Read secret_key
  command: cat {{ bounca_root_dir }}/meta/ansible_secret
  register: bounca_rand_secret
  when: bounca_secret_key is not defined
  changed_when: False
  tags: bounca

- name: Set secret_key
  set_fact: bounca_secret_key="{{ bounca_rand_secret.stdout }}"
  when: bounca_secret_key is not defined
  tags: bounca
- name: Create the PostgreSQL role
  postgresql_user:
    db: postgres
    name: "{{ bounca_db_user }}"
    password: "{{ bounca_db_pass }}"
    login_host: "{{ bounca_db_server }}"
    login_user: sqladmin
    login_password: "{{ pg_admin_pass }}"
  tags: bounca

- name: Create the PostgreSQL database
  postgresql_db:
    name: "{{ bounca_db_name }}"
    encoding: UTF-8
    lc_collate: C
    lc_ctype: C
    template: template0
    owner: "{{ bounca_db_user }}"
    login_host: "{{ bounca_db_server }}"
    login_user: sqladmin
    login_password: "{{ pg_admin_pass }}"
  tags: bounca

- name: Deploy configuration
  # mode must be a quoted octal string: a bare YAML integer 640 is taken by
  # Ansible as the *decimal* value 640 and produces wrong permissions on
  # files containing credentials.
  template: src={{ item.src }} dest={{ item.dest }} owner={{ item.owner | default(omit) }} group={{ item.group | default(omit) }} mode={{ item.mode | default(omit) }}
  with_items:
    - src: main.ini.j2
      dest: /etc/bounca/main.ini
      group: bounca
      mode: "0640"
    - src: uwsgi.ini.j2
      dest: /etc/bounca/uwsgi.ini
      group: bounca
      mode: "0640"
  notify: restart bounca
  tags: bounca
#- name: Add a tmpfiles.d snippet
#  copy: content="d /run/bounca 750 bounca apache" dest=/etc/tmpfiles.d/bounca.conf
#  register: bounca_tmpfiles
#  tags: bounca
#
#- name: Create tmpdir
#  command: systemd-tmpfiles --create
#  when: bounca_tmpfiles.changed
#  tags: bounca

- name: Deploy BounCA unit
  template: src=bounca.service.j2 dest=/etc/systemd/system/bounca.service
  register: bounca_unit
  tags: bounca

- name: Reload systemd
  # pick up unit changes before (re)starting the service
  command: systemctl daemon-reload
  when: bounca_unit.changed
  tags: bounca

- name: Stop BounCA daemon for DB upgrade
  service: name=bounca state=stopped
  when: bounca_install_mode == 'upgrade'
  tags: bounca

- name: Migrate BounCA DB
  django_manage: command="migrate --noinput" app_path={{ bounca_root_dir }}/app virtualenv={{ bounca_root_dir }}
  when: bounca_install_mode != 'none'
  tags: bounca

- name: Collect static assets
  django_manage: command="collectstatic --noinput" app_path={{ bounca_root_dir }}/app virtualenv={{ bounca_root_dir }}
  when: bounca_install_mode != 'none'
  tags: bounca

- name: Start and enable the daemon
  service: name=bounca state=started enabled=True
  tags: bounca

- name: Write installed version
  # Record the deployed version so the next run can detect upgrades
#  copy: content={{ bounca_git_commit.stdout}} dest={{ bounca_root_dir }}/meta/ansible_version
  copy: content={{ bounca_version }} dest={{ bounca_root_dir }}/meta/ansible_version
  tags: bounca

@ -0,0 +1,17 @@
# BounCA uwsgi service unit (deployed by Ansible)
[Unit]
Description=BounCA PKI Daemon
After=syslog.target
[Service]
# NOTE(review): PYTHONPATH pointing at /usr/bin/python34 (an interpreter
# binary, not a module directory) looks wrong -- PYTHONPATH is a module
# search path. Confirm whether this line is actually needed.
Environment=PYTHONPATH=/usr/bin/python34
ExecStart=/usr/sbin/uwsgi --ini /etc/bounca/uwsgi.ini
ExecReload=/bin/kill -HUP $MAINPID
User={{ bounca_user }}
Group={{ bounca_user }}
# SIGINT is uwsgi's graceful shutdown signal
KillSignal=SIGINT
Restart=always
Type=notify
NotifyAccess=all
[Install]
WantedBy=multi-user.target

@ -0,0 +1,14 @@
# BounCA main configuration (managed by Ansible)
[database]
DATABASE_USER: {{ bounca_db_user }}
DATABASE_PASSWORD: {{ bounca_db_pass }}
DATABASE_HOST: {{ bounca_db_server }}
DATABASE_NAME: {{ bounca_db_name }}
[secrets]
# Django SECRET_KEY
SECRET_KEY: {{ bounca_secret_key }}
[email]
EMAIL_HOST: localhost
ADMIN_MAIL: {{ bounca_admin_mail }}
FROM_MAIL: {{ bounca_from_mail }}

@ -0,0 +1,17 @@
# uwsgi configuration for BounCA (managed by Ansible)
[uwsgi]
plugin = python3
# The correct option name is "threads": "thread" is an unknown option that
# uwsgi silently ignores outside strict mode, leaving workers
# single-threaded.
threads = 4
master = 1
processes = 30
# remove generated sockets/pidfiles on exit
vacuum = true
http11-socket = 0.0.0.0:{{ bounca_port }}
chdir = {{ bounca_root_dir }}/app
# the virtualenv
home = {{ bounca_root_dir }}
module = bounca.wsgi
# serve static assets directly, but never script-like files
check-static = {{ bounca_root_dir }}/app/media
static-skip-ext = .php
static-skip-ext = .cgi
static-skip-ext = .py
offload-threads = 4
cache2 = name=bounca,items=200
static-cache-paths = 300

@ -0,0 +1,16 @@
---
# Defaults for the clamav role

# Mirror freshclam fetches signature updates from
clam_mirror: database.clamav.net
clam_user: clamav
clam_group: clamav
# Run the clamd scanning daemon (freshclam alone only updates signatures)
clam_enable_clamd: False
# Extra signature database URLs (freshclam DatabaseCustomURL entries)
clam_custom_db_url: []
clam_safebrowsing: True
clam_listen_port: 3310
# All ports clamd needs: the TCP socket plus the stream port range. The port
# variables are integers, so they must be joined with the Jinja string
# concatenation operator (~): using + on an int and a string raises a
# TypeError at template time.
clam_ports: "{{ [clam_listen_port] + [clam_stream_port_min ~ ':' ~ clam_stream_port_max] }}"
clam_listen_ip: 127.0.0.1
clam_src_ip: []
# Max stream size, in MB
clam_stream_max_size: 50
clam_stream_port_min: 30000
clam_stream_port_max: 32000

@ -0,0 +1,9 @@
---
# Handlers for the clamav role; also pulls in the shared common handlers.
- include: ../common/handlers/main.yml
- name: restart freshclam
  service: name=freshclam state=restarted
- name: restart clamd
  # when clamd is disabled, "restart" means ensuring it is stopped
  service: name=clamd state={{ clam_enable_clamd | ternary('restarted','stopped') }}

@ -0,0 +1,57 @@
---
# Install ClamAV, deploy freshclam/clamd configuration and systemd units,
# and start the services (clamd only when clam_enable_clamd is true).

- name: Install packages
  yum:
    name:
      - clamav
      - clamav-data-empty
      - clamav-server-systemd
      - clamav-update

- name: Create clamav user account
  user:
    name: clamav
    system: True
    shell: /sbin/nologin
    comment: "ClamAV antivirus user account"

- name: Set SELinux
  seboolean: name={{ item }} state=True persistent=True
  with_items:
    - clamd_use_jit
    - antivirus_can_scan_system
  when: ansible_selinux.status == 'enabled'

- name: Deploy freshclam configuration
  template: src=freshclam.conf.j2 dest=/etc/freshclam.conf mode=644
  notify: restart freshclam

- name: Deploy clamd configuration
  template: src=clamd.conf.j2 dest=/etc/clamd.conf
  notify: restart clamd

- name: Deploy systemd units
  template: src={{ item }}.j2 dest=/etc/systemd/system/{{ item }}
  with_items:
    - freshclam.service
    - clamd.service
  notify:
    - restart freshclam
    - restart clamd
  register: clamav_units

- name: Deploy tmpfiles.d fragment
  # /var/run/clamav holds the clamd local socket and is recreated at boot
  copy:
    content: 'd /var/run/clamav 755 {{ clam_user }} {{ clam_group }}'
    dest: /etc/tmpfiles.d/clamav.conf
  notify: systemd-tmpfiles

- name: Reload systemd
  # pick up new/changed units before starting the services
  command: systemctl daemon-reload
  when: clamav_units.changed

- name: Start and enable freshclam
  service: name=freshclam state=started enabled=True

- name: Handle clamd service
  service: name=clamd state={{ clam_enable_clamd | ternary('started','stopped') }} enabled={{ clam_enable_clamd }}

@ -0,0 +1,12 @@
# clamd configuration (managed by Ansible)
LogSyslog yes
LogVerbose yes
ExtendedDetectionInfo yes
LocalSocket /var/run/clamav/clamd.sock
# NOTE(review): mode 666 makes the local socket writable by every local
# user; confirm this is intended (any local user may submit scan requests)
LocalSocketMode 666
TCPSocket {{ clam_listen_port }}
TCPAddr {{ clam_listen_ip }}
# Port range used for STREAM scanning connections
StreamMinPort {{ clam_stream_port_min }}
StreamMaxPort {{ clam_stream_port_max }}
StreamMaxLength {{ clam_stream_max_size }}M
ExitOnOOM yes
# systemd runs clamd as a simple (foreground) service
Foreground yes

@ -0,0 +1,13 @@
# clamd service unit (deployed by Ansible)
[Unit]
Description=ClamAV antivirus daemon
After=syslog.target network.target
[Service]
Type=simple
ExecStart=/usr/sbin/clamd -c /etc/clamd.conf
User={{ clam_user }}
Group={{ clam_group }}
Restart=on-failure
[Install]
WantedBy=multi-user.target

@ -0,0 +1,13 @@
DatabaseDirectory /var/lib/clamav
LogVerbose yes
LogSyslog yes
PidFile /var/run/freshclam.pid
# checks per day; safebrowsing data is refreshed more often upstream
Checks {{ clam_safebrowsing | ternary('48','12') }}
DatabaseOwner clamupdate
DatabaseMirror {{ clam_mirror }}
{% for custom in clam_custom_db_url %}
{# freshclam options are whitespace separated ("Option value"): the
   previous "DatabaseCustomURL=URL" form is not a valid option line. #}
DatabaseCustomURL {{ custom }}
{% endfor %}
NotifyClamd /etc/clamd.conf
Foreground yes
SafeBrowsing {{ clam_safebrowsing | ternary('yes','no') }}

@ -0,0 +1,15 @@
# freshclam signature updater unit (deployed by Ansible)
[Unit]
Description=ClamAV signature updater
After=network.target
[Service]
Type=simple
User=clamupdate
Group=clamupdate
# --daemon with --stdout keeps freshclam in the foreground, logging to the
# journal, while running its periodic update loop
ExecStart=/usr/bin/freshclam --stdout --daemon
Restart=on-failure
PrivateTmp=true
[Install]
WantedBy=multi-user.target

@ -0,0 +1,120 @@
---
# Default variables for the common role (base system configuration)

# List of UNIX group which will have full root access, using sudo
system_admin_groups: ['admins','Domain\ Admins']
# Email address of the admin (will receive root email)
# system_admin_email: admin@domain.net
# List of basic system utilities to install
# (Common list for EL and Debian based distro)
system_utils:
  - htop
  - screen
  - iftop
  - tcpdump
  - bzip2
  - pbzip2
  - lzop
  - zstd
  - vim
  - bash-completion
  - rsync
  - lsof
  - net-tools
  - sysstat
  - pciutils
  - strace
  - wget
  - man-db
  - unzip
  - openssl
  - pv
  - less
  - nano
  - tree
  - mc
# List specific for EL based
system_utils_el:
  - openssh-clients
  - nc
  - xz
  - lz4
  - yum-utils
  - fuse-sshfs
  - policycoreutils-python
# List specific for Debian based
system_utils_deb:
  - openssh-client
  - netcat
  - xz-utils
  - liblz4-tool
  - sshfs
# Kernel modules to load
system_kmods: []
# List of extra package to install
system_extra_pkgs: []
# MegaCLI tool version
megacli_version: 8.07.14-1
# List of FS to mount
fstab: []
# fstab:
#   - name: /mnt/data
#     src: files.domain.org:/data
#     opts: noatime
#     fstype: nfs
#     state: present
#     boot: yes
# Various SELinux booleans
sebool: []
# sebool:
#   - name: httpd_use_fusefs
#     state: True
#     persistent: True
system_swappiness: 10
# Extra sysctl settings, key -> value
system_sysctl: {}
# system_sysctl:
#   vm.vfs_cache_pressure: 500
#   vm.dirty_ratio: 10
#   vm.dirty_background_ratio: 5
# Disable traditional rsyslog daemon
system_disable_syslog: False
# Send journald logs to a remote server using systemd-journal-upload
# system_journal_remote_uri: http://logs.example.com:19532
# Max disk space used by the Journal. Default is 10% of the available space. But must be expressed as an absolute value in the conf
# We can specify the max amount of space used, and the min amount of space left free. The smallest limit will apply
system_journal_max_use: 3G
system_journal_keep_free: 2G
# System Timezone
system_tz: 'Europe/Paris'
# Tuned profile to apply. If undefined, virt-host and virt-guest are applied automatically when needed
# system_tuned_profile: enterprise-storage
# Frequency of the fstrim cron job. Can be daily, weekly or monthly
system_fstrim_freq: daily
# Shell aliases deployed for every user; extendable via
# system_extra_bash_aliases (which overrides on key conflicts)
system_base_bash_aliases:
  ls: 'ls $LS_OPTIONS'
  ll: 'ls $LS_OPTIONS -l'
  l: 'ls $LS_OPTIONS -lA'
  rm: 'rm -i'
  cp: 'cp -i'
  mv: 'mv -i'
system_extra_bash_aliases: {}
system_bash_aliases: "{{ system_base_bash_aliases | combine(system_extra_bash_aliases, recursive=True) }}"
...

@ -0,0 +1,10 @@
#!/bin/bash
# Profile snippet: colorized ls plus interactive (prompting) rm/cp/mv
export LS_OPTIONS='--color=auto'
eval "`dircolors`"
alias ls='ls $LS_OPTIONS'
alias ll='ls $LS_OPTIONS -l'
alias l='ls $LS_OPTIONS -lA'
alias rm='rm -i'
alias cp='cp -i'
alias mv='mv -i'

@ -0,0 +1 @@
# Extra arguments for crond: -s sends job output to syslog instead of mail
CRONDARGS="-s"

@ -0,0 +1,10 @@
#!/bin/bash
# Periodic fstrim of all mounted filesystems
/sbin/fstrim -v --all
# Proxmox container support: also trim the root of each LXC container.
# Only running containers are considered -- lxc-info has no PID for a
# stopped container, which would make the fstrim path /proc//root.
if [ -x /usr/sbin/pct ]; then
  for CONTAINER in $(/usr/sbin/pct list | awk '/^[0-9]/ && $2 == "running" {print $1}'); do
    /sbin/fstrim -v /proc/$(lxc-info -n "$CONTAINER" -p | awk '{print $2}')/root
  done
fi

@ -0,0 +1,4 @@
" Do not source the distribution's defaults.vim
let g:skip_defaults_vim=1
" Disable mouse capture so terminal copy/paste keeps working
set mouse-=a
set background=dark
syntax on

@ -0,0 +1,33 @@
---
# Handlers shared by the common role. Handler names must be unique: this
# file previously defined "restart journald" twice (identical bodies); with
# duplicate names Ansible only keeps the last definition, so the duplicate
# has been removed.
- name: rehash postfix
  command: "postmap /etc/postfix/{{ item }}"
  with_items:
    - relay_auth
- name: restart postfix
  service: name=postfix state=restarted
- name: newaliases
  command: newaliases
- name: restart journald
  service: name=systemd-journald state=restarted
- name: systemd-tmpfiles
  command: systemd-tmpfiles --create
- name: reload systemd
  command: systemctl daemon-reload
- name: restart crond
  service: name=crond state=restarted
- name: restart journal-upload
  service: name=systemd-journal-upload state=restarted
  when: remote_journal is defined
- name: load kmods
  service: name=systemd-modules-load state=restarted

@ -0,0 +1,28 @@
---
# Dependencies of the common role: the base plumbing every host gets.
allow_duplicates: no
dependencies:
  - role: mkdir
  - role: system_proxy
  - role: repo_base
    when: ansible_os_family == 'RedHat'
  - role: network
  - role: iptables
    when: iptables_manage | default(True)
  - role: zabbix_agent
  - role: fusioninventory_agent
  - role: sssd_ldap_auth
    when: ldap_auth | default(False)
  - role: sssd_ad_auth
    when: ad_auth | default(False)
  - role: ntp_client
    # LXC containers share the host clock; no NTP client inside them
    when: ansible_virtualization_role == 'host' or ansible_virtualization_type != 'lxc'
  - role: sudo
  - role: ssh
  - role: patrix
    when:
      - patrix_enabled | default(True)
      - patrix_server is defined
      - patrix_user is defined
      - patrix_pass is defined
  - role: postfix
    when: system_postfix | default(True)

@ -0,0 +1,16 @@
---
# Install and start the QEMU guest agent when running as a KVM guest with
# the agent virtio channel exposed by the hypervisor.
- name: Check if qemu agent channel is available
  stat: path=/dev/virtio-ports/org.qemu.guest_agent.0
  register: qemu_ga_dev

# OS specific package install (guest_RedHat.yml / guest_Debian.yml)
- include: guest_{{ ansible_os_family }}.yml
  when:
    - qemu_ga_dev.stat.exists
    - ansible_virtualization_type == 'kvm'

- name: Start and enable qemu guest agent
  service: name=qemu-guest-agent state=started enabled=yes
  when:
    - qemu_ga_dev.stat.exists
    - ansible_virtualization_type == 'kvm'

@ -0,0 +1,4 @@
---
# Debian/Ubuntu variant, included by main.yml on KVM guests
- name: Install qemu guest agent
  apt: name=qemu-guest-agent state=present

@ -0,0 +1,5 @@
---
# RedHat/CentOS variant, included by main.yml on KVM guests
- name: Install qemu guest agent
  yum: name=qemu-guest-agent state=present

@ -0,0 +1,18 @@
---
# Detect LSI/MegaRAID controllers from the gathered device facts, run the
# OS specific MegaCli install, then remove the copied package file.
- set_fact:
    # collect the host adapter string of every block device
    controllers: "{{ controllers | default([]) + [ ansible_devices[item].host ] }}"
  with_items: "{{ ansible_devices.keys() | list }}"

- set_fact:
    lsi_controllers: "{{ controllers | select('match', '(?i).*(lsi|megaraid).*') | list | unique }}"

# sets megacli (package file name) and megacli_installed_version
- include_tasks: hardware_{{ ansible_os_family }}.yml

- name: Remove MegaCli package
  # conditions are evaluated in order, so megacli_installed_version is only
  # dereferenced when a controller was found (and the include ran the check)
  file: path=/tmp/{{ megacli }} state=absent
  when:
    - lsi_controllers | length > 0
    - megacli_installed_version.stdout != megacli_version
...

@ -0,0 +1,30 @@
---
# MegaCli install for Debian based hosts (included by main.yml;
# megacli and megacli_installed_version are read back by the caller)
- set_fact: megacli=megacli_{{ megacli_version }}_all.deb

- name: Install libncurses
  apt:
    name:
      - libncurses5

- name: Check if MegaCLi is installed (Debian)
  shell: dpkg -s megacli | grep Version | awk '{ print $2 }' 2>/dev/null
  args:
    warn: False
  register: megacli_installed_version
  # dpkg -s fails when the package is absent; that is a valid answer here
  failed_when: False
  changed_when: False
  when: lsi_controllers | length > 0

- name: Copy MegaCli package
  copy: src={{ megacli }} dest=/tmp
  when:
    - lsi_controllers | length > 0
    - megacli_installed_version.stdout != megacli_version

- name: Install MegaCli (Debian)
  apt: deb=/tmp/{{ megacli }} allow_unauthenticated=yes
  when:
    - lsi_controllers | length > 0
    - megacli_installed_version.stdout != megacli_version

@ -0,0 +1,24 @@
---
# MegaCli install for RedHat based hosts (included by main.yml;
# megacli and megacli_installed_version are read back by the caller)
- set_fact:
    megacli: MegaCli-{{ megacli_version }}.noarch.rpm

- name: Check if MegaCLi is installed
  shell: rpm -q --qf "%{VERSION}-%{RELEASE}" MegaCli 2>/dev/null
  register: megacli_installed_version
  changed_when: False
  # rpm -q fails when the package is absent; that is a valid answer here
  failed_when: False
  when: lsi_controllers | length > 0

- name: Copy MegaCli package
  copy: src={{ megacli }} dest=/tmp
  when:
    - lsi_controllers | length > 0
    - megacli_installed_version.stdout != megacli_version

- name: Install MegaCli
  yum: name=/tmp/{{ megacli }} state=present
  when:
    - lsi_controllers | length > 0
    - megacli_installed_version.stdout != megacli_version

@ -0,0 +1,11 @@
---
# Set the machine hostname from system_hostname, falling back to the short
# (first label) part of inventory_hostname.
- name: Set system hostname
  hostname: name={{ system_hostname | default(inventory_hostname | regex_replace('^([^\.]+)\..*','\\1')) }}

- name: Prevent PVE from changing /etc/hostname
  # Proxmox rewrites /etc/hostname inside containers unless this marker
  # file exists
  copy: content='' dest=/etc/.pve-ignore.hostname
  when: ansible_virtualization_type == 'lxc'
...

Some files were not shown because too many files have changed in this diff Show More

Loading…
Cancel
Save