From 0261e875679f1bf63c8d689da7fc7e014597885d Mon Sep 17 00:00:00 2001
From: Stonewall Jackson
Date: Sat, 4 Feb 2023 01:23:43 -0500
Subject: initial commit

---
 .gitignore | 6 +
 .gitmodules | 3 +
 LICENSE | 21 +
 README.md | 100 ++++
 ansible.cfg | 20 +
 inventory-example/10-hosts | 65 +++
 inventory-example/20-by-hostname.yml | 43 ++
 inventory-example/30-constructed.yml | 15 +
 inventory-example/40-groups | 62 +++
 .../group_vars/access_points/vars.yml | 12 +
 .../group_vars/access_points/vault.yml | 6 +
 inventory-example/group_vars/all/apache.yml | 1 +
 inventory-example/group_vars/all/archive.yml | 2 +
 inventory-example/group_vars/all/asterisk.yml | 105 ++++
 inventory-example/group_vars/all/coturn.yml | 3 +
 inventory-example/group_vars/all/cups.yml | 1 +
 inventory-example/group_vars/all/firefox.yml | 73 +++
 inventory-example/group_vars/all/freeipa.yml | 144 +++++
 inventory-example/group_vars/all/freeradius.yml | 1 +
 inventory-example/group_vars/all/git.yml | 2 +
 inventory-example/group_vars/all/global.yml | 105 ++++
 inventory-example/group_vars/all/hastebin.yml | 3 +
 inventory-example/group_vars/all/invidious.yml | 4 +
 inventory-example/group_vars/all/jellyfin.yml | 1 +
 inventory-example/group_vars/all/mail.yml | 21 +
 inventory-example/group_vars/all/mediawiki.yml | 9 +
 inventory-example/group_vars/all/nagios.yml | 90 +++
 inventory-example/group_vars/all/nfs.yml | 11 +
 inventory-example/group_vars/all/nitter.yml | 3 +
 inventory-example/group_vars/all/nsd.yml | 54 ++
 inventory-example/group_vars/all/packages.yml | 4 +
 .../group_vars/all/photostructure.yml | 3 +
 inventory-example/group_vars/all/polkit.yml | 1 +
 inventory-example/group_vars/all/postgres.yml | 4 +
 inventory-example/group_vars/all/prosody.yml | 16 +
 inventory-example/group_vars/all/proxmox.yml | 7 +
 inventory-example/group_vars/all/psitransfer.yml | 7 +
 inventory-example/group_vars/all/root.yml | 6 +
 inventory-example/group_vars/all/sudo.yml | 2 +
 inventory-example/group_vars/all/syncthing.yml | 6 +
 inventory-example/group_vars/all/syslog.yml | 2 +
 inventory-example/group_vars/all/teddit.yml | 3 +
 inventory-example/group_vars/all/vault.yml | 124 +++++
 inventory-example/group_vars/all/vaultwarden.yml | 1 +
 inventory-example/group_vars/all/wireguard.yml | 2 +
 inventory-example/group_vars/all/yum.yml | 1 +
 inventory-example/group_vars/dav_servers.yml | 6 +
 inventory-example/group_vars/dmz.yml | 1 +
 inventory-example/group_vars/el8.yml | 3 +
 inventory-example/group_vars/freeipa_master.yml | 6 +
 inventory-example/group_vars/git_servers.yml | 1 +
 inventory-example/group_vars/linux_desktops.yml | 1 +
 inventory-example/group_vars/linux_laptops.yml | 2 +
 inventory-example/group_vars/nagios_servers.yml | 1 +
 inventory-example/group_vars/nfs_servers.yml | 10 +
 .../group_vars/opnsense_firewalls.yml | 7 +
 .../group_vars/photostructure_servers.yml | 2 +
 .../group_vars/proxmox_hypervisors.yml | 1 +
 inventory-example/group_vars/proxmox_instances.yml | 2 +
 inventory-example/group_vars/rspamd_servers.yml | 2 +
 inventory-example/group_vars/switches/vars.yml | 6 +
 inventory-example/group_vars/switches/vault.yml | 5 +
 inventory-example/group_vars/syncthing_servers.yml | 1 +
 inventory-example/group_vars/ttrss_servers.yml | 5 +
 inventory-example/group_vars/unifi_controllers.yml | 3 +
 inventory-example/group_vars/wiki_servers.yml | 7 +
 inventory-example/group_vars/xmpp_servers.yml | 1 +
 inventory-example/host_vars/bitwarden1.yml | 1 +
 inventory-example/host_vars/dmz-git1.yml | 21 +
 inventory-example/host_vars/dmz-mx1.yml | 1 +
inventory-example/host_vars/dmz-www1.yml | 9 + inventory-example/host_vars/nas1.yml | 128 +++++ inventory-example/host_vars/opnsense1/vars.yml | 8 + inventory-example/host_vars/opnsense1/vault.yml | 6 + inventory-example/host_vars/privbrowse1.yml | 8 + inventory-example/host_vars/switch1/vars.yml | 15 + inventory-example/host_vars/switch1/vault.yml | 4 + inventory-example/host_vars/ttrss1.yml | 1 + inventory-example/host_vars/tuxbook1.yml | 1 + inventory-example/host_vars/tuxstation1.yml | 5 + inventory-example/host_vars/tuxstation2.yml | 8 + inventory-example/host_vars/wiki1.yml | 1 + inventory-example/host_vars/www1.yml | 1 + playbooks/archiver.yml | 9 + playbooks/asterisk.yml | 18 + playbooks/bitwarden.yml | 15 + playbooks/common.yml | 112 ++++ playbooks/cups.yml | 14 + playbooks/dav.yml | 25 + playbooks/dev_servers.yml | 9 + playbooks/dns_records.yml | 28 + playbooks/freeipa.yml | 24 + playbooks/freeipa_bootstrap.yml | 10 + playbooks/freeipa_replica.yml | 9 + playbooks/git.yml | 54 ++ playbooks/jellyfin.yml | 20 + playbooks/linux_desktops.yml | 24 + playbooks/linux_laptops.yml | 27 + playbooks/mail.yml | 36 ++ playbooks/nagios.yml | 15 + playbooks/nameservers.yml | 9 + playbooks/nfs.yml | 9 + playbooks/opnsense.yml | 17 + playbooks/photostructure.yml | 20 + playbooks/populate_domain.yml | 99 ++++ playbooks/postgres.yml | 15 + playbooks/privbrowse.yml | 33 ++ playbooks/proxmox.yml | 21 + playbooks/proxmox_instance.yml | 5 + playbooks/radius.yml | 9 + playbooks/site.yml | 37 ++ playbooks/syncthing.yml | 19 + playbooks/syslog.yml | 9 + playbooks/test.yml | 4 + playbooks/ttrss.yml | 21 + playbooks/turn.yml | 10 + playbooks/unifi.yml | 15 + playbooks/util/backup.yml | 606 +++++++++++++++++++++ playbooks/util/client_cert.yml | 71 +++ playbooks/util/decomission_host.yml | 56 ++ playbooks/util/restore.yml | 477 ++++++++++++++++ playbooks/util/wireguard_config.yml | 49 ++ playbooks/webserver_internal.yml | 46 ++ playbooks/webserver_public.yml | 38 ++ playbooks/wiki.yml | 25 + playbooks/xmpp.yml | 9 + playbooks/yum.yml | 33 ++ playbooks/znc.yml | 14 + plugins/tests/ip_in_subnet.py | 16 + requirements.txt | 4 + roles/apache/defaults/main.yml | 11 + .../systemd/system/httpd.service.d/override.conf | 6 + roles/apache/handlers/main.yml | 9 + roles/apache/meta/main.yml | 5 + roles/apache/tasks/gssapi.yml | 49 ++ roles/apache/tasks/main.yml | 63 +++ .../templates/etc/httpd/conf.d/letsencrypt.conf.j2 | 8 + .../apache/templates/etc/httpd/conf.d/ssl.conf.j2 | 17 + .../apache/templates/etc/httpd/conf/httpd.conf.j2 | 98 ++++ roles/apache/vars/main.yml | 37 ++ roles/apache_vhost/defaults/main.yml | 14 + roles/apache_vhost/meta/main.yml | 2 + roles/apache_vhost/tasks/main.yml | 18 + .../templates/etc/httpd/conf.d/vhost.conf.j2 | 135 +++++ roles/apache_vhost/vars/main.yml | 26 + roles/archive_client/defaults/main.yml | 4 + roles/archive_client/tasks/main.yml | 49 ++ roles/archive_client/vars/main.yml | 2 + roles/archive_job/defaults/main.yml | 8 + roles/archive_job/meta/main.yml | 2 + roles/archive_job/tasks/main.yml | 19 + roles/archive_server/defaults/main.yml | 4 + .../usr/local/libexec/archiver/archive_edgeswitch | 36 ++ .../usr/local/libexec/archiver/archive_opnsense | 12 + roles/archive_server/tasks/freeipa.yml | 51 ++ roles/archive_server/tasks/main.yml | 79 +++ .../archive_server/templates/etc/archiver.conf.j2 | 16 + .../templates/usr/local/bin/archiver.sh.j2 | 99 ++++ roles/archive_server/vars/main.yml | 22 + roles/asterisk/defaults/main.yml | 74 +++ .../system/asterisk.service.d/override.conf | 6 + 
roles/asterisk/handlers/main.yml | 9 + roles/asterisk/meta/main.yml | 4 + roles/asterisk/tasks/main.yml | 81 +++ roles/asterisk/templates/etc/asterisk/ari.conf.j2 | 11 + .../templates/etc/asterisk/extensions.conf.j2 | 7 + roles/asterisk/templates/etc/asterisk/http.conf.j2 | 13 + .../asterisk/templates/etc/asterisk/logger.conf.j2 | 3 + .../asterisk/templates/etc/asterisk/pjsip.conf.j2 | 28 + .../templates/etc/asterisk/pjsip_wizard.conf.j2 | 57 ++ .../asterisk/templates/etc/asterisk/queues.conf.j2 | 46 ++ roles/asterisk/templates/etc/asterisk/rtp.conf.j2 | 3 + .../templates/etc/asterisk/voicemail.conf.j2 | 37 ++ roles/asterisk/vars/main.yml | 20 + roles/certbot/defaults/main.yml | 10 + roles/certbot/files/etc/pki/tls/certbot-post.sh | 40 ++ roles/certbot/meta/main.yml | 4 + roles/certbot/tasks/main.yml | 50 ++ roles/certbot/vars/main.yml | 13 + roles/cgit/defaults/main.yml | 27 + roles/cgit/meta/main.yml | 4 + roles/cgit/tasks/main.yml | 51 ++ roles/cgit/templates/etc/cgitrc.j2 | 68 +++ roles/cgit/vars/main.yml | 9 + roles/chrony/defaults/main.yml | 1 + roles/chrony/handlers/main.yml | 4 + roles/chrony/tasks/main.yml | 16 + roles/chrony/templates/etc/chrony.conf.j2 | 22 + roles/coturn/defaults/main.yml | 4 + roles/coturn/handlers/main.yml | 4 + roles/coturn/tasks/main.yml | 25 + .../coturn/templates/etc/coturn/turnserver.conf.j2 | 46 ++ roles/coturn/vars/main.yml | 2 + roles/cups_client/defaults/main.yml | 1 + roles/cups_client/handlers/main.yml | 4 + roles/cups_client/tasks/main.yml | 19 + .../cups_client/templates/etc/cups/client.conf.j2 | 3 + roles/cups_server/defaults/main.yml | 3 + roles/cups_server/handlers/main.yml | 4 + roles/cups_server/tasks/freeipa.yml | 58 ++ roles/cups_server/tasks/main.yml | 70 +++ .../templates/etc/cups/cups-files.conf.j2 | 9 + roles/cups_server/templates/etc/cups/cupsd.conf.j2 | 93 ++++ roles/cups_server/vars/main.yml | 14 + roles/dev_environment/meta/main.yml | 4 + roles/dev_environment/tasks/main.yml | 21 + roles/dev_environment/vars/main.yml | 33 ++ roles/devd/handlers/main.yml | 4 + roles/devd/tasks/main.yml | 9 + roles/dnf_automatic/defaults/main.yml | 3 + roles/dnf_automatic/files/etc/dnf/automatic.conf | 12 + .../files/usr/local/sbin/dnf-auto-restart | 30 + roles/dnf_automatic/handlers/main.yml | 4 + roles/dnf_automatic/tasks/main.yml | 50 ++ .../dnf-automatic.service.d/override.conf.j2 | 4 + .../system/dnf-automatic.timer.d/override.conf.j2 | 3 + roles/dnf_automatic/vars/main.yml | 4 + roles/dns_records/defaults/main.yml | 3 + roles/dns_records/tasks/main.yml | 41 ++ roles/dnsmasq/defaults/main.yml | 8 + roles/dnsmasq/handlers/main.yml | 4 + roles/dnsmasq/tasks/main.yml | 16 + .../etc/NetworkManager/conf.d/9A-dns.conf.j2 | 9 + .../NetworkManager/dnsmasq.d/00-dnsmasq.conf.j2 | 7 + roles/dovecot/defaults/main.yml | 24 + .../etc/dovecot/sieve.before.d/10-rspamd.sieve | 5 + .../files/etc/dovecot/sieve/report-ham.sieve | 15 + .../files/etc/dovecot/sieve/report-spam.sieve | 7 + .../dovecot/virtual/All Messages/dovecot-virtual | 2 + .../etc/dovecot/virtual/Flagged/dovecot-virtual | 2 + .../etc/dovecot/virtual/INBOX/dovecot-virtual | 2 + .../systemd/system/dovecot.service.d/override.conf | 6 + .../files/var/lib/solr/dovecot/conf/schema.xml | 48 ++ roles/dovecot/handlers/main.yml | 4 + roles/dovecot/meta/main.yml | 12 + roles/dovecot/tasks/freeipa.yml | 109 ++++ roles/dovecot/tasks/main.yml | 127 +++++ roles/dovecot/tasks/rspamd.yml | 43 ++ roles/dovecot/tasks/solr.yml | 40 ++ .../templates/etc/dovecot/conf.d/10-auth.conf.j2 | 10 + 
.../templates/etc/dovecot/conf.d/10-mail.conf.j2 | 31 ++ .../templates/etc/dovecot/conf.d/10-master.conf.j2 | 31 ++ .../templates/etc/dovecot/conf.d/10-ssl.conf.j2 | 10 + .../templates/etc/dovecot/conf.d/15-lda.conf.j2 | 10 + .../etc/dovecot/conf.d/15-mailboxes.conf.j2 | 36 ++ .../templates/etc/dovecot/conf.d/20-imap.conf.j2 | 3 + .../templates/etc/dovecot/conf.d/20-lmtp.conf.j2 | 3 + .../etc/dovecot/conf.d/20-managesieve.conf.j2 | 11 + .../templates/etc/dovecot/conf.d/90-fts.conf.j2 | 6 + .../templates/etc/dovecot/conf.d/90-quota.conf.j2 | 34 ++ .../dovecot/conf.d/90-sieve-extprograms.conf.j2 | 5 + .../templates/etc/dovecot/conf.d/90-sieve.conf.j2 | 30 + .../etc/dovecot/conf.d/auth-ldap.conf.ext.j2 | 4 + .../etc/dovecot/conf.d/auth-system.conf.ext.j2 | 3 + .../templates/etc/dovecot/dovecot-ldap.conf.ext.j2 | 16 + .../dovecot/templates/etc/dovecot/dovecot.conf.j2 | 5 + .../usr/lib/dovecot/sieve-pipe/report-ham.sh.j2 | 7 + .../usr/lib/dovecot/sieve-pipe/report-spam.sh.j2 | 7 + .../templates/usr/local/bin/dovecot-archive.sh.j2 | 19 + .../usr/local/bin/dovecot-quota-warning.sh.j2 | 19 + .../var/lib/solr/dovecot/conf/solrconfig.xml.j2 | 91 ++++ roles/dovecot/vars/main.yml | 64 +++ roles/evolution/defaults/main.yml | 6 + roles/evolution/handlers/main.yml | 2 + roles/evolution/tasks/main.yml | 23 + .../templates/etc/dconf/db/site.d/10-evolution.j2 | 2 + .../share/evolution/sources/ac-caldav.source.j2 | 41 ++ .../share/evolution/sources/ac-carddav.source.j2 | 41 ++ .../share/evolution/sources/ac-imap.source.j2 | 60 ++ .../share/evolution/sources/ac-mail.source.j2 | 51 ++ .../share/evolution/sources/ac-smtp.source.j2 | 22 + .../share/evolution/sources/ac-tasks.source.j2 | 41 ++ roles/evolution/vars/main.yml | 1 + roles/firefox/defaults/main.yml | 33 ++ roles/firefox/files/etc/profile.d/firefox.sh | 3 + roles/firefox/tasks/main.yml | 10 + .../lib64/firefox/distribution/policies.json.j2 | 116 ++++ roles/firewalld/tasks/main.yml | 17 + roles/freebsd_loader/defaults/main.yml | 1 + roles/freebsd_loader/tasks/main.yml | 14 + roles/freeipa_client/defaults/main.yml | 1 + .../files/etc/gssproxy/99-nfs-client.conf | 9 + roles/freeipa_client/handlers/main.yml | 14 + roles/freeipa_client/tasks/main.yml | 54 ++ roles/freeipa_client/vars/main.yml | 5 + roles/freeipa_keytab/defaults/main.yml | 4 + roles/freeipa_keytab/tasks/main.yml | 37 ++ roles/freeipa_keytab/vars/main.yml | 1 + roles/freeipa_server/defaults/main.yml | 33 ++ .../files/usr/local/share/dirsrv/schema/jid.ldif | 3 + roles/freeipa_server/handlers/main.yml | 19 + roles/freeipa_server/tasks/custom_schema.yml | 101 ++++ roles/freeipa_server/tasks/main.yml | 77 +++ roles/freeipa_server/tasks/master.yml | 138 +++++ roles/freeipa_server/tasks/replica.yml | 21 + .../templates/etc/named/ipa-options-ext.conf.j2 | 7 + .../templates/etc/pki/caIPAclientAuth.cfg.j2 | 113 ++++ .../templates/etc/rsyslog.d/freeipa.conf.j2 | 8 + roles/freeipa_server/vars/main.yml | 65 +++ roles/freeipa_system_account/defaults/main.yml | 1 + roles/freeipa_system_account/tasks/main.yml | 14 + roles/freeradius/defaults/main.yml | 3 + .../systemd/system/radiusd.service.d/override.conf | 6 + roles/freeradius/handlers/main.yml | 4 + roles/freeradius/tasks/freeipa.yml | 50 ++ roles/freeradius/tasks/main.yml | 74 +++ .../freeradius/templates/etc/raddb/clients.conf.j2 | 6 + .../templates/etc/raddb/mods-available/eap.j2 | 54 ++ .../templates/etc/raddb/mods-available/ldap.j2 | 113 ++++ .../freeradius/templates/etc/raddb/radiusd.conf.j2 | 73 +++ .../etc/raddb/sites-available/inner-tunnel.j2 | 
90 +++ roles/freeradius/vars/main.yml | 12 + roles/gather_facts/tasks/main.yml | 2 + roles/gathio/defaults/main.yml | 5 + roles/gathio/handlers/main.yml | 4 + roles/gathio/meta/main.yml | 6 + roles/gathio/tasks/main.yml | 102 ++++ .../templates/etc/systemd/system/gathio.service.j2 | 34 ++ .../var/lib/gathio/gathio/config/api.js.j2 | 6 + .../var/lib/gathio/gathio/config/database.js.j2 | 3 + .../var/lib/gathio/gathio/config/domain.js.j2 | 10 + roles/gathio/vars/main.yml | 20 + roles/getcert_request/defaults/main.yml | 11 + roles/getcert_request/tasks/main.yml | 96 ++++ roles/getcert_request/vars/main.yml | 1 + roles/gitolite/defaults/main.yml | 7 + roles/gitolite/handlers/main.yml | 4 + roles/gitolite/meta/main.yml | 4 + roles/gitolite/tasks/freeipa.yml | 49 ++ roles/gitolite/tasks/main.yml | 119 ++++ roles/gitolite/tasks/sshd.yml | 24 + .../etc/ssh/sshd_config.d/gitolite.conf.j2 | 4 + .../usr/local/bin/gitolite-authorizedkeys.j2 | 37 ++ .../templates/usr/local/bin/gitolite-grouplist.j2 | 42 ++ .../templates/var/www/cgi-bin/gitolite-wrapper.j2 | 14 + .../gitolite/templates/var/www/git/.gitolite.rc.j2 | 28 + .../var/www/git/.gitolite/conf/gitolite.conf.j2 | 11 + roles/gitolite/vars/main.yml | 40 ++ roles/grub/defaults/main.yml | 2 + roles/grub/tasks/main.yml | 23 + roles/gssproxy_client/defaults/main.yml | 8 + roles/gssproxy_client/tasks/main.yml | 17 + .../templates/etc/gssproxy/client.conf.j2 | 16 + roles/hastebin/defaults/main.yml | 9 + .../lib/hastebin/haste-server/static/index.html | 70 +++ roles/hastebin/handlers/main.yml | 4 + roles/hastebin/tasks/main.yml | 119 ++++ .../etc/systemd/system/hastebin.service.j2 | 35 ++ .../var/lib/hastebin/haste-server/config.js.j2 | 32 ++ roles/hastebin/vars/main.yml | 30 + roles/hostname/defaults/main.yml | 3 + roles/hostname/tasks/main.yml | 18 + roles/hostname/templates/etc/hosts.j2 | 3 + roles/invidious/defaults/main.yml | 38 ++ roles/invidious/handlers/main.yml | 4 + roles/invidious/meta/main.yml | 4 + roles/invidious/tasks/database.yml | 69 +++ roles/invidious/tasks/main.yml | 116 ++++ .../etc/systemd/system/invidious.service.j2 | 32 ++ .../opt/invidious/invidious-db-cleanup.sh.j2 | 11 + .../templates/opt/invidious/invidious-update.sh.j2 | 42 ++ .../opt/invidious/invidious/config/config.yml.j2 | 34 ++ roles/invidious/vars/main.yml | 42 ++ roles/jellyfin/defaults/main.yml | 11 + roles/jellyfin/handlers/main.yml | 4 + roles/jellyfin/meta/main.yml | 10 + roles/jellyfin/tasks/freeipa.yml | 67 +++ roles/jellyfin/tasks/main.yml | 94 ++++ .../jellyfin/templates/etc/jellyfin/network.xml.j2 | 36 ++ roles/jellyfin/templates/etc/sysconfig/jellyfin.j2 | 22 + .../etc/systemd/system/jellyfin.service.j2 | 51 ++ .../plugins/configurations/LDAP-Auth.xml.j2 | 23 + roles/jellyfin/vars/main.yml | 34 ++ roles/journald/defaults/main.yml | 3 + roles/journald/handlers/main.yml | 4 + roles/journald/tasks/main.yml | 23 + .../templates/etc/systemd/journald.conf.j2 | 6 + roles/linux_desktop/defaults/main.yml | 6 + .../files/etc/dconf/db/local.d/00-hidpi | 2 + .../files/etc/dconf/db/local.d/locks/hidpi | 1 + .../usr/local/share/thumbnailers/totem.thumbnailer | 4 + roles/linux_desktop/handlers/main.yml | 7 + roles/linux_desktop/meta/main.yml | 9 + roles/linux_desktop/tasks/freeipa.yml | 33 ++ roles/linux_desktop/tasks/main.yml | 109 ++++ .../templates/etc/dconf/db/local.d/00-gnome.j2 | 18 + .../linux_desktop/templates/etc/gdm/custom.conf.j2 | 16 + roles/linux_desktop/vars/main.yml | 65 +++ roles/linux_laptop/defaults/main.yml | 9 + roles/linux_laptop/tasks/freeipa.yml | 33 ++ 
roles/linux_laptop/tasks/main.yml | 58 ++ .../etc/sysconfig/network-scripts/ifcfg-ssid.j2 | 32 ++ .../etc/sysconfig/network-scripts/keys-ssid.j2 | 1 + roles/linux_laptop/vars/main.yml | 6 + .../files/etc/profile.d/local-homedirs.sh | 16 + .../files/etc/security/pam_env_xdg.conf | 4 + .../files/usr/local/sbin/create-local-homedir.sh | 13 + roles/local_homedirs/tasks/main.yml | 76 +++ roles/local_homedirs/vars/main.yml | 3 + roles/locale/defaults/main.yml | 1 + roles/locale/tasks/main.yml | 10 + roles/mediawiki/defaults/main.yml | 52 ++ roles/mediawiki/files/var/www/mediawiki/robots.txt | 2 + roles/mediawiki/meta/main.yml | 8 + roles/mediawiki/tasks/database.yml | 50 ++ roles/mediawiki/tasks/extension.yml | 12 + roles/mediawiki/tasks/freeipa.yml | 40 ++ roles/mediawiki/tasks/main.yml | 134 +++++ .../var/www/mediawiki/LocalSettings.php.j2 | 288 ++++++++++ roles/mediawiki/vars/main.yml | 125 +++++ roles/motd/tasks/main.yml | 10 + .../files/usr/lib64/nagios/plugins/check_mem | 452 +++++++++++++++ .../usr/lib64/nagios/plugins/check_needs_restart | 30 + .../files/usr/lib64/nagios/plugins/check_systemd | 20 + .../files/usr/lib64/nagios/plugins/check_zpools | 74 +++ roles/nagios_client/meta/main.yml | 8 + roles/nagios_client/tasks/main.yml | 54 ++ .../templates/etc/sudoers.d/nagios.j2 | 3 + roles/nagios_client/vars/main.yml | 11 + roles/nagios_server/defaults/main.yml | 34 ++ .../lib64/nagios/plugins/check_asterisk_endpoints | 62 +++ roles/nagios_server/handlers/main.yml | 9 + roles/nagios_server/meta/main.yml | 4 + roles/nagios_server/tasks/freeipa.yml | 42 ++ roles/nagios_server/tasks/main.yml | 90 +++ roles/nagios_server/tasks/objects.yml | 32 ++ .../nagios_server/templates/etc/nagios/cgi.cfg.j2 | 24 + .../templates/etc/nagios/nagios.cfg.j2 | 105 ++++ .../templates/etc/nagios/objects/commands.cfg.j2 | 285 ++++++++++ .../templates/etc/nagios/objects/contacts.cfg.j2 | 6 + .../templates/etc/nagios/objects/hostgroups.cfg.j2 | 10 + .../templates/etc/nagios/objects/hosts.cfg.j2 | 31 ++ .../etc/nagios/objects/servicedependencies.cfg.j2 | 8 + .../etc/nagios/objects/servicegroups.cfg.j2 | 19 + .../templates/etc/nagios/objects/services.cfg.j2 | 375 +++++++++++++ .../templates/etc/nagios/objects/templates.cfg.j2 | 51 ++ .../etc/nagios/objects/timeperiods.cfg.j2 | 39 ++ .../templates/etc/nagios/private/resource.cfg.j2 | 1 + .../usr/share/nagios/html/config.inc.php.j2 | 11 + roles/nagios_server/vars/main.yml | 78 +++ roles/nfs_server/defaults/main.yml | 14 + roles/nfs_server/files/etc/samba/local.conf | 14 + roles/nfs_server/handlers/main.yml | 19 + roles/nfs_server/meta/main.yml | 3 + roles/nfs_server/tasks/autofs.yml | 57 ++ roles/nfs_server/tasks/exports.yml | 55 ++ roles/nfs_server/tasks/homedirs.yml | 112 ++++ roles/nfs_server/tasks/main.yml | 19 + roles/nfs_server/tasks/nfs.yml | 41 ++ roles/nfs_server/tasks/smb.yml | 54 ++ roles/nfs_server/templates/etc/exports.j2 | 20 + roles/nfs_server/templates/etc/nfs.conf.j2 | 10 + .../nfs_server/templates/etc/samba/shares.conf.j2 | 19 + roles/nfs_server/vars/main.yml | 9 + roles/nim/defaults/main.yml | 1 + roles/nim/tasks/main.yml | 12 + roles/nim/vars/main.yml | 2 + roles/nitter/defaults/main.yml | 21 + roles/nitter/handlers/main.yml | 4 + roles/nitter/meta/main.yml | 7 + roles/nitter/tasks/main.yml | 97 ++++ .../templates/etc/systemd/system/nitter.service.j2 | 34 ++ .../templates/opt/nitter/nitter-update.sh.j2 | 40 ++ .../templates/opt/nitter/nitter/nitter.conf.j2 | 38 ++ roles/nitter/vars/main.yml | 14 + roles/nsd/defaults/main.yml | 2 + 
roles/nsd/handlers/main.yml | 9 + roles/nsd/tasks/generate_zone.yml | 50 ++ roles/nsd/tasks/main.yml | 35 ++ roles/nsd/templates/etc/nsd/nsd.conf.j2 | 24 + roles/nsd/vars/main.yml | 15 + roles/packages/defaults/main.yml | 1 + roles/packages/tasks/main.yml | 10 + roles/photostructure/defaults/main.yml | 11 + roles/photostructure/handlers/main.yml | 4 + roles/photostructure/meta/main.yml | 7 + roles/photostructure/tasks/freeipa.yml | 47 ++ roles/photostructure/tasks/main.yml | 78 +++ .../templates/etc/sysconfig/photostructure | 18 + .../etc/systemd/system/photostructure.service.j2 | 19 + .../opt/photostructure/photostructure-update.sh.j2 | 48 ++ roles/photostructure/vars/main.yml | 46 ++ roles/php/defaults/main.yml | 17 + .../systemd/system/php-fpm.service.d/override.conf | 2 + roles/php/handlers/main.yml | 4 + roles/php/tasks/main.yml | 32 ++ roles/php/templates/etc/php-fpm.conf.j2 | 6 + roles/php/templates/etc/php-fpm.d/www.conf.j2 | 40 ++ roles/php/templates/etc/php.ini.j2 | 130 +++++ roles/php/vars/main.yml | 5 + roles/polkit/defaults/main.yml | 1 + roles/polkit/tasks/main.yml | 4 + .../etc/polkit-1/rules.d/40-default.rules.j2 | 3 + roles/postfix_client/defaults/main.yml | 3 + roles/postfix_client/handlers/main.yml | 4 + roles/postfix_client/tasks/main.yml | 16 + .../templates/etc/postfix/main.cf.j2 | 41 ++ roles/postfix_client/vars/main.yml | 9 + roles/postfix_server/defaults/main.yml | 13 + roles/postfix_server/files/etc/sasl2/smtpd.conf | 2 + .../systemd/system/postfix.service.d/override.conf | 6 + roles/postfix_server/handlers/main.yml | 9 + roles/postfix_server/tasks/freeipa.yml | 95 ++++ roles/postfix_server/tasks/main.yml | 61 +++ .../templates/etc/postfix/main.cf.j2 | 109 ++++ .../templates/etc/postfix/master.cf.j2 | 34 ++ .../templates/etc/postfix/virtual_aliases.cf.j2 | 8 + .../templates/etc/postfix/virtual_mailboxes.cf.j2 | 7 + roles/postfix_server/vars/main.yml | 64 +++ roles/postgresql_server/defaults/main.yml | 5 + .../system/postgresql.service.d/override.conf | 6 + roles/postgresql_server/handlers/main.yml | 4 + roles/postgresql_server/tasks/freeipa.yml | 49 ++ roles/postgresql_server/tasks/main.yml | 53 ++ .../templates/var/lib/pgsql/data/pg_hba.conf.j2 | 7 + .../var/lib/pgsql/data/postgresql.conf.j2 | 34 ++ roles/postgresql_server/vars/main.yml | 40 ++ roles/prosody/defaults/main.yml | 21 + .../systemd/system/prosody.service.d/override.conf | 6 + roles/prosody/handlers/main.yml | 4 + roles/prosody/meta/main.yml | 16 + roles/prosody/tasks/database.yml | 17 + roles/prosody/tasks/freeipa.yml | 64 +++ roles/prosody/tasks/main.yml | 97 ++++ .../templates/etc/prosody/prosody.cfg.lua.j2 | 119 ++++ .../usr/local/bin/prosody-update-roster.j2 | 56 ++ roles/prosody/vars/main.yml | 38 ++ roles/prosody_letsencrypt_proxy/defaults/main.yml | 2 + roles/prosody_letsencrypt_proxy/handlers/main.yml | 4 + roles/prosody_letsencrypt_proxy/tasks/main.yml | 1 + roles/prosody_letsencrypt_proxy/tasks/master.yml | 47 ++ roles/prosody_letsencrypt_proxy/tasks/slave.yml | 32 ++ .../etc/ssh/sshd_config.d/99-prosody-le-proxy.conf | 7 + .../usr/local/sbin/prosody-letsencrypt-proxy.j2 | 51 ++ roles/prosody_letsencrypt_proxy/vars/main.yml | 9 + roles/proxmox_hypervisor/defaults/main.yml | 33 ++ .../files/etc/apt/apt.conf.d/20auto-upgrades | 3 + .../files/etc/apt/apt.conf.d/50unattended-upgrades | 14 + .../files/usr/lib/nagios/plugins | 1 + roles/proxmox_hypervisor/handlers/main.yml | 24 + roles/proxmox_hypervisor/tasks/chrony.yml | 11 + roles/proxmox_hypervisor/tasks/main.yml | 31 ++ 
roles/proxmox_hypervisor/tasks/nagios.yml | 68 +++ roles/proxmox_hypervisor/tasks/postfix.yml | 18 + roles/proxmox_hypervisor/tasks/pve.yml | 58 ++ roles/proxmox_hypervisor/tasks/pve_api_user.yml | 21 + .../proxmox_hypervisor/tasks/pve_kvm_template.yml | 32 ++ roles/proxmox_hypervisor/tasks/rsyslog.yml | 16 + roles/proxmox_hypervisor/tasks/sudo.yml | 5 + .../tasks/unattended_upgrades.yml | 11 + roles/proxmox_hypervisor/tasks/zfs.yml | 34 ++ .../templates/etc/chrony/chrony.conf.j2 | 10 + .../templates/etc/postfix/main.cf.j2 | 19 + .../templates/etc/rsyslog.d/forward.conf.j2 | 7 + .../templates/etc/snmp/snmpd.conf.j2 | 10 + .../templates/etc/sudoers.d/nagios.j2 | 3 + roles/proxmox_hypervisor/templates/etc/sudoers.j2 | 15 + .../etc/systemd/system/zfs-scrub@.service.j2 | 11 + .../etc/systemd/system/zfs-scrub@.timer.j2 | 10 + .../etc/systemd/system/zfs-trim@.service.j2 | 11 + .../etc/systemd/system/zfs-trim@.timer.j2 | 10 + .../templates/etc/zfs/zed.d/zed.rc.j2 | 7 + .../templates/var/lib/vz/snippets/userdata.yaml.j2 | 17 + roles/proxmox_hypervisor/vars/main.yml | 34 ++ roles/proxmox_instance/defaults/main.yml | 31 ++ roles/proxmox_instance/tasks/main.yml | 143 +++++ roles/psitransfer/defaults/main.yml | 22 + roles/psitransfer/handlers/main.yml | 4 + roles/psitransfer/tasks/main.yml | 76 +++ .../etc/systemd/system/psitransfer.service.j2 | 36 ++ .../psitransfer/config.production.js.j2 | 12 + roles/psitransfer/vars/main.yml | 33 ++ roles/pxe_server/README.txt | 18 + roles/pxe_server/defaults/main.yml | 4 + roles/pxe_server/tasks/extract_iso.yml | 16 + roles/pxe_server/tasks/main.yml | 39 ++ roles/pxe_server/templates/grub/grub.cfg.j2 | 22 + .../templates/grub/menuentry-redhat.cfg.j2 | 18 + .../templates/kickstart/rocky8-ks.cfg.j2 | 89 +++ roles/pxe_server/vars/main.yml | 23 + roles/qemu_guest_agent/tasks/main.yml | 10 + roles/redis/defaults/main.yml | 3 + .../redis/files/etc/systemd/system/redis@.service | 18 + roles/redis/tasks/main.yml | 43 ++ roles/redis/templates/etc/redis.conf.j2 | 65 +++ roles/redis/vars/main.yml | 1 + roles/root_authorized_keys/defaults/main.yml | 1 + roles/root_authorized_keys/tasks/main.yml | 5 + roles/root_password/defaults/main.yml | 2 + roles/root_password/tasks/main.yml | 4 + roles/rspamd/defaults/main.yml | 12 + roles/rspamd/handlers/main.yml | 4 + roles/rspamd/meta/main.yml | 19 + roles/rspamd/tasks/main.yml | 76 +++ .../etc/rspamd/local.d/classifier-bayes.conf.j2 | 3 + .../etc/rspamd/local.d/dkim_signing.conf.j2 | 3 + .../templates/etc/rspamd/local.d/greylist.conf.j2 | 1 + .../templates/etc/rspamd/local.d/logging.inc.j2 | 1 + .../templates/etc/rspamd/local.d/multimap.conf.j2 | 9 + .../templates/etc/rspamd/local.d/phishing.conf.j2 | 1 + .../templates/etc/rspamd/local.d/redis.conf.j2 | 1 + .../templates/etc/rspamd/local.d/replies.conf.j2 | 1 + .../etc/rspamd/local.d/worker-controller.inc.j2 | 11 + .../etc/rspamd/local.d/worker-normal.inc.j2 | 1 + .../etc/rspamd/local.d/worker-proxy.inc.j2 | 7 + roles/rspamd/vars/main.yml | 30 + roles/rsyslog_client/defaults/main.yml | 7 + roles/rsyslog_client/handlers/main.yml | 4 + roles/rsyslog_client/tasks/main.yml | 27 + roles/rsyslog_client/templates/etc/rsyslog.conf.j2 | 61 +++ roles/rsyslog_client/vars/main.yml | 8 + roles/rsyslog_server/defaults/main.yml | 14 + roles/rsyslog_server/handlers/main.yml | 10 + roles/rsyslog_server/tasks/main.yml | 74 +++ roles/rsyslog_server/templates/etc/rsyslog.conf.j2 | 97 ++++ roles/rsyslog_server/vars/main.yml | 20 + roles/sabredav/defaults/main.yml | 7 + roles/sabredav/tasks/composer.yml | 
10 + roles/sabredav/tasks/database.yml | 46 ++ roles/sabredav/tasks/freeipa.yml | 27 + roles/sabredav/tasks/main.yml | 77 +++ .../templates/var/www/sabredav/server.php.j2 | 61 +++ roles/sabredav/vars/main.yml | 60 ++ roles/selinux/defaults/main.yml | 1 + roles/selinux/tasks/main.yml | 22 + roles/selinux/vars/main.yml | 4 + roles/selinux_policy/tasks/main.yml | 44 ++ roles/selinux_policy/vars/main.yml | 1 + roles/snmp/defaults/main.yml | 9 + roles/snmp/handlers/main.yml | 4 + roles/snmp/tasks/main.yml | 51 ++ roles/snmp/templates/etc/snmp/snmpd.conf.j2 | 8 + roles/snmp/vars/main.yml | 2 + roles/solr/defaults/main.yml | 5 + roles/solr/handlers/main.yml | 4 + roles/solr/tasks/main.yml | 77 +++ roles/solr/templates/etc/solr/log4j2.xml.j2 | 18 + roles/solr/templates/etc/solr/solrconfig.xml.j2 | 280 ++++++++++ roles/solr/templates/etc/sysconfig/solr.j2 | 6 + .../templates/etc/systemd/system/solr.service.j2 | 63 +++ roles/solr/vars/main.yml | 3 + roles/ssh/defaults/main.yml | 1 + roles/ssh/tasks/main.yml | 4 + roles/ssh/templates/etc/ssh/ssh_config.j2 | 13 + roles/sudo/defaults/main.yml | 2 + roles/sudo/tasks/main.yml | 5 + roles/sudo/templates/etc/sudoers.j2 | 48 ++ roles/syncthing/defaults/main.yml | 5 + roles/syncthing/meta/main.yml | 4 + roles/syncthing/tasks/main.yml | 73 +++ roles/syncthing/tasks/syncthing_user.yml | 36 ++ .../etc/systemd/system/syncthing-user@.service.j2 | 27 + .../templates/var/lib/syncthing/config.xml.j2 | 116 ++++ .../syncthing/templates/var/www/html/index.html.j2 | 15 + roles/syncthing/vars/main.yml | 46 ++ roles/systemd_timer/defaults/main.yml | 11 + roles/systemd_timer/tasks/main.yml | 21 + .../templates/etc/systemd/system/task.service.j2 | 23 + .../templates/etc/systemd/system/task.timer.j2 | 9 + roles/teddit/defaults/main.yml | 24 + roles/teddit/handlers/main.yml | 4 + roles/teddit/meta/main.yml | 10 + roles/teddit/tasks/main.yml | 104 ++++ .../templates/etc/systemd/system/teddit.service.j2 | 36 ++ .../templates/opt/teddit/teddit-update.sh.j2 | 36 ++ .../templates/opt/teddit/teddit/config.js.j2 | 71 +++ roles/teddit/vars/main.yml | 13 + roles/tika/defaults/main.yml | 3 + roles/tika/handlers/main.yml | 4 + roles/tika/tasks/main.yml | 69 +++ roles/tika/templates/etc/sysconfig/tika.j2 | 3 + .../templates/etc/systemd/system/tika.service.j2 | 53 ++ roles/tika/templates/etc/tika/config.xml.j2 | 15 + roles/tika/templates/etc/tika/log4j2.xml.j2 | 18 + roles/tika/vars/main.yml | 4 + roles/timezone/default/main.yml | 1 + roles/timezone/tasks/main.yml | 12 + roles/ttrss/defaults/main.yml | 16 + roles/ttrss/handlers/main.yml | 4 + roles/ttrss/tasks/database.yml | 26 + roles/ttrss/tasks/freeipa.yml | 46 ++ roles/ttrss/tasks/main.yml | 96 ++++ .../templates/etc/systemd/system/ttrss.service.j2 | 18 + .../templates/usr/local/sbin/ttrss-update.sh.j2 | 27 + roles/ttrss/templates/var/www/ttrss/config.php.j2 | 23 + roles/ttrss/vars/main.yml | 47 ++ roles/tuned/defaults/main.yml | 1 + roles/tuned/tasks/main.yml | 19 + roles/udev/defaults/main.yml | 2 + roles/udev/handlers/main.yml | 9 + roles/udev/tasks/main.yml | 13 + .../templates/etc/udev/rules.d/pci_pm.rules.j2 | 5 + roles/unifi/files/etc/rsyslog.d/unifi.conf | 4 + roles/unifi/handlers/main.yml | 9 + roles/unifi/meta/main.yml | 8 + roles/unifi/tasks/main.yml | 81 +++ .../usr/local/sbin/unifi-certificate-update.sh.j2 | 33 ++ roles/unifi/vars/main.yml | 41 ++ roles/vaultwarden/defaults/main.yml | 21 + roles/vaultwarden/handlers/main.yml | 4 + roles/vaultwarden/tasks/database.yml | 18 + roles/vaultwarden/tasks/freeipa.yml | 38 ++ 
roles/vaultwarden/tasks/main.yml | 100 ++++ .../templates/etc/sysconfig/vaultwarden.j2 | 48 ++ .../etc/systemd/system/vaultwarden.service.j2 | 35 ++ roles/vaultwarden/vars/main.yml | 54 ++ roles/yum/files/etc/pki/rpm-gpg/RPM-GPG-KEY-EPEL-8 | 28 + roles/yum/files/etc/pki/rpm-gpg/RPM-GPG-KEY-EPEL-9 | 29 + .../yum/files/etc/pki/rpm-gpg/RPM-GPG-KEY-Rocky-8 | 29 + .../yum/files/etc/pki/rpm-gpg/RPM-GPG-KEY-Rocky-9 | 31 ++ .../files/etc/pki/rpm-gpg/RPM-GPG-KEY-crystal-8 | 21 + .../files/etc/pki/rpm-gpg/RPM-GPG-KEY-mongodb-4.0 | 30 + .../files/etc/pki/rpm-gpg/RPM-GPG-KEY-mongodb-4.4 | 30 + .../files/etc/pki/rpm-gpg/RPM-GPG-KEY-mongodb-6.0 | 30 + .../pki/rpm-gpg/RPM-GPG-KEY-rpmfusion-free-el-8 | 29 + .../pki/rpm-gpg/RPM-GPG-KEY-rpmfusion-free-el-9 | 29 + .../pki/rpm-gpg/RPM-GPG-KEY-rpmfusion-nonfree-el-8 | 29 + .../pki/rpm-gpg/RPM-GPG-KEY-rpmfusion-nonfree-el-9 | 29 + .../etc/pki/rpm-gpg/RPM-GPG-KEY-rspamd-stable | 52 ++ .../files/etc/pki/rpm-gpg/RPM-GPG-KEY-zfsonlinux-8 | 30 + .../files/etc/pki/rpm-gpg/RPM-GPG-KEY-zfsonlinux-9 | 57 ++ roles/yum/tasks/main.yml | 29 + roles/yum/vars/main.yml | 89 +++ roles/yum_disable_default_repos/tasks/main.yml | 8 + roles/yum_disable_default_repos/vars/main.yml | 11 + roles/yum_mirror/defaults/main.yml | 2 + roles/yum_mirror/handlers/main.yml | 5 + roles/yum_mirror/tasks/main.yml | 47 ++ .../templates/usr/local/sbin/reposync.sh.j2 | 17 + roles/yum_mirror/vars/main.yml | 130 +++++ roles/zfs/defaults/main.yml | 10 + roles/zfs/handlers/main.yml | 4 + roles/zfs/meta/main.yml | 4 + roles/zfs/tasks/create_zpool.yml | 20 + roles/zfs/tasks/main.yml | 70 +++ .../etc/systemd/system/zfs-scrub@.service.j2 | 11 + .../etc/systemd/system/zfs-scrub@.timer.j2 | 10 + .../etc/systemd/system/zfs-trim@.service.j2 | 11 + .../etc/systemd/system/zfs-trim@.timer.j2 | 10 + roles/zfs/templates/etc/zfs/zed.d/zed.rc.j2 | 10 + roles/zfs/vars/main.yml | 6 + roles/znc/defaults/main.yml | 4 + roles/znc/files/etc/sasl2/znc.conf | 2 + roles/znc/handlers/main.yml | 8 + roles/znc/meta/main.yml | 4 + roles/znc/tasks/freeipa.yml | 49 ++ roles/znc/tasks/main.yml | 86 +++ .../templates/var/lib/znc/.znc/configs/znc.conf.j2 | 62 +++ .../lib/znc/.znc/moddata/cyrusauth/.registry.j2 | 2 + roles/znc/vars/main.yml | 20 + vendor/ansible-freeipa | 1 + 751 files changed, 21964 insertions(+) create mode 100644 .gitignore create mode 100644 .gitmodules create mode 100644 LICENSE create mode 100644 README.md create mode 100644 ansible.cfg create mode 100644 inventory-example/10-hosts create mode 100644 inventory-example/20-by-hostname.yml create mode 100644 inventory-example/30-constructed.yml create mode 100644 inventory-example/40-groups create mode 100644 inventory-example/group_vars/access_points/vars.yml create mode 100644 inventory-example/group_vars/access_points/vault.yml create mode 100644 inventory-example/group_vars/all/apache.yml create mode 100644 inventory-example/group_vars/all/archive.yml create mode 100644 inventory-example/group_vars/all/asterisk.yml create mode 100644 inventory-example/group_vars/all/coturn.yml create mode 100644 inventory-example/group_vars/all/cups.yml create mode 100644 inventory-example/group_vars/all/firefox.yml create mode 100644 inventory-example/group_vars/all/freeipa.yml create mode 100644 inventory-example/group_vars/all/freeradius.yml create mode 100644 inventory-example/group_vars/all/git.yml create mode 100644 inventory-example/group_vars/all/global.yml create mode 100644 inventory-example/group_vars/all/hastebin.yml create mode 100644 
inventory-example/group_vars/all/invidious.yml create mode 100644 inventory-example/group_vars/all/jellyfin.yml create mode 100644 inventory-example/group_vars/all/mail.yml create mode 100644 inventory-example/group_vars/all/mediawiki.yml create mode 100644 inventory-example/group_vars/all/nagios.yml create mode 100644 inventory-example/group_vars/all/nfs.yml create mode 100644 inventory-example/group_vars/all/nitter.yml create mode 100644 inventory-example/group_vars/all/nsd.yml create mode 100644 inventory-example/group_vars/all/packages.yml create mode 100644 inventory-example/group_vars/all/photostructure.yml create mode 100644 inventory-example/group_vars/all/polkit.yml create mode 100644 inventory-example/group_vars/all/postgres.yml create mode 100644 inventory-example/group_vars/all/prosody.yml create mode 100644 inventory-example/group_vars/all/proxmox.yml create mode 100644 inventory-example/group_vars/all/psitransfer.yml create mode 100644 inventory-example/group_vars/all/root.yml create mode 100644 inventory-example/group_vars/all/sudo.yml create mode 100644 inventory-example/group_vars/all/syncthing.yml create mode 100644 inventory-example/group_vars/all/syslog.yml create mode 100644 inventory-example/group_vars/all/teddit.yml create mode 100644 inventory-example/group_vars/all/vault.yml create mode 100644 inventory-example/group_vars/all/vaultwarden.yml create mode 100644 inventory-example/group_vars/all/wireguard.yml create mode 100644 inventory-example/group_vars/all/yum.yml create mode 100644 inventory-example/group_vars/dav_servers.yml create mode 100644 inventory-example/group_vars/dmz.yml create mode 100644 inventory-example/group_vars/el8.yml create mode 100644 inventory-example/group_vars/freeipa_master.yml create mode 100644 inventory-example/group_vars/git_servers.yml create mode 100644 inventory-example/group_vars/linux_desktops.yml create mode 100644 inventory-example/group_vars/linux_laptops.yml create mode 100644 inventory-example/group_vars/nagios_servers.yml create mode 100644 inventory-example/group_vars/nfs_servers.yml create mode 100644 inventory-example/group_vars/opnsense_firewalls.yml create mode 100644 inventory-example/group_vars/photostructure_servers.yml create mode 100644 inventory-example/group_vars/proxmox_hypervisors.yml create mode 100644 inventory-example/group_vars/proxmox_instances.yml create mode 100644 inventory-example/group_vars/rspamd_servers.yml create mode 100644 inventory-example/group_vars/switches/vars.yml create mode 100644 inventory-example/group_vars/switches/vault.yml create mode 100644 inventory-example/group_vars/syncthing_servers.yml create mode 100644 inventory-example/group_vars/ttrss_servers.yml create mode 100644 inventory-example/group_vars/unifi_controllers.yml create mode 100644 inventory-example/group_vars/wiki_servers.yml create mode 100644 inventory-example/group_vars/xmpp_servers.yml create mode 100644 inventory-example/host_vars/bitwarden1.yml create mode 100644 inventory-example/host_vars/dmz-git1.yml create mode 100644 inventory-example/host_vars/dmz-mx1.yml create mode 100644 inventory-example/host_vars/dmz-www1.yml create mode 100644 inventory-example/host_vars/nas1.yml create mode 100644 inventory-example/host_vars/opnsense1/vars.yml create mode 100644 inventory-example/host_vars/opnsense1/vault.yml create mode 100644 inventory-example/host_vars/privbrowse1.yml create mode 100644 inventory-example/host_vars/switch1/vars.yml create mode 100644 inventory-example/host_vars/switch1/vault.yml create mode 100644 
inventory-example/host_vars/ttrss1.yml create mode 100644 inventory-example/host_vars/tuxbook1.yml create mode 100644 inventory-example/host_vars/tuxstation1.yml create mode 100644 inventory-example/host_vars/tuxstation2.yml create mode 100644 inventory-example/host_vars/wiki1.yml create mode 100644 inventory-example/host_vars/www1.yml create mode 100644 playbooks/archiver.yml create mode 100644 playbooks/asterisk.yml create mode 100644 playbooks/bitwarden.yml create mode 100644 playbooks/common.yml create mode 100644 playbooks/cups.yml create mode 100644 playbooks/dav.yml create mode 100644 playbooks/dev_servers.yml create mode 100644 playbooks/dns_records.yml create mode 100644 playbooks/freeipa.yml create mode 100644 playbooks/freeipa_bootstrap.yml create mode 100644 playbooks/freeipa_replica.yml create mode 100644 playbooks/git.yml create mode 100644 playbooks/jellyfin.yml create mode 100644 playbooks/linux_desktops.yml create mode 100644 playbooks/linux_laptops.yml create mode 100644 playbooks/mail.yml create mode 100644 playbooks/nagios.yml create mode 100644 playbooks/nameservers.yml create mode 100644 playbooks/nfs.yml create mode 100644 playbooks/opnsense.yml create mode 100644 playbooks/photostructure.yml create mode 100644 playbooks/populate_domain.yml create mode 100644 playbooks/postgres.yml create mode 100644 playbooks/privbrowse.yml create mode 100644 playbooks/proxmox.yml create mode 100644 playbooks/proxmox_instance.yml create mode 100644 playbooks/radius.yml create mode 100644 playbooks/site.yml create mode 100644 playbooks/syncthing.yml create mode 100644 playbooks/syslog.yml create mode 100644 playbooks/test.yml create mode 100644 playbooks/ttrss.yml create mode 100644 playbooks/turn.yml create mode 100644 playbooks/unifi.yml create mode 100644 playbooks/util/backup.yml create mode 100644 playbooks/util/client_cert.yml create mode 100644 playbooks/util/decomission_host.yml create mode 100644 playbooks/util/restore.yml create mode 100644 playbooks/util/wireguard_config.yml create mode 100644 playbooks/webserver_internal.yml create mode 100644 playbooks/webserver_public.yml create mode 100644 playbooks/wiki.yml create mode 100644 playbooks/xmpp.yml create mode 100644 playbooks/yum.yml create mode 100644 playbooks/znc.yml create mode 100644 plugins/tests/ip_in_subnet.py create mode 100644 requirements.txt create mode 100644 roles/apache/defaults/main.yml create mode 100644 roles/apache/files/etc/systemd/system/httpd.service.d/override.conf create mode 100644 roles/apache/handlers/main.yml create mode 100644 roles/apache/meta/main.yml create mode 100644 roles/apache/tasks/gssapi.yml create mode 100644 roles/apache/tasks/main.yml create mode 100644 roles/apache/templates/etc/httpd/conf.d/letsencrypt.conf.j2 create mode 100644 roles/apache/templates/etc/httpd/conf.d/ssl.conf.j2 create mode 100644 roles/apache/templates/etc/httpd/conf/httpd.conf.j2 create mode 100644 roles/apache/vars/main.yml create mode 100644 roles/apache_vhost/defaults/main.yml create mode 100644 roles/apache_vhost/meta/main.yml create mode 100644 roles/apache_vhost/tasks/main.yml create mode 100644 roles/apache_vhost/templates/etc/httpd/conf.d/vhost.conf.j2 create mode 100644 roles/apache_vhost/vars/main.yml create mode 100644 roles/archive_client/defaults/main.yml create mode 100644 roles/archive_client/tasks/main.yml create mode 100644 roles/archive_client/vars/main.yml create mode 100644 roles/archive_job/defaults/main.yml create mode 100644 roles/archive_job/meta/main.yml create mode 100644 
roles/archive_job/tasks/main.yml create mode 100644 roles/archive_server/defaults/main.yml create mode 100644 roles/archive_server/files/usr/local/libexec/archiver/archive_edgeswitch create mode 100644 roles/archive_server/files/usr/local/libexec/archiver/archive_opnsense create mode 100644 roles/archive_server/tasks/freeipa.yml create mode 100644 roles/archive_server/tasks/main.yml create mode 100644 roles/archive_server/templates/etc/archiver.conf.j2 create mode 100644 roles/archive_server/templates/usr/local/bin/archiver.sh.j2 create mode 100644 roles/archive_server/vars/main.yml create mode 100644 roles/asterisk/defaults/main.yml create mode 100644 roles/asterisk/files/etc/systemd/system/asterisk.service.d/override.conf create mode 100644 roles/asterisk/handlers/main.yml create mode 100644 roles/asterisk/meta/main.yml create mode 100644 roles/asterisk/tasks/main.yml create mode 100644 roles/asterisk/templates/etc/asterisk/ari.conf.j2 create mode 100644 roles/asterisk/templates/etc/asterisk/extensions.conf.j2 create mode 100644 roles/asterisk/templates/etc/asterisk/http.conf.j2 create mode 100644 roles/asterisk/templates/etc/asterisk/logger.conf.j2 create mode 100644 roles/asterisk/templates/etc/asterisk/pjsip.conf.j2 create mode 100644 roles/asterisk/templates/etc/asterisk/pjsip_wizard.conf.j2 create mode 100644 roles/asterisk/templates/etc/asterisk/queues.conf.j2 create mode 100644 roles/asterisk/templates/etc/asterisk/rtp.conf.j2 create mode 100644 roles/asterisk/templates/etc/asterisk/voicemail.conf.j2 create mode 100644 roles/asterisk/vars/main.yml create mode 100644 roles/certbot/defaults/main.yml create mode 100644 roles/certbot/files/etc/pki/tls/certbot-post.sh create mode 100644 roles/certbot/meta/main.yml create mode 100644 roles/certbot/tasks/main.yml create mode 100644 roles/certbot/vars/main.yml create mode 100644 roles/cgit/defaults/main.yml create mode 100644 roles/cgit/meta/main.yml create mode 100644 roles/cgit/tasks/main.yml create mode 100644 roles/cgit/templates/etc/cgitrc.j2 create mode 100644 roles/cgit/vars/main.yml create mode 100644 roles/chrony/defaults/main.yml create mode 100644 roles/chrony/handlers/main.yml create mode 100644 roles/chrony/tasks/main.yml create mode 100644 roles/chrony/templates/etc/chrony.conf.j2 create mode 100644 roles/coturn/defaults/main.yml create mode 100644 roles/coturn/handlers/main.yml create mode 100644 roles/coturn/tasks/main.yml create mode 100644 roles/coturn/templates/etc/coturn/turnserver.conf.j2 create mode 100644 roles/coturn/vars/main.yml create mode 100644 roles/cups_client/defaults/main.yml create mode 100644 roles/cups_client/handlers/main.yml create mode 100644 roles/cups_client/tasks/main.yml create mode 100644 roles/cups_client/templates/etc/cups/client.conf.j2 create mode 100644 roles/cups_server/defaults/main.yml create mode 100644 roles/cups_server/handlers/main.yml create mode 100644 roles/cups_server/tasks/freeipa.yml create mode 100644 roles/cups_server/tasks/main.yml create mode 100644 roles/cups_server/templates/etc/cups/cups-files.conf.j2 create mode 100644 roles/cups_server/templates/etc/cups/cupsd.conf.j2 create mode 100644 roles/cups_server/vars/main.yml create mode 100644 roles/dev_environment/meta/main.yml create mode 100644 roles/dev_environment/tasks/main.yml create mode 100644 roles/dev_environment/vars/main.yml create mode 100644 roles/devd/handlers/main.yml create mode 100644 roles/devd/tasks/main.yml create mode 100644 roles/dnf_automatic/defaults/main.yml create mode 100644 
roles/dnf_automatic/files/etc/dnf/automatic.conf create mode 100644 roles/dnf_automatic/files/usr/local/sbin/dnf-auto-restart create mode 100644 roles/dnf_automatic/handlers/main.yml create mode 100644 roles/dnf_automatic/tasks/main.yml create mode 100644 roles/dnf_automatic/templates/etc/systemd/system/dnf-automatic.service.d/override.conf.j2 create mode 100644 roles/dnf_automatic/templates/etc/systemd/system/dnf-automatic.timer.d/override.conf.j2 create mode 100644 roles/dnf_automatic/vars/main.yml create mode 100644 roles/dns_records/defaults/main.yml create mode 100644 roles/dns_records/tasks/main.yml create mode 100644 roles/dnsmasq/defaults/main.yml create mode 100644 roles/dnsmasq/handlers/main.yml create mode 100644 roles/dnsmasq/tasks/main.yml create mode 100644 roles/dnsmasq/templates/etc/NetworkManager/conf.d/9A-dns.conf.j2 create mode 100644 roles/dnsmasq/templates/etc/NetworkManager/dnsmasq.d/00-dnsmasq.conf.j2 create mode 100644 roles/dovecot/defaults/main.yml create mode 100644 roles/dovecot/files/etc/dovecot/sieve.before.d/10-rspamd.sieve create mode 100644 roles/dovecot/files/etc/dovecot/sieve/report-ham.sieve create mode 100644 roles/dovecot/files/etc/dovecot/sieve/report-spam.sieve create mode 100644 roles/dovecot/files/etc/dovecot/virtual/All Messages/dovecot-virtual create mode 100644 roles/dovecot/files/etc/dovecot/virtual/Flagged/dovecot-virtual create mode 100644 roles/dovecot/files/etc/dovecot/virtual/INBOX/dovecot-virtual create mode 100644 roles/dovecot/files/etc/systemd/system/dovecot.service.d/override.conf create mode 100644 roles/dovecot/files/var/lib/solr/dovecot/conf/schema.xml create mode 100644 roles/dovecot/handlers/main.yml create mode 100644 roles/dovecot/meta/main.yml create mode 100644 roles/dovecot/tasks/freeipa.yml create mode 100644 roles/dovecot/tasks/main.yml create mode 100644 roles/dovecot/tasks/rspamd.yml create mode 100644 roles/dovecot/tasks/solr.yml create mode 100644 roles/dovecot/templates/etc/dovecot/conf.d/10-auth.conf.j2 create mode 100644 roles/dovecot/templates/etc/dovecot/conf.d/10-mail.conf.j2 create mode 100644 roles/dovecot/templates/etc/dovecot/conf.d/10-master.conf.j2 create mode 100644 roles/dovecot/templates/etc/dovecot/conf.d/10-ssl.conf.j2 create mode 100644 roles/dovecot/templates/etc/dovecot/conf.d/15-lda.conf.j2 create mode 100644 roles/dovecot/templates/etc/dovecot/conf.d/15-mailboxes.conf.j2 create mode 100644 roles/dovecot/templates/etc/dovecot/conf.d/20-imap.conf.j2 create mode 100644 roles/dovecot/templates/etc/dovecot/conf.d/20-lmtp.conf.j2 create mode 100644 roles/dovecot/templates/etc/dovecot/conf.d/20-managesieve.conf.j2 create mode 100644 roles/dovecot/templates/etc/dovecot/conf.d/90-fts.conf.j2 create mode 100644 roles/dovecot/templates/etc/dovecot/conf.d/90-quota.conf.j2 create mode 100644 roles/dovecot/templates/etc/dovecot/conf.d/90-sieve-extprograms.conf.j2 create mode 100644 roles/dovecot/templates/etc/dovecot/conf.d/90-sieve.conf.j2 create mode 100644 roles/dovecot/templates/etc/dovecot/conf.d/auth-ldap.conf.ext.j2 create mode 100644 roles/dovecot/templates/etc/dovecot/conf.d/auth-system.conf.ext.j2 create mode 100644 roles/dovecot/templates/etc/dovecot/dovecot-ldap.conf.ext.j2 create mode 100644 roles/dovecot/templates/etc/dovecot/dovecot.conf.j2 create mode 100644 roles/dovecot/templates/usr/lib/dovecot/sieve-pipe/report-ham.sh.j2 create mode 100644 roles/dovecot/templates/usr/lib/dovecot/sieve-pipe/report-spam.sh.j2 create mode 100644 roles/dovecot/templates/usr/local/bin/dovecot-archive.sh.j2 create 
mode 100644 roles/dovecot/templates/usr/local/bin/dovecot-quota-warning.sh.j2 create mode 100644 roles/dovecot/templates/var/lib/solr/dovecot/conf/solrconfig.xml.j2 create mode 100644 roles/dovecot/vars/main.yml create mode 100644 roles/evolution/defaults/main.yml create mode 100644 roles/evolution/handlers/main.yml create mode 100644 roles/evolution/tasks/main.yml create mode 100644 roles/evolution/templates/etc/dconf/db/site.d/10-evolution.j2 create mode 100644 roles/evolution/templates/usr/local/share/evolution/sources/ac-caldav.source.j2 create mode 100644 roles/evolution/templates/usr/local/share/evolution/sources/ac-carddav.source.j2 create mode 100644 roles/evolution/templates/usr/local/share/evolution/sources/ac-imap.source.j2 create mode 100644 roles/evolution/templates/usr/local/share/evolution/sources/ac-mail.source.j2 create mode 100644 roles/evolution/templates/usr/local/share/evolution/sources/ac-smtp.source.j2 create mode 100644 roles/evolution/templates/usr/local/share/evolution/sources/ac-tasks.source.j2 create mode 100644 roles/evolution/vars/main.yml create mode 100644 roles/firefox/defaults/main.yml create mode 100644 roles/firefox/files/etc/profile.d/firefox.sh create mode 100644 roles/firefox/tasks/main.yml create mode 100644 roles/firefox/templates/usr/lib64/firefox/distribution/policies.json.j2 create mode 100644 roles/firewalld/tasks/main.yml create mode 100644 roles/freebsd_loader/defaults/main.yml create mode 100644 roles/freebsd_loader/tasks/main.yml create mode 100644 roles/freeipa_client/defaults/main.yml create mode 100644 roles/freeipa_client/files/etc/gssproxy/99-nfs-client.conf create mode 100644 roles/freeipa_client/handlers/main.yml create mode 100644 roles/freeipa_client/tasks/main.yml create mode 100644 roles/freeipa_client/vars/main.yml create mode 100644 roles/freeipa_keytab/defaults/main.yml create mode 100644 roles/freeipa_keytab/tasks/main.yml create mode 100644 roles/freeipa_keytab/vars/main.yml create mode 100644 roles/freeipa_server/defaults/main.yml create mode 100644 roles/freeipa_server/files/usr/local/share/dirsrv/schema/jid.ldif create mode 100644 roles/freeipa_server/handlers/main.yml create mode 100644 roles/freeipa_server/tasks/custom_schema.yml create mode 100644 roles/freeipa_server/tasks/main.yml create mode 100644 roles/freeipa_server/tasks/master.yml create mode 100644 roles/freeipa_server/tasks/replica.yml create mode 100644 roles/freeipa_server/templates/etc/named/ipa-options-ext.conf.j2 create mode 100644 roles/freeipa_server/templates/etc/pki/caIPAclientAuth.cfg.j2 create mode 100644 roles/freeipa_server/templates/etc/rsyslog.d/freeipa.conf.j2 create mode 100644 roles/freeipa_server/vars/main.yml create mode 100644 roles/freeipa_system_account/defaults/main.yml create mode 100644 roles/freeipa_system_account/tasks/main.yml create mode 100644 roles/freeradius/defaults/main.yml create mode 100644 roles/freeradius/files/etc/systemd/system/radiusd.service.d/override.conf create mode 100644 roles/freeradius/handlers/main.yml create mode 100644 roles/freeradius/tasks/freeipa.yml create mode 100644 roles/freeradius/tasks/main.yml create mode 100644 roles/freeradius/templates/etc/raddb/clients.conf.j2 create mode 100644 roles/freeradius/templates/etc/raddb/mods-available/eap.j2 create mode 100644 roles/freeradius/templates/etc/raddb/mods-available/ldap.j2 create mode 100644 roles/freeradius/templates/etc/raddb/radiusd.conf.j2 create mode 100644 roles/freeradius/templates/etc/raddb/sites-available/inner-tunnel.j2 create mode 100644 
roles/freeradius/vars/main.yml create mode 100644 roles/gather_facts/tasks/main.yml create mode 100644 roles/gathio/defaults/main.yml create mode 100644 roles/gathio/handlers/main.yml create mode 100644 roles/gathio/meta/main.yml create mode 100644 roles/gathio/tasks/main.yml create mode 100644 roles/gathio/templates/etc/systemd/system/gathio.service.j2 create mode 100644 roles/gathio/templates/var/lib/gathio/gathio/config/api.js.j2 create mode 100644 roles/gathio/templates/var/lib/gathio/gathio/config/database.js.j2 create mode 100644 roles/gathio/templates/var/lib/gathio/gathio/config/domain.js.j2 create mode 100644 roles/gathio/vars/main.yml create mode 100644 roles/getcert_request/defaults/main.yml create mode 100644 roles/getcert_request/tasks/main.yml create mode 100644 roles/getcert_request/vars/main.yml create mode 100644 roles/gitolite/defaults/main.yml create mode 100644 roles/gitolite/handlers/main.yml create mode 100644 roles/gitolite/meta/main.yml create mode 100644 roles/gitolite/tasks/freeipa.yml create mode 100644 roles/gitolite/tasks/main.yml create mode 100644 roles/gitolite/tasks/sshd.yml create mode 100644 roles/gitolite/templates/etc/ssh/sshd_config.d/gitolite.conf.j2 create mode 100644 roles/gitolite/templates/usr/local/bin/gitolite-authorizedkeys.j2 create mode 100644 roles/gitolite/templates/usr/local/bin/gitolite-grouplist.j2 create mode 100644 roles/gitolite/templates/var/www/cgi-bin/gitolite-wrapper.j2 create mode 100644 roles/gitolite/templates/var/www/git/.gitolite.rc.j2 create mode 100644 roles/gitolite/templates/var/www/git/.gitolite/conf/gitolite.conf.j2 create mode 100644 roles/gitolite/vars/main.yml create mode 100644 roles/grub/defaults/main.yml create mode 100644 roles/grub/tasks/main.yml create mode 100644 roles/gssproxy_client/defaults/main.yml create mode 100644 roles/gssproxy_client/tasks/main.yml create mode 100644 roles/gssproxy_client/templates/etc/gssproxy/client.conf.j2 create mode 100644 roles/hastebin/defaults/main.yml create mode 100644 roles/hastebin/files/var/lib/hastebin/haste-server/static/index.html create mode 100644 roles/hastebin/handlers/main.yml create mode 100644 roles/hastebin/tasks/main.yml create mode 100644 roles/hastebin/templates/etc/systemd/system/hastebin.service.j2 create mode 100644 roles/hastebin/templates/var/lib/hastebin/haste-server/config.js.j2 create mode 100644 roles/hastebin/vars/main.yml create mode 100644 roles/hostname/defaults/main.yml create mode 100644 roles/hostname/tasks/main.yml create mode 100644 roles/hostname/templates/etc/hosts.j2 create mode 100644 roles/invidious/defaults/main.yml create mode 100644 roles/invidious/handlers/main.yml create mode 100644 roles/invidious/meta/main.yml create mode 100644 roles/invidious/tasks/database.yml create mode 100644 roles/invidious/tasks/main.yml create mode 100644 roles/invidious/templates/etc/systemd/system/invidious.service.j2 create mode 100644 roles/invidious/templates/opt/invidious/invidious-db-cleanup.sh.j2 create mode 100644 roles/invidious/templates/opt/invidious/invidious-update.sh.j2 create mode 100644 roles/invidious/templates/opt/invidious/invidious/config/config.yml.j2 create mode 100644 roles/invidious/vars/main.yml create mode 100644 roles/jellyfin/defaults/main.yml create mode 100644 roles/jellyfin/handlers/main.yml create mode 100644 roles/jellyfin/meta/main.yml create mode 100644 roles/jellyfin/tasks/freeipa.yml create mode 100644 roles/jellyfin/tasks/main.yml create mode 100644 roles/jellyfin/templates/etc/jellyfin/network.xml.j2 create mode 
100644 roles/jellyfin/templates/etc/sysconfig/jellyfin.j2 create mode 100644 roles/jellyfin/templates/etc/systemd/system/jellyfin.service.j2 create mode 100644 roles/jellyfin/templates/var/lib/jellyfin/plugins/configurations/LDAP-Auth.xml.j2 create mode 100644 roles/jellyfin/vars/main.yml create mode 100644 roles/journald/defaults/main.yml create mode 100644 roles/journald/handlers/main.yml create mode 100644 roles/journald/tasks/main.yml create mode 100644 roles/journald/templates/etc/systemd/journald.conf.j2 create mode 100644 roles/linux_desktop/defaults/main.yml create mode 100644 roles/linux_desktop/files/etc/dconf/db/local.d/00-hidpi create mode 100644 roles/linux_desktop/files/etc/dconf/db/local.d/locks/hidpi create mode 100644 roles/linux_desktop/files/usr/local/share/thumbnailers/totem.thumbnailer create mode 100644 roles/linux_desktop/handlers/main.yml create mode 100644 roles/linux_desktop/meta/main.yml create mode 100644 roles/linux_desktop/tasks/freeipa.yml create mode 100644 roles/linux_desktop/tasks/main.yml create mode 100644 roles/linux_desktop/templates/etc/dconf/db/local.d/00-gnome.j2 create mode 100644 roles/linux_desktop/templates/etc/gdm/custom.conf.j2 create mode 100644 roles/linux_desktop/vars/main.yml create mode 100644 roles/linux_laptop/defaults/main.yml create mode 100644 roles/linux_laptop/tasks/freeipa.yml create mode 100644 roles/linux_laptop/tasks/main.yml create mode 100644 roles/linux_laptop/templates/etc/sysconfig/network-scripts/ifcfg-ssid.j2 create mode 100644 roles/linux_laptop/templates/etc/sysconfig/network-scripts/keys-ssid.j2 create mode 100644 roles/linux_laptop/vars/main.yml create mode 100644 roles/local_homedirs/files/etc/profile.d/local-homedirs.sh create mode 100644 roles/local_homedirs/files/etc/security/pam_env_xdg.conf create mode 100644 roles/local_homedirs/files/usr/local/sbin/create-local-homedir.sh create mode 100644 roles/local_homedirs/tasks/main.yml create mode 100644 roles/local_homedirs/vars/main.yml create mode 100644 roles/locale/defaults/main.yml create mode 100644 roles/locale/tasks/main.yml create mode 100644 roles/mediawiki/defaults/main.yml create mode 100644 roles/mediawiki/files/var/www/mediawiki/robots.txt create mode 100644 roles/mediawiki/meta/main.yml create mode 100644 roles/mediawiki/tasks/database.yml create mode 100644 roles/mediawiki/tasks/extension.yml create mode 100644 roles/mediawiki/tasks/freeipa.yml create mode 100644 roles/mediawiki/tasks/main.yml create mode 100644 roles/mediawiki/templates/var/www/mediawiki/LocalSettings.php.j2 create mode 100644 roles/mediawiki/vars/main.yml create mode 100644 roles/motd/tasks/main.yml create mode 100644 roles/nagios_client/files/usr/lib64/nagios/plugins/check_mem create mode 100644 roles/nagios_client/files/usr/lib64/nagios/plugins/check_needs_restart create mode 100644 roles/nagios_client/files/usr/lib64/nagios/plugins/check_systemd create mode 100644 roles/nagios_client/files/usr/lib64/nagios/plugins/check_zpools create mode 100644 roles/nagios_client/meta/main.yml create mode 100644 roles/nagios_client/tasks/main.yml create mode 100644 roles/nagios_client/templates/etc/sudoers.d/nagios.j2 create mode 100644 roles/nagios_client/vars/main.yml create mode 100644 roles/nagios_server/defaults/main.yml create mode 100644 roles/nagios_server/files/usr/lib64/nagios/plugins/check_asterisk_endpoints create mode 100644 roles/nagios_server/handlers/main.yml create mode 100644 roles/nagios_server/meta/main.yml create mode 100644 roles/nagios_server/tasks/freeipa.yml create mode 
100644 roles/nagios_server/tasks/main.yml create mode 100644 roles/nagios_server/tasks/objects.yml create mode 100644 roles/nagios_server/templates/etc/nagios/cgi.cfg.j2 create mode 100644 roles/nagios_server/templates/etc/nagios/nagios.cfg.j2 create mode 100644 roles/nagios_server/templates/etc/nagios/objects/commands.cfg.j2 create mode 100644 roles/nagios_server/templates/etc/nagios/objects/contacts.cfg.j2 create mode 100644 roles/nagios_server/templates/etc/nagios/objects/hostgroups.cfg.j2 create mode 100644 roles/nagios_server/templates/etc/nagios/objects/hosts.cfg.j2 create mode 100644 roles/nagios_server/templates/etc/nagios/objects/servicedependencies.cfg.j2 create mode 100644 roles/nagios_server/templates/etc/nagios/objects/servicegroups.cfg.j2 create mode 100644 roles/nagios_server/templates/etc/nagios/objects/services.cfg.j2 create mode 100644 roles/nagios_server/templates/etc/nagios/objects/templates.cfg.j2 create mode 100644 roles/nagios_server/templates/etc/nagios/objects/timeperiods.cfg.j2 create mode 100644 roles/nagios_server/templates/etc/nagios/private/resource.cfg.j2 create mode 100644 roles/nagios_server/templates/usr/share/nagios/html/config.inc.php.j2 create mode 100644 roles/nagios_server/vars/main.yml create mode 100644 roles/nfs_server/defaults/main.yml create mode 100644 roles/nfs_server/files/etc/samba/local.conf create mode 100644 roles/nfs_server/handlers/main.yml create mode 100644 roles/nfs_server/meta/main.yml create mode 100644 roles/nfs_server/tasks/autofs.yml create mode 100644 roles/nfs_server/tasks/exports.yml create mode 100644 roles/nfs_server/tasks/homedirs.yml create mode 100644 roles/nfs_server/tasks/main.yml create mode 100644 roles/nfs_server/tasks/nfs.yml create mode 100644 roles/nfs_server/tasks/smb.yml create mode 100644 roles/nfs_server/templates/etc/exports.j2 create mode 100644 roles/nfs_server/templates/etc/nfs.conf.j2 create mode 100644 roles/nfs_server/templates/etc/samba/shares.conf.j2 create mode 100644 roles/nfs_server/vars/main.yml create mode 100644 roles/nim/defaults/main.yml create mode 100644 roles/nim/tasks/main.yml create mode 100644 roles/nim/vars/main.yml create mode 100644 roles/nitter/defaults/main.yml create mode 100644 roles/nitter/handlers/main.yml create mode 100644 roles/nitter/meta/main.yml create mode 100644 roles/nitter/tasks/main.yml create mode 100644 roles/nitter/templates/etc/systemd/system/nitter.service.j2 create mode 100644 roles/nitter/templates/opt/nitter/nitter-update.sh.j2 create mode 100644 roles/nitter/templates/opt/nitter/nitter/nitter.conf.j2 create mode 100644 roles/nitter/vars/main.yml create mode 100644 roles/nsd/defaults/main.yml create mode 100644 roles/nsd/handlers/main.yml create mode 100644 roles/nsd/tasks/generate_zone.yml create mode 100644 roles/nsd/tasks/main.yml create mode 100644 roles/nsd/templates/etc/nsd/nsd.conf.j2 create mode 100644 roles/nsd/vars/main.yml create mode 100644 roles/packages/defaults/main.yml create mode 100644 roles/packages/tasks/main.yml create mode 100644 roles/photostructure/defaults/main.yml create mode 100644 roles/photostructure/handlers/main.yml create mode 100644 roles/photostructure/meta/main.yml create mode 100644 roles/photostructure/tasks/freeipa.yml create mode 100644 roles/photostructure/tasks/main.yml create mode 100644 roles/photostructure/templates/etc/sysconfig/photostructure create mode 100644 roles/photostructure/templates/etc/systemd/system/photostructure.service.j2 create mode 100644 
roles/photostructure/templates/opt/photostructure/photostructure-update.sh.j2 create mode 100644 roles/photostructure/vars/main.yml create mode 100644 roles/php/defaults/main.yml create mode 100644 roles/php/files/etc/systemd/system/php-fpm.service.d/override.conf create mode 100644 roles/php/handlers/main.yml create mode 100644 roles/php/tasks/main.yml create mode 100644 roles/php/templates/etc/php-fpm.conf.j2 create mode 100644 roles/php/templates/etc/php-fpm.d/www.conf.j2 create mode 100644 roles/php/templates/etc/php.ini.j2 create mode 100644 roles/php/vars/main.yml create mode 100644 roles/polkit/defaults/main.yml create mode 100644 roles/polkit/tasks/main.yml create mode 100644 roles/polkit/templates/etc/polkit-1/rules.d/40-default.rules.j2 create mode 100644 roles/postfix_client/defaults/main.yml create mode 100644 roles/postfix_client/handlers/main.yml create mode 100644 roles/postfix_client/tasks/main.yml create mode 100644 roles/postfix_client/templates/etc/postfix/main.cf.j2 create mode 100644 roles/postfix_client/vars/main.yml create mode 100644 roles/postfix_server/defaults/main.yml create mode 100644 roles/postfix_server/files/etc/sasl2/smtpd.conf create mode 100644 roles/postfix_server/files/etc/systemd/system/postfix.service.d/override.conf create mode 100644 roles/postfix_server/handlers/main.yml create mode 100644 roles/postfix_server/tasks/freeipa.yml create mode 100644 roles/postfix_server/tasks/main.yml create mode 100644 roles/postfix_server/templates/etc/postfix/main.cf.j2 create mode 100644 roles/postfix_server/templates/etc/postfix/master.cf.j2 create mode 100644 roles/postfix_server/templates/etc/postfix/virtual_aliases.cf.j2 create mode 100644 roles/postfix_server/templates/etc/postfix/virtual_mailboxes.cf.j2 create mode 100644 roles/postfix_server/vars/main.yml create mode 100644 roles/postgresql_server/defaults/main.yml create mode 100644 roles/postgresql_server/files/etc/systemd/system/postgresql.service.d/override.conf create mode 100644 roles/postgresql_server/handlers/main.yml create mode 100644 roles/postgresql_server/tasks/freeipa.yml create mode 100644 roles/postgresql_server/tasks/main.yml create mode 100644 roles/postgresql_server/templates/var/lib/pgsql/data/pg_hba.conf.j2 create mode 100644 roles/postgresql_server/templates/var/lib/pgsql/data/postgresql.conf.j2 create mode 100644 roles/postgresql_server/vars/main.yml create mode 100644 roles/prosody/defaults/main.yml create mode 100644 roles/prosody/files/etc/systemd/system/prosody.service.d/override.conf create mode 100644 roles/prosody/handlers/main.yml create mode 100644 roles/prosody/meta/main.yml create mode 100644 roles/prosody/tasks/database.yml create mode 100644 roles/prosody/tasks/freeipa.yml create mode 100644 roles/prosody/tasks/main.yml create mode 100644 roles/prosody/templates/etc/prosody/prosody.cfg.lua.j2 create mode 100644 roles/prosody/templates/usr/local/bin/prosody-update-roster.j2 create mode 100644 roles/prosody/vars/main.yml create mode 100644 roles/prosody_letsencrypt_proxy/defaults/main.yml create mode 100644 roles/prosody_letsencrypt_proxy/handlers/main.yml create mode 100644 roles/prosody_letsencrypt_proxy/tasks/main.yml create mode 100644 roles/prosody_letsencrypt_proxy/tasks/master.yml create mode 100644 roles/prosody_letsencrypt_proxy/tasks/slave.yml create mode 100644 roles/prosody_letsencrypt_proxy/templates/etc/ssh/sshd_config.d/99-prosody-le-proxy.conf create mode 100644 roles/prosody_letsencrypt_proxy/templates/usr/local/sbin/prosody-letsencrypt-proxy.j2 create 
mode 100644 roles/prosody_letsencrypt_proxy/vars/main.yml create mode 100644 roles/proxmox_hypervisor/defaults/main.yml create mode 100644 roles/proxmox_hypervisor/files/etc/apt/apt.conf.d/20auto-upgrades create mode 100644 roles/proxmox_hypervisor/files/etc/apt/apt.conf.d/50unattended-upgrades create mode 120000 roles/proxmox_hypervisor/files/usr/lib/nagios/plugins create mode 100644 roles/proxmox_hypervisor/handlers/main.yml create mode 100644 roles/proxmox_hypervisor/tasks/chrony.yml create mode 100644 roles/proxmox_hypervisor/tasks/main.yml create mode 100644 roles/proxmox_hypervisor/tasks/nagios.yml create mode 100644 roles/proxmox_hypervisor/tasks/postfix.yml create mode 100644 roles/proxmox_hypervisor/tasks/pve.yml create mode 100644 roles/proxmox_hypervisor/tasks/pve_api_user.yml create mode 100644 roles/proxmox_hypervisor/tasks/pve_kvm_template.yml create mode 100644 roles/proxmox_hypervisor/tasks/rsyslog.yml create mode 100644 roles/proxmox_hypervisor/tasks/sudo.yml create mode 100644 roles/proxmox_hypervisor/tasks/unattended_upgrades.yml create mode 100644 roles/proxmox_hypervisor/tasks/zfs.yml create mode 100644 roles/proxmox_hypervisor/templates/etc/chrony/chrony.conf.j2 create mode 100644 roles/proxmox_hypervisor/templates/etc/postfix/main.cf.j2 create mode 100644 roles/proxmox_hypervisor/templates/etc/rsyslog.d/forward.conf.j2 create mode 100644 roles/proxmox_hypervisor/templates/etc/snmp/snmpd.conf.j2 create mode 100644 roles/proxmox_hypervisor/templates/etc/sudoers.d/nagios.j2 create mode 100644 roles/proxmox_hypervisor/templates/etc/sudoers.j2 create mode 100644 roles/proxmox_hypervisor/templates/etc/systemd/system/zfs-scrub@.service.j2 create mode 100644 roles/proxmox_hypervisor/templates/etc/systemd/system/zfs-scrub@.timer.j2 create mode 100644 roles/proxmox_hypervisor/templates/etc/systemd/system/zfs-trim@.service.j2 create mode 100644 roles/proxmox_hypervisor/templates/etc/systemd/system/zfs-trim@.timer.j2 create mode 100644 roles/proxmox_hypervisor/templates/etc/zfs/zed.d/zed.rc.j2 create mode 100644 roles/proxmox_hypervisor/templates/var/lib/vz/snippets/userdata.yaml.j2 create mode 100644 roles/proxmox_hypervisor/vars/main.yml create mode 100644 roles/proxmox_instance/defaults/main.yml create mode 100644 roles/proxmox_instance/tasks/main.yml create mode 100644 roles/psitransfer/defaults/main.yml create mode 100644 roles/psitransfer/handlers/main.yml create mode 100644 roles/psitransfer/tasks/main.yml create mode 100644 roles/psitransfer/templates/etc/systemd/system/psitransfer.service.j2 create mode 100644 roles/psitransfer/templates/var/lib/psitransfer/psitransfer/config.production.js.j2 create mode 100644 roles/psitransfer/vars/main.yml create mode 100644 roles/pxe_server/README.txt create mode 100644 roles/pxe_server/defaults/main.yml create mode 100644 roles/pxe_server/tasks/extract_iso.yml create mode 100644 roles/pxe_server/tasks/main.yml create mode 100644 roles/pxe_server/templates/grub/grub.cfg.j2 create mode 100644 roles/pxe_server/templates/grub/menuentry-redhat.cfg.j2 create mode 100644 roles/pxe_server/templates/kickstart/rocky8-ks.cfg.j2 create mode 100644 roles/pxe_server/vars/main.yml create mode 100644 roles/qemu_guest_agent/tasks/main.yml create mode 100644 roles/redis/defaults/main.yml create mode 100644 roles/redis/files/etc/systemd/system/redis@.service create mode 100644 roles/redis/tasks/main.yml create mode 100644 roles/redis/templates/etc/redis.conf.j2 create mode 100644 roles/redis/vars/main.yml create mode 100644 
roles/root_authorized_keys/defaults/main.yml create mode 100644 roles/root_authorized_keys/tasks/main.yml create mode 100644 roles/root_password/defaults/main.yml create mode 100644 roles/root_password/tasks/main.yml create mode 100644 roles/rspamd/defaults/main.yml create mode 100644 roles/rspamd/handlers/main.yml create mode 100644 roles/rspamd/meta/main.yml create mode 100644 roles/rspamd/tasks/main.yml create mode 100644 roles/rspamd/templates/etc/rspamd/local.d/classifier-bayes.conf.j2 create mode 100644 roles/rspamd/templates/etc/rspamd/local.d/dkim_signing.conf.j2 create mode 100644 roles/rspamd/templates/etc/rspamd/local.d/greylist.conf.j2 create mode 100644 roles/rspamd/templates/etc/rspamd/local.d/logging.inc.j2 create mode 100644 roles/rspamd/templates/etc/rspamd/local.d/multimap.conf.j2 create mode 100644 roles/rspamd/templates/etc/rspamd/local.d/phishing.conf.j2 create mode 100644 roles/rspamd/templates/etc/rspamd/local.d/redis.conf.j2 create mode 100644 roles/rspamd/templates/etc/rspamd/local.d/replies.conf.j2 create mode 100644 roles/rspamd/templates/etc/rspamd/local.d/worker-controller.inc.j2 create mode 100644 roles/rspamd/templates/etc/rspamd/local.d/worker-normal.inc.j2 create mode 100644 roles/rspamd/templates/etc/rspamd/local.d/worker-proxy.inc.j2 create mode 100644 roles/rspamd/vars/main.yml create mode 100644 roles/rsyslog_client/defaults/main.yml create mode 100644 roles/rsyslog_client/handlers/main.yml create mode 100644 roles/rsyslog_client/tasks/main.yml create mode 100644 roles/rsyslog_client/templates/etc/rsyslog.conf.j2 create mode 100644 roles/rsyslog_client/vars/main.yml create mode 100644 roles/rsyslog_server/defaults/main.yml create mode 100644 roles/rsyslog_server/handlers/main.yml create mode 100644 roles/rsyslog_server/tasks/main.yml create mode 100644 roles/rsyslog_server/templates/etc/rsyslog.conf.j2 create mode 100644 roles/rsyslog_server/vars/main.yml create mode 100644 roles/sabredav/defaults/main.yml create mode 100644 roles/sabredav/tasks/composer.yml create mode 100644 roles/sabredav/tasks/database.yml create mode 100644 roles/sabredav/tasks/freeipa.yml create mode 100644 roles/sabredav/tasks/main.yml create mode 100644 roles/sabredav/templates/var/www/sabredav/server.php.j2 create mode 100644 roles/sabredav/vars/main.yml create mode 100644 roles/selinux/defaults/main.yml create mode 100644 roles/selinux/tasks/main.yml create mode 100644 roles/selinux/vars/main.yml create mode 100644 roles/selinux_policy/tasks/main.yml create mode 100644 roles/selinux_policy/vars/main.yml create mode 100644 roles/snmp/defaults/main.yml create mode 100644 roles/snmp/handlers/main.yml create mode 100644 roles/snmp/tasks/main.yml create mode 100644 roles/snmp/templates/etc/snmp/snmpd.conf.j2 create mode 100644 roles/snmp/vars/main.yml create mode 100644 roles/solr/defaults/main.yml create mode 100644 roles/solr/handlers/main.yml create mode 100644 roles/solr/tasks/main.yml create mode 100644 roles/solr/templates/etc/solr/log4j2.xml.j2 create mode 100644 roles/solr/templates/etc/solr/solrconfig.xml.j2 create mode 100644 roles/solr/templates/etc/sysconfig/solr.j2 create mode 100644 roles/solr/templates/etc/systemd/system/solr.service.j2 create mode 100644 roles/solr/vars/main.yml create mode 100644 roles/ssh/defaults/main.yml create mode 100644 roles/ssh/tasks/main.yml create mode 100644 roles/ssh/templates/etc/ssh/ssh_config.j2 create mode 100644 roles/sudo/defaults/main.yml create mode 100644 roles/sudo/tasks/main.yml create mode 100644 
roles/sudo/templates/etc/sudoers.j2 create mode 100644 roles/syncthing/defaults/main.yml create mode 100644 roles/syncthing/meta/main.yml create mode 100644 roles/syncthing/tasks/main.yml create mode 100644 roles/syncthing/tasks/syncthing_user.yml create mode 100644 roles/syncthing/templates/etc/systemd/system/syncthing-user@.service.j2 create mode 100644 roles/syncthing/templates/var/lib/syncthing/config.xml.j2 create mode 100644 roles/syncthing/templates/var/www/html/index.html.j2 create mode 100644 roles/syncthing/vars/main.yml create mode 100644 roles/systemd_timer/defaults/main.yml create mode 100644 roles/systemd_timer/tasks/main.yml create mode 100644 roles/systemd_timer/templates/etc/systemd/system/task.service.j2 create mode 100644 roles/systemd_timer/templates/etc/systemd/system/task.timer.j2 create mode 100644 roles/teddit/defaults/main.yml create mode 100644 roles/teddit/handlers/main.yml create mode 100644 roles/teddit/meta/main.yml create mode 100644 roles/teddit/tasks/main.yml create mode 100644 roles/teddit/templates/etc/systemd/system/teddit.service.j2 create mode 100644 roles/teddit/templates/opt/teddit/teddit-update.sh.j2 create mode 100644 roles/teddit/templates/opt/teddit/teddit/config.js.j2 create mode 100644 roles/teddit/vars/main.yml create mode 100644 roles/tika/defaults/main.yml create mode 100644 roles/tika/handlers/main.yml create mode 100644 roles/tika/tasks/main.yml create mode 100644 roles/tika/templates/etc/sysconfig/tika.j2 create mode 100644 roles/tika/templates/etc/systemd/system/tika.service.j2 create mode 100644 roles/tika/templates/etc/tika/config.xml.j2 create mode 100644 roles/tika/templates/etc/tika/log4j2.xml.j2 create mode 100644 roles/tika/vars/main.yml create mode 100644 roles/timezone/default/main.yml create mode 100644 roles/timezone/tasks/main.yml create mode 100644 roles/ttrss/defaults/main.yml create mode 100644 roles/ttrss/handlers/main.yml create mode 100644 roles/ttrss/tasks/database.yml create mode 100644 roles/ttrss/tasks/freeipa.yml create mode 100644 roles/ttrss/tasks/main.yml create mode 100644 roles/ttrss/templates/etc/systemd/system/ttrss.service.j2 create mode 100644 roles/ttrss/templates/usr/local/sbin/ttrss-update.sh.j2 create mode 100644 roles/ttrss/templates/var/www/ttrss/config.php.j2 create mode 100644 roles/ttrss/vars/main.yml create mode 100644 roles/tuned/defaults/main.yml create mode 100644 roles/tuned/tasks/main.yml create mode 100644 roles/udev/defaults/main.yml create mode 100644 roles/udev/handlers/main.yml create mode 100644 roles/udev/tasks/main.yml create mode 100644 roles/udev/templates/etc/udev/rules.d/pci_pm.rules.j2 create mode 100644 roles/unifi/files/etc/rsyslog.d/unifi.conf create mode 100644 roles/unifi/handlers/main.yml create mode 100644 roles/unifi/meta/main.yml create mode 100644 roles/unifi/tasks/main.yml create mode 100644 roles/unifi/templates/usr/local/sbin/unifi-certificate-update.sh.j2 create mode 100644 roles/unifi/vars/main.yml create mode 100644 roles/vaultwarden/defaults/main.yml create mode 100644 roles/vaultwarden/handlers/main.yml create mode 100644 roles/vaultwarden/tasks/database.yml create mode 100644 roles/vaultwarden/tasks/freeipa.yml create mode 100644 roles/vaultwarden/tasks/main.yml create mode 100644 roles/vaultwarden/templates/etc/sysconfig/vaultwarden.j2 create mode 100644 roles/vaultwarden/templates/etc/systemd/system/vaultwarden.service.j2 create mode 100644 roles/vaultwarden/vars/main.yml create mode 100644 roles/yum/files/etc/pki/rpm-gpg/RPM-GPG-KEY-EPEL-8 create mode 
100644 roles/yum/files/etc/pki/rpm-gpg/RPM-GPG-KEY-EPEL-9 create mode 100644 roles/yum/files/etc/pki/rpm-gpg/RPM-GPG-KEY-Rocky-8 create mode 100644 roles/yum/files/etc/pki/rpm-gpg/RPM-GPG-KEY-Rocky-9 create mode 100644 roles/yum/files/etc/pki/rpm-gpg/RPM-GPG-KEY-crystal-8 create mode 100644 roles/yum/files/etc/pki/rpm-gpg/RPM-GPG-KEY-mongodb-4.0 create mode 100644 roles/yum/files/etc/pki/rpm-gpg/RPM-GPG-KEY-mongodb-4.4 create mode 100644 roles/yum/files/etc/pki/rpm-gpg/RPM-GPG-KEY-mongodb-6.0 create mode 100644 roles/yum/files/etc/pki/rpm-gpg/RPM-GPG-KEY-rpmfusion-free-el-8 create mode 100644 roles/yum/files/etc/pki/rpm-gpg/RPM-GPG-KEY-rpmfusion-free-el-9 create mode 100644 roles/yum/files/etc/pki/rpm-gpg/RPM-GPG-KEY-rpmfusion-nonfree-el-8 create mode 100644 roles/yum/files/etc/pki/rpm-gpg/RPM-GPG-KEY-rpmfusion-nonfree-el-9 create mode 100644 roles/yum/files/etc/pki/rpm-gpg/RPM-GPG-KEY-rspamd-stable create mode 100644 roles/yum/files/etc/pki/rpm-gpg/RPM-GPG-KEY-zfsonlinux-8 create mode 100644 roles/yum/files/etc/pki/rpm-gpg/RPM-GPG-KEY-zfsonlinux-9 create mode 100644 roles/yum/tasks/main.yml create mode 100644 roles/yum/vars/main.yml create mode 100644 roles/yum_disable_default_repos/tasks/main.yml create mode 100644 roles/yum_disable_default_repos/vars/main.yml create mode 100644 roles/yum_mirror/defaults/main.yml create mode 100644 roles/yum_mirror/handlers/main.yml create mode 100644 roles/yum_mirror/tasks/main.yml create mode 100644 roles/yum_mirror/templates/usr/local/sbin/reposync.sh.j2 create mode 100644 roles/yum_mirror/vars/main.yml create mode 100644 roles/zfs/defaults/main.yml create mode 100644 roles/zfs/handlers/main.yml create mode 100644 roles/zfs/meta/main.yml create mode 100644 roles/zfs/tasks/create_zpool.yml create mode 100644 roles/zfs/tasks/main.yml create mode 100644 roles/zfs/templates/etc/systemd/system/zfs-scrub@.service.j2 create mode 100644 roles/zfs/templates/etc/systemd/system/zfs-scrub@.timer.j2 create mode 100644 roles/zfs/templates/etc/systemd/system/zfs-trim@.service.j2 create mode 100644 roles/zfs/templates/etc/systemd/system/zfs-trim@.timer.j2 create mode 100644 roles/zfs/templates/etc/zfs/zed.d/zed.rc.j2 create mode 100644 roles/zfs/vars/main.yml create mode 100644 roles/znc/defaults/main.yml create mode 100644 roles/znc/files/etc/sasl2/znc.conf create mode 100644 roles/znc/handlers/main.yml create mode 100644 roles/znc/meta/main.yml create mode 100644 roles/znc/tasks/freeipa.yml create mode 100644 roles/znc/tasks/main.yml create mode 100644 roles/znc/templates/var/lib/znc/.znc/configs/znc.conf.j2 create mode 100644 roles/znc/templates/var/lib/znc/.znc/moddata/cyrusauth/.registry.j2 create mode 100644 roles/znc/vars/main.yml create mode 160000 vendor/ansible-freeipa diff --git a/.gitignore b/.gitignore new file mode 100644 index 0000000..df1f019 --- /dev/null +++ b/.gitignore @@ -0,0 +1,6 @@ +*.pyc +*.swp +*.swo +.nfs* +.vault_password +backups diff --git a/.gitmodules b/.gitmodules new file mode 100644 index 0000000..98938a0 --- /dev/null +++ b/.gitmodules @@ -0,0 +1,3 @@ +[submodule "vendor/ansible-freeipa"] + path = vendor/ansible-freeipa + url = https://github.com/freeipa/ansible-freeipa diff --git a/LICENSE b/LICENSE new file mode 100644 index 0000000..6fa4003 --- /dev/null +++ b/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2023 stonewall@sacredheartsc.com + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without 
restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
diff --git a/README.md b/README.md
new file mode 100644
index 0000000..52ac46b
--- /dev/null
+++ b/README.md
@@ -0,0 +1,100 @@
+sacredheart-selfhosted
+======================
+
+An Ansible framework for self-hosted infrastructure, based on
+[Rocky Linux](https://rockylinux.org/) and [FreeIPA](https://www.freeipa.org/).
+
+## What is this?
+
+I believe that self-hosting our online services is the best way to recapture the
+original pioneer spirit of the Internet. By owning our own digital footprints, we
+can resist dragnet surveillance, stymie omniscient advertising networks, and curb
+technocratic censorship.
+
+`sacredheart-selfhosted` is a collection of Ansible roles based on multiple years
+of experience self-hosting my family's digital infrastructure from home.
+
+Although this project is not intended to be a turn-key appliance, it should be
+easy to adapt to your own environment with some basic sysadmin skills. I've
+provided an example inventory to get you started.
+
+## Prerequisites
+
+The [example inventory](inventory-example) is based on my home network, which
+consists of the following:
+
+ - A residential internet connection with a handful of static IPv4 addresses.
+
+ - Some desktop computers, laptops, VOIP phones, and a NAS.
+
+ - A [Proxmox](https://www.proxmox.com/en/proxmox-ve) hypervisor for running
+   virtual machines.
+
+ - An [OPNsense](https://opnsense.org/) firewall and various VLANs for managing
+   internet and intranet traffic.
+
+It's assumed that you already have a working network. Other than setting VLAN
+tags and `cloud-init` IP configuration for virtual machines, none of the playbooks
+touch your network infrastructure.
+
+## Design
+
+`sacredheart-selfhosted` is designed for [Rocky Linux](https://rockylinux.org/) 9.
+A small number of roles require Rocky Linux 8 due to package availability.
+
+There's no Docker, no containers, and no `curl | bash`. Everything is installed
+from official repos or [EPEL](https://docs.fedoraproject.org/en-US/epel/),
+and managed using systemd. For services that lack official RPMs, the software is
+built locally from the upstream source repository during the playbook run.
+
+All network services listen on the local IP of the virtual machine. If you want
+to expose a service to the internet, it is assumed that you will configure your
+firewall for 1:1 NAT.
+
+There is no IPv6 support whatsoever. If my ISP ever rolls out IPv6, I'll look
+into it.
+
+## Features
+
+Modular [Ansible roles](roles) are used to manage VMs and configure each service.
+
+| Role | Description |
+|----------------------------------------------|-------------|
+`[proxmox_instance](roles/proxmox_instance)` | Automatically provisions a [Proxmox](https://www.proxmox.com/) VM with the given hardware and cloud-init configuration
+`[freeipa_server](roles/freeipa_server)` | [FreeIPA](https://www.freeipa.org/) provides identity management, access control, certificate management, and Single Sign-On for all services via Kerberos/GSSAPI
+`[yum_mirror](roles/yum_mirror)` | Mirrors all package repositories locally
+`[rsyslog_server](roles/rsyslog_server)` | Centralized syslog storage using [Rsyslog](https://www.rsyslog.com/)
+`[postfix_server](roles/postfix_server)` | Public-facing mail server using [Postfix](https://www.postfix.org/)
+`[dovecot](roles/dovecot)` | [Dovecot](https://www.dovecot.org/) IMAP server, with full-text and attachment search
+`[rspamd](roles/rspamd)` | [Rspamd](https://rspamd.com/) spam filtering system
+`[sabredav](roles/sabredav)` | [sabre/dav](https://sabre.io/) CalDAV and CardDAV server with custom FreeIPA integration
+`[prosody](roles/prosody)` | [Prosody](https://prosody.im/) XMPP server
+`[gitolite](roles/gitolite)` | Git repository hosting with [Gitolite](https://gitolite.com/gitolite/index.html) access control
+`[cgit](roles/cgit)` | [cgit](https://git.zx2c4.com/cgit/) web frontend for Git
+`[vaultwarden](roles/vaultwarden)` | [Bitwarden-compatible password manager](https://github.com/dani-garcia/vaultwarden)
+`[ttrss](roles/ttrss)` | [Tiny Tiny RSS](https://tt-rss.org/) feed aggregator
+`[mediawiki](roles/mediawiki)` | [MediaWiki](https://www.mediawiki.org/) wiki platform
+`[jellyfin](roles/jellyfin)` | [Jellyfin](https://jellyfin.org/) media system
+`[invidious](roles/invidious)` | [Invidious](https://invidious.io/) open source YouTube frontend
+`[nitter](roles/nitter)` | [Nitter](https://github.com/zedeus/nitter) open source Twitter frontend
+`[teddit](roles/teddit)` | [Teddit](https://codeberg.org/teddit/teddit) open source Reddit frontend
+`[hastebin](roles/hastebin)` | [Hastebin](https://github.com/toptal/haste-server) open source pastebin
+`[psitransfer](roles/psitransfer)` | [PsiTransfer](https://github.com/psi-4ward/psitransfer) public file sharing
+`[nfs_server](roles/nfs_server)` | Configures ZFS datasets, NFS exports, SMB shares, ACLs, and autofs maps
+`[syncthing](roles/syncthing)` | Per-user [Syncthing](https://syncthing.net/) instances that sync files to your NFS home directory
+`[asterisk](roles/asterisk)` | [Asterisk](https://www.asterisk.org/) PBX for VOIP phones
+`[nsd](roles/nsd)` | Authoritative DNS server
+`[nagios](roles/nagios)` | Monitors all hosts and services, with automatically generated configuration
+`[znc](roles/znc)` | [ZNC](https://znc.in/) IRC bouncer
+`[cups](roles/cups)` | Centralized network printing
+`[unifi](roles/unifi)` | [UniFi](https://www.ui.com/) controller for managing Ubiquiti access points
+`[freeradius](roles/freeradius)` | WPA Enterprise authentication for WiFi using FreeIPA credentials or SSL certificates
+
+All services authenticate against the local FreeIPA domain. On a domain-joined
+workstation, Kerberos/GSSAPI is used for single sign-on.
+
+
+## Todo
+
+Currently, this repository is just a big pile of YAML. More documentation and
+how-to guides are coming soon!
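+
+In the meantime, here is a rough sketch of how the roles are consumed. This is
+hypothetical: the role name (`znc`) and the constructed inventory group
+(`znc_servers`) exist in this repository, but the snippet is not a copy of any
+playbook shipped under [playbooks](playbooks), which remain the authoritative
+reference.
+
+```yaml
+# Illustrative only -- apply a service role to its matching inventory group.
+- name: Configure the ZNC bouncer
+  hosts: znc_servers
+  roles:
+    - znc
+```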
diff --git a/ansible.cfg b/ansible.cfg new file mode 100644 index 0000000..2f76249 --- /dev/null +++ b/ansible.cfg @@ -0,0 +1,20 @@ +[defaults] +remote_user = root +gathering = explicit +retry_files_enabled = False +vault_password_file = .vault_password +inventory = inventory +roles_path = roles +filter_plugins = plugins/filters +test_plugins = plugins/tests +library = plugins/modules:vendor/ansible-freeipa/plugins/modules +module_utils = plugins/module_utils:vendor/ansible-freeipa/plugins/module_utils +host_key_checking = False +force_handlers = True + +[inventory] +any_unparsed_is_failed = True + +[ssh_connection] +pipelining = True +ssh_args = -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -o ControlMaster=auto -o ControlPersist=60s diff --git a/inventory-example/10-hosts b/inventory-example/10-hosts new file mode 100644 index 0000000..d8c4cc6 --- /dev/null +++ b/inventory-example/10-hosts @@ -0,0 +1,65 @@ +[baremetal] +opnsense1 ip=10.10.11.1 cname=opnsense +proxmox1 ip=10.10.11.11 cname=proxmox +nas1 ip=10.10.12.5 cname=nas +tuxstation1 ip=10.10.12.51 +tuxbook1 ip=10.10.12.52 + +[proxmox_instances] +freeipa1 ip=10.10.12.2 cores=4 ram=8g disk=64g +freeipa2 ip=10.10.12.3 cores=4 ram=8g disk=64g +yum1 ip=10.10.12.4 cname=yum cores=4 disk=256g +syslog1 ip=10.10.12.6 cname=syslog disk=256g +imap1 ip=10.10.12.7 cname=imap cores=4 ram=8g disk=256g +rspamd1 ip=10.10.12.8 cname=rspamd +radius1 ip=10.10.12.9 cname=radius +postgres1 ip=10.10.12.10 cname=postgres cores=4 ram=8g +ttrss1 ip=10.10.12.11 cname=ttrss +znc1 ip=10.10.12.12 cname=znc +dav1 ip=10.10.12.13 cname=dav disk=64g +bitwarden1 ip=10.10.12.14 cname=bitwarden +cups1 ip=10.10.12.15 cname=cups +dev1 ip=10.10.12.16 cores=4 disk=256g +www1 ip=10.10.12.17 cname=www +syncthing1 ip=10.10.12.18 cname=syncthing +wiki1 ip=10.10.12.19 cname=wiki disk=64g +jellyfin1 ip=10.10.12.20 cname=jellyfin cores=8 ram=32g disk=512g +privbrowse1 ip=10.10.12.21 cores=4 ram=8g disk=64g +nagios1 ip=10.10.12.22 cname=nagios cores=4 +archive1 ip=10.10.12.23 cname=archive +photostructure1 ip=10.10.12.24 cname=photos cores=8 ram=16g disk=256g +unifi1 ip=10.10.11.30 cname=unifi +dmz-dns1 ip=10.10.19.2 +dmz-mx1 ip=10.10.19.3 cname=smtp +dmz-www1 ip=10.10.19.4 +dmz-xmpp1 ip=10.10.19.5 cname=xmpp +dmz-turn1 ip=10.10.19.6 cname=turn +dmz-git1 ip=10.10.19.13 +dmz-asterisk1 ip=10.10.14.10 cname=asterisk cores=4 + +[unmanaged] +switch1 ip=10.10.11.2 +wap-livingroom ip=10.10.11.31 +wap-bedroom ip=10.10.11.32 +wap-kitchen ip=10.10.11.33 +pixel1 ip=10.10.13.11 +pixel2 ip=10.10.13.12 +printer1 ip=10.10.15.2 +phone1 ip=10.10.14.11 +phone2 ip=10.10.14.12 + + +# Only one host can be the freeipa master. +[freeipa_master] +freeipa1 + + +# The "all" group must be specified explicitly; otherwise the constructed inventory +# plugin won't parse the group_vars. 
+# https://github.com/ansible/ansible/issues/71738 +[all:children] +baremetal +proxmox_instances +unmanaged + +# vi: ft=dosini diff --git a/inventory-example/20-by-hostname.yml b/inventory-example/20-by-hostname.yml new file mode 100644 index 0000000..165bd37 --- /dev/null +++ b/inventory-example/20-by-hostname.yml @@ -0,0 +1,43 @@ +plugin: constructed +strict: yes +groups: + internal: inventory_hostname is not match('dmz-') + dmz: inventory_hostname is match('dmz-') + switches: inventory_hostname is match('switch[0-9]') + access_points: inventory_hostname is match('wap-') + printers: inventory_hostname is match('printer[0-9]') + cellphones: inventory_hostname is match('pixel') + phones: inventory_hostname is match('phone[0-9]') + opnsense_firewalls: inventory_hostname is match('opnsense[0-9]') + proxmox_hypervisors: inventory_hostname is match('proxmox[0-9]') + nfs_servers: inventory_hostname is match('nas[0-9]') + linux_desktops: inventory_hostname is match('tuxstation[0-9]') + linux_laptops: inventory_hostname is match('tuxbook[0-9]') + freeipa_servers: inventory_hostname is match('freeipa[0-9]') + unifi_controllers: inventory_hostname is match('unifi[0-9]') + yum_mirrors: inventory_hostname is match('yum[0-9]') + syslog_servers: inventory_hostname is match('syslog[0-9]') + mail_servers: inventory_hostname is match('(dmz-)?mx[0-9]') + imap_servers: inventory_hostname is match('imap[0-9]') + radius_servers: inventory_hostname is match('radius[0-9]') + postgresql_servers: inventory_hostname is match('postgres[0-9]') + ttrss_servers: inventory_hostname is match('ttrss[0-9]') + znc_servers: inventory_hostname is match('znc[0-9]') + dav_servers: inventory_hostname is match('dav[0-9]') + bitwarden_servers: inventory_hostname is match('bitwarden[0-9]') + cups_servers: inventory_hostname is match('cups[0-9]') + xmpp_servers: inventory_hostname is match('(dmz-)?xmpp[0-9]') + dev_servers: inventory_hostname is match('dev[0-9]') + web_servers: inventory_hostname is match('(dmz-)?www[0-9]') + git_servers: inventory_hostname is match('(dmz-)?git[0-9]') + syncthing_servers: inventory_hostname is match('syncthing[0-9]') + wiki_servers: inventory_hostname is match('(dmz-)?wiki[0-9]') + jellyfin_servers: inventory_hostname is match('jellyfin[0-9]') + privbrowse_servers: inventory_hostname is match('privbrowse[0-9]') + nagios_servers: inventory_hostname is match('nagios[0-9]') + archive_servers: inventory_hostname is match('archive[0-9]') + photostructure_servers: inventory_hostname is match('photostructure[0-9]') + rspamd_servers: inventory_hostname is match('rspamd[0-9]') + authoritative_nameservers: inventory_hostname is match('(dmz-)?dns[0-9]') + turn_servers: inventory_hostname is match('(dmz-)?turn[0-9]') + asterisk_servers: inventory_hostname is match('(dmz-)?asterisk[0-9]') diff --git a/inventory-example/30-constructed.yml b/inventory-example/30-constructed.yml new file mode 100644 index 0000000..f519d22 --- /dev/null +++ b/inventory-example/30-constructed.yml @@ -0,0 +1,15 @@ +plugin: constructed +strict: yes +use_vars_plugins: yes +compose: + vlan: vlans.values() | selectattr('cidr', 'contains_ip', ip) | first + ansible_host: ansible_ip | default(ip) | default(ansible_host) + fqdn: inventory_hostname ~ '.' 
~ domain + cnames: ([] if cname is not defined else (((cname | split(',')) if cname is string else cname) | map('regex_replace', '$', '.'~domain))) + proxmox_disk: (disk | default('32g') | human_to_bytes / 1073741824) | int + proxmox_memory: (ram | default('4g') | human_to_bytes / 1048576) | int + proxmox_cores: cores | default(2) +groups: + el: (group_names | intersect(['unmanaged','opnsense_firewalls','proxmox_hypervisors']) | length) == 0 + nagios_ansible_managed_clients: (group_names | intersect(['unmanaged','opnsense_firewalls','linux_laptops']) | length) == 0 + nagios_el_clients: (group_names | intersect(['unmanaged','opnsense_firewalls','linux_laptops','proxmox_hypervisors']) | length) == 0 diff --git a/inventory-example/40-groups b/inventory-example/40-groups new file mode 100644 index 0000000..41c1705 --- /dev/null +++ b/inventory-example/40-groups @@ -0,0 +1,62 @@ +# Hosts that aren't ready for Rocky 9 yet. +[el8:children] +asterisk_servers +git_servers +imap_servers +nagios_servers +rspamd_servers +unifi_controllers +xmpp_servers + +##### Nagios Hostgroups ##### +[nagios_net_snmp_clients:children] +nagios_ansible_managed_clients +opnsense_firewalls + +[nagios_check_load:children] +nagios_net_snmp_clients + +[nagios_check_mem:children] +nagios_net_snmp_clients + +[nagios_check_disk:children] +nagios_net_snmp_clients + +[nagios_check_interfaces:children] +nagios_net_snmp_clients +switches +access_points + +[nagios_check_systemd:children] +nagios_ansible_managed_clients + +[nagios_check_ssh:children] +baremetal +proxmox_instances +switches +access_points + +[nagios_check_zfs:children] +nfs_servers +proxmox_hypervisors + +[nagios_check_https:children] +freeipa_servers +yum_mirrors +ttrss_servers +znc_servers +dav_servers +bitwarden_servers +cups_servers +web_servers +git_servers +syncthing_servers +wiki_servers +jellyfin_servers +privbrowse_servers +photostructure_servers +rspamd_servers +unifi_controllers +xmpp_servers + +# vi: ft=dosini diff --git a/inventory-example/group_vars/access_points/vars.yml b/inventory-example/group_vars/access_points/vars.yml new file mode 100644 index 0000000..05aaf5d --- /dev/null +++ b/inventory-example/group_vars/access_points/vars.yml @@ -0,0 +1,12 @@ +nagios_snmp_priv_pass: '{{ vault_nagios_snmp_priv_pass }}' +nagios_snmp_auth_pass: '{{ vault_nagios_snmp_auth_pass }}' + +nagios_interfaces: + - eth0 + - regex: '^wifi[0-9]' + description: wifi + down_ok: yes + discard_warn: 500 + discard_crit: 1000 + error_warn: 500 + error_crit: 1000 diff --git a/inventory-example/group_vars/access_points/vault.yml b/inventory-example/group_vars/access_points/vault.yml new file mode 100644 index 0000000..f39f186 --- /dev/null +++ b/inventory-example/group_vars/access_points/vault.yml @@ -0,0 +1,6 @@ +# This is a sample file with fake secrets. For a real deployment, encrypt this +# file with `ansible-vault encrypt` and add your own secrets. +--- +# Unifi APs require the privpass and authpass to be identical...sad! 
+vault_nagios_snmp_priv_pass: changeme
+vault_nagios_snmp_auth_pass: changeme
diff --git a/inventory-example/group_vars/all/apache.yml b/inventory-example/group_vars/all/apache.yml
new file mode 100644
index 0000000..85c7abf
--- /dev/null
+++ b/inventory-example/group_vars/all/apache.yml
@@ -0,0 +1 @@
+apache_sysaccount_password: '{{ vault_apache_sysaccount_password }}'
diff --git a/inventory-example/group_vars/all/archive.yml b/inventory-example/group_vars/all/archive.yml
new file mode 100644
index 0000000..65d8144
--- /dev/null
+++ b/inventory-example/group_vars/all/archive.yml
@@ -0,0 +1,2 @@
+archive_ssh_privkey: '{{ vault_archive_ssh_privkey }}'
+archive_ssh_pubkey: ssh-rsa AAAAAAAAAchangeme
diff --git a/inventory-example/group_vars/all/asterisk.yml b/inventory-example/group_vars/all/asterisk.yml
new file mode 100644
index 0000000..0f4f1b2
--- /dev/null
+++ b/inventory-example/group_vars/all/asterisk.yml
@@ -0,0 +1,105 @@
+asterisk_external_ip: 203.0.113.62 # changeme
+asterisk_fqdn: pbx.example.com # changeme
+asterisk_local_nets:
+  - '{{ vlans.voip.cidr }}'
+
+asterisk_password_salt: '{{ vault_asterisk_password_salt }}'
+
+asterisk_voicemail_contexts: # changeme
+  default:
+    - address: 6000
+      password: 1234
+      name: Doe Family
+      email: doefamily@example.com
+
+asterisk_sip_trunks: '{{ vault_asterisk_sip_trunks }}'
+asterisk_sip_extensions: '{{ vault_asterisk_sip_extensions }}'
+asterisk_ari_users: '{{ vault_asterisk_ari_users }}'
+
+asterisk_queues: # changeme
+  - name: house-phones
+    strategy: ringall
+    retry: 1
+    timeout: 30
+    members:
+      - 6001
+      - 6002
+      - 6003
+
+# changeme - dump your asterisk dialplan into this variable
+asterisk_dialplan: |
+  [globals]
+  AREA_CODE = 555
+
+  ; voicemail
+  VOICEMAIL_NUMBER = *99
+  VOICEMAIL_CONTEXT = default
+  VOICEMAIL_RING_TIMEOUT = 25
+
+  ; extension patterns
+  INTERCOM = 6000
+  HOUSE = _6XXX
+
+  ; Queue for all local home phones
+  HOME_QUEUE = house-phones
+
+  ; All home phones use the same voicemail box.
+  HOME_MAILBOX = 6000
+
+  ; Caller ID for outgoing PSTN calls from the home phone line.
+  HOME_CID = John Doe <+15555555555>
+
+  [gosub-voicemail]
+  ; Dial the given channel, if no answer send to voicemail.
+  ; ${ARG1} - channel to dial
+  ; ${ARG2} - voicemail box
+  exten => s,1,Dial(${ARG1},${VOICEMAIL_RING_TIMEOUT})
+  same => n,Answer(500)
+  same => n,Voicemail(${ARG2},su)
+  same => n,Hangup()
+
+  [gosub-intercom]
+  exten => s,1,Set(PJSIP_HEADER(add,Alert-Info)=auto answer)
+  same => n,Return()
+
+  [subscribe]
+  exten => _XXXX,hint,PJSIP/${EXTEN}
+
+  [internal]
+  ; For INTERCOM, page all participants into 2-way conference
+  exten => ${INTERCOM},1,Set(CALLERID(all)=Intercom <${EXTEN}>)
+  same => n,Page(${STRREPLACE(QUEUE_MEMBER_LIST(${HOME_QUEUE}),",","&")},db(gosub-intercom^s^1),10)
+
+  ; For HOME extensions, ring indefinitely.
+  exten => ${HOME},1,Dial(PJSIP/${EXTEN})
+  same => n,Hangup()
+
+  [from-upstream-provider]
+  ; Ring all house phones for incoming PSTN calls, if no answer send to voicemail.
+ exten => _X.,1,Queue(${HOME_QUEUE},nr,,,${VOICEMAIL_RING_TIMEOUT}) + same => n,Answer(500) + same => n,Voicemail(${HOME_MAILBOX}@${VOICEMAIL_CONTEXT},su) + same => n,Hangup() + + [from-house-phones] + include => internal + ; local voicemail access + exten => ${VOICEMAIL_NUMBER},1,Answer(500) + same => n,VoiceMailMain(${HOME_MAILBOX}@${VOICEMAIL_CONTEXT},s) + same => n,Hangup() + ; pstn - normalize all outgoing numbers to +1XXXXXXXXXX + exten => _+1NXXNXXXXXX,1,Set(CALLERID(all)=${HOME_CID}) + same => n,Dial(PJSIP/${EXTEN}@upstream-provider) + same => n,Hangup() + exten => _1NXXNXXXXXX,1,Set(CALLERID(all)=${HOME_CID}) + same => n,Dial(PJSIP/+${EXTEN}@upstream-provider) + same => n,Hangup() + exten => _NXXNXXXXXX,1,Set(CALLERID(all)=${HOME_CID}) + same => n,Dial(PJSIP/+1${EXTEN}@upstream-provider) + same => n,Hangup() + exten => _NXXXXXX,1,Set(CALLERID(all)=${HOME_CID}) + same => n,Dial(PJSIP/+1${AREA_CODE}${EXTEN}@upstream-provider) + same => n,Hangup() + exten => _N11,1,Set(CALLERID(all)=${HOME_CID}) + same => n,Dial(PJSIP/${EXTEN}@upstream-provider) + same => n,Hangup() diff --git a/inventory-example/group_vars/all/coturn.yml b/inventory-example/group_vars/all/coturn.yml new file mode 100644 index 0000000..0af566b --- /dev/null +++ b/inventory-example/group_vars/all/coturn.yml @@ -0,0 +1,3 @@ +coturn_auth_secret: '{{ vault_coturn_auth_secret }}' +coturn_external_ip: 203.0.113.61 # changeme +coturn_realm: turn.example.com # changeme diff --git a/inventory-example/group_vars/all/cups.yml b/inventory-example/group_vars/all/cups.yml new file mode 100644 index 0000000..11087a1 --- /dev/null +++ b/inventory-example/group_vars/all/cups.yml @@ -0,0 +1 @@ +cups_host: cups.{{ domain }} diff --git a/inventory-example/group_vars/all/firefox.yml b/inventory-example/group_vars/all/firefox.yml new file mode 100644 index 0000000..5ebc61b --- /dev/null +++ b/inventory-example/group_vars/all/firefox.yml @@ -0,0 +1,73 @@ +# Managed firefox settings go in this file. 
+---
+firefox_offer_to_save_logins_default: no
+
+firefox_extensions:
+  - name: ublock-origin
+    id: uBlock0@raymondhill.net
+    mode: force_installed
+    policy:
+      toOverwrite:
+        filterLists:
+          - user-filters
+          - ublock-filters
+          - ublock-badware
+          - ublock-privacy
+          - ublock-abuse
+          - ublock-unbreak
+          - ublock-annoyances
+          - easylist
+          - easyprivacy
+          - urlhaus-1
+          - plowe-0
+          - fanboy-annoyance
+          - fanboy-thirdparty_social
+          - adguard-spyware-url
+          - ublock-quick-fixes
+      toAdd:
+        trustedSiteDirectives:
+          - id.spectrum.net
+          - '{{ domain }}'
+
+  - name: bitwarden-password-manager
+    id: '{446900e4-71c2-419f-a6a7-df9c091e268b}'
+
+  - name: libredirect
+    id: 7esoorv3@alefvanoon.anonaddy.me
+
+firefox_preferences:
+  - name: dom.security.https_only_mode
+    value: true
+    status: locked
+
+firefox_managed_bookmarks:
+  - name: Bitwarden
+    url: 'https://bitwarden.{{ domain }}'
+  - name: Git
+    url: 'https://git.example.com'
+  - name: Invidious
+    url: 'https://invidious.{{ domain }}'
+  - name: Jellyfin
+    url: 'https://jellyfin.{{ domain }}'
+  - name: Nagios
+    url: 'https://nagios.{{ domain }}'
+  - name: Nitter
+    url: 'https://nitter.{{ domain }}'
+  - name: Photostructure
+    url: 'https://photos.{{ domain }}/'
+  - name: Printers
+    url: 'https://cups.{{ domain }}/printers/'
+  - name: Rspamd
+    url: 'https://rspamd.{{ domain }}'
+  - name: Syncthing
+    url: 'https://syncthing.{{ domain }}'
+  - name: Teddit
+    url: 'https://teddit.{{ domain }}'
+  - name: TinyTinyRSS
+    url: 'https://ttrss.{{ domain }}'
+  - name: Unifi
+    url: 'https://unifi.{{ domain }}'
+  - name: Wiki
+    url: 'https://wiki.{{ domain }}'
+  - name: ZNC
+    url: 'https://znc.{{ domain }}'
diff --git a/inventory-example/group_vars/all/freeipa.yml b/inventory-example/group_vars/all/freeipa.yml
new file mode 100644
index 0000000..3501061
--- /dev/null
+++ b/inventory-example/group_vars/all/freeipa.yml
@@ -0,0 +1,144 @@
+# This file contains a bunch of example data for populating your FreeIPA
+# domain with users, groups, sudo rules, etc.
+---
+freeipa_workgroup: ACME
+freeipa_nfs_homedirs: yes
+freeipa_dns_forwarders:
+  - 10.10.12.1
+
+freeipa_users:
+  - name: johndoe
+    givenname: John
+    sn: Doe
+    mail: john@example.com
+    jid: john@example.com
+    mail_aliases:
+      - john.nickname@example.com
+      - john.alias@example.com
+
+  - name: bobbytables
+    givenname: Bobby
+    sn: Tables
+    mail: btables@example.com
+    jid: btables@example.com
+
+  - name: janedoe
+    givenname: Jane
+    sn: Doe
+    mail: jane@example.com
+    jid: jane@example.com
+
+freeipa_groups:
+  # built-in freeipa admin group - be careful!
+  - name: admins
+    append: yes
+    user:
+      - johndoe
+
+  - name: sysadmins
+    mail: sysadmins@example.com
+    mail_aliases:
+      - root@example.com
+      - postmaster@example.com
+      - hostmaster@example.com
+      - webmaster@example.com
+      - abuse@example.com
+    description: System Administrators
+    user:
+      - johndoe
+      - bobbytables
+
+  - name: webmasters
+    user:
+      - johndoe
+
+  - name: doefamily
+    description: Doe Family
+    mail: doefamily@example.com
+    user:
+      - johndoe
+      - janedoe
+
+  - name: role-nagios-access
+    group: sysadmins
+
+  - name: role-bitwarden-admin
+    group: sysadmins
+
+  - name: role-cups-admin
+    group: sysadmins
+
+  - name: role-ttrss-admin
+    group: sysadmins
+
+  - name: role-music-admin
+    group: sysadmins
+    append: yes
+
+  - name: role-rspamd-admin
+    group: sysadmins
+
+  - name: role-imap-access
+    group: doefamily
+
+  - name: role-music-access
+    group: doefamily
+    append: yes
+
+  - name: role-dav-access
+    group: doefamily
+
+  - name: role-linux-desktop-access
+    group: doefamily
+
+  - name: role-ttrss-access
+    group: doefamily
+
+  - name: role-znc-access
+    group: doefamily
+
+  - name: role-wiki-access
+    group: doefamily
+
+  - name: role-wiki-admin
+    group: sysadmins
+
+  - name: role-wifi-access
+    group: doefamily
+
+  - name: role-media-admin
+    group: sysadmins
+
+  - name: role-media-access
+    group: doefamily
+
+  - name: role-photo-admin
+    group: doefamily
+    append: yes
+
+  - name: role-xmpp-access
+    group: doefamily
+
+  - name: role-git-access
+    group: doefamily
+
+  - name: role-git-admin
+    group: sysadmins
+
+freeipa_hbac_rules:
+  - name: sysadmins_ssh_and_console_to_all
+    description: allow sysadmins ssh and console access to all hosts
+    usergroup: sysadmins
+    hostcategory: all
+    service:
+      - sshd
+      - login
+
+freeipa_sudo_rules:
+  - name: sysadmins_all
+    description: allow sysadmins to run anything as any user
+    cmdcategory: all
+    hostcategory: all
+    runasusercategory: all
+    runasgroupcategory: all
+    usergroup: sysadmins
diff --git a/inventory-example/group_vars/all/freeradius.yml b/inventory-example/group_vars/all/freeradius.yml
new file mode 100644
index 0000000..8172e44
--- /dev/null
+++ b/inventory-example/group_vars/all/freeradius.yml
@@ -0,0 +1 @@
+freeradius_clients: '{{ vault_freeradius_clients }}'
diff --git a/inventory-example/group_vars/all/git.yml b/inventory-example/group_vars/all/git.yml
new file mode 100644
index 0000000..9975c7e
--- /dev/null
+++ b/inventory-example/group_vars/all/git.yml
@@ -0,0 +1,2 @@
+cgit_logo: ~/Development/assets/cgit/acme-logo.png # changeme (or delete)
+cgit_favicon: ~/Development/assets/cgit/acme-favicon.svg # changeme (or delete)
diff --git a/inventory-example/group_vars/all/global.yml b/inventory-example/group_vars/all/global.yml
new file mode 100644
index 0000000..f4ea98e
--- /dev/null
+++ b/inventory-example/group_vars/all/global.yml
@@ -0,0 +1,105 @@
+# By convention, variables defined in this file are safe to use in all roles.
+#
+# In other words, this should be the only place where you see variables
+# without a 'rolename_' prefix.
+---
+ansible_python_interpreter: /usr/libexec/platform-python
+
+timezone: America/New_York
+domain: ipa.example.com # changeme
+email_domain: example.com # changeme
+
+organization: ACME, Inc. # changeme
+
+# This variable will be used to configure an SSID with certificate-based auth
+# for any hosts in the linux_laptops group.
+wifi_ssid: acme-wifi
+
+# Hosts in these CIDRs should be capable of kerberos authentication.
+# We use this in many apache configs to determine when to force GSSAPI auth.
+kerberized_cidrs: # changeme
+  - 10.10.12.0/24
+
+backup_path: ~/backups
+
+# Use your external MX hostname so that TLS validation works.
+mail_host: mx1.example.com
+
+imap_host: imap.{{ domain }}
+rspamd_host: rspamd.{{ domain }}
+
+# changeme: specify your vlans here.
+# This dictionary is used to discover which VLAN a host belongs to.
+# The appropriate VLAN object will end up in the `vlan` variable in host_vars.
+vlans:
+  mgmt:
+    id: 11
+    cidr: 10.10.11.0/24
+    gateway: 10.10.11.1
+    dns_servers: # freeipa servers
+      - 10.10.12.2
+      - 10.10.12.3
+    ntp_servers: ['10.10.11.1']
+
+  trusted:
+    id: 12
+    cidr: 10.10.12.0/23
+    dns_servers: # freeipa servers
+      - 10.10.12.2
+      - 10.10.12.3
+    gateway: 10.10.12.1
+    ntp_servers: ['10.10.12.1']
+
+  voip:
+    id: 14
+    cidr: 10.10.14.0/24
+    gateway: 10.10.14.1
+    dns_servers: # freeipa servers
+      - 10.10.12.2
+      - 10.10.12.3
+    ntp_servers: ['10.10.14.1']
+
+  print:
+    id: 15
+    cidr: 10.10.15.0/24
+    gateway: 10.10.15.1
+    dns_servers: # freeipa servers
+      - 10.10.12.2
+      - 10.10.12.3
+    ntp_servers: ['10.10.15.1']
+
+  vpn:
+    id: 16
+    cidr: 10.10.16.0/24
+    gateway: 10.10.16.1
+    dns_servers: # freeipa servers
+      - 10.10.12.2
+      - 10.10.12.3
+    ntp_servers: ['10.10.16.1']
+
+  dmz:
+    id: 19
+    cidr: 10.10.19.0/24
+    dns_servers: # freeipa servers
+      - 10.10.12.2
+      - 10.10.12.3
+    gateway: 10.10.19.1
+    ntp_servers: ['10.10.19.1']
+
+
+# standard freeipa variables
+freeipa_realm: '{{ domain | upper }}'
+freeipa_basedn: "dc={{ domain.split('.') | join(',dc=') }}"
+freeipa_hosts: "{{ groups['freeipa_servers'] | map('regex_replace', '$', '.' ~ domain) }}"
+freeipa_ldap_uri: "{{ groups['freeipa_servers'] | map('regex_replace', '^(.*)$', 'ldap://\\1.' ~ domain) | join(' ') }}"
+freeipa_master: "{{ groups['freeipa_master'][0] }}"
+freeipa_sysaccount_basedn: 'cn=sysaccounts,cn=etc,{{ freeipa_basedn }}'
+freeipa_user_basedn: cn=users,cn=accounts,{{ freeipa_basedn }}
+freeipa_group_basedn: cn=groups,cn=accounts,{{ freeipa_basedn }}
+freeipa_accounts_basedn: cn=accounts,{{ freeipa_basedn }}
+freeipa_service_basedn: cn=services,cn=accounts,{{ freeipa_basedn }}
+freeipa_ds_password: '{{ vault_freeipa_ds_password }}'
+freeipa_admin_password: '{{ vault_freeipa_admin_password }}'
+ipa_host: '{{ freeipa_master }}.{{ domain }}'
+ipa_user: admin
+ipa_pass: '{{ freeipa_admin_password }}'
diff --git a/inventory-example/group_vars/all/hastebin.yml b/inventory-example/group_vars/all/hastebin.yml
new file mode 100644
index 0000000..d6c6a43
--- /dev/null
+++ b/inventory-example/group_vars/all/hastebin.yml
@@ -0,0 +1,3 @@
+hastebin_upload_cidrs:
+  - '{{ vlans.trusted.cidr }}'
+  - '{{ vlans.vpn.cidr }}'
diff --git a/inventory-example/group_vars/all/invidious.yml b/inventory-example/group_vars/all/invidious.yml
new file mode 100644
index 0000000..31f3cf2
--- /dev/null
+++ b/inventory-example/group_vars/all/invidious.yml
@@ -0,0 +1,4 @@
+invidious_port: 8080
+invidious_db_password: '{{ vault_invidious_db_password }}'
+invidious_hmac_key: '{{ vault_invidious_hmac_key }}'
+invidious_db_user: s-invidious
diff --git a/inventory-example/group_vars/all/jellyfin.yml b/inventory-example/group_vars/all/jellyfin.yml
new file mode 100644
index 0000000..954e498
--- /dev/null
+++ b/inventory-example/group_vars/all/jellyfin.yml
@@ -0,0 +1 @@
+jellyfin_sysaccount_password: '{{ vault_jellyfin_sysaccount_password }}'
diff --git a/inventory-example/group_vars/all/mail.yml b/inventory-example/group_vars/all/mail.yml
new file mode 100644
index 0000000..120ca91
--- /dev/null
+++ b/inventory-example/group_vars/all/mail.yml
@@
-0,0 +1,21 @@ +dovecot_default_user_quota: 20G + +# accept mail for these domains: +postfix_virtual_domains: + - example.com + - example.net + +rspamd_domain_whitelist: + - badly.configured.domain.com + - dont.mark.mail.from.this.domain.as.spam.com + +rspamd_password: '{{ vault_rspamd_password }}' +rspamd_password_hash: '{{ vault_rspamd_password_hash }}' +rspamd_dkim_keys: '{{ vault_rspamd_dkim_keys }}' + +# generate with `rspamadm keypair` +rspamd_privkey: '{{ vault_rspamd_privkey }}' +rspamd_pubkey: AAAAAAAAAAAAAchangeme + +rspamd_redis_port: 6379 +rspamd_redis_bayes_port: 6380 diff --git a/inventory-example/group_vars/all/mediawiki.yml b/inventory-example/group_vars/all/mediawiki.yml new file mode 100644 index 0000000..d54f199 --- /dev/null +++ b/inventory-example/group_vars/all/mediawiki.yml @@ -0,0 +1,9 @@ +mediawiki_upgrade_key: '{{ vault_mediawiki_upgrade_key }}' +mediawiki_secret_key: '{{ vault_mediawiki_secret_key }}' +mediawiki_admin_password: '{{ vault_mediawiki_admin_password }}' + +mediawiki_sysaccount_password: '{{ vault_mediawiki_sysaccount_password }}' + +mediawiki_logo_1x: ~/Development/assets/mediawiki/acme-logo.svg # changeme (or delete) +mediawiki_logo_icon: ~/Development/assets/mediawiki/acme-icon.svg # changeme (or delete) +mediawiki_favicon: ~/Development/assets/mediawiki/acme-favicon.svg # changeme (or delete) diff --git a/inventory-example/group_vars/all/nagios.yml b/inventory-example/group_vars/all/nagios.yml new file mode 100644 index 0000000..84fc7ce --- /dev/null +++ b/inventory-example/group_vars/all/nagios.yml @@ -0,0 +1,90 @@ +nagios_email: sysadmins@example.com +nagios_ssh_privkey: '{{ vault_nagios_ssh_privkey }}' +nagios_ssh_pubkey: ssh-ed25519 AAAAAAAAAAAAAAchangeme + +nagios_excluded_groups: + - linux_laptops + - cellphones + +nagios_snmp_user: nagios +nagios_snmp_community: public +nagios_snmp_priv_proto: AES +nagios_snmp_auth_proto: SHA +nagios_snmp_auth_pass: '{{ vault_nagios_snmp_auth_pass }}' +nagios_snmp_priv_pass: '{{ vault_nagios_snmp_priv_pass }}' + +nagios_ping_count: 5 +nagios_ping_rtt_warn: 50.0 +nagios_ping_rtt_crit: 100.0 +nagios_ping_loss_warn: 20% +nagios_ping_loss_crit: 40% + +nagios_temp_warn: 60 +nagios_temp_crit: 70 + +nagios_power_draw_warn: 50% +nagios_power_draw_crit: 75% + +nagios_load_1m_warn: 1.0 +nagios_load_5m_warn: 0.9 +nagios_load_15m_warn: 0.8 +nagios_load_1m_crit: 2.0 +nagios_load_5m_crit: 1.8 +nagios_load_15m_crit: 1.6 + +nagios_mem_warn: 80% +nagios_mem_crit: 90% + +nagios_swap_warn: 50% +nagios_swap_crit: 80% + +nagios_interface_bandwidth_warn: 0 +nagios_interface_bandwidth_crit: 0 +nagios_interface_discard_warn: 10 +nagios_interface_discard_crit: 50 +nagios_interface_error_warn: 5 +nagios_interface_error_crit: 20 + +nagios_interfaces: + - regex: ^(?!.*(lo[0-9]*|virbr[0-9]*|tap.*|vmbr.*|lagg[0-9]+_vlan)) + description: interfaces + down_ok: no + bandwidth_warn: '{{ nagios_interface_bandwidth_warn }}' + bandwidth_crit: '{{ nagios_interface_bandwidth_crit }}' + discard_warn: '{{ nagios_interface_discard_warn }}' + discard_crit: '{{ nagios_interface_discard_crit }}' + error_warn: '{{ nagios_interface_error_warn }}' + error_crit: '{{ nagios_interface_error_crit }}' + +nagios_disk_warn: 80% +nagios_disk_crit: 90% + +nagios_disks: + - regex: ^(/sys|/dev|/run|/rpool|/tank) + exclude: yes + description: disks + warn: '{{ nagios_disk_warn }}' + crit: '{{ nagios_disk_crit }}' + +nagios_certificate_warn: 28 +nagios_certificate_crit: 14 + +nagios_smtp_warn: 0.5 +nagios_smtp_crit: 1.0 +nagios_mailq_warn: 5 +nagios_mailq_crit: 20 + 
+nagios_imap_warn: 0.5 +nagios_imap_crit: 1.0 + +nagios_http_warn: 0.5 +nagios_http_crit: 1.0 + +nagios_check_dns: + - name: www.example.com + server: 8.8.8.8 + expect: 203.0.113.42 + + - name: mx1.example.com + server: 8.8.8.8 + expect: 203.0.113.43 diff --git a/inventory-example/group_vars/all/nfs.yml b/inventory-example/group_vars/all/nfs.yml new file mode 100644 index 0000000..713b5d3 --- /dev/null +++ b/inventory-example/group_vars/all/nfs.yml @@ -0,0 +1,11 @@ +nfs_homedir_options: rw,crossmnt + +# These clients will be added to the export list for NFS home directories. +nfs_homedir_clients: + - client: '{{ vlans.trusted.cidr }}' + options: sec=krb5p + + # We can't use kerberos for Syncthing, because the Syncthing daemons have + # to impersonate each user, and I don't feel like shuffling keytabs around. + - client: syncthing1 + options: sec=sys diff --git a/inventory-example/group_vars/all/nitter.yml b/inventory-example/group_vars/all/nitter.yml new file mode 100644 index 0000000..3d13f76 --- /dev/null +++ b/inventory-example/group_vars/all/nitter.yml @@ -0,0 +1,3 @@ +nitter_port: 8082 +nitter_redis_port: 16379 +nitter_hmac_key: '{{ vault_nitter_hmac_key }}' diff --git a/inventory-example/group_vars/all/nsd.yml b/inventory-example/group_vars/all/nsd.yml new file mode 100644 index 0000000..ff1afe6 --- /dev/null +++ b/inventory-example/group_vars/all/nsd.yml @@ -0,0 +1,54 @@ +# Put the desired contents of any zone files in nsd_zones. +# +# I only recommend self-hosting DNS if you're farming out your *real* query +# traffic to a secondary DNS provider. +--- +nsd_zones: + - name: example.com + slave_nameservers: + - 203.0.113.50 + - 203.0.113.51 + ttl: 3600 + content: | + @ IN NS ns1.example.com. + @ IN NS ns2.example.com. + ns1 IN A 203.0.113.52 + ns1 IN AAAA 2001:db8::2 + ns2 IN A 203.0.113.53 + ns2 IN AAAA 2001:db8::3 + + @ IN CAA 0 issue "letsencrypt.org" + + ; mail + @ IN MX 10 mx1.example.com. 
+ @ IN TXT "v=spf1 mx -all" + dkim._domainkey IN TXT ( "v=DKIM1; k=rsa; " + "p=AAAAAAAAAAAAAAAAchangeme" + "AAAAAAAAAAAAAAAAAAchangeme" + ) ; + _dmarc IN TXT "v=DMARC1; p=reject; ruf=mailto:postmaster@example.com" + + @ IN A 203.0.113.54 + mx1 IN A 203.0.113.55 + www1 IN A 203.0.113.56 + xmpp1 IN A 203.0.113.57 + turn1 IN A 203.0.113.58 + pbx1 IN A 203.0.113.59 + www IN CNAME www1 + xmpp IN CNAME xmpp1 + conference IN CNAME xmpp1 + turn IN CNAME turn1 + pbx IN CNAME pbx1 + + _xmpp-client._tcp IN SRV 0 5 5222 xmpp1 + _xmpp-server._tcp IN SRV 0 5 5269 xmpp1 + _xmpp-server._tcp.conference IN SRV 0 5 5269 xmpp1 + + _stun._tcp IN SRV 0 5 3478 turn1 + _stun._udp IN SRV 0 5 3478 turn1 + _turn._tcp IN SRV 0 5 3478 turn1 + _turn._udp IN SRV 0 5 3478 turn1 + + _sip._udp IN SRV 0 5 5060 pbx1 + _sip._tcp IN SRV 0 5 5060 pbx1 + _sip._tls IN SRV 0 5 5061 pbx1 diff --git a/inventory-example/group_vars/all/packages.yml b/inventory-example/group_vars/all/packages.yml new file mode 100644 index 0000000..2883e64 --- /dev/null +++ b/inventory-example/group_vars/all/packages.yml @@ -0,0 +1,4 @@ +packages_install: + - man + - less + - tmux diff --git a/inventory-example/group_vars/all/photostructure.yml b/inventory-example/group_vars/all/photostructure.yml new file mode 100644 index 0000000..6f7963e --- /dev/null +++ b/inventory-example/group_vars/all/photostructure.yml @@ -0,0 +1,3 @@ +photostructure_access_group: role-photo-admin +photostructure_scan_paths: + - /nfs/media/pictures diff --git a/inventory-example/group_vars/all/polkit.yml b/inventory-example/group_vars/all/polkit.yml new file mode 100644 index 0000000..fed46cc --- /dev/null +++ b/inventory-example/group_vars/all/polkit.yml @@ -0,0 +1 @@ +polkit_admin_group: sysadmins diff --git a/inventory-example/group_vars/all/postgres.yml b/inventory-example/group_vars/all/postgres.yml new file mode 100644 index 0000000..be90568 --- /dev/null +++ b/inventory-example/group_vars/all/postgres.yml @@ -0,0 +1,4 @@ +postgresql_host: postgres.{{ domain }} +postgresql_inventory_host: "{{ postgresql_host.split('.')[0] }}" +postgresql_password_users: + - '{{ invidious_db_user }}' diff --git a/inventory-example/group_vars/all/prosody.yml b/inventory-example/group_vars/all/prosody.yml new file mode 100644 index 0000000..b317a96 --- /dev/null +++ b/inventory-example/group_vars/all/prosody.yml @@ -0,0 +1,16 @@ +prosody_http_host: xmpp.example.com # changeme +prosody_sysaccount_password: '{{ vault_prosody_sysaccount_password }}' +prosody_vhosts: # changeme - your jabber domain(s) + - example.com + +# XMPP clients expect a certificate matching the domain of the given JID. +# Unfortunately, this situation only works for LetsEncrypt if you run your XMPP +# server on the same host as your webserver (or if you use the ACME DNS +# challenge). +# +# Check out the prosody_letsencrypt_proxy role for how we get around this. +# Basically, just specify the hostname of your public webserver here, along with +# and ssh keypair. +prosody_le_proxy_host: dmz-www1 +prosody_le_ssh_privkey: '{{ vault_prosody_le_ssh_privkey }}' +prosody_le_ssh_pubkey: ssh-ed25519 AAAAAAAchangeme diff --git a/inventory-example/group_vars/all/proxmox.yml b/inventory-example/group_vars/all/proxmox.yml new file mode 100644 index 0000000..44cb9a1 --- /dev/null +++ b/inventory-example/group_vars/all/proxmox.yml @@ -0,0 +1,7 @@ +# These settings are used when provisioning new proxmox VMs. 
+--- +proxmox_api_host: '{{ groups["proxmox_hypervisors"] | first }}' +proxmox_api_user: ansible@pam +proxmox_api_password: '{{ vault_proxmox_api_password }}' +proxmox_node: '{{ proxmox_api_host }}' +proxmox_password_salt: '{{ vault_proxmox_password_salt }}' diff --git a/inventory-example/group_vars/all/psitransfer.yml b/inventory-example/group_vars/all/psitransfer.yml new file mode 100644 index 0000000..eb61ea9 --- /dev/null +++ b/inventory-example/group_vars/all/psitransfer.yml @@ -0,0 +1,7 @@ +psitransfer_upload_cidrs: + - '{{ vlans.trusted.cidr }}' + - '{{ vlans.vpn.cidr }}' +psitransfer_admin_cidrs: + - '{{ vlans.trusted.cidr }}' + - '{{ vlans.vpn.cidr }}' +psitransfer_admin_password: '{{ vault_psitransfer_admin_password }}' diff --git a/inventory-example/group_vars/all/root.yml b/inventory-example/group_vars/all/root.yml new file mode 100644 index 0000000..bd86f96 --- /dev/null +++ b/inventory-example/group_vars/all/root.yml @@ -0,0 +1,6 @@ +root_authorized_keys: + - ssh-ed25519 AAAAAAAchangeme + - ssh-ed25519 AAAAAAAchangeme + +root_password: '{{ vault_root_password }}' +root_password_salt: '{{ vault_root_password_salt }}' diff --git a/inventory-example/group_vars/all/sudo.yml b/inventory-example/group_vars/all/sudo.yml new file mode 100644 index 0000000..f6e93db --- /dev/null +++ b/inventory-example/group_vars/all/sudo.yml @@ -0,0 +1,2 @@ +sudo_email: yes +sudo_mailto: sysadmins@example.com diff --git a/inventory-example/group_vars/all/syncthing.yml b/inventory-example/group_vars/all/syncthing.yml new file mode 100644 index 0000000..ac3257f --- /dev/null +++ b/inventory-example/group_vars/all/syncthing.yml @@ -0,0 +1,6 @@ +# Each user with a dedicated syncthing instance must have his or her own unique +# port number for the sync traffic. +--- +syncthing_users: + johndoe: 22001 + janedoe: 22002 diff --git a/inventory-example/group_vars/all/syslog.yml b/inventory-example/group_vars/all/syslog.yml new file mode 100644 index 0000000..390c157 --- /dev/null +++ b/inventory-example/group_vars/all/syslog.yml @@ -0,0 +1,2 @@ +syslog_host: syslog.{{ domain }} +syslog_host_ip: "{{ hostvars[groups['syslog_servers'] | sort | first].ip }}" diff --git a/inventory-example/group_vars/all/teddit.yml b/inventory-example/group_vars/all/teddit.yml new file mode 100644 index 0000000..269bb27 --- /dev/null +++ b/inventory-example/group_vars/all/teddit.yml @@ -0,0 +1,3 @@ +teddit_port: 8081 +teddit_redis_port: 6379 +teddit_reddit_app_id: '{{ vault_teddit_reddit_app_id }}' diff --git a/inventory-example/group_vars/all/vault.yml b/inventory-example/group_vars/all/vault.yml new file mode 100644 index 0000000..c3e29c5 --- /dev/null +++ b/inventory-example/group_vars/all/vault.yml @@ -0,0 +1,124 @@ +# This is a sample file with fake secrets. For a real deployment, encrypt this +# file with `ansible-vault encrypt` and add your own secrets. 
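+#
+# For example (paths assume this example inventory layout; adjust as needed):
+#   ansible-vault encrypt inventory-example/group_vars/all/vault.yml
+#   ansible-vault edit inventory-example/group_vars/all/vault.yml
+#   ansible-playbook playbooks/site.yml --ask-vault-pass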
+--- +# apache +vault_apache_sysaccount_password: changeme + + +# archiver +vault_archive_ssh_privkey: | + -----BEGIN OPENSSH PRIVATE KEY----- + AAAAAAAAAAAAchangeme + -----END OPENSSH PRIVATE KEY----- + + +# asterisk +vault_asterisk_ari_users: + - name: nagios + readonly: yes + password: changeme + +vault_asterisk_password_salt: changeme + +vault_asterisk_sip_extensions: + - name: 6001 + context: house-phones + mailbox: 6000@default + cid_name: Living Room + password: changeme + + - name: 6002 + context: house-phones + mailbox: 6000@default + cid_name: Kitchen + password: changeme + +vault_asterisk_sip_trunks: + - name: upstream-provider + host: 'sip.example.com:5060' + username: changeme + password: changeme + + +# coturn +vault_coturn_auth_secret: changeme + + +# freeipa +vault_freeipa_admin_password: changeme +vault_freeipa_ds_password: changeme + + +# freeradius +vault_freeradius_clients: + - name: unifi + address: '{{ vlans.mgmt.cidr }}' + secret: changeme + + +# invidious +vault_invidious_db_password: changeme +vault_invidious_hmac_key: changeme + + +# jellyfin +vault_jellyfin_sysaccount_password: changeme + + +# mediawiki +vault_mediawiki_admin_password: changeme +vault_mediawiki_upgrade_key: changeme +vault_mediawiki_secret_key: changeme +vault_mediawiki_sysaccount_password: changeme + + +# nagios +vault_nagios_snmp_auth_pass: changeme +vault_nagios_snmp_priv_pass: changeme +vault_nagios_ssh_privkey: | + -----BEGIN OPENSSH PRIVATE KEY----- + AAAAAAAAAAAAAAAAchangeme + -----END OPENSSH PRIVATE KEY----- + + +# nitter +vault_nitter_hmac_key: changeme + + +# prosody +vault_prosody_le_ssh_privkey: | + -----BEGIN OPENSSH PRIVATE KEY----- + AAAAAAAAAAAAAAAAchangeme + -----END OPENSSH PRIVATE KEY----- +vault_prosody_sysaccount_password: changeme + + +# proxmox +vault_proxmox_api_password: changeme +vault_proxmox_password_salt: changeme + + +# psitransfer +vault_psitransfer_admin_password: changeme + + +# root user +vault_root_password_salt: changeme +vault_root_password: changeme + + +# rspamd +vault_rspamd_password: changeme +vault_rspamd_password_hash: $2$changeme # generate with `rspamadm pw` +vault_rspamd_privkey: changeme # generate with `rspamadm keypair` +vault_rspamd_dkim_keys: # generate with `rspamadm dkim_keygen` + example.com: | + -----BEGIN RSA PRIVATE KEY----- + AAAAAAAAAAAAAAAAchangeme + -----END RSA PRIVATE KEY----- + +# teddit +vault_teddit_reddit_app_id: changeme + +# vaultwarden +vault_vaultwarden_admin_token: changeme # generate with `openssl rand -base64 48` diff --git a/inventory-example/group_vars/all/vaultwarden.yml b/inventory-example/group_vars/all/vaultwarden.yml new file mode 100644 index 0000000..71637f7 --- /dev/null +++ b/inventory-example/group_vars/all/vaultwarden.yml @@ -0,0 +1 @@ +vaultwarden_admin_token: '{{ vault_vaultwarden_admin_token }}' diff --git a/inventory-example/group_vars/all/wireguard.yml b/inventory-example/group_vars/all/wireguard.yml new file mode 100644 index 0000000..1c0a33c --- /dev/null +++ b/inventory-example/group_vars/all/wireguard.yml @@ -0,0 +1,2 @@ +wireguard_host: 203.0.113.41 # your external VPN IP - changeme +wireguard_pubkey: AAAAAAAAAAchangeme diff --git a/inventory-example/group_vars/all/yum.yml b/inventory-example/group_vars/all/yum.yml new file mode 100644 index 0000000..6cbfae5 --- /dev/null +++ b/inventory-example/group_vars/all/yum.yml @@ -0,0 +1 @@ +yum_host: yum.{{ domain }} diff --git a/inventory-example/group_vars/dav_servers.yml b/inventory-example/group_vars/dav_servers.yml new file mode 100644 index 
0000000..239067a --- /dev/null +++ b/inventory-example/group_vars/dav_servers.yml @@ -0,0 +1,6 @@ +apache_can_sendmail: yes +apache_can_network_connect_db: yes +apache_can_connect_ldap: yes +apache_gssapi: yes + +nagios_http_status: 401 diff --git a/inventory-example/group_vars/dmz.yml b/inventory-example/group_vars/dmz.yml new file mode 100644 index 0000000..ba0b0c9 --- /dev/null +++ b/inventory-example/group_vars/dmz.yml @@ -0,0 +1 @@ +freeipa_autofs: no diff --git a/inventory-example/group_vars/el8.yml b/inventory-example/group_vars/el8.yml new file mode 100644 index 0000000..1aedd96 --- /dev/null +++ b/inventory-example/group_vars/el8.yml @@ -0,0 +1,3 @@ +# Force legacy BIOS for Rocky 8 VMs - UEFI doesn't seem to work. +proxmox_template: rocky8.7 +proxmox_bios: seabios diff --git a/inventory-example/group_vars/freeipa_master.yml b/inventory-example/group_vars/freeipa_master.yml new file mode 100644 index 0000000..fbaa5b2 --- /dev/null +++ b/inventory-example/group_vars/freeipa_master.yml @@ -0,0 +1,6 @@ +# The initial FreeIPA installation requires an upstream DNS server to bootstrap itself. +proxmox_nameservers: '{{ freeipa_dns_forwarders }}' + +# Update the FreeIPA master every *other* day. If there's a botched automatic +# update, we don't want to take the entire domain down overnight. +dnf_automatic_on_calendar: '*-*-1,3,5,7,9,11,13,15,17,19,21,23,25,27,29,31 04:00:00' diff --git a/inventory-example/group_vars/git_servers.yml b/inventory-example/group_vars/git_servers.yml new file mode 100644 index 0000000..5f975fc --- /dev/null +++ b/inventory-example/group_vars/git_servers.yml @@ -0,0 +1 @@ +apache_gssapi: yes diff --git a/inventory-example/group_vars/linux_desktops.yml b/inventory-example/group_vars/linux_desktops.yml new file mode 100644 index 0000000..af4775a --- /dev/null +++ b/inventory-example/group_vars/linux_desktops.yml @@ -0,0 +1 @@ +tuned_profile: desktop diff --git a/inventory-example/group_vars/linux_laptops.yml b/inventory-example/group_vars/linux_laptops.yml new file mode 100644 index 0000000..428c40b --- /dev/null +++ b/inventory-example/group_vars/linux_laptops.yml @@ -0,0 +1,2 @@ +tuned_profile: powersave +rsyslog_forward: no diff --git a/inventory-example/group_vars/nagios_servers.yml b/inventory-example/group_vars/nagios_servers.yml new file mode 100644 index 0000000..5f975fc --- /dev/null +++ b/inventory-example/group_vars/nagios_servers.yml @@ -0,0 +1 @@ +apache_gssapi: yes diff --git a/inventory-example/group_vars/nfs_servers.yml b/inventory-example/group_vars/nfs_servers.yml new file mode 100644 index 0000000..59135b8 --- /dev/null +++ b/inventory-example/group_vars/nfs_servers.yml @@ -0,0 +1,10 @@ +dnf_automatic_restart: no + +nagios_disks: + - regex: ^(/sys|/dev|/run|/rpool|/tank) + exclude: yes + description: disks + + - regex: ^/tank + description: zfs + terse: yes diff --git a/inventory-example/group_vars/opnsense_firewalls.yml b/inventory-example/group_vars/opnsense_firewalls.yml new file mode 100644 index 0000000..8a4ac7b --- /dev/null +++ b/inventory-example/group_vars/opnsense_firewalls.yml @@ -0,0 +1,7 @@ +ansible_python_interpreter: /usr/local/bin/python3 + +# If you want OPNsense to serve PXE, you need the following plugins: +# - os-tftp +# - os-nginx +pxe_root: /usr/local/tftp +pxe_http_port: 8080 diff --git a/inventory-example/group_vars/photostructure_servers.yml b/inventory-example/group_vars/photostructure_servers.yml new file mode 100644 index 0000000..a5542b4 --- /dev/null +++ b/inventory-example/group_vars/photostructure_servers.yml @@ 
-0,0 +1,2 @@ +apache_gssapi: yes +nagios_http_status: 401 diff --git a/inventory-example/group_vars/proxmox_hypervisors.yml b/inventory-example/group_vars/proxmox_hypervisors.yml new file mode 100644 index 0000000..f1a3ed4 --- /dev/null +++ b/inventory-example/group_vars/proxmox_hypervisors.yml @@ -0,0 +1 @@ +ansible_python_interpreter: /usr/bin/python3 diff --git a/inventory-example/group_vars/proxmox_instances.yml b/inventory-example/group_vars/proxmox_instances.yml new file mode 100644 index 0000000..e6e7eab --- /dev/null +++ b/inventory-example/group_vars/proxmox_instances.yml @@ -0,0 +1,2 @@ +tuned_profile: virtual-guest +grub_cmdline: console=ttyS0,115200n8 no_timer_check net.ifnames=0 diff --git a/inventory-example/group_vars/rspamd_servers.yml b/inventory-example/group_vars/rspamd_servers.yml new file mode 100644 index 0000000..54e8be4 --- /dev/null +++ b/inventory-example/group_vars/rspamd_servers.yml @@ -0,0 +1,2 @@ +nagios_http_status: 401 +apache_gssapi: yes diff --git a/inventory-example/group_vars/switches/vars.yml b/inventory-example/group_vars/switches/vars.yml new file mode 100644 index 0000000..8892a35 --- /dev/null +++ b/inventory-example/group_vars/switches/vars.yml @@ -0,0 +1,6 @@ +nagios_snmp_priv_proto: DES +nagios_snmp_priv_pass: '{{ vault_nagios_snmp_priv_pass }}' +nagios_snmp_auth_pass: '{{ vault_nagios_snmp_auth_pass }}' + +nagios_interface_discard_warn: 1000 +nagios_interface_discard_crit: 2000 diff --git a/inventory-example/group_vars/switches/vault.yml b/inventory-example/group_vars/switches/vault.yml new file mode 100644 index 0000000..2015d5f --- /dev/null +++ b/inventory-example/group_vars/switches/vault.yml @@ -0,0 +1,5 @@ +# This is a sample file with fake secrets. For a real deployment, encrypt this +# file with `ansible-vault encrypt` and add your own secrets. 
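+#
+# (If you'd rather not vault this whole file, individual values can also be
+# inline-encrypted with `ansible-vault encrypt_string --name <variable_name>`.)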
+--- +vault_nagios_snmp_priv_pass: changeme +vault_nagios_snmp_auth_pass: changeme diff --git a/inventory-example/group_vars/syncthing_servers.yml b/inventory-example/group_vars/syncthing_servers.yml new file mode 100644 index 0000000..5f975fc --- /dev/null +++ b/inventory-example/group_vars/syncthing_servers.yml @@ -0,0 +1 @@ +apache_gssapi: yes diff --git a/inventory-example/group_vars/ttrss_servers.yml b/inventory-example/group_vars/ttrss_servers.yml new file mode 100644 index 0000000..fc33f6a --- /dev/null +++ b/inventory-example/group_vars/ttrss_servers.yml @@ -0,0 +1,5 @@ +apache_gssapi: yes +apache_can_sendmail: yes +apache_can_network_connect_db: yes +apache_can_network_connect: yes +apache_can_connect_ldap: yes diff --git a/inventory-example/group_vars/unifi_controllers.yml b/inventory-example/group_vars/unifi_controllers.yml new file mode 100644 index 0000000..d3a5574 --- /dev/null +++ b/inventory-example/group_vars/unifi_controllers.yml @@ -0,0 +1,3 @@ +nagios_interface_discard_warn: 500 +nagios_interface_discard_crit: 1000 +freeipa_autofs: no diff --git a/inventory-example/group_vars/wiki_servers.yml b/inventory-example/group_vars/wiki_servers.yml new file mode 100644 index 0000000..527d9ef --- /dev/null +++ b/inventory-example/group_vars/wiki_servers.yml @@ -0,0 +1,7 @@ +apache_gssapi: yes +apache_can_sendmail: yes +apache_can_network_connect_db: yes +apache_can_connect_ldap: yes +apache_can_network_connect: yes + +nagios_http_status: 401 diff --git a/inventory-example/group_vars/xmpp_servers.yml b/inventory-example/group_vars/xmpp_servers.yml new file mode 100644 index 0000000..dd6b7b4 --- /dev/null +++ b/inventory-example/group_vars/xmpp_servers.yml @@ -0,0 +1 @@ +nagios_https_vhosts: ['{{ prosody_http_host | default(ansible_fqdn) }}'] diff --git a/inventory-example/host_vars/bitwarden1.yml b/inventory-example/host_vars/bitwarden1.yml new file mode 100644 index 0000000..feb6baa --- /dev/null +++ b/inventory-example/host_vars/bitwarden1.yml @@ -0,0 +1 @@ +vaultwarden_server_name: bitwarden.{{ domain }} diff --git a/inventory-example/host_vars/dmz-git1.yml b/inventory-example/host_vars/dmz-git1.yml new file mode 100644 index 0000000..e5b5f76 --- /dev/null +++ b/inventory-example/host_vars/dmz-git1.yml @@ -0,0 +1,21 @@ +apache_letsencrypt: yes +apache_server_name: git.example.com # changeme +nagios_https_vhosts: ['{{ apache_server_name }}'] + +cgit_clone_prefixes: # changeme - public clone URL displayed in cgit interface + - https://git.example.com + +cgit_cache_size: 10000 + +# changeme: everything below this line +cgit_title: 'ACME Corp : git' +cgit_description: Source code for ACME Corporation + +cgit_about_html: > + This is just an example. Change me! 
+ +cgit_logo: ~/assets/cgit/acme_logo.png +cgit_favicon: ~/assets/cgit/acme_favicon.png +cgit_css: ~/assets/cgit/acme.css +cgit_header: ~/assets/cgit/acme-header.html +cgit_head_include: ~/assets/cgit/acme-head-include.html diff --git a/inventory-example/host_vars/dmz-mx1.yml b/inventory-example/host_vars/dmz-mx1.yml new file mode 100644 index 0000000..2ee6004 --- /dev/null +++ b/inventory-example/host_vars/dmz-mx1.yml @@ -0,0 +1 @@ +postfix_myhostname: mx1.example.com # changeme - your public MX hostname diff --git a/inventory-example/host_vars/dmz-www1.yml b/inventory-example/host_vars/dmz-www1.yml new file mode 100644 index 0000000..b44309e --- /dev/null +++ b/inventory-example/host_vars/dmz-www1.yml @@ -0,0 +1,9 @@ +nagios_https_vhosts: # changeme - https vhosts to monitor + - example.com + - example.net + - www.example.com + - www.example.net + +# subdirs of /var/www to be included in the backup.yml playbook +apache_backup_dirs: + - www.example.com diff --git a/inventory-example/host_vars/nas1.yml b/inventory-example/host_vars/nas1.yml new file mode 100644 index 0000000..304e16f --- /dev/null +++ b/inventory-example/host_vars/nas1.yml @@ -0,0 +1,128 @@ +# This file contains a few complex dictionaries used to set up ZFS datasets, +# NFS exports, autofs mounts, and file permissions for network shares. +# +# changeme: everything in this file, probably. +--- +# zpools for this host, and any pool-level properties you wish to set +zfs_pools: + - name: tank + mountpoint: /tank + properties: + ashift: 12 + autotrim: 'on' + vdevs: + - type: raidz2 + devices: + - /dev/disk/by-id/scsi-SSEAGATE_SSSSSSSSSSSS_00000001 + - /dev/disk/by-id/scsi-SSEAGATE_SSSSSSSSSSSS_00000002 + - /dev/disk/by-id/scsi-SSEAGATE_SSSSSSSSSSSS_00000003 + - /dev/disk/by-id/scsi-SSEAGATE_SSSSSSSSSSSS_00000004 + - /dev/disk/by-id/scsi-SSEAGATE_SSSSSSSSSSSS_00000005 + - /dev/disk/by-id/scsi-SSEAGATE_SSSSSSSSSSSS_00000006 + - /dev/disk/by-id/scsi-SSEAGATE_SSSSSSSSSSSS_00000007 + - /dev/disk/by-id/scsi-SSEAGATE_SSSSSSSSSSSS_00000008 + - type: raidz2 + devices: + - /dev/disk/by-id/scsi-SSEAGATE_SSSSSSSSSSSS_00000009 + - /dev/disk/by-id/scsi-SSEAGATE_SSSSSSSSSSSS_00000010 + - /dev/disk/by-id/scsi-SSEAGATE_SSSSSSSSSSSS_00000011 + - /dev/disk/by-id/scsi-SSEAGATE_SSSSSSSSSSSS_00000012 + - /dev/disk/by-id/scsi-SSEAGATE_SSSSSSSSSSSS_00000013 + - /dev/disk/by-id/scsi-SSEAGATE_SSSSSSSSSSSS_00000014 + - /dev/disk/by-id/scsi-SSEAGATE_SSSSSSSSSSSS_00000015 + - /dev/disk/by-id/scsi-SSEAGATE_SSSSSSSSSSSS_00000016 + - type: log + devices: + - /dev/disk/by-id/nvme-INTEL_IIIIIIIIIIIII_000000000000000001 + +# ZFS datasets for this host, and any properties you wish to set. 
+zfs_datasets: + - name: tank + properties: + compression: lz4 + acltype: posix + xattr: sa + relatime: 'on' + com.sun:auto-snapshot:frequent: 'false' + +# For each NFS export on this host, specify the following: +# - dataset: zfs dataset +# - zfs_properties: zfs dataset properties +# - owner: unix owner of the directory +# - group: unix group owner of the directory +# - acl: list of POSIX ACLs for the directory +# - options: NFS export options +# - clients: NFS client list (client/options pairs) +# - automount_map: autofs map name +# - autofs_key: autofs key name (default: basename) +# - smb_share: SMB share name if you want to share the directory over CIFS +nfs_exports: + - dataset: tank/archive + zfs_properties: + refquota: 500G + owner: s-archiver + group: sysadmins + mode: 02770 + acl: + - entity: sysadmins + etype: group + permissions: rwX + default: yes + options: crossmnt + clients: + - client: archive1 + options: sec=krb5p,rw + automount_map: auto.nfs + + - dataset: tank/media/pictures + group: role-photo-admin + mode: 02770 + acl: + - entity: role-photo-admin + etype: group + permissions: rwX + default: yes + options: rw,crossmnt + clients: + - client: '{{ vlans.trusted.cidr }}' + options: sec=krb5p + - client: syncthing1 + options: sec=sys + automount_map: auto.nfs_media + + - dataset: tank/media/music + group: role-music-admin + mode: 02770 + acl: + - entity: role-music-admin + etype: group + permissions: rwX + default: yes + + - entity: role-music-access + etype: group + permissions: rX + default: yes + options: rw,crossmnt + clients: + - client: '{{ vlans.trusted.cidr }}' + options: sec=krb5p + - client: syncthing1 + options: sec=sys + automount_map: auto.nfs_media + +# This list contains all users whose homedirs should live on this host. +# ZFS datasets, NFS exports, and autofs maps will be created automatically. +nfs_homedirs: + - user: johndoe + priv_quota: 250G + - user: janedoe + priv_quota: 250G + - group: doefamily + priv_quota: 500G + +# List any SMB shares to create here. +# All home directories automatically get an SMB share. +smb_shares: + - name: media + path: /tank/media diff --git a/inventory-example/host_vars/opnsense1/vars.yml b/inventory-example/host_vars/opnsense1/vars.yml new file mode 100644 index 0000000..ec5ab37 --- /dev/null +++ b/inventory-example/host_vars/opnsense1/vars.yml @@ -0,0 +1,8 @@ +freebsd_loader_config: + 'mrsas_load': 'YES' + 'hw.mfi.mrsas_enable': 1 + 'kern.ipc.nmbclusters': 1000000 + 'kern.ipc.nmbjumbop': 524288 + +opnsense_backup_api_key: '{{ vault_opnsense_backup_api_key }}' +opnsense_backup_api_secret: '{{ vault_opnsense_backup_api_secret }}' diff --git a/inventory-example/host_vars/opnsense1/vault.yml b/inventory-example/host_vars/opnsense1/vault.yml new file mode 100644 index 0000000..fbc5b60 --- /dev/null +++ b/inventory-example/host_vars/opnsense1/vault.yml @@ -0,0 +1,6 @@ +# This is a sample file with fake secrets. For a real deployment, encrypt this +# file with `ansible-vault encrypt` and add your own secrets. +--- +# Generate these values from the OPNsense web interface.
+vault_opnsense_backup_api_key: AAAAAAAAAAAchangeme +vault_opnsense_backup_api_secret: AAAAAAAAAchangeme diff --git a/inventory-example/host_vars/privbrowse1.yml b/inventory-example/host_vars/privbrowse1.yml new file mode 100644 index 0000000..155cbf0 --- /dev/null +++ b/inventory-example/host_vars/privbrowse1.yml @@ -0,0 +1,8 @@ +cname: + - invidious + - nitter + - teddit + +invidious_server_name: invidious.{{ domain }} +teddit_server_name: teddit.{{ domain }} +nitter_server_name: nitter.{{ domain }} diff --git a/inventory-example/host_vars/switch1/vars.yml b/inventory-example/host_vars/switch1/vars.yml new file mode 100644 index 0000000..f09d6f3 --- /dev/null +++ b/inventory-example/host_vars/switch1/vars.yml @@ -0,0 +1,15 @@ +edgeswitch_backup_username: changeme +edgeswitch_backup_password: '{{ vault_edgeswitch_backup_password }}' + +nagios_interfaces: # changeme (or delete) + - 0/1 + - 0/2 + - 0/3 + - 0/4 + - 0/5 + - 0/6 + - 0/7 + - 0/8 + - 0/9 + - 0/10 + - 3/1 diff --git a/inventory-example/host_vars/switch1/vault.yml b/inventory-example/host_vars/switch1/vault.yml new file mode 100644 index 0000000..7067cd6 --- /dev/null +++ b/inventory-example/host_vars/switch1/vault.yml @@ -0,0 +1,4 @@ +# This is a sample file with fake secrets. For a real deployment, encrypt this +# file with `ansible-vault encrypt` and add your own secrets. +--- +vault_edgeswitch_backup_password: changeme diff --git a/inventory-example/host_vars/ttrss1.yml b/inventory-example/host_vars/ttrss1.yml new file mode 100644 index 0000000..f81784a --- /dev/null +++ b/inventory-example/host_vars/ttrss1.yml @@ -0,0 +1 @@ +ttrss_server_name: ttrss.{{ domain }} diff --git a/inventory-example/host_vars/tuxbook1.yml b/inventory-example/host_vars/tuxbook1.yml new file mode 100644 index 0000000..9fd1945 --- /dev/null +++ b/inventory-example/host_vars/tuxbook1.yml @@ -0,0 +1 @@ +linux_laptop_wlan_device: wlp2s0 diff --git a/inventory-example/host_vars/tuxstation1.yml b/inventory-example/host_vars/tuxstation1.yml new file mode 100644 index 0000000..92f34ef --- /dev/null +++ b/inventory-example/host_vars/tuxstation1.yml @@ -0,0 +1,5 @@ +# When powersave is enabled on the communication controller of the Dell +# Optiplex Micro, the onboard NIC drops a *huge* amount of packets. +# see https://bugzilla.kernel.org/show_bug.cgi?id=213377 +udev_pci_powersave_blacklist: + - 8086:43e0 diff --git a/inventory-example/host_vars/tuxstation2.yml b/inventory-example/host_vars/tuxstation2.yml new file mode 100644 index 0000000..ca83f4e --- /dev/null +++ b/inventory-example/host_vars/tuxstation2.yml @@ -0,0 +1,8 @@ +# When powersave is enabled on the communication controller of the Dell +# Optiplex Micro, the onboard NIC drops a *huge* amount of packets.
+# see https://bugzilla.kernel.org/show_bug.cgi?id=213377 +udev_pci_powersave_blacklist: + - 8086:7ae8 + +# This i915 parameter was required in EL8 +grub_cmdline: resume=/dev/mapper/rl-swap rd.lvm.lv=rl/root rd.lvm.lv=rl/swap i915.force_probe=4680 diff --git a/inventory-example/host_vars/wiki1.yml b/inventory-example/host_vars/wiki1.yml new file mode 100644 index 0000000..3141618 --- /dev/null +++ b/inventory-example/host_vars/wiki1.yml @@ -0,0 +1 @@ +mediawiki_fqdn: wiki.{{ domain }} diff --git a/inventory-example/host_vars/www1.yml b/inventory-example/host_vars/www1.yml new file mode 100644 index 0000000..d65643b --- /dev/null +++ b/inventory-example/host_vars/www1.yml @@ -0,0 +1 @@ +apache_use_nfs: yes diff --git a/playbooks/archiver.yml b/playbooks/archiver.yml new file mode 100644 index 0000000..9056db3 --- /dev/null +++ b/playbooks/archiver.yml @@ -0,0 +1,9 @@ +- import_playbook: common.yml + vars: + hostlist: archive_servers + +- name: configure archiver + hosts: archive_servers + tags: archive,archiver + roles: + - archive_server diff --git a/playbooks/asterisk.yml b/playbooks/asterisk.yml new file mode 100644 index 0000000..153176a --- /dev/null +++ b/playbooks/asterisk.yml @@ -0,0 +1,18 @@ +- import_playbook: common.yml + vars: + hostlist: asterisk_servers + +- name: configure asterisk pbx + hosts: asterisk_servers + tags: asterisk + roles: + - role: asterisk + + - role: archive_job + archive_name: asterisk + archive_user: asterisk + archive_shell: >- + TIMESTAMP=$(date +%Y%m%d%H%M%S); + tar czf "asterisk-${TIMESTAMP}.tar.gz" + --transform "s|^\.|asterisk-${TIMESTAMP}|" -C {{ asterisk_data_dir }} . + tags: archive diff --git a/playbooks/bitwarden.yml b/playbooks/bitwarden.yml new file mode 100644 index 0000000..cb9a911 --- /dev/null +++ b/playbooks/bitwarden.yml @@ -0,0 +1,15 @@ +- import_playbook: common.yml + vars: + hostlist: bitwarden_servers + +- name: configure vaultwarden + hosts: bitwarden_servers + tags: vaultwarden,bitwarden + roles: + - role: vaultwarden + + - role: apache_vhost + apache_default_vhost: yes + apache_canonical_hostname: '{{ vaultwarden_server_name }}' + apache_config: '{{ vaultwarden_apache_config }}' + tags: apache diff --git a/playbooks/common.yml b/playbooks/common.yml new file mode 100644 index 0000000..e96be0b --- /dev/null +++ b/playbooks/common.yml @@ -0,0 +1,112 @@ +- hosts: '{{ hostlist | default("el") }}' + gather_facts: no + tags: common + roles: + - role: proxmox_instance + when: '"proxmox_instances" in group_names' + tags: proxmox + + - role: dns_records + when: not (bootstrap | default(false)) + tags: dns + + - role: gather_facts + when: not ansible_facts + tags: always + + - role: udev + when: not ansible_virtualization_tech_guest + tags: udev + + - role: root_authorized_keys + tags: authorized_keys + + - role: root_password + tags: root_password + + - role: polkit + tags: polkit + + - role: grub + tags: grub + + - role: sudo + tags: sudo,sudoers + + - role: hostname + tags: hostname + + - role: timezone + tags: timezone + + - role: journald + tags: journald + + - role: yum_disable_default_repos + when: '"yum_mirrors" not in group_names' + + - role: yum + yum_repositories: + - rocky-baseos + - rocky-appstream + - rocky-extras + when: + - '"yum_mirrors" not in group_names' + - not (bootstrap | default(false)) + tags: yum + + - role: dnsmasq + when: '"freeipa_servers" not in group_names' + tags: dnsmasq + + - role: locale + tags: locale + + - role: selinux + tags: selinux + + - role: qemu_guest_agent + when: '"kvm" in 
ansible_virtualization_tech_guest' + tags: qemu + + - role: firewalld + tags: firewalld + + - role: chrony + tags: chrony,ntp + + - role: dnf_automatic + tags: yum + + - role: ssh + tags: ssh + + - role: tuned + tags: tuned + + - role: motd + tags: motd + + - role: packages + tags: packages + + - role: postfix_client + when: '"mail_servers" not in group_names' + tags: postfix,mail + + - role: freeipa_client + when: '"freeipa_servers" not in group_names' + tags: freeipa + + - role: rsyslog_client + when: + - '"syslog_servers" not in group_names' + - not (bootstrap | default(false)) + tags: rsyslog + + - role: nagios_client + when: + - (group_names | intersect(nagios_excluded_groups) | length) == 0 + - '"yum_mirrors" not in group_names' + - not (bootstrap | default(false)) + tags: nagios diff --git a/playbooks/cups.yml b/playbooks/cups.yml new file mode 100644 index 0000000..20bed0b --- /dev/null +++ b/playbooks/cups.yml @@ -0,0 +1,14 @@ +- import_playbook: common.yml + vars: + hostlist: cups_servers + +- name: configure cups + hosts: cups_servers + tags: cups + roles: + - role: cups_server + + - role: archive_job + archive_name: cups + archive_shell: '{{ cups_archive_shell }}' + tags: archive diff --git a/playbooks/dav.yml b/playbooks/dav.yml new file mode 100644 index 0000000..21c4a97 --- /dev/null +++ b/playbooks/dav.yml @@ -0,0 +1,25 @@ +- import_playbook: common.yml + vars: + hostlist: dav_servers + +- name: configure sabredav + hosts: dav_servers + tags: sabredav,dav + roles: + - role: sabredav + + - role: apache_vhost + apache_default_vhost: yes + apache_document_root: '{{ sabredav_home }}' + apache_config: '{{ sabredav_apache_config }}' + tags: apache + + - role: php + php_fpm_environment: '{{ sabredav_php_environment }}' + php_fpm_admin_flags: '{{ sabredav_php_flags }}' + tags: php + + - role: archive_job + archive_name: webdav + archive_shell: '{{ sabredav_archive_shell }}' + tags: archive diff --git a/playbooks/dev_servers.yml b/playbooks/dev_servers.yml new file mode 100644 index 0000000..2602d6d --- /dev/null +++ b/playbooks/dev_servers.yml @@ -0,0 +1,9 @@ +- import_playbook: common.yml + vars: + hostlist: dev_servers + +- name: configure development environment + hosts: dev_servers + tags: dev + roles: + - dev_environment diff --git a/playbooks/dns_records.yml b/playbooks/dns_records.yml new file mode 100644 index 0000000..93d635e --- /dev/null +++ b/playbooks/dns_records.yml @@ -0,0 +1,28 @@ +- name: add dns records for infrastructure hosts + hosts: proxmox_hypervisors:opnsense_firewalls:unmanaged + tags: dns + roles: + - dns_records + +- name: add reverse dns records for firewall vlan interfaces + hosts: freeipa_master + tags: dns + tasks: + - name: create reverse dns zones + ipadnszone: + ipaadmin_principal: '{{ ipa_user }}' + ipaadmin_password: '{{ ipa_pass }}' + zone_name: '{{ item }}' + state: present + loop: "{{ vlans.values() | map(attribute='gateway') | ansible.utils.ipaddr('revdns') | map('regex_replace', '^[^.]+\\.', '') | unique }}" + + - name: create ptr records + ipadnsrecord: + ipaadmin_principal: '{{ ipa_user }}' + ipaadmin_password: '{{ ipa_pass }}' + zone_name: "{{ item | ansible.utils.ipaddr('revdns') | regex_replace('^[^.]+\\.', '') }}" + record_name: '{{ item | split(".") | last }}' + record_type: PTR + record_value: '{{ groups["opnsense_firewalls"] | sort | first }}.{{ domain }}.' 
+ state: present + loop: "{{ vlans.values() | map(attribute='gateway') }}" diff --git a/playbooks/freeipa.yml b/playbooks/freeipa.yml new file mode 100644 index 0000000..a83aaf6 --- /dev/null +++ b/playbooks/freeipa.yml @@ -0,0 +1,24 @@ +- import_playbook: common.yml + vars: + hostlist: freeipa_servers + +- name: configure freeipa master + hosts: freeipa_master + tags: freeipa + roles: + - role: freeipa_server + + - role: archive_job + archive_name: ipa + archive_on_calendar: 'Sat *-*-* 02:00:00' + archive_shell: >- + ipa-backup && + mv -v /var/lib/ipa/backup/* . && + find . -mindepth 1 -type d -exec chmod -v 770 {} + + tags: archive + +- name: configure freeipa replicas + hosts: freeipa_servers:!freeipa_master + tags: freeipa + roles: + - freeipa_server diff --git a/playbooks/freeipa_bootstrap.yml b/playbooks/freeipa_bootstrap.yml new file mode 100644 index 0000000..38865a1 --- /dev/null +++ b/playbooks/freeipa_bootstrap.yml @@ -0,0 +1,10 @@ +- import_playbook: common.yml + vars: + hostlist: freeipa_master + bootstrap: yes + +- name: configure freeipa master + hosts: freeipa_master + tags: freeipa + roles: + - freeipa_server diff --git a/playbooks/freeipa_replica.yml b/playbooks/freeipa_replica.yml new file mode 100644 index 0000000..adac739 --- /dev/null +++ b/playbooks/freeipa_replica.yml @@ -0,0 +1,9 @@ +- import_playbook: common.yml + vars: + hostlist: freeipa_master + +- name: configure freeipa replicas + hosts: freeipa_servers:!freeipa_master + tags: freeipa + roles: + - freeipa_replica diff --git a/playbooks/git.yml b/playbooks/git.yml new file mode 100644 index 0000000..9e4c112 --- /dev/null +++ b/playbooks/git.yml @@ -0,0 +1,54 @@ +- import_playbook: common.yml + vars: + hostlist: git_servers + +- name: configure git repository + hosts: git_servers + tags: git + roles: + - role: gitolite + tags: gitolite + + - role: archive_job + archive_name: gitolite + archive_user: '{{ gitolite_user }}' + archive_shell: '{{ gitolite_archive_shell }}' + tags: archive + + - role: cgit + tags: cgit + + - role: apache_vhost + apache_default_vhost: yes + apache_document_root: '{{ cgit_static_dir }}' + apache_config: | + SetEnv "GIT_PROJECT_ROOT" "{{ gitolite_home }}/repositories" + SetEnv "GIT_HTTP_EXPORT_ALL" "1" + + + AuthType GSSAPI + AuthName "FreeIPA Single Sign-On" + AuthLDAPUrl "{{ apache_ldap_url }}?krbprincipalname" + {{ apache_ldap_creds }} + + + Require ip {{ kerberized_cidrs | join(" ") }} + + Require ldap-attribute memberof=cn={{ gitolite_access_group }},{{ freeipa_group_basedn }} + Require ldap-attribute memberof=cn={{ gitolite_admin_group }},{{ freeipa_group_basedn }} + + + + Require not ip {{ kerberized_cidrs | join(" ") }} + Require all granted + + + + + Alias /static "{{ cgit_static_dir }}" + + ScriptAliasMatch "{{ git_backend_regex }}" "{{ gitolite_cgi_script }}/$1" + ScriptAlias "/" "{{ cgit_cgi_script }}/" + vars: + git_backend_regex: '(?x)^/(.*/(HEAD | info/refs | objects/(info/[^/]+ | [0-9a-f]{2}/[0-9a-f]{38} | pack/pack-[0-9a-f]{40}\.(pack|idx)) | git-(upload|receive)-pack))$' + tags: apache diff --git a/playbooks/jellyfin.yml b/playbooks/jellyfin.yml new file mode 100644 index 0000000..7fa6721 --- /dev/null +++ b/playbooks/jellyfin.yml @@ -0,0 +1,20 @@ +- import_playbook: common.yml + vars: + hostlist: jellyfin_servers + +- name: configure jellyfin + hosts: jellyfin_servers + tags: jellyfin + roles: + - role: jellyfin + + - role: apache_vhost + apache_default_vhost: yes + apache_config: '{{ jellyfin_apache_config }}' + tags: apache + + - role: archive_job + archive_name: 
jellyfin + archive_user: '{{ jellyfin_user }}' + archive_shell: '{{ jellyfin_archive_shell }}' + tags: archive diff --git a/playbooks/linux_desktops.yml b/playbooks/linux_desktops.yml new file mode 100644 index 0000000..d7c2fee --- /dev/null +++ b/playbooks/linux_desktops.yml @@ -0,0 +1,24 @@ +- import_playbook: common.yml + vars: + hostlist: linux_desktops + +- name: configure linux desktop environment + hosts: linux_desktops + roles: + - role: dev_environment + tags: dev + + - role: linux_desktop + tags: desktop,linux_desktop + + - role: local_homedirs + tags: local_homedirs,homedirs,homedir + + - role: firefox + tags: firefox + + - role: evolution + tags: evolution + + - role: cups_client + tags: cups diff --git a/playbooks/linux_laptops.yml b/playbooks/linux_laptops.yml new file mode 100644 index 0000000..c841e95 --- /dev/null +++ b/playbooks/linux_laptops.yml @@ -0,0 +1,27 @@ +- import_playbook: common.yml + vars: + hostlist: linux_laptops + +- name: configure linux desktop environment + hosts: linux_laptops + roles: + - role: dev_environment + tags: dev + + - role: linux_desktop + tags: desktop,linux_desktop + + - role: local_homedirs + tags: local_homedirs,homedirs,homedir + + - role: firefox + tags: firefox + + - role: evolution + tags: evolution + + - role: cups_client + tags: cups + + - role: linux_laptop + tags: laptop,linux_laptop diff --git a/playbooks/mail.yml b/playbooks/mail.yml new file mode 100644 index 0000000..6df70f3 --- /dev/null +++ b/playbooks/mail.yml @@ -0,0 +1,36 @@ +- import_playbook: common.yml + vars: + hostlist: rspamd_servers,mail_servers,imap_servers + +- name: configure rspamd + hosts: rspamd_servers + tags: rspamd + roles: + - role: rspamd + + - role: apache_vhost + apache_default_vhost: yes + apache_config: '{{ rspamd_apache_config }}' + tags: apache + + - role: archive_job + archive_name: rspamd + archive_shell: '{{ rspamd_archive_shell }}' + tags: archive + +- name: configure Postfix + hosts: mail_servers + tags: postfix,smtp + roles: + - postfix_server + +- name: configure Dovecot + hosts: imap_servers + tags: dovecot,imap + roles: + - role: dovecot + + - role: archive_job + archive_name: dovecot + archive_command: '{{ dovecot_archive_script }}' + tags: archive diff --git a/playbooks/nagios.yml b/playbooks/nagios.yml new file mode 100644 index 0000000..cb13d57 --- /dev/null +++ b/playbooks/nagios.yml @@ -0,0 +1,15 @@ +- import_playbook: common.yml + vars: + hostlist: nagios_servers + +- name: configure nagios + hosts: nagios_servers + tags: nagios + roles: + - role: nagios_server + + - role: apache_vhost + apache_default_vhost: yes + apache_document_root: '{{ nagios_html_dir }}' + apache_config: '{{ nagios_apache_config }}' + tags: apache diff --git a/playbooks/nameservers.yml b/playbooks/nameservers.yml new file mode 100644 index 0000000..a977744 --- /dev/null +++ b/playbooks/nameservers.yml @@ -0,0 +1,9 @@ +- import_playbook: common.yml + vars: + hostlist: authoritative_nameservers + +- name: configure nsd + hosts: authoritative_nameservers + tags: nsd + roles: + - nsd diff --git a/playbooks/nfs.yml b/playbooks/nfs.yml new file mode 100644 index 0000000..a066afb --- /dev/null +++ b/playbooks/nfs.yml @@ -0,0 +1,9 @@ +- import_playbook: common.yml + vars: + hostlist: nfs_servers + +- name: configure nfs exports + hosts: nfs_servers + tags: nfs + roles: + - nfs_server diff --git a/playbooks/opnsense.yml b/playbooks/opnsense.yml new file mode 100644 index 0000000..dd23a91 --- /dev/null +++ b/playbooks/opnsense.yml @@ -0,0 +1,17 @@ +- name: configure 
opnsense firewall + hosts: opnsense_firewalls + gather_facts: yes + vars: + unbound_max_negative_cache: 5 + roles: + - freebsd_loader + - devd + - pxe_server + tasks: + - name: set unbound negative ttl + copy: + content: | + server: + cache-max-negative-ttl: {{ unbound_max_negative_cache }} + dest: /usr/local/etc/unbound.opnsense.d/custom.conf + tags: unbound diff --git a/playbooks/photostructure.yml b/playbooks/photostructure.yml new file mode 100644 index 0000000..12ebe1f --- /dev/null +++ b/playbooks/photostructure.yml @@ -0,0 +1,20 @@ +- import_playbook: common.yml + vars: + hostlist: photostructure_servers + +- name: configure photostructure + hosts: photostructure_servers + tags: photostructure + roles: + - role: photostructure + + - role: apache_vhost + apache_default_vhost: yes + apache_config: '{{ photostructure_apache_config }}' + tags: apache + + - role: archive_job + archive_name: photostructure + archive_shell: '{{ photostructure_archive_shell }}' + archive_on_calendar: monthly + tags: archive diff --git a/playbooks/populate_domain.yml b/playbooks/populate_domain.yml new file mode 100644 index 0000000..acb1ec7 --- /dev/null +++ b/playbooks/populate_domain.yml @@ -0,0 +1,99 @@ +- name: populate freeipa domain + hosts: freeipa_master + vars: + default_user_password: ChangeMe123! + tasks: + - name: create users + ipauser: + ipaadmin_principal: '{{ ipa_user }}' + ipaadmin_password: '{{ ipa_pass }}' + name: '{{ item.name }}' + givenname: '{{ item.givenname }}' + sn: '{{ item.sn }}' + email: '{{ [item.mail] if item.mail is defined else omit }}' + loginshell: '{{ item.loginshell | default(omit) }}' + password: '{{ item.password | default(default_user_password) }}' + update_password: on_create + state: present + loop: '{{ freeipa_users | default([]) }}' + tags: users + + - name: add custom attributes + ldap_attrs: + dn: 'uid={{ item.name }},{{ freeipa_user_basedn }}' + attributes: + mailAlternateAddress: '{{ item.mail_aliases | default([]) }}' + jid: '{{ item.jid | default([]) }}' + bind_dn: uid={{ ipa_user }},{{ freeipa_user_basedn }} + bind_pw: '{{ ipa_pass }}' + server_uri: ldaps://{{ ipa_host }} + state: exact + loop: "{{ freeipa_users | default([]) }}" + tags: users + + - name: create groups + ipagroup: + ipaadmin_principal: '{{ ipa_user }}' + ipaadmin_password: '{{ ipa_pass }}' + name: '{{ item.name }}' + description: '{{ item.description | default(omit) }}' + user: '{{ item.user | default(omit) }}' + group: '{{ item.group | default(omit) }}' + nonposix: '{{ item.nonposix | default(omit) }}' + action: '{{ "member" if (item.append | default(false)) else "group" }}' + state: present + loop: '{{ freeipa_groups | default([]) }}' + tags: groups + + - name: add group email addresses + ldap_attrs: + dn: 'cn={{ item.name }},{{ freeipa_group_basedn }}' + attributes: + mail: '{{ item.mail | default([]) }}' + mailAlternateAddress: '{{ item.mail_aliases | default([]) }}' + bind_dn: uid={{ ipa_user }},{{ freeipa_user_basedn }} + bind_pw: '{{ ipa_pass }}' + server_uri: ldaps://{{ ipa_host }} + state: exact + loop: "{{ freeipa_groups | default([]) }}" + tags: groups + + - name: create sudo rules + ipasudorule: + ipaadmin_principal: '{{ ipa_user }}' + ipaadmin_password: '{{ ipa_pass }}' + name: '{{ item.name }}' + description: '{{ item.description | default(omit) }}' + allow_sudocmd: '{{ item.cmd | default(omit) }}' + cmdcategory: '{{ item.cmdcategory | default(omit) }}' + allow_sudocmdgroup: '{{ item.cmdgroup | default(omit) }}' + host: '{{ item.host | default(omit) }}' + hostcategory: '{{ 
item.hostcategory | default(omit) }}' + hostgroup: '{{ item.hostgroup | default(omit) }}' + runasusercategory: '{{ item.runasusercategory | default(omit) }}' + runasgroupcategory: '{{ item.runasgroupcategory | default(omit) }}' + user: '{{ item.user | default(omit) }}' + usercategory: '{{ item.usercategory | default(omit) }}' + group: '{{ item.usergroup | default(omit) }}' + state: present + loop: '{{ freeipa_sudo_rules | default([]) }}' + tags: sudo + + - name: create hbac rules + ipahbacrule: + ipaadmin_principal: '{{ ipa_user }}' + ipaadmin_password: '{{ ipa_pass }}' + name: '{{ item.name }}' + description: '{{ item.description | default(omit) }}' + host: '{{ item.host | default(omit) }}' + hostcategory: '{{ item.hostcategory | default(omit) }}' + hostgroup: '{{ item.hostgroup | default(omit) }}' + hbacsvc: '{{ item.service | default(omit) }}' + servicecategory: '{{ item.servicecategory | default(omit) }}' + hbacsvcgroup: '{{ item.servicegroup | default(omit) }}' + user: '{{ item.user | default(omit) }}' + usercategory: '{{ item.usercategory | default(omit) }}' + group: '{{ item.usergroup | default(omit) }}' + state: present + loop: '{{ freeipa_hbac_rules | default([]) }}' + tags: hbac diff --git a/playbooks/postgres.yml b/playbooks/postgres.yml new file mode 100644 index 0000000..72192ec --- /dev/null +++ b/playbooks/postgres.yml @@ -0,0 +1,15 @@ +- import_playbook: common.yml + vars: + hostlist: postgresql_servers + +- name: configure postgresql + hosts: postgresql_servers + tags: postgres,postgresql + roles: + - role: postgresql_server + + - role: archive_job + archive_name: postgres + archive_user: '{{ postgresql_user }}' + archive_shell: '{{ postgresql_archive_shell }}' + tags: archive diff --git a/playbooks/privbrowse.yml b/playbooks/privbrowse.yml new file mode 100644 index 0000000..8b61d5c --- /dev/null +++ b/playbooks/privbrowse.yml @@ -0,0 +1,33 @@ +- import_playbook: common.yml + vars: + hostlist: privbrowse_servers + +- name: configure web service frontends + hosts: privbrowse_servers + roles: + - role: invidious + tags: invidious + + - role: apache_vhost + apache_server_name: '{{ invidious_server_name }}' + apache_server_aliases: [] + apache_config: '{{ invidious_apache_config }}' + tags: apache + + - role: teddit + tags: teddit + + - role: apache_vhost + apache_server_name: '{{ teddit_server_name }}' + apache_server_aliases: [] + apache_config: '{{ teddit_apache_config }}' + tags: apache + + - role: nitter + tags: nitter + + - role: apache_vhost + apache_server_name: '{{ nitter_server_name }}' + apache_server_aliases: [] + apache_config: '{{ nitter_apache_config }}' + tags: apache diff --git a/playbooks/proxmox.yml b/playbooks/proxmox.yml new file mode 100644 index 0000000..9c449a4 --- /dev/null +++ b/playbooks/proxmox.yml @@ -0,0 +1,21 @@ +- name: configure proxmox hypervisor + hosts: proxmox_hypervisors + gather_facts: yes + roles: + - role: root_password + tags: root_password + + - role: root_authorized_keys + tags: ssh,authorized_keys + + - role: journald + tags: journald + + - role: hostname + tags: hostname + + - role: timezone + tags: timezone + + - role: proxmox_hypervisor + tags: proxmox,pve diff --git a/playbooks/proxmox_instance.yml b/playbooks/proxmox_instance.yml new file mode 100644 index 0000000..f326b4a --- /dev/null +++ b/playbooks/proxmox_instance.yml @@ -0,0 +1,5 @@ +- name: build proxmox virtual machine + hosts: proxmox_instances + tags: proxmox + roles: + - proxmox_instance diff --git a/playbooks/radius.yml b/playbooks/radius.yml new file mode 100644 
index 0000000..6529365 --- /dev/null +++ b/playbooks/radius.yml @@ -0,0 +1,9 @@ +- import_playbook: common.yml + vars: + hostlist: radius_servers + +- name: configure freeradius + hosts: radius_servers + tags: freeradius,radius + roles: + - freeradius diff --git a/playbooks/site.yml b/playbooks/site.yml new file mode 100644 index 0000000..d4bcd11 --- /dev/null +++ b/playbooks/site.yml @@ -0,0 +1,37 @@ +# internal hosts +- import_playbook: opnsense.yml +- import_playbook: proxmox.yml +- import_playbook: freeipa_bootstrap.yml +- import_playbook: dns_records.yml +- import_playbook: yum.yml +- import_playbook: freeipa.yml +- import_playbook: archiver.yml +- import_playbook: syslog.yml +- import_playbook: mail.yml +- import_playbook: cups.yml +- import_playbook: radius.yml +- import_playbook: unifi.yml +- import_playbook: postgres.yml +- import_playbook: dav.yml +- import_playbook: bitwarden.yml +- import_playbook: ttrss.yml +- import_playbook: znc.yml +- import_playbook: git.yml +- import_playbook: wiki.yml +- import_playbook: jellyfin.yml +- import_playbook: privbrowse.yml +- import_playbook: populate_domain.yml +- import_playbook: syncthing.yml +- import_playbook: photostructure.yml +- import_playbook: nfs.yml +- import_playbook: webserver_internal.yml +- import_playbook: dev_servers.yml +- import_playbook: linux_desktops.yml +- import_playbook: nagios.yml + +# public-facing hosts +- import_playbook: nameservers.yml +- import_playbook: webserver_public.yml +- import_playbook: turn.yml +- import_playbook: xmpp.yml +- import_playbook: asterisk.yml diff --git a/playbooks/syncthing.yml b/playbooks/syncthing.yml new file mode 100644 index 0000000..3fad588 --- /dev/null +++ b/playbooks/syncthing.yml @@ -0,0 +1,19 @@ +- import_playbook: common.yml + vars: + hostlist: syncthing_servers + +- name: configure syncthing + hosts: syncthing_servers + tags: syncthing + roles: + - role: syncthing + + - role: archive_job + archive_name: syncthing + archive_shell: '{{ syncthing_archive_shell }}' + tags: archive + + - role: apache_vhost + apache_default_vhost: yes + apache_config: '{{ syncthing_apache_config }}' + tags: apache diff --git a/playbooks/syslog.yml b/playbooks/syslog.yml new file mode 100644 index 0000000..2891dc6 --- /dev/null +++ b/playbooks/syslog.yml @@ -0,0 +1,9 @@ +- import_playbook: common.yml + vars: + hostlist: syslog_servers + +- name: configure rsyslog server + hosts: syslog_servers + tags: rsyslog,syslog + roles: + - rsyslog_server diff --git a/playbooks/test.yml b/playbooks/test.yml new file mode 100644 index 0000000..f3eaa62 --- /dev/null +++ b/playbooks/test.yml @@ -0,0 +1,4 @@ +- hosts: all + tasks: + - debug: + var: vlan diff --git a/playbooks/ttrss.yml b/playbooks/ttrss.yml new file mode 100644 index 0000000..befd157 --- /dev/null +++ b/playbooks/ttrss.yml @@ -0,0 +1,21 @@ +- import_playbook: common.yml + tags: common + vars: + hostlist: ttrss_servers + +- name: configure tinytinyrss + hosts: ttrss_servers + tags: ttrss + roles: + - role: ttrss + + - role: apache_vhost + apache_default_vhost: yes + apache_canonical_hostname: '{{ ttrss_server_name }}' + apache_document_root: '{{ ttrss_home }}' + apache_config: '{{ ttrss_apache_config }}' + tags: apache + + - role: php + php_fpm_environment: '{{ ttrss_php_environment }}' + tags: php diff --git a/playbooks/turn.yml b/playbooks/turn.yml new file mode 100644 index 0000000..20b6196 --- /dev/null +++ b/playbooks/turn.yml @@ -0,0 +1,10 @@ +- import_playbook: common.yml + tags: common + vars: + hostlist: turn_servers + +- name: configure 
coturn + hosts: turn_servers + tags: coturn,turn + roles: + - role: coturn diff --git a/playbooks/unifi.yml b/playbooks/unifi.yml new file mode 100644 index 0000000..1b0864d --- /dev/null +++ b/playbooks/unifi.yml @@ -0,0 +1,15 @@ +- import_playbook: common.yml + tags: common + vars: + hostlist: unifi_controllers + +- name: configure unifi controller + hosts: unifi_controllers + tags: unifi + roles: + - role: unifi + + - role: archive_job + archive_name: unifi + archive_shell: '{{ unifi_archive_shell }}' + tags: archive diff --git a/playbooks/util/backup.yml b/playbooks/util/backup.yml new file mode 100644 index 0000000..0c99eea --- /dev/null +++ b/playbooks/util/backup.yml @@ -0,0 +1,606 @@ +################# +# Set backup name +################# +- hosts: localhost + tags: always + tasks: + - name: get current timestamp + setup: + filter: ansible_date_time + + - name: create backup directory + file: + path: '{{ backup_path }}' + state: directory + +- hosts: all:localhost:!unmanaged + tags: always + tasks: + - name: set backup name + set_fact: + backup_name: '{{ backup_name | default(hostvars.localhost.ansible_date_time.iso8601_basic_short) }}' + + +################ +# IMAP Mailboxes +################ +- name: backup dovecot mailboxes + hosts: imap_servers + vars_files: ../../roles/dovecot/vars/main.yml + vars: + dovecot_backup_dir: /var/tmp/{{ backup_name }}-{{ inventory_hostname }}-mailboxes + dovecot_backup_tarball: '{{ dovecot_backup_dir }}.tar.gz' + dovecot_backup_sieve_tarball: /var/tmp/{{ backup_name }}-{{ inventory_hostname }}-sieve.tar.gz + tags: dovecot,imap + tasks: + - name: create backup directory + file: + path: '{{ dovecot_backup_dir }}' + owner: '{{ dovecot_vmail_user }}' + group: '{{ dovecot_vmail_user }}' + mode: 0770 + state: directory + + - name: collect dovecot users + command: doveadm user * + register: dovecot_users + changed_when: no + + - name: export mailboxes + command: >- + doveadm -o plugin/quota= backup -n inbox -f -u {{ item | quote }} + mdbox:{{ dovecot_backup_dir | quote }}/{{ item | quote }}/mdbox:LAYOUT=fs + loop: '{{ dovecot_users.stdout_lines }}' + + - name: compress backup directory + archive: + path: '{{ dovecot_backup_dir }}' + dest: '{{ dovecot_backup_tarball }}' + mode: 0400 + remove: yes + + - name: fetch mailbox tarball + fetch: + src: '{{ dovecot_backup_tarball }}' + dest: '{{ backup_path }}/' + flat: yes + + - name: delete mailbox tarball from remote host + file: + path: '{{ dovecot_backup_tarball }}' + state: absent + + - name: compress sieve scripts + archive: + path: + - '{{ dovecot_vmail_dir }}/*/sieve' + - '{{ dovecot_vmail_dir }}/*/.dovecot.sieve' + dest: '{{ dovecot_backup_sieve_tarball }}' + mode: 0400 + + - name: fetch sieve tarball + fetch: + src: '{{ dovecot_backup_sieve_tarball }}' + dest: '{{ backup_path }}/' + flat: yes + + - name: delete sieve tarball from remote host + file: + path: '{{ dovecot_backup_sieve_tarball }}' + state: absent + + +################## +# Rspamd Databases +################## +- name: backup rspamd databases + hosts: rspamd_servers + vars_files: + - ../../roles/redis/vars/main.yml + - ../../roles/rspamd/vars/main.yml + vars: + rspamd_backup_tarball: /var/tmp/{{ backup_name }}-{{ inventory_hostname }}-rspamd.tar.gz + tags: rspamd + tasks: + - name: dump redis databases to disk + command: + cmd: redis-cli -p {{ item }} + stdin: save + loop: + - '{{ rspamd_redis_port }}' + - '{{ rspamd_redis_bayes_port }}' + + - name: compress redis directory + archive: + path: '{{ redis_home }}' + dest: '{{ 
rspamd_backup_tarball }}' + mode: 0400 + + - name: fetch backup tarball + fetch: + src: '{{ rspamd_backup_tarball }}' + dest: '{{ backup_path }}/' + flat: yes + + - name: delete backup tarball from remote host + file: + path: '{{ rspamd_backup_tarball }}' + state: absent + + +################### +# ZNC Configuration +################### +- name: backup znc configuration + hosts: znc_servers + vars_files: ../../roles/znc/vars/main.yml + vars: + znc_backup_tarball: /var/tmp/{{ backup_name }}-{{ inventory_hostname }}-znc.tar.gz + tags: znc + tasks: + - name: compress znc directory + archive: + path: '{{ znc_home }}' + dest: '{{ znc_backup_tarball }}' + mode: 0400 + + - name: fetch backup tarball + fetch: + src: '{{ znc_backup_tarball }}' + dest: '{{ backup_path }}/' + flat: yes + + - name: delete backup tarball from remote host + file: + path: '{{ znc_backup_tarball }}' + state: absent + + +######################### +# Syncthing Configuration +######################### +- name: backup syncthing configuration + hosts: syncthing_servers + vars_files: ../../roles/syncthing/vars/main.yml + vars: + syncthing_backup_tarball: /var/tmp/{{ backup_name }}-{{ inventory_hostname }}-syncthing.tar.gz + tags: syncthing + tasks: + - name: compress syncthing directory + archive: + path: '{{ syncthing_home }}' + dest: '{{ syncthing_backup_tarball }}' + exclusion_patterns: + - '*/index-*.db*' + mode: 0400 + + - name: fetch backup tarball + fetch: + src: '{{ syncthing_backup_tarball }}' + dest: '{{ backup_path }}/' + flat: yes + + - name: delete backup tarball from remote host + file: + path: '{{ syncthing_backup_tarball }}' + state: absent + + +################## +# Git Repositories +################## +- name: backup git repositories + hosts: git_servers + vars_files: ../../roles/gitolite/vars/main.yml + vars: + git_backup_tarball: /var/tmp/{{ backup_name }}-{{ inventory_hostname }}-git.tar.gz + tags: git + tasks: + - name: compress git directory + archive: + path: '{{ gitolite_home }}' + dest: '{{ git_backup_tarball }}' + exclusion_patterns: + - git/.ansible* + mode: 0400 + + - name: fetch backup tarball + fetch: + src: '{{ git_backup_tarball }}' + dest: '{{ backup_path }}/' + flat: yes + + - name: delete backup tarball from remote host + file: + path: '{{ git_backup_tarball }}' + state: absent + + +###################### +# PostgreSQL Databases +###################### +- name: backup postgresql databases + hosts: postgresql_servers + vars_files: ../../roles/postgresql_server/vars/main.yml + vars: + postgresql_backup_file: /var/tmp/{{ backup_name }}-{{ inventory_hostname }}-pg_dumpall.sql + postgresql_backup_gzip: '{{ postgresql_backup_file }}.gz' + tags: postgres,postgresql + tasks: + - name: dump databases + command: pg_dumpall -f {{ postgresql_backup_file | quote }} + become: yes + become_user: '{{ postgresql_user }}' + + - name: compress sql file + archive: + path: '{{ postgresql_backup_file }}' + dest: '{{ postgresql_backup_gzip }}' + mode: 0400 + remove: yes + + - name: fetch backup gzip + fetch: + src: '{{ postgresql_backup_gzip }}' + dest: '{{ backup_path }}/' + flat: yes + + - name: delete backup gzip from remote + file: + path: '{{ postgresql_backup_gzip }}' + state: absent + + + +######################## +# Jellyfin Configuration +######################## +- name: backup jellyfin configuration + hosts: jellyfin_servers + vars_files: ../../roles/jellyfin/vars/main.yml + vars: + jellyfin_backup_tarball: /var/tmp/{{ backup_name }}-{{ inventory_hostname }}-jellyfin.tar.gz + tags: jellyfin + tasks: + - 
name: compress jellyfin directories + archive: + path: + - '{{ jellyfin_home }}/data' + - '{{ jellyfin_home }}/metadata' + - '{{ jellyfin_home }}/plugins' + - '{{ jellyfin_home }}/root' + - '{{ jellyfin_conf_dir }}' + dest: '{{ jellyfin_backup_tarball }}' + mode: 0400 + + - name: fetch backup tarball + fetch: + src: '{{ jellyfin_backup_tarball }}' + dest: '{{ backup_path }}/' + flat: yes + + - name: delete backup tarball from remote host + file: + path: '{{ jellyfin_backup_tarball }}' + state: absent + + +################## +# Mediawiki Images +################## +- name: backup mediawiki images + hosts: wiki_servers + vars_files: ../../roles/mediawiki/vars/main.yml + vars: + mediawiki_backup_tarball: /var/tmp/{{ backup_name }}-{{ inventory_hostname }}-mediawiki.tar.gz + tags: mediawiki,wiki + tasks: + - name: compress images directory + archive: + path: '{{ mediawiki_home }}/images' + dest: '{{ mediawiki_backup_tarball }}' + mode: 0400 + + - name: fetch backup tarball + fetch: + src: '{{ mediawiki_backup_tarball }}' + dest: '{{ backup_path }}/' + flat: yes + + - name: delete backup tarball from remote host + file: + path: '{{ mediawiki_backup_tarball }}' + state: absent + + +######################### +# Photostructure Database +######################### +- name: backup photostructure database + hosts: photostructure_servers + vars_files: ../../roles/photostructure/vars/main.yml + vars: + photostructure_backup_tarball: /var/tmp/{{ backup_name }}-{{ inventory_hostname }}-photostructure.tar + tags: photostructure + tasks: + - name: stop photostructure + systemd: + name: photostructure + state: stopped + + - name: archive photostructure library + archive: + path: '{{ photostructure_library }}' + dest: '{{ photostructure_backup_tarball }}' + format: tar + mode: 0400 + + - name: start photostructure + systemd: + name: photostructure + state: started + + - name: fetch backup tarball + fetch: + src: '{{ photostructure_backup_tarball }}' + dest: '{{ backup_path }}/' + flat: yes + validate_checksum: no # The tarball is way too big. 
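+    # The library tarball can be very large (hence validate_checksum is disabled
+    # above), so it is removed from the remote host as soon as the fetch completes.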
+ + - name: delete backup tarball from remote host + file: + path: '{{ photostructure_backup_tarball }}' + state: absent + + +############### +# Asterisk Data +############### +- name: backup asterisk data + hosts: asterisk_servers + vars_files: ../../roles/asterisk/vars/main.yml + vars: + asterisk_backup_tarball: /var/tmp/{{ backup_name }}-{{ inventory_hostname }}-asterisk.tar.gz + tags: asterisk + tasks: + - name: stop asterisk + systemd: + name: asterisk + state: stopped + + - name: compress asterisk directory + archive: + path: '{{ asterisk_data_dir }}' + dest: '{{ asterisk_backup_tarball }}' + mode: 0400 + + - name: start asterisk + systemd: + name: asterisk + state: started + + - name: fetch backup tarball + fetch: + src: '{{ asterisk_backup_tarball }}' + dest: '{{ backup_path }}/' + flat: yes + + - name: delete backup tarball from remote host + file: + path: '{{ asterisk_backup_tarball }}' + state: absent + + +#################### +# Cups Configuration +#################### +- name: backup cups configuration + hosts: cups_servers + vars: + cups_backup_tarball: /var/tmp/{{ backup_name }}-{{ inventory_hostname }}-cups.tar.gz + tags: cups + tasks: + - name: compress cups configuration + archive: + path: + - /etc/cups/ppd + - /etc/cups/printers.conf + dest: '{{ cups_backup_tarball }}' + mode: 0400 + + - name: fetch backup tarball + fetch: + src: '{{ cups_backup_tarball }}' + dest: '{{ backup_path }}/' + flat: yes + + - name: delete backup tarball from remote host + file: + path: '{{ cups_backup_tarball }}' + state: absent + + +#################### +# WebDAV Directories +#################### +- name: backup webdav directories + hosts: dav_servers + vars_files: ../../roles/sabredav/vars/main.yml + vars: + sabredav_backup_tarball: /var/tmp/{{ backup_name }}-{{ inventory_hostname }}-webdav.tar.gz + tags: dav,sabredav,webdav + tasks: + - name: compress webdav directory + archive: + path: '{{ sabredav_home }}/webdav' + dest: '{{ sabredav_backup_tarball }}' + mode: 0400 + + - name: fetch backup tarball + fetch: + src: '{{ sabredav_backup_tarball }}' + dest: '{{ backup_path }}/' + flat: yes + + - name: delete backup tarball from remote host + file: + path: '{{ sabredav_backup_tarball }}' + state: absent + + +############### +# Hastebin Data +############### +- name: backup hastebin data + hosts: pastebin_servers + vars_files: ../../roles/hastebin/vars/main.yml + vars: + hastebin_backup_tarball: /var/tmp/{{ backup_name }}-{{ inventory_hostname }}-hastebin.tar.gz + tags: pastebin,hastebin + tasks: + - name: compress paste directory + archive: + path: '{{ hastebin_data_dir }}' + dest: '{{ hastebin_backup_tarball }}' + mode: 0400 + + - name: fetch backup tarball + fetch: + src: '{{ hastebin_backup_tarball }}' + dest: '{{ backup_path }}/' + flat: yes + + - name: delete backup tarball from remote host + file: + path: '{{ hastebin_backup_tarball }}' + state: absent + + +################## +# Psitransfer Data +################## +- name: backup psitransfer data + hosts: filedrop_servers + vars_files: ../../roles/psitransfer/vars/main.yml + vars: + psitransfer_backup_tarball: /var/tmp/{{ backup_name }}-{{ inventory_hostname }}-psitransfer.tar.gz + tags: psitransfer + tasks: + - name: compress files directory + archive: + path: '{{ psitransfer_data_dir }}' + dest: '{{ psitransfer_backup_tarball }}' + mode: 0400 + + - name: fetch backup tarball + fetch: + src: '{{ psitransfer_backup_tarball }}' + dest: '{{ backup_path }}/' + flat: yes + + - name: delete backup tarball from remote host + file: + path: '{{ 
psitransfer_backup_tarball }}' + state: absent + + +################## +# Apache WWW files +################## +- name: backup public apache files + hosts: web_servers + vars_files: + - ../../roles/apache/vars/main.yml + vars: + apache_backup_tarball: /var/tmp/{{ backup_name }}-{{ inventory_hostname }}-www.tar.gz + tags: apache,www + tasks: + - when: apache_backup_dirs | default([]) | length > 0 + block: + - name: compress www directory + archive: + path: "{{ apache_backup_dirs | map('regex_replace', '^', apache_public_dir~'/') }}" + dest: '{{ apache_backup_tarball }}' + mode: 0400 + + - name: fetch backup tarball + fetch: + src: '{{ apache_backup_tarball }}' + dest: '{{ backup_path }}/' + flat: yes + + - name: delete backup tarball from remote host + file: + path: '{{ apache_backup_tarball }}' + state: absent + + +#################### +# Unifi Controllers +#################### +- name: backup unifi controllers + hosts: unifi_controllers + vars_files: ../../roles/unifi/vars/main.yml + tags: unifi + tasks: + - name: collect autobackup files + find: + paths: '{{ unifi_autobackup_dir }}' + patterns: '*.unf' + file_type: file + register: unifi_autobackups + + - name: fetch most recent autobackup file + fetch: + src: "{{ unifi_autobackups.files | sort(attribute='mtime') | map(attribute='path') | last }}" + dest: '{{ backup_path }}/{{ backup_name }}-{{ inventory_hostname }}-unifi.unf' + flat: yes + + +################ +# FreeIPA Domain +################ +- name: backup freeipa domain + hosts: freeipa_master + vars_files: ../../roles/freeipa_server/vars/main.yml + vars: + freeipa_backup_tarball: /var/tmp/{{ backup_name }}-ipa-{{ freeipa_realm }}.tar.gz + tags: ipa,freeipa + tasks: + - name: create full ipa backup + command: ipa-backup + + - name: collect files in backup directory + find: + paths: '{{ freeipa_backup_dir }}' + patterns: ipa-full-* + file_type: directory + register: freeipa_backups + + - name: compress latest backup + archive: + path: "{{ freeipa_backups.files | sort(attribute='mtime') | map(attribute='path') | last }}" + dest: '{{ freeipa_backup_tarball }}' + mode: 0400 + remove: yes + + - name: fetch backup archive + fetch: + src: '{{ freeipa_backup_tarball }}' + dest: '{{ backup_path }}/' + flat: yes + + - name: delete backup archive from remote host + file: + path: '{{ freeipa_backup_tarball }}' + state: absent + + +############### +# Print summary +############### +- hosts: localhost + tags: always + tasks: + - debug: + msg: Backup {{ backup_name }} written to {{ backup_path }}. 
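+# Example invocation (a sketch; assumes backup_name and backup_path are supplied
+# as extra vars or defaulted in the inventory, and the paths shown are hypothetical):
+#
+#   ansible-playbook playbooks/util/backup.yml \
+#     -e backup_name=$(date +%Y%m%d) \
+#     -e backup_path=/srv/backups \
+#     --tags git,postgresql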
diff --git a/playbooks/util/client_cert.yml b/playbooks/util/client_cert.yml new file mode 100644 index 0000000..c81b298 --- /dev/null +++ b/playbooks/util/client_cert.yml @@ -0,0 +1,71 @@ +- name: generate client certificate + hosts: localhost + connection: local + become: no + vars_prompt: + - name: username + prompt: Enter username for the certificate subject + private: no + - name: passphrase + prompt: Enter password for the p12 file + private: yes + vars: + cert_dir: "{{ lookup('env', 'HOME') }}/pki" + key_size: 2048 + key_path: '{{ cert_dir }}/{{ username }}.key' + csr_path: '{{ cert_dir }}/{{ username }}.csr' + crt_path: '{{ cert_dir }}/{{ username }}.crt' + p12_path: '{{ cert_dir }}/{{ username }}.p12' + profile_id: caIPAclientAuth + tasks: + - name: create output directory + file: + path: '{{ cert_dir }}' + state: directory + + - name: generate private key + openssl_privatekey: + path: '{{ key_path }}' + size: '{{ key_size }}' + mode: 0600 + + - name: generate CSR + openssl_csr: + path: '{{ csr_path }}' + privatekey_path: '{{ key_path }}' + common_name: '{{ username }}' + use_common_name_for_san: no + + - name: request certificate from IPA + shell: + cmd: > + ipa cert-request {{ csr_path }} + --principal {{ username }} + --profile-id {{ profile_id }} + --chain + --certificate-out {{ crt_path }} + + # The openssl_pkcs12 ansible module seems to generate files that can't be + # decrypted by Android clients. The openssl CLI works fine though. + - name: generate PKCS#12 file + command: + cmd: > + openssl pkcs12 -export + -out {{ p12_path }} + -inkey {{ key_path }} + -in {{ crt_path }} + -name {{ username }}@{{ domain }} + -password pass:{{ passphrase | quote }} + creates: '{{ p12_path }}' + + - name: cleanup files + file: + path: '{{ item }}' + state: absent + loop: + - '{{ key_path }}' + - '{{ csr_path }}' + - '{{ crt_path }}' + + - debug: + msg: 'PKCS#12 file written to {{ p12_path }}. Passphrase: {{ passphrase }}' diff --git a/playbooks/util/decomission_host.yml b/playbooks/util/decomission_host.yml new file mode 100644 index 0000000..dae4b16 --- /dev/null +++ b/playbooks/util/decomission_host.yml @@ -0,0 +1,56 @@ +- name: decomission host + hosts: '{{ host }}' + tasks: + - name: delete A record + ipadnsrecord: + ipaadmin_principal: '{{ ipa_user }}' + ipaadmin_password: '{{ ipa_pass }}' + zone_name: '{{ domain }}' + record_name: '{{ host }}' + record_type: A + record_value: '{{ ip }}' + state: absent + delegate_to: '{{ freeipa_master }}' + + - name: delete PTR record + ipadnsrecord: + ipaadmin_principal: '{{ ipa_user }}' + ipaadmin_password: '{{ ipa_pass }}' + zone_name: "{{ ip | ansible.utils.ipaddr('revdns') | regex_replace('^[^.]+\\.', '') }}" + record_name: '{{ ip.split(".") | last }}' + record_type: PTR + record_value: '{{ fqdn ~ "." }}' + state: absent + delegate_to: '{{ freeipa_master }}' + + - name: delete CNAME records + ipadnsrecord: + ipaadmin_principal: '{{ ipa_user }}' + ipaadmin_password: '{{ ipa_pass }}' + zone_name: "{{ domain }}" + record_name: '{{ item.split(".") | first }}' + record_type: CNAME + record_value: '{{ fqdn ~ "." 
}}' + state: absent + delegate_to: '{{ freeipa_master }}' + loop: '{{ cnames }}' + + - name: delete host object + ipahost: + ipaadmin_principal: '{{ ipa_user }}' + ipaadmin_password: '{{ ipa_pass }}' + name: '{{ fqdn }}' + state: absent + delegate_to: '{{ ipa_host }}' + + - name: delete proxmox vm + proxmox_kvm: + node: '{{ proxmox_node }}' + api_host: localhost + api_user: '{{ proxmox_api_user }}' + api_password: '{{ proxmox_api_password }}' + name: '{{ inventory_hostname }}' + force: yes + state: absent + delegate_to: '{{ proxmox_api_host }}' + when: "'proxmox_instances' in group_names" diff --git a/playbooks/util/restore.yml b/playbooks/util/restore.yml new file mode 100644 index 0000000..3a0154c --- /dev/null +++ b/playbooks/util/restore.yml @@ -0,0 +1,477 @@ +################ +# IMAP Mailboxes +################ +- name: restore dovecot mailboxes + hosts: imap_servers + vars_files: ../../roles/dovecot/vars/main.yml + vars: + dovecot_temp_dir: /var/tmp/{{ backup_name }}-{{ inventory_hostname }}-mailboxes + dovecot_backup_tarball: '{{ backup_path }}/{{ backup_name }}-{{ inventory_hostname }}-mailboxes.tar.gz' + dovecot_backup_sieve_tarball: '{{ backup_path }}/{{ backup_name }}-{{ inventory_hostname }}-sieve.tar.gz' + tags: dovecot,imap + tasks: + - name: create temporary directory + file: + path: '{{ dovecot_temp_dir }}' + owner: '{{ dovecot_vmail_user }}' + group: '{{ dovecot_vmail_user }}' + mode: 0770 + state: directory + + - name: extract mailbox tarball + unarchive: + src: '{{ dovecot_backup_tarball }}' + dest: '{{ dovecot_temp_dir }}' + extra_opts: + - --same-owner + - --strip-components=1 + + - name: collect dovecot users + command: doveadm user * + register: dovecot_users + changed_when: no + + - name: import mailboxes + command: >- + doveadm -o plugin/quota= sync -u {{ item | quote }} + mdbox:{{ dovecot_temp_dir }}/{{ item | quote }}/mdbox + loop: '{{ dovecot_users.stdout_lines }}' + + - name: drop FTS indexes + command: doveadm fts rescan -A + + - name: reindex mailboxes + command: doveadm index -A -q * + + - name: delete temporary directory + file: + path: '{{ dovecot_temp_dir }}' + state: absent + + - name: extract sieve scripts + unarchive: + src: '{{ dovecot_backup_sieve_tarball }}' + dest: '{{ dovecot_vmail_dir }}' + extra_opts: + - --same-owner + + +################## +# Rspamd Databases +################## +- name: restore rspamd databases + hosts: rspamd_servers + vars_files: + - ../../roles/redis/vars/main.yml + - ../../roles/rspamd/vars/main.yml + vars: + rspamd_backup_tarball: '{{ backup_path }}/{{ backup_name }}-{{ inventory_hostname }}-rspamd.tar.gz' + tags: rspamd + tasks: + - name: stop redis instances + systemd: + name: redis@{{ item }} + state: stopped + loop: + - '{{ rspamd_redis_port }}' + - '{{ rspamd_redis_bayes_port }}' + + - name: stop rspamd + systemd: + name: rspamd + state: stopped + + - name: extract redis tarballs + unarchive: + src: '{{ rspamd_backup_tarball }}' + dest: '{{ redis_home }}' + extra_opts: + - --strip-components=1 + - --same-owner + + - name: start redis instances + systemd: + name: redis@{{ item }} + state: started + loop: + - '{{ rspamd_redis_port }}' + - '{{ rspamd_redis_bayes_port }}' + + - name: start rspamd + systemd: + name: rspamd + state: started + + +################### +# ZNC Configuration +################### +- name: restore znc configuration + hosts: znc_servers + vars_files: ../../roles/znc/vars/main.yml + vars: + znc_backup_tarball: '{{ backup_path }}/{{ backup_name }}-{{ inventory_hostname }}-znc.tar.gz' + tags: znc + 
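+  # The daemon is stopped for the duration of the restore so it cannot rewrite the
+  # extracted configuration when it shuts down.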
tasks: + - name: stop znc + systemd: + name: znc + state: stopped + + - name: extract config tarball + unarchive: + src: '{{ znc_backup_tarball }}' + dest: '{{ znc_home }}' + extra_opts: + - --strip-components=1 + - --same-owner + + - name: start znc + systemd: + name: znc + state: started + + +######################### +# Syncthing Configuration +######################### +- name: restore syncthing configuration + hosts: syncthing_servers + vars_files: ../../roles/syncthing/vars/main.yml + vars: + syncthing_backup_tarball: '{{ backup_path }}/{{ backup_name }}-{{ inventory_hostname }}-syncthing.tar.gz' + tags: syncthing + tasks: + - name: stop syncthing daemons + command: systemctl stop syncthing-user@* + + - name: extract config tarball + unarchive: + src: '{{ syncthing_backup_tarball }}' + dest: '{{ syncthing_home }}' + extra_opts: + - --strip-components=1 + - --same-owner + + - name: collect syncthing users + find: + paths: '{{ syncthing_home }}' + recurse: no + file_type: directory + register: syncthing_users + + - name: start syncthing daemons + systemd: + name: syncthing-user@{{ item }} + state: started + loop: "{{ syncthing_users.files | map(attribute='path') | map('basename') }}" + + +################## +# Git Repositories +################## +- name: restore git repositories + hosts: git_servers + vars_files: + - ../../roles/gitolite/vars/main.yml + - ../../roles/cgit/vars/main.yml + vars: + git_backup_tarball: '{{ backup_path }}/{{ backup_name }}-{{ inventory_hostname }}-git.tar.gz' + tags: git + tasks: + - name: extract git tarball + unarchive: + src: '{{ git_backup_tarball }}' + dest: '{{ gitolite_home }}' + extra_opts: + - --strip-components=1 + - --same-owner + + - name: clear cgit cache + file: + path: '{{ cgit_cache_dir }}' + owner: apache + mode: 0755 + setype: _default + state: '{{ item }}' + loop: + - absent + - directory + + +###################### +# PostgreSQL Databases +###################### +- name: restore postgresql databases + hosts: postgresql_servers + vars_files: ../../roles/postgresql_server/vars/main.yml + vars: + postgresql_backup_gzip: '{{ backup_path }}/{{ backup_name }}-{{ inventory_hostname }}-pg_dumpall.sql.gz' + postgresql_remote_gzip: /var/tmp/{{ postgresql_backup_gzip | basename }} + tags: postgres,postgresql + tasks: + - name: copy backup gzip to remote host + copy: + src: '{{ postgresql_backup_gzip }}' + dest: '{{ postgresql_remote_gzip }}' + owner: '{{ postgresql_user }}' + group: '{{ postgresql_user }}' + mode: 0400 + + - name: import database backup + shell: gunzip {{ postgresql_remote_gzip | quote }} --to-stdout | psql + become: yes + become_user: '{{ postgresql_user }}' + + - name: delete gzip file from remote host + file: + path: '{{ postgresql_remote_gzip }}' + state: absent + + +######################## +# Jellyfin Configuration +######################## +- name: restore jellyfin configuration + hosts: jellyfin_servers + vars_files: ../../roles/jellyfin/vars/main.yml + vars: + jellyfin_backup_tarball: '{{ backup_path }}/{{ backup_name }}-{{ inventory_hostname }}-jellyfin.tar.gz' + tags: jellyfin + tasks: + - name: stop jellyfin + systemd: + name: jellyfin + state: stopped + + - name: extract backup tarball + unarchive: + src: '{{ jellyfin_backup_tarball }}' + dest: / + extra_opts: + - --same-owner + + - name: start jellyfin + systemd: + name: jellyfin + state: started + + +################## +# Mediawiki Images +################## +- name: restore mediawiki images + hosts: wiki_servers + vars_files: ../../roles/mediawiki/vars/main.yml + 
vars: + mediawiki_backup_tarball: '{{ backup_path }}/{{ backup_name }}-{{ inventory_hostname }}-mediawiki.tar.gz' + tags: mediawiki,wiki + tasks: + - name: extract backup tarball + unarchive: + src: '{{ mediawiki_backup_tarball }}' + dest: '{{ mediawiki_home }}/images' + extra_opts: + - --strip-components=1 + - --same-owner + + +######################### +# Photostructure Database +######################### +- name: restore photostructure database + hosts: photostructure_servers + vars_files: + - ../../roles/photostructure/vars/main.yml + vars: + photostructure_backup_tarball: '{{ backup_path }}/{{ backup_name }}-{{ inventory_hostname }}-photostructure.tar' + tags: photostructure + tasks: + - name: stop photostructure + systemd: + name: photostructure + state: stopped + + - name: extract backup tarball + unarchive: + src: '{{ photostructure_backup_tarball }}' + dest: '{{ photostructure_library }}' + extra_opts: + - --strip-components=1 + - --same-owner + + - name: start photostructure + systemd: + name: photostructure + state: started + + +#################### +# Cups Configuration +#################### +- name: restore cups configuration + hosts: cups_servers + vars: + cups_backup_tarball: '{{ backup_path }}/{{ backup_name }}-{{ inventory_hostname }}-cups.tar.gz' + tags: cups + tasks: + - name: stop cups + systemd: + name: cups + state: stopped + + - name: extract backup tarball + unarchive: + src: '{{ cups_backup_tarball }}' + dest: /etc/cups + extra_opts: + - --same-owner + + - name: start cups + systemd: + name: cups + state: started + + +############### +# Asterisk Data +############### +- name: restore asterisk data + hosts: asterisk_servers + vars_files: ../../roles/asterisk/vars/main.yml + vars: + asterisk_backup_tarball: '{{ backup_path }}/{{ backup_name }}-{{ inventory_hostname }}-asterisk.tar.gz' + tags: asterisk + tasks: + - name: stop asterisk + systemd: + name: asterisk + state: stopped + + - name: extract backup tarball + unarchive: + src: '{{ asterisk_backup_tarball }}' + dest: '{{ asterisk_data_dir }}' + extra_opts: + - --strip-components=1 + - --same-owner + + - name: start asterisk + systemd: + name: asterisk + state: started + + +#################### +# WebDAV Directories +#################### +- name: restore webdav directories + hosts: dav_servers + vars_files: ../../roles/sabredav/vars/main.yml + vars: + sabredav_backup_tarball: '{{ backup_path }}/{{ backup_name }}-{{ inventory_hostname }}-webdav.tar.gz' + tags: sabredav,dav,webdav + tasks: + - name: extract backup tarball + unarchive: + src: '{{ sabredav_backup_tarball }}' + dest: '{{ sabredav_home }}/webdav' + extra_opts: + - --strip-components=1 + - --same-owner + + +############### +# Hastebin Data +############### +- name: restore hastebin data + hosts: pastebin_servers + vars_files: ../../roles/hastebin/vars/main.yml + vars: + hastebin_backup_tarball: '{{ backup_path }}/{{ backup_name }}-{{ inventory_hostname }}-hastebin.tar.gz' + tags: hastebin,pastebin + tasks: + - name: extract backup tarball + unarchive: + src: '{{ hastebin_backup_tarball }}' + dest: '{{ hastebin_data_dir }}' + extra_opts: + - --strip-components=1 + - --same-owner + + +################## +# Psitransfer Data +################## +- name: restore psitransfer data + hosts: filedrop_servers + vars_files: ../../roles/psitransfer/vars/main.yml + vars: + psitransfer_backup_tarball: '{{ backup_path }}/{{ backup_name }}-{{ inventory_hostname }}-psitransfer.tar.gz' + tags: psitransfer + tasks: + - name: extract backup tarball + unarchive: + src: '{{ 
psitransfer_backup_tarball }}' + dest: '{{ psitransfer_data_dir }}' + extra_opts: + - --strip-components=1 + - --same-owner + + +################## +# Apache WWW Files +################## +- name: restore public apache files + hosts: web_servers + vars_files: ../../roles/apache/vars/main.yml + vars: + apache_backup_tarball: '{{ backup_path }}/{{ backup_name }}-{{ inventory_hostname }}-www.tar.gz' + tags: apache,www + tasks: + - name: extract backup tarball + unarchive: + src: '{{ apache_backup_tarball }}' + dest: '{{ apache_public_dir }}' + extra_opts: + - --same-owner + + +################ +# FreeIPA Domain +################ +- name: restore freeipa domain + hosts: freeipa_master + vars_files: ../../roles/freeipa_server/vars/main.yml + vars: + freeipa_backup_tarball: '{{ backup_path }}/{{ backup_name }}-ipa-{{ freeipa_realm }}.tar.gz' + freeipa_remote_backup_path: '{{ freeipa_backup_dir }}/{{ backup_name }}' + tags: ipa,freeipa + tasks: + # Only restore FreeIPA when explicitly requested - it is quite disruptive. + - when: ansible_run_tags | intersect(['ipa','freeipa']) | length > 0 + block: + - name: create backup directory on remote host + file: + path: '{{ freeipa_remote_backup_path }}' + state: directory + mode: 0700 + + - name: extract backup tarball + unarchive: + src: '{{ freeipa_backup_tarball }}' + dest: '{{ freeipa_remote_backup_path }}' + extra_opts: + - --strip-components=1 + - --same-owner + + - name: restore freeipa domain from backup + command: ipa-restore {{ backup_name | quote }} --unattended --password={{ freeipa_ds_password | quote }} + + - name: clear sssd cache + command: sss_cache -E + + - name: delete backup files from remote host + file: + path: '{{ freeipa_remote_backup_path }}' + state: absent diff --git a/playbooks/util/wireguard_config.yml b/playbooks/util/wireguard_config.yml new file mode 100644 index 0000000..fb98ca4 --- /dev/null +++ b/playbooks/util/wireguard_config.yml @@ -0,0 +1,49 @@ +- name: generate wireguard client config + hosts: localhost + connection: local + become: no + vars_prompt: + - name: client_ip + prompt: Enter client IP address + private: no + vars: + config_path: "{{ lookup('env', 'HOME') }}/{{ organization | replace(' ', '-') | lower }}-wg.conf" + server_pubkey: '{{ wireguard_pubkey }}' + server_port: '{{ wireguard_port | default(51820) }}' + server_host: '{{ wireguard_host }}' + gateway: '{{ vlans.vpn.gateway }}' + dns_server: "{{ vlans.vpn.dns_servers | join(',') }}" + tasks: + - name: generate private key + command: + cmd: wg genkey + register: wg_genkey + changed_when: no + + - name: generate public key + command: + cmd: wg pubkey + stdin: '{{ wg_genkey.stdout }}' + register: wg_pubkey + changed_when: no + + - name: generate wireguard config file + copy: + dest: '{{ config_path }}' + mode: 0600 + content: | + [Interface] + Address = {{ client_ip }}/32 + PrivateKey = {{ wg_genkey.stdout }} + DNS = {{ dns_server }} + + [Peer] + PublicKey = {{ server_pubkey }} + AllowedIPs = 0.0.0.0/0 + Endpoint = {{ server_host }}:{{ server_port }} + + - debug: + msg: 'wireguard client config written to {{ config_path }}' + + - debug: + msg: 'Add the following client to the wireguard server: {{ client_ip }}/32 {{ wg_pubkey.stdout }}' diff --git a/playbooks/webserver_internal.yml b/playbooks/webserver_internal.yml new file mode 100644 index 0000000..eb27c97 --- /dev/null +++ b/playbooks/webserver_internal.yml @@ -0,0 +1,46 @@ +- import_playbook: common.yml + vars: + hostlist: www1 + +- name: configure internal web servers + hosts: www1 + tags: apache + 
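+  # Serves per-user and per-group "pub" directories from NFS via the AliasMatch rules below.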
roles: + - role: apache_vhost + apache_default_vhost: yes + apache_config: | + AliasMatch "^/pub/user/([^/]+)(.*)" "/nfs/user/$1/pub$2" + AliasMatch "^/pub/group/([^/]+)(.*)" "/nfs/group/$1/pub$2" + + + Options -FollowSymLinks +Indexes + AllowOverride None + Require all granted + + + + Options -FollowSymLinks +Indexes + AllowOverride None + Require all granted + + tasks: + - name: generate index.html + tags: apache + copy: + dest: /var/www/html/index.html + content: | + + + + + {{ domain }} webserver + + +

+        <p>
+        This is the {{ organization }} internal webserver. To access files in user or group
+        public directories, try paths like the following:
+        </p>
+        <ul>
+          <li>/pub/user/&lt;username&gt;/</li>
+          <li>/pub/group/&lt;groupname&gt;/</li>
+        </ul>

+ diff --git a/playbooks/webserver_public.yml b/playbooks/webserver_public.yml new file mode 100644 index 0000000..17221e6 --- /dev/null +++ b/playbooks/webserver_public.yml @@ -0,0 +1,38 @@ +- import_playbook: common.yml + vars: + hostlist: dmz-www1 + +- name: configure public web server + hosts: dmz-www1 + roles: + - role: apache_vhost + apache_server_name: www.example.com + apache_server_aliases: [example.com] + apache_canonical_hostname: www.example.com + apache_letsencrypt: yes + apache_document_root: /var/www/www.example.com + tags: apache + + - role: archive_job + tags: archive + archive_name: www + archive_shell: >- + TIMESTAMP=$(date +%Y%m%d%H%M%S); + tar czf "www-${TIMESTAMP}.tar.gz" + --transform "s|^\.|www-${TIMESTAMP}|" + -C "{{ apache_public_dir }}" {% for dir in apache_backup_dirs %}{{ dir | quote }} {% endfor %} + + # prosody letsencrypt proxy + - role: prosody_letsencrypt_proxy + prosody_le_role: master + tags: prosody + + tasks: + - name: create webroot + file: + path: /var/www/www.example.com + state: directory + owner: root + group: webmasters + mode: 02770 + tags: apache diff --git a/playbooks/wiki.yml b/playbooks/wiki.yml new file mode 100644 index 0000000..794eb74 --- /dev/null +++ b/playbooks/wiki.yml @@ -0,0 +1,25 @@ +- import_playbook: common.yml + vars: + hostlist: wiki_servers + +- name: configure mediawiki + hosts: wiki_servers + tags: wiki,mediawiki + roles: + - role: mediawiki + + - role: apache_vhost + apache_default_vhost: yes + apache_document_root: '{{ mediawiki_home }}' + apache_config: '{{ mediawiki_apache_config }}' + tags: apache + + - role: php + php_fpm_environment: '{{ mediawiki_php_environment }}' + php_fpm_admin_values: '{{ mediawiki_php_admin_values }}' + tags: php + + - role: archive_job + archive_name: mediawiki + archive_shell: '{{ mediawiki_archive_shell }}' + tags: archive diff --git a/playbooks/xmpp.yml b/playbooks/xmpp.yml new file mode 100644 index 0000000..38d0ce4 --- /dev/null +++ b/playbooks/xmpp.yml @@ -0,0 +1,9 @@ +- import_playbook: common.yml + vars: + hostlist: xmpp_servers + +- name: configure prosody + hosts: xmpp_servers + tags: xmpp,prosody + roles: + - role: prosody diff --git a/playbooks/yum.yml b/playbooks/yum.yml new file mode 100644 index 0000000..e0c829f --- /dev/null +++ b/playbooks/yum.yml @@ -0,0 +1,33 @@ +- import_playbook: common.yml + vars: + hostlist: yum_mirrors + +- name: configure yum mirrors + hosts: yum_mirrors + tags: yum + roles: + - role: yum_mirror + + - role: apache_vhost + apache_default_vhost: yes + apache_document_root: '{{ yum_mirror_webroot }}' + apache_autoindex: yes + apache_redirect_to_https: no + tags: apache + +- name: configure mirror for local packages + hosts: yum_mirrors + tags: yum + roles: + - role: yum_disable_default_repos + + - role: yum + yum_repositories: + - rocky-baseos + - rocky-appstream + - rocky-extras + - epel + + # nagios_client has to run *after* EPEL repository has been configured. 
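+  # (presumably because the monitoring packages that role installs come from EPEL)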
+ - role: nagios_client + tags: nagios diff --git a/playbooks/znc.yml b/playbooks/znc.yml new file mode 100644 index 0000000..79f3721 --- /dev/null +++ b/playbooks/znc.yml @@ -0,0 +1,14 @@ +- import_playbook: common.yml + vars: + hostlist: znc_servers + +- name: configure znc + hosts: znc_servers + tags: znc + roles: + - role: znc + + - role: archive_job + archive_name: znc + archive_shell: '{{ znc_archive_shell }}' + tags: archive diff --git a/plugins/tests/ip_in_subnet.py b/plugins/tests/ip_in_subnet.py new file mode 100644 index 0000000..0b5a4a6 --- /dev/null +++ b/plugins/tests/ip_in_subnet.py @@ -0,0 +1,16 @@ +import ipaddress + +def test_ip_in_subnet(ip, cidr): + return ipaddress.ip_address(ip) in ipaddress.ip_network(cidr) + +def test_subnet_contains_ip(cidr, ip): + return ipaddress.ip_address(ip) in ipaddress.ip_network(cidr) + +class TestModule(object): + test_map = { + 'in_subnet': test_ip_in_subnet, + 'contains_ip': test_subnet_contains_ip, + } + + def tests(self): + return self.test_map diff --git a/requirements.txt b/requirements.txt new file mode 100644 index 0000000..37ae937 --- /dev/null +++ b/requirements.txt @@ -0,0 +1,4 @@ +# pip3 install --user -r requirements.txt +ansible == 5.8.0 +passlib +netaddr diff --git a/roles/apache/defaults/main.yml b/roles/apache/defaults/main.yml new file mode 100644 index 0000000..b0605ba --- /dev/null +++ b/roles/apache/defaults/main.yml @@ -0,0 +1,11 @@ +apache_use_nfs: no +apache_can_network_relay: yes +apache_can_network_connect: no +apache_can_network_connect_db: no +apache_can_connect_ldap: no +apache_can_sendmail: no +apache_gssapi: no + +apache_sysaccount_username: apache + +apache_backup_dirs: [] diff --git a/roles/apache/files/etc/systemd/system/httpd.service.d/override.conf b/roles/apache/files/etc/systemd/system/httpd.service.d/override.conf new file mode 100644 index 0000000..2b6650f --- /dev/null +++ b/roles/apache/files/etc/systemd/system/httpd.service.d/override.conf @@ -0,0 +1,6 @@ +[Unit] +Wants=httpd-init.service gssproxy.service +After=network.target remote-fs.target nss-lookup.target httpd-init.service gssproxy.service + +[Service] +Environment=GSS_USE_PROXY=yes diff --git a/roles/apache/handlers/main.yml b/roles/apache/handlers/main.yml new file mode 100644 index 0000000..395e802 --- /dev/null +++ b/roles/apache/handlers/main.yml @@ -0,0 +1,9 @@ +- name: restart apache + systemd: + name: httpd + state: restarted + +- name: reload apache + systemd: + name: httpd + state: reloaded diff --git a/roles/apache/meta/main.yml b/roles/apache/meta/main.yml new file mode 100644 index 0000000..742c491 --- /dev/null +++ b/roles/apache/meta/main.yml @@ -0,0 +1,5 @@ +dependencies: + - role: freeipa_system_account + system_account_username: '{{ apache_sysaccount_username }}' + system_account_password: '{{ apache_sysaccount_password }}' + when: apache_gssapi diff --git a/roles/apache/tasks/gssapi.yml b/roles/apache/tasks/gssapi.yml new file mode 100644 index 0000000..c006d54 --- /dev/null +++ b/roles/apache/tasks/gssapi.yml @@ -0,0 +1,49 @@ +- name: create HTTP service principal + ipaservice: + ipaadmin_principal: '{{ ipa_user }}' + ipaadmin_password: '{{ ipa_pass }}' + name: 'HTTP/{{ ansible_fqdn }}' + state: present + +- name: retrieve HTTP keytab + include_role: + name: freeipa_keytab + vars: + keytab_principal: 'HTTP/{{ ansible_fqdn }}' + keytab_path: '{{ apache_keytab }}' + +- name: configure gssproxy for kerberized HTTP + include_role: + name: gssproxy_client + vars: + gssproxy_name: httpd + gssproxy_section: service/HTTP + 
gssproxy_keytab: '{{ apache_keytab }}' + gssproxy_cred_usage: accept + gssproxy_euid: apache + gssproxy_program: /usr/sbin/httpd + +- name: create systemd override directory + file: + path: /etc/systemd/system/httpd.service.d + state: directory + +- name: set GSS_USE_PROXY=yes in httpd environment + copy: + src: etc/systemd/system/httpd.service.d/override.conf + dest: /etc/systemd/system/httpd.service.d/override.conf + register: apache_systemd_unit + notify: restart apache + +- name: reload systemd units + systemd: + daemon_reload: yes + when: apache_systemd_unit.changed + +- name: create gssapi session directory + file: + path: '{{ apache_session_dir }}' + mode: 0700 + owner: apache + group: apache + state: directory diff --git a/roles/apache/tasks/main.yml b/roles/apache/tasks/main.yml new file mode 100644 index 0000000..4892782 --- /dev/null +++ b/roles/apache/tasks/main.yml @@ -0,0 +1,63 @@ +- name: install packages + dnf: + name: '{{ apache_packages }}' + state: present + notify: restart apache + +- name: remove default configuration + copy: + content: | + # this file intentionally empty to avoid clobbering during package upgrades + dest: /etc/httpd/conf.d/welcome.conf + notify: reload apache + +- name: generate config files + template: + src: etc/httpd/{{ item }}.j2 + dest: /etc/httpd/{{ item }} + loop: + - conf/httpd.conf + - conf.d/ssl.conf + - conf.d/letsencrypt.conf + register: apache_global_config + +- name: reload apache + systemd: + name: httpd + state: reloaded + when: apache_global_config.changed + +- name: set selinux booleans + seboolean: + name: '{{ item.sebool }}' + state: '{{ item.value }}' + persistent: yes + loop: + - { sebool: httpd_use_nfs, value: '{{ apache_use_nfs }}' } + - { sebool: httpd_can_network_relay, value: '{{ apache_can_network_relay }}' } + - { sebool: httpd_can_network_connect, value: '{{ apache_can_network_connect }}' } + - { sebool: httpd_can_connect_ldap, value: '{{ apache_gssapi or apache_can_connect_ldap}}' } + - { sebool: httpd_can_network_connect_db, value: '{{ apache_can_network_connect_db }}' } + - { sebool: httpd_can_sendmail, value: '{{ apache_can_sendmail }}' } + tags: selinux + +- name: configure mod_gssapi + import_tasks: gssapi.yml + when: apache_gssapi or apache_use_nfs + +- name: enable apache + systemd: + name: httpd + enabled: yes + state: started + +- name: open firewall ports + firewalld: + service: '{{ item }}' + permanent: yes + immediate: yes + state: enabled + loop: + - http + - https + tags: firewalld diff --git a/roles/apache/templates/etc/httpd/conf.d/letsencrypt.conf.j2 b/roles/apache/templates/etc/httpd/conf.d/letsencrypt.conf.j2 new file mode 100644 index 0000000..60d092e --- /dev/null +++ b/roles/apache/templates/etc/httpd/conf.d/letsencrypt.conf.j2 @@ -0,0 +1,8 @@ +Alias /.well-known/acme-challenge/ {{ apache_letsencrypt_dir}}/.well-known/acme-challenge/ +ProxyPass /.well-known/acme-challenge/ ! 
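+# The "!" target tells mod_proxy not to proxy the ACME challenge path, so challenge
+# files are always served from the Alias above.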
+ + Options None + AllowOverride None + ForceType text/plain + RedirectMatch 404 "^(?!/\.well-known/acme-challenge/[\w-]{43}$)" + diff --git a/roles/apache/templates/etc/httpd/conf.d/ssl.conf.j2 b/roles/apache/templates/etc/httpd/conf.d/ssl.conf.j2 new file mode 100644 index 0000000..eb85a29 --- /dev/null +++ b/roles/apache/templates/etc/httpd/conf.d/ssl.conf.j2 @@ -0,0 +1,17 @@ +Listen 443 https + +SSLPassPhraseDialog exec:/usr/libexec/httpd-ssl-pass-dialog + +SSLSessionCache shmcb:/run/httpd/sslcache(512000) +SSLSessionCacheTimeout 300 + +SSLCryptoDevice builtin + +SSLProtocol all -SSLv3 -TLSv1 -TLSv1.1 +SSLCipherSuite ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-RSA-AES128-GCM-SHA256:ECDHE-ECDSA-AES256-GCM-SHA384:ECDHE-RSA-AES256-GCM-SHA384:ECDHE-ECDSA-CHACHA20-POLY1305:ECDHE-RSA-CHACHA20-POLY1305:DHE-RSA-AES128-GCM-SHA256:DHE-RSA-AES256-GCM-SHA384 +SSLHonorCipherOrder off +SSLSessionTickets off + +# Stapling causes all kinds of hard-to-debug problems on Android clients! +#SSLUseStapling On +#SSLStaplingCache "shmcb:logs/ssl_stapling(32768)" diff --git a/roles/apache/templates/etc/httpd/conf/httpd.conf.j2 b/roles/apache/templates/etc/httpd/conf/httpd.conf.j2 new file mode 100644 index 0000000..d34c4a9 --- /dev/null +++ b/roles/apache/templates/etc/httpd/conf/httpd.conf.j2 @@ -0,0 +1,98 @@ +ServerRoot "/etc/httpd" + +Listen 80 + +Include conf.modules.d/*.conf + +User apache +Group apache + +ServerAdmin root@localhost +ServerName {{ ansible_fqdn }} + +ServerTokens Prod +ServerSignature Off + +# default deny + + AllowOverride none + Require all denied + + +DocumentRoot "{{ apache_public_dir }}/html" + +KeepAlive On + +# relax access to content within {{ apache_public_dir }}. + + AllowOverride None + Require all granted + + +# further relax access to the default document root: + + Options FollowSymLinks + + AllowOverride None + + Require all granted + + +# serve index.html if a directory is requested + + DirectoryIndex index.html + + +# deny .htaccess, .htpasswd + + Require all denied + + +ErrorLog "logs/error_log" + +LogLevel warn + + + LogFormat "%h %l %u %t \"%r\" %>s %b \"%{Referer}i\" \"%{User-Agent}i\"" combined + LogFormat "%h %l %u %t \"%r\" %>s %b" common + + + LogFormat "%h %l %u %t \"%r\" %>s %b \"%{Referer}i\" \"%{User-Agent}i\" %I %O" combinedio + + + CustomLog "logs/access_log" combined + + + + ScriptAlias /cgi-bin/ "{{ apache_public_dir }}/cgi-bin/" + + + + + AllowOverride None + Options None + Require all granted + + + + TypesConfig /etc/mime.types + + AddType application/x-compress .Z + AddType application/x-gzip .gz .tgz + + AddType text/html .shtml + AddOutputFilter INCLUDES .shtml + + +AddDefaultCharset UTF-8 + + + MIMEMagicFile conf/magic + + +EnableSendfile on + +AddOutputFilterByType DEFLATE {{ apache_gzip_types | join(" ") }} + +# Load config files in the "/etc/httpd/conf.d" directory, if any. 
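+# (this is where the apache_vhost role drops its generated vhost-*.conf files)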
+IncludeOptional conf.d/*.conf diff --git a/roles/apache/vars/main.yml b/roles/apache/vars/main.yml new file mode 100644 index 0000000..fa0a293 --- /dev/null +++ b/roles/apache/vars/main.yml @@ -0,0 +1,37 @@ +apache_packages: + - httpd + - mod_ssl + - mod_auth_gssapi + - mod_session + - mod_ldap + +apache_public_dir: /var/www + +apache_session_dir: /var/lib/httpd/session +apache_gssapi_session_key: '{{ apache_session_dir }}/gssapi.key' +apache_letsencrypt_dir: '{{ apache_public_dir }}/letsencrypt' +apache_keytab: /var/lib/gssproxy/clients/apache.keytab + +apache_gzip_types: + - application/javascript + - application/json + - application/rss+xml + - application/vnd.ms-fontobject + - application/x-font + - application/x-font-opentype + - application/x-font-otf + - application/x-font-truetype + - application/x-font-ttf + - application/x-javascript + - application/xhtml+xml + - application/xml + - font/opentype + - font/otf + - font/ttf + - image/svg+xml + - image/x-icon + - text/css + - text/html + - text/javascript + - text/plain + - text/xml diff --git a/roles/apache_vhost/defaults/main.yml b/roles/apache_vhost/defaults/main.yml new file mode 100644 index 0000000..c9bc05c --- /dev/null +++ b/roles/apache_vhost/defaults/main.yml @@ -0,0 +1,14 @@ +apache_server_name: '{{ ansible_fqdn }}' +apache_server_aliases: '{{ [] if apache_letsencrypt else cnames }}' + +apache_default_vhost: no +apache_autoindex: no + +apache_letsencrypt: no +apache_use_ssl: yes +apache_use_http2: yes +apache_redirect_to_https: yes + +apache_ldap_servers: '{{ freeipa_hosts }}' + +apache_config: '' diff --git a/roles/apache_vhost/meta/main.yml b/roles/apache_vhost/meta/main.yml new file mode 100644 index 0000000..98821ae --- /dev/null +++ b/roles/apache_vhost/meta/main.yml @@ -0,0 +1,2 @@ +dependencies: + - role: apache diff --git a/roles/apache_vhost/tasks/main.yml b/roles/apache_vhost/tasks/main.yml new file mode 100644 index 0000000..ebe6fe6 --- /dev/null +++ b/roles/apache_vhost/tasks/main.yml @@ -0,0 +1,18 @@ +- name: request https certificate + include_role: + name: '{{ "certbot" if apache_letsencrypt else "getcert_request" }}' + vars: + certificate_sans: '{{ [apache_server_name] + apache_server_aliases }}' + certificate_path: '{{ apache_certificate_path }}' + certificate_key_path: '{{ apache_certificate_key_path }}' + certificate_use_apache: yes + certificate_hook: systemctl reload httpd + when: apache_use_ssl + +- name: generate vhost configuration + template: + src: etc/httpd/conf.d/vhost.conf.j2 + dest: /etc/httpd/conf.d/vhost-{{ '000-default' if apache_default_vhost else (apache_config_name | default(apache_server_name)) }}.conf + mode: 0640 + lstrip_blocks: yes + notify: reload apache diff --git a/roles/apache_vhost/templates/etc/httpd/conf.d/vhost.conf.j2 b/roles/apache_vhost/templates/etc/httpd/conf.d/vhost.conf.j2 new file mode 100644 index 0000000..a925372 --- /dev/null +++ b/roles/apache_vhost/templates/etc/httpd/conf.d/vhost.conf.j2 @@ -0,0 +1,135 @@ +{% if apache_use_ssl and apache_redirect_to_https %} + + ServerName {{ apache_server_name }} + {% for alias in apache_server_aliases %} + ServerAlias {{ alias }} + {% endfor %} + + ErrorLog "logs/{{ apache_server_name }}.error_log" + CustomLog "logs/{{ apache_server_name }}.access_log" combined + + Protocols {% if apache_use_http2 %}h2c {% endif %}http/1.1 + + RewriteEngine On + RewriteCond %{REQUEST_URI} !^\/\.well-known\/acme-challenge\/.*$ + RewriteRule (.*) https://%{HTTP_HOST}$1 [R=permanent,L] + + +{% else %} + +{% if apache_canonical_hostname is 
defined and (apache_server_aliases | length > 0) %} + + {% for alias in ([apache_server_name] + apache_server_aliases) | reject('equalto', apache_canonical_hostname) | list %} + {% if loop.first %} + ServerName {{ alias }} + {% else %} + ServerAlias {{ alias }} + {% endif %} + {% endfor %} + + ErrorLog "logs/{{ apache_server_name }}.error_log" + CustomLog "logs/{{ apache_server_name }}.access_log" combined + + Protocols {% if apache_use_http2 %}h2c {% endif %}http/1.1 + + RedirectMatch Permanent ^(?!/\.well-known/acme-challenge/).* http://{{ apache_canonical_hostname }}/$0 + +{% endif %} + + + {% if apache_document_root is defined %} + DocumentRoot "{{ apache_document_root }}" + {% endif %} + + {% if apache_canonical_hostname is defined %} + ServerName {{ apache_canonical_hostname }} + {% else %} + ServerName {{ apache_server_name }} + {% for alias in apache_server_aliases %} + ServerAlias {{ alias }} + {% endfor %} + {% endif %} + + ErrorLog "logs/{{ apache_server_name }}.error_log" + CustomLog "logs/{{ apache_server_name }}.access_log" combined + + Protocols {% if apache_use_http2 %}h2c {% endif %}http/1.1 + + {% if apache_document_root is defined and not apache_config is search('') %} + + Options +FollowSymLinks + AllowOverride None + Require all granted + {% if apache_autoindex %} + Options +Indexes + {% endif %} + + {% endif %} + + {{ apache_config }} + +{% endif %} + +{% if apache_use_ssl %} + +{% if apache_canonical_hostname is defined and (apache_server_aliases | length > 0) %} + + {% for alias in ([apache_server_name] + apache_server_aliases) | reject('equalto', apache_canonical_hostname) | list %} + {% if loop.first %} + ServerName {{ alias }} + {% else %} + ServerAlias {{ alias }} + {% endif %} + {% endfor %} + + ErrorLog "logs/{{ apache_server_name }}.error_log" + CustomLog "logs/{{ apache_server_name }}.access_log" combined + + Protocols {% if apache_use_http2 %}h2 {% endif %}http/1.1 + + SSLEngine on + SSLCertificateFile {{ apache_certificate_path }} + SSLCertificateKeyFile {{ apache_certificate_key_path }} + Header always set Strict-Transport-Security "max-age=63072000" + + Redirect permanent / https://{{ apache_canonical_hostname }}/ + +{% endif %} + + + {% if apache_document_root is defined %} + DocumentRoot "{{ apache_document_root }}" + {% endif %} + {% if apache_canonical_hostname is defined %} + ServerName {{ apache_canonical_hostname }} + {% else %} + ServerName {{ apache_server_name }} + {% for alias in apache_server_aliases %} + ServerAlias {{ alias }} + {% endfor %} + {% endif %} + + ErrorLog "logs/{{ apache_server_name }}.error_log" + CustomLog "logs/{{ apache_server_name }}.access_log" combined + + Protocols {% if apache_use_http2 %}h2 {% endif %}http/1.1 + + SSLEngine on + SSLCertificateFile {{ apache_certificate_path }} + SSLCertificateKeyFile {{ apache_certificate_key_path }} + Header always set Strict-Transport-Security "max-age=63072000" + + {% if apache_document_root is defined and not apache_config is search('') %} + + Options +FollowSymLinks + AllowOverride None + Require all granted + {% if apache_autoindex %} + Options +Indexes + {% endif %} + + {% endif %} + + {{ apache_config }} + +{% endif %} diff --git a/roles/apache_vhost/vars/main.yml b/roles/apache_vhost/vars/main.yml new file mode 100644 index 0000000..bbfba62 --- /dev/null +++ b/roles/apache_vhost/vars/main.yml @@ -0,0 +1,26 @@ +apache_certificate_path: /etc/pki/tls/certs/httpd-{{ apache_server_name }}.pem +apache_certificate_key_path: /etc/pki/tls/private/httpd-{{ apache_server_name }}.key + 
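+# The fragments below are not referenced by the vhost template itself; they appear to
+# be reusable snippets meant to be spliced into apache_config by other roles or host vars.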
+apache_ldap_url: "ldaps://{{ freeipa_hosts | join(' ') }}/{{ freeipa_user_basedn }}" +apache_ldap_creds: | + AuthLDAPBindDN uid={{ apache_sysaccount_username }},{{ freeipa_sysaccount_basedn }} + AuthLDAPBindPassword {{ apache_sysaccount_password }} +apache_ldap_config: | + AuthLDAPUrl "{{ apache_ldap_url }}?uid" + {{ apache_ldap_creds }} + +apache_gssapi_session_config: | + GssapiUseSessions On + Session On + SessionCookieName gssapi_session path=/;httponly;secure;samesite=strict + GssapiSessionKey file:{{ apache_gssapi_session_key }} + +apache_proxy_vhost_config: | + ProxyPreserveHost On + ProxyRequests Off +apache_proxy_header_config: | + RequestHeader set X-Forwarded-Proto "https" + RequestHeader set X-Real-IP %{REMOTE_ADDR}s +apache_proxy_config: | + {{ apache_proxy_vhost_config }} + {{ apache_proxy_header_config }} diff --git a/roles/archive_client/defaults/main.yml b/roles/archive_client/defaults/main.yml new file mode 100644 index 0000000..42d3aa7 --- /dev/null +++ b/roles/archive_client/defaults/main.yml @@ -0,0 +1,4 @@ +archive_server_user: s-archiver +archive_cleanup_on_calendar: daily +archive_cleanup_older_than_days: 7 +archive_server: '{{ groups.archive_servers | first }}' diff --git a/roles/archive_client/tasks/main.yml b/roles/archive_client/tasks/main.yml new file mode 100644 index 0000000..8730407 --- /dev/null +++ b/roles/archive_client/tasks/main.yml @@ -0,0 +1,49 @@ +- name: install rsync + dnf: + name: rsync + state: present + +- name: add host to archive clients hostgroup + ipahostgroup: + ipaadmin_principal: '{{ ipa_user }}' + ipaadmin_password: '{{ ipa_pass }}' + name: '{{ archive_clients_hostgroup }}' + host: '{{ ansible_fqdn }}' + action: member + state: present + +- name: create archive directory + file: + path: '{{ archive_path }}' + state: directory + owner: root + group: root + mode: 0755 + +- name: set default user acl for archive directory + acl: + path: '{{ archive_path }}' + default: yes + entity: '{{ archive_server_user }}' + etype: user + permissions: rwX + state: present + +- name: set default group acl for archive directory + acl: + path: '{{ archive_path }}' + default: yes + etype: group + permissions: rwX + state: present + +- name: set up archive-cleanup timer + include_role: + name: systemd_timer + vars: + timer_name: archive-cleanup + timer_description: Cleanup old archive files + timer_after: network.target + timer_on_calendar: '{{ archive_cleanup_on_calendar }}' + timer_user: root + timer_exec: find {{ archive_path }} -type f -mtime +{{ archive_cleanup_older_than_days }} -exec rm {} + diff --git a/roles/archive_client/vars/main.yml b/roles/archive_client/vars/main.yml new file mode 100644 index 0000000..26b8e73 --- /dev/null +++ b/roles/archive_client/vars/main.yml @@ -0,0 +1,2 @@ +archive_path: /var/spool/archive +archive_clients_hostgroup: archive_clients diff --git a/roles/archive_job/defaults/main.yml b/roles/archive_job/defaults/main.yml new file mode 100644 index 0000000..901c2f0 --- /dev/null +++ b/roles/archive_job/defaults/main.yml @@ -0,0 +1,8 @@ +# archive_name: someprog +# archive_command: command with args +# OR +# archive_shell: some $shell | command +archive_description: 'archive {{ archive_name }}' +archive_on_calendar: weekly +archive_user: root +archive_group: '{{ archive_user }}' diff --git a/roles/archive_job/meta/main.yml b/roles/archive_job/meta/main.yml new file mode 100644 index 0000000..7a6d863 --- /dev/null +++ b/roles/archive_job/meta/main.yml @@ -0,0 +1,2 @@ +dependencies: + - role: archive_client diff --git 
a/roles/archive_job/tasks/main.yml b/roles/archive_job/tasks/main.yml new file mode 100644 index 0000000..83807b9 --- /dev/null +++ b/roles/archive_job/tasks/main.yml @@ -0,0 +1,19 @@ +- name: create archive client directory + file: + path: '{{ archive_path }}/{{ archive_name }}' + state: directory + owner: root + group: '{{ archive_group | default(archive_name) }}' + mode: 0770 + +- name: create systemd timer + include_role: + name: systemd_timer + vars: + timer_name: 'archive-{{ archive_name }}' + timer_description: '{{ archive_description }}' + timer_on_calendar: '{{ archive_on_calendar }}' + timer_user: '{{ archive_user }}' + timer_chdir: '{{ archive_path }}/{{ archive_name }}' + timer_shell: '{{ archive_shell is defined }}' + timer_exec: '{{ archive_shell | default(archive_command) }}' diff --git a/roles/archive_server/defaults/main.yml b/roles/archive_server/defaults/main.yml new file mode 100644 index 0000000..150a652 --- /dev/null +++ b/roles/archive_server/defaults/main.yml @@ -0,0 +1,4 @@ +archive_dest_path: /nfs/archive +archive_user: s-archiver +archive_on_calendar: '*-*-* 23:00:00' +archive_retention_days: 365 diff --git a/roles/archive_server/files/usr/local/libexec/archiver/archive_edgeswitch b/roles/archive_server/files/usr/local/libexec/archiver/archive_edgeswitch new file mode 100644 index 0000000..43979de --- /dev/null +++ b/roles/archive_server/files/usr/local/libexec/archiver/archive_edgeswitch @@ -0,0 +1,36 @@ +#!/bin/bash + +set -Eeu -o pipefail + +HOST=$1 +USERNAME=$2 +PASSWORD=$3 + +TIMESTAMP=$(date +%Y%m%d%H%M%S) +COOKIE_JAR=$(mktemp -t archiver-XXXXXX) + +trap 'rm -f "$COOKIE_JAR"' EXIT + +curl -sSfk \ + -o /dev/null \ + -c "$COOKIE_JAR" \ + -H "Referer: https://${HOST}/htdocs/login/login.lsp" \ + --data-urlencode "username=${USERNAME}" \ + --data-urlencode "password=${PASSWORD}" \ + -d 'accept_eula=0&require_eula=0' \ + "https://${HOST}/htdocs/login/login.lua" + +curl -sSfk \ + -o /dev/null \ + -c "$COOKIE_JAR" \ + -b "$COOKIE_JAR" \ + -H "Referer: https://${HOST}/htdocs/pages/base/file_upload_modal.lsp?filetypes=6&protocol=6" \ + --data-urlencode 'file_type_sel[]=config' \ + "https://${HOST}/htdocs/lua/ajax/file_upload_ajax.lua?protocol=6" + +curl -sSfk \ + -o "config-${TIMESTAMP}.scr" \ + -c "$COOKIE_JAR" \ + -b "$COOKIE_JAR" \ + -H "Referer: https://${HOST}/htdocs/pages/base/file_upload_modal.lsp?filetypes=6&protocol=6" \ + "https://${HOST}/htdocs/pages/base/http_download_file.lua?filepath=/mnt/download/TempConfigScript.scr" diff --git a/roles/archive_server/files/usr/local/libexec/archiver/archive_opnsense b/roles/archive_server/files/usr/local/libexec/archiver/archive_opnsense new file mode 100644 index 0000000..a51a068 --- /dev/null +++ b/roles/archive_server/files/usr/local/libexec/archiver/archive_opnsense @@ -0,0 +1,12 @@ +#!/bin/bash + +set -Eeu -o pipefail + +HOST=$1 +KEY=$2 +SECRET=$3 + +URL=https://${HOST}/api/backup/backup/download +TIMESTAMP=$(date +%Y%m%d%H%M%S) + +curl -sSfk -u "${KEY}:${SECRET}" -o "opnsense-${TIMESTAMP}.xml" "$URL" diff --git a/roles/archive_server/tasks/freeipa.yml b/roles/archive_server/tasks/freeipa.yml new file mode 100644 index 0000000..f0920f3 --- /dev/null +++ b/roles/archive_server/tasks/freeipa.yml @@ -0,0 +1,51 @@ +- name: create freeipa user + ipauser: + ipaadmin_principal: '{{ ipa_user }}' + ipaadmin_password: '{{ ipa_pass }}' + name: '{{ archive_user }}' + loginshell: /bin/bash + homedir: '{{ archive_home }}' + givenname: archive + sn: Service Account + state: present + run_once: True + +- name: create 
archive-clients hostgroup + ipahostgroup: + ipaadmin_principal: '{{ ipa_user }}' + ipaadmin_password: '{{ ipa_pass }}' + name: '{{ archive_clients_hbac_hostgroup }}' + description: Archive Clients + state: present + run_once: True + +- name: create HBAC rule for ssh + ipahbacrule: + ipaadmin_principal: '{{ ipa_user }}' + ipaadmin_password: '{{ ipa_pass }}' + name: archive_ssh_to_archive_clients + description: Allow archive user to ssh to archive clients + user: + - '{{ archive_user }}' + hostgroup: + - '{{ archive_clients_hbac_hostgroup }}' + hbacsvc: sshd + run_once: True + +- name: retrieve user keytab + include_role: + name: freeipa_keytab + vars: + keytab_principal: '{{ archive_user }}' + keytab_path: '{{ archive_keytab }}' + +- name: configure gssproxy for kerberized nfs + include_role: + name: gssproxy_client + vars: + gssproxy_name: archiver + gssproxy_section: service/archiver + gssproxy_keytab: /etc/krb5.keytab + gssproxy_client_keytab: '{{ archive_keytab }}' + gssproxy_cred_usage: initiate + gssproxy_euid: '{{ archive_user }}' diff --git a/roles/archive_server/tasks/main.yml b/roles/archive_server/tasks/main.yml new file mode 100644 index 0000000..d1bed55 --- /dev/null +++ b/roles/archive_server/tasks/main.yml @@ -0,0 +1,79 @@ +- import_tasks: freeipa.yml + +- name: install rsync + dnf: + name: rsync + state: present + +- name: create home directory + file: + path: '{{ archive_home }}' + owner: '{{ archive_user }}' + group: '{{ archive_user }}' + mode: 0700 + state: directory + +- name: create ssh directory + file: + path: '{{ archive_home }}/.ssh' + owner: '{{ archive_user }}' + group: '{{ archive_user }}' + mode: 0700 + state: directory + +- name: copy ssh privkey + copy: + content: '{{ archive_ssh_privkey }}' + dest: "{{ archive_home }}/.ssh/id_{{ archive_ssh_pubkey | regex_replace('^ssh-(\\w+).*', '\\1') }}" + owner: '{{ archive_user }}' + group: '{{ archive_user }}' + mode: 0600 + +- name: generate archiver script + template: + src: '{{ archive_script_path[1:] }}.j2' + dest: '{{ archive_script_path }}' + mode: 0555 + +- name: create plugin directory + file: + path: '{{ archive_plugin_dir }}' + state: directory + +- name: copy plugins + copy: + src: '{{ item.src }}' + dest: '{{ archive_plugin_dir }}/{{ item.path }}' + mode: 0555 + loop: "{{ lookup('filetree', archive_plugin_dir[1:], wantlist=True) }}" + when: item.state == 'file' + +- name: generate configuration + template: + src: '{{ archive_config_path[1:] }}.j2' + dest: '{{ archive_config_path }}' + owner: '{{ archive_user }}' + group: '{{ archive_user }}' + mode: 0440 + +- name: create SELinux policy to avoid logspam + include_role: + name: selinux_policy + apply: + tags: selinux + vars: + selinux_policy_name: ssh_gssproxy + selinux_policy_te: '{{ archive_selinux_policy_te }}' + tags: selinux + +- name: create systemd timer + include_role: + name: systemd_timer + vars: + timer_name: archiver + timer_description: Remote file archiver + timer_after: nss-user-lookup.target network-online.target gssproxy.service + timer_on_calendar: '{{ archive_on_calendar }}' + timer_user: '{{ archive_user }}' + timer_exec: '{{ archive_script_path }}' + timer_persistent: no diff --git a/roles/archive_server/templates/etc/archiver.conf.j2 b/roles/archive_server/templates/etc/archiver.conf.j2 new file mode 100644 index 0000000..d598a39 --- /dev/null +++ b/roles/archive_server/templates/etc/archiver.conf.j2 @@ -0,0 +1,16 @@ +# The format of this file is: +# +# HOST PLUGIN_NAME [ARGS...] 
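+# e.g. (hypothetical host and credentials):
+#   switch1  archive_edgeswitch  backupuser  s3cretpass
+#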
+# +# Beware, each line is naively split on whitespace to tokenize the arguments. +# Quoting, escaping, shell characters, etc are NOT supported. + +# opnsense firewalls +{% for host in groups.opnsense_firewalls %} +{{ host }} archive_opnsense {{ hostvars[host].opnsense_backup_api_key }} {{ hostvars[host].opnsense_backup_api_secret }} +{% endfor %} + +# edgeswitches +{% for host in groups.switches %} +{{ host }} archive_edgeswitch {{ hostvars[host].edgeswitch_backup_username }} {{ hostvars[host].edgeswitch_backup_password }} +{% endfor %} diff --git a/roles/archive_server/templates/usr/local/bin/archiver.sh.j2 b/roles/archive_server/templates/usr/local/bin/archiver.sh.j2 new file mode 100644 index 0000000..582b776 --- /dev/null +++ b/roles/archive_server/templates/usr/local/bin/archiver.sh.j2 @@ -0,0 +1,99 @@ +#!/bin/bash + +set -Eeu -o pipefail + +shopt -s dotglob + +CLIENT_HOSTGROUP={{ archive_clients_hbac_hostgroup | quote}} +ARCHIVE_SRC={{ archive_source_path | quote }} +ARCHIVE_DEST={{ archive_dest_path | quote }} +ARCHIVE_PLUGIN_DIR={{ archive_plugin_dir | quote }} +ARCHIVE_CONFIG={{ archive_config_path }} +ARCHIVE_HOME={{ archive_home | quote }} +ARCHIVE_RETENTION_DAYS={{ archive_retention_days | quote }} +DOMAIN={{ ansible_domain }} +{% raw %} +export GSS_USE_PROXY=yes + +RSYNC_ARGS=( + --recursive + --ignore-existing + --links + --perms + --no-group + --chmod=D2770,F440 + --times + --omit-dir-times + --prune-empty-dirs + --remove-source-files + --human-readable + --itemize-changes +) + +FAILED_HOSTS=() + +trap 'rm -rf "$TMPDIR"' EXIT + +############ +# First, archive the /var/spool/archive directory for all hosts in the +# archive clients host group via ssh. +############ +readarray -t HOSTS < <(ipa hostgroup-show "$CLIENT_HOSTGROUP" --raw \ + | awk '$1 == "member:" { match($2, /^fqdn=([^,]+),/, m); print m[1] }') + +for HOST in "${HOSTS[@]}"; do + echo "archiving ${HOST}..." + TMPDIR=$(mktemp -d "${ARCHIVE_HOME}/.archiver-XXXXXX") + rsync "${RSYNC_ARGS[@]}" "${HOST}:${ARCHIVE_SRC}/" "$TMPDIR" && RC=$? || RC=$? + + if (( RC == 0 )); then + mkdir -p "${ARCHIVE_DEST}/${HOST}" + find "$TMPDIR" -mindepth 2 -maxdepth 2 -print0 | xargs -0 -I{} cp -rpn {} "${ARCHIVE_DEST}/${HOST}" + else + FAILED_HOSTS+=("$HOST") + fi + + rm -rf "$TMPDIR" +done + + +############ +# Next, we archive hosts that don't support pull via ssh. For each line in +# $ARCHIVE_CONFIG, we run the plugin command inside of a temporary directory and +# then rsync any created files to the archive directory. +############ +grep -v '^\s*$\|^\s*\#' "$ARCHIVE_CONFIG" | while read -r HOST CMD ARGS; do + echo "archiving ${HOST} via script..." + + TMPDIR=$(mktemp -d "${ARCHIVE_HOME}/.archiver-XXXXXX") + pushd "$TMPDIR" > /dev/null + "${ARCHIVE_PLUGIN_DIR}/${CMD}" "$HOST" ${ARGS:-} && RC=$? || RC=$? + popd > /dev/null + + if [[ $HOST = *.* ]]; then + FQDN=$HOST + else + FQDN="${HOST}.${DOMAIN}" + fi + + if (( RC == 0 )); then + mkdir -p "${ARCHIVE_DEST}/${FQDN}" + rsync "${RSYNC_ARGS[@]}" "${TMPDIR}/" "${ARCHIVE_DEST}/${FQDN}" + else + FAILED_HOSTS+=("$HOST") + fi + + rm -rf "$TMPDIR" +done + + +############ +# Prune old archive files. 
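+# Pruning is based purely on mtime; ARCHIVE_RETENTION_DAYS comes from the
+# archive_retention_days role variable (365 days by default).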
+############ +find "$ARCHIVE_DEST" -type f -mtime "+${ARCHIVE_RETENTION_DAYS}" -delete + +if (( ${#FAILED_HOSTS[@]} )); then + echo "the following hosts had errors: ${FAILED_HOSTS[*]}" 1>&2 + exit 1 +fi +{% endraw %} diff --git a/roles/archive_server/vars/main.yml b/roles/archive_server/vars/main.yml new file mode 100644 index 0000000..c59fbce --- /dev/null +++ b/roles/archive_server/vars/main.yml @@ -0,0 +1,22 @@ +archive_home: /var/spool/archive +archive_source_path: /var/spool/archive + +archive_keytab: /var/lib/gssproxy/clients/{{ archive_user }}.keytab +archive_clients_hbac_hostgroup: archive_clients +archive_script_path: /usr/local/bin/archiver.sh +archive_config_path: /etc/archiver.conf + +archive_plugin_dir: /usr/local/libexec/archiver + +archive_selinux_policy_te: | + require { + type gssd_t; + type ssh_exec_t; + type gssproxy_t; + class file getattr; + class key read; + } + + #============= gssproxy_t ============== + allow gssproxy_t gssd_t:key read; + allow gssproxy_t ssh_exec_t:file getattr; diff --git a/roles/asterisk/defaults/main.yml b/roles/asterisk/defaults/main.yml new file mode 100644 index 0000000..e7e5b74 --- /dev/null +++ b/roles/asterisk/defaults/main.yml @@ -0,0 +1,74 @@ +asterisk_local_nets: + - 10.0.0.0/8 + - 172.16.0.0/12 + - 192.168.0.0/16 + +asterisk_timezone: '{{ timezone }}' +asterisk_locale: en_US.UTF-8 + +asterisk_fqdn: '{{ ansible_fqdn }}' +asterisk_from_domain: '{{ email_domain }}' + +asterisk_rtp_port_start: 10000 +asterisk_rtp_port_end: 10999 +asterisk_sip_port: 5060 +asterisk_sip_tls_port: 5061 + +asterisk_http_port: 8088 +asterisk_https_port: 8089 + +asterisk_voicemail_formats: + - wav49 + - gsm + - wav + +asterisk_mail_from: asterisk-noreply@{{ email_domain }} + +asterisk_voicemail_email_subject: 'New voicemail ${VM_MSGNUM} in mailbox ${VM_MAILBOX}' +asterisk_voicemail_email_body: 'Hi ${VM_NAME},\n\nYou have a new voicemail in mailbox ${VM_MAILBOX}.\n\nFrom: ${VM_CALLERID}\nDate: ${VM_DATE}\nDuration: ${VM_DUR}\nMessage Number: ${VM_MSGNUM}' +asterisk_voicemail_email_date_format: '%A, %B %d, %Y at %r' +asterisk_voicemail_min_password: 4 + +asterisk_voicemail_max_message_count: 100 +asterisk_voicemail_max_message_secs: 300 +asterisk_voicemail_max_greeting_secs: 60 +asterisk_voicemail_max_failed_logins: 3 + +asterisk_sip_trunks: + - name: example + host: sip.example.com:5061 + codecs: g722,ulaw + username: testuser + password: testpass + transport: tls + media_encryption: sdes + +asterisk_sip_extensions: + - name: 6001 + context: from-internal + mailbox: 6001@default + cid_name: Test User + username: 6001 + password: testpassword + codecs: g722,ulaw + +asterisk_queues: + - name: home + strategy: ringall + retry: 1 + timeout: 30 + members: + - 6001 + - 6002 + +asterisk_ari_users: + - name: nagios + password: nagios + readonly: yes + +asterisk_voicemail_contexts: + default: + - address: 6001 + password: 1234 + name: John Doe + email: john@example.com diff --git a/roles/asterisk/files/etc/systemd/system/asterisk.service.d/override.conf b/roles/asterisk/files/etc/systemd/system/asterisk.service.d/override.conf new file mode 100644 index 0000000..88f8d60 --- /dev/null +++ b/roles/asterisk/files/etc/systemd/system/asterisk.service.d/override.conf @@ -0,0 +1,6 @@ +[Unit] +After=nss-lookup.target network-online.target + +[Service] +Restart=on-failure +RestartSec=4 diff --git a/roles/asterisk/handlers/main.yml b/roles/asterisk/handlers/main.yml new file mode 100644 index 0000000..b2d74af --- /dev/null +++ b/roles/asterisk/handlers/main.yml @@ -0,0 +1,9 @@ +- 
name: reload asterisk + systemd: + name: asterisk + state: reloaded + +- name: restart asterisk + systemd: + name: asterisk + state: restarted diff --git a/roles/asterisk/meta/main.yml b/roles/asterisk/meta/main.yml new file mode 100644 index 0000000..29230f9 --- /dev/null +++ b/roles/asterisk/meta/main.yml @@ -0,0 +1,4 @@ +dependencies: + - role: yum + yum_repositories: epel + tags: yum diff --git a/roles/asterisk/tasks/main.yml b/roles/asterisk/tasks/main.yml new file mode 100644 index 0000000..7bb259d --- /dev/null +++ b/roles/asterisk/tasks/main.yml @@ -0,0 +1,81 @@ +- name: install packages + dnf: + name: '{{ asterisk_packages }}' + state: present + +- name: create systemd override directory + file: + path: /etc/systemd/system/asterisk.service.d + state: directory + +- name: create systemd unit override + copy: + src: etc/systemd/system/asterisk.service.d/override.conf + dest: /etc/systemd/system/asterisk.service.d/override.conf + notify: restart asterisk + register: asterisk_unit + +- name: reload systemd units + systemd: + daemon_reload: yes + when: asterisk_unit.changed + +- name: download sound files + unarchive: + src: '{{ item.url }}' + remote_src: yes + dest: /usr/share/asterisk/sounds + creates: '/usr/share/asterisk/sounds/hello-world.{{ item.codec }}' + loop: "{{ asterisk_sound_tarballs | dict2items(key_name='codec', value_name='url') }}" + +- name: request public TLS certificate + include_role: + name: certbot + vars: + certificate_sans: ['{{ asterisk_fqdn }}'] + certificate_path: '{{ asterisk_certificate_path }}' + certificate_key_path: '{{ asterisk_certificate_key_path }}' + certificate_owner: asterisk + certificate_hook: systemctl reload asterisk + +- name: request internal HTTPS certificate + include_role: + name: getcert_request + vars: + certificate_sans: ['{{ ansible_fqdn }}'] + certificate_path: '{{ asterisk_https_certificate_path }}' + certificate_key_path: '{{ asterisk_https_certificate_key_path }}' + certificate_owner: asterisk + certificate_hook: systemctl reload asterisk + +- name: generate config files + template: + src: '{{ item.src }}' + dest: /etc/asterisk/{{ item.path | splitext | first }} + owner: asterisk + group: asterisk + mode: 0640 + loop: "{{ lookup('filetree', '../templates/etc/asterisk', wantlist=True) }}" + when: item.state == 'file' + notify: reload asterisk + +- name: open firewall ports + firewalld: + permanent: yes + immediate: yes + port: '{{ item }}' + state: enabled + loop: + - '{{ asterisk_https_port }}/tcp' + - '{{ asterisk_sip_port }}/tcp' + - '{{ asterisk_sip_port }}/udp' + - '{{ asterisk_sip_tls_port }}/tcp' + - '{{ asterisk_sip_tls_port }}/udp' + - '{{ asterisk_rtp_port_start }}-{{ asterisk_rtp_port_end }}/udp' + tags: firewalld + +- name: start asterisk + systemd: + name: asterisk + enabled: yes + state: started diff --git a/roles/asterisk/templates/etc/asterisk/ari.conf.j2 b/roles/asterisk/templates/etc/asterisk/ari.conf.j2 new file mode 100644 index 0000000..cc853c4 --- /dev/null +++ b/roles/asterisk/templates/etc/asterisk/ari.conf.j2 @@ -0,0 +1,11 @@ +[general] +enabled = yes +pretty = no + +{% for user in asterisk_ari_users %} +[{{ user.name }}] +type = user +read_only = {{ 'yes' if (user.readonly | default(true)) else 'no' }} +password = {{ user.password | password_hash('sha512', asterisk_password_salt, rounds=5000) }} +password_format = crypt +{% endfor %} diff --git a/roles/asterisk/templates/etc/asterisk/extensions.conf.j2 b/roles/asterisk/templates/etc/asterisk/extensions.conf.j2 new file mode 100644 index 0000000..09345cf 
--- /dev/null +++ b/roles/asterisk/templates/etc/asterisk/extensions.conf.j2 @@ -0,0 +1,7 @@ +[public] +exten => _X.,1,Hangup(3) + +[default] +exten => _X.,1,Hangup(3) + +{{ asterisk_dialplan }} diff --git a/roles/asterisk/templates/etc/asterisk/http.conf.j2 b/roles/asterisk/templates/etc/asterisk/http.conf.j2 new file mode 100644 index 0000000..d9d92a1 --- /dev/null +++ b/roles/asterisk/templates/etc/asterisk/http.conf.j2 @@ -0,0 +1,13 @@ +[general] +servername = Asterisk +enabled = yes +bindaddr = 127.0.0.1 +bindport = {{ asterisk_http_port }} +enable_status = no +tlsenable = yes +tlsbindaddr = 0.0.0.0:{{ asterisk_https_port }} +tlscertfile = {{ asterisk_https_certificate_path }} +tlsprivatekey = {{ asterisk_https_certificate_key_path }} +tlsdisablev1 = yes +tlsdisablev11 = yes +tlsdisablev12 = no diff --git a/roles/asterisk/templates/etc/asterisk/logger.conf.j2 b/roles/asterisk/templates/etc/asterisk/logger.conf.j2 new file mode 100644 index 0000000..65595d1 --- /dev/null +++ b/roles/asterisk/templates/etc/asterisk/logger.conf.j2 @@ -0,0 +1,3 @@ +[logfiles] +console => verbose(3),notice,warning,error +messages => verbose(3),notice,warning,error diff --git a/roles/asterisk/templates/etc/asterisk/pjsip.conf.j2 b/roles/asterisk/templates/etc/asterisk/pjsip.conf.j2 new file mode 100644 index 0000000..d7dedf8 --- /dev/null +++ b/roles/asterisk/templates/etc/asterisk/pjsip.conf.j2 @@ -0,0 +1,28 @@ +[transport-defaults](!) +type = transport +bind = 0.0.0.0 +local_net = 127.0.0.0/8 +{% for cidr in asterisk_local_nets %} +local_net = {{ cidr }} +{% endfor %} +{% if asterisk_external_ip is defined %} +external_media_address = {{ asterisk_external_ip }} +external_signaling_address = {{ asterisk_external_ip }} +{% endif %} + +[transport-udp](transport-defaults) +protocol = udp + +[transport-tcp](transport-defaults) +protocol = tcp + +[transport-tls](transport-defaults) +protocol = tls +bind = 0.0.0.0:5061 +method = tlsv1_2 +cert_file = {{ asterisk_certificate_path }} +priv_key_file = {{ asterisk_certificate_key_path }} +ca_list_file = {{ asterisk_ca_file }} +verify_client = no +verify_server = yes +allow_reload = yes diff --git a/roles/asterisk/templates/etc/asterisk/pjsip_wizard.conf.j2 b/roles/asterisk/templates/etc/asterisk/pjsip_wizard.conf.j2 new file mode 100644 index 0000000..67a6574 --- /dev/null +++ b/roles/asterisk/templates/etc/asterisk/pjsip_wizard.conf.j2 @@ -0,0 +1,57 @@ +;;;;;;;;;;; +; Trunks +;;;;;;;;;;; + +[trunk-defaults](!) +type = wizard +sends_auth = yes +sends_registrations = yes +endpoint/rtp_symmetric = yes +endpoint/rewrite_contact = yes +endpoint/send_rpid = yes +endpoint/from_domain = {{ asterisk_from_domain }} +endpoint/allow = !all,ulaw +aor/qualify_frequency = 30 + +{% for trunk in asterisk_sip_trunks %} +[{{ trunk.name }}](trunk-defaults) +transport = transport-{{ trunk.transport | default('udp') }} +remote_hosts = {{ trunk.host if trunk.host is string else (trunk.host | join(',')) }} +endpoint/context = from-{{ trunk.name }} +{% if trunk.codecs is defined %} +endpoint/allow = !all,{{ trunk.codecs if trunk.codecs is string else (trunk.codecs | join(',')) }} +{% endif %} +endpoint/media_encryption = {{ trunk.media_encryption | default('no') }} +outbound_auth/username = {{ trunk.username }} +outbound_auth/password = {{ trunk.password }} + +{% endfor %} + + +;;;;;;;;;;;;; +; Extensions +;;;;;;;;;;;;; + +[extension-defaults](!) 
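+;
+; As a rough sketch, the example 6001 extension from the role defaults renders
+; to something like:
+;
+;   [6001](extension-defaults)
+;   endpoint/allow = !all,g722,ulaw
+;   endpoint/context = from-internal
+;   endpoint/mailboxes = 6001@default
+;   endpoint/callerid = Test User <6001>
+;   inbound_auth/username = 6001
+;   inbound_auth/password = testpassword
+;   aor/max_contacts = 1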
+type = wizard +accepts_registrations = yes +accepts_auth = yes +aor/remove_existing = yes +aor/qualify_frequency = 30 +endpoint/allow = !all,g722,ulaw +endpoint/from_domain = {{ asterisk_from_domain }} +endpoint/subscribe_context = subscribe + +{% for ext in asterisk_sip_extensions %} +[{{ ext.name }}](extension-defaults) +{% if ext.codecs is defined %} +endpoint/allow = !all,{{ ext.codecs if ext.codecs is string else (ext.codecs | join(',')) }} +{% endif %} +endpoint/context = {{ ext.context }} +endpoint/mailboxes = {{ ext.mailbox if ext.mailbox is string else (ext.mailbox | join(',')) }} +endpoint/callerid = {{ ext.cid_name }} <{{ ext.cid_number | default(ext.name) }}> +inbound_auth/username = {{ ext.username | default(ext.name) }} +inbound_auth/password = {{ ext.password }} +aor/max_contacts = {{ ext.max_contacts | default(1) }} + +{% endfor %} diff --git a/roles/asterisk/templates/etc/asterisk/queues.conf.j2 b/roles/asterisk/templates/etc/asterisk/queues.conf.j2 new file mode 100644 index 0000000..badecfb --- /dev/null +++ b/roles/asterisk/templates/etc/asterisk/queues.conf.j2 @@ -0,0 +1,46 @@ +[general] +persistentmembers = yes +autofill = yes +monitor-type = MixMonitor +shared_lastcall = yes +log_membername_as_agent = yes + + +{% for queue in asterisk_queues %} +[{{ queue.name }}] +{% if queue.music_class is defined %} +musicclass = {{ queue.music_class }} +{% endif %} +strategy = {{ queue.strategy }} +{% if queue.context is defined %} +context = {{ queue.context }} +{% endif %} +timeout = {{ queue.timeout | default(15) }} +retry = {{ queue.retry | default(5) }} +timeoutpriority = app +{% if queue.weight is defined %} +weight = {{ queue.weight }} +{% endif %} +{% if queue.maxlen is defined %} +maxlen = {{ queue.maxlen }} +{% endif %} +announce-frequency = {{ queue.announce_frequency | default(0) }} +min-announce-frequency = {{ queue.min_announce_frequency | default(15) }} +announce-holdtime = {{ queue.announce_holdtime | default('no') }} +announce-position = {{ queue.announce_position | default('no') }} +periodic-announce-frequency = {{ queue.periodic_announce_frequency | default(0) }} +{% if queue.periodic_announce is defined %} +periodic-announce = {{ queue.periodic_announce if queue.periodic_announce is string else (queue.periodic_announce | join(',')) }} +{% endif %} +{% if queue.monitor_format is defined %} +monitor-format = {{ queue.monitor_format if queue.monitor_format is string else (queue.monitor_format | join('|')) }} +{% endif %} +joinempty = {{ queue.join_empty | default('yes') }} +leavewhenempty = {{ queue.leave_when_empty | default('no') }} +ringinuse = {{ 'yes' if (queue.ring_in_use | default(true)) else 'no' }} +timeoutrestart = yes +{% for ext in queue.members %} +member => PJSIP/{{ ext }},0,{{ asterisk_sip_extensions | selectattr('name', '==', ext) | map(attribute='cid_name') | first | default('') }},PJSIP/{{ ext }} +{% endfor %} + +{% endfor %} diff --git a/roles/asterisk/templates/etc/asterisk/rtp.conf.j2 b/roles/asterisk/templates/etc/asterisk/rtp.conf.j2 new file mode 100644 index 0000000..3d4edc2 --- /dev/null +++ b/roles/asterisk/templates/etc/asterisk/rtp.conf.j2 @@ -0,0 +1,3 @@ +[general] +rtpstart={{ asterisk_rtp_port_start }} +rtpend={{ asterisk_rtp_port_end }} diff --git a/roles/asterisk/templates/etc/asterisk/voicemail.conf.j2 b/roles/asterisk/templates/etc/asterisk/voicemail.conf.j2 new file mode 100644 index 0000000..32b4d0a --- /dev/null +++ b/roles/asterisk/templates/etc/asterisk/voicemail.conf.j2 @@ -0,0 +1,37 @@ +[general] +format={{
asterisk_voicemail_formats | join('|') }} + +serveremail={{ asterisk_mail_from }} +attach=yes +; Maximum number of messages per folder +maxmsg={{ asterisk_voicemail_max_message_count }} +; Maximum length of a voicemail message in seconds +maxsecs={{ asterisk_voicemail_max_message_secs }} +; Minimum length of a voicemail message in seconds for the message to be kept +maxgreet={{ asterisk_voicemail_max_greeting_secs }} +; How many milliseconds to skip forward/back when rew/ff in message playback +skipms=3000 +; How many seconds of silence before we end the recording +maxsilence=10 +; Silence threshold (what we consider silence: the lower, the more sensitive) +silencethreshold=128 +; Max number of failed login attempts +maxlogins={{ asterisk_voicemail_max_failed_logins }} + +emailsubject={{ asterisk_voicemail_email_subject }} +emailbody={{ asterisk_voicemail_email_body }} +emaildateformat={{ asterisk_voicemail_email_date_format }} + +tz=myzone +locale={{ asterisk_locale }} +minpassword={{ asterisk_voicemail_min_password }} + +[zonemessages] +myzone={{ asterisk_timezone }}|'vm-received' Q 'digits/at' IMp + +{% for item in asterisk_voicemail_contexts | dict2items(key_name='context', value_name='mailboxes') %} +[{{ item.context }}] +{% for mailbox in item.mailboxes %} +{{ mailbox.address }} => {{ mailbox.password }},{{ mailbox.name }},{{ mailbox.email if mailbox.email is string else (mailbox.email | join('|')) }},,, +{% endfor %} +{% endfor %} diff --git a/roles/asterisk/vars/main.yml b/roles/asterisk/vars/main.yml new file mode 100644 index 0000000..c4bf58a --- /dev/null +++ b/roles/asterisk/vars/main.yml @@ -0,0 +1,20 @@ +asterisk_packages: + - asterisk + - asterisk-pjsip + - asterisk-voicemail-plain + +asterisk_sound_tarballs: + g722: https://downloads.asterisk.org/pub/telephony/sounds/asterisk-core-sounds-en-g722-current.tar.gz + g729: https://downloads.asterisk.org/pub/telephony/sounds/asterisk-core-sounds-en-g729-current.tar.gz + gsm: https://downloads.asterisk.org/pub/telephony/sounds/asterisk-core-sounds-en-gsm-current.tar.gz + sln16: https://downloads.asterisk.org/pub/telephony/sounds/asterisk-core-sounds-en-sln16-current.tar.gz + ulaw: https://downloads.asterisk.org/pub/telephony/sounds/asterisk-core-sounds-en-ulaw-current.tar.gz + wav: https://downloads.asterisk.org/pub/telephony/sounds/asterisk-core-sounds-en-wav-current.tar.gz + +asterisk_certificate_path: /etc/asterisk/asterisk.crt +asterisk_certificate_key_path: /etc/asterisk/asterisk.key +asterisk_https_certificate_path: /etc/pki/tls/certs/asterisk-https.crt +asterisk_https_certificate_key_path: /etc/pki/tls/private/asterisk-https.key +asterisk_ca_file: /etc/pki/tls/certs/ca-bundle.crt + +asterisk_data_dir: /var/spool/asterisk diff --git a/roles/certbot/defaults/main.yml b/roles/certbot/defaults/main.yml new file mode 100644 index 0000000..9174deb --- /dev/null +++ b/roles/certbot/defaults/main.yml @@ -0,0 +1,10 @@ +certificate_email: 'root@{{ email_domain }}' + +certificate_sans: '{{ [ansible_fqdn] + cnames }}' +certificate_type: ecdsa +certificate_size: 2048 + +certificate_owner: root +certificate_mode: 0400 + +certificate_use_apache: no diff --git a/roles/certbot/files/etc/pki/tls/certbot-post.sh b/roles/certbot/files/etc/pki/tls/certbot-post.sh new file mode 100644 index 0000000..b39ef67 --- /dev/null +++ b/roles/certbot/files/etc/pki/tls/certbot-post.sh @@ -0,0 +1,40 @@ +#!/bin/bash + +exec 1> >(logger -s -t $(basename "$0")) 2>&1 + +usage() { + echo "$0 -c CERT_PATH -k KEY_PATH [-o OWNER] [-m MODE] [POST_COMMAND ...]" + 
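+  # A hypothetical deploy-hook invocation, as composed by the certbot role's
+  # certificate_postcmd_argv, looks roughly like:
+  #   certbot-post.sh -o asterisk -m 400 -k /etc/asterisk/asterisk.key \
+  #     -c /etc/asterisk/asterisk.crt systemctl reload asterisk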
exit 1 +} + +OWNER=root:root +MODE=400 + +while getopts ':c:k:m:o:' opt; do + case $opt in + c) CERT_PATH=$OPTARG ;; + k) KEY_PATH=$OPTARG ;; + m) MODE=$OPTARG ;; + o) OWNER=$OPTARG ;; + *) usage ;; + esac +done + +shift $((OPTIND-1)) + +if [ -z "$CERT_PATH" -o -z "$KEY_PATH" ]; then + usage +fi + +OWNER_USER=${OWNER%:*} +OWNER_GROUP=${OWNER#*:} +OWNER_GROUP=${OWNER_GROUP:-$OWNER_USER} + +install -v -m "$MODE" -o "${OWNER_USER}" -g "${OWNER_GROUP}" "${RENEWED_LINEAGE}/fullchain.pem" "$CERT_PATH" +install -v -m "$MODE" -o "${OWNER_USER}" -g "${OWNER_GROUP}" "${RENEWED_LINEAGE}/privkey.pem" "$KEY_PATH" + +# run post-command +if (($#)); then + echo "running post-command: $*" + "$@" +fi diff --git a/roles/certbot/meta/main.yml b/roles/certbot/meta/main.yml new file mode 100644 index 0000000..29230f9 --- /dev/null +++ b/roles/certbot/meta/main.yml @@ -0,0 +1,4 @@ +dependencies: + - role: yum + yum_repositories: epel + tags: yum diff --git a/roles/certbot/tasks/main.yml b/roles/certbot/tasks/main.yml new file mode 100644 index 0000000..3df7304 --- /dev/null +++ b/roles/certbot/tasks/main.yml @@ -0,0 +1,50 @@ +- name: install certbot + dnf: + name: certbot + state: installed + +- name: allow HTTP through firewall + firewalld: + service: http + permanent: yes + immediate: yes + state: enabled + tags: firewalld + +- name: copy certbot hook script + copy: + src: etc/pki/tls/certbot-post.sh + dest: '{{ certificate_postcmd_path }}' + mode: 0555 + +- name: create certbot webroot path + file: + path: '{{ certificate_webroot_path }}' + state: directory + when: certificate_use_apache + +- name: retrieve certificate from letsencrypt + command: + cmd: >- + certbot certonly + --noninteractive + --agree-tos + --no-eff-email + --key-type {{ certificate_type | lower }} + --rsa-key-size {{ certificate_size }} + --email {{ certificate_email }} + {% if certificate_use_apache %} + --webroot + --webroot-path {{ certificate_webroot_path }} + {% else %} + --standalone + {% endif %} + --deploy-hook {{ certificate_postcmd_argv | quote }} + --domains {{ certificate_sans | join(',') }} + creates: '{{ certificate_path }}' + +- name: enable certbot renew timer + systemd: + name: certbot-renew.timer + enabled: yes + state: started diff --git a/roles/certbot/vars/main.yml b/roles/certbot/vars/main.yml new file mode 100644 index 0000000..9045602 --- /dev/null +++ b/roles/certbot/vars/main.yml @@ -0,0 +1,13 @@ +certificate_postcmd_path: /etc/pki/tls/certbot-post.sh + +certificate_postcmd_argv: >- + {{ certificate_postcmd_path }} + -o {{ certificate_owner }} + -m {{ '%0o' % certificate_mode }} + -k {{ certificate_key_path }} + -c {{ certificate_path }} + {% if certificate_hook is defined %} + {{ certificate_hook }} + {% endif %} + +certificate_webroot_path: /var/www/letsencrypt diff --git a/roles/cgit/defaults/main.yml b/roles/cgit/defaults/main.yml new file mode 100644 index 0000000..72b8b8a --- /dev/null +++ b/roles/cgit/defaults/main.yml @@ -0,0 +1,27 @@ +cgit_clone_prefixes: + - https://{{ ansible_fqdn }} + - ssh://{{ ansible_fqdn }} + +cgit_title: '{{ organization }} Git Repository' +cgit_description: Source code of various {{ organization }} projects. 
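+# With the defaults above, each repository is advertised with both clone URLs,
+# e.g. (hypothetical host) https://git.example.com/foo.git and
+# ssh://git.example.com/foo.git.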
+ +cgit_cache_size: 1000 +cgit_user: git +cgit_project_list: /var/www/git/projects.list +cgit_scan_path: /var/www/git/repositories + +cgit_enable_http_clone: no + +cgit_repository_sort: name +cgit_branch_sort: name + +# cgit_favicon: /path/to/favicon.ico +# cgit_logo: /path/to/logo.png +# cgit_css: /path/to/style.css +# cgit_head_include: /path/to/head/include.html +# cgit_header: /path/to/header.html + +cgit_about_html: > + This is a git repository for various projects hosted by {{ organization }}. + To request commit access or report technical issues, + contact the administrator. diff --git a/roles/cgit/meta/main.yml b/roles/cgit/meta/main.yml new file mode 100644 index 0000000..29230f9 --- /dev/null +++ b/roles/cgit/meta/main.yml @@ -0,0 +1,4 @@ +dependencies: + - role: yum + yum_repositories: epel + tags: yum diff --git a/roles/cgit/tasks/main.yml b/roles/cgit/tasks/main.yml new file mode 100644 index 0000000..67f77f6 --- /dev/null +++ b/roles/cgit/tasks/main.yml @@ -0,0 +1,51 @@ +- name: install cgit + dnf: + name: '{{ cgit_packages }}' + state: present + +- name: generate cgit configuration + template: + src: etc/cgitrc.j2 + dest: /etc/cgitrc + +- name: allow selinux mmap files + seboolean: + name: domain_can_mmap_files + state: yes + persistent: yes + tags: selinux + +- name: create cgit about file + copy: + content: '{{ cgit_about_html }}' + dest: '{{ cgit_static_dir }}/about.html' + +- name: copy custom css + copy: + src: '{{ cgit_css }}' + dest: '{{ cgit_static_dir }}/{{ cgit_css | basename }}' + when: cgit_css is defined + +- name: copy custom logo + copy: + src: '{{ cgit_logo }}' + dest: '{{ cgit_static_dir }}/{{ cgit_logo | basename }}' + when: cgit_logo is defined + +- name: copy custom favicon + copy: + src: '{{ cgit_favicon }}' + dest: '{{ cgit_static_dir }}/{{ cgit_favicon | basename }}' + when: cgit_favicon is defined + +- name: copy custom html head include + copy: + src: '{{ cgit_head_include }}' + dest: '{{ cgit_static_dir }}/{{ cgit_head_include | basename }}' + when: cgit_head_include is defined + +- name: copy custom html header + copy: + src: '{{ cgit_header }}' + dest: '{{ cgit_static_dir }}/{{ cgit_header | basename }}' + when: cgit_header is defined diff --git a/roles/cgit/templates/etc/cgitrc.j2 b/roles/cgit/templates/etc/cgitrc.j2 new file mode 100644 index 0000000..f504896 --- /dev/null +++ b/roles/cgit/templates/etc/cgitrc.j2 @@ -0,0 +1,68 @@ +cache-size={{ cgit_cache_size }} +clone-prefix={{ cgit_clone_prefixes | join(' ') }} +enable-http-clone={{ cgit_enable_http_clone | int }} +enable-blame=1 +enable-commit-graph=1 +enable-log-filecount=1 +enable-log-linecount=1 +branch-sort={{ cgit_branch_sort }} + +# static assets +favicon=/static/{{ cgit_favicon | default('favicon.ico') | basename }} +logo=/static/{{ cgit_logo | default('cgit.png') | basename }} +css=/static/{{ cgit_css | default('cgit.css') | basename }} +{% if cgit_head_include is defined %} +head-include={{ cgit_static_dir }}/{{ cgit_head_include | basename }} +{% endif %} +{% if cgit_header is defined %} +header={{ cgit_static_dir }}/{{ cgit_header | basename }} +{% endif %} + +max-stats=year +root-title={{ cgit_title }} +root-desc={{ cgit_description }} +remove-suffix=1 +root-readme={{ cgit_static_dir }}/about.html +repository-sort={{ cgit_repository_sort }} +snapshots=tar.gz zip +local-time=1 + +mimetype.gif=image/gif +mimetype.html=text/html +mimetype.jpg=image/jpeg +mimetype.jpeg=image/jpeg +mimetype.pdf=application/pdf +mimetype.png=image/png +mimetype.svg=image/svg+xml + 
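+# The stock filter scripts shipped with the cgit package are used below for
+# syntax highlighting and README rendering; they rely on the highlight and
+# python3-markdown packages this role installs.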
+source-filter=/usr/libexec/cgit/filters/syntax-highlighting.sh +about-filter=/usr/libexec/cgit/filters/about-formatting.sh + +readme=:README.md +readme=:readme.md +readme=:README.mkd +readme=:readme.mkd +readme=:README.html +readme=:readme.html +readme=:README.htm +readme=:readme.htm +readme=:README.txt +readme=:readme.txt +readme=:README +readme=:readme +readme=:INSTALL.md +readme=:install.md +readme=:INSTALL.mkd +readme=:install.mkd +readme=:INSTALL.html +readme=:install.html +readme=:INSTALL.htm +readme=:install.htm +readme=:INSTALL.txt +readme=:install.txt +readme=:INSTALL +readme=:install + +enable-git-config=1 +project-list={{ cgit_project_list }} +scan-path={{ cgit_scan_path }} diff --git a/roles/cgit/vars/main.yml b/roles/cgit/vars/main.yml new file mode 100644 index 0000000..53e341d --- /dev/null +++ b/roles/cgit/vars/main.yml @@ -0,0 +1,9 @@ +cgit_packages: + - cgit + - highlight + - python3-markdown + - python3-pygments + +cgit_static_dir: /usr/share/cgit +cgit_cgi_script: /var/www/cgi-bin/cgit +cgit_cache_dir: /var/cache/cgit diff --git a/roles/chrony/defaults/main.yml b/roles/chrony/defaults/main.yml new file mode 100644 index 0000000..63fb947 --- /dev/null +++ b/roles/chrony/defaults/main.yml @@ -0,0 +1 @@ +chrony_ntp_servers: '{{ vlan.ntp_servers }}' diff --git a/roles/chrony/handlers/main.yml b/roles/chrony/handlers/main.yml new file mode 100644 index 0000000..35b9ad2 --- /dev/null +++ b/roles/chrony/handlers/main.yml @@ -0,0 +1,4 @@ +- name: restart chrony + systemd: + name: chronyd + state: restarted diff --git a/roles/chrony/tasks/main.yml b/roles/chrony/tasks/main.yml new file mode 100644 index 0000000..1e94244 --- /dev/null +++ b/roles/chrony/tasks/main.yml @@ -0,0 +1,16 @@ +- name: install chrony + package: + name: chrony + state: present + +- name: generate chrony.conf + template: + src: etc/chrony.conf.j2 + dest: /etc/chrony.conf + notify: restart chrony + +- name: start chrony + systemd: + name: chronyd + state: started + enabled: yes diff --git a/roles/chrony/templates/etc/chrony.conf.j2 b/roles/chrony/templates/etc/chrony.conf.j2 new file mode 100644 index 0000000..ecdcde6 --- /dev/null +++ b/roles/chrony/templates/etc/chrony.conf.j2 @@ -0,0 +1,22 @@ +{% for server in chrony_ntp_servers %} +server {{ server }} iburst +{% endfor %} + +# Record the rate at which the system clock gains/losses time. +driftfile /var/lib/chrony/drift + +# Allow the system clock to be stepped in the first three updates +# if its offset is larger than 1 second. +makestep 1.0 3 + +# Enable kernel synchronization of the real-time clock (RTC). +rtcsync + +# Specify file containing keys for NTP authentication. +keyfile /etc/chrony.keys + +# Get TAI-UTC offset and leap seconds from the system tz database. +leapsectz right/UTC + +# Specify directory for log files. 
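+# (chronyd only writes logs here if specific "log" directives are enabled;
+# none are in this template, so this mainly matters for ad-hoc debugging.)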
+logdir /var/log/chrony diff --git a/roles/coturn/defaults/main.yml b/roles/coturn/defaults/main.yml new file mode 100644 index 0000000..248975a --- /dev/null +++ b/roles/coturn/defaults/main.yml @@ -0,0 +1,4 @@ +coturn_port: 3478 +coturn_min_port: 49152 +coturn_max_port: 65535 +coturn_realm: '{{ ansible_fqdn }}' diff --git a/roles/coturn/handlers/main.yml b/roles/coturn/handlers/main.yml new file mode 100644 index 0000000..a8eb087 --- /dev/null +++ b/roles/coturn/handlers/main.yml @@ -0,0 +1,4 @@ +- name: restart coturn + systemd: + name: coturn + state: restarted diff --git a/roles/coturn/tasks/main.yml b/roles/coturn/tasks/main.yml new file mode 100644 index 0000000..ce1fb30 --- /dev/null +++ b/roles/coturn/tasks/main.yml @@ -0,0 +1,25 @@ +- name: install packages + dnf: + name: '{{ coturn_packages }}' + state: present + +- name: generate coturn configuration + template: + src: etc/coturn/turnserver.conf.j2 + dest: /etc/coturn/turnserver.conf + owner: root + group: coturn + mode: 0640 + notify: restart coturn + +- name: open firewall ports + firewalld: + port: '{{ item }}' + permanent: yes + immediate: yes + state: enabled + loop: + - '{{ coturn_port }}/tcp' + - '{{ coturn_port }}/udp' + - '{{ coturn_min_port }}-{{ coturn_max_port }}/udp' + tags: firewalld diff --git a/roles/coturn/templates/etc/coturn/turnserver.conf.j2 b/roles/coturn/templates/etc/coturn/turnserver.conf.j2 new file mode 100644 index 0000000..33f5d47 --- /dev/null +++ b/roles/coturn/templates/etc/coturn/turnserver.conf.j2 @@ -0,0 +1,46 @@ +listening-port={{ coturn_port }} +tls-listening-port=0 + +listen-ip={{ ansible_default_ipv4.address }} +external-ip={{ coturn_external_ip }} + +min-port={{ coturn_min_port }} +max-port={{ coturn_max_port }} + +use-auth-secret +static-auth-secret={{ coturn_auth_secret }} + +realm={{ coturn_realm }} + +no-tls +no-dtls + +log-file=stdout + +simple-log + +no-software-attribute + +no-multicast-peers +denied-peer-ip=0.0.0.0-0.255.255.255 +denied-peer-ip=10.0.0.0-10.255.255.255 +denied-peer-ip=100.64.0.0-100.127.255.255 +denied-peer-ip=127.0.0.0-127.255.255.255 +denied-peer-ip=169.254.0.0-169.254.255.255 +denied-peer-ip=127.0.0.0-127.255.255.255 +denied-peer-ip=172.16.0.0-172.31.255.255 +denied-peer-ip=192.0.0.0-192.0.0.255 +denied-peer-ip=192.0.2.0-192.0.2.255 +denied-peer-ip=192.88.99.0-192.88.99.255 +denied-peer-ip=192.168.0.0-192.168.255.255 +denied-peer-ip=198.18.0.0-198.19.255.255 +denied-peer-ip=198.51.100.0-198.51.100.255 +denied-peer-ip=203.0.113.0-203.0.113.255 +denied-peer-ip=240.0.0.0-255.255.255.255 +allowed-peer-ip={{ ansible_default_ipv4.address }} + +no-cli + +no-rfc5780 +no-stun-backward-compatibility +response-origin-only-with-rfc5780 diff --git a/roles/coturn/vars/main.yml b/roles/coturn/vars/main.yml new file mode 100644 index 0000000..eb8e04b --- /dev/null +++ b/roles/coturn/vars/main.yml @@ -0,0 +1,2 @@ +coturn_packages: + - coturn diff --git a/roles/cups_client/defaults/main.yml b/roles/cups_client/defaults/main.yml new file mode 100644 index 0000000..ad915f4 --- /dev/null +++ b/roles/cups_client/defaults/main.yml @@ -0,0 +1 @@ +cups_server_name: '{{ cups_host }}' diff --git a/roles/cups_client/handlers/main.yml b/roles/cups_client/handlers/main.yml new file mode 100644 index 0000000..9c3bada --- /dev/null +++ b/roles/cups_client/handlers/main.yml @@ -0,0 +1,4 @@ +- name: restart cups + systemd: + name: cups + state: restarted diff --git a/roles/cups_client/tasks/main.yml b/roles/cups_client/tasks/main.yml new file mode 100644 index 0000000..f0521c1 --- /dev/null +++ 
b/roles/cups_client/tasks/main.yml @@ -0,0 +1,19 @@ +- name: install cups + dnf: + name: '@print-client' + state: present + +- name: generate cups client configuration + template: + src: etc/cups/client.conf.j2 + dest: /etc/cups/client.conf + owner: root + group: lp + mode: 0644 + notify: restart cups + +- name: enable cups + systemd: + name: cups + enabled: yes + state: started diff --git a/roles/cups_client/templates/etc/cups/client.conf.j2 b/roles/cups_client/templates/etc/cups/client.conf.j2 new file mode 100644 index 0000000..0869834 --- /dev/null +++ b/roles/cups_client/templates/etc/cups/client.conf.j2 @@ -0,0 +1,3 @@ +ServerName {{ cups_server_name }}:631 +Encryption Required +ValidateCerts Yes diff --git a/roles/cups_server/defaults/main.yml b/roles/cups_server/defaults/main.yml new file mode 100644 index 0000000..c032530 --- /dev/null +++ b/roles/cups_server/defaults/main.yml @@ -0,0 +1,3 @@ +cups_server_aliases: '{{ cnames }}' +cups_server_admin: root@{{ email_domain }} +cups_admin_group: role-cups-admin diff --git a/roles/cups_server/handlers/main.yml b/roles/cups_server/handlers/main.yml new file mode 100644 index 0000000..9c3bada --- /dev/null +++ b/roles/cups_server/handlers/main.yml @@ -0,0 +1,4 @@ +- name: restart cups + systemd: + name: cups + state: restarted diff --git a/roles/cups_server/tasks/freeipa.yml b/roles/cups_server/tasks/freeipa.yml new file mode 100644 index 0000000..0acb36d --- /dev/null +++ b/roles/cups_server/tasks/freeipa.yml @@ -0,0 +1,58 @@ +- name: create admin group + ipagroup: + ipaadmin_principal: '{{ ipa_user }}' + ipaadmin_password: '{{ ipa_pass }}' + name: '{{ cups_admin_group }}' + nonposix: no + state: present + run_once: yes + +- name: create HBAC service + ipahbacsvc: + ipaadmin_principal: '{{ ipa_user }}' + ipaadmin_password: '{{ ipa_pass }}' + name: '{{ cups_hbac_service }}' + description: CUPS Print Server + state: present + run_once: yes + +- name: create cups-servers hostgroup + ipahostgroup: + ipaadmin_principal: '{{ ipa_user }}' + ipaadmin_password: '{{ ipa_pass }}' + name: '{{ cups_hbac_hostgroup }}' + description: CUPS Servers + host: "{{ groups[cups_hostgroup] | map('regex_replace', '$', '.' 
~ ansible_domain) }}" + run_once: yes + +- name: create HBAC rule for cups-admin + ipahbacrule: + ipaadmin_principal: '{{ ipa_user }}' + ipaadmin_password: '{{ ipa_pass }}' + name: allow_cups_on_cups_servers + description: Allow CUPS admin on CUPS servers + hostgroup: '{{ cups_hbac_hostgroup }}' + group: '{{ cups_admin_group }}' + hbacsvc: '{{ cups_hbac_service }}' + run_once: yes + +- name: generate pam configuration + copy: + content: | + auth required pam_sss.so + account required pam_sss.so + dest: /etc/pam.d/cups + +- name: create HTTP service principal + ipaservice: + ipaadmin_principal: '{{ ipa_user }}' + ipaadmin_password: '{{ ipa_pass }}' + name: 'HTTP/{{ ansible_fqdn }}' + state: present + +- name: retrieve HTTP keytab + include_role: + name: freeipa_keytab + vars: + keytab_principal: 'HTTP/{{ ansible_fqdn }}' + keytab_path: /etc/krb5.keytab diff --git a/roles/cups_server/tasks/main.yml b/roles/cups_server/tasks/main.yml new file mode 100644 index 0000000..b03916e --- /dev/null +++ b/roles/cups_server/tasks/main.yml @@ -0,0 +1,70 @@ +- name: install cups + dnf: + name: cups + state: present + +- name: create certificate directory + file: + path: /etc/pki/tls/cups + state: directory + +- name: request TLS certificate + include_role: + name: getcert_request + vars: + certificate_service: cups + certificate_path: '{{ cups_certificate_path }}' + certificate_key_path: '{{ cups_certificate_key_path }}' + certificate_hook: systemctl restart cups + +- name: generate config files + template: + src: etc/cups/{{ item }}.j2 + dest: /etc/cups/{{ item }} + owner: root + group: lp + mode: 0640 + loop: + - cupsd.conf + - cups-files.conf + notify: restart cups + +- name: allow cups to listen on port 443 + seport: + ports: 443 + proto: tcp + setype: ipp_port_t + state: present + tags: selinux + +- import_tasks: freeipa.yml + tags: freeipa + +- name: enable cups + systemd: + name: cups + enabled: yes + state: started + +- name: forward port 80 to port 631 + firewalld: + permanent: yes + immediate: yes + rich_rule: 'rule family={{ item }} forward-port port=80 protocol=tcp to-port=631' + state: enabled + loop: + - ipv4 + - ipv6 + tags: firewalld + +- name: open firewall ports + firewalld: + permanent: yes + immediate: yes + service: '{{ item }}' + state: enabled + loop: + - ipp + - http + - https + tags: firewalld diff --git a/roles/cups_server/templates/etc/cups/cups-files.conf.j2 b/roles/cups_server/templates/etc/cups/cups-files.conf.j2 new file mode 100644 index 0000000..4550bad --- /dev/null +++ b/roles/cups_server/templates/etc/cups/cups-files.conf.j2 @@ -0,0 +1,9 @@ +# Administrator user group, used to match @SYSTEM in cupsd.conf policy rules... +SystemGroup {{ cups_admin_group }} + +ServerKeychain /etc/pki/tls/cups +CreateSelfSignedCerts no + +AccessLog syslog +ErrorLog syslog +PageLog syslog diff --git a/roles/cups_server/templates/etc/cups/cupsd.conf.j2 b/roles/cups_server/templates/etc/cups/cupsd.conf.j2 new file mode 100644 index 0000000..a2a1032 --- /dev/null +++ b/roles/cups_server/templates/etc/cups/cupsd.conf.j2 @@ -0,0 +1,93 @@ +LogLevel info + +ServerName {{ ansible_fqdn }} +ServerAdmin {{ cups_server_admin }} +{% if cups_server_aliases %} +ServerAlias {{ cups_server_aliases | join(' ') }} +{% endif %} + +# Specifies the maximum size of the log files before they are rotated. The value "0" disables log rotation. +MaxLogSize 1m + +# Default error policy for printers +ErrorPolicy retry-job + +# Only listen for connections from the local machine. 
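+# (The stock comment above no longer applies here: cupsd listens on all
+# interfaces, with plain IPP on 631 and HTTPS on the SSLPort below, and the
+# firewall tasks open ipp/http/https accordingly.)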
+Listen 631 +Listen /run/cups/cups.sock +SSLPort 443 + +# Show shared printers on the local network. +Browsing Off +BrowseLocalProtocols none + +# Default authentication type, when authentication is required... +# Kerberos appears to be broken in cups >=2.2: +# https://github.com/apple/cups/issues/5596 +DefaultAuthType Basic +DefaultEncryption Required + +DefaultShared yes + +# Web interface setting... +WebInterface Yes + +# Timeout after cupsd exits if idle (applied only if cupsd runs on-demand - with -l) +IdleExitTimeout 0 + +# Restrict access to the server... +<Location /> + Order allow,deny + Allow from All +</Location> + +# Restrict access to the admin pages... +<Location /admin> + AuthType Default + Allow from All + Require user @SYSTEM + Order allow,deny +</Location> + +# Set the default printer/job policies... +<Policy default> + # Job/subscription privacy... + JobPrivateAccess default + JobPrivateValues default + SubscriptionPrivateAccess default + SubscriptionPrivateValues default + + # Job-related operations must be done by the owner or an administrator... + <Limit Create-Job Print-Job Print-URI Validate-Job> + Order deny,allow + </Limit> + + <Limit Send-Document Send-URI Hold-Job Release-Job Restart-Job Purge-Jobs Set-Job-Attributes Create-Job-Subscription Renew-Subscription Cancel-Subscription Get-Notifications Reprocess-Job Cancel-Current-Job Suspend-Current-Job Resume-Job Cancel-My-Jobs Close-Job CUPS-Move-Job CUPS-Get-Document> + Require user @OWNER @SYSTEM + Order deny,allow + </Limit> + + # All administration operations require an administrator to authenticate... + <Limit CUPS-Add-Modify-Printer CUPS-Delete-Printer CUPS-Add-Modify-Class CUPS-Delete-Class CUPS-Set-Default CUPS-Get-Devices> + AuthType Default + Require user @SYSTEM + Order deny,allow + </Limit> + + # All printer operations require a printer operator to authenticate... + <Limit Pause-Printer Resume-Printer Enable-Printer Disable-Printer Pause-Printer-After-Current-Job Hold-New-Jobs Release-Held-New-Jobs Deactivate-Printer Activate-Printer Restart-Printer Shutdown-Printer Startup-Printer Promote-Job Schedule-Job-After Cancel-Jobs CUPS-Accept-Jobs CUPS-Reject-Jobs> + AuthType Default + Require user @SYSTEM + Order deny,allow + </Limit> + + # Only the owner or an administrator can cancel or authenticate a job... + <Limit Cancel-Job CUPS-Authenticate-Job> + Require user @OWNER @SYSTEM + Order deny,allow + </Limit> + + <Limit All> + Order deny,allow + </Limit> +</Policy> diff --git a/roles/cups_server/vars/main.yml b/roles/cups_server/vars/main.yml new file mode 100644 index 0000000..98525bf --- /dev/null +++ b/roles/cups_server/vars/main.yml @@ -0,0 +1,14 @@ +cups_hostgroup: cups_servers +cups_certificate_path: /etc/pki/tls/cups/{{ ansible_fqdn }}.crt +cups_certificate_key_path: /etc/pki/tls/cups/{{ ansible_fqdn }}.key + +cups_hbac_hostgroup: cups-servers +cups_hbac_service: cups + +cups_archive_shell: >- + TIMESTAMP=$(date +%Y%m%d%H%M%S); + tar czf "cups-${TIMESTAMP}.tar.gz" + --transform "s|^\.|cups-${TIMESTAMP}|" + -C /etc/cups + ./ppd + ./printers.conf diff --git a/roles/dev_environment/meta/main.yml b/roles/dev_environment/meta/main.yml new file mode 100644 index 0000000..29230f9 --- /dev/null +++ b/roles/dev_environment/meta/main.yml @@ -0,0 +1,4 @@ +dependencies: + - role: yum + yum_repositories: epel + tags: yum diff --git a/roles/dev_environment/tasks/main.yml b/roles/dev_environment/tasks/main.yml new file mode 100644 index 0000000..e936007 --- /dev/null +++ b/roles/dev_environment/tasks/main.yml @@ -0,0 +1,21 @@ +- name: install packages + dnf: + name: '{{ dev_packages }}' + state: present + +- name: install sieveconnect + unarchive: + src: '{{ dev_sieveconnect_url }}' + remote_src: yes + dest: /usr/local/bin + extra_opts: + - --strip=1 + - --wildcards + - '*/sieve-connect.pl' + - --transform + - s/sieve-connect.pl/sieve-connect/ + +- name: set sieveconnect permissions + file: + path: /usr/local/bin/sieve-connect + mode: 0555 diff --git a/roles/dev_environment/vars/main.yml b/roles/dev_environment/vars/main.yml new file mode 100644 index 0000000..94b6fd6 --- /dev/null +++ b/roles/dev_environment/vars/main.yml @@ -0,0 +1,33 @@ +dev_packages: + - '@Development Tools' + - tmux + - traceroute + - vim + - tree + - htop + - stow + - sshpass + - openldap-clients + - pwgen + - ImageMagick + - jq + - wireguard-tools + - pciutils + - usbutils + - perl-Image-ExifTool + - rsync + - wget + - discount + - python3-pip + - postgresql + # sieveconnect
dependencies + - perl-Authen-SASL + - perl-IO-Socket-INET6 + - perl-Net-DNS + - perl-Sys-Hostname + - perl-Term-ReadLine + - perl-Term-ReadLine-Gnu + - perl-TermReadKey + +dev_sieveconnect_version: '0.90' +dev_sieveconnect_url: https://github.com/philpennock/sieve-connect/releases/download/v{{ dev_sieveconnect_version }}/sieve-connect-{{ dev_sieveconnect_version }}.tar.bz2 diff --git a/roles/devd/handlers/main.yml b/roles/devd/handlers/main.yml new file mode 100644 index 0000000..4af217a --- /dev/null +++ b/roles/devd/handlers/main.yml @@ -0,0 +1,4 @@ +- name: restart devd + service: + name: devd + state: restarted diff --git a/roles/devd/tasks/main.yml b/roles/devd/tasks/main.yml new file mode 100644 index 0000000..8e7e3b6 --- /dev/null +++ b/roles/devd/tasks/main.yml @@ -0,0 +1,9 @@ +# Without this, devd spams syslog about scsi sense messages about the virtual +# floppy/optical drive. +- name: add -q flag to devd + lineinfile: + path: /etc/rc.conf.d/devd + create: yes + regexp: ^devd_flags= + line: devd_flags="-q" + notify: restart devd diff --git a/roles/dnf_automatic/defaults/main.yml b/roles/dnf_automatic/defaults/main.yml new file mode 100644 index 0000000..92ffda5 --- /dev/null +++ b/roles/dnf_automatic/defaults/main.yml @@ -0,0 +1,3 @@ +dnf_automatic_on_calendar: 03:00 +dnf_automatic_random_delay: 60m +dnf_automatic_restart: yes diff --git a/roles/dnf_automatic/files/etc/dnf/automatic.conf b/roles/dnf_automatic/files/etc/dnf/automatic.conf new file mode 100644 index 0000000..926207f --- /dev/null +++ b/roles/dnf_automatic/files/etc/dnf/automatic.conf @@ -0,0 +1,12 @@ +[commands] +upgrade_type = default +random_sleep = 0 +network_online_timeout = 60 +download_updates = yes +apply_updates = yes + +[emitters] +emit_via = stdio + +[base] +debuglevel = 1 diff --git a/roles/dnf_automatic/files/usr/local/sbin/dnf-auto-restart b/roles/dnf_automatic/files/usr/local/sbin/dnf-auto-restart new file mode 100644 index 0000000..76cc2d4 --- /dev/null +++ b/roles/dnf_automatic/files/usr/local/sbin/dnf-auto-restart @@ -0,0 +1,30 @@ +#!/bin/bash + +set -Eeu -o pipefail +shopt -s lastpipe + +if ! dnf needs-restarting --reboothint; then + shutdown --reboot '+5' 'Rebooting to apply package upgrades' + exit 0 +fi + +SERVICES=() + +dnf needs-restarting --services | while read -r service; do + if [[ $service = user@* ]]; then + continue + elif [ "$(systemctl show "$service" -P RefuseManualStop)" = yes ]; then + continue + else + SERVICES+=("$service") + fi +done + +printf '\n' +if (( ${#SERVICES[@]} > 0 )); then + echo 'restarting the following units:' + printf ' * %s\n' "${SERVICES[@]}" + systemctl restart "${SERVICES[@]}" +else + echo 'All services are up to date.' 
+fi diff --git a/roles/dnf_automatic/handlers/main.yml b/roles/dnf_automatic/handlers/main.yml new file mode 100644 index 0000000..8325ce9 --- /dev/null +++ b/roles/dnf_automatic/handlers/main.yml @@ -0,0 +1,4 @@ +- name: restart dnf-automatic + systemd: + name: dnf-automatic.timer + state: restarted diff --git a/roles/dnf_automatic/tasks/main.yml b/roles/dnf_automatic/tasks/main.yml new file mode 100644 index 0000000..113fee2 --- /dev/null +++ b/roles/dnf_automatic/tasks/main.yml @@ -0,0 +1,50 @@ +- name: install packages + dnf: + name: '{{ dnf_automatic_packages }}' + state: present + +- name: generate dnf-automatic configuration + copy: + src: etc/dnf/automatic.conf + dest: /etc/dnf/automatic.conf + +- name: copy dnf-automatic restart script + copy: + src: '{{ dnf_automatic_restart_script[1:] }}' + dest: '{{ dnf_automatic_restart_script }}' + mode: 0555 + +- name: create systemd override directories + file: + path: /etc/systemd/system/dnf-automatic.{{ item }}.d + state: directory + loop: + - timer + - service + +- name: create systemd override files + template: + src: etc/systemd/system/dnf-automatic.{{ item }}.d/override.conf.j2 + dest: /etc/systemd/system/dnf-automatic.{{ item }}.d/override.conf + loop: + - timer + - service + register: dnf_automatic_unit + notify: restart dnf-automatic + +- name: reload systemd units + systemd: + daemon_reload: yes + when: dnf_automatic_unit.changed + +- name: enable dnf-automatic systemd timer + systemd: + name: dnf-automatic.timer + enabled: yes + state: started + +- name: disable dnf-makecache timer + systemd: + name: dnf-makecache.timer + state: stopped + enabled: no diff --git a/roles/dnf_automatic/templates/etc/systemd/system/dnf-automatic.service.d/override.conf.j2 b/roles/dnf_automatic/templates/etc/systemd/system/dnf-automatic.service.d/override.conf.j2 new file mode 100644 index 0000000..6eafbd7 --- /dev/null +++ b/roles/dnf_automatic/templates/etc/systemd/system/dnf-automatic.service.d/override.conf.j2 @@ -0,0 +1,4 @@ +{% if dnf_automatic_restart %} +[Service] +ExecStartPost={{ dnf_automatic_restart_script }} +{% endif %} diff --git a/roles/dnf_automatic/templates/etc/systemd/system/dnf-automatic.timer.d/override.conf.j2 b/roles/dnf_automatic/templates/etc/systemd/system/dnf-automatic.timer.d/override.conf.j2 new file mode 100644 index 0000000..20a5678 --- /dev/null +++ b/roles/dnf_automatic/templates/etc/systemd/system/dnf-automatic.timer.d/override.conf.j2 @@ -0,0 +1,3 @@ +[Timer] +OnCalendar={{ dnf_automatic_on_calendar }} +RandomizedDelaySec={{ dnf_automatic_random_delay }} diff --git a/roles/dnf_automatic/vars/main.yml b/roles/dnf_automatic/vars/main.yml new file mode 100644 index 0000000..3d96ec2 --- /dev/null +++ b/roles/dnf_automatic/vars/main.yml @@ -0,0 +1,4 @@ +dnf_automatic_packages: + - dnf-automatic + +dnf_automatic_restart_script: /usr/local/sbin/dnf-auto-restart diff --git a/roles/dns_records/defaults/main.yml b/roles/dns_records/defaults/main.yml new file mode 100644 index 0000000..563f2a6 --- /dev/null +++ b/roles/dns_records/defaults/main.yml @@ -0,0 +1,3 @@ +dns_ip: '{{ ip }}' +dns_fqdn: '{{ fqdn }}' +dns_cnames: '{{ cnames }}' diff --git a/roles/dns_records/tasks/main.yml b/roles/dns_records/tasks/main.yml new file mode 100644 index 0000000..c6ef405 --- /dev/null +++ b/roles/dns_records/tasks/main.yml @@ -0,0 +1,41 @@ +- name: create A record + ipadnsrecord: + ipaadmin_principal: '{{ ipa_user }}' + ipaadmin_password: '{{ ipa_pass }}' + zone_name: "{{ dns_fqdn | regex_replace('^[^.]+\\.', '') }}" + record_name: '{{ 
dns_fqdn | split(".") | first }}' + record_type: A + record_value: "{{ dns_ip }}" + state: present + delegate_to: '{{ freeipa_master }}' + +- name: create reverse DNS zone + ipadnszone: + ipaadmin_principal: '{{ ipa_user }}' + ipaadmin_password: '{{ ipa_pass }}' + zone_name: "{{ dns_ip | ansible.utils.ipaddr('revdns') | regex_replace('^[^.]+\\.', '') }}" + state: present + delegate_to: '{{ freeipa_master }}' + +- name: create PTR record + ipadnsrecord: + ipaadmin_principal: '{{ ipa_user }}' + ipaadmin_password: '{{ ipa_pass }}' + zone_name: "{{ dns_ip | ansible.utils.ipaddr('revdns') | regex_replace('^[^.]+\\.', '') }}" + record_name: '{{ dns_ip | split(".") | last }}' + record_type: PTR + record_value: '{{ dns_fqdn if dns_fqdn[-1] == "." else (dns_fqdn ~ ".") }}' + state: present + delegate_to: '{{ freeipa_master }}' + +- name: create CNAME records + ipadnsrecord: + ipaadmin_principal: '{{ ipa_user }}' + ipaadmin_password: '{{ ipa_pass }}' + zone_name: "{{ item | regex_replace('^[^.]+\\.', '') }}" + record_name: '{{ item | split(".") | first }}' + record_type: CNAME + record_value: '{{ dns_fqdn if dns_fqdn[-1] == "." else (dns_fqdn ~ ".") }}' + state: present + delegate_to: '{{ freeipa_master }}' + loop: '{{ dns_cnames }}' diff --git a/roles/dnsmasq/defaults/main.yml b/roles/dnsmasq/defaults/main.yml new file mode 100644 index 0000000..9c83a2c --- /dev/null +++ b/roles/dnsmasq/defaults/main.yml @@ -0,0 +1,8 @@ +dnsmasq_nameservers: '{{ vlan.dns_servers }}' +dnsmasq_searchdomain: '{{ domain }}' +dnsmasq_resolv_options: + - rotate + +dnsmasq_cache_size: 1000 +dnsmasq_negcache: no +dnsmasq_all_servers: yes diff --git a/roles/dnsmasq/handlers/main.yml b/roles/dnsmasq/handlers/main.yml new file mode 100644 index 0000000..a4e4bec --- /dev/null +++ b/roles/dnsmasq/handlers/main.yml @@ -0,0 +1,4 @@ +- name: restart NetworkManager + systemd: + name: NetworkManager + state: restarted diff --git a/roles/dnsmasq/tasks/main.yml b/roles/dnsmasq/tasks/main.yml new file mode 100644 index 0000000..5505897 --- /dev/null +++ b/roles/dnsmasq/tasks/main.yml @@ -0,0 +1,16 @@ +- name: install dnsmasq + dnf: + name: dnsmasq + state: present + +- name: configure NetworkManager to use dnsmasq + template: + src: etc/NetworkManager/conf.d/9A-dns.conf.j2 + dest: /etc/NetworkManager/conf.d/9A-dns.conf + notify: restart NetworkManager + +- name: configure dnsmasq + template: + src: etc/NetworkManager/dnsmasq.d/00-dnsmasq.conf.j2 + dest: /etc/NetworkManager/dnsmasq.d/00-dnsmasq.conf + notify: restart NetworkManager diff --git a/roles/dnsmasq/templates/etc/NetworkManager/conf.d/9A-dns.conf.j2 b/roles/dnsmasq/templates/etc/NetworkManager/conf.d/9A-dns.conf.j2 new file mode 100644 index 0000000..91ae064 --- /dev/null +++ b/roles/dnsmasq/templates/etc/NetworkManager/conf.d/9A-dns.conf.j2 @@ -0,0 +1,9 @@ +[main] +dns=dnsmasq + +[global-dns] +searches={{ dnsmasq_searchdomain if dnsmasq_searchdomain is string else (dnsmasq_searchdomain | join(',')) }} +options={{ dnsmasq_resolv_options if dnsmasq_resolv_options is string else (dnsmasq_resolv_options | join(',')) }} + +[global-dns-domain-*] +servers={{ dnsmasq_nameservers | join(',') }} diff --git a/roles/dnsmasq/templates/etc/NetworkManager/dnsmasq.d/00-dnsmasq.conf.j2 b/roles/dnsmasq/templates/etc/NetworkManager/dnsmasq.d/00-dnsmasq.conf.j2 new file mode 100644 index 0000000..c87ec98 --- /dev/null +++ b/roles/dnsmasq/templates/etc/NetworkManager/dnsmasq.d/00-dnsmasq.conf.j2 @@ -0,0 +1,7 @@ +cache-size={{ dnsmasq_cache_size }} +{% if not dnsmasq_negcache %} +no-negcache +{% 
endif %} +{% if dnsmasq_all_servers %} +all-servers +{% endif %} diff --git a/roles/dovecot/defaults/main.yml b/roles/dovecot/defaults/main.yml new file mode 100644 index 0000000..e4f3842 --- /dev/null +++ b/roles/dovecot/defaults/main.yml @@ -0,0 +1,24 @@ +dovecot_recipient_delimiter: '+' +dovecot_default_user_quota: 5G +dovecot_quota_grace_percent: 5 +dovecot_default_domain: '{{ email_domain }}' + +dovecot_rspamd_host: '{{ rspamd_host }}' +dovecot_rspamd_password: '{{ rspamd_password }}' +dovecot_rspamd_pubkey: '{{ rspamd_pubkey }}' + +dovecot_access_group: role-imap-access + +dovecot_archive_on_calendar: weekly + +dovecot_lmtp_port: 24 +dovecot_quota_status_port: 10993 + +dovecot_tika_port: 9998 +dovecot_solr_port: 8983 + +dovecot_max_mail_size: 64M +dovecot_quota_warning_percent: + - 95 + - 90 + - 80 diff --git a/roles/dovecot/files/etc/dovecot/sieve.before.d/10-rspamd.sieve b/roles/dovecot/files/etc/dovecot/sieve.before.d/10-rspamd.sieve new file mode 100644 index 0000000..7931a71 --- /dev/null +++ b/roles/dovecot/files/etc/dovecot/sieve.before.d/10-rspamd.sieve @@ -0,0 +1,5 @@ +require ["fileinto"]; + +if header :is "X-Spam" "Yes" { + fileinto "Junk"; +} diff --git a/roles/dovecot/files/etc/dovecot/sieve/report-ham.sieve b/roles/dovecot/files/etc/dovecot/sieve/report-ham.sieve new file mode 100644 index 0000000..578e7b2 --- /dev/null +++ b/roles/dovecot/files/etc/dovecot/sieve/report-ham.sieve @@ -0,0 +1,15 @@ +require ["vnd.dovecot.pipe", "copy", "imapsieve", "environment", "variables"]; + +if environment :matches "imap.mailbox" "*" { + set "mailbox" "${1}"; +} + +if string "${mailbox}" "Trash" { + stop; +} + +if environment :matches "imap.email" "*" { + set "email" "${1}"; +} + +pipe :copy "report-ham.sh" [ "${email}" ]; diff --git a/roles/dovecot/files/etc/dovecot/sieve/report-spam.sieve b/roles/dovecot/files/etc/dovecot/sieve/report-spam.sieve new file mode 100644 index 0000000..d34c71b --- /dev/null +++ b/roles/dovecot/files/etc/dovecot/sieve/report-spam.sieve @@ -0,0 +1,7 @@ +require ["vnd.dovecot.pipe", "copy", "imapsieve", "environment", "variables"]; + +if environment :matches "imap.email" "*" { + set "email" "${1}"; +} + +pipe :copy "report-spam.sh" [ "${email}" ]; diff --git a/roles/dovecot/files/etc/dovecot/virtual/All Messages/dovecot-virtual b/roles/dovecot/files/etc/dovecot/virtual/All Messages/dovecot-virtual new file mode 100644 index 0000000..a7f3148 --- /dev/null +++ b/roles/dovecot/files/etc/dovecot/virtual/All Messages/dovecot-virtual @@ -0,0 +1,2 @@ +* + all diff --git a/roles/dovecot/files/etc/dovecot/virtual/Flagged/dovecot-virtual b/roles/dovecot/files/etc/dovecot/virtual/Flagged/dovecot-virtual new file mode 100644 index 0000000..883f49e --- /dev/null +++ b/roles/dovecot/files/etc/dovecot/virtual/Flagged/dovecot-virtual @@ -0,0 +1,2 @@ +* + flagged diff --git a/roles/dovecot/files/etc/dovecot/virtual/INBOX/dovecot-virtual b/roles/dovecot/files/etc/dovecot/virtual/INBOX/dovecot-virtual new file mode 100644 index 0000000..139e4b0 --- /dev/null +++ b/roles/dovecot/files/etc/dovecot/virtual/INBOX/dovecot-virtual @@ -0,0 +1,2 @@ +Virtual/All Messages + inthread refs x-mailbox INBOX diff --git a/roles/dovecot/files/etc/systemd/system/dovecot.service.d/override.conf b/roles/dovecot/files/etc/systemd/system/dovecot.service.d/override.conf new file mode 100644 index 0000000..0e524e8 --- /dev/null +++ b/roles/dovecot/files/etc/systemd/system/dovecot.service.d/override.conf @@ -0,0 +1,6 @@ +[Unit] +Wants=gssproxy.service +After=local-fs.target network-online.target 
dovecot-init.service gssproxy.service + +[Service] +Environment=GSS_USE_PROXY=yes diff --git a/roles/dovecot/files/var/lib/solr/dovecot/conf/schema.xml b/roles/dovecot/files/var/lib/solr/dovecot/conf/schema.xml new file mode 100644 index 0000000..601a290 --- /dev/null +++ b/roles/dovecot/files/var/lib/solr/dovecot/conf/schema.xml @@ -0,0 +1,48 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + id + diff --git a/roles/dovecot/handlers/main.yml b/roles/dovecot/handlers/main.yml new file mode 100644 index 0000000..344cf91 --- /dev/null +++ b/roles/dovecot/handlers/main.yml @@ -0,0 +1,4 @@ +- name: restart dovecot + systemd: + name: dovecot + state: restarted diff --git a/roles/dovecot/meta/main.yml b/roles/dovecot/meta/main.yml new file mode 100644 index 0000000..c4f4b18 --- /dev/null +++ b/roles/dovecot/meta/main.yml @@ -0,0 +1,12 @@ +dependencies: + - role: yum + yum_repositories: + - epel + - rspamd + tags: yum + + - role: solr + tags: solr + + - role: tika + tags: tika diff --git a/roles/dovecot/tasks/freeipa.yml b/roles/dovecot/tasks/freeipa.yml new file mode 100644 index 0000000..1e1ee29 --- /dev/null +++ b/roles/dovecot/tasks/freeipa.yml @@ -0,0 +1,109 @@ +- name: create IMAP access group + ipagroup: + ipaadmin_principal: '{{ ipa_user }}' + ipaadmin_password: '{{ ipa_pass }}' + name: '{{ dovecot_access_group }}' + description: users with IMAP access + nonposix: yes + state: present + run_once: True + +- name: create service principals + ipaservice: + ipaadmin_principal: '{{ ipa_user }}' + ipaadmin_password: '{{ ipa_pass }}' + name: '{{ item }}/{{ ansible_fqdn }}' + state: present + loop: + - imap + - sieve + +- name: retrieve service keytabs + include_role: + name: freeipa_keytab + vars: + keytab_principal: '{{ item }}/{{ ansible_fqdn }}' + keytab_path: '{{ dovecot_keytab }}' + loop: + - imap + - sieve + +- name: configure gssproxy + include_role: + name: gssproxy_client + vars: + gssproxy_name: dovecot + gssproxy_section: service/dovecot + gssproxy_keytab: '{{ dovecot_keytab }}' + gssproxy_client_keytab: '{{ dovecot_keytab }}' + gssproxy_cred_usage: both + gssproxy_euid: dovecot + +- name: create SELinux policy for dovecot to access gssproxy + include_role: + name: selinux_policy + apply: + tags: selinux + vars: + selinux_policy_name: dovecot_gssproxy + selinux_policy_te: '{{ dovecot_selinux_policy_te }}' + tags: selinux + +- name: generate PAM configuration for dovecot + copy: + content: | + auth required pam_sss.so + account required pam_sss.so + dest: /etc/pam.d/dovecot + +- name: create HBAC service + ipahbacsvc: + ipaadmin_principal: '{{ ipa_user }}' + ipaadmin_password: '{{ ipa_pass }}' + name: '{{ dovecot_hbac_service }}' + description: Dovecot IMAP server + state: present + run_once: True + +- name: create imap-servers hostgroup + ipahostgroup: + ipaadmin_principal: '{{ ipa_user }}' + ipaadmin_password: '{{ ipa_pass }}' + name: '{{ dovecot_hbac_hostgroup }}' + description: IMAP Servers + host: "{{ groups[dovecot_hbac_hostgroup] | map('regex_replace', '$', '.' ~ ansible_domain) }}" + state: present + run_once: True + +# Note: we explicitly allow all here. SSSD will only be consulted when a user performs +# a PLAIN login, falling back to PAM authentication. Users with a valid Kerberos ticket +# bypass the PAM stack entirely, so a restrictive HBAC rule is pointless. 
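+# A stricter sketch (not applied here) would instead drop usercategory and key
+# the rule to the dovecot_access_group created above, e.g.:
+#
+#   group:
+#     - '{{ dovecot_access_group }}'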
+- name: create HBAC rule + ipahbacrule: + ipaadmin_principal: '{{ ipa_user }}' + ipaadmin_password: '{{ ipa_pass }}' + name: allow_dovecot_on_imap_servers + description: Allow IMAP on imap servers + hostgroup: + - '{{ dovecot_hbac_hostgroup }}' + usercategory: all + hbacsvc: + - '{{ dovecot_hbac_service }}' + run_once: True + +- name: create systemd override directory + file: + path: /etc/systemd/system/dovecot.service.d + state: directory + +- name: create systemd override file + copy: + src: etc/systemd/system/dovecot.service.d/override.conf + dest: /etc/systemd/system/dovecot.service.d/override.conf + notify: restart dovecot + register: dovecot_systemd_unit + +- name: reload systemd daemons + systemd: + daemon_reload: yes + when: dovecot_systemd_unit.changed diff --git a/roles/dovecot/tasks/main.yml b/roles/dovecot/tasks/main.yml new file mode 100644 index 0000000..09f2e2e --- /dev/null +++ b/roles/dovecot/tasks/main.yml @@ -0,0 +1,127 @@ +- name: install dovecot + dnf: + name: '{{ dovecot_packages }}' + state: present + +- name: add vmail user + user: + name: '{{ dovecot_vmail_user }}' + system: yes + home: '{{ dovecot_vmail_dir }}' + shell: /sbin/nologin + create_home: no + register: dovecot_vmail_user_result + +- name: create vmail directory + file: + path: '{{ dovecot_vmail_dir }}' + state: directory + owner: '{{ dovecot_vmail_user }}' + group: '{{ dovecot_vmail_user }}' + setype: mail_spool_t + mode: 0770 + +- name: set selinux context for vmail directory + sefcontext: + target: '{{ dovecot_vmail_dir }}(/.*)?' + setype: mail_spool_t + state: present + register: dovecot_vmail_sefcontext + +- name: apply selinux context to vmail directory + command: 'restorecon -R {{ dovecot_vmail_dir }}' + when: dovecot_vmail_sefcontext.changed + +- name: set up FreeIPA integration for IMAP + import_tasks: freeipa.yml + +- name: request TLS certificate + include_role: + name: getcert_request + vars: + certificate_service: imap + certificate_path: '{{ dovecot_certificate_path }}' + certificate_key_path: '{{ dovecot_certificate_key_path }}' + certificate_owner: dovecot + certificate_hook: systemctl reload dovecot + +- name: generate dhparams + openssl_dhparam: + path: '{{ dovecot_dhparams_path }}' + size: 2048 + +- name: configure Apache Solr for full-text search + import_tasks: solr.yml + tags: solr + +- name: create virtual config directory + file: + path: /etc/dovecot/virtual + state: directory + +- name: create global sieve directories + file: + path: '{{ item }}' + state: directory + recurse: yes + loop: + - '{{ dovecot_sieve_dir }}' + - '{{ dovecot_sieve_before_dir }}' + - '{{ dovecot_sieve_pipe_bin_dir }}' + +- name: create virtual mailbox definitions + copy: + src: etc/dovecot/virtual/ + dest: /etc/dovecot/virtual/ + +- name: generate dovecot configuration + template: + src: '{{ item.src }}' + dest: /etc/dovecot/{{ item.path | splitext | first }} + loop: "{{ lookup('filetree', '../templates/etc/dovecot', wantlist=True) }}" + loop_control: + label: '{{ item.path }}' + when: item.state == 'file' + notify: restart dovecot + +- name: copy quota warn script + template: + src: '{{ dovecot_quota_warning_script[1:] }}.j2' + dest: '{{ dovecot_quota_warning_script }}' + mode: 0555 + +- name: start dovecot + systemd: + name: dovecot + enabled: yes + state: started + +- import_tasks: rspamd.yml + +- name: open firewall ports + firewalld: + service: '{{ item }}' + permanent: yes + immediate: yes + state: enabled + loop: + - imaps + - managesieve + tags: firewalld + +- name: open firewall ports + 
firewalld: + port: '{{ item }}' + permanent: yes + immediate: yes + state: enabled + loop: + - '{{ dovecot_quota_status_port }}/tcp' + - '{{ dovecot_lmtp_port }}/tcp' + tags: firewalld + +- name: generate archive script + template: + src: '{{ dovecot_archive_script[1:] }}.j2' + dest: '{{ dovecot_archive_script }}' + mode: 0555
diff --git a/roles/dovecot/tasks/rspamd.yml b/roles/dovecot/tasks/rspamd.yml new file mode 100644 index 0000000..90686ee --- /dev/null +++ b/roles/dovecot/tasks/rspamd.yml @@ -0,0 +1,43 @@ +- name: install rspamd + dnf: + name: rspamd + state: present + +- name: copy rspamd X-SPAM sieve script + copy: + src: '{{ dovecot_sieve_before_dir[1:] }}/10-rspamd.sieve' + dest: '{{ dovecot_sieve_before_dir }}/10-rspamd.sieve' + register: dovecot_rspamd_sieve_script + +- name: compile rspamd X-SPAM sieve script + command: sievec '{{ dovecot_sieve_before_dir }}/10-rspamd.sieve' + when: dovecot_rspamd_sieve_script.changed + +- name: copy rspamd sieve reporting scripts + copy: + src: '{{ dovecot_sieve_dir[1:] }}/{{ item }}' + dest: '{{ dovecot_sieve_dir }}/{{ item }}' + loop: + - report-spam.sieve + - report-ham.sieve + register: dovecot_rspamd_report_sieve_scripts + +- name: compile rspamd sieve reporting scripts + command: sievec {{ dovecot_sieve_dir }}/{{ item }} + when: dovecot_rspamd_report_sieve_scripts.results[index].changed + loop: + - report-spam.sieve + - report-ham.sieve + loop_control: + index_var: index + +- name: generate rspamd bash reporting scripts + template: + src: '{{ dovecot_sieve_pipe_bin_dir[1:] }}/{{ item }}.j2' + dest: '{{ dovecot_sieve_pipe_bin_dir }}/{{ item }}' + owner: root + group: dovecot + mode: 0550 + loop: + - report-spam.sh + - report-ham.sh
diff --git a/roles/dovecot/tasks/solr.yml b/roles/dovecot/tasks/solr.yml new file mode 100644 index 0000000..0751192 --- /dev/null +++ b/roles/dovecot/tasks/solr.yml @@ -0,0 +1,40 @@ +- name: add solr collection for dovecot + command: + cmd: '{{ solr_install_dir }}/bin/solr create -c dovecot' + creates: '{{ solr_data_dir }}/dovecot' + become: True + become_user: solr + +- name: check if dovecot schema exists + stat: + path: '{{ solr_data_dir }}/dovecot/conf/schema.xml.bak' + register: schema_xml_bak + +- name: copy dovecot solr schema + copy: + src: '{{ solr_data_dir[1:] }}/dovecot/conf/schema.xml' + dest: '{{ solr_data_dir }}/dovecot/conf/schema.xml' + owner: solr + group: solr + register: solr_schema + changed_when: no + +- name: stat new schema + stat: + path: '{{ solr_data_dir }}/dovecot/conf/schema.xml' + register: schema_xml + +- name: remove managed-schema file + file: + path: '{{ solr_data_dir }}/dovecot/conf/managed-schema.xml' + state: absent + when: (not schema_xml_bak.stat.exists) or (schema_xml_bak.stat.checksum != schema_xml.stat.checksum) + notify: restart solr + +- name: generate dovecot solr config + template: + src: '{{ solr_data_dir[1:] }}/dovecot/conf/solrconfig.xml.j2' + dest: '{{ solr_data_dir }}/dovecot/conf/solrconfig.xml' + owner: solr + group: solr + notify: restart solr
diff --git a/roles/dovecot/templates/etc/dovecot/conf.d/10-auth.conf.j2 b/roles/dovecot/templates/etc/dovecot/conf.d/10-auth.conf.j2 new file mode 100644 index 0000000..2185d6d --- /dev/null +++ b/roles/dovecot/templates/etc/dovecot/conf.d/10-auth.conf.j2 @@ -0,0 +1,10 @@ +auth_default_realm = {{ freeipa_realm }} + +auth_username_format = %Ln + +auth_gssapi_hostname = "$ALL" + +auth_mechanisms = gssapi plain login + +!include auth-system.conf.ext +!include auth-ldap.conf.ext
diff --git
a/roles/dovecot/templates/etc/dovecot/conf.d/10-mail.conf.j2 b/roles/dovecot/templates/etc/dovecot/conf.d/10-mail.conf.j2 new file mode 100644 index 0000000..9a3884a --- /dev/null +++ b/roles/dovecot/templates/etc/dovecot/conf.d/10-mail.conf.j2 @@ -0,0 +1,31 @@ +mail_location = mdbox:~/mdbox + +namespace inbox { + type = private + separator = / + inbox = yes + subscriptions = yes +} + +namespace virtual { + location = virtual:/etc/dovecot/virtual:INDEX=~/.virtual:CONTROL=~/.virtual:VOLATILEDIR=~/.virtual:LAYOUT=fs + + type = private + separator = / + prefix = Virtual/ +} + +mail_plugins = $mail_plugins quota virtual fts fts_solr + +mail_privileged_group = {{ dovecot_vmail_user }} + +first_valid_uid = {{ dovecot_vmail_user_result.uid }} +last_valid_uid = {{ dovecot_vmail_user_result.uid }} + +first_valid_gid = {{ dovecot_vmail_user_result.group }} +last_valid_gid = {{ dovecot_vmail_user_result.group }} + +# recommended configuration for quota:count +protocol !indexer-worker { + mail_vsize_bg_after_count = 100 +} diff --git a/roles/dovecot/templates/etc/dovecot/conf.d/10-master.conf.j2 b/roles/dovecot/templates/etc/dovecot/conf.d/10-master.conf.j2 new file mode 100644 index 0000000..a2af8b3 --- /dev/null +++ b/roles/dovecot/templates/etc/dovecot/conf.d/10-master.conf.j2 @@ -0,0 +1,31 @@ +service imap-login { + inet_listener imap { + port = 143 + } + + inet_listener imaps { + port = 993 + ssl = yes + } +} + +service lmtp { + user = {{ dovecot_vmail_user }} + inet_listener lmtp { + port = {{ dovecot_lmtp_port }} + } +} + +service auth-worker { + user = $default_internal_user +} + +# Allow the vmail user to write to stats. This isn't strictly necessary, but +# prevents dovecot-lda from spamming the mail log with errors. +service stats { + unix_listener stats-writer { + user = dovecot + group = {{ dovecot_vmail_user }} + mode = 0660 + } +} diff --git a/roles/dovecot/templates/etc/dovecot/conf.d/10-ssl.conf.j2 b/roles/dovecot/templates/etc/dovecot/conf.d/10-ssl.conf.j2 new file mode 100644 index 0000000..e677b44 --- /dev/null +++ b/roles/dovecot/templates/etc/dovecot/conf.d/10-ssl.conf.j2 @@ -0,0 +1,10 @@ +ssl = required + +ssl_cert = <{{ dovecot_certificate_path }} +ssl_key = <{{ dovecot_certificate_key_path }} + +ssl_dh = <{{ dovecot_dhparams_path }} + +ssl_min_protocol = TLSv1.2 + +ssl_cipher_list = {{ dovecot_ssl_cipher_list }} diff --git a/roles/dovecot/templates/etc/dovecot/conf.d/15-lda.conf.j2 b/roles/dovecot/templates/etc/dovecot/conf.d/15-lda.conf.j2 new file mode 100644 index 0000000..0ed20f5 --- /dev/null +++ b/roles/dovecot/templates/etc/dovecot/conf.d/15-lda.conf.j2 @@ -0,0 +1,10 @@ +recipient_delimiter = {{ dovecot_recipient_delimiter }} +lda_original_recipient_header = X-Original-To + +lda_mailbox_autocreate = yes + +lda_mailbox_autosubscribe = no + +protocol lda { + mail_plugins = $mail_plugins sieve +} diff --git a/roles/dovecot/templates/etc/dovecot/conf.d/15-mailboxes.conf.j2 b/roles/dovecot/templates/etc/dovecot/conf.d/15-mailboxes.conf.j2 new file mode 100644 index 0000000..af47fcc --- /dev/null +++ b/roles/dovecot/templates/etc/dovecot/conf.d/15-mailboxes.conf.j2 @@ -0,0 +1,36 @@ +namespace inbox { + + mailbox Drafts { + auto = subscribe + special_use = \Drafts + } + + mailbox Junk { + auto = subscribe + special_use = \Junk + } + + mailbox Trash { + auto = subscribe + special_use = \Trash + } + + mailbox Sent { + auto = subscribe + special_use = \Sent + } + + mailbox Archive { + auto = subscribe + special_use = \Archive + } + + # "auto = subscribe" on virtual folders causes 
dovecot to coredump. + mailbox "Virtual/All Messages" { + special_use = \All + } + + mailbox Virtual/Flagged { + special_use = \Flagged + } +} diff --git a/roles/dovecot/templates/etc/dovecot/conf.d/20-imap.conf.j2 b/roles/dovecot/templates/etc/dovecot/conf.d/20-imap.conf.j2 new file mode 100644 index 0000000..ae67bae --- /dev/null +++ b/roles/dovecot/templates/etc/dovecot/conf.d/20-imap.conf.j2 @@ -0,0 +1,3 @@ +protocol imap { + mail_plugins = $mail_plugins imap_quota imap_sieve +} diff --git a/roles/dovecot/templates/etc/dovecot/conf.d/20-lmtp.conf.j2 b/roles/dovecot/templates/etc/dovecot/conf.d/20-lmtp.conf.j2 new file mode 100644 index 0000000..2619ce5 --- /dev/null +++ b/roles/dovecot/templates/etc/dovecot/conf.d/20-lmtp.conf.j2 @@ -0,0 +1,3 @@ +protocol lmtp { + mail_plugins = $mail_plugins sieve +} diff --git a/roles/dovecot/templates/etc/dovecot/conf.d/20-managesieve.conf.j2 b/roles/dovecot/templates/etc/dovecot/conf.d/20-managesieve.conf.j2 new file mode 100644 index 0000000..f4adea9 --- /dev/null +++ b/roles/dovecot/templates/etc/dovecot/conf.d/20-managesieve.conf.j2 @@ -0,0 +1,11 @@ +protocols = $protocols sieve + +service managesieve-login { + inet_listener sieve { + port = 4190 + } + + inet_listener sieve_deprecated { + port = 0 + } +} diff --git a/roles/dovecot/templates/etc/dovecot/conf.d/90-fts.conf.j2 b/roles/dovecot/templates/etc/dovecot/conf.d/90-fts.conf.j2 new file mode 100644 index 0000000..dbe2102 --- /dev/null +++ b/roles/dovecot/templates/etc/dovecot/conf.d/90-fts.conf.j2 @@ -0,0 +1,6 @@ +plugin { + fts_autoindex = yes + fts = solr + fts_solr = url=http://localhost:{{ dovecot_solr_port }}/solr/dovecot/ + fts_tika = http://localhost:{{ dovecot_tika_port }}/tika/ +} diff --git a/roles/dovecot/templates/etc/dovecot/conf.d/90-quota.conf.j2 b/roles/dovecot/templates/etc/dovecot/conf.d/90-quota.conf.j2 new file mode 100644 index 0000000..e1d4449 --- /dev/null +++ b/roles/dovecot/templates/etc/dovecot/conf.d/90-quota.conf.j2 @@ -0,0 +1,34 @@ +plugin { + quota = count:User quota + quota_vsizes = yes + quota_rule = *:storage={{ dovecot_default_user_quota }} + quota_grace = {{ dovecot_quota_grace_percent }}%% + + quota_max_mail_size = {{ dovecot_max_mail_size }} + + quota_status_success = DUNNO + quota_status_nouser = DUNNO + quota_status_overquota = "552 5.2.2 Mailbox is full" + + {% for percent in dovecot_quota_warning_percent | sort(reverse=True) %} + quota_warning{% if not loop.first %}{{ loop.index }}{% endif %} = storage={{ percent }}%% quota-warning {{ percent }} %u + {% endfor %} +} + +service quota-warning { + executable = script {{ dovecot_quota_warning_script }} + user = {{ dovecot_vmail_user }} + unix_listener quota-warning { + user = dovecot + group = {{ dovecot_vmail_user }} + mode = 0660 + } +} + +service quota-status { + executable = quota-status -p postfix + inet_listener { + port = {{ dovecot_quota_status_port }} + } + client_limit = 5 +} diff --git a/roles/dovecot/templates/etc/dovecot/conf.d/90-sieve-extprograms.conf.j2 b/roles/dovecot/templates/etc/dovecot/conf.d/90-sieve-extprograms.conf.j2 new file mode 100644 index 0000000..bab3d4f --- /dev/null +++ b/roles/dovecot/templates/etc/dovecot/conf.d/90-sieve-extprograms.conf.j2 @@ -0,0 +1,5 @@ +plugin { + sieve_pipe_bin_dir = {{ dovecot_sieve_pipe_bin_dir }} + sieve_filter_bin_dir = /usr/lib/dovecot/sieve-filter + sieve_execute_bin_dir = /usr/lib/dovecot/sieve-execute +} diff --git a/roles/dovecot/templates/etc/dovecot/conf.d/90-sieve.conf.j2 b/roles/dovecot/templates/etc/dovecot/conf.d/90-sieve.conf.j2 new 
file mode 100644 index 0000000..51ec533 --- /dev/null +++ b/roles/dovecot/templates/etc/dovecot/conf.d/90-sieve.conf.j2 @@ -0,0 +1,30 @@ +plugin { + sieve = file:~/sieve;active=~/.dovecot.sieve + + sieve_before = {{ dovecot_sieve_before_dir }} + + sieve_global_extensions = +vnd.dovecot.pipe +vnd.dovecot.execute + + sieve_plugins = sieve_extprograms sieve_imapsieve + + sieve_quota_max_scripts = 10 + sieve_quota_max_storage = 2M + + sieve_user_email = %Ln@{{ dovecot_default_domain }} + + # The default value for this is "sender", but that will totally break SPF + sieve_redirect_envelope_from = orig_recipient + + # From elsewhere to Junk folder + imapsieve_mailbox1_name = Junk + imapsieve_mailbox1_causes = COPY + imapsieve_mailbox1_before = file:{{ dovecot_sieve_dir }}/report-spam.sieve + + # From Junk folder to elsewhere + imapsieve_mailbox2_name = * + imapsieve_mailbox2_from = Junk + imapsieve_mailbox2_causes = COPY + imapsieve_mailbox2_before = file:{{ dovecot_sieve_dir }}/report-ham.sieve + + sieve_global_extensions = +vnd.dovecot.pipe +} diff --git a/roles/dovecot/templates/etc/dovecot/conf.d/auth-ldap.conf.ext.j2 b/roles/dovecot/templates/etc/dovecot/conf.d/auth-ldap.conf.ext.j2 new file mode 100644 index 0000000..7b5ab0e --- /dev/null +++ b/roles/dovecot/templates/etc/dovecot/conf.d/auth-ldap.conf.ext.j2 @@ -0,0 +1,4 @@ +userdb { + driver = ldap + args = /etc/dovecot/dovecot-ldap.conf.ext +} diff --git a/roles/dovecot/templates/etc/dovecot/conf.d/auth-system.conf.ext.j2 b/roles/dovecot/templates/etc/dovecot/conf.d/auth-system.conf.ext.j2 new file mode 100644 index 0000000..a53dd53 --- /dev/null +++ b/roles/dovecot/templates/etc/dovecot/conf.d/auth-system.conf.ext.j2 @@ -0,0 +1,3 @@ +passdb { + driver = pam +} diff --git a/roles/dovecot/templates/etc/dovecot/dovecot-ldap.conf.ext.j2 b/roles/dovecot/templates/etc/dovecot/dovecot-ldap.conf.ext.j2 new file mode 100644 index 0000000..3f03c82 --- /dev/null +++ b/roles/dovecot/templates/etc/dovecot/dovecot-ldap.conf.ext.j2 @@ -0,0 +1,16 @@ +hosts = {{ freeipa_hosts | join(' ') }} + +sasl_bind = yes +sasl_mech = gssapi +sasl_realm = {{ freeipa_realm }} + +base = {{ freeipa_user_basedn }} + +user_filter = (&(uid=%Ln)(memberof=cn={{ dovecot_access_group }},{{ freeipa_group_basedn }})) +user_attrs= \ + =uid={{ dovecot_vmail_user }}, \ + =gid={{ dovecot_vmail_user }}, \ + =home={{ dovecot_vmail_dir }}/%{ldap:uid} + +iterate_attrs = uid=user +iterate_filter = (memberof=cn={{ dovecot_access_group }},{{ freeipa_group_basedn }}) diff --git a/roles/dovecot/templates/etc/dovecot/dovecot.conf.j2 b/roles/dovecot/templates/etc/dovecot/dovecot.conf.j2 new file mode 100644 index 0000000..bfc16bf --- /dev/null +++ b/roles/dovecot/templates/etc/dovecot/dovecot.conf.j2 @@ -0,0 +1,5 @@ +protocols = imap lmtp + +import_environment = $import_environment GSS_USE_PROXY=yes + +!include conf.d/*.conf diff --git a/roles/dovecot/templates/usr/lib/dovecot/sieve-pipe/report-ham.sh.j2 b/roles/dovecot/templates/usr/lib/dovecot/sieve-pipe/report-ham.sh.j2 new file mode 100644 index 0000000..fbce0bc --- /dev/null +++ b/roles/dovecot/templates/usr/lib/dovecot/sieve-pipe/report-ham.sh.j2 @@ -0,0 +1,7 @@ +#!/bin/bash + +exec /usr/bin/rspamc \ + --hostname={{ dovecot_rspamd_host | quote }} \ + --password={{ dovecot_rspamd_password | quote }} \ + --key={{ dovecot_rspamd_pubkey | quote }} \ + learn_ham diff --git a/roles/dovecot/templates/usr/lib/dovecot/sieve-pipe/report-spam.sh.j2 b/roles/dovecot/templates/usr/lib/dovecot/sieve-pipe/report-spam.sh.j2 new file mode 100644 index 
0000000..393c5ec --- /dev/null +++ b/roles/dovecot/templates/usr/lib/dovecot/sieve-pipe/report-spam.sh.j2 @@ -0,0 +1,7 @@ +#!/bin/bash + +exec /usr/bin/rspamc \ + --hostname={{ dovecot_rspamd_host | quote }} \ + --password={{ dovecot_rspamd_password | quote }} \ + --key={{ dovecot_rspamd_pubkey | quote }} \ + learn_spam diff --git a/roles/dovecot/templates/usr/local/bin/dovecot-archive.sh.j2 b/roles/dovecot/templates/usr/local/bin/dovecot-archive.sh.j2 new file mode 100644 index 0000000..8f34b6a --- /dev/null +++ b/roles/dovecot/templates/usr/local/bin/dovecot-archive.sh.j2 @@ -0,0 +1,19 @@ +#!/bin/bash + +set -Eeu -o pipefail + +VMAIL_USER={{ dovecot_vmail_user | quote }} +{% raw %} +TMPDIR=$(mktemp -d .dovecot-XXXXXX) +trap 'rm -rf -- "$TMPDIR"' EXIT + +chown "$VMAIL_USER" "$TMPDIR" + +doveadm user '*' | xargs -r -I{} doveadm -o plugin/quota= backup -n inbox -f -u {} "mdbox:${TMPDIR}/{}/mdbox:LAYOUT=fs" + +TIMESTAMP=$(date +%Y%m%d%H%M%S) + +tar czf "mailboxes-${TIMESTAMP}.tar.gz" \ + --transform "s|^\.|mailboxes-${TIMESTAMP}|" \ + -C "$TMPDIR" . +{% endraw %} diff --git a/roles/dovecot/templates/usr/local/bin/dovecot-quota-warning.sh.j2 b/roles/dovecot/templates/usr/local/bin/dovecot-quota-warning.sh.j2 new file mode 100644 index 0000000..5ffe4b8 --- /dev/null +++ b/roles/dovecot/templates/usr/local/bin/dovecot-quota-warning.sh.j2 @@ -0,0 +1,19 @@ +#!/bin/bash + +set -Eeu -o pipefail + +PERCENT=$1 +USER=$2 + +cat << EOF | /usr/libexec/dovecot/dovecot-lda -d "$USER" -o "plugin/quota=count:User quota:noenforcing" +From: postmaster@{{ dovecot_default_domain }} +Subject: Mailbox quota warning + +This is an automatically generated message. + +Your mailbox is now ${PERCENT}% full. + +When your mailbox exceeds its quota, you will no longer receive new mail. + +Please delete some messages to free up space. 
+EOF diff --git a/roles/dovecot/templates/var/lib/solr/dovecot/conf/solrconfig.xml.j2 b/roles/dovecot/templates/var/lib/solr/dovecot/conf/solrconfig.xml.j2 new file mode 100644 index 0000000..af29a84 --- /dev/null +++ b/roles/dovecot/templates/var/lib/solr/dovecot/conf/solrconfig.xml.j2 @@ -0,0 +1,91 @@ + + + + {{ solr_lucene_version }} + + + + + + + + + + + + + + ${solr.data.dir:} + + + + + ${solr.ulog.dir:} + ${solr.ulog.numVersionBuckets:65536} + + + + ${solr.autoCommit.maxTime:15000} + false + + + + ${solr.autoSoftCommit.maxTime:-1} + + + + + + + + + + + + + + true + + 20 + + 200 + + false + + + + + + + + + + explicit + 10 + + + + + + _text_ + + + + + diff --git a/roles/dovecot/vars/main.yml b/roles/dovecot/vars/main.yml new file mode 100644 index 0000000..5069aa5 --- /dev/null +++ b/roles/dovecot/vars/main.yml @@ -0,0 +1,64 @@ +dovecot_packages: + - dovecot + - dovecot-pigeonhole + +dovecot_vmail_user: vmail +dovecot_vmail_dir: /var/vmail + +dovecot_hbac_hostgroup: imap_servers +dovecot_hbac_service: dovecot + +dovecot_certificate_path: /etc/pki/dovecot/certs/dovecot.pem +dovecot_certificate_key_path: /etc/pki/dovecot/private/dovecot.key +dovecot_dhparams_path: /etc/pki/dovecot/dhparams-dovecot.pem +dovecot_ssl_cipher_list: ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-RSA-AES128-GCM-SHA256:ECDHE-ECDSA-AES256-GCM-SHA384:ECDHE-RSA-AES256-GCM-SHA384:ECDHE-ECDSA-CHACHA20-POLY1305:ECDHE-RSA-CHACHA20-POLY1305:DHE-RSA-AES128-GCM-SHA256:DHE-RSA-AES256-GCM-SHA384 + +dovecot_quota_warning_script: /usr/local/bin/dovecot-quota-warning.sh +dovecot_archive_script: /usr/local/bin/dovecot-archive.sh + +dovecot_keytab: /var/lib/gssproxy/clients/dovecot.keytab + +dovecot_sieve_dir: /etc/dovecot/sieve +dovecot_sieve_before_dir: /etc/dovecot/sieve.before.d +dovecot_sieve_pipe_bin_dir: /usr/lib/dovecot/sieve-pipe + +dovecot_solr_schema_path: /usr/share/doc/dovecot/solr-schema-7.7.0.xml +dovecot_solr_config_path: /usr/share/doc/dovecot/solr-config-7.7.0.xml + +dovecot_selinux_policy_te: | + require { + type autofs_t; + type dovecot_t; + type dovecot_auth_t; + type dovecot_auth_exec_t; + type dovecot_deliver_exec_t; + type gssd_t; + type gssproxy_t; + type gssproxy_var_lib_t; + class dir search; + class sock_file write; + class unix_stream_socket connectto; + class process noatsecure; + class file { read execute open getattr execute_no_trans map }; + class dir search; + class key { read write }; + } + + ### The following rules are needed for dovecot to access gssproxy: + #============= dovecot_auth_t ============== + allow dovecot_auth_t gssproxy_t:unix_stream_socket connectto; + allow dovecot_auth_t gssproxy_var_lib_t:dir search; + allow dovecot_auth_t gssproxy_var_lib_t:sock_file write; + allow dovecot_auth_t autofs_t:dir search; + allow dovecot_auth_t gssd_t:key { read write }; + + #============= dovecot_t ============== + allow dovecot_t dovecot_auth_t:process noatsecure; + allow dovecot_t dovecot_deliver_exec_t:file { read execute open getattr execute_no_trans }; + + #============= gssproxy_t ============== + allow gssproxy_t dovecot_auth_exec_t:file getattr; + + ### The following rules are needed for the delivery process to exec quota warning scripts: + #============= dovecot_t ============== + allow dovecot_t dovecot_deliver_exec_t:file { read execute open getattr execute_no_trans map }; diff --git a/roles/evolution/defaults/main.yml b/roles/evolution/defaults/main.yml new file mode 100644 index 0000000..c6c69af --- /dev/null +++ b/roles/evolution/defaults/main.yml @@ -0,0 +1,6 @@ 
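Stepping back to the dovecot_selinux_policy_te source defined just above: it is handed to the selinux_policy role, whose own tasks are not shown in this hunk. A minimal sketch of what such a role conventionally does with a type enforcement snippet, assuming the standard checkmodule/semodule_package/semodule toolchain and a scratch path under /tmp (both assumptions, not taken from this repository):

- name: write the type enforcement source to disk (illustrative sketch)
  copy:
    content: '{{ dovecot_selinux_policy_te }}'
    dest: /tmp/dovecot_gssproxy.te

- name: compile the policy module   # -M enables MLS support, -m builds a non-base module
  command:
    cmd: checkmodule -M -m -o /tmp/dovecot_gssproxy.mod /tmp/dovecot_gssproxy.te
    creates: /tmp/dovecot_gssproxy.mod

- name: package the policy module
  command:
    cmd: semodule_package -o /tmp/dovecot_gssproxy.pp -m /tmp/dovecot_gssproxy.mod
    creates: /tmp/dovecot_gssproxy.pp

- name: load the policy module   # a real role would also guard idempotence, e.g. by checking semodule -l first
  command: semodule -i /tmp/dovecot_gssproxy.pp

The Evolution client role's defaults pick up again below.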
+evolution_mail_account_name: '{{ organization }}' +evolution_dav_account_name: '{{ organization }}' +evolution_email_domain: '{{ email_domain }}' +evolution_imap_host: '{{ imap_host }}' +evolution_smtp_host: '{{ mail_host }}' +evolution_dav_host: 'dav.{{ domain }}' diff --git a/roles/evolution/handlers/main.yml b/roles/evolution/handlers/main.yml new file mode 100644 index 0000000..1931b02 --- /dev/null +++ b/roles/evolution/handlers/main.yml @@ -0,0 +1,2 @@ +- name: update dconf + command: dconf update diff --git a/roles/evolution/tasks/main.yml b/roles/evolution/tasks/main.yml new file mode 100644 index 0000000..543dec7 --- /dev/null +++ b/roles/evolution/tasks/main.yml @@ -0,0 +1,23 @@ +- name: create evolution autoconfig directory + file: + path: '{{ evolution_autoconfig_dir }}' + state: directory + recurse: yes + +- name: set evolution autoconfig source + template: + src: etc/dconf/db/site.d/10-evolution.j2 + dest: /etc/dconf/db/site.d/10-evolution + notify: update dconf + +- name: generate evolution autoconfig sources + template: + src: '{{ evolution_autoconfig_dir[1:] }}/{{ item }}.j2' + dest: '{{ evolution_autoconfig_dir }}/{{ item }}' + loop: + - ac-caldav.source + - ac-carddav.source + - ac-tasks.source + - ac-imap.source + - ac-mail.source + - ac-smtp.source diff --git a/roles/evolution/templates/etc/dconf/db/site.d/10-evolution.j2 b/roles/evolution/templates/etc/dconf/db/site.d/10-evolution.j2 new file mode 100644 index 0000000..b1a1a0b --- /dev/null +++ b/roles/evolution/templates/etc/dconf/db/site.d/10-evolution.j2 @@ -0,0 +1,2 @@ +[org/gnome/evolution-data-server] +autoconfig-directory='{{ evolution_autoconfig_dir }}' diff --git a/roles/evolution/templates/usr/local/share/evolution/sources/ac-caldav.source.j2 b/roles/evolution/templates/usr/local/share/evolution/sources/ac-caldav.source.j2 new file mode 100644 index 0000000..c214fde --- /dev/null +++ b/roles/evolution/templates/usr/local/share/evolution/sources/ac-caldav.source.j2 @@ -0,0 +1,41 @@ +[Autoconfig] +Revision=1 + +[Data Source] +DisplayName={{ evolution_dav_account_name }} +Enabled=true +Parent=caldav-stub + +[Security] +Method=tls + +[Authentication] +Host={{ evolution_dav_host }} +Method=GSSAPI +Port=443 +ProxyUid=system-proxy +RememberPassword=true +User=${USER} +CredentialName= +IsExternal=false + +[Offline] +StaySynchronized=true + +[Refresh] +Enabled=true +IntervalMinutes=5 + +[WebDAV Backend] +AvoidIfmatch=false +CalendarAutoSchedule=false +DisplayName={{ evolution_dav_account_name }} +EmailAddress=${USER}@{{ evolution_email_domain }} +ResourcePath=/calendars/${USER}/personal/ +ResourceQuery= +SslTrust= + +[Calendar] +BackendName=caldav +Color=#729fcf +Selected=true diff --git a/roles/evolution/templates/usr/local/share/evolution/sources/ac-carddav.source.j2 b/roles/evolution/templates/usr/local/share/evolution/sources/ac-carddav.source.j2 new file mode 100644 index 0000000..c1088ac --- /dev/null +++ b/roles/evolution/templates/usr/local/share/evolution/sources/ac-carddav.source.j2 @@ -0,0 +1,41 @@ +[Autoconfig] +Revision=1 + +[Data Source] +DisplayName={{ evolution_dav_account_name }} +Enabled=true +Parent=webdav-stub + +[Security] +Method=tls + +[Authentication] +Host={{ evolution_dav_host }} +Method=GSSAPI +Port=443 +ProxyUid=system-proxy +RememberPassword=true +User=${USER} +CredentialName= +IsExternal=false + +[Autocomplete] +IncludeMe=true + +[Offline] +StaySynchronized=true + +[WebDAV Backend] +AvoidIfmatch=false +CalendarAutoSchedule=false +DisplayName={{ evolution_dav_account_name }} 
+EmailAddress= +ResourcePath=/addressbooks/${USER}/personal/ +ResourceQuery= +SslTrust= + +[Contacts Backend] +IncludeMe=true + +[Address Book] +BackendName=webdav diff --git a/roles/evolution/templates/usr/local/share/evolution/sources/ac-imap.source.j2 b/roles/evolution/templates/usr/local/share/evolution/sources/ac-imap.source.j2 new file mode 100644 index 0000000..3350e22 --- /dev/null +++ b/roles/evolution/templates/usr/local/share/evolution/sources/ac-imap.source.j2 @@ -0,0 +1,60 @@ +[Autoconfig] +Revision=1 + +[Data Source] +DisplayName={{ evolution_mail_account_name }} +Enabled=true +Parent= + +[Offline] +StaySynchronized=true + +[Refresh] +Enabled=true +IntervalMinutes=60 + +[Authentication] +Host={{ evolution_imap_host }} +Method=GSSAPI +Port=993 +ProxyUid=system-proxy +RememberPassword=true +User=${USER} +CredentialName= + +[Mail Account] +BackendName=imapx +IdentityUid=ac-mail +ArchiveFolder=folder://ac-imap/Archive +NeedsInitialSetup=false + +[Security] +Method=ssl-on-alternate-port + +[Imapx Backend] +FilterInbox=true +StoreChangesInterval=3 +LimitByAge=true +LimitUnit=years +LimitValue=1 +UseMultiFetch=false +CheckAll=true +CheckSubscribed=true +ConcurrentConnections=3 +FetchOrder=ascending +FilterAll=false +FilterJunk=false +FilterJunkInbox=false +Namespace= +RealJunkPath=Junk +RealTrashPath=Trash +ShellCommand=ssh -C -l %u %h exec /usr/sbin/imapd +UseIdle=true +UseNamespace=false +UseQresync=true +UseRealJunkPath=true +UseRealTrashPath=true +UseShellCommand=false +UseSubscriptions=false +IgnoreOtherUsersNamespace=false +IgnoreSharedFoldersNamespace=false diff --git a/roles/evolution/templates/usr/local/share/evolution/sources/ac-mail.source.j2 b/roles/evolution/templates/usr/local/share/evolution/sources/ac-mail.source.j2 new file mode 100644 index 0000000..08269a7 --- /dev/null +++ b/roles/evolution/templates/usr/local/share/evolution/sources/ac-mail.source.j2 @@ -0,0 +1,51 @@ +[Autoconfig] +Revision=1 + +[Data Source] +DisplayName={{ evolution_mail_account_name }} +Enabled=true +Parent=ac-imap + +[Mail Composition] +Bcc= +Cc= +DraftsFolder=folder://ac-imap/Drafts +ReplyStyle=default +SignImip=true +TemplatesFolder=folder://local/Templates +StartBottom=off +TopSignature=on + +[Mail Identity] +Address=${USER}@{{ evolution_email_domain }} +Aliases= +Name=${REALNAME} +Organization= +ReplyTo= +SignatureUid=autogenerated + +[Mail Submission] +SentFolder=folder://ac-imap/Sent +TransportUid=ac-smtp +RepliesToOriginFolder=false +UseSentFolder=true + +[Pretty Good Privacy (OpenPGP)] +AlwaysTrust=false +EncryptToSelf=true +KeyId= +SigningAlgorithm= +SignByDefault=false +EncryptByDefault=false +PreferInline=false + +[Message Disposition Notifications] +ResponsePolicy=ask + +[Secure MIME (S/MIME)] +EncryptionCertificate= +EncryptByDefault=false +EncryptToSelf=true +SigningAlgorithm= +SigningCertificate= +SignByDefault=false diff --git a/roles/evolution/templates/usr/local/share/evolution/sources/ac-smtp.source.j2 b/roles/evolution/templates/usr/local/share/evolution/sources/ac-smtp.source.j2 new file mode 100644 index 0000000..63d7bbe --- /dev/null +++ b/roles/evolution/templates/usr/local/share/evolution/sources/ac-smtp.source.j2 @@ -0,0 +1,22 @@ +[Autoconfig] +Revision=1 + +[Data Source] +DisplayName={{ evolution_mail_account_name }} +Enabled=true +Parent=ac-imap + +[Mail Transport] +BackendName=smtp + +[Authentication] +Host={{ evolution_smtp_host }} +Method=GSSAPI +Port=587 +ProxyUid=system-proxy +RememberPassword=true +User=${USER} +CredentialName= + +[Security] 
+Method=starttls-on-standard-port diff --git a/roles/evolution/templates/usr/local/share/evolution/sources/ac-tasks.source.j2 b/roles/evolution/templates/usr/local/share/evolution/sources/ac-tasks.source.j2 new file mode 100644 index 0000000..b41900f --- /dev/null +++ b/roles/evolution/templates/usr/local/share/evolution/sources/ac-tasks.source.j2 @@ -0,0 +1,41 @@ +[Autoconfig] +Revision=1 + +[Data Source] +DisplayName={{ evolution_dav_account_name }} +Enabled=true +Parent=caldav-stub + +[Security] +Method=tls + +[Authentication] +Host={{ evolution_dav_host }} +Method=GSSAPI +Port=443 +ProxyUid=system-proxy +RememberPassword=true +User=${USER} +CredentialName= +IsExternal=false + +[Offline] +StaySynchronized=true + +[Refresh] +Enabled=true +IntervalMinutes=5 + +[WebDAV Backend] +AvoidIfmatch=false +CalendarAutoSchedule=false +DisplayName={{ evolution_dav_account_name }} +EmailAddress=${USER}@{{ evolution_email_domain }} +ResourcePath=/calendars/${USER}/personal/ +ResourceQuery= +SslTrust= + +[Task List] +BackendName=caldav +Color=#729fcf +Selected=true diff --git a/roles/evolution/vars/main.yml b/roles/evolution/vars/main.yml new file mode 100644 index 0000000..26ce565 --- /dev/null +++ b/roles/evolution/vars/main.yml @@ -0,0 +1 @@ +evolution_autoconfig_dir: /usr/local/share/evolution/sources diff --git a/roles/firefox/defaults/main.yml b/roles/firefox/defaults/main.yml new file mode 100644 index 0000000..20ba32e --- /dev/null +++ b/roles/firefox/defaults/main.yml @@ -0,0 +1,33 @@ +firefox_preferences: [] +firefox_extensions: [] +firefox_managed_bookmarks: [] +firefox_managed_bookmarks_top_level_name: Intranet + +firefox_homepage: 'about:home' + +firefox_spnego_domains: ['{{ domain }}'] +firefox_spnego_allow_non_fqdn: yes +firefox_spnego_allow_proxies: yes + +firefox_disable_pocket: yes +firefox_disable_snippets: yes +firefox_disable_app_update: yes +firefox_disable_captive_portal: yes +firefox_disable_default_bookmarks: yes +firefox_disable_feedback: yes +firefox_disable_accounts: yes +firefox_disable_studies: yes +firefox_disable_telemetry: yes +firefox_disable_default_browser_check: yes +firefox_disable_user_messaging: yes +firefox_disable_dns_over_https: yes +firefox_disable_search_suggestions: yes +firefox_disable_highlights: yes +firefox_disable_safe_browsing: yes +firefox_disable_top_sites: yes + +firefox_offer_to_save_logins_default: yes +firefox_use_tracking_protection: no +firefox_update_extensions: yes + +firefox_cookie_behavior: reject-tracker-and-partition-foreign diff --git a/roles/firefox/files/etc/profile.d/firefox.sh b/roles/firefox/files/etc/profile.d/firefox.sh new file mode 100644 index 0000000..faaadbf --- /dev/null +++ b/roles/firefox/files/etc/profile.d/firefox.sh @@ -0,0 +1,3 @@ +if [ "$XDG_SESSION_TYPE" = wayland ]; then + export MOZ_ENABLE_WAYLAND=1 +fi diff --git a/roles/firefox/tasks/main.yml b/roles/firefox/tasks/main.yml new file mode 100644 index 0000000..d759407 --- /dev/null +++ b/roles/firefox/tasks/main.yml @@ -0,0 +1,10 @@ +- name: generate firefox policy + template: + lstrip_blocks: yes + src: usr/lib64/firefox/distribution/policies.json.j2 + dest: /usr/lib64/firefox/distribution/policies.json + +- name: enable wayland for firefox + copy: + src: etc/profile.d/firefox.sh + dest: /etc/profile.d/firefox.sh diff --git a/roles/firefox/templates/usr/lib64/firefox/distribution/policies.json.j2 b/roles/firefox/templates/usr/lib64/firefox/distribution/policies.json.j2 new file mode 100644 index 0000000..6b0b0e2 --- /dev/null +++ 
b/roles/firefox/templates/usr/lib64/firefox/distribution/policies.json.j2 @@ -0,0 +1,116 @@ +{ + "policies": { + "ExtensionSettings": { + {% for ext in firefox_extensions %} + {{ ext.id | to_json }}: { + "install_url": {{ (ext.url if ext.url is defined else 'https://addons.mozilla.org/firefox/downloads/latest/' ~ ext.name ~ '/latest.xpi') | to_json }}, + "installation_mode": {{ ext.mode | default('normal_installed') | to_json }} + }{% if not loop.last %},{% endif %} + + {% endfor %} + }, + "3rdparty": { + "Extensions": { + {% for ext in firefox_extensions | selectattr('policy', 'defined') %} + {{ ext.id | to_json }}: {{ ext.policy | to_json }}{% if not loop.last %},{% endif %} + + {% endfor %} + } + }, + {% if firefox_disable_user_messaging %} + "UserMessaging": { + "WhatsNew": false, + "ExtensionRecommendations": false, + "UrlbarInterventions": false, + "SkipOnboarding": true + }, + "OverridePostUpdatePage": "", + "OverrideFirstRunPage": "", + {% endif %} + "EnableTrackingProtection": { + {% for s in ['Value', 'Cryptomining', 'Fingerprinting'] %} + "{{ s }}": {{ firefox_use_tracking_protection | bool | to_json }}, + {% endfor %} + "Locked": false + }, + "Cookies": { + "Behavior": "{{ firefox_cookie_behavior }}", + "BehaviorPrivateBrowsing": "{{ firefox_cookie_behavior }}" + }, + "Authentication": { + "SPNEGO": {{ firefox_spnego_domains | to_json }}, + "AllowNonFQDN": { + "SPNEGO": {{ firefox_spnego_allow_non_fqdn | bool | to_json }} + }, + "AllowProxies": { + "SPNEGO": {{ firefox_spnego_allow_proxies | bool | to_json }} + } + }, + "NoDefaultBookmarks": {{ firefox_disable_default_bookmarks | bool | to_json }}, + "DisablePocket": {{ firefox_disable_pocket | bool | to_json }}, + "DisableAppUpdate": {{ firefox_disable_app_update | to_json }}, + "CaptivePortal": {{ (not firefox_disable_captive_portal) | to_json }}, + "DisableFeedbackCommands": {{ firefox_disable_feedback | bool | to_json }}, + "DisableFirefoxAccounts": {{ firefox_disable_accounts | bool | to_json }}, + "DisableFirefoxStudies": {{ firefox_disable_studies | bool | to_json }}, + "DisableTelemetry": {{ firefox_disable_telemetry | bool | to_json }}, + "DontCheckDefaultBrowser": {{ firefox_disable_default_browser_check | bool | to_json }}, + "OfferToSaveLoginsDefault": {{ firefox_offer_to_save_logins_default | bool | to_json }}, + "DNSOverHTTPS": { + "Enabled": {{ (not firefox_disable_dns_over_https) | to_json }} + }, + "SearchSuggestEnabled": {{ (not firefox_disable_search_suggestions) | to_json }}, + "Homepage": { + "URL": {{ firefox_homepage | to_json }}, + "StartPage": "homepage" + }, + "FirefoxHome": { + "Search": true, + "TopSites": {{ (not firefox_disable_top_sites) | to_json }}, + "SponsoredTopSites": false, + "Highlights": {{ (not firefox_disable_highlights) | to_json }}, + "Pocket": {{ (not firefox_disable_pocket) | to_json }}, + "SponsoredPocket": {{ (not firefox_disable_pocket) | to_json }}, + "Snippets": {{ (not firefox_disable_snippets) | to_json }} + }, + {% if firefox_managed_bookmarks %} + "ManagedBookmarks": [ + { + "toplevel_name": {{ firefox_managed_bookmarks_top_level_name | to_json }} + }, + {% for bookmark in firefox_managed_bookmarks %} + { + "url": {{ bookmark.url | to_json }}, + "name": {{ bookmark.name | to_json }} + }{% if not loop.last %},{% endif %} + + {% endfor %} + ], + {% endif %} + "ExtensionUpdate": {{ firefox_update_extensions | bool | to_json }}, + "Preferences": { + {% for pref in firefox_preferences %} + {{ pref.name | to_json }}: { + "Value": {{ pref.value | to_json }}, + "Status": {{ 
pref.status | default('default') | to_json }} + }, + + {% endfor %} + "privacy.trackingprotection.socialtracking.enabled": { + "Value": {{ firefox_use_tracking_protection | bool | to_json }}, + "Status": "locked" + }, + "browser.toolbars.bookmarks.visibility": { + "Value": "newtab", + "Status": "default" + }, + {% for s in ['malware', 'phishing', 'downloads'] %} + "browser.safebrowsing.{{ s }}.enabled": { + "Value": {{ (not firefox_disable_safe_browsing) | to_json }}, + "Status": "locked" + }{% if not loop.last %},{% endif %} + + {% endfor %} + } + } +} diff --git a/roles/firewalld/tasks/main.yml b/roles/firewalld/tasks/main.yml new file mode 100644 index 0000000..40e39fb --- /dev/null +++ b/roles/firewalld/tasks/main.yml @@ -0,0 +1,17 @@ +- name: install firewalld + dnf: + name: firewalld + state: present + +- name: enable firewalld + systemd: + name: firewalld + state: started + enabled: yes + +- name: disable cockpit rule + firewalld: + service: cockpit + permanent: yes + immediate: yes + state: disabled diff --git a/roles/freebsd_loader/defaults/main.yml b/roles/freebsd_loader/defaults/main.yml new file mode 100644 index 0000000..18b159b --- /dev/null +++ b/roles/freebsd_loader/defaults/main.yml @@ -0,0 +1 @@ +freebsd_loader_config: {} diff --git a/roles/freebsd_loader/tasks/main.yml b/roles/freebsd_loader/tasks/main.yml new file mode 100644 index 0000000..210f469 --- /dev/null +++ b/roles/freebsd_loader/tasks/main.yml @@ -0,0 +1,14 @@ +- name: set loader.conf tunables + lineinfile: + create: yes + path: /boot/loader.conf.local + line: '{{ item.key }}="{{ item.value }}"' + regexp: '^{{ item.key | regex_escape() }}=' + state: present + loop: '{{ freebsd_loader_config | dict2items }}' + register: loader_conf + +- name: warn that reboot is required for loader.conf changes + debug: + msg: 'NOTE: A reboot is required for loader.conf changes to take effect!' 
+ when: loader_conf.changed
diff --git a/roles/freeipa_client/defaults/main.yml b/roles/freeipa_client/defaults/main.yml new file mode 100644 index 0000000..95fa912 --- /dev/null +++ b/roles/freeipa_client/defaults/main.yml @@ -0,0 +1 @@ +freeipa_autofs: yes
diff --git a/roles/freeipa_client/files/etc/gssproxy/99-nfs-client.conf b/roles/freeipa_client/files/etc/gssproxy/99-nfs-client.conf new file mode 100644 index 0000000..2ef5e1e --- /dev/null +++ b/roles/freeipa_client/files/etc/gssproxy/99-nfs-client.conf @@ -0,0 +1,9 @@ +[service/nfs-client] + mechs = krb5 + cred_store = keytab:/etc/krb5.keytab + cred_store = ccache:FILE:/var/lib/gssproxy/clients/krb5cc_%u + cred_store = client_keytab:/var/lib/gssproxy/clients/%u.keytab + cred_usage = initiate + allow_any_uid = yes + trusted = yes + euid = 0
diff --git a/roles/freeipa_client/handlers/main.yml b/roles/freeipa_client/handlers/main.yml new file mode 100644 index 0000000..6f9bf27 --- /dev/null +++ b/roles/freeipa_client/handlers/main.yml @@ -0,0 +1,14 @@ +- name: restart gssproxy + systemd: + name: gssproxy + state: restarted + +- name: restart sssd + systemd: + name: sssd + state: restarted + +- name: restart rsyslog + systemd: + name: rsyslog + state: restarted
diff --git a/roles/freeipa_client/tasks/main.yml b/roles/freeipa_client/tasks/main.yml new file mode 100644 index 0000000..8b98daa --- /dev/null +++ b/roles/freeipa_client/tasks/main.yml @@ -0,0 +1,54 @@ +- name: install freeipa packages + dnf: + name: '{{ freeipa_packages[ansible_distribution_major_version] }}' + state: present + +- name: initialize freeipa client + command: > + ipa-client-install + --unattended + --principal={{ ipa_user }} + --password={{ ipa_pass | quote }} + args: + creates: /etc/ipa/default.conf + +- name: configure autofs + command: ipa-client-automount --unattended + register: ipa_client_automount + failed_when: ipa_client_automount.rc not in [0, 3] + changed_when: ipa_client_automount.rc != 3 + when: freeipa_autofs + +- name: configure gssproxy + copy: + src: etc/gssproxy/99-nfs-client.conf + dest: /etc/gssproxy/99-nfs-client.conf + notify: restart gssproxy + +- name: enable krb5 hostname canonicalization + lineinfile: + path: /etc/krb5.conf + regexp: '^\s*{{ item }}\s*=' + line: ' {{ item }} = true' + insertafter: '\[libdefaults\]' + state: present + loop: + - rdns + - dns_canonicalize_hostname + +# Disabling this until they figure out this bug. I don't use containers, +# so the kernel KEYRING ccache is just fine.
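One side effect of removing sssd-kcm is that the krb5.conf.d drop-in shipped by that package (which points libkrb5 at KCM:) goes away, and the library falls back to its compiled-in default credential cache, on Enterprise Linux builds the kernel keyring. If that choice should be explicit rather than implicit, a task in the same lineinfile style as the surrounding ones could pin it; this is a hedged sketch and not something the role itself does, with the value below being the stock EL default:

- name: pin the kernel keyring ccache explicitly (illustrative sketch, not part of the role)
  lineinfile:
    path: /etc/krb5.conf
    insertafter: '\[libdefaults\]'
    regexp: '^\s*default_ccache_name\s*='
    line: ' default_ccache_name = KEYRING:persistent:%{uid}'
    state: present

The sssd-kcm removal that follows is tracked upstream here: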
+# https://bugzilla.redhat.com/show_bug.cgi?id=2035496 +- name: uninstall sssd-kcm + dnf: + name: sssd-kcm + state: absent + notify: restart sssd + +- name: send sssd logs to journald + lineinfile: + create: yes + path: /etc/sysconfig/sssd + regexp: ^DEBUG_LOGGER= + line: DEBUG_LOGGER=--logger=journald + notify: restart sssd diff --git a/roles/freeipa_client/vars/main.yml b/roles/freeipa_client/vars/main.yml new file mode 100644 index 0000000..0dc5a8e --- /dev/null +++ b/roles/freeipa_client/vars/main.yml @@ -0,0 +1,5 @@ +freeipa_packages: + '8': + - '@idm:DL1/client' + '9': + - ipa-client diff --git a/roles/freeipa_keytab/defaults/main.yml b/roles/freeipa_keytab/defaults/main.yml new file mode 100644 index 0000000..fab313e --- /dev/null +++ b/roles/freeipa_keytab/defaults/main.yml @@ -0,0 +1,4 @@ +keytab_path: /etc/krb5.keytab +keytab_owner: root +keytab_group: root +keytab_mode: '0600' diff --git a/roles/freeipa_keytab/tasks/main.yml b/roles/freeipa_keytab/tasks/main.yml new file mode 100644 index 0000000..3b09e44 --- /dev/null +++ b/roles/freeipa_keytab/tasks/main.yml @@ -0,0 +1,37 @@ +- name: check if principal exists in keytab + shell: + cmd: > + klist -kt {{ keytab_path }} + | awk -v p={{ keytab_principal }}@{{ freeipa_realm }} + '$4 == p { rc=1 } END { exit !rc }' + failed_when: false + changed_when: false + register: keytab_principal_exists + +- name: retrieve keytab + shell: + cmd: > + kinit -fpa -l 1m {{ '-k' if use_system_keytab else ipa_user }} && + ipa-getkeytab -p {{ keytab_principal }} -k {{ keytab_path }} && + kdestroy + stdin: '{{ omit if use_system_keytab else ipa_pass }}' + when: keytab_principal_exists.rc != 0 + +- name: set keytab owner + file: + path: '{{ keytab_path }}' + owner: '{{ keytab_owner }}' + group: '{{ keytab_group }}' + mode: '{{ keytab_mode }}' + setype: krb5_keytab_t + +- name: set selinux context for keytab + sefcontext: + target: '{{ keytab_path }}' + setype: krb5_keytab_t + state: present + register: keytab_sefcontext + +- name: apply selinux context to keytab + command: 'restorecon {{ keytab_path }}' + when: keytab_sefcontext.changed diff --git a/roles/freeipa_keytab/vars/main.yml b/roles/freeipa_keytab/vars/main.yml new file mode 100644 index 0000000..f99f769 --- /dev/null +++ b/roles/freeipa_keytab/vars/main.yml @@ -0,0 +1 @@ +use_system_keytab: "{{ keytab_principal is search('/' ~ ansible_fqdn) }}" diff --git a/roles/freeipa_server/defaults/main.yml b/roles/freeipa_server/defaults/main.yml new file mode 100644 index 0000000..209cd5f --- /dev/null +++ b/roles/freeipa_server/defaults/main.yml @@ -0,0 +1,33 @@ +freeipa_domain: '{{ ansible_domain }}' +freeipa_realm: '{{ ansible_domain | upper }}' +freeipa_email_domain: '{{ email_domain }}' +freeipa_workgroup: WORKGROUP + +freeipa_archive_on_calendar: 'Sat *-*-* 04:00:00' + +freeipa_dns_forwarders: + - 8.8.8.8 + - 8.8.4.4 + +freeipa_dns_max_negative_cache: 5 # seconds + +freeipa_nfs_homedirs: no + +freeipa_admin_password: ChangeMe123 +freeipa_ds_password: ChangeMe123 + +freeipa_idstart: 100000 +freeipa_idmax: 299999 + +freeipa_maxpwdlife: 3650 # 10 years +freeipa_minpwdlife: 1 # hours +freeipa_historylength: 0 +freeipa_minclasses: 0 +freeipa_minlength: 8 +freeipa_maxfailcount: 6 +freeipa_failinterval: 60 # seconds +freeipa_lockouttime: 600 # seconds + +freeipa_admin_password_expiration: 20310130235959 + +freeipa_default_login_shell: /bin/bash diff --git a/roles/freeipa_server/files/usr/local/share/dirsrv/schema/jid.ldif b/roles/freeipa_server/files/usr/local/share/dirsrv/schema/jid.ldif new file mode 
100644 index 0000000..592059a --- /dev/null +++ b/roles/freeipa_server/files/usr/local/share/dirsrv/schema/jid.ldif @@ -0,0 +1,3 @@ +dn: cn=config +attributetypes: ( 1.3.6.1.1.23.2 NAME 'jid' EQUALITY caseIgnoreMatch SUBSTR caseIgnoreSubstringsMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.15 X-ORIGIN 'Extending FreeIPA' ) +objectclasses: ( 1.3.6.1.1.23.1 NAME 'JIDObject' AUXILIARY MAY jid X-ORIGIN 'Extending FreeIPA' ) diff --git a/roles/freeipa_server/handlers/main.yml b/roles/freeipa_server/handlers/main.yml new file mode 100644 index 0000000..884f66d --- /dev/null +++ b/roles/freeipa_server/handlers/main.yml @@ -0,0 +1,19 @@ +- name: restart freeipa + systemd: + name: ipa + state: restarted + +- name: restart sssd + systemd: + name: sssd + state: restarted + +- name: restart rsyslog + systemd: + name: rsyslog + state: restarted + +- name: restart samba + systemd: + name: smb + state: restarted diff --git a/roles/freeipa_server/tasks/custom_schema.yml b/roles/freeipa_server/tasks/custom_schema.yml new file mode 100644 index 0000000..e5bca0d --- /dev/null +++ b/roles/freeipa_server/tasks/custom_schema.yml @@ -0,0 +1,101 @@ +- name: create custom schema directory + file: + path: '{{ freeipa_custom_schema_dir }}' + state: directory + recurse: yes + +- name: copy jid schema + copy: + src: '{{ freeipa_custom_schema_dir[1:] }}/jid.ldif' + dest: '{{ freeipa_custom_schema_dir }}/jid.ldif' + +- name: check if JIDObject exists in schema + shell: ldapsearch -QLLL -s base -b cn=schema objectclasses | grep -q JIDObject + changed_when: no + failed_when: no + register: ldapsearch_jidobject + +- block: + - name: extend freeipa schema for JIDs + command: ipa-ldap-updater --schema-file '{{ freeipa_custom_schema_dir }}/jid.ldif' + + - name: restart httpd + systemd: + name: httpd + state: restarted + when: ldapsearch_jidobject.rc != 0 + +- name: add index to jid attribute + ldap_entry: + dn: 'cn=jid,cn=index,cn=userRoot,cn=ldbm database,cn=plugins,cn=config' + objectClass: + - top + - nsIndex + attributes: + cn: jid + nsSystemIndex: false + nsIndexType: eq + bind_dn: cn=Directory Manager + bind_pw: '{{ freeipa_ds_password }}' + server_uri: ldaps://{{ ipa_host }} + register: jid_index + +- name: regenerate indexes for jid attribute + ldap_entry: + dn: cn=jidindex,cn=index,cn=tasks,cn=config + objectClass: + - top + - extensibleObject + attributes: + cn: jidindex + nsInstance: userRoot + nsIndexAttribute: 'jid:eq' + bind_dn: cn=Directory Manager + bind_pw: '{{ freeipa_ds_password }}' + server_uri: ldaps://{{ ipa_host }} + when: jid_index.changed + +- name: add default user object classes + ldap_attrs: + dn: cn=ipaConfig,cn=etc,{{ freeipa_basedn }} + attributes: + ipaUserObjectClasses: + - mailRecipient + - JIDObject + state: present + bind_dn: cn=Directory Manager + bind_pw: '{{ freeipa_ds_password }}' + server_uri: ldaps://{{ ipa_host }} + +- name: add default group object classes + ldap_attrs: + dn: cn=ipaConfig,cn=etc,{{ freeipa_basedn }} + attributes: + ipaGroupObjectClasses: + - mailRecipient + state: present + bind_dn: cn=Directory Manager + bind_pw: '{{ freeipa_ds_password }}' + server_uri: ldaps://{{ ipa_host }} + +- name: allow read access to custom user attributes + ipapermission: + ipaadmin_principal: '{{ ipa_user }}' + ipaadmin_password: '{{ ipa_pass }}' + name: 'System: Read User Addressbook Attributes' + attrs: + - mailAlternateAddress + - jid + action: member + state: present + +- name: allow read access to custom group attributes + ipapermission: + ipaadmin_principal: '{{ ipa_user }}' + 
ipaadmin_password: '{{ ipa_pass }}' + name: 'System: Read Groups' + attrs: + - mail + - mailAlternateAddress + action: member + state: present
diff --git a/roles/freeipa_server/tasks/main.yml b/roles/freeipa_server/tasks/main.yml new file mode 100644 index 0000000..1dd6eaa --- /dev/null +++ b/roles/freeipa_server/tasks/main.yml @@ -0,0 +1,77 @@ +- name: install freeipa packages + dnf: + name: '{{ freeipa_packages }}' + state: present + +# Disabling this until they figure out this bug. I don't use containers, +# so the kernel KEYRING ccache is just fine. +# https://bugzilla.redhat.com/show_bug.cgi?id=2035496 +- name: uninstall sssd-kcm + dnf: + name: sssd-kcm + state: absent + notify: restart sssd + +- name: open firewall ports + firewalld: + service: '{{ item }}' + permanent: yes + immediate: yes + state: enabled + loop: + - dns + - freeipa-ldap + - freeipa-ldaps + - freeipa-trust + - freeipa-replication + tags: firewalld + +- include_tasks: + file: "{{ 'master' if (freeipa_master == inventory_hostname) else 'replica' }}.yml" + +- name: copy bind configuration + template: + src: etc/named/ipa-options-ext.conf.j2 + dest: /etc/named/ipa-options-ext.conf + notify: restart freeipa + +- name: send sssd logs to journald + lineinfile: + create: yes + path: /etc/sysconfig/sssd + regexp: ^DEBUG_LOGGER= + line: DEBUG_LOGGER=--logger=journald + notify: restart sssd + +- name: check if rsyslog is installed + stat: + path: /etc/rsyslog.d + register: rsyslog_conf_dir + +- name: log krb5 to rsyslog + lineinfile: + path: /etc/krb5.conf + insertafter: '^\[logging\]$' + firstmatch: yes + regexp: '^\s*{{ item }}\s*=' + line: ' {{ item }} = SYSLOG:INFO:DAEMON' + loop: + - kdc + - admin_server + notify: restart freeipa + +- name: log freeipa files to rsyslog + template: + src: etc/rsyslog.d/freeipa.conf.j2 + dest: /etc/rsyslog.d/freeipa.conf + notify: restart rsyslog + when: rsyslog_conf_dir.stat.exists + +- name: log samba to rsyslog + lineinfile: + path: /etc/samba/smb.conf + insertafter: '^\[global\]$' + firstmatch: yes + regexp: '^\s*logging\s*=' + line: 'logging = syslog@2' + notify: restart samba
diff --git a/roles/freeipa_server/tasks/master.yml b/roles/freeipa_server/tasks/master.yml new file mode 100644 index 0000000..34d1442 --- /dev/null +++ b/roles/freeipa_server/tasks/master.yml @@ -0,0 +1,138 @@ +- name: initialize freeipa server + command: > + ipa-server-install + --unattended + --realm={{ freeipa_realm }} + --domain={{ freeipa_domain }} + --ds-password={{ freeipa_ds_password | quote }} + --admin={{ freeipa_admin_password | quote }} + --hostname={{ ansible_fqdn }} + --ip-address={{ ansible_default_ipv4.address }} + --no-host-dns + --idstart={{ freeipa_idstart }} + --idmax={{ freeipa_idmax }} + --setup-dns + {% for forwarder in freeipa_dns_forwarders %} + --forwarder {{ forwarder }} + {% endfor %} + --forward-policy=only + --no-ntp + --no-hbac-allow + args: + creates: /etc/ipa/default.conf + +- name: initialize AD trust (for smb) + command: > + ipa-adtrust-install + --unattended + --add-sids + --netbios-name={{ freeipa_workgroup }} + --admin-name=admin + --admin-password={{ freeipa_admin_password | quote }} + args: + creates: /etc/samba/samba.keytab + +- name: set default password policy + community.general.ipa_pwpolicy: + ipa_user: '{{ ipa_user }}' + ipa_pass: '{{ ipa_pass }}' + maxpwdlife: '{{ freeipa_maxpwdlife }}' + minpwdlife: '{{ freeipa_minpwdlife }}' + historylength: '{{ freeipa_historylength }}' + minclasses: '{{ freeipa_minclasses }}' + minlength: '{{ freeipa_minlength }}' +
maxfailcount: '{{ freeipa_maxfailcount }}' + failinterval: '{{ freeipa_failinterval }}' + lockouttime: '{{ freeipa_lockouttime }}' + +- name: set admin user's password expiration date + ipauser: + ipaadmin_principal: '{{ ipa_user }}' + ipaadmin_password: '{{ ipa_pass }}' + name: admin + passwordexpiration: '{{ freeipa_admin_password_expiration }}' + +- name: set global freeipa configuration + ipaconfig: + ipaadmin_principal: '{{ ipa_user }}' + ipaadmin_password: '{{ ipa_pass }}' + emaildomain: '{{ freeipa_email_domain }}' + defaultshell: '{{ freeipa_default_login_shell }}' + +- name: create HBAC services for system-level services + ipahbacsvc: + ipaadmin_principal: '{{ ipa_user }}' + ipaadmin_password: '{{ ipa_pass }}' + name: '{{ item }}' + description: '{{ item }}' + state: present + loop: '{{ freeipa_system_services }}' + +- name: create HBAC rule for system-level services + ipahbacrule: + ipaadmin_principal: '{{ ipa_user }}' + ipaadmin_password: '{{ ipa_pass }}' + name: whitelisted_system_services + description: Always allow authentication to system-level services + usercategory: all + hostcategory: all + hbacsvc: '{{ freeipa_system_services }}' + +- name: get admin kerberos ticket + command: + cmd: kinit -fpa {{ ipa_user }} + stdin: '{{ ipa_pass }}' + changed_when: false + +- include_tasks: custom_schema.yml + +- name: generate clientAuth certificate profile + template: + src: etc/pki/caIPAclientAuth.cfg.j2 + dest: /etc/pki/caIPAclientAuth.cfg + register: freeipa_clientauth_config + +- name: import clientAuth certificate profile + shell: + cmd: > + ipa certprofile-import caIPAclientAuth + --file /etc/pki/caIPAclientAuth.cfg + --desc 'Profile for client authentication' + --store TRUE + when: freeipa_clientauth_config.changed + +- name: destroy kerberos ticket + command: + cmd: kdestroy + changed_when: false + +- name: create automount maps + ipaautomountmap: + ipaadmin_principal: '{{ ipa_user }}' + ipaadmin_password: '{{ ipa_pass }}' + name: '{{ item }}' + location: default + state: present + loop: '{{ freeipa_automount_maps }}' + +- name: create automount keys + ipaautomountkey: + ipaadmin_principal: '{{ ipa_user }}' + ipaadmin_password: '{{ ipa_pass }}' + location: default + mapname: '{{ item.map }}' + key: '{{ item.key }}' + info: '{{ item.info }}' + state: present + loop: '{{ freeipa_automount_keys }}' + +- name: create /home automount key + ipaautomountkey: + ipaadmin_principal: '{{ ipa_user }}' + ipaadmin_password: '{{ ipa_pass }}' + location: default + mapname: auto.master + key: /home + info: auto.home + state: "{{ 'present' if freeipa_nfs_homedirs else 'absent' }}" + when: freeipa_nfs_homedirs diff --git a/roles/freeipa_server/tasks/replica.yml b/roles/freeipa_server/tasks/replica.yml new file mode 100644 index 0000000..5b6b296 --- /dev/null +++ b/roles/freeipa_server/tasks/replica.yml @@ -0,0 +1,21 @@ +- name: initialize freeipa replica + command: > + ipa-replica-install + --unattended + --realm={{ freeipa_realm }} + --domain={{ freeipa_domain }} + --principal=admin + --admin-password={{ freeipa_admin_password | quote }} + --hostname={{ ansible_fqdn }} + --ip-address={{ ansible_default_ipv4.address }} + --no-host-dns + --setup-ca + --setup-dns + --setup-adtrust + {% for forwarder in freeipa_dns_forwarders %} + --forwarder {{ forwarder }} + {% endfor %} + --no-ntp + args: + creates: /etc/ipa/default.conf + diff --git a/roles/freeipa_server/templates/etc/named/ipa-options-ext.conf.j2 b/roles/freeipa_server/templates/etc/named/ipa-options-ext.conf.j2 new file mode 100644 index 
0000000..9c37805 --- /dev/null +++ b/roles/freeipa_server/templates/etc/named/ipa-options-ext.conf.j2 @@ -0,0 +1,7 @@ +allow-recursion { any; }; +allow-query-cache { any; }; + +max-ncache-ttl {{ freeipa_dns_max_negative_cache }}; + +/* turns on IPv6 for port 53, IPv4 is on by default for all ifaces */ +listen-on-v6 { any; }; diff --git a/roles/freeipa_server/templates/etc/pki/caIPAclientAuth.cfg.j2 b/roles/freeipa_server/templates/etc/pki/caIPAclientAuth.cfg.j2 new file mode 100644 index 0000000..0b03615 --- /dev/null +++ b/roles/freeipa_server/templates/etc/pki/caIPAclientAuth.cfg.j2 @@ -0,0 +1,113 @@ +auth.instance_id=raCertAuth +classId=caEnrollImpl +desc=This certificate profile is for client authentication certificates. +enable=true +enableBy=ipara +input.i1.class_id=certReqInputImpl +input.i2.class_id=submitterInfoInputImpl +input.list=i1,i2 +name=IPA-RA Agent-Authenticated Server Certificate Enrollment +output.list=o1 +output.o1.class_id=certOutputImpl +policyset.list=serverCertSet +policyset.serverCertSet.1.constraint.class_id=subjectNameConstraintImpl +policyset.serverCertSet.1.constraint.name=Subject Name Constraint +policyset.serverCertSet.1.constraint.params.accept=true +policyset.serverCertSet.1.constraint.params.pattern=CN=[^,]+,.+ +policyset.serverCertSet.1.default.class_id=subjectNameDefaultImpl +policyset.serverCertSet.1.default.name=Subject Name Default +policyset.serverCertSet.1.default.params.name=CN=$request.req_subject_name.cn$, O={{ freeipa_realm }} +policyset.serverCertSet.10.constraint.class_id=noConstraintImpl +policyset.serverCertSet.10.constraint.name=No Constraint +policyset.serverCertSet.10.default.class_id=subjectKeyIdentifierExtDefaultImpl +policyset.serverCertSet.10.default.name=Subject Key Identifier Extension Default +policyset.serverCertSet.10.default.params.critical=false +policyset.serverCertSet.11.constraint.class_id=noConstraintImpl +policyset.serverCertSet.11.constraint.name=No Constraint +policyset.serverCertSet.11.default.class_id=userExtensionDefaultImpl +policyset.serverCertSet.11.default.name=User Supplied Extension Default +policyset.serverCertSet.11.default.params.userExtOID=2.5.29.17 +policyset.serverCertSet.12.constraint.class_id=noConstraintImpl +policyset.serverCertSet.12.constraint.name=No Constraint +policyset.serverCertSet.12.default.class_id=commonNameToSANDefaultImpl +policyset.serverCertSet.12.default.name=Copy Common Name to Subject Alternative Name +policyset.serverCertSet.2.constraint.class_id=validityConstraintImpl +policyset.serverCertSet.2.constraint.name=Validity Constraint +policyset.serverCertSet.2.constraint.params.notAfterCheck=false +policyset.serverCertSet.2.constraint.params.notBeforeCheck=false +policyset.serverCertSet.2.constraint.params.range=740 +policyset.serverCertSet.2.default.class_id=validityDefaultImpl +policyset.serverCertSet.2.default.name=Validity Default +policyset.serverCertSet.2.default.params.range=731 +policyset.serverCertSet.2.default.params.startTime=0 +policyset.serverCertSet.3.constraint.class_id=keyConstraintImpl +policyset.serverCertSet.3.constraint.name=Key Constraint +policyset.serverCertSet.3.constraint.params.keyParameters=1024,2048,3072,4096,8192 +policyset.serverCertSet.3.constraint.params.keyType=RSA +policyset.serverCertSet.3.default.class_id=userKeyDefaultImpl +policyset.serverCertSet.3.default.name=Key Default +policyset.serverCertSet.4.constraint.class_id=noConstraintImpl +policyset.serverCertSet.4.constraint.name=No Constraint 
+policyset.serverCertSet.4.default.class_id=authorityKeyIdentifierExtDefaultImpl +policyset.serverCertSet.4.default.name=Authority Key Identifier Default +policyset.serverCertSet.5.constraint.class_id=noConstraintImpl +policyset.serverCertSet.5.constraint.name=No Constraint +policyset.serverCertSet.5.default.class_id=authInfoAccessExtDefaultImpl +policyset.serverCertSet.5.default.name=AIA Extension Default +policyset.serverCertSet.5.default.params.authInfoAccessADEnable_0=true +policyset.serverCertSet.5.default.params.authInfoAccessADLocationType_0=URIName +policyset.serverCertSet.5.default.params.authInfoAccessADLocation_0=http://ipa-ca.{{ freeipa_domain }}/ca/ocsp +policyset.serverCertSet.5.default.params.authInfoAccessADMethod_0=1.3.6.1.5.5.7.48.1 +policyset.serverCertSet.5.default.params.authInfoAccessCritical=false +policyset.serverCertSet.5.default.params.authInfoAccessNumADs=1 +policyset.serverCertSet.6.constraint.class_id=keyUsageExtConstraintImpl +policyset.serverCertSet.6.constraint.name=Key Usage Extension Constraint +policyset.serverCertSet.6.constraint.params.keyUsageCritical=true +policyset.serverCertSet.6.constraint.params.keyUsageCrlSign=false +policyset.serverCertSet.6.constraint.params.keyUsageDataEncipherment=true +policyset.serverCertSet.6.constraint.params.keyUsageDecipherOnly=false +policyset.serverCertSet.6.constraint.params.keyUsageDigitalSignature=true +policyset.serverCertSet.6.constraint.params.keyUsageEncipherOnly=false +policyset.serverCertSet.6.constraint.params.keyUsageKeyAgreement=false +policyset.serverCertSet.6.constraint.params.keyUsageKeyCertSign=false +policyset.serverCertSet.6.constraint.params.keyUsageKeyEncipherment=true +policyset.serverCertSet.6.constraint.params.keyUsageNonRepudiation=true +policyset.serverCertSet.6.default.class_id=keyUsageExtDefaultImpl +policyset.serverCertSet.6.default.name=Key Usage Default +policyset.serverCertSet.6.default.params.keyUsageCritical=true +policyset.serverCertSet.6.default.params.keyUsageCrlSign=false +policyset.serverCertSet.6.default.params.keyUsageDataEncipherment=true +policyset.serverCertSet.6.default.params.keyUsageDecipherOnly=false +policyset.serverCertSet.6.default.params.keyUsageDigitalSignature=true +policyset.serverCertSet.6.default.params.keyUsageEncipherOnly=false +policyset.serverCertSet.6.default.params.keyUsageKeyAgreement=false +policyset.serverCertSet.6.default.params.keyUsageKeyCertSign=false +policyset.serverCertSet.6.default.params.keyUsageKeyEncipherment=true +policyset.serverCertSet.6.default.params.keyUsageNonRepudiation=true +policyset.serverCertSet.7.constraint.class_id=noConstraintImpl +policyset.serverCertSet.7.constraint.name=No Constraint +policyset.serverCertSet.7.default.class_id=extendedKeyUsageExtDefaultImpl +policyset.serverCertSet.7.default.name=Extended Key Usage Extension Default +policyset.serverCertSet.7.default.params.exKeyUsageCritical=false +policyset.serverCertSet.7.default.params.exKeyUsageOIDs=1.3.6.1.5.5.7.3.2 +policyset.serverCertSet.8.constraint.class_id=signingAlgConstraintImpl +policyset.serverCertSet.8.constraint.name=No Constraint +policyset.serverCertSet.8.constraint.params.signingAlgsAllowed=SHA1withRSA,SHA256withRSA,SHA384withRSA,SHA512withRSA,MD5withRSA,MD2withRSA,SHA1withDSA,SHA1withEC,SHA256withEC,SHA384withEC,SHA512withEC +policyset.serverCertSet.8.default.class_id=signingAlgDefaultImpl +policyset.serverCertSet.8.default.name=Signing Alg +policyset.serverCertSet.8.default.params.signingAlg=- 
+policyset.serverCertSet.9.constraint.class_id=noConstraintImpl +policyset.serverCertSet.9.constraint.name=No Constraint +policyset.serverCertSet.9.default.class_id=crlDistributionPointsExtDefaultImpl +policyset.serverCertSet.9.default.name=CRL Distribution Points Extension Default +policyset.serverCertSet.9.default.params.crlDistPointsCritical=false +policyset.serverCertSet.9.default.params.crlDistPointsEnable_0=true +policyset.serverCertSet.9.default.params.crlDistPointsIssuerName_0=CN=Certificate Authority,o=ipaca +policyset.serverCertSet.9.default.params.crlDistPointsIssuerType_0=DirectoryName +policyset.serverCertSet.9.default.params.crlDistPointsNum=1 +policyset.serverCertSet.9.default.params.crlDistPointsPointName_0=http://ipa-ca.{{ freeipa_domain }}/ipa/crl/MasterCRL.bin +policyset.serverCertSet.9.default.params.crlDistPointsPointType_0=URIName +policyset.serverCertSet.9.default.params.crlDistPointsReasons_0= +policyset.serverCertSet.list=1,2,3,4,5,6,7,8,9,10,11,12 +profileId=caIPAclientAuth +visible=true diff --git a/roles/freeipa_server/templates/etc/rsyslog.d/freeipa.conf.j2 b/roles/freeipa_server/templates/etc/rsyslog.d/freeipa.conf.j2 new file mode 100644 index 0000000..6ef8a1c --- /dev/null +++ b/roles/freeipa_server/templates/etc/rsyslog.d/freeipa.conf.j2 @@ -0,0 +1,8 @@ +{% for file in freeipa_log_files %} +input(type="imfile" + addMetadata="on" + file="{{ file.path }}" + tag="{{ file.tag }}" + severity="{{ file.severity | default('info') }}") + +{% endfor %} diff --git a/roles/freeipa_server/vars/main.yml b/roles/freeipa_server/vars/main.yml new file mode 100644 index 0000000..89657e7 --- /dev/null +++ b/roles/freeipa_server/vars/main.yml @@ -0,0 +1,65 @@ +freeipa_packages: + - ipa-server + - ipa-server-trust-ad + - ipa-server-dns + +freeipa_backup_dir: /var/lib/ipa/backup + +# These services must be explicitly allowed if the default HBAC-allow-all policy +# is not used. 
See https://pagure.io/freeipa/issue/7831 +freeipa_system_services: + - systemd-user + - sudo + - sudo-i + - polkit-1 + +freeipa_automount_maps: + - auto.nfs + - auto.home + - auto.nfs_user + - auto.nfs_group + - auto.nfs_media + +freeipa_automount_keys: + - map: auto.master + key: /net + info: -hosts + + - map: auto.master + key: /nfs + info: auto.nfs -browse + + - map: auto.nfs + key: user + info: -fstype=autofs auto.nfs_user + + - map: auto.nfs + key: group + info: -fstype=autofs auto.nfs_group + + - map: auto.nfs + key: media + info: -fstype=autofs auto.nfs_media + +freeipa_log_files: + - path: /var/log/pki/pki-tomcat/ca/transactions + tag: ipa-ca + + - path: /var/log/dirsrv/slapd-{{ freeipa_realm | replace('.', '-') }}/access + tag: slapd + + - path: /var/log/dirsrv/slapd-{{ freeipa_realm | replace('.', '-') }}/audit + tag: slapd + + - path: /var/log/dirsrv/slapd-{{ freeipa_realm | replace('.', '-') }}/errors + tag: slapd + severity: error + + - path: /var/log/httpd/access_log + tag: httpd + + - path: /var/log/httpd/error_log + tag: httpd + severity: error + +freeipa_custom_schema_dir: /usr/local/share/dirsrv/schema diff --git a/roles/freeipa_system_account/defaults/main.yml b/roles/freeipa_system_account/defaults/main.yml new file mode 100644 index 0000000..21c0ab2 --- /dev/null +++ b/roles/freeipa_system_account/defaults/main.yml @@ -0,0 +1 @@ +system_account_expiration: 20380119031407Z diff --git a/roles/freeipa_system_account/tasks/main.yml b/roles/freeipa_system_account/tasks/main.yml new file mode 100644 index 0000000..8da9fde --- /dev/null +++ b/roles/freeipa_system_account/tasks/main.yml @@ -0,0 +1,14 @@ +- name: create freeipa system account for LDAP binds + ldap_entry: + dn: 'uid={{ system_account_username }},{{ freeipa_sysaccount_basedn }}' + objectClass: + - account + - simplesecurityobject + attributes: + uid: '{{ system_account_username }}' + userPassword: '{{ system_account_password }}' + passwordExpirationTime: '{{ system_account_expiration }}' + nsIdleTimeout: 0 + bind_dn: cn=Directory Manager + bind_pw: '{{ freeipa_ds_password }}' + server_uri: ldaps://{{ ipa_host }} diff --git a/roles/freeradius/defaults/main.yml b/roles/freeradius/defaults/main.yml new file mode 100644 index 0000000..416d15f --- /dev/null +++ b/roles/freeradius/defaults/main.yml @@ -0,0 +1,3 @@ +freeradius_clients: [] +freeradius_ldap_servers: '{{ freeipa_hosts }}' +freeradius_access_group: role-wifi-access diff --git a/roles/freeradius/files/etc/systemd/system/radiusd.service.d/override.conf b/roles/freeradius/files/etc/systemd/system/radiusd.service.d/override.conf new file mode 100644 index 0000000..d1edca8 --- /dev/null +++ b/roles/freeradius/files/etc/systemd/system/radiusd.service.d/override.conf @@ -0,0 +1,6 @@ +[Unit] +After=syslog.target network-online.target ipa.service dirsrv.target krb5kdc.service mysql.service mariadb.service postgresql.service gssproxy.service +Wants=gssproxy.service + +[Service] +Environment=GSS_USE_PROXY=yes diff --git a/roles/freeradius/handlers/main.yml b/roles/freeradius/handlers/main.yml new file mode 100644 index 0000000..9c89432 --- /dev/null +++ b/roles/freeradius/handlers/main.yml @@ -0,0 +1,4 @@ +- name: restart radiusd + systemd: + name: radiusd + state: restarted diff --git a/roles/freeradius/tasks/freeipa.yml b/roles/freeradius/tasks/freeipa.yml new file mode 100644 index 0000000..945e2a8 --- /dev/null +++ b/roles/freeradius/tasks/freeipa.yml @@ -0,0 +1,50 @@ +- name: create access group + ipagroup: + ipaadmin_principal: '{{ ipa_user }}' + 
ipaadmin_password: '{{ ipa_pass }}' + name: '{{ freeradius_access_group }}' + description: wifi access + nonposix: yes + state: present + run_once: True + +- name: create service principal + ipaservice: + ipaadmin_principal: '{{ ipa_user }}' + ipaadmin_password: '{{ ipa_pass }}' + name: 'radius/{{ ansible_fqdn }}' + state: present + +- name: retrieve service keytab + include_role: + name: freeipa_keytab + vars: + keytab_principal: 'radius/{{ ansible_fqdn }}' + keytab_path: '{{ freeradius_keytab }}' + +- name: configure gssproxy for kerberized LDAP + include_role: + name: gssproxy_client + vars: + gssproxy_name: freeradius + gssproxy_section: service/freeradius + gssproxy_client_keytab: '{{ freeradius_keytab }}' + gssproxy_cred_usage: initiate + gssproxy_euid: radiusd + +- name: create systemd override directory + file: + path: /etc/systemd/system/radiusd.service.d + state: directory + +- name: create systemd override file + copy: + src: etc/systemd/system/radiusd.service.d/override.conf + dest: /etc/systemd/system/radiusd.service.d/override.conf + register: freeradius_systemd_unit + notify: restart radiusd + +- name: reload systemd units + systemd: + daemon_reload: yes + when: freeradius_systemd_unit.changed diff --git a/roles/freeradius/tasks/main.yml b/roles/freeradius/tasks/main.yml new file mode 100644 index 0000000..a2c926d --- /dev/null +++ b/roles/freeradius/tasks/main.yml @@ -0,0 +1,74 @@ +- name: install freeradius + dnf: + name: '{{ freeradius_packages }}' + state: present + +- import_tasks: freeipa.yml + +- name: request TLS certificate + include_role: + name: getcert_request + vars: + certificate_service: radius + certificate_path: '{{ freeradius_certificate_path }}' + certificate_key_path: '{{ freeradius_certificate_key_path }}' + certificate_ca_path: '{{ freeradius_certificate_ca_path }}' + certificate_owner: radiusd + certificate_hook: systemctl restart radiusd + +- name: generate dhparams + openssl_dhparam: + path: '{{ freeradius_dhparams_path }}' + size: 2048 + +- name: enable ldap module + file: + src: /etc/raddb/mods-available/ldap + dest: /etc/raddb/mods-enabled/ldap + state: link + +- name: generate freeradius configuration + template: + src: etc/raddb/{{ item }}.j2 + dest: /etc/raddb/{{ item }} + owner: root + group: radiusd + mode: 0640 + loop: + - radiusd.conf + - clients.conf + - mods-available/eap + - mods-available/ldap + - sites-available/inner-tunnel + notify: restart radiusd + +- name: create tlscache directory + file: + path: '{{ freeradius_tlscache_dir }}' + state: directory + owner: radiusd + group: radiusd + mode: 0700 + +- name: set up clean-freeradius-tlscache timer + include_role: + name: systemd_timer + vars: + timer_name: clean-freeradius-tlscache + timer_description: Delete old freeradius tlscache files + timer_on_calendar: daily + timer_exec: find {{ freeradius_tlscache_dir }} -mtime +2 -exec rm -vf {} ; + +- name: start freeradius + systemd: + name: radiusd + enabled: yes + state: started + +- name: open firewall ports + firewalld: + service: radius + permanent: yes + immediate: yes + state: enabled + tags: firewalld diff --git a/roles/freeradius/templates/etc/raddb/clients.conf.j2 b/roles/freeradius/templates/etc/raddb/clients.conf.j2 new file mode 100644 index 0000000..f6396e0 --- /dev/null +++ b/roles/freeradius/templates/etc/raddb/clients.conf.j2 @@ -0,0 +1,6 @@ +{% for client in freeradius_clients %} +client {{ client.name }} { + ipaddr = {{ client.address }} + secret = {{ client.secret }} +} +{% endfor %} diff --git 
a/roles/freeradius/templates/etc/raddb/mods-available/eap.j2 b/roles/freeradius/templates/etc/raddb/mods-available/eap.j2 new file mode 100644 index 0000000..5db0d1f --- /dev/null +++ b/roles/freeradius/templates/etc/raddb/mods-available/eap.j2 @@ -0,0 +1,54 @@ +eap { + default_eap_type = ttls + timer_expire = 60 + ignore_unknown_eap_types = yes + cisco_accounting_username_bug = no + max_sessions = ${max_requests} + + tls-config tls-common { + private_key_password = + private_key_file = {{ freeradius_certificate_key_path }} + certificate_file = {{ freeradius_certificate_path }} + ca_file = {{ freeradius_certificate_ca_path }} + auto_chain = no + ca_path = ${cadir} + cipher_list = "PROFILE=SYSTEM" + cipher_server_preference = no + tls_min_version = "1.2" + tls_max_version = "1.2" + ecdh_curve = "prime256v1" + + cache { + enable = yes + lifetime = 24 # hours + name = "EAP module" + persist_dir = "${db_dir}/tlscache" + store { + Tunnel-Private-Group-Id + } + } + + verify { + skip_if_ocsp_ok = yes + tmpdir = /var/run/radiusd/tmp + client = "/usr/bin/openssl verify -CApath ${..ca_path} %{TLS-Client-Cert-Filename}" + } + + ocsp { + enable = yes + override_cert_url = no + } + } + + tls { + tls = tls-common + } + + ttls { + tls = tls-common + default_eap_type = md5 + copy_request_to_tunnel = no + use_tunneled_reply = no + virtual_server = "inner-tunnel" + } +} diff --git a/roles/freeradius/templates/etc/raddb/mods-available/ldap.j2 b/roles/freeradius/templates/etc/raddb/mods-available/ldap.j2 new file mode 100644 index 0000000..85aede1 --- /dev/null +++ b/roles/freeradius/templates/etc/raddb/mods-available/ldap.j2 @@ -0,0 +1,113 @@ +ldap { +{% for server in freeradius_ldap_servers %} + server = '{{ server }}' +{% endfor %} + + base_dn = '{{ freeipa_user_basedn }}' + + sasl { + mech = 'GSSAPI' + realm = '{{ freeipa_realm }}' + } + + update { + control:Password-With-Header += 'userPassword' + control: += 'radiusControlAttribute' + request: += 'radiusRequestAttribute' + reply: += 'radiusReplyAttribute' + } + + user_dn = "LDAP-UserDn" + + user { + base_dn = "${..base_dn}" +{% raw %} + filter = "(uid=%{%{Stripped-User-Name}:-%{User-Name}})" +{% endraw %} + } + + group { + base_dn = '{{ freeipa_group_basedn }}' + filter = '(objectClass=ipagroup)' + name_attribute = cn +{% raw %} + membership_filter = "(member=%{control:${..user_dn}})" +{% endraw %} + membership_attribute = 'memberOf' + cacheable_name = 'yes' + cacheable_dn = 'yes' + allow_dangling_group_ref = 'yes' + } + + profile { } + + client { + base_dn = "${..base_dn}" + filter = '(objectClass=radiusClient)' + + template { } + + attribute { + ipaddr = 'radiusClientIdentifier' + secret = 'radiusClientSecret' + } + } + + read_clients = no + + accounting { + reference = "%{tolower:type.%{Acct-Status-Type}}" + + type { + start { + update { + description := "Online at %S" + } + } + + interim-update { + update { + description := "Last seen at %S" + } + } + + stop { + update { + description := "Offline at %S" + } + } + } + } + + post-auth { + update { + description := "Authenticated at %S" + } + } + + options { + chase_referrals = yes + rebind = yes + res_timeout = 10 + srv_timelimit = 3 + net_timeout = 1 + idle = 60 + probes = 3 + interval = 3 + ldap_debug = 0x0000 + } + + tls { } + + pool { + start = ${thread[pool].start_servers} + min = ${thread[pool].min_spare_servers} + max = ${thread[pool].max_servers} + + spare = ${thread[pool].max_spare_servers} + uses = 0 + retry_delay = 30 + lifetime = 0 + idle_timeout = 60 + } +} diff --git 
a/roles/freeradius/templates/etc/raddb/radiusd.conf.j2 b/roles/freeradius/templates/etc/raddb/radiusd.conf.j2 new file mode 100644 index 0000000..ad9b58f --- /dev/null +++ b/roles/freeradius/templates/etc/raddb/radiusd.conf.j2 @@ -0,0 +1,73 @@ +prefix = /usr +exec_prefix = /usr +sysconfdir = /etc +localstatedir = /var +sbindir = /usr/sbin +logdir = ${localstatedir}/log/radius +raddbdir = ${sysconfdir}/raddb +radacctdir = ${logdir}/radacct + +name = radiusd + +confdir = ${raddbdir} +modconfdir = ${confdir}/mods-config +certdir = ${confdir}/certs +cadir = ${confdir}/certs +run_dir = ${localstatedir}/run/${name} + +db_dir = ${localstatedir}/lib/radiusd +libdir = /usr/lib64/freeradius +pidfile = ${run_dir}/${name}.pid +correct_escapes = true +max_request_time = 30 +cleanup_delay = 5 +max_requests = 16384 +hostname_lookups = no +log { + destination = stdout + colourise = no + stripped_names = no + auth = yes + auth_badpass = no + auth_goodpass = no + msg_denied = "You are already logged in - access denied" +} + +checkrad = ${sbindir}/checkrad + +ENV { } + +security { + user = radiusd + group = radiusd + allow_core_dumps = no + max_attributes = 200 + reject_delay = 1 + status_server = yes +} + +proxy_requests = yes +$INCLUDE proxy.conf + +$INCLUDE clients.conf + +thread pool { + start_servers = 5 + max_servers = 32 + min_spare_servers = 3 + max_spare_servers = 10 + max_requests_per_server = 0 + auto_limit_acct = no +} + +modules { + $INCLUDE mods-enabled/ +} + +instantiate { } + +policy { + $INCLUDE policy.d/ +} + +$INCLUDE sites-enabled/ diff --git a/roles/freeradius/templates/etc/raddb/sites-available/inner-tunnel.j2 b/roles/freeradius/templates/etc/raddb/sites-available/inner-tunnel.j2 new file mode 100644 index 0000000..043349a --- /dev/null +++ b/roles/freeradius/templates/etc/raddb/sites-available/inner-tunnel.j2 @@ -0,0 +1,90 @@ +server inner-tunnel { + listen { + ipaddr = 127.0.0.1 + port = 18120 + type = auth + } + + authorize { + filter_username + chap + suffix + + update control { + &Proxy-To-Realm := LOCAL + } + + eap { + ok = return + } + + ldap + if (ok || updated) { + update { + control:Auth-Type := ldap + } + } + + expiration + logintime + pap + } + + authenticate { + Auth-Type PAP { + pap + } + + Auth-Type CHAP { + chap + } + + Auth-Type LDAP { + ldap + } + + eap + } + + session { + radutmp + } + + + post-auth { + -sql + update reply { + User-Name !* ANY + Message-Authenticator !* ANY + EAP-Message !* ANY + Proxy-State !* ANY + MS-MPPE-Encryption-Types !* ANY + MS-MPPE-Encryption-Policy !* ANY + MS-MPPE-Send-Key !* ANY + MS-MPPE-Recv-Key !* ANY + } + + update { + &outer.session-state: += &reply: + } + + Post-Auth-Type REJECT { + -sql + attr_filter.access_reject + + update outer.session-state { + &Module-Failure-Message := &request:Module-Failure-Message + } + } + + if (LDAP-Group != "{{ freeradius_access_group }}") { + reject + } + } + + pre-proxy { } + + post-proxy { + eap + } +} diff --git a/roles/freeradius/vars/main.yml b/roles/freeradius/vars/main.yml new file mode 100644 index 0000000..a99a6aa --- /dev/null +++ b/roles/freeradius/vars/main.yml @@ -0,0 +1,12 @@ +freeradius_packages: + - freeradius + - freeradius-ldap + - freeradius-utils + +freeradius_certificate_path: /etc/pki/tls/certs/radiusd.pem +freeradius_certificate_key_path: /etc/pki/tls/private/radiusd.key +freeradius_certificate_ca_path: /etc/ipa/ca.crt +freeradius_dhparams_path: /etc/raddb/certs/dh +freeradius_tlscache_dir: /var/lib/radiusd/tlscache + +freeradius_keytab: 
/var/lib/gssproxy/clients/freeradius.keytab diff --git a/roles/gather_facts/tasks/main.yml b/roles/gather_facts/tasks/main.yml new file mode 100644 index 0000000..f603197 --- /dev/null +++ b/roles/gather_facts/tasks/main.yml @@ -0,0 +1,2 @@ +- name: gather facts + setup: diff --git a/roles/gathio/defaults/main.yml b/roles/gathio/defaults/main.yml new file mode 100644 index 0000000..817e2e3 --- /dev/null +++ b/roles/gathio/defaults/main.yml @@ -0,0 +1,5 @@ +gathio_version: master +gathio_user: gathio +gathio_port: 8080 +gathio_from_address: 'events-noreply@{{ email_domain }}' +gathio_domain: '{{ ansible_fqdn }}:{{ gathio_port }}' diff --git a/roles/gathio/handlers/main.yml b/roles/gathio/handlers/main.yml new file mode 100644 index 0000000..fdafc98 --- /dev/null +++ b/roles/gathio/handlers/main.yml @@ -0,0 +1,4 @@ +- name: restart gathio + systemd: + name: gathio + state: restarted diff --git a/roles/gathio/meta/main.yml b/roles/gathio/meta/main.yml new file mode 100644 index 0000000..8c1c42e --- /dev/null +++ b/roles/gathio/meta/main.yml @@ -0,0 +1,6 @@ +dependencies: + - role: yum + yum_repositories: + - epel + - mongodb-6.0 + tags: yum diff --git a/roles/gathio/tasks/main.yml b/roles/gathio/tasks/main.yml new file mode 100644 index 0000000..17abbcf --- /dev/null +++ b/roles/gathio/tasks/main.yml @@ -0,0 +1,102 @@ +- name: install packages + dnf: + name: '{{ gathio_packages }}' + state: present + +- name: create SELinux policy for mongodb + include_role: + name: selinux_policy + apply: + tags: selinux + vars: + selinux_policy_name: mongodb_custom + selinux_policy_te: '{{ gathio_mongodb_selinux_policy_te }}' + tags: selinux + +- name: enable mongodb + systemd: + name: mongod + state: started + enabled: yes + +- name: create gathio user + user: + name: '{{ gathio_user }}' + system: yes + home: '{{ gathio_home }}' + shell: /sbin/nologin + create_home: no + +- name: create gathio home + file: + path: '{{ gathio_home }}' + owner: '{{ gathio_user }}' + group: '{{ gathio_user }}' + mode: 0755 + state: directory + +- name: disable npm package lock + lineinfile: + regexp: ^package-lock= + line: package-lock=false + path: '{{ gathio_home }}/.npmrc' + create: yes + owner: '{{ gathio_user }}' + group: '{{ gathio_user }}' + mode: 0600 + state: present + +- name: clone gathio repository + git: + repo: '{{ gathio_git_repo }}' + dest: '{{ gathio_install_dir }}' + version: '{{ gathio_version }}' + force: yes + update: yes + become: yes + become_user: '{{ gathio_user }}' + register: gathio_git + notify: restart gathio + +- name: install npm dependencies + npm: + path: '{{ gathio_install_dir }}' + production: yes + no_optional: no + become: yes + become_user: '{{ gathio_user }}' + when: gathio_git.changed + notify: restart gathio + +- name: generate gathio configuration + template: + src: '{{ gathio_install_dir[1:] }}/config/{{ item }}.j2' + dest: '{{ gathio_install_dir }}/config/{{ item }}' + owner: '{{ gathio_user }}' + group: '{{ gathio_user }}' + mode: 0440 + loop: + - api.js + - database.js + - domain.js + notify: restart gathio + +- name: create gathio systemd unit + template: + src: etc/systemd/system/gathio.service.j2 + dest: /etc/systemd/system/gathio.service + register: gathio_unit + notify: restart gathio + +- name: reload systemd daemons + systemd: + daemon_reload: yes + when: gathio_unit.changed + +- name: open firewall ports + firewalld: + port: '{{ gathio_port }}/tcp' + permanent: yes + immediate: yes + state: enabled + tags: firewalld diff --git 
a/roles/gathio/templates/etc/systemd/system/gathio.service.j2 b/roles/gathio/templates/etc/systemd/system/gathio.service.j2 new file mode 100644 index 0000000..dcf5812 --- /dev/null +++ b/roles/gathio/templates/etc/systemd/system/gathio.service.j2 @@ -0,0 +1,34 @@ +[Unit] +Description=gathio event manager +After=network.target +AssertPathExists={{ gathio_install_dir }} + +[Service] +Type=simple +Environment="NODE_ENV=production" +EnvironmentFile=-/etc/sysconfig/gathio +ExecStart=/usr/bin/node start.js +WorkingDirectory={{ gathio_install_dir }} +User={{ gathio_user }} +Group={{ gathio_user }} +Restart=on-failure + +# See https://www.freedesktop.org/software/systemd/man/systemd.exec.html +# for details +DevicePolicy=closed +NoNewPrivileges=yes +PrivateDevices=yes +PrivateTmp=yes +ProtectControlGroups=yes +ProtectKernelModules=yes +ProtectKernelTunables=yes +RestrictAddressFamilies=AF_UNIX AF_INET AF_INET6 +RestrictNamespaces=yes +RestrictRealtime=yes +SystemCallFilter=~@clock @debug @module @mount @obsolete @privileged @reboot @setuid @swap + +ProtectSystem=full +ProtectHome=true + +[Install] +WantedBy=multi-user.target diff --git a/roles/gathio/templates/var/lib/gathio/gathio/config/api.js.j2 b/roles/gathio/templates/var/lib/gathio/gathio/config/api.js.j2 new file mode 100644 index 0000000..9ccbd8c --- /dev/null +++ b/roles/gathio/templates/var/lib/gathio/gathio/config/api.js.j2 @@ -0,0 +1,6 @@ +module.exports = { + 'smtpServer': '127.0.0.1', + 'smtpPort': '25', + 'smtpUsername': '', + 'smtpPassword': '' +}; diff --git a/roles/gathio/templates/var/lib/gathio/gathio/config/database.js.j2 b/roles/gathio/templates/var/lib/gathio/gathio/config/database.js.j2 new file mode 100644 index 0000000..120fc64 --- /dev/null +++ b/roles/gathio/templates/var/lib/gathio/gathio/config/database.js.j2 @@ -0,0 +1,3 @@ +module.exports = { + 'url' : 'mongodb://localhost:27017/gathio' +}; diff --git a/roles/gathio/templates/var/lib/gathio/gathio/config/domain.js.j2 b/roles/gathio/templates/var/lib/gathio/gathio/config/domain.js.j2 new file mode 100644 index 0000000..0815294 --- /dev/null +++ b/roles/gathio/templates/var/lib/gathio/gathio/config/domain.js.j2 @@ -0,0 +1,10 @@ +module.exports = { + 'domain' : '{{ gathio_domain }}' , + 'port': '{{ gathio_port }}', + 'email': '{{ gathio_from_address }}', + 'mailService': 'nodemailer', + 'sitename': 'gathio', + 'isFederated': false, + 'logo_url': '', + 'showKofi': false, +}; diff --git a/roles/gathio/vars/main.yml b/roles/gathio/vars/main.yml new file mode 100644 index 0000000..68f584b --- /dev/null +++ b/roles/gathio/vars/main.yml @@ -0,0 +1,20 @@ +gathio_packages: + - mongodb-org + - nodejs + - git + +gathio_git_repo: https://github.com/lowercasename/gathio +gathio_home: /var/lib/gathio +gathio_install_dir: '{{ gathio_home }}/gathio' + +gathio_mongodb_selinux_policy_te: | + require { + type sysctl_fs_t; + type var_lib_nfs_t; + type mongod_t; + class dir search; + } + + #============= mongod_t ============== + allow mongod_t sysctl_fs_t:dir search; + allow mongod_t var_lib_nfs_t:dir search; diff --git a/roles/getcert_request/defaults/main.yml b/roles/getcert_request/defaults/main.yml new file mode 100644 index 0000000..fcac3cc --- /dev/null +++ b/roles/getcert_request/defaults/main.yml @@ -0,0 +1,11 @@ +certificate_sans: '{{ [ansible_fqdn] + cnames }}' +certificate_type: RSA +certificate_size: 2048 + +certificate_owner: root +certificate_mode: 0400 +certificate_service: HTTP + +certificate_hook_name: '{{ certificate_path | basename }}' + +certificate_resubmit: no diff 
--git a/roles/getcert_request/tasks/main.yml b/roles/getcert_request/tasks/main.yml
new file mode 100644
index 0000000..d17515e
--- /dev/null
+++ b/roles/getcert_request/tasks/main.yml
@@ -0,0 +1,96 @@
+# NOTE: certmonger post-commands are passed directly to exec().
+# Spaces in filenames, quotes, and other shell meta-characters will break your hook!
+---
+- name: check if certificate is already tracked by certmonger
+  command: ipa-getcert list --certfile {{ certificate_path }}
+  failed_when: False
+  changed_when: False
+  register: certmonger_already_tracking
+
+- name: retrieve certificate via certmonger
+  block:
+    - name: create freeipa hosts
+      ipahost:
+        ipaadmin_principal: '{{ ipa_user }}'
+        ipaadmin_password: '{{ ipa_pass }}'
+        name: '{{ certificate_san }}'
+        state: present
+      loop: '{{ certificate_sans }}'
+      loop_control:
+        loop_var: certificate_san
+
+    - name: create freeipa services
+      ipaservice:
+        ipaadmin_principal: '{{ ipa_user }}'
+        ipaadmin_password: '{{ ipa_pass }}'
+        name: '{{ certificate_service }}/{{ certificate_san }}'
+        host: '{{ omit if certificate_san == ansible_fqdn else [ansible_fqdn] }}'
+      loop: '{{ certificate_sans }}'
+      loop_control:
+        loop_var: certificate_san
+      when: "certificate_service != 'host'"
+
+    - name: prepare post-save hook
+      block:
+        - name: create post-save script
+          copy:
+            content: |
+              #!/bin/bash
+              exec 1> >(logger -s -t $(basename "$0")) 2>&1
+              exec {{ certificate_hook }}
+            dest: '{{ certificate_post_save_script }}'
+            mode: 0555
+            setype: certmonger_unconfined_exec_t
+
+        - name: set certmonger_unconfined_exec_t sefcontext on post-save script
+          sefcontext:
+            target: '{{ certificate_post_save_script }}'
+            state: present
+            setype: certmonger_unconfined_exec_t
+          tags: selinux
+          register: certificate_post_save_script_sefcontext
+
+        - name: apply selinux context to post-save script
+          command: restorecon {{ certificate_post_save_script | quote }}
+          when: certificate_post_save_script_sefcontext.changed
+          tags: selinux
+      when: certificate_hook is defined
+
+    - name: submit certificate request
+      command: >
+        ipa-getcert {{ 'resubmit' if certmonger_already_tracking.rc == 0 else 'request' }}
+        --certfile {{ certificate_path | quote }}
+        {% if certmonger_already_tracking.rc != 0 %}
+        --keyfile {{ certificate_key_path | quote }}
+        --key-type {{ certificate_type | quote }}
+        --key-size {{ certificate_size | quote }}
+        {% endif %}
+        --principal {{ certificate_service ~ '/' ~ ansible_fqdn | quote }}
+        --subject-name CN={{ ansible_fqdn | quote }}
+        {% for san in certificate_sans %}
+        --dns {{ san | quote }}
+        {% endfor %}
+        --cert-owner {{ certificate_owner | quote }}
+        --cert-perms {{ '0%0o' % certificate_mode }}
+        --key-owner {{ certificate_owner | quote }}
+        --key-perms {{ '0%0o' % certificate_mode }}
+        {% if certificate_key_passphrase is defined %}
+        --pin {{ certificate_key_passphrase | quote }}
+        {% endif %}
+        {% if certificate_hook is defined %}
+        --after-command {{ certificate_post_save_script | quote }}
+        {% endif %}
+
+    - name: wait for request to complete
+      command: ipa-getcert status --certfile {{ certificate_path | quote }}
+      register: certmonger_status
+      retries: 10
+      delay: 2
+      until: certmonger_status.rc == 0
+  when: certmonger_already_tracking.rc != 0 or certificate_resubmit
+
+- name: enable certmonger daemon
+  systemd:
+    name: certmonger
+    enabled: yes
+    state: started
diff --git a/roles/getcert_request/vars/main.yml b/roles/getcert_request/vars/main.yml
new file mode 100644
index 0000000..5cf6aff
--- /dev/null
+++ b/roles/getcert_request/vars/main.yml
@@ -0,0 +1 @@ +certificate_post_save_script: /etc/pki/tls/certmonger-postsave-{{ certificate_hook_name }}.sh diff --git a/roles/gitolite/defaults/main.yml b/roles/gitolite/defaults/main.yml new file mode 100644 index 0000000..3c50916 --- /dev/null +++ b/roles/gitolite/defaults/main.yml @@ -0,0 +1,7 @@ +gitolite_ssh_user: git +gitolite_admin_group: role-git-admin +gitolite_access_group: role-git-access +gitolite_anon_user: nobody +gitolite_freeipa_user: s-gitolite +gitolite_uid: 1993 +gitolite_archive_on_calendar: weekly diff --git a/roles/gitolite/handlers/main.yml b/roles/gitolite/handlers/main.yml new file mode 100644 index 0000000..18c505e --- /dev/null +++ b/roles/gitolite/handlers/main.yml @@ -0,0 +1,4 @@ +- name: restart sshd + systemd: + name: sshd + state: restarted diff --git a/roles/gitolite/meta/main.yml b/roles/gitolite/meta/main.yml new file mode 100644 index 0000000..29230f9 --- /dev/null +++ b/roles/gitolite/meta/main.yml @@ -0,0 +1,4 @@ +dependencies: + - role: yum + yum_repositories: epel + tags: yum diff --git a/roles/gitolite/tasks/freeipa.yml b/roles/gitolite/tasks/freeipa.yml new file mode 100644 index 0000000..f94b9e0 --- /dev/null +++ b/roles/gitolite/tasks/freeipa.yml @@ -0,0 +1,49 @@ +- name: create service account + ipauser: + ipaadmin_principal: '{{ ipa_user }}' + ipaadmin_password: '{{ ipa_pass }}' + name: '{{ gitolite_freeipa_user }}' + loginshell: /sbin/nologin + homedir: '{{ gitolite_home }}' + givenname: Gitolite + sn: Service Account + state: present + run_once: True + +- name: retrieve user keytab + include_role: + name: freeipa_keytab + vars: + keytab_principal: '{{ gitolite_freeipa_user }}' + keytab_path: '{{ gitolite_keytab }}' + +- name: configure gssproxy for kerberized LDAP + include_role: + name: gssproxy_client + vars: + gssproxy_priority: 51 + gssproxy_name: gitolite + gssproxy_section: service/gitolite + gssproxy_client_keytab: '{{ gitolite_keytab }}' + gssproxy_cred_usage: initiate + gssproxy_euid: '{{ gitolite_user }}' + +- name: create admin group + ipagroup: + ipaadmin_principal: '{{ ipa_user }}' + ipaadmin_password: '{{ ipa_pass }}' + name: '{{ gitolite_admin_group }}' + description: gitolite admins + nonposix: yes + state: present + run_once: True + +- name: create access group + ipagroup: + ipaadmin_principal: '{{ ipa_user }}' + ipaadmin_password: '{{ ipa_pass }}' + name: '{{ gitolite_access_group }}' + description: gitolite users + nonposix: yes + state: present + run_once: True diff --git a/roles/gitolite/tasks/main.yml b/roles/gitolite/tasks/main.yml new file mode 100644 index 0000000..8226557 --- /dev/null +++ b/roles/gitolite/tasks/main.yml @@ -0,0 +1,119 @@ +- name: install gitolite + dnf: + name: '{{ gitolite_packages }}' + state: present + +- import_tasks: freeipa.yml + +- name: disable gitolite user + user: + name: gitolite3 + shell: /sbin/nologin + +- name: get apache uid + getent: + database: passwd + key: '{{ gitolite_user }}' + +- name: create git ssh user + user: + name: '{{ gitolite_ssh_user }}' + comment: Git Pseudo-User + uid: '{{ ansible_facts.getent_passwd[gitolite_user][1] }}' + group: '{{ gitolite_user }}' + home: '{{ gitolite_home }}' + create_home: no + non_unique: yes + shell: '{{ gitolite_shell }}' + +- name: create git home + file: + path: '{{ gitolite_home }}' + mode: 0750 + owner: '{{ gitolite_user }}' + group: '{{ gitolite_user }}' + state: directory + setype: _default + +- name: copy gitolite wrapper script + template: + src: '{{ gitolite_cgi_script[1:] }}.j2' + dest: '{{ gitolite_cgi_script }}' + mode: 0555 + 
    setype: httpd_unconfined_script_exec_t
+  tags: selinux
+
+- name: set unconfined selinux context on gitolite wrapper
+  sefcontext:
+    target: '{{ gitolite_cgi_script }}'
+    setype: httpd_unconfined_script_exec_t
+    state: present
+  tags: selinux
+  register: gitolite_cgi_sefcontext
+
+- name: apply selinux context to gitolite wrapper
+  command: 'restorecon -R {{ gitolite_cgi_script }}'
+  when: gitolite_cgi_sefcontext.changed
+  tags: selinux
+
+- name: generate gitolite scripts
+  template:
+    src: '{{ item[1:] }}.j2'
+    dest: '{{ item }}'
+    mode: 0555
+  loop:
+    - '{{ gitolite_groups_script }}'
+    - '{{ gitolite_authorizedkeys_script }}'
+
+- import_tasks: sshd.yml
+
+- name: create SELinux policy for gitolite
+  include_role:
+    name: selinux_policy
+    apply:
+      tags: selinux
+  vars:
+    selinux_policy_name: gitolite_sshd_httpd
+    selinux_policy_te: '{{ gitolite_selinux_policy_te }}'
+  tags: selinux
+
+- name: generate gitolite.rc
+  template:
+    src: '{{ gitolite_home[1:] }}/.gitolite.rc.j2'
+    dest: '{{ gitolite_home }}/.gitolite.rc'
+    owner: '{{ gitolite_user }}'
+    group: '{{ gitolite_user }}'
+    mode: 0600
+    setype: _default
+
+- name: create gitolite config directories
+  file:
+    path: '{{ gitolite_home }}/{{ item }}'
+    state: directory
+    owner: '{{ gitolite_user }}'
+    group: '{{ gitolite_user }}'
+    mode: 0750
+    setype: _default
+  loop:
+    - .gitolite
+    - .gitolite/conf
+    - .gitolite/logs
+
+- name: create initial gitolite.conf
+  template:
+    src: '{{ gitolite_home[1:] }}/.gitolite/conf/gitolite.conf.j2'
+    dest: '{{ gitolite_home }}/.gitolite/conf/gitolite.conf'
+    owner: '{{ gitolite_user }}'
+    group: '{{ gitolite_user }}'
+    mode: 0640
+    force: no
+
+- name: initialize gitolite
+  command:
+    cmd: gitolite setup
+    chdir: '{{ gitolite_home }}'
+    creates: '{{ gitolite_home }}/.gitolite/conf/gitolite.conf-compiled.pm'
+  environment:
+    HOME: '{{ gitolite_home }}'
+  become: yes
+  become_user: '{{ gitolite_user }}'
diff --git a/roles/gitolite/tasks/sshd.yml b/roles/gitolite/tasks/sshd.yml
new file mode 100644
index 0000000..37a74e4
--- /dev/null
+++ b/roles/gitolite/tasks/sshd.yml
@@ -0,0 +1,24 @@
+# TODO: sshd_config.d is included by default starting with EL9
+- name: create sshd config directory
+  file:
+    path: /etc/ssh/sshd_config.d
+    state: directory
+
+- name: add sshd include directive
+  lineinfile:
+    path: /etc/ssh/sshd_config
+    line: Include sshd_config.d/*
+    insertafter: EOF
+
+- name: generate sshd configuration for gitolite
+  template:
+    src: etc/ssh/sshd_config.d/gitolite.conf.j2
+    dest: /etc/ssh/sshd_config.d/gitolite.conf
+  notify: restart sshd
+
+- name: allow sshd to query ldap
+  seboolean:
+    name: authlogin_nsswitch_use_ldap
+    state: yes
+    persistent: yes
+  tags: selinux
diff --git a/roles/gitolite/templates/etc/ssh/sshd_config.d/gitolite.conf.j2 b/roles/gitolite/templates/etc/ssh/sshd_config.d/gitolite.conf.j2
new file mode 100644
index 0000000..38da41f
--- /dev/null
+++ b/roles/gitolite/templates/etc/ssh/sshd_config.d/gitolite.conf.j2
@@ -0,0 +1,4 @@
+Match User {{ gitolite_ssh_user }}
+    AuthenticationMethods "publickey"
+    AuthorizedKeysCommand {{ gitolite_authorizedkeys_script }}
+    AuthorizedKeysCommandUser {{ gitolite_user }}
diff --git a/roles/gitolite/templates/usr/local/bin/gitolite-authorizedkeys.j2 b/roles/gitolite/templates/usr/local/bin/gitolite-authorizedkeys.j2
new file mode 100644
index 0000000..23bfee9
--- /dev/null
+++ b/roles/gitolite/templates/usr/local/bin/gitolite-authorizedkeys.j2
@@ -0,0 +1,37 @@
+#!/usr/libexec/platform-python
+
+import os
+import ldap
+import ldap.sasl
+import ldap.filter + +GITOLITE_ACCESS_GROUP = '{{ gitolite_access_group }}' +GITOLITE_ADMIN_GROUP = '{{ gitolite_admin_group }}' +GITOLITE_SHELL = '{{ gitolite_shell }}' + +LDAP_URI = '{{ freeipa_ldap_uri }}' +USER_BASEDN = '{{ freeipa_user_basedn }}' +GROUP_BASEDN = '{{ freeipa_group_basedn }}' + +GITOLITE_KEY_TEMPLATE = 'command="{shell} {uid}",no-port-forwarding,no-X11-forwarding,no-agent-forwarding,no-pty {pubkey}' + +os.environ['GSS_USE_PROXY'] = 'yes' +conn = ldap.initialize(LDAP_URI) +conn.protocol_version = ldap.VERSION3 +conn.sasl_interactive_bind_s('', ldap.sasl.sasl({}, 'GSSAPI')) + +filter = ldap.filter.filter_format( + '(&(ipaSshPubKey=*)(|(memberOf=cn=%s,%s)(memberOf=cn=%s,%s)))', + [GITOLITE_ADMIN_GROUP, GROUP_BASEDN, GITOLITE_ACCESS_GROUP, GROUP_BASEDN]) + +results = conn.search_s( + USER_BASEDN, + ldap.SCOPE_SUBTREE, + filter, + ['uid', 'ipaSshPubKey']) + +for (dn, attributes) in results: + uid = attributes['uid'][0].decode('utf-8') + for pubkey in [pk.decode('utf-8') for pk in attributes['ipaSshPubKey']]: + if pubkey.startswith('ssh-'): + print(GITOLITE_KEY_TEMPLATE.format(shell=GITOLITE_SHELL, uid=uid, pubkey=pubkey)) diff --git a/roles/gitolite/templates/usr/local/bin/gitolite-grouplist.j2 b/roles/gitolite/templates/usr/local/bin/gitolite-grouplist.j2 new file mode 100644 index 0000000..2060620 --- /dev/null +++ b/roles/gitolite/templates/usr/local/bin/gitolite-grouplist.j2 @@ -0,0 +1,42 @@ +#!/usr/libexec/platform-python + +import os +import sys +import ldap +import ldap.sasl +import ldap.filter + +LDAP_URI = '{{ freeipa_ldap_uri }}' +USER_BASEDN = '{{ freeipa_user_basedn }}' +GROUP_BASEDN = '{{ freeipa_group_basedn }}' + +if len(sys.argv) != 2: + sys.exit('must specify one username') + +if sys.argv[1] == 'nobody': + exit(0) + +os.environ['GSS_USE_PROXY'] = 'yes' +conn = ldap.initialize(LDAP_URI) +conn.protocol_version = ldap.VERSION3 +conn.sasl_interactive_bind_s('', ldap.sasl.sasl({}, 'GSSAPI')) + +user = conn.search_s( + USER_BASEDN, + ldap.SCOPE_SUBTREE, + ldap.filter.filter_format('uid=%s', [sys.argv[1]]), + ['memberOf']) + +if not user: + exit(1) + +groups = [] + +for group_dn in [ldap.dn.explode_dn(dn) for dn in user[0][1]['memberOf']]: + if ','.join(group_dn[1:]) == GROUP_BASEDN: + rdn = ldap.dn.str2dn(group_dn[0])[0][0] + if rdn[0] == 'cn': + # replace whitespace with underscore + groups.append('_'.join(rdn[1].split())) + +print(' '.join(groups)) diff --git a/roles/gitolite/templates/var/www/cgi-bin/gitolite-wrapper.j2 b/roles/gitolite/templates/var/www/cgi-bin/gitolite-wrapper.j2 new file mode 100644 index 0000000..38dc426 --- /dev/null +++ b/roles/gitolite/templates/var/www/cgi-bin/gitolite-wrapper.j2 @@ -0,0 +1,14 @@ +#!/bin/bash + +# Strip realm from REMOTE_USER. 
+# This is a hack around GssapiLocalName not working on RHEL 8: +# https://bugzilla.redhat.com/show_bug.cgi?id=1787630 +if [ -v REMOTE_USER ]; then + export REMOTE_USER=${REMOTE_USER%@*} +fi + +export GIT_PROJECT_ROOT='{{ gitolite_home }}/repositories' +export GITOLITE_HTTP_HOME='{{ gitolite_home }}' +export GIT_HTTP_EXPORT_ALL=1 + +exec {{ gitolite_shell }} diff --git a/roles/gitolite/templates/var/www/git/.gitolite.rc.j2 b/roles/gitolite/templates/var/www/git/.gitolite.rc.j2 new file mode 100644 index 0000000..b78ca08 --- /dev/null +++ b/roles/gitolite/templates/var/www/git/.gitolite.rc.j2 @@ -0,0 +1,28 @@ +$ENV{PATH} .= ":{{ gitolite_home }}/bin"; + +%RC = ( + UMASK => 0027, + GIT_CONFIG_KEYS => '.*', + LOG_DEST => 'syslog', + ROLES => { + READERS => 1, + WRITERS => 1, + }, + ENABLE => [ + 'help', + 'desc', + 'info', + 'perms', + 'writable', + 'D', + 'git-config', + 'gitweb', + 'set-default-roles', + 'upstream', + 'cgit', + ], + GROUPLIST_PGM => '{{ gitolite_groups_script }}', + HTTP_ANON_USER => '{{ gitolite_anon_user }}', +); + +1; diff --git a/roles/gitolite/templates/var/www/git/.gitolite/conf/gitolite.conf.j2 b/roles/gitolite/templates/var/www/git/.gitolite/conf/gitolite.conf.j2 new file mode 100644 index 0000000..7fc1d59 --- /dev/null +++ b/roles/gitolite/templates/var/www/git/.gitolite/conf/gitolite.conf.j2 @@ -0,0 +1,11 @@ +repo gitolite-admin + RW+ = @{{ gitolite_admin_group }} + +repo CREATOR/[A-Za-z0-9/_-]+ + C = @{{ gitolite_admin_group }} @{{ gitolite_access_group }} + RW+ = CREATOR + RW = WRITERS + R = READERS + option default.roles-1 = READERS @all + config gitweb.owner = %GL_CREATOR + config gitweb.category = user repositories diff --git a/roles/gitolite/vars/main.yml b/roles/gitolite/vars/main.yml new file mode 100644 index 0000000..4c3058a --- /dev/null +++ b/roles/gitolite/vars/main.yml @@ -0,0 +1,40 @@ +gitolite_packages: + - httpd + - gitolite3 + - perl-Sys-Syslog + +gitolite_user: apache +gitolite_home: /var/www/git + +gitolite_shell: /usr/share/gitolite3/gitolite-shell +gitolite_cgi_script: /var/www/cgi-bin/gitolite-wrapper +gitolite_groups_script: /usr/local/bin/gitolite-grouplist +gitolite_authorizedkeys_script: /usr/local/bin/gitolite-authorizedkeys + +gitolite_keytab: /var/lib/gssproxy/clients/{{ gitolite_freeipa_user }}.keytab + +gitolite_selinux_policy_te: | + require { + type gssproxy_t; + type gssproxy_var_lib_t; + type sshd_t; + type httpd_t; + type httpd_unconfined_script_t; + class key { read view write }; + class sock_file write; + class unix_stream_socket { connectto }; + } + + #============= sshd_t ============== + allow sshd_t gssproxy_t:unix_stream_socket connectto; + allow sshd_t gssproxy_var_lib_t:sock_file write; + + #============= httpd_t ============== + allow httpd_t httpd_unconfined_script_t:key { read view }; + allow httpd_t sshd_t:key { read view write }; + +gitolite_archive_shell: >- + TIMESTAMP=$(date +%Y%m%d%H%M%S); + tar czf "gitolite-${TIMESTAMP}.tar.gz" + --transform "s|^\.|gitolite-${TIMESTAMP}|" + -C "{{ gitolite_home }}" . 
diff --git a/roles/grub/defaults/main.yml b/roles/grub/defaults/main.yml new file mode 100644 index 0000000..9f919d4 --- /dev/null +++ b/roles/grub/defaults/main.yml @@ -0,0 +1,2 @@ +# grub_cmdline: quiet ro +grub_timeout: 1 diff --git a/roles/grub/tasks/main.yml b/roles/grub/tasks/main.yml new file mode 100644 index 0000000..ea82ab1 --- /dev/null +++ b/roles/grub/tasks/main.yml @@ -0,0 +1,23 @@ +- name: set grub timeout + lineinfile: + path: /etc/default/grub + regexp: ^GRUB_TIMEOUT= + line: 'GRUB_TIMEOUT={{ grub_timeout }}' + register: grub_timeout_line + +- name: set kernel cmdline + lineinfile: + path: /etc/default/grub + regexp: ^GRUB_CMDLINE_LINUX= + line: 'GRUB_CMDLINE_LINUX="{{ grub_cmdline }}"' + when: grub_cmdline is defined + register: grub_cmdline_line + +- name: rebuild grub config + command: grub2-mkconfig -o /boot/grub2/grub.cfg + when: grub_timeout_line.changed or grub_cmdline_line.changed + +- name: warn if reboot needed + debug: + msg: A reboot is required for changes to kernel cmdline to take effect. + when: grub_cmdline_line.changed diff --git a/roles/gssproxy_client/defaults/main.yml b/roles/gssproxy_client/defaults/main.yml new file mode 100644 index 0000000..01d4e76 --- /dev/null +++ b/roles/gssproxy_client/defaults/main.yml @@ -0,0 +1,8 @@ +# gssproxy_section: service/name +# gssproxy_client_keytab: path/to/client/keytab +# gssproxy_keytab: path/to/keytab +# gssproxy_cred_usage: initiate +# gssproxy_euid: apache +# gssproxy_program: /usr/sbin/httpd +gssproxy_priority: 50 +gssproxy_cred_usage: both diff --git a/roles/gssproxy_client/tasks/main.yml b/roles/gssproxy_client/tasks/main.yml new file mode 100644 index 0000000..656b92a --- /dev/null +++ b/roles/gssproxy_client/tasks/main.yml @@ -0,0 +1,17 @@ +- name: generate gssproxy configuration + template: + src: etc/gssproxy/client.conf.j2 + dest: /etc/gssproxy/{{ gssproxy_priority }}-{{ gssproxy_name }}.conf + register: gssproxy_config + +- name: enable gssproxy + systemd: + name: gssproxy + enabled: yes + state: started + +- name: restart gssproxy + systemd: + name: gssproxy + state: restarted + when: gssproxy_config.changed diff --git a/roles/gssproxy_client/templates/etc/gssproxy/client.conf.j2 b/roles/gssproxy_client/templates/etc/gssproxy/client.conf.j2 new file mode 100644 index 0000000..c3725f7 --- /dev/null +++ b/roles/gssproxy_client/templates/etc/gssproxy/client.conf.j2 @@ -0,0 +1,16 @@ +[{{ gssproxy_section }}] +mechs = krb5 +cred_store = ccache:FILE:/var/lib/gssproxy/clients/krb5cc_%u +{% if gssproxy_client_keytab is defined %} +cred_store = client_keytab:{{ gssproxy_client_keytab }} +{% endif %} +{% if gssproxy_keytab is defined %} +cred_store = keytab:{{ gssproxy_keytab }} +{% endif %} +cred_usage = {{ gssproxy_cred_usage }} +{% if gssproxy_euid is defined %} +euid = {{ gssproxy_euid }} +{% endif %} +{% if gssproxy_program is defined %} +program = {{ gssproxy_program }} +{% endif %} diff --git a/roles/hastebin/defaults/main.yml b/roles/hastebin/defaults/main.yml new file mode 100644 index 0000000..adbe279 --- /dev/null +++ b/roles/hastebin/defaults/main.yml @@ -0,0 +1,9 @@ +hastebin_version: master +hastebin_server_aliases: [] +hastebin_letsencrypt: no +hastebin_upload_cidrs: [] +hastebin_port: 8080 + +hastebin_expire_days: 0 + +hastebin_user: hastebin diff --git a/roles/hastebin/files/var/lib/hastebin/haste-server/static/index.html b/roles/hastebin/files/var/lib/hastebin/haste-server/static/index.html new file mode 100644 index 0000000..e7d71c1 --- /dev/null +++ 
b/roles/hastebin/files/var/lib/hastebin/haste-server/static/index.html
@@ -0,0 +1,70 @@
[70 added lines: custom hastebin landing page (static/index.html); the HTML markup was not preserved in this copy of the patch, only the page title "hastebin" remains]
+ + + + + + diff --git a/roles/hastebin/handlers/main.yml b/roles/hastebin/handlers/main.yml new file mode 100644 index 0000000..2dd7dad --- /dev/null +++ b/roles/hastebin/handlers/main.yml @@ -0,0 +1,4 @@ +- name: restart hastebin + systemd: + name: hastebin + state: restarted diff --git a/roles/hastebin/tasks/main.yml b/roles/hastebin/tasks/main.yml new file mode 100644 index 0000000..75f4cba --- /dev/null +++ b/roles/hastebin/tasks/main.yml @@ -0,0 +1,119 @@ +- name: install packages + dnf: + name: '{{ hastebin_packages }}' + state: present + +- name: create local user + user: + name: '{{ hastebin_user }}' + system: yes + home: '{{ hastebin_home }}' + shell: /sbin/nologin + create_home: no + +- name: create home directory + file: + path: '{{ item }}' + owner: '{{ hastebin_user }}' + group: '{{ hastebin_user }}' + mode: 0700 + state: directory + loop: + - '{{ hastebin_home }}' + - '{{ hastebin_data_dir }}' + +- name: disable npm package lock + lineinfile: + regexp: ^package-lock= + line: package-lock=false + path: '{{ hastebin_home }}/.npmrc' + create: yes + owner: '{{ hastebin_user }}' + group: '{{ hastebin_user }}' + mode: 0600 + state: present + +- name: clone git repository + git: + repo: '{{ hastebin_git_repo }}' + dest: '{{ hastebin_install_dir }}' + version: '{{ hastebin_version }}' + force: yes + update: yes + become: yes + become_user: '{{ hastebin_user }}' + register: hastebin_git + notify: restart hastebin + +- name: install npm dependencies + npm: + path: '{{ hastebin_install_dir }}' + production: yes + no_optional: yes + become: yes + become_user: '{{ hastebin_user }}' + when: hastebin_git.changed + notify: restart hastebin + +- name: create systemd unit + template: + src: etc/systemd/system/hastebin.service.j2 + dest: /etc/systemd/system/hastebin.service + register: hastebin_unit + notify: restart hastebin + +- name: reload systemd daemons + systemd: + daemon_reload: yes + when: hastebin_unit.changed + +- name: generate config file + template: + src: '{{ hastebin_install_dir[1:] }}/config.js.j2' + dest: '{{ hastebin_install_dir }}/config.js' + owner: '{{ hastebin_user }}' + group: '{{ hastebin_user }}' + mode: 0600 + notify: restart hastebin + +- name: copy custom index.html + copy: + src: '{{ hastebin_install_dir[1:] }}/static/index.html' + dest: '{{ hastebin_install_dir }}/static/index.html' + owner: '{{ hastebin_user }}' + group: '{{ hastebin_user }}' + mode: 0644 + +- name: download jquery + get_url: + url: '{{ hastebin_jquery_url }}' + dest: '{{ hastebin_install_dir }}/static/jquery.min.js' + owner: '{{ hastebin_user }}' + group: '{{ hastebin_user }}' + mode: 0644 + +- name: start hastebin + systemd: + name: hastebin + enabled: yes + state: started + +- name: set http_port_t selinux context for hastebin port + seport: + ports: '{{ hastebin_port }}' + proto: tcp + setype: http_port_t + state: present + tags: selinux + +- name: create hastebin-cleanup timer + include_role: + name: systemd_timer + vars: + timer_name: hastebin-cleanup + timer_description: Delete expired hastebin files + timer_after: nss-user-lookup.target + timer_on_calendar: daily + timer_user: '{{ hastebin_user }}' + timer_exec: find {{ hastebin_data_dir }} -type f -mtime +{{ hastebin_expire_days }} -exec rm -v {} + + timer_enabled: '{{ true if hastebin_expire_days > 0 else false }}' diff --git a/roles/hastebin/templates/etc/systemd/system/hastebin.service.j2 b/roles/hastebin/templates/etc/systemd/system/hastebin.service.j2 new file mode 100644 index 0000000..22a2a2d --- /dev/null +++ 
b/roles/hastebin/templates/etc/systemd/system/hastebin.service.j2 @@ -0,0 +1,35 @@ +[Unit] +Description=hastebin paste server +After=network.target +AssertPathExists={{ hastebin_install_dir }} + +[Service] +Type=simple +Environment="LISTEN_ADDRESS=127.0.0.1" +Environment="NODE_ENV=production" +EnvironmentFile=-/etc/sysconfig/hastebin +ExecStart=/usr/bin/node server.js +WorkingDirectory={{ hastebin_install_dir }} +User={{ hastebin_user }} +Group={{ hastebin_user }} +Restart=on-failure + +# See https://www.freedesktop.org/software/systemd/man/systemd.exec.html +# for details +DevicePolicy=closed +NoNewPrivileges=yes +PrivateDevices=yes +PrivateTmp=yes +ProtectControlGroups=yes +ProtectKernelModules=yes +ProtectKernelTunables=yes +RestrictAddressFamilies=AF_UNIX AF_INET AF_INET6 +RestrictNamespaces=yes +RestrictRealtime=yes +SystemCallFilter=~@clock @debug @module @mount @obsolete @privileged @reboot @setuid @swap + +ProtectSystem=full +ProtectHome=true + +[Install] +WantedBy=multi-user.target diff --git a/roles/hastebin/templates/var/lib/hastebin/haste-server/config.js.j2 b/roles/hastebin/templates/var/lib/hastebin/haste-server/config.js.j2 new file mode 100644 index 0000000..dcd7668 --- /dev/null +++ b/roles/hastebin/templates/var/lib/hastebin/haste-server/config.js.j2 @@ -0,0 +1,32 @@ +{ + "host": "127.0.0.1", + "port": {{ hastebin_port }}, + "keyLength": 10, + "maxLength": 400000, + "staticMaxAge": 86400, + "recompressStaticAssets": true, + "logging": [ + { + "level": "verbose", + "type": "Console", + "colorize": false + } + ], + "keyGenerator": { + "type": "random" + }, + "rateLimits": { + "categories": { + "normal": { + "totalRequests": 500, + "every": 60000 + } + } + }, + "storage": { + "type": "file", + "path": "{{ hastebin_data_dir }}" + }, + "documents": { + } +} diff --git a/roles/hastebin/vars/main.yml b/roles/hastebin/vars/main.yml new file mode 100644 index 0000000..cfb474b --- /dev/null +++ b/roles/hastebin/vars/main.yml @@ -0,0 +1,30 @@ +hastebin_packages: + - git + - nodejs + +hastebin_home: /var/lib/hastebin +hastebin_install_dir: '{{ hastebin_home }}/haste-server' +hastebin_data_dir: '{{ hastebin_home }}/data' +hastebin_git_repo: https://github.com/toptal/haste-server +hastebin_keytab: /var/lib/gssproxy/clients/{{ hastebin_user }}.keytab + +hastebin_jquery_url: https://code.jquery.com/jquery-1.7.1.min.js + +hastebin_archive_shell: >- + TIMESTAMP=$(date +%Y%m%d%H%M%S); + tar czf "hastebin-${TIMESTAMP}.tar.gz" + --transform "s|^\.|hastebin-${TIMESTAMP}|" + -C "{{ hastebin_data_dir }}" . 
+ +hastebin_apache_config: | + {{ apache_proxy_config }} + ProxyPass / http://127.0.0.1:{{ hastebin_port }}/ + ProxyPassReverse / http://127.0.0.1:{{ hastebin_port }}/ + + + + {% for cidr in hastebin_upload_cidrs %} + Require ip {{ cidr }} + {% endfor %} + + diff --git a/roles/hostname/defaults/main.yml b/roles/hostname/defaults/main.yml new file mode 100644 index 0000000..c884565 --- /dev/null +++ b/roles/hostname/defaults/main.yml @@ -0,0 +1,3 @@ +hostname_fqdn: '{{ fqdn }}' +hostname_short: '{{ inventory_hostname }}' +hostname_ip: '{{ ip }}' diff --git a/roles/hostname/tasks/main.yml b/roles/hostname/tasks/main.yml new file mode 100644 index 0000000..f24d865 --- /dev/null +++ b/roles/hostname/tasks/main.yml @@ -0,0 +1,18 @@ +- name: set hostname + hostname: + name: '{{ hostname_fqdn }}' + register: hostname + +- name: update /etc/hosts + template: + src: etc/hosts.j2 + dest: /etc/hosts + register: hosts_file + +- name: gather hostname facts + setup: + filter: + - ansible_fqdn + - ansible_hostname + - ansible_domain + when: hostname.changed or hosts_file.changed diff --git a/roles/hostname/templates/etc/hosts.j2 b/roles/hostname/templates/etc/hosts.j2 new file mode 100644 index 0000000..8609417 --- /dev/null +++ b/roles/hostname/templates/etc/hosts.j2 @@ -0,0 +1,3 @@ +127.0.0.1 localhost localhost.localdomain localhost4 localhost4.localdomain4 +::1 localhost localhost.localdomain localhost6 localhost6.localdomain6 +{{ hostname_ip }} {{ hostname_fqdn }} {{ hostname_short }} diff --git a/roles/invidious/defaults/main.yml b/roles/invidious/defaults/main.yml new file mode 100644 index 0000000..01ad9b1 --- /dev/null +++ b/roles/invidious/defaults/main.yml @@ -0,0 +1,38 @@ +invidious_version: master +invidious_crystal_version: 1.5.0-1 + +invidious_server_name: '{{ ansible_fqdn }}' +invidious_port: 8080 +invidious_user: invidious +invidious_db_user: s-invidious +invidious_db_name: invidious +invidious_db_host: '{{ postgresql_host }}' + +invidious_db_cleanup_on_calendar: weekly +invidious_update_on_calendar: weekly + +invidious_channel_threads: 1 +invidious_feed_threads: 1 +invidious_admin_email: 'root@{{ email_domain }}' +invidious_registration_enabled: yes +invidious_popular_enabled: no +invidious_full_refresh: no +invidious_hmac_key: secretKey +invidious_use_pubsub_feeds: no + +invidious_default_locale: en-US +invidious_default_region: US +invidious_default_dark_mode: auto +invidious_default_autoplay: no +invidious_default_continue: yes +invidious_default_continue_autoplay: no +invidious_default_local: yes +invidious_default_quality: dash +invidious_default_quality_dash: 1080p +invidious_default_related_videos: yes +invidious_default_video_loop: no +invidious_default_player_style: invidious +invidious_default_home: Subscriptions +invidious_feed_menu: + - Subscriptions + - Playlists diff --git a/roles/invidious/handlers/main.yml b/roles/invidious/handlers/main.yml new file mode 100644 index 0000000..150e02c --- /dev/null +++ b/roles/invidious/handlers/main.yml @@ -0,0 +1,4 @@ +- name: restart invidious + systemd: + name: invidious + state: restarted diff --git a/roles/invidious/meta/main.yml b/roles/invidious/meta/main.yml new file mode 100644 index 0000000..29230f9 --- /dev/null +++ b/roles/invidious/meta/main.yml @@ -0,0 +1,4 @@ +dependencies: + - role: yum + yum_repositories: epel + tags: yum diff --git a/roles/invidious/tasks/database.yml b/roles/invidious/tasks/database.yml new file mode 100644 index 0000000..d8a44e1 --- /dev/null +++ b/roles/invidious/tasks/database.yml @@ -0,0 +1,69 @@ 
+- name: create postgresql database
+  postgresql_db:
+    name: '{{ invidious_db_name }}'
+    state: present
+  delegate_to: "{{ postgresql_host.split('.')[0] }}"
+  become: True
+  become_user: postgres
+
+- name: create postgresql user
+  postgresql_user:
+    name: '{{ invidious_db_user }}'
+    db: '{{ invidious_db_name }}'
+    password: '{{ invidious_db_password }}'
+    priv: ALL
+    state: present
+  environment:
+    PGOPTIONS: "-c password_encryption=scram-sha-256"
+  delegate_to: "{{ postgresql_host.split('.')[0] }}"
+  become: True
+  become_user: postgres
+
+- name: check if database schema is initialized
+  postgresql_query:
+    login_user: '{{ invidious_db_user }}'
+    login_password: '{{ invidious_db_password }}'
+    login_host: '{{ invidious_db_host }}'
+    db: '{{ invidious_db_name }}'
+    query: SELECT 1 FROM channels LIMIT 1
+  register: invidious_check_db
+  failed_when: false
+
+- name: initialize database schema
+  postgresql_query:
+    login_user: '{{ invidious_db_user }}'
+    login_password: '{{ invidious_db_password }}'
+    login_host: '{{ invidious_db_host }}'
+    db: '{{ invidious_db_name }}'
+    path_to_script: '{{ invidious_install_dir }}/config/sql/{{ item }}.sql'
+    as_single_query: yes
+  loop: '{{ invidious_schema_files }}'
+  when:
+    - invidious_check_db.msg is defined
+    - invidious_check_db.msg is search('relation "channels" does not exist')
+
+- name: create pgpass file
+  copy:
+    content: |
+      {{ invidious_db_host }}:*:{{ invidious_db_name }}:{{ invidious_db_user }}:{{ invidious_db_password }}
+    dest: '{{ invidious_home }}/.pgpass'
+    mode: 0600
+    owner: '{{ invidious_user }}'
+    group: '{{ invidious_user }}'
+
+- name: generate database cleanup script
+  template:
+    src: '{{ invidious_home[1:] }}/invidious-db-cleanup.sh.j2'
+    dest: '{{ invidious_home }}/invidious-db-cleanup.sh'
+    mode: 0555
+
+- name: set up invidious-db-cleanup timer
+  include_role:
+    name: systemd_timer
+  vars:
+    timer_name: invidious-db-cleanup
+    timer_description: Prune invidious database
+    timer_after: network.target
+    timer_user: '{{ invidious_user }}'
+    timer_on_calendar: '{{ invidious_db_cleanup_on_calendar }}'
+    timer_exec: '{{ invidious_home }}/invidious-db-cleanup.sh'
diff --git a/roles/invidious/tasks/main.yml b/roles/invidious/tasks/main.yml
new file mode 100644
index 0000000..4a2cf0c
--- /dev/null
+++ b/roles/invidious/tasks/main.yml
@@ -0,0 +1,116 @@
+- name: install dependencies
+  dnf:
+    name: '{{ invidious_packages }}'
+    state: present
+
+- name: create crystal directory
+  file:
+    path: '{{ invidious_crystal_install_dir }}'
+    state: directory
+
+- name: download crystal-lang
+  unarchive:
+    src: '{{ invidious_crystal_url }}'
+    dest: '{{ invidious_crystal_install_dir }}'
+    remote_src: yes
+    extra_opts: --strip-components=1
+
+- name: create local user
+  user:
+    name: '{{ invidious_user }}'
+    system: yes
+    home: '{{ invidious_home }}'
+    shell: /sbin/nologin
+    create_home: no
+
+- name: create home directory
+  file:
+    path: '{{ invidious_home }}'
+    owner: '{{ invidious_user }}'
+    group: '{{ invidious_user }}'
+    mode: 0755
+    state: directory
+
+- name: clone repo
+  git:
+    repo: '{{ invidious_git_repo }}'
+    dest: '{{ invidious_install_dir }}'
+    version: '{{ invidious_version }}'
+    update: yes
+    force: yes
+  become: yes
+  become_user: '{{ invidious_user }}'
+  register: invidious_git
+
+- name: build invidious
+  command:
+    cmd: '{{ item }}'
+    chdir: '{{ invidious_install_dir }}'
+  environment:
+    PATH: /usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:{{ invidious_crystal_install_dir }}/bin
+  loop:
+    - shards install --production
+
- crystal build src/invidious.cr -Ddisable_quic --release + when: invidious_git.changed + become: yes + become_user: '{{ invidious_user }}' + +- name: create systemd unit + template: + src: etc/systemd/system/invidious.service.j2 + dest: /etc/systemd/system/invidious.service + register: invidious_unit + notify: restart invidious + +- name: reload systemd daemons + systemd: + daemon_reload: yes + when: invidious_unit.changed + +- name: generate config file + template: + src: '{{ invidious_install_dir[1:] }}/config/config.yml.j2' + dest: '{{ invidious_install_dir }}/config/config.yml' + owner: '{{ invidious_user }}' + group: '{{ invidious_user }}' + mode: 0600 + notify: restart invidious + +- import_tasks: database.yml + +- name: allow apache to connect to invidious port + seport: + ports: '{{ invidious_port }}' + proto: tcp + setype: http_port_t + state: present + tags: selinux + +- name: start invidious + systemd: + name: invidious + enabled: yes + state: started + +- name: generate update script + template: + src: '{{ invidious_home[1:] }}/invidious-update.sh.j2' + dest: '{{ invidious_home }}/invidious-update.sh' + mode: 0555 + +- name: create invidious-update timer + include_role: + name: systemd_timer + vars: + timer_name: invidious-update + timer_description: Update invidious + timer_after: network.target + timer_on_calendar: '{{ invidious_update_on_calendar }}' + timer_exec: '{{ invidious_home }}/invidious-update.sh' + +- name: restart invidious daily at 3:30am + cron: + name: restart invidious + minute: 30 + hour: 3 + job: systemctl restart invidious diff --git a/roles/invidious/templates/etc/systemd/system/invidious.service.j2 b/roles/invidious/templates/etc/systemd/system/invidious.service.j2 new file mode 100644 index 0000000..c711185 --- /dev/null +++ b/roles/invidious/templates/etc/systemd/system/invidious.service.j2 @@ -0,0 +1,32 @@ +[Unit] +Description=Invidious youtube client +After=network.target +AssertPathExists={{ invidious_install_dir }} + +[Service] +Type=simple +ExecStart={{ invidious_install_dir }}/invidious +User={{ invidious_user }} +Group={{ invidious_user }} +WorkingDirectory={{ invidious_install_dir }} +Restart=always + +# See https://www.freedesktop.org/software/systemd/man/systemd.exec.html +# for details +DevicePolicy=closed +NoNewPrivileges=yes +PrivateDevices=yes +PrivateTmp=yes +ProtectControlGroups=yes +ProtectKernelModules=yes +ProtectKernelTunables=yes +RestrictAddressFamilies=AF_UNIX AF_INET AF_INET6 +RestrictNamespaces=yes +RestrictRealtime=yes +SystemCallFilter=~@clock @debug @module @mount @obsolete @privileged @reboot @setuid @swap + +ProtectSystem=full +ProtectHome=true + +[Install] +WantedBy=multi-user.target diff --git a/roles/invidious/templates/opt/invidious/invidious-db-cleanup.sh.j2 b/roles/invidious/templates/opt/invidious/invidious-db-cleanup.sh.j2 new file mode 100644 index 0000000..1c9ffe1 --- /dev/null +++ b/roles/invidious/templates/opt/invidious/invidious-db-cleanup.sh.j2 @@ -0,0 +1,11 @@ +#!/bin/bash + +set -eu + +PG_HOST={{ invidious_db_host | quote }} +PG_USER={{ invidious_db_user | quote }} +PG_NAME={{ invidious_db_name | quote }} + +export PGPASSFILE={{ invidious_home | quote }}/.pgpass + +psql -qtA -h "$PG_HOST" "$PG_NAME" "$PG_USER" -c {{ invidious_db_cleanup_sql | join('; ') | quote }} diff --git a/roles/invidious/templates/opt/invidious/invidious-update.sh.j2 b/roles/invidious/templates/opt/invidious/invidious-update.sh.j2 new file mode 100644 index 0000000..561e45e --- /dev/null +++ 
b/roles/invidious/templates/opt/invidious/invidious-update.sh.j2 @@ -0,0 +1,42 @@ +#!/usr/bin/env bash + +set -eu + +SRCDIR={{ invidious_install_dir | quote }} +INVIDIOUS_USER={{ invidious_user | quote }} +CRYSTAL_HOME={{ invidious_crystal_install_dir | quote }} + +export PATH="${PATH}:${CRYSTAL_HOME}/bin" + +as-invidious() { + runuser -u "$INVIDIOUS_USER" -- "$@" +} + +if (( $EUID != 0 )); then + echo 'must be superuser' 1>&2 + exit 1 +fi + +cd "$SRCDIR" + +old_rev=$(git rev-list --max-count=1 --abbrev-commit HEAD) + +as-invidious git fetch + +local_rev=$(git rev-parse HEAD) +upstream_rev=$(git rev-parse '@{u}') + +echo "local: $local_rev" +echo "upstream: $upstream_rev" + +if [ "$local_rev" != "$upstream_rev" ]; then + as-invidious git pull --ff-only + + echo "building invidious..." + as-invidious shards install --production + as-invidious crystal build src/invidious.cr -Ddisable_quic --release + + systemctl restart invidious +else + echo "invidious is already up to date" +fi diff --git a/roles/invidious/templates/opt/invidious/invidious/config/config.yml.j2 b/roles/invidious/templates/opt/invidious/invidious/config/config.yml.j2 new file mode 100644 index 0000000..e74caee --- /dev/null +++ b/roles/invidious/templates/opt/invidious/invidious/config/config.yml.j2 @@ -0,0 +1,34 @@ +log_level: warn +domain: {{ invidious_server_name }} +external_port: 443 +channel_threads: {{ invidious_channel_threads }} +feed_threads: {{ invidious_channel_threads }} +database_url: postgres://{{ invidious_db_user }}:{{ invidious_db_password}}@{{ invidious_db_host }}/{{ invidious_db_name }}?sslmode=verify-full +use_pubsub_feeds: {{ invidious_use_pubsub_feeds }} +hmac_key: {{ invidious_hmac_key }} +https_only: true +registration_enabled: {{ invidious_registration_enabled }} +admin_email: {{ invidious_admin_email }} +port: {{ invidious_port }} +host_binding: 127.0.0.1 +popular_enabled: {{ invidious_popular_enabled }} +full_refresh: {{ invidious_full_refresh }} +captcha_enabled: false +check_tables: true +cache_annotations: true + +default_user_preferences: + dark_mode: {{ invidious_default_dark_mode }} + autoplay: {{ invidious_default_autoplay }} + continue: {{ invidious_default_continue }} + continue_autoplay: {{ invidious_default_continue_autoplay }} + local: {{ invidious_default_local }} + quality: {{ invidious_default_quality }} + quality_dash: {{ invidious_default_quality_dash }} + locale: {{ invidious_default_locale }} + region: {{ invidious_default_region }} + related_videos: {{ invidious_default_related_videos }} + video_loop: {{ invidious_default_video_loop }} + player_style: {{ invidious_default_player_style }} + default_home: {{ invidious_default_home }} + feed_menu: {{ invidious_feed_menu | to_yaml }} diff --git a/roles/invidious/vars/main.yml b/roles/invidious/vars/main.yml new file mode 100644 index 0000000..36ca643 --- /dev/null +++ b/roles/invidious/vars/main.yml @@ -0,0 +1,42 @@ +invidious_packages: + - openssl-devel + - libevent-devel + - libxml2-devel + - libyaml-devel + - gmp-devel + - readline-devel + - postgresql + - librsvg2-devel + - sqlite-devel + - zlib-devel + - gcc + - git + - python3-psycopg2 + +invidious_git_repo: https://github.com/iv-org/invidious +invidious_home: /opt/invidious +invidious_install_dir: '{{ invidious_home }}/invidious' + +invidious_crystal_url: https://github.com/crystal-lang/crystal/releases/download/{{ invidious_crystal_version | regex_replace('-.*$', '') }}/crystal-{{ invidious_crystal_version }}-linux-x86_64.tar.gz +invidious_crystal_install_dir: /opt/crystal 
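The role's vars file pins the repository, paths, and the Crystal toolchain location, while the version to build, database credentials, HMAC key, and listen port referenced by the tasks and templates above are expected to come from the role's defaults or from inventory group_vars elsewhere in this patch. A hypothetical sketch of those inputs, with made-up values (secrets would normally live in Ansible Vault):

# Hypothetical inventory values; the real group_vars/all/invidious.yml in this
# patch is not reproduced here.
invidious_version: master                     # git ref checked out by the clone task
invidious_crystal_version: 1.7.2-1            # Crystal release tarball suffix
invidious_user: invidious
invidious_port: 3000
invidious_server_name: invidious.example.com
invidious_db_host: postgres1.example.com
invidious_db_name: invidious
invidious_db_user: invidious
invidious_db_password: '{{ vault_invidious_db_password }}'
invidious_hmac_key: '{{ vault_invidious_hmac_key }}'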
+ +invidious_schema_files: + - channels + - videos + - channel_videos + - users + - session_ids + - nonces + - annotations + - playlists + - playlist_videos + +invidious_db_cleanup_sql: + - DELETE FROM nonces * WHERE expire < current_timestamp + - TRUNCATE TABLE videos + +invidious_apache_config: | + AllowEncodedSlashes NoDecode + {{ apache_proxy_config }} + ProxyPass / http://127.0.0.1:{{ invidious_port }}/ nocanon + ProxyPassReverse / http://127.0.0.1:{{ invidious_port }}/ diff --git a/roles/jellyfin/defaults/main.yml b/roles/jellyfin/defaults/main.yml new file mode 100644 index 0000000..2c3fadb --- /dev/null +++ b/roles/jellyfin/defaults/main.yml @@ -0,0 +1,11 @@ +jellyfin_version: 10.8.9 +jellyfin_port: 8096 +jellyfin_user: s-jellyfin + +jellyfin_sysaccount_username: jellyfin + +jellyfin_media_access_group: role-media-access +jellyfin_access_group: role-media-access +jellyfin_admin_group: role-media-admin + +jellyfin_ldap_server: '{{ freeipa_hosts[0] }}' diff --git a/roles/jellyfin/handlers/main.yml b/roles/jellyfin/handlers/main.yml new file mode 100644 index 0000000..d6dee75 --- /dev/null +++ b/roles/jellyfin/handlers/main.yml @@ -0,0 +1,4 @@ +- name: restart jellyfin + systemd: + name: jellyfin + state: restarted diff --git a/roles/jellyfin/meta/main.yml b/roles/jellyfin/meta/main.yml new file mode 100644 index 0000000..f93843f --- /dev/null +++ b/roles/jellyfin/meta/main.yml @@ -0,0 +1,10 @@ +dependencies: + - role: yum + yum_repositories: + - epel + - rpmfusion-free + tags: yum + + - role: freeipa_system_account + system_account_username: '{{ jellyfin_sysaccount_username }}' + system_account_password: '{{ jellyfin_sysaccount_password }}' diff --git a/roles/jellyfin/tasks/freeipa.yml b/roles/jellyfin/tasks/freeipa.yml new file mode 100644 index 0000000..06cfd25 --- /dev/null +++ b/roles/jellyfin/tasks/freeipa.yml @@ -0,0 +1,67 @@ +- name: create user + ipauser: + ipaadmin_principal: '{{ ipa_user }}' + ipaadmin_password: '{{ ipa_pass }}' + name: '{{ jellyfin_user }}' + loginshell: /sbin/nologin + homedir: '{{ jellyfin_home }}' + givenname: Jellyfin + sn: Service Account + state: present + run_once: True + +- name: retrieve user keytab + include_role: + name: freeipa_keytab + vars: + keytab_principal: '{{ jellyfin_user }}' + keytab_path: '{{ jellyfin_keytab }}' + keytab_owner: '{{ jellyfin_user }}' + +- name: create media access group + ipagroup: + ipaadmin_principal: '{{ ipa_user }}' + ipaadmin_password: '{{ ipa_pass }}' + name: '{{ jellyfin_media_access_group }}' + nonposix: no + action: group + state: present + run_once: True + +- name: add user to media access group + ipagroup: + ipaadmin_principal: '{{ ipa_user }}' + ipaadmin_password: '{{ ipa_pass }}' + name: '{{ jellyfin_media_access_group }}' + user: '{{ jellyfin_user }}' + action: member + state: present + run_once: True + +- name: create access group + ipagroup: + ipaadmin_principal: '{{ ipa_user }}' + ipaadmin_password: '{{ ipa_pass }}' + name: '{{ jellyfin_access_group }}' + action: group + state: present + run_once: True + +- name: create admin group + ipagroup: + ipaadmin_principal: '{{ ipa_user }}' + ipaadmin_password: '{{ ipa_pass }}' + name: '{{ jellyfin_admin_group }}' + state: present + run_once: True + +- name: configure gssproxy for kerberized NFS + include_role: + name: gssproxy_client + vars: + gssproxy_name: jellyfin + gssproxy_section: service/jellyfin + gssproxy_keytab: /etc/krb5.keytab + gssproxy_client_keytab: '{{ jellyfin_keytab }}' + gssproxy_cred_usage: initiate + gssproxy_euid: '{{ jellyfin_user }}' 
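Taken together, the meta dependencies, FreeIPA tasks, and gssproxy setup above mean the role only needs a target host group and a vaulted system-account password to be applied. A hypothetical play showing that wiring; the patch ships its own playbooks/jellyfin.yml, which may differ from this sketch:

# Illustrative only; the group name is an assumption, and the vaulted
# jellyfin_sysaccount_password is expected to come from inventory group_vars.
- hosts: jellyfin_servers
  become: yes
  roles:
    - jellyfin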
diff --git a/roles/jellyfin/tasks/main.yml b/roles/jellyfin/tasks/main.yml new file mode 100644 index 0000000..0bb8b91 --- /dev/null +++ b/roles/jellyfin/tasks/main.yml @@ -0,0 +1,94 @@ +- name: install packages + dnf: + name: '{{ jellyfin_packages }}' + state: present + +- name: create installation directory + file: + path: '{{ jellyfin_install_dir }}' + state: directory + +- name: extract tarball + unarchive: + src: '{{ jellyfin_url }}' + remote_src: yes + dest: '{{ jellyfin_install_dir }}' + extra_opts: + - '--strip-components=2' + notify: restart jellyfin + +- name: generate systemd unit + template: + src: etc/systemd/system/jellyfin.service.j2 + dest: /etc/systemd/system/jellyfin.service + notify: restart jellyfin + register: jellyfin_unit + +- name: reload systemd units + systemd: + daemon_reload: yes + when: jellyfin_unit.changed + +- name: generate sysconfig file + template: + src: etc/sysconfig/jellyfin.j2 + dest: /etc/sysconfig/jellyfin + notify: restart jellyfin + +- name: create runtime directories + file: + path: '{{ item.path }}' + owner: '{{ jellyfin_user }}' + group: '{{ jellyfin_user }}' + mode: '{{ item.mode }}' + state: directory + loop: + - { path: '{{ jellyfin_home }}', mode: '0750' } + - { path: '{{ jellyfin_home }}/plugins', mode: '0750' } + - { path: '{{ jellyfin_home }}/plugins/configurations', mode: '0750' } + - { path: /var/cache/jellyfin, mode: '0750' } + - { path: /var/log/jellyfin, mode: '0755' } + - { path: '{{ jellyfin_conf_dir }}', mode: '0755' } + +- name: generate jellyfin network configuration + template: + src: '{{ jellyfin_conf_dir[1:] }}/network.xml.j2' + dest: '{{ jellyfin_conf_dir }}/network.xml' + owner: '{{ jellyfin_user }}' + group: '{{ jellyfin_user }}' + mode: 0644 + notify: restart jellyfin + +- name: generate jellyfin LDAP configuration + template: + src: '{{ jellyfin_home[1:] }}/plugins/configurations/LDAP-Auth.xml.j2' + dest: '{{ jellyfin_home }}/plugins/configurations/LDAP-Auth.xml' + owner: '{{ jellyfin_user }}' + group: '{{ jellyfin_user }}' + mode: 0640 + notify: restart jellyfin + +- import_tasks: freeipa.yml + +- name: enable jellyfin + systemd: + name: jellyfin + state: started + enabled: yes + +- name: allow apache to connect to jellyfin port + seport: + ports: '{{ jellyfin_port }}' + proto: tcp + setype: http_port_t + state: present + tags: selinux + +- name: open firewall ports + firewalld: + port: '{{ item }}' + permanent: yes + immediate: yes + state: enabled + loop: '{{ jellyfin_discovery_ports }}' + tags: firewalld diff --git a/roles/jellyfin/templates/etc/jellyfin/network.xml.j2 b/roles/jellyfin/templates/etc/jellyfin/network.xml.j2 new file mode 100644 index 0000000..9b73f6b --- /dev/null +++ b/roles/jellyfin/templates/etc/jellyfin/network.xml.j2 @@ -0,0 +1,36 @@ + + + false + + 0 + {{ jellyfin_port }} + 0 + false + {{ jellyfin_port }} + false + + false + true + false + + 2 + 100 + true + vEthernet* + 60 + false + + + false + true + + false + false + true + + + 127.0.0.1 + + + false + diff --git a/roles/jellyfin/templates/etc/sysconfig/jellyfin.j2 b/roles/jellyfin/templates/etc/sysconfig/jellyfin.j2 new file mode 100644 index 0000000..75f56ff --- /dev/null +++ b/roles/jellyfin/templates/etc/sysconfig/jellyfin.j2 @@ -0,0 +1,22 @@ +# Program directories +JELLYFIN_DATA_DIR="/var/lib/jellyfin" +JELLYFIN_CONFIG_DIR="/etc/jellyfin" +JELLYFIN_LOG_DIR="/var/log/jellyfin" +JELLYFIN_CACHE_DIR="/var/cache/jellyfin" + +# web client path, installed by the jellyfin-web package +JELLYFIN_WEB_OPT="--webdir={{ jellyfin_install_dir 
}}/jellyfin-web" + +# [OPTIONAL] ffmpeg binary paths, overriding the UI-configured values +#JELLYFIN_FFMPEG_OPT="--ffmpeg=/usr/bin/ffmpeg" + +# [OPTIONAL] run Jellyfin as a headless service +JELLYFIN_SERVICE_OPT="--service" + +# [OPTIONAL] run Jellyfin without the web app +#JELLYFIN_NOWEBAPP_OPT="--noautorunwebapp" + +# [OPTIONAL] run Jellyfin with ASP.NET Server Garbage Collection (uses more RAM and less CPU than Workstation GC) +# 0 = Workstation +# 1 = Server +COMPlus_gcServer=1 diff --git a/roles/jellyfin/templates/etc/systemd/system/jellyfin.service.j2 b/roles/jellyfin/templates/etc/systemd/system/jellyfin.service.j2 new file mode 100644 index 0000000..2b809bb --- /dev/null +++ b/roles/jellyfin/templates/etc/systemd/system/jellyfin.service.j2 @@ -0,0 +1,51 @@ +[Unit] +Description = Jellyfin Media Server +After=autofs.service network-online.target nss-user-lookup.target + +[Service] +Type = simple +EnvironmentFile = /etc/sysconfig/jellyfin +User = {{ jellyfin_user }} +Group = {{ jellyfin_user }} +WorkingDirectory = /var/lib/jellyfin +ExecStart = {{ jellyfin_install_dir }}/jellyfin ${JELLYFIN_WEB_OPT} ${JELLYFIN_RESTART_OPT} ${JELLYFIN_FFMPEG_OPT} ${JELLYFIN_SERVICE_OPT} ${JELLYFIN_NOWEBAPP_OPT} ${JELLYFIN_ADDITIONAL_OPTS} +Restart = on-failure +TimeoutSec = 15 +SuccessExitStatus=0 143 + +NoNewPrivileges=true +SystemCallArchitectures=native +RestrictAddressFamilies=AF_UNIX AF_INET AF_INET6 AF_NETLINK +RestrictNamespaces=false +RestrictRealtime=true +RestrictSUIDSGID=true +ProtectClock=true +ProtectControlGroups=false +ProtectHostname=true +ProtectKernelLogs=false +ProtectKernelModules=false +ProtectKernelTunables=false +LockPersonality=true +PrivateTmp=false +PrivateDevices=false +PrivateUsers=true +RemoveIPC=true +SystemCallFilter=~@clock +SystemCallFilter=~@aio +SystemCallFilter=~@chown +SystemCallFilter=~@cpu-emulation +SystemCallFilter=~@debug +SystemCallFilter=~@keyring +SystemCallFilter=~@memlock +SystemCallFilter=~@module +SystemCallFilter=~@mount +SystemCallFilter=~@obsolete +SystemCallFilter=~@privileged +SystemCallFilter=~@raw-io +SystemCallFilter=~@reboot +SystemCallFilter=~@setuid +SystemCallFilter=~@swap +SystemCallErrorNumber=EPERM + +[Install] +WantedBy = multi-user.target diff --git a/roles/jellyfin/templates/var/lib/jellyfin/plugins/configurations/LDAP-Auth.xml.j2 b/roles/jellyfin/templates/var/lib/jellyfin/plugins/configurations/LDAP-Auth.xml.j2 new file mode 100644 index 0000000..5326ff2 --- /dev/null +++ b/roles/jellyfin/templates/var/lib/jellyfin/plugins/configurations/LDAP-Auth.xml.j2 @@ -0,0 +1,23 @@ + + + {{ jellyfin_ldap_server }} + 636 + true + false + false + uid={{ jellyfin_sysaccount_username }},{{ freeipa_sysaccount_basedn }} + {{ jellyfin_sysaccount_password }} + {{ freeipa_user_basedn }} + (memberOf=cn={{ jellyfin_access_group }},{{ freeipa_group_basedn }}) + + (memberOf=cn={{ jellyfin_admin_group }},{{ freeipa_group_basedn }}) + uid + false + true + false + uid + userPassword + true + + + diff --git a/roles/jellyfin/vars/main.yml b/roles/jellyfin/vars/main.yml new file mode 100644 index 0000000..00b4251 --- /dev/null +++ b/roles/jellyfin/vars/main.yml @@ -0,0 +1,34 @@ +jellyfin_url: https://repo.jellyfin.org/releases/server/linux/stable/combined/jellyfin_{{ jellyfin_version }}_amd64.tar.gz + +jellyfin_packages: + - ffmpeg + +jellyfin_home: /var/lib/jellyfin +jellyfin_conf_dir: /etc/jellyfin +jellyfin_install_dir: /opt/jellyfin +jellyfin_keytab: /var/lib/gssproxy/clients/{{ jellyfin_user }}.keytab + +jellyfin_discovery_ports: + - 1900/udp + - 7359/udp + 
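The LDAP-Auth plugin template above binds with the FreeIPA system account and filters on group membership, so it leans on inventory-wide FreeIPA variables defined elsewhere in this patch. A rough sketch of the shape those variables take in a stock FreeIPA tree (example.com is a placeholder; the real group_vars will differ):

# Assumed values following FreeIPA's default DIT layout.
freeipa_hosts:
  - ipa1.example.com
  - ipa2.example.com
freeipa_sysaccount_basedn: cn=sysaccounts,cn=etc,dc=example,dc=com
freeipa_user_basedn: cn=users,cn=accounts,dc=example,dc=com
freeipa_group_basedn: cn=groups,cn=accounts,dc=example,dc=com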
+jellyfin_apache_config: | + {{ apache_proxy_config }} + ProxyPass / http://127.0.0.1:{{ jellyfin_port }}/ + ProxyPassReverse / http://127.0.0.1:{{ jellyfin_port }}/ + + + ProxyPass http://127.0.0.1:{{ jellyfin_port }}/socket/ + ProxyPassReverse http://127.0.0.1:{{ jellyfin_port }}/socket/ + + RewriteEngine on + RewriteCond %{HTTP:Upgrade} websocket [NC] + RewriteCond %{HTTP:Connection} upgrade [NC] + RewriteRule ^/?(.*) "ws://127.0.0.1:{{ jellyfin_port }}/socket/$1" [P,L] + + +jellyfin_archive_shell: >- + TIMESTAMP=$(date +%Y%m%d%H%M%S); + tar czf "jellyfin-${TIMESTAMP}.tar.gz" + {{ jellyfin_home | quote }}/{data,metadata,plugins,root} + {{ jellyfin_conf_dir | quote }} diff --git a/roles/journald/defaults/main.yml b/roles/journald/defaults/main.yml new file mode 100644 index 0000000..8c0ad8f --- /dev/null +++ b/roles/journald/defaults/main.yml @@ -0,0 +1,3 @@ +journald_persistent: no +journald_forward_to_syslog: yes +journald_max_use: null diff --git a/roles/journald/handlers/main.yml b/roles/journald/handlers/main.yml new file mode 100644 index 0000000..760a573 --- /dev/null +++ b/roles/journald/handlers/main.yml @@ -0,0 +1,4 @@ +- name: restart journald + systemd: + name: systemd-journald + state: restarted diff --git a/roles/journald/tasks/main.yml b/roles/journald/tasks/main.yml new file mode 100644 index 0000000..163c417 --- /dev/null +++ b/roles/journald/tasks/main.yml @@ -0,0 +1,23 @@ +- name: generate journald.conf + template: + src: etc/systemd/journald.conf.j2 + dest: /etc/systemd/journald.conf + notify: restart journald + +- name: set journald storage method + file: + path: /var/log/journal + state: "{{ 'directory' if journald_persistent else 'absent' }}" + register: journal_directory + +- name: configure persistent journal + block: + - name: set journal directory permissions + command: systemd-tmpfiles --create --prefix /var/log/journal + + - name: flush journal to disk + command: journalctl --flush + + when: + - journald_persistent + - journal_directory.changed diff --git a/roles/journald/templates/etc/systemd/journald.conf.j2 b/roles/journald/templates/etc/systemd/journald.conf.j2 new file mode 100644 index 0000000..33c59ba --- /dev/null +++ b/roles/journald/templates/etc/systemd/journald.conf.j2 @@ -0,0 +1,6 @@ +[Journal] +Storage=auto +{% if journald_max_use %} +{{ 'System' if journald_persistent else 'Runtime' }}MaxUse={{ journald_max_use }} +{% endif %} +ForwardToSyslog={{ journald_forward_to_syslog | bool | to_json }} diff --git a/roles/linux_desktop/defaults/main.yml b/roles/linux_desktop/defaults/main.yml new file mode 100644 index 0000000..ab00eff --- /dev/null +++ b/roles/linux_desktop/defaults/main.yml @@ -0,0 +1,6 @@ +linux_desktop_access_group: role-linux-desktop-access +linux_desktop_flatpak_update_on_calendar: daily +linux_desktop_enable_wayland: yes +linux_desktop_thumbnail_cache_size: 4096 # MB + +linux_desktop_enable_window_buttons: yes diff --git a/roles/linux_desktop/files/etc/dconf/db/local.d/00-hidpi b/roles/linux_desktop/files/etc/dconf/db/local.d/00-hidpi new file mode 100644 index 0000000..eef356b --- /dev/null +++ b/roles/linux_desktop/files/etc/dconf/db/local.d/00-hidpi @@ -0,0 +1,2 @@ +[org/gnome/mutter] +experimental-features=['scale-monitor-framebuffer'] diff --git a/roles/linux_desktop/files/etc/dconf/db/local.d/locks/hidpi b/roles/linux_desktop/files/etc/dconf/db/local.d/locks/hidpi new file mode 100644 index 0000000..15e31c2 --- /dev/null +++ b/roles/linux_desktop/files/etc/dconf/db/local.d/locks/hidpi @@ -0,0 +1 @@ 
+/org/gnome/mutter/experimental-features diff --git a/roles/linux_desktop/files/usr/local/share/thumbnailers/totem.thumbnailer b/roles/linux_desktop/files/usr/local/share/thumbnailers/totem.thumbnailer new file mode 100644 index 0000000..26649bd --- /dev/null +++ b/roles/linux_desktop/files/usr/local/share/thumbnailers/totem.thumbnailer @@ -0,0 +1,4 @@ +[Thumbnailer Entry] +TryExec=/usr/bin/totem-video-thumbnailer +Exec=/usr/bin/totem-video-thumbnailer -l -s %s %u %o +MimeType=application/mxf;application/ram;application/sdp;application/vnd.apple.mpegurl;application/vnd.ms-asf;application/vnd.ms-wpl;application/vnd.rn-realmedia;application/vnd.rn-realmedia-vbr;application/x-extension-m4a;application/x-extension-mp4;application/x-flash-video;application/x-matroska;application/x-netshow-channel;application/x-quicktimeplayer;application/x-shorten;image/vnd.rn-realpix;image/x-pict;misc/ultravox;text/x-google-video-pointer;video/3gp;video/3gpp;video/3gpp2;video/dv;video/divx;video/fli;video/flv;video/mp2t;video/mp4;video/mp4v-es;video/mpeg;video/mpeg-system;video/msvideo;video/ogg;video/quicktime;video/vivo;video/vnd.divx;video/vnd.mpegurl;video/vnd.rn-realvideo;video/vnd.vivo;video/webm;video/x-anim;video/x-avi;video/x-flc;video/x-fli;video/x-flic;video/x-flv;video/x-m4v;video/x-matroska;video/x-mjpeg;video/x-mpeg;video/x-mpeg2;video/x-ms-asf;video/x-ms-asf-plugin;video/x-ms-asx;video/x-msvideo;video/x-ms-wm;video/x-ms-wmv;video/x-ms-wmx;video/x-ms-wvx;video/x-nsv;video/x-ogm+ogg;video/x-theora;video/x-theora+ogg;video/x-totem-stream;audio/x-pn-realaudio;audio/3gpp;audio/3gpp2;audio/aac;audio/ac3;audio/AMR;audio/AMR-WB;audio/basic;audio/dv;audio/eac3;audio/flac;audio/m4a;audio/midi;audio/mp1;audio/mp2;audio/mp3;audio/mp4;audio/mpeg;audio/mpg;audio/ogg;audio/opus;audio/prs.sid;audio/scpls;audio/vnd.rn-realaudio;audio/wav;audio/webm;audio/x-aac;audio/x-aiff;audio/x-ape;audio/x-flac;audio/x-gsm;audio/x-it;audio/x-m4a;audio/x-m4b;audio/x-matroska;audio/x-mod;audio/x-mp1;audio/x-mp2;audio/x-mp3;audio/x-mpg;audio/x-mpeg;audio/x-ms-asf;audio/x-ms-asx;audio/x-ms-wax;audio/x-ms-wma;audio/x-musepack;audio/x-opus+ogg;audio/x-pn-aiff;audio/x-pn-au;audio/x-pn-wav;audio/x-pn-windows-acm;audio/x-realaudio;audio/x-real-audio;audio/x-s3m;audio/x-sbc;audio/x-shorten;audio/x-speex;audio/x-stm;audio/x-tta;audio/x-wav;audio/x-wavpack;audio/x-vorbis;audio/x-vorbis+ogg;audio/x-xm;application/x-flac; diff --git a/roles/linux_desktop/handlers/main.yml b/roles/linux_desktop/handlers/main.yml new file mode 100644 index 0000000..16c1d21 --- /dev/null +++ b/roles/linux_desktop/handlers/main.yml @@ -0,0 +1,7 @@ +- name: restart gdm + systemd: + name: gdm + state: restarted + +- name: update dconf + command: dconf update diff --git a/roles/linux_desktop/meta/main.yml b/roles/linux_desktop/meta/main.yml new file mode 100644 index 0000000..9b04ef8 --- /dev/null +++ b/roles/linux_desktop/meta/main.yml @@ -0,0 +1,9 @@ +dependencies: + - role: yum + yum_repositories: + - epel + - rpmfusion-free + - rpmfusion-free-tainted + - rpmfusion-nonfree + - rpmfusion-nonfree-tainted + tags: yum diff --git a/roles/linux_desktop/tasks/freeipa.yml b/roles/linux_desktop/tasks/freeipa.yml new file mode 100644 index 0000000..f7a09e1 --- /dev/null +++ b/roles/linux_desktop/tasks/freeipa.yml @@ -0,0 +1,33 @@ +- name: create linux-desktops hostgroup + ipahostgroup: + ipaadmin_principal: '{{ ipa_user }}' + ipaadmin_password: '{{ ipa_pass }}' + name: '{{ linux_desktop_hbac_hostgroup}}' + description: Linux Desktops + host: "{{ 
groups[linux_desktop_hbac_hostgroup] | map('regex_replace', '$', '.' ~ ansible_domain) }}" + run_once: yes + +- name: create desktop access group + ipagroup: + ipaadmin_principal: '{{ ipa_user }}' + ipaadmin_password: '{{ ipa_pass }}' + name: '{{ linux_desktop_access_group }}' + description: linux desktop access + nonposix: yes + state: present + run_once: yes + +- name: create HBAC rule for gdm + ipahbacrule: + ipaadmin_principal: '{{ ipa_user }}' + ipaadmin_password: '{{ ipa_pass }}' + name: allow_gdm_on_linux_desktops + description: Allow login to GDM on linux desktops + hostgroup: + - '{{ linux_desktop_hbac_hostgroup }}' + group: + - '{{ linux_desktop_access_group }}' + hbacsvc: + - gdm + - gdm-password + run_once: yes diff --git a/roles/linux_desktop/tasks/main.yml b/roles/linux_desktop/tasks/main.yml new file mode 100644 index 0000000..dbddcd4 --- /dev/null +++ b/roles/linux_desktop/tasks/main.yml @@ -0,0 +1,109 @@ +- name: install packages + dnf: + name: '{{ linux_desktop_packages }}' + exclude: '{{ linux_desktop_excluded_packages }}' + state: present + +# Sticking with tuned for now. On my thinkpad, the power-profiles-daemon sets the +# CPU governor to "performance" in the "power-save" profile! +- name: mask power-profiles-daemon + systemd: + name: power-profiles-daemon + state: stopped + masked: yes + +- name: make sure tuned wasn't killed by power-profiles-daemon + systemd: + name: tuned + state: started + +- name: enable GuC for intel card + copy: + content: | + options i915 enable_guc=2 enable_fbc=1 + dest: /etc/modprobe.d/i915.conf + register: i915_options + +- name: warn if reboot needed + fail: + msg: A reboot is needed to apply settings to i915 graphics module. + when: i915_options.changed + ignore_errors: yes + +- name: set default target to graphical + file: + src: /usr/lib/systemd/system/graphical.target + dest: /etc/systemd/system/default.target + state: link + +- name: generate gdm configuration + template: + src: etc/gdm/custom.conf.j2 + dest: /etc/gdm/custom.conf + notify: restart gdm + +- name: check if graphical target is active + command: systemctl is-active graphical.target + register: graphical_target + changed_when: false + failed_when: false + +- name: start display manager + command: systemctl isolate graphical.target + when: graphical_target.rc != 0 + notify: restart gdm + +- name: enable fractional scaling + copy: + src: '{{ item[1:] }}' + dest: '{{ item }}' + loop: + - /etc/dconf/db/local.d/00-hidpi + - /etc/dconf/db/local.d/locks/hidpi + notify: update dconf + +- name: add local dconf settings + template: + src: etc/dconf/db/local.d/00-gnome.j2 + dest: /etc/dconf/db/local.d/00-gnome + notify: update dconf + +- name: add flathub flatpak repository + flatpak_remote: + name: flathub + flatpakrepo_url: '{{ linux_desktop_flathub_repo }}' + state: present + +- name: install flatpak applications + flatpak: + name: '{{ item }}' + state: present + loop: '{{ linux_desktop_flatpaks }}' + +- name: set up flatpak-update timer + include_role: + name: systemd_timer + vars: + timer_name: flatpak-update + timer_description: Update flatpaks + timer_after: network.target + timer_on_calendar: '{{ linux_desktop_flatpak_update_on_calendar }}' + timer_exec: flatpak update -y + +- name: configure flatpak overrides + command: flatpak override {{ item.key }} {{ item.value }} + changed_when: no + loop: '{{ linux_desktop_flatpak_overrides | dict2items }}' + +- name: create /usr/local/share/thumbnailers + file: + path: /usr/local/share/thumbnailers + state: directory + +# see 
https://bugs.debian.org/cgi-bin/bugreport.cgi?bug=973942 +- name: patch totem thumbnailer to support large mp4 files + copy: + src: usr/local/share/thumbnailers/totem.thumbnailer + dest: /usr/local/share/thumbnailers/totem.thumbnailer + +- import_tasks: freeipa.yml diff --git a/roles/linux_desktop/templates/etc/dconf/db/local.d/00-gnome.j2 b/roles/linux_desktop/templates/etc/dconf/db/local.d/00-gnome.j2 new file mode 100644 index 0000000..42e4570 --- /dev/null +++ b/roles/linux_desktop/templates/etc/dconf/db/local.d/00-gnome.j2 @@ -0,0 +1,18 @@ +[org/gnome/desktop/thumbnail-cache] +maximum-size={{ linux_desktop_thumbnail_cache_size }} + +[org/gnome/nautilus/preferences] +recursive-search='always' +show-directory-item-counts='always' +show-image-thumbnails='always' + +[org/gnome/nautilus/list-view] +use-tree-view=true + +{% if linux_desktop_enable_window_buttons %} +[org/gnome/desktop/wm/preferences] +button-layout=':minimize,maximize,close' +{% endif %} + +[org/gnome/shell] +enabled-extensions=['appindicatorsupport@rgcjonas.gmail.com', 'dash-to-dock@gnome-shell-extensions.gcampax.github.com'] diff --git a/roles/linux_desktop/templates/etc/gdm/custom.conf.j2 b/roles/linux_desktop/templates/etc/gdm/custom.conf.j2 new file mode 100644 index 0000000..a1099d2 --- /dev/null +++ b/roles/linux_desktop/templates/etc/gdm/custom.conf.j2 @@ -0,0 +1,16 @@ +# GDM configuration storage + +[daemon] +InitialSetupEnable=false +# Uncomment the line below to force the login screen to use Xorg +WaylandEnable={{ linux_desktop_enable_wayland | bool | to_json }} + +[security] + +[xdmcp] + +[chooser] + +[debug] +# Uncomment the line below to turn on debugging +#Enable=true diff --git a/roles/linux_desktop/vars/main.yml b/roles/linux_desktop/vars/main.yml new file mode 100644 index 0000000..67cd80a --- /dev/null +++ b/roles/linux_desktop/vars/main.yml @@ -0,0 +1,65 @@ +linux_desktop_packages: + - '@gnome-desktop' + - '@fonts' + - '@hardware-support' + - '@internet-browser' + - '@base-x' + - '@networkmanager-submodules' + - '@print-client' + - gnome-tweaks + - evolution + - libreoffice-calc + - libreoffice-draw + - libreoffice-impress + - libreoffice-math + - libreoffice-writer + - ffmpeg-libs + - ffmpeg + - nfs4-acl-tools + - hexchat + - vlc + - youtube-dl + - gstreamer1-plugins-ugly + - gstreamer1-plugins-bad-freeworld + - gstreamer1-libav + - gstreamer1-vaapi + - libva-utils + - intel-media-driver + - seahorse + - inkscape + - dconf-editor + - libdvdcss + - gimp + - brasero + - ntfs-3g + - ntfsprogs + - exfatprogs + - gnome-shell-extension-appindicator + - gnome-shell-extension-dash-to-dock + - chromium + - gnome-extensions-app + +linux_desktop_excluded_packages: + - gnome-software + - libva-intel-driver + +linux_desktop_hbac_hostgroup: linux_desktops + +linux_desktop_flathub_repo: https://dl.flathub.org/repo/flathub.flatpakrepo + +linux_desktop_flatpaks: + - org.signal.Signal + - com.bitwarden + - org.libretro.RetroArch + - ca.littlesvr.asunder + - org.gnome.EasyTAG + - com.makemkv.MakeMKV + - org.gnucash.GnuCash + - org.gnome.Rhythmbox3 + - org.gajim.Gajim + - org.gajim.Gajim.Plugin.omemo + +linux_desktop_flatpak_overrides: + org.gnome.EasyTAG: --filesystem=host + org.gnome.Rhythmbox3: --filesystem=host + org.signal.Signal: --env=SIGNAL_USE_TRAY_ICON=1 diff --git a/roles/linux_laptop/defaults/main.yml b/roles/linux_laptop/defaults/main.yml new file mode 100644 index 0000000..adaa6b2 --- /dev/null +++ b/roles/linux_laptop/defaults/main.yml @@ -0,0 +1,9 @@ +linux_laptop_access_group: role-linux-desktop-access 
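The wifi defaults that follow resolve per-host inventory data: an SSID, a static address, and a vlan mapping carrying the CIDR, gateway, and DNS servers. A hypothetical host_vars entry showing the shape those lookups assume (SSID and addresses are made up):

# Hypothetical per-host values consumed by the linux_laptop wifi defaults.
wifi_ssid: homelab-wifi
ip: 10.0.30.25                 # static address for the wlan interface
vlan:
  cidr: 10.0.30.0/24           # ansible.utils.ipaddr('prefix') yields 24
  gateway: 10.0.30.1
  dns_servers:
    - 10.0.10.53
    - 10.0.11.53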
+linux_laptop_wifi_ssid: '{{ wifi_ssid }}' +linux_laptop_wifi_ip: '{{ ip }}' +linux_laptop_wifi_prefix: "{{ vlan.cidr | ansible.utils.ipaddr('prefix') }}" +linux_laptop_wifi_gateway: '{{ vlan.gateway }}' +linux_laptop_wifi_domain: '{{ ansible_domain }}' +linux_laptop_wifi_dns_servers: '{{ vlan.dns_servers }}' +linux_laptop_wlan_device: wlan0 +linux_laptop_dirty_writeback_centisecs: 6000 diff --git a/roles/linux_laptop/tasks/freeipa.yml b/roles/linux_laptop/tasks/freeipa.yml new file mode 100644 index 0000000..3c39bbf --- /dev/null +++ b/roles/linux_laptop/tasks/freeipa.yml @@ -0,0 +1,33 @@ +- name: create linux-laptops hostgroup + ipahostgroup: + ipaadmin_principal: '{{ ipa_user }}' + ipaadmin_password: '{{ ipa_pass }}' + name: '{{ linux_laptop_hbac_hostgroup}}' + description: Linux Laptops + host: "{{ groups[linux_laptop_hbac_hostgroup] | map('regex_replace', '$', '.' ~ ansible_domain) }}" + state: present + run_once: yes + +- name: create linux laptop access group + ipagroup: + ipaadmin_principal: '{{ ipa_user }}' + ipaadmin_password: '{{ ipa_pass }}' + name: '{{ linux_laptop_access_group }}' + nonposix: yes + state: present + run_once: yes + +- name: create HBAC rule for gdm + ipahbacrule: + ipaadmin_principal: '{{ ipa_user }}' + ipaadmin_password: '{{ ipa_pass }}' + name: allow_gdm_on_linux_laptops + description: Allow login to GDM on linux laptops + hostgroup: + - '{{ linux_laptop_hbac_hostgroup }}' + group: + - '{{ linux_laptop_access_group }}' + hbacsvc: + - gdm + - gdm-password + run_once: yes diff --git a/roles/linux_laptop/tasks/main.yml b/roles/linux_laptop/tasks/main.yml new file mode 100644 index 0000000..93f0862 --- /dev/null +++ b/roles/linux_laptop/tasks/main.yml @@ -0,0 +1,58 @@ +- import_tasks: freeipa.yml + +- name: request TLS certificate for wifi + include_role: + name: getcert_request + vars: + certificate_service: host + certificate_sans: ['{{ ansible_fqdn }}'] + certificate_path: '{{ linux_laptop_wifi_certificate_path }}' + certificate_key_path: '{{ linux_laptop_wifi_certificate_key_path }}' + certificate_key_passphrase: '{{ linux_laptop_wifi_certificate_key_passphrase }}' + +- name: get connection uuid + shell: nmcli --get-values connection.uuid connection show {{ linux_laptop_wifi_ssid }} || uuidgen + register: linux_laptop_wifi_networkmanager_uuid + changed_when: no + +- name: generate wifi network script + template: + src: etc/sysconfig/network-scripts/ifcfg-ssid.j2 + dest: /etc/sysconfig/network-scripts/ifcfg-{{ linux_laptop_wifi_ssid }} + register: linux_laptop_wifi_config + +- name: generate wifi keys file + template: + src: etc/sysconfig/network-scripts/keys-ssid.j2 + dest: /etc/sysconfig/network-scripts/keys-{{ linux_laptop_wifi_ssid }} + mode: 0600 + +- name: warn that wifi settings are not applied automatically + debug: + msg: >- + NOTE: WiFi has been configured in NetworkManager, but changes have not been + applied. Either restart NetworkManager or reboot the host. 
+ when: linux_laptop_wifi_config.changed + +- name: disable nmi watchdog + sysctl: + name: kernel.nmi_watchdog + value: 0 + +- name: bump vm.dirty_writeback_centisecs + sysctl: + name: vm.dirty_writeback_centisecs + value: '{{ linux_laptop_dirty_writeback_centisecs }}' + +- name: enable wifi powersaving + copy: + content: | + options iwlwifi power_save=1 uapsd_disable=0 + options iwlmvm power_scheme=3 + dest: /etc/modprobe.d/iwlwifi.conf + +- name: enable audio powersaving + copy: + content: | + options snd_hda_intel power_save=1 + dest: /etc/modprobe.d/snd_hda_intel.conf diff --git a/roles/linux_laptop/templates/etc/sysconfig/network-scripts/ifcfg-ssid.j2 b/roles/linux_laptop/templates/etc/sysconfig/network-scripts/ifcfg-ssid.j2 new file mode 100644 index 0000000..3737699 --- /dev/null +++ b/roles/linux_laptop/templates/etc/sysconfig/network-scripts/ifcfg-ssid.j2 @@ -0,0 +1,32 @@ +ESSID={{ linux_laptop_wifi_ssid }} +MODE=Managed +KEY_MGMT=WPA-EAP +MAC_ADDRESS_RANDOMIZATION=never +TYPE=Wireless +IEEE_8021X_EAP_METHODS=TLS +IEEE_8021X_IDENTITY=anonymous +IEEE_8021X_CA_CERT={{ linux_laptop_wifi_certificate_ca_path }} +IEEE_8021X_PRIVATE_KEY={{ linux_laptop_wifi_certificate_key_path }} +IEEE_8021X_CLIENT_CERT={{ linux_laptop_wifi_certificate_path }} +PROXY_METHOD=none +BROWSER_ONLY=no +BOOTPROTO=none +IPADDR={{ linux_laptop_wifi_ip }} +PREFIX={{ linux_laptop_wifi_prefix }} +GATEWAY={{ linux_laptop_wifi_gateway }} +{% for server in linux_laptop_wifi_dns_servers %} +DNS{{ loop.index }}={{ server }} +{% endfor %} +DOMAIN={{ linux_laptop_wifi_domain }} +DEFROUTE=yes +IPV4_FAILURE_FATAL=yes +IPV6INIT=yes +IPV6_AUTOCONF=yes +IPV6_DEFROUTE=yes +IPV6_FAILURE_FATAL=no +IPV6_ADDR_GEN_MODE=stable-privacy +NAME={{ linux_laptop_wifi_ssid }} +UUID={{ linux_laptop_wifi_networkmanager_uuid.stdout }} +DEVICE={{ linux_laptop_wlan_device }} +ONBOOT=yes +MACADDR=permanent diff --git a/roles/linux_laptop/templates/etc/sysconfig/network-scripts/keys-ssid.j2 b/roles/linux_laptop/templates/etc/sysconfig/network-scripts/keys-ssid.j2 new file mode 100644 index 0000000..31c61a6 --- /dev/null +++ b/roles/linux_laptop/templates/etc/sysconfig/network-scripts/keys-ssid.j2 @@ -0,0 +1 @@ +IEEE_8021X_PRIVATE_KEY_PASSWORD={{ linux_laptop_wifi_certificate_key_passphrase }} diff --git a/roles/linux_laptop/vars/main.yml b/roles/linux_laptop/vars/main.yml new file mode 100644 index 0000000..d02e644 --- /dev/null +++ b/roles/linux_laptop/vars/main.yml @@ -0,0 +1,6 @@ +linux_laptop_wifi_certificate_path: /etc/pki/tls/certs/wifi-{{ ansible_fqdn }}.crt +linux_laptop_wifi_certificate_key_path: /etc/pki/tls/private/wifi-{{ ansible_fqdn }}.key +linux_laptop_wifi_certificate_ca_path: /etc/ipa/ca.crt +linux_laptop_wifi_certificate_key_passphrase: just_have_to_use_something_or_networkmanger_freaks_out + +linux_laptop_hbac_hostgroup: linux_laptops diff --git a/roles/local_homedirs/files/etc/profile.d/local-homedirs.sh b/roles/local_homedirs/files/etc/profile.d/local-homedirs.sh new file mode 100644 index 0000000..88d710c --- /dev/null +++ b/roles/local_homedirs/files/etc/profile.d/local-homedirs.sh @@ -0,0 +1,16 @@ +# This file contains various environment variables and hacks to accomodate +# applications that don't play well with NFS-mounted home directories. 
+ +if (( UID >= 1000 )); then + export PYTHONUSERBASE="/usr/local/home/${USER}/.local" + export npm_config_cache="/usr/local/home/${USER}/.npm" + export CARGO_HOME="/usr/local/home/${USER}/.cargo" + export GOPATH="/usr/local/home/${USER}/go" + + # firefox + mkdir -p "/usr/local/home/${USER}/.mozilla" + ln -sfn "/usr/local/home/${USER}/.mozilla" "${HOME}/.mozilla" + + # flatpak + ln -sfn "/opt/flatpak/${USER}" "${HOME}/.var" +fi diff --git a/roles/local_homedirs/files/etc/security/pam_env_xdg.conf b/roles/local_homedirs/files/etc/security/pam_env_xdg.conf new file mode 100644 index 0000000..40ee87c --- /dev/null +++ b/roles/local_homedirs/files/etc/security/pam_env_xdg.conf @@ -0,0 +1,4 @@ +XDG_DATA_HOME DEFAULT=/usr/local/home/@{PAM_USER}/.local/share +XDG_STATE_HOME DEFAULT=/usr/local/home/@{PAM_USER}/.local/state +XDG_CACHE_HOME DEFAULT=/usr/local/home/@{PAM_USER}/.cache +XDG_CONFIG_HOME DEFAULT=/usr/local/home/@{PAM_USER}/.config diff --git a/roles/local_homedirs/files/usr/local/sbin/create-local-homedir.sh b/roles/local_homedirs/files/usr/local/sbin/create-local-homedir.sh new file mode 100644 index 0000000..ed42588 --- /dev/null +++ b/roles/local_homedirs/files/usr/local/sbin/create-local-homedir.sh @@ -0,0 +1,13 @@ +#!/bin/bash + +exec 1> >(logger -s -t $(basename "$0")) 2>&1 + +PAM_UID=$(id -u "$PAM_USER") + +if (( PAM_UID >= 1000 )); then + install -o "$PAM_USER" -g "$PAM_USER" -m 0700 -d "/usr/local/home/$PAM_USER" + + # Flatpak shadows /usr with its own runtime, so we need a path that flatpak + # doesn't touch. /opt seems appropriate. + install -o "$PAM_USER" -g "$PAM_USER" -m 0700 -d "/opt/flatpak/$PAM_USER" +fi diff --git a/roles/local_homedirs/tasks/main.yml b/roles/local_homedirs/tasks/main.yml new file mode 100644 index 0000000..8823672 --- /dev/null +++ b/roles/local_homedirs/tasks/main.yml @@ -0,0 +1,76 @@ +- name: create /usr/local/home + file: + path: /usr/local/home + state: directory + +- name: copy homedir creation scripts + copy: + src: usr/local/sbin/create-local-homedir.sh + dest: '{{ item }}' + mode: 0555 + setype: _default + loop: + - '{{ local_homedir_script_ssh }}' + - '{{ local_homedir_script_gdm }}' + +- name: set xdm_unconfined_exec_t sefcontext on homedir creation script + sefcontext: + target: '{{ local_homedir_script_gdm }}' + state: present + setype: xdm_unconfined_exec_t + tags: selinux + register: local_homedir_sefcontext + +- name: apply selinux context to homedir creation script + command: 'restorecon -R {{ local_homedir_script_gdm }}' + when: local_homedir_sefcontext.changed + tags: selinux + +- name: copy profile script + copy: + src: etc/profile.d/local-homedirs.sh + dest: /etc/profile.d/local-homedirs.sh + +- name: copy pam_env for XDG variables + copy: + src: '{{ local_homedir_pam_env_path[1:] }}' + dest: '{{ local_homedir_pam_env_path }}' + +- name: get fcontext equivalencies + command: semanage fcontext -l -C + changed_when: no + register: selinux_equivalencies + +- name: set selinux fcontext for /usr/local/home + command: semanage fcontext -a -e /home /usr/local/home + register: local_homedirs_fcontext + when: "'/usr/local/home = /home' not in selinux_equivalencies.stdout_lines" + +- name: apply selinux fcontext to /usr/local/home + command: restorecon -R /usr/local/home + when: local_homedirs_fcontext.changed + +- name: check if gdm is installed + package_facts: + manager: auto + +- name: modify GDM PAM configuration for local homedirs + lineinfile: + path: /etc/pam.d/gdm-password + line: '{{ item }}' + insertbefore: 
^auth\s+optional\s+pam_gnome_keyring\.so$ + state: present + loop: + - auth optional pam_exec.so {{ local_homedir_script_gdm }} + - auth optional pam_env.so conffile={{ local_homedir_pam_env_path }} + when: "'gdm' in ansible_facts.packages" + +- name: modify pam configs for sshd + lineinfile: + path: /etc/pam.d/sshd + line: '{{ item }}' + insertafter: EOF + state: present + loop: + - session optional pam_exec.so {{ local_homedir_script_ssh }} + - session optional pam_env.so conffile={{ local_homedir_pam_env_path }} diff --git a/roles/local_homedirs/vars/main.yml b/roles/local_homedirs/vars/main.yml new file mode 100644 index 0000000..a004c12 --- /dev/null +++ b/roles/local_homedirs/vars/main.yml @@ -0,0 +1,3 @@ +local_homedir_script_gdm: /usr/local/sbin/create-local-homedir-gdm.sh +local_homedir_script_ssh: /usr/local/sbin/create-local-homedir-ssh.sh +local_homedir_pam_env_path: /etc/security/pam_env_xdg.conf diff --git a/roles/locale/defaults/main.yml b/roles/locale/defaults/main.yml new file mode 100644 index 0000000..bc14575 --- /dev/null +++ b/roles/locale/defaults/main.yml @@ -0,0 +1 @@ +locale: en_US.UTF-8 diff --git a/roles/locale/tasks/main.yml b/roles/locale/tasks/main.yml new file mode 100644 index 0000000..e0b5b6f --- /dev/null +++ b/roles/locale/tasks/main.yml @@ -0,0 +1,10 @@ +- name: set default locale + lineinfile: + dest: /etc/locale.conf + regexp: ^LANG= + line: 'LANG={{ locale }}' + +- name: install glibc language pack + dnf: + name: "glibc-langpack-{{ locale | split('_') | first }}" + state: present diff --git a/roles/mediawiki/defaults/main.yml b/roles/mediawiki/defaults/main.yml new file mode 100644 index 0000000..4b65d70 --- /dev/null +++ b/roles/mediawiki/defaults/main.yml @@ -0,0 +1,52 @@ +mediawiki_version: 1.39.1 +mediawiki_extension_version: REL1_39 + +mediawiki_kerberized_cidrs: '{{ kerberized_cidrs }}' + +mediawiki_user: s-mediawiki +mediawiki_db_name: mediawiki +mediawiki_db_host: '{{ postgresql_host }}' + +mediawiki_access_group: role-wiki-access +mediawiki_admin_group: role-wiki-admin + +mediawiki_max_upload_size: 50M +mediawiki_max_upload_count: 32 + +mediawiki_custom_namespaces: [] + +mediawiki_use_subpages: true + +mediawiki_ldap_servers: '{{ freeipa_hosts }}' +mediawiki_sysaccount_username: mediawiki + +mediawiki_site_name: '{{ organization }} Wiki' +mediawiki_meta_namespace: "{{ organization | regex_replace('\\s*', '') }}" +mediawiki_fqdn: '{{ ansible_fqdn }}' +mediawiki_url: https://{{ mediawiki_fqdn }} + +mediawiki_admin_username: admin +mediawiki_emergency_contact: root@{{ email_domain }} +mediawiki_password_sender: wiki-noreply@{{ email_domain }} +mediawiki_email_authentication: no + +mediawiki_local_timezone: '{{ timezone }}' +mediawiki_language_code: en + +mediawiki_default_skin: vector +mediawiki_default_mobile_skin: minerva + +mediawiki_disable_anonymous_read: no +mediawiki_disable_anonymous_edit: yes + +mediawiki_block_wan_login: yes + +mediawiki_apc_shm_size: 256M + +mediawiki_skins: + - Vector + - MinervaNeue + +# mediawiki_logo_1x: /path/to/1x/logo.jpg +# mediawiki_logo_icon: /path/to/icon/logo.jpg +# mediawiki_logo_favicon: /path/to/favicon.ico diff --git a/roles/mediawiki/files/var/www/mediawiki/robots.txt b/roles/mediawiki/files/var/www/mediawiki/robots.txt new file mode 100644 index 0000000..c218f6f --- /dev/null +++ b/roles/mediawiki/files/var/www/mediawiki/robots.txt @@ -0,0 +1,2 @@ +User-agent: * +Disallow: /index.php? 
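The defaults above leave mediawiki_custom_namespaces empty. The LocalSettings.php template later in this patch expects each entry to provide namespace, id, talk_id, and an optional restrict mapping of MediaWiki permission to group, which also feeds the LDAP group-sync mapping and the Lockdown rules. A hypothetical override showing that shape; namespace names, ids, and groups are illustrative, and per MediaWiki convention the subject namespace id is even with its talk namespace at id + 1:

# Hypothetical inventory override for a wiki with one restricted and one open
# custom namespace.
mediawiki_custom_namespaces:
  - namespace: Homelab
    id: 3000
    talk_id: 3001
    restrict:
      read: role-wiki-homelab    # a group may also be given as a list
      edit: role-wiki-homelab
  - namespace: Recipes           # unrestricted custom namespace
    id: 3002
    talk_id: 3003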
diff --git a/roles/mediawiki/meta/main.yml b/roles/mediawiki/meta/main.yml new file mode 100644 index 0000000..f0e6864 --- /dev/null +++ b/roles/mediawiki/meta/main.yml @@ -0,0 +1,8 @@ +dependencies: + - role: yum + yum_repositories: epel + tags: yum + + - role: freeipa_system_account + system_account_username: '{{ mediawiki_sysaccount_username }}' + system_account_password: '{{ mediawiki_sysaccount_password }}' diff --git a/roles/mediawiki/tasks/database.yml b/roles/mediawiki/tasks/database.yml new file mode 100644 index 0000000..b00a8a1 --- /dev/null +++ b/roles/mediawiki/tasks/database.yml @@ -0,0 +1,50 @@ +- name: create postgresql database + postgresql_db: + name: '{{ mediawiki_db_name }}' + state: present + delegate_to: "{{ postgresql_host.split('.')[0] }}" + become: True + become_user: postgres + +- name: create postgresql user + postgresql_user: + name: '{{ mediawiki_user }}' + db: '{{ mediawiki_db_name }}' + priv: ALL + state: present + delegate_to: "{{ postgresql_host.split('.')[0] }}" + become: True + become_user: postgres + +- name: check if database schema is initialized + postgresql_query: + login_user: '{{ mediawiki_user }}' + login_host: '{{ mediawiki_db_host }}' + db: '{{ mediawiki_db_name }}' + query: SELECT 1 FROM mediawiki.page + become: True + become_user: apache + environment: + GSS_USE_PROXY: 'yes' + register: mediawiki_check_db + failed_when: false + +- name: initialize database schema + command: > + php {{ mediawiki_home }}/maintenance/install.php + --server {{ mediawiki_url }} + --dbuser {{ mediawiki_user }} + --dbname {{ mediawiki_db_name }} + --dbserver {{ mediawiki_db_host }} + --dbtype postgres + --pass {{ mediawiki_admin_password | quote }} + --scriptpath / + {{ mediawiki_site_name | quote }} + {{ mediawiki_admin_username }} + become: True + become_user: apache + environment: + GSS_USE_PROXY: 'yes' + when: + - mediawiki_check_db.msg is defined + - mediawiki_check_db.msg is search('relation "mediawiki.page" does not exist') diff --git a/roles/mediawiki/tasks/extension.yml b/roles/mediawiki/tasks/extension.yml new file mode 100644 index 0000000..02f5dc3 --- /dev/null +++ b/roles/mediawiki/tasks/extension.yml @@ -0,0 +1,12 @@ +- name: get url for extension tarball + uri: + url: 'https://www.mediawiki.org/w/index.php?title=Special:ExtensionDistributor&extdistname={{ extension_name }}&extdistversion={{ extension_version }}' + register: extension_distributor_resp + +- name: extract extension tarball + unarchive: + src: "{{ extension_distributor_resp.refresh.split(';') | map('trim') | select('search', '^url=') | first | regex_replace('^url=', '') }}" + remote_src: yes + dest: '{{ mediawiki_home }}/extensions' + owner: apache + group: apache diff --git a/roles/mediawiki/tasks/freeipa.yml b/roles/mediawiki/tasks/freeipa.yml new file mode 100644 index 0000000..565cdca --- /dev/null +++ b/roles/mediawiki/tasks/freeipa.yml @@ -0,0 +1,40 @@ +- name: create mediawiki user + ipauser: + ipaadmin_principal: '{{ ipa_user }}' + ipaadmin_password: '{{ ipa_pass }}' + name: '{{ mediawiki_user }}' + loginshell: /sbin/nologin + homedir: '{{ mediawiki_home }}' + givenname: MediaWiki + sn: Service Account + state: present + run_once: True + +- name: create mediawiki groups + ipagroup: + ipaadmin_principal: '{{ ipa_user }}' + ipaadmin_password: '{{ ipa_pass }}' + name: '{{ item }}' + nonposix: yes + state: present + run_once: True + loop: + - '{{ mediawiki_access_group }}' + - '{{ mediawiki_admin_group }}' + +- name: retrieve mediawiki user keytab + include_role: + name: freeipa_keytab 
+ vars: + keytab_principal: '{{ mediawiki_user }}' + keytab_path: '{{ mediawiki_keytab }}' + +- name: configure gssproxy for kerberized postgres + include_role: + name: gssproxy_client + vars: + gssproxy_name: mediawiki + gssproxy_section: service/php-fpm + gssproxy_client_keytab: '{{ mediawiki_keytab }}' + gssproxy_cred_usage: initiate + gssproxy_euid: apache diff --git a/roles/mediawiki/tasks/main.yml b/roles/mediawiki/tasks/main.yml new file mode 100644 index 0000000..d0c3820 --- /dev/null +++ b/roles/mediawiki/tasks/main.yml @@ -0,0 +1,134 @@ +- name: install packages + dnf: + name: '{{ mediawiki_packages }}' + state: present + +- name: set PHP APC cache size + lineinfile: + path: /etc/php.d/40-apcu.ini + regexp: ^apc\.shm_size= + line: apc.shm_size={{ mediawiki_apc_shm_size }} + state: present + notify: restart php-fpm + +- import_tasks: freeipa.yml + tags: freeipa + +- name: create mediawiki webroot + file: + path: '{{ mediawiki_home }}' + state: directory + +- name: get current mediawiki version + command: php {{ mediawiki_home }}/maintenance/version.php + become: True + become_user: apache + environment: + GSS_USE_PROXY: 'yes' + changed_when: no + failed_when: no + register: mediawiki_current_version + +- name: extract mediawiki tarball + unarchive: + src: '{{ mediawiki_tarball }}' + remote_src: yes + dest: '{{ mediawiki_home }}' + owner: apache + group: apache + extra_opts: + - '--strip-components=1' + +- name: set permissions on writeable directories + file: + path: '{{ mediawiki_home }}/{{ item }}' + state: directory + mode: 0770 + owner: apache + group: apache + setype: _default + loop: '{{ mediawiki_writable_dirs }}' + +- name: set selinux context for writeable directories + sefcontext: + target: '{{ mediawiki_home }}/{{ item }}(/.*)?' + setype: httpd_sys_rw_content_t + state: present + loop: '{{ mediawiki_writable_dirs }}' + register: mediawiki_writeable_sefcontext + tags: selinux + +- name: apply selinux context to writeable directories + command: 'restorecon -R {{ mediawiki_home }}/{{ item }}' + when: mediawiki_writeable_sefcontext.results[index].changed + loop: '{{ mediawiki_writable_dirs }}' + loop_control: + index_var: index + tags: selinux + +- name: set selinux context for executable directories + sefcontext: + target: '{{ mediawiki_home }}/{{ item }}(/.*)?' 
+ setype: httpd_sys_script_exec_t + state: present + loop: '{{ mediawiki_executable_dirs }}' + register: mediawiki_executable_sefcontext + tags: selinux + +- name: apply selinux context to executable directories + command: 'restorecon -R {{ mediawiki_home }}/{{ item }}' + when: mediawiki_executable_sefcontext.results[index].changed + loop: '{{ mediawiki_executable_dirs }}' + loop_control: + index_var: index + tags: selinux + +- import_tasks: database.yml + tags: database + +- name: generate LocalSettings.php + template: + src: '{{ mediawiki_home[1:] }}/LocalSettings.php.j2' + dest: '{{ mediawiki_home }}/LocalSettings.php' + owner: root + group: apache + mode: 0640 + register: mediawiki_localsettings + +- name: install extensions + include_tasks: extension.yml + vars: + extension_name: '{{ item if item is string else item.name }}' + extension_version: '{{ mediawiki_extension_version if item is string else (item.version | default(mediawiki_extension_version)) }}' + loop: '{{ mediawiki_extensions }}' + +- name: update database schema + command: php {{ mediawiki_home }}/maintenance/update.php --quick + become: yes + become_user: apache + environment: + GSS_USE_PROXY: 'yes' + when: mediawiki_localsettings.changed or (mediawiki_current_version.rc == 0 and not mediawiki_current_version.stdout is search(mediawiki_version)) + +- name: copy robots.txt + copy: + src: '{{ mediawiki_home[1:] }}/robots.txt' + dest: '{{ mediawiki_home }}/robots.txt' + +- name: copy 1x logo + copy: + src: '{{ mediawiki_logo_1x }}' + dest: '{{ mediawiki_home }}/resources/assets/{{ mediawiki_logo_1x | basename }}' + when: mediawiki_logo_1x is defined + +- name: copy icon logo + copy: + src: '{{ mediawiki_logo_icon }}' + dest: '{{ mediawiki_home }}/resources/assets/{{ mediawiki_logo_icon | basename }}' + when: mediawiki_logo_icon is defined + +- name: copy favicon + copy: + src: '{{ mediawiki_favicon }}' + dest: '{{ mediawiki_home }}/resources/assets/{{ mediawiki_favicon | basename }}' + when: mediawiki_favicon is defined diff --git a/roles/mediawiki/templates/var/www/mediawiki/LocalSettings.php.j2 b/roles/mediawiki/templates/var/www/mediawiki/LocalSettings.php.j2 new file mode 100644 index 0000000..e94ca80 --- /dev/null +++ b/roles/mediawiki/templates/var/www/mediawiki/LocalSettings.php.j2 @@ -0,0 +1,288 @@ + [ + 'connection' => [ + 'server' => '{{ mediawiki_ldap_servers | join(' ') }}', + 'user' => 'uid={{ mediawiki_sysaccount_username }},{{ freeipa_sysaccount_basedn }}', + 'pass' => '{{ mediawiki_sysaccount_password }}', + 'enctype' => 'tls', + 'options' => [ + 'LDAP_OPT_DEREF' => 1 + ], + 'basedn' => '{{ freeipa_basedn }}', + 'groupbasedn' => '{{ freeipa_group_basedn }}', + 'grouprequest' => 'MediaWiki\\Extension\\LDAPProvider\\UserGroupsRequest\\UserMemberOf::factory', + 'presearchusernamemodifiers' => [ 'lowercase' ], + 'userbasedn' => '{{ freeipa_user_basedn }}', + 'searchattribute' => 'uid', + 'searchstring' => 'uid=USER-NAME,{{ freeipa_user_basedn }}', + 'usernameattribute' => 'uid', + 'realnameattribute' => 'cn', + 'emailattribute' => 'mail' + ], + 'groupsync' => [ + 'mechanism' => 'mappedgroups', + 'mapping' => [ +{% for group in mediawiki_custom_namespaces + | selectattr('restrict', 'defined') + | map(attribute='restrict') + | map('dict2items') + | flatten + | map(attribute='value') + | unique + | difference(mediawiki_builtin_groups) %} + '{{ group }}' => 'cn={{ group }},{{ freeipa_group_basedn }}', +{% endfor %} + 'sysop' => 'cn={{ mediawiki_admin_group }},{{ freeipa_group_basedn }}', + 'interface-admin' => 
'cn={{ mediawiki_admin_group }},{{ freeipa_group_basedn }}', + 'bureaucrat' => 'cn={{ mediawiki_admin_group }},{{ freeipa_group_basedn }}' + ] + ], + 'userinfo' => [ + 'attributes-map' => [ + 'email' => 'mail', + 'realname' => 'cn' + ] + ], + 'authorization' => [ + 'rules' => [ + 'groups' => [ + 'required' => [ + 'cn={{ mediawiki_access_group }},{{ freeipa_group_basedn }}', + 'cn={{ mediawiki_admin_group }},{{ freeipa_group_basedn }}' + ] + ] + ] + ] + ] + ]; + + return new \MediaWiki\Extension\LDAPProvider\DomainConfigProvider\InlinePHPArray( $config ); +}; + + +### Extension: PluggableAuth +$wgPluggableAuth_ButtonLabel = 'Log In'; + + +### Extension: CodeMirror +$wgDefaultUserOptions['usecodemirror'] = 1; +$wgCodeMirrorEnableBracketMatching = true; +$wgCodeMirrorLineNumberingNamespaces = null; + + +### Extension: UploadWizard +$wgUploadNavigationUrl = '/Special:UploadWizard'; + + +### Extension: Auth_remoteuser +$wgAuthRemoteuserUserNameReplaceFilter = [ + '@{{ freeipa_realm }}$' => '' +]; + + +### Extension: Lockdown +{% for ns in mediawiki_custom_namespaces | selectattr('restrict', 'defined') %} +{% for r in ns.restrict | dict2items(key_name='perm', value_name='group') %} +$wgNamespacePermissionLockdown[{{ ns.id }}]['{{ r.perm }}'] = {{ ([r.group] if r.group is string else r.group) | to_json }}; +$wgNamespacePermissionLockdown[{{ ns.talk_id }}]['{{ r.perm }}'] = {{ ([r.group] if r.group is string else r.group) | to_json }}; +{% endfor %} +$wgNonincludableNamespaces[] = {{ ns.id }}; +$wgNonincludableNamespaces[] = {{ ns.talk_id }}; +{% endfor %} + + +### Extension: VisualEditor +$wgVisualEditorAvailableNamespaces = [ +{% for ns in mediawiki_custom_namespaces %} + '{{ ns.namespace }}' => true, + '{{ ns.namespace }}Talk' => true{% if not loop.last %},{% endif %} + +{% endfor %} +]; diff --git a/roles/mediawiki/vars/main.yml b/roles/mediawiki/vars/main.yml new file mode 100644 index 0000000..d82f2f4 --- /dev/null +++ b/roles/mediawiki/vars/main.yml @@ -0,0 +1,125 @@ +mediawiki_tarball: https://releases.wikimedia.org/mediawiki/{{ mediawiki_version | splitext | first }}/mediawiki-{{ mediawiki_version }}.tar.gz +mediawiki_home: /var/www/mediawiki +mediawiki_keytab: /var/lib/gssproxy/clients/{{ mediawiki_user }}.keytab + +mediawiki_packages: + - php + - php-json + - php-ldap + - php-mbstring + - php-opcache + - php-pdo + - php-pgsql + - php-xml + - php-intl + - php-gd + - php-pecl-apcu + - php-pecl-igbinary + - python3-psycopg2 + - python3 + - ImageMagick + - poppler-utils + - ghostscript + - varnish + +mediawiki_php_environment: + GSS_USE_PROXY: 'yes' + +mediawiki_php_admin_values: + post_max_size: '{{ mediawiki_max_upload_size }}' + upload_max_filesize: '{{ mediawiki_max_upload_size }}' + max_file_uploads: '{{ mediawiki_max_upload_count }}' + +mediawiki_writable_dirs: + - images + - cache + +mediawiki_executable_dirs: + - extensions/SyntaxHighlight_GeSHi/pygments + +mediawiki_builtin_extensions: + - WikiEditor + - VisualEditor + - MobileFrontend + - MultimediaViewer + - Math + - PageImages + - SyntaxHighlight_GeSHi + - PdfHandler + +mediawiki_extensions: + - PluggableAuth + - LDAPAuthorization + - LDAPAuthentication2 + - LDAPProvider + - MobileFrontend + - LDAPGroups + - LDAPUserInfo + - Auth_remoteuser + - CodeMirror + - RelatedArticles + - UploadWizard + - Lockdown + +mediawiki_builtin_groups: + - user + - autoconfirmed + - bot + - sysop + - interface-admin + - bureaucrat + - suppress + +mediawiki_apache_config: | + AllowEncodedSlashes NoDecode + + RewriteEngine On + + RewriteCond 
%{REQUEST_URI} ^/({{ mediawiki_rewrite_blacklist | map("regex_escape") | join("|") }})$ + RewriteRule ^(.*)$ %{DOCUMENT_ROOT}/index.php [L] + + RewriteCond %{DOCUMENT_ROOT}%{REQUEST_URI} !\.php/ + RewriteCond %{DOCUMENT_ROOT}%{REQUEST_URI} !-f + RewriteCond %{DOCUMENT_ROOT}%{REQUEST_URI} !-d + RewriteRule ^(.*)$ %{DOCUMENT_ROOT}/index.php [L] + + RewriteCond %{DOCUMENT_ROOT}%{REQUEST_URI} !\.php/ + RewriteCond %{DOCUMENT_ROOT}%{REQUEST_URI} !-f + RewriteCond %{DOCUMENT_ROOT}%{REQUEST_URI} !-d + RewriteRule ^(.*)/([a-z]*)$ %{DOCUMENT_ROOT}/index.php [L,QSA] + + + AuthName "FreeIPA Single Sign-On" + AuthType GSSAPI + + {{ apache_gssapi_session_config }} + Require valid-user + + + + + AllowOverride None + Require all denied + + +# Since we're using pretty URLs, page titles can clash with real files in the +# mediawiki directory. If this ever happens, add the file path to this list. +mediawiki_rewrite_blacklist: + - CODE_OF_CONDUCT.md + - COPYING + - CREDITS + - FAQ + - HISTORY + - INSTALL + - README.md + - SECURITY + - UPGRADE + - composer.json + - jsduck.json + +mediawiki_archive_shell: >- + TIMESTAMP=$(date +%Y%m%d%H%M%S); + tar czf "mediawiki-${TIMESTAMP}.tar.gz" + --transform "s|^\.|mediawiki-${TIMESTAMP}|" + -C "{{ mediawiki_home }}" + images diff --git a/roles/motd/tasks/main.yml b/roles/motd/tasks/main.yml new file mode 100644 index 0000000..b92ff30 --- /dev/null +++ b/roles/motd/tasks/main.yml @@ -0,0 +1,10 @@ +- name: create /etc/motd.d + file: + path: /etc/motd.d + state: directory + +- name: remove cockpit message + file: + src: /dev/null + dest: /etc/motd.d/cockpit + state: link diff --git a/roles/nagios_client/files/usr/lib64/nagios/plugins/check_mem b/roles/nagios_client/files/usr/lib64/nagios/plugins/check_mem new file mode 100644 index 0000000..c72fd76 --- /dev/null +++ b/roles/nagios_client/files/usr/lib64/nagios/plugins/check_mem @@ -0,0 +1,452 @@ +#!/usr/bin/perl -w + +# Heavily based on the script from: +# check_mem.pl Copyright (C) 2000 Dan Larsson +# heavily modified by +# Justin Ellison +# +# Modified again by stonewall@sacredheartsc.com to be ZFS-aware +# +# The MIT License (MIT) +# Copyright (c) 2011 justin@techadvise.com +# Copyright (c) 2023 stonewall@sacredheartsc.com + +# Permission is hereby granted, free of charge, to any person obtaining a copy of this +# software and associated documentation files (the "Software"), to deal in the Software +# without restriction, including without limitation the rights to use, copy, modify, +# merge, publish, distribute, sublicense, and/or sell copies of the Software, and to +# permit persons to whom the Software is furnished to do so, subject to the following conditions: + +# The above copyright notice and this permission notice shall be included in all copies +# or substantial portions of the Software. + +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, +# INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +# PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE +# FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT +# OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR +# OTHER DEALINGS IN THE SOFTWARE. 
+ +# Tell Perl what we need to use +use strict; +use Getopt::Std; + +#TODO - Convert to Nagios::Plugin +#TODO - Use an alarm + +# Predefined exit codes for Nagios +use vars qw($opt_c $opt_f $opt_u $opt_a $opt_w $opt_C $opt_v $opt_h $opt_z %exit_codes); +%exit_codes = ('UNKNOWN' , 3, + 'OK' , 0, + 'WARNING' , 1, + 'CRITICAL', 2, + ); + +# Get our variables, do our checking: +init(); + +# Get the numbers: +my ($free_memory_kb,$used_memory_kb,$caches_kb,$available_memory_kb,$hugepages_kb) = get_memory_info(); +print "$free_memory_kb Free\n$used_memory_kb Used\n$caches_kb Cache\n" if ($opt_v); +print "$available_memory_kb Available\n" if ($opt_v and $opt_a); +print "$hugepages_kb Hugepages\n" if ($opt_v and $opt_h); + +if ($opt_C) { #Do we count caches as free? + $used_memory_kb -= $caches_kb; + $free_memory_kb += $caches_kb; +} + +if ($opt_h) { + $used_memory_kb -= $hugepages_kb; +} + +print "$used_memory_kb Used (after Hugepages)\n" if ($opt_v); + +# Round to the nearest KB +$free_memory_kb = sprintf('%.0f',$free_memory_kb); +$used_memory_kb = sprintf('%.0f',$used_memory_kb); +$caches_kb = sprintf('%.0f',$caches_kb); + +# Tell Nagios what we came up with +tell_nagios($used_memory_kb,$free_memory_kb,$caches_kb,$available_memory_kb,$hugepages_kb); + + +sub tell_nagios { + my ($used,$free,$caches,$available,$hugepages) = @_; + + # Calculate Total Memory + my $total = $free + $used; + print "$total Total\n" if ($opt_v); + + my $perf_warn; + my $perf_crit; + if ( $opt_u or $opt_a ) { + $perf_warn = int(${total} * $opt_w / 100); + $perf_crit = int(${total} * $opt_c / 100); + } else { + $perf_warn = int(${total} * ( 100 - $opt_w ) / 100); + $perf_crit = int(${total} * ( 100 - $opt_c ) / 100); + } + + my $perfdata = "|TOTAL=${total}KB;;;;"; + if ( !$opt_a ) { + $perfdata .= " USED=${used}KB;${perf_warn};${perf_crit};;"; + } else { + $perfdata .= " USED=${used}KB;;;;"; + } + $perfdata .= " FREE=${free}KB;;;;"; + $perfdata .= " CACHES=${caches}KB;;;;"; + $perfdata .= " AVAILABLE=${available}KB;${perf_warn};${perf_crit};;" if ($opt_a); + $perfdata .= " HUGEPAGES=${hugepages}KB;;;;" if ($opt_h); + + if ($opt_f) { + my $percent = sprintf "%.1f", ($free / $total * 100); + if ($percent <= $opt_c) { + finish("CRITICAL - $percent% ($free kB) free!$perfdata",$exit_codes{'CRITICAL'}); + } + elsif ($percent <= $opt_w) { + finish("WARNING - $percent% ($free kB) free!$perfdata",$exit_codes{'WARNING'}); + } + else { + finish("OK - $percent% ($free kB) free.$perfdata",$exit_codes{'OK'}); + } + } + elsif ($opt_a) { + my $percent = sprintf "%.1f", ($available / $total * 100); + if ($percent <= $opt_c) { + finish("CRITICAL - $percent% ($available kB) available!$perfdata",$exit_codes{'CRITICAL'}); + } + elsif ($percent <= $opt_w) { + finish("WARNING - $percent% ($available kB) available!$perfdata",$exit_codes{'WARNING'}); + } + else { + finish("OK - $percent% ($available kB) available.$perfdata",$exit_codes{'OK'}); + } + } + elsif ($opt_u) { + my $percent = sprintf "%.1f", ($used / $total * 100); + if ($percent >= $opt_c) { + finish("CRITICAL - $percent% ($used kB) used!$perfdata",$exit_codes{'CRITICAL'}); + } + elsif ($percent >= $opt_w) { + finish("WARNING - $percent% ($used kB) used!$perfdata",$exit_codes{'WARNING'}); + } + else { + finish("OK - $percent% ($used kB) used.$perfdata",$exit_codes{'OK'}); + } + } +} + +# Show usage +sub usage() { + print "\ncheck_mem.pl v1.0 - Nagios Plugin\n\n"; + print "usage:\n"; + print " check_mem.pl - -w -c \n\n"; + print "options:\n"; + print " -f Check FREE memory\n"; + print " 
-u Check USED memory\n"; + print " -a Check AVAILABLE memory (only Linux)\n"; + print " -C Count OS caches as FREE memory\n"; + print " -z Count ZFS ARC as FREE memory\n"; + print " -h Remove hugepages from the total memory count\n"; + print " -w PERCENT Percent free/used when to warn\n"; + print " -c PERCENT Percent free/used when critical\n"; + print "\nCopyright (C) 2000 Dan Larsson \n"; + print "check_mem.pl comes with absolutely NO WARRANTY either implied or explicit\n"; + print "This program is licensed under the terms of the\n"; + print "MIT License (check source code for details)\n"; + exit $exit_codes{'UNKNOWN'}; +} + +sub get_memory_info { + my $used_memory_kb = 0; + my $free_memory_kb = 0; + my $total_memory_kb = 0; + my $caches_kb = 0; + my $hugepages_nr = 0; + my $hugepages_size = 0; + my $hugepages_kb = 0; + my $available_memory_kb = 0; + + my $uname; + if ( -e '/usr/bin/uname') { + $uname = `/usr/bin/uname -a`; + } + elsif ( -e '/bin/uname') { + $uname = `/bin/uname -a`; + } + else { + die "Unable to find uname in /usr/bin or /bin!\n"; + } + print "uname returns $uname" if ($opt_v); + if ( $uname =~ /Linux/ ) { + my @meminfo = `/bin/cat /proc/meminfo`; + foreach (@meminfo) { + chomp; + if (/^Mem(Total|Free):\s+(\d+) kB/) { + my $counter_name = $1; + if ($counter_name eq 'Free') { + $free_memory_kb = $2; + } + elsif ($counter_name eq 'Total') { + $total_memory_kb = $2; + } + } + elsif (/^(Buffers|Cached|SReclaimable):\s+(\d+) kB/) { + $caches_kb += $2; + } + elsif (/^Shmem:\s+(\d+) kB/) { + $caches_kb -= $1; + } + elsif (/^MemAvailable:\s+(\d+) kB/) { + $available_memory_kb = $1; + } + # These variables will most likely be overwritten once we look into + # /sys/kernel/mm/hugepages, unless we are running on linux <2.6.27 + # and have to rely on them + elsif (/^HugePages_Total:\s+(\d+)/) { + $hugepages_nr = $1; + } + elsif (/^Hugepagesize:\s+(\d+) kB/) { + $hugepages_size = $1; + } + } + + if ($opt_z && -f '/proc/spl/kstat/zfs/arcstats') { + my @arcstats = `/bin/cat /proc/spl/kstat/zfs/arcstats`; + foreach (@arcstats) { + if (/^size\s+\d+\s+(\d+)/) { + $caches_kb += ($1 / 1024); + } + } + } + $hugepages_kb = $hugepages_nr * $hugepages_size; + $used_memory_kb = $total_memory_kb - $free_memory_kb; + + # Read hugepages info from the newer sysfs interface if available + my $hugepages_sysfs_dir = '/sys/kernel/mm/hugepages'; + if ( -d $hugepages_sysfs_dir ) { + # Reset what we read from /proc/meminfo + $hugepages_kb = 0; + opendir(my $dh, $hugepages_sysfs_dir) + || die "Can't open $hugepages_sysfs_dir: $!"; + while (my $entry = readdir $dh) { + if ($entry =~ /^hugepages-(\d+)kB/) { + $hugepages_size = $1; + my $hugepages_nr_file = "$hugepages_sysfs_dir/$entry/nr_hugepages"; + open(my $fh, '<', $hugepages_nr_file) + || die "Can't open $hugepages_nr_file for reading: $!"; + $hugepages_nr = <$fh>; + close($fh); + $hugepages_kb += $hugepages_nr * $hugepages_size; + } + } + closedir($dh); + } + } + elsif ( $uname =~ /HP-UX/ ) { + # HP-UX, thanks to Christoph Fürstaller + my @meminfo = `/usr/bin/sudo /usr/local/bin/kmeminfo`; + foreach (@meminfo) { + chomp; + if (/^Physical memory\s\s+=\s+(\d+)\s+(\d+.\d)g/) { + $total_memory_kb = ($2 * 1024 * 1024); + } + elsif (/^Free memory\s\s+=\s+(\d+)\s+(\d+.\d)g/) { + $free_memory_kb = ($2 * 1024 * 1024); + } + } + $used_memory_kb = $total_memory_kb - $free_memory_kb; + } + elsif ( $uname =~ /FreeBSD/ ) { + # The FreeBSD case. 
2013-03-19 www.claudiokuenzler.com + # free mem = Inactive*Page Size + Cache*Page Size + Free*Page Size + my $pagesize = `sysctl vm.stats.vm.v_page_size`; + $pagesize =~ s/[^0-9]//g; + my $mem_inactive = 0; + my $mem_cache = 0; + my $mem_free = 0; + my $mem_total = 0; + my $free_memory = 0; + my @meminfo = `/sbin/sysctl vm.stats.vm`; + foreach (@meminfo) { + chomp; + if (/^vm.stats.vm.v_inactive_count:\s+(\d+)/) { + $mem_inactive = ($1 * $pagesize); + } + elsif (/^vm.stats.vm.v_cache_count:\s+(\d+)/) { + $mem_cache = ($1 * $pagesize); + } + elsif (/^vm.stats.vm.v_free_count:\s+(\d+)/) { + $mem_free = ($1 * $pagesize); + } + elsif (/^vm.stats.vm.v_page_count:\s+(\d+)/) { + $mem_total = ($1 * $pagesize); + } + } + $free_memory = $mem_inactive + $mem_cache + $mem_free; + $free_memory_kb = ( $free_memory / 1024); + $total_memory_kb = ( $mem_total / 1024); + $used_memory_kb = $total_memory_kb - $free_memory_kb; + $caches_kb = ($mem_cache / 1024); + } + elsif ( $uname =~ /joyent/ ) { + # The SmartOS case. 2014-01-10 www.claudiokuenzler.com + # free mem = pagesfree * pagesize + my $pagesize = `pagesize`; + my $phys_pages = `kstat -p unix:0:system_pages:pagestotal | awk '{print \$NF}'`; + my $free_pages = `kstat -p unix:0:system_pages:pagesfree | awk '{print \$NF}'`; + my $arc_size = `kstat -p zfs:0:arcstats:size | awk '{print \$NF}'`; + my $arc_size_kb = $arc_size / 1024; + + print "Pagesize is $pagesize" if ($opt_v); + print "Total pages is $phys_pages" if ($opt_v); + print "Free pages is $free_pages" if ($opt_v); + print "Arc size is $arc_size" if ($opt_v); + + $caches_kb += $arc_size_kb; + + $total_memory_kb = $phys_pages * $pagesize / 1024; + $free_memory_kb = $free_pages * $pagesize / 1024; + $used_memory_kb = $total_memory_kb - $free_memory_kb; + } + elsif ( $uname =~ /SunOS/ ) { + eval "use Sun::Solaris::Kstat"; + if ($@) { #Kstat not available + if ($opt_C) { + print "You can't report on Solaris caches without Sun::Solaris::Kstat available!\n"; + exit $exit_codes{UNKNOWN}; + } + my @vmstat = `/usr/bin/vmstat 1 2`; + my $line; + foreach (@vmstat) { + chomp; + $line = $_; + } + $free_memory_kb = (split(/ /,$line))[5] / 1024; + my @prtconf = `/usr/sbin/prtconf`; + foreach (@prtconf) { + if (/^Memory size: (\d+) Megabytes/) { + $total_memory_kb = $1 * 1024; + } + } + $used_memory_kb = $total_memory_kb - $free_memory_kb; + + } + else { # We have kstat + my $kstat = Sun::Solaris::Kstat->new(); + my $phys_pages = ${kstat}->{unix}->{0}->{system_pages}->{physmem}; + my $free_pages = ${kstat}->{unix}->{0}->{system_pages}->{freemem}; + # We probably should account for UFS caching here, but it's unclear + # to me how to determine UFS's cache size. There's inode_cache, + # and maybe the physmem variable in the system_pages module?? + # In the real world, it looks to be so small as not to really matter, + # so we don't grab it. If someone can give me code that does this, + # I'd be glad to put it in. + my $arc_size = (exists ${kstat}->{zfs} && ${kstat}->{zfs}->{0}->{arcstats}->{size}) ? 
+ ${kstat}->{zfs}->{0}->{arcstats}->{size} / 1024 + : 0; + $caches_kb += $arc_size; + my $pagesize = `pagesize`; + + $total_memory_kb = $phys_pages * $pagesize / 1024; + $free_memory_kb = $free_pages * $pagesize / 1024; + $used_memory_kb = $total_memory_kb - $free_memory_kb; + } + } + elsif ( $uname =~ /Darwin/ ) { + $total_memory_kb = (split(/ /,`/usr/sbin/sysctl hw.memsize`))[1]/1024; + my $pagesize = (split(/ /,`/usr/sbin/sysctl hw.pagesize`))[1]; + $caches_kb = 0; + my @vm_stat = `/usr/bin/vm_stat`; + foreach (@vm_stat) { + chomp; + if (/^(Pages free):\s+(\d+)\.$/) { + $free_memory_kb = $2*$pagesize/1024; + } + # 'caching' concept works different on MACH + # this should be a reasonable approximation + elsif (/^Pages (inactive|purgable):\s+(\d+).$/) { + $caches_kb += $2*$pagesize/1024; + } + } + $used_memory_kb = $total_memory_kb - $free_memory_kb; + } + elsif ( $uname =~ /AIX/ ) { + my @meminfo = `/usr/bin/vmstat -vh`; + foreach (@meminfo) { + chomp; + if (/^\s*([0-9.]+)\s+(.*)/) { + my $counter_name = $2; + if ($counter_name eq 'memory pages') { + $total_memory_kb = $1*4; + } + if ($counter_name eq 'free pages') { + $free_memory_kb = $1*4; + } + if ($counter_name eq 'file pages') { + $caches_kb = $1*4; + } + if ($counter_name eq 'Number of 4k page frames loaned') { + $free_memory_kb += $1*4; + } + } + } + $used_memory_kb = $total_memory_kb - $free_memory_kb; + } + else { + if ($opt_C) { + print "You can't report on $uname caches!\n"; + exit $exit_codes{UNKNOWN}; + } + my $command_line = `vmstat | tail -1 | awk '{print \$4,\$5}'`; + chomp $command_line; + my @memlist = split(/ /, $command_line); + + # Define the calculating scalars + $used_memory_kb = $memlist[0]/1024; + $free_memory_kb = $memlist[1]/1024; + $total_memory_kb = $used_memory_kb + $free_memory_kb; + } + return ($free_memory_kb,$used_memory_kb,$caches_kb,$available_memory_kb,$hugepages_kb); +} + +sub init { + # Get the options + if ($#ARGV le 0) { + &usage; + } + else { + getopts('c:fuaChvw:z'); + } + + # Shortcircuit the switches + if (!$opt_w or $opt_w == 0 or !$opt_c or $opt_c == 0) { + print "*** You must define WARN and CRITICAL levels!\n"; + &usage; + } + elsif (!$opt_f and !$opt_u and !$opt_a) { + print "*** You must select to monitor USED, FREE or AVAILABLE memory!\n"; + &usage; + } + elsif ($opt_f and $opt_u or $opt_f and $opt_a or $opt_u and $opt_a) { + print "*** You must select to monitor either USED, FREE or AVAILABLE memory!\n"; + &usage; + } + + # Check if levels are sane + if ($opt_w <= $opt_c and $opt_f) { + print "*** WARN level must not be less than CRITICAL when checking FREE memory!\n"; + &usage; + } + elsif ($opt_w >= $opt_c and $opt_u) { + print "*** WARN level must not be greater than CRITICAL when checking USED memory!\n"; + &usage; + } +} + +sub finish { + my ($msg,$state) = @_; + print "$msg\n"; + exit $state; +} diff --git a/roles/nagios_client/files/usr/lib64/nagios/plugins/check_needs_restart b/roles/nagios_client/files/usr/lib64/nagios/plugins/check_needs_restart new file mode 100644 index 0000000..b1484cd --- /dev/null +++ b/roles/nagios_client/files/usr/lib64/nagios/plugins/check_needs_restart @@ -0,0 +1,30 @@ +#!/bin/bash + +set -Eeu -o pipefail + +trap 'exit 3' ERR + +NEEDS_RESTARTING_STDOUT=$(sudo dnf needs-restarting --reboothint) || NEED_REBOOT=$? && NEED_REBOOT=$? +STALE_SERVICES=($(sudo dnf needs-restarting --services 2>/dev/null | sed '/^user@/d')) + +if (( NEED_REBOOT == 1 )); then + echo 'Reboot needed to apply package updates.' 
+ RC=1 +elif (( ${#STALE_SERVICES[@]} > 0 )); then + echo 'One or more services need restarting.' + RC=1 +else + echo 'Everything is up to date.' + RC=0 +fi + +printf '%s\n\n' "${NEEDS_RESTARTING_STDOUT}" + +if (( ${#STALE_SERVICES[@]} > 0 )); then + echo 'The following services need restarting to apply package updates:' + printf ' * %s\n' "${STALE_SERVICES[@]}" +else + echo "All running services are up to date." +fi + +exit $RC diff --git a/roles/nagios_client/files/usr/lib64/nagios/plugins/check_systemd b/roles/nagios_client/files/usr/lib64/nagios/plugins/check_systemd new file mode 100644 index 0000000..c7d83de --- /dev/null +++ b/roles/nagios_client/files/usr/lib64/nagios/plugins/check_systemd @@ -0,0 +1,20 @@ +#!/bin/bash + +set -Eeu -o pipefail + +trap 'exit 3' ERR + +failed_units=$(systemctl --state failed --no-legend --plain | cut -d' ' -f1) + +if [ -n "$failed_units" ]; then + echo "CRIT - failed units: ${failed_units// /, }" + + for unit in $failed_units; do + sudo systemctl status -- "$unit" ||: + done + + exit 2 +else + echo 'OK - all units healthy' + exit 0 +fi diff --git a/roles/nagios_client/files/usr/lib64/nagios/plugins/check_zpools b/roles/nagios_client/files/usr/lib64/nagios/plugins/check_zpools new file mode 100644 index 0000000..30e11e5 --- /dev/null +++ b/roles/nagios_client/files/usr/lib64/nagios/plugins/check_zpools @@ -0,0 +1,74 @@ +#!/bin/bash + +set -Eeu -o pipefail +shopt -s lastpipe + +trap 'exit 3' ERR + +usage() { + echo 'usage: check_zpool -w WARN_THRESHOLD -c CRIT_THRESHOLD' 1>&2 + exit 3 +} + +while getopts ':w:c:' opt; do + case $opt in + w) WARN_THRESHOLD=${OPTARG//%/} ;; + c) CRIT_THRESHOLD=${OPTARG//%/} ;; + *) usage ;; + esac +done +shift $((OPTIND-1)) + +if [ -z "${WARN_THRESHOLD:-}" -o -z "${CRIT_THRESHOLD:-}" ]; then + usage +fi + +if (( WARN_THRESHOLD < CRIT_THRESHOLD )); then + echo 'WARN must be greater than CRIT' 1>&2 + exit 3 +fi + +CRIT=() +WARN=() +OK=() + +zpool list -Ho name,free,capacity,health | while read -r name free capacity health; do + capacity=${capacity//%/} + percent_free=$(( 100 - capacity )) + output="${name} $health: $free free (${percent_free}%)" + + if [ "$health" != ONLINE ]; then + output="$output"$'\n'"$(zpool status "$name")" + fi + + if (( percent_free < CRIT_THRESHOLD )) || [ "$health" != ONLINE ]; then + CRIT+=("$output") + elif (( percent_free < WARN_THRESHOLD )); then + WARN+=("$output") + else + OK+=("$output") + fi +done + +if [ -z "${CRIT[*]}${WARN[*]}${OK[*]}" ]; then + echo 'OK - no zpools present' + exit 0 +fi + +rc=0 + +for line in "${CRIT[@]}"; do + echo "CRIT - ${line}" + rc=3 +done + +for line in "${WARN[@]}"; do + echo "WARN - ${line}" + (( rc )) || rc=2 +done + +for line in "${OK[@]}"; do + echo "OK - ${line}" +done + +exit $rc diff --git a/roles/nagios_client/meta/main.yml b/roles/nagios_client/meta/main.yml new file mode 100644 index 0000000..a5230b1 --- /dev/null +++ b/roles/nagios_client/meta/main.yml @@ -0,0 +1,8 @@ +dependencies: + - role: yum + yum_repositories: epel + when: '"yum_mirrors" not in group_names' + tags: yum + + - role: snmp + tags: snmp diff --git a/roles/nagios_client/tasks/main.yml b/roles/nagios_client/tasks/main.yml new file mode 100644 index 0000000..44424ab --- /dev/null +++ b/roles/nagios_client/tasks/main.yml @@ -0,0 +1,54 @@ +- name: install packages + dnf: + name: '{{ nagios_packages }}' + state: present + +- name: add nagios ssh key + authorized_key: + user: nagios + key: '{{ nagios_ssh_pubkey }}' + state: present + +- name: set selinux context for nagios .ssh directory + 
sefcontext: + target: '{{ nagios_home }}/\.ssh(/.*)?' + setype: ssh_home_t + state: present + register: nagios_ssh_sefcontext + tags: selinux + +- name: apply selinux context to nagios .ssh directory + command: 'restorecon -R {{ nagios_home }}/.ssh' + when: nagios_ssh_sefcontext.changed + tags: selinux + +- name: set nagios shell + user: + name: nagios + shell: /bin/bash + +- name: set PATH for nagios user + copy: + content: export PATH=/sbin:/bin:/usr/sbin:/usr/bin:{{ nagios_plugin_dir }} + dest: '{{ nagios_home }}/.bashrc' + owner: nagios + group: nagios + mode: 0644 + +- name: copy custom nagios plugins + copy: + src: '{{ item.src }}' + dest: '{{ nagios_plugin_dir }}/{{ item.path }}' + mode: 0555 + loop: "{{ lookup('filetree', nagios_plugin_dir[1:], wantlist=True) }}" + loop_control: + label: '{{ item.path }}' + when: item.state == 'file' + tags: nagios_plugins + +- name: generate sudo rules + template: + src: etc/sudoers.d/nagios.j2 + dest: /etc/sudoers.d/nagios + mode: 0400 + tags: nagios_plugins diff --git a/roles/nagios_client/templates/etc/sudoers.d/nagios.j2 b/roles/nagios_client/templates/etc/sudoers.d/nagios.j2 new file mode 100644 index 0000000..039e7a4 --- /dev/null +++ b/roles/nagios_client/templates/etc/sudoers.d/nagios.j2 @@ -0,0 +1,3 @@ +{% for command in nagios_sudo_whitelist %} +{{ nagios_user }} ALL=(root) NOPASSWD: {{ command | replace(':', '\\:') }} +{% endfor %} diff --git a/roles/nagios_client/vars/main.yml b/roles/nagios_client/vars/main.yml new file mode 100644 index 0000000..29fca6d --- /dev/null +++ b/roles/nagios_client/vars/main.yml @@ -0,0 +1,11 @@ +nagios_packages: + - nagios-plugins-all + +nagios_home: /var/spool/nagios +nagios_plugin_dir: /usr/lib64/nagios/plugins +nagios_user: nagios + +nagios_sudo_whitelist: + - /usr/bin/dnf needs-restarting --reboothint + - /usr/bin/dnf needs-restarting --services + - /usr/bin/systemctl status -- * diff --git a/roles/nagios_server/defaults/main.yml b/roles/nagios_server/defaults/main.yml new file mode 100644 index 0000000..c963b93 --- /dev/null +++ b/roles/nagios_server/defaults/main.yml @@ -0,0 +1,34 @@ +nagios_admin_email: root@{{ email_domain }} +nagios_admin_pager: root@{{ email_domain }} + +nagios_access_group: role-nagios-access + +nagios_email: root@{{ email_domain }} + +nagios_reboot_window: 03:00-05:00 + +nagios_ssh_control_persist: 20m + +nagios_snmp_max_size: 10000 + +nagios_check_dns: + - name: example.com + qtype: A + server: 8.8.8.8 + expect: 1.2.3.4 + +nagios_connectivity_check_host: 8.8.8.8 +nagios_connectivity_check_count: 20 +nagios_connectivity_check_rtt_warn: 50.0 +nagios_connectivity_check_rtt_crit: 100.0 +nagios_connectivity_check_loss_warn: 5% +nagios_connectivity_check_loss_crit: 20% + +nagios_manubulon_repo: https://github.com/SteScho/manubulon-snmp +nagios_manubulon_version: master + +# key: name, value: url +nagios_thirdparty_plugins: {} + +# key: name, value: url +nagios_thirdparty_mibs: {} diff --git a/roles/nagios_server/files/usr/lib64/nagios/plugins/check_asterisk_endpoints b/roles/nagios_server/files/usr/lib64/nagios/plugins/check_asterisk_endpoints new file mode 100644 index 0000000..42fee08 --- /dev/null +++ b/roles/nagios_server/files/usr/lib64/nagios/plugins/check_asterisk_endpoints @@ -0,0 +1,62 @@ +#!/usr/libexec/platform-python + +# Nagios check for Asterisk PJSIP endpoints +# +# Copyright (c) 2023 stonewall@sacredheartsc.com +# MIT License https://opensource.org/licenses/MIT + +import requests +import json +import argparse +import sys +from enum import Enum + +class Status(Enum): 
+ OK = 0 + WARN = 1 + CRIT = 2 + UNKNOWN = 3 + +parser = argparse.ArgumentParser() +parser.add_argument('-H', '--host', help='asterisk host', type=str, required=True) +parser.add_argument('-P', '--port', help='asterisk ARI port', type=int, default=8089) +parser.add_argument('-u', '--username', help='asterisk ARI username', type=str, required=True) +parser.add_argument('-p', '--password', help='asterisk ARI password', type=str, required=True) +parser.add_argument('endpoints', nargs='+', help='endpoint name to check', metavar='ENDPOINT') +args = parser.parse_args() + +try: + r = requests.get(f'https://{args.host}:{args.port}/ari/endpoints', auth=(args.username, args.password)) + + if r.status_code == 200: + state = {i['resource']: i['state'] for i in r.json() if i['technology'] == 'PJSIP'} + results = [] + + for endpoint in args.endpoints: + if endpoint in state: + status = Status.OK if state[endpoint] == 'online' else Status.CRIT + message = f'{endpoint} is {state[endpoint]}' + results.append((status, message)) + else: + results.append((Status.UNKNOWN, f'{endpoint} not found')) + + results.sort(key=lambda x:x[0].value, reverse=True) + + if results[0][0] == Status.OK: + print('all endpoints connected') + elif results[0][0] == Status.UNKNOWN: + print('endpoint(s) not found in ARI!') + else: + print('endpoint not connected!') + + for result in results: + print(f'{result[0].name}: {result[1]}') + + sys.exit(results[0][0].value) + else: + print('failed to retrieve data from ARI!') + sys.exit(Status.UNKNOWN.value) + +except Exception as e: + print(str(e)) + sys.exit(Status.UNKNOWN.value) diff --git a/roles/nagios_server/handlers/main.yml b/roles/nagios_server/handlers/main.yml new file mode 100644 index 0000000..34fdd65 --- /dev/null +++ b/roles/nagios_server/handlers/main.yml @@ -0,0 +1,9 @@ +- name: restart nagios + systemd: + name: nagios + state: restarted + +- name: reload nagios + systemd: + name: nagios + state: reloaded diff --git a/roles/nagios_server/meta/main.yml b/roles/nagios_server/meta/main.yml new file mode 100644 index 0000000..29230f9 --- /dev/null +++ b/roles/nagios_server/meta/main.yml @@ -0,0 +1,4 @@ +dependencies: + - role: yum + yum_repositories: epel + tags: yum diff --git a/roles/nagios_server/tasks/freeipa.yml b/roles/nagios_server/tasks/freeipa.yml new file mode 100644 index 0000000..59ab7b3 --- /dev/null +++ b/roles/nagios_server/tasks/freeipa.yml @@ -0,0 +1,42 @@ +- name: create HBAC service + ipahbacsvc: + ipaadmin_principal: '{{ ipa_user }}' + ipaadmin_password: '{{ ipa_pass }}' + name: '{{ nagios_hbac_service }}' + description: nagios web interface + state: present + run_once: yes + +- name: create nagios servers hostgroup + ipahostgroup: + ipaadmin_principal: '{{ ipa_user }}' + ipaadmin_password: '{{ ipa_pass }}' + name: '{{ nagios_hbac_hostgroup }}' + description: Nagios Servers + host: "{{ groups[nagios_hbac_hostgroup] | map('regex_replace', '$', '.' 
~ ansible_domain) }}" + state: present + run_once: yes + +- name: create access group + ipagroup: + ipaadmin_principal: '{{ ipa_user }}' + ipaadmin_password: '{{ ipa_pass }}' + name: '{{ nagios_access_group }}' + description: nagios Administrators + nonposix: yes + state: present + run_once: yes + +- name: create HBAC rule + ipahbacrule: + ipaadmin_principal: '{{ ipa_user }}' + ipaadmin_password: '{{ ipa_pass }}' + name: allow_nagios_users_on_nagios_servers + description: Allow nagios admins on nagios servers + hostgroup: + - '{{ nagios_hbac_hostgroup }}' + group: + - '{{ nagios_access_group }}' + hbacsvc: + - '{{ nagios_hbac_service }}' + run_once: yes diff --git a/roles/nagios_server/tasks/main.yml b/roles/nagios_server/tasks/main.yml new file mode 100644 index 0000000..db8ebf6 --- /dev/null +++ b/roles/nagios_server/tasks/main.yml @@ -0,0 +1,90 @@ +- name: install packages + dnf: + name: '{{ nagios_packages }}' + state: present + +- name: generate nagios configuration + template: + src: '{{ item[1:] }}.j2' + dest: '{{ item }}' + loop: + - /etc/nagios/cgi.cfg + - /etc/nagios/nagios.cfg + - /usr/share/nagios/html/config.inc.php + notify: restart nagios + +- name: remove default nagios config files + copy: + content: | + # This file intentionally empty to avoid being clobbered on package updates. + dest: /etc/nagios/objects/{{ item }} + loop: + - printer.cfg + - switch.cfg + - windows.cfg + - localhost.cfg + notify: reload nagios + +- name: clone manubulon repo + git: + repo: '{{ nagios_manubulon_repo }}' + dest: '{{ nagios_manubulon_install_dir }}' + version: '{{ nagios_manubulon_version }}' + force: yes + update: yes + register: nagios_manubulon_git + +- name: install manubulon plugins + shell: install -o root -g root -m755 {{ nagios_manubulon_install_dir }}/plugins/*.pl {{ nagios_plugin_dir }}/ + when: nagios_manubulon_git.changed + +- import_tasks: objects.yml + tags: nagios_config + +- name: download thirdparty plugins + get_url: + url: '{{ item.url }}' + dest: '{{ nagios_plugin_dir }}/{{ item.name }}' + mode: 0555 + loop: '{{ nagios_thirdparty_plugins | dict2items(key_name="name", value_name="url") }}' + tags: nagios_config + +- name: download thirdparty MIBs + get_url: + url: '{{ item.url }}' + dest: '{{ nagios_mib_dir }}/MIB-{{ item.name | upper }}.txt' + loop: '{{ nagios_thirdparty_mibs | dict2items(key_name="name", value_name="url") }}' + +- name: create nagios ssh directory + file: + path: '{{ nagios_home }}/.ssh' + owner: nagios + group: nagios + mode: 0700 + state: directory + +- name: copy nagios ssh key + copy: + content: '{{ nagios_ssh_privkey }}' + dest: "{{ nagios_home }}/.ssh/id_{{ nagios_ssh_pubkey | regex_replace('^ssh-(\\w+).*', '\\1') }}" + owner: nagios + group: nagios + mode: 0600 + +- import_tasks: freeipa.yml + +- name: create SELinux policy for php-fpm to access nagios contexts + include_role: + name: selinux_policy + apply: + tags: selinux + vars: + selinux_policy_name: php_nagios + selinux_policy_te: '{{ nagios_selinux_policy_te }}' + tags: selinux + +- name: enable nagios + systemd: + name: nagios + enabled: yes + state: started diff --git a/roles/nagios_server/tasks/objects.yml b/roles/nagios_server/tasks/objects.yml new file mode 100644 index 0000000..e216e71 --- /dev/null +++ b/roles/nagios_server/tasks/objects.yml @@ -0,0 +1,32 @@ +- name: generate nagios objects + template: + src: '{{ item.src }}' + dest: /etc/nagios/objects/{{ item.path | splitext | first }} + owner: root + group: nagios + mode: 0640 + lstrip_blocks: yes + loop: "{{ lookup('filetree', 
'../templates/etc/nagios/objects', wantlist=True) }}" + when: item.state == 'file' + loop_control: + label: '{{ item.path }}' + notify: reload nagios + +- name: generate nagios resource file + template: + src: etc/nagios/private/resource.cfg.j2 + dest: /etc/nagios/private/resource.cfg + owner: root + group: nagios + mode: 0640 + notify: reload nagios + +- name: copy nagios plugins + copy: + src: '{{ item.src }}' + dest: '{{ nagios_plugin_dir }}/{{ item.path }}' + mode: 0555 + loop: "{{ lookup('filetree', nagios_plugin_dir[1:], wantlist=True) }}" + loop_control: + label: '{{ item.path }}' + when: item.state == 'file' diff --git a/roles/nagios_server/templates/etc/nagios/cgi.cfg.j2 b/roles/nagios_server/templates/etc/nagios/cgi.cfg.j2 new file mode 100644 index 0000000..2910a7b --- /dev/null +++ b/roles/nagios_server/templates/etc/nagios/cgi.cfg.j2 @@ -0,0 +1,24 @@ +main_config_file=/etc/nagios/nagios.cfg +physical_html_path=/usr/share/nagios/html +url_html_path=/ +show_context_help=0 +use_pending_states=1 +use_authentication=1 +use_ssl_authentication=0 +authorized_for_system_information=* +authorized_for_configuration_information=* +authorized_for_system_commands=* +authorized_for_all_services=* +authorized_for_all_hosts=* +authorized_for_all_service_commands=* +authorized_for_all_host_commands=* +default_statuswrl_layout=4 +ping_syntax=/bin/ping -n -U -c 5 $HOSTADDRESS$ +refresh_rate=90 +result_limit=100 +escape_html_tags=1 +action_url_target=_blank +notes_url_target=_blank +lock_author_names=1 +navbar_search_for_addresses=1 +navbar_search_for_aliases=1 diff --git a/roles/nagios_server/templates/etc/nagios/nagios.cfg.j2 b/roles/nagios_server/templates/etc/nagios/nagios.cfg.j2 new file mode 100644 index 0000000..1e48e0a --- /dev/null +++ b/roles/nagios_server/templates/etc/nagios/nagios.cfg.j2 @@ -0,0 +1,105 @@ +log_file=/var/log/nagios/nagios.log +cfg_dir=/etc/nagios/objects +object_cache_file=/var/spool/nagios/objects.cache +precached_object_file=/var/spool/nagios/objects.precache +resource_file=/etc/nagios/private/resource.cfg +status_file={{ nagios_status_file }} +status_update_interval=10 +nagios_user=nagios +nagios_group=nagios +check_external_commands=1 +command_file=/var/spool/nagios/cmd/nagios.cmd +lock_file=/var/run/nagios/nagios.pid +temp_file=/var/spool/nagios/nagios.tmp +temp_path=/tmp +event_broker_options=-1 +log_rotation_method=d +log_archive_path=/var/log/nagios/archives +use_syslog=1 +log_notifications=1 +log_service_retries=1 +log_host_retries=1 +log_event_handlers=1 +log_initial_states=0 +log_current_states=1 +log_external_commands=1 +log_passive_checks=1 +service_inter_check_delay_method=s +max_service_check_spread=30 +service_interleave_factor=s +host_inter_check_delay_method=s +max_host_check_spread=30 +max_concurrent_checks=0 +check_result_reaper_frequency=10 +max_check_result_reaper_time=30 +check_result_path=/var/spool/nagios/checkresults +max_check_result_file_age=3600 +cached_host_check_horizon=15 +cached_service_check_horizon=15 +enable_predictive_host_dependency_checks=1 +enable_predictive_service_dependency_checks=1 +soft_state_dependencies=0 +auto_reschedule_checks=0 +auto_rescheduling_interval=30 +auto_rescheduling_window=180 +service_check_timeout=60 +host_check_timeout=30 +event_handler_timeout=30 +notification_timeout=30 +ocsp_timeout=5 +ochp_timeout=5 +perfdata_timeout=5 +retain_state_information=1 +state_retention_file={{ nagios_state_retention_file }} +retention_update_interval=60 +use_retained_program_state=1 +use_retained_scheduling_info=1 
+retained_host_attribute_mask=0 +retained_service_attribute_mask=0 +retained_process_host_attribute_mask=0 +retained_process_service_attribute_mask=0 +retained_contact_host_attribute_mask=0 +retained_contact_service_attribute_mask=0 +interval_length=60 +check_for_updates=0 +bare_update_check=0 +use_aggressive_host_checking=0 +execute_service_checks=1 +accept_passive_service_checks=1 +execute_host_checks=1 +accept_passive_host_checks=1 +enable_notifications=1 +enable_event_handlers=1 +process_performance_data=0 +obsess_over_services=0 +obsess_over_hosts=0 +translate_passive_host_checks=0 +passive_host_checks_are_soft=0 +check_for_orphaned_services=1 +check_for_orphaned_hosts=1 +check_service_freshness=1 +service_freshness_check_interval=60 +service_check_timeout_state=c +check_host_freshness=0 +host_freshness_check_interval=60 +additional_freshness_latency=15 +enable_flap_detection=1 +low_service_flap_threshold=5.0 +high_service_flap_threshold=20.0 +low_host_flap_threshold=5.0 +high_host_flap_threshold=20.0 +date_format=us +illegal_object_name_chars=`~!$%^&*|'"<>?,()= +illegal_macro_output_chars=`~$&|'"<> +use_regexp_matching=1 +use_true_regexp_matching=0 +admin_email={{ nagios_admin_email }} +admin_pager={{ nagios_admin_pager }} +daemon_dumps_core=0 +use_large_installation_tweaks=0 +enable_environment_macros=0 +debug_level=0 +debug_verbosity=1 +debug_file=/var/log/nagios/nagios.debug +max_debug_file_size=1000000 +allow_empty_hostgroup_assignment=0 diff --git a/roles/nagios_server/templates/etc/nagios/objects/commands.cfg.j2 b/roles/nagios_server/templates/etc/nagios/objects/commands.cfg.j2 new file mode 100644 index 0000000..e44d6ab --- /dev/null +++ b/roles/nagios_server/templates/etc/nagios/objects/commands.cfg.j2 @@ -0,0 +1,285 @@ +################# +### Notifications +################# +define command { + command_name notify-host-by-email + command_line /usr/bin/printf "%b" "Notification Type: $NOTIFICATIONTYPE$\n\nHost: $HOSTNAME$\nAddress: $HOSTADDRESS$\nState: $HOSTSTATE$\n\nDate/Time: $LONGDATETIME$\n\n$HOSTOUTPUT$\n\n$LONGHOSTOUTPUT$" \ + | /usr/bin/mail -s "$NOTIFICATIONTYPE$: $HOSTNAME$ is $HOSTSTATE$" $CONTACTEMAIL$ +} + +define command { + command_name notify-service-by-email + command_line /usr/bin/printf "%b" "Notification Type: $NOTIFICATIONTYPE$\n\nHost: $HOSTALIAS$\nService: $SERVICEDESC$\nState: $SERVICESTATE$\n\nDate/Time: $LONGDATETIME$\n\n$SERVICEOUTPUT$\n\n$LONGSERVICEOUTPUT$" \ + | /usr/bin/mail -s "$NOTIFICATIONTYPE$: $HOSTALIAS$/$SERVICEDESC$ is $SERVICESTATE$" $CONTACTEMAIL$ +} + + +############### +### Host Checks +############### +define command { + command_name check_ping + command_line $USER1$/check_ping \ + --hostname='$ARG1$' \ + --packets='$ARG2$' \ + --warning='$ARG3$' \ + --critical='$ARG4$' +} + +define command { + command_name check_dummy + command_line $USER1$/check_dummy 0 +} + + +########################## +### Service Checks: Common +########################## +define command { + command_name check_ssh + command_line $USER1$/check_ssh '$HOSTADDRESS$' +} + +define command { + command_name check_systemd_by_ssh + command_line $USER1$/check_by_ssh \ + {{ nagios_check_by_ssh_args }} \ + --command='check_systemd' +} + +define command { + command_name check_needs_restart_by_ssh + command_line $USER1$/check_by_ssh \ + {{ nagios_check_by_ssh_args }} \ + --command='check_needs_restart' +} + +define command { + command_name check_mem_by_ssh + command_line $USER1$/check_by_ssh \ + {{ nagios_check_by_ssh_args }} \ + --command='check_mem -u -C -z -w $ARG1$ -c 
$ARG2$' +} + +define command { + command_name check_swap_by_ssh + command_line $USER1$/check_by_ssh \ + {{ nagios_check_by_ssh_args }} \ + --command='check_swap -n ok -w $ARG1$ -c $ARG2$' +} + +define command { + command_name check_snmp_interface + command_line $USER1$/check_snmp_int.pl \ + {{ nagios_manubulon_args }} \ + --ign-admindown \ + --use-ifname \ + --perfdata \ + --error \ + --perfspeed \ + --perfcheck \ + --extperfcheck \ + --64bits \ + --label \ + --kbits \ + --mega \ + --name='$ARG1$' \ + --warning='$ARG2$' \ + --critical='$ARG3$' \ + --octetlength={{ nagios_snmp_max_size }} \ + $ARG4$ +} + +define command { + command_name check_snmp_storage + command_line $USER1$/check_snmp_storage.pl \ + {{ nagios_manubulon_args }} \ + --name='$ARG1$' \ + --warn='$ARG2$' \ + --crit='$ARG3$' \ + --storagetype=FixedDisk \ + --perfdata \ + --gigabyte \ + --short=1,1 \ + --octetlength={{ nagios_snmp_max_size }} \ + $ARG4$ +} + +define command { + command_name check_snmp_storage_terse + command_line $USER1$/check_snmp_storage.pl \ + {{ nagios_manubulon_args }} \ + --name='$ARG1$' \ + --warn='$ARG2$' \ + --crit='$ARG3$' \ + --storagetype=FixedDisk \ + --gigabyte \ + --short=0,1 \ + --octetlength={{ nagios_snmp_max_size }} \ + $ARG4$ +} + +define command { + command_name check_snmp_load + command_line $USER1$/check_snmp_load.pl \ + {{ nagios_manubulon_args }} \ + --perfdata \ + --type=netsl \ + --warn='$ARG1$' \ + --crit='$ARG2$' +} + +define command { + command_name check_snmp_mem + command_line $USER1$/check_snmp_mem.pl \ + {{ nagios_manubulon_args }} \ + --perfdata \ + --warn='$ARG1$' \ + --crit='$ARG2$' +} + + +####################### +### Service Checks: ZFS +####################### +define command { + command_name check_zpools_by_ssh + command_line $USER1$/check_by_ssh \ + {{ nagios_check_by_ssh_args }} \ + --command='check_zpools -w $ARG1$ -c $ARG2$' +} + + +################################## +### Service Checks: Infrastructure +################################## +define command { + command_name check_cyberpower + command_line $USER1$/check_cyberpower -u -H $HOSTADDRESS$ -C {{ nagios_snmp_community | quote }} -l $ARG1$ $ARG2$ +} + + +########################## +# Service Checks: Asterisk +########################## +define command { + command_name check_asterisk_endpoints + command_line $USER1$/check_asterisk_endpoints -H '$_HOSTFQDN$' -P '$ARG1$' -u '$ARG2$' -p '$ARG3$' $ARG4$ +} + + +###################################### +# Service Checks: Certificate Validity +###################################### +define command { + command_name check_ssl_validity + command_line $USER1$/check_ssl_validity -I '$HOSTADDRESS$' -C 3600 -p '$ARG1$' -H '$ARG2$' -w '$ARG3$' -c '$ARG4$' +} + + +###################### +# Service Checks: SMTP +###################### +define command { + command_name check_smtp + command_line $USER1$/check_smtp -H '$HOSTADDRESS$' -p 25 -S -D '$ARG1$,$ARG2$' -w '$ARG3$' -c '$ARG4$' +} + +define command { + command_name check_mailq + command_line $USER1$/check_by_ssh \ + {{ nagios_check_by_ssh_args }} \ + --command='check_mailq -M postfix -w $ARG1$ -c $ARG2$' +} + +###################### +# Service Checks: IMAP +###################### +define command { + command_name check_imap + command_line $USER1$/check_imap -H '$HOSTADDRESS$' -p 993 -S -D '$ARG1$,$ARG2$' -w '$ARG3$' -c '$ARG4$' +} + +###################### +# Service Checks: XMPP +###################### +define command { + command_name check_xmpp + command_line $USER1$/check_ssl_cert \ + --host '$HOSTADDRESS$' \ + --protocol 
xmpp-server \ + --ignore-sct \ + --timeout 60 \ + --xmpphost '$ARG1$' \ + --warning '$ARG2$' \ + --critical '$ARG3$' +} + +############################ +# Service Checks: PostgreSQL +############################ +define command { + command_name check_postgres + command_line $USER1$/check_ssl_cert \ + --host '$HOSTADDRESS$' \ + --protocol postgres \ + --ignore-sct \ + --timeout 60 \ + --warning '$ARG1$' \ + --critical '$ARG2$' +} + +############################ +# Service Checks: LDAP +############################ +define command { + command_name check_ldaps + command_line $USER1$/check_ldaps \ + --hostname='$_HOSTFQDN$' \ + --ver3 \ + --base='$ARG1$' \ + --age='$ARG2$,$ARG3$' +} + +###################################### +# Service Checks: HTTPS +###################################### +define command { + command_name check_https + command_line $USER1$/check_http \ + --IP-address='$HOSTADDRESS$' \ + --hostname='$ARG1$' \ + --port=443 \ + --ssl=1.2 \ + --sni \ + --verify-host \ + --certificate '$ARG2$,$ARG3$' \ + --continue-after-certificate \ + --no-body \ + --onredirect=ok \ + --warning='$ARG4$' \ + --critical='$ARG5$' \ + $ARG6$ +} + +###################################### +# Service Checks: DNS +###################################### +define command { + command_name check_dns + command_line $USER1$/check_dns \ + --accept-cname \ + --server='$HOSTADDRESS$' \ + --hostname='$ARG1$' \ + --querytype='$ARG2$' +} + +define command { + command_name check_dns_response + command_line $USER1$/check_dns \ + --accept-cname \ + --server='$ARG1$' \ + --hostname='$ARG2$' \ + --querytype='$ARG3$' \ + --expected-address='$ARG4$' +} diff --git a/roles/nagios_server/templates/etc/nagios/objects/contacts.cfg.j2 b/roles/nagios_server/templates/etc/nagios/objects/contacts.cfg.j2 new file mode 100644 index 0000000..797adbc --- /dev/null +++ b/roles/nagios_server/templates/etc/nagios/objects/contacts.cfg.j2 @@ -0,0 +1,6 @@ +define contact { + contact_name sysadmins + alias System Administrators + email {{ nagios_email }} + use generic-contact +} diff --git a/roles/nagios_server/templates/etc/nagios/objects/hostgroups.cfg.j2 b/roles/nagios_server/templates/etc/nagios/objects/hostgroups.cfg.j2 new file mode 100644 index 0000000..2051447 --- /dev/null +++ b/roles/nagios_server/templates/etc/nagios/objects/hostgroups.cfg.j2 @@ -0,0 +1,10 @@ +{% for groupname in groups.keys() | difference(['all','ungrouped'] + nagios_excluded_groups) %} +{% if groups[groupname] | reject('in', nagios_excluded_groups | map('extract', groups) | flatten) %} +define hostgroup { + hostgroup_name {{ groupname }} + alias {{ groupname | replace('_', ' ') | title }} + members {{ groups[groupname] | reject('in', nagios_excluded_groups | map('extract', groups) | flatten) | join(',') }} +} + +{% endif %} +{% endfor %} diff --git a/roles/nagios_server/templates/etc/nagios/objects/hosts.cfg.j2 b/roles/nagios_server/templates/etc/nagios/objects/hosts.cfg.j2 new file mode 100644 index 0000000..3e8d72f --- /dev/null +++ b/roles/nagios_server/templates/etc/nagios/objects/hosts.cfg.j2 @@ -0,0 +1,31 @@ +define host { + host_name {{ inventory_hostname }} + alias {{ inventory_hostname }} + address 127.0.0.1 + use generic-host + check_command check_dummy + notification_period 24x7 + _fqdn {{ ansible_fqdn }} + _snmp_user {{ hostvars[inventory_hostname].nagios_snmp_user }} + _snmp_priv_pass {{ hostvars[inventory_hostname].nagios_snmp_priv_pass }} + _snmp_priv_proto {{ hostvars[inventory_hostname].nagios_snmp_priv_proto }} + _snmp_auth_pass {{ 
hostvars[inventory_hostname].nagios_snmp_auth_pass }} + _snmp_auth_proto {{ hostvars[inventory_hostname].nagios_snmp_auth_proto }} +} + +{% for host in groups['all'] | reject('equalto', inventory_hostname) | reject('in', nagios_excluded_groups | map('extract', groups) | flatten) %} +define host { + host_name {{ host }} + alias {{ host }} + address {{ hostvars[host].ip }} + use generic-host + check_command check_ping!$HOSTADDRESS$!{{ hostvars[host].nagios_ping_count }}!{{ hostvars[host].nagios_ping_rtt_warn }},{{ hostvars[host].nagios_ping_loss_warn | replace('%', '') }}%!{{ hostvars[host].nagios_ping_rtt_crit }},{{ hostvars[host].nagios_ping_loss_crit | replace('%', '') }}% + _fqdn {{ hostvars[host].fqdn }} + _snmp_user {{ hostvars[host].nagios_snmp_user }} + _snmp_priv_pass {{ hostvars[host].nagios_snmp_priv_pass }} + _snmp_priv_proto {{ hostvars[host].nagios_snmp_priv_proto }} + _snmp_auth_pass {{ hostvars[host].nagios_snmp_auth_pass }} + _snmp_auth_proto {{ hostvars[host].nagios_snmp_auth_proto }} +} + +{% endfor %} diff --git a/roles/nagios_server/templates/etc/nagios/objects/servicedependencies.cfg.j2 b/roles/nagios_server/templates/etc/nagios/objects/servicedependencies.cfg.j2 new file mode 100644 index 0000000..050a1cd --- /dev/null +++ b/roles/nagios_server/templates/etc/nagios/objects/servicedependencies.cfg.j2 @@ -0,0 +1,8 @@ +# ssh-based checks depend on the ssh service being OK +define servicedependency { + hostgroup nagios_check_ssh + service_description ssh + dependent_servicegroup_name ssh + execution_failure_criteria c,u + notification_failure_criteria c,u +} diff --git a/roles/nagios_server/templates/etc/nagios/objects/servicegroups.cfg.j2 b/roles/nagios_server/templates/etc/nagios/objects/servicegroups.cfg.j2 new file mode 100644 index 0000000..c8e6a98 --- /dev/null +++ b/roles/nagios_server/templates/etc/nagios/objects/servicegroups.cfg.j2 @@ -0,0 +1,19 @@ +define servicegroup { + servicegroup_name ssh + alias SSH-based checks +} + +define servicegroup { + servicegroup_name snmp + alias SNMP-based checks +} + +define servicegroup { + servicegroup_name https + alias HTTPS-based checks +} + +define servicegroup { + servicegroup_name dns + alias DNS-based checks +} diff --git a/roles/nagios_server/templates/etc/nagios/objects/services.cfg.j2 b/roles/nagios_server/templates/etc/nagios/objects/services.cfg.j2 new file mode 100644 index 0000000..68b4fe4 --- /dev/null +++ b/roles/nagios_server/templates/etc/nagios/objects/services.cfg.j2 @@ -0,0 +1,375 @@ +############### +# Local checks +############### + +# Upstream packet loss +define service { + service_description upstream-packet-loss + host_name {{ inventory_hostname }} + use generic-service + check_command check_ping!{{ nagios_connectivity_check_host }}!{{ nagios_connectivity_check_count }}!{{ nagios_connectivity_check_rtt_warn }},{{ nagios_connectivity_check_loss_warn | replace('%', '') }}%!{{ nagios_connectivity_check_rtt_crit }},{{ nagios_connectivity_check_loss_crit | replace('%', '') }}% +} + +# Nagios web gui +define service { + service_description https + host_name {{ inventory_hostname }} + use generic-service + check_command check_https!$_HOSTFQDN$!{{ nagios_certificate_warn }}!{{ nagios_certificate_crit }}!{{ nagios_http_warn }}!{{ nagios_http_crit }}!-e 'HTTP/1.1 401' + servicegroups https +} + +############### +# DNS checks +############### + +{% for item in nagios_check_dns %} +# {{ item.name }} - {{ item.qtype | default('A') | upper }} +define service { + {% if (item.qtype | default('A') | upper) == 'A' %} 
+ service_description dns-{{ item.name }} + {% else %} + service_description dns-{{ item.name }}-{{ item.qtype | lower }} + {% endif %} + host_name {{ inventory_hostname }} + use generic-service + check_command check_dns_response!{{ item.server }}!{{ item.name }}!{{ item.qtype | default('A') | upper }}!{{ item.expect }} + servicegroups dns +} + +{% endfor %} + + +############### +# Common checks +############### + +# SSH +define service { + service_description ssh + hostgroups nagios_check_ssh + use generic-service + check_command check_ssh +} + +# Systemd +define service { + service_description systemd + hostgroups nagios_check_systemd + use generic-service + check_command check_systemd_by_ssh + servicegroups ssh +} + +# Check if services need restart or system needs reboot +define service { + service_description needs-restart + hostgroups nagios_el_clients + use generic-service + check_command check_needs_restart_by_ssh + servicegroups ssh + check_interval 60 + # only alert if needs-restart doesn't resolve within 24h + first_notification_delay 1440 +} + +{% for host in groups.nagios_check_load %} +# Load - {{ host }} +define service { + service_description load + host_name {{ host }} + use generic-service + check_command check_snmp_load!{{ hostvars[host].nagios_load_1m_warn }},{{ hostvars[host].nagios_load_5m_warn }},{{ hostvars[host].nagios_load_15m_warn }}!{{ hostvars[host].nagios_load_1m_crit }},{{ hostvars[host].nagios_load_5m_crit }},{{ hostvars[host].nagios_load_15m_crit }} + servicegroups snmp +} + +{% endfor %} + +{% for host in groups.nagios_check_mem %} +# Memory / Swap - {{ host }} +{% if host in groups.nagios_check_zfs %} +define service { + service_description mem + host_name {{ host }} + use generic-service + check_command check_mem_by_ssh!{{ hostvars[host].nagios_mem_warn | replace('%', '') }}!{{ hostvars[host].nagios_mem_crit | replace('%', '') }} + servicegroups ssh +} +define service { + service_description swap + host_name {{ host }} + use generic-service + check_command check_swap_by_ssh!{{ 100 - (hostvars[host].nagios_swap_warn | replace('%', '') | int) }}%!{{ 100 - (hostvars[host].nagios_swap_crit | replace('%', '') | int) }}% + servicegroups ssh +} +{% else %} +define service { + service_description mem + host_name {{ host }} + use generic-service + check_command check_snmp_mem!{{ hostvars[host].nagios_mem_warn | replace('%', '') }},{{ hostvars[host].nagios_swap_warn | replace('%', '') }}!{{ hostvars[host].nagios_mem_crit | replace('%', '') }},{{ hostvars[host].nagios_swap_crit | replace('%', '') }} + servicegroups snmp +} +{% endif %} + +{% endfor %} + +{% for host in groups.nagios_check_disk %} +# Disk Usage - {{ host }} +{% for disk in hostvars[host].nagios_disks %} +define service { + service_description {% if disk is string %}{{ disk }}{% elif disk.description is defined %}{{ disk.description }}{% else %}{{ disk.path }}{% endif %} + + host_name {{ host }} + use generic-service + check_command check_snmp_storage{% if disk.terse | default(false) %}_terse{% endif %}!{% if disk is string %}{{ disk }}{% elif disk.regex is defined %}{{ disk.regex | replace('!', '\\!') }}{% else %}{{ disk.path }}{% endif %}!{{ disk.warn | default(hostvars[host].nagios_disk_warn) }}!{{ disk.crit | default(hostvars[host].nagios_disk_crit) }}!{% if disk.exclude | default(false) %}--exclude{% endif %} {% if disk.regex is not defined %}--noregexp{% endif %} + + servicegroups snmp +} + +{% endfor %} +{% endfor %} + +{% for host in groups.nagios_check_interfaces %} +# Network Interfaces - 
{{ host }} +{% for intf in hostvars[host].nagios_interfaces %} +define service { + service_description {% if intf is string %}{{ intf }}{% elif intf.description is defined %}{{ intf.description }}{% else %}{{ intf.name }}{% endif %} + + host_name {{ host }} + use generic-service + check_interval 5 + retry_interval 5 + check_command check_snmp_interface!{% if intf is string %}{{ intf }}{% elif intf.regex is defined %}{{ intf.regex | replace('!', '\\!') }}{% else %}{{ intf.name }}{% endif %}!{{ intf.bandwidth_warn | default(hostvars[host].nagios_interface_bandwidth_warn) }},{{ intf.bandwidth_warn | default(hostvars[host].nagios_interface_bandwidth_warn) }},{{ intf.error_warn | default(hostvars[host].nagios_interface_error_warn) }},{{ intf.error_warn | default(hostvars[host].nagios_interface_error_warn) }},{{ intf.discard_warn | default(hostvars[host].nagios_interface_discard_warn) }},{{ intf.discard_warn | default(hostvars[host].nagios_interface_discard_warn) }}!{{ intf.bandwidth_crit | default(hostvars[host].nagios_interface_bandwidth_crit) }},{{ intf.bandwidth_crit | default(hostvars[host].nagios_interface_bandwidth_crit) }},{{ intf.error_crit | default(hostvars[host].nagios_interface_error_crit) }},{{ intf.error_crit | default(hostvars[host].nagios_interface_error_crit) }},{{ intf.discard_crit | default(hostvars[host].nagios_interface_discard_crit) }},{{ intf.discard_crit | default(hostvars[host].nagios_interface_discard_crit) }}!{% if intf.down_ok | default(false) %}--down{% endif %} {% if intf.regex is not defined %}--noregexp{% endif %} + servicegroups snmp +} + +{% endfor %} +{% endfor %} + + +############ +# ZFS Checks +############ + +{% for host in groups.nagios_check_zfs %} +# zpools - {{ host }} +define service { + service_description zpool + host_name {{ host }} + use generic-service + check_command check_zpools_by_ssh!{{ 100 - (hostvars[host].nagios_disk_warn|replace('%','') | int) }}!{{ 100 - (hostvars[host].nagios_disk_crit|replace('%','') | int) }} + servicegroups ssh +} + +{% endfor %} + + +####################### +# Infrastructure Checks +####################### + +# UPS +define service { + service_description status + hostgroups ups + use generic-service + check_command check_cyberpower!status + servicegroups snmp +} + +define service { + service_description health + hostgroups ups + use generic-service + check_command check_cyberpower!health + servicegroups snmp +} + +define service { + service_description battery + hostgroups ups + use generic-service + check_command check_cyberpower!battery + servicegroups snmp +} + +define service { + service_description transfer + hostgroups ups + use generic-service + check_command check_cyberpower!transfer + servicegroups snmp +} + +{% for host in groups.ups %} +# UPS Temp - {{ host }} +define service { + service_description temp + host_name {{ host }} + use generic-service + check_command check_cyberpower!temp!-w {{ hostvars[host].nagios_temp_warn }} -c {{ hostvars[host].nagios_temp_crit }} + servicegroups snmp +} + +define service { +# UPS Load - {{ host }} + service_description load + host_name {{ host }} + use generic-service + check_command check_cyberpower!load! 
-w {{ hostvars[host].nagios_power_draw_warn | replace('%', '') }} -c {{ hostvars[host].nagios_power_draw_crit | replace('%', '') }} + servicegroups snmp +} + +{% endfor %} + + +################# +# Asterisk Checks +################# + +{% for host in groups.asterisk_servers %} +# endpoints - {{ host }} +define service { + service_description endpoints + host_name {{ host }} + use generic-service + check_command check_asterisk_endpoints!{{ hostvars[host].asterisk_https_port | default(8089) }}!nagios!{{ hostvars[host].asterisk_ari_users | selectattr('name', '==', 'nagios') | map(attribute='password') | first }}!{{ (hostvars[host].asterisk_sip_trunks + hostvars[host].asterisk_sip_extensions) | map(attribute='name') | join(' ' ) }} +} +{% endfor %} + + +###################### +# SMTP Checks +###################### + +{% for host in groups.mail_servers %} +define service { + service_description smtp + host_name {{ host }} + use generic-service + check_command check_smtp!{{ hostvars[host].nagios_certificate_warn }}!{{ hostvars[host].nagios_certificate_crit }}!{{ hostvars[host].nagios_smtp_warn }}!{{ hostvars[host].nagios_smtp_crit }} +} + +define service { + service_description mailq + host_name {{ host }} + use generic-service + check_command check_mailq!{{ hostvars[host].nagios_mailq_warn }}!{{ hostvars[host].nagios_mailq_crit }} +} + +{% endfor %} + + +###################### +# IMAP Checks +###################### + +{% for host in groups.imap_servers %} +define service { + service_description imap + host_name {{ host }} + use generic-service + check_command check_imap!{{ hostvars[host].nagios_certificate_warn }}!{{ hostvars[host].nagios_certificate_crit }}!{{ hostvars[host].nagios_imap_warn }}!{{ hostvars[host].nagios_imap_crit }} +} + +{% endfor %} + + +###################### +# XMPP Checks +###################### + +{% for host in groups.xmpp_servers %} +{% for vhost in hostvars[host].prosody_vhosts %} +define service { + service_description xmpp-{{ vhost }} + host_name {{ host }} + use generic-service + check_command check_xmpp!{{ vhost }}!{{ hostvars[host].nagios_certificate_warn }}!{{ hostvars[host].nagios_certificate_crit }} +} + +{% endfor %} +{% for vhost in hostvars[host].prosody_conference_vhosts | default(['conference.'] | product(hostvars[host].prosody_vhosts) | map('join') | list) %} +define service { + service_description xmpp-{{ vhost }} + host_name {{ host }} + use generic-service + check_command check_xmpp!{{ vhost }}!{{ hostvars[host].nagios_certificate_warn }}!{{ hostvars[host].nagios_certificate_crit }} +} + +{% endfor %} +{% endfor %} + + +###################### +# PostgreSQL Checks +###################### + +{% for host in groups.postgresql_servers %} +define service { + service_description postgres + host_name {{ host }} + use generic-service + check_command check_postgres!{{ hostvars[host].nagios_certificate_warn }}!{{ hostvars[host].nagios_certificate_crit }} +} + +{% endfor %} + + +###################### +# HTTPS Checks +###################### + +{% for host in groups.nagios_check_https %} +# {{ host }} +{% for vhost in hostvars[host].nagios_https_vhosts | default(['$_HOSTFQDN$']) %} +define service { + service_description {{ 'https' if loop.length == 1 else 'https-'~(vhost if vhost is string else vhost.name) }} + host_name {{ host }} + use generic-service + check_command check_https!{{ vhost if vhost is string else vhost.name }}!{{ hostvars[host].nagios_certificate_warn }}!{{ hostvars[host].nagios_certificate_crit }}!{{ hostvars[host].nagios_http_warn }}!{{ 
hostvars[host].nagios_http_crit }}!{{ '-e HTTP/1.1 '~vhost.status if vhost.status is defined else '-e HTTP/1.1 '~hostvars[host].nagios_http_status if hostvars[host].nagios_http_status is defined else '' }} + servicegroups https +} +{% endfor %} + +{% endfor %} + + +###################### +# DNS Checks +###################### + +{% for host in groups.authoritative_nameservers %} +{% for zone in hostvars[host].nsd_zones | map(attribute='name') %} +define service { + service_description dns-{{ zone }} + host_name {{ host }} + use generic-service + check_command check_dns!{{ zone }}!SOA +} + +{% endfor %} +{% endfor %} + + +###################### +# FreeIPA Checks +###################### + +define service { + service_description dns + hostgroups freeipa_servers + use generic-service + check_command check_dns!{{ domain }}!SOA +} + +{% for host in groups.freeipa_servers %} +define service { + service_description ldap + hostgroups freeipa_servers + use generic-service + check_command check_ldaps!{{ freeipa_basedn }}!{{ hostvars[host].nagios_certificate_warn }}!{{ hostvars[host].nagios_certificate_crit }} +} + +{% endfor %} diff --git a/roles/nagios_server/templates/etc/nagios/objects/templates.cfg.j2 b/roles/nagios_server/templates/etc/nagios/objects/templates.cfg.j2 new file mode 100644 index 0000000..4f9d306 --- /dev/null +++ b/roles/nagios_server/templates/etc/nagios/objects/templates.cfg.j2 @@ -0,0 +1,51 @@ +define contact { + name generic-contact + host_notification_period 24x7 + host_notification_options d,u,r,f,s + host_notification_commands notify-host-by-email + service_notification_period 24x7 + service_notification_options w,u,c,r,f,s + service_notification_commands notify-service-by-email + register 0 +} + +define host { + name generic-host + notifications_enabled 1 + event_handler_enabled 1 + flap_detection_enabled 0 + process_perf_data 1 + retain_status_information 1 + retain_nonstatus_information 1 + notification_period 24x7-except-reboot-window + notification_interval 0 + notification_options d,u,r,f + first_notification_delay 0 + check_period 24x7 + check_interval 5 + retry_interval 1 + max_check_attempts 3 + contacts sysadmins + register 0 +} + +define service { + name generic-service + parallelize_check 1 + check_freshness 0 + notifications_enabled 1 + event_handler_enabled 1 + flap_detection_enabled 0 + process_perf_data 1 + retain_status_information 1 + retain_nonstatus_information 1 + is_volatile 0 + max_check_attempts 3 + check_interval 10 + retry_interval 1 + notification_options w,u,c,r,f + notification_interval 0 + first_notification_delay 0 + contacts sysadmins + register 0 +} diff --git a/roles/nagios_server/templates/etc/nagios/objects/timeperiods.cfg.j2 b/roles/nagios_server/templates/etc/nagios/objects/timeperiods.cfg.j2 new file mode 100644 index 0000000..2a0c885 --- /dev/null +++ b/roles/nagios_server/templates/etc/nagios/objects/timeperiods.cfg.j2 @@ -0,0 +1,39 @@ +define timeperiod { + timeperiod_name reboot-window + alias reboot window + + sunday {{ nagios_reboot_window }} + monday {{ nagios_reboot_window }} + tuesday {{ nagios_reboot_window }} + wednesday {{ nagios_reboot_window }} + thursday {{ nagios_reboot_window }} + friday {{ nagios_reboot_window }} + saturday {{ nagios_reboot_window }} +} + +define timeperiod { + timeperiod_name 24x7 + alias 24x7 + + sunday 00:00-24:00 + monday 00:00-24:00 + tuesday 00:00-24:00 + wednesday 00:00-24:00 + thursday 00:00-24:00 + friday 00:00-24:00 + saturday 00:00-24:00 +} + +define timeperiod { + timeperiod_name 
24x7-except-reboot-window + alias 24x7 (except reboot window) + exclude reboot-window + + sunday 00:00-24:00 + monday 00:00-24:00 + tuesday 00:00-24:00 + wednesday 00:00-24:00 + thursday 00:00-24:00 + friday 00:00-24:00 + saturday 00:00-24:00 +} diff --git a/roles/nagios_server/templates/etc/nagios/private/resource.cfg.j2 b/roles/nagios_server/templates/etc/nagios/private/resource.cfg.j2 new file mode 100644 index 0000000..b8c4e8c --- /dev/null +++ b/roles/nagios_server/templates/etc/nagios/private/resource.cfg.j2 @@ -0,0 +1 @@ +$USER1$=/usr/lib64/nagios/plugins diff --git a/roles/nagios_server/templates/usr/share/nagios/html/config.inc.php.j2 b/roles/nagios_server/templates/usr/share/nagios/html/config.inc.php.j2 new file mode 100644 index 0000000..4723227 --- /dev/null +++ b/roles/nagios_server/templates/usr/share/nagios/html/config.inc.php.j2 @@ -0,0 +1,11 @@ + diff --git a/roles/nagios_server/vars/main.yml b/roles/nagios_server/vars/main.yml new file mode 100644 index 0000000..aa8effa --- /dev/null +++ b/roles/nagios_server/vars/main.yml @@ -0,0 +1,78 @@ +nagios_packages: + - nagios + - nagios-plugins-all + - nagios-contrib + - nagios-selinux + - perl-Net-SNMP + - perl-Getopt-Long + - perl-Crypt-Rijndael + - perl-Crypt-DES + - perl-Digest-HMAC + - perl-Switch + - perl-bignum + - git + - python3 + - bc + +nagios_home: /var/spool/nagios +nagios_html_dir: /usr/share/nagios/html +nagios_cgi_dir: /usr/lib64/nagios/cgi-bin +nagios_status_file: /var/log/nagios/status.dat +nagios_state_retention_file: /var/log/nagios/retention.dat +nagios_plugin_dir: /usr/lib64/nagios/plugins + +nagios_mib_dir: /usr/share/snmp/mibs + +nagios_manubulon_install_dir: /usr/local/share/manubulon + +nagios_thirdparty_plugins: + check_cyberpower: https://exchange.nagios.org/components/com_mtree/attachment.php?link_id=7181&cf_id=24 + check_ssl_cert: https://raw.githubusercontent.com/matteocorti/check_ssl_cert/master/check_ssl_cert + +nagios_thirdparty_mibs: {} + +nagios_hbac_service: nagios +nagios_hbac_hostgroup: nagios_servers + +nagios_check_by_ssh_args: >- + --hostname='$HOSTADDRESS$' + --quiet + --ssh-option=StrictHostKeyChecking=no + --ssh-option=UserKnownHostsFile=/dev/null + --ssh-option=ControlMaster=yes + --ssh-option='ControlPath=/var/run/nagios/ssh-$HOSTNAME$' + --ssh-option=ControlPersist={{ nagios_ssh_control_persist | quote }} + +nagios_manubulon_args: >- + --hostname='$HOSTADDRESS$' + --login='$_HOSTSNMP_USER$' + --passwd='$_HOSTSNMP_AUTH_PASS$' + --privpass='$_HOSTSNMP_PRIV_PASS$' + --protocols='$_HOSTSNMP_AUTH_PROTO$,$_HOSTSNMP_PRIV_PROTO$' + +nagios_selinux_policy_te: | + require { + type nagios_spool_t; + type httpd_t; + class file open; + } + + #============= httpd_t ============== + allow httpd_t nagios_spool_t:file open; + +nagios_apache_config: | + + AuthType GSSAPI + AuthName "FreeIPA Single Sign-On" + {{ apache_gssapi_session_config }} + AuthLDAPUrl "{{ apache_ldap_url }}?krbprincipalname" + {{ apache_ldap_creds }} + Require ldap-attribute memberof=cn={{ nagios_access_group }},{{ freeipa_group_basedn }} + + + ScriptAlias "/cgi-bin/" "{{ nagios_cgi_dir }}/" + + + AllowOverride None + Require all granted + diff --git a/roles/nfs_server/defaults/main.yml b/roles/nfs_server/defaults/main.yml new file mode 100644 index 0000000..bc60543 --- /dev/null +++ b/roles/nfs_server/defaults/main.yml @@ -0,0 +1,14 @@ +nfs_mountd_port: 20048 + +nfs_exports: [] +smb_shares: [] +nfs_homedirs: [] + +nfs_homedir_user_dataset: tank/user +nfs_homedir_group_dataset: tank/group + +nfs_homedir_priv_quota: 50G 
+nfs_homedir_pub_quota: 10G + +nfs_homedir_options: rw +nfs_homedir_clients: [] diff --git a/roles/nfs_server/files/etc/samba/local.conf b/roles/nfs_server/files/etc/samba/local.conf new file mode 100644 index 0000000..d9f5f53 --- /dev/null +++ b/roles/nfs_server/files/etc/samba/local.conf @@ -0,0 +1,14 @@ +[global] + smb encrypt = desired + use sendfile = yes + map archive = no + name resolve order = host + mdns name = mdns + disable netbios = yes + kernel oplocks = yes + read only = no + directory mask = 0775 + create mask = 0774 + logging = syslog@2 + +include = /etc/samba/shares.conf diff --git a/roles/nfs_server/handlers/main.yml b/roles/nfs_server/handlers/main.yml new file mode 100644 index 0000000..77d3fa3 --- /dev/null +++ b/roles/nfs_server/handlers/main.yml @@ -0,0 +1,19 @@ +- name: restart nfs-server + systemd: + name: nfs-server + state: restarted + +- name: reload nfs-server + systemd: + name: nfs-server + state: reloaded + +- name: restart samba + systemd: + name: smb + state: restarted + +- name: reload samba + systemd: + name: smb + state: reloaded diff --git a/roles/nfs_server/meta/main.yml b/roles/nfs_server/meta/main.yml new file mode 100644 index 0000000..b750790 --- /dev/null +++ b/roles/nfs_server/meta/main.yml @@ -0,0 +1,3 @@ +dependencies: + - role: zfs + tags: zfs diff --git a/roles/nfs_server/tasks/autofs.yml b/roles/nfs_server/tasks/autofs.yml new file mode 100644 index 0000000..57bb862 --- /dev/null +++ b/roles/nfs_server/tasks/autofs.yml @@ -0,0 +1,57 @@ +- name: create automount maps for exports + ipaautomountmap: + ipaadmin_principal: '{{ ipa_user }}' + ipaadmin_password: '{{ ipa_pass }}' + name: '{{ item }}' + location: default + state: present + loop: "{{ nfs_exports | selectattr('automount_map', 'defined') | map(attribute='automount_map') | unique }}" + +- name: create automount keys for exports + ipaautomountkey: + ipaadmin_principal: '{{ ipa_user }}' + ipaadmin_password: '{{ ipa_pass }}' + location: default + mapname: '{{ item.automount_map }}' + key: '{{ item.automount_key | default(zfs_mountpoints[item.dataset] | basename) }}' + info: '-fstype=nfs4 {{ ansible_fqdn }}:{{ zfs_mountpoints[item.dataset] }}' + state: present + loop: "{{ nfs_exports | selectattr('automount_map', 'defined') }}" + loop_control: + label: '{{ item.dataset }}' + +- name: create automount maps for homedirs + ipaautomountmap: + ipaadmin_principal: '{{ ipa_user }}' + ipaadmin_password: '{{ ipa_pass }}' + name: '{{ item }}' + location: default + state: present + loop: + - '{{ nfs_homedir_home_automount_map }}' + - '{{ nfs_homedir_user_automount_map }}' + - '{{ nfs_homedir_group_automount_map }}' + +- name: create automount keys for homedirs + ipaautomountkey: + ipaadmin_principal: '{{ ipa_user }}' + ipaadmin_password: '{{ ipa_pass }}' + location: default + mapname: '{{ nfs_homedir_group_automount_map if item.group is defined else nfs_homedir_user_automount_map }}' + key: '{{ item.group if item.group is defined else item.user }}' + info: >- + /priv -fstype=nfs4 {{ ansible_fqdn }}:{{ zfs_mountpoints[nfs_homedir_group_dataset if item.group is defined else nfs_homedir_user_dataset] }}/{{ item.group if item.group is defined else item.user }}/priv + /pub -fstype=nfs4 {{ ansible_fqdn }}:{{ zfs_mountpoints[nfs_homedir_group_dataset if item.group is defined else nfs_homedir_user_dataset] }}/{{ item.group if item.group is defined else item.user }}/pub + state: present + loop: '{{ nfs_homedirs }}' + +- name: create /home automount keys + ipaautomountkey: + ipaadmin_principal: '{{ ipa_user }}' 
+ ipaadmin_password: '{{ ipa_pass }}' + location: default + mapname: '{{ nfs_homedir_home_automount_map }}' + key: '{{ item }}' + info: '-fstype=nfs4 {{ ansible_fqdn }}:{{ zfs_mountpoints[nfs_homedir_user_dataset] }}/{{ item }}/priv' + state: present + loop: "{{ nfs_homedirs | selectattr('user', 'defined') | map(attribute='user') }}" diff --git a/roles/nfs_server/tasks/exports.yml b/roles/nfs_server/tasks/exports.yml new file mode 100644 index 0000000..10ff894 --- /dev/null +++ b/roles/nfs_server/tasks/exports.yml @@ -0,0 +1,55 @@ +- name: create zfs datasets for exports + zfs: + name: '{{ item.dataset }}' + state: present + extra_zfs_properties: '{{ item.zfs_properties if item.zfs_properties is defined else omit }}' + loop: "{{ nfs_exports | selectattr('dataset', 'defined') }}" + loop_control: + label: '{{ item.dataset }}' + +- name: collect zfs mountpoints + shell: "zfs list -Hp -o name,mountpoint | sed 's/\t/: /'" + changed_when: False + register: zfs_list_mountpoints + +- name: set zfs_mountpoints fact + set_fact: + zfs_mountpoints: '{{ zfs_list_mountpoints.stdout | from_yaml }}' + +- name: set directory permissions for exports + file: + path: '{{ zfs_mountpoints[item.dataset] }}' + owner: '{{ item.owner | default(omit) }}' + group: '{{ item.group | default(omit) }}' + mode: "{{ '0%0o' % item.mode if item.mode is defined else omit }}" + setype: _default + state: directory + loop: '{{ nfs_exports }}' + loop_control: + label: '{{ item.dataset }}' + +- name: set directory ACLs for exports + acl: + path: '{{ zfs_mountpoints[item.0.dataset] }}' + default: '{{ item.1.default | default(omit) }}' + entity: '{{ item.1.entity }}' + etype: '{{ item.1.etype }}' + permissions: '{{ item.1.permissions }}' + recalculate_mask: mask + state: present + loop: "{{ nfs_exports | selectattr('acl', 'defined') | subelements('acl') }}" + loop_control: + label: '{{ item.0.dataset }}: {{ item.1 }}' + +- name: for exports with a "default" ACL, ensure the ACL is set on the directory itself + acl: + path: '{{ zfs_mountpoints[item.0.dataset] }}' + default: no + entity: '{{ item.1.entity }}' + etype: '{{ item.1.etype }}' + permissions: '{{ item.1.permissions }}' + recalculate_mask: mask + state: present + loop: "{{ nfs_exports | selectattr('acl', 'defined') | subelements('acl') | selectattr('1.default', 'defined') | selectattr('1.default', 'equalto', True) }}" + loop_control: + label: '{{ item.0.dataset }}: {{ item.1 }}' diff --git a/roles/nfs_server/tasks/homedirs.yml b/roles/nfs_server/tasks/homedirs.yml new file mode 100644 index 0000000..0241a6e --- /dev/null +++ b/roles/nfs_server/tasks/homedirs.yml @@ -0,0 +1,112 @@ +- name: create parent zfs datasets for home directories + zfs: + name: '{{ item }}' + state: present + loop: + - '{{ nfs_homedir_user_dataset }}' + - '{{ nfs_homedir_group_dataset }}' + +- name: collect zfs mountpoints + shell: "zfs list -Hp -o name,mountpoint | sed 's/\t/: /'" + changed_when: false + register: zfs_list_mountpoints + +- name: set zfs_mountpoints fact + set_fact: + zfs_mountpoints: '{{ zfs_list_mountpoints.stdout | from_yaml }}' + +- name: set selinux context for home directories + sefcontext: + target: '{{ item }}' + setype: samba_share_t + state: present + loop: + - '{{ zfs_mountpoints[nfs_homedir_group_dataset] }}(/.*)?' + - '{{ zfs_mountpoints[nfs_homedir_user_dataset] }}(/.*)?' 
+ register: nfs_homedir_sefcontext + +- name: apply selinux context to home directories + command: 'restorecon -R {{ zfs_mountpoints[nfs_homedir_group_dataset] }} {{ zfs_mountpoints[nfs_homedir_user_dataset] }}' + when: nfs_homedir_sefcontext.changed + +- name: check which home directories already exist + stat: + path: '{{ zfs_mountpoints[nfs_homedir_group_dataset if item.group is defined else nfs_homedir_user_dataset] }}/{{ item.group if item.group is defined else item.user }}/priv' + loop: '{{ nfs_homedirs }}' + register: nfs_homedir_stat + +- name: create zfs datasets for public home directories + zfs: + name: '{{ nfs_homedir_group_dataset if item.group is defined else nfs_homedir_user_dataset }}/{{ item.group if item.group is defined else item.user }}/pub' + state: present + extra_zfs_properties: + refquota: '{{ item.pub_quota | default(nfs_homedir_pub_quota) }}' + loop: '{{ nfs_homedirs }}' + loop_control: + label: '{{ item }}' + +- name: create zfs datasets for private home directories + zfs: + name: '{{ nfs_homedir_group_dataset if item.group is defined else nfs_homedir_user_dataset }}/{{ item.group if item.group is defined else item.user }}/priv' + state: present + extra_zfs_properties: + refquota: '{{ item.priv_quota | default(nfs_homedir_priv_quota) }}' + loop: '{{ nfs_homedirs }}' + loop_control: + label: '{{ item }}' + +- name: copy skel files into any newly-created home directories + copy: + src: /etc/skel/ + dest: '{{ zfs_mountpoints[nfs_homedir_user_dataset] }}/{{ item.user }}/priv' + remote_src: yes + owner: '{{ item.user }}' + group: '{{ item.user }}' + mode: preserve + when: + - item.user is defined + - not nfs_homedir_stat.results[index].stat.exists + loop: '{{ nfs_homedirs }}' + loop_control: + index_var: index + +- name: set directory permissions for user home directories + file: + path: "{{ zfs_mountpoints[nfs_homedir_user_dataset] }}/{{ item.0 }}/{{ item.1.name }}" + state: directory + owner: '{{ item.0 }}' + group: '{{ item.0 }}' + mode: '{{ item.1.mode }}' + setype: _default + loop: "{{ nfs_homedirs | selectattr('user', 'defined') | map(attribute='user') | product(subdirs) }}" + vars: + subdirs: + - { name: pub, mode: '755' } + - { name: priv, mode: '700' } + +- name: set directory permissions for group directories + file: + path: "{{ zfs_mountpoints[nfs_homedir_group_dataset] }}/{{ item.0 }}/{{ item.1.name }}" + state: directory + owner: root + group: '{{ item.0 }}' + mode: '{{ item.1.mode }}' + setype: _default + loop: "{{ nfs_homedirs | selectattr('group', 'defined') | map(attribute='group') | product(subdirs) }}" + vars: + subdirs: + - { name: pub, mode: '02775' } + - { name: priv, mode: '02770' } + +- name: set directory ACLs for group directories + acl: + path: '{{ zfs_mountpoints[nfs_homedir_group_dataset] }}/{{ item.0 }}/{{ item.1 }}' + default: yes + entity: '{{ item.0 }}' + etype: group + permissions: rwX + recalculate_mask: mask + state: present + loop: "{{ nfs_homedirs | selectattr('group', 'defined') | map(attribute='group') | product(['pub', 'priv']) }}" + loop_control: + label: '{{ item.0 }}: {{ item.1 }}' diff --git a/roles/nfs_server/tasks/main.yml b/roles/nfs_server/tasks/main.yml new file mode 100644 index 0000000..56e7099 --- /dev/null +++ b/roles/nfs_server/tasks/main.yml @@ -0,0 +1,19 @@ +- name: install packages + dnf: + name: '{{ nfs_packages }}' + state: present + +- name: create zfs filesystems for exports + import_tasks: exports.yml + +- name: create zfs filesystems for home directories + import_tasks: homedirs.yml + +- name: configure 
nfs shares + import_tasks: nfs.yml + +- name: configure smb shares + import_tasks: smb.yml + +- name: generate autofs maps + import_tasks: autofs.yml diff --git a/roles/nfs_server/tasks/nfs.yml b/roles/nfs_server/tasks/nfs.yml new file mode 100644 index 0000000..b32e48f --- /dev/null +++ b/roles/nfs_server/tasks/nfs.yml @@ -0,0 +1,41 @@ +- name: create nfs service + ipaservice: + ipaadmin_principal: '{{ ipa_user }}' + ipaadmin_password: '{{ ipa_pass }}' + name: 'nfs/{{ ansible_fqdn }}' + state: present + +- name: retrieve nfs service keytab + include_role: + name: freeipa_keytab + vars: + keytab_principal: 'nfs/{{ ansible_fqdn }}' + +- name: generate nfs.conf + template: + src: etc/nfs.conf.j2 + dest: /etc/nfs.conf + notify: restart nfs-server + +- name: generate export list + template: + src: etc/exports.j2 + dest: /etc/exports + notify: reload nfs-server + +- name: start nfs server + systemd: + name: nfs-server + state: started + enabled: yes + +- name: open firewall ports + firewalld: + service: '{{ item }}' + permanent: yes + immediate: yes + state: enabled + loop: + - nfs + - rpc-bind + - mountd diff --git a/roles/nfs_server/tasks/smb.yml b/roles/nfs_server/tasks/smb.yml new file mode 100644 index 0000000..ee050d0 --- /dev/null +++ b/roles/nfs_server/tasks/smb.yml @@ -0,0 +1,54 @@ +- name: configure samba domain member + command: + cmd: ipa-client-samba --no-homes --unattended + creates: /etc/samba/samba.keytab + +- name: add include statement to smb.conf + lineinfile: + path: /etc/samba/smb.conf + line: include = /etc/samba/local.conf + insertafter: EOF + notify: restart samba + +- name: copy samba configuration + copy: + src: etc/samba/local.conf + dest: /etc/samba/local.conf + notify: restart samba + +- name: create samba shares + template: + src: etc/samba/shares.conf.j2 + dest: /etc/samba/shares.conf + notify: reload samba + +- name: set selinux context for samba shares + sefcontext: + target: '{{ zfs_mountpoints[item.dataset] if item.dataset is defined else item.path }}(/.*)?' 
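+ # entries that reference a ZFS dataset get their share path from zfs_mountpoints; entries that only give a path (e.g. smb_shares) are used as-is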
+ setype: samba_share_t + state: present + loop: "{{ (nfs_exports | selectattr('smb_share', 'defined')) + smb_shares }}" + register: nfs_export_sefcontext + +- name: apply selinux context to samba shares + command: 'restorecon -R {{ zfs_mountpoints[item.dataset] if item.dataset is defined else item.path }}' + when: nfs_export_sefcontext.results[index].changed + loop: "{{ (nfs_exports | selectattr('smb_share', 'defined')) + smb_shares }}" + loop_control: + index_var: index + +- name: start samba services + systemd: + name: '{{ item }}' + enabled: yes + state: started + loop: + - smb + - winbind + +- name: open firewall ports + firewalld: + service: samba + permanent: yes + immediate: yes + state: enabled diff --git a/roles/nfs_server/templates/etc/exports.j2 b/roles/nfs_server/templates/etc/exports.j2 new file mode 100644 index 0000000..7f62ef5 --- /dev/null +++ b/roles/nfs_server/templates/etc/exports.j2 @@ -0,0 +1,20 @@ +{% for export in nfs_exports %} +{{ zfs_mountpoints[export.dataset] if export.dataset is defined else export.path }} {% if (export.options | default([])) %}-{% if export.options is string %}{{ export.options }}{% else %}{{ export.options | join(',') }}{% endif %}{%endif %} {% for client in export.clients %}{% if client is string %}{{ client }} {% else %}{{ client.client }}{% if (client.options | default([])) %}({% if client.options is string %}{{ client.options }}{% else %}{{ client.options | join(',') }}{% endif %}){% endif %}{% endif %} {% endfor %} + +{% endfor %} + +# user exports +{% for export in nfs_homedirs | selectattr('user', 'defined') %} +{% for subdir in ['pub', 'priv'] %} +{{ zfs_mountpoints[nfs_homedir_user_dataset] }}/{{ export.user }}/{{ subdir }} {% if (nfs_homedir_options | default([])) %}-{% if nfs_homedir_options is string %}{{ nfs_homedir_options }}{% else %}{{ nfs_homedir_options | join(',') }}{% endif %}{%endif %} {% for client in nfs_homedir_clients %}{% if client is string %}{{ client }} {% else %}{{ client.client }}{% if (client.options | default([])) %}({% if client.options is string %}{{ client.options }}{% else %}{{ client.options | join(',') }}{% endif %}){% endif %}{% endif %} {% endfor %} + +{% endfor %} +{% endfor %} + +# group exports +{% for export in nfs_homedirs | selectattr('group', 'defined') %} +{% for subdir in ['pub', 'priv'] %} +{{ zfs_mountpoints[nfs_homedir_group_dataset] }}/{{ export.group }}/{{ subdir }} {% if (nfs_homedir_options | default([])) %}-{% if nfs_homedir_options is string %}{{ nfs_homedir_options }}{% else %}{{ nfs_homedir_options | join(',') }}{% endif %}{%endif %} {% for client in nfs_homedir_clients %}{% if client is string %}{{ client }} {% else %}{{ client.client }}{% if (client.options | default([])) %}({% if client.options is string %}{{ client.options }}{% else %}{{ client.options | join(',') }}{% endif %}){% endif %}{% endif %} {% endfor %} + +{% endfor %} +{% endfor %} diff --git a/roles/nfs_server/templates/etc/nfs.conf.j2 b/roles/nfs_server/templates/etc/nfs.conf.j2 new file mode 100644 index 0000000..295f20d --- /dev/null +++ b/roles/nfs_server/templates/etc/nfs.conf.j2 @@ -0,0 +1,10 @@ +[gssd] +use-gss-proxy=1 + +[mountd] +port={{ nfs_mountd_port }} + +[nfsd] +vers2=n +vers3=y +vers4.0=n diff --git a/roles/nfs_server/templates/etc/samba/shares.conf.j2 b/roles/nfs_server/templates/etc/samba/shares.conf.j2 new file mode 100644 index 0000000..bb223ed --- /dev/null +++ b/roles/nfs_server/templates/etc/samba/shares.conf.j2 @@ -0,0 +1,19 @@ +{% for export in nfs_exports | selectattr('smb_share', 
'defined') %} +[{{ export.smb_share }}] +path = {{ zfs_mountpoints[export.dataset] if export.dataset is defined else export.path }} +{% endfor %} + +{% for share in smb_shares %} +[{{ share.name }}] +path = {{ share.path }} +{% endfor %} + +{% if nfs_homedirs | selectattr('user', 'defined') %} +[users] +path = {{ zfs_mountpoints[nfs_homedir_user_dataset] }} +{% endif %} + +{% if nfs_homedirs | selectattr('group', 'defined') %} +[groups] +path = {{ zfs_mountpoints[nfs_homedir_group_dataset] }} +{% endif %} diff --git a/roles/nfs_server/vars/main.yml b/roles/nfs_server/vars/main.yml new file mode 100644 index 0000000..4c5ef2f --- /dev/null +++ b/roles/nfs_server/vars/main.yml @@ -0,0 +1,9 @@ +nfs_packages: + - nfs-utils + - nfs4-acl-tools + - ipa-client-samba + - rsync + +nfs_homedir_home_automount_map: auto.home +nfs_homedir_user_automount_map: auto.nfs_user +nfs_homedir_group_automount_map: auto.nfs_group diff --git a/roles/nim/defaults/main.yml b/roles/nim/defaults/main.yml new file mode 100644 index 0000000..4c23a91 --- /dev/null +++ b/roles/nim/defaults/main.yml @@ -0,0 +1 @@ +nim_version: 1.6.6 diff --git a/roles/nim/tasks/main.yml b/roles/nim/tasks/main.yml new file mode 100644 index 0000000..c311e0a --- /dev/null +++ b/roles/nim/tasks/main.yml @@ -0,0 +1,12 @@ +- name: create nim installation directory + file: + path: '{{ nim_install_dir }}' + state: directory + +- name: extract nim tarball + unarchive: + src: '{{ nim_url }}' + remote_src: yes + dest: '{{ nim_install_dir }}' + extra_opts: + - '--strip-components=1' diff --git a/roles/nim/vars/main.yml b/roles/nim/vars/main.yml new file mode 100644 index 0000000..2cd6cee --- /dev/null +++ b/roles/nim/vars/main.yml @@ -0,0 +1,2 @@ +nim_url: https://nim-lang.org/download/nim-{{ nim_version }}-linux_x64.tar.xz +nim_install_dir: /usr/local/share/nim diff --git a/roles/nitter/defaults/main.yml b/roles/nitter/defaults/main.yml new file mode 100644 index 0000000..bac10c0 --- /dev/null +++ b/roles/nitter/defaults/main.yml @@ -0,0 +1,21 @@ +nitter_version: master + +nitter_server_name: '{{ ansible_fqdn }}' + +nitter_port: 8080 +nitter_user: nitter + +nitter_update_on_calendar: weekly + +nitter_hmac_key: secretKey + +nitter_max_connections: 100 +nitter_token_count: 10 + +nitter_cache_list_minutes: 240 +nitter_cache_rss_minutes: 10 +nitter_redis_host: localhost +nitter_redis_port: 6379 +nitter_redis_password: '' +nitter_redis_connections: 20 +nitter_redis_max_connections: 30 diff --git a/roles/nitter/handlers/main.yml b/roles/nitter/handlers/main.yml new file mode 100644 index 0000000..67fb6a4 --- /dev/null +++ b/roles/nitter/handlers/main.yml @@ -0,0 +1,4 @@ +- name: restart nitter + systemd: + name: nitter + state: restarted diff --git a/roles/nitter/meta/main.yml b/roles/nitter/meta/main.yml new file mode 100644 index 0000000..c60a259 --- /dev/null +++ b/roles/nitter/meta/main.yml @@ -0,0 +1,7 @@ +dependencies: + - role: nim + tags: nim + + - role: redis + redis_port: '{{ nitter_redis_port }}' + tags: redis diff --git a/roles/nitter/tasks/main.yml b/roles/nitter/tasks/main.yml new file mode 100644 index 0000000..68c8190 --- /dev/null +++ b/roles/nitter/tasks/main.yml @@ -0,0 +1,97 @@ +- name: install dependencies + dnf: + name: '{{ nitter_packages }}' + state: present + +- name: create local user + user: + name: '{{ nitter_user }}' + system: yes + home: '{{ nitter_home }}' + shell: /sbin/nologin + create_home: no + +- name: create home directory + file: + path: '{{ nitter_home }}' + owner: '{{ nitter_user }}' + group: '{{ nitter_user }}' + 
mode: 0755 + state: directory + +- name: clone repository + git: + repo: '{{ nitter_git_repo }}' + dest: '{{ nitter_install_dir }}' + version: '{{ nitter_version }}' + force: yes + update: yes + register: nitter_git + become: yes + become_user: '{{ nitter_user }}' + +- name: build nitter + command: + chdir: '{{ nitter_install_dir }}' + cmd: 'nimble --accept {{ item }}' + environment: + PATH: /usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:{{ nim_install_dir }}/bin + loop: + - build -d:release + - scss + - md + become: yes + become_user: '{{ nitter_user }}' + when: nitter_git.changed + notify: restart nitter + +- name: generate config file + template: + src: '{{ nitter_install_dir[1:] }}/nitter.conf.j2' + dest: '{{ nitter_install_dir }}/nitter.conf' + owner: '{{ nitter_user }}' + group: '{{ nitter_user }}' + mode: 0600 + notify: restart nitter + +- name: create systemd unit + template: + src: etc/systemd/system/nitter.service.j2 + dest: /etc/systemd/system/nitter.service + register: nitter_unit + notify: restart nitter + +- name: reload systemd daemons + systemd: + daemon_reload: yes + when: nitter_unit.changed + +- name: start nitter + systemd: + name: nitter + enabled: yes + state: started + +- name: set http_port_t context for nitter port + seport: + ports: '{{ nitter_port }}' + proto: tcp + setype: http_port_t + state: present + tags: selinux + +- name: generate update script + template: + src: '{{ nitter_home[1:] }}/nitter-update.sh.j2' + dest: '{{ nitter_home }}/nitter-update.sh' + mode: 0555 + +- name: create nitter-update systemd timer + include_role: + name: systemd_timer + vars: + timer_name: nitter-update + timer_description: Update nitter + timer_after: network.target + timer_on_calendar: '{{ nitter_update_on_calendar }}' + timer_exec: '{{ nitter_home }}/nitter-update.sh' diff --git a/roles/nitter/templates/etc/systemd/system/nitter.service.j2 b/roles/nitter/templates/etc/systemd/system/nitter.service.j2 new file mode 100644 index 0000000..59b0ba6 --- /dev/null +++ b/roles/nitter/templates/etc/systemd/system/nitter.service.j2 @@ -0,0 +1,34 @@ +[Unit] +Description=nitter twitter proxy +After=network.target redis@{{ nitter_redis_port }}.service +Requires=redis@{{ nitter_redis_port }}.service +AssertPathExists={{ nitter_install_dir }} + +[Service] +Type=simple +ExecStart={{ nitter_install_dir }}/nitter +WorkingDirectory={{ nitter_install_dir }} +User={{ nitter_user }} +Group={{ nitter_user }} +Restart=always +RestartSec=15 + +# See https://www.freedesktop.org/software/systemd/man/systemd.exec.html +# for details +DevicePolicy=closed +NoNewPrivileges=yes +PrivateDevices=yes +PrivateTmp=yes +ProtectControlGroups=yes +ProtectKernelModules=yes +ProtectKernelTunables=yes +RestrictAddressFamilies=AF_UNIX AF_INET AF_INET6 +RestrictNamespaces=yes +RestrictRealtime=yes +SystemCallFilter=~@clock @debug @module @mount @obsolete @privileged @reboot @setuid @swap + +ProtectSystem=full +ProtectHome=true + +[Install] +WantedBy=multi-user.target diff --git a/roles/nitter/templates/opt/nitter/nitter-update.sh.j2 b/roles/nitter/templates/opt/nitter/nitter-update.sh.j2 new file mode 100644 index 0000000..8d5782e --- /dev/null +++ b/roles/nitter/templates/opt/nitter/nitter-update.sh.j2 @@ -0,0 +1,40 @@ +#!/usr/bin/env bash + +set -eu + +SRCDIR={{ nitter_install_dir | quote }} +NITTER_USER={{ nitter_user | quote }} + +export PATH='/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:{{ nim_install_dir }}/bin' + +as-nitter() { + runuser -u "$NITTER_USER" -- "$@" +} + +if (( $EUID != 0 )); then + echo 
'must be superuser' 1>&2 + exit 1 +fi + +cd "$SRCDIR" + +as-nitter git fetch + +local_rev=$(git rev-parse HEAD) +upstream_rev=$(git rev-parse '@{u}') + +echo "local: $local_rev" +echo "upstream: $upstream_rev" + +if [ "$local_rev" != "$upstream_rev" ]; then + as-nitter git pull --ff-only + + echo "building nitter..." + as-nitter nimble --accept build -d:release + as-nitter nimble --accept scss + as-nitter nimble --accept md + + systemctl restart nitter +else + echo "nitter is already up to date" +fi diff --git a/roles/nitter/templates/opt/nitter/nitter/nitter.conf.j2 b/roles/nitter/templates/opt/nitter/nitter/nitter.conf.j2 new file mode 100644 index 0000000..83deef3 --- /dev/null +++ b/roles/nitter/templates/opt/nitter/nitter/nitter.conf.j2 @@ -0,0 +1,38 @@ +[Server] +address = "127.0.0.1" +port = {{ nitter_port }} +https = true +httpMaxConnections = {{ nitter_max_connections }} +staticDir = "./public" +title = "nitter" +hostname = "{{ nitter_server_name }}" + +[Cache] +listMinutes = 240 +rssMinutes = 10 +redisHost = "127.0.0.1" +redisPort = {{ nitter_redis_port }} +redisPassword = "" +redisConnections = 20 +redisMaxConnections = 30 + +[Config] +hmacKey = "{{ nitter_hmac_key }}" +base64Media = false +enableRSS = true +enableDebug = false + +proxy = "" +proxyAuth = "" + +tokenCount = {{ nitter_token_count }} + +[Preferences] +theme = "Nitter" +replaceTwitter = "" +replaceYouTube = "" +replaceReddit = "" +replaceInstagram = "" +proxyVideos = true +hlsPlayback = true +infiniteScroll = true diff --git a/roles/nitter/vars/main.yml b/roles/nitter/vars/main.yml new file mode 100644 index 0000000..a7b0f3f --- /dev/null +++ b/roles/nitter/vars/main.yml @@ -0,0 +1,14 @@ +nitter_git_repo: https://github.com/zedeus/nitter +nitter_home: /opt/nitter +nitter_install_dir: '{{ nitter_home }}/nitter' + +nitter_packages: + - libsass + - libsass-devel + - pcre + +nitter_apache_config: | + AllowEncodedSlashes On + ProxyPass / http://127.0.0.1:{{ nitter_port }}/ nocanon + ProxyPassReverse / http://127.0.0.1:{{ nitter_port }}/ + {{ apache_proxy_config }} diff --git a/roles/nsd/defaults/main.yml b/roles/nsd/defaults/main.yml new file mode 100644 index 0000000..de4f06d --- /dev/null +++ b/roles/nsd/defaults/main.yml @@ -0,0 +1,2 @@ +nsd_server_count: '{{ ansible_processor_vcpus }}' +nsd_zones: [] diff --git a/roles/nsd/handlers/main.yml b/roles/nsd/handlers/main.yml new file mode 100644 index 0000000..34ae511 --- /dev/null +++ b/roles/nsd/handlers/main.yml @@ -0,0 +1,9 @@ +- name: restart nsd + systemd: + name: nsd + state: restarted + +- name: reload nsd + systemd: + name: nsd + state: reloaded diff --git a/roles/nsd/tasks/generate_zone.yml b/roles/nsd/tasks/generate_zone.yml new file mode 100644 index 0000000..a78ee62 --- /dev/null +++ b/roles/nsd/tasks/generate_zone.yml @@ -0,0 +1,50 @@ +- name: stat current zone file + stat: + path: /etc/nsd/{{ zone.name }}.zone + register: current_zone_file + +- name: get current serial + command: dig @{{ zone.slave_nameservers | first | default('127.0.0.1') }} +short SOA {{ zone.name }} + register: zone_soa + changed_when: no + +- name: check if zone serial needs to be regenerated + block: + - name: create temporary zone file + copy: + content: | + {{ nsd_soa_block }} + {{ zone.content }} + dest: /tmp/.ansible-{{ zone.name }}.zone.tmp + vars: + serial: '{{ zone_soa.stdout.split()[2] | default(nsd_init_serial) }}' + changed_when: no + + - name: stat temporary zone file + stat: + path: /tmp/.ansible-{{ zone.name }}.zone.tmp + register: temp_zone_file + + - name: remove 
temporary zone file + file: + path: /tmp/.ansible-{{ zone.name }}.zone.tmp + state: absent + changed_when: no + when: current_zone_file.stat.exists + +- name: generate zone file + copy: + content: | + {{ nsd_soa_block }} + {{ zone.content }} + dest: /etc/nsd/{{ zone.name }}.zone + vars: + serial: >- + {{ + nsd_init_serial if not zone_soa.stdout.split()[2] + else + (zone_soa.stdout.split()[2] | int) if ((not current_zone_file.stat.exists) or current_zone_file.stat.checksum == temp_zone_file.stat.checksum) + else + (zone_soa.stdout.split()[2] | int) + 1 + }} + notify: reload nsd diff --git a/roles/nsd/tasks/main.yml b/roles/nsd/tasks/main.yml new file mode 100644 index 0000000..63ac3eb --- /dev/null +++ b/roles/nsd/tasks/main.yml @@ -0,0 +1,35 @@ +- name: install packages + dnf: + name: nsd + state: present + +- name: generate nsd.conf + template: + src: etc/nsd/nsd.conf.j2 + dest: /etc/nsd/nsd.conf + notify: restart nsd + tags: zoneupdate + +- include_tasks: + file: generate_zone.yml + apply: + tags: zoneupdate + loop: '{{ nsd_zones }}' + loop_control: + loop_var: zone + label: '{{ zone.name }}' + tags: zoneupdate + +- name: enable nsd + systemd: + name: nsd + state: started + enabled: yes + +- name: open firewall ports + firewalld: + service: dns + permanent: yes + immediate: yes + state: enabled + tags: firewalld diff --git a/roles/nsd/templates/etc/nsd/nsd.conf.j2 b/roles/nsd/templates/etc/nsd/nsd.conf.j2 new file mode 100644 index 0000000..6d205c4 --- /dev/null +++ b/roles/nsd/templates/etc/nsd/nsd.conf.j2 @@ -0,0 +1,24 @@ +server: + ip-address: {{ ansible_default_ipv4.address }} + server-count: {{ nsd_server_count }} + database: "" + pidfile: "" + hide-version: yes + verbosity: 1 + log-only-syslog: yes + minimal-responses: yes + refuse-any: yes + +{% for zone in nsd_zones %} +zone: + name: {{ zone.name }} + zonefile: /etc/nsd/%s.zone +{% for ns in zone.slave_nameservers | default([]) %} + notify: {{ ns }} NOKEY + provide-xfr: {{ ns }} NOKEY +{% endfor %} +{% endfor %} + +remote-control: + control-enable: yes + control-interface: /run/nsd/nsd.ctl diff --git a/roles/nsd/vars/main.yml b/roles/nsd/vars/main.yml new file mode 100644 index 0000000..78b1ba6 --- /dev/null +++ b/roles/nsd/vars/main.yml @@ -0,0 +1,15 @@ +nsd_init_serial: 10000 + +nsd_default_ttl: 10800 + +nsd_soa_block: | + $TTL {{ zone.ttl | default(nsd_default_ttl) }} + $ORIGIN {{ zone.name }}. + + @ IN SOA {{ zone.ns | default('ns1.' + zone.name) }}. {{ zone.contact | default('hostmaster.' + zone.name) }}. 
( + {{ serial }} ; serial + {{ zone.refresh | default('1d') }} ; refresh + {{ zone.retry | default('3m') }} ; retry + {{ zone.expire | default('1w') }} ; expire + {{ zone.minimum | default('3h') }} ; minimum + ) diff --git a/roles/packages/defaults/main.yml b/roles/packages/defaults/main.yml new file mode 100644 index 0000000..076da58 --- /dev/null +++ b/roles/packages/defaults/main.yml @@ -0,0 +1 @@ +packages_install: [] diff --git a/roles/packages/tasks/main.yml b/roles/packages/tasks/main.yml new file mode 100644 index 0000000..908f740 --- /dev/null +++ b/roles/packages/tasks/main.yml @@ -0,0 +1,10 @@ +- name: update packages + dnf: + name: '*' + state: latest + update_cache: yes + +- name: install packages + dnf: + name: '{{ packages_install }}' + state: present diff --git a/roles/photostructure/defaults/main.yml b/roles/photostructure/defaults/main.yml new file mode 100644 index 0000000..9813abc --- /dev/null +++ b/roles/photostructure/defaults/main.yml @@ -0,0 +1,11 @@ +photostructure_port: 8080 +photostructure_scan_interval_hours: 24 +photostructure_max_cpu_percent: 95 +photostructure_log_level: warn +photostructure_backup_interval_minutes: 30 +photostructure_version: alpha + +photostructure_user: s-photostructure +photostructure_file_access_group: role-photo-admin + +photostructure_kerberized_cidrs: '{{ kerberized_cidrs }}' diff --git a/roles/photostructure/handlers/main.yml b/roles/photostructure/handlers/main.yml new file mode 100644 index 0000000..5148595 --- /dev/null +++ b/roles/photostructure/handlers/main.yml @@ -0,0 +1,4 @@ +- name: restart photostructure + systemd: + name: photostructure + state: restarted diff --git a/roles/photostructure/meta/main.yml b/roles/photostructure/meta/main.yml new file mode 100644 index 0000000..e167af1 --- /dev/null +++ b/roles/photostructure/meta/main.yml @@ -0,0 +1,7 @@ +dependencies: + - role: yum + yum_repositories: + - epel + - rpmfusion-free + - rpmfusion-nonfree + tags: yum diff --git a/roles/photostructure/tasks/freeipa.yml b/roles/photostructure/tasks/freeipa.yml new file mode 100644 index 0000000..66d99e7 --- /dev/null +++ b/roles/photostructure/tasks/freeipa.yml @@ -0,0 +1,47 @@ +- name: create user + ipauser: + ipaadmin_principal: '{{ ipa_user }}' + ipaadmin_password: '{{ ipa_pass }}' + name: '{{ photostructure_user }}' + loginshell: /sbin/nologin + homedir: '{{ photostructure_home }}' + givenname: Photostructure + sn: Service Account + state: present + run_once: yes + +- name: retrieve user keytab + include_role: + name: freeipa_keytab + vars: + keytab_principal: '{{ photostructure_user }}' + keytab_path: '{{ photostructure_keytab }}' + +- name: configure gssproxy for kerberized NFS + include_role: + name: gssproxy_client + vars: + gssproxy_name: photostructure + gssproxy_section: service/photostructure + gssproxy_keytab: /etc/krb5.keytab + gssproxy_client_keytab: '{{ photostructure_keytab }}' + gssproxy_cred_usage: initiate + gssproxy_euid: '{{ photostructure_user }}' + +- name: add user to file access group + ipagroup: + ipaadmin_principal: '{{ ipa_user }}' + ipaadmin_password: '{{ ipa_pass }}' + name: '{{ photostructure_file_access_group }}' + user: '{{ photostructure_user }}' + action: member + state: present + run_once: yes + +- name: create access group + ipagroup: + ipaadmin_principal: '{{ ipa_user }}' + ipaadmin_password: '{{ ipa_pass }}' + name: '{{ photostructure_access_group }}' + state: present + run_once: yes diff --git a/roles/photostructure/tasks/main.yml b/roles/photostructure/tasks/main.yml new file mode 100644 
index 0000000..2b37aed --- /dev/null +++ b/roles/photostructure/tasks/main.yml @@ -0,0 +1,78 @@ +- name: install packages + dnf: + name: '{{ photostructure_packages }}' + state: present + +- import_tasks: freeipa.yml + tags: freeipa + +- name: create home directory + file: + path: '{{ photostructure_home }}' + owner: '{{ photostructure_user }}' + group: '{{ photostructure_user }}' + mode: 0700 + state: directory + +- name: clone git repository + git: + repo: '{{ photostructure_repo }}' + dest: '{{ photostructure_install_dir }}' + update: no + version: '{{ photostructure_version }}' + become: yes + become_user: '{{ photostructure_user }}' + register: photostructure_git + +- name: build photostructure + shell: + cmd: >- + mkdir -p "$HOME/.config/PhotoStructure" && + rm -rf node_modules "$HOME/.electron" "$HOME/.electron-gyp" "$HOME/.npm/_libvips" "$HOME/.node-gyp" "$HOME/.cache/yarn/*/*sharp*" && + npx --yes yarn install --silent + chdir: '{{ photostructure_install_dir }}' + become: true + become_user: '{{ photostructure_user }}' + when: photostructure_git.changed + +- name: create systemd unit file + template: + src: etc/systemd/system/photostructure.service.j2 + dest: /etc/systemd/system/photostructure.service + register: photostructure_unit + notify: restart photostructure + +- name: reload systemd units + systemd: + daemon_reload: yes + when: photostructure_unit.changed + +- name: generate environment file + template: + src: etc/sysconfig/photostructure + dest: /etc/sysconfig/photostructure + owner: root + group: '{{ photostructure_user }}' + notify: restart photostructure + +- name: start photostructure + systemd: + name: photostructure + state: started + enabled: yes + +- name: generate update script + template: + src: '{{ photostructure_home[1:] }}/photostructure-update.sh.j2' + dest: '{{ photostructure_home }}/photostructure-update.sh' + mode: 0555 + +- name: set up photostructure-update timer + include_role: + name: systemd_timer + vars: + timer_name: photostructure-update + timer_description: Update photostructure + timer_after: network.target nss-user-lookup.target + timer_on_calendar: '{{ photostructure_update_on_calendar }}' + timer_exec: '{{ photostructure_home }}/photostructure-update.sh' diff --git a/roles/photostructure/templates/etc/sysconfig/photostructure b/roles/photostructure/templates/etc/sysconfig/photostructure new file mode 100644 index 0000000..b1143d2 --- /dev/null +++ b/roles/photostructure/templates/etc/sysconfig/photostructure @@ -0,0 +1,18 @@ +PS_COPY_ASSETS_TO_LIBRARY="false" +PS_SCAN_ALL_DRIVES="false" +PS_LIBRARY_DIR="{{ photostructure_library }}" +PS_LOG_DIR="{{ photostructure_home }}/logs" +PS_LOG_COLOR="false" +PS_LOG_LEVEL="{{ photostructure_log_level }}" +PS_SCAN_PATHS='{{ ([photostructure_scan_paths] if photostructure_scan_paths is string else photostructure_scan_paths) | to_json }}' +PS_EXPOSE_NETWORK_WITHOUT_AUTH="false" +PS_UPGRADE_INSECURE_REQUESTS="true" +PS_HTTP_PORT="{{ photostructure_port }}" +PS_TRUST_PROXY="loopback" +PS_CPU_LOAD_PERCENT="{{ photostructure_max_cpu_percent }}" +PS_ENABLE_VIPS_CACHE="true" +PS_SYNC_INTERVAL_HOURS="{{ photostructure_scan_interval_hours }}" +PS_REPORT_ERRORS="false" +PS_DB_BACKUP_INTERVAL_MINUTES="{{ photostructure_backup_interval_minutes }}" +PS_TAG_LENS="false" +PS_TAG_DATE_FROM_STAT="false" diff --git a/roles/photostructure/templates/etc/systemd/system/photostructure.service.j2 b/roles/photostructure/templates/etc/systemd/system/photostructure.service.j2 new file mode 100644 index 0000000..67700c3 --- 
/dev/null +++ b/roles/photostructure/templates/etc/systemd/system/photostructure.service.j2 @@ -0,0 +1,19 @@ +[Unit] +Description=PhotoStructure for Servers +Documentation=https://photostructure.com/servers/ +Requires=network.target nss-user-lookup.target autofs.service +After=network.target nss-user-lookup.target autofs.service + +[Service] +User={{ photostructure_user }} +Group={{ photostructure_user }} +EnvironmentFile=/etc/sysconfig/photostructure +WorkingDirectory={{ photostructure_install_dir }} +ExecStart=/usr/bin/node ./photostructure +Type=simple +Restart=on-failure +TimeoutSec=2min +PrivateTmp=true + +[Install] +WantedBy=multi-user.target diff --git a/roles/photostructure/templates/opt/photostructure/photostructure-update.sh.j2 b/roles/photostructure/templates/opt/photostructure/photostructure-update.sh.j2 new file mode 100644 index 0000000..114a145 --- /dev/null +++ b/roles/photostructure/templates/opt/photostructure/photostructure-update.sh.j2 @@ -0,0 +1,48 @@ +#!/usr/bin/env bash + +set -Eeu -o pipefail + +SRCDIR={{ photostructure_install_dir | quote }} +PHOTOSTRUCTURE_USER={{ photostructure_user | quote }} +PHOTOSTRUCTURE_INSTALL_DIR={{ photostructure_install_dir | quote }} +PHOTOSTRUCTURE_HOME={{ photostructure_home | quote }} + +as-photostructure() { + runuser -u "$PHOTOSTRUCTURE_USER" -- "$@" +} + +if (( $EUID != 0 )); then + echo 'must be superuser' 1>&2 + exit 1 +fi + +cd "$SRCDIR" + +as-photostructure git fetch + +local_rev=$(git rev-parse HEAD) +upstream_rev=$(git rev-parse '@{u}') + +echo "local: $local_rev" +echo "upstream: $upstream_rev" + +if [ "$local_rev" != "$upstream_rev" ]; then + systemctl stop photostructure + + as-photostructure git pull --ff-only + + echo "building photostructure..." + rm -rf "${PHOTOSTRUCTURE_INSTALL_DIR}/node_modules" \ + "$PHOTOSTRUCTURE_HOME/.electron" \ + "$PHOTOSTRUCTURE_HOME/.electron-gyp" \ + "$PHOTOSTRUCTURE_HOME/.npm/_libvips" \ + "$PHOTOSTRUCTURE_HOME/.node-gyp" \ + "$PHOTOSTRUCTURE_HOME/.cache/yarn/*/*sharp*" + + cd "$PHOTOSTRUCTURE_INSTALL_DIR" + as-photostructure npx yarn install --yes + + systemctl start photostructure +else + echo "photostructure is already up to date" +fi diff --git a/roles/photostructure/vars/main.yml b/roles/photostructure/vars/main.yml new file mode 100644 index 0000000..d05ed42 --- /dev/null +++ b/roles/photostructure/vars/main.yml @@ -0,0 +1,46 @@ +photostructure_packages: + - nodejs + - ffmpeg + - libjpeg-turbo-utils + - libheif + - python3-devel + - git + - perl + +photostructure_repo: https://github.com/photostructure/photostructure-for-servers.git +photostructure_home: /opt/photostructure +photostructure_library: '{{ photostructure_home }}/library' +photostructure_keytab: /var/lib/gssproxy/clients/{{ photostructure_user }}.keytab +photostructure_install_dir: '{{ photostructure_home }}/photostructure-for-servers' + +photostructure_update_script_path: /usr/local/sbin/photostructure-update.sh +photostructure_update_on_calendar: weekly + +photostructure_apache_config: | + ProxyPass / http://127.0.0.1:{{ photostructure_port }}/ + ProxyPassReverse / http://127.0.0.1:{{ photostructure_port }}/ + ProxyTimeout 3600 + {{ apache_proxy_config }} + + + AuthName "FreeIPA Single Sign-On" + + AuthType GSSAPI + GssapiLocalName On + {{ apache_gssapi_session_config }} + + + AuthType Basic + AuthBasicProvider ldap + + {{ apache_ldap_config }} + Require ldap-attribute memberof=cn={{ photostructure_access_group }},{{ freeipa_group_basedn }} + + +photostructure_archive_shell: >- + systemctl stop photostructure; + 
TIMESTAMP=$(date +%Y%m%d%H%M%S); + tar cf "photostructure-${TIMESTAMP}.tar" + --transform "s|^\.|photostructure-${TIMESTAMP}|" + -C {{ photostructure_library | quote }} . ; + systemctl start photostructure diff --git a/roles/php/defaults/main.yml b/roles/php/defaults/main.yml new file mode 100644 index 0000000..45c0138 --- /dev/null +++ b/roles/php/defaults/main.yml @@ -0,0 +1,17 @@ +php_post_max_size: 8M +php_upload_max_filesize: 25M +php_max_file_uploads: 20 +php_timezone: '{{ timezone }}' + +php_fpm_pool: www +php_fpm_user: apache +php_fpm_group: apache +php_fpm_max_children: 50 +php_fpm_start_servers: 5 +php_fpm_min_spare_servers: 5 +php_fpm_max_spare_servers: 35 +php_fpm_flags: {} +php_fpm_admin_flags: {} +php_fpm_values: {} +php_fpm_admin_values: {} +php_fpm_environment: {} diff --git a/roles/php/files/etc/systemd/system/php-fpm.service.d/override.conf b/roles/php/files/etc/systemd/system/php-fpm.service.d/override.conf new file mode 100644 index 0000000..26c9ad8 --- /dev/null +++ b/roles/php/files/etc/systemd/system/php-fpm.service.d/override.conf @@ -0,0 +1,2 @@ +[Unit] +After=gssproxy.service diff --git a/roles/php/handlers/main.yml b/roles/php/handlers/main.yml new file mode 100644 index 0000000..f644426 --- /dev/null +++ b/roles/php/handlers/main.yml @@ -0,0 +1,4 @@ +- name: restart php-fpm + systemd: + name: php-fpm + state: restarted diff --git a/roles/php/tasks/main.yml b/roles/php/tasks/main.yml new file mode 100644 index 0000000..6505ec0 --- /dev/null +++ b/roles/php/tasks/main.yml @@ -0,0 +1,32 @@ +- name: install php + dnf: + name: '{{ php_packages[ansible_distribution_major_version] }}' + state: present + +# TODO: we should support multiple pools +- name: generate php configuration + template: + src: '{{ item[1:] }}.j2' + dest: '{{ item }}' + loop: + - /etc/php.ini + - /etc/php-fpm.conf + - /etc/php-fpm.d/www.conf + notify: restart php-fpm + +- name: create systemd override directory + file: + path: /etc/systemd/system/php-fpm.service.d + state: directory + +- name: create systemd override file + copy: + src: etc/systemd/system/php-fpm.service.d/override.conf + dest: /etc/systemd/system/php-fpm.service.d/override.conf + notify: restart php-fpm + register: php_fpm_systemd_unit + +- name: reload systemd daemon + systemd: + daemon_reload: yes + when: php_fpm_systemd_unit.changed diff --git a/roles/php/templates/etc/php-fpm.conf.j2 b/roles/php/templates/etc/php-fpm.conf.j2 new file mode 100644 index 0000000..c129708 --- /dev/null +++ b/roles/php/templates/etc/php-fpm.conf.j2 @@ -0,0 +1,6 @@ +include=/etc/php-fpm.d/*.conf + +[global] +pid = /run/php-fpm/php-fpm.pid +error_log = syslog +daemonize = yes diff --git a/roles/php/templates/etc/php-fpm.d/www.conf.j2 b/roles/php/templates/etc/php-fpm.d/www.conf.j2 new file mode 100644 index 0000000..077bc7f --- /dev/null +++ b/roles/php/templates/etc/php-fpm.d/www.conf.j2 @@ -0,0 +1,40 @@ +[www] +user = {{ php_fpm_user }} +group = {{ php_fpm_group }} + +listen = /run/php-fpm/www.sock + +listen.acl_users = apache,nginx +listen.allowed_clients = 127.0.0.1 + +pm = dynamic + +pm.max_children = {{ php_fpm_max_children }} +pm.start_servers = {{ php_fpm_start_servers }} +pm.min_spare_servers = {{ php_fpm_min_spare_servers }} +pm.max_spare_servers = {{ php_fpm_max_spare_servers }} + +php_value[session.save_handler] = files +php_value[session.save_path] = /var/lib/php/session +php_value[soap.wsdl_cache_dir] = /var/lib/php/wsdlcache +php_value[opcache.file_cache] = /var/lib/php/opcache + +{% for item in php_fpm_flags | dict2items %} 
+php_flag[{{ item.key }}] = {{ item.value if item.value is string else ('on' if (item.value|bool) else 'off') }} +{% endfor %} + +{% for item in php_fpm_admin_flags | dict2items %} +php_admin_flag[{{ item.key }}] = {{ item.value if item.value is string else ('on' if (item.value|bool) else 'off') }} +{% endfor %} + +{% for item in php_fpm_values | dict2items %} +php_value[{{ item.key }}] = {{ item.value }} +{% endfor %} + +{% for item in php_fpm_admin_values | dict2items %} +php_admin_value[{{ item.key }}] = {{ item.value }} +{% endfor %} + +{% for item in php_fpm_environment | dict2items %} +env[{{ item.key }}] = {{ item.value }} +{% endfor %} diff --git a/roles/php/templates/etc/php.ini.j2 b/roles/php/templates/etc/php.ini.j2 new file mode 100644 index 0000000..62b9893 --- /dev/null +++ b/roles/php/templates/etc/php.ini.j2 @@ -0,0 +1,130 @@ +[PHP] +engine = On +short_open_tag = Off +precision = 14 +output_buffering = 4096 +implicit_flush = Off +unserialize_callback_func = +serialize_precision = -1 +disable_functions = +disable_classes = +zend.enable_gc = On +zend.exception_ignore_args = On +expose_php = On +max_execution_time = 30 +max_input_time = 60 +memory_limit = 128M +error_reporting = E_ALL & ~E_DEPRECATED & ~E_STRICT +display_errors = Off +display_startup_errors = Off +log_errors = On +log_errors_max_len = 1024 +ignore_repeated_errors = Off +ignore_repeated_source = Off +report_memleaks = On +error_log = syslog +variables_order = "GPCS" +request_order = "GP" +register_argc_argv = Off +auto_globals_jit = On +post_max_size = {{ php_post_max_size }} +default_mimetype = "text/html" +default_charset = "UTF-8" +doc_root = +user_dir = +enable_dl = Off +file_uploads = On +upload_max_filesize = {{ php_upload_max_filesize }} +max_file_uploads = {{ php_max_file_uploads }} +allow_url_fopen = On +allow_url_include = Off +default_socket_timeout = 60 + +[CLI Server] +cli_server.color = On + +[Date] +date.timezone = {{ php_timezone }} + +[Pcre] +pcre.jit=0 +[Pdo_mysql] +pdo_mysql.default_socket= +[mail function] +sendmail_path = /usr/sbin/sendmail -t -i +mail.add_x_header = Off +mail.log = syslog + +[ODBC] +odbc.allow_persistent = On +odbc.check_persistent = On +odbc.max_persistent = -1 +odbc.max_links = -1 +odbc.defaultlrl = 4096 +odbc.defaultbinmode = 1 + +[MySQLi] +mysqli.max_persistent = -1 +mysqli.allow_persistent = On +mysqli.max_links = -1 +mysqli.default_port = 3306 +mysqli.default_socket = +mysqli.default_host = +mysqli.default_user = +mysqli.default_pw = +mysqli.reconnect = Off + +[mysqlnd] +mysqlnd.collect_statistics = On +mysqlnd.collect_memory_statistics = Off + +[PostgreSQL] +pgsql.allow_persistent = On +pgsql.auto_reset_persistent = Off +pgsql.max_persistent = -1 +pgsql.max_links = -1 +pgsql.ignore_notice = 0 +pgsql.log_notice = 0 + +[bcmath] +bcmath.scale = 0 + +[Session] +session.save_handler = files +session.use_strict_mode = 1 +session.use_cookies = 1 +session.cookie_secure = 1 +session.use_only_cookies = 1 +session.name = PHPSESSID +session.auto_start = 0 +session.cookie_lifetime = 0 +session.cookie_path = / +session.cookie_domain = +session.cookie_httponly = +session.cookie_samesite = Strict +session.serialize_handler = php +session.gc_probability = 1 +session.gc_divisor = 1000 +session.gc_maxlifetime = 1440 +session.referer_check = +session.cache_limiter = nocache +session.cache_expire = 180 +session.use_trans_sid = 0 +session.sid_length = 26 +session.trans_sid_tags = "a=href,area=href,frame=src,form=" +session.sid_bits_per_character = 5 + +[Assertion] +zend.assertions = -1 + 
+[Tidy] +tidy.clean_output = Off + +[soap] +soap.wsdl_cache_enabled=1 +soap.wsdl_cache_dir="/tmp" +soap.wsdl_cache_ttl=86400 +soap.wsdl_cache_limit = 5 + +[ldap] +ldap.max_links = -1 diff --git a/roles/php/vars/main.yml b/roles/php/vars/main.yml new file mode 100644 index 0000000..d2762ae --- /dev/null +++ b/roles/php/vars/main.yml @@ -0,0 +1,5 @@ +php_packages: + '8': + - '@php:8.0' + '9': + - php diff --git a/roles/polkit/defaults/main.yml b/roles/polkit/defaults/main.yml new file mode 100644 index 0000000..ad5d8e0 --- /dev/null +++ b/roles/polkit/defaults/main.yml @@ -0,0 +1 @@ +polkit_admin_group: wheel diff --git a/roles/polkit/tasks/main.yml b/roles/polkit/tasks/main.yml new file mode 100644 index 0000000..d9e74a0 --- /dev/null +++ b/roles/polkit/tasks/main.yml @@ -0,0 +1,4 @@ +- name: generate default polkit rules + template: + src: etc/polkit-1/rules.d/40-default.rules.j2 + dest: /etc/polkit-1/rules.d/40-default.rules diff --git a/roles/polkit/templates/etc/polkit-1/rules.d/40-default.rules.j2 b/roles/polkit/templates/etc/polkit-1/rules.d/40-default.rules.j2 new file mode 100644 index 0000000..489b7af --- /dev/null +++ b/roles/polkit/templates/etc/polkit-1/rules.d/40-default.rules.j2 @@ -0,0 +1,3 @@ +polkit.addAdminRule(function(action, subject) { + return ["unix-group:{{ polkit_admin_group }}"]; +}); diff --git a/roles/postfix_client/defaults/main.yml b/roles/postfix_client/defaults/main.yml new file mode 100644 index 0000000..e389380 --- /dev/null +++ b/roles/postfix_client/defaults/main.yml @@ -0,0 +1,3 @@ +postfix_relayhost: '{{ email_domain }}' +postfix_myorigin: '{{ email_domain }}' +postfix_message_size_limit: 67108864 # 64 MB diff --git a/roles/postfix_client/handlers/main.yml b/roles/postfix_client/handlers/main.yml new file mode 100644 index 0000000..5d68d61 --- /dev/null +++ b/roles/postfix_client/handlers/main.yml @@ -0,0 +1,4 @@ +- name: restart postfix + systemd: + name: postfix + state: restarted diff --git a/roles/postfix_client/tasks/main.yml b/roles/postfix_client/tasks/main.yml new file mode 100644 index 0000000..1d785a8 --- /dev/null +++ b/roles/postfix_client/tasks/main.yml @@ -0,0 +1,16 @@ +- name: install packages + dnf: + name: '{{ postfix_packages[ansible_distribution_major_version] }}' + state: present + +- name: generate postfix configuration + template: + src: etc/postfix/main.cf.j2 + dest: /etc/postfix/main.cf + notify: restart postfix + +- name: enable postfix + systemd: + name: postfix + enabled: yes + state: started diff --git a/roles/postfix_client/templates/etc/postfix/main.cf.j2 b/roles/postfix_client/templates/etc/postfix/main.cf.j2 new file mode 100644 index 0000000..2cb5a34 --- /dev/null +++ b/roles/postfix_client/templates/etc/postfix/main.cf.j2 @@ -0,0 +1,41 @@ +compatibility_level = 2 + +# path definitions (package defaults) +queue_directory = /var/spool/postfix +command_directory = /usr/sbin +daemon_directory = /usr/libexec/postfix +data_directory = /var/lib/postfix +mail_owner = postfix + +sendmail_path = /usr/sbin/sendmail.postfix +newaliases_path = /usr/bin/newaliases.postfix +mailq_path = /usr/bin/mailq.postfix +setgid_group = postdrop +html_directory = no +manpage_directory = /usr/share/man +sample_directory = /usr/share/doc/postfix/samples +readme_directory = /usr/share/doc/postfix/README_FILES +meta_directory = /etc/postfix +shlib_directory = /usr/lib64/postfix + +smtpd_discard_ehlo_keywords = '' + +myorigin = {{ postfix_myorigin }} + +biff = no +# disable local delivery +mydestination = + +inet_interfaces = loopback-only 
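+# relay-only client: no local delivery (empty mydestination), bound to loopback only, everything is handed to the relayhost configured below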
+inet_protocols = all +mynetworks_style = host + +relayhost = {{ postfix_relayhost }} + +alias_database = hash:/etc/aliases + +smtputf8_enable = yes +message_size_limit = {{ postfix_message_size_limit }} + +smtp_tls_security_level = may +smtp_tls_CAfile = {{ postfix_smtp_ca_file }} diff --git a/roles/postfix_client/vars/main.yml b/roles/postfix_client/vars/main.yml new file mode 100644 index 0000000..39bbb57 --- /dev/null +++ b/roles/postfix_client/vars/main.yml @@ -0,0 +1,9 @@ +postfix_smtp_ca_file: /etc/pki/tls/certs/ca-bundle.crt + +postfix_packages: + '8': + - postfix + - mailx + '9': + - postfix + - s-nail diff --git a/roles/postfix_server/defaults/main.yml b/roles/postfix_server/defaults/main.yml new file mode 100644 index 0000000..3052a76 --- /dev/null +++ b/roles/postfix_server/defaults/main.yml @@ -0,0 +1,13 @@ +postfix_message_size_limit: 67108864 # 64 MB +postfix_recipient_delimiter: '+' +postfix_lmtp_require_tls: yes +postfix_virtual_domains: ['{{ email_domain }}'] +postfix_myorigin: '{{ email_domain }}' +postfix_mynetworks: "{{ vlans.values() | map(attribute='cidr') }}" +postfix_myhostname: '{{ ansible_fqdn }}' + +postfix_virtual_transport: lmtp:inet:{{ imap_host }}:24 +postfix_mailbox_quota_service: inet:{{ imap_host }}:10993 +postfix_milter: inet:{{ rspamd_host }}:11332 + +postfix_recipient_group: role-imap-access diff --git a/roles/postfix_server/files/etc/sasl2/smtpd.conf b/roles/postfix_server/files/etc/sasl2/smtpd.conf new file mode 100644 index 0000000..cc61713 --- /dev/null +++ b/roles/postfix_server/files/etc/sasl2/smtpd.conf @@ -0,0 +1,2 @@ +pwcheck_method: saslauthd +mech_list: gssapi plain login diff --git a/roles/postfix_server/files/etc/systemd/system/postfix.service.d/override.conf b/roles/postfix_server/files/etc/systemd/system/postfix.service.d/override.conf new file mode 100644 index 0000000..d7d8e76 --- /dev/null +++ b/roles/postfix_server/files/etc/systemd/system/postfix.service.d/override.conf @@ -0,0 +1,6 @@ +[Unit] +Wants=gssproxy.service +After=syslog.target network-online.target gssproxy.service + +[Service] +Environment=GSS_USE_PROXY=yes diff --git a/roles/postfix_server/handlers/main.yml b/roles/postfix_server/handlers/main.yml new file mode 100644 index 0000000..286b942 --- /dev/null +++ b/roles/postfix_server/handlers/main.yml @@ -0,0 +1,9 @@ +- name: restart postfix + systemd: + name: postfix + state: restarted + +- name: restart saslauthd + systemd: + name: saslauthd + state: restarted diff --git a/roles/postfix_server/tasks/freeipa.yml b/roles/postfix_server/tasks/freeipa.yml new file mode 100644 index 0000000..84d7818 --- /dev/null +++ b/roles/postfix_server/tasks/freeipa.yml @@ -0,0 +1,95 @@ +- name: create smtp service principal + ipaservice: + ipaadmin_principal: '{{ ipa_user }}' + ipaadmin_password: '{{ ipa_pass }}' + name: 'smtp/{{ ansible_fqdn }}' + pac_type: NONE + state: present + +- name: retrieve smtp service keytab + include_role: + name: freeipa_keytab + vars: + keytab_principal: 'smtp/{{ ansible_fqdn }}' + keytab_path: '{{ postfix_keytab }}' + +- name: configure gssproxy + include_role: + name: gssproxy_client + vars: + gssproxy_name: postfix + gssproxy_section: service/postfix + gssproxy_keytab: '{{ postfix_keytab }}' + gssproxy_client_keytab: '{{ postfix_keytab }}' + gssproxy_cred_usage: both + gssproxy_euid: postfix + +- name: create SELinux policy for smtpd to access gssproxy + include_role: + name: selinux_policy + apply: + tags: selinux + vars: + selinux_policy_name: smtpd_gssproxy + selinux_policy_te: '{{ 
postfix_selinux_policy_te }}'
+  tags: selinux
+
+- name: generate PAM configuration for smtp
+  copy:
+    content: |
+      auth required pam_sss.so
+      account required pam_sss.so
+    dest: /etc/pam.d/smtp
+
+- name: create smtp HBAC service
+  ipahbacsvc:
+    ipaadmin_principal: '{{ ipa_user }}'
+    ipaadmin_password: '{{ ipa_pass }}'
+    name: '{{ postfix_hbac_service }}'
+    description: Postfix SMTP server
+    state: present
+  run_once: True
+
+- name: create mail-servers hostgroup
+  ipahostgroup:
+    ipaadmin_principal: '{{ ipa_user }}'
+    ipaadmin_password: '{{ ipa_pass }}'
+    name: '{{ postfix_hbac_hostgroup }}'
+    description: Mail Servers
+    host: "{{ groups[postfix_hbac_hostgroup] | map('regex_replace', '$', '.' ~ ansible_domain) }}"
+    state: present
+  run_once: True
+
+# Note: we explicitly allow all here. SSSD will only be consulted when a user
+# performs a PLAIN login, falling back to saslauthd/PAM authentication.
+# Users with a valid Kerberos ticket bypass the PAM stack entirely, so a
+# restrictive HBAC rule is pointless.
+- name: create HBAC rule for smtp
+  ipahbacrule:
+    ipaadmin_principal: '{{ ipa_user }}'
+    ipaadmin_password: '{{ ipa_pass }}'
+    name: allow_smtp_on_mail_servers
+    description: Allow SMTP on mail servers
+    hostgroup:
+      - '{{ postfix_hbac_hostgroup }}'
+    usercategory: all
+    hbacsvc:
+      - '{{ postfix_hbac_service }}'
+  run_once: True
+
+- name: create systemd override directory
+  file:
+    path: /etc/systemd/system/postfix.service.d
+    state: directory
+
+- name: create systemd override file
+  copy:
+    src: etc/systemd/system/postfix.service.d/override.conf
+    dest: /etc/systemd/system/postfix.service.d/override.conf
+  notify: restart postfix
+  register: postfix_systemd_unit
+
+- name: reload systemd daemons
+  systemd:
+    daemon_reload: yes
+  when: postfix_systemd_unit.changed
diff --git a/roles/postfix_server/tasks/main.yml b/roles/postfix_server/tasks/main.yml
new file mode 100644
index 0000000..4f22d49
--- /dev/null
+++ b/roles/postfix_server/tasks/main.yml
@@ -0,0 +1,61 @@
+- name: install postfix
+  dnf:
+    name: '{{ postfix_packages }}'
+    state: present
+
+- name: request TLS certificate
+  include_role:
+    name: certbot
+  vars:
+    certificate_sans: ['{{ postfix_myhostname }}']
+    certificate_path: '{{ postfix_certificate_path }}'
+    certificate_key_path: '{{ postfix_certificate_key_path }}'
+    certificate_owner: postfix
+    certificate_hook: systemctl reload postfix
+
+- import_tasks: freeipa.yml
+  tags: freeipa
+
+- name: generate dhparams
+  openssl_dhparam:
+    path: '{{ postfix_dhparams_path }}'
+    size: 2048
+
+- name: generate postfix configuration
+  template:
+    src: etc/postfix/{{ item }}.j2
+    dest: /etc/postfix/{{ item }}
+  loop:
+    - main.cf
+    - master.cf
+    - virtual_mailboxes.cf
+    - virtual_aliases.cf
+  notify: restart postfix
+
+- name: configure saslauthd for smtpd
+  copy:
+    src: etc/sasl2/smtpd.conf
+    dest: /etc/sasl2/smtpd.conf
+  notify: restart saslauthd
+
+- name: enable saslauthd
+  systemd:
+    name: saslauthd
+    enabled: yes
+    state: started
+
+- name: enable postfix
+  systemd:
+    name: postfix
+    enabled: yes
+    state: started
+
+- name: open firewall ports
+  firewalld:
+    service: '{{ item }}'
+    permanent: yes
+    immediate: yes
+    state: enabled
+  loop:
+    - smtp
+    - smtp-submission
diff --git a/roles/postfix_server/templates/etc/postfix/main.cf.j2 b/roles/postfix_server/templates/etc/postfix/main.cf.j2
new file mode 100644
index 0000000..9132dff
--- /dev/null
+++ b/roles/postfix_server/templates/etc/postfix/main.cf.j2
@@ -0,0 +1,109 @@
+compatibility_level = 2
+
+### path definitions
+queue_directory = /var/spool/postfix +command_directory = /usr/sbin +daemon_directory = /usr/libexec/postfix +data_directory = /var/lib/postfix +mail_owner = postfix + +sendmail_path = /usr/sbin/sendmail.postfix +newaliases_path = /usr/bin/newaliases.postfix +mailq_path = /usr/bin/mailq.postfix +setgid_group = postdrop +html_directory = no +manpage_directory = /usr/share/man +sample_directory = /usr/share/doc/postfix/samples +readme_directory = /usr/share/doc/postfix/README_FILES +meta_directory = /etc/postfix +shlib_directory = /usr/lib64/postfix + +import_environment = MAIL_CONFIG MAIL_DEBUG MAIL_LOGTAG TZ XAUTHORITY DISPLAY LANG=C POSTLOG_SERVICE POSTLOG_HOSTNAME GSS_USE_PROXY=yes + +myorigin = {{ postfix_myorigin }} +myhostname = {{ postfix_myhostname }} + +mynetworks = 127.0.0.0/8 {{ postfix_mynetworks | join(' ') }} + +# disable local delivery +mydestination = + +inet_interfaces = all +inet_protocols = all + +# disable open relay +mynetworks_style = host + +alias_database = hash:/etc/aliases + +smtputf8_enable = yes +recipient_delimiter = {{ postfix_recipient_delimiter }} +message_size_limit = {{ postfix_message_size_limit }} + +strict_rfc821_envelopes = yes +allow_percent_hack = no +swap_bangpath = no +disable_vrfy_command = yes +show_user_unknown_table_name = no + +tls_medium_cipherlist = {{ postfix_cipherlist }} +tls_preempt_cipherlist = no + +smtpd_tls_security_level = may +smtpd_tls_auth_only = yes +smtpd_tls_cert_file = {{ postfix_certificate_path }} +smtpd_tls_key_file = {{ postfix_certificate_key_path }} +smtpd_tls_mandatory_protocols = !SSLv2, !SSLv3, !TLSv1, !TLSv1.1 +smtpd_tls_mandatory_ciphers = medium +smtpd_tls_protocols = !SSLv2, !SSLv3, !TLSv1, !TLSv1.1 +smtpd_tls_dh1024_param_file = {{ postfix_dhparams_path }} + +smtpd_sasl_security_options = noanonymous, noplaintext +smtpd_sasl_tls_security_options = noanonymous + +smtpd_helo_required = yes + +smtp_tls_security_level = may +smtp_tls_CAfile = {{ postfix_smtp_ca_file }} + +lmtp_tls_CAfile = {{ postfix_smtp_ca_file }} +lmtp_tls_security_level = {{ 'secure' if postfix_lmtp_require_tls else 'may' }} + +# public mailserver - restrictive policy +smtpd_helo_required = yes +smtpd_client_restrictions = + permit_mynetworks, + reject_unauth_pipelining, + reject_unknown_reverse_client_hostname +smtpd_helo_restrictions = + permit_mynetworks, + reject_invalid_helo_hostname, + reject_non_fqdn_helo_hostname, + reject_unauth_pipelining +smtpd_sender_restrictions = + permit_mynetworks, + reject_non_fqdn_sender, + reject_unknown_sender_domain, + reject_unauth_pipelining +smtpd_relay_restrictions = + permit_mynetworks, + reject_unauth_destination +smtpd_recipient_restrictions = + permit_mynetworks, + reject_non_fqdn_recipient, + reject_unknown_recipient_domain, + reject_unauth_pipelining, + reject_unlisted_recipient, + reject_unauth_destination, + check_policy_service {{ postfix_mailbox_quota_service }} +smtpd_data_restrictions = + permit_mynetworks, + reject_unauth_pipelining + +virtual_transport = {{ postfix_virtual_transport }} +virtual_mailbox_domains = {{ freeipa_realm }} {{ postfix_virtual_domains | join(' ') }} +virtual_mailbox_maps = ldap:$config_directory/virtual_mailboxes.cf +virtual_alias_maps = ldap:$config_directory/virtual_aliases.cf + +milter_default_action = accept +smtpd_milters = {{ postfix_milter }} diff --git a/roles/postfix_server/templates/etc/postfix/master.cf.j2 b/roles/postfix_server/templates/etc/postfix/master.cf.j2 new file mode 100644 index 0000000..1742b7a --- /dev/null +++ 
b/roles/postfix_server/templates/etc/postfix/master.cf.j2 @@ -0,0 +1,34 @@ +# ========================================================================== +# service type private unpriv chroot wakeup maxproc command + args +# (yes) (yes) (no) (never) (100) +# ========================================================================== +smtp inet n - n - - smtpd +submission inet n - n - - smtpd + -o syslog_name=postfix/submission + -o smtpd_tls_security_level=encrypt + -o smtpd_sasl_auth_enable=yes +pickup unix n - n 60 1 pickup +cleanup unix n - n - 0 cleanup +qmgr unix n - n 300 1 qmgr +tlsmgr unix - - n 1000? 1 tlsmgr +rewrite unix - - n - - trivial-rewrite +bounce unix - - n - 0 bounce +defer unix - - n - 0 bounce +trace unix - - n - 0 bounce +verify unix - - n - 1 verify +flush unix n - n 1000? 0 flush +proxymap unix - - n - - proxymap +proxywrite unix - - n - 1 proxymap +smtp unix - - n - - smtp +relay unix - - n - - smtp + -o syslog_name=postfix/$service_name +showq unix n - n - - showq +error unix - - n - - error +retry unix - - n - - error +discard unix - - n - - discard +local unix - n n - - local +virtual unix - n n - - virtual +lmtp unix - - n - - lmtp +anvil unix - - n - 1 anvil +scache unix - - n - 1 scache +postlog unix-dgram n - n - 1 postlogd diff --git a/roles/postfix_server/templates/etc/postfix/virtual_aliases.cf.j2 b/roles/postfix_server/templates/etc/postfix/virtual_aliases.cf.j2 new file mode 100644 index 0000000..9ba32e8 --- /dev/null +++ b/roles/postfix_server/templates/etc/postfix/virtual_aliases.cf.j2 @@ -0,0 +1,8 @@ +version = 3 +bind = sasl +sasl_mechs = gssapi +server_host = {{ freeipa_hosts | join(" ") }} +search_base = {{ freeipa_accounts_basedn }} +query_filter = (|(mail=%s)(mailAlternateAddress=%s)) +special_result_attribute = member +result_attribute = krbprincipalname diff --git a/roles/postfix_server/templates/etc/postfix/virtual_mailboxes.cf.j2 b/roles/postfix_server/templates/etc/postfix/virtual_mailboxes.cf.j2 new file mode 100644 index 0000000..a6fae98 --- /dev/null +++ b/roles/postfix_server/templates/etc/postfix/virtual_mailboxes.cf.j2 @@ -0,0 +1,7 @@ +version = 3 +bind = sasl +sasl_mechs = gssapi +server_host = {{ freeipa_hosts | join(" ") }} +search_base = {{ freeipa_user_basedn }} +query_filter = (&(krbprincipalname=%s)(memberof=cn={{ postfix_recipient_group }},{{ freeipa_group_basedn }})) +result_attribute = krbprincipalname diff --git a/roles/postfix_server/vars/main.yml b/roles/postfix_server/vars/main.yml new file mode 100644 index 0000000..050c880 --- /dev/null +++ b/roles/postfix_server/vars/main.yml @@ -0,0 +1,64 @@ +postfix_packages: + - postfix + - postfix-ldap + - cyrus-sasl + - cyrus-sasl-gssapi + - cyrus-sasl-plain + - s-nail + +postfix_certificate_path: /etc/pki/tls/certs/postfix2.pem +postfix_certificate_key_path: /etc/pki/tls/private/postfix2.key +postfix_dhparams_path: /etc/pki/tls/misc/dhparams-postfix.pem + +postfix_hbac_service: smtp +postfix_hbac_hostgroup: mail_servers + +postfix_smtp_ca_file: /etc/pki/tls/certs/ca-bundle.crt +postfix_cipherlist: ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-RSA-AES128-GCM-SHA256:ECDHE-ECDSA-AES256-GCM-SHA384:ECDHE-RSA-AES256-GCM-SHA384:ECDHE-ECDSA-CHACHA20-POLY1305:ECDHE-RSA-CHACHA20-POLY1305:DHE-RSA-AES128-GCM-SHA256:DHE-RSA-AES256-GCM-SHA384 + +postfix_keytab: /var/lib/gssproxy/clients/postfix.keytab + +postfix_selinux_policy_te: | + require { + type postfix_exec_t; + type postfix_smtpd_exec_t; + type postfix_cleanup_t; + type postfix_cleanup_exec_t; + type postfix_master_t; + type postfix_cleanup_t; + 
type postfix_smtpd_t;
+    type gssproxy_t;
+    type gssproxy_var_lib_t;
+    class file getattr;
+    class dir search;
+    class sock_file write;
+    class unix_stream_socket connectto;
+    class process noatsecure;
+    class key { read view write };
+  }
+
+  #============= postfix_smtpd_t ==============
+  allow postfix_smtpd_t gssproxy_t:unix_stream_socket connectto;
+  allow postfix_smtpd_t gssproxy_var_lib_t:dir search;
+  allow postfix_smtpd_t gssproxy_var_lib_t:sock_file write;
+  allow postfix_smtpd_t postfix_master_t:key { read view write };
+
+  #============= postfix_master_t ==============
+  allow postfix_master_t postfix_smtpd_t:process noatsecure;
+  allow postfix_master_t postfix_smtpd_t:key { read write };
+  allow postfix_master_t postfix_cleanup_t:process noatsecure;
+  allow postfix_master_t gssproxy_t:unix_stream_socket connectto;
+  allow postfix_master_t gssproxy_var_lib_t:dir search;
+  allow postfix_master_t gssproxy_var_lib_t:sock_file write;
+
+  #============= postfix_cleanup_t ==============
+  allow postfix_cleanup_t gssproxy_var_lib_t:dir search;
+  allow postfix_cleanup_t gssproxy_var_lib_t:sock_file write;
+  allow postfix_cleanup_t gssproxy_t:unix_stream_socket connectto;
+  allow postfix_cleanup_t postfix_master_t:key read;
+  allow postfix_cleanup_t postfix_smtpd_t:key read;
+
+  #============= gssproxy_t ==============
+  allow gssproxy_t postfix_cleanup_exec_t:file getattr;
+  allow gssproxy_t postfix_smtpd_exec_t:file getattr;
+  allow gssproxy_t postfix_exec_t:file getattr;
diff --git a/roles/postgresql_server/defaults/main.yml b/roles/postgresql_server/defaults/main.yml
new file mode 100644
index 0000000..ccb398a
--- /dev/null
+++ b/roles/postgresql_server/defaults/main.yml
@@ -0,0 +1,5 @@
+postgresql_timezone: '{{ timezone }}'
+postgresql_max_connections: 100
+postgresql_shared_buffers_mb: '{{ (ansible_memory_mb.real.total / 4) | int }}'
+
+postgresql_password_users: []
diff --git a/roles/postgresql_server/files/etc/systemd/system/postgresql.service.d/override.conf b/roles/postgresql_server/files/etc/systemd/system/postgresql.service.d/override.conf
new file mode 100644
index 0000000..e355ab5
--- /dev/null
+++ b/roles/postgresql_server/files/etc/systemd/system/postgresql.service.d/override.conf
@@ -0,0 +1,6 @@
+[Unit]
+Wants=gssproxy.service
+After=network.target gssproxy.service
+
+[Service]
+Environment=GSS_USE_PROXY=yes
diff --git a/roles/postgresql_server/handlers/main.yml b/roles/postgresql_server/handlers/main.yml
new file mode 100644
index 0000000..c081f0f
--- /dev/null
+++ b/roles/postgresql_server/handlers/main.yml
@@ -0,0 +1,4 @@
+- name: restart postgresql
+  systemd:
+    name: postgresql
+    state: restarted
diff --git a/roles/postgresql_server/tasks/freeipa.yml b/roles/postgresql_server/tasks/freeipa.yml
new file mode 100644
index 0000000..50ea678
--- /dev/null
+++ b/roles/postgresql_server/tasks/freeipa.yml
@@ -0,0 +1,49 @@
+- name: create postgres service principal
+  ipaservice:
+    ipaadmin_principal: '{{ ipa_user }}'
+    ipaadmin_password: '{{ ipa_pass }}'
+    name: 'postgres/{{ ansible_fqdn }}'
+    state: present
+
+- name: retrieve postgres service keytab
+  include_role:
+    name: freeipa_keytab
+  vars:
+    keytab_principal: 'postgres/{{ ansible_fqdn }}'
+    keytab_path: '{{ postgresql_keytab }}'
+
+- name: create SELinux policy for postgres to access gssproxy
+  include_role:
+    name: selinux_policy
+    apply:
+      tags: selinux
+  vars:
+    selinux_policy_name: postgres_gssproxy
+    selinux_policy_te: '{{ postgresql_selinux_policy_te }}'
+  tags: selinux
+
+- name: create systemd override 
directory + file: + path: /etc/systemd/system/postgresql.service.d/ + state: directory + +- name: create systemd unit override + copy: + src: etc/systemd/system/postgresql.service.d/override.conf + dest: /etc/systemd/system/postgresql.service.d/override.conf + register: postgresql_systemd_override + +- name: reload systemd units + systemd: + daemon_reload: yes + when: postgresql_systemd_override.changed + +- name: configure gssproxy + include_role: + name: gssproxy_client + vars: + gssproxy_name: postgres + gssproxy_section: service/postgresql + gssproxy_keytab: '{{ postgresql_keytab }}' + gssproxy_cred_usage: accept + gssproxy_euid: postgres diff --git a/roles/postgresql_server/tasks/main.yml b/roles/postgresql_server/tasks/main.yml new file mode 100644 index 0000000..96b173c --- /dev/null +++ b/roles/postgresql_server/tasks/main.yml @@ -0,0 +1,53 @@ +- name: install postgresql + dnf: + name: '{{ postgresql_packages }}' + state: present + +- name: initialize database + command: + cmd: postgresql-setup --initdb + creates: '{{ postgresql_data_dir }}/PG_VERSION' + +- import_tasks: freeipa.yml + tags: freeipa + +- name: request TLS certificate + include_role: + name: getcert_request + vars: + certificate_service: postgres + certificate_path: '{{ postgresql_certificate_path }}' + certificate_key_path: '{{ postgresql_certificate_key_path }}' + certificate_owner: postgres + certificate_hook: systemctl reload postgresql + +- name: generate dhparams + openssl_dhparam: + path: '{{ postgresql_dhparams_path }}' + size: 2048 + +- name: generate postgresql configuration + template: + src: '{{ postgresql_data_dir[1:] }}/{{ item }}.j2' + dest: '{{ postgresql_data_dir }}/{{ item }}' + owner: postgres + group: postgres + mode: 0600 + loop: + - postgresql.conf + - pg_hba.conf + notify: restart postgresql + +- name: enable postgresql service + systemd: + name: postgresql + enabled: yes + state: started + +- name: open firewall ports + firewalld: + service: postgresql + permanent: yes + immediate: yes + state: enabled + tags: firewalld diff --git a/roles/postgresql_server/templates/var/lib/pgsql/data/pg_hba.conf.j2 b/roles/postgresql_server/templates/var/lib/pgsql/data/pg_hba.conf.j2 new file mode 100644 index 0000000..7768ad8 --- /dev/null +++ b/roles/postgresql_server/templates/var/lib/pgsql/data/pg_hba.conf.j2 @@ -0,0 +1,7 @@ +# TYPE DATABASE USER ADDRESS METHOD +local all all peer +{% for user in postgresql_password_users %} +hostssl all {{ user }} all scram-sha-256 +{% endfor %} +hostssl all all all gss include_realm=0 krb_realm={{ freeipa_realm }} +hostgssenc all all all gss include_realm=0 krb_realm={{ freeipa_realm }} diff --git a/roles/postgresql_server/templates/var/lib/pgsql/data/postgresql.conf.j2 b/roles/postgresql_server/templates/var/lib/pgsql/data/postgresql.conf.j2 new file mode 100644 index 0000000..07d198e --- /dev/null +++ b/roles/postgresql_server/templates/var/lib/pgsql/data/postgresql.conf.j2 @@ -0,0 +1,34 @@ +listen_addresses = '*' +max_connections = {{ postgresql_max_connections }} + +ssl = on +ssl_cert_file = '{{ postgresql_certificate_path }}' +ssl_key_file = '{{ postgresql_certificate_key_path }}' +ssl_dh_params_file = '{{ postgresql_dhparams_path }}' +ssl_ciphers = '{{ postgresql_ssl_ciphers }}' + +password_encryption = scram-sha-256 + +krb_caseins_users = on + +shared_buffers = {{ postgresql_shared_buffers_mb }}MB +dynamic_shared_memory_type = posix # the default is the first option + +log_destination = 'stderr' +logging_collector = off +log_min_messages = info 
+log_min_error_statement = warning +log_connections = off +log_disconnections = off +log_line_prefix = '%q%u@%d ' +log_timezone = '{{ postgresql_timezone }}' + +datestyle = 'iso, mdy' +timezone = '{{ postgresql_timezone }}' + +lc_messages = 'en_US.UTF-8' +lc_monetary = 'en_US.UTF-8' +lc_numeric = 'en_US.UTF-8' +lc_time = 'en_US.UTF-8' + +default_text_search_config = 'pg_catalog.english' diff --git a/roles/postgresql_server/vars/main.yml b/roles/postgresql_server/vars/main.yml new file mode 100644 index 0000000..52cecc4 --- /dev/null +++ b/roles/postgresql_server/vars/main.yml @@ -0,0 +1,40 @@ +postgresql_packages: + - postgresql-server + - python3-psycopg2 + +postgresql_user: postgres + +postgresql_data_dir: /var/lib/pgsql/data +postgresql_keytab: /var/lib/gssproxy/postgresql.keytab + +postgresql_certificate_path: /etc/pki/tls/certs/postgres.pem +postgresql_certificate_key_path: /etc/pki/tls/private/postgres.key +postgresql_dhparams_path: /etc/pki/tls/certs/postgres-dhparams.pem +postgresql_ssl_ciphers: 'ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-RSA-AES128-GCM-SHA256:ECDHE-ECDSA-AES256-GCM-SHA384:ECDHE-RSA-AES256-GCM-SHA384:ECDHE-ECDSA-CHACHA20-POLY1305:ECDHE-RSA-CHACHA20-POLY1305:DHE-RSA-AES128-GCM-SHA256:DHE-RSA-AES256-GCM-SHA384' + +postgresql_hbac_hostgroup: postgresql_servers +postgresql_hbac_service: postgresql + +postgresql_archive_shell: >- + pg_dumpall | gzip > "pg_dumpall-$(date +%Y%m%d%H%M%S).sql.gz" + +postgresql_selinux_policy_te: | + require { + type postgresql_t; + type postgresql_exec_t; + type gssproxy_t; + type gssproxy_var_lib_t; + class dir search; + class sock_file write; + class unix_stream_socket connectto; + class file getattr; + } + + #============= postgresql_t ============== + allow postgresql_t gssproxy_var_lib_t:dir search; + allow postgresql_t gssproxy_var_lib_t:sock_file write; + allow postgresql_t gssproxy_t:unix_stream_socket connectto; + allow postgresql_t gssproxy_var_lib_t:dir search; + + #============= gssproxy_t ============== + allow gssproxy_t postgresql_exec_t:file getattr; diff --git a/roles/prosody/defaults/main.yml b/roles/prosody/defaults/main.yml new file mode 100644 index 0000000..df7ac3b --- /dev/null +++ b/roles/prosody/defaults/main.yml @@ -0,0 +1,21 @@ +prosody_admins: [] +prosody_vhosts: ['{{ email_domain }}'] +prosody_conference_vhosts: "{{ ['conference.'] | product(prosody_vhosts) | map('join') | list }}" +prosody_user: s-prosody +prosody_db_name: prosody +prosody_db_host: '{{ postgresql_host }}' +prosody_archive_expires_after: 4w +prosody_http_port: 5280 +prosody_http_host: '{{ ansible_fqdn }}' +prosody_sysaccount_username: prosody + +prosody_ldap_hosts: '{{ freeipa_hosts }}' +prosody_access_group: role-xmpp-access + +prosody_upload_file_size_limit: 52428800 # 50 MB +prosody_upload_expire_after: 604800 # 1 week +prosody_upload_quota: 10737418240 # 10 GiB + +prosody_turn_secret: '{{ coturn_auth_secret }}' +prosody_turn_host: '{{ coturn_realm }}' +prosody_turn_port: 3478 diff --git a/roles/prosody/files/etc/systemd/system/prosody.service.d/override.conf b/roles/prosody/files/etc/systemd/system/prosody.service.d/override.conf new file mode 100644 index 0000000..8ac7456 --- /dev/null +++ b/roles/prosody/files/etc/systemd/system/prosody.service.d/override.conf @@ -0,0 +1,6 @@ +[Unit] +Wants=gssproxy.service +After=network-online.target gssproxy.service + +[Service] +Environment=GSS_USE_PROXY=yes diff --git a/roles/prosody/handlers/main.yml b/roles/prosody/handlers/main.yml new file mode 100644 index 0000000..3d3cbf4 --- /dev/null +++ 
b/roles/prosody/handlers/main.yml @@ -0,0 +1,4 @@ +- name: restart prosody + systemd: + name: prosody + state: restarted diff --git a/roles/prosody/meta/main.yml b/roles/prosody/meta/main.yml new file mode 100644 index 0000000..8f5b990 --- /dev/null +++ b/roles/prosody/meta/main.yml @@ -0,0 +1,16 @@ +dependencies: + - role: yum + yum_repositories: epel + tags: yum + + - role: prosody_letsencrypt_proxy + + - role: freeipa_system_account + system_account_username: '{{ prosody_sysaccount_username }}' + system_account_password: '{{ prosody_sysaccount_password }}' + + - role: apache_vhost + apache_server_name: '{{ prosody_http_host }}' + apache_server_aliases: [] + apache_letsencrypt: yes + apache_config: '{{ prosody_apache_config }}' diff --git a/roles/prosody/tasks/database.yml b/roles/prosody/tasks/database.yml new file mode 100644 index 0000000..675ab11 --- /dev/null +++ b/roles/prosody/tasks/database.yml @@ -0,0 +1,17 @@ +- name: create database + postgresql_db: + name: '{{ prosody_db_name }}' + state: present + delegate_to: '{{ postgresql_inventory_host }}' + become: yes + become_user: postgres + +- name: create database user + postgresql_user: + name: '{{ prosody_user }}' + db: '{{ prosody_db_name }}' + priv: ALL + state: present + delegate_to: '{{ postgresql_inventory_host }}' + become: yes + become_user: postgres diff --git a/roles/prosody/tasks/freeipa.yml b/roles/prosody/tasks/freeipa.yml new file mode 100644 index 0000000..caff62a --- /dev/null +++ b/roles/prosody/tasks/freeipa.yml @@ -0,0 +1,64 @@ +- name: create user + ipauser: + ipaadmin_principal: '{{ ipa_user }}' + ipaadmin_password: '{{ ipa_pass }}' + name: '{{ prosody_user }}' + loginshell: /sbin/nologin + homedir: '{{ prosody_data_dir }}' + givenname: Prosody + sn: Service Account + state: present + run_once: yes + +- name: retrieve user keytab + include_role: + name: freeipa_keytab + vars: + keytab_principal: '{{ prosody_user }}' + keytab_path: '{{ prosody_keytab }}' + +- name: configure gssproxy for kerberized postgres + include_role: + name: gssproxy_client + vars: + gssproxy_name: prosody + gssproxy_section: service/prosody + gssproxy_client_keytab: '{{ prosody_keytab }}' + gssproxy_cred_usage: initiate + gssproxy_euid: prosody + +- name: create systemd override directory + file: + path: /etc/systemd/system/prosody.service.d + state: directory + +- name: create systemd override file + copy: + src: etc/systemd/system/prosody.service.d/override.conf + dest: /etc/systemd/system/prosody.service.d/override.conf + register: prosody_systemd_unit + notify: restart prosody + +- name: reload systemd units + systemd: + daemon_reload: yes + when: prosody_systemd_unit.changed + +- name: create SELinux policy for prosody to access gssproxy + include_role: + name: selinux_policy + apply: + tags: selinux + vars: + selinux_policy_name: prosody_gssproxy + selinux_policy_te: '{{ prosody_selinux_policy_te }}' + tags: selinux + +- name: create access group + ipagroup: + ipaadmin_principal: '{{ ipa_user }}' + ipaadmin_password: '{{ ipa_pass }}' + name: '{{ prosody_access_group }}' + nonposix: yes + state: present + run_once: yes diff --git a/roles/prosody/tasks/main.yml b/roles/prosody/tasks/main.yml new file mode 100644 index 0000000..c29dd38 --- /dev/null +++ b/roles/prosody/tasks/main.yml @@ -0,0 +1,97 @@ +- name: install prosody + dnf: + name: '{{ prosody_packages }}' + state: present + +- name: request conference vhost certificates + include_role: + name: certbot + vars: + certificate_sans: ['{{ item }}'] + certificate_path: '{{ 
prosody_certificate_dir }}/{{ item }}.crt'
+    certificate_key_path: '{{ prosody_certificate_dir }}/{{ item }}.key'
+    certificate_owner: prosody
+    certificate_hook: systemctl reload prosody
+    certificate_use_apache: yes
+  loop: '{{ prosody_conference_vhosts }}'
+
+- import_tasks: freeipa.yml
+  tags: freeipa
+
+- import_tasks: database.yml
+  tags: database
+
+- name: create module directory
+  file:
+    path: '{{ prosody_module_dir }}'
+    state: directory
+
+- name: clone module repository
+  hg:
+    repo: '{{ prosody_module_repo }}'
+    dest: '{{ prosody_module_dir }}'
+
+- name: generate configuration
+  template:
+    src: etc/prosody/prosody.cfg.lua.j2
+    dest: /etc/prosody/prosody.cfg.lua
+    owner: root
+    group: prosody
+    mode: 0640
+  notify: restart prosody
+
+- name: open firewall ports
+  firewalld:
+    permanent: yes
+    immediate: yes
+    service: '{{ item }}'
+    state: enabled
+  loop:
+    - xmpp-client
+    - xmpp-server
+  tags: firewalld
+
+- name: enable httpd_can_network_connect SELinux boolean
+  seboolean:
+    name: httpd_can_network_connect
+    state: yes
+    persistent: yes
+  tags: selinux
+
+- name: create roster file with correct permissions
+  copy:
+    content: ''
+    dest: '{{ prosody_groups_file }}'
+    owner: prosody
+    group: prosody
+    mode: 0640
+    force: no
+
+- name: generate roster script
+  template:
+    src: usr/local/bin/prosody-update-roster.j2
+    dest: /usr/local/bin/prosody-update-roster
+    mode: 0555
+
+- name: create prosody-update-roster timer
+  include_role:
+    name: systemd_timer
+  vars:
+    timer_name: prosody-update-roster
+    timer_description: Update prosody shared roster
+    timer_after: network.target
+    timer_on_calendar: daily
+    timer_exec: /usr/local/bin/prosody-update-roster
+    timer_user: prosody
+
+- name: generate shared roster
+  systemd:
+    name: prosody-update-roster.service
+    state: started
+  changed_when: no
+
+- name: start prosody
+  systemd:
+    name: prosody
+    enabled: yes
+    state: started
diff --git a/roles/prosody/templates/etc/prosody/prosody.cfg.lua.j2 b/roles/prosody/templates/etc/prosody/prosody.cfg.lua.j2
new file mode 100644
index 0000000..9a07f8e
--- /dev/null
+++ b/roles/prosody/templates/etc/prosody/prosody.cfg.lua.j2
@@ -0,0 +1,119 @@
+admins = { {% for admin in prosody_admins %}"{{ admin }}"{% if not loop.last %},{% endif %}{% endfor %} }
+
+network_backend = "event"
+
+plugin_paths = { "{{ prosody_module_dir }}" }
+
+modules_enabled = {
+  -- required modules
+  "roster"; -- Allow users to have a roster. Recommended ;)
+  "saslauth"; -- Authentication for clients and servers. Recommended if you want to log in.
+  "tls"; -- Add support for secure TLS on c2s/s2s connections
+  "dialback"; -- s2s dialback support
+  "disco"; -- Service discovery
+
+  -- optional modules
+  "csi"; -- Client state indication
+  "carbons"; -- Keep multiple clients in sync
+  "pep"; -- Enables users to publish their avatar, mood, activity, playing music and more
+  "private"; -- Private XML storage (for room bookmarks, etc.)
+ "blocklist"; -- Allow users to block communications with other users + "vcard4"; -- User profiles (stored in PEP) + "vcard_legacy"; -- Conversion between legacy vCard and PEP Avatar, vcard + "limits"; -- Enable bandwidth limiting for XMPP connections + + "version"; -- Replies to server version requests + "uptime"; -- Report how long server has been running + "time"; -- Let others know the time here on this server + "ping"; -- Replies to XMPP pings with pongs + "mam"; -- Store messages in an archive and allow users to access it + "admin_adhoc"; -- Allows administration via an XMPP client that supports ad-hoc commands + "groups"; -- Shared roster support + + -- community modules + "smacks"; -- Stream management / fast reconnects + "csi_battery_saver"; -- Mobile optimizations + "turn_external"; -- STUN/TURN server + "reload_modules"; -- Reload modules on config reload +} + +reload_modules = { "groups", "tls" } +pidfile = "/run/prosody/prosody.pid"; + +allow_registration = false +groups_file = "{{ prosody_groups_file }}" + +c2s_require_encryption = true +s2s_require_encryption = true +s2s_secure_auth = false + +-- Enable rate limits for incoming client and server connections +limits = { + c2s = { + rate = "10kb/s"; + }; + s2sin = { + rate = "30kb/s"; + }; +} + +-- Authentication +authentication = "ldap" +ldap_server = "{{ prosody_ldap_hosts | join(' ') }}" +ldap_rootdn = "uid={{ prosody_sysaccount_username }},{{ freeipa_sysaccount_basedn }}" +ldap_password = "{{ prosody_sysaccount_password }}" +ldap_base = "{{ freeipa_user_basedn }}" +ldap_filter = "(&(jid=$user@$host)(memberOf=cn={{ prosody_access_group }},{{ freeipa_group_basedn }}))" +ldap_tls = true + +-- Storage +storage = "sql" +sql = { + driver = "PostgreSQL", + database = "{{ prosody_db_name }}", + username = "{{ prosody_user }}", + host = "{{ prosody_db_host }}" +} + +archive_expires_after = "{{ prosody_archive_expires_after }}" + +-- Logging +log = { + info = "*console"; +} + +-- Certificates +certificates = "/etc/pki/prosody" + +-- HTTP +http_ports = { {{ prosody_http_port }} } +http_interfaces = { "127.0.0.1", "::1" } +https_interfaces = { } +https_ports = { } +http_external_url = "https://{{ prosody_http_host }}/" +https_external_url = "https://{{ prosody_http_host }}/" +http_max_content_size = {{ prosody_upload_file_size_limit }} +trusted_proxies = { "127.0.0.1", "::1" } + +Component "{{ prosody_http_host }}" "http_upload" + +http_upload_file_size_limit = {{ prosody_upload_file_size_limit }} +http_upload_expire_after = {{ prosody_upload_expire_after }} +http_upload_quota = {{ prosody_upload_quota }} + +-- Virtual hosts +{% for vhost in prosody_vhosts %} +VirtualHost "{{ vhost }}" +disco_items = { + { "{{ prosody_http_host }}" }, +} +turn_external_host = "{{ prosody_turn_host }}" +turn_external_port = {{ prosody_turn_port }} +turn_external_secret = "{{ prosody_turn_secret }}" + +{% endfor %} + +{% for vhost in prosody_conference_vhosts %} +Component "{{ vhost }}" "muc" + modules_enabled = { "muc_mam" } +{% endfor %} diff --git a/roles/prosody/templates/usr/local/bin/prosody-update-roster.j2 b/roles/prosody/templates/usr/local/bin/prosody-update-roster.j2 new file mode 100644 index 0000000..680ab91 --- /dev/null +++ b/roles/prosody/templates/usr/local/bin/prosody-update-roster.j2 @@ -0,0 +1,56 @@ +#!/usr/libexec/platform-python + +# Copyright (c) 2023 stonewall@sacredheartsc.com +# MIT License https://opensource.org/licenses/MIT +# +# Generates a shared roster file for Prosody from the given IPA group. 
+ +import os +import sys +import ldap +import ldap.sasl +import ldap.filter +import hashlib +import subprocess + +LDAP_URI = '{{ freeipa_ldap_uri }}' +USER_BASEDN = '{{ freeipa_user_basedn }}' +GROUP_BASEDN = '{{ freeipa_group_basedn }}' + +PROSODY_GROUPS_FILE = '{{ prosody_groups_file }}' +PROSODY_ACCESS_GROUP = '{{ prosody_access_group }}' + +ROSTER_GROUP_NAME = 'Internal' + +os.environ['GSS_USE_PROXY'] = 'yes' +conn = ldap.initialize(LDAP_URI) +conn.protocol_version = ldap.VERSION3 +conn.sasl_interactive_bind_s('', ldap.sasl.sasl({}, 'GSSAPI')) + +users = conn.search_s( + USER_BASEDN, + ldap.SCOPE_SUBTREE, + ldap.filter.filter_format('memberOf=cn=%s,%s', [PROSODY_ACCESS_GROUP, GROUP_BASEDN]), + ['jid', 'displayName']) + +if not users: + exit(1) + +with open(PROSODY_GROUPS_FILE, 'rb') as f: + hash_before = hashlib.md5(f.read()).hexdigest() + f.close() + +with open(PROSODY_GROUPS_FILE, 'w') as f: + print(f'[{ROSTER_GROUP_NAME}]', file=f) + for user in users: + jid = user[1]['jid'][0].decode('utf-8') + displayName = user[1]['displayName'][0].decode('utf-8') + print(f'{jid}={displayName}', file=f) + f.close() + +with open(PROSODY_GROUPS_FILE, 'rb') as f: + hash_after = hashlib.md5(f.read()).hexdigest() + f.close() + +if hash_before != hash_after: + subprocess.run(['prosodyctl', 'reload']) diff --git a/roles/prosody/vars/main.yml b/roles/prosody/vars/main.yml new file mode 100644 index 0000000..d971fb7 --- /dev/null +++ b/roles/prosody/vars/main.yml @@ -0,0 +1,38 @@ +prosody_certificate_dir: /etc/pki/prosody +prosody_module_dir: /usr/local/lib64/prosody/modules +prosody_data_dir: /var/lib/prosody +prosody_keytab: /var/lib/gssproxy/clients/{{ prosody_user }}.keytab +prosody_groups_file: /etc/prosody/groups.ini + +prosody_module_repo: https://hg.prosody.im/prosody-modules/ + +prosody_packages: + - prosody + - lua-dbi + - lua-event + - lua-ldap + - lua-sec + - mercurial + +prosody_apache_config: | + {{ apache_proxy_config }} + ProxyPass / http://127.0.0.1:{{ prosody_http_port }}/ + ProxyPassReverse / http://127.0.0.1:{{ prosody_http_port }}/ + +prosody_selinux_policy_te: | + require { + type prosody_t; + type gssproxy_t; + type gssproxy_var_lib_t; + type ldap_port_t; + class dir search; + class sock_file write; + class unix_stream_socket connectto; + class tcp_socket name_connect; + } + + #============= prosody_t ============== + allow prosody_t gssproxy_var_lib_t:dir search; + allow prosody_t gssproxy_var_lib_t:sock_file write; + allow prosody_t gssproxy_t:unix_stream_socket connectto; + allow prosody_t ldap_port_t:tcp_socket name_connect; diff --git a/roles/prosody_letsencrypt_proxy/defaults/main.yml b/roles/prosody_letsencrypt_proxy/defaults/main.yml new file mode 100644 index 0000000..a59fa35 --- /dev/null +++ b/roles/prosody_letsencrypt_proxy/defaults/main.yml @@ -0,0 +1,2 @@ +prosody_le_role: slave +prosody_le_domains: '{{ prosody_vhosts }}' diff --git a/roles/prosody_letsencrypt_proxy/handlers/main.yml b/roles/prosody_letsencrypt_proxy/handlers/main.yml new file mode 100644 index 0000000..18c505e --- /dev/null +++ b/roles/prosody_letsencrypt_proxy/handlers/main.yml @@ -0,0 +1,4 @@ +- name: restart sshd + systemd: + name: sshd + state: restarted diff --git a/roles/prosody_letsencrypt_proxy/tasks/main.yml b/roles/prosody_letsencrypt_proxy/tasks/main.yml new file mode 100644 index 0000000..95b108b --- /dev/null +++ b/roles/prosody_letsencrypt_proxy/tasks/main.yml @@ -0,0 +1 @@ +- import_tasks: '{{ prosody_le_role }}.yml' diff --git a/roles/prosody_letsencrypt_proxy/tasks/master.yml 
b/roles/prosody_letsencrypt_proxy/tasks/master.yml new file mode 100644 index 0000000..ab84669 --- /dev/null +++ b/roles/prosody_letsencrypt_proxy/tasks/master.yml @@ -0,0 +1,47 @@ +- name: create user + user: + name: '{{ prosody_le_user }}' + home: '{{ prosody_le_home }}' + system: yes + create_home: no + shell: /sbin/nologin + +- name: create home directory + file: + path: '{{ prosody_le_home }}' + owner: root + group: '{{ prosody_le_user }}' + mode: 0750 + state: directory + +- name: create ssh authorized_keys directory + file: + path: '{{ prosody_le_authorized_keys_dir }}' + mode: 0755 + state: directory + +- name: copy ssh public key + copy: + content: '{{ prosody_le_ssh_pubkey }}' + dest: '{{ prosody_le_authorized_keys_dir }}/{{ prosody_le_user }}' + mode: 0640 + owner: root + group: '{{ prosody_le_user }}' + +- name: generate sshd configuration + template: + src: etc/ssh/sshd_config.d/99-prosody-le-proxy.conf + dest: /etc/ssh/sshd_config.d/99-prosody-le-proxy.conf + notify: restart sshd + +- name: retrieve certificates + include_role: + name: certbot + vars: + certificate_sans: ['{{ item }}'] + certificate_path: '{{ prosody_le_home }}/{{ item }}.crt' + certificate_key_path: '{{ prosody_le_home }}/{{ item }}.key' + certificate_owner: 'root:{{ prosody_le_user }}' + certificate_mode: 0640 + certificate_use_apache: yes + loop: '{{ prosody_le_domains }}' diff --git a/roles/prosody_letsencrypt_proxy/tasks/slave.yml b/roles/prosody_letsencrypt_proxy/tasks/slave.yml new file mode 100644 index 0000000..1bcf67a --- /dev/null +++ b/roles/prosody_letsencrypt_proxy/tasks/slave.yml @@ -0,0 +1,32 @@ +- name: install packages + dnf: + name: '{{ prosody_le_slave_packages }}' + state: present + +- name: copy ssh privkey + copy: + content: '{{ prosody_le_ssh_privkey }}' + dest: '{{ prosody_le_ssh_privkey_path }}' + mode: 0600 + +- name: generate script + template: + src: usr/local/sbin/prosody-letsencrypt-proxy.j2 + dest: /usr/local/sbin/prosody-letsencrypt-proxy + mode: 0555 + +- name: create systemd timer + include_role: + name: systemd_timer + vars: + timer_name: prosody-letsencrypt-proxy + timer_description: Check for updated prosody certificates + timer_after: network.target + timer_on_calendar: daily + timer_exec: /usr/local/sbin/prosody-letsencrypt-proxy + +- name: retrieve certificates + systemd: + name: prosody-letsencrypt-proxy.service + state: started + changed_when: no diff --git a/roles/prosody_letsencrypt_proxy/templates/etc/ssh/sshd_config.d/99-prosody-le-proxy.conf b/roles/prosody_letsencrypt_proxy/templates/etc/ssh/sshd_config.d/99-prosody-le-proxy.conf new file mode 100644 index 0000000..7d6b9a2 --- /dev/null +++ b/roles/prosody_letsencrypt_proxy/templates/etc/ssh/sshd_config.d/99-prosody-le-proxy.conf @@ -0,0 +1,7 @@ +Match user {{ prosody_le_user }} + AuthorizedKeysFile {{ prosody_le_authorized_keys_dir }}/%u + ChrootDirectory %h + ForceCommand internal-sftp -R + AllowTcpForwarding no + X11Forwarding no + AuthenticationMethods publickey diff --git a/roles/prosody_letsencrypt_proxy/templates/usr/local/sbin/prosody-letsencrypt-proxy.j2 b/roles/prosody_letsencrypt_proxy/templates/usr/local/sbin/prosody-letsencrypt-proxy.j2 new file mode 100644 index 0000000..601bef8 --- /dev/null +++ b/roles/prosody_letsencrypt_proxy/templates/usr/local/sbin/prosody-letsencrypt-proxy.j2 @@ -0,0 +1,51 @@ +#!/bin/bash + +# Copyright (c) 2023 stonewall@sacredheartsc.com +# MIT License https://opensource.org/licenses/MIT +# +# Pulls certificate files from another host over sftp, and restarts prosody +# if 
any certificate files were modified.
+
+set -Eeu -o pipefail
+
+shopt -s nullglob
+
+SSH_KEY={{ prosody_le_ssh_privkey_path | quote }}
+LETSENCRYPT_PROXY_USER={{ prosody_le_user | quote }}
+LETSENCRYPT_PROXY_HOST={{ prosody_le_proxy_host | quote }}
+CERT_DIR=/etc/prosody/certs
+
+CHECKSUM_FILE=certs.md5
+
+cd "${CERT_DIR}"
+
+if [ -f "$CHECKSUM_FILE" ]; then
+  md5_orig=$(<"$CHECKSUM_FILE")
+else
+  md5_orig=''
+fi
+
+# NOTE: the sftp batch commands below are assumed: fetch the certificate
+# files published by the proxy host, then rebuild the checksum file.
+sftp -i "$SSH_KEY" "${LETSENCRYPT_PROXY_USER}@${LETSENCRYPT_PROXY_HOST}" <<'EOF'
+get *.crt
+get *.key
+EOF
+
+> "$CHECKSUM_FILE"
+for file in *.{crt,key} ; do
+  md5sum "$file" >> "$CHECKSUM_FILE"
+done
+
+md5_new=$(<"$CHECKSUM_FILE")
+
+if [ "$md5_orig" != "$md5_new" ]; then
+  echo 'found new certificates, reloading prosody.'
+  if systemctl is-active prosody > /dev/null; then
+    systemctl reload prosody
+  fi
+else
+  echo 'certificates unchanged.'
+fi
diff --git a/roles/prosody_letsencrypt_proxy/vars/main.yml b/roles/prosody_letsencrypt_proxy/vars/main.yml
new file mode 100644
index 0000000..a04092d
--- /dev/null
+++ b/roles/prosody_letsencrypt_proxy/vars/main.yml
@@ -0,0 +1,9 @@
+prosody_le_user: prosody-le-proxy
+prosody_le_home: /var/spool/prosody
+prosody_le_authorized_keys_dir: /etc/ssh/authorized_keys
+prosody_le_cert_dir: '{{ prosody_le_home }}/certs'
+
+prosody_le_slave_packages:
+  - prosody
+
+prosody_le_ssh_privkey_path: /etc/prosody/id_prosody_le_proxy
diff --git a/roles/proxmox_hypervisor/defaults/main.yml b/roles/proxmox_hypervisor/defaults/main.yml
new file mode 100644
index 0000000..a037eb4
--- /dev/null
+++ b/roles/proxmox_hypervisor/defaults/main.yml
@@ -0,0 +1,33 @@
+proxmox_api_user: ansible@pam
+proxmox_api_password: changeme
+
+proxmox_ntp_servers: '{{ vlan.ntp_servers }}'
+
+proxmox_mail_origin: '{{ email_domain }}'
+proxmox_relayhost: '{{ email_domain }}'
+
+proxmox_syslog_host: '{{ syslog_host_ip }}'
+proxmox_syslog_port: 514
+proxmox_syslog_proto: tcp
+
+proxmox_sudo_mailto: root
+
+proxmox_bridge: vmbr0
+proxmox_storage: local-zfs
+
+proxmox_zfs_trim_on_calendar: monthly
+proxmox_zfs_scrub_on_calendar: monthly
+
+proxmox_zed_email: root
+proxmox_zed_verbose: yes
+proxmox_zed_notify_interval_sec: 3600
+
+proxmox_nagios_ssh_pubkey: '{{ nagios_ssh_pubkey }}'
+
+proxmox_snmp_location: unknown
+proxmox_snmp_contact: '{{ organization }} Sysadmins '
+
+snmp_v3_users:
+  - name: '{{ nagios_snmp_user }}'
+    auth_pass: '{{ nagios_snmp_auth_pass }}'
+    priv_pass: '{{ nagios_snmp_priv_pass }}'
diff --git a/roles/proxmox_hypervisor/files/etc/apt/apt.conf.d/20auto-upgrades b/roles/proxmox_hypervisor/files/etc/apt/apt.conf.d/20auto-upgrades
new file mode 100644
index 0000000..5bf85d3
--- /dev/null
+++ b/roles/proxmox_hypervisor/files/etc/apt/apt.conf.d/20auto-upgrades
@@ -0,0 +1,3 @@
+APT::Periodic::Update-Package-Lists "1";
+APT::Periodic::Unattended-Upgrade "1";
+APT::Periodic::AutocleanInterval "7";
diff --git a/roles/proxmox_hypervisor/files/etc/apt/apt.conf.d/50unattended-upgrades b/roles/proxmox_hypervisor/files/etc/apt/apt.conf.d/50unattended-upgrades
new file mode 100644
index 0000000..b2e9457
--- /dev/null
+++ b/roles/proxmox_hypervisor/files/etc/apt/apt.conf.d/50unattended-upgrades
@@ -0,0 +1,14 @@
+Unattended-Upgrade::Origins-Pattern {
+  "origin=*";
+};
+
+Unattended-Upgrade::Package-Blacklist {
+};
+
+Unattended-Upgrade::Mail "root";
+Unattended-Upgrade::MailOnlyOnError "true";
+
+Unattended-Upgrade::Remove-Unused-Kernel-Packages "true";
+Unattended-Upgrade::Remove-New-Unused-Dependencies "true";
+Unattended-Upgrade::Remove-Unused-Dependencies "true";
+Unattended-Upgrade::Automatic-Reboot 
"false"; diff --git a/roles/proxmox_hypervisor/files/usr/lib/nagios/plugins b/roles/proxmox_hypervisor/files/usr/lib/nagios/plugins new file mode 120000 index 0000000..b13c8fa --- /dev/null +++ b/roles/proxmox_hypervisor/files/usr/lib/nagios/plugins @@ -0,0 +1 @@ +../../../../../nagios_client/files/usr/lib64/nagios/plugins \ No newline at end of file diff --git a/roles/proxmox_hypervisor/handlers/main.yml b/roles/proxmox_hypervisor/handlers/main.yml new file mode 100644 index 0000000..63fe760 --- /dev/null +++ b/roles/proxmox_hypervisor/handlers/main.yml @@ -0,0 +1,24 @@ +- name: restart chrony + systemd: + name: chronyd + state: restarted + +- name: restart postfix + systemd: + name: postfix + state: restarted + +- name: restart rsyslog + systemd: + name: rsyslog + state: restarted + +- name: restart zfs-zed + systemd: + name: zfs-zed + state: restarted + +- name: restart snmpd + systemd: + name: snmpd + state: restarted diff --git a/roles/proxmox_hypervisor/tasks/chrony.yml b/roles/proxmox_hypervisor/tasks/chrony.yml new file mode 100644 index 0000000..ed9b0ce --- /dev/null +++ b/roles/proxmox_hypervisor/tasks/chrony.yml @@ -0,0 +1,11 @@ +- name: generate chrony.conf + template: + src: etc/chrony/chrony.conf.j2 + dest: /etc/chrony/chrony.conf + notify: restart chrony + +- name: start chrony + systemd: + name: chronyd + enabled: yes + state: started diff --git a/roles/proxmox_hypervisor/tasks/main.yml b/roles/proxmox_hypervisor/tasks/main.yml new file mode 100644 index 0000000..6495e74 --- /dev/null +++ b/roles/proxmox_hypervisor/tasks/main.yml @@ -0,0 +1,31 @@ +- name: configure NTP + import_tasks: chrony.yml + tags: ntp,chrony + +- name: configure postfix + import_tasks: postfix.yml + tags: mail,postfix + +- name: configure rsyslog + import_tasks: rsyslog.yml + tags: syslog,rsyslog + +- name: configure sudo + import_tasks: sudo.yml + tags: sudo,sudoers + +- name: configure unattended upgrades + import_tasks: unattended_upgrades.yml + tags: apt,packages + +- name: configure ZFS + import_tasks: zfs.yml + tags: zfs + +- name: configure proxmox VE + import_tasks: pve.yml + tags: pve + +- name: configure nagios plugins + import_tasks: nagios.yml + tags: nagios diff --git a/roles/proxmox_hypervisor/tasks/nagios.yml b/roles/proxmox_hypervisor/tasks/nagios.yml new file mode 100644 index 0000000..b42317d --- /dev/null +++ b/roles/proxmox_hypervisor/tasks/nagios.yml @@ -0,0 +1,68 @@ +- name: install packages + apt: + name: '{{ proxmox_nagios_packages }}' + state: present + +- name: create nagios user + user: + name: nagios + comment: Nagios Pseudo-User + system: yes + shell: /bin/bash + home: '{{ proxmox_nagios_home }}' + create_home: yes + state: present + +- name: add nagios ssh key + authorized_key: + user: nagios + key: '{{ proxmox_nagios_ssh_pubkey }}' + state: present + +- name: copy custom nagios scripts + copy: + src: '{{ item.src }}' + dest: '{{ proxmox_nagios_plugin_dir }}/{{ item.path }}' + mode: 0555 + loop: "{{ lookup('filetree', proxmox_nagios_plugin_dir[1:], wantlist=True) }}" + when: item.state == 'file' + +- name: generate sudo rules + template: + src: etc/sudoers.d/nagios.j2 + dest: /etc/sudoers.d/nagios + mode: 0400 + +- name: set PATH for nagios user + copy: + content: export PATH=/sbin:/bin:/usr/sbin:/usr/bin:{{ proxmox_nagios_plugin_dir }} + dest: '{{ proxmox_nagios_home }}/.bashrc' + owner: '{{ proxmox_nagios_user }}' + group: '{{ proxmox_nagios_user }}' + mode: 0644 + +- name: stop snmpd + systemd: + name: snmpd + state: stopped + +- name: generate snmpd.conf + template: + 
src: etc/snmp/snmpd.conf.j2
+    dest: /etc/snmp/snmpd.conf
+    mode: 0600
+
+- name: add snmpv3 users
+  lineinfile:
+    path: /var/lib/snmp/snmpd.conf
+    line: 'createUser {{ item.name }} SHA "{{ item.auth_pass }}" AES "{{ item.priv_pass }}"'
+    insertafter: EOF
+  loop: '{{ snmp_v3_users }}'
+  loop_control:
+    label: '{{ item.name }}'
+
+- name: enable and start snmpd
+  systemd:
+    name: snmpd
+    enabled: yes
+    state: started
diff --git a/roles/proxmox_hypervisor/tasks/postfix.yml b/roles/proxmox_hypervisor/tasks/postfix.yml
new file mode 100644
index 0000000..2cb3fb2
--- /dev/null
+++ b/roles/proxmox_hypervisor/tasks/postfix.yml
@@ -0,0 +1,18 @@
+- name: install postfix
+  apt:
+    name:
+      - postfix
+      - bsd-mailx
+    state: present
+
+- name: generate postfix configuration
+  template:
+    src: etc/postfix/main.cf.j2
+    dest: /etc/postfix/main.cf
+  notify: restart postfix
+
+- name: enable postfix
+  systemd:
+    name: postfix
+    enabled: yes
+    state: started
diff --git a/roles/proxmox_hypervisor/tasks/pve.yml b/roles/proxmox_hypervisor/tasks/pve.yml
new file mode 100644
index 0000000..e780bf6
--- /dev/null
+++ b/roles/proxmox_hypervisor/tasks/pve.yml
@@ -0,0 +1,58 @@
+- name: disable proxmox enterprise repository
+  apt_repository:
+    filename: pve-enterprise
+    repo: 'deb {{ proxmox_repo_url }} {{ ansible_distribution_release }} pve-enterprise'
+    state: absent
+    update_cache: no
+
+- name: enable proxmox community repository
+  apt_repository:
+    filename: pve-no-subscription
+    repo: 'deb {{ proxmox_repo_url }} {{ ansible_distribution_release }} pve-no-subscription'
+    state: present
+    update_cache: yes
+
+- name: enable snippets on local storage
+  lineinfile:
+    path: /etc/pve/storage.cfg
+    line: ' content iso,backup,snippets,vztmpl'
+    regexp: '^\s+content\s'
+    insertafter: '^dir: local$'
+    firstmatch: yes
+
+- name: create snippets directory
+  file:
+    path: '{{ proxmox_snippets_dir }}'
+    state: directory
+
+- name: generate custom snippets
+  template:
+    src: '{{ item.src }}'
+    dest: '{{ proxmox_snippets_dir }}/{{ item.path | splitext | first }}'
+  loop: "{{ lookup('filetree', '../templates/' ~ proxmox_snippets_dir[1:], wantlist=True) }}"
+  when: item.state == 'file'
+
+- name: add ansible API user
+  import_tasks: pve_api_user.yml
+
+- name: create kvm image directory
+  file:
+    path: '{{ proxmox_kvm_image_dir }}'
+    state: directory
+
+- name: get current VMIDs
+  shell: qm list | awk '{print $1}'
+  register: vmids
+  changed_when: false
+
+- name: install proxmoxer
+  apt:
+    name: python3-proxmoxer
+    state: present
+
+- name: create KVM templates
+  include_tasks: pve_kvm_template.yml
+  when: (image.vmid | string) not in vmids.stdout_lines
+  loop: '{{ proxmox_kvm_images }}'
+  loop_control:
+    loop_var: image
diff --git a/roles/proxmox_hypervisor/tasks/pve_api_user.yml b/roles/proxmox_hypervisor/tasks/pve_api_user.yml
new file mode 100644
index 0000000..f620a90
--- /dev/null
+++ b/roles/proxmox_hypervisor/tasks/pve_api_user.yml
@@ -0,0 +1,21 @@
+- name: create unix account
+  user:
+    name: "{{ proxmox_api_user | replace('@pam', '') }}"
+    shell: /sbin/nologin
+    password: '{{ proxmox_api_password | password_hash("sha512", proxmox_password_salt | default("")) }}'
+    state: present
+
+- name: check if user has PVE account
+  shell: pveum user list --noheader --noborder | cut -d ' ' -f1
+  changed_when: False
+  register: pve_users
+
+- name: create PVE account
+  block:
+    - name: create PVE user
+      command: pveum user add {{ proxmox_api_user }}
+
+    - name: set user ACLs
+      command: pveum acl modify / -user {{ proxmox_api_user }} 
-role PVEAdmin -propagate 1
+
+  when: proxmox_api_user not in pve_users.stdout_lines
diff --git a/roles/proxmox_hypervisor/tasks/pve_kvm_template.yml b/roles/proxmox_hypervisor/tasks/pve_kvm_template.yml
new file mode 100644
index 0000000..6f0dfac
--- /dev/null
+++ b/roles/proxmox_hypervisor/tasks/pve_kvm_template.yml
@@ -0,0 +1,32 @@
+- name: download the disk image
+  get_url:
+    url: '{{ image.url }}'
+    checksum: 'sha256:{{ image.sha256 }}'
+    dest: '{{ proxmox_kvm_image_dir }}/{{ image.name }}.{{ image.type | default("qcow2") }}'
+
+- name: create a new VM
+  command: >
+    qm create {{ image.vmid }}
+    --name {{ image.name }}
+    --ostype {{ image.ostype | default('l26') }}
+    --scsihw virtio-scsi-pci
+    --memory 2048
+    --net0 virtio,bridge={{ proxmox_bridge }}
+    --serial0 socket
+    --vga serial0
+    --scsi1 {{ proxmox_storage }}:cloudinit
+
+- name: import the disk image
+  command: >
+    qm importdisk {{ image.vmid }}
+    {{ proxmox_kvm_image_dir }}/{{ image.name }}.{{ image.type | default("qcow2") }}
+    {{ proxmox_storage }}
+
+- name: attach disk to VM
+  command: qm set {{ image.vmid }} --scsi0 {{ proxmox_storage }}:vm-{{ image.vmid }}-disk-0
+
+- name: set boot order
+  command: qm set {{ image.vmid }} --boot order=scsi0
+
+- name: convert VM to template
+  command: qm template {{ image.vmid }}
diff --git a/roles/proxmox_hypervisor/tasks/rsyslog.yml b/roles/proxmox_hypervisor/tasks/rsyslog.yml
new file mode 100644
index 0000000..bbd981c
--- /dev/null
+++ b/roles/proxmox_hypervisor/tasks/rsyslog.yml
@@ -0,0 +1,16 @@
+- name: install rsyslog
+  apt:
+    name: rsyslog
+    state: present
+
+- name: generate rsyslog configuration
+  template:
+    src: etc/rsyslog.d/forward.conf.j2
+    dest: /etc/rsyslog.d/forward.conf
+  notify: restart rsyslog
+
+- name: enable rsyslog
+  systemd:
+    name: rsyslog
+    enabled: yes
+    state: started
diff --git a/roles/proxmox_hypervisor/tasks/sudo.yml b/roles/proxmox_hypervisor/tasks/sudo.yml
new file mode 100644
index 0000000..7419bf0
--- /dev/null
+++ b/roles/proxmox_hypervisor/tasks/sudo.yml
@@ -0,0 +1,5 @@
+- name: generate sudoers file
+  template:
+    src: etc/sudoers.j2
+    dest: /etc/sudoers
+    mode: 0440
diff --git a/roles/proxmox_hypervisor/tasks/unattended_upgrades.yml b/roles/proxmox_hypervisor/tasks/unattended_upgrades.yml
new file mode 100644
index 0000000..9ce7e89
--- /dev/null
+++ b/roles/proxmox_hypervisor/tasks/unattended_upgrades.yml
@@ -0,0 +1,11 @@
+- name: install unattended-upgrades
+  apt:
+    name: unattended-upgrades
+
+- name: copy unattended-upgrades configuration
+  copy:
+    src: etc/apt/apt.conf.d/{{ item }}
+    dest: /etc/apt/apt.conf.d/{{ item }}
+  loop:
+    - 20auto-upgrades
+    - 50unattended-upgrades
diff --git a/roles/proxmox_hypervisor/tasks/zfs.yml b/roles/proxmox_hypervisor/tasks/zfs.yml
new file mode 100644
index 0000000..5a1587e
--- /dev/null
+++ b/roles/proxmox_hypervisor/tasks/zfs.yml
@@ -0,0 +1,34 @@
+- name: generate zed.rc
+  template:
+    src: etc/zfs/zed.d/zed.rc.j2
+    dest: /etc/zfs/zed.d/zed.rc
+  notify: restart zfs-zed
+
+- name: enable zfs event daemon
+  systemd:
+    name: zfs-zed
+    enabled: yes
+    state: started
+
+- name: create zfs systemd units
+  template:
+    src: etc/systemd/system/zfs-{{ item[0] }}@.{{ item[1] }}.j2
+    dest: /etc/systemd/system/zfs-{{ item[0] }}@.{{ item[1] }}
+  loop: "{{ ['scrub', 'trim'] | product(['service', 'timer']) }}"
+  register: zfs_units
+
+- name: reload systemd units
+  systemd:
+    daemon_reload: yes
+  when: zfs_units.changed
+
+- name: enable periodic trim and scrub for zpool
+  systemd:
+    name: zfs-{{ item }}@rpool.timer
+    state: 
started + enabled: yes + loop: + - trim + - scrub + loop_control: + label: zfs-{{ item }}@rpool.timer diff --git a/roles/proxmox_hypervisor/templates/etc/chrony/chrony.conf.j2 b/roles/proxmox_hypervisor/templates/etc/chrony/chrony.conf.j2 new file mode 100644 index 0000000..e1819d7 --- /dev/null +++ b/roles/proxmox_hypervisor/templates/etc/chrony/chrony.conf.j2 @@ -0,0 +1,10 @@ +{% for server in proxmox_ntp_servers %} +server {{ server }} iburst +{% endfor %} + +driftfile /var/lib/chrony/chrony.drift +makestep 1.0 3 +rtcsync +keyfile /etc/chrony/chrony.keys +leapsectz right/UTC +logdir /var/log/chrony diff --git a/roles/proxmox_hypervisor/templates/etc/postfix/main.cf.j2 b/roles/proxmox_hypervisor/templates/etc/postfix/main.cf.j2 new file mode 100644 index 0000000..76575e3 --- /dev/null +++ b/roles/proxmox_hypervisor/templates/etc/postfix/main.cf.j2 @@ -0,0 +1,19 @@ +compatibility_level = 2 + +myorigin = {{ proxmox_mail_origin }} + +# disable local delivery +biff = no +mydestination = + +inet_interfaces = loopback-only +inet_protocols = all +mynetworks_style = host + +relayhost = {{ proxmox_relayhost }} + +alias_database = hash:/etc/aliases + +smtputf8_enable = yes + +smtp_tls_security_level = may diff --git a/roles/proxmox_hypervisor/templates/etc/rsyslog.d/forward.conf.j2 b/roles/proxmox_hypervisor/templates/etc/rsyslog.d/forward.conf.j2 new file mode 100644 index 0000000..a0dd7f2 --- /dev/null +++ b/roles/proxmox_hypervisor/templates/etc/rsyslog.d/forward.conf.j2 @@ -0,0 +1,7 @@ +if prifilt("*.info") then { + action(type="omfwd" + target="{{ proxmox_syslog_host }}" + port="{{ proxmox_syslog_port }}" + protocol="{{ proxmox_syslog_proto }}" + ) +} diff --git a/roles/proxmox_hypervisor/templates/etc/snmp/snmpd.conf.j2 b/roles/proxmox_hypervisor/templates/etc/snmp/snmpd.conf.j2 new file mode 100644 index 0000000..ad04e59 --- /dev/null +++ b/roles/proxmox_hypervisor/templates/etc/snmp/snmpd.conf.j2 @@ -0,0 +1,10 @@ +syslocation {{ proxmox_snmp_location }} +syscontact {{ proxmox_snmp_contact }} + +sysServices 72 + +master agentx + +{% for user in snmp_v3_users %} +rouser {{ user.name }} +{% endfor %} diff --git a/roles/proxmox_hypervisor/templates/etc/sudoers.d/nagios.j2 b/roles/proxmox_hypervisor/templates/etc/sudoers.d/nagios.j2 new file mode 100644 index 0000000..1a1945e --- /dev/null +++ b/roles/proxmox_hypervisor/templates/etc/sudoers.d/nagios.j2 @@ -0,0 +1,3 @@ +{% for command in proxmox_nagios_sudo_whitelist %} +{{ proxmox_nagios_user }} ALL=(root) NOPASSWD: {{ command | replace(':', '\\:') }} +{% endfor %} diff --git a/roles/proxmox_hypervisor/templates/etc/sudoers.j2 b/roles/proxmox_hypervisor/templates/etc/sudoers.j2 new file mode 100644 index 0000000..2f14a77 --- /dev/null +++ b/roles/proxmox_hypervisor/templates/etc/sudoers.j2 @@ -0,0 +1,15 @@ +Defaults env_reset +Defaults secure_path="/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" + +Defaults mailto = "{{ proxmox_sudo_mailto }}" +Defaults mail_badpass +Defaults mail_no_host +Defaults mail_no_perms +Defaults mail_no_user + +root ALL=(ALL:ALL) ALL + +# Allow members of group sudo to execute any command +%sudo ALL=(ALL:ALL) ALL + +@includedir /etc/sudoers.d diff --git a/roles/proxmox_hypervisor/templates/etc/systemd/system/zfs-scrub@.service.j2 b/roles/proxmox_hypervisor/templates/etc/systemd/system/zfs-scrub@.service.j2 new file mode 100644 index 0000000..3dfb199 --- /dev/null +++ b/roles/proxmox_hypervisor/templates/etc/systemd/system/zfs-scrub@.service.j2 @@ -0,0 +1,11 @@ +[Unit] +Description=zpool scrub for %i + 
+[Service] +Nice=19 +IOSchedulingClass=idle +KillSignal=SIGINT +ExecStart=zpool scrub %i + +[Install] +WantedBy=multi-user.target diff --git a/roles/proxmox_hypervisor/templates/etc/systemd/system/zfs-scrub@.timer.j2 b/roles/proxmox_hypervisor/templates/etc/systemd/system/zfs-scrub@.timer.j2 new file mode 100644 index 0000000..efc33f0 --- /dev/null +++ b/roles/proxmox_hypervisor/templates/etc/systemd/system/zfs-scrub@.timer.j2 @@ -0,0 +1,10 @@ +[Unit] +Description=zpool scrub for %i on calendar interval + +[Timer] +OnCalendar={{ proxmox_zfs_scrub_on_calendar }} +AccuracySec=1h +Persistent=true + +[Install] +WantedBy=multi-user.target diff --git a/roles/proxmox_hypervisor/templates/etc/systemd/system/zfs-trim@.service.j2 b/roles/proxmox_hypervisor/templates/etc/systemd/system/zfs-trim@.service.j2 new file mode 100644 index 0000000..ef3ec43 --- /dev/null +++ b/roles/proxmox_hypervisor/templates/etc/systemd/system/zfs-trim@.service.j2 @@ -0,0 +1,11 @@ +[Unit] +Description=zpool trim for %i + +[Service] +Nice=19 +IOSchedulingClass=idle +KillSignal=SIGINT +ExecStart=zpool trim %i + +[Install] +WantedBy=multi-user.target diff --git a/roles/proxmox_hypervisor/templates/etc/systemd/system/zfs-trim@.timer.j2 b/roles/proxmox_hypervisor/templates/etc/systemd/system/zfs-trim@.timer.j2 new file mode 100644 index 0000000..2867d0d --- /dev/null +++ b/roles/proxmox_hypervisor/templates/etc/systemd/system/zfs-trim@.timer.j2 @@ -0,0 +1,10 @@ +[Unit] +Description=Zpool trim for %i on calendar interval + +[Timer] +OnCalendar={{ proxmox_zfs_trim_on_calendar }} +AccuracySec=1h +Persistent=true + +[Install] +WantedBy=multi-user.target diff --git a/roles/proxmox_hypervisor/templates/etc/zfs/zed.d/zed.rc.j2 b/roles/proxmox_hypervisor/templates/etc/zfs/zed.d/zed.rc.j2 new file mode 100644 index 0000000..3ad418a --- /dev/null +++ b/roles/proxmox_hypervisor/templates/etc/zfs/zed.d/zed.rc.j2 @@ -0,0 +1,7 @@ +ZED_EMAIL_ADDR="{{ proxmox_zed_email }}" +ZED_EMAIL_PROG="mail" +ZED_EMAIL_OPTS="-s '@SUBJECT@' @ADDRESS@" +ZED_NOTIFY_INTERVAL_SECS={{ proxmox_zed_notify_interval_sec }} +ZED_NOTIFY_VERBOSE={{ proxmox_zed_verbose | bool | int }} +ZED_USE_ENCLOSURE_LEDS=1 +ZED_SYSLOG_SUBCLASS_EXCLUDE="history_event" diff --git a/roles/proxmox_hypervisor/templates/var/lib/vz/snippets/userdata.yaml.j2 b/roles/proxmox_hypervisor/templates/var/lib/vz/snippets/userdata.yaml.j2 new file mode 100644 index 0000000..75283cf --- /dev/null +++ b/roles/proxmox_hypervisor/templates/var/lib/vz/snippets/userdata.yaml.j2 @@ -0,0 +1,17 @@ +#cloud-config +manage_etc_hosts: False +users: + - name: root + passwd: {{ root_password | password_hash("sha512", root_password_salt | default("")) }} + lock_passwd: False + ssh_authorized_keys: +{% for key in root_authorized_keys %} + - {{ key }} +{% endfor %} +chpasswd: + expire: False +disable_root: False +ssh_pwauth: False +package_update: False +package_upgrade: False +preserve_hostname: true diff --git a/roles/proxmox_hypervisor/vars/main.yml b/roles/proxmox_hypervisor/vars/main.yml new file mode 100644 index 0000000..6a31caa --- /dev/null +++ b/roles/proxmox_hypervisor/vars/main.yml @@ -0,0 +1,34 @@ +proxmox_repo_url: http://download.proxmox.com/debian/pve +proxmox_snippets_dir: /var/lib/vz/snippets +proxmox_kvm_image_dir: /usr/local/share/pve-kvm-images + +proxmox_kvm_images: + - name: rocky8.6 + url: https://download.rockylinux.org/pub/rocky/8/images/Rocky-8-GenericCloud-8.6.20220702.0.x86_64.qcow2 + sha256: 7b786a39eeb96e22dd85386377ff186737f6c1b9a5faa105b0a0a7a4895c29d0 + vmid: 9002 + + - name: 
rocky8.7 + url: https://dl.rockylinux.org/pub/rocky/8/images/x86_64/Rocky-8-GenericCloud-Base-8.7-20221130.0.x86_64.qcow2 + sha256: 02e5a7564c979bca08e86e4f5bfbdad9bafcf4154844f7d2a029ec3f3df0fbd9 + vmid: 9004 + + - name: rocky9.0 + url: https://download.rockylinux.org/pub/rocky/9/images/x86_64/Rocky-9-GenericCloud-9.0-20220830.0.x86_64.qcow2 + sha256: f02570e0ad3653df7f56baa8157739dbe92a003234acd5824dcf94d24694e20b + vmid: 9003 + + - name: rocky9.1 + url: https://dl.rockylinux.org/pub/rocky/9/images/x86_64/Rocky-9-GenericCloud-Base-9.1-20221130.0.x86_64.qcow2 + sha256: 4405926b4c84edf4a25a51d5ed36bffada04e5e143045c41c974a9a9d35937f1 + vmid: 9005 + +proxmox_nagios_user: nagios +proxmox_nagios_plugin_dir: /usr/lib/nagios/plugins +proxmox_nagios_home: /var/spool/nagios +proxmox_nagios_packages: + - monitoring-plugins + - snmpd + +proxmox_nagios_sudo_whitelist: + - /usr/bin/systemctl status -- * diff --git a/roles/proxmox_instance/defaults/main.yml b/roles/proxmox_instance/defaults/main.yml new file mode 100644 index 0000000..5af0126 --- /dev/null +++ b/roles/proxmox_instance/defaults/main.yml @@ -0,0 +1,31 @@ +proxmox_hostname: '{{ inventory_hostname }}' +proxmox_onboot: yes +proxmox_bridge: vmbr0 +proxmox_firewall: no +proxmox_storage: local-zfs + +proxmox_disk: 32 # GB +proxmox_memory: 4096 # MB +proxmox_cpu: host +proxmox_sockets: 1 +proxmox_cores: 2 +proxmox_bios: ovmf # 'ovmf' for UEFI, 'seabios' for BIOS + +proxmox_template: rocky9.1 +proxmox_password: '{{ root_password }}' +proxmox_pubkeys: '{{ root_authorized_keys }}' +proxmox_vlan: '{{ vlan.id }}' +proxmox_ip: '{{ ip }}' +proxmox_gateway: '{{ vlan.gateway }}' +proxmox_netmask: '{{ vlan.cidr | ansible.utils.ipaddr("prefix") }}' +proxmox_nameservers: '{{ vlan.dns_servers }}' +proxmox_searchdomain: '{{ domain }}' + +proxmox_discard: yes +proxmox_ssd: yes + +proxmox_kvm_vga: serial0 +proxmox_kvm_ciuser: root +proxmox_kvm_scsihw: virtio-scsi-pci +proxmox_kvm_guest_agent: yes +proxmox_userdata: local:snippets/userdata.yaml diff --git a/roles/proxmox_instance/tasks/main.yml b/roles/proxmox_instance/tasks/main.yml new file mode 100644 index 0000000..590f1df --- /dev/null +++ b/roles/proxmox_instance/tasks/main.yml @@ -0,0 +1,143 @@ +- name: clone proxmox template + proxmox_kvm: + node: '{{ proxmox_node }}' + api_host: localhost + api_user: '{{ proxmox_api_user }}' + api_password: '{{ proxmox_api_password }}' + name: '{{ proxmox_hostname }}' + storage: '{{ proxmox_storage }}' + clone: '{{ proxmox_template }}' + full: yes + format: unspecified + state: present + delegate_to: '{{ proxmox_api_host }}' + register: vm_clone + +- name: wait for PVE to settle + pause: + seconds: 5 + when: vm_clone.changed + +- name: get new vmid + shell: >- + qm list | awk -v name={{ proxmox_hostname }} '$2 == name { print $1; rc=1 } END { exit !rc }' + changed_when: False + register: proxmox_vmid + until: proxmox_vmid.stdout | int > 0 + retries: 5 + delay: 2 + delegate_to: '{{ proxmox_api_host }}' + +- name: update VM properties + proxmox_kvm: + node: '{{ proxmox_node }}' + api_host: localhost + api_user: '{{ proxmox_api_user }}' + api_password: '{{ proxmox_api_password }}' + bios: '{{ proxmox_bios }}' + efidisk0: + storage: '{{ proxmox_storage }}' + efitype: 4m + pre_enrolled_keys: 1 + format: unspecified + name: '{{ proxmox_hostname }}' + agent: '{{ proxmox_kvm_guest_agent }}' + storage: '{{ proxmox_storage }}' + onboot: '{{ proxmox_onboot }}' + cpu: '{{ proxmox_cpu }}' + sockets: '{{ proxmox_sockets }}' + cores: '{{ proxmox_cores }}' + vcpus: '{{ 
proxmox_sockets * proxmox_cores }}' + memory: '{{ proxmox_memory }}' + vga: '{{ proxmox_kvm_vga }}' + scsihw: '{{ proxmox_kvm_scsihw }}' + nameservers: "{{ proxmox_nameservers | join(',') }}" + searchdomains: '{{ proxmox_searchdomain }}' + sshkeys: "{{ proxmox_pubkeys | join('\n') }}" + ciuser: '{{ proxmox_kvm_ciuser }}' + cipassword: '{{ proxmox_password }}' + cicustom: 'user={{ proxmox_userdata }}' + ipconfig: + ipconfig0: 'ip={{ (proxmox_ip ~ "/" ~ proxmox_netmask) | ansible.utils.ipaddr("cidr") }},gw={{ proxmox_gateway }}' + update: yes + delegate_to: '{{ proxmox_api_host }}' + # The proxox_kvm module is not smart enough to report when the VM remains unchanged - sad! + changed_when: false + +- name: query the virtual NIC configuration + shell: > + qm config {{ proxmox_vmid.stdout }} + | awk '$1 == "net0:" {print $2}' + | sed -e 's/=/: /g' -e 's/,/\n/g' + register: qm_config_net0 + changed_when: False + delegate_to: '{{ proxmox_api_host }}' + +- name: convert NIC to YAML dictionary + set_fact: + vm_nic: '{{ qm_config_net0.stdout | from_yaml }}' + +- name: set the virtual NIC vlan tag + command: > + qm set {{ proxmox_vmid.stdout }} + -net0 virtio={{ vm_nic.virtio }},bridge={{ vm_nic.bridge }},firewall={{ proxmox_firewall | int }},tag={{ proxmox_vlan }} + when: (proxmox_vlan | int) != (vm_nic.tag | default(1) | int) + delegate_to: '{{ proxmox_api_host }}' + +- name: query the virtual disk configuration + shell: > + qm config {{ proxmox_vmid.stdout }} + | awk '$1 == "scsi0:" {print $2}' + | sed -e 's/[=:]/: /g' -e 's/,/\n/g' + register: qm_config_scsi0 + changed_when: False + delegate_to: '{{ proxmox_api_host }}' + +- name: convert disk to YAML dictionary + set_fact: + vm_disk: '{{ qm_config_scsi0.stdout | trim | from_yaml }}' + +- name: grow the virtual disk + command: qm resize {{ proxmox_vmid.stdout }} scsi0 {{ proxmox_disk ~ 'G' }} + when: (proxmox_disk ~ 'G') != vm_disk.size + delegate_to: '{{ proxmox_api_host }}' + +- name: set virtual disk properties + command: > + qm set {{ proxmox_vmid.stdout }} + -scsi0 {{ proxmox_storage }}:{{ vm_disk[proxmox_storage] }},discard={{ 'on' if proxmox_discard else 'off' }},ssd={{ proxmox_ssd | int }} + when: >- + vm_disk.discard is not defined + or vm_disk.discard != proxmox_discard + or vm_disk.ssd is not defined + or (vm_disk.ssd | int | bool) != proxmox_ssd + delegate_to: '{{ proxmox_api_host }}' + +- name: configure efidisk + block: + - name: query the efi disk configuration + shell: "qm config {{ proxmox_vmid.stdout }} | grep -q '^efidisk0:'" + register: qm_config_efidisk0 + changed_when: no + failed_when: no + delegate_to: '{{ proxmox_api_host }}' + + - name: create efidisk + command: qm set {{ proxmox_vmid.stdout }} -efidisk0 {{ proxmox_storage }}:1,efitype=4m,pre-enrolled-keys=1 + delegate_to: '{{ proxmox_api_host }}' + when: qm_config_efidisk0.rc != 0 + when: "proxmox_bios == 'ovmf'" + +- name: start the VM + proxmox_kvm: + node: '{{ proxmox_node }}' + api_host: localhost + api_user: '{{ proxmox_api_user }}' + api_password: '{{ proxmox_api_password }}' + vmid: '{{ proxmox_vmid.stdout }}' + state: started + delegate_to: '{{ proxmox_api_host }}' + +- name: wait for VM to become reachable + wait_for_connection: + timeout: 120 diff --git a/roles/psitransfer/defaults/main.yml b/roles/psitransfer/defaults/main.yml new file mode 100644 index 0000000..e43067f --- /dev/null +++ b/roles/psitransfer/defaults/main.yml @@ -0,0 +1,22 @@ +psitransfer_version: 2.1.2 +psitransfer_port: 8080 + +psitransfer_server_name: '{{ ansible_fqdn }}' 
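A note on the "query the virtual NIC configuration" step in the proxmox_instance tasks above: the awk/sed pipeline reshapes one line of qm config output into YAML so that from_yaml can load it as a dict. For a hypothetical VM (vmid 101, placeholder MAC) the transformation looks like this:

    qm config 101 | awk '$1 == "net0:" {print $2}'
    # virtio=AA:BB:CC:DD:EE:FF,bridge=vmbr0,firewall=0,tag=20

    qm config 101 | awk '$1 == "net0:" {print $2}' | sed -e 's/=/: /g' -e 's/,/\n/g'
    # virtio: AA:BB:CC:DD:EE:FF
    # bridge: vmbr0
    # firewall: 0
    # tag: 20

The second form is what vm_nic holds after from_yaml, so vm_nic.tag can be compared against proxmox_vlan and the NIC is only rewritten when the tag actually differs.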
+psitransfer_server_aliases: '{{ cnames }}' + +psitransfer_upload_cidrs: [] +psitransfer_admin_cidrs: [] + +psitransfer_retentions: + one-time: one time download + 3600: 1 hour + 86400: 1 day + 604800: 1 week + 2419200: 1 month + 4838400: 2 months + +psitransfer_default_retention: 604800 + +psitransfer_max_file_size: 1 GB +psitransfer_max_bucket_size: 5 GB +psitransfer_max_preview_size: 32 MB diff --git a/roles/psitransfer/handlers/main.yml b/roles/psitransfer/handlers/main.yml new file mode 100644 index 0000000..9c64c79 --- /dev/null +++ b/roles/psitransfer/handlers/main.yml @@ -0,0 +1,4 @@ +- name: restart psitransfer + systemd: + name: psitransfer + state: restarted diff --git a/roles/psitransfer/tasks/main.yml b/roles/psitransfer/tasks/main.yml new file mode 100644 index 0000000..8526465 --- /dev/null +++ b/roles/psitransfer/tasks/main.yml @@ -0,0 +1,76 @@ +- name: install nodejs + dnf: + name: nodejs + state: present + +- name: create local user + user: + name: '{{ psitransfer_user }}' + system: yes + home: '{{ psitransfer_home }}' + shell: /sbin/nologin + create_home: no + +- name: create directories + file: + path: '{{ item }}' + owner: '{{ psitransfer_user }}' + group: '{{ psitransfer_user }}' + mode: 0700 + state: directory + loop: + - '{{ psitransfer_home }}' + - '{{ psitransfer_install_dir }}' + - '{{ psitransfer_data_dir }}' + +- name: extract tarball + unarchive: + src: '{{ psitransfer_url }}' + remote_src: yes + dest: '{{ psitransfer_install_dir }}' + owner: '{{ psitransfer_user }}' + group: '{{ psitransfer_user }}' + extra_opts: + - '--strip-components=1' + +- name: install npm dependencies + npm: + path: '{{ psitransfer_install_dir }}' + production: yes + become: yes + become_user: '{{ psitransfer_user }}' + +- name: create systemd unit + template: + src: etc/systemd/system/psitransfer.service.j2 + dest: /etc/systemd/system/psitransfer.service + register: psitransfer_unit + notify: restart psitransfer + +- name: reload systemd daemons + systemd: + daemon_reload: yes + when: psitransfer_unit.changed + +- name: generate config file + template: + src: '{{ psitransfer_install_dir[1:] }}/config.production.js.j2' + dest: '{{ psitransfer_install_dir }}/config.production.js' + owner: '{{ psitransfer_user }}' + group: '{{ psitransfer_user }}' + mode: 0600 + notify: restart psitransfer + +- name: start psitransfer + systemd: + name: psitransfer + enabled: yes + state: started + +- name: set http_port_t selinux context on psitransfer port + seport: + ports: '{{ psitransfer_port }}' + proto: tcp + setype: http_port_t + state: present + tags: selinux diff --git a/roles/psitransfer/templates/etc/systemd/system/psitransfer.service.j2 b/roles/psitransfer/templates/etc/systemd/system/psitransfer.service.j2 new file mode 100644 index 0000000..6bb8c97 --- /dev/null +++ b/roles/psitransfer/templates/etc/systemd/system/psitransfer.service.j2 @@ -0,0 +1,36 @@ +[Unit] +Description=Psitransfer File Sharing +After=network.target +AssertPathExists={{ psitransfer_install_dir }} + +[Service] +Type=simple +Environment="PSITRANSFER_IFACE=127.0.0.1" +Environment="PSITRANSFER_PORT={{ psitransfer_port }}" +Environment=NODE_ENV=production +EnvironmentFile=-/etc/sysconfig/psitransfer +ExecStart=/usr/bin/node app.js +WorkingDirectory={{ psitransfer_install_dir }} +User={{ psitransfer_user }} +Group={{ psitransfer_user }} +Restart=on-failure + +# See https://www.freedesktop.org/software/systemd/man/systemd.exec.html +# for details +DevicePolicy=closed +NoNewPrivileges=yes +PrivateDevices=yes +PrivateTmp=yes 
+ProtectControlGroups=yes +ProtectKernelModules=yes +ProtectKernelTunables=yes +RestrictAddressFamilies=AF_UNIX AF_INET AF_INET6 +RestrictNamespaces=yes +RestrictRealtime=yes +SystemCallFilter=~@clock @debug @module @mount @obsolete @privileged @reboot @setuid @swap + +ProtectSystem=full +ProtectHome=true + +[Install] +WantedBy=multi-user.target diff --git a/roles/psitransfer/templates/var/lib/psitransfer/psitransfer/config.production.js.j2 b/roles/psitransfer/templates/var/lib/psitransfer/psitransfer/config.production.js.j2 new file mode 100644 index 0000000..c489f87 --- /dev/null +++ b/roles/psitransfer/templates/var/lib/psitransfer/psitransfer/config.production.js.j2 @@ -0,0 +1,12 @@ +module.exports = { + "uploadAppPath": "/upload", + "uploadDir": "{{ psitransfer_data_dir }}", + "retentions": {{ psitransfer_retentions | to_json }}, + "defaultRetention": "{{ psitransfer_default_retention }}", + "maxFileSize": {{ psitransfer_max_file_size | human_to_bytes }}, + "maxBucketSize": {{ psitransfer_max_bucket_size | human_to_bytes }}, + "maxPreviewSize": {{ psitransfer_max_preview_size | human_to_bytes }}, + {% if psitransfer_admin_password is defined %} + "adminPass": "{{ psitransfer_admin_password }}" + {% endif %} +}; diff --git a/roles/psitransfer/vars/main.yml b/roles/psitransfer/vars/main.yml new file mode 100644 index 0000000..d6007fa --- /dev/null +++ b/roles/psitransfer/vars/main.yml @@ -0,0 +1,33 @@ +psitransfer_home: /var/lib/psitransfer +psitransfer_install_dir: '{{ psitransfer_home }}/psitransfer' +psitransfer_data_dir: '{{ psitransfer_home }}/data' +psitransfer_user: psitransfer +psitransfer_url: https://github.com/psi-4ward/psitransfer/releases/download/v{{ psitransfer_version }}/psitransfer-v{{ psitransfer_version }}.tar.gz + +psitransfer_archive_shell: >- + TIMESTAMP=$(date +%Y%m%d%H%M%S); + tar czf "psitransfer-${TIMESTAMP}.tar.gz" + --transform "s|^\.|psitransfer-${TIMESTAMP}|" + -C {{ psitransfer_data_dir | quote }} . + +psitransfer_apache_config: | + {{ apache_proxy_config }} + ProxyPass / http://127.0.0.1:{{ psitransfer_port }}/ + ProxyPassReverse / http://127.0.0.1:{{ psitransfer_port }}/ + + RewriteEngine on + RewriteCond %{HTTP:Upgrade} websocket [NC] + RewriteCond %{HTTP:Connection} upgrade [NC] + RewriteRule ^/?(.*) "ws://127.0.0.1:{{ psitransfer_port }}/$1" [P,L] + + + {% for cidr in psitransfer_upload_cidrs %} + Require ip {{ cidr }} + {% endfor %} + + + + {% for cidr in psitransfer_admin_cidrs %} + Require ip {{ cidr }} + {% endfor %} + diff --git a/roles/pxe_server/README.txt b/roles/pxe_server/README.txt new file mode 100644 index 0000000..5775583 --- /dev/null +++ b/roles/pxe_server/README.txt @@ -0,0 +1,18 @@ +To generate the grub binaries: +------------------------------ + +Install the required packages: + + dnf install grub2 grub2-pc grub2-efi grub2-pc-modules grub2-efi-x64-modules grub2-efi-aa64-modules + +Generate the images: + + # location of the grub.cfg files within the tftp root + PREFIX=/grub + COMMON_MODULES="normal linux echo http tftp reboot configfile" + + # Last arguments are the modules to "statically link" into the grub image. + # I'd rather not maintain a bunch of .mod files within the tftp directory. 
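The module names passed to grub2-mkimage below come from the grub2-*-modules packages installed above; the available .mod files for each target can be listed first, for example:

    ls /usr/lib/grub/x86_64-efi/*.mod
    ls /usr/lib/grub/i386-pc/*.mod

The -p value is baked into each image as its prefix, so at boot GRUB looks for $PREFIX/grub.cfg relative to the root of the server it network-booted from.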
+ grub2-mkimage --format=x86_64-efi --output=bootx64.efi -p $PREFIX $COMMON_MODULES efinet bsd + grub2-mkimage --format=arm64-efi --output=bootaa64.efi -p $PREFIX $COMMON_MODULES efinet + grub2-mkimage --format=i386-pc-pxe --output=booti386 -p $PREFIX $COMMON_MODULES pxe bsd diff --git a/roles/pxe_server/defaults/main.yml b/roles/pxe_server/defaults/main.yml new file mode 100644 index 0000000..ce5f7dc --- /dev/null +++ b/roles/pxe_server/defaults/main.yml @@ -0,0 +1,4 @@ +pxe_root: /tftpboot +pxe_http_port: 80 +pxe_grub_prefix: grub +locale: en_US.UTF-8 diff --git a/roles/pxe_server/tasks/extract_iso.yml b/roles/pxe_server/tasks/extract_iso.yml new file mode 100644 index 0000000..754f9ec --- /dev/null +++ b/roles/pxe_server/tasks/extract_iso.yml @@ -0,0 +1,16 @@ +- name: create directories + file: + path: '{{ pxe_root }}/{{ item.name }}/{{ item.version }}/{{ item.arch }}' + state: directory + recurse: yes + +- name: download iso + get_url: + url: '{{ item.url }}' + checksum: sha256:{{ item.sha256 }} + dest: '{{ pxe_root }}/{{ item.name }}/{{ item.version }}/{{ item.arch }}/{{ item.name }}-{{ item.version }}-{{ item.arch }}.iso' + +- name: extract iso with bsdtar + command: >- + bsdtar -C '{{ pxe_root }}/{{ item.name }}/{{ item.version }}/{{ item.arch }}' + -xf '{{ pxe_root }}/{{ item.name }}/{{ item.version }}/{{ item.arch }}/{{ item.name }}-{{ item.version }}-{{ item.arch }}.iso' diff --git a/roles/pxe_server/tasks/main.yml b/roles/pxe_server/tasks/main.yml new file mode 100644 index 0000000..9bd7b30 --- /dev/null +++ b/roles/pxe_server/tasks/main.yml @@ -0,0 +1,39 @@ +- name: prepare boot images + include_tasks: extract_iso.yml + loop: '{{ pxe_images }}' + +- name: create grub directories + file: + path: '{{ pxe_root }}/{{ item }}' + state: directory + loop: + - '{{ pxe_grub_prefix }}' + - kickstart + +- name: generate menuentries + template: + src: grub/menuentry-{{ image.os }}.cfg.j2 + dest: '{{ pxe_root }}/{{ pxe_grub_prefix }}/{{ image.name }}-{{ image.version }}-{{ image.arch }}.cfg' + loop: '{{ pxe_images }}' + loop_control: + loop_var: image + +- name: generate kickstart files + template: + src: kickstart/{{ item }}.j2 + dest: '{{ pxe_root }}/kickstart/{{ item }}' + loop: '{{ pxe_images | selectattr("kickstart", "defined") | map(attribute="kickstart") | flatten | map(attribute="name") | unique }}' + +- name: generate kickstart menuentries + template: + src: grub/menuentry-{{ image.os }}.cfg.j2 + dest: '{{ pxe_root }}/{{ pxe_grub_prefix }}/{{ image.name }}-{{ image.version }}-{{ image.arch }}-{{ kickstart.name | splitext | first }}.cfg' + vars: + image: '{{ item.0 }}' + kickstart: '{{ item.1 }}' + loop: '{{ pxe_images | subelements("kickstart", { "skip_missing": true }) }}' + +- name: generate grub config + template: + src: grub/grub.cfg.j2 + dest: '{{ pxe_root }}/{{ pxe_grub_prefix }}/grub.cfg' diff --git a/roles/pxe_server/templates/grub/grub.cfg.j2 b/roles/pxe_server/templates/grub/grub.cfg.j2 new file mode 100644 index 0000000..ae2d7cb --- /dev/null +++ b/roles/pxe_server/templates/grub/grub.cfg.j2 @@ -0,0 +1,22 @@ +set timeout=-1 + +if [ "$grub_cpu" = "x86_64" -a "$grub_platform" = "efi" ]; then + set linux=linuxefi + set initrd=initrdefi + export linux + export initrd +fi + +{% for image in pxe_images %} +if [ "$grub_cpu" = "{{ image.arch }}" ]; then + menuentry "{{ image.description }} {{ image.version }}" { + configfile "$prefix/{{ image.name }}-{{ image.version }}-{{image.arch }}.cfg" + } + + {% for kickstart in image.kickstart | default([]) %} + menuentry "{{ 
image.description }} {{ image.version }}: {{ kickstart.description }}" { + configfile "$prefix/{{ image.name }}-{{ image.version }}-{{image.arch }}-{{ kickstart.name | splitext | first }}.cfg" + } + {% endfor %} +fi +{% endfor %} diff --git a/roles/pxe_server/templates/grub/menuentry-redhat.cfg.j2 b/roles/pxe_server/templates/grub/menuentry-redhat.cfg.j2 new file mode 100644 index 0000000..f7dc2ac --- /dev/null +++ b/roles/pxe_server/templates/grub/menuentry-redhat.cfg.j2 @@ -0,0 +1,18 @@ +echo "{{ image.description }} {{ image.version }} ({{ image.arch }})" +{% if kickstart is defined %} +echo "kickstart: {{ kickstart.name }}" +{% endif %} + +echo "loading kernel..." +linux (http,${net_default_server}:{{ pxe_http_port }})/{{ image.name }}/{{ image.version }}/{{ image.arch }}/{{ image.kernel }} \ + ip=dhcp \ + inst.repo=http://${net_default_server}:{{ pxe_http_port }}/{{ image.name }}/{{ image.version }}/{{ image.arch }}/ {%- if kickstart is defined %} \ + inst.ks=http://${net_default_server}:{{ pxe_http_port }}/kickstart/{{ kickstart.name }} + {%- endif %} + + +echo "loading initrd..." +initrd (http,${net_default_server}:{{ pxe_http_port }})/{{ image.name }}/{{ image.version }}/{{ image.arch }}/{{ image.initrd }} + +echo "booting linux..." +boot diff --git a/roles/pxe_server/templates/kickstart/rocky8-ks.cfg.j2 b/roles/pxe_server/templates/kickstart/rocky8-ks.cfg.j2 new file mode 100644 index 0000000..ddbb0f0 --- /dev/null +++ b/roles/pxe_server/templates/kickstart/rocky8-ks.cfg.j2 @@ -0,0 +1,89 @@ +%pre --interpreter=/bin/bash +set -Eeu -o pipefail + +# get the primary interface name +interface=$(ip route list default | cut -d' ' -f5) + +# parse DHCP lease info +declare -A dhcp +while IFS= read -r line; do + dhcp["${line%% =*}"]=${line#*= } +done <<< $(nmcli --terse --fields dhcp4 device show "$interface" | cut -d: -f2-) + +# configure interface for DHCP +printf 'network --bootproto=dhcp --device=%q --hostname=%q --onboot=yes --noipv6\n' \ + "$interface" \ + "${dhcp[host_name]:-rocky-kickstart}" \ + > /tmp/network.ks + +# if ntp-server was specified by DHCP server, use it +if [ -n "${dhcp[ntp_servers]:-}" ]; then + printf 'timezone %q --utc --ntpservers=%q\n' \ + {{ timezone | quote }} \ + "${dhcp[ntp_servers]}" \ + > /tmp/timezone.ks +else + printf 'timezone %q --utc\n' {{ timezone | quote }} > /tmp/timezone.ks +fi +%end + + +# installer configuration +cmdline +eula --agreed +reboot + + +# system configuration +firstboot --disabled +firewall --disabled +keyboard --vckeymap=us +lang {{ locale }} +rootpw --iscrypted {{ root_password | password_hash("sha512", root_password_salt | default("")) }} +selinux --disabled +skipx + +{% for pubkey in root_authorized_keys %} +sshkey --username=root "{{ pubkey }}" +{% endfor %} + + +# network +%include /tmp/network.ks + + +# timezone +%include /tmp/timezone.ks + + +# storage +autopart --type=lvm --fstype=xfs --nohome +bootloader --boot-drive=vda --location=mbr --timeout=3 +clearpart --drives=vda --all --initlabel +zerombr + + +# packages +%packages +@^minimal-environment +-plymouth +-iwl100-firmware +-iwl1000-firmware +-iwl105-firmware +-iwl135-firmware +-iwl2000-firmware +-iwl2030-firmware +-iwl3160-firmware +-iwl5000-firmware +-iwl5150-firmware +-iwl6000-firmware +-iwl6000g2a-firmware +-iwl6050-firmware +-iwl7260-firmware +%end + + +# disable kernel crashdumps +%addon com_redhat_kdump --disable + +%end diff --git a/roles/pxe_server/vars/main.yml b/roles/pxe_server/vars/main.yml new file mode 100644 index 0000000..8cc20bd --- /dev/null +++ 
b/roles/pxe_server/vars/main.yml @@ -0,0 +1,23 @@ +pxe_images: + - name: rocky + description: Rocky Linux + version: 8.6 + arch: x86_64 + os: redhat + kernel: images/pxeboot/vmlinuz + initrd: images/pxeboot/initrd.img + url: https://download.rockylinux.org/pub/rocky/8/isos/x86_64/Rocky-8.6-x86_64-minimal.iso + sha256: a9ece0e810275e881abfd66bb0e59ac05d567a5ec0bc2f108b9a3e90bef5bf94 + kickstart: + - name: rocky8-ks.cfg + description: Unattended Install + + - name: rocky + description: Rocky Linux + version: 9.0 + arch: x86_64 + os: redhat + kernel: images/pxeboot/vmlinuz + initrd: images/pxeboot/initrd.img + url: http://download.rockylinux.org/pub/rocky/9/isos/x86_64/Rocky-9.0-20220805.0-x86_64-minimal.iso + sha256: b16bc85f4fd14facf3174cd0cf8434ee048d81e5470292f3e1cfff47af2463b7 diff --git a/roles/qemu_guest_agent/tasks/main.yml b/roles/qemu_guest_agent/tasks/main.yml new file mode 100644 index 0000000..b320e86 --- /dev/null +++ b/roles/qemu_guest_agent/tasks/main.yml @@ -0,0 +1,10 @@ +- name: install QEMU guest agent + package: + name: qemu-guest-agent + state: present + +- name: enable QEMU guest agent + systemd: + name: qemu-guest-agent + enabled: yes + state: started diff --git a/roles/redis/defaults/main.yml b/roles/redis/defaults/main.yml new file mode 100644 index 0000000..db46496 --- /dev/null +++ b/roles/redis/defaults/main.yml @@ -0,0 +1,3 @@ +redis_port: 6379 +redis_max_memory: 2gb +redis_max_memory_policy: allkeys-lru diff --git a/roles/redis/files/etc/systemd/system/redis@.service b/roles/redis/files/etc/systemd/system/redis@.service new file mode 100644 index 0000000..b119142 --- /dev/null +++ b/roles/redis/files/etc/systemd/system/redis@.service @@ -0,0 +1,18 @@ +[Unit] +Description=Redis persistent key-value database on port %I +After=network.target + +[Service] +ExecStart=/usr/bin/redis-server /etc/redis-%i.conf --supervised systemd +ExecStop=/usr/bin/redis-cli -h 127.0.0.1 -p %i shutdown +Type=notify +User=redis +Group=redis +RuntimeDirectory=redis-%i +RuntimeDirectoryMode=0755 + +[Install] +WantedBy=multi-user.target + +[Service] +LimitNOFILE=10240 diff --git a/roles/redis/tasks/main.yml b/roles/redis/tasks/main.yml new file mode 100644 index 0000000..ccd7c98 --- /dev/null +++ b/roles/redis/tasks/main.yml @@ -0,0 +1,43 @@ +- name: install redis + dnf: + name: redis + state: present + +- name: create systemd unit + copy: + src: etc/systemd/system/redis@.service + dest: /etc/systemd/system/redis@.service + register: redis_unit + +- name: reload systemd units + systemd: + daemon_reload: yes + when: redis_unit.changed + +- name: create redis data directory + file: + path: '{{ redis_home }}/{{ redis_port }}' + owner: redis + group: redis + mode: 0750 + state: directory + +- name: generate config file + template: + src: etc/redis.conf.j2 + dest: /etc/redis-{{ redis_port }}.conf + register: redis_config_file + +- name: set redis_port_t selinux context for redis port + seport: + ports: '{{ redis_port }}' + proto: tcp + setype: redis_port_t + state: present + tags: selinux + +- name: enable redis + systemd: + name: redis@{{ redis_port }} + enabled: yes + state: "{{ 'restarted' if redis_config_file.changed else 'started' }}" diff --git a/roles/redis/templates/etc/redis.conf.j2 b/roles/redis/templates/etc/redis.conf.j2 new file mode 100644 index 0000000..5f6a307 --- /dev/null +++ b/roles/redis/templates/etc/redis.conf.j2 @@ -0,0 +1,65 @@ +bind 127.0.0.1 ::1 +protected-mode yes +port {{ redis_port }} +logfile "" +maxmemory {{ redis_max_memory }} +maxmemory-policy {{ 
redis_max_memory_policy }} +dir {{ redis_home }}/{{ redis_port }} + +# the rest of this file is unchanged from the EL defaults: +tcp-backlog 511 +timeout 0 +tcp-keepalive 300 +daemonize no +supervised no +loglevel notice +databases 16 +always-show-logo no +save 900 1 +save 300 10 +save 60 10000 +stop-writes-on-bgsave-error yes +rdbcompression yes +rdbchecksum yes +dbfilename dump.rdb +replica-serve-stale-data yes +replica-read-only yes +repl-diskless-sync no +repl-diskless-sync-delay 5 +repl-disable-tcp-nodelay no +replica-priority 100 +lazyfree-lazy-eviction no +lazyfree-lazy-expire no +lazyfree-lazy-server-del no +replica-lazy-flush no +appendonly no +appendfilename "appendonly.aof" +appendfsync everysec +no-appendfsync-on-rewrite no +auto-aof-rewrite-percentage 100 +auto-aof-rewrite-min-size 64mb +aof-load-truncated yes +aof-use-rdb-preamble yes +lua-time-limit 5000 +slowlog-log-slower-than 10000 +slowlog-max-len 128 +latency-monitor-threshold 0 +notify-keyspace-events "" +hash-max-ziplist-entries 512 +hash-max-ziplist-value 64 +list-max-ziplist-size -2 +list-compress-depth 0 +set-max-intset-entries 512 +zset-max-ziplist-entries 128 +zset-max-ziplist-value 64 +hll-sparse-max-bytes 3000 +stream-node-max-bytes 4096 +stream-node-max-entries 100 +activerehashing yes +client-output-buffer-limit normal 0 0 0 +client-output-buffer-limit replica 256mb 64mb 60 +client-output-buffer-limit pubsub 32mb 8mb 60 +hz 10 +dynamic-hz yes +aof-rewrite-incremental-fsync yes +rdb-save-incremental-fsync yes diff --git a/roles/redis/vars/main.yml b/roles/redis/vars/main.yml new file mode 100644 index 0000000..4659127 --- /dev/null +++ b/roles/redis/vars/main.yml @@ -0,0 +1 @@ +redis_home: /var/lib/redis diff --git a/roles/root_authorized_keys/defaults/main.yml b/roles/root_authorized_keys/defaults/main.yml new file mode 100644 index 0000000..f90c23c --- /dev/null +++ b/roles/root_authorized_keys/defaults/main.yml @@ -0,0 +1 @@ +root_authorized_keys: [] diff --git a/roles/root_authorized_keys/tasks/main.yml b/roles/root_authorized_keys/tasks/main.yml new file mode 100644 index 0000000..8f1618b --- /dev/null +++ b/roles/root_authorized_keys/tasks/main.yml @@ -0,0 +1,5 @@ +- name: set root authorized_keys + authorized_key: + user: root + exclusive: yes + key: "{{ root_authorized_keys | join('\n') }}" diff --git a/roles/root_password/defaults/main.yml b/roles/root_password/defaults/main.yml new file mode 100644 index 0000000..23f5768 --- /dev/null +++ b/roles/root_password/defaults/main.yml @@ -0,0 +1,2 @@ +root_password: changeme +root_password_salt: '' diff --git a/roles/root_password/tasks/main.yml b/roles/root_password/tasks/main.yml new file mode 100644 index 0000000..cc4fe24 --- /dev/null +++ b/roles/root_password/tasks/main.yml @@ -0,0 +1,4 @@ +- name: set root password + user: + name: root + password: '{{ root_password | password_hash("sha512", root_password_salt) }}' diff --git a/roles/rspamd/defaults/main.yml b/roles/rspamd/defaults/main.yml new file mode 100644 index 0000000..9caad05 --- /dev/null +++ b/roles/rspamd/defaults/main.yml @@ -0,0 +1,12 @@ +rspamd_milter_port: 11332 +rspamd_milter_process_count: '{{ ansible_processor_vcpus }}' +rspamd_controller_port: 11334 +rspamd_redis_port: 6379 +rspamd_redis_bayes_port: 6380 +rspamd_redis_max_memory: 512mb +rspamd_admin_group: role-rspamd-admin + +rspamd_dkim_keys: {} +rspamd_dkim_selector: dkim + +rspamd_domain_whitelist: [] diff --git a/roles/rspamd/handlers/main.yml b/roles/rspamd/handlers/main.yml new file mode 100644 index 0000000..a355c9e --- 
/dev/null +++ b/roles/rspamd/handlers/main.yml @@ -0,0 +1,4 @@ +- name: restart rspamd + systemd: + name: rspamd + state: restarted diff --git a/roles/rspamd/meta/main.yml b/roles/rspamd/meta/main.yml new file mode 100644 index 0000000..0bc5383 --- /dev/null +++ b/roles/rspamd/meta/main.yml @@ -0,0 +1,19 @@ +dependencies: + - role: yum + yum_repositories: + - epel + - rspamd + tags: yum + + - role: redis + redis_port: '{{ rspamd_redis_port }}' + vars: + redis_max_memory: '{{ rspamd_redis_max_memory }}' + tags: redis + + - role: redis + redis_port: '{{ rspamd_redis_bayes_port }}' + vars: + redis_max_memory: '{{ rspamd_redis_max_memory }}' + redis_max_memory_policy: volatile-ttl + tags: redis diff --git a/roles/rspamd/tasks/main.yml b/roles/rspamd/tasks/main.yml new file mode 100644 index 0000000..d9da674 --- /dev/null +++ b/roles/rspamd/tasks/main.yml @@ -0,0 +1,76 @@ +- name: install packages + dnf: + name: '{{ rspamd_packages }}' + state: present + +- name: generate config files + template: + src: '{{ item.src }}' + dest: /etc/rspamd/{{ item.path | splitext | first }} + loop: "{{ lookup('filetree', '../templates/etc/rspamd', wantlist=True) }}" + loop_control: + label: '{{ item.path }}' + when: item.state == 'file' + notify: restart rspamd + +- name: create dkim directory + file: + path: '{{ rspamd_data_dir }}/dkim' + state: directory + owner: root + group: '{{ rspamd_group }}' + mode: 0750 + +- name: generate dkim keys + copy: + content: '{{ item.value }}' + dest: '{{ rspamd_data_dir }}/dkim/{{ item.key }}.{{ rspamd_dkim_selector }}.key' + owner: root + group: '{{ rspamd_group }}' + mode: 0440 + loop: '{{ rspamd_dkim_keys | dict2items }}' + loop_control: + label: '{{ item.key }}' + +- name: generate domain whitelist + copy: + content: | + {% for domain in rspamd_domain_whitelist %} + {{ domain }} + {% endfor %} + dest: /etc/rspamd/maps.d/domain-whitelist.map + tags: whitelist + +- name: open firewall ports + firewalld: + port: '{{ item }}/tcp' + permanent: yes + immediate: yes + state: enabled + loop: + - '{{ rspamd_milter_port }}' + - '{{ rspamd_controller_port }}' + tags: firewalld + +- name: set http_port_t selinux context for http port + seport: + ports: '{{ rspamd_controller_port }}' + proto: tcp + setype: http_port_t + state: present + tags: selinux + +- name: enable rspamd + systemd: + name: rspamd + enabled: yes + state: started + +- name: create rspamd admin group + ipagroup: + ipaadmin_principal: '{{ ipa_user }}' + ipaadmin_password: '{{ ipa_pass }}' + name: '{{ rspamd_admin_group }}' + nonposix: yes + state: present + run_once: yes diff --git a/roles/rspamd/templates/etc/rspamd/local.d/classifier-bayes.conf.j2 b/roles/rspamd/templates/etc/rspamd/local.d/classifier-bayes.conf.j2 new file mode 100644 index 0000000..e40dd74 --- /dev/null +++ b/roles/rspamd/templates/etc/rspamd/local.d/classifier-bayes.conf.j2 @@ -0,0 +1,3 @@ +backend = "redis"; +servers = "localhost:{{ rspamd_redis_bayes_port }}"; +autolearn = true; diff --git a/roles/rspamd/templates/etc/rspamd/local.d/dkim_signing.conf.j2 b/roles/rspamd/templates/etc/rspamd/local.d/dkim_signing.conf.j2 new file mode 100644 index 0000000..4e04b54 --- /dev/null +++ b/roles/rspamd/templates/etc/rspamd/local.d/dkim_signing.conf.j2 @@ -0,0 +1,3 @@ +path = "{{ rspamd_data_dir }}/dkim/$domain.$selector.key"; +selector = "{{ rspamd_dkim_selector }}"; +allow_username_mismatch = true; diff --git a/roles/rspamd/templates/etc/rspamd/local.d/greylist.conf.j2 b/roles/rspamd/templates/etc/rspamd/local.d/greylist.conf.j2 new file mode 100644 
index 0000000..2c675a1 --- /dev/null +++ b/roles/rspamd/templates/etc/rspamd/local.d/greylist.conf.j2 @@ -0,0 +1 @@ +servers = "localhost:{{ rspamd_redis_port }}"; diff --git a/roles/rspamd/templates/etc/rspamd/local.d/logging.inc.j2 b/roles/rspamd/templates/etc/rspamd/local.d/logging.inc.j2 new file mode 100644 index 0000000..b2ff81c --- /dev/null +++ b/roles/rspamd/templates/etc/rspamd/local.d/logging.inc.j2 @@ -0,0 +1 @@ +type = console diff --git a/roles/rspamd/templates/etc/rspamd/local.d/multimap.conf.j2 b/roles/rspamd/templates/etc/rspamd/local.d/multimap.conf.j2 new file mode 100644 index 0000000..7247f93 --- /dev/null +++ b/roles/rspamd/templates/etc/rspamd/local.d/multimap.conf.j2 @@ -0,0 +1,9 @@ +sender_from_whitelist_domain { + type = "header"; + header = "from"; + filter = "email:domain"; + map = "file://$LOCAL_CONFDIR/maps.d/domain-whitelist.map"; + symbol = "SENDER_FROM_WHITELIST_DOMAIN"; + description = "Local sender domain whitelist"; + score = -6.0; +} diff --git a/roles/rspamd/templates/etc/rspamd/local.d/phishing.conf.j2 b/roles/rspamd/templates/etc/rspamd/local.d/phishing.conf.j2 new file mode 100644 index 0000000..caa3afe --- /dev/null +++ b/roles/rspamd/templates/etc/rspamd/local.d/phishing.conf.j2 @@ -0,0 +1 @@ +openphish_enabled = true; diff --git a/roles/rspamd/templates/etc/rspamd/local.d/redis.conf.j2 b/roles/rspamd/templates/etc/rspamd/local.d/redis.conf.j2 new file mode 100644 index 0000000..2c675a1 --- /dev/null +++ b/roles/rspamd/templates/etc/rspamd/local.d/redis.conf.j2 @@ -0,0 +1 @@ +servers = "localhost:{{ rspamd_redis_port }}"; diff --git a/roles/rspamd/templates/etc/rspamd/local.d/replies.conf.j2 b/roles/rspamd/templates/etc/rspamd/local.d/replies.conf.j2 new file mode 100644 index 0000000..470f484 --- /dev/null +++ b/roles/rspamd/templates/etc/rspamd/local.d/replies.conf.j2 @@ -0,0 +1 @@ +servers = "localhost:{{ rspamd_redis_port }}" diff --git a/roles/rspamd/templates/etc/rspamd/local.d/worker-controller.inc.j2 b/roles/rspamd/templates/etc/rspamd/local.d/worker-controller.inc.j2 new file mode 100644 index 0000000..e0bc633 --- /dev/null +++ b/roles/rspamd/templates/etc/rspamd/local.d/worker-controller.inc.j2 @@ -0,0 +1,11 @@ +bind_socket = 0.0.0.0:{{ rspamd_controller_port }} + +password = {{ rspamd_password_hash }} + +keypair { + algorithm = "curve25519"; + privkey = "{{ rspamd_privkey }}"; + type = "kex"; + encoding = "base32"; + pubkey = "{{ rspamd_pubkey }}"; +} diff --git a/roles/rspamd/templates/etc/rspamd/local.d/worker-normal.inc.j2 b/roles/rspamd/templates/etc/rspamd/local.d/worker-normal.inc.j2 new file mode 100644 index 0000000..47f94fb --- /dev/null +++ b/roles/rspamd/templates/etc/rspamd/local.d/worker-normal.inc.j2 @@ -0,0 +1 @@ +enabled = false diff --git a/roles/rspamd/templates/etc/rspamd/local.d/worker-proxy.inc.j2 b/roles/rspamd/templates/etc/rspamd/local.d/worker-proxy.inc.j2 new file mode 100644 index 0000000..bf499db --- /dev/null +++ b/roles/rspamd/templates/etc/rspamd/local.d/worker-proxy.inc.j2 @@ -0,0 +1,7 @@ +bind_socket = 0.0.0.0:{{ rspamd_milter_port }} + +count = {{ rspamd_milter_process_count }} + +upstream "local" { + self_scan = yes; +} diff --git a/roles/rspamd/vars/main.yml b/roles/rspamd/vars/main.yml new file mode 100644 index 0000000..01508d3 --- /dev/null +++ b/roles/rspamd/vars/main.yml @@ -0,0 +1,30 @@ +rspamd_packages: + - rspamd + +rspamd_user: _rspamd +rspamd_group: _rspamd + +rspamd_data_dir: /var/lib/rspamd + +rspamd_archive_shell: >- + echo save | redis-cli -p {{ rspamd_redis_port }}; + echo save | 
redis-cli -p {{ rspamd_redis_bayes_port }}; + TIMESTAMP=$(date +%Y%m%d%H%M%S); + tar czf "redis-${TIMESTAMP}.tar.gz" + --transform "s|^\.|redis-${TIMESTAMP}|" + -C {{ redis_home }} . + +rspamd_apache_config: | + {{ apache_proxy_vhost_config }} + ProxyAddHeaders off + ProxyPass / http://127.0.0.1:{{ rspamd_controller_port }}/ + ProxyPassReverse / http://127.0.0.1:{{ rspamd_controller_port }}/ + + + AuthName "FreeIPA Single Sign-On" + AuthType GSSAPI + GssapiLocalName On + {{ apache_gssapi_session_config }} + {{ apache_ldap_config }} + Require ldap-attribute memberof=cn={{ rspamd_admin_group }},{{ freeipa_group_basedn }} + diff --git a/roles/rsyslog_client/defaults/main.yml b/roles/rsyslog_client/defaults/main.yml new file mode 100644 index 0000000..9ba00aa --- /dev/null +++ b/roles/rsyslog_client/defaults/main.yml @@ -0,0 +1,7 @@ +rsyslog_target: '{{ syslog_host }}' +rsyslog_forward: yes +rsyslog_queue_max_disk_space: 250m +rsyslog_queue_size: 10000 +rsyslog_relp_port: 20514 +rsyslog_relp_tls_port: 10514 +rsyslog_tls: yes diff --git a/roles/rsyslog_client/handlers/main.yml b/roles/rsyslog_client/handlers/main.yml new file mode 100644 index 0000000..35e1f2d --- /dev/null +++ b/roles/rsyslog_client/handlers/main.yml @@ -0,0 +1,4 @@ +- name: restart rsyslog + systemd: + name: rsyslog + state: restarted diff --git a/roles/rsyslog_client/tasks/main.yml b/roles/rsyslog_client/tasks/main.yml new file mode 100644 index 0000000..c610d6d --- /dev/null +++ b/roles/rsyslog_client/tasks/main.yml @@ -0,0 +1,27 @@ +- name: install rsyslog + dnf: + name: '{{ rsyslog_packages }}' + state: present + +- name: request TLS certificate + include_role: + name: getcert_request + vars: + certificate_sans: ['{{ ansible_fqdn }}'] + certificate_service: syslog + certificate_path: '{{ rsyslog_certificate_path }}' + certificate_key_path: '{{ rsyslog_certificate_key_path }}' + certificate_hook: systemctl restart rsyslog + when: rsyslog_tls + +- name: generate rsyslog configuration + template: + src: etc/rsyslog.conf.j2 + dest: /etc/rsyslog.conf + notify: restart rsyslog + +- name: enable rsyslog + systemd: + name: rsyslog + enabled: yes + state: started diff --git a/roles/rsyslog_client/templates/etc/rsyslog.conf.j2 b/roles/rsyslog_client/templates/etc/rsyslog.conf.j2 new file mode 100644 index 0000000..83dc799 --- /dev/null +++ b/roles/rsyslog_client/templates/etc/rsyslog.conf.j2 @@ -0,0 +1,61 @@ +module(load="imklog") +module(load="imuxsock" SysSock.name="/run/systemd/journal/syslog") +module(load="imfile") +module(load="omrelp" tls.tlslib="openssl") + +global( + workDirectory="/var/lib/rsyslog" + parser.escapecontrolcharactertab="off" +) + +module( + load="builtin:omfile" + template="RSYSLOG_TraditionalFileFormat" + fileOwner="root" + fileGroup="root" + fileCreateMode="0600" + dirCreateMode="0700" +) + +include(file="/etc/rsyslog.d/*.conf" mode="optional") + +# if message didn't come from imfile, process as normal. 
+if ($!metadata!filename == '') then { + # EL defaults + *.info;mail.none;authpriv.none;cron.none /var/log/messages + authpriv.* /var/log/secure + mail.* -/var/log/maillog + cron.* /var/log/cron + *.emerg :omusrmsg:* + uucp,news.crit /var/log/spooler + local7.* /var/log/boot.log +} + +{% if rsyslog_forward %} +# forward to syslog server +if prifilt("*.info") then { + action(type="omrelp" + template="RSYSLOG_ForwardFormat" + target="{{ rsyslog_target }}" + {% if rsyslog_tls %} + port="{{ rsyslog_relp_tls_port }}" + tls="on" + tls.caCert="{{ rsyslog_certificate_ca_path }}" + tls.myCert="{{ rsyslog_certificate_path }}" + tls.myPrivKey="{{ rsyslog_certificate_key_path }}" + tls.authMode="name" + tls.permittedPeer="{{ rsyslog_target }}" + {% else %} + port="{{ rsyslog_relp_port }}" + {% endif %} + queue.type="LinkedList" + queue.size="{{ rsyslog_queue_size }}" + queue.filename="q_forward" + queue.saveOnShutdown="on" + queue.maxDiskSpace="{{ rsyslog_queue_max_disk_space }}" + action.resumeRetryCount="-1" + action.resumeInterval="10" + action.reportSuspension="on" + action.reportSuspensionContinuation="on") +} +{% endif %} diff --git a/roles/rsyslog_client/vars/main.yml b/roles/rsyslog_client/vars/main.yml new file mode 100644 index 0000000..d36a841 --- /dev/null +++ b/roles/rsyslog_client/vars/main.yml @@ -0,0 +1,8 @@ +rsyslog_packages: + - rsyslog + - rsyslog-relp + - rsyslog-openssl + +rsyslog_certificate_path: /etc/pki/rsyslog/syslog.pem +rsyslog_certificate_key_path: /etc/pki/rsyslog/syslog.key +rsyslog_certificate_ca_path: /etc/ipa/ca.crt diff --git a/roles/rsyslog_server/defaults/main.yml b/roles/rsyslog_server/defaults/main.yml new file mode 100644 index 0000000..7931580 --- /dev/null +++ b/roles/rsyslog_server/defaults/main.yml @@ -0,0 +1,14 @@ +rsyslog_owner: root +rsyslog_group: root +rsyslog_file_mode: '0640' +rsyslog_dir_mode: '0750' + +rsyslog_port: 514 +rsyslog_relp_port: 20514 +rsyslog_relp_tls_port: 10514 + +rsyslog_gzip_on_calendar: daily +rsyslog_gzip_days_ago: 7 + +rsyslog_permitted_peers: + - '*.{{ ansible_domain }}' diff --git a/roles/rsyslog_server/handlers/main.yml b/roles/rsyslog_server/handlers/main.yml new file mode 100644 index 0000000..fdad349 --- /dev/null +++ b/roles/rsyslog_server/handlers/main.yml @@ -0,0 +1,10 @@ +- name: restart rsyslog + systemd: + name: rsyslog + state: restarted + +- name: reload syslog-gzip timer + systemd: + name: syslog-gzip.timer + daemon-reload: yes + state: restarted diff --git a/roles/rsyslog_server/tasks/main.yml b/roles/rsyslog_server/tasks/main.yml new file mode 100644 index 0000000..2a77388 --- /dev/null +++ b/roles/rsyslog_server/tasks/main.yml @@ -0,0 +1,74 @@ +- name: install rsyslog + dnf: + name: '{{ rsyslog_packages }}' + state: present + +- name: request TLS certificate + include_role: + name: getcert_request + vars: + certificate_service: syslog + certificate_path: '{{ rsyslog_certificate_path }}' + certificate_key_path: '{{ rsyslog_certificate_key_path }}' + certificate_hook: systemctl restart rsyslog + +- name: generate config file + template: + src: etc/rsyslog.conf.j2 + dest: /etc/rsyslog.conf + notify: restart rsyslog + +- name: create syslog-gzip systemd timer + include_role: + name: systemd_timer + vars: + timer_name: syslog-gzip + timer_description: Compress old syslog files + timer_after: nss-user-lookup.target + timer_on_calendar: '{{ rsyslog_gzip_on_calendar }}' + timer_user: '{{ rsyslog_owner }}' + timer_group: '{{ rsyslog_group }}' + timer_exec: find {{ rsyslog_storage_dir }} -type f -mtime +{{ 
rsyslog_gzip_days_ago }} -not -name '*.gz' -exec gzip {} ; + +- name: create syslog-update-today-symlink timer + include_role: + name: systemd_timer + vars: + timer_name: syslog-update-today-symlink + timer_description: Update today symlink in syslog dir + timer_after: nss-user-lookup.target + timer_on_calendar: daily + timer_user: '{{ rsyslog_owner }}' + timer_group: '{{ rsyslog_group }}' + timer_shell: yes + timer_exec: ln -sfT "$(date +%Y/%m/%d)" {{ rsyslog_storage_dir }}/today + +- name: create remote log directory + file: + path: '{{ rsyslog_storage_dir }}' + state: directory + +- name: create today symlink + systemd: + name: syslog-update-today-symlink.service + state: started + changed_when: no + +- name: enable rsyslog + systemd: + name: rsyslog + enabled: yes + state: started + +- name: open firewall ports + firewalld: + port: '{{ item }}' + permanent: yes + immediate: yes + state: enabled + loop: + - '{{ rsyslog_port }}/tcp' + - '{{ rsyslog_port }}/udp' + - '{{ rsyslog_relp_port }}/tcp' + - '{{ rsyslog_relp_tls_port }}/tcp' + tags: firewalld diff --git a/roles/rsyslog_server/templates/etc/rsyslog.conf.j2 b/roles/rsyslog_server/templates/etc/rsyslog.conf.j2 new file mode 100644 index 0000000..174e966 --- /dev/null +++ b/roles/rsyslog_server/templates/etc/rsyslog.conf.j2 @@ -0,0 +1,97 @@ +module(load="imklog") +module(load="imuxsock" SysSock.name="/run/systemd/journal/syslog") +module(load="imudp") +module(load="imtcp") +module(load="imfile") +module(load="imrelp" tls.tlslib="openssl") + +global( + workDirectory="/var/lib/rsyslog" + parser.escapecontrolcharactertab="off" +) + +module(load="builtin:omfile" + template="RSYSLOG_TraditionalFileFormat" + dirCreateMode="{{ rsyslog_dir_mode }}" + dirOwner="{{ rsyslog_owner }}" + dirGroup="{{ rsyslog_group }}" + fileCreateMode="{{ rsyslog_file_mode }}" + fileOwner="{{ rsyslog_owner }}" + fileGroup="{{ rsyslog_group }}") + +include(file="/etc/rsyslog.d/*.conf" mode="optional") + +template(name="RemoteLogSavePath" type="list") { + constant(value="{{ rsyslog_storage_dir }}/") + property(name="timegenerated" dateFormat="year") constant(value="/") + property(name="timegenerated" dateFormat="month") constant(value="/") + property(name="timegenerated" dateFormat="day") constant(value="/") + property(name="fromhost" caseConversion="lower") constant(value="/") + property(name="$.filename" caseConversion="lower") +} + +template(name="HttpdAccessLog_FileFormat" type="string" + string="%HOSTNAME% %syslogtag%%msg:::sp-if-no-1st-sp%%msg:::drop-last-lf%\n" +) + +ruleset(name="RemoteLog") { + # default filename + set $.filename = "messages.log"; + + # drop any debug messages + if not prifilt("*.info") then { + stop + } + + # program-specific overrides + if $syslogtag == {{ (rsyslog_log_by_tag + rsyslog_access_log_by_tag) | to_json }} then { + if $syslogtag == {{ rsyslog_log_by_tag | to_json }} then { + set $.filename = $syslogtag & ".log"; + } else if prifilt("*.=info") then { + set $.filename = $syslogtag & "-access.log"; + } else { + set $.filename = $syslogtag & "-error.log"; + } + + action(type="omfile" + template="HttpdAccessLog_FileFormat" + dynaFile="RemoteLogSavePath" + dynaFileCacheSize="1024" + asyncWriting="on" + flushOnTXEnd="off" + flushInterval="1" + ioBufferSize="64k") + } else { + action(type="omfile" + template="RSYSLOG_FileFormat" + dynaFile="RemoteLogSavePath" + dynaFileCacheSize="1024" + asyncWriting="on" + flushOnTXEnd="off" + flushInterval="1" + ioBufferSize="64k") + } +} + +input(type="imtcp" port="{{ rsyslog_port }}" 
ruleset="RemoteLog") +input(type="imudp" port="{{ rsyslog_port }}" ruleset="RemoteLog") +input(type="imrelp" port="{{ rsyslog_relp_port }}" ruleset="RemoteLog") +input(type="imrelp" + port="{{ rsyslog_relp_tls_port }}" + tls="on" + tls.caCert="{{ rsyslog_certificate_ca_path }}" + tls.myCert="{{ rsyslog_certificate_path }}" + tls.myPrivKey="{{ rsyslog_certificate_key_path }}" + tls.authMode="name" + tls.permittedPeer=["{{ rsyslog_permitted_peers | join('", "') }}"] + ruleset="RemoteLog") + + +# EL defaults +*.info;mail.none;authpriv.none;cron.none /var/log/messages +authpriv.* /var/log/secure +mail.* -/var/log/maillog +cron.* /var/log/cron +*.emerg :omusrmsg:* +uucp,news.crit /var/log/spooler +local7.* /var/log/boot.log diff --git a/roles/rsyslog_server/vars/main.yml b/roles/rsyslog_server/vars/main.yml new file mode 100644 index 0000000..3cd223c --- /dev/null +++ b/roles/rsyslog_server/vars/main.yml @@ -0,0 +1,20 @@ +rsyslog_packages: + - rsyslog + - rsyslog-doc + - rsyslog-relp + - rsyslog-openssl + +rsyslog_log_by_tag: + - unifi + - airsonic + +rsyslog_access_log_by_tag: + - httpd + - nginx + - slapd + +rsyslog_certificate_path: /etc/pki/rsyslog/syslog.pem +rsyslog_certificate_key_path: /etc/pki/rsyslog/syslog.key +rsyslog_certificate_ca_path: /etc/ipa/ca.crt + +rsyslog_storage_dir: /var/log/remote diff --git a/roles/sabredav/defaults/main.yml b/roles/sabredav/defaults/main.yml new file mode 100644 index 0000000..87b98ee --- /dev/null +++ b/roles/sabredav/defaults/main.yml @@ -0,0 +1,7 @@ +sabredav_version: master +sabredav_user: s-sabredav +sabredav_db_name: sabredav +sabredav_db_host: '{{ postgresql_host }}' +sabredav_imip_from: calendar-noreply@{{ email_domain }} +sabredav_access_group: role-dav-access +sabredav_kerberized_cidrs: '{{ kerberized_cidrs }}' diff --git a/roles/sabredav/tasks/composer.yml b/roles/sabredav/tasks/composer.yml new file mode 100644 index 0000000..c3aaedd --- /dev/null +++ b/roles/sabredav/tasks/composer.yml @@ -0,0 +1,10 @@ +- name: download composer installer + get_url: + url: '{{ sabredav_composer_url }}' + dest: /tmp/composer-setup.php + +- name: install composer + command: >- + php /tmp/composer-setup.php + --install-dir=/usr/local/bin + --filename=composer diff --git a/roles/sabredav/tasks/database.yml b/roles/sabredav/tasks/database.yml new file mode 100644 index 0000000..0089788 --- /dev/null +++ b/roles/sabredav/tasks/database.yml @@ -0,0 +1,46 @@ +- name: create database + postgresql_db: + name: '{{ sabredav_db_name }}' + state: present + delegate_to: "{{ postgresql_inventory_host }}" + become: yes + become_user: postgres + +- name: create database user + postgresql_user: + name: '{{ sabredav_user }}' + db: '{{ sabredav_db_name }}' + priv: ALL + state: present + delegate_to: "{{ postgresql_inventory_host }}" + become: yes + become_user: postgres + +- name: check if database schema is initialized + postgresql_query: + login_user: '{{ sabredav_user }}' + login_host: '{{ sabredav_db_host }}' + db: '{{ sabredav_db_name }}' + query: SELECT 1 FROM calendars + become: yes + become_user: apache + environment: + GSS_USE_PROXY: 'yes' + register: sabredav_check_db + failed_when: no + +- name: initialize database schema + postgresql_query: + login_user: '{{ sabredav_user }}' + login_host: '{{ sabredav_db_host }}' + db: '{{ sabredav_db_name }}' + path_to_script: '{{ sabredav_home }}/pgsql.schema.sql' + as_single_query: yes + become: yes + become_user: apache + environment: + GSS_USE_PROXY: 'yes' + when: + - sabredav_check_db.msg is defined + - 
sabredav_check_db.msg is search('relation "calendars" does not exist') + diff --git a/roles/sabredav/tasks/freeipa.yml b/roles/sabredav/tasks/freeipa.yml new file mode 100644 index 0000000..d2c841e --- /dev/null +++ b/roles/sabredav/tasks/freeipa.yml @@ -0,0 +1,27 @@ +- name: create user + ipauser: + ipaadmin_principal: '{{ ipa_user }}' + ipaadmin_password: '{{ ipa_pass }}' + name: '{{ sabredav_user }}' + loginshell: /sbin/nologin + homedir: '{{ sabredav_home }}' + givenname: SabreDAV + sn: Service Account + state: present + run_once: yes + +- name: retrieve user keytab + include_role: + name: freeipa_keytab + vars: + keytab_principal: '{{ sabredav_user }}' + keytab_path: '{{ sabredav_keytab }}' + +- name: create access group + ipagroup: + ipaadmin_principal: '{{ ipa_user }}' + ipaadmin_password: '{{ ipa_pass }}' + name: '{{ sabredav_access_group }}' + nonposix: yes + state: present + run_once: yes diff --git a/roles/sabredav/tasks/main.yml b/roles/sabredav/tasks/main.yml new file mode 100644 index 0000000..36b8326 --- /dev/null +++ b/roles/sabredav/tasks/main.yml @@ -0,0 +1,77 @@ +- name: install packages + dnf: + name: '{{ sabredav_packages }}' + state: present + +- name: create webroot + file: + path: '{{ sabredav_home }}' + state: directory + +- name: clone git repository + git: + repo: '{{ sabredav_git_repo }}' + dest: '{{ sabredav_home }}' + version: '{{ sabredav_version }}' + +- name: set permissions on writeable directories + file: + path: '{{ sabredav_home }}/{{ item }}' + state: directory + mode: 0770 + owner: apache + group: apache + setype: httpd_sys_rw_content_t + loop: '{{ sabredav_writable_dirs }}' + +- name: set selinux context on writeable directories + sefcontext: + target: '{{ sabredav_home }}/{{ item }}(/.*)?' + setype: httpd_sys_rw_content_t + state: present + loop: '{{ sabredav_writable_dirs }}' + register: sabredav_writeable_sefcontext + tags: selinux + +- name: apply selinux context to writeable directories + command: 'restorecon -R {{ sabredav_home }}/{{ item }}' + when: sabredav_writeable_sefcontext.results[index].changed + loop: '{{ sabredav_writable_dirs }}' + loop_control: + index_var: index + tags: selinux + +- import_tasks: freeipa.yml + tags: freeipa + +- name: configure gssproxy for kerberized postgres + include_role: + name: gssproxy_client + vars: + gssproxy_name: sabredav + gssproxy_section: service/php-fpm + gssproxy_client_keytab: '{{ sabredav_keytab }}' + gssproxy_cred_usage: initiate + gssproxy_euid: apache + +- name: check if composer is installed + stat: + path: /usr/local/bin/composer + register: stat_composer + +- name: install composer + include_tasks: composer.yml + when: not stat_composer.stat.exists + +- name: install dependencies using composer + composer: + command: install + working_dir: '{{ sabredav_home }}' + +- name: generate sabredav configuration + template: + src: '{{ sabredav_home[1:] }}/server.php.j2' + dest: '{{ sabredav_home }}/server.php' + +- import_tasks: database.yml + tags: database diff --git a/roles/sabredav/templates/var/www/sabredav/server.php.j2 b/roles/sabredav/templates/var/www/sabredav/server.php.j2 new file mode 100644 index 0000000..36bc973 --- /dev/null +++ b/roles/sabredav/templates/var/www/sabredav/server.php.j2 @@ -0,0 +1,61 @@ +setAttribute(PDO::ATTR_ERRMODE, PDO::ERRMODE_EXCEPTION); + +// autoloader +require_once 'vendor/autoload.php'; + +// freeipa +$ipa = new \FreeIPA\Connection(); +$allowedGroups = ['{{ sabredav_access_group }}']; + +// backends +$principalBackend = new \FreeIPA\PrincipalBackend($ipa, 
$allowedGroups); +$caldavBackend = new \Sabre\CalDAV\Backend\PDO($pdo); +$carddavBackend = new \Sabre\CardDAV\Backend\PDO($pdo); +$authBackend = new \FreeIPA\AuthBackend($ipa, $caldavBackend, $carddavBackend, $allowedGroups); +$lockBackend = new \Sabre\DAV\Locks\Backend\PDO($pdo); + +// directory structure +$server = new Sabre\DAV\Server([ + new \Sabre\CalDAV\Principal\Collection($principalBackend), + new \Sabre\CalDAV\CalendarRoot($principalBackend, $caldavBackend), + new \Sabre\CardDAV\AddressBookRoot($principalBackend, $carddavBackend), + new \Sabre\DAVACL\FS\HomeCollection($principalBackend, __DIR__.'/webdav') +]); + +// plugins +$server->addPlugin(new \Sabre\DAV\Auth\Plugin($authBackend,'SabreDAV')); +$server->addPlugin(new \Sabre\DAV\Browser\Plugin()); +$server->addPlugin(new \Sabre\DAV\Sync\Plugin()); +$server->addPlugin(new \Sabre\DAV\Sharing\Plugin()); + +$aclPlugin = new \Sabre\DAVACL\Plugin(); +$aclPlugin->hideNodesFromListings = true; +$server->addPlugin($aclPlugin); + +// webdav plugins +$server->addPlugin(new \Sabre\DAV\Locks\Plugin($lockBackend)); +$server->addPlugin(new \Sabre\DAV\Browser\GuessContentType()); +$server->addPlugin(new \Sabre\DAV\TemporaryFileFilterPlugin(__DIR__.'/tmpdata')); + +// caldav plugins +$server->addPlugin(new \Sabre\CalDAV\Plugin()); +$server->addPlugin(new \Sabre\CalDAV\Schedule\Plugin()); +$server->addPlugin(new \Sabre\CalDAV\Schedule\IMipPlugin('{{ sabredav_imip_from }}')); +$server->addPlugin(new \Sabre\CalDAV\Subscriptions\Plugin()); +$server->addPlugin(new \Sabre\CalDAV\Notifications\Plugin()); +$server->addPlugin(new \Sabre\CalDAV\SharingPlugin()); +$server->addPlugin(new \Sabre\CalDAV\ICSExportPlugin()); + +// carddav plugins +$server->addPlugin(new \Sabre\CardDAV\Plugin()); +$server->addPlugin(new \Sabre\CardDAV\VCFExportPlugin()); + +// lets goooooo +$server->exec(); diff --git a/roles/sabredav/vars/main.yml b/roles/sabredav/vars/main.yml new file mode 100644 index 0000000..6463d37 --- /dev/null +++ b/roles/sabredav/vars/main.yml @@ -0,0 +1,60 @@ +sabredav_packages: + - php + - php-json + - php-ldap + - php-mbstring + - php-opcache + - php-pdo + - php-pgsql + - php-pecl-zip + - php-xml + - python3-psycopg2 + - git + +sabredav_composer_url: https://getcomposer.org/installer + +sabredav_git_repo: https://github.com/sacredheartsc/sabredav-freeipa + +sabredav_home: /var/www/sabredav +sabredav_keytab: /var/lib/gssproxy/clients/{{ sabredav_user }}.keytab + +sabredav_writable_dirs: + - webdav + - tmpdata + +sabredav_php_environment: + GSS_USE_PROXY: 'yes' + +sabredav_php_flags: + output_buffering: no + always_populate_raw_post_data: no + mbstring.func_overload: no + +sabredav_archive_shell: >- + TIMESTAMP=$(date +%Y%m%d%H%M%S); + tar czf "webdav-${TIMESTAMP}.tar.gz" + --transform "s|^\.|webdav-${TIMESTAMP}|" + -C "{{ sabredav_home }}/webdav" . 
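The sabredav_archive_shell above follows the same pattern as the psitransfer and rspamd archive snippets: tar's --transform rewrites the leading "." so every entry lands under a single timestamped top-level directory instead of spilling into the current directory on restore. A hypothetical run at 2023-02-04 01:23:43 would list as (user and file names are illustrative):

    tar -tzf webdav-20230204012343.tar.gz | head -3
    # webdav-20230204012343/
    # webdav-20230204012343/alice/
    # webdav-20230204012343/alice/contacts.vcf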
+ +sabredav_apache_config: | + Redirect /.well-known/caldav /server.php + Redirect /.well-known/carddav /server.php + + RewriteEngine On + RewriteCond %{REQUEST_URI} !^/\.well-known/ + RewriteRule .* /server.php [E=HTTP_AUTHORIZATION:%{HTTP:Authorization},L] + + + AuthName "FreeIPA Single Sign-On" + + AuthType GSSAPI + GssapiLocalName On + {{ apache_gssapi_session_config }} + + + AuthType Basic + AuthBasicProvider ldap + + {{ apache_ldap_config }} + Require ldap-attribute memberof=cn={{ sabredav_access_group }},{{ freeipa_group_basedn }} + diff --git a/roles/selinux/defaults/main.yml b/roles/selinux/defaults/main.yml new file mode 100644 index 0000000..1969f11 --- /dev/null +++ b/roles/selinux/defaults/main.yml @@ -0,0 +1 @@ +selinux_enabled: yes diff --git a/roles/selinux/tasks/main.yml b/roles/selinux/tasks/main.yml new file mode 100644 index 0000000..38a1e83 --- /dev/null +++ b/roles/selinux/tasks/main.yml @@ -0,0 +1,22 @@ +- name: install packages + dnf: + name: '{{ selinux_packages }}' + state: present + +- name: start auditd + systemd: + name: auditd + enabled: yes + state: started + +- name: enable selinux + lineinfile: + path: /etc/selinux/config + regexp: ^SELINUX= + line: SELINUX={{ 'enforcing' if selinux_enabled else 'disabled' }} + state: present + register: selinux_config + +- name: reboot to apply selinux mode + reboot: + when: selinux_config.changed diff --git a/roles/selinux/vars/main.yml b/roles/selinux/vars/main.yml new file mode 100644 index 0000000..25515b7 --- /dev/null +++ b/roles/selinux/vars/main.yml @@ -0,0 +1,4 @@ +selinux_packages: + - selinux-policy-targeted + - audit + - setroubleshoot-server diff --git a/roles/selinux_policy/tasks/main.yml b/roles/selinux_policy/tasks/main.yml new file mode 100644 index 0000000..0ec008b --- /dev/null +++ b/roles/selinux_policy/tasks/main.yml @@ -0,0 +1,44 @@ +- name: create custom SELinux module directory + file: + path: '{{ selinux_policy_custom_dir }}' + state: directory + +- name: create SELinux type-enforcement file + copy: + content: | + module {{ selinux_policy_name }} {{ selinux_policy_version | default('1.0') }}; + + {{ selinux_policy_te }} + dest: '{{ selinux_policy_custom_dir }}/{{ selinux_policy_name }}.te' + register: selinux_te_file + +- name: check if SELinux policy is loaded + shell: semodule -l | grep -q {{ selinux_policy_name }} + changed_when: false + failed_when: false + register: se_policy_loaded + +- name: compile and load SELinux module + block: + - name: unload SELinux module + command: semodule -r {{ selinux_policy_name }} + when: se_policy_loaded.rc == 0 + + - name: compile SELinux module + command: checkmodule -M -m -o {{ selinux_policy_custom_dir ~ '/' ~ selinux_policy_name }}.mod {{ selinux_policy_custom_dir ~ '/' ~ selinux_policy_name }}.te + + - name: build SELinux policy package + command: semodule_package -o {{ selinux_policy_custom_dir ~ '/' ~ selinux_policy_name }}.pp -m {{ selinux_policy_custom_dir ~ '/' ~ selinux_policy_name }}.mod + + - name: load SELinux module + command: semodule -i {{ selinux_policy_custom_dir ~ '/' ~ selinux_policy_name }}.pp + + - name: clean up build artifacts + file: + path: '{{ selinux_policy_custom_dir }}/{{ selinux_policy_name }}.{{ item }}' + state: absent + loop: + - mod + - pp + + when: selinux_te_file.changed or se_policy_loaded.rc != 0 diff --git a/roles/selinux_policy/vars/main.yml b/roles/selinux_policy/vars/main.yml new file mode 100644 index 0000000..d6c8c33 --- /dev/null +++ b/roles/selinux_policy/vars/main.yml @@ -0,0 +1 @@ +selinux_policy_custom_dir: 
/etc/selinux/custom diff --git a/roles/snmp/defaults/main.yml b/roles/snmp/defaults/main.yml new file mode 100644 index 0000000..0569c3f --- /dev/null +++ b/roles/snmp/defaults/main.yml @@ -0,0 +1,9 @@ +snmp_location: unknown +snmp_contact: '{{ organization }} Sysadmins ' + +snmp_force_users: no + +snmp_v3_users: + - name: '{{ nagios_snmp_user }}' + auth_pass: '{{ nagios_snmp_auth_pass }}' + priv_pass: '{{ nagios_snmp_priv_pass }}' diff --git a/roles/snmp/handlers/main.yml b/roles/snmp/handlers/main.yml new file mode 100644 index 0000000..9c1d345 --- /dev/null +++ b/roles/snmp/handlers/main.yml @@ -0,0 +1,4 @@ +- name: restart snmpd + systemd: + name: snmpd + state: restarted diff --git a/roles/snmp/tasks/main.yml b/roles/snmp/tasks/main.yml new file mode 100644 index 0000000..e2ca90c --- /dev/null +++ b/roles/snmp/tasks/main.yml @@ -0,0 +1,51 @@ +- name: install packages + dnf: + name: '{{ snmp_packages }}' + state: present + +- name: generate config file + template: + src: etc/snmp/snmpd.conf.j2 + dest: /etc/snmp/snmpd.conf + mode: 0600 + notify: restart snmpd + +- name: open firewall ports + firewalld: + permanent: yes + immediate: yes + service: snmp + state: enabled + tags: firewalld + +- name: check if snmp users are defined + command: grep -q usmUser /var/lib/net-snmp/snmpd.conf + failed_when: no + changed_when: no + register: snmp_users_exist + +- name: add snmp users + block: + - name: stop snmpd + systemd: + name: snmpd + state: stopped + + - name: add snmpv3 users + lineinfile: + path: /var/lib/net-snmp/snmpd.conf + line: 'createUser {{ item.name }} SHA "{{ item.auth_pass }}" AES "{{ item.priv_pass }}"' + insertafter: EOF + create: yes + mode: 0600 + loop: '{{ snmp_v3_users }}' + loop_control: + label: '{{ item.name }}' + + - name: enable and start snmpd + systemd: + name: snmpd + enabled: yes + state: started + + when: snmp_users_exist.rc != 0 or snmp_force_users diff --git a/roles/snmp/templates/etc/snmp/snmpd.conf.j2 b/roles/snmp/templates/etc/snmp/snmpd.conf.j2 new file mode 100644 index 0000000..337e1c2 --- /dev/null +++ b/roles/snmp/templates/etc/snmp/snmpd.conf.j2 @@ -0,0 +1,8 @@ +syslocation {{ snmp_location }} +syscontact {{ snmp_contact }} + +dontLogTCPWrappersConnects yes + +{% for user in snmp_v3_users %} +rouser {{ user.name }} +{% endfor %} diff --git a/roles/snmp/vars/main.yml b/roles/snmp/vars/main.yml new file mode 100644 index 0000000..0f03433 --- /dev/null +++ b/roles/snmp/vars/main.yml @@ -0,0 +1,2 @@ +snmp_packages: + - net-snmp diff --git a/roles/solr/defaults/main.yml b/roles/solr/defaults/main.yml new file mode 100644 index 0000000..ffcc163 --- /dev/null +++ b/roles/solr/defaults/main.yml @@ -0,0 +1,5 @@ +solr_version: 9.1.1 +solr_lucene_version: 9.3.0 +solr_port: 8983 +solr_heap_size: 2g +solr_softcommit_ms: 60000 diff --git a/roles/solr/handlers/main.yml b/roles/solr/handlers/main.yml new file mode 100644 index 0000000..ca32ef7 --- /dev/null +++ b/roles/solr/handlers/main.yml @@ -0,0 +1,4 @@ +- name: restart solr + systemd: + name: solr + state: restarted diff --git a/roles/solr/tasks/main.yml b/roles/solr/tasks/main.yml new file mode 100644 index 0000000..0538a2a --- /dev/null +++ b/roles/solr/tasks/main.yml @@ -0,0 +1,77 @@ +- name: install java + dnf: + name: java-17-openjdk-headless + state: present + +- name: create installation directory + file: + path: '{{ solr_install_dir }}' + state: directory + +- name: unpack solr tarball + unarchive: + src: '{{ solr_url }}' + remote_src: yes + dest: '{{ solr_install_dir }}' + extra_opts: + - 
'--strip-components=1' + notify: restart solr + +- name: add local user + user: + name: solr + system: yes + home: '{{ solr_data_dir }}' + shell: /sbin/nologin + create_home: no + +- name: create data directory + file: + path: '{{ solr_data_dir }}' + state: directory + owner: solr + group: solr + mode: 0770 + +- name: create systemd unit + template: + src: etc/systemd/system/solr.service.j2 + dest: /etc/systemd/system/solr.service + register: solr_unit + +- name: reload systemd units + systemd: + daemon_reload: yes + when: solr_unit.changed + +- name: create config directory + file: + path: /etc/solr + state: directory + +- name: create EnvironmentFile + template: + src: etc/sysconfig/solr.j2 + dest: /etc/sysconfig/solr + notify: restart solr + +- name: create config files + template: + src: etc/solr/{{ item }}.j2 + dest: /etc/solr/{{ item }} + loop: + - log4j2.xml + - solrconfig.xml + notify: restart solr + +- name: copy default solr configuration + copy: + src: '{{ solr_install_dir }}/server/solr/solr.xml' + dest: '{{ solr_data_dir }}/solr.xml' + remote_src: yes + +- name: start solr + systemd: + name: solr + enabled: yes + state: started diff --git a/roles/solr/templates/etc/solr/log4j2.xml.j2 b/roles/solr/templates/etc/solr/log4j2.xml.j2 new file mode 100644 index 0000000..a5d0442 --- /dev/null +++ b/roles/solr/templates/etc/solr/log4j2.xml.j2 @@ -0,0 +1,18 @@ + + + + + + + {% raw %}%maxLen{%-5p %c %m%notEmpty{ =>%ex{short}}}{10240}%n{% endraw %} + + + + + + + + + + + diff --git a/roles/solr/templates/etc/solr/solrconfig.xml.j2 b/roles/solr/templates/etc/solr/solrconfig.xml.j2 new file mode 100644 index 0000000..0b5a602 --- /dev/null +++ b/roles/solr/templates/etc/solr/solrconfig.xml.j2 @@ -0,0 +1,280 @@ + + + {{ solr_lucene_version }} + + ${solr.data.dir:} + + + + ${solr.lock.type:native} + + + + + ${solr.ulog.dir:} + ${solr.ulog.numVersionBuckets:65536} + + + ${solr.autoCommit.maxTime:15000} + false + + + ${solr.autoSoftCommit.maxTime:-1} + + + + + ${solr.max.booleanClauses:1024} + + + + + + + + true + + 20 + + 200 + + + + + + + + + + + false + + + + + + + + + + + + + explicit + 10 + hdr + + + + + + explicit + json + true + + + + + + _text_ + + + + + + text_general + + + default + _text_ + solr.DirectSolrSpellChecker + internal + 0.5 + 2 + 1 + 5 + 4 + 0.01 + + + + + + default + on + true + 10 + 5 + 5 + true + true + 10 + 5 + + + spellcheck + + + + + + + + true + false + + + terms + + + + + + + + 100 + + + + + + 70 + 0.5 + [-\w ,/\n\"']{20,200} + + + + + + ]]> + ]]> + + + + + + + + + + + + + + + + + ,, + ,, + ,, + ,, + ,]]> + ]]> + + + + + + 10 + .,!? + + + + + + WORD + en + US + + + + + + + + + [^\w-\.] 
+ _ + + + + + + + yyyy-MM-dd['T'[HH:mm[:ss[.SSS]][z + yyyy-MM-dd['T'[HH:mm[:ss[,SSS]][z + yyyy-MM-dd HH:mm[:ss[.SSS]][z + yyyy-MM-dd HH:mm[:ss[,SSS]][z + [EEE, ]dd MMM yyyy HH:mm[:ss] z + EEEE, dd-MMM-yy HH:mm:ss z + EEE MMM ppd HH:mm:ss [z ]yyyy + + + + + java.lang.String + text_general + + *_str + 256 + + true + + + java.lang.Boolean + booleans + + + java.util.Date + pdates + + + java.lang.Long + java.lang.Integer + plongs + + + java.lang.Number + pdoubles + + + + + + + + + + + text/plain; charset=UTF-8 + + diff --git a/roles/solr/templates/etc/sysconfig/solr.j2 b/roles/solr/templates/etc/sysconfig/solr.j2 new file mode 100644 index 0000000..04e9ade --- /dev/null +++ b/roles/solr/templates/etc/sysconfig/solr.j2 @@ -0,0 +1,6 @@ +JVM_HEAP_SIZE="{{ solr_heap_size }}" + +SOLR_CONF_DIR=/etc/solr +SOLR_OPTS="-Dsolr.autoSoftCommit.maxTime={{ solr_softcommit_ms }}" + +LOG4J_PROPS=/etc/solr/log4j2.xml diff --git a/roles/solr/templates/etc/systemd/system/solr.service.j2 b/roles/solr/templates/etc/systemd/system/solr.service.j2 new file mode 100644 index 0000000..52ee55f --- /dev/null +++ b/roles/solr/templates/etc/systemd/system/solr.service.j2 @@ -0,0 +1,63 @@ +[Unit] +Description=Apache Solr +Before=dovecot.service + +[Service] +Type=simple +User=solr +LimitNOFILE=65000 +LimitNPROC=65000 +Restart=on-failure + +ProtectSystem=strict +ReadWritePaths={{ solr_data_dir }} /var/log/solr + +# Harden this java nightmare +NoNewPrivileges=yes +PrivateTmp=yes +PrivateDevices=yes +DevicePolicy=closed +ProtectSystem=strict +ProtectHome=yes +ProtectControlGroups=yes +ProtectKernelModules=yes +ProtectKernelTunables=yes +RestrictAddressFamilies=AF_UNIX AF_INET AF_INET6 +RestrictNamespaces=yes +RestrictRealtime=yes +RestrictSUIDSGID=yes +LockPersonality=yes + +WorkingDirectory={{ solr_install_dir }}/server +LogsDirectory=solr + +Environment=SOLR_HOME={{ solr_data_dir }} +Environment=SOLR_CONF_DIR=${SOLR_HOME}/server/solr/configsets/_default/conf +Environment=JVM_ARGS= +Environment=JVM_GC_ARGS="-XX:+UseG1GC -XX:+PerfDisableSharedMem -XX:+ParallelRefProcEnabled -XX:MaxGCPauseMillis=250 -XX:+UseLargePages -XX:+AlwaysPreTouch -XX:+ExplicitGCInvokesConcurrent" +Environment=JVM_HEAP_SIZE=512m +Environment=JETTY_HOST=localhost +Environment=JETTY_PORT=8983 +Environment=LOG4J_PROPS={{ solr_install_dir }}/server/resources/log4j2.xml +EnvironmentFile=/etc/sysconfig/solr + +ExecStart=java -server \ + $JVM_ARGS \ + -Xmx${JVM_HEAP_SIZE} \ + $SOLR_OPTS \ + $JVM_GC_ARGS \ + -XX:+CrashOnOutOfMemoryError \ + -Dlog4j.configurationFile=${LOG4J_PROPS} \ + -Dsolr.log.dir=/var/log/solr \ + -Djetty.host=${JETTY_HOST} \ + -Djetty.port=${JETTY_PORT} \ + -Djetty.home={{ solr_install_dir }}/server \ + -Dsolr.solr.home=${SOLR_HOME} \ + -Dsolr.data.home= \ + -Dsolr.install.dir={{ solr_install_dir }} \ + -Dsolr.default.confdir=${SOLR_CONF_DIR} \ + -Dlog4j2.formatMsgNoLookups=true \ + -jar start.jar --module=http --module=gzip + +[Install] +WantedBy=multi-user.target diff --git a/roles/solr/vars/main.yml b/roles/solr/vars/main.yml new file mode 100644 index 0000000..fa5f1f8 --- /dev/null +++ b/roles/solr/vars/main.yml @@ -0,0 +1,3 @@ +solr_url: https://dlcdn.apache.org/solr/solr/{{ solr_version }}/solr-{{ solr_version }}.tgz +solr_install_dir: /usr/local/share/solr +solr_data_dir: /var/lib/solr diff --git a/roles/ssh/defaults/main.yml b/roles/ssh/defaults/main.yml new file mode 100644 index 0000000..6d2acb6 --- /dev/null +++ b/roles/ssh/defaults/main.yml @@ -0,0 +1 @@ +ssh_canonical_domains: ['{{ ansible_domain }}'] diff --git 
a/roles/ssh/tasks/main.yml b/roles/ssh/tasks/main.yml new file mode 100644 index 0000000..7b0c386 --- /dev/null +++ b/roles/ssh/tasks/main.yml @@ -0,0 +1,4 @@ +- name: generate ssh_config + template: + src: etc/ssh/ssh_config.j2 + dest: /etc/ssh/ssh_config diff --git a/roles/ssh/templates/etc/ssh/ssh_config.j2 b/roles/ssh/templates/etc/ssh/ssh_config.j2 new file mode 100644 index 0000000..bbde76e --- /dev/null +++ b/roles/ssh/templates/etc/ssh/ssh_config.j2 @@ -0,0 +1,13 @@ +Include /etc/ssh/ssh_config.d/*.conf + +CanonicalizeHostname always +CanonicalDomains {{ ssh_canonical_domains | join(' ') }} +CanonicalizeMaxDots 0 +CanonicalizeFallbackLocal yes + +Host {{ ssh_canonical_domains | map('regex_replace', '^', '*.') | join(' ') }} + GSSAPIAuthentication yes + GSSAPIDelegateCredentials yes + GSSAPIKeyExchange yes + GSSAPIRenewalForcesRekey yes + GSSAPITrustDns yes diff --git a/roles/sudo/defaults/main.yml b/roles/sudo/defaults/main.yml new file mode 100644 index 0000000..623e42b --- /dev/null +++ b/roles/sudo/defaults/main.yml @@ -0,0 +1,2 @@ +sudo_mailto: root +sudo_send_emails: yes diff --git a/roles/sudo/tasks/main.yml b/roles/sudo/tasks/main.yml new file mode 100644 index 0000000..7419bf0 --- /dev/null +++ b/roles/sudo/tasks/main.yml @@ -0,0 +1,5 @@ +- name: generate sudoers file + template: + src: etc/sudoers.j2 + dest: /etc/sudoers + mode: 0440 diff --git a/roles/sudo/templates/etc/sudoers.j2 b/roles/sudo/templates/etc/sudoers.j2 new file mode 100644 index 0000000..2ee55be --- /dev/null +++ b/roles/sudo/templates/etc/sudoers.j2 @@ -0,0 +1,48 @@ +# +# Refuse to run if unable to disable echo on the tty. +# +Defaults !visiblepw + +# +# Preserving HOME has security implications since many programs +# use it when searching for configuration files. Note that HOME +# is already set when the env_reset option is enabled, so +# this option is only effective for configurations where either +# env_reset is disabled or HOME is present in the env_keep list. +# +Defaults always_set_home +Defaults match_group_by_gid + +{% if sudo_send_emails %} +Defaults mailto = "{{ sudo_mailto }}" +Defaults mail_badpass +Defaults mail_no_host +Defaults mail_no_perms +Defaults mail_no_user +{% endif %} + +# Prior to version 1.8.15, groups listed in sudoers that were not +# found in the system group database were passed to the group +# plugin, if any. Starting with 1.8.15, only groups of the form +# %:group are resolved via the group plugin by default. +# We enable always_query_group_plugin to restore old behavior. +# Disable this option for new behavior.
+Defaults always_query_group_plugin + +Defaults env_reset +Defaults env_keep = "COLORS DISPLAY HOSTNAME HISTSIZE KDEDIR LS_COLORS" +Defaults env_keep += "MAIL PS1 PS2 QTDIR USERNAME LANG LC_ADDRESS LC_CTYPE" +Defaults env_keep += "LC_COLLATE LC_IDENTIFICATION LC_MEASUREMENT LC_MESSAGES" +Defaults env_keep += "LC_MONETARY LC_NAME LC_NUMERIC LC_PAPER LC_TELEPHONE" +Defaults env_keep += "LC_TIME LC_ALL LANGUAGE LINGUAS _XKB_CHARSET XAUTHORITY" + +Defaults secure_path = /sbin:/bin:/usr/sbin:/usr/bin + +## Allow root to run any commands anywhere +root ALL=(ALL) ALL + +## Allows people in group wheel to run all commands +%wheel ALL=(ALL) ALL + +## Read drop-in files from /etc/sudoers.d (the # here does not mean a comment) +#includedir /etc/sudoers.d diff --git a/roles/syncthing/defaults/main.yml b/roles/syncthing/defaults/main.yml new file mode 100644 index 0000000..51f1f66 --- /dev/null +++ b/roles/syncthing/defaults/main.yml @@ -0,0 +1,5 @@ +# username-port mappings +syncthing_users: {} + +syncthing_fs_watcher_enabled: no # inotify doesn't work on nfs. +syncthing_rescan_interval_sec: 60 diff --git a/roles/syncthing/meta/main.yml b/roles/syncthing/meta/main.yml new file mode 100644 index 0000000..29230f9 --- /dev/null +++ b/roles/syncthing/meta/main.yml @@ -0,0 +1,4 @@ +dependencies: + - role: yum + yum_repositories: epel + tags: yum diff --git a/roles/syncthing/tasks/main.yml b/roles/syncthing/tasks/main.yml new file mode 100644 index 0000000..cf6b6b2 --- /dev/null +++ b/roles/syncthing/tasks/main.yml @@ -0,0 +1,73 @@ +- name: install packages + dnf: + name: '{{ syncthing_packages }}' + state: present + +- name: mask global syncthing service + systemd: + name: syncthing + scope: global + state: stopped + enabled: no + masked: yes + +- name: increase udp buffer size + sysctl: + name: net.core.rmem_max + value: '{{ syncthing_max_udp_buffer_size }}' + state: present + +- name: create syncthing directory + file: + path: '{{ syncthing_home }}' + state: directory + +- name: generate systemd unit + template: + src: 'etc/systemd/system/syncthing-user@.service.j2' + dest: '/etc/systemd/system/syncthing-user@.service' + register: syncthing_unit + +- name: reload systemd units + systemd: + daemon_reload: yes + when: syncthing_unit.changed + +- name: set httpd_var_run_t selinux context for runtime directory + sefcontext: + target: '{{ syncthing_runtime_dir }}(/.*)?' 
+ setype: httpd_var_run_t + state: present + +- name: generate user directories + include_tasks: syncthing_user.yml + loop: '{{ syncthing_users | dict2items }}' + loop_control: + index_var: syncthing_user_index + vars: + syncthing_user: '{{ item.key }}' + syncthing_port: '{{ item.value }}' + +- name: open firewall ports + firewalld: + port: '{{ item.0 }}/{{ item.1 }}' + permanent: yes + immediate: yes + state: enabled + loop: "{{ syncthing_users.values() | product(['tcp', 'udp']) }}" + tags: firewalld + +- name: generate landing page + template: + src: var/www/html/index.html.j2 + dest: /var/www/html/index.html + +- name: create selinux policy for apache to connect to unix socket + include_role: + name: selinux_policy + apply: + tags: selinux + vars: + selinux_policy_name: syncthing_httpd + selinux_policy_te: '{{ syncthing_selinux_policy_te }}' + tags: selinux diff --git a/roles/syncthing/tasks/syncthing_user.yml b/roles/syncthing/tasks/syncthing_user.yml new file mode 100644 index 0000000..c580df6 --- /dev/null +++ b/roles/syncthing/tasks/syncthing_user.yml @@ -0,0 +1,36 @@ +- name: create user directory + file: + path: '{{ syncthing_home }}/{{ syncthing_user }}' + state: directory + owner: '{{ syncthing_user }}' + group: '{{ syncthing_user }}' + mode: 0700 + +- name: generate default configuration + command: + cmd: syncthing -generate '{{ syncthing_home }}/{{ syncthing_user }}' + creates: '{{ syncthing_home }}/{{ syncthing_user }}/config.xml' + become: yes + become_user: '{{ syncthing_user }}' + register: syncthing_generate + +- name: get device id + command: + cmd: syncthing -home {{ syncthing_home }}/{{ syncthing_user }} -device-id + changed_when: no + register: syncthing_device_id + +- name: generate config file + template: + src: '{{ syncthing_home[1:] }}/config.xml.j2' + dest: '{{ syncthing_home }}/{{ syncthing_user }}/config.xml' + owner: '{{ syncthing_user }}' + group: '{{ syncthing_user }}' + mode: 0600 + force: '{{ syncthing_generate.changed }}' + +- name: enable systemd unit + systemd: + name: 'syncthing-user@{{ syncthing_user }}' + enabled: yes + state: started diff --git a/roles/syncthing/templates/etc/systemd/system/syncthing-user@.service.j2 b/roles/syncthing/templates/etc/systemd/system/syncthing-user@.service.j2 new file mode 100644 index 0000000..ba0ffb5 --- /dev/null +++ b/roles/syncthing/templates/etc/systemd/system/syncthing-user@.service.j2 @@ -0,0 +1,27 @@ +[Unit] +Description=Syncthing - Open Source Continuous File Synchronization for %I +Documentation=man:syncthing(1) +After=autofs.service nss-user-lookup.target network-online.target + +[Service] +User=%i +Group=%i +Environment=STNOUPGRADE=1 +PermissionsStartOnly=true +ExecStartPre=install -o root -g root -m 0755 -Z -d {{ syncthing_runtime_dir | quote }} +ExecStartPre=install -o %i -g apache -m 2750 -Z -d {{ syncthing_runtime_dir | quote }}/%i +ExecStart=/usr/bin/syncthing -no-browser -no-restart -logflags=0 -home {{ syncthing_home | quote}}/%i -gui-address=unix://{{ syncthing_runtime_dir | quote }}/%i/gui.sock +Restart=on-failure +RestartSec=5 +SuccessExitStatus=3 4 +RestartForceExitStatus=3 4 + +# Hardening +ProtectSystem=full +PrivateTmp=true +SystemCallArchitectures=native +MemoryDenyWriteExecute=true +NoNewPrivileges=true + +[Install] +WantedBy=multi-user.target diff --git a/roles/syncthing/templates/var/lib/syncthing/config.xml.j2 b/roles/syncthing/templates/var/lib/syncthing/config.xml.j2 new file mode 100644 index 0000000..7790dd8 --- /dev/null +++ 
b/roles/syncthing/templates/var/lib/syncthing/config.xml.j2 @@ -0,0 +1,116 @@ + + +
tcp://{{ ansible_fqdn }}:{{ syncthing_port }}
+ false + false + 0 + 0 + 0 + false + 0 +
+ +
{{ syncthing_runtime_dir }}/{{ syncthing_user }}/gui.sock
+ 770 + default + true +
+ + + quic://0.0.0.0:{{ syncthing_port }} + tcp://0.0.0.0:{{ syncthing_port }} + default + false + false + 0 + [ff32::5222]:0 + 0 + 0 + 60 + false + 10 + false + false + 60 + 30 + 10 + -1 + 3 + 00000000 + https://data.syncthing.net/newdata + false + 1800 + false + 0 + false + 24 + false + 5 + false + 1 + https://upgrades.syncthing.net/meta.json + false + 10 + 0 + false + 0 + https://crash.syncthing.net/newcrash + false + 0 + 0 + default + auto + 0 + true + false + 0 + 0 + false + + + + basic + + + + 1 + + 3600 + + basic + + 0 + 0 + 0 + random + false + 0 + 0 + 10 + false + false + false + 25 + .stfolder + false + 0 + 2 + false + standard + standard + false + false + + +
dynamic
+ false + false + 0 + 0 + 0 + false + 0 +
+ +
+
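+<!-- Each per-user instance exposes its GUI only on the unix socket named above; Apache proxies it via the ProxyPass rules in syncthing_apache_config (vars/main.yml). -->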
diff --git a/roles/syncthing/templates/var/www/html/index.html.j2 b/roles/syncthing/templates/var/www/html/index.html.j2 new file mode 100644 index 0000000..63944f5 --- /dev/null +++ b/roles/syncthing/templates/var/www/html/index.html.j2 @@ -0,0 +1,15 @@ + + + + Syncthing + + + +

Choose your username.

+
    +{% for user in syncthing_users.keys() %} +
  • {{ user }}
  • +{% endfor %} +
+ + diff --git a/roles/syncthing/vars/main.yml b/roles/syncthing/vars/main.yml new file mode 100644 index 0000000..2f15f87 --- /dev/null +++ b/roles/syncthing/vars/main.yml @@ -0,0 +1,46 @@ +syncthing_packages: + - syncthing + - syncthing-tools + - httpd + +syncthing_home: /var/lib/syncthing +syncthing_runtime_dir: /var/run/syncthing + +syncthing_max_udp_buffer_size: 2500000 + +syncthing_archive_shell: >- + TIMESTAMP=$(date +%Y%m%d%H%M%S); + tar czf "syncthing-${TIMESTAMP}.tar.gz" + --transform "s|^\.|syncthing-${TIMESTAMP}|" + --exclude="*/index-*.db*" + -C "{{ syncthing_home }}" . + +syncthing_selinux_policy_te: | + require { + type httpd_t; + type unconfined_service_t; + class unix_stream_socket connectto; + } + + #============= httpd_t ============== + allow httpd_t unconfined_service_t:unix_stream_socket connectto; + +syncthing_apache_config: | + {{ apache_proxy_vhost_config }} + + {% for user in syncthing_users %} + + AuthType GSSAPI + AuthName "FreeIPA Single Sign-On" + GssapiLocalName On + {{ apache_gssapi_session_config }} + + Require user {{ user }} + + ProxyPass unix:{{ syncthing_runtime_dir }}/{{ user }}/gui.sock|http://{{ user }}/ + ProxyPassReverse unix:{{ syncthing_runtime_dir }}/{{ user }}/gui.sock|http://{{ user }}/ + + {{ apache_proxy_header_config }} + + + {% endfor %} diff --git a/roles/systemd_timer/defaults/main.yml b/roles/systemd_timer/defaults/main.yml new file mode 100644 index 0000000..e429f17 --- /dev/null +++ b/roles/systemd_timer/defaults/main.yml @@ -0,0 +1,11 @@ +timer_persistent: yes +timer_user: root +timer_shell: no +timer_enabled: yes + +# timer_group: groupname +# timer_chdir: /path/to/working/directory +# +# timer_shell: some $shell | command +# OR +# timer_exec: command to exec diff --git a/roles/systemd_timer/tasks/main.yml b/roles/systemd_timer/tasks/main.yml new file mode 100644 index 0000000..0c0e028 --- /dev/null +++ b/roles/systemd_timer/tasks/main.yml @@ -0,0 +1,21 @@ +- name: create systemd units + template: + src: etc/systemd/system/task.{{ item }}.j2 + dest: /etc/systemd/system/{{ timer_name }}.{{ item }} + loop: + - service + - timer + register: timer_unit + +- name: reload systemd units + systemd: + name: '{{ timer_name }}.timer' + daemon-reload: yes + state: restarted + when: timer_unit.changed + +- name: enable systemd timer + systemd: + name: '{{ timer_name }}.timer' + enabled: '{{ true if timer_enabled else false }}' + state: "{{ 'started' if timer_enabled else 'stopped' }}" diff --git a/roles/systemd_timer/templates/etc/systemd/system/task.service.j2 b/roles/systemd_timer/templates/etc/systemd/system/task.service.j2 new file mode 100644 index 0000000..307c112 --- /dev/null +++ b/roles/systemd_timer/templates/etc/systemd/system/task.service.j2 @@ -0,0 +1,23 @@ +[Unit] +Description={{ timer_description | default(timer_name) }} +{% if timer_after is defined %} +After={{ timer_after if timer_after is string else (timer_after | join(' ')) }} +{% endif %} + +[Service] +Type=oneshot +User={{ timer_user }} +{% if timer_group is defined %} +Group={{ timer_group }} +{% endif %} +{% if timer_chdir is defined %} +WorkingDirectory={{ timer_chdir }} +{% endif %} +{% if timer_shell %} +ExecStart=/bin/bash -Eeu -o pipefail -c {{ timer_exec | replace('%', '%%') | replace('$', '$$') | replace('\\', '\\\\') | quote }} +{% else %} +ExecStart={{ timer_exec | replace('%', '%%') | replace(';', '\;') }} +{% endif %} + +[Install] +WantedBy=multi-user.target diff --git a/roles/systemd_timer/templates/etc/systemd/system/task.timer.j2 
b/roles/systemd_timer/templates/etc/systemd/system/task.timer.j2 new file mode 100644 index 0000000..0e0adc8 --- /dev/null +++ b/roles/systemd_timer/templates/etc/systemd/system/task.timer.j2 @@ -0,0 +1,9 @@ +[Unit] +Description={{ timer_description | default(timer_name) }} on calendar interval + +[Timer] +OnCalendar={{ timer_on_calendar }} +Persistent={{ timer_persistent | bool | to_json }} + +[Install] +WantedBy=timers.target diff --git a/roles/teddit/defaults/main.yml b/roles/teddit/defaults/main.yml new file mode 100644 index 0000000..35557ef --- /dev/null +++ b/roles/teddit/defaults/main.yml @@ -0,0 +1,24 @@ +teddit_version: main +teddit_user: teddit +teddit_port: 8080 +teddit_server_name: '{{ ansible_fqdn }}' + +teddit_update_on_calendar: weekly + +teddit_use_reddit_oauth: no +teddit_theme: auto +teddit_clean_homepage: yes +teddit_flairs_enabled: no +teddit_highlight_controversial: yes +teddit_videos_muted: yes +teddit_comments_sort: confidence +teddit_show_upvotes: yes +teddit_show_upvote_percentage: yes +teddit_suggested_subreddits: + - All + - Saved + +teddit_redis_host: 127.0.0.1 +teddit_redis_password: '' +teddit_redis_port: 6379 +teddit_cache_control_interval: 24 diff --git a/roles/teddit/handlers/main.yml b/roles/teddit/handlers/main.yml new file mode 100644 index 0000000..3b0ce8a --- /dev/null +++ b/roles/teddit/handlers/main.yml @@ -0,0 +1,4 @@ +- name: restart teddit + systemd: + name: teddit + state: restarted diff --git a/roles/teddit/meta/main.yml b/roles/teddit/meta/main.yml new file mode 100644 index 0000000..7422a2b --- /dev/null +++ b/roles/teddit/meta/main.yml @@ -0,0 +1,10 @@ +dependencies: + - role: yum + yum_repositories: + - epel + - rpmfusion-free + tags: yum + + - role: redis + redis_port: '{{ teddit_redis_port }}' + tags: redis diff --git a/roles/teddit/tasks/main.yml b/roles/teddit/tasks/main.yml new file mode 100644 index 0000000..a26370f --- /dev/null +++ b/roles/teddit/tasks/main.yml @@ -0,0 +1,104 @@ +- name: install packages + dnf: + name: '{{ teddit_packages }}' + state: present + +- name: create local user + user: + name: '{{ teddit_user }}' + system: yes + home: '{{ teddit_home }}' + shell: /sbin/nologin + create_home: no + +- name: create home directory + file: + path: '{{ teddit_home }}' + owner: '{{ teddit_user }}' + group: '{{ teddit_user }}' + mode: 0755 + state: directory + +- name: disable npm package lock + lineinfile: + regexp: ^package-lock= + line: package-lock=false + path: '{{ teddit_home }}/.npmrc' + create: yes + owner: '{{ teddit_user }}' + group: '{{ teddit_user }}' + mode: 0600 + state: present + +- name: clone git repository + git: + repo: '{{ teddit_git_repo }}' + dest: '{{ teddit_install_dir }}' + version: '{{ teddit_version }}' + force: yes + update: yes + become: yes + become_user: '{{ teddit_user }}' + register: teddit_git + notify: restart teddit + +- name: install npm dependencies + npm: + path: '{{ teddit_install_dir }}' + production: yes + no_optional: yes + become: yes + become_user: '{{ teddit_user }}' + when: teddit_git.changed + notify: restart teddit + +- name: create teddit systemd unit + template: + src: etc/systemd/system/teddit.service.j2 + dest: /etc/systemd/system/teddit.service + register: teddit_unit + notify: restart teddit + +- name: reload systemd daemons + systemd: + daemon_reload: yes + when: teddit_unit.changed + +- name: generate config file + template: + src: '{{ teddit_install_dir[1:] }}/config.js.j2' + dest: '{{ teddit_install_dir }}/config.js' + owner: '{{ teddit_user }}' + group: '{{ teddit_user 
}}' + mode: 0600 + notify: restart teddit + +- name: start teddit + systemd: + name: teddit + enabled: yes + state: started + +- name: set http_port_t selinux context on teddit port + seport: + ports: '{{ teddit_port }}' + proto: tcp + setype: http_port_t + state: present + tags: selinux + +- name: generate update script + template: + src: '{{ teddit_home[1:] }}/teddit-update.sh.j2' + dest: '{{ teddit_home }}/teddit-update.sh' + mode: 0555 + +- name: set up teddit-update timer + include_role: + name: systemd_timer + vars: + timer_name: teddit-update + timer_description: Update teddit + timer_after: network.target + timer_on_calendar: '{{ teddit_update_on_calendar }}' + timer_exec: '{{ teddit_home }}/teddit-update.sh' diff --git a/roles/teddit/templates/etc/systemd/system/teddit.service.j2 b/roles/teddit/templates/etc/systemd/system/teddit.service.j2 new file mode 100644 index 0000000..35e3d9d --- /dev/null +++ b/roles/teddit/templates/etc/systemd/system/teddit.service.j2 @@ -0,0 +1,36 @@ +[Unit] +Description=teddit reddit proxy +After=network.target redis@{{ teddit_redis_port }}.service +Requires=redis@{{ teddit_redis_port }}.service +AssertPathExists={{ teddit_install_dir }} + +[Service] +Type=simple +Environment="LISTEN_ADDRESS=127.0.0.1" +Environment="NODE_ENV=production" +EnvironmentFile=-/etc/sysconfig/teddit +ExecStart=/usr/bin/node app.js +WorkingDirectory={{ teddit_install_dir }} +User={{ teddit_user }} +Group={{ teddit_user }} +Restart=on-failure + +# See https://www.freedesktop.org/software/systemd/man/systemd.exec.html +# for details +DevicePolicy=closed +NoNewPrivileges=yes +PrivateDevices=yes +PrivateTmp=yes +ProtectControlGroups=yes +ProtectKernelModules=yes +ProtectKernelTunables=yes +RestrictAddressFamilies=AF_UNIX AF_INET AF_INET6 +RestrictNamespaces=yes +RestrictRealtime=yes +SystemCallFilter=~@clock @debug @module @mount @obsolete @privileged @reboot @setuid @swap + +ProtectSystem=full +ProtectHome=true + +[Install] +WantedBy=multi-user.target diff --git a/roles/teddit/templates/opt/teddit/teddit-update.sh.j2 b/roles/teddit/templates/opt/teddit/teddit-update.sh.j2 new file mode 100644 index 0000000..07de718 --- /dev/null +++ b/roles/teddit/templates/opt/teddit/teddit-update.sh.j2 @@ -0,0 +1,36 @@ +#!/usr/bin/env bash + +set -eu + +SRCDIR={{ teddit_install_dir | quote }} +TEDDIT_USER={{ teddit_user | quote }} + +as-teddit() { + runuser -u "$TEDDIT_USER" -- "$@" +} + +if (( $EUID != 0 )); then + echo 'must be superuser' 1>&2 + exit 1 +fi + +cd "$SRCDIR" + +as-teddit git fetch + +local_rev=$(git rev-parse HEAD) +upstream_rev=$(git rev-parse '@{u}') + +echo "local: $local_rev" +echo "upstream: $upstream_rev" + +if [ "$local_rev" != "$upstream_rev" ]; then + as-teddit git reset --hard HEAD + + echo "installing dependencies..."
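+  # --production skips devDependencies and --no-optional skips optional packages, +  # mirroring the npm module options (production/no_optional) used in tasks/main.yml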
+ as-teddit npm install --production --no-optional + + systemctl restart teddit +else + echo "teddit is already up to date" +fi diff --git a/roles/teddit/templates/opt/teddit/teddit/config.js.j2 b/roles/teddit/templates/opt/teddit/teddit/config.js.j2 new file mode 100644 index 0000000..1f56f92 --- /dev/null +++ b/roles/teddit/templates/opt/teddit/teddit/config.js.j2 @@ -0,0 +1,71 @@ +const config = { + domain: {{ teddit_server_name | to_json }}, + use_reddit_oauth: {{ teddit_use_reddit_oauth | bool | to_json }}, + cert_dir: '', + theme: {{ teddit_theme | to_json }}, + clean_homepage: {{ teddit_clean_homepage | bool | to_json }}, + flairs_enabled: {{ teddit_flairs_enabled | bool | to_json }}, + highlight_controversial: {{ teddit_highlight_controversial | bool | to_json }}, + api_enabled: true, + api_force_https: false, + video_enabled: true, + redis_enabled: true, + redis_db: 0, + redis_host: {{ teddit_redis_host | to_json }}, + redis_password: {{ teddit_redis_password | to_json }}, + redis_port: {{ teddit_redis_port | to_json }}, + ssl_port: 0, + nonssl_port: {{ teddit_port }}, + listen_address: '127.0.0.1', + https_enabled: false, + redirect_http_to_https: false, + redirect_www: false, + use_compression: true, + use_view_cache: false, + use_helmet: false, + use_helmet_hsts: false, + trust_proxy: true, + trust_proxy_address: '127.0.0.1', + http_proxy: '', + nsfw_enabled: true, + videos_muted: {{ teddit_videos_muted | bool | to_json }}, + post_comments_sort: {{ teddit_comments_sort | to_json }}, + reddit_app_id: {{ teddit_reddit_app_id | to_json }}, + domain_replacements: [], + cache_control: true, + cache_control_interval: {{ teddit_cache_control_interval | int | to_json }}, + show_upvoted_percentage: {{ teddit_show_upvote_percentage | bool | to_json }}, + show_upvotes: {{ teddit_show_upvotes | bool | to_json }}, + post_media_max_heights: { + 'extra-small': 300, + 'small': 415, + 'medium': 600, + 'large': 850, + 'extra-large': 1200 + }, + setexs: { + frontpage: 600, + subreddit: 600, + posts: 600, + user: 600, + searches: 600, + sidebar: 60 * 60 * 24 * 7, + shorts: 60 * 60 * 24 * 31, + wikis: 60 * 60 * 24 * 7, + subreddits_explore: { + front: 60 * 60 * 24 * 1, + new_page: 60 + }, + }, + rate_limiting: { + enabled: false, + initial_limit: 100, + limit_after_limited: 30 + }, + valid_media_domains: ['preview.redd.it', 'external-preview.redd.it', 'i.redd.it', 'v.redd.it', 'a.thumbs.redditmedia.com', 'b.thumbs.redditmedia.com', 'emoji.redditmedia.com', 'styles.redditmedia.com', 'www.redditstatic.com', 'thumbs.gfycat.com', 'i.ytimg.com', 'i.imgur.com'], + valid_embed_video_domains: ['gfycat.com', 'youtube.com'], + reddit_api_error_text: `Seems like your instance is either blocked (e.g. due to API rate limiting), reddit is currently down, or your API key is expired and not renewd properly. 
This can also happen for other reasons.`, + suggested_subreddits: {{ teddit_suggested_subreddits | to_json }} +}; + +module.exports = config; diff --git a/roles/teddit/vars/main.yml b/roles/teddit/vars/main.yml new file mode 100644 index 0000000..c294dfa --- /dev/null +++ b/roles/teddit/vars/main.yml @@ -0,0 +1,13 @@ +teddit_packages: + - ffmpeg + - nodejs + - git + +teddit_git_repo: https://codeberg.org/teddit/teddit +teddit_home: /opt/teddit +teddit_install_dir: '{{ teddit_home }}/teddit' + +teddit_apache_config: | + {{ apache_proxy_config }} + ProxyPass / http://127.0.0.1:{{ teddit_port }}/ + ProxyPassReverse / http://127.0.0.1:{{ teddit_port }}/ diff --git a/roles/tika/defaults/main.yml b/roles/tika/defaults/main.yml new file mode 100644 index 0000000..9ca8d89 --- /dev/null +++ b/roles/tika/defaults/main.yml @@ -0,0 +1,3 @@ +tika_version: 2.6.0 +tika_port: 9998 +tika_heap_size: 2g diff --git a/roles/tika/handlers/main.yml b/roles/tika/handlers/main.yml new file mode 100644 index 0000000..56f3127 --- /dev/null +++ b/roles/tika/handlers/main.yml @@ -0,0 +1,4 @@ +- name: restart tika + systemd: + name: tika + state: restarted diff --git a/roles/tika/tasks/main.yml b/roles/tika/tasks/main.yml new file mode 100644 index 0000000..d2d59dc --- /dev/null +++ b/roles/tika/tasks/main.yml @@ -0,0 +1,69 @@ +- name: install java + dnf: + name: java-17-openjdk-headless + state: present + +- name: create tika installation directory + file: + path: '{{ tika_install_dir }}' + state: directory + +- name: download jar file + get_url: + url: '{{ tika_url }}' + dest: '{{ tika_install_dir }}/tika-server.jar' + mode: 0444 + notify: restart tika + +- name: add local user + user: + name: tika + system: yes + home: '{{ tika_data_dir }}' + shell: /sbin/nologin + create_home: no + +- name: create data directory + file: + path: '{{ tika_data_dir }}' + state: directory + owner: tika + group: tika + mode: 0770 + +- name: create systemd unit + template: + src: etc/systemd/system/tika.service.j2 + dest: /etc/systemd/system/tika.service + register: tika_unit + +- name: reload systemd units + systemd: + daemon_reload: yes + when: tika_unit.changed + +- name: create environment file + template: + src: etc/sysconfig/tika.j2 + dest: /etc/sysconfig/tika + notify: restart tika + +- name: create configuration directory + file: + path: '{{ tika_conf_dir }}' + state: directory + +- name: generate config files + template: + src: '{{ tika_conf_dir[1:] }}/{{ item }}.j2' + dest: '{{ tika_conf_dir }}/{{ item }}' + loop: + - log4j2.xml + - config.xml + notify: restart tika + +- name: start tika + systemd: + name: tika + enabled: yes + state: started diff --git a/roles/tika/templates/etc/sysconfig/tika.j2 b/roles/tika/templates/etc/sysconfig/tika.j2 new file mode 100644 index 0000000..1e2ac54 --- /dev/null +++ b/roles/tika/templates/etc/sysconfig/tika.j2 @@ -0,0 +1,3 @@ +JVM_HEAP_SIZE={{ tika_heap_size }} +TIKA_PORT={{ tika_port }} +TIKA_OPTS="-Dlog4j.configurationFile={{ tika_conf_dir }}/log4j2.xml" diff --git a/roles/tika/templates/etc/systemd/system/tika.service.j2 b/roles/tika/templates/etc/systemd/system/tika.service.j2 new file mode 100644 index 0000000..f888fcb --- /dev/null +++ b/roles/tika/templates/etc/systemd/system/tika.service.j2 @@ -0,0 +1,53 @@ +[Unit] +Description=Apache Tika +Before=dovecot.service + +[Service] +Type=simple +User=tika +Restart=on-failure + +ProtectSystem=strict +ReadWritePaths={{ tika_data_dir }} /var/log/tika + +# Harden this java nightmare +NoNewPrivileges=yes +PrivateTmp=yes +PrivateDevices=yes 
+DevicePolicy=closed +ProtectSystem=strict +ProtectHome=yes +ProtectControlGroups=yes +ProtectKernelModules=yes +ProtectKernelTunables=yes +RestrictAddressFamilies=AF_UNIX AF_INET AF_INET6 +RestrictNamespaces=yes +RestrictRealtime=yes +RestrictSUIDSGID=yes +LockPersonality=yes + +WorkingDirectory={{ tika_install_dir }} +LogsDirectory=tika + +Environment=TIKA_DATA_HOME={{ tika_data_dir }} +Environment=JVM_ARGS= +Environment=TIKA_OPTS= +Environment=JVM_GC_ARGS="-XX:+UseG1GC -XX:+PerfDisableSharedMem -XX:+ParallelRefProcEnabled -XX:MaxGCPauseMillis=250 -XX:+UseLargePages -XX:+AlwaysPreTouch" +Environment=TIKA_HOST=localhost +Environment=TIKA_PORT=9998 +Environment=TIKA_LOGS_DIR=/var/log/tika +Environment=TIKA_CONFIG_FILE={{ tika_conf_dir }}/config.xml +EnvironmentFile=/etc/sysconfig/tika + +ExecStart=java -server \ + $JVM_ARGS \ + $JVM_GC_ARGS \ + -Dlog4j2.formatMsgNoLookups=true \ + $TIKA_OPTS \ + -jar tika-server.jar \ + -c ${TIKA_CONFIG_FILE} \ + -h ${TIKA_HOST} \ + -p ${TIKA_PORT} + +[Install] +WantedBy=multi-user.target diff --git a/roles/tika/templates/etc/tika/config.xml.j2 b/roles/tika/templates/etc/tika/config.xml.j2 new file mode 100644 index 0000000..0e8df31 --- /dev/null +++ b/roles/tika/templates/etc/tika/config.xml.j2 @@ -0,0 +1,15 @@ + + + + false + + -Xmx{{ tika_heap_size }} + -Dlog4j.configurationFile={{ tika_conf_dir }}/log4j2.xml + + + tika + status + + + + diff --git a/roles/tika/templates/etc/tika/log4j2.xml.j2 b/roles/tika/templates/etc/tika/log4j2.xml.j2 new file mode 100644 index 0000000..ae66fbb --- /dev/null +++ b/roles/tika/templates/etc/tika/log4j2.xml.j2 @@ -0,0 +1,18 @@ + + + + + + + {% raw %}%maxLen{%-5p %c %m%notEmpty{ =>%ex{short}}}{10240}%n{% endraw %} + + + + + + + + + + + diff --git a/roles/tika/vars/main.yml b/roles/tika/vars/main.yml new file mode 100644 index 0000000..e730904 --- /dev/null +++ b/roles/tika/vars/main.yml @@ -0,0 +1,4 @@ +tika_url: https://dlcdn.apache.org/tika/{{ tika_version }}/tika-server-standard-{{ tika_version }}.jar +tika_install_dir: /usr/local/share/tika +tika_data_dir: /var/lib/tika +tika_conf_dir: /etc/tika diff --git a/roles/timezone/defaults/main.yml b/roles/timezone/defaults/main.yml new file mode 100644 index 0000000..acb10de --- /dev/null +++ b/roles/timezone/defaults/main.yml @@ -0,0 +1 @@ +timezone: Etc/UTC diff --git a/roles/timezone/tasks/main.yml b/roles/timezone/tasks/main.yml new file mode 100644 index 0000000..a62a5e4 --- /dev/null +++ b/roles/timezone/tasks/main.yml @@ -0,0 +1,12 @@ +- name: check if system clock uses localtime + command: timedatectl show --property LocalRTC --value + changed_when: false + register: timezone_localrtc + +- name: set system clock to UTC + command: timedatectl --adjust-system-clock set-local-rtc 0 + when: timezone_localrtc.stdout != "no" + +- name: set timezone + timezone: + name: '{{ timezone }}' diff --git a/roles/ttrss/defaults/main.yml b/roles/ttrss/defaults/main.yml new file mode 100644 index 0000000..02c9b2e --- /dev/null +++ b/roles/ttrss/defaults/main.yml @@ -0,0 +1,16 @@ +ttrss_freeipa_plugin_version: HEAD + +ttrss_update_on_calendar: weekly + +ttrss_server_name: '{{ ansible_fqdn }}' +ttrss_url: https://{{ ttrss_server_name }} + +ttrss_user: s-ttrss +ttrss_db_host: '{{ postgresql_host }}' +ttrss_db_name: ttrss +ttrss_session_lifetime_sec: 604800 +ttrss_email_from_name: Tiny Tiny RSS +ttrss_email_from_address: ttrss-noreply@{{ email_domain }} + +ttrss_access_group: role-ttrss-access +ttrss_admin_group: role-ttrss-admin diff --git a/roles/ttrss/handlers/main.yml
b/roles/ttrss/handlers/main.yml new file mode 100644 index 0000000..f644426 --- /dev/null +++ b/roles/ttrss/handlers/main.yml @@ -0,0 +1,4 @@ +- name: restart php-fpm + systemd: + name: php-fpm + state: restarted diff --git a/roles/ttrss/tasks/database.yml b/roles/ttrss/tasks/database.yml new file mode 100644 index 0000000..ca20eeb --- /dev/null +++ b/roles/ttrss/tasks/database.yml @@ -0,0 +1,26 @@ +- name: create database + postgresql_db: + name: '{{ ttrss_db_name }}' + state: present + delegate_to: "{{ postgresql_inventory_host }}" + become: yes + become_user: postgres + +- name: create database user + postgresql_user: + name: '{{ ttrss_user }}' + db: '{{ ttrss_db_name }}' + priv: ALL + state: present + delegate_to: "{{ postgresql_inventory_host }}" + become: yes + become_user: postgres + +- name: update database schema + command: php {{ ttrss_home }}/update.php --update-schema=force-yes + become: yes + become_user: apache + environment: + GSS_USE_PROXY: 'yes' + register: ttrss_update_schema + changed_when: ttrss_update_schema.stdout is not search('Database schema is already at latest version') diff --git a/roles/ttrss/tasks/freeipa.yml b/roles/ttrss/tasks/freeipa.yml new file mode 100644 index 0000000..a8d4ddf --- /dev/null +++ b/roles/ttrss/tasks/freeipa.yml @@ -0,0 +1,46 @@ +- name: create user + ipauser: + ipaadmin_principal: '{{ ipa_user }}' + ipaadmin_password: '{{ ipa_pass }}' + name: '{{ ttrss_user }}' + loginshell: /sbin/nologin + homedir: '{{ ttrss_home }}' + givenname: TinyTinyRSS + sn: Service Account + state: present + run_once: yes + +- name: retrieve user keytab + include_role: + name: freeipa_keytab + vars: + keytab_principal: '{{ ttrss_user }}' + keytab_path: '{{ ttrss_keytab }}' + +- name: configure gssproxy for kerberized postgres + include_role: + name: gssproxy_client + vars: + gssproxy_name: ttrss + gssproxy_section: service/php-fpm + gssproxy_client_keytab: '{{ ttrss_keytab }}' + gssproxy_cred_usage: initiate + gssproxy_euid: apache + +- name: create access group + ipagroup: + ipaadmin_principal: '{{ ipa_user }}' + ipaadmin_password: '{{ ipa_pass }}' + name: '{{ ttrss_access_group }}' + nonposix: yes + state: present + run_once: yes + +- name: create admin group + ipagroup: + ipaadmin_principal: '{{ ipa_user }}' + ipaadmin_password: '{{ ipa_pass }}' + name: '{{ ttrss_admin_group }}' + nonposix: yes + state: present + run_once: yes diff --git a/roles/ttrss/tasks/main.yml b/roles/ttrss/tasks/main.yml new file mode 100644 index 0000000..13cd9b0 --- /dev/null +++ b/roles/ttrss/tasks/main.yml @@ -0,0 +1,96 @@ +- name: install packages + dnf: + name: '{{ ttrss_packages }}' + state: present + +- name: create webroot + file: + path: '{{ ttrss_home }}' + state: directory + +- name: clone git repository + git: + repo: '{{ ttrss_git_repo }}' + dest: '{{ ttrss_home }}' + version: '{{ ttrss_version }}' + update: yes + +- name: set httpd_sys_rw_content_t selinux context for writable directories + sefcontext: + target: '{{ ttrss_home }}/{{ item }}(/.*)?' 
+ setype: httpd_sys_rw_content_t + state: present + loop: '{{ ttrss_writable_dirs }}' + register: ttrss_writeable_sefcontext + +- name: apply selinux context to writeable directories + command: 'restorecon -R {{ ttrss_home }}/{{ item }}' + when: ttrss_writeable_sefcontext.results[index].changed + loop: '{{ ttrss_writable_dirs }}' + loop_control: + index_var: index + +- name: set permissions on writable directories + file: + path: '{{ ttrss_home }}/{{ item }}' + mode: 0775 + owner: root + group: apache + setype: httpd_sys_rw_content_t + loop: '{{ ttrss_writable_dirs }}' + +- import_tasks: freeipa.yml + tags: freeipa + +- name: create auth_freeipa plugin directory + file: + path: '{{ ttrss_home }}/plugins.local/auth_freeipa' + state: directory + +- name: download auth_freeipa plugin + get_url: + url: '{{ ttrss_freeipa_plugin_url }}' + dest: '{{ ttrss_home }}/plugins.local/auth_freeipa/init.php' + +- name: generate config file + template: + src: '{{ ttrss_home[1:] }}/config.php.j2' + dest: '{{ ttrss_home }}/config.php' + +- import_tasks: database.yml + tags: database + +- name: generate systemd unit for updating feeds + template: + src: etc/systemd/system/ttrss.service.j2 + dest: /etc/systemd/system/ttrss.service + register: ttrss_unit + +- name: reload systemd units + systemd: + name: ttrss + state: restarted + daemon_reload: yes + when: ttrss_unit.changed + +- name: start background feed updates + systemd: + name: ttrss + enabled: yes + state: started + +- name: generate update script + template: + src: 'usr/local/sbin/ttrss-update.sh.j2' + dest: '/usr/local/sbin/ttrss-update.sh' + mode: 0555 + +- name: create ttrss-update timer + include_role: + name: systemd_timer + vars: + timer_name: ttrss-update + timer_description: Update ttrss + timer_after: network.target + timer_on_calendar: '{{ ttrss_update_on_calendar }}' + timer_exec: /usr/local/sbin/ttrss-update.sh diff --git a/roles/ttrss/templates/etc/systemd/system/ttrss.service.j2 b/roles/ttrss/templates/etc/systemd/system/ttrss.service.j2 new file mode 100644 index 0000000..53fef07 --- /dev/null +++ b/roles/ttrss/templates/etc/systemd/system/ttrss.service.j2 @@ -0,0 +1,18 @@ +[Unit] +Description=Tiny Tiny RSS background feed updates +Wants=gssproxy.service +After=network-online.target nss-lookup.target gssproxy.service +StartLimitIntervalSec=200 +StartLimitBurst=5 + +[Service] +Type=simple +User=apache +Group=apache +Environment=GSS_USE_PROXY=yes +ExecStart=php {{ ttrss_home }}/update_daemon2.php +Restart=on-failure +RestartSec=30 + +[Install] +WantedBy=multi-user.target diff --git a/roles/ttrss/templates/usr/local/sbin/ttrss-update.sh.j2 b/roles/ttrss/templates/usr/local/sbin/ttrss-update.sh.j2 new file mode 100644 index 0000000..663558c --- /dev/null +++ b/roles/ttrss/templates/usr/local/sbin/ttrss-update.sh.j2 @@ -0,0 +1,27 @@ +#!/usr/bin/env bash + +set -eu + +SRCDIR={{ ttrss_home | quote }} + +if (( $EUID != 0 )); then + echo 'must be superuser' 1>&2 + exit 1 +fi + +cd "$SRCDIR" + +git fetch + +local_rev=$(git rev-parse HEAD) +upstream_rev=$(git rev-parse '@{u}') + +echo "local: $local_rev" +echo "upstream: $upstream_rev" + +if [ "$local_rev" != "$upstream_rev" ]; then + git reset --hard HEAD + systemctl restart ttrss +else + echo "ttrss is already up to date" +fi diff --git a/roles/ttrss/templates/var/www/ttrss/config.php.j2 b/roles/ttrss/templates/var/www/ttrss/config.php.j2 new file mode 100644 index 0000000..9b5e108 --- /dev/null +++ b/roles/ttrss/templates/var/www/ttrss/config.php.j2 @@ -0,0 +1,23 @@ + + + AuthType GSSAPI + 
AuthName "FreeIPA Single Sign-On" + GssapiLocalName On + {{ apache_gssapi_session_config }} + {{ apache_ldap_config }} + Require ldap-attribute memberof=cn={{ ttrss_access_group }},{{ freeipa_group_basedn }} + Require ldap-attribute memberof=cn={{ ttrss_admin_group }},{{ freeipa_group_basedn }} + ErrorDocument 401 /index.php?noext=1 + + diff --git a/roles/tuned/defaults/main.yml b/roles/tuned/defaults/main.yml new file mode 100644 index 0000000..f02c787 --- /dev/null +++ b/roles/tuned/defaults/main.yml @@ -0,0 +1 @@ +tuned_profile: balanced diff --git a/roles/tuned/tasks/main.yml b/roles/tuned/tasks/main.yml new file mode 100644 index 0000000..36d2f16 --- /dev/null +++ b/roles/tuned/tasks/main.yml @@ -0,0 +1,19 @@ +- name: install tuned + dnf: + name: tuned + state: present + +- name: start tuned + systemd: + name: tuned + enabled: yes + state: started + +- name: get current tuned profile + slurp: + src: /etc/tuned/active_profile + register: tuned_active_profile + +- name: set tuned profile + command: 'tuned-adm profile {{ tuned_profile }}' + when: tuned_active_profile.content | b64decode | trim != tuned_profile diff --git a/roles/udev/defaults/main.yml b/roles/udev/defaults/main.yml new file mode 100644 index 0000000..263aac9 --- /dev/null +++ b/roles/udev/defaults/main.yml @@ -0,0 +1,2 @@ +udev_rules: [] +udev_pci_powersave_blacklist: [] diff --git a/roles/udev/handlers/main.yml b/roles/udev/handlers/main.yml new file mode 100644 index 0000000..e272cfb --- /dev/null +++ b/roles/udev/handlers/main.yml @@ -0,0 +1,9 @@ +- name: restart systemd-udevd + systemd: + name: systemd-udevd + state: restarted + listen: reload udev rules + +- name: run udev triggers + command: udevadm trigger + listen: reload udev rules diff --git a/roles/udev/tasks/main.yml b/roles/udev/tasks/main.yml new file mode 100644 index 0000000..3049b0a --- /dev/null +++ b/roles/udev/tasks/main.yml @@ -0,0 +1,13 @@ +- name: generate udev rules + copy: + content: | + {{ item.rule }} + dest: "/etc/udev/rules.d/{{ item.name | replace(' ', '_') }}.rules" + loop: '{{ udev_rules }}' + notify: reload udev rules + +- name: generate powersave blacklist + template: + src: etc/udev/rules.d/pci_pm.rules.j2 + dest: /etc/udev/rules.d/pci_pm.rules + notify: reload udev rules diff --git a/roles/udev/templates/etc/udev/rules.d/pci_pm.rules.j2 b/roles/udev/templates/etc/udev/rules.d/pci_pm.rules.j2 new file mode 100644 index 0000000..5400241 --- /dev/null +++ b/roles/udev/templates/etc/udev/rules.d/pci_pm.rules.j2 @@ -0,0 +1,5 @@ +{% for vendordevice in udev_pci_powersave_blacklist %} +SUBSYSTEM=="pci", ATTR{vendor}=="0x{{ vendordevice | split(':') | first }}", ATTR{device}=="0x{{ vendordevice | split(':') | last }}", ATTR{power/control}="on", GOTO="pci_pm_end" +{% endfor %} +SUBSYSTEM=="pci", ATTR{power/control}="auto" +LABEL="pci_pm_end" diff --git a/roles/unifi/files/etc/rsyslog.d/unifi.conf b/roles/unifi/files/etc/rsyslog.d/unifi.conf new file mode 100644 index 0000000..9a053cc --- /dev/null +++ b/roles/unifi/files/etc/rsyslog.d/unifi.conf @@ -0,0 +1,4 @@ +input(type="imfile" + addMetadata="on" + file="/var/log/unifi/server.log" + tag="unifi") diff --git a/roles/unifi/handlers/main.yml b/roles/unifi/handlers/main.yml new file mode 100644 index 0000000..00e3a00 --- /dev/null +++ b/roles/unifi/handlers/main.yml @@ -0,0 +1,9 @@ +- name: restart unifi + systemd: + name: unifi + state: restarted + +- name: restart rsyslog + systemd: + name: rsyslog + state: restarted diff --git a/roles/unifi/meta/main.yml b/roles/unifi/meta/main.yml new file 
mode 100644 index 0000000..4ceca8e --- /dev/null +++ b/roles/unifi/meta/main.yml @@ -0,0 +1,8 @@ +dependencies: + - role: yum + yum_repositories: + - epel + - rpmfusion-free + - rpmfusion-nonfree + - mongodb-4.4 + tags: yum diff --git a/roles/unifi/tasks/main.yml b/roles/unifi/tasks/main.yml new file mode 100644 index 0000000..683068e --- /dev/null +++ b/roles/unifi/tasks/main.yml @@ -0,0 +1,81 @@ +- name: install packages + dnf: + name: '{{ unifi_packages }}' + state: present + +- name: create SELinux policy for mongodb + include_role: + name: selinux_policy + apply: + tags: selinux + vars: + selinux_policy_name: mongodb_cgroup_memory + selinux_policy_te: '{{ unifi_mongodb_te }}' + tags: selinux + +- name: start unifi controller + systemd: + name: unifi + enabled: yes + state: started + +- name: create default site + file: + path: '/var/lib/unifi/{{ item }}' + owner: unifi + group: unifi + state: directory + mode: 0750 + loop: + - data + - data/sites + - data/sites/default + +- name: opt-out of ubiquiti analytics + lineinfile: + create: yes + path: /var/lib/unifi/data/sites/default/config.properties + regexp: ^config.system_cfg.1=system.analytics.anonymous= + line: config.system_cfg.1=system.analytics.anonymous=disabled + owner: unifi + group: unifi + mode: 0640 + notify: restart unifi + +- name: open firewall ports + firewalld: + permanent: yes + immediate: yes + service: unifi + state: enabled + tags: firewalld + +- name: forward http ports + firewalld: + permanent: yes + immediate: yes + rich_rule: 'rule family={{ item[0] }} forward-port port={{ item[1][0] }} protocol=tcp to-port={{ item[1][1] }}' + state: enabled + loop: "{{ ['ipv4', 'ipv6'] | product([[80, 8080], [443, 8443]]) }}" + tags: firewalld + +- name: generate certificate hook script + template: + src: '{{ unifi_certificate_hook_path[1:] }}.j2' + dest: '{{ unifi_certificate_hook_path }}' + mode: 0555 + +- name: request TLS certificate + include_role: + name: getcert_request + vars: + certificate_service: unifi + certificate_path: '{{ unifi_certificate_path }}' + certificate_key_path: '{{ unifi_certificate_key_path }}' + certificate_hook: '{{ unifi_certificate_hook_path }}' + +- name: log to rsyslog + copy: + src: etc/rsyslog.d/unifi.conf + dest: /etc/rsyslog.d/unifi.conf + notify: restart rsyslog diff --git a/roles/unifi/templates/usr/local/sbin/unifi-certificate-update.sh.j2 b/roles/unifi/templates/usr/local/sbin/unifi-certificate-update.sh.j2 new file mode 100644 index 0000000..becb349 --- /dev/null +++ b/roles/unifi/templates/usr/local/sbin/unifi-certificate-update.sh.j2 @@ -0,0 +1,33 @@ +#!/bin/bash + +exec 1> >(logger -s -t $(basename "$0")) 2>&1 + +UNIFI_KEYSTORE='{{ unifi_keystore }}' +CERT_PATH='{{ unifi_certificate_path }}' +CA_PATH='{{ unifi_certificate_ca_path }}' +KEY_PATH='{{ unifi_certificate_key_path }}' +PKCS12_PATH='/etc/pki/tls/private/unifi.p12' +PASSWORD='aircontrolenterprise' + +openssl pkcs12 \ + -export \ + -in "$CERT_PATH" \ + -inkey "$KEY_PATH" \ + -out "$PKCS12_PATH" \ + -name unifi \ + -CAfile "$CA_PATH" \ + -caname root \ + -password pass:"$PASSWORD" + +keytool \ + -importkeystore \ + -deststorepass "$PASSWORD" \ + -destkeypass "$PASSWORD" \ + -destkeystore "$UNIFI_KEYSTORE" \ + -srckeystore "$PKCS12_PATH" \ + -srcstoretype PKCS12 \ + -srcstorepass "$PASSWORD" \ + -alias unifi \ + -noprompt + +systemctl restart unifi diff --git a/roles/unifi/vars/main.yml b/roles/unifi/vars/main.yml new file mode 100644 index 0000000..ee4362f --- /dev/null +++ b/roles/unifi/vars/main.yml @@ -0,0 +1,41 @@ 
+unifi_packages: + - java-11-openjdk-headless + - unifi + - mongodb-org-server + +unifi_keystore: /var/lib/unifi/data/keystore +unifi_certificate_hook_path: /usr/local/sbin/unifi-certificate-update.sh +unifi_certificate_path: /etc/pki/tls/certs/unifi.pem +unifi_certificate_key_path: /etc/pki/tls/private/unifi.key +unifi_certificate_ca_path: /etc/ipa/ca.crt + +unifi_autobackup_dir: /var/lib/unifi/data/backup/autobackup + +unifi_archive_shell: >- + cp --preserve=timestamps {{ unifi_autobackup_dir | quote }}/*.unf . + +unifi_mongodb_te: | + require { + type cgroup_t; + type configfs_t; + class dir { search getattr }; + class file { getattr open read }; + type file_type; + type mongod_t; + type proc_net_t; + type sysctl_fs_t; + type sysctl_net_t; + type var_lib_nfs_t; + } + + #============= mongod_t ============== + allow mongod_t cgroup_t:dir { search getattr }; + allow mongod_t cgroup_t:file { getattr open read }; + allow mongod_t configfs_t:dir getattr; + allow mongod_t file_type:dir { getattr search }; + allow mongod_t file_type:file getattr; + allow mongod_t proc_net_t:file { open read }; + allow mongod_t sysctl_fs_t:dir search; + allow mongod_t sysctl_net_t:dir search; + allow mongod_t sysctl_net_t:file { getattr read open }; + allow mongod_t var_lib_nfs_t:dir search; diff --git a/roles/vaultwarden/defaults/main.yml b/roles/vaultwarden/defaults/main.yml new file mode 100644 index 0000000..8652adc --- /dev/null +++ b/roles/vaultwarden/defaults/main.yml @@ -0,0 +1,21 @@ +vaultwarden_version: 1.27.0 +vaultwarden_web_version: 2023.1.0 + +vaultwarden_port: 8008 +vaultwarden_websocket_port: 8009 + +vaultwarden_server_name: '{{ ansible_fqdn }}' +vaultwarden_url: https://{{ vaultwarden_server_name }} + +vaultwarden_user: s-vaultwarden +vaultwarden_db_name: vaultwarden +vaultwarden_db_host: '{{ postgresql_host }}' +vaultwarden_verify_signups: true +vaultwarden_signup_domain_whitelist: ['{{ email_domain }}'] +vaultwarden_invitations_allowed: false +vaultwarden_user_attachment_limit_kb: 1048576 # 1 GB +vaultwarden_admin_group: role-bitwarden-admin + +vaultwarden_smtp_host: '{{ mail_host }}' +vaultwarden_smtp_from: bitwarden-noreply@{{ email_domain }} +vaultwarden_smtp_from_name: Bitwarden diff --git a/roles/vaultwarden/handlers/main.yml b/roles/vaultwarden/handlers/main.yml new file mode 100644 index 0000000..6f8f7ec --- /dev/null +++ b/roles/vaultwarden/handlers/main.yml @@ -0,0 +1,4 @@ +- name: restart vaultwarden + systemd: + name: vaultwarden + state: restarted diff --git a/roles/vaultwarden/tasks/database.yml b/roles/vaultwarden/tasks/database.yml new file mode 100644 index 0000000..313232e --- /dev/null +++ b/roles/vaultwarden/tasks/database.yml @@ -0,0 +1,18 @@ +- name: create database + postgresql_db: + name: '{{ vaultwarden_db_name }}' + state: present + delegate_to: "{{ postgresql_inventory_host }}" + become: yes + become_user: postgres + register: vaultwarden_db + +- name: create database user + postgresql_user: + name: '{{ vaultwarden_user }}' + db: '{{ vaultwarden_db_name }}' + priv: ALL + state: present + delegate_to: "{{ postgresql_inventory_host }}" + become: yes + become_user: postgres diff --git a/roles/vaultwarden/tasks/freeipa.yml b/roles/vaultwarden/tasks/freeipa.yml new file mode 100644 index 0000000..aea52e6 --- /dev/null +++ b/roles/vaultwarden/tasks/freeipa.yml @@ -0,0 +1,38 @@ +- name: create user + ipauser: + ipaadmin_principal: '{{ ipa_user }}' + ipaadmin_password: '{{ ipa_pass }}' + name: '{{ vaultwarden_user }}' + loginshell: /sbin/nologin + homedir: '{{ 
vaultwarden_home }}' + givenname: Vaultwarden + sn: Service Account + state: present + run_once: yes + +- name: retrieve user keytab + include_role: + name: freeipa_keytab + vars: + keytab_principal: '{{ vaultwarden_user }}' + keytab_path: '{{ vaultwarden_keytab }}' + +- name: configure gssproxy for kerberized postgres + include_role: + name: gssproxy_client + vars: + gssproxy_name: vaultwarden + gssproxy_section: service/vaultwarden + gssproxy_client_keytab: '{{ vaultwarden_keytab }}' + gssproxy_cred_usage: initiate + gssproxy_euid: '{{ vaultwarden_user }}' + +- name: create admin group + ipagroup: + ipaadmin_principal: '{{ ipa_user }}' + ipaadmin_password: '{{ ipa_pass }}' + name: '{{ vaultwarden_admin_group }}' + description: Bitwarden Administrators + nonposix: yes + state: present + run_once: yes diff --git a/roles/vaultwarden/tasks/main.yml b/roles/vaultwarden/tasks/main.yml new file mode 100644 index 0000000..a4ad144 --- /dev/null +++ b/roles/vaultwarden/tasks/main.yml @@ -0,0 +1,100 @@ +- name: install packages + dnf: + name: '{{ vaultwarden_packages }}' + state: present + +- import_tasks: freeipa.yml + tags: freeipa + +- name: create home directory + file: + path: '{{ vaultwarden_home }}' + owner: '{{ vaultwarden_user }}' + group: '{{ vaultwarden_user }}' + mode: 0755 + state: directory + +- name: download rustup + get_url: + url: https://sh.rustup.rs + dest: '{{ vaultwarden_home }}/rustup.sh' + mode: 0555 + become: yes + become_user: '{{ vaultwarden_user }}' + register: rustup + +- name: install rust + command: '{{ vaultwarden_home }}/rustup.sh -y' + when: rustup.changed + become: yes + become_user: '{{ vaultwarden_user }}' + +- name: clone git repository + git: + repo: '{{ vaultwarden_git_repo }}' + dest: '{{ vaultwarden_source_dir }}' + version: '{{ vaultwarden_version }}' + force: yes + update: yes + become: yes + become_user: '{{ vaultwarden_user }}' + register: vaultwarden_git + notify: restart vaultwarden + +- name: build vaultwarden + shell: + cmd: 'source ${HOME}/.cargo/env && cargo build --features postgresql --release' + chdir: '{{ vaultwarden_source_dir }}' + become: yes + become_user: '{{ vaultwarden_user }}' + when: vaultwarden_git.changed + +- name: create web vault directory + file: + path: '{{ vaultwarden_web_dir }}' + owner: '{{ vaultwarden_user }}' + group: '{{ vaultwarden_user }}' + mode: 0755 + state: directory + +- name: extract web vault + unarchive: + src: '{{ vaultwarden_web_url }}' + dest: '{{ vaultwarden_web_dir }}' + remote_src: yes + extra_opts: --strip-components=1 + become: yes + become_user: '{{ vaultwarden_user }}' + +- name: create data directory + file: + path: '{{ vaultwarden_data_dir }}' + owner: '{{ vaultwarden_user }}' + group: '{{ vaultwarden_user }}' + mode: 0700 + state: directory + +- name: create systemd unit + template: + src: etc/systemd/system/vaultwarden.service.j2 + dest: /etc/systemd/system/vaultwarden.service + notify: restart vaultwarden + +- name: reload systemd daemons + systemd: + daemon_reload: yes + +- name: generate environment file + template: + src: etc/sysconfig/vaultwarden.j2 + dest: /etc/sysconfig/vaultwarden + notify: restart vaultwarden + +- import_tasks: database.yml + tags: database + +- name: enable systemd unit + systemd: + name: vaultwarden + enabled: yes + state: started diff --git a/roles/vaultwarden/templates/etc/sysconfig/vaultwarden.j2 b/roles/vaultwarden/templates/etc/sysconfig/vaultwarden.j2 new file mode 100644 index 0000000..61d50e7 --- /dev/null +++ 
b/roles/vaultwarden/templates/etc/sysconfig/vaultwarden.j2 @@ -0,0 +1,48 @@ +ROCKET_CLI_COLORS=false + +LOG_LEVEL=warn +EXTENDED_LOGGING=true + +IP_HEADER=X-Forwarded-For + +DATABASE_URL=postgresql://{{ vaultwarden_user }}@{{ vaultwarden_db_host }}/{{ vaultwarden_db_name }} + +WEBSOCKET_ENABLED=true +WEBSOCKET_ADDRESS=127.0.0.1 +WEBSOCKET_PORT={{ vaultwarden_websocket_port }} + +SIGNUPS_ALLOWED={{ 'false' if vaultwarden_signup_domain_whitelist else 'true' }} +SIGNUPS_VERIFY={{ vaultwarden_verify_signups | lower }} + +{% if vaultwarden_signup_domain_whitelist %} +SIGNUPS_DOMAINS_WHITELIST={{ vaultwarden_signup_domain_whitelist | join(',') }} +{% endif %} + +DISABLE_ADMIN_TOKEN=true + +INVITATIONS_ALLOWED={{ vaultwarden_invitations_allowed | lower }} + +{% if vaultwarden_user_attachment_limit_kb %} +USER_ATTACHMENT_LIMIT={{ vaultwarden_user_attachment_limit_kb }} +{% endif %} + +DOMAIN={{ vaultwarden_url }} + +{% if vaultwarden_yubico_client_id is defined %} +YUBICO_CLIENT_ID={{ vaultwarden_yubico_client_id }} +YUBICO_SECRET_KEY={{ vaultwarden_yubico_secret_key }} +{% endif %} + +ROCKET_ADDRESS=127.0.0.1 +ROCKET_PORT={{ vaultwarden_port }} + +SMTP_HOST={{ vaultwarden_smtp_host }} +SMTP_FROM={{ vaultwarden_smtp_from }} +SMTP_FROM_NAME={{ vaultwarden_smtp_from_name }} +SMTP_SECURITY=off +SMTP_SSL=false +SMTP_PORT=25 + +{% if vaultwarden_haveibeenpwned_api_key is defined %} +HIBP_API_KEY={{ vaultwarden_haveibeenpwned_api_key }} +{% endif %} diff --git a/roles/vaultwarden/templates/etc/systemd/system/vaultwarden.service.j2 b/roles/vaultwarden/templates/etc/systemd/system/vaultwarden.service.j2 new file mode 100644 index 0000000..883359b --- /dev/null +++ b/roles/vaultwarden/templates/etc/systemd/system/vaultwarden.service.j2 @@ -0,0 +1,35 @@ +[Unit] +Description=Vaultwarden Server +Documentation=https://github.com/dani-garcia/vaultwarden +Wants=gssproxy.service +After=network-online.target nss-user-lookup.target gssproxy.service + +[Service] +NoNewPrivileges=yes +PrivateTmp=yes +PrivateDevices=yes +DevicePolicy=closed +ProtectSystem=strict +ProtectHome=yes +ProtectControlGroups=yes +ProtectKernelModules=yes +ProtectKernelTunables=yes +RestrictAddressFamilies=AF_UNIX AF_INET AF_INET6 +RestrictNamespaces=yes +RestrictRealtime=yes +RestrictSUIDSGID=yes +LockPersonality=yes +ReadWritePaths={{ vaultwarden_data_dir }} + +User={{ vaultwarden_user }} +Group={{ vaultwarden_user }} + +Environment=DATA_FOLDER={{ vaultwarden_data_dir }} +Environment=WEB_VAULT_FOLDER={{ vaultwarden_web_dir }} +Environment=GSS_USE_PROXY=yes +EnvironmentFile=/etc/sysconfig/vaultwarden + +ExecStart={{ vaultwarden_source_dir }}/target/release/vaultwarden + +[Install] +WantedBy=multi-user.target diff --git a/roles/vaultwarden/vars/main.yml b/roles/vaultwarden/vars/main.yml new file mode 100644 index 0000000..5c232ad --- /dev/null +++ b/roles/vaultwarden/vars/main.yml @@ -0,0 +1,54 @@ +vaultwarden_packages: + - mariadb-connector-c + - libpq + - libpq-devel + - openssl-devel + - git + - npm + - nodejs + - gcc + +vaultwarden_home: /opt/vaultwarden + +vaultwarden_git_repo: https://github.com/dani-garcia/vaultwarden +vaultwarden_source_dir: '{{ vaultwarden_home }}/vaultwarden' + +vaultwarden_web_url: https://github.com/dani-garcia/bw_web_builds/releases/download/v{{ vaultwarden_web_version }}/bw_web_v{{ vaultwarden_web_version }}.tar.gz +vaultwarden_web_dir: '{{ vaultwarden_home }}/web-vault' + +vaultwarden_data_dir: /var/lib/vaultwarden +vaultwarden_keytab: /var/lib/gssproxy/clients/{{ vaultwarden_user }}.keytab + +vaultwarden_admin_hbac_hostgroup: bitwarden_servers
+vaultwarden_admin_hbac_service: bitwarden-admin + +vaultwarden_apache_config: | + {{ apache_proxy_config }} + <Location /> + ProxyPass http://127.0.0.1:{{ vaultwarden_port }}/ + ProxyPassReverse http://127.0.0.1:{{ vaultwarden_port }}/ + </Location> + + <Location /notifications/hub> + ProxyPass http://127.0.0.1:{{ vaultwarden_websocket_port }}/ + ProxyPassReverse http://127.0.0.1:{{ vaultwarden_websocket_port }}/ + + RewriteEngine on + RewriteCond %{HTTP:Upgrade} websocket [NC] + RewriteCond %{HTTP:Connection} upgrade [NC] + RewriteRule ^/?(.*) "ws://127.0.0.1:{{ vaultwarden_websocket_port }}/$1" [P,L] + </Location> + + <Location /notifications/hub/negotiate> + ProxyPass http://127.0.0.1:{{ vaultwarden_port }}/ + ProxyPassReverse http://127.0.0.1:{{ vaultwarden_port }}/ + </Location> + + <Location /admin> + AuthType GSSAPI + AuthName "FreeIPA Single Sign-On" + GssapiLocalName On + {{ apache_gssapi_session_config }} + {{ apache_ldap_config }} + Require ldap-attribute memberof=cn={{ vaultwarden_admin_group }},{{ freeipa_group_basedn }} + </Location> diff --git a/roles/yum/files/etc/pki/rpm-gpg/RPM-GPG-KEY-EPEL-8 b/roles/yum/files/etc/pki/rpm-gpg/RPM-GPG-KEY-EPEL-8 new file mode 100644 index 0000000..30b69a6 --- /dev/null +++ b/roles/yum/files/etc/pki/rpm-gpg/RPM-GPG-KEY-EPEL-8 @@ -0,0 +1,28 @@ +-----BEGIN PGP PUBLIC KEY BLOCK----- + +mQINBFz3zvsBEADJOIIWllGudxnpvJnkxQz2CtoWI7godVnoclrdl83kVjqSQp+2 +dgxuG5mUiADUfYHaRQzxKw8efuQnwxzU9kZ70ngCxtmbQWGmUmfSThiapOz00018 ++eo5MFabd2vdiGo1y+51m2sRDpN8qdCaqXko65cyMuLXrojJHIuvRA/x7iqOrRfy +a8x3OxC4PEgl5pgDnP8pVK0lLYncDEQCN76D9ubhZQWhISF/zJI+e806V71hzfyL +/Mt3mQm/li+lRKU25Usk9dWaf4NH/wZHMIPAkVJ4uD4H/uS49wqWnyiTYGT7hUbi +ecF7crhLCmlRzvJR8mkRP6/4T/F3tNDPWZeDNEDVFUkTFHNU6/h2+O398MNY/fOh +yKaNK3nnE0g6QJ1dOH31lXHARlpFOtWt3VmZU0JnWLeYdvap4Eff9qTWZJhI7Cq0 +Wm8DgLUpXgNlkmquvE7P2W5EAr2E5AqKQoDbfw/GiWdRvHWKeNGMRLnGI3QuoX3U +pAlXD7v13VdZxNydvpeypbf/AfRyrHRKhkUj3cU1pYkM3DNZE77C5JUe6/0nxbt4 +ETUZBTgLgYJGP8c7PbkVnO6I/KgL1jw+7MW6Az8Ox+RXZLyGMVmbW/TMc8haJfKL +MoUo3TVk8nPiUhoOC0/kI7j9ilFrBxBU5dUtF4ITAWc8xnG6jJs/IsvRpQARAQAB +tChGZWRvcmEgRVBFTCAoOCkgPGVwZWxAZmVkb3JhcHJvamVjdC5vcmc+iQI4BBMB +AgAiBQJc9877AhsPBgsJCAcDAgYVCAIJCgsEFgIDAQIeAQIXgAAKCRAh6kWrL4bW +oWagD/4xnLWws34GByVDQkjprk0fX7Iyhpm/U7BsIHKspHLL+Y46vAAGY/9vMvdE +0fcr9Ek2Zp7zE1RWmSCzzzUgTG6BFoTG1H4Fho/7Z8BXK/jybowXSZfqXnTOfhSF +alwDdwlSJvfYNV9MbyvbxN8qZRU1z7PEWZrIzFDDToFRk0R71zHpnPTNIJ5/YXTw +NqU9OxII8hMQj4ufF11040AJQZ7br3rzerlyBOB+Jd1zSPVrAPpeMyJppWFHSDAI +WK6x+am13VIInXtqB/Cz4GBHLFK5d2/IYspVw47Solj8jiFEtnAq6+1Aq5WH3iB4 +bE2e6z00DSF93frwOyWN7WmPIoc2QsNRJhgfJC+isGQAwwq8xAbHEBeuyMG8GZjz +xohg0H4bOSEujVLTjH1xbAG4DnhWO/1VXLX+LXELycO8ZQTcjj/4AQKuo4wvMPrv +9A169oETG+VwQlNd74VBPGCvhnzwGXNbTK/KH1+WRH0YSb+41flB3NKhMSU6dGI0 +SGtIxDSHhVVNmx2/6XiT9U/znrZsG5Kw8nIbbFz+9MGUUWgJMsd1Zl9R8gz7V9fp +n7L7y5LhJ8HOCMsY/Z7/7HUs+t/A1MI4g7Q5g5UuSZdgi0zxukiWuCkLeAiAP4y7 +zKK4OjJ644NDcWCHa36znwVmkz3ixL8Q0auR15Oqq2BjR/fyog== +=84m8 +-----END PGP PUBLIC KEY BLOCK----- diff --git a/roles/yum/files/etc/pki/rpm-gpg/RPM-GPG-KEY-EPEL-9 b/roles/yum/files/etc/pki/rpm-gpg/RPM-GPG-KEY-EPEL-9 new file mode 100644 index 0000000..234c12f --- /dev/null +++ b/roles/yum/files/etc/pki/rpm-gpg/RPM-GPG-KEY-EPEL-9 @@ -0,0 +1,29 @@ +-----BEGIN PGP PUBLIC KEY BLOCK----- + +mQINBGE3mOsBEACsU+XwJWDJVkItBaugXhXIIkb9oe+7aadELuVo0kBmc3HXt/Yp +CJW9hHEiGZ6z2jwgPqyJjZhCvcAWvgzKcvqE+9i0NItV1rzfxrBe2BtUtZmVcuE6 +2b+SPfxQ2Hr8llaawRjt8BCFX/ZzM4/1Qk+EzlfTcEcpkMf6wdO7kD6ulBk/tbsW +DHX2lNcxszTf+XP9HXHWJlA2xBfP+Dk4gl4DnO2Y1xR0OSywE/QtvEbN5cY94ieu +n7CBy29AleMhmbnx9pw3NyxcFIAsEZHJoU4ZW9ulAJ/ogttSyAWeacW7eJGW31/Z +39cS+I4KXJgeGRI20RmpqfH0tuT+X5Da59YpjYxkbhSK3HYBVnNPhoJFUc2j5iKy
+XLgkapu1xRnEJhw05kr4LCbud0NTvfecqSqa+59kuVc+zWmfTnGTYc0PXZ6Oa3rK +44UOmE6eAT5zd/ToleDO0VesN+EO7CXfRsm7HWGpABF5wNK3vIEF2uRr2VJMvgqS +9eNwhJyOzoca4xFSwCkc6dACGGkV+CqhufdFBhmcAsUotSxe3zmrBjqA0B/nxIvH +DVgOAMnVCe+Lmv8T0mFgqZSJdIUdKjnOLu/GRFhjDKIak4jeMBMTYpVnU+HhMHLq +uDiZkNEvEEGhBQmZuI8J55F/a6UURnxUwT3piyi3Pmr2IFD7ahBxPzOBCQARAQAB +tCdGZWRvcmEgKGVwZWw5KSA8ZXBlbEBmZWRvcmFwcm9qZWN0Lm9yZz6JAk4EEwEI +ADgWIQT/itE0RZcQbs6BO5GKOHK/MihGfAUCYTeY6wIbDwULCQgHAgYVCgkICwIE +FgIDAQIeAQIXgAAKCRCKOHK/MihGfFX/EACBPWv20+ttYu1A5WvtHJPzwbj0U4yF +3zTQpBglQ2UfkRpYdipTlT3Ih6j5h2VmgRPtINCc/ZE28adrWpBoeFIS2YAKOCLC +nZYtHl2nCoLq1U7FSttUGsZ/t8uGCBgnugTfnIYcmlP1jKKA6RJAclK89evDQX5n +R9ZD+Cq3CBMlttvSTCht0qQVlwycedH8iWyYgP/mF0W35BIn7NuuZwWhgR00n/VG +4nbKPOzTWbsP45awcmivdrS74P6mL84WfkghipdmcoyVb1B8ZP4Y/Ke0RXOnLhNe +CfrXXvuW+Pvg2RTfwRDtehGQPAgXbmLmz2ZkV69RGIr54HJv84NDbqZovRTMr7gL +9k3ciCzXCiYQgM8yAyGHV0KEhFSQ1HV7gMnt9UmxbxBE2pGU7vu3CwjYga5DpwU7 +w5wu1TmM5KgZtZvuWOTDnqDLf0cKoIbW8FeeCOn24elcj32bnQDuF9DPey1mqcvT +/yEo/Ushyz6CVYxN8DGgcy2M9JOsnmjDx02h6qgWGWDuKgb9jZrvRedpAQCeemEd +fhEs6ihqVxRFl16HxC4EVijybhAL76SsM2nbtIqW1apBQJQpXWtQwwdvgTVpdEtE +r4ArVJYX5LrswnWEQMOelugUG6S3ZjMfcyOa/O0364iY73vyVgaYK+2XtT2usMux +VL469Kj5m13T6w== +=Mjs/ +-----END PGP PUBLIC KEY BLOCK----- diff --git a/roles/yum/files/etc/pki/rpm-gpg/RPM-GPG-KEY-Rocky-8 b/roles/yum/files/etc/pki/rpm-gpg/RPM-GPG-KEY-Rocky-8 new file mode 100644 index 0000000..28ce769 --- /dev/null +++ b/roles/yum/files/etc/pki/rpm-gpg/RPM-GPG-KEY-Rocky-8 @@ -0,0 +1,29 @@ +-----BEGIN PGP PUBLIC KEY BLOCK----- + +mQINBGAofzYBEAC6yS1azw6f3wmaVd//3aSy6O2c9+jeetulRQvg2LvhRRS1eNqp +/x9tbBhfohu/tlDkGpYHV7diePgMml9SZDy1sKlI3tDhx6GZ3xwF0fd1vWBZpmNk +D9gRkUmYBeLotmcXQZ8ZpWLicosFtDpJEYpLUhuIgTKwt4gxJrHvkWsGQiBkJxKD +u3/RlL4IYA3Ot9iuCBflc91EyAw1Yj0gKcDzbOqjvlGtS3ASXgxPqSfU0uLC9USF +uKDnP2tcnlKKGfj0u6VkqISliSuRAzjlKho9Meond+mMIFOTT6qp4xyu+9Dj3IjZ +IC6rBXRU3xi8z0qYptoFZ6hx70NV5u+0XUzDMXdjQ5S859RYJKijiwmfMC7gZQAf +OkdOcicNzen/TwD/slhiCDssHBNEe86Wwu5kmDoCri7GJlYOlWU42Xi0o1JkVltN +D8ZId+EBDIms7ugSwGOVSxyZs43q2IAfFYCRtyKHFlgHBRe9/KTWPUrnsfKxGJgC +Do3Yb63/IYTvfTJptVfhQtL1AhEAeF1I+buVoJRmBEyYKD9BdU4xQN39VrZKziO3 +hDIGng/eK6PaPhUdq6XqvmnsZ2h+KVbyoj4cTo2gKCB2XA7O2HLQsuGduHzYKNjf +QR9j0djjwTrsvGvzfEzchP19723vYf7GdcLvqtPqzpxSX2FNARpCGXBw9wARAQAB +tDNSZWxlYXNlIEVuZ2luZWVyaW5nIDxpbmZyYXN0cnVjdHVyZUByb2NreWxpbnV4 +Lm9yZz6JAk4EEwEIADgWIQRwUcRwqSn0VM6+N7cVr12sbXRaYAUCYCh/NgIbDwUL +CQgHAgYVCgkICwIEFgIDAQIeAQIXgAAKCRAVr12sbXRaYLFmEACSMvoO1FDdyAbu +1m6xEzDhs7FgnZeQNzLZECv2j+ggFSJXezlNVOZ5I1I8umBan2ywfKQD8M+IjmrW +k9/7h9i54t8RS/RN7KNo7ECGnKXqXDPzBBTs1Gwo1WzltAoaDKUfXqQ4oJ4aCP/q +/XPVWEzgpJO1XEezvCq8VXisutyDiXEjjMIeBczxb1hbamQX+jLTIQ1MDJ4Zo1YP +zlUqrHW434XC2b1/WbSaylq8Wk9cksca5J+g3FqTlgiWozyy0uxygIRjb6iTzKXk +V7SYxeXp3hNTuoUgiFkjh5/0yKWCwx7aQqlHar9GjpxmBDAO0kzOlgtTw//EqTwR +KnYZLig9FW0PhwvZJUigr0cvs/XXTTb77z/i/dfHkrjVTTYenNyXogPtTtSyxqca +61fbPf0B/S3N43PW8URXBRS0sykpX4SxKu+PwKCqf+OJ7hMEVAapqzTt1q9T7zyB +QwvCVx8s7WWvXbs2d6ZUrArklgjHoHQcdxJKdhuRmD34AuXWCLW+gH8rJWZpuNl3 ++WsPZX4PvjKDgMw6YMcV7zhWX6c0SevKtzt7WP3XoKDuPhK1PMGJQqQ7spegGB+5 +DZvsJS48Ip0S45Qfmj82ibXaCBJHTNZE8Zs+rdTjQ9DS5qvzRA1sRA1dBb/7OLYE +JmeWf4VZyebm+gc50szsg6Ut2yT8hw== +=AiP8 +-----END PGP PUBLIC KEY BLOCK----- diff --git a/roles/yum/files/etc/pki/rpm-gpg/RPM-GPG-KEY-Rocky-9 b/roles/yum/files/etc/pki/rpm-gpg/RPM-GPG-KEY-Rocky-9 new file mode 100644 index 0000000..6fb617c --- /dev/null +++ b/roles/yum/files/etc/pki/rpm-gpg/RPM-GPG-KEY-Rocky-9 @@ -0,0 +1,31 @@ +-----BEGIN PGP PUBLIC KEY BLOCK----- +Version: resf.keykeeper.v1 +Comment: Keykeeper + 
+xsFNBGJ5RksBEADF/Lzssm7uryV6+VHAgL36klyCVcHwvx9Bk853LBOuHVEZWsme +kbJF3fQG7i7gfCKGuV5XW15xINToe4fBThZteGJziboSZRpkEQ2z3lYcbg34X7+d +co833lkBNgz1v6QO7PmAdY/x76Q6Hx0J9yiJWd+4j+vRi4hbWuh64vUtTd7rPwk8 +0y3g4oK1YT0NR0Xm/QUO9vWmkSTVflQ6y82HhHIUrG+1vQnSOrWaC0O1lqUI3Nuo +b6jTARCmbaPsi+XVQnBbsnPPq6Tblwc+NYJSqj5d9nT0uEXT7Zovj4Je5oWVFXp9 +P1OWkbo2z5XkKjoeobM/zKDESJR78h+YQAN9IOKFjL/u/Gzrk1oEgByCABXOX+H5 +hfucrq5U3bbcKy4e5tYgnnZxqpELv3fN/2l8iZknHEh5aYNT5WXVHpD/8u2rMmwm +I9YTEMueEtmVy0ZV3opUzOlC+3ZUwjmvAJtdfJyeVW/VMy3Hw3Ih0Fij91rO613V +7n72ggVlJiX25jYyT4AXlaGfAOMndJNVgBps0RArOBYsJRPnvfHlLi5cfjVd7vYx +QhGX9ODYuvyJ/rW70dMVikeSjlBDKS08tvdqOgtiYy4yhtY4ijQC9BmCE9H9gOxU +FN297iLimAxr0EVsED96fP96TbDGILWsfJuxAvoqmpkElv8J+P1/F7to2QARAQAB +zU9Sb2NreSBFbnRlcnByaXNlIFNvZnR3YXJlIEZvdW5kYXRpb24gLSBSZWxlYXNl +IGtleSAyMDIyIDxyZWxlbmdAcm9ja3lsaW51eC5vcmc+wsGKBBMBCAA0BQJieUZL +FiEEIcslauFvxUxuZSlJcC1CbTUNJ10CGwMCHgECGQEDCwkHAhUIAxYAAgIiAQAK +CRBwLUJtNQ0nXWQ5D/9472seOyRO6//bQ2ns3w9lE+aTLlJ5CY0GSTb4xNuyv+AD +IXpgvLSMtTR0fp9GV3vMw6QIWsehDqt7O5xKWi+3tYdaXRpb1cvnh8r/oCcvI4uL +k8kImNgsx+Cj+drKeQo03vFxBTDi1BTQFkfEt32fA2Aw5gYcGElM717sNMAMQFEH +P+OW5hYDH4kcLbtUypPXFbcXUbaf6jUjfiEp5lLjqquzAyDPLlkzMr5RVa9n3/rI +R6OQp5loPVzCRZMgDLALBU2TcFXLVP+6hAW8qM77c+q/rOysP+Yd+N7GAd0fvEvA +mfeA4Y6dP0mMRu96EEAJ1qSKFWUul6K6nuqy+JTxktpw8F/IBAz44na17Tf02MJH +GCUWyM0n5vuO5kK+Ykkkwd+v43ZlqDnwG7akDkLwgj6O0QNx2TGkdgt3+C6aHN5S +MiF0pi0qYbiN9LO0e05Ai2r3zTFC/pCaBWlG1ph2jx1pDy4yUVPfswWFNfe5I+4i +CMHPRFsZNYxQnIA2Prtgt2YMwz3VIGI6DT/Z56Joqw4eOfaJTTQSXCANts/gD7qW +D3SZXPc7wQD63TpDEjJdqhmepaTECbxN7x/p+GwIZYWJN+AYhvrfGXfjud3eDu8/ +i+YIbPKH1TAOMwiyxC106mIL705p+ORf5zATZMyB8Y0OvRIz5aKkBDFZM2QN6A== +=PzIf +-----END PGP PUBLIC KEY BLOCK----- diff --git a/roles/yum/files/etc/pki/rpm-gpg/RPM-GPG-KEY-crystal-8 b/roles/yum/files/etc/pki/rpm-gpg/RPM-GPG-KEY-crystal-8 new file mode 100644 index 0000000..0130138 --- /dev/null +++ b/roles/yum/files/etc/pki/rpm-gpg/RPM-GPG-KEY-crystal-8 @@ -0,0 +1,21 @@ +-----BEGIN PGP PUBLIC KEY BLOCK----- +Version: GnuPG v2.0.15 (GNU/Linux) + +mQENBGCKr5QBCADXhCz8qeiL+fnILIae3pGcaXAzsFynb9S86pmWHTIwrZIBHA0y +6T0d8F7ZX4Y7S+I6Gj+mUBi/9j8geF0SMjmHYss6nS8Txs1Ta2Ain+08MzFMss7d +GajwxxqODGoVxUG/4RKLFX1zCnoGmZlVEo9yvBM1eds5xocLyMq5YK9DP/yCVt3m +KHyP5XgRU/pIOyOo3g6+qIkhgynHVYIBuPbFQGEbOuUg7noAwTC9B9pYXSRFq9wk +T/q8rqOBiyO9SWB9gMiem8HNAzUo5TbVp9xPv2pl3mNXwe5te92pjlWdktOsBZuy +TfTgoj3y0HUY48He/z85aJ5j7gX5PU/6arxdABEBAAG0UGRldmVsOmxhbmd1YWdl +czpjcnlzdGFsIE9CUyBQcm9qZWN0IDxkZXZlbDpsYW5ndWFnZXM6Y3J5c3RhbEBi +dWlsZC5vcGVuc3VzZS5vcmc+iQE+BBMBCAAoBQJgiq+UAhsDBQkEHrAABgsJCAcD +AgYVCAIJCgsEFgIDAQIeAQIXgAAKCRDkVq5yhW0UdlBfB/9KrY8UIrQyxk+7Kywa +oQKaOMh8tsRF5QW55gPn4ARIwoIPFzjP0v+iDwpxV1EEBveS1LmAjSeXUzZ2zWIn +kfeG1u3AUDIlpAe2EAc4RVNOl3KTzn+8hPSpRSleLZluJOeZlbRHZq+ORcXhhj+3 +xOotPCjxcN/CF3+Q7y/oukf3ZtFUWMSnXrUE8lunhREBQ66lLl6dRCafEq/k1hWp +pTe40RjMynZ1cZo5T3zBZwhgj4Ix7GZvkQYCsxenvu1Duf+z6QU5IyTsZ+gjxAKH +fYdTYp7IObcywuWT3TLZqj75UdMcE3dwkaK3a56eMc9baPb6ZXb7fKYbfREu/cT6 +FgTHiEYEExECAAYFAmCKr5QACgkQOzARt2udZSNdFQCgtpRzGoKr9VWnhv+/k4pk +Cmp9fycAn0pdJ2xIEsqxOjPBFVDh7Sahecuq +=WWMN +-----END PGP PUBLIC KEY BLOCK----- diff --git a/roles/yum/files/etc/pki/rpm-gpg/RPM-GPG-KEY-mongodb-4.0 b/roles/yum/files/etc/pki/rpm-gpg/RPM-GPG-KEY-mongodb-4.0 new file mode 100644 index 0000000..11c704f --- /dev/null +++ b/roles/yum/files/etc/pki/rpm-gpg/RPM-GPG-KEY-mongodb-4.0 @@ -0,0 +1,30 @@ +-----BEGIN PGP PUBLIC KEY BLOCK----- +Version: GnuPG v1.4.11 (GNU/Linux) + +mQINBFrXrGwBEADKaehZF15XUJo8sea3YWv2w6SEBYNfEPW+W2HYgd77m5g64ILQ 
+mMiEsHCeR8CqzL2g81o5F4yWM1GlWpdYHOjVpSfJKSmWhSYRsXGCEqhFKpbZaMlU +4BWVHF8/rnuA4Mq7hhrU7K+K0Z4ng3r719vgcLN/ygQGPftAISc6eHrNdJMfPHl0 +isNjCqMyQQcU7zbnxKFi0b6qPFpwrz5I0gFfh1xQQ0rTNQruujlvwvhwmBGsrifb +r+5m2WpzjZEiczIM/1ZL/6BV8d0Lxq5mbFcQQ7j4PfaDL8zWjAxvtTzMx2mWVm8z +YsFGR/ioinBIHHE3WqJn4QoOknQEv/jBgs0zQ0jASwVfT28HOtdJPCQ3BuqAWXN+ +DLP9mgsXUPu31QVFZoY5/tZx4SjlrHJLdE+msfdBRMdtlwoLESLPEVFydRsqETOw +53SYA+N4JEAq63Bvs/gabG6ALcN1R8KOtAQ6isf81e6tq4boeRyMU6y6v4GFbetr +5YOVgWUYxaFwCtJH3V3kosbuRTFJFHXzsiJI9Y8P6UOMi5QuTf3KBESXEIiHjSs/ +dmguPA6Jwf0XswgoEYEFjruKyNldrKT6AQz/IV4WEX9BXp42d4mooAcHmOOyXh94 +Zr15oUA+6y4SwXHuPAAB5b4RegqKqLKrIu10QwRxsKg6MFSBb+SgW5ApmQARAQAB +tDdNb25nb0RCIDQuMCBSZWxlYXNlIFNpZ25pbmcgS2V5IDxwYWNrYWdpbmdAbW9u +Z29kYi5jb20+iQI+BBMBAgAoBQJa16xsAhsDBQkJZgGABgsJCAcDAgYVCAIJCgsE +FgIDAQIeAQIXgAAKCRBogYxy5SUp1LPbD/9pL0eKgem11oWhyEVjAcshaKiQl0QE +BLF8I9Y+y3vElP5wPwSqG6VLpSjObJtgtFCIm7y1ArYxViMoWBkFAKKooZqN+2P6 +ThwbtqvRKQS6QaUxwHvjbNNLyI9H4tOeUrNz/yuCUAJ+PbEoNPTm21pPAmt08loD +yAiGGfAAkQnAWtgfFoAFcVYrVg8oD55fTfUkuZQXTxSOT0zQdxKqPiAerFNkO46x +sZM7jHAGq9oNX/yq5BS+mcu9JfiTac3yCjQmdCh90/yizt5vW2UvYC+hIoYRgY+3 +KRosr2hkmm1lmZKsJE39hmC/mXDo/wh1uuv4w5c2/ywsQcwzl8q2UXRQ+JCp78SR +wwxAc86YQgY9s0bIQjiLA54fH79sMKq6j3Pkx1P+Cz09MLEaQhM6gjQ+HNVN9ues +ntWjKWGYHhWKCBgpUAORS9mGYIOWT15gs7PCCAm3Dug/DHm5UZZoDXYFr0sJfDrM +6Qb2mH7VM61Judefdkn5yd/KuJdQMgyh22bdTl/+uzVtCHgHWA3VlNyPva68e8CX +kXlqxyytJQH7D5+yylF6OB587KStuECpXP64F+EhlPM4HWs35jlNS/M4ak6hrHjI +rs/amsHmm9PTSDaqi4TDQIfcu7urE2Ajkx5CDB7E3xt++2p+1td8htp0fO6xC7kc ++K6TDOVVCfd2mA== +=HgS+ +-----END PGP PUBLIC KEY BLOCK----- diff --git a/roles/yum/files/etc/pki/rpm-gpg/RPM-GPG-KEY-mongodb-4.4 b/roles/yum/files/etc/pki/rpm-gpg/RPM-GPG-KEY-mongodb-4.4 new file mode 100644 index 0000000..6911973 --- /dev/null +++ b/roles/yum/files/etc/pki/rpm-gpg/RPM-GPG-KEY-mongodb-4.4 @@ -0,0 +1,30 @@ +-----BEGIN PGP PUBLIC KEY BLOCK----- +Version: GnuPG v1 + +mQINBFzteqwBEADSirbLWsjgkQmdWr06jXPN8049MCqXQIZ2ovy9uJPyLkHgOCta +8dmX+8Fkk5yNOLScjB1HUGJxAWJG+AhldW1xQGeo6loDfTW1mlfetq/zpW7CKbUp +qve9eYYulneAy/81M/UoUZSzHqj6XY39wzJCH20H+Qx3WwcqXgSU7fSFXyJ4EBYs +kWybbrAra5v29LUTBd7OvvS+Swovdh4T31YijUOUUL/gJkBI9UneVyV7/8DdUoVJ +a8ym2pZ6ALy+GZrWBHcCKD/rQjEkXJnDglu+FSUI50SzaC9YX31TTzEMJijiPi6I +MIZJMXLH7GpCIDcvyrLWIRYVJAQRoYJB4rmp42HTyed4eg4RnSiFrxVV5xQaDnSl +/8zSOdVMBVewp8ipv34VeRXgNTgRkhA2JmL+KlALMkPo7MbRkJF01DiOOsIdz3Iu +43oYg3QYmqxZI6kZNtXpUMnJeuRmMQJJN8yc9ZdOA9Ll2TTcIql8XEsjGcM7IWM9 +CP6zGwCcbrv72Ka+h/bGaLpwLbpkr5I8PjjSECn9fBcgnVX6HfKH7u3y11+Va1nh +a8ZEE1TuOqRxnVDQ+K4iwaZFgFYsBMKo2ghoU2ZbZxu14vs6Eksn6UFsm8DpPwfy +jtLtdje8jrbYAqAy5zIMLoW+I6Rb5sU3Olh9nI7NW4T5qQeemBcuRAwB4QARAQAB +tDdNb25nb0RCIDQuNCBSZWxlYXNlIFNpZ25pbmcgS2V5IDxwYWNrYWdpbmdAbW9u +Z29kYi5jb20+iQI+BBMBAgAoBQJc7XqsAhsDBQkJZgGABgsJCAcDAgYVCAIJCgsE +FgIDAQIeAQIXgAAKCRBlZAjjkM+x9SKmD/9BzdjFAgBPPkUnD5pJQgsBQKUEkDsu +cht6Q0Y4M635K7okpqJvXtZV5Mo+ajWZjUeHn4wPdVgzF2ItwVLRjjak3tIZfe3+ +ME5Y27Aej3LeqQC3Q5g6SnpeZwVEhWzU35CnyhQecP4AhDG3FO0gKUn3GkEgmsd6 +rnXAQLEw3VUYO8boxqBF3zjmFLIIaODYNmO1bLddJgvZlefUC62lWBBUs6Z7PBnl +q7qBQFhz9qV9zXZwCT2/vgGLg5JcwVdcJXwAsQSr1WCVd7Y79+JcA7BZiSg9FAQd +4t2dCkkctoUKgXsAH5fPwErGNj5L6iUnhFODPvdDJ7l35UcIZ2h74lqfEh+jh8eo +UgxkcI2y2FY/lPapcPPKe0FHzCxG2U/NRdM+sqrIfp9+s88Bj+Eub7OhW4dF3AlL +bh/BGHL9R8xAJRDLv8v7nsKkZWUnJaskeDFCKX3rjcTyTRWTG7EuMCmCn0Ou1hKc +R3ECvIq0pVfVh+qk0hu+A5Dvj6k3QDcTfse+KfSAJkYvRKiuRuq5KgYcX3YSzL6K +aZitMyu18XsQxKavpIGzaDhWyrVAig3XXF//zxowYVwuOikr5czgqizu87cqjpyn +S0vVG4Q3+LswH4xVTn3UWadY/9FkM167ecouu4g3op29VDi7hCKsMeFvFP6OOIls +G4vQ/QbzucK77Q== +=eD3N +-----END PGP PUBLIC KEY BLOCK----- diff 
--git a/roles/yum/files/etc/pki/rpm-gpg/RPM-GPG-KEY-mongodb-6.0 b/roles/yum/files/etc/pki/rpm-gpg/RPM-GPG-KEY-mongodb-6.0 new file mode 100644 index 0000000..afbdfe9 --- /dev/null +++ b/roles/yum/files/etc/pki/rpm-gpg/RPM-GPG-KEY-mongodb-6.0 @@ -0,0 +1,30 @@ +-----BEGIN PGP PUBLIC KEY BLOCK----- +Version: GnuPG v1.4.11 (GNU/Linux) + +mQINBGIWTroBEADgSBs1z1MC5Hog5yd2wYHskzPE0SOl9LGB35Xhw1894hrKsswp +AS7JnViltXE71iJMoAqepJBvfmZLOyQO0rXcLlHXExK/IctnosRqGQeyLxNZKS0h +e1xQYQrPCWRaHqseYLuJ5wME49aFQ2YS7caFowBvKjsT5AoT7B0uXDp6nHZDUQG2 +MBZJqUKziVYYt7PARv81llDNKqPvLDSc2McL/2aa4mNR/pM5r8iQjACbSnj37ERm +zca2gJ0GzCeZSqfmjoF7I6Ez1Nc/2ge1+fZA24pDFg+7W25du3JIqbnpJQAK5TAz +7tVzvEKU8WT9aQW3G1e5ox3YtlRPTSrTxN9dzLh123NGCd0J9a4moFkZIr8HmySd +jkdz4V1pKv9aTOhLjQpF/bhRaUuNuGK7TV7ZzY+PCVE51fmJx2EX4Ck5c6sW03rJ +59KbrxeTq02AcIBTFUY0Mfh7nxvYvwvLI0OKBOqFGXi4hFXpV4uo0rDLe+tGLFDD ++HsajFUUyAlMETE80PXOuTs44TZiW+SGCTyP2Sm8TBIiacSqsGNsryjgEDaIG6c1 +FB++njqTfGlyZujamYbF3s3wBK8nDBVRympJcsHjLqUhvbh1Bq4hyF2pxio93SgA +mPEm6kl0KBCqpJNZpAFSVHK8penQtQUa0jFQetYPDUFfgTsg7qdZDQNcUwARAQAB +tDdNb25nb0RCIDYuMCBSZWxlYXNlIFNpZ25pbmcgS2V5IDxwYWNrYWdpbmdAbW9u +Z29kYi5jb20+iQI+BBMBAgAoBQJiFk66AhsDBQkJZgGABgsJCAcDAgYVCAIJCgsE +FgIDAQIeAQIXgAAKCRBqJrGuZMPDiADhEACex1qu1HbVIeBwZO4GYYEc8OpswguI +LvTL1ufWMVbpSFkm0XDzx7JU0SewCEBzr7BTri2zjNaPm7RQHYFl1ztTnNvxrvzu +AUoj/BClAgQXujSuUcEu+uA9pBHObiLHAkYFy61EnKgXu2iTOMn7HqRvjvHZyOnr +5llGG2zUq8YbEVs4GTHVV9CjCWBkf78stdqEAPCH69DtR1Bv2jQfUslVSDKUnluX +feTRDgWXnIKo4ld6EoqtYurIbcJIGvXHbFx90PoZiPJXn+eTY+6HS3I/TXDGAOkF +xkgmVsPWcZvbU0dLXjAiTIADODyiEiZlonrxYXJztIs/KXLl5CnvAEeXKXACbgaN +nuIMKtprtrLvFDpXwfyI90He0Vv8iE1wXSLcuztT5R1h6NmisMz9oRYQL3hqsSEn +TjV+Ko34Kyo459Bs9PhJO0DcZGg+B8iU9TdJgfp1KEs2HJFAueVtYAUJ3y5+UJFn +AkQoD5CC0Y+93z0+nHQPvjyxQ/7swFWNtrumrthcpYbGMIKEWqaQoEz2My5gVXHh +v5pHEXxXiARNe44GsS8r+1DYQypDUAh5Tw9mQRagWuC5Dsaaqob5vCdcFEAgiK5W +a/coP3B6WzUoQE8NKa8qnKDvX5RU0dxG5oUre+PuOwiHpom9G+375YYkwIL9a6pE +RRM5efxf1F532A== +=Cc71 +-----END PGP PUBLIC KEY BLOCK----- diff --git a/roles/yum/files/etc/pki/rpm-gpg/RPM-GPG-KEY-rpmfusion-free-el-8 b/roles/yum/files/etc/pki/rpm-gpg/RPM-GPG-KEY-rpmfusion-free-el-8 new file mode 100644 index 0000000..8327c84 --- /dev/null +++ b/roles/yum/files/etc/pki/rpm-gpg/RPM-GPG-KEY-rpmfusion-free-el-8 @@ -0,0 +1,29 @@ +-----BEGIN PGP PUBLIC KEY BLOCK----- + +mQINBFwzh0wBEADfwWMombl8hSzfzeWwGEyBXs4S+9YYmxgtFjnCzR4aUIXxevvf +tY8YWWEeaIosG/V+XJuw+EjcKCDk0RpFimBIyO6IjwkJTVmFVYuzVc/O3fs64Hbl +Dm1fMpOrnVUJnV59nUhDkcnYdysMPKuBJghw+a85FlhnlDlnVC94XPcD5QyTfjpR +bfvCSCFSTobIHUoOI7SK7r7x+qldQeopnCQILZyhaeXDW+jFC1E50oaUtw2sMvfF +q0d03f8yZsiJm2sVpPJ/zEJG8yXogJyEsfMXDoxn7sA8mP09W3cScci/fE7tIUu+ +3HXzAn8CqZRCxIp2uDvpeom7e8NqwIorWZDiP7IhdQr1sf4bud07buCdovmHRSjE ++IuW9gTFAHVFdL3dEwzOMKkdV3i6ru4VVjPm4K4SEbFHaKDrwJy+RlVmcPdH99HI +aHqj5GU140D4grp814hkciy2EXiJP6qMqi8thAQof3ljr4ZZB3/g9tOl/zE865Xp +RvmKS7qv45Vr6wCYvoquaAvm3wusUgQL3TWlAhfGqys13ijqmJIwz75YbL8J9hma +biwLHl4xrWe5quNXdUsC/ijThKbl8duUWYw4nBN1azcVZHV2bZMgnxOsZp3zN0lU +RB1K7U4kEni8c11PGHsL7uH/OuSy3Wq7WPpX7J5nrMbJMmqL3s5jyUkhVQARAQAB +tE5SUE0gRnVzaW9uIGZyZWUgcmVwb3NpdG9yeSBmb3IgRUwgKDgpIDxycG1mdXNp +b24tYnVpbGRzeXNAbGlzdHMucnBtZnVzaW9uLm9yZz6JAkUEEwEIAC8WIQSDeTXN +GeEjqn+KjmmXnwxpFYs4EQUCXDOHTAIbAwQLCQgHAxUICgIeAQIXgAAKCRCXnwxp +FYs4EVdWEADfHIbm/1o6Pf/KRU4SYLFm45AnDQ4OKCEH8y8SvvPJQMKZYnXfiblt +XYK1ec6F4obgl2eNKZoIrKS6CBwu3NpvjWXCPBn/rkiksB7pbDid6j0veHrZmrnG +6Ngo2VnGIjLcDRPcAn/WjzpevS8X9q6AF9bZoQ8BSoxCAoGueko1R02iWtZPlV1P +IQEW2cF9HQdI1vw0Nh+ohiDO87/mNyVUdjootpncVnArlf5MGj8Ut9zo6yJSlxG0 
+7lvMnreH4OeIaJPGYRHhsFtSfe7HbPaCmYAmlCFLmw3AhHuEnYSCAt2kMVxlUrAc +li/FxEyXAKS/C2OYk3jDA215K/G14tBWDkNLwyULiURDH6lvWyRqyOVzr198AJLK +3WK6G5RfngV82VyW0SX4XScnQxuk55HsMC8CKapmPtdjDjqR1qrKDe6umNskwPII +tCU7ZZL+8Do/eMHqJgUBS5II+bSngVSfO4Yz/zeU0WWZhDirh9C3CeZ3kEVSLQq/ +eC9Zt2/x8xykXXKeswg5I0m0+FBAo9w2ChXyi9rQSFEScqCqml+7elQZTF/TrsHC +Os+yoXdCv3hm0wFMdQl4PeXrzmZOB/kMC+XIESoRpRVBod2C6EzHKYCXzoY9iqla +RmIw/1lMM/XnE/x+XQzvkcOQPHSxQ+iJjD5arhoROh2wCvfb3IPnYw== +=Fpo1 +-----END PGP PUBLIC KEY BLOCK----- diff --git a/roles/yum/files/etc/pki/rpm-gpg/RPM-GPG-KEY-rpmfusion-free-el-9 b/roles/yum/files/etc/pki/rpm-gpg/RPM-GPG-KEY-rpmfusion-free-el-9 new file mode 100644 index 0000000..afc372b --- /dev/null +++ b/roles/yum/files/etc/pki/rpm-gpg/RPM-GPG-KEY-rpmfusion-free-el-9 @@ -0,0 +1,29 @@ +-----BEGIN PGP PUBLIC KEY BLOCK----- + +mQINBGIw3n0BEAC0MbmmsILD88eg+nb+avNQtqeHi7lkFopM0bopg+wNhxMrDYOG +KJpeZw8nI2MOlp/Z422cpfZL7k7zxPOoOgvr2W5ubfdJhRSvfeGOCzj7q059tCow +Px5MF9XtX36duPlqKgXieR4WulGKbADlDOKVD4qJ2lnCJ73Spl6q1w90cESQ+jUS +bny7770XJeGKQJ3Qni0DmBceMUsrpJWzDx1JfMDvx2Y20DpdvS+0uO7bFNiHc5FN +ld6oVwfYBdgUuCAo6LnQFTjJA2wJZ5qz16wc3BQCU2SyEeZEIvwvaOE81DjK1XFY +EyuHFMdZxG+h9J0XBZSbfKb3T29fGH8wtsqSSoprDZo5ROeafieLuTdFp2dGORYh +PPkdj4pAR9feN98NPdkTBp+TYrlrtrq9UlDedRmnBwB+7cuTUSx/gFhf8LHPE4g4 +4yh59ZMFO83YH/y2jEEKmqwkcMMEWpMQe9WPRxO8ATvfhXDRKXi+9U80VJN8Yewo +zgTIhI0OvsUH/xxO9kt/tzzG6OqTlE31iw7fS83BavmQwyVWO1k9FseNCtJ/bgHZ +8PSSNH6qFX0uaMPNgC/Tn2Fb8REzOIGzDdRdofcT+ArayRk0AroZxyims7LpQTGq +zbbR4BjDeE1Q4cdOUEIpqkAGGzbzmc4edJ/+z1EdBeHldyQ0fRxqeLCzBQARAQAB +tFBSUE0gRnVzaW9uIGZyZWUgcmVwb3NpdG9yeSBmb3IgRUwgKDkpIDxycG1mdXNp +b24tZ3BnLWtleS1lbDktZnJlZUBycG1mdXNpb24ub3JnPokCRQQTAQgALxYhBO3A +D+dBjJ3370mRpHQD6jMpZFjzBQJiMN59AhsDBAsJCAcDFQgKAh4FAheAAAoJEHQD +6jMpZFjz76sP/RE/zORf8CDcqOOiahvfc9x0+Z9+q0K2WO1/GTcfBBp4ec4UIQiE +YYVsBNzmL6C0JqevHd5+R76+fqG85+ZtnarAERJy2Y4E7ey10yJrw6fFBXP1YMTx +wS7hhhgBN/EW9VCRyH44fnT3cl/FMkfKMETvSlMhOB4lKuHUzqkd/wL9CLUtYsr3 +QquH8qcelBc8jUK8UgKisJhMTR5aGBITZH0qf/TUv4JPbiAbVbgEDAU+aOksnuCL +HdYa2wxpy4XA1LAgCCxQjpb4bIfxah15qXn7MJFj9O81e2TvyQZXfDCq7sPqYM6T +/p9XKMqZz5JosWiTjITRiUYnIrV/Pyy2t26Nq3SS2bccCYn+ewn1poJgeXx9U9iw +FRz1TE/z3lu559wazuUDPJW326WVmuPI2NC/RycKQivb0bQRchTcPBaFa3+mCWVK +Sir3T6u8YnSncaiI6w3sXKpV8q+P78qWiEiSucCSGxeo9ZWys29NQAWGtp4y0we8 +rZYw69n8IDz68gnqiQL69bmdqmhedfKySTnERyIicL2qyLtwq2n/p5IpBsolphLZ +VtMEcp6s0eO4d1eE99M2f70LXVzqwACPjE93iKqtAsWgPOg5xpbyvToea7/Sdou5 +Qquv9511jjvhMVHR0mrPL6JQ3wpDzXXg1yCQ+IOea+90H+6jw6nEK1gx +=/O2H +-----END PGP PUBLIC KEY BLOCK----- diff --git a/roles/yum/files/etc/pki/rpm-gpg/RPM-GPG-KEY-rpmfusion-nonfree-el-8 b/roles/yum/files/etc/pki/rpm-gpg/RPM-GPG-KEY-rpmfusion-nonfree-el-8 new file mode 100644 index 0000000..1b57da1 --- /dev/null +++ b/roles/yum/files/etc/pki/rpm-gpg/RPM-GPG-KEY-rpmfusion-nonfree-el-8 @@ -0,0 +1,29 @@ +-----BEGIN PGP PUBLIC KEY BLOCK----- + +mQINBFwzh1YBEAC7Ar5IGGne3Vm7nPLQjHB32NAlqRWNsnAfpyquGuRFeL3X/83k +FxaLX4wTBc/fqtRC+HRPaKxDNPlI9TOTyYnn8F96v8grOPB8joy9mbDIsekK4uAc +tec36/++mV00yiKiS8cPQKgkAr7oZTqgz4LXV8z/ROUwOKQqi68YjL1WvEzVEZ0B +QBo5TSiYhGP1qMTHuH6PN3n+MBCDTWBAj2WxK9i/ga3NgcsJIqnXEgmKg9NoL9qq +ZMTynrayGbaaqoPgF1vOmegQNa3/3xy3kF7Ax1bofy9l44sWYi0Dge5yYnsJrdZ7 +PYVSXghbWYNolZ1BS4tyXwQb+DfOq3vgfo+82eHK7RiM0KaJAfFzCIFxNe45ihAR +Mn8xSICN3RMiF+1uY6VNUXFZQVbxsmqEnBfXqBMWlM1aBjntpzf7+MzothmaEg67 +oSGG154vmyCnzwgeCWnptua+SUoZhXiHW3OtiwBtz6pP1xPVibKXeoLmP+wQ+rBA +gnAw/Qpnpx/xz906cl/5soKNzbKxIjh904+/1FYFWh4OcBwxVNtk9OcM7nBO+6u3 +CPhGav09YEByE9RR/MkM9FUK8oqkxXDfD2NPgZJ/wTvvanGbHNJDDa+jh97rajNs 
+OANp61jtNZv5i7ocNjkPl8Yh4UxmUW+TDWPqoBpXSAjT1Xis3h5sM9wJjwARAQAB +tFFSUE0gRnVzaW9uIG5vbmZyZWUgcmVwb3NpdG9yeSBmb3IgRUwgKDgpIDxycG1m +dXNpb24tYnVpbGRzeXNAbGlzdHMucnBtZnVzaW9uLm9yZz6JAkUEEwEIAC8WIQTP +n9WfYdZhIUbNrI4UtnktvdqEdQUCXDOHVgIbAwQLCQgHAxUICgIeAQIXgAAKCRAU +tnktvdqEdVZKD/9WOrxPq/cXRPlWxSxPPIe4FTo88HmOPwE1cbFwoq7e7zLoUkDS +efiD9m4szxYHUeGXvp0gkh6/FLDkvMQnlHoJviVDYK3sPAudqAOl2KtZlWE4SykD +mNjONZMcPXBtceGmur1ZiqSFiidBkDS8Z316dhfxAJqtiVZFL1iUuaIZVX2vYcJc +zvDJe4JVeZQ9lYxpvnwcmPOoe4M7eJlniKNK5tsBHa4daI2iIehIsVoz1CY4VO5N +C3rfAOUs8wDKJEKRFe30nPhPgzojA9uhD++cOymhnbxLQBQnS6mHlGJ7hYMI8YaJ +P21G8pRcYmyZbC/fbeB+91dR+uGeZ8qKPRO4/EnPCcbBkrlVawCmh1QXThx1Mwrt +j56J3ppZm15zMkf8PsXOj3VXQSHAPLwPATE0vmh+EAbEydBg41bv+e3SCkpaYsjC +egrXACGnoCL2wdXPxsJUCmUWWSkCGKmYbCMq2Rod+FqZ48igxh3V4v7kVSFThkML +fdF04ENL9r5PUdfM8JCW8KlXvkSjMROUxTzVyuyMd9Ct7FkUDIryBXufGKQ9jyA6 +FPYwBme26R8Vu3hI9VCFgO1e0rVFyvDuiBnJZ0atXqkn9vnXkA2zVfabb0PN5Pn/ +dHObVLLxbTYoPqQl+lCZtfyyELWx13EYkn4VkG+y0D79aC7sxwEeZX1n5w== +=WjVe +-----END PGP PUBLIC KEY BLOCK----- diff --git a/roles/yum/files/etc/pki/rpm-gpg/RPM-GPG-KEY-rpmfusion-nonfree-el-9 b/roles/yum/files/etc/pki/rpm-gpg/RPM-GPG-KEY-rpmfusion-nonfree-el-9 new file mode 100644 index 0000000..cc4f581 --- /dev/null +++ b/roles/yum/files/etc/pki/rpm-gpg/RPM-GPG-KEY-rpmfusion-nonfree-el-9 @@ -0,0 +1,29 @@ +-----BEGIN PGP PUBLIC KEY BLOCK----- + +mQINBGIw3oUBEACyLvk0T9azCSv1WxywbmM1NvesSeyP3V0FGizlEDs6GOiTeV0c +0ARzC2dfak1ctUntHN1imbbpHHv3ovXEMxOF9iN2MUKn7ucy9pU2+/3J/Kex7WcA +h3nCGJ83Q1UD51KESnMi7OOJwMJQrQqDjnKDyUzccizkRThZF3z6VsqEogalVG2P +QxB278tQBGFCY5PKsA5tYQW+kDjTJPXlHvRqEId3eyyCsUnBvVZUp03kcnAPsZx8 +qw4tjlCGc5P3sXmMemJQbSOUQFv8vVwjMX7ZT78ib0jZZLO1JvSZMtskH8SuhpIf +qOM6ZOQpO0JXvvBPBK5B8Da5CFsFSkr9FsWw9ObgLHqAuTuhggZiSYyrHJvKt8w1 +lQ5wjGR3eMZ8cmjBUJK2XKtz5GdelXd6UontzS4I6jXwjMSdBdkUUOitk5y3lPS9 +XDqylXQ6582IBopTYRlR25UJ6IZMyCxTO3O5RJ/e5lWxWTRy9mHBEFHz2NRLKjBE +/Nke3fPA36x28ihAMsCP39d54Yh9AlpF/ITCDPjsQAwhrqPkfLG2mdepahrRZFXY +j7AoM77CTS57bE6B8l74E/AZL60sFQkxfJ86fIiSr+T7qhmYBAaPHzF6e+C+hI4L +sbQp9rNggWIMdAF3MWmcCzk8bnYegmxuOaqxuSAJsZ3kOSPI8S2jQZkBDQARAQAB +tFZSUE0gRnVzaW9uIG5vbmZyZWUgcmVwb3NpdG9yeSBmb3IgRUwgKDkpIDxycG1m +dXNpb24tZ3BnLWtleS1lbDktbm9uZnJlZUBycG1mdXNpb24ub3JnPokCRQQTAQgA +LxYhBLskEopLs8M1LAt1ouQXTmuqshLqBQJiMN6FAhsDBAsJCAcDFQgKAh4FAheA +AAoJEOQXTmuqshLqDXYP/i8Dv+ESfQ9HcpcbEIf8/3KlaAzjT0w2ia9Cr+CafVcA +PtXviDMgvp3n4+P4K+mIWLnYGDXuBmS5i1fA7paSWDeG4cjE3y74hQpohGEonxdl +O3r5nVoziBTn6/xmBVeNUN4ppxZiNKPrcEllddgswHyCvTs3/7QbN+uuM6ZQwZcv +aNh/FHT+7/Dd2U2+g4gJMd/5UZ3/0DwlhUynKJw+ZbUUh8s4iTPh5DpP6JM6/qaQ +tyD/VA5X29W3A1GECU6j2Y47pwQZHJSpT1DogzC/bXrX4sy4+lndRq+HuGflLVeY +eFZQOroSFJc3NhYw/bDZZ4z0YDtjrSX9RvL7gQGf2dXsBHYiPxH6+HGsNjUkkbfD +4m6ogCKrra3Z6ysOBuLXHWexwSomDjqkqQb9AX3NaQVeWU4ahto1Ht3/OVsEFKfE +4GjkOKa9SUtGfA63SkFow+2qAckBk1LCxZmOTwLD4H4lNom6RaHsxQq5A6YglD1+ +Rah60gxx9NWQPno7H9zp+IZDPTADAcw5rcakmNe+ba1AqOsQNb54cE4HKIp0tFc6 +5DlUiO6Oq8LSRRTYcVV3E1McokJQoZ6LgfMPzpgC9nokaiHOYdA1GNCmU43a8F2I +v4PTNjNDVl66ij8IfbPe7H+CFtD++r1sujR/olMWhUn5pjBMwIi0F/V1ynnODdZQ +=YRC3 +-----END PGP PUBLIC KEY BLOCK----- diff --git a/roles/yum/files/etc/pki/rpm-gpg/RPM-GPG-KEY-rspamd-stable b/roles/yum/files/etc/pki/rpm-gpg/RPM-GPG-KEY-rspamd-stable new file mode 100644 index 0000000..2a76846 --- /dev/null +++ b/roles/yum/files/etc/pki/rpm-gpg/RPM-GPG-KEY-rspamd-stable @@ -0,0 +1,52 @@ +-----BEGIN PGP PUBLIC KEY BLOCK----- +Version: GnuPG v1 + +mQINBFW3VB8BEADAV1lBy8DPcSEBSLYVKgwsBx/dRmgenKeliMpiZyNYJJmF6tSV +s3v5DtDIUESgI2mBKNeptdneri3CDJScI/LgPLKqemrLBkAMfe+f57JgppY5ti4H 
+xo+VZdbF9bhCAwYwJnqnyuLjYSUu6nCuW4uPDoqBHXynwsIWr1O3fREpY+vgIgaT +Oqm3ncssqxSicymd6k0yuo55xuUvrc4Yu4IEnhFVRU53e0E3zmHg/7ONI99YtBan +7G/w2IfA1bfRDYZ2Avau+JqGcEl8vy+eLmYayKirdsMPN8Tx6RFOstDf1CnjW/bj +IX7SDOklIGJjJwcWW/iY+1P9SfNNqSDgXavJj2wmLMlUhgjyJFTXfdDRjmN0PFxo +f6OQu5xok1WHfKFJL+hLGknjHdXLmGd5MSuFlutdVHJQrieknjBea9xCiEsrfe8V +zyNqGhzgIYjOi/bO7jGpY/WiFHvM9XtBVp862tqM1S1WbAWW5u+es6NK4q9Cv0DR +tIalss+5gFhdsIFGFYQWfY7CrjOIC+C0+c5IGaBkHte35hCCvDpOO909xxVqUZYe +9Pl8zYgPDe1H4arMO+p6rSvVntvIWOqLqkuWYSiOY4TGADJTkeZRbopZhvqs/9mc +847fVMbOwKfkbeuGiHhUK0QFewXSu+cXJyGtyu3RgokBWr2yyzJFXIvJbQARAQAB +tEZSc3BhbWQgTmlnaHRseSBCdWlsZHMgKFJzcGFtZCBOaWdodGx5IEJ1aWxkcykg +PHZzZXZvbG9kQGhpZ2hzZWN1cmUucnU+iQI4BBMBAgAiBQJVt1QfAhsDBgsJCAcD +AgYVCAIJCgsEFgIDAQIeAQIXgAAKCRD/ojLtvyHiXucND/4ja0t+4RMiD0c0z3xD +Vp0Ysq7kZvzlteUrw98f1BMYbmSTJ+43JVZV67GJ8fV2d9/atIlyLce8Gn9hYmF7 +C5nPpCCOlNejkwkc9MhZgoM0z7sTNZwKLZ4fSnxHD10Z923G+IRQYeXswM7hE/T5 +8NgANOWBFs9BxIEIT6IfRNHF23SCmCeNFNmUen6uXLznjRzYbMmwP7u2BopfJcpN +ajnm66IypQDsUqVwBRnm9o9GAWUPbp4ahhf1vYu04T1vD7n4qhrLdhHmEJpukEhD +q613Wl/k0g0O8SahfSAaM1x5zLOJ0sMacyxCktQKXypAhkhhJc4J1KLbnNUsxZdk +Gn4wLZuhfIuzh2KfKBdwoL3zRq7kjgumJo7AQhEIIDGKutl6sZnbRHjBr4qBb1NJ +/7GC7UiZhIesdO6HdqrriNF0l8dRVIaHXGKF0PQWWG+J+147oQM+SJmm4W4oONSx +YCjyTllxwh/54fhu81jhSyBgbKAmV1gYLIPvAUgPkguAb5JWcvZOeXytHWZYLK9T +8rW5R0bviiouHHRyQYu0AX+wiSyAfoVnTVyad6xTWUT3aQ8jeL0I3uy323Mrq56U +7Yo0NFwKPF9z5kbuQje3daudQQymkhOfNcQm3dOaaWKGp5KPRi3OtKYMu+5Aphor +lwJWDec6PUe835YwqrARXtPaNLkCDQRVt1QfARAAvFiVTmD9Jr/L64e2qq92LcHQ +nhpAS+wb5GXv2RvySOs3oayMSNiP5cLMjQrRu24qYya4H7QNpa7vlRUdcqjBXPc9 +bPNB2E13gGaHZrQlJFAcI0MNy3Vn2v/VEM6Zfei8uh2prHAxfOeuiszGjVafIHoe +TN2Vx6+SnToB8Eg4+x4J3Aqi6Nor9HDXBPZ4dxo2V64xXaGo0XXg/EHJZXrafKRj +ULW/0zk/8CrwzPoiVt4EawjMeJ6UzOYz8qX0vRI9gicrOnaaUIvuagp2T0IZvOkM +7t90U6kPv+R8+briaWk94oByTJyU1vViW8GHU1u6AnK0ZmGqVilJbet+GeAOWgFU +6tcOHrPiTfCdGoRpM8beNZnrMB91J+jj0foaKC6QgumwRcHudq62eQXX/4SARdhF +GH68m86prsN2hLc/SXQ1HksFWdgh6blo0eloKCqYxdmEEVhIcbdtd1x3BjkDmyPP +zfvEZ0bODVlM3yZsDbC9q1reFB/2C92gDBo3uMl8y85iHEzPTUxqmVwpnq9CAcpu +uSjpXq4o2v10Gr+Qq6aOR00wgr8RpGWupYU88LqBy7eq54/Jxb7d8THMl2v7kzAH +pGQXsQMgE1tLOES/OloDf13sfJJkWYx5k6WI0l8isTjJ1tiiFwDqkS6jzSDxZC+p +/5W/Z2IGStzTIpwu/9kAEQEAAYkCHwQYAQIACQUCVbdUHwIbDAAKCRD/ojLtvyHi +XktTD/42A+IRAWq/uTXgAoWiHhQn21Sp2T06vF+Wiqw5hEhsqe1tn9ZLA/vYR01H +A3c8pEt3ey3iQlRawLbTCJovWBvlRJnjc0r4U17nZ7ZzeUcU/GU7H/kNS3em9y0t +58WVB2VSx6PFF1xTzPYoTbYUY0crH9dzCzrkSm3f8ENTi8UFKhAtvJqzEgg3yY+H +6Q1CM6n+Fapq+JiIbiFYQbQjRrPkeK+ZXPgbogN3821nAChnUN7n9G8ypt+6YW16 +508AvpCpiDTRt7KXgNABUjAV54Ie5+laGDNm7UT4pHK1DmzmwDoc4YruuTYN86d1 +dbTdJ5KZ4kXbftkfo5ux71HXFqCqOyd60BE7LI9BwyLB4Bl54RDucQddW9BqaXOn +88tlwzuxz0D/S4luWjmFgUFLgMSD14lcoZi8+rg7BMgyDOutmUdKH8gwAR9C2Jt9 +UBsv6DHFsFTtYQO8xnrOp6ip5HaBsrtRacbjxbfP24mVaOXM7C377+yZ4BB8M8ds +377a5y3U7iaZPxiE/K+Qjkhop2pBVOpTEgIdUPiJcRYFzor/Ix3qVkgYDMeCY1Jn +uC1gNyh3SBgjc35xRSnKrN8QB6cC4FPfZU21oz4WwIxIOqNkolupZv2mB4X/+ZUk +ip2OB8LrPMN7i4wNSurNOd4Jc/Tpil8UwHNMuRtb1vJ/ZkWbAw== +=BILd +-----END PGP PUBLIC KEY BLOCK----- diff --git a/roles/yum/files/etc/pki/rpm-gpg/RPM-GPG-KEY-zfsonlinux-8 b/roles/yum/files/etc/pki/rpm-gpg/RPM-GPG-KEY-zfsonlinux-8 new file mode 100644 index 0000000..290ad24 --- /dev/null +++ b/roles/yum/files/etc/pki/rpm-gpg/RPM-GPG-KEY-zfsonlinux-8 @@ -0,0 +1,30 @@ +-----BEGIN PGP PUBLIC KEY BLOCK----- +Version: GnuPG v1.4.13 (GNU/Linux) + +mQENBFFLdrcBCADLsZ7FG2e2Cr/ZV6J52+CyzXXmmmIt2ibYJdflxhSaaxhuag6k +1OgWEM1R/igjWD3im66O99m30+XeDqDwwBC2flplTlAM5cVb2Q37M+q+LyGLaJgL +JKIkHWfK3/arJO+QY5K2hxzvKUAO1ZJa5OYQMJmKIxLzKz3SX6YnRTtE7ohDq/bU 
+F32ysIrW549XZUpFX4DYCbR9IEaF5kCvus4FwidBTHDC5aWKvb7qStaL/Yo9koV0 +CADl4/nCNAKTcmRoo8Pz+zFwFFNLKRdwu58IefLgkqon7RQBDhwMqKPs1Kw14RwJ +HUv3HYw1JleFmkXv2hn1BMa0ct7jUrvenFBNABEBAAG0IVpGUyBvbiBMaW51eCA8 +emZzQHpmc29ubGludXgub3JnPokBOAQTAQIAIgUCUUt2twIbAwYLCQgHAwIGFQgC +CQoLBBYCAwECHgECF4AACgkQqdWhwPFKtiAHWgf/SVXT92gs02HlJsz3h+vmHKHH +8esxHq8DzG9PaBTyeWLB6mMuN5IQ6Kbtpy44xYCyPyBo+MEoFyPwJXw4qU7th/NX +fAaohXTT8KltyKYsibotdeUHGE4G/7ilbQl9kknlmbig9M16RCnRCDxBRiLrvdaQ +X84YhQUlV2CUShRevuogNbcfORViF8jGb0vkRRTJFhsfZpq7XmW53Q3RXrzoe5Pf +t9NzV2Tlx9ohyGGpPVGO4MgJ028vtoUSIVGA2a9Vg1dwKJkhZ/VvgeleZGds+jIP +0SpXCNJHgxTaE5AJ71GC8OiDst8b/syYmRFX4P1ioah9m7X3ImHiX4tZJc+Ee7kB +DQRRS3a3AQgAtGrh/OjWeqyUAbw8aO6ew28u+wG0GOEaNdMPEm9120uM3XoEHxg7 +FpixPJHj6u9VfhZvHBQOEYiZ0sIY7qj/0wyifsTFYjSZrSCJJHJpbM4SnflTkD43 +uJlvcUmqMv0vfCnkaMIO71I1xqPlgOYxOQlenttcI+5xEVzv78cSoQOdddCdGFMs +mdrfxh5NWJR6ehIEI1JQTl6iuZt8wJ/Tgqk2btyDYpDKvDdKLjkBpCbTwsVtQa0x +2/EvuSMbnu8rqqzqqVsOoBMzDi1ksxm1kC4fXmI9av2X+dGpGJNnn5AqIQAKsETE +8L4Ajzo8Tk2aaq2ase0i9sNdnsRtYGOjiwARAQABiQEfBBgBAgAJBQJRS3a3AhsM +AAoJEKnVocDxSrYgJXYH/3gAPOr7LwA4p3BhV6NIwIaMGmGY+1dbMp/OxB+mFuOS +NTTCKsBGUchGVFYjSlBtden07S8HNNTWB+bWLVfRTTgRkqomxp0DMMOZ8ry317l+ +cDKVRXMPEZvX9567q1PAOGDiGxiE8296ZUp9/hSFkOqv1sdp+HSM6KVMb4MP4Sx0 ++sAwcEumIQAKgXMzDLdpoPDrFnoAAfUmQfpddd7NKch0NJAhdlPtQryFpKdnmvpQ +oLINrelqJxuVMo0hd7q0Xc/vJT0s6pe0f0fXdqXy1ijD3qAewXLZHO3XGVSF8fLY +Q5XFCu4KH2vNBmn0lZxVX5BWm3R2M5XfuT/CJYHO+mk= +=A19+ +-----END PGP PUBLIC KEY BLOCK----- diff --git a/roles/yum/files/etc/pki/rpm-gpg/RPM-GPG-KEY-zfsonlinux-9 b/roles/yum/files/etc/pki/rpm-gpg/RPM-GPG-KEY-zfsonlinux-9 new file mode 100644 index 0000000..c95b7db --- /dev/null +++ b/roles/yum/files/etc/pki/rpm-gpg/RPM-GPG-KEY-zfsonlinux-9 @@ -0,0 +1,57 @@ +-----BEGIN PGP PUBLIC KEY BLOCK----- +Version: GnuPG v2.0.22 (GNU/Linux) + +mQINBGLYiZQBEADFiO0tDOd+EOS2tLvLI+0fvX8xWPR+cohAnvMJFWciUt0ucN3c +XHkEwbTkZNzJJ3s2AIVzq+zhi8SF3t/y0VIiK4pba5OOp14HvzkxBPStPw6Q7KNG +x07QZxrQ5BwKW2IU1HNUm+bsj8pKjoYWFc2XAzvOR8I/247RyiNVHLD385oHRR6T +DQKv0ZLwEekokgqqtJwapjCm5nUmwxr4FmBQKzu7bHYS/hqv4q1z2d5YY23UQ9B0 +gazILmenU/xgIHWkPl/7HHetq0zbFrgFao9TfRkaMHLubmX34N7xJD99wszy8ZR0 +yf+b/16oQrNY3BRsD2ZMO5I3elRPYdaXvRvwuzYGVpULWdEEaDr2FaA+JnEJHZac +v9EdZhROROKIZI1BxPOeNxIlumAgSXTIvFIC2sRGWb7/a/WbI+N7bGXcMENn2s7d ++xiRHhAkdehqY6iWwLFX7jmueesL46Qzsaqn+547aHivuBxETPWuvLs+ANzmqBP4 +T5NP2VVpux9in5VOP5JbE+kRZRH3HrTMQJBMIqFhUFYlkfFBbVDsgZLEFMBpNbZx +4+xcIp2Qe3ODv1+gL2ocOaYmPdMKDoLk/+qecDiZGChHJlUk2MWLEJ+yZ0ZN3RWw +hb+JB8xoJVTRQrOgToPHaVeRTSwRmwMTGICLIG3KRxZ6aKgBEfjqGyeKLwARAQAB +tB1PcGVuWkZTIDxyZWxlYXNlQG9wZW56ZnMub3JnPokCNwQTAQoAIQUCYtiJlAIb +AwULCQgHAwUVCgkICwUWAgMBAAIeAQIXgAAKCRClmf1enbhBQTixD/9IxQ/StgUv +pf/qybWa38dEI2Iri+UvR6zy9Nja9SJ2rBrSF5umNNsuRxTD2qvbjNcvOt40sFoj +pM8aS8JO0Rv5ouMh/Kxbpn0fyzvXVpx3c/ulCHRC38Dnw9G/HijYwxGy+WbysbGF +HwxepI5MTdImbSJnteNx0q/2SPWCK+KdSTXcbKM113QDXM9b8mJFdOvRa0Mxfu0y +7qFz+yNmTDZ/tCNoWCCa4G3lmpDosCIjnDoHoethwVvf/M1THRYeXLT8SQEOXJDp +gT5K0ffzFbqnbio+3r4EDjCZFM+ZKfaRb5kSDdt+xYreW6Q35OIsoVZsEHeAy+J8 +gmk2HGmHCZ8nzO2iUFkq4OQWtOubmYpSB49CDn8zEplhy72BNFL6MTBH9RsaLOBH +uJbmZFwrFRA6aq5c/NKY2PsgWlxKx3no2grScQC/VmGWu1YZ/rnkiPSf2l+PmFWo +EvJyElSj52NmpJv0KfggDNGm4j7Axo9uxRMetO0g0Ee1xS0d2ApcpgCd5DmRYcEt +bUoj/qDdtlTJSLJLClWswEjxYM54NmPE2/Fp8qv58iFJgQsrgaB9RK0VShA8+zK2 +/lbv7aTlQ1SUBdryvMXb9W+xupjzBW1M4rJACZyJegQlnuBYmtlcYW2RarESWmEY +5vBCc5OBlsKFDLkmHITiFIvotDsDsDS+tokBHAQQAQoABgUCYtiNdgAKCRCp1aHA +8Uq2IM4EB/oCB0Wwysk08Xgl3nfpZccliG+QSL8Rj4FVV/eJUq+V8kxlkFDGeql/ +f5Qhji0ma8jIJyB8gsi6g/3HVJK7ry5XwHWBPyTv6NR+PrfB2tGrbN7S4R+S5rd7 
+yfgRkvsP7+DjUQcMkzY8oXvy0YR84QcO2f+zcqZmY6trwn+p1S4HNjpG/28vZrix +Ytdogg8b9F1OFtfJiCQABC1XnT3R8mvIcwCjtkvwJY8L30CNkBZ6svOyVfRVsEG1 +HQl1bPo8LTLpDQU52uC41J89i0heBxv9tIUTrbxJIPx5l9QvQYSJ8pKTRxyAFrlR +n5ANBdk+deEpazZWoZmbDVsPMYXnTwzGuQINBGLYiZQBEACyqsd/q4GWA8MJuk2h +q/qqKGBf6xU3GBPDm0CF0EWB1sTKx17Rl9cwe7wyDrB0iw4w4bcfujO/k7y6rNGQ +7PuBpG17dMsQM9H5DBPptO0e00jn5DBNcgSvgTSJpXIzC0VBrfPRDTpZmBP6GWuI +/Xqa8RahhpEZmXOqxfOi1qZsD8+gDAv2G595025/9nf/KfbYZTibVWurkzHx/URG +GASMnip3Y0q7Plo3CjEP28EvtyK3fA+OpCOuHYbhJVJGKsVszP/ZRppjjh2yS4hz +EB6u41Zv0h5/imBFxMyCF3Q44ZvxeMyEXRZG9Omh7swqu3HW/BspEnefxCvc+zp5 +CW6Pjs5yVx4CKzb+Uo7fR7tnUwbKXvUnKJLWO+POFUn7sc3wtY8WpS9XSXIfwLHX +oiDqirGO3sKG/Mm9ydQL794zykjm6tM32A1VJT7Lz9eLAyo4BQl04X8na/O+kBbR +0LB7EEhpHokx5cD3NALfKa6S7ZK0/rsrH5n/7RsKnEunyoUjViOnSRbfuz19bV7b +A6SxrLkY+RRW7GVUHvPIYwOAlifCUQVFnezc9HEMMr5aM0D0PppKwDoJhisLttQN +FLp9pagcIAg7bxTtvRPJxPgXSeAbI0WOYpyD1dHy4YN1OpY4x0kONB+6rxAKEwUp +HzDmDSXXLYcFyXFDiNDPwWTaKwARAQABiQIeBBgBCgAJBQJi2ImUAhsMAAoJEKWZ +/V6duEFBeP8P9Ah4NUIX9AetziaKsG9nxMSc6O1C3BFr05ZRXT1ochHlJZEkI850 +EdLZv5e6cxO4Xuobb0DsdmsMavd0v739SpBqzXh+xvr5Y7JmwTiMzTrcJumHEVbs +9bUxCIrB2ORgbR3ZrgCK2tjB8EtTQRAaDnh3UdQIY58KVpgVLtY1uOEuP3Vi76i+ +RgPZtLSooPrIyL9uFD3bfn5Ebuml2mHlw/MEBTLCMh2gKqnYzYbB1C7OmqwM5RtM +SeVWFTctFHo/P9nkE8OSr29MCx7MKalYrS1rU6O8Cg2S7CIOQ/MHpepcs8Z7M1jn +suYWBkgzX/hnOwCNkoWQv/LRh9HTcRe4bYctsGKb35dUAArah0xB2BpQ+srw5IOf +C2spzYmFB2rx/wNSftEmAT7YwDlhFsS0/fGAPkW6Um2h3H0L2lLVG5XgBbfpY1my +o80d20LSVbvftDhAeR9/Dj3Plgve5tIdUZLNN6CXmAUJYlGkLdv03hQ69lIFkwPO +dn3ycQkk86Pnwt+DY2nUHsxFcEstZIASCr+htCv2YI/MYDWfDpO7j2TfCqspXV+7 +FgeCqkEZ1d8uha1/3VQmGXKHOQwc2YZ42k+at8LzlgseGdez+OBh4rc2WM3csB34 +yBGA1C8bQc8pIpWQ/eR8VGdmg1BYhrrSlyhepSjhBZ3UP3HjPL9WhoA= +=g37Q +-----END PGP PUBLIC KEY BLOCK----- diff --git a/roles/yum/tasks/main.yml b/roles/yum/tasks/main.yml new file mode 100644 index 0000000..318b34e --- /dev/null +++ b/roles/yum/tasks/main.yml @@ -0,0 +1,29 @@ +- name: add yum repositories + yum_repository: + name: 'internal-{{ repo }}' + file: internal + description: '{{ yum_repository_defs[repo].name }}' + baseurl: '{{ yum_repository_defs[repo].baseurl }}' + gpgcheck: yes + gpgkey: 'file://{{ yum_gpg_path_prefix ~ yum_repository_defs[repo].gpgkey }}' + includepkgs: '{{ yum_repository_defs[repo].includepkgs | default(omit) }}' + enabled: yes + loop: '{{ yum_add_repositories }}' + loop_control: + loop_var: repo + +- name: copy gpg keys + copy: + src: '{{ yum_gpg_path_prefix[1:] ~ key }}' + dest: '{{ yum_gpg_path_prefix ~ key }}' + loop: "{{ yum_add_repositories | map('extract', yum_repository_defs) | map(attribute='gpgkey') | unique | map('replace', '$releasever', ansible_distribution_major_version) }}" + loop_control: + loop_var: key + +- name: import gpg keys + rpm_key: + key: '{{ yum_gpg_path_prefix ~ key }}' + state: present + loop: "{{ yum_add_repositories | map('extract', yum_repository_defs) | map(attribute='gpgkey') | unique | map('replace', '$releasever', ansible_distribution_major_version) }}" + loop_control: + loop_var: key diff --git a/roles/yum/vars/main.yml b/roles/yum/vars/main.yml new file mode 100644 index 0000000..f6566be --- /dev/null +++ b/roles/yum/vars/main.yml @@ -0,0 +1,89 @@ +yum_gpg_path_prefix: /etc/pki/rpm-gpg/RPM-GPG-KEY- + +yum_add_repositories: "{% if 'epel' in yum_repositories or yum_repositories == 'epel' %}{{ ([yum_repositories] if yum_repositories is string else yum_repositories) + ['rocky-powertools' if ansible_distribution_major_version == '8' else 'rocky-crb'] }}{% else %}{{ [yum_repositories] if yum_repositories is string 
else yum_repositories }}{% endif %}" + +yum_repository_defs: + rocky-baseos: + name: Rocky Linux $releasever - BaseOS + baseurl: http://{{ yum_host }}/rocky/$releasever/BaseOS/$basearch + gpgkey: Rocky-$releasever + replaces: Rocky-BaseOS + + rocky-appstream: + name: Rocky Linux $releasever - AppStream + baseurl: http://{{ yum_host}}/rocky/$releasever/AppStream/$basearch + gpgkey: Rocky-$releasever + replaces: Rocky-AppStream + + rocky-extras: + name: Rocky Linux $releasever - Extras + baseurl: http://{{ yum_host }}/rocky/$releasever/extras/$basearch + gpgkey: Rocky-$releasever + replaces: Rocky-Extras + + rocky-powertools: + name: Rocky Linux $releasever - PowerTools + baseurl: http://{{ yum_host }}/rocky/$releasever/PowerTools/$basearch + gpgkey: Rocky-$releasever + replaces: Rocky-PowerTools + + rocky-crb: + name: Rocky Linux $releasever - CRB + baseurl: http://{{ yum_host }}/rocky/$releasever/CRB/$basearch + gpgkey: Rocky-$releasever + replaces: Rocky-CRB + + epel: + name: Extra Packages for Enterprise Linux $releasever + baseurl: http://{{ yum_host }}/epel/$releasever/$basearch + gpgkey: EPEL-$releasever + + zfs-kmod: + name: ZFS on Linux for Enterprise Linux $releasever + baseurl: http://{{ yum_host }}/zfsonlinux/$releasever/$basearch + gpgkey: zfsonlinux-$releasever + + rpmfusion-free: + name: RPM Fusion for EL $releasever - Free + baseurl: http://{{ yum_host }}/rpmfusion/$releasever/free/$basearch + gpgkey: rpmfusion-free-el-$releasever + + rpmfusion-nonfree: + name: RPM Fusion for EL $releasever - Nonfree + baseurl: http://{{ yum_host }}/rpmfusion/$releasever/nonfree/$basearch + gpgkey: rpmfusion-nonfree-el-$releasever + + rpmfusion-free-tainted: + name: RPM Fusion for EL $releasever - Free Tainted + baseurl: http://{{ yum_host }}/rpmfusion/$releasever/free-tainted/$basearch + gpgkey: rpmfusion-free-el-$releasever + + rpmfusion-nonfree-tainted: + name: RPM Fusion for EL $releasever - Nonfree Tainted + baseurl: http://{{ yum_host }}/rpmfusion/$releasever/nonfree-tainted/$basearch + gpgkey: rpmfusion-nonfree-el-$releasever + + mongodb-4.0: + name: MongoDB 4.0 for EL $releasever + baseurl: http://{{ yum_host }}/mongodb/$releasever/4.0/$basearch + gpgkey: mongodb-4.0 + + mongodb-4.4: + name: MongoDB 4.4 for EL $releasever + baseurl: http://{{ yum_host }}/mongodb/$releasever/4.4/$basearch + gpgkey: mongodb-4.4 + + mongodb-6.0: + name: MongoDB 6.0 for EL $releasever + baseurl: http://{{ yum_host }}/mongodb/$releasever/6.0/$basearch + gpgkey: mongodb-6.0 + + crystal: + name: Crystal for EL $releasever + baseurl: http://{{ yum_host }}/crystal/$releasever/$basearch + gpgkey: crystal-$releasever + + rspamd: + name: Rspamd for EL $releasever + baseurl: http://{{ yum_host }}/rspamd/$releasever/$basearch + gpgkey: rspamd-stable diff --git a/roles/yum_disable_default_repos/tasks/main.yml b/roles/yum_disable_default_repos/tasks/main.yml new file mode 100644 index 0000000..009b219 --- /dev/null +++ b/roles/yum_disable_default_repos/tasks/main.yml @@ -0,0 +1,8 @@ +- name: disable default repositories + replace: + path: /etc/yum.repos.d/{{ repo }}.repo + regexp: '^enabled\s*=.*$' + replace: enabled=0 + loop: '{{ yum_default_repository_files[ansible_distribution_major_version] }}' + loop_control: + loop_var: repo diff --git a/roles/yum_disable_default_repos/vars/main.yml b/roles/yum_disable_default_repos/vars/main.yml new file mode 100644 index 0000000..3f4498d --- /dev/null +++ b/roles/yum_disable_default_repos/vars/main.yml @@ -0,0 +1,11 @@ +yum_default_repository_files: + '8': + - Rocky-BaseOS + 
- Rocky-AppStream + - Rocky-Extras + - Rocky-PowerTools + '9': + - rocky + - rocky-extras + - rocky-devel + - rocky-addons diff --git a/roles/yum_mirror/defaults/main.yml b/roles/yum_mirror/defaults/main.yml new file mode 100644 index 0000000..f0bbb1c --- /dev/null +++ b/roles/yum_mirror/defaults/main.yml @@ -0,0 +1,2 @@ +yum_sync_on_calendar: '22,23,10,11:00' +yum_mirrorlist_country: US diff --git a/roles/yum_mirror/handlers/main.yml b/roles/yum_mirror/handlers/main.yml new file mode 100644 index 0000000..d1e067c --- /dev/null +++ b/roles/yum_mirror/handlers/main.yml @@ -0,0 +1,5 @@ +- name: reload reposync timer + systemd: + name: reposync.timer + daemon-reload: yes + state: restarted diff --git a/roles/yum_mirror/tasks/main.yml b/roles/yum_mirror/tasks/main.yml new file mode 100644 index 0000000..2e00d17 --- /dev/null +++ b/roles/yum_mirror/tasks/main.yml @@ -0,0 +1,47 @@ +- name: install packages + dnf: + name: yum-utils + state: present + +- name: create repo definitions + yum_repository: + name: '{{ item.id }}' + file: mirrors + description: '{{ item.name }}' + metalink: '{{ item.metalink | default(omit) }}' + mirrorlist: '{{ item.mirrorlist | default(omit) }}' + baseurl: '{{ item.baseurl | default(omit) }}' + gpgcheck: no + enabled: no + state: present + loop: '{{ yum_mirror_repositories }}' + +- name: create repo directories + file: + path: '{{ yum_mirror_webroot }}/{{ item.path }}' + state: directory + recurse: yes + setype: _default + loop: '{{ yum_mirror_repositories }}' + +- name: generate reposync cron script + template: + src: '{{ yum_mirror_reposync_script[1:] }}.j2' + dest: '{{ yum_mirror_reposync_script }}' + mode: 0555 + +- name: set up reposync timer + include_role: + name: systemd_timer + vars: + timer_name: reposync + timer_description: Sync yum mirrors from upstream + timer_after: network.target + timer_on_calendar: '{{ yum_sync_on_calendar }}' + timer_exec: '{{ yum_mirror_reposync_script }}' + +- name: sync repositories + systemd: + name: reposync.service + state: started + changed_when: no diff --git a/roles/yum_mirror/templates/usr/local/sbin/reposync.sh.j2 b/roles/yum_mirror/templates/usr/local/sbin/reposync.sh.j2 new file mode 100644 index 0000000..368e41e --- /dev/null +++ b/roles/yum_mirror/templates/usr/local/sbin/reposync.sh.j2 @@ -0,0 +1,17 @@ +#!/bin/bash + +EXITSTATUS=0 + +{% for repo in yum_mirror_repositories %} +# {{ repo.name }} +dnf reposync \ + --norepopath \ + --download-path {{ yum_mirror_webroot }}/{{ repo.path }} \ + --download-metadata \ + --delete \ + --quiet \ + --repo {{ repo.id }} || EXITSTATUS=1 + +{% endfor %} + +exit $EXITSTATUS diff --git a/roles/yum_mirror/vars/main.yml b/roles/yum_mirror/vars/main.yml new file mode 100644 index 0000000..dd25a3b --- /dev/null +++ b/roles/yum_mirror/vars/main.yml @@ -0,0 +1,130 @@ +yum_mirror_webroot: /var/www/yum +yum_mirror_reposync_script: /usr/local/sbin/reposync.sh + +yum_mirror_repositories: + ### RHEL 8 ### + - id: rocky-8-baseos-x86_64 + name: Rocky Linux 8 - BaseOS + path: rocky/8/BaseOS/x86_64 + mirrorlist: https://mirrors.rockylinux.org/mirrorlist?arch=x86_64&country={{ yum_mirrorlist_country }}&repo=BaseOS-8 + + - id: rocky-8-appstream-x86_64 + name: Rocky Linux 8 - AppStream + path: rocky/8/AppStream/x86_64 + mirrorlist: https://mirrors.rockylinux.org/mirrorlist?arch=x86_64&country={{ yum_mirrorlist_country }}&repo=AppStream-8 + + - id: rocky-8-extras-x86_64 + name: Rocky Linux 8 - Extras + path: rocky/8/extras/x86_64 + mirrorlist: 
https://mirrors.rockylinux.org/mirrorlist?arch=x86_64&country={{ yum_mirrorlist_country }}&repo=extras-8 + + - id: rocky-8-powertools-x86_64 + name: Rocky Linux 8 - PowerTools + path: rocky/8/PowerTools/x86_64 + mirrorlist: https://mirrors.rockylinux.org/mirrorlist?arch=x86_64&country={{ yum_mirrorlist_country }}&repo=PowerTools-8 + + - id: epel-8-x86_64 + name: Extra Packages for Enterprise Linux 8 + path: epel/8/x86_64 + metalink: https://mirrors.fedoraproject.org/metalink?arch=x86_64&country={{ yum_mirrorlist_country }}&content=pub%2Frocky&repo=epel-8 + + - id: zfs-kmod-8.5-x86_64 + name: ZFS on Linux for Enterprise Linux 8.5 + path: zfsonlinux/8.5/x86_64 + baseurl: http://download.zfsonlinux.org/epel/8.5/kmod/x86_64/ + + - id: zfs-kmod-8-x86_64 + name: ZFS on Linux for Enterprise Linux 8 + path: zfsonlinux/8/x86_64 + baseurl: http://download.zfsonlinux.org/epel/8/kmod/x86_64/ + + - id: rpmfusion-free-8-x86_64 + name: RPM Fusion for EL 8 - Free + path: rpmfusion/8/free/x86_64 + mirrorlist: http://mirrors.rpmfusion.org/mirrorlist?repo=free-el-updates-released-8&arch=x86_64 + + - id: rpmfusion-nonfree-8-x86_64 + name: RPM Fusion for EL 8 - Nonfree + path: rpmfusion/8/nonfree/x86_64 + mirrorlist: http://mirrors.rpmfusion.org/mirrorlist?repo=nonfree-el-updates-released-8&arch=x86_64 + + - id: mongodb-4.0-8-x86_64 + name: MongoDB 4.0 for EL 8 + path: mongodb/8/4.0/x86_64 + baseurl: https://repo.mongodb.org/yum/redhat/8/mongodb-org/4.0/x86_64/ + + - id: mongodb-4.4-8-x86_64 + name: MongoDB 4.4 for EL 8 + path: mongodb/8/4.4/x86_64 + baseurl: https://repo.mongodb.org/yum/redhat/8/mongodb-org/4.4/x86_64/ + + - id: mongodb-6.0-8-x86_64 + name: MongoDB 6.0 for EL 8 + path: mongodb/8/6.0/x86_64 + baseurl: https://repo.mongodb.org/yum/redhat/8/mongodb-org/6.0/x86_64/ + + - id: crystal-8-x86_64 + name: Crystal Lang for EL 8 + path: crystal/8/x86_64 + baseurl: https://download.opensuse.org/repositories/devel:/languages:/crystal/CentOS_8/ + + - id: rspamd-8-x86_64 + name: Rspamd for EL 8 + path: rspamd/8/x86_64 + baseurl: http://rspamd.com/rpm-stable/centos-8/x86_64/ + + ### RHEL 9 ### + - id: rocky-9-baseos-x86_64 + name: Rocky Linux 9 - BaseOS + path: rocky/9/BaseOS/x86_64 + mirrorlist: https://mirrors.rockylinux.org/mirrorlist?arch=x86_64&country={{ yum_mirrorlist_country }}&repo=BaseOS-9 + + - id: rocky-9-appstream-x86_64 + name: Rocky Linux 9 - AppStream + path: rocky/9/AppStream/x86_64 + mirrorlist: https://mirrors.rockylinux.org/mirrorlist?arch=x86_64&country={{ yum_mirrorlist_country }}&repo=AppStream-9 + + - id: rocky-9-extras-x86_64 + name: Rocky Linux 9 - Extras + path: rocky/9/extras/x86_64 + mirrorlist: https://mirrors.rockylinux.org/mirrorlist?arch=x86_64&country={{ yum_mirrorlist_country }}&repo=extras-9 + + - id: rocky-9-crb-x86_64 + name: Rocky Linux 9 - CRB + path: rocky/9/CRB/x86_64 + mirrorlist: https://mirrors.rockylinux.org/mirrorlist?arch=x86_64&country={{ yum_mirrorlist_country }}&repo=CRB-9 + + - id: epel-9-x86_64 + name: Extra Packages for Enterprise Linux 9 + path: epel/9/x86_64 + metalink: https://mirrors.fedoraproject.org/metalink?arch=x86_64&country={{ yum_mirrorlist_country }}&content=pub%2Frocky&repo=epel-9 + + - id: zfs-kmod-9-x86_64 + name: ZFS on Linux for Enterprise Linux 9 + path: zfsonlinux/9/x86_64 + baseurl: http://download.zfsonlinux.org/epel/9/kmod/x86_64/ + + - id: rpmfusion-free-9-x86_64 + name: RPM Fusion for EL 9 - Free + path: rpmfusion/9/free/x86_64 + mirrorlist: 
http://mirrors.rpmfusion.org/mirrorlist?repo=free-el-updates-released-9&arch=x86_64&country={{ yum_mirrorlist_country }} + + - id: rpmfusion-free-tainted-9-x86_64 + name: RPM Fusion for EL 9 - Free Tainted + path: rpmfusion/9/free-tainted/x86_64 + mirrorlist: https://mirrors.rpmfusion.org/mirrorlist?repo=free-el-tainted-9&arch=x86_64&country={{ yum_mirrorlist_country }} + + - id: rpmfusion-nonfree-9-x86_64 + name: RPM Fusion for EL 9 - Nonfree + path: rpmfusion/9/nonfree/x86_64 + mirrorlist: http://mirrors.rpmfusion.org/mirrorlist?repo=nonfree-el-updates-released-9&arch=x86_64&country={{ yum_mirrorlist_country }} + + - id: rpmfusion-nonfree-tainted-9-x86_64 + name: RPM Fusion for EL 9 - Nonfree Tainted + path: rpmfusion/9/nonfree-tainted/x86_64 + mirrorlist: https://mirrors.rpmfusion.org/mirrorlist?repo=nonfree-el-tainted-9&arch=x86_64&country={{ yum_mirrorlist_country }} + + - id: mongodb-6.0-9-x86_64 + name: MongoDB 6.0 for EL 9 + path: mongodb/9/6.0/x86_64 + baseurl: https://repo.mongodb.org/yum/redhat/9/mongodb-org/6.0/x86_64/ diff --git a/roles/zfs/defaults/main.yml b/roles/zfs/defaults/main.yml new file mode 100644 index 0000000..bb080b7 --- /dev/null +++ b/roles/zfs/defaults/main.yml @@ -0,0 +1,10 @@ +zfs_pools: [] +zfs_datasets: [] +zfs_trim_on_calendar: monthly +zfs_scrub_on_calendar: monthly + +zfs_zed_email: root +zfs_zed_verbose: yes +zfs_zed_notify_interval_sec: 3600 + +zfs_auto_snapshot_version: master diff --git a/roles/zfs/handlers/main.yml b/roles/zfs/handlers/main.yml new file mode 100644 index 0000000..e85164d --- /dev/null +++ b/roles/zfs/handlers/main.yml @@ -0,0 +1,4 @@ +- name: restart zfs-zed + systemd: + name: zfs-zed + state: restarted diff --git a/roles/zfs/meta/main.yml b/roles/zfs/meta/main.yml new file mode 100644 index 0000000..08f6bb2 --- /dev/null +++ b/roles/zfs/meta/main.yml @@ -0,0 +1,4 @@ +dependencies: + - role: yum + yum_repositories: zfs-kmod + tags: yum diff --git a/roles/zfs/tasks/create_zpool.yml b/roles/zfs/tasks/create_zpool.yml new file mode 100644 index 0000000..a99eb75 --- /dev/null +++ b/roles/zfs/tasks/create_zpool.yml @@ -0,0 +1,20 @@ +- name: check if zpool exists + command: zpool list -Ho name {{ zpool.name }} + failed_when: no + changed_when: no + register: zpool_exists + +- name: create zpools + command: > + zpool create + {% for property in (zpool.properties | default({}) | dict2items) %} + -o {{ property.key }}={{ property.value }} + {% endfor %} + {% if zpool.mountpoint is defined %} + -m {{ zpool.mountpoint }} + {% endif %} + {{ zpool.name }} + {% for vdev in zpool.vdevs %} + {{ vdev.type }} {% if vdev.type == 'log' and vdev.devices | length > 1 %}mirror {% endif %}{{ vdev.devices | join(' ') }} + {% endfor %} + when: zpool_exists.rc == 1 diff --git a/roles/zfs/tasks/main.yml b/roles/zfs/tasks/main.yml new file mode 100644 index 0000000..639b6c6 --- /dev/null +++ b/roles/zfs/tasks/main.yml @@ -0,0 +1,70 @@ +- name: install packages + dnf: + name: '{{ zfs_packages }}' + state: present + +- name: modprobe zfs + modprobe: + name: zfs + state: present + +- name: create systemd units + template: + src: etc/systemd/system/zfs-{{ item[0] }}@.{{ item[1] }}.j2 + dest: /etc/systemd/system/zfs-{{ item[0] }}@.{{ item[1] }} + loop: "{{ ['scrub', 'trim'] | product(['service', 'timer']) }}" + register: zfs_units + +- name: reload systemd units + systemd: + daemon-reload: yes + when: zfs_units.changed + +- name: create zpools + include_tasks: create_zpool.yml + loop: '{{ zfs_pools }}' + loop_control: + loop_var: zpool + label: '{{ zpool.name }}' + +- 
name: create datasets + zfs: + name: '{{ item if item is string else item.name }}' + state: present + extra_zfs_properties: '{{ omit if item is string else (item.properties | default({})) }}' + loop: '{{ zfs_datasets }}' + +- name: enable periodic trim and scrub + systemd: + name: zfs-{{ item[1] }}@{{ item[0].name }}.timer + state: started + enabled: yes + loop: "{{ zfs_pools | product(['trim', 'scrub']) }}" + loop_control: + label: zfs-{{ item[1] }}@{{ item[0].name }}.timer + +- name: generate zed config file + template: + src: etc/zfs/zed.d/zed.rc.j2 + dest: /etc/zfs/zed.d/zed.rc + notify: restart zfs-zed + +- name: enable zfs event daemon + systemd: + name: zfs-zed + enabled: yes + state: started + +- name: clone zfs-auto-snapshot + git: + repo: '{{ zfs_auto_snapshot_repo }}' + update: yes + version: '{{ zfs_auto_snapshot_version }}' + dest: '{{ zfs_auto_snapshot_dir }}' + register: zfs_auto_snapshot_git + +- name: install zfs-auto-snapshot + command: + cmd: make install PREFIX=/usr + chdir: '{{ zfs_auto_snapshot_dir }}' + when: zfs_auto_snapshot_git.changed diff --git a/roles/zfs/templates/etc/systemd/system/zfs-scrub@.service.j2 b/roles/zfs/templates/etc/systemd/system/zfs-scrub@.service.j2 new file mode 100644 index 0000000..3dfb199 --- /dev/null +++ b/roles/zfs/templates/etc/systemd/system/zfs-scrub@.service.j2 @@ -0,0 +1,11 @@ +[Unit] +Description=zpool scrub for %i + +[Service] +Nice=19 +IOSchedulingClass=idle +KillSignal=SIGINT +ExecStart=zpool scrub %i + +[Install] +WantedBy=multi-user.target diff --git a/roles/zfs/templates/etc/systemd/system/zfs-scrub@.timer.j2 b/roles/zfs/templates/etc/systemd/system/zfs-scrub@.timer.j2 new file mode 100644 index 0000000..d7b35e2 --- /dev/null +++ b/roles/zfs/templates/etc/systemd/system/zfs-scrub@.timer.j2 @@ -0,0 +1,10 @@ +[Unit] +Description=zpool scrub for %i on calendar interval + +[Timer] +OnCalendar={{ zfs_scrub_on_calendar }} +AccuracySec=1h +Persistent=true + +[Install] +WantedBy=multi-user.target diff --git a/roles/zfs/templates/etc/systemd/system/zfs-trim@.service.j2 b/roles/zfs/templates/etc/systemd/system/zfs-trim@.service.j2 new file mode 100644 index 0000000..ef3ec43 --- /dev/null +++ b/roles/zfs/templates/etc/systemd/system/zfs-trim@.service.j2 @@ -0,0 +1,11 @@ +[Unit] +Description=zpool trim for %i + +[Service] +Nice=19 +IOSchedulingClass=idle +KillSignal=SIGINT +ExecStart=zpool trim %i + +[Install] +WantedBy=multi-user.target diff --git a/roles/zfs/templates/etc/systemd/system/zfs-trim@.timer.j2 b/roles/zfs/templates/etc/systemd/system/zfs-trim@.timer.j2 new file mode 100644 index 0000000..77e5535 --- /dev/null +++ b/roles/zfs/templates/etc/systemd/system/zfs-trim@.timer.j2 @@ -0,0 +1,10 @@ +[Unit] +Description=Zpool trim for %i on calendar interval + +[Timer] +OnCalendar={{ zfs_trim_on_calendar }} +AccuracySec=1h +Persistent=true + +[Install] +WantedBy=multi-user.target diff --git a/roles/zfs/templates/etc/zfs/zed.d/zed.rc.j2 b/roles/zfs/templates/etc/zfs/zed.d/zed.rc.j2 new file mode 100644 index 0000000..4a09398 --- /dev/null +++ b/roles/zfs/templates/etc/zfs/zed.d/zed.rc.j2 @@ -0,0 +1,10 @@ +ZED_EMAIL_ADDR="{{ zfs_zed_email }}" +ZED_EMAIL_PROG="mail" +ZED_EMAIL_OPTS="-s '@SUBJECT@' @ADDRESS@" + +ZED_NOTIFY_INTERVAL_SECS={{ zfs_zed_notify_interval_sec }} +ZED_NOTIFY_VERBOSE={{ zfs_zed_verbose | bool | int }} + +ZED_USE_ENCLOSURE_LEDS=1 + +ZED_SYSLOG_SUBCLASS_EXCLUDE="history_event" diff --git a/roles/zfs/vars/main.yml b/roles/zfs/vars/main.yml new file mode 100644 index 0000000..e4781f5 --- /dev/null +++ 
b/roles/zfs/vars/main.yml @@ -0,0 +1,6 @@ +zfs_packages: + - zfs + - git + - make +zfs_auto_snapshot_repo: https://github.com/zfsonlinux/zfs-auto-snapshot +zfs_auto_snapshot_dir: /usr/local/src/zfs-auto-snapshot diff --git a/roles/znc/defaults/main.yml b/roles/znc/defaults/main.yml new file mode 100644 index 0000000..229ab0a --- /dev/null +++ b/roles/znc/defaults/main.yml @@ -0,0 +1,4 @@ +znc_irc_port: 6697 +znc_https_port: 8443 +znc_max_networks: 10 +znc_access_group: role-znc-access diff --git a/roles/znc/files/etc/sasl2/znc.conf b/roles/znc/files/etc/sasl2/znc.conf new file mode 100644 index 0000000..ad929f7 --- /dev/null +++ b/roles/znc/files/etc/sasl2/znc.conf @@ -0,0 +1,2 @@ +pwcheck_method: saslauthd +mech_list: plain diff --git a/roles/znc/handlers/main.yml b/roles/znc/handlers/main.yml new file mode 100644 index 0000000..4db4153 --- /dev/null +++ b/roles/znc/handlers/main.yml @@ -0,0 +1,8 @@ +- name: restart saslauthd + systemd: + name: saslauthd + state: restarted + +- name: reload znc + command: pkill -HUP znc + failed_when: no diff --git a/roles/znc/meta/main.yml b/roles/znc/meta/main.yml new file mode 100644 index 0000000..29230f9 --- /dev/null +++ b/roles/znc/meta/main.yml @@ -0,0 +1,4 @@ +dependencies: + - role: yum + yum_repositories: epel + tags: yum diff --git a/roles/znc/tasks/freeipa.yml b/roles/znc/tasks/freeipa.yml new file mode 100644 index 0000000..3e3ab07 --- /dev/null +++ b/roles/znc/tasks/freeipa.yml @@ -0,0 +1,49 @@ +- name: create HBAC service + ipahbacsvc: + ipaadmin_principal: '{{ ipa_user }}' + ipaadmin_password: '{{ ipa_pass }}' + name: '{{ znc_hbac_service }}' + description: ZNC IRC Bouncer + state: present + run_once: yes + +- name: create znc-servers hostgroup + ipahostgroup: + ipaadmin_principal: '{{ ipa_user }}' + ipaadmin_password: '{{ ipa_pass }}' + name: '{{ znc_hbac_hostgroup }}' + description: ZNC Servers + host: "{{ groups[znc_hbac_hostgroup] | map('regex_replace', '$', '.' 
diff --git a/roles/znc/tasks/freeipa.yml b/roles/znc/tasks/freeipa.yml
new file mode 100644
index 0000000..3e3ab07
--- /dev/null
+++ b/roles/znc/tasks/freeipa.yml
@@ -0,0 +1,49 @@
+- name: create HBAC service
+  ipahbacsvc:
+    ipaadmin_principal: '{{ ipa_user }}'
+    ipaadmin_password: '{{ ipa_pass }}'
+    name: '{{ znc_hbac_service }}'
+    description: ZNC IRC Bouncer
+    state: present
+  run_once: yes
+
+- name: create znc-servers hostgroup
+  ipahostgroup:
+    ipaadmin_principal: '{{ ipa_user }}'
+    ipaadmin_password: '{{ ipa_pass }}'
+    name: '{{ znc_hbac_hostgroup }}'
+    description: ZNC Servers
+    host: "{{ groups[znc_hbac_hostgroup] | map('regex_replace', '$', '.' ~ ansible_domain) }}"
+    state: present
+  run_once: yes
+
+- name: create access group
+  ipagroup:
+    ipaadmin_principal: '{{ ipa_user }}'
+    ipaadmin_password: '{{ ipa_pass }}'
+    name: '{{ znc_access_group }}'
+    description: ZNC Users
+    nonposix: yes
+    state: present
+  run_once: yes
+
+- name: create HBAC rule
+  ipahbacrule:
+    ipaadmin_principal: '{{ ipa_user }}'
+    ipaadmin_password: '{{ ipa_pass }}'
+    name: allow_znc_on_znc_servers
+    description: Allow ZNC on ZNC servers
+    hostgroup:
+      - '{{ znc_hbac_hostgroup }}'
+    group:
+      - '{{ znc_access_group }}'
+    hbacsvc:
+      - '{{ znc_hbac_service }}'
+  run_once: yes
+
+- name: generate PAM configuration
+  copy:
+    content: |
+      auth required pam_sss.so
+      account required pam_sss.so
+    dest: /etc/pam.d/znc
diff --git a/roles/znc/tasks/main.yml b/roles/znc/tasks/main.yml
new file mode 100644
index 0000000..a64ffc5
--- /dev/null
+++ b/roles/znc/tasks/main.yml
@@ -0,0 +1,86 @@
+- name: install packages
+  dnf:
+    name: '{{ znc_packages }}'
+    state: present
+
+- name: request TLS certificate
+  include_role:
+    name: getcert_request
+  vars:
+    certificate_service: irc
+    certificate_path: '{{ znc_certificate_path }}'
+    certificate_key_path: '{{ znc_certificate_key_path }}'
+    certificate_owner: znc
+    certificate_hook: pkill -HUP znc
+
+- name: generate dhparams
+  openssl_dhparam:
+    path: '{{ znc_dhparams_path }}'
+    size: 2048
+
+- import_tasks: freeipa.yml
+  tags: freeipa
+
+- name: configure saslauthd for znc
+  copy:
+    src: etc/sasl2/znc.conf
+    dest: /etc/sasl2/znc.conf
+  notify: restart saslauthd
+
+- name: enable saslauthd
+  systemd:
+    name: saslauthd
+    enabled: yes
+    state: started
+
+- name: create config directories
+  file:
+    path: '{{ znc_home }}/{{ item }}'
+    state: directory
+    owner: znc
+    group: znc
+    mode: 0700
+  loop:
+    - ''
+    - 'configs'
+    - 'moddata'
+    - 'moddata/cyrusauth'
+
+- name: generate config files
+  template:
+    src: '{{ znc_home[1:] }}/{{ item }}.j2'
+    dest: '{{ znc_home }}/{{ item }}'
+    owner: znc
+    group: znc
+  loop:
+    - configs/znc.conf
+    - moddata/cyrusauth/.registry
+  notify: reload znc
+
+- name: start znc
+  systemd:
+    name: znc
+    enabled: yes
+    state: started
+
+- name: forward https port
+  firewalld:
+    permanent: yes
+    immediate: yes
+    rich_rule: 'rule family={{ item }} forward-port port={{ 443 }} protocol=tcp to-port={{ znc_https_port }}'
+    state: enabled
+  loop:
+    - ipv4
+    - ipv6
+  tags: firewalld
+
+- name: open firewall ports
+  firewalld:
+    permanent: yes
+    immediate: yes
+    service: '{{ item }}'
+    state: enabled
+  loop:
+    - ircs
+    - https
+  tags: firewalld
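One pattern in the tasks above is worth spelling out: template sources are simply the destination paths with the leading slash stripped, so roles/znc/templates/ mirrors the target filesystem. A small sketch of how the lookup resolves (the debug task is purely illustrative, not part of the role):

  - name: show how the template src paths resolve
    debug:
      msg: '{{ znc_home[1:] }}/{{ item }}.j2'
    loop:
      - configs/znc.conf
      - moddata/cyrusauth/.registry

With znc_home set to /var/lib/znc/.znc this prints var/lib/znc/.znc/configs/znc.conf.j2 and var/lib/znc/.znc/moddata/cyrusauth/.registry.j2, matching the role-relative template paths in the files rendered below.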
diff --git a/roles/znc/templates/var/lib/znc/.znc/configs/znc.conf.j2 b/roles/znc/templates/var/lib/znc/.znc/configs/znc.conf.j2
new file mode 100644
index 0000000..10f4df5
--- /dev/null
+++ b/roles/znc/templates/var/lib/znc/.znc/configs/znc.conf.j2
@@ -0,0 +1,62 @@
+AnonIPLimit = 10
+AuthOnlyViaModule = true
+ConfigWriteDelay = 0
+ConnectDelay = 5
+HideVersion = false
+LoadModule = cyrusauth saslauthd
+LoadModule = webadmin
+MaxBufferSize = 500
+ProtectWebSessions = true
+SSLCertFile = {{ znc_certificate_path }}
+SSLDHParamFile = {{ znc_dhparams_path }}
+SSLKeyFile = {{ znc_certificate_key_path }}
+ServerThrottle = 30
+Version = 1.8.2
+
+<Listener https>
+    AllowIRC = false
+    AllowWeb = true
+    IPv4 = true
+    IPv6 = true
+    Port = {{ znc_https_port }}
+    SSL = true
+</Listener>
+
+<Listener irc>
+    AllowIRC = true
+    AllowWeb = false
+    IPv4 = true
+    IPv6 = true
+    Port = {{ znc_irc_port }}
+    SSL = true
+</Listener>
+
+<User znc_admin>
+    Admin = true
+    Nick = znc_admin
+    AltNick = znc_admin_
+    Ident = znc_admin
+    RealName = ZNC Administrator
+
+    <Pass password>
+        Hash = ::
+        Method = MD5
+        Salt = ::
+    </Pass>
+</User>
+
+<User {{ znc_clone_user }}>
+    Admin = false
+    Nick = znc_user
+    AltNick = znc_user_
+    Ident = znc_user
+    RealName = ZNC User
+    MaxNetworks = {{ znc_max_networks }}
+    LoadModule = chansaver
+
+    <Pass password>
+        Hash = ::
+        Method = MD5
+        Salt = ::
+    </Pass>
+</User>
diff --git a/roles/znc/templates/var/lib/znc/.znc/moddata/cyrusauth/.registry.j2 b/roles/znc/templates/var/lib/znc/.znc/moddata/cyrusauth/.registry.j2
new file mode 100644
index 0000000..31bbe6e
--- /dev/null
+++ b/roles/znc/templates/var/lib/znc/.znc/moddata/cyrusauth/.registry.j2
@@ -0,0 +1,2 @@
+CloneUser {{ znc_clone_user }}
+CreateUser yes
diff --git a/roles/znc/vars/main.yml b/roles/znc/vars/main.yml
new file mode 100644
index 0000000..27cebbf
--- /dev/null
+++ b/roles/znc/vars/main.yml
@@ -0,0 +1,20 @@
+znc_packages:
+  - znc
+  - cyrus-sasl
+  - cyrus-sasl-plain
+
+znc_home: /var/lib/znc/.znc
+znc_clone_user: cloneuser
+
+znc_hbac_hostgroup: znc_servers
+znc_hbac_service: znc
+
+znc_certificate_path: /etc/pki/tls/certs/znc.pem
+znc_certificate_key_path: /etc/pki/tls/private/znc.key
+znc_dhparams_path: /etc/pki/tls/certs/dhparams-znc.pem
+
+znc_archive_shell: >-
+  TIMESTAMP=$(date +%Y%m%d%H%M%S);
+  tar czf "znc-${TIMESTAMP}.tar.gz"
+  --transform "s|^\.|znc-${TIMESTAMP}|"
+  -C "{{ znc_home }}" .
diff --git a/vendor/ansible-freeipa b/vendor/ansible-freeipa
new file mode 160000
index 0000000..0c23ae5
--- /dev/null
+++ b/vendor/ansible-freeipa
@@ -0,0 +1 @@
+Subproject commit 0c23ae5b3701f31104e33de52fc4890034d06e77
-- cgit