Kaynağa Gözat

Initial revision.

Andrew Klopper 8 yıl önce
işleme
2aebfe85e6
100 değiştirilmiş dosya ile 2049 ekleme ve 0 silme
  1. 2 0
      .gitignore
  2. 140 0
      base/admin/ca.sls
  3. 63 0
      base/admin/certbot.sls
  4. 1 0
      base/admin/files/certbot.cron
  5. 10 0
      base/admin/files/certbot.nginx
  6. 31 0
      base/admin/files/signing_policies.conf
  7. 49 0
      base/admin/init.sls
  8. 22 0
      base/bootstrap/files/debian/fix-resolv-conf
  9. 103 0
      base/bootstrap/init.sls
  10. 16 0
      base/circusd/files/circusd.ini
  11. 22 0
      base/circusd/files/circusd.logger.yaml
  12. 13 0
      base/circusd/files/circusd.service
  13. 2 0
      base/circusd/files/requirements.in
  14. 10 0
      base/circusd/files/requirements.txt
  15. 78 0
      base/circusd/init.sls
  16. 56 0
      base/circusd/lib.jinja
  17. 5 0
      base/constant_state.sls
  18. 3 0
      base/consul/files/conf.d/datacenter.json
  19. 6 0
      base/consul/files/conf.d/dns.json
  20. 3 0
      base/consul/files/conf.d/join.json
  21. 4 0
      base/consul/files/conf.d/security.json
  22. 7 0
      base/consul/files/conf.d/tls.json
  23. 14 0
      base/consul/files/consul.service
  24. 66 0
      base/consul/init.sls
  25. 5 0
      base/consul/server/files/server.json
  26. 15 0
      base/consul/server/init.sls
  27. 8 0
      base/disabled_users.sls
  28. 3 0
      base/dnsmasq/files/common.conf
  29. 2 0
      base/dnsmasq/files/consul.conf
  30. 18 0
      base/dnsmasq/init.sls
  31. 7 0
      base/dnsmasq/map.jinja
  32. 12 0
      base/firewall/backend_private_ip_entries.sls
  33. 5 0
      base/firewall/files/ipset-save.sh
  34. 21 0
      base/firewall/files/restore-iptables
  35. 292 0
      base/firewall/init.sls
  36. 2 0
      base/firewall/public/dns.sls
  37. 2 0
      base/firewall/public/http.sls
  38. 2 0
      base/firewall/public/https.sls
  39. 2 0
      base/firewall/public/imap.sls
  40. 2 0
      base/firewall/public/imaps.sls
  41. 11 0
      base/firewall/public/lib.jinja
  42. 2 0
      base/firewall/public/pop3.sls
  43. 2 0
      base/firewall/public/pop3s.sls
  44. 2 0
      base/firewall/public/smtp.sls
  45. 2 0
      base/firewall/public/submission.sls
  46. 2 0
      base/generic_packages/git.sls
  47. 2 0
      base/generic_packages/jq.sls
  48. 7 0
      base/generic_packages/lib.jinja
  49. 2 0
      base/generic_packages/m2crypto.sls
  50. 24 0
      base/generic_packages/map.jinja
  51. 2 0
      base/generic_packages/net_tools.sls
  52. 2 0
      base/generic_packages/sharutils.sls
  53. 2 0
      base/generic_packages/tcpdump.sls
  54. 2 0
      base/generic_packages/unzip.sls
  55. 2 0
      base/generic_packages/zip.sls
  56. 57 0
      base/gitea/files/app.ini
  57. 28 0
      base/gitea/files/gitea.nginx
  58. 26 0
      base/gitea/files/gitea.service
  59. 99 0
      base/gitea/init.sls
  60. 40 0
      base/globals.jinja
  61. 42 0
      base/lib.jinja
  62. 11 0
      base/mariadb/files/utf8mb4.cnf
  63. 22 0
      base/mariadb/init.sls
  64. 20 0
      base/mariadb/map.jinja
  65. 3 0
      base/mariadb/server/files/backup.sh
  66. 3 0
      base/mariadb/server/files/bind.cnf
  67. 2 0
      base/mariadb/server/files/mysql-my.cnf
  68. 92 0
      base/mariadb/server/init.sls
  69. 15 0
      base/mount/data.sls
  70. 5 0
      base/nginx/files/conf.d/drop-invalid-hosts.conf
  71. 25 0
      base/nginx/files/fastcgi.conf
  72. 8 0
      base/nginx/files/include/certbot.conf
  73. 5 0
      base/nginx/files/include/proxy.conf
  74. 3 0
      base/nginx/files/include/websockets.conf
  75. 85 0
      base/nginx/files/nginx.conf
  76. 13 0
      base/nginx/files/snippets/fastcgi-php.conf
  77. 5 0
      base/nginx/files/snippets/snakeoil.conf
  78. 32 0
      base/nginx/init.sls
  79. 25 0
      base/nginx/lib.jinja
  80. 16 0
      base/nginx/map.jinja
  81. 16 0
      base/nginx/ssl-files/conf.d/ssl-drop-invalid-hosts.conf
  82. 3 0
      base/nginx/ssl-files/include/ssl.conf
  83. 43 0
      base/nginx/ssl.sls
  84. 8 0
      base/pip/init.sls
  85. 19 0
      base/pip/virtualenvwrapper.sls
  86. 9 0
      base/root_user/files/restart_minion.sh
  87. 22 0
      base/root_user/init.sls
  88. 12 0
      base/saltstack/map.jinja
  89. 3 0
      base/saltstack/master/files/file_roots.conf
  90. 1 0
      base/saltstack/master/files/hash_type.conf
  91. 1 0
      base/saltstack/master/files/interface.conf
  92. 2 0
      base/saltstack/master/files/output.conf
  93. 3 0
      base/saltstack/master/files/peer.conf
  94. 3 0
      base/saltstack/master/files/pillar_roots.conf
  95. 2 0
      base/saltstack/master/files/runners.conf
  96. 1 0
      base/saltstack/master/files/worker_threads.conf
  97. 26 0
      base/saltstack/master/init.sls
  98. 1 0
      base/saltstack/minion/files/hash_type.conf
  99. 2 0
      base/saltstack/minion/files/master.conf
  100. 0 0
      base/saltstack/minion/init.sls

+ 2 - 0
.gitignore

@@ -0,0 +1,2 @@
1
+vendor/
2
+/base/all_private_ip_addresses.jinja

+ 140 - 0
base/admin/ca.sls

@@ -0,0 +1,140 @@
1
+{% import 'globals.jinja' as globals %}
2
+
3
+include:
4
+  - constant_state
5
+  - generic_packages.m2crypto
6
+
7
+/data/admin/pki/cacerts:
8
+  file.directory:
9
+    - mode: 755
10
+    - require:
11
+      - file: /data/admin/pki
12
+
13
+# Root CA
14
+
15
+/data/admin/pki/root:
16
+  file.directory:
17
+    - mode: 700
18
+    - require:
19
+      - file: /data/admin/pki
20
+
21
+/data/admin/pki/root/issued_certs:
22
+  file.directory:
23
+    - mode: 700
24
+    - require:
25
+      - file: /data/admin/pki/root
26
+
27
+{% if grains['id'] == globals.primary_admin_host %}
28
+
29
+/data/admin/pki/root/ca.key:
30
+  x509.private_key_managed:
31
+    - bits: 4096
32
+    - backup: True
33
+    - require:
34
+      - file: /data/admin/pki/root
35
+
36
+/data/admin/pki/root/ca.crt:
37
+  x509.certificate_managed:
38
+    - signing_private_key: /data/admin/pki/root/ca.key
39
+    - basicConstraints: "critical CA:true"
40
+    - keyUsage: "critical cRLSign, keyCertSign"
41
+    - subjectKeyIdentifier: hash
42
+    - authorityKeyIdentifier: keyid,issuer:always
43
+    - days_valid: 3650
44
+    - days_remaining: 0
45
+    - backup: True
46
+{%- for attr, value in pillar['global']['certificate_attributes']['root'].items() %}
47
+    - {{ attr }}: {{ value }}
48
+{%- endfor %}
49
+    - CN: Root Certificate Authority
50
+    - require:
51
+      - file: /data/admin/pki/cacerts
52
+      - x509: /data/admin/pki/root/ca.key
53
+
54
+/data/admin/pki/cacerts/ca_root.crt:
55
+  file.managed:
56
+    - source: /data/admin/pki/root/ca.crt
57
+    - require:
58
+      - x509: /data/admin/pki/root/ca.crt
59
+    - onchanges_in:
60
+      - module: mine_send_cacerts
61
+
62
+{% endif %}
63
+
64
+# Deployment CA
65
+
66
+/data/admin/pki/deployment:
67
+  file.directory:
68
+    - mode: 700
69
+    - require:
70
+      - file: /data/admin/pki
71
+
72
+/data/admin/pki/deployment/issued_certs:
73
+  file.directory:
74
+    - mode: 700
75
+    - require:
76
+      - file: /data/admin/pki/deployment
77
+
78
+{% if grains['id'] == globals.primary_admin_host %}
79
+
80
+/data/admin/pki/deployment/ca.key:
81
+  x509.private_key_managed:
82
+    - bits: 4096
83
+    - backup: True
84
+    - require:
85
+      - file: /data/admin/pki/deployment
86
+
87
+/data/admin/pki/deployment/ca.crt:
88
+  x509.certificate_managed:
89
+    - signing_cert: /data/admin/pki/root/ca.crt
90
+    - signing_private_key: /data/admin/pki/root/ca.key
91
+    - public_key: /data/admin/pki/deployment/ca.key
92
+    - basicConstraints: "critical CA:true"
93
+    - keyUsage: "critical cRLSign, keyCertSign"
94
+    - subjectKeyIdentifier: hash
95
+    - authorityKeyIdentifier: keyid,issuer:always
96
+    - days_valid: 3650
97
+    - days_remaining: 0
98
+    - backup: True
99
+{%- for attr, value in pillar['global']['certificate_attributes']['deployment'].items() %}
100
+    - {{ attr }}: {{ value }}
101
+{%- endfor %}
102
+    - CN: Deployment Certificate Authority
103
+    - require:
104
+      - file: /data/admin/pki/cacerts
105
+      - x509: /data/admin/pki/root/ca.key
106
+      - x509: /data/admin/pki/root/ca.crt
107
+      - x509: /data/admin/pki/deployment/ca.key
108
+
109
+/data/admin/pki/cacerts/ca_deployment.crt:
110
+  file.managed:
111
+    - source: /data/admin/pki/deployment/ca.crt
112
+    - require:
113
+      - x509: /data/admin/pki/deployment/ca.crt
114
+    - onchanges_in:
115
+      - module: mine_send_cacerts
116
+
117
+/etc/salt/minion.d/signing_policies.conf:
118
+  file.managed:
119
+    - source: salt://admin/files/signing_policies.conf
120
+    - template: jinja
121
+    - watch_in:
122
+      - service: salt_minion
123
+
124
+{% else %}
125
+
126
+/etc/salt/minion.d/signing_policies.conf:
127
+  file.absent:
128
+    - watch_in:
129
+      - service: salt_minion
130
+
131
+{% endif %}
132
+
133
+mine_send_cacerts:
134
+  module.run:
135
+    - name: mine.send
136
+    - func: x509.get_pem_entries
137
+    - kwargs:
138
+        glob_path: /data/admin/pki/cacerts/*.crt
139
+    - onchanges:
140
+      - test: constant_state

+ 63 - 0
base/admin/certbot.sls

@@ -0,0 +1,63 @@
1
+{% import 'globals.jinja' as globals %}
2
+
3
+include:
4
+  - nginx
5
+  - pip.virtualenvwrapper
6
+
7
+{% for subdir in [
8
+  'certbot-auto',
9
+  'letsencrypt',
10
+] %}
11
+/data/admin/{{ subdir }}:
12
+  file.directory:
13
+    - mode: 700
14
+    - require:
15
+      - file: /data/admin
16
+{% endfor %}
17
+
18
+/data/virtualenvs/letsencrypt:
19
+  file.directory:
20
+    - require:
21
+      - file: /data/virtualenvs
22
+
23
+/root/.local/share:
24
+  file.symlink:
25
+    - target: /data/virtualenvs
26
+    - makedirs: True
27
+    - require:
28
+      - file: /data/virtualenvs
29
+
30
+/etc/letsencrypt:
31
+  file.symlink:
32
+    - target: /data/admin/letsencrypt
33
+    - require:
34
+      - file: /data/admin/letsencrypt
35
+
36
+/data/certbot/.well-known:
37
+  file.directory:
38
+    - mode: 755
39
+    - makedirs: True
40
+    - require:
41
+      - file: /data
42
+
43
+/etc/nginx/sites-enabled/certbot:
44
+  file.managed:
45
+    - source: salt://admin/files/certbot.nginx
46
+    - template: jinja
47
+    - watch_in:
48
+      - service: nginx
49
+
50
+{% if grains['id'] == globals.primary_admin_host %}
51
+
52
+# If the admin servers are replicated, then certbot must only be run on one
53
+# of them and the information replicated to all the others.
54
+/etc/cron.d/certbot:
55
+  file.managed:
56
+    - source: salt://admin/files/certbot.cron
57
+
58
+{% else %}
59
+
60
+/etc/cron.d/certbot:
61
+  file.absent
62
+
63
+{% endif %}

+ 1 - 0
base/admin/files/certbot.cron

@@ -0,0 +1 @@
1
+42 8,20 * * *   root    /data/admin/certbot-auto/certbot-auto renew --quiet --post-hook="/bin/true" > /dev/null 2>&1

+ 10 - 0
base/admin/files/certbot.nginx

@@ -0,0 +1,10 @@
1
+{% import 'globals.jinja' as globals %}
2
+server {
3
+  listen 80;
4
+  server_name {{ globals.private_fqdn }};
5
+  location /.well-known/ {
6
+      alias /data/certbot/.well-known/;
7
+      expires 30d;
8
+      autoindex off;
9
+  }
10
+}

+ 31 - 0
base/admin/files/signing_policies.conf

@@ -0,0 +1,31 @@
1
+{% import 'globals.jinja' as globals -%}
2
+x509_signing_policies:
3
+  deployment_client:
4
+    - signing_private_key: /data/admin/pki/deployment/ca.key
5
+    - signing_cert: /data/admin/pki/deployment/ca.crt
6
+    - copypath: /data/admin/pki/deployment/issued_certs
7
+    - prepend_cn: True
8
+    - days_valid: 90
9
+    - basicConstraints: "critical CA:false"
10
+    - keyUsage: "critical keyEncipherment"
11
+    - extendedKeyUsage: "clientAuth"
12
+    - subjectKeyIdentifier: hash
13
+    - authorityKeyIdentifier: keyid,issuer:always
14
+{%- for attr, value in pillar['global']['certificate_attributes']['deployment'].items() %}
15
+    - {{ attr }}: {{ value }}
16
+{%- endfor %}
17
+  deployment_server:
18
+    - minions: {{ ','.join(globals.admin_hosts) }}
19
+    - signing_private_key: /data/admin/pki/deployment/ca.key
20
+    - signing_cert: /data/admin/pki/deployment/ca.crt
21
+    - copypath: /data/admin/pki/deployment/issued_certs
22
+    - prepend_cn: True
23
+    - days_valid: 90
24
+    - basicConstraints: "critical CA:false"
25
+    - keyUsage: "critical keyEncipherment"
26
+    - extendedKeyUsage: "serverAuth,clientAuth"
27
+    - subjectKeyIdentifier: hash
28
+    - authorityKeyIdentifier: keyid,issuer:always
29
+{%- for attr, value in pillar['global']['certificate_attributes']['deployment'].items() %}
30
+    - {{ attr }}: {{ value }}
31
+{%- endfor %}

+ 49 - 0
base/admin/init.sls

@@ -0,0 +1,49 @@
1
+{% import "globals.jinja" as globals %}
2
+
3
+include:
4
+  - admin.ca
5
+  - admin.certbot
6
+  - saltstack.master
7
+
8
+/data/admin:
9
+  file.directory:
10
+    - mode: 700
11
+    - require:
12
+      - file: /data
13
+
14
+{% for subdir in [
15
+  'pillar',
16
+  'pki',
17
+  'salt',
18
+  'salt-modules',
19
+] %}
20
+/data/admin/{{ subdir }}:
21
+  file.directory:
22
+    - mode: 700
23
+    - require:
24
+      - file: /data/admin
25
+{% endfor %}
26
+
27
+admin_bashrc:
28
+  file.append:
29
+    - name: /root/.bashrc
30
+    - text:
31
+      - alias salt='salt -t60'
32
+
33
+/root/.ssh:
34
+  file.directory:
35
+    - mode: 700
36
+
37
+/root/.ssh/id_rsa:
38
+  file.managed:
39
+    - mode: 600
40
+    - contents_pillar: admin:ssh:id_rsa
41
+    - require:
42
+      - file: /root/.ssh
43
+
44
+/root/.ssh/id_rsa.pub:
45
+  file.managed:
46
+    - mode: 644
47
+    - contents_pillar: admin:ssh:id_rsa_pub
48
+    - require:
49
+      - file: /root/.ssh

+ 22 - 0
base/bootstrap/files/debian/fix-resolv-conf

@@ -0,0 +1,22 @@
1
+{% import 'globals.jinja' as globals -%}
2
+#!/bin/sh
3
+
4
+PATH=/sbin:/usr/sbin:/bin:/usr/bin
5
+
6
+# Exit if the file contents have already been replaced.
7
+grep -q SKIP_SUBSTITUTION /etc/resolv.conf && exit 0
8
+
9
+cat <<'EOM' > /etc/resolv.conf.tmp && grep '^nameserver ' /etc/resolv.conf >> /etc/resolv.conf.tmp && mv /etc/resolv.conf.tmp /etc/resolv.conf
10
+# ****************************************************************
11
+# * DO NOT EDIT THIS FILE OR REMOVE THIS LINE: SKIP_SUBSTITUTION *
12
+# ****************************************************************
13
+# This file is generated by /etc/network/if-pre-up.d/fix-resolv-conf
14
+# using the list of nameservers from the original /etc/resolv.conf provided
15
+# at boot time by the Linode Network Helper.
16
+
17
+domain {{ globals.private_node_domain }}
18
+search {{ globals.private_node_domain }} {{ globals.private_service_domain }}
19
+nameserver 127.0.0.1
20
+
21
+# Linode nameserver entries will be appended below.
22
+EOM

+ 103 - 0
base/bootstrap/init.sls

@@ -0,0 +1,103 @@
1
+{% import 'globals.jinja' as globals %}
2
+
3
+# The if conditions below ensure that the CA already exists before trying to
4
+# create a deployment certificate or install consul on the primary admin server.
5
+# This requires initially running state.highstate twice on the primary admin
6
+# server in order to configure it fully.
7
+
8
+include:
9
+  - dnsmasq
10
+  - generic_packages.jq
11
+  - generic_packages.m2crypto
12
+  - generic_packages.sharutils
13
+  - generic_packages.tcpdump
14
+  - mount.data
15
+  - root_user
16
+  - saltstack.minion
17
+  - screen
18
+  - ssh.server
19
+{% if (grains['id'] != globals.primary_admin_host) or salt['file.file_exists']('/data/admin/pki/deployment/ca.crt') %}
20
+  - consul
21
+{%- if grains['id'] in globals.admin_hosts %}
22
+  - consul.server
23
+{%- endif %}
24
+
25
+deployment-keys:
26
+  group.present:
27
+    - system: True
28
+
29
+/etc/deployment:
30
+  file.directory
31
+
32
+/etc/deployment/ssl:
33
+  file.directory:
34
+    - require:
35
+      - file: /etc/deployment
36
+
37
+/etc/deployment/ssl/certs:
38
+  file.directory:
39
+    - require:
40
+      - file: /etc/deployment/ssl
41
+
42
+/etc/deployment/ssl/private:
43
+  file.directory:
44
+    - mode: 750
45
+    - group: deployment-keys
46
+    - require:
47
+      - group: deployment-keys
48
+      - file: /etc/deployment/ssl
49
+
50
+/etc/deployment/ssl/private/deployment.key:
51
+  x509.private_key_managed:
52
+    - bits: 4096
53
+    - backup: True
54
+    - require:
55
+      - file: /etc/deployment/ssl/private
56
+
57
+/etc/deployment/ssl/certs/deployment.crt:
58
+  x509.certificate_managed:
59
+    - ca_server: {{ globals.primary_admin_host }}
60
+    - signing_policy: {{ 'deployment_server' if grains['id'] in globals.admin_hosts else 'deployment_client' }}
61
+    - CN: {{ globals.private_fqdn }}
62
+    - days_remaining: 30
63
+    - backup: True
64
+    - public_key: /etc/deployment/ssl/private/deployment.key
65
+    - require:
66
+      - file: /etc/deployment/ssl/certs
67
+      - x509: /etc/deployment/ssl/private/deployment.key
68
+
69
+{% set ca_certs = salt['mine.get'](globals.primary_admin_host, 'x509.get_pem_entries')[globals.primary_admin_host] %}
70
+/etc/deployment/ssl/certs/ca-chain-deployment.crt:
71
+  # x509.pem_managed only allows one certificate per file, so we don't use it.
72
+  # It also seems redundant given the built-in abilities of file.managed and the
73
+  # jinja2 indent function.
74
+  file.managed:
75
+    - contents: |
76
+        {{ ca_certs['/data/admin/pki/cacerts/ca_root.crt']|indent(8) }}
77
+        {{ ca_certs['/data/admin/pki/cacerts/ca_deployment.crt']|indent(8) }}
78
+    - require:
79
+      - file: /etc/deployment/ssl/certs
80
+
81
+{% if grains['os_family'] == 'Debian' %}
82
+
83
+fix_resolv_conf:
84
+  file.managed:
85
+    - name: /etc/network/if-pre-up.d/fix-resolv-conf
86
+    - source: salt://bootstrap/files/debian/fix-resolv-conf
87
+    - template: jinja
88
+    - mode: 755
89
+
90
+# Ensure that the script gets run after it gets installed for the first time,
91
+# but only after dnsmasq is installed to redirect DNS lookups to consul.
92
+run_fix_resolv_conf:
93
+  cmd.run:
94
+    - name: /etc/network/if-pre-up.d/fix-resolv-conf
95
+    - require:
96
+      - pkg: dnsmasq
97
+      - service: consul
98
+    - onchanges:
99
+      - file: /etc/network/if-pre-up.d/fix-resolv-conf
100
+
101
+{% endif %}
102
+
103
+{% endif %}

+ 16 - 0
base/circusd/files/circusd.ini

@@ -0,0 +1,16 @@
1
+[circus]
2
+check_delay = 5
3
+endpoint = ipc:///var/lib/circus/endpoint.sock
4
+pubsub_endpoint = ipc:///var/lib/circus/pubsub.sock
5
+include_dir = /etc/circus/conf.d
6
+loggerconfig = /etc/circus/circusd.logger.yaml
7
+debug = False
8
+httpd = False
9
+
10
+;; Enabling this results in significant CPU usage (5-8% on most hosts)
11
+statsd = False
12
+
13
+[plugin:flapping]
14
+use = circus.plugins.flapping.Flapping
15
+retry_in = 3
16
+max_retry = 2

+ 22 - 0
base/circusd/files/circusd.logger.yaml

@@ -0,0 +1,22 @@
1
+version: 1
2
+disable_existing_loggers: false
3
+formatters:
4
+  simple:
5
+    format: '%(asctime)s %(name)s[%(process)d] [%(levelname)s] %(message)s'
6
+    datefmt: '%Y-%m-%d %H:%M:%S'
7
+handlers:
8
+  logfile:
9
+    class: logging.handlers.RotatingFileHandler
10
+    filename: /var/log/circus/circusd.log
11
+    maxBytes: 1048576
12
+    backupCount: 7
13
+    level: DEBUG
14
+    formatter: simple
15
+loggers:
16
+  circus:
17
+    level: INFO
18
+    handlers: [logfile]
19
+    propagate: no
20
+root:
21
+  level: INFO
22
+  handlers: [logfile]

+ 13 - 0
base/circusd/files/circusd.service

@@ -0,0 +1,13 @@
1
+[Unit]
2
+Description=Circus process manager
3
+After=syslog.target network.target nss-lookup.target
4
+
5
+[Service]
6
+Type=simple
7
+ExecReload=/data/virtualenvs/circus/bin/circusctl reload
8
+ExecStart=/data/virtualenvs/circus/bin/circusd /etc/circus/circusd.ini
9
+Restart=always
10
+RestartSec=5
11
+
12
+[Install]
13
+WantedBy=default.target

+ 2 - 0
base/circusd/files/requirements.in

@@ -0,0 +1,2 @@
1
+circus
2
+pyyaml

+ 10 - 0
base/circusd/files/requirements.txt

@@ -0,0 +1,10 @@
1
+backports-abc==0.5
2
+certifi==2017.4.17
3
+circus==0.14.0
4
+iowait==0.2
5
+psutil==5.2.2
6
+PyYAML==3.12
7
+pyzmq==16.0.2
8
+singledispatch==3.4.0.3
9
+six==1.10.0
10
+tornado==4.5.1

+ 78 - 0
base/circusd/init.sls

@@ -0,0 +1,78 @@
1
+include:
2
+  - constant_state
3
+  - pip.virtualenvwrapper
4
+
5
+/var/lib/circus:
6
+  file.directory
7
+
8
+/var/log/circus:
9
+  file.directory
10
+
11
+/etc/circus:
12
+  file.directory
13
+
14
+/etc/circus/circusd.ini:
15
+  file.managed:
16
+    - source: salt://circusd/files/circusd.ini
17
+    - require:
18
+      - file: /etc/circus
19
+
20
+/etc/circus/circusd.logger.yaml:
21
+  file.managed:
22
+    - source: salt://circusd/files/circusd.logger.yaml
23
+    - require:
24
+      - file: /etc/circus
25
+
26
+/etc/circus/conf.d:
27
+  file.directory:
28
+    - require:
29
+      - file: /etc/circus
30
+
31
+/data/virtualenvs/circus:
32
+  virtualenv.managed:
33
+    - requirements: salt://circusd/files/requirements.txt
34
+    - require:
35
+      - pkg: pip
36
+      - file: /data/virtualenvs
37
+
38
+/usr/bin/circusctl:
39
+  file.symlink:
40
+    - target: /data/virtualenvs/circus/bin/circusctl
41
+    - require:
42
+      - virtualenv: /data/virtualenvs/circus
43
+
44
+/etc/systemd/system/circusd.service:
45
+  file.managed:
46
+    - source: salt://circusd/files/circusd.service
47
+    - onchanges_in:
48
+      - module: reload_systemd
49
+
50
+circusd:
51
+  service.running:
52
+    - require:
53
+      - module: reload_systemd
54
+      - file: /var/lib/circus
55
+      - file: /var/log/circus
56
+    - watch:
57
+      - virtualenv: /data/virtualenvs/circus
58
+      - file: /etc/circus/circusd.ini
59
+      - file: /etc/circus/circusd.logger.yaml
60
+      - file: /etc/systemd/system/circusd.service
61
+
62
+circusd.reloadconfig:
63
+  module.run:
64
+    - endpoint: ipc:///var/lib/circus/endpoint.sock
65
+    - require:
66
+      - file: /usr/bin/circusctl
67
+      - service: circusd
68
+
69
+    # Set onchanges to a dummy value in case no other states require this state
70
+    # using 'onchanges_in'.
71
+    - onchanges:
72
+      - test: constant_state
73
+
74
+circusd_bashrc:
75
+  file.append:
76
+    - name: /root/.bashrc
77
+    - text:
78
+      - export CIRCUSCTL_ENDPOINT=ipc:///var/lib/circus/endpoint.sock

+ 56 - 0
base/circusd/lib.jinja

@@ -0,0 +1,56 @@
1
+# circusd_watcher_running() always runs last due to its dependency on
2
+# circusd.reloadconfig. If the service is not running it will be started. If
3
+# the service is already running, but changes were detected via the
4
+# xxx_watcher_requirements state, then the service will be restarted.
5
+#
6
+# circusd_watcher_configuration() keeps the appropriate circusd configuration
7
+# file up to date, and triggers a circusd.reloadconfig if any changes are
8
+detected. It also generates the xxx_watcher_requirements state, which serves
9
+# as a dependency of both circusd.reloadconfig and circusd.watcher_running and
10
+# ensures that all requirements of the service are met before either of those
11
+# commands are executed (circusd.reloadconfig starts new services automatically,
12
+# so all requirements need to be met). The difference is that
13
+# circusd.reloadconfig only needs to be run if the actual circusd configuration
14
+# file changed (hence the require_in), while circusd.watcher_running always
15
+needs to be run (hence the watch_in).
16
+#
17
+# The test.succeed_with_changes state is a convenient way of putting all the
18
+# shared requirements for other states in one place. Because of the use of
19
+# 'onchanges', it will report changes if and only if any of its dependencies
20
+# change.
21
+
22
+{% macro circusd_watcher_running() %}
23
+  circusd.watcher_running:
24
+    - endpoint: ipc:///var/lib/circus/endpoint.sock
25
+    - require:
26
+      - module: circusd.reloadconfig
27
+{% endmacro %}
28
+
29
+{% macro circusd_watcher_configuration(name, source, context, extra_requires, extra_onchanges) %}
30
+/etc/circus/conf.d/{{ name }}.ini:
31
+  file.managed:
32
+    - source: {{ source }}
33
+{% if context %}
34
+    - template: jinja
35
+    - context: {{ context }}
36
+{% endif %}
37
+    - require:
38
+      - pkg: circusd
39
+    - onchanges_in:
40
+      - module: circusd.reloadconfig
41
+
42
+{{ name }}_watcher_requirements:
43
+  test.succeed_with_changes:
44
+    - require:
45
+      - service: circusd
46
+{% for item in extra_requires %}
47
+      - {{ item }}{% endfor %}
48
+    - onchanges:
49
+      - file: /etc/circus/conf.d/{{ name }}.ini
50
+{% for item in extra_onchanges %}
51
+      - {{ item }}{% endfor %}
52
+    - require_in:
53
+      - module: circusd.reloadconfig
54
+    - watch_in:
55
+      - circusd: {{ name }}
56
+{% endmacro %}

+ 5 - 0
base/constant_state.sls

@@ -0,0 +1,5 @@
1
+# constant_state is referenced in the 'onchanges' of another state to guarantee
2
+# that there is at least one 'onchanges' entry for that state so that it will
3
+# never execute on its own. We can then trigger the state via 'onchanges_in' in
+# other states that need it.
4
+constant_state:
5
+  test.succeed_without_changes

+ 3 - 0
base/consul/files/conf.d/datacenter.json

@@ -0,0 +1,3 @@
1
+{
2
+  "datacenter": "{{ consul_datacenter }}"
3
+}

+ 6 - 0
base/consul/files/conf.d/dns.json

@@ -0,0 +1,6 @@
1
+{
2
+	"dns_config": {
3
+		"allow_stale": true,
4
+		"max_stale": "87600h"
5
+	}
6
+}

+ 3 - 0
base/consul/files/conf.d/join.json

@@ -0,0 +1,3 @@
1
+{
2
+  "start_join": ["{{ server_ip_addresses|join('", "') }}"]
3
+}

+ 4 - 0
base/consul/files/conf.d/security.json

@@ -0,0 +1,4 @@
1
+{
2
+  "encrypt": "{{ consul_secret }}",
3
+  "disable_remote_exec": true
4
+}

+ 7 - 0
base/consul/files/conf.d/tls.json

@@ -0,0 +1,7 @@
1
+{
2
+  "ca_file": "/etc/deployment/ssl/certs/ca-chain-deployment.crt",
3
+  "cert_file": "/etc/deployment/ssl/certs/deployment.crt",
4
+  "key_file": "/etc/deployment/ssl/private/deployment.key",
5
+  "verify_incoming": true,
6
+  "verify_outgoing": true
7
+}

+ 14 - 0
base/consul/files/consul.service

@@ -0,0 +1,14 @@
1
+[Unit]
2
+Description=consul agent
3
+Requires=network-online.target
4
+After=network-online.target
5
+
6
+[Service]
7
+Environment=GOMAXPROCS=2
8
+Restart=on-failure
9
+ExecStart=/usr/bin/consul agent -config-dir=/etc/consul -data-dir=/data/consul
10
+#ExecReload=/bin/kill -HUP $MAINPID
11
+KillSignal=SIGTERM
12
+
13
+[Install]
14
+WantedBy=multi-user.target

+ 66 - 0
base/consul/init.sls

@@ -0,0 +1,66 @@
1
+{% import 'globals.jinja' as globals %}
2
+
3
+include:
4
+  - systemd.helpers
5
+
6
+consul:
7
+  file.managed:
8
+    - name: /usr/bin/consul
9
+    - source: salt://consul/files/vendor/0.8.3.linux-amd64/consul
10
+    - mode: 555
11
+  group.present:
12
+    - system: True
13
+  user.present:
14
+    - system: True
15
+    - gid: consul
16
+    - home: /data/consul
17
+    - createhome: False
18
+    - groups:
19
+      - deployment-keys
20
+    - require:
21
+      - group: consul
22
+      - group: deployment-keys
23
+  service.running:
24
+    - require:
25
+      - user: consul
26
+      - group: consul
27
+      - file: /data/consul
28
+      - module: reload_systemd
29
+    - watch:
30
+      - file: consul
31
+      - file: /etc/consul
32
+      - x509: /etc/deployment/ssl/private/deployment.key
33
+      - x509: /etc/deployment/ssl/certs/deployment.crt
34
+      - file: /etc/deployment/ssl/certs/ca-chain-deployment.crt
35
+      - file: /etc/systemd/system/consul.service
36
+
37
+# We create this directory manually rather than using 'user.present' with
38
+# 'createhome: True' to ensure that .bashrc, etc., are not copied into it.
39
+/data/consul:
40
+  file.directory:
41
+    - user: consul
42
+    - group: consul
43
+    - require:
44
+      - user: consul
45
+      - group: consul
46
+
47
+/etc/consul:
48
+  file.recurse:
49
+    - source: salt://consul/files/conf.d
50
+    - user: root
51
+    - group: consul
52
+    - dir_mode: 750
53
+    - file_mode: 640
54
+    - template: jinja
55
+    - context:
56
+        consul_datacenter: {{ pillar['global']['consul_datacenter'] }}
57
+        consul_secret: {{ pillar['global']['consul_secret'] }}
58
+        server_ip_addresses: {{ globals.admin_ip_addresses }}
59
+    - require:
60
+      - group: consul
61
+
62
+/etc/systemd/system/consul.service:
63
+  file.managed:
64
+    - source: salt://consul/files/consul.service
65
+    - onchanges_in:
66
+      - module: reload_systemd

+ 5 - 0
base/consul/server/files/server.json

@@ -0,0 +1,5 @@
1
+{% import 'globals.jinja' as globals %}
2
+{
3
+  "server": true,
4
+  "bootstrap_expect": {{ globals.admin_ip_addresses|count }}
5
+}

+ 15 - 0
base/consul/server/init.sls

@@ -0,0 +1,15 @@
1
+include:
2
+  - consul
3
+
4
+/etc/consul/server.json:
5
+  file.managed:
6
+    - source: salt://consul/server/files/server.json
7
+    - template: jinja
8
+    - mode: 640
9
+    - user: root
10
+    - group: consul
11
+    - require:
12
+      - file: /etc/consul
13
+      - group: consul
14
+    - watch_in:
15
+      - service: consul

+ 8 - 0
base/disabled_users.sls

@@ -0,0 +1,8 @@
1
+# We purposely use the same state name for disabled and enabled users to force
2
+# an error if an attempt is made to both enable and disable a user.
3
+{% for username in pillar['users']['disabled'] %}
4
+user_{{ username }}:
5
+  user.absent:
6
+    - name: {{ username }}
7
+    - purge: False
8
+{% endfor %}

+ 3 - 0
base/dnsmasq/files/common.conf

@@ -0,0 +1,3 @@
1
+bind-interfaces
2
+listen-address=127.0.0.1
3
+domain-needed

+ 2 - 0
base/dnsmasq/files/consul.conf

@@ -0,0 +1,2 @@
1
+server=/consul/127.0.0.1#8600
2
+server=/168.192.in-addr.arpa/127.0.0.1#8600

+ 18 - 0
base/dnsmasq/init.sls

@@ -0,0 +1,18 @@
1
+{% from 'dnsmasq/map.jinja' import dnsmasq %}
2
+
3
+dnsmasq:
4
+  pkg.installed:
5
+    - name: {{ dnsmasq.package }}
6
+  service.running:
7
+    - name: {{ dnsmasq.service }}
8
+    - watch:
9
+      - pkg: dnsmasq
10
+      - file: dnsmasq_files
11
+
12
+dnsmasq_files:
13
+  file.recurse:
14
+    - name: {{ dnsmasq.config_dir }}
15
+    - source: salt://dnsmasq/files
16
+    - file_mode: 644
17
+    - require:
18
+      - pkg: dnsmasq

+ 7 - 0
base/dnsmasq/map.jinja

@@ -0,0 +1,7 @@
1
+{% set dnsmasq = salt['grains.filter_by']({
2
+	'Debian': {
3
+		'package': 'dnsmasq',
4
+		'service': 'dnsmasq',
5
+		'config_dir': '/etc/dnsmasq.d'
6
+	}
7
+}, merge=salt['pillar.get']('dnsmasq:lookup')) %}

+ 12 - 0
base/firewall/backend_private_ip_entries.sls

@@ -0,0 +1,12 @@
1
+{% import 'globals.jinja' as globals %}
2
+ipset_backend_private_ip_entries:
3
+  ipset.present:
4
+    - require:
5
+      - ipset: ipset_backend_private_ips
6
+    - onchanges_in:
7
+      - cmd: do_ipset_save
8
+    - set_name: ipset_backend_private_ips
9
+    - entry:
10
+{% for ip in globals.all_private_ip_addresses %}
11
+      - {{ ip }}
12
+{% endfor %}

+ 5 - 0
base/firewall/files/ipset-save.sh

@@ -0,0 +1,5 @@
1
+#!/bin/sh
2
+
3
+PATH=/sbin:/usr/sbin:/bin:/usr/bin
4
+
5
+ipset save > /etc/iptables/ipset.tmp && mv /etc/iptables/ipset.tmp /etc/iptables/ipset

+ 21 - 0
base/firewall/files/restore-iptables

@@ -0,0 +1,21 @@
1
+#!/bin/sh
2
+
3
+PATH=/sbin:/usr/sbin:/bin:/usr/bin
4
+
5
+if [ -f /etc/iptables/rules.v4 -a -f /etc/iptables/ipset ]; then
6
+	# Flush the iptables so that we can cleanly flush the ipsets.
7
+	iptables -t raw -F
8
+	iptables -t filter -F
9
+fi
10
+
11
+if [ -f /etc/iptables/ipset ]; then
12
+	# We flush first in case some sets are still in use and the destroy
13
+	# doesn't work.
14
+	ipset flush
15
+	ipset destroy
16
+	ipset restore < /etc/iptables/ipset 2> /dev/null
17
+fi
18
+
19
+if [ -f /etc/iptables/rules.v4 ]; then
20
+	iptables-restore < /etc/iptables/rules.v4 2> /dev/null
21
+fi

+ 292 - 0
base/firewall/init.sls

@@ -0,0 +1,292 @@
1
+{% import 'globals.jinja' as globals %}
2
+
3
+# Required packages and helpers
4
+# -----------------------------
5
+ipset_package:
6
+  pkg.installed:
7
+    - name: ipset
8
+
9
+/etc/iptables/ipset-save.sh:
10
+  file.managed:
11
+    - source: salt://firewall/files/ipset-save.sh
12
+    - mode: 555
13
+    - makedirs: True
14
+
15
+/etc/network/if-pre-up.d/restore-iptables:
16
+  file.managed:
17
+    - source: salt://firewall/files/restore-iptables
18
+    - mode: 555
19
+
20
+do_ipset_save:
21
+  cmd.run:
22
+    - name: /etc/iptables/ipset-save.sh
23
+    - onchanges:
24
+      - pkg: ipset_package
25
+      - file: /etc/iptables/ipset-save.sh
26
+
27
+# IP Sets
28
+# -------
29
+
30
+# Backend server private IP addresses.
31
+ipset_backend_private_ips:
32
+  ipset.set_present:
33
+    - require:
34
+      - pkg: ipset_package
35
+    - onchanges_in:
36
+      - cmd: do_ipset_save
37
+    - set_type: bitmap:ip
38
+    - range: 192.168.128.0/17
39
+
40
+# Ensure we always include the admin server IP(s).
41
+ipset_backend_admin_ip_entries:
42
+  ipset.present:
43
+    - require:
44
+      - ipset: ipset_backend_private_ips
45
+    - onchanges_in:
46
+      - cmd: do_ipset_save
47
+    - set_name: ipset_backend_private_ips
48
+    - entry:
49
+{% for ip in globals.admin_ip_addresses %}
50
+      - {{ ip }}
51
+{% endfor %}
52
+
53
+include:
54
+  - firewall.backend_private_ip_entries
55
+
56
+# Publically accessible TCP ports.
57
+ipset_public_tcp_ports:
58
+  ipset.set_present:
59
+    - require:
60
+      - pkg: ipset_package
61
+    - onchanges_in:
62
+      - cmd: do_ipset_save
63
+    - set_type: bitmap:port
64
+    - range: 0-65535
65
+
66
+# Always include SSH access.
67
+ipset_public_tcp_port_entries:
68
+  ipset.present:
69
+    - require:
70
+      - ipset: ipset_public_tcp_ports
71
+    - onchanges_in:
72
+      - cmd: do_ipset_save
73
+    - set_name: ipset_public_tcp_ports
74
+    - entry:
75
+      - '22'
76
+
77
+# Publically accessible UDP ports.
78
+ipset_public_udp_ports:
79
+  ipset.set_present:
80
+    - require:
81
+      - pkg: ipset_package
82
+    - onchanges_in:
83
+      - cmd: do_ipset_save
84
+    - set_type: bitmap:port
85
+    - range: 0-65535
86
+
87
+# Default Policies
88
+# ----------------
89
+
90
+iptables_policy_input:
91
+  iptables.set_policy:
92
+    - save: True
93
+    - chain: INPUT
94
+    - policy: DROP
95
+
96
+# Prerouting rules
97
+# ----------------
98
+# These are used to bypass connection tracking on input as it is unnecessary.
99
+
100
+iptables_prerouting_localhost:
101
+  iptables.append:
102
+    - save: True
103
+    - table: raw
104
+    - chain: PREROUTING
105
+    - jump: NOTRACK
106
+    - in-interface: lo
107
+
108
+iptables_prerouting_backend_private_ips:
109
+  iptables.append:
110
+    - require:
111
+      - iptables: iptables_prerouting_localhost
112
+      - ipset: ipset_backend_private_ips
113
+    - save: True
114
+    - table: raw
115
+    - chain: PREROUTING
116
+    - jump: NOTRACK
117
+    - in-interface: eth0
118
+    - match-set: ipset_backend_private_ips src,dst
119
+
120
+iptables_prerouting_public_tcp_ports:
121
+  iptables.append:
122
+    - require:
123
+      - iptables: iptables_prerouting_backend_private_ips
124
+      - ipset: ipset_public_tcp_ports
125
+    - save: True
126
+    - table: raw
127
+    - chain: PREROUTING
128
+    - jump: NOTRACK
129
+    - in-interface: eth0
130
+    - proto: tcp
131
+    - match-set: ipset_public_tcp_ports dst
132
+
133
+iptables_prerouting_public_udp_ports:
134
+  iptables.append:
135
+    - require:
136
+      - iptables: iptables_prerouting_public_tcp_ports
137
+      - ipset: ipset_public_udp_ports
138
+    - save: True
139
+    - table: raw
140
+    - chain: PREROUTING
141
+    - jump: NOTRACK
142
+    - in-interface: eth0
143
+    - proto: udp
144
+    - match-set: ipset_public_udp_ports dst
145
+
146
+iptables_prerouting_icmp:
147
+  iptables.append:
148
+    - require:
149
+      - iptables: iptables_prerouting_public_udp_ports
150
+    - save: True
151
+    - table: raw
152
+    - chain: PREROUTING
153
+    - jump: NOTRACK
154
+    - in-interface: eth0
155
+    - proto: icmp
156
+
157
+# Input rules
158
+# -----------
159
+
160
+iptables_input_localhost:
161
+  iptables.append:
162
+    - save: True
163
+    - table: filter
164
+    - chain: INPUT
165
+    - jump: ACCEPT
166
+    - in-interface: lo
167
+
168
+iptables_input_backend_private_ips:
169
+  iptables.append:
170
+    - require:
171
+      - iptables: iptables_input_localhost
172
+      - ipset: ipset_backend_private_ips
173
+    - save: True
174
+    - table: filter
175
+    - chain: INPUT
176
+    - jump: ACCEPT
177
+    - in-interface: eth0
178
+    - match-set: ipset_backend_private_ips src,dst
179
+
180
+iptables_input_public_tcp_ports:
181
+  iptables.append:
182
+    - require:
183
+      - iptables: iptables_input_backend_private_ips
184
+      - ipset: ipset_public_tcp_ports
185
+    - save: True
186
+    - table: filter
187
+    - chain: INPUT
188
+    - jump: ACCEPT
189
+    - in-interface: eth0
190
+    - proto: tcp
191
+    - match-set: ipset_public_tcp_ports dst
192
+
193
+iptables_input_public_udp_ports:
194
+  iptables.append:
195
+    - require:
196
+      - iptables: iptables_input_public_tcp_ports
197
+      - ipset: ipset_public_udp_ports
198
+    - save: True
199
+    - table: filter
200
+    - chain: INPUT
201
+    - jump: ACCEPT
202
+    - in-interface: eth0
203
+    - proto: udp
204
+    - match-set: ipset_public_udp_ports dst
205
+
206
+iptables_input_icmp:
207
+  iptables.append:
208
+    - require:
209
+      - iptables: iptables_input_public_udp_ports
210
+    - save: True
211
+    - table: filter
212
+    - chain: INPUT
213
+    - jump: ACCEPT
214
+    - in-interface: eth0
215
+    - proto: icmp
216
+    - match: limit
217
+    - limit: 3/second
218
+    - limit-burst: 3
219
+
220
+iptables_input_established:
221
+  iptables.append:
222
+    - require:
223
+      - iptables: iptables_input_icmp
224
+    - save: True
225
+    - table: filter
226
+    - chain: INPUT
227
+    - jump: ACCEPT
228
+    - in-interface: eth0
229
+    - match: conntrack
230
+    - ctstate: ESTABLISHED,RELATED
231
+
232
+# Output Rules
233
+# ------------
234
+# These are used to bypass connection tracking on output where it is
235
+# unnecessary.
236
+
237
+iptables_output_localhost:
238
+  iptables.append:
239
+    - save: True
240
+    - table: raw
241
+    - chain: OUTPUT
242
+    - jump: NOTRACK
243
+    - out-interface: lo
244
+
245
+iptables_output_backend_private_ips:
246
+  iptables.append:
247
+    - require:
248
+      - iptables: iptables_output_localhost
249
+      - ipset: ipset_backend_private_ips
250
+    - save: True
251
+    - table: raw
252
+    - chain: OUTPUT
253
+    - jump: NOTRACK
254
+    - out-interface: eth0
255
+    - match-set: ipset_backend_private_ips src,dst
256
+
257
+iptables_output_public_tcp_ports:
258
+  iptables.append:
259
+    - require:
260
+      - iptables: iptables_output_backend_private_ips
261
+      - ipset: ipset_public_tcp_ports
262
+    - save: True
263
+    - table: raw
264
+    - chain: OUTPUT
265
+    - jump: NOTRACK
266
+    - out-interface: eth0
267
+    - proto: tcp
268
+    - match-set: ipset_public_tcp_ports src
269
+
270
+iptables_output_public_udp_ports:
271
+  iptables.append:
272
+    - require:
273
+      - iptables: iptables_output_public_tcp_ports
274
+      - ipset: ipset_public_udp_ports
275
+    - save: True
276
+    - table: raw
277
+    - chain: OUTPUT
278
+    - jump: NOTRACK
279
+    - out-interface: eth0
280
+    - proto: udp 
281
+    - match-set: ipset_public_udp_ports src
282
+
283
+iptables_output_icmp:
284
+  iptables.append:
285
+    - require:
286
+      - iptables: iptables_output_public_udp_ports
287
+    - save: True
288
+    - table: raw
289
+    - chain: OUTPUT
290
+    - jump: NOTRACK
291
+    - out-interface: eth0
292
+    - proto: icmp

+ 2 - 0
base/firewall/public/dns.sls

@@ -0,0 +1,2 @@
1
+{% from 'firewall/public/lib.jinja' import public_tcp_port_present %}
2
+{{ public_tcp_port_present(53) }}

+ 2 - 0
base/firewall/public/http.sls

@@ -0,0 +1,2 @@
1
+{% from 'firewall/public/lib.jinja' import public_tcp_port_present %}
2
+{{ public_tcp_port_present(80) }}

+ 2 - 0
base/firewall/public/https.sls

@@ -0,0 +1,2 @@
1
+{% from 'firewall/public/lib.jinja' import public_tcp_port_present %}
2
+{{ public_tcp_port_present(443) }}

+ 2 - 0
base/firewall/public/imap.sls

@@ -0,0 +1,2 @@
1
+{% from 'firewall/public/lib.jinja' import public_tcp_port_present %}
2
+{{ public_tcp_port_present(143) }}

+ 2 - 0
base/firewall/public/imaps.sls

@@ -0,0 +1,2 @@
1
+{% from 'firewall/public/lib.jinja' import public_tcp_port_present %}
2
+{{ public_tcp_port_present(993) }}

+ 11 - 0
base/firewall/public/lib.jinja

@@ -0,0 +1,11 @@
1
+{% macro public_tcp_port_present(port) %}
2
+ipset_public_tcp_port_{{ port }}:
3
+  ipset.present:
4
+    - require:
5
+      - ipset: ipset_public_tcp_ports
6
+    - onchanges_in:
7
+      - cmd: do_ipset_save
8
+    - set_name: ipset_public_tcp_ports
9
+    - entry:
10
+      - '{{ port }}'
11
+{% endmacro %}

+ 2 - 0
base/firewall/public/pop3.sls

@@ -0,0 +1,2 @@
1
+{% from 'firewall/public/lib.jinja' import public_tcp_port_present %}
2
+{{ public_tcp_port_present(110) }}

+ 2 - 0
base/firewall/public/pop3s.sls

@@ -0,0 +1,2 @@
1
+{% from 'firewall/public/lib.jinja' import public_tcp_port_present %}
2
+{{ public_tcp_port_present(995) }}

+ 2 - 0
base/firewall/public/smtp.sls

@@ -0,0 +1,2 @@
1
+{% from 'firewall/public/lib.jinja' import public_tcp_port_present %}
2
+{{ public_tcp_port_present(25) }}

+ 2 - 0
base/firewall/public/submission.sls

@@ -0,0 +1,2 @@
1
+{% from 'firewall/public/lib.jinja' import public_tcp_port_present %}
2
+{{ public_tcp_port_present(587) }}

+ 2 - 0
base/generic_packages/git.sls

@@ -0,0 +1,2 @@
1
+{% from 'generic_packages/lib.jinja' import generic_package_installed %}
2
+{{ generic_package_installed('git') }}

+ 2 - 0
base/generic_packages/jq.sls

@@ -0,0 +1,2 @@
1
+{% from 'generic_packages/lib.jinja' import generic_package_installed %}
2
+{{ generic_package_installed('jq') }}

+ 7 - 0
base/generic_packages/lib.jinja

@@ -0,0 +1,7 @@
1
+{% from 'generic_packages/map.jinja' import package_names %}
2
+
3
+{% macro generic_package_installed(package) %}
4
+{{ package }}:
5
+  pkg.installed:
6
+    - name: {{ package_names[package] }}
7
+{% endmacro %}

+ 2 - 0
base/generic_packages/m2crypto.sls

@@ -0,0 +1,2 @@
1
+{% from 'generic_packages/lib.jinja' import generic_package_installed %}
2
+{{ generic_package_installed('m2crypto') }}

+ 24 - 0
base/generic_packages/map.jinja

@@ -0,0 +1,24 @@
1
+{% set package_names = salt['grains.filter_by']({
2
+        'Debian': {
3
+                'sharutils': 'sharutils',
4
+                'lsyncd': 'lsyncd',
5
+                'tcpdump': 'tcpdump',
6
+                'debhelper': 'debhelper',
7
+                'devscripts': 'devscripts',
8
+                'jq': 'jq',
9
+                'git': 'git',
10
+		'm2crypto': 'm2crypto',
11
+		'unzip': 'unzip',
12
+		'zip': 'zip',
13
+        },
14
+	'Ubuntu': {
15
+		'sharutils': 'sharutils',
16
+		'tcpdump': 'tcpdump',
17
+		'jq': 'jq',
18
+		'git': 'git',
19
+		'm2crypto': 'm2crypto',
20
+		'net_tools': 'net-tools',
21
+		'unzip': 'unzip',
22
+		'zip': 'zip',
23
+	}
24
+}, grain='os', merge=salt['pillar.get']('package_names:lookup')) %}

+ 2 - 0
base/generic_packages/net_tools.sls

@@ -0,0 +1,2 @@
1
+{% from 'generic_packages/lib.jinja' import generic_package_installed %}
2
+{{ generic_package_installed('net_tools') }}

+ 2 - 0
base/generic_packages/sharutils.sls

@@ -0,0 +1,2 @@
1
+{% from 'generic_packages/lib.jinja' import generic_package_installed %}
2
+{{ generic_package_installed('sharutils') }}

+ 2 - 0
base/generic_packages/tcpdump.sls

@@ -0,0 +1,2 @@
1
+{% from 'generic_packages/lib.jinja' import generic_package_installed %}
2
+{{ generic_package_installed('tcpdump') }}

+ 2 - 0
base/generic_packages/unzip.sls

@@ -0,0 +1,2 @@
1
+{% from 'generic_packages/lib.jinja' import generic_package_installed %}
2
+{{ generic_package_installed('unzip') }}

+ 2 - 0
base/generic_packages/zip.sls

@@ -0,0 +1,2 @@
1
+{% from 'generic_packages/lib.jinja' import generic_package_installed %}
2
+{{ generic_package_installed('zip') }}

+ 57 - 0
base/gitea/files/app.ini

@@ -0,0 +1,57 @@
1
+APP_NAME = Gitea: Git with a cup of tea
2
+RUN_USER = git
3
+RUN_MODE = prod
4
+
5
+[database]
6
+DB_TYPE  = mysql
7
+HOST     = {{ db_host }}:{{ db_port }}
8
+NAME     = {{ db_name }}
9
+USER     = {{ db_user }}
10
+PASSWD   = {{ db_password }}
11
+SSL_MODE = disable
12
+PATH     = data/gitea.db
13
+
14
+[repository]
15
+ROOT = /data/repo
16
+
17
+[server]
18
+SSH_DOMAIN       = {{ server_name }}
19
+HTTP_PORT        = 3000
20
+ROOT_URL         = https://{{ server_name }}/
21
+DISABLE_SSH      = false
22
+SSH_PORT         = 22
23
+LFS_START_SERVER = false
24
+OFFLINE_MODE     = false
25
+
26
+[mailer]
27
+ENABLED = true
28
+HOST    = {{ mail_host }}
29
+FROM    = {{ mail_from }}
30
+USER    = 
31
+PASSWD  = 
32
+SKIP_VERIFY = true
33
+
34
+[service]
35
+REGISTER_EMAIL_CONFIRM     = false
36
+ENABLE_NOTIFY_MAIL         = false
37
+DISABLE_REGISTRATION       = true
38
+ENABLE_CAPTCHA             = false
39
+REQUIRE_SIGNIN_VIEW        = false
40
+DEFAULT_KEEP_EMAIL_PRIVATE = false
41
+NO_REPLY_ADDRESS           = {{ no_reply_address }}
42
+
43
+[picture]
44
+DISABLE_GRAVATAR        = false
45
+ENABLE_FEDERATED_AVATAR = false
46
+
47
+[session]
48
+PROVIDER = file
49
+
50
+[log]
51
+MODE      = file
52
+LEVEL     = Info
53
+ROOT_PATH = /var/log/gitea
54
+
55
+[security]
56
+INSTALL_LOCK = true
57
+SECRET_KEY   = {{ secret_key }}

+ 28 - 0
base/gitea/files/gitea.nginx

@@ -0,0 +1,28 @@
1
+server {
2
+  listen 80;
3
+  server_name {{ server_name }};
4
+
5
+  include /etc/nginx/include/certbot.conf;
6
+
7
+  location / {
8
+    return 301 https://$host$request_uri;
9
+  }
10
+}
11
+
12
+server {
13
+  listen 443 ssl;
14
+  server_name {{ server_name }};
15
+
16
+  include /etc/nginx/include/ssl.conf;
17
+
18
+  ssl_certificate /etc/nginx/ssl.d/{{ server_name }}/fullchain.pem;
19
+  ssl_certificate_key /etc/nginx/ssl.d/{{ server_name }}/privkey.pem;
20
+
21
+  location / {
22
+    # The trailing '/' is important as it causes nginx to send the
23
+    # cleaned URI through to the destination service (double slashes
24
+    # removed, etc.).
25
+    proxy_pass          http://127.0.0.1:3000/;
26
+    include		/etc/nginx/include/proxy.conf;
27
+  }
28
+}

+ 26 - 0
base/gitea/files/gitea.service

@@ -0,0 +1,26 @@
1
+[Unit]
2
+Description=Gitea (Git with a cup of tea)
3
+After=syslog.target
4
+After=network.target
5
+After=mysqld.service
6
+#After=postgresql.service
7
+#After=memcached.service
8
+#After=redis.service
9
+
10
+[Service]
11
+# Modify these two values and uncomment them if you have
12
+# repos with lots of files and get an HTTP error 500 because
13
+# of that
14
+###
15
+#LimitMEMLOCK=infinity
16
+#LimitNOFILE=65535
17
+Type=simple
18
+User=git
19
+Group=git
20
+WorkingDirectory=/home/git/gitea
21
+ExecStart=/usr/bin/gitea web
22
+Restart=always
23
+Environment=USER=git HOME=/home/git GITEA_CUSTOM=/etc/gitea GITEA_WORK_DIR=/home/git/gitea
24
+
25
+[Install]
26
+WantedBy=multi-user.target

+ 99 - 0
base/gitea/init.sls

@@ -0,0 +1,99 @@
1
+{% from 'nginx/lib.jinja' import nginx_ssl_cert_present %}
2
+
3
+gitea:
4
+  group.present:
5
+    - name: git
6
+  user.present:
7
+    - name: git
8
+    - gid: git
9
+    - home: /home/git
10
+    - createhome: False
11
+    - require:
12
+      - group: git
13
+  file.managed:
14
+    - name: /usr/bin/gitea
15
+    - source: salt://gitea/files/vendor/1.1.2.linux-amd64/gitea
16
+    - mode: 555
17
+  service.running:
18
+    - watch:
19
+      - file: gitea
20
+      - file: /etc/systemd/system/gitea.service
21
+
22
+/etc/systemd/system/gitea.service:
23
+  file.managed:
24
+    - source: salt://gitea/files/gitea.service
25
+    - onchanges_in:
26
+      - module: reload_systemd
27
+
28
+/etc/gitea:
29
+  file.directory:
30
+    - require:
31
+      - file: /data
32
+
33
+/etc/gitea/conf:
34
+  file.directory:
35
+    - mode: 750
36
+    - group: git
37
+    - require:
38
+      - file: /etc/gitea
39
+      - group: git
40
+
41
+/etc/gitea/conf/app.ini:
42
+  file.managed:
43
+    - source: salt://gitea/files/app.ini
44
+    - mode: 640
45
+    - group: git
46
+    - template: jinja
47
+    - context: {{ pillar['gitea'] }}
48
+    - require:
49
+      - file: /etc/gitea/conf
50
+      - group: git
51
+    - watch_in:
52
+      - service: gitea
53
+
54
+/var/log/gitea:
55
+  file.directory:
56
+    - user: git
57
+    - group: git
58
+    - require:
59
+      - user: git
60
+      - group: git
61
+
62
+/data/repo:
63
+  file.directory:
64
+    - user: git
65
+    - group: git
66
+    - require:
67
+      - file: /data
68
+      - user: git
69
+      - group: git
70
+
71
+/home/git:
72
+  file.directory:
73
+    - user: git
74
+    - group: git
75
+    - require:
76
+      - user: git
77
+      - group: git
78
+
79
+/home/git/gitea:
80
+  file.directory:
81
+    - user: git
82
+    - group: git
83
+    - require:
84
+      - file: /home/git
85
+      - user: git
86
+      - group: git
87
+
88
+/etc/nginx/sites-enabled/gitea:
89
+  file.managed:
90
+    - source: salt://gitea/files/gitea.nginx
91
+    - template: jinja
92
+    - context:
93
+        server_name: {{ pillar['gitea']['server_name'] }}
94
+    - require:
95
+      - file: nginx_conf
96
+    - watch_in:
97
+      - service: nginx
98
+
99
+{{ nginx_ssl_cert_present(pillar['gitea']['server_name']) }}

+ 40 - 0
base/globals.jinja

@@ -0,0 +1,40 @@
1
+{% import 'all_private_ip_addresses.jinja' as all_private_ip_addresses %}
2
+
3
+{% set interface_eth0 = salt['network.interface']('eth0')|sort(attribute="label")|list %}
4
+{% set public_interface = interface_eth0[0] %}
5
+{% set public_ip_address = public_interface.address %}
6
+{% if interface_eth0|length > 0 %}
7
+  {% set private_interface = interface_eth0[-1] %}
8
+  {% set private_ip_address = private_interface.address %}
9
+{% else %}
10
+  {% set private_interface = none %}
11
+  {% set private_ip_address = none %}
12
+{% endif %}
13
+
14
+{% set private_node_domain = pillar['global']['private_node_domain'] %}
15
+{% set private_service_domain = pillar['global']['private_service_domain'] %}
16
+
17
+{% set private_fqdn = grains['id'] + '.' + private_node_domain %}
18
+
19
+{% set public_domain = pillar['global']['public_domain'] %}
20
+{% set public_fqdn = grains['id'] + '.' + public_domain %}
21
+
22
+{% set admin_servers = pillar['global']['admin_servers'] %}
23
+
24
+# WARNING: DO NOT CHANGE THIS VALUE without reading the comment in the
25
+# global.sls pillar.
26
+{% set primary_admin_host = pillar['global']['primary_admin_host'] %}
27
+
28
+{% set primary_admin_fqdn = primary_admin_host + '.' + private_node_domain %}
29
+{% set primary_admin_ip_address = admin_servers[primary_admin_host] %}
30
+{% set admin_hosts = admin_servers.keys() %}
31
+{% set admin_ip_addresses = admin_servers.values() %}
32
+
33
+{% set all_private_ip_addresses = all_private_ip_addresses.addresses %}
34
+
35
+{% set certbot_proxy_ip_address = primary_admin_ip_address %}
36
+{% set certbot_proxy_host = primary_admin_fqdn %}
37
+
38
+{% set deploy_env = pillar['env']['name'] %}
39
+{% set config = pillar['env'][deploy_env] %}
40
+{% set common = pillar['env']['common'] %}

+ 42 - 0
base/lib.jinja

@@ -0,0 +1,42 @@
1
+{% macro user_present(username, details, extra_groups=None) %}
2
+
3
+user_{{ username }}:
4
+  group.present:
5
+    - name: {{ username }}
6
+
7
+  user.present:
8
+    - name: {{ username }}
9
+    - fullname: {{ details.full_name }}
10
+    - password: {{ details.password }}
11
+    - shell: {{ details.shell }}
12
+    {% if extra_groups %}
13
+    - groups:
14
+      {% for group in extra_groups %}
15
+      - {{ group }}
16
+      {% endfor %}
17
+    {% endif %}
18
+    - gid_from_name: True
19
+    - createhome: True
20
+    - require:
21
+      - group: {{ username }}
22
+
23
+ssh_key_{{ username }}:
24
+  ssh_auth.present:
25
+    - user: {{ username }}
26
+    - names:
27
+      {% for ssh_key in details['ssh_keys'] %}
28
+      - {{ ssh_key }}
29
+      {% endfor %}
30
+
31
+gitenv_{{ username }}:
32
+  file.managed:
33
+    - name: ~{{ username }}/.gitenv
34
+    - contents: |
35
+        {
36
+          "GIT_AUTHOR_NAME": "{{ details.full_name }}",
37
+          "GIT_AUTHOR_EMAIL": "{{ details.email_address }}",
38
+          "GIT_COMMITTER_NAME": "{{ details.full_name }}",
39
+          "GIT_COMMITTER_EMAIL": "{{ details.email_address }}"
40
+        }
41
+
42
+{% endmacro %}

+ 11 - 0
base/mariadb/files/utf8mb4.cnf

@@ -0,0 +1,11 @@
1
+[mysqld]
2
+character-set-client-handshake = FALSE
3
+character_set_server = utf8mb4
4
+collation_server = utf8mb4_general_ci
5
+#sql_mode=TRADITIONAL
6
+
7
+[mysql]
8
+default-character-set = utf8mb4
9
+
10
+[client]
11
+default-character-set = utf8mb4

+ 22 - 0
base/mariadb/init.sls

@@ -0,0 +1,22 @@
1
+{% from 'mariadb/map.jinja' import mariadb %}
2
+
3
+mariadb_repo:
4
+  pkgrepo.managed:
5
+    - name: {{ mariadb.repo_name }}
6
+    - file: {{ mariadb.repo_file }}
7
+    - keyserver: {{ mariadb.keyserver }}
8
+    - keyid: {{ mariadb.keyid }}
9
+    - clean_file: True
10
+    - require_in:
11
+      - pkg: mariadb_client
12
+
13
+mariadb_client:
14
+  pkg.installed:
15
+    - name: {{ mariadb.client_package }}
16
+
17
+mariadb_conf:
18
+  file.managed:
19
+    - name: /etc/mysql/conf.d/utf8mb4.cnf
20
+    - source: salt://mariadb/files/utf8mb4.cnf
21
+    - require:
22
+      - pkg: mariadb_client

+ 20 - 0
base/mariadb/map.jinja

@@ -0,0 +1,20 @@
1
+{% set mariadb = salt['grains.filter_by']({
2
+	'Debian': {
3
+		'client_package': 'mariadb-client',
4
+		'server_package': 'mariadb-server',
5
+		'server_service': 'mysql',
6
+		'repo_name': 'deb http://mirrors.coreix.net/mariadb/repo/10.2/debian %s main' % salt['grains.get']('oscodename'),
7
+		'repo_file': '/etc/apt/sources.list.d/mariadb.list',
8
+		'keyserver': 'keyserver.ubuntu.com',
9
+		'keyid': 'cbcb082a1bb943db'
10
+	},
11
+	'Ubuntu': {
12
+		'client_package': 'mariadb-client',
13
+		'server_package': 'mariadb-server',
14
+		'server_service': 'mysql',
15
+		'repo_name': 'deb [arch=amd64] http://lon1.mirrors.digitalocean.com/mariadb/repo/10.2/ubuntu %s main' % salt['grains.get']('oscodename'),
16
+		'repo_file': '/etc/apt/sources.list.d/mariadb.list',
17
+		'keyserver': 'keyserver.ubuntu.com',
18
+		'keyid': 'F1656F24C74CD1D8'
19
+	}
20
+}, grain='os', merge=salt['pillar.get']('mariadb:lookup')) %}

+ 3 - 0
base/mariadb/server/files/backup.sh

@@ -0,0 +1,3 @@
1
+#!/bin/sh
2
+
3
+innobackupex --user="{{ xtrabackup_user }}" --password="{{ xtrabackup_password }}" --compress --galera-info /data/backup/once_off

+ 3 - 0
base/mariadb/server/files/bind.cnf

@@ -0,0 +1,3 @@
1
+{% import 'globals.jinja' as globals %}
2
+[mysqld]
3
+bind-address = {{ globals.private_ip_address }}

+ 2 - 0
base/mariadb/server/files/mysql-my.cnf

@@ -0,0 +1,2 @@
1
+[mysqld]
2
+wsrep_sst_auth={{ xtrabackup_user }}:{{ xtrabackup_password }}

+ 92 - 0
base/mariadb/server/init.sls

@@ -0,0 +1,92 @@
1
+{% from 'mariadb/map.jinja' import mariadb %}
2
+{% import 'globals.jinja' as globals %}
3
+
4
+include:
5
+  - mariadb
6
+  - systemd.helpers
7
+
8
+/data/mysql:
9
+  file.directory:
10
+    - require:
11
+      - file: /data
12
+
13
+/data/backup:
14
+  file.directory:
15
+    - require:
16
+      - file: /data
17
+
18
+/data/backup/once_off:
19
+  file.directory:
20
+    - require:
21
+      - file: /data/backup
22
+
23
+/data/backup/daily:
24
+  file.directory:
25
+    - require:
26
+      - file: /data/backup
27
+
28
+/var/lib/mysql:
29
+  file.symlink:
30
+    - target: /data/mysql
31
+    - require:
32
+      - file: /data/mysql
33
+
34
+/root/backup.sh:
35
+  file.managed:
36
+    - mode: 700
37
+    - source: salt://mariadb/server/files/backup.sh
38
+    - template: jinja
39
+    - context:
40
+        xtrabackup_user: {{ globals.config.mariadb.xtrabackup_user }}
41
+        xtrabackup_password: {{ globals.config.mariadb.xtrabackup_password }}
42
+
43
+mariadb_server:
44
+  pkg.installed:
45
+    - name: {{ mariadb.server_package }}
46
+    - require:
47
+      - pkgrepo: mariadb_repo
48
+      - file: /var/lib/mysql
49
+      - pkg: mariadb_client
50
+  service.running:
51
+    - name: {{ mariadb.server_service }}
52
+    - require:
53
+      - file: mysql_data_final
54
+      - module: reload_systemd
55
+    - watch:
56
+      - file: mariadb_conf
57
+      - file: mariadb_server_bind_conf
58
+      - file: mariadb_server_protected_conf
59
+
60
+# The mysql user probably won't exist when /data/mysql gets created,
61
+# so we have to change the permissions after installing the package.
62
+mysql_data_final:
63
+  file.directory:
64
+    # We need to use a different name to avoid clashing.
65
+    - name: /data/mysql/.
66
+    - user: mysql
67
+    - group: mysql
68
+    - require:
69
+      - pkg: mariadb_server
70
+      - file: /data/mysql
71
+
72
+mariadb_server_bind_conf:
73
+  file.managed:
74
+    - name: /etc/mysql/conf.d/bind.cnf
75
+    - source: salt://mariadb/server/files/bind.cnf
76
+    - template: jinja
77
+    - require:
78
+      - pkg: mariadb_client
79
+
80
+mariadb_server_protected_conf:
81
+  file.managed:
82
+    - name: /var/lib/mysql/.my.cnf
83
+    - source: salt://mariadb/server/files/mysql-my.cnf
84
+    - user: mysql
85
+    - group: mysql
86
+    - mode: 600
87
+    - template: jinja
88
+    - context:
89
+        xtrabackup_user: {{ globals.config.mariadb.xtrabackup_user }}
90
+        xtrabackup_password: {{ globals.config.mariadb.xtrabackup_password }}
91
+    - require:
92
+      - pkg: mariadb_server

+ 15 - 0
base/mount/data.sls

@@ -0,0 +1,15 @@
1
+/data:
2
+  mount.mounted:
3
+    - device: /dev/sdc
4
+    - fstype: ext4
5
+    - mkmnt: True
6
+    - pass_num: 2
7
+    - opts:
8
+      - defaults
9
+
10
+data_permissions:
11
+  file.directory:
12
+    - name: /data
13
+    - mode: 755
14
+    - require:
15
+      - mount: /data

+ 5 - 0
base/nginx/files/conf.d/drop-invalid-hosts.conf

@@ -0,0 +1,5 @@
1
+server {
2
+	listen		80 default_server;
3
+	server_name	"";
4
+	return		444;
5
+}

+ 25 - 0
base/nginx/files/fastcgi.conf

@@ -0,0 +1,25 @@
1
+
2
+fastcgi_param  SCRIPT_FILENAME    $document_root$fastcgi_script_name;
3
+fastcgi_param  QUERY_STRING       $query_string;
4
+fastcgi_param  REQUEST_METHOD     $request_method;
5
+fastcgi_param  CONTENT_TYPE       $content_type;
6
+fastcgi_param  CONTENT_LENGTH     $content_length;
7
+
8
+fastcgi_param  SCRIPT_NAME        $fastcgi_script_name;
9
+fastcgi_param  REQUEST_URI        $request_uri;
10
+fastcgi_param  DOCUMENT_URI       $document_uri;
11
+fastcgi_param  DOCUMENT_ROOT      $document_root;
12
+fastcgi_param  SERVER_PROTOCOL    $server_protocol;
13
+fastcgi_param  HTTPS              $https if_not_empty;
14
+
15
+fastcgi_param  GATEWAY_INTERFACE  CGI/1.1;
16
+fastcgi_param  SERVER_SOFTWARE    nginx/$nginx_version;
17
+
18
+fastcgi_param  REMOTE_ADDR        $remote_addr;
19
+fastcgi_param  REMOTE_PORT        $remote_port;
20
+fastcgi_param  SERVER_ADDR        $server_addr;
21
+fastcgi_param  SERVER_PORT        $server_port;
22
+fastcgi_param  SERVER_NAME        $server_name;
23
+
24
+# PHP only, required if PHP was built with --enable-force-cgi-redirect
25
+fastcgi_param  REDIRECT_STATUS    200;

+ 8 - 0
base/nginx/files/include/certbot.conf

@@ -0,0 +1,8 @@
1
+{% import 'globals.jinja' as globals %}
2
+location /.well-known/ {
3
+    # The trailing '/.well-known/' is important as it causes nginx to send the
4
+    # cleaned URI through to the destination service (double slashes
5
+    # removed, etc.).
6
+    proxy_pass http://{{ globals.certbot_proxy_ip_address }}/.well-known/;
7
+    proxy_set_header Host {{ globals.certbot_proxy_host }};
8
+}

+ 5 - 0
base/nginx/files/include/proxy.conf

@@ -0,0 +1,5 @@
1
+proxy_set_header  Host               $host;
2
+proxy_set_header  X-Real-IP          $remote_addr;
3
+proxy_set_header  X-Forwarded-For    $remote_addr;
4
+proxy_set_header  X-Forwarded-Proto  $scheme;
5
+proxy_redirect    off;

+ 3 - 0
base/nginx/files/include/websockets.conf

@@ -0,0 +1,3 @@
1
+proxy_http_version  1.1;
2
+proxy_set_header    Upgrade $http_upgrade;
3
+proxy_set_header    Connection "upgrade";

+ 85 - 0
base/nginx/files/nginx.conf

@@ -0,0 +1,85 @@
1
+user nginx;
2
+worker_processes 4;
3
+pid /run/nginx.pid;
4
+
5
+events {
6
+	worker_connections 768;
7
+	# multi_accept on;
8
+}
9
+
10
+http {
11
+
12
+	##
13
+	# Basic Settings
14
+	##
15
+
16
+	sendfile on;
17
+	tcp_nopush on;
18
+	tcp_nodelay on;
19
+	keepalive_timeout 65;
20
+	types_hash_max_size 2048;
21
+	# server_tokens off;
22
+
23
+	# server_names_hash_bucket_size 64;
24
+	# server_name_in_redirect off;
25
+
26
+	include /etc/nginx/mime.types;
27
+	default_type application/octet-stream;
28
+
29
+	##
30
+	# SSL Settings
31
+	##
32
+
33
+	ssl_protocols TLSv1 TLSv1.1 TLSv1.2; # Dropping SSLv3, ref: POODLE
34
+	ssl_prefer_server_ciphers on;
35
+
36
+	##
37
+	# Logging Settings
38
+	##
39
+
40
+	access_log /var/log/nginx/access.log;
41
+	error_log /var/log/nginx/error.log;
42
+
43
+	##
44
+	# Gzip Settings
45
+	##
46
+
47
+	gzip on;
48
+	gzip_disable "msie6";
49
+
50
+	# gzip_vary on;
51
+	# gzip_proxied any;
52
+	# gzip_comp_level 6;
53
+	# gzip_buffers 16 8k;
54
+	# gzip_http_version 1.1;
55
+	# gzip_types text/plain text/css application/json application/javascript text/xml application/xml application/xml+rss text/javascript;
56
+
57
+	##
58
+	# Virtual Host Configs
59
+	##
60
+
61
+	include /etc/nginx/conf.d/*.conf;
62
+	include /etc/nginx/sites-enabled/*;
63
+}
64
+
65
+
66
+#mail {
67
+#	# See sample authentication script at:
68
+#	# http://wiki.nginx.org/ImapAuthenticateWithApachePhpScript
69
+# 
70
+#	# auth_http localhost/auth.php;
71
+#	# pop3_capabilities "TOP" "USER";
72
+#	# imap_capabilities "IMAP4rev1" "UIDPLUS";
73
+# 
74
+#	server {
75
+#		listen     localhost:110;
76
+#		protocol   pop3;
77
+#		proxy      on;
78
+#	}
79
+# 
80
+#	server {
81
+#		listen     localhost:143;
82
+#		protocol   imap;
83
+#		proxy      on;
84
+#	}
85
+#}

+ 13 - 0
base/nginx/files/snippets/fastcgi-php.conf

@@ -0,0 +1,13 @@
1
+# regex to split $uri to $fastcgi_script_name and $fastcgi_path
2
+fastcgi_split_path_info ^(.+\.php)(/.+)$;
3
+
4
+# Check that the PHP script exists before passing it
5
+try_files $fastcgi_script_name =404;
6
+
7
+# Bypass the fact that try_files resets $fastcgi_path_info
8
+# see: http://trac.nginx.org/nginx/ticket/321
9
+set $path_info $fastcgi_path_info;
10
+fastcgi_param PATH_INFO $path_info;
11
+
12
+fastcgi_index index.php;
13
+include fastcgi.conf;

+ 5 - 0
base/nginx/files/snippets/snakeoil.conf

@@ -0,0 +1,5 @@
1
+# Self signed certificates generated by the ssl-cert package
2
+# Don't use them in a production server!
3
+
4
+ssl_certificate /etc/ssl/certs/ssl-cert-snakeoil.pem;
5
+ssl_certificate_key /etc/ssl/private/ssl-cert-snakeoil.key;

+ 32 - 0
base/nginx/init.sls

@@ -0,0 +1,32 @@
1
+{% from 'nginx/map.jinja' import nginx %}
2
+
3
+nginx:
4
+  pkgrepo.managed:
5
+    - name: {{ nginx.repo_name }}
6
+    - file: {{ nginx.repo_file }}
7
+    - key_url: {{ nginx.key_url }}
8
+    - clean_file: True
9
+    - require_in:
10
+      - pkg: nginx
11
+  pkg.installed:
12
+    - name: {{ nginx.package }}
13
+  service.running:
14
+    - name: {{ nginx.service }}
15
+    - watch:
16
+      - pkg: nginx
17
+      - file: nginx_conf
18
+
19
+/etc/nginx/conf.d/default.conf:
20
+  file.absent
21
+
22
+/etc/nginx/conf.d/example_ssl.conf:
23
+  file.absent
24
+
25
+nginx_conf:
26
+  file.recurse:
27
+    - name: /etc/nginx
28
+    - source: salt://nginx/files
29
+    - include_empty: True
30
+    - template: jinja
31
+    - require:
32
+      - pkg: nginx

+ 25 - 0
base/nginx/lib.jinja

@@ -0,0 +1,25 @@
1
+{% macro nginx_ssl_cert_present(server_domain) %}
2
+/etc/nginx/ssl.d/{{ server_domain }}:
3
+  file.directory:
4
+    - mode: 700
5
+    - require:
6
+      - file: /etc/nginx/ssl.d
7
+
8
+/etc/nginx/ssl.d/{{ server_domain }}/fullchain.pem:
9
+  file.managed:
10
+    - mode: 400
11
+    - contents_pillar: env:certs:host:{{ server_domain }}:fullchain.pem
12
+    - require:
13
+      - file: /etc/nginx/ssl.d/{{ server_domain }}
14
+    - watch_in:
15
+      - service: nginx
16
+
17
+/etc/nginx/ssl.d/{{ server_domain }}/privkey.pem:
18
+  file.managed:
19
+    - mode: 400
20
+    - contents_pillar: env:certs:host:{{ server_domain }}:privkey.pem
21
+    - require:
22
+      - file: /etc/nginx/ssl.d/{{ server_domain }}
23
+    - watch_in:
24
+      - service: nginx
25
+{% endmacro %}

+ 16 - 0
base/nginx/map.jinja

@@ -0,0 +1,16 @@
1
+{% set nginx = salt['grains.filter_by']({
2
+	'Debian': {
3
+		'package': 'nginx',
4
+		'service': 'nginx',
5
+		'repo_name': 'deb http://nginx.org/packages/debian/ %s nginx' % salt['grains.get']('oscodename'),
6
+		'repo_file': '/etc/apt/sources.list.d/nginx.list',
7
+		'key_url': 'https://nginx.org/keys/nginx_signing.key'
8
+	},
9
+	'Ubuntu': {
10
+		'package': 'nginx',
11
+		'service': 'nginx',
12
+		'repo_name': 'deb http://nginx.org/packages/ubuntu/ %s nginx' % 'xenial',
13
+		'repo_file': '/etc/apt/sources.list.d/nginx.list',
14
+		'key_url': 'https://nginx.org/keys/nginx_signing.key'
15
+	},
16
+}, grain='os', merge=salt['pillar.get']('nginx:lookup')) %}

+ 16 - 0
base/nginx/ssl-files/conf.d/ssl-drop-invalid-hosts.conf

@@ -0,0 +1,16 @@
1
+server {
2
+	listen		443 default_server;
3
+	server_name	"";
4
+
5
+	include		include/ssl.conf;
6
+
7
+	# This config won't work without a valid certificate.
8
+	# We use a dummy certificate instead of a wildcard one as
9
+	# Let's Encrypt currently does not offer wildcard certificates.
10
+	# Anyone who hits this virtual server is doing something wrong
11
+	# anway, so an invalid certificate won't add to their problems.
12
+	ssl_certificate /etc/nginx/ssl.d/dummy-cert.pem;
13
+	ssl_certificate_key /etc/nginx/ssl.d/dummy-key.pem;
14
+
15
+	return		444;
16
+}

+ 3 - 0
base/nginx/ssl-files/include/ssl.conf

@@ -0,0 +1,3 @@
1
+keepalive_timeout	70;
2
+ssl_session_cache	shared:SSL:10m;
3
+ssl_session_timeout	10m;

+ 43 - 0
base/nginx/ssl.sls

@@ -0,0 +1,43 @@
1
+include:
2
+  - nginx
3
+
4
+/etc/nginx/ssl.d:
5
+  file.directory:
6
+    - mode: 700
7
+    - require:
8
+      - file: nginx_conf
9
+
10
+/etc/nginx/ssl.d/dummy-cert.pem:
11
+  file.managed:
12
+    - mode: 400
13
+    - contents_pillar: env:certs:dummy-cert.pem
14
+    - require:
15
+      - file: /etc/nginx/ssl.d
16
+
17
+/etc/nginx/ssl.d/dummy-key.pem:
18
+  file.managed:
19
+    - mode: 400
20
+    - contents_pillar: env:certs:dummy-key.pem
21
+    - require:
22
+      - file: /etc/nginx/ssl.d
23
+
24
+# NOTE: naming the subdirectory 'files-ssl' instead of ssl-files causes it to be
25
+# picked up by the file.recurse in the nginx_conf state, which is only supposed
26
+# to copy the 'files' subdirectory. This causes the empty directories
27
+# /etc/nginx/../files-ssl/conf.d/, /etc/nginx/../files-ssl/include/, etc., to
28
+# be created on the minion (not sure why the contents aren't copied as well).
29
+# This is not a problem of multiple file.recurse states pointing at the same
30
+# destination directory. It also happens if we multiple 'file.managed' states
31
+# here for each individual file.
32
+nginx_conf_ssl:
33
+  file.recurse:
34
+    - name: /etc/nginx
35
+    - source: salt://nginx/ssl-files
36
+    - include_empty: True
37
+    - require:
38
+      - file: /etc/nginx/ssl.d
39
+    - watch:
40
+      - file: /etc/nginx/ssl.d/dummy-cert.pem
41
+      - file: /etc/nginx/ssl.d/dummy-key.pem
42
+    - watch_in:
43
+      - service: nginx

+ 8 - 0
base/pip/init.sls

@@ -0,0 +1,8 @@
1
+pip:
2
+  pkg.installed:
3
+    - pkgs:
4
+      - libcurl4-openssl-dev
5
+      - python2.7-dev
6
+      - libssl-dev
7
+      - python-pip
8
+      - virtualenv

+ 19 - 0
base/pip/virtualenvwrapper.sls

@@ -0,0 +1,19 @@
1
+include:
2
+  - pip
3
+
4
+/data/virtualenvs:
5
+  file.directory:
6
+    - require:
7
+      - file: /data
8
+
9
+virtualenvwrapper:
10
+  pip.installed:
11
+    - require:
12
+      - pkg: pip
13
+
14
+virtualenvwrapper_bashrc:
15
+  file.append:
16
+    - name: /root/.bashrc
17
+    - text:
18
+      - export WORKON_HOME=/data/virtualenvs
19
+      - source /usr/local/bin/virtualenvwrapper.sh

+ 9 - 0
base/root_user/files/restart_minion.sh

@@ -0,0 +1,9 @@
1
+#!/bin/sh
2
+
3
+service salt-minion stop
4
+sleep 5
5
+killall salt-minion
6
+sleep 5
7
+killall -9 salt-minion
8
+sleep 5
9
+service salt-minion start

+ 22 - 0
base/root_user/init.sls

@@ -0,0 +1,22 @@
1
+ssh_auth_root:
2
+  ssh_auth.present:
3
+    - user: root
4
+    - names:
5
+      - from="192.168.0.0/16" ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDICROKx+47R4SXP9tCzCWaOpHFuHHykvEBeSdklJ6jIeHYBH607XwJ/2eQeiO/KufExO6hwc3OTR5pCAERbSzEIPbWdeiqyQci2qxAFYc1gHsvPnStzR8B4yyXyMNDBsdruwyjIeX4J1hRcfLdUjeUEAUfsCcy49ooxUbR+7RN7DhAy9adTweFdl0E6xnRiP2unp9itY1cjzPBC/H5xdQa4BJl6AFpp0Ox37IDr5GZQzjqOMSU4MeSoPyiDfD7JqeKFMXa6eJfrxe6wSuDOWE2uwUIZDTMY+9kl498/E6d4b1pdKW/IG8EJEsTrSD4YF9LK18glt6i69yfWR5jSDOP root@admin1
6
+
7
+/root/restart_minion.sh:
8
+  file.managed:
9
+    - source: salt://root_user/files/restart_minion.sh
10
+    - mode: 555
11
+
12
+/root/.bashrc:
13
+  file.append:
14
+    - text: |
15
+        if [ "$SUDO_USER" != "" ]; then
16
+          SUDO_USER_HOME=$(getent passwd "$SUDO_USER" | cut -d: -f6)
17
+          if [ -f "$SUDO_USER_HOME/.gitenv" ]; then
18
+            for var in GIT_AUTHOR_NAME GIT_AUTHOR_EMAIL GIT_COMMITTER_NAME GIT_COMMITTER_EMAIL; do
19
+              export $var="$(jq -r .$var "$SUDO_USER_HOME/.gitenv")"
20
+            done
21
+          fi
22
+        fi

+ 12 - 0
base/saltstack/map.jinja

@@ -0,0 +1,12 @@
1
+{% set saltstack = salt['grains.filter_by']({
2
+	'Debian': {
3
+		'repo_name': 'deb http://repo.saltstack.com/apt/debian/%s/amd64/latest %s main' % (salt['grains.get']('osmajorrelease'), salt['grains.get']('oscodename')),
4
+		'repo_file': '/etc/apt/sources.list.d/saltstack.list',
5
+		'repo_key_url': 'https://repo.saltstack.com/apt/debian/%s/amd64/latest/SALTSTACK-GPG-KEY.pub' % salt['grains.get']('osmajorrelease')
6
+	},
7
+	'Ubuntu': {
8
+		'repo_name': 'deb http://repo.saltstack.com/apt/ubuntu/16.04/amd64/latest xenial main',
9
+		'repo_file': '/etc/apt/sources.list.d/saltstack.list',
10
+		'repo_key_url': 'https://repo.saltstack.com/apt/ubuntu/16.04/amd64/latest/SALTSTACK-GPG-KEY.pub',
11
+	}
12
+}, grain='os', merge=salt['pillar.get']('saltstack:lookup')) %}

+ 3 - 0
base/saltstack/master/files/file_roots.conf

@@ -0,0 +1,3 @@
1
+file_roots:
2
+  base:
3
+    - /data/admin/salt/base

+ 1 - 0
base/saltstack/master/files/hash_type.conf

@@ -0,0 +1 @@
1
+hash_type: sha256

+ 1 - 0
base/saltstack/master/files/interface.conf

@@ -0,0 +1 @@
1
+interface: {{ listen_address }}

+ 2 - 0
base/saltstack/master/files/output.conf

@@ -0,0 +1,2 @@
1
+state_verbose: False
2
+state_output: changes

+ 3 - 0
base/saltstack/master/files/peer.conf

@@ -0,0 +1,3 @@
1
+peer:
2
+  .*:
3
+    - x509.sign_remote_certificate

+ 3 - 0
base/saltstack/master/files/pillar_roots.conf

@@ -0,0 +1,3 @@
1
+pillar_roots:
2
+  base:
3
+    - /data/admin/pillar/base

+ 2 - 0
base/saltstack/master/files/runners.conf

@@ -0,0 +1,2 @@
1
+runner_dirs:
2
+  - /data/admin/salt-modules/runners

+ 1 - 0
base/saltstack/master/files/worker_threads.conf

@@ -0,0 +1 @@
1
+worker_threads: 15

+ 26 - 0
base/saltstack/master/init.sls

@@ -0,0 +1,26 @@
1
+{% import 'globals.jinja' as globals %}
2
+
3
+include:
4
+  - saltstack.minion
5
+
6
+salt_master:
7
+  pkg.installed:
8
+    - pkgs:
9
+      - salt-master
10
+    - require:
11
+      - pkg: salt_minion
12
+  service.running:
13
+    - name: salt-master
14
+    - watch:
15
+      - pkg: salt_master
16
+      - file: /etc/salt/master.d
17
+
18
+/etc/salt/master.d:
19
+  file.recurse:
20
+    - source: salt://saltstack/master/files
21
+    - file_mode: 644
22
+    - template: jinja
23
+    - context:
24
+        listen_address: {{ globals.private_ip_address }}
25
+    - require:
26
+      - pkg: salt_master

+ 1 - 0
base/saltstack/minion/files/hash_type.conf

@@ -0,0 +1 @@
1
+hash_type: sha256

+ 2 - 0
base/saltstack/minion/files/master.conf

@@ -0,0 +1,2 @@
1
+master:{% for ip in master_ip_addresses %}
2
+  - {{ ip }}{% endfor %}

+ 0 - 0
base/saltstack/minion/init.sls


Bu fark içinde çok fazla dosya değişikliği olduğu için bazı dosyalar gösterilmiyor