Coverage for reactive/content_cache.py: 96%
Shortcuts on this page
r m x p toggle line displays
j k next/prev highlighted chunk
0 (zero) top of page
1 (one) first highlighted chunk
Shortcuts on this page
r m x p toggle line displays
j k next/prev highlighted chunk
0 (zero) top of page
1 (one) first highlighted chunk
1import grp
2import os
3import pwd
4import random
5import subprocess
6import time
7from copy import deepcopy
9import jinja2
10import yaml
12from charms import reactive
13from charms.layer import status
14from charmhelpers import context
15from charmhelpers.core import hookenv, host, unitdata
16from charmhelpers.contrib.charmsupport import nrpe
18from lib import utils
19from lib import nginx
20from lib import haproxy as HAProxy
# Destination for the charm's kernel tuning; applied via 'sysctl -p' in configure_sysctl().
SYSCTL_CONF_PATH = '/etc/sysctl.d/90-content-cache.conf'
@reactive.hook('upgrade-charm')
def upgrade_charm():
    """Force a full reconfiguration after the charm itself is upgraded."""
    status.maintenance('forcing reconfiguration on upgrade-charm')
    # Dropping every 'configured' flag makes all the reactive handlers
    # below re-run and regenerate their configs from scratch.
    stale_flags = (
        'content_cache.active',
        'content_cache.installed',
        'content_cache.haproxy.configured',
        'content_cache.nginx.configured',
        'content_cache.sysctl.configured',
        'nagios-nrpe.configured',
    )
    for flag in stale_flags:
        reactive.clear_flag(flag)
@reactive.hook('haproxy-statistics-relation-joined', 'haproxy-statistics-relation-changed')
def fire_stats_hook():
    """We don't have an interface for this relation yet, so just fake it here."""
    # Setting the flag lets the @reactive.when('haproxy-statistics.available')
    # handlers below react to the relation as if a real interface existed.
    reactive.set_flag('haproxy-statistics.available')
@reactive.when_not('content_cache.installed')
def install():
    """Reset configuration state on first run, then mark the charm installed."""
    for flag in (
        'content_cache.active',
        'content_cache.haproxy.configured',
        'content_cache.nginx.configured',
        'content_cache.sysctl.configured',
    ):
        reactive.clear_flag(flag)
    reactive.set_flag('content_cache.installed')
@reactive.when('config.changed')
def config_changed():
    """Invalidate HAProxy, Nginx, sysctl and NRPE setup on any config change."""
    stale_flags = (
        'content_cache.haproxy.configured',
        'content_cache.nginx.configured',
        'content_cache.sysctl.configured',
        'nagios-nrpe.configured',
    )
    for flag in stale_flags:
        reactive.clear_flag(flag)
@reactive.when('content_cache.haproxy.configured', 'content_cache.nginx.configured', 'content_cache.sysctl.configured')
@reactive.when_not('content_cache.active')
def set_active(version_file='version'):
    """Mark the unit active, appending the charm source revision when known."""
    # XXX: Add more info such as nginx and haproxy status

    revision = ''
    if os.path.exists(version_file):
        with open(version_file) as f:
            line = f.readline().strip()
        # We only want the first 8 characters, that's enough to tell
        # which version of the charm we're using.
        ellipsis = '…' if len(line) > 8 else ''
        revision = ' (source version/commit {}{})'.format(line[:8], ellipsis)
    status.active('Ready{}'.format(revision))
    reactive.set_flag('content_cache.active')
@reactive.when_any('content_cache.haproxy.reload-required', 'content_cache.nginx.reload-required')
def service_start_or_reload():
    """Start any stopped service immediately; reload running ones after a jitter."""
    services = ('haproxy', 'nginx')

    # Immediately start up services if they're not running.
    for svc in services:
        if host.service_running(svc):
            continue
        status.maintenance('Starting {}...'.format(svc))
        host.service_start(svc)
        reactive.clear_flag('content_cache.{}.reload-required'.format(svc))

    # Random 0-20s delay so units in a deployment don't all reload at once.
    random.seed()
    delay = (random.random() * 100) % 20
    status.maintenance('Reloading services in {}s...'.format(int(delay)))
    time.sleep(delay)

    for svc in services:
        configured = reactive.is_flag_set('content_cache.{}.configured'.format(svc))
        reload_needed = reactive.is_flag_set('content_cache.{}.reload-required'.format(svc))
        if configured and reload_needed:
            status.maintenance('Reloading {}...'.format(svc))
            host.service_reload(svc)
            reactive.clear_flag('content_cache.{}.reload-required'.format(svc))
def configure_nginx_metrics(ngx_conf, enable_prometheus_metrics, listen_address):
    """Configure nginx to expose metrics.

    Create the dedicated server exposing the metrics and add the logging of the cache hits for the other sites.

    :param bool enable_prometheus_metrics: True if the metrics should be exposed, False otherwise
    :returns: True if any change was made, False otherwise
    :rtype: bool
    """
    # Both helpers must run unconditionally; 'or' is evaluated after the call.
    changed = copy_file('files/prometheus.lua', os.path.join(ngx_conf.conf_path, 'prometheus.lua'))
    changed = ngx_conf.toggle_metrics_site(enable_prometheus_metrics, listen_address) or changed

    old_ports = [int(port.split('/')[0]) for port in hookenv.opened_ports()]
    hookenv.log("Current opened ports: {}".format(old_ports))
    metrics_port_open = nginx.METRICS_PORT in old_ports
    if enable_prometheus_metrics and not metrics_port_open:
        hookenv.log("Opening port {0}".format(nginx.METRICS_PORT))
        hookenv.open_port(nginx.METRICS_PORT, 'TCP')
    elif not enable_prometheus_metrics and metrics_port_open:
        hookenv.log("Closing port {0}".format(nginx.METRICS_PORT))
        hookenv.close_port(nginx.METRICS_PORT, 'TCP')

    return changed
@reactive.when_not('content_cache.nginx.installed')
def stop_nginx():
    """Stop the freshly-installed Nginx so its default site frees up TCP/80."""
    # Just by installing the Nginx package, it has a default site configured
    # and listens on TCP/80. This causes HAProxy to fail until such time as
    # Nginx is configured and reloaded. We'll just stop it here.
    host.service_stop('nginx')
    reactive.set_flag('content_cache.nginx.installed')
@reactive.when('content_cache.nginx.installed')
@reactive.when_not('content_cache.nginx.configured')
def configure_nginx(conf_path=None):
    """Render and sync the Nginx caching-layer site configs.

    Blocks when the 'sites' config is missing or invalid. Sets
    'content_cache.nginx.reload-required' when any rendered file changed,
    and 'content_cache.nginx.configured' on success.

    :param str conf_path: override for the Nginx conf directory (passed
        straight to nginx.NginxConf; None uses its default).
    """
    status.maintenance('setting up Nginx as caching layer')
    reactive.clear_flag('content_cache.active')

    config = hookenv.config()

    if not config.get('sites'):
        status.blocked('requires list of sites to configure')
        return

    enable_cache_bg_update = config.get('enable_cache_background_update', True)
    enable_cache_lock = config.get('enable_cache_lock', True)
    enable_prometheus_metrics = config.get('enable_prometheus_metrics')

    ngx_conf = nginx.NginxConf(
        conf_path,
        hookenv.local_unit(),
        enable_cache_bg_update=enable_cache_bg_update,
        enable_cache_lock=enable_cache_lock,
    )

    sites_secrets = secrets_from_config(config.get('sites_secrets'))
    # 'blacklist_ports' is a comma-separated string of ports to never allocate.
    blacklist_ports = [int(x.strip()) for x in config.get('blacklist_ports', '').split(',') if x.strip()]
    sites = sites_from_config(config.get('sites'), sites_secrets, blacklist_ports=blacklist_ports)
    if not sites:
        status.blocked('list of sites provided is invalid')
        return

    # We only want the cache layer to listen only on localhost. This allows us
    # to deploy to edge networks and not worry about having to firewall off
    # access.
    conf = {}
    conf['cache_inactive_time'] = config['cache_inactive_time']
    conf['cache_max_size'] = config['cache_max_size'] or utils.cache_max_size(config['cache_path'])
    conf['cache_path'] = config['cache_path']
    conf['listen_address'] = '127.0.0.1'
    conf['reuseport'] = config['reuseport']
    changed = False
    # Render one site file per configured site; 'conf' is reused and
    # overwritten on each iteration.
    for site, site_conf in sites.items():
        conf['site'] = site
        conf['site_name'] = site_conf.get('site-name') or site
        conf['listen_port'] = site_conf['cache_port']
        conf['locations'] = site_conf.get('locations', {})
        conf['enable_prometheus_metrics'] = enable_prometheus_metrics

        if ngx_conf.write_site(site, ngx_conf.render(conf)):
            hookenv.log('Wrote out new configs for site: {}:{}'.format(site, conf['listen_port']))
            changed = True

    metrics_listen = config.get('metrics_listen_address', None)
    if configure_nginx_metrics(ngx_conf, enable_prometheus_metrics, listen_address=metrics_listen):
        hookenv.log('nginx metrics exposed to prometheus')
        changed = True

    # Include the site exposing metrics if needed
    if enable_prometheus_metrics:
        sites[nginx.METRICS_SITE] = None

    connections = config['worker_connections']
    processes = config['worker_processes']
    # sync_sites() removes site files no longer in the desired set.
    if ngx_conf.sync_sites(sites.keys()) or ngx_conf.set_workers(connections, processes):
        hookenv.log('Enabled sites: {}'.format(' '.join(sites.keys())))
        changed = True

    if copy_file('files/nginx-logging-format.conf', os.path.join(ngx_conf.conf_path, 'nginx-logging-format.conf')):
        changed = True

    if changed:
        reactive.set_flag('content_cache.nginx.reload-required')

    update_logrotate('nginx', retention=config.get('log_retention'))
    reactive.set_flag('content_cache.nginx.configured')
@reactive.when('content_cache.nginx.installed')
@reactive.when_not('content_cache.haproxy.configured')  # NOQA: C901 LP#1825084
def configure_haproxy():  # NOQA: C901 LP#1825084
    """Render the HAProxy config fronting the cache and backend layers.

    For each configured site this builds two stanzas: a public-facing
    'cached-<site>' entry that forwards requests to the local Nginx cache,
    and a localhost-only '<site>' entry that proxies cache misses through
    to the real backends. Also reconciles the unit's opened ports with the
    desired set, and sets the reload-required flag when the written config
    changed.
    """
    status.maintenance('setting up HAProxy for frontend and backend proxy')
    reactive.clear_flag('content_cache.active')

    config = hookenv.config()

    if not config.get('sites'):
        status.blocked('requires list of sites to configure')
        return

    max_connections = config.get('max_connections', 0)
    hard_stop_after = config.get('haproxy_hard_stop_after')
    load_balancing_algorithm = config.get('haproxy_load_balancing_algorithm')
    haproxy = HAProxy.HAProxyConf(
        max_connections=max_connections,
        hard_stop_after=hard_stop_after,
        load_balancing_algorithm=load_balancing_algorithm,
    )
    sites_secrets = secrets_from_config(config.get('sites_secrets'))
    blacklist_ports = [int(x.strip()) for x in config.get('blacklist_ports', '').split(',') if x.strip()]
    sites = sites_from_config(config.get('sites'), sites_secrets, blacklist_ports=blacklist_ports)
    if not sites:
        status.blocked('list of sites provided is invalid')
        return

    # hookenv.opened_ports() yields entries like '443/tcp'.
    old_ports = {int(x.partition('/')[0]) for x in hookenv.opened_ports()}
    hookenv.log("Current opened ports: {}".format(old_ports))
    new_ports = set()

    # We need to slot in the caching layer here.
    new_conf = {}
    for site, site_conf in sites.items():
        cache_port = site_conf['cache_port']
        cached_site = 'cached-{}'.format(site)
        new_conf[cached_site] = {'site-name': site_conf.get('site-name') or site, 'locations': {}}

        default_site = site_conf.get('default')
        if default_site:
            new_conf[cached_site]['default'] = default_site

        default_port = 80
        tls_cert_bundle_path = site_conf.get('tls-cert-bundle-path')
        if tls_cert_bundle_path:
            default_port = 443
            new_conf[cached_site]['tls-cert-bundle-path'] = tls_cert_bundle_path
            redirect_http_to_https = site_conf.get('redirect-http-to-https')
            if redirect_http_to_https:
                new_conf[cached_site]['redirect-http-to-https'] = redirect_http_to_https
                # The HTTP->HTTPS redirect needs port 80 open as well.
                new_ports.add(80)

        new_conf[cached_site]['port'] = site_conf.get('port') or default_port
        try:
            new_ports.add(int(new_conf[cached_site]['port']))
        except ValueError as e:
            hookenv.log('Only integer ports are supported: {}'.format(e))

        # XXX: Reduce complexity here

        for location, loc_conf in site_conf.get('locations', {}).items():
            new_cached_loc_conf = {}
            new_cached_loc_conf['backends'] = ['127.0.0.1:{}'.format(cache_port)]
            # For the caching layer here, we want the default, low,
            # 2s no matter what. This is so it'll notice when the
            # caching layer (nginx) is back up quicker.
            new_cached_loc_conf['backend-inter-time'] = '2s'
            # Also, for caching layer, we want higher fall count as it's less
            # likely the caching layer is down, 2 mins here (inter * fall).
            new_cached_loc_conf['backend-fall-count'] = 60
            # Rather than enable haproxy's 'option forwardfor' we want to replace
            # the X-F-F header in case it's spoofed.
            new_cached_loc_conf['backend-options'] = ['http-request set-header X-Forwarded-For %[src]']

            # No backends
            if not site_conf['locations'][location].get('backends'):
                if not new_conf[cached_site]['locations']:
                    new_conf[cached_site]['locations'][location] = new_cached_loc_conf
                continue

            if new_conf.get(site) is None:
                new_conf[site] = {
                    'site-name': site_conf.get('site-name') or site,
                    # We only want the backend proxy layer to listen only on localhost. This
                    # allows us to deploy to edge networks and not worry about having to
                    # firewall off access.
                    'listen-address': '127.0.0.1',
                    'port': loc_conf.get('backend_port'),
                    'locations': {},
                }

            new_loc_conf = new_conf[site]['locations'][location] = {'backends': loc_conf['backends']}
            if 'backend_port' in loc_conf:
                new_loc_conf['backend_port'] = loc_conf['backend_port']

            backend_maxconn = loc_conf.get('backend-maxconn', 200)
            new_loc_conf['backend-maxconn'] = backend_maxconn
            # Default to backend_maxconn times the no. of provided
            # backends, so 1-to-1 mapping.
            cache_maxconn = loc_conf.get('cache-maxconn', backend_maxconn * len(loc_conf['backends']))
            new_cached_loc_conf['backend-maxconn'] = cache_maxconn

            backend_check_method = loc_conf.get('backend-check-method')
            if backend_check_method:
                new_cached_loc_conf['backend-check-method'] = backend_check_method
                new_loc_conf['backend-check-method'] = backend_check_method
            backend_check_path = loc_conf.get('backend-check-path')
            if backend_check_path:
                new_cached_loc_conf['backend-check-path'] = backend_check_path
                new_loc_conf['backend-check-path'] = backend_check_path
            new_loc_conf['backend-options'] = []
            backend_options = loc_conf.get('backend-options')
            if backend_options:
                new_loc_conf['backend-options'] = backend_options

            # Make it more resilient to failures and redispatch requests to different backends.
            new_loc_conf['backend-options'].append('retry-on all-retryable-errors')
            new_loc_conf['backend-options'].append('redispatch 1')

            new_cached_loc_conf['signed-url-hmac-key'] = loc_conf.get('signed-url-hmac-key')
            # Pass through selected backend location configs, if defined.
            for key in ('site-name', 'backend-inter-time', 'backend-tls'):
                if key in loc_conf:
                    new_loc_conf[key] = loc_conf[key]
            # No 'backend-tls' provided so let's try work out automatically.
            if ('backend-tls' not in loc_conf) and tls_cert_bundle_path:
                new_cached_loc_conf['backend-tls'] = False
                new_loc_conf['backend-tls'] = True

            # When we have multiple locations, we only want/need one HAProxy
            # stanza to redirect requests to the cache.
            if not new_conf[cached_site]['locations']:
                new_conf[cached_site]['locations'][location] = new_cached_loc_conf

    if config.get('enable_prometheus_metrics'):
        new_ports.add(nginx.METRICS_PORT)

    hookenv.log("Desired opened ports: {}".format(new_ports))
    for port in new_ports.difference(old_ports):
        hookenv.log("Opening new port: {}".format(port))
        hookenv.open_port(port)
    for obsolete_port in old_ports.difference(new_ports):
        hookenv.log("Closing obsolete port: {}".format(obsolete_port))
        hookenv.close_port(obsolete_port)

    # Reuse the existing monitoring password if one is set, else generate one.
    monitoring_password = haproxy.monitoring_password
    if not monitoring_password:
        monitoring_password = host.pwgen(length=20)
    num_procs = config.get('haproxy_processes')
    num_threads = config.get('haproxy_threads')
    tls_cipher_suites = config.get('tls_cipher_suites')
    rendered_config = haproxy.render(new_conf, num_procs, num_threads, monitoring_password, tls_cipher_suites)
    if haproxy.write(rendered_config):
        haproxy.save_server_state()
        reactive.set_flag('content_cache.haproxy.reload-required')
        reactive.clear_flag('content_cache.sysctl.configured')

    update_logrotate('haproxy', retention=config.get('log_retention'))
    reactive.set_flag('content_cache.haproxy.configured')
@reactive.when('content_cache.nginx.configured', 'content_cache.haproxy.configured')
@reactive.when('nrpe-external-master.available')
@reactive.when_not('nagios-nrpe.configured')
def configure_nagios():
    """Register NRPE checks for every site at each layer.

    Per location: a frontend (HAProxy listen) check, a cache-layer (Nginx)
    check, and — when a backend port exists — a backend proxy check. Also
    adds a process-count check for HAProxy.
    """
    status.maintenance('setting up NRPE checks')
    reactive.clear_flag('content_cache.active')

    config = hookenv.config()

    # Use charmhelpers.contrib.charmsupport's nrpe to determine hostname
    hostname = nrpe.get_nagios_hostname()
    nrpe_setup = nrpe.NRPE(hostname=hostname, primary=True)

    sites_secrets = secrets_from_config(config.get('sites_secrets'))
    blacklist_ports = [int(x.strip()) for x in config.get('blacklist_ports', '').split(',') if x.strip()]
    sites = sites_from_config(config.get('sites'), sites_secrets, blacklist_ports=blacklist_ports)

    for site, site_conf in sites.items():
        site_name = site_conf.get('site-name', site)
        cache_port = site_conf['cache_port']

        default_port = 80
        tls_cert_bundle_path = site_conf.get('tls-cert-bundle-path')
        tls = ''
        if tls_cert_bundle_path:
            default_port = 443
            tls = ' --ssl=1.2 --sni'

        frontend_port = site_conf.get('port') or default_port

        for location, loc_conf in site_conf.get('locations', {}).items():
            backend_port = loc_conf.get('backend_port')
            method = loc_conf.get('backend-check-method', 'HEAD')
            path = loc_conf.get('backend-check-path', location)
            token = ''
            signed_url_hmac_key = loc_conf.get('signed-url-hmac-key')
            if signed_url_hmac_key:
                # Use a never-expiring token so the check doesn't start
                # failing once a short-lived token would have lapsed.
                expiry_time = utils.never_expires_time()
                token = '?token={}'.format(utils.generate_token(signed_url_hmac_key, path, expiry_time))

            nagios_name = '{}-{}'.format(site, location)

            # Listen / frontend check
            check_name = utils.generate_nagios_check_name(nagios_name, 'site', 'listen')
            cmd = (
                '/usr/lib/nagios/plugins/check_http -I 127.0.0.1 -H {site_name}'
                ' -p {port}{tls} -j {method} -u {path}{token}'.format(
                    site_name=site_name, port=frontend_port, method=method, path=path, token=token, tls=tls
                )
            )
            if 'nagios-expect' in loc_conf:
                cmd = '{cmd} --expect="{expected}"'.format(cmd=cmd, expected=loc_conf['nagios-expect'])
            nrpe_setup.add_check(shortname=check_name, description='{} site listen check'.format(site), check_cmd=cmd)

            # Cache layer check
            check_name = utils.generate_nagios_check_name(nagios_name, 'site', 'cache')
            cmd = (
                '/usr/lib/nagios/plugins/check_http -I 127.0.0.1 -H {site_name}'
                ' -p {cache_port} -j {method} -u {path}{token}'.format(
                    site_name=site_name, cache_port=cache_port, method=method, path=path, token=token
                )
            )
            if 'nagios-expect' in loc_conf:
                cmd = '{cmd} --expect="{expected}"'.format(cmd=cmd, expected=loc_conf['nagios-expect'])
            nrpe_setup.add_check(shortname=check_name, description='{} cache check'.format(site), check_cmd=cmd)

            if backend_port:
                # Backend proxy layer check; no token needs to be passed here as it's
                # stripped by the cache layer.
                check_name = utils.generate_nagios_check_name(nagios_name, 'site', 'backend_proxy')
                cmd = (
                    '/usr/lib/nagios/plugins/check_http -I 127.0.0.1 -H {site_name} -p {backend_port}'
                    ' -j {method} -u {path}'.format(
                        site_name=site_name, backend_port=backend_port, method=method, path=path
                    )
                )
                nrpe_setup.add_check(
                    shortname=check_name, description='{} backend proxy check'.format(site), check_cmd=cmd
                )

    # Ensure we don't have lingering HAProxy processes around - LP:1828496
    num_procs = config.get('haproxy_processes', 0) + 2
    check_name = 'haproxy_procs'
    description = 'HAProxy process count'
    cmd = '/usr/lib/nagios/plugins/check_procs -c{} -w{} -C haproxy'.format(num_procs, num_procs)
    nrpe_setup.add_check(shortname=check_name, description=description, check_cmd=cmd)

    nrpe_setup.write()
    reactive.set_flag('nagios-nrpe.configured')
# This /proc path only exists on kernels that support configuring the default
# qdisc; its presence gates the 'fq' setting in configure_sysctl().
_SYSCTL_CORE_DEFAULT_QDISC = '/proc/sys/net/core/default_qdisc'
@reactive.when_not('content_cache.sysctl.configured')
def configure_sysctl():
    """Render the sysctl tuning template and apply it only when it changed.

    Writes SYSCTL_CONF_PATH and runs 'sysctl -p' on it only if the rendered
    content differs from what is currently on disk, then sets the
    'content_cache.sysctl.configured' flag.
    """
    config = hookenv.config()

    # Named 'template_context' (not 'context') so we don't shadow the
    # module-level 'from charmhelpers import context' import.
    template_context = {
        'net_core_default_qdisc': None,
        'net_ipv4_tcp_congestion_control': None,
    }

    if os.path.exists(_SYSCTL_CORE_DEFAULT_QDISC):
        template_context['net_core_default_qdisc'] = 'fq'

    preferred_tcp_cc = ['bbr2', 'bbr']
    template_context['net_ipv4_tcp_congestion_control'] = utils.select_tcp_congestion_control(preferred_tcp_cc)
    template_context['net_ipv4_tcp_mem'] = utils.tune_tcp_mem(config['tune_tcp_mem_multiplier'])

    # Set or lower tcp_notsent_lowat to optimise HTTP/2 prioritisation.
    # https://blog.cloudflare.com/http-2-prioritization-with-nginx/
    template_context['net_ipv4_tcp_notsent_lowat'] = '16384'

    base = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
    env = jinja2.Environment(loader=jinja2.FileSystemLoader(base))
    template = env.get_template('templates/sysctl_conf.tmpl')
    content = template.render(template_context)
    try:
        with open(SYSCTL_CONF_PATH, 'r', encoding='utf-8') as f:
            current = f.read()
    except FileNotFoundError:
        current = ''
    if content != current:
        with open(SYSCTL_CONF_PATH, 'w', encoding='utf-8') as f:
            f.write(content)
        subprocess.call(['sysctl', '-p', SYSCTL_CONF_PATH])
    reactive.set_flag('content_cache.sysctl.configured')
@reactive.when('content_cache.haproxy.configured')
@reactive.when('haproxy-statistics.available')
def advertise_stats_endpoint():
    """Publish the local HAProxy statistics endpoint on the stats relation."""
    password = HAProxy.HAProxyConf().monitoring_password
    relations = context.Relations()
    for relation in relations['haproxy-statistics'].values():
        relation.local['enabled'] = "True"
        relation.local['listener-address'] = "127.0.0.1"
        relation.local['port'] = "10000"
        relation.local['user'] = "haproxy"
        relation.local['password'] = password
@reactive.when('haproxy-statistics.available')
@reactive.when('nrpe-external-master.available')
@reactive.when_not('nagios-nrpe-telegraf.configured')
def check_haproxy_alerts():
    """Add an NRPE check confirming haproxy metrics are exported via telegraf."""
    hostname = nrpe.get_nagios_hostname()
    nrpe_setup = nrpe.NRPE(hostname=hostname, primary=True)
    # Because check_http is really inefficient, the parsing of the metrics is quite slow
    # hence increasing the timeout to 20 seconds
    cmd = '/usr/lib/nagios/plugins/check_http -I 127.0.0.1 -p 9103 -u /metrics -r "haproxy_rate" -t 20'
    nrpe_setup.add_check(
        shortname='haproxy_telegraf_metrics',
        description='Verify haproxy metrics are visible via telegraf subordinate',
        check_cmd=cmd,
    )
    nrpe_setup.write()
    reactive.set_flag('nagios-nrpe-telegraf.configured')
def cleanout_sites(site_ports_map, sites):
    """Return a copy of *site_ports_map* containing only sites still in *sites*.

    Each surviving entry keeps its 'cache_port' and a shallow copy of its
    'locations' mapping.
    """
    pruned = {}
    for site, site_conf in site_ports_map.items():
        if site not in sites:
            continue
        pruned[site] = {
            'cache_port': site_conf['cache_port'],
            'locations': dict(site_conf.get('locations', {})),
        }
    return pruned
def allocated_ports(site_ports_map):
    """Return every cache and backend port currently allocated, sorted.

    :param dict site_ports_map: mapping of site -> {'cache_port': int,
        'locations': {location: {'backend_port': int, ...}}}
    :returns: sorted list of all allocated port numbers
    :rtype: list
    """
    # Local renamed from 'allocated_ports' so it no longer shadows this
    # function's own name; iterate values since the keys aren't used.
    ports = []
    for site_conf in site_ports_map.values():
        ports.append(site_conf['cache_port'])
        for loc_conf in site_conf.get('locations', {}).values():
            if 'backend_port' in loc_conf:
                ports.append(loc_conf['backend_port'])
    return sorted(ports)
def ports_map_lookup(ports_map, site, base_port, blacklist_ports=None, key=None):
    """Resolve a port for a site from the saved ports map, or allocate anew.

    With *key* set, this resolves the backend port for that location;
    otherwise it resolves the site's cache port. A candidate port is always
    allocated first and used as the fallback when no saved entry exists.
    """
    if key:
        (_unused, port) = utils.next_port_pair(0, base_port, blacklist_ports=blacklist_ports)
    else:
        (port, _unused) = utils.next_port_pair(base_port, 0, blacklist_ports=blacklist_ports)

    if site not in ports_map:
        return port

    if not key:
        return ports_map[site].get('cache_port', port)

    locations = ports_map[site].get('locations', {})
    if key not in locations:
        return port
    return locations[key].get('backend_port', port)
def sites_from_config(sites_yaml, sites_secrets=None, blacklist_ports=None):
    """Parse the 'sites' charm config into per-site dicts with ports assigned.

    Allocates (or reuses, via the 'existing_site_ports_map' kv entry) a
    cache port per site and a backend port per location that has backends,
    then persists the updated port map back to kv for the next run.

    :param str sites_yaml: YAML string from the 'sites' charm config
    :param dict sites_secrets: parsed secrets to interpolate, or None
    :param list blacklist_ports: ports that must never be allocated, or None
    :returns: mapping of site -> config dict with 'cache_port' and per-location
        'backend_port' filled in; empty/falsy site entries are dropped
    :rtype: dict
    """
    conf = yaml.safe_load(sites_yaml)
    # 'configs' is special and used to host YAML anchors so let's remove it
    conf.pop('configs', '')
    sites = interpolate_secrets(conf, sites_secrets)
    cache_port = 0
    backend_port = 0
    new_sites = {}
    existing_site_ports_map = unitdata.kv().get('existing_site_ports_map', {})
    new_site_ports_map = {}
    if not blacklist_ports:
        blacklist_ports = []

    # Never hand out a port already allocated to an existing site.
    blacklist_ports += allocated_ports(existing_site_ports_map)
    # We need to clean out sites and backends that no longer
    # exists. This should happen after we've built a list of ports to
    # blacklist to ensure that we don't reuse one for a site that's
    # being or been removed.
    existing_site_ports_map = cleanout_sites(existing_site_ports_map, sites)
    for site, site_conf in sites.items():
        if not site_conf:
            continue
        site_ports_map = {'locations': {}}
        cache_port = ports_map_lookup(existing_site_ports_map, site, cache_port, blacklist_ports)
        site_conf['cache_port'] = cache_port
        site_ports_map['cache_port'] = cache_port
        # With the new port allocated, make sure it's blacklisted so it doesn't
        # get reused later.
        blacklist_ports.append(cache_port)

        for location, loc_conf in site_conf.get('locations', {}).items():
            # Locations without backends don't need a backend port.
            if not loc_conf or not loc_conf.get('backends'):
                continue
            location_map = {}
            backend_port = ports_map_lookup(existing_site_ports_map, site, backend_port, blacklist_ports, key=location)
            loc_conf['backend_port'] = backend_port
            location_map['backend_port'] = backend_port

            # With the new port allocated, make sure it's blacklisted so it doesn't
            # get reused later.
            blacklist_ports.append(backend_port)
            site_ports_map['locations'][location] = location_map

        new_sites[site] = site_conf
        new_site_ports_map[site] = site_ports_map

    # Persist so subsequent runs reuse the same ports for unchanged sites.
    unitdata.kv().set('existing_site_ports_map', new_site_ports_map)
    return new_sites
def secrets_from_config(secrets_yaml):
    """Parse the sites_secrets YAML string into a dict.

    Returns {} for empty input, unparseable YAML, or YAML that doesn't
    describe a mapping.
    """
    if not secrets_yaml:
        return {}
    try:
        parsed = yaml.safe_load(secrets_yaml)
    except yaml.YAMLError:
        return {}
    return parsed if isinstance(parsed, dict) else {}
def interpolate_secrets(sites, secrets):
    """Return a copy of *sites* with '${secret}' markers filled in from *secrets*.

    Secrets are looked up per site and per location. The input mapping is
    not mutated; a deep copy is returned. Markers with no matching secret
    are left in place.

    :param dict sites: parsed sites configuration
    :param dict secrets: parsed sites_secrets configuration (may be falsy)
    :returns: new sites mapping with secrets interpolated
    :rtype: dict
    """
    sites = deepcopy(sites)
    for site, site_conf in sites.items():
        if not secrets or not secrets.get(site):
            continue
        for location, loc_conf in site_conf.get('locations', {}).items():
            location_secrets = secrets.get(site).get('locations').get(location)
            if not location_secrets:
                continue
            # Handle origin-headers secrets.
            origin_headers = loc_conf.get('origin-headers')
            if origin_headers:
                origin_header_secrets = location_secrets.get('origin-headers')
                loc_conf['origin-headers'] = _interpolate_secrets_origin_headers(origin_headers, origin_header_secrets)
            # Handle other location config keys.
            for k, v in loc_conf.items():
                # isinstance rather than type(v) == str so str subclasses match too.
                if isinstance(v, str) and v.strip() == '${secret}':
                    if k not in location_secrets:
                        # This will leave the secret marker in place.
                        continue
                    loc_conf[k] = location_secrets.get(k)
    return sites
670def _interpolate_secrets_origin_headers(headers, secrets):
671 headers = deepcopy(headers)
672 for header in headers:
673 for k, v in header.items():
674 if v != '${secret}':
675 continue
676 header[k] = secrets.get(k)
677 return headers
def update_logrotate(service, retention, dateext=True, **kwargs):
    """Write the logrotate config for *service* under /etc/logrotate.d.

    Extra keyword arguments are passed through to write_file().
    """
    conf_path = os.path.join('/etc/logrotate.d', service)
    content = utils.logrotate(conf_path, retention=retention, dateext=dateext)
    write_file(content, conf_path, **kwargs)
def copy_file(source_path, dest_path, **kwargs):
    """Copy a file from the charm directory onto the local filesystem.

    Reads the contents of source_path and passes through to write_file().
    Please see the help for write_file() for argument usage.
    """
    with open(source_path, 'r') as source_fh:
        contents = source_fh.read()
    return write_file(contents, dest_path, **kwargs)
def write_file(source, dest_path, perms=0o644, owner=None, group=None):
    """Write a source string to a file.

    Returns True if the file was modified (new file, file changed, file
    deleted), False if the file is not modified or is intentionally not
    created.
    """
    # Compare and only write out file on change.
    existing = ''
    if os.path.exists(dest_path):
        with open(dest_path, 'r') as dest_fh:
            existing = dest_fh.read()
    if source == existing:
        return False

    # Default ownership to the current process's user and group.
    owner = owner or pwd.getpwuid(os.getuid()).pw_name
    group = group or grp.getgrgid(os.getgid()).gr_name

    host.write_file(path=dest_path, content=source, owner=owner, group=group, perms=perms)
    return True