Merge ~hloeung/content-cache-charm:master into content-cache-charm:master

Proposed by Haw Loeung
Status: Superseded
Proposed branch: ~hloeung/content-cache-charm:master
Merge into: content-cache-charm:master
Diff against target: 406 lines (+56/-47)
8 files modified
config.yaml (+3/-3)
lib/haproxy.py (+12/-10)
reactive/content_cache.py (+0/-2)
templates/haproxy_cfg.tmpl (+11/-1)
tests/unit/files/content_cache_rendered_haproxy_test_output.txt (+10/-14)
tests/unit/files/haproxy_config_rendered_listen_stanzas_test_output.txt (+0/-3)
tests/unit/files/haproxy_config_rendered_test_output.txt (+10/-4)
tests/unit/test_content_cache.py (+10/-10)
Reviewer Review Type Date Requested Status
Canonical IS Reviewers Pending
Content Cache Charmers Pending
Review via email: mp+380402@code.launchpad.net

This proposal has been superseded by a proposal from 2020-03-10.

Commit message

Increase HAProxy SSL/TLS session cache

Default cache size is 20k. We want to improve user experience by
reducing TTFB. Obviously this applies to visitors revisiting sites.

Rather than hardcode high values here, let's just dynamically
calculate this using global_max_connections (which is a combination of
num. of cores and num. of configured sites). Each entry requires ~200
bytes so on a host with say 32 CPUs, 10 sites configured, each with
the default 2000 maxconns, it will only consume around 122 Mbytes,
which is not much!

To post a comment you must log in.
Revision history for this message
🤖 Canonical IS Merge Bot (canonical-is-mergebot) wrote :

This merge proposal is being monitored by mergebot. Change the status to Approved to merge.

Revision history for this message
🤖 Canonical IS Merge Bot (canonical-is-mergebot) wrote :

Unable to determine commit message from repository - please click "Set commit message" and enter the commit message manually.

Unmerged commits

b046595... by Haw Loeung

Increase HAProxy SSL/TLS session cache

Default cache size is 20k. We want to improve user experience by
reducing TTFB. Obviously this applies to visitors revisiting sites.

Rather than hardcode high values here, let's just dynamically
calculate this using global_max_connections (which is a combination of
num. of cores and num. of configured sites). Each entry requires ~200
bytes so on a host with say 32 CPUs, 10 sites configured, each with
the default 2000 maxconns, it will only consume around 122 Mbytes,
which is not much!

Preview Diff

[H/L] Next/Prev Comment, [J/K] Next/Prev File, [N/P] Next/Prev Hunk
1diff --git a/config.yaml b/config.yaml
2index d6bb918..4b5d99b 100644
3--- a/config.yaml
4+++ b/config.yaml
5@@ -38,10 +38,10 @@ options:
6 Number of log files to retain during rotation.
7 max_connections:
8 type: int
9- default: 8192
10+ default: 0
11 description: >
12- Configure maximum number of connections on frontend HAProxy. (8192 for now,
13- the default will automatically be calculated in the future)
14+ Configure maximum number of connections per site on frontend
15+ HAProxy. Defaults to auto-calculate (0).
16 nagios_context:
17 type: string
18 default: "juju"
19diff --git a/lib/haproxy.py b/lib/haproxy.py
20index 6c70fc4..e3a4d38 100644
21--- a/lib/haproxy.py
22+++ b/lib/haproxy.py
23@@ -14,9 +14,9 @@ TLS_CIPHER_SUITES = 'ECDHE+AESGCM:ECDHE+AES256:ECDHE+AES128:!SSLv3:!TLSv1'
24
25
26 class HAProxyConf:
27- def __init__(self, conf_path=HAPROXY_BASE_PATH, max_connections=2000):
28+ def __init__(self, conf_path=HAPROXY_BASE_PATH, max_connections=0):
29 self._conf_path = conf_path
30- self.max_connections = max_connections
31+ self.max_connections = int(max_connections)
32
33 @property
34 def conf_path(self):
35@@ -99,7 +99,6 @@ class HAProxyConf:
36 listen_stanza = """
37 listen {name}
38 {bind_config}
39- maxconn {max_connections}
40 {backend_config}"""
41 backend_conf = '{indent}use_backend backend-{backend} if {{ hdr(Host) -i {site_name} }}\n'
42 redirect_conf = '{indent}redirect scheme https code 301 if {{ hdr(Host) -i {site_name} }} !{{ ssl_fc }}\n'
43@@ -153,11 +152,7 @@ listen {name}
44 if address == '0.0.0.0':
45 bind_config += '\n{indent}bind :::{port}{tls}'.format(port=port, tls=tls_config, indent=INDENT)
46 output = listen_stanza.format(
47- name=name,
48- max_connections=self.max_connections,
49- backend_config=''.join(backend_config),
50- bind_config=bind_config,
51- indent=INDENT,
52+ name=name, backend_config=''.join(backend_config), bind_config=bind_config, indent=INDENT,
53 )
54 rendered_output.append(output)
55 return rendered_output
56@@ -250,18 +245,25 @@ backend backend-{name}
57 def render(self, config, num_threads=None, monitoring_password=None, tls_cipher_suites=None):
58 if not num_threads:
59 num_threads = multiprocessing.cpu_count()
60+ if self.max_connections:
61+ max_connections = self.max_connections
62+ else:
63+ max_connections = num_threads * 2000
64 if not tls_cipher_suites:
65 tls_cipher_suites = TLS_CIPHER_SUITES
66 tls_cipher_suites = utils.tls_cipher_suites(tls_cipher_suites)
67
68+ listen_stanzas = self.render_stanza_listen(config)
69+
70 base = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
71 env = jinja2.Environment(loader=jinja2.FileSystemLoader(base))
72 template = env.get_template('templates/haproxy_cfg.tmpl')
73 return template.render(
74 {
75 'backend': self.render_stanza_backend(config),
76- 'listen': self.render_stanza_listen(config),
77- 'max_connections': self.max_connections,
78+ 'global_max_connections': max_connections * len(listen_stanzas),
79+ 'listen': listen_stanzas,
80+ 'max_connections': max_connections,
81 'monitoring_password': monitoring_password or self.monitoring_password,
82 'num_threads': num_threads,
83 'tls_cipher_suites': tls_cipher_suites,
84diff --git a/reactive/content_cache.py b/reactive/content_cache.py
85index ea8cf20..ab83dae 100644
86--- a/reactive/content_cache.py
87+++ b/reactive/content_cache.py
88@@ -204,8 +204,6 @@ def configure_haproxy(): # NOQA: C901 LP#1825084
89 status.blocked('requires list of sites to configure')
90 return
91
92- # TODO: Calculate max connections if none specified. Likely use configured
93- # nbthreads (2000 * nbthreads). Or maybe even per site.
94 haproxy = HAProxy.HAProxyConf(max_connections=config['max_connections'])
95 sites_secrets = secrets_from_config(config.get('sites_secrets'))
96 blacklist_ports = [int(x.strip()) for x in config.get('blacklist_ports', '').split(',') if x.strip()]
97diff --git a/templates/haproxy_cfg.tmpl b/templates/haproxy_cfg.tmpl
98index a4ec3be..f2665dc 100644
99--- a/templates/haproxy_cfg.tmpl
100+++ b/templates/haproxy_cfg.tmpl
101@@ -1,6 +1,6 @@
102 global
103 nbthread {{num_threads}}
104- maxconn {{max_connections}}
105+ maxconn {{global_max_connections}}
106 log /dev/log local0
107 log /dev/log local1 notice
108 chroot /var/lib/haproxy
109@@ -24,8 +24,17 @@ global
110 # We'll eventually disable DHE (LP#1825321), but for now, bump DH params
111 tune.ssl.default-dh-param 2048
112
113+ # Increase the SSL/TLS session cache from the default 20k. But
114+ # rather than hardcode values, let's just dynamically calculate it
115+ # and using global_max_connections. Each entry requires ~200 bytes
116+ # so on a host with say 32 CPUs, 10 sites, each with 2000 max
117+ # conns will only consume around 122 Mbytes (32 * 10 * 2000 *
118+ # 200), which is not much.
119+ tune.ssl.cachesize {{global_max_connections}}
120+
121 defaults
122 log global
123+ maxconn {{max_connections}}
124 mode http
125 option httplog
126 option dontlognull
127@@ -59,3 +68,4 @@ listen stats
128 {% for stanza in backend -%}
129 {{stanza}}
130 {%- endfor -%}
131+
132diff --git a/tests/unit/files/content_cache_rendered_haproxy_test_output.txt b/tests/unit/files/content_cache_rendered_haproxy_test_output.txt
133index b715f9d..51ef6a4 100644
134--- a/tests/unit/files/content_cache_rendered_haproxy_test_output.txt
135+++ b/tests/unit/files/content_cache_rendered_haproxy_test_output.txt
136@@ -1,6 +1,6 @@
137 global
138 nbthread 4
139- maxconn 8192
140+ maxconn 104000
141 log /dev/log local0
142 log /dev/log local1 notice
143 chroot /var/lib/haproxy
144@@ -24,8 +24,17 @@ global
145 # We'll eventually disable DHE (LP#1825321), but for now, bump DH params
146 tune.ssl.default-dh-param 2048
147
148+ # Increase the SSL/TLS session cache from the default 20k. But
149+ # rather than hardcode values, let's just dynamically calculate it
150+ # and using global_max_connections. Each entry requires ~200 bytes
151+ # so on a host with say 32 CPUs, 10 sites, each with 2000 max
152+ # conns will only consume around 122 Mbytes (32 * 10 * 2000 *
153+ # 200), which is not much.
154+ tune.ssl.cachesize 104000
155+
156 defaults
157 log global
158+ maxconn 8000
159 mode http
160 option httplog
161 option dontlognull
162@@ -56,7 +65,6 @@ listen stats
163 listen combined-80
164 bind 0.0.0.0:80
165 bind :::80
166- maxconn 8192
167 use_backend backend-cached-site1-local if { hdr(Host) -i site1.local }
168 redirect scheme https code 301 if { hdr(Host) -i site2.local } !{ ssl_fc }
169 use_backend backend-cached-site3-local if { hdr(Host) -i site3.local }
170@@ -67,65 +75,53 @@ listen combined-80
171
172 listen site1-local
173 bind 127.0.0.1:8080
174- maxconn 8192
175 default_backend backend-site1-local
176
177 listen cached-site2-local
178 bind 0.0.0.0:443 ssl crt /etc/haproxy/site2-bundle.crt
179 bind :::443 ssl crt /etc/haproxy/site2-bundle.crt
180- maxconn 8192
181 default_backend backend-cached-site2-local
182
183 listen site2-local
184 bind 127.0.0.1:8081
185- maxconn 8192
186 default_backend backend-site2-local
187
188 listen site3-local
189 bind 127.0.0.1:8082
190- maxconn 8192
191 default_backend backend-site3-local
192
193 listen site5
194 bind 127.0.0.1:8083
195- maxconn 8192
196 default_backend backend-site5
197
198 listen site5-2
199 bind 127.0.0.1:8084
200- maxconn 8192
201 default_backend backend-site5
202
203 listen site6-local
204 bind 127.0.0.1:8085
205- maxconn 8192
206 default_backend backend-site6-local
207
208 listen combined-444
209 bind 0.0.0.0:444 ssl crt /etc/haproxy/site7-bundle.crt crt /etc/haproxy/site8-bundle.crt
210 bind :::444 ssl crt /etc/haproxy/site7-bundle.crt crt /etc/haproxy/site8-bundle.crt
211- maxconn 8192
212 use_backend backend-cached-site7-local if { hdr(Host) -i site7.local }
213 use_backend backend-cached-site8-local if { hdr(Host) -i site8.local }
214
215 listen site7-local
216 bind 127.0.0.1:8086
217- maxconn 8192
218 default_backend backend-site7-local
219
220 listen site8-local
221 bind 127.0.0.1:8087
222- maxconn 8192
223 default_backend backend-site8-local
224
225 listen site8-local-2
226 bind 127.0.0.1:8088
227- maxconn 8192
228 default_backend backend-site8-local
229
230 listen site9-local
231 bind 127.0.0.1:8089
232- maxconn 8192
233 default_backend backend-site9-local
234
235 backend backend-cached-site1-local
236diff --git a/tests/unit/files/haproxy_config_rendered_listen_stanzas_test_output.txt b/tests/unit/files/haproxy_config_rendered_listen_stanzas_test_output.txt
237index 57884f0..e98c5fe 100644
238--- a/tests/unit/files/haproxy_config_rendered_listen_stanzas_test_output.txt
239+++ b/tests/unit/files/haproxy_config_rendered_listen_stanzas_test_output.txt
240@@ -2,7 +2,6 @@
241 listen combined-80
242 bind 0.0.0.0:80
243 bind :::80
244- maxconn 8192
245 use_backend backend-site1-local if { hdr(Host) -i site1.local }
246 redirect scheme https code 301 if { hdr(Host) -i site2.local } !{ ssl_fc }
247 use_backend backend-site3-local if { hdr(Host) -i site3.local }
248@@ -14,12 +13,10 @@ listen combined-80
249 listen site2-local
250 bind 0.0.0.0:443 ssl crt /etc/haproxy/site2-bundle.crt
251 bind :::443 ssl crt /etc/haproxy/site2-bundle.crt
252- maxconn 8192
253 default_backend backend-site2-local
254
255 listen combined-444
256 bind 0.0.0.0:444 ssl crt /etc/haproxy/site7-bundle.crt crt /etc/haproxy/site8-bundle.crt
257 bind :::444 ssl crt /etc/haproxy/site7-bundle.crt crt /etc/haproxy/site8-bundle.crt
258- maxconn 8192
259 use_backend backend-site7-local if { hdr(Host) -i site7.local }
260 use_backend backend-site8-local if { hdr(Host) -i site8.local }
261diff --git a/tests/unit/files/haproxy_config_rendered_test_output.txt b/tests/unit/files/haproxy_config_rendered_test_output.txt
262index e3132ec..554ac26 100644
263--- a/tests/unit/files/haproxy_config_rendered_test_output.txt
264+++ b/tests/unit/files/haproxy_config_rendered_test_output.txt
265@@ -1,6 +1,6 @@
266 global
267 nbthread 4
268- maxconn 8192
269+ maxconn 24576
270 log /dev/log local0
271 log /dev/log local1 notice
272 chroot /var/lib/haproxy
273@@ -24,8 +24,17 @@ global
274 # We'll eventually disable DHE (LP#1825321), but for now, bump DH params
275 tune.ssl.default-dh-param 2048
276
277+ # Increase the SSL/TLS session cache from the default 20k. But
278+ # rather than hardcode values, let's just dynamically calculate it
279+ # and using global_max_connections. Each entry requires ~200 bytes
280+ # so on a host with say 32 CPUs, 10 sites, each with 2000 max
281+ # conns will only consume around 122 Mbytes (32 * 10 * 2000 *
282+ # 200), which is not much.
283+ tune.ssl.cachesize 24576
284+
285 defaults
286 log global
287+ maxconn 8192
288 mode http
289 option httplog
290 option dontlognull
291@@ -56,7 +65,6 @@ listen stats
292 listen combined-80
293 bind 0.0.0.0:80
294 bind :::80
295- maxconn 8192
296 use_backend backend-site1-local if { hdr(Host) -i site1.local }
297 redirect scheme https code 301 if { hdr(Host) -i site2.local } !{ ssl_fc }
298 use_backend backend-site3-local if { hdr(Host) -i site3.local }
299@@ -68,13 +76,11 @@ listen combined-80
300 listen site2-local
301 bind 0.0.0.0:443 ssl crt /etc/haproxy/site2-bundle.crt
302 bind :::443 ssl crt /etc/haproxy/site2-bundle.crt
303- maxconn 8192
304 default_backend backend-site2-local
305
306 listen combined-444
307 bind 0.0.0.0:444 ssl crt /etc/haproxy/site7-bundle.crt crt /etc/haproxy/site8-bundle.crt
308 bind :::444 ssl crt /etc/haproxy/site7-bundle.crt crt /etc/haproxy/site8-bundle.crt
309- maxconn 8192
310 use_backend backend-site7-local if { hdr(Host) -i site7.local }
311 use_backend backend-site8-local if { hdr(Host) -i site8.local }
312
313diff --git a/tests/unit/test_content_cache.py b/tests/unit/test_content_cache.py
314index 9e1de72..1946fe4 100644
315--- a/tests/unit/test_content_cache.py
316+++ b/tests/unit/test_content_cache.py
317@@ -176,7 +176,7 @@ class TestCharm(unittest.TestCase):
318 'cache_max_size': '1g',
319 'cache_path': '/var/lib/nginx/proxy',
320 'enable_prometheus_metrics': False,
321- 'max_connections': 8192,
322+ 'max_connections': 0,
323 'sites': ngx_config,
324 'worker_connections': 768,
325 'worker_processes': 0,
326@@ -255,7 +255,7 @@ site1.local:
327 'cache_inactive_time': '',
328 'cache_max_size': '1g',
329 'cache_path': '/var/lib/nginx/proxy',
330- 'max_connections': 8192,
331+ 'max_connections': 0,
332 'sites': config,
333 'sites_secrets': secrets,
334 'worker_connections': 768,
335@@ -308,7 +308,7 @@ site1.local:
336 'cache_inactive_time': '2h',
337 'cache_max_size': '1g',
338 'cache_path': '/var/lib/nginx/proxy',
339- 'max_connections': 8192,
340+ 'max_connections': 0,
341 'sites': config,
342 'worker_connections': 768,
343 'worker_processes': 0,
344@@ -326,7 +326,7 @@ site1.local:
345 'cache_inactive_time': '2h',
346 'cache_max_size': '20g',
347 'cache_path': '/srv/cache',
348- 'max_connections': 8192,
349+ 'max_connections': 0,
350 'sites': config,
351 'worker_connections': 768,
352 'worker_processes': 0,
353@@ -345,7 +345,7 @@ site1.local:
354 'cache_inactive_time': '2h',
355 'cache_max_size': '',
356 'cache_path': '/srv/cache',
357- 'max_connections': 8192,
358+ 'max_connections': 0,
359 'sites': config,
360 'worker_connections': 768,
361 'worker_processes': 0,
362@@ -368,7 +368,7 @@ site1.local:
363
364 status.reset_mock()
365 clear_flag.reset_mock()
366- self.mock_config.return_value = {'max_connections': 8192, 'sites': 'site1:'}
367+ self.mock_config.return_value = {'max_connections': 0, 'sites': 'site1:'}
368 content_cache.configure_haproxy()
369 status.blocked.assert_called()
370 clear_flag.assert_called_once_with('content_cache.active')
371@@ -379,7 +379,7 @@ site1.local:
372 def test_configure_haproxy_sites(self, logrotation, set_flag):
373 with open('tests/unit/files/config_test_config.txt', 'r', encoding='utf-8') as f:
374 ngx_config = f.read()
375- self.mock_config.return_value = {'max_connections': 8192, 'sites': ngx_config}
376+ self.mock_config.return_value = {'max_connections': 0, 'sites': ngx_config}
377
378 with mock.patch('lib.haproxy.HAProxyConf.conf_file', new_callable=mock.PropertyMock) as mock_conf_file:
379 mock_conf_file.return_value = os.path.join(self.tmpdir, 'haproxy.cfg')
380@@ -984,7 +984,7 @@ site1.local:
381 'cache_max_size': '1g',
382 'cache_path': '/var/lib/nginx/proxy',
383 'enable_prometheus_metrics': True,
384- 'max_connections': 8192,
385+ 'max_connections': 0,
386 'sites': ngx_config,
387 'worker_connections': 768,
388 'worker_processes': 0,
389@@ -1061,7 +1061,7 @@ site1.local:
390 # Test that haproxy calls close_port with the nginx.METRIC_PORT when enable_prometheus_metrics is False
391 self.mock_config.return_value = {
392 'enable_prometheus_metrics': False,
393- 'max_connections': 8192,
394+ 'max_connections': 0,
395 'sites': ngx_config,
396 }
397 opened_ports.return_value = {"80/tcp", "{0}/tcp".format(nginx.METRICS_PORT)}
398@@ -1073,7 +1073,7 @@ site1.local:
399 open_port.reset_mock()
400 self.mock_config.return_value = {
401 'enable_prometheus_metrics': True,
402- 'max_connections': 8192,
403+ 'max_connections': 0,
404 'sites': ngx_config,
405 }
406 content_cache.configure_haproxy()

Subscribers

People subscribed via source and target branches