Merge lp:~stub/charms/precise/postgresql/fix-races into lp:charms/postgresql

Proposed by Stuart Bishop
Status: Merged
Approved by: Mark Mims
Approved revision: 75
Merged at revision: 63
Proposed branch: lp:~stub/charms/precise/postgresql/fix-races
Merge into: lp:charms/postgresql
Prerequisite: lp:~stub/charms/precise/postgresql/cleanups
Diff against target: 650 lines (+247/-131)
3 files modified
config.yaml (+7/-0)
hooks/hooks.py (+208/-109)
test.py (+32/-22)
To merge this branch: bzr merge lp:~stub/charms/precise/postgresql/fix-races
Reviewer: Mark Mims (community)
Status: Approve
Review via email: mp+181740@code.launchpad.net

Description of the change

The new local provider is quite fast, and exposes race conditions in the PostgreSQL charm.

This branch reworks the replication peer relationship so the test suite runs more reliably. For example, master election has been rewritten to cope with situations where, when creating a new service of three units, units 1 and 2 may already have elected a master between themselves before unit 0 joins the relation, so the old assumption that the lowest numbered unit in a new replication peer relation is the master no longer holds.
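
For reference, a condensed sketch of the reworked election logic (simplified from hooks/hooks.py in the diff below; the function name here is illustrative, and unit_sorted() and the hookenv helpers come from the charm and charmhelpers):

    def elect_master_sketch(replication_relid):
        local = hookenv.local_unit()
        peers = hookenv.related_units(replication_relid)

        # Believe any peer that already claims to be the master, rather
        # than assuming the lowest numbered unit must have won.
        for unit in peers:
            if hookenv.relation_get('state', unit, replication_relid) == 'master':
                return unit

        # No master declared yet. In a brand new peer group the lowest
        # numbered unit wins, but only that unit declares victory; the
        # others return None and wait for its announcement.
        master = unit_sorted(peers + [local])[0]
        return master if master == local else None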

75. By Stuart Bishop

Merged cleanups into fix-races.

Mark Mims (mark-mims):
review: Approve

Preview Diff

1=== modified file 'config.yaml'
2--- config.yaml 2013-05-27 14:37:19 +0000
3+++ config.yaml 2013-08-23 09:40:16 +0000
4@@ -313,3 +313,10 @@
5 type: string
6 description: |
7 Extra archives to add with add-apt-repository(1).
8+ advisory_lock_restart_key:
9+ default: 765
10+ type: int
11+ description: |
12+ An advisory lock key used internally by the charm. You do not need
13+ to change it unless it happens to conflict with an advisory lock key
14+ being used by your applications.
15
16=== modified file 'hooks/hooks.py'
17--- hooks/hooks.py 2013-08-23 09:40:15 +0000
18+++ hooks/hooks.py 2013-08-23 09:40:16 +0000
19@@ -20,7 +20,7 @@
20
21 from charmhelpers.core import hookenv, host
22 from charmhelpers.core.hookenv import (
23- CRITICAL, ERROR, WARNING, INFO, DEBUG, log,
24+ CRITICAL, ERROR, WARNING, INFO, DEBUG,
25 )
26
27 hooks = hookenv.Hooks()
28@@ -28,16 +28,24 @@
29 # jinja2 may not be importable until the install hook has installed the
30 # required packages.
31 def Template(*args, **kw):
32+ """jinja2.Template with deferred jinja2 import"""
33 from jinja2 import Template
34 return Template(*args, **kw)
35
36
37 def log(msg, lvl=INFO):
38- # Per Bug #1208787, log messages sent via juju-log are being lost.
39- # Spit messages out to a log file to work around the problem.
40+ '''Log a message.
41+
42+ Per Bug #1208787, log messages sent via juju-log are being lost.
43+ Spit messages out to a log file to work around the problem.
44+ It is also rather nice to have the log messages we explicitly emit
45+ in a separate log file, rather than just mashed up with all the
46+ juju noise.
47+ '''
48 myname = hookenv.local_unit().replace('/', '-')
49- with open('/tmp/{}-debug.log'.format(myname), 'a') as f:
50- f.write('{}: {}\n'.format(lvl, msg))
51+ ts = time.strftime('%Y-%m-%d %H:%M:%S', time.gmtime())
52+ with open('/var/log/juju/{}-debug.log'.format(myname), 'a') as f:
53+ f.write('{} {}: {}\n'.format(ts, lvl, msg))
54 hookenv.log(msg, lvl)
55
56
57@@ -49,6 +57,7 @@
58 self.load()
59
60 def load(self):
61+ '''Load stored state from local disk.'''
62 if os.path.exists(self._state_file):
63 state = pickle.load(open(self._state_file, 'rb'))
64 else:
65@@ -58,6 +67,7 @@
66 self.update(state)
67
68 def save(self):
69+ '''Store state to local disk.'''
70 state = {}
71 state.update(self)
72 pickle.dump(state, open(self._state_file, 'wb'))
73@@ -181,13 +191,13 @@
74
75
76 def postgresql_autostart(enabled):
77+ startup_file = os.path.join(postgresql_config_dir, 'start.conf')
78 if enabled:
79 log("Enabling PostgreSQL startup in {}".format(startup_file))
80 mode = 'auto'
81 else:
82 log("Disabling PostgreSQL startup in {}".format(startup_file))
83 mode = 'manual'
84- startup_file = os.path.join(postgresql_config_dir, 'start.conf')
85 contents = Template(open("templates/start_conf.tmpl").read()).render(
86 {'mode': mode})
87 host.write_file(
88@@ -209,6 +219,7 @@
89
90
91 def postgresql_is_running():
92+ '''Return true if PostgreSQL is running.'''
93 # init script always return true (9.1), add extra check to make it useful
94 status, output = commands.getstatusoutput("invoke-rc.d postgresql status")
95 if status != 0:
96@@ -219,72 +230,65 @@
97
98
99 def postgresql_stop():
100- host.service_stop('postgresql')
101- return not postgresql_is_running()
102+ '''Shutdown PostgreSQL.'''
103+ success = host.service_stop('postgresql')
104+ return not (success and postgresql_is_running())
105
106
107 def postgresql_start():
108- host.service_start('postgresql')
109- return postgresql_is_running()
110+ '''Start PostgreSQL if it is not already running.'''
111+ success = host.service_start('postgresql')
112+ return success and postgresql_is_running()
113
114
115 def postgresql_restart():
116+ '''Restart PostgreSQL, or start it if it is not already running.'''
117 if postgresql_is_running():
118- # If the database is in backup mode, we don't want to restart
119- # PostgreSQL and abort the procedure. This may be another unit being
120- # cloned, or a filesystem level backup is being made. There is no
121- # timeout here, as backups can take hours or days. Instead, keep
122- # logging so admins know wtf is going on.
123- last_warning = time.time()
124- while postgresql_is_in_backup_mode():
125- if time.time() + 120 > last_warning:
126- log("In backup mode. PostgreSQL restart blocked.", WARNING)
127- log(
128- "Run \"psql -U postgres -c 'SELECT pg_stop_backup()'\""
129- "to cancel backup mode and forcefully unblock this hook.")
130- last_warning = time.time()
131- time.sleep(5)
132-
133- return host.service_restart('postgresql')
134+ with restart_lock(hookenv.local_unit(), True):
135+ # 'service postgresql restart' fails; it only does a reload.
136+ # success = host.service_restart('postgresql')
137+ try:
138+ run('pg_ctlcluster -force {version} {cluster_name} '
139+ 'restart'.format(**config_data))
140+ success = True
141+ except subprocess.CalledProcessError as e:
142+ success = False
143 else:
144- return host.service_start('postgresql')
145+ success = host.service_start('postgresql')
146
147 # Store a copy of our known live configuration so
148 # postgresql_reload_or_restart() can make good choices.
149- if 'saved_config' in local_state:
150+ if success and 'saved_config' in local_state:
151 local_state['live_config'] = local_state['saved_config']
152 local_state.save()
153
154- return postgresql_is_running()
155+ return success and postgresql_is_running()
156
157
158 def postgresql_reload():
159+ '''Make PostgreSQL reload its configuration.'''
160 # reload returns a reliable exit status
161 status, output = commands.getstatusoutput("invoke-rc.d postgresql reload")
162 return (status == 0)
163
164
165-def postgresql_reload_or_restart():
166- """Reload PostgreSQL configuration, restarting if necessary."""
167- # Pull in current values of settings that can only be changed on
168- # server restart.
169+def requires_restart():
170+ '''Check for configuration changes requiring a restart to take effect.'''
171 if not postgresql_is_running():
172- return postgresql_restart()
173+ return True
174
175- # Suck in the config last written to postgresql.conf.
176 saved_config = local_state.get('saved_config', None)
177 if not saved_config:
178 # No record of postgresql.conf state, perhaps an upgrade.
179 # Better restart.
180- return postgresql_restart()
181+ return True
182
183- # Suck in our live config from last time we restarted.
184 live_config = local_state.setdefault('live_config', {})
185
186 # Pull in a list of PostgreSQL settings.
187 cur = db_cursor()
188 cur.execute("SELECT name, context FROM pg_settings")
189- requires_restart = False
190+ restart = False
191 for name, context in cur.fetchall():
192 live_value = live_config.get(name, None)
193 new_value = saved_config.get(name, None)
194@@ -296,23 +300,27 @@
195 if context == 'postmaster':
196 # A setting has changed that requires PostgreSQL to be
197 # restarted before it will take effect.
198- requires_restart = True
199-
200- if requires_restart:
201- # A change has been requested that requires a restart.
202- log(
203- "Configuration change requires PostgreSQL restart. Restarting.",
204+ restart = True
205+ return restart
206+
207+
208+def postgresql_reload_or_restart():
209+ """Reload PostgreSQL configuration, restarting if necessary."""
210+ if requires_restart():
211+ log("Configuration change requires PostgreSQL restart. Restarting.",
212 WARNING)
213- rc = postgresql_restart()
214+ success = postgresql_restart()
215+ if not success or requires_restart():
216+ log("Configuration changes failed to apply", WARNING)
217+ success = False
218 else:
219- log("PostgreSQL reload, config changes taking effect.", DEBUG)
220- rc = postgresql_reload() # No pending need to bounce, just reload.
221+ success = host.service_reload('postgresql')
222
223- if rc == 0 and 'saved_config' in local_state:
224- local_state['live_config'] = local_state['saved_config']
225+ if success:
226+ local_state['saved_config'] = local_state['live_config']
227 local_state.save()
228
229- return rc
230+ return success
231
232
233 def get_service_port(postgresql_config):
234@@ -344,8 +352,6 @@
235 config_data["shared_buffers"] = \
236 "%sMB" % (int(int(total_ram) * 0.15),)
237 # XXX: This is very messy - should probably be a subordinate charm
238- # file overlaps with __builtin__.file ... renaming to conf_file
239- # negronjl
240 conf_file = open("/etc/sysctl.d/50-postgresql.conf", "w")
241 conf_file.write("kernel.sem = 250 32000 100 1024\n")
242 conf_file.write("kernel.shmall = %s\n" %
243@@ -579,7 +585,7 @@
244
245
246 def db_cursor(autocommit=False, db='template1', user='postgres',
247- host=None, timeout=120):
248+ host=None, timeout=30):
249 import psycopg2
250 if host:
251 conn_str = "dbname={} host={} user={}".format(db, host, user)
252@@ -855,14 +861,16 @@
253
254 @hooks.hook()
255 def start():
256- if not postgresql_restart():
257+ if not postgresql_reload_or_restart():
258 raise SystemExit(1)
259
260
261 @hooks.hook()
262 def stop():
263- if not postgresql_stop():
264- raise SystemExit(1)
265+ if postgresql_is_running():
266+ with restart_lock(hookenv.local_unit(), True):
267+ if not postgresql_stop():
268+ raise SystemExit(1)
269
270
271 def quote_identifier(identifier):
272@@ -1163,7 +1171,7 @@
273 def db_relation_broken():
274 from psycopg2.extensions import AsIs
275
276- relid = os.environ['JUJU_RELATION_ID']
277+ relid = hookenv.relation_id()
278 if relid not in local_state['relations']['db']:
279 # This was to be a hot standby, but it had not yet got as far as
280 # receiving and handling credentials from the master.
281@@ -1174,7 +1182,7 @@
282 # we used from there. Instead, we have to persist this information
283 # ourselves.
284 relation = local_state['relations']['db'][relid]
285- unit_relation_data = relation[os.environ['JUJU_UNIT_NAME']]
286+ unit_relation_data = relation[hookenv.local_unit()]
287
288 if local_state['state'] in ('master', 'standalone'):
289 user = unit_relation_data.get('user', None)
290@@ -1303,27 +1311,75 @@
291 log("I am already the master", DEBUG)
292 return hookenv.local_unit()
293
294+ if local_state['state'] == 'hot standby':
295+ log("I am already following {}".format(
296+ local_state['following']), DEBUG)
297+ return local_state['following']
298+
299+ replication_relid = hookenv.relation_ids('replication')[0]
300+ replication_units = hookenv.related_units(replication_relid)
301+
302+ if local_state['state'] == 'standalone':
303+ log("I'm a standalone unit wanting to participate in replication")
304+ existing_replication = False
305+ for unit in replication_units:
306+ # If another peer thinks it is the master, believe it.
307+ remote_state = hookenv.relation_get(
308+ 'state', unit, replication_relid)
309+ if remote_state == 'master':
310+ log("{} thinks it is the master, believing it".format(
311+ unit), DEBUG)
312+ return unit
313+
314+ # If we find a peer that isn't standalone, we know
315+ # replication has already been setup at some point.
316+ if remote_state != 'standalone':
317+ existing_replication = True
318+
319+ # If we are joining a peer relation where replication has
320+ # already been setup, but there is currently no master, wait
321+ # until one of the remaining participating units has been
322+ # promoted to master. Only they have the data we need to
323+ # preserve.
324+ if existing_replication:
325+ log("Peers participating in replication need to elect a master",
326+ DEBUG)
327+ return None
328+
329+ # There are no peers claiming to be master, and there is no
330+ # election in progress, so lowest numbered unit wins.
331+ units = replication_units + [hookenv.local_unit()]
332+ master = unit_sorted(units)[0]
333+ if master == hookenv.local_unit():
334+ log("I'm Master - lowest numbered unit in new peer group")
335+ return master
336+ else:
337+ log("Waiting on {} to declare itself Master".format(master), DEBUG)
338+ return None
339+
340 if local_state['state'] == 'failover':
341 former_master = local_state['following']
342 log("Failover from {}".format(former_master))
343
344 units_not_in_failover = set()
345- for relid in hookenv.relation_ids('replication'):
346- for unit in hookenv.related_units(relid):
347- if unit == former_master:
348- log("Found dying master {}".format(unit), DEBUG)
349- continue
350-
351- relation = hookenv.relation_get(unit=unit, rid=relid)
352-
353- if relation['state'] == 'master':
354- log(
355- "{} says it already won the election".format(unit),
356- INFO)
357- return unit
358-
359- if relation['state'] != 'failover':
360- units_not_in_failover.add(unit)
361+ candidates = set()
362+ for unit in replication_units:
363+ if unit == former_master:
364+ log("Found dying master {}".format(unit), DEBUG)
365+ continue
366+
367+ relation = hookenv.relation_get(unit=unit, rid=replication_relid)
368+
369+ if relation['state'] == 'master':
370+ log("{} says it already won the election".format(unit),
371+ INFO)
372+ return unit
373+
374+ if relation['state'] == 'failover':
375+ candidates.add(unit)
376+
377+ elif relation['state'] != 'standalone':
378+ units_not_in_failover.add(unit)
379
380 if units_not_in_failover:
381 log("{} unaware of impending election. Deferring result.".format(
382@@ -1333,35 +1389,24 @@
383 log("Election in progress")
384 winner = None
385 winning_offset = -1
386- for relid in hookenv.relation_ids('replication'):
387- candidates = set(hookenv.related_units(relid))
388- candidates.add(hookenv.local_unit())
389- candidates.discard(former_master)
390- # Sort the unit lists so we get consistent results in a tie
391- # and lowest unit number wins.
392- for unit in unit_sorted(candidates):
393- relation = hookenv.relation_get(unit=unit, rid=relid)
394- if int(relation['wal_received_offset']) > winning_offset:
395- winner = unit
396- winning_offset = int(relation['wal_received_offset'])
397+ candidates.add(hookenv.local_unit())
398+ # Sort the unit lists so we get consistent results in a tie
399+ # and lowest unit number wins.
400+ for unit in unit_sorted(candidates):
401+ relation = hookenv.relation_get(unit=unit, rid=replication_relid)
402+ if int(relation['wal_received_offset']) > winning_offset:
403+ winner = unit
404+ winning_offset = int(relation['wal_received_offset'])
405
406 # All remaining hot standbys are in failover mode and have
407 # reported their wal_received_offset. We can declare victory.
408- log("{} won the election as is the new master".format(winner))
409- return winner
410-
411- # Maybe another peer thinks it is the master?
412- for relid in hookenv.relation_ids('replication'):
413- for unit in hookenv.related_units(relid):
414- if hookenv.relation_get('state', unit, relid) == 'master':
415- return unit
416-
417- # New peer group. Lowest numbered unit will be the master.
418- for relid in hookenv.relation_ids('replication'):
419- units = hookenv.related_units(relid) + [hookenv.local_unit()]
420- master = unit_sorted(units)[0]
421- log("New peer group. {} is elected master".format(master))
422- return master
423+ if winner == hookenv.local_unit():
424+ log("I won the election, announcing myself winner")
425+ return winner
426+ else:
427+ log("Waiting for {} to announce its victory".format(winner),
428+ DEBUG)
429+ return None
430
431
432 @hooks.hook('replication-relation-joined', 'replication-relation-changed')
433@@ -1419,10 +1464,7 @@
434 log("Fresh unit. I will clone {} and become a hot standby".format(
435 master))
436
437- # Before we start destroying anything, ensure that the
438- # master is contactable.
439 master_ip = hookenv.relation_get('private-address', master)
440- wait_for_db(db='postgres', user='juju_replication', host=master_ip)
441
442 clone_database(master, master_ip)
443
444@@ -1592,8 +1634,55 @@
445 os.chdir(org_dir)
446
447
448+@contextmanager
449+def restart_lock(unit, exclusive):
450+ '''Acquire the database restart lock on the given unit.
451+
452+ A database needing a restart should grab an exclusive lock before
453+ doing so. To block a remote database from doing a restart, grab a shared
454+ lock.
455+ '''
456+ import psycopg2
457+ key = long(config_data['advisory_lock_restart_key'])
458+ if exclusive:
459+ lock_function = 'pg_advisory_lock'
460+ else:
461+ lock_function = 'pg_advisory_lock_shared'
462+ q = 'SELECT {}({})'.format(lock_function, key)
463+
464+ # We will get an exception if the database is rebooted while waiting
465+ # for a shared lock. If the connection is killed, we retry a few
466+ # times to cope.
467+ num_retries = 3
468+
469+ for count in range(0, num_retries):
470+ try:
471+ if unit == hookenv.local_unit():
472+ cur = db_cursor(autocommit=True)
473+ else:
474+ host = hookenv.relation_get('private-address', unit)
475+ cur = db_cursor(
476+ autocommit=True, db='postgres',
477+ user='juju_replication', host=host)
478+ cur.execute(q)
479+ break
480+ except psycopg2.Error:
481+ if count == num_retries - 1:
482+ raise
483+
484+ try:
485+ yield
486+ finally:
487+ # Close our connection, swallowing any exceptions as the database
488+ # may be being rebooted now we have released our lock.
489+ try:
490+ del cur
491+ except psycopg2.Error:
492+ pass
493+
494+
495 def clone_database(master_unit, master_host):
496- with pgpass():
497+ with restart_lock(master_unit, False):
498 postgresql_stop()
499 log("Cloning master {}".format(master_unit))
500
501@@ -1607,9 +1696,10 @@
502 shutil.rmtree(postgresql_cluster_dir)
503
504 try:
505- # Change directory the postgres user can read.
506- with switch_cwd('/tmp'):
507- # Run the sudo command.
508+ # Change to a directory the postgres user can read; we need
509+ # .pgpass too.
510+ with switch_cwd('/tmp'), pgpass():
511+ # Clone the master with pg_basebackup.
512 output = subprocess.check_output(cmd, stderr=subprocess.STDOUT)
513 log(output, DEBUG)
514 # Debian by default expects SSL certificates in the datadir.
515@@ -1626,8 +1716,8 @@
516 # can retry hooks again. Even assuming the charm is
517 # functioning correctly, the clone may still fail
518 # due to eg. lack of disk space.
519- log("Clone failed, db cluster destroyed", ERROR)
520 log(x.output, ERROR)
521+ log("Clone failed, local db destroyed", ERROR)
522 if os.path.exists(postgresql_cluster_dir):
523 shutil.rmtree(postgresql_cluster_dir)
524 if os.path.exists(postgresql_config_dir):
525@@ -1652,6 +1742,15 @@
526 os.path.join(postgresql_cluster_dir, 'backup_label'))
527
528
529+def pg_basebackup_is_running():
530+ cur = db_cursor(autocommit=True)
531+ cur.execute("""
532+ SELECT count(*) FROM pg_stat_activity
533+ WHERE usename='juju_replication' AND application_name='pg_basebackup'
534+ """)
535+ return cur.fetchone()[0] > 0
536+
537+
538 def postgresql_wal_received_offset():
539 """How much WAL we have.
540
541@@ -1694,7 +1793,7 @@
542 try:
543 nagios_uid = getpwnam('nagios').pw_uid
544 nagios_gid = getgrnam('nagios').gr_gid
545- except:
546+ except Exception:
547 hookenv.log("Nagios user not set up.", hookenv.DEBUG)
548 return
549
550
551=== modified file 'test.py'
552--- test.py 2013-08-23 09:40:15 +0000
553+++ test.py 2013-08-23 09:40:16 +0000
554@@ -74,12 +74,12 @@
555 return None
556
557 def deploy(self, charm, name=None, num_units=1):
558- # The first time we deploy a charm in the test run, it needs to
559- # deploy with --update to ensure we are testing the desired
560- # revision of the charm. Subsequent deploys we do not use
561- # --update to avoid overhead and needless incrementing of the
562+ # The first time we deploy a local: charm in the test run, it
563+ # needs to deploy with --update to ensure we are testing the
564+ # desired revision of the charm. Subsequent deploys we do not
565+ # use --update to avoid overhead and needless incrementing of the
566 # revision number.
567- if charm.startswith('cs:') or charm in self._deployed_charms:
568+ if not charm.startswith('local:') or charm in self._deployed_charms:
569 cmd = ['deploy']
570 else:
571 cmd = ['deploy', '-u']
572@@ -102,7 +102,7 @@
573 self.status = self.get_result(['status'])
574 return self.status
575
576- def wait_until_ready(self):
577+ def wait_until_ready(self, extra=45):
578 ready = False
579 while not ready:
580 self.refresh_status()
581@@ -128,7 +128,7 @@
582 # enough that our system is probably stable. This means we have
583 # extremely slow and flaky tests, but that is possibly better
584 # than no tests.
585- time.sleep(45)
586+ time.sleep(extra)
587
588 def setUp(self):
589 DEBUG("JujuFixture.setUp()")
590@@ -156,7 +156,7 @@
591 # Per Bug #1190250 (WONTFIX), we need to wait for dying services
592 # to die before we can continue.
593 if found_services:
594- self.wait_until_ready()
595+ self.wait_until_ready(0)
596
597 # We shouldn't reuse machines, as we have no guarantee they are
598 # still in a usable state, so tear them down too. Per
599@@ -305,15 +305,18 @@
600 self.juju.do(['add-relation', 'postgresql:db', 'psql:db'])
601 self.juju.wait_until_ready()
602
603- # On a freshly setup service, lowest numbered unit is always the
604- # master.
605- units = unit_sorted(
606- self.juju.status['services']['postgresql']['units'].keys())
607- master_unit, standby_unit_1, standby_unit_2 = units
608-
609- self.assertIs(True, self.is_master(master_unit))
610- self.assertIs(False, self.is_master(standby_unit_1))
611- self.assertIs(False, self.is_master(standby_unit_2))
612+ # Even on a freshly setup service, we have no idea which unit
613+ # will become the master as we have no control over which two
614+ # units join the peer relation first.
615+ units = sorted((self.is_master(unit), unit)
616+ for unit in
617+ self.juju.status['services']['postgresql']['units'].keys())
618+ self.assertFalse(units[0][0])
619+ self.assertFalse(units[1][0])
620+ self.assertTrue(units[2][0])
621+ standby_unit_1 = units[0][1]
622+ standby_unit_2 = units[1][1]
623+ master_unit = units[2][1]
624
625 self.sql('CREATE TABLE Token (x int)', master_unit)
626
627@@ -390,11 +393,18 @@
628 self.juju.do(['add-relation', 'postgresql:db-admin', 'psql:db-admin'])
629 self.juju.wait_until_ready()
630
631- # On a freshly setup service, lowest numbered unit is always the
632- # master.
633- units = unit_sorted(
634- self.juju.status['services']['postgresql']['units'].keys())
635- master_unit, standby_unit_1, standby_unit_2 = units
636+ # Even on a freshly setup service, we have no idea which unit
637+ # will become the master as we have no control over which two
638+ # units join the peer relation first.
639+ units = sorted((self.is_master(unit, 'postgres'), unit)
640+ for unit in
641+ self.juju.status['services']['postgresql']['units'].keys())
642+ self.assertFalse(units[0][0])
643+ self.assertFalse(units[1][0])
644+ self.assertTrue(units[2][0])
645+ standby_unit_1 = units[0][1]
646+ standby_unit_2 = units[1][1]
647+ master_unit = units[2][1]
648
649 # Shutdown PostgreSQL on standby_unit_1 and ensure
650 # standby_unit_2 will have received more WAL information from
