Merge ~cjwatson/launchpad:remove-unused-replication-helpers into launchpad:master

Proposed by Colin Watson
Status: Merged
Approved by: Colin Watson
Approved revision: 2cd1d598dc094b287772560a2c4845cb4ed7bb41
Merge reported by: Otto Co-Pilot
Merged at revision: not available
Proposed branch: ~cjwatson/launchpad:remove-unused-replication-helpers
Merge into: launchpad:master
Diff against target: 793 lines (+1/-698)
5 files modified
.gitignore (+0/-2)
database/replication/helpers.py (+1/-603)
dev/null (+0/-31)
lib/lp/services/config/schema-lazr.conf (+0/-5)
lib/lp/services/database/postgresql.py (+0/-57)
Reviewer Review Type Date Requested Status
Guruprasad Approve
Review via email: mp+450570@code.launchpad.net

Commit message

Remove unused replication helpers

Description of the change

All this stuff seems to have been a remnant of when Launchpad used to use Slony-I for its own internal database replication, and is long obsolete.

To post a comment you must log in.
Revision history for this message
Guruprasad (lgp171188) wrote :
review: Approve

Preview Diff

[H/L] Next/Prev Comment, [J/K] Next/Prev File, [N/P] Next/Prev Hunk
1diff --git a/.gitignore b/.gitignore
2index 9317a84..82f290a 100644
3--- a/.gitignore
4+++ b/.gitignore
5@@ -6,8 +6,6 @@ logs/*
6 +*
7 /botmaster
8 /tags
9-database/replication/lpslon_*.log
10-database/replication/preamble.sk
11 database/schema/diagrams/*.ps
12 database/schema/diagrams/*.dot
13 thread*.request
14diff --git a/database/replication/helpers.py b/database/replication/helpers.py
15index 66c9fef..b5fe45a 100644
16--- a/database/replication/helpers.py
17+++ b/database/replication/helpers.py
18@@ -5,241 +5,9 @@
19
20 __all__ = []
21
22-import subprocess
23-from tempfile import NamedTemporaryFile
24-from textwrap import dedent
25-
26 import psycopg2
27
28-from lp.services.config import config
29-from lp.services.database.postgresql import (
30- ConnectionString,
31- all_sequences_in_schema,
32- all_tables_in_schema,
33- fqn,
34-)
35-from lp.services.database.sqlbase import (
36- ISOLATION_LEVEL_DEFAULT,
37- connect,
38- sqlvalues,
39-)
40-from lp.services.scripts.logger import DEBUG2, log
41-
42-# The Slony-I clustername we use with Launchpad. Hardcoded because there
43-# is no point changing this, ever.
44-CLUSTERNAME = "sl"
45-
46-# The namespace in the database used to contain all the Slony-I tables.
47-CLUSTER_NAMESPACE = "_%s" % CLUSTERNAME
48-
49-# Replication set id constants. Don't change these without DBA help.
50-LPMAIN_SET_ID = 1
51-HOLDING_SET_ID = 666
52-SSO_SET_ID = 3
53-LPMIRROR_SET_ID = 4
54-
55-# Seed tables for the lpmain replication set to be passed to
56-# calculate_replication_set().
57-LPMAIN_SEED = frozenset(
58- [
59- ("public", "account"),
60- ("public", "person"),
61- ("public", "databasereplicationlag"),
62- ("public", "fticache"),
63- ("public", "nameblocklist"),
64- ("public", "openidconsumerassociation"),
65- ("public", "openidconsumernonce"),
66- ("public", "codeimportmachine"),
67- ("public", "scriptactivity"),
68- ("public", "launchpadstatistic"),
69- ("public", "parsedapachelog"),
70- ("public", "databasereplicationlag"),
71- ("public", "featureflag"),
72- ("public", "bugtaskflat"),
73- # suggestivepotemplate can be removed when the
74- # suggestivepotemplate.potemplate foreign key constraint exists on
75- # production.
76- ("public", "suggestivepotemplate"),
77- # These are odd. They are updated via slonik & EXECUTE SCRIPT, and
78- # the contents of these tables will be different on each node
79- # because we store timestamps when the patches were applied.
80- # However, we want the tables listed as replicated so that, when
81- # building a new replica, the data that documents the schema patch
82- # level matches the schema patch level and upgrade.py does the right
83- # thing. This is a bad thing to do, but we are safe in this
84- # particular case.
85- ("public", "launchpaddatabaserevision"),
86- ("public", "launchpaddatabaseupdatelog"),
87- ]
88-)
89-
90-# Explicitly list tables that should not be replicated. This includes the
91-# session tables, as these might exist in developer databases but will not
92-# exist in the production launchpad database.
93-IGNORED_TABLES = {
94- # Session tables that in some situations will exist in the main lp
95- # database.
96- "public.secret",
97- "public.sessiondata",
98- "public.sessionpkgdata",
99- # Mirror tables, per Bug #489078. These tables have their own private
100- # replication set that is setup manually.
101- "public.lp_account",
102- "public.lp_openididentifier",
103- "public.lp_person",
104- "public.lp_personlocation",
105- "public.lp_teamparticipation",
106- # Database statistics
107- "public.databasetablestats",
108- "public.databasecpustats",
109- "public.databasediskutilization",
110- # Don't replicate OAuthNonce - too busy and no real gain.
111- "public.oauthnonce",
112- # Ubuntu SSO database. These tables were created manually by ISD
113- # and the Launchpad scripts should not mess with them. Eventually
114- # these tables will be in a totally separate database.
115- "public.auth_permission",
116- "public.auth_group",
117- "public.auth_user",
118- "public.auth_message",
119- "public.django_content_type",
120- "public.auth_permission",
121- "public.django_session",
122- "public.django_site",
123- "public.django_admin_log",
124- "public.ssoopenidrpconfig",
125- "public.auth_group_permissions",
126- "public.auth_user_groups",
127- "public.auth_user_user_permissions",
128- "public.oauth_nonce",
129- "public.oauth_consumer",
130- "public.oauth_token",
131- "public.api_user",
132- "public.oauth_consumer_id_seq",
133- "public.api_user_id_seq",
134- "public.oauth_nonce_id_seq",
135-}
136-
137-# Calculate IGNORED_SEQUENCES
138-IGNORED_SEQUENCES = {"%s_id_seq" % table for table in IGNORED_TABLES}
139-
140-
141-def slony_installed(con):
142- """Return True if the connected database is part of a Launchpad Slony-I
143- cluster.
144- """
145- cur = con.cursor()
146- cur.execute(
147- """
148- SELECT TRUE FROM pg_class,pg_namespace
149- WHERE
150- nspname = %s
151- AND relname = 'sl_table'
152- AND pg_class.relnamespace = pg_namespace.oid
153- """
154- % sqlvalues(CLUSTER_NAMESPACE)
155- )
156- return cur.fetchone() is not None
157-
158-
159-class TableReplicationInfo:
160- """Internal table replication details."""
161-
162- table_id = None
163- replication_set_id = None
164- primary_node_id = None
165-
166- def __init__(self, con, namespace, table_name):
167- cur = con.cursor()
168- cur.execute(
169- """
170- SELECT tab_id, tab_set, set_origin
171- FROM %s.sl_table, %s.sl_set
172- WHERE tab_set = set_id
173- AND tab_nspname = %s
174- AND tab_relname = %s
175- """
176- % (
177- (CLUSTER_NAMESPACE, CLUSTER_NAMESPACE)
178- + sqlvalues(namespace, table_name)
179- )
180- )
181- row = cur.fetchone()
182- if row is None:
183- raise LookupError(fqn(namespace, table_name))
184- self.table_id, self.replication_set_id, self.primary_node_id = row
185-
186-
187-def sync(timeout, exit_on_fail=True):
188- """Generate a sync event and wait for it to complete on all nodes.
189-
190- This means that all pending events have propagated and are in sync
191- to the point in time this method was called. This might take several
192- hours if there is a large backlog of work to replicate.
193-
194- :param timeout: Number of seconds to wait for the sync. 0 to block
195- indefinitely.
196-
197- :param exit_on_fail: If True, on failure of the sync
198- SystemExit is raised using the slonik return code.
199-
200- :returns: True if the sync completed successfully. False if
201- exit_on_fail is False and the script failed for any reason.
202- """
203- return execute_slonik("", sync=timeout, exit_on_fail=exit_on_fail)
204-
205-
206-def execute_slonik(script, sync=None, exit_on_fail=True, auto_preamble=True):
207- """Use the slonik command line tool to run a slonik script.
208-
209- :param script: The script as a string. Preamble should not be included.
210-
211- :param sync: Number of seconds to wait for sync before failing. 0 to
212- block indefinitely.
213-
214- :param exit_on_fail: If True, on failure of the slonik script
215- SystemExit is raised using the slonik return code.
216-
217- :param auto_preamble: If True, the generated preamble will be
218- automatically included.
219-
220- :returns: True if the script completed successfully. False if
221- exit_on_fail is False and the script failed for any reason.
222- """
223-
224- # Add the preamble and optional sync to the script.
225- if auto_preamble:
226- script = preamble() + script
227-
228- if sync is not None:
229- sync_script = dedent(
230- """\
231- sync (id = @primary_node);
232- wait for event (
233- origin = @primary_node, confirmed = ALL,
234- wait on = @primary_node, timeout = %d);
235- """
236- % sync
237- )
238- script = script + sync_script
239-
240- # Copy the script to a NamedTemporaryFile rather than just pumping it
241- # to slonik via stdin. This way it can be examined if slonik appears
242- # to hang.
243- script_on_disk = NamedTemporaryFile(prefix="slonik", suffix=".sk")
244- print(script, file=script_on_disk, flush=True)
245-
246- # Run slonik
247- log.debug("Executing slonik script %s" % script_on_disk.name)
248- log.log(DEBUG2, "Running script:\n%s" % script)
249- returncode = subprocess.call(["slonik", script_on_disk.name])
250-
251- if returncode != 0:
252- log.error("slonik script failed")
253- if exit_on_fail:
254- raise SystemExit(1)
255-
256- return returncode == 0
257+from lp.services.database.sqlbase import ISOLATION_LEVEL_DEFAULT
258
259
260 class Node:
261@@ -255,373 +23,3 @@ class Node:
262 con = psycopg2.connect(str(self.connection_string))
263 con.set_isolation_level(isolation)
264 return con
265-
266-
267-def _get_nodes(con, query):
268- """Return a list of Nodes."""
269- if not slony_installed(con):
270- return []
271- cur = con.cursor()
272- cur.execute(query)
273- nodes = []
274- for node_id, nickname, connection_string, is_primary in cur.fetchall():
275- nodes.append(Node(node_id, nickname, connection_string, is_primary))
276- return nodes
277-
278-
279-def get_primary_node(con, set_id=1):
280- """Return the primary Node, or None if the cluster is still being setup."""
281- nodes = _get_nodes(
282- con,
283- """
284- SELECT DISTINCT
285- set_origin AS node_id,
286- 'primary',
287- pa_conninfo AS connection_string,
288- True
289- FROM _sl.sl_set
290- LEFT OUTER JOIN _sl.sl_path ON set_origin = pa_server
291- WHERE set_id = %d
292- """
293- % set_id,
294- )
295- if not nodes:
296- return None
297- assert len(nodes) == 1, "More than one primary found for set %s" % set_id
298- return nodes[0]
299-
300-
301-def get_standby_nodes(con, set_id=1):
302- """Return the list of standby Nodes."""
303- return _get_nodes(
304- con,
305- """
306- SELECT DISTINCT
307- pa_server AS node_id,
308- 'standby' || pa_server,
309- pa_conninfo AS connection_string,
310- False
311- FROM _sl.sl_set
312- JOIN _sl.sl_subscribe ON set_id = sub_set
313- JOIN _sl.sl_path ON sub_receiver = pa_server
314- WHERE
315- set_id = %d
316- ORDER BY node_id
317- """
318- % set_id,
319- )
320-
321-
322-def get_nodes(con, set_id=1):
323- """Return a list of all Nodes."""
324- primary_node = get_primary_node(con, set_id)
325- if primary_node is None:
326- return []
327- else:
328- return [primary_node] + get_standby_nodes(con, set_id)
329-
330-
331-def get_all_cluster_nodes(con):
332- """Return a list of all Nodes in the cluster.
333-
334- node.is_primary will be None, as this boolean doesn't make sense
335- in the context of a cluster rather than a single replication set.
336- """
337- if not slony_installed(con):
338- return []
339- nodes = _get_nodes(
340- con,
341- """
342- SELECT DISTINCT
343- pa_server AS node_id,
344- 'node' || pa_server || '_node',
345- pa_conninfo AS connection_string,
346- NULL
347- FROM _sl.sl_path
348- ORDER BY node_id
349- """,
350- )
351- if not nodes:
352- # There are no subscriptions yet, so no paths. Generate the
353- # primary Node.
354- cur = con.cursor()
355- cur.execute("SELECT no_id from _sl.sl_node")
356- node_ids = [row[0] for row in cur.fetchall()]
357- if len(node_ids) == 0:
358- return []
359- assert len(node_ids) == 1, "Multiple nodes but no paths."
360- primary_node_id = node_ids[0]
361- primary_connection_string = ConnectionString(
362- config.database.rw_main_primary
363- )
364- primary_connection_string.user = "slony"
365- return [
366- Node(
367- primary_node_id,
368- "node%d_node" % primary_node_id,
369- primary_connection_string,
370- True,
371- )
372- ]
373- return nodes
374-
375-
376-def preamble(con=None):
377- """Return the preamble needed at the start of all slonik scripts."""
378-
379- if con is None:
380- con = connect(user="slony")
381-
382- primary_node = get_primary_node(con)
383- nodes = get_all_cluster_nodes(con)
384- if primary_node is None and len(nodes) == 1:
385- primary_node = nodes[0]
386-
387- preamble = [
388- dedent(
389- """\
390- #
391- # Every slonik script must start with a clustername, which cannot
392- # be changed once the cluster is initialized.
393- #
394- cluster name = sl;
395-
396- # Symbolic ids for replication sets.
397- define lpmain_set %d;
398- define holding_set %d;
399- define sso_set %d;
400- define lpmirror_set %d;
401- """
402- % (LPMAIN_SET_ID, HOLDING_SET_ID, SSO_SET_ID, LPMIRROR_SET_ID)
403- )
404- ]
405-
406- if primary_node is not None:
407- preamble.append(
408- dedent(
409- """\
410- # Symbolic id for the main replication set primary node.
411- define primary_node %d;
412- define primary_node_conninfo '%s';
413- """
414- % (primary_node.node_id, primary_node.connection_string)
415- )
416- )
417-
418- for node in nodes:
419- preamble.append(
420- dedent(
421- """\
422- define %s %d;
423- define %s_conninfo '%s';
424- node @%s admin conninfo = @%s_conninfo;
425- """
426- % (
427- node.nickname,
428- node.node_id,
429- node.nickname,
430- node.connection_string,
431- node.nickname,
432- node.nickname,
433- )
434- )
435- )
436-
437- return "\n\n".join(preamble)
438-
439-
440-def calculate_replication_set(cur, seeds):
441- """Return the minimal set of tables and sequences needed in a
442- replication set containing the seed table.
443-
444- A replication set must contain all tables linked by foreign key
445- reference to the given table, and sequences used to generate keys.
446- Tables and sequences can be added to the IGNORED_TABLES and
447- IGNORED_SEQUENCES lists for cases where we know we can safely ignore
448- this restriction.
449-
450- :param seeds: [(namespace, tablename), ...]
451-
452- :returns: (tables, sequences)
453- """
454- # Results
455- tables = set()
456- sequences = set()
457-
458- # Our pending set to check
459- pending_tables = set(seeds)
460-
461- # Generate the set of tables that reference the seed directly
462- # or indirectly via foreign key constraints, including the seed itself.
463- while pending_tables:
464- namespace, tablename = pending_tables.pop()
465-
466- # Skip if the table doesn't exist - we might have seeds listed that
467- # have been removed or are yet to be created.
468- cur.execute(
469- """
470- SELECT TRUE
471- FROM pg_class, pg_namespace
472- WHERE pg_class.relnamespace = pg_namespace.oid
473- AND pg_namespace.nspname = %s
474- AND pg_class.relname = %s
475- """
476- % sqlvalues(namespace, tablename)
477- )
478- if cur.fetchone() is None:
479- log.debug("Table %s.%s doesn't exist" % (namespace, tablename))
480- continue
481-
482- tables.add((namespace, tablename))
483-
484- # Find all tables that reference the current (seed) table
485- # and all tables that the seed table references.
486- cur.execute(
487- """
488- SELECT ref_namespace.nspname, ref_class.relname
489- FROM
490- -- One of the seed tables
491- pg_class AS seed_class,
492- pg_namespace AS seed_namespace,
493-
494- -- A table referencing the seed, or being referenced by
495- -- the seed.
496- pg_class AS ref_class,
497- pg_namespace AS ref_namespace,
498-
499- pg_constraint
500- WHERE
501- seed_class.relnamespace = seed_namespace.oid
502- AND ref_class.relnamespace = ref_namespace.oid
503-
504- AND seed_namespace.nspname = %s
505- AND seed_class.relname = %s
506-
507- -- Foreign key constraints are all we care about.
508- AND pg_constraint.contype = 'f'
509-
510- -- We want tables referenced by, or referred to, the
511- -- seed table.
512- AND ((pg_constraint.conrelid = ref_class.oid
513- AND pg_constraint.confrelid = seed_class.oid)
514- OR (pg_constraint.conrelid = seed_class.oid
515- AND pg_constraint.confrelid = ref_class.oid)
516- )
517- """
518- % sqlvalues(namespace, tablename)
519- )
520- for namespace, tablename in cur.fetchall():
521- key = (namespace, tablename)
522- if (
523- key not in tables
524- and key not in pending_tables
525- and "%s.%s" % (namespace, tablename) not in IGNORED_TABLES
526- ):
527- pending_tables.add(key)
528-
529- # Generate the set of sequences that are linked to any of our set of
530- # tables. We assume these are all sequences created by creation of
531- # serial or bigserial columns, or other sequences OWNED BY a particular
532- # column.
533- for namespace, tablename in tables:
534- cur.execute(
535- """
536- SELECT seq
537- FROM (
538- SELECT pg_get_serial_sequence(%s, attname) AS seq
539- FROM pg_namespace, pg_class, pg_attribute
540- WHERE pg_namespace.nspname = %s
541- AND pg_class.relnamespace = pg_namespace.oid
542- AND pg_class.relname = %s
543- AND pg_attribute.attrelid = pg_class.oid
544- AND pg_attribute.attisdropped IS FALSE
545- ) AS whatever
546- WHERE seq IS NOT NULL;
547- """
548- % sqlvalues(fqn(namespace, tablename), namespace, tablename)
549- )
550- for (sequence,) in cur.fetchall():
551- if sequence not in IGNORED_SEQUENCES:
552- sequences.add(sequence)
553-
554- # We can't easily convert the sequence name to (namespace, name) tuples,
555- # so we might as well convert the tables to dot notation for consistency.
556- tables = {fqn(namespace, tablename) for namespace, tablename in tables}
557-
558- return tables, sequences
559-
560-
561-def discover_unreplicated(cur):
562- """Inspect the database for tables and sequences in the public schema
563- that are not in a replication set.
564-
565- :returns: (unreplicated_tables_set, unreplicated_sequences_set)
566- """
567- all_tables = all_tables_in_schema(cur, "public")
568- all_sequences = all_sequences_in_schema(cur, "public")
569-
570- # Ignore any tables and sequences starting with temp_. These are
571- # transient and not to be replicated per Bug #778338.
572- all_tables = {
573- table for table in all_tables if not table.startswith("public.temp_")
574- }
575- all_sequences = {
576- sequence
577- for sequence in all_sequences
578- if not sequence.startswith("public.temp_")
579- }
580-
581- cur.execute(
582- """
583- SELECT tab_nspname, tab_relname FROM %s
584- WHERE tab_nspname = 'public'
585- """
586- % fqn(CLUSTER_NAMESPACE, "sl_table")
587- )
588- replicated_tables = {fqn(*row) for row in cur.fetchall()}
589-
590- cur.execute(
591- """
592- SELECT seq_nspname, seq_relname FROM %s
593- WHERE seq_nspname = 'public'
594- """
595- % fqn(CLUSTER_NAMESPACE, "sl_sequence")
596- )
597- replicated_sequences = {fqn(*row) for row in cur.fetchall()}
598-
599- return (
600- all_tables - replicated_tables - IGNORED_TABLES,
601- all_sequences - replicated_sequences - IGNORED_SEQUENCES,
602- )
603-
604-
605-class ReplicationConfigError(Exception):
606- """Exception raised by validate_replication_sets() when our replication
607- setup is misconfigured.
608- """
609-
610-
611-def validate_replication(cur):
612- """Raise a ReplicationConfigError if there is something wrong with
613- our replication sets.
614-
615- This might include tables exist that are not in a replication set,
616- or tables that exist in multiple replication sets for example.
617-
618- These are not necessarily limits on what Slony-I allows, but might
619- be due to policies we have made (eg. a table allowed in just one
620- replication set).
621- """
622- unrepl_tables, unrepl_sequences = discover_unreplicated(cur)
623- if unrepl_tables:
624- raise ReplicationConfigError(
625- "Unreplicated tables: %s" % repr(unrepl_tables)
626- )
627- if unrepl_sequences:
628- raise ReplicationConfigError(
629- "Unreplicated sequences: %s" % repr(unrepl_sequences)
630- )
631-
632- lpmain_tables, lpmain_sequences = calculate_replication_set(
633- cur, LPMAIN_SEED
634- )
635diff --git a/database/replication/preamble.py b/database/replication/preamble.py
636deleted file mode 100755
637index 694d1bb..0000000
638--- a/database/replication/preamble.py
639+++ /dev/null
640@@ -1,33 +0,0 @@
641-#!/usr/bin/python3 -S
642-#
643-# Copyright 2009-2011 Canonical Ltd. This software is licensed under the
644-# GNU Affero General Public License version 3 (see the file LICENSE).
645-
646-"""Generate a preamble for slonik(1) scripts based on the current LPCONFIG.
647-"""
648-
649-__all__ = []
650-
651-import _pythonpath # noqa: F401
652-
653-import time
654-from optparse import OptionParser
655-
656-import replication.helpers
657-from lp.services import scripts
658-from lp.services.config import config
659-from lp.services.database.sqlbase import connect
660-
661-if __name__ == "__main__":
662- parser = OptionParser()
663- scripts.db_options(parser)
664- (options, args) = parser.parse_args()
665- if args:
666- parser.error("Too many arguments")
667- scripts.execute_zcml_for_scripts(use_web_security=False)
668-
669- con = connect()
670- print("# slonik(1) preamble generated %s" % time.ctime())
671- print("# LPCONFIG=%s" % config.instance_name)
672- print()
673- print(replication.helpers.preamble(con))
674diff --git a/database/replication/sync.py b/database/replication/sync.py
675deleted file mode 100755
676index af646c7..0000000
677--- a/database/replication/sync.py
678+++ /dev/null
679@@ -1,31 +0,0 @@
680-#!/usr/bin/python3 -S
681-#
682-# Copyright 2010 Canonical Ltd. This software is licensed under the
683-# GNU Affero General Public License version 3 (see the file LICENSE).
684-
685-"""Block until the replication cluster synchronizes."""
686-
687-__all__ = []
688-
689-import _pythonpath # noqa: F401
690-
691-from optparse import OptionParser
692-
693-from lp.services.scripts import db_options, logger_options
694-from replication.helpers import sync
695-
696-if __name__ == "__main__":
697- parser = OptionParser()
698- parser.add_option(
699- "-t",
700- "--timeout",
701- dest="timeout",
702- metavar="SECS",
703- type="int",
704- help="Abort if no sync after SECS seconds.",
705- default=0,
706- )
707- logger_options(parser)
708- db_options(parser)
709- options, args = parser.parse_args()
710- sync(options.timeout)
711diff --git a/lib/lp/services/config/schema-lazr.conf b/lib/lp/services/config/schema-lazr.conf
712index c5efae3..ce448cf 100644
713--- a/lib/lp/services/config/schema-lazr.conf
714+++ b/lib/lp/services/config/schema-lazr.conf
715@@ -607,11 +607,6 @@ storm_cache: generational
716 # datatype: integer
717 storm_cache_size: 10000
718
719-# Where database/replication/slon_ctl.py dumps its logs. Used for the
720-# staging replication environment.
721-# datatype: existing_directory
722-replication_logdir: database/replication
723-
724 # If True, the connection string must contain a username, and the database
725 # adapter will switch to the target `dbuser` using `SET ROLE` after
726 # connecting (the session user must be a member of the target role). If
727diff --git a/lib/lp/services/database/postgresql.py b/lib/lp/services/database/postgresql.py
728index 9e72661..8ba5252 100644
729--- a/lib/lp/services/database/postgresql.py
730+++ b/lib/lp/services/database/postgresql.py
731@@ -475,63 +475,6 @@ def allow_sequential_scans(cur, permission):
732 cur.execute("SET enable_seqscan=%s" % permission_value)
733
734
735-def all_tables_in_schema(cur, schema):
736- """Return a set of all tables in the given schema.
737-
738- :returns: A set of quoted, fully qualified table names.
739- """
740- cur.execute(
741- """
742- SELECT nspname, relname
743- FROM pg_class, pg_namespace
744- WHERE
745- pg_class.relnamespace = pg_namespace.oid
746- AND pg_namespace.nspname = %s
747- AND pg_class.relkind = 'r'
748- """
749- % sqlvalues(schema)
750- )
751- return {
752- fqn(namespace, tablename) for namespace, tablename in cur.fetchall()
753- }
754-
755-
756-def all_sequences_in_schema(cur, schema):
757- """Return a set of all sequences in the given schema.
758-
759- :returns: A set of quoted, fully qualified table names.
760- """
761- cur.execute(
762- """
763- SELECT nspname, relname
764- FROM pg_class, pg_namespace
765- WHERE
766- pg_class.relnamespace = pg_namespace.oid
767- AND pg_namespace.nspname = %s
768- AND pg_class.relkind = 'S'
769- """
770- % sqlvalues(schema)
771- )
772- return {fqn(namespace, sequence) for namespace, sequence in cur.fetchall()}
773-
774-
775-def fqn(namespace, name):
776- """Return the fully qualified name by combining the namespace and name.
777-
778- Quoting is done for the non trivial cases.
779-
780- >>> print(fqn("public", "foo"))
781- public.foo
782- >>> print(fqn(" foo ", "$bar"))
783- " foo "."$bar"
784- """
785- if re.search(r"[^a-z_]", namespace) is not None:
786- namespace = quoteIdentifier(namespace)
787- if re.search(r"[^a-z_]", name) is not None:
788- name = quoteIdentifier(name)
789- return "%s.%s" % (namespace, name)
790-
791-
792 class ConnectionString:
793 """A libpq connection string.
794

Subscribers

People subscribed via source and target branches

to status/vote changes: