Merge lp:~jelmer/brz/fast-import-cmds into lp:brz

Proposed by Jelmer Vernooij
Status: Merged
Approved by: Jelmer Vernooij
Approved revision: no longer in the source branch.
Merge reported by: The Breezy Bot
Merged at revision: not available
Proposed branch: lp:~jelmer/brz/fast-import-cmds
Merge into: lp:brz
Diff against target: 781 lines (+12/-655)
9 files modified
breezy/plugins/fastimport/__init__.py (+5/-6)
breezy/plugins/fastimport/cache_manager.py (+1/-1)
breezy/plugins/fastimport/cmds.py (+1/-225)
breezy/plugins/fastimport/helpers.py (+0/-23)
breezy/plugins/fastimport/processors/generic_processor.py (+4/-1)
breezy/plugins/fastimport/processors/info_processor.py (+0/-282)
breezy/plugins/fastimport/reftracker.py (+0/-68)
breezy/plugins/fastimport/tests/test_commands.py (+0/-48)
setup.py (+1/-1)
To merge this branch: bzr merge lp:~jelmer/brz/fast-import-cmds
Reviewer            Review Type    Date Requested    Status
Jelmer Vernooij                                      Approve
Review via email: mp+342461@code.launchpad.net

Commit message

Remove the fast-import-{query,info,filter} commands.

Description of the change

Remove the fast-import-{query,info,filter} commands.

These are generic fast-import commands that don't particularly need to live in
Breezy; I'm adding them to python-fastimport instead.
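
For anyone who relied on the removed commands, the same processors remain reachable directly from python-fastimport. As a rough sketch (not part of this change), the example below mirrors the call pattern of the removed cmd_fast_import_filter; the input file name and include path are placeholders, and it assumes python-fastimport >= 0.9.8 exposes the FilterProcessor/ImportParser interface the removed command code already called::

    # Sketch: filter a fast-import stream via python-fastimport directly,
    # mirroring what cmd_fast_import_filter did. 'project.fi' and the include
    # path are hypothetical; FilterProcessor and ImportParser are invoked
    # exactly as in the removed command code.
    from fastimport import parser
    from fastimport.processors import filter_processor

    params = {
        'include_paths': ['lib/xxx/'],   # trailing '/' marks a directory
        'exclude_paths': None,
    }

    with open('project.fi', 'rb') as stream:
        proc = filter_processor.FilterProcessor(params=params, verbose=False)
        p = parser.ImportParser(stream, verbose=False)
        proc.process(p.iter_commands)    # filtered stream is written to stdout

Redirecting that output to a file and running bzr fast-import on it gives the same two-step workflow the old command's help described.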

Jelmer Vernooij (jelmer):
review: Approve

The Breezy Bot (the-breezy-bot) wrote:

Running landing tests failed
https://ci.breezy-vcs.org/job/brz-dev/118/

The Breezy Bot (the-breezy-bot) wrote:

Running landing tests failed
https://ci.breezy-vcs.org/job/brz-dev/137/

Preview Diff

1=== modified file 'breezy/plugins/fastimport/__init__.py'
2--- breezy/plugins/fastimport/__init__.py 2017-06-05 23:15:32 +0000
3+++ breezy/plugins/fastimport/__init__.py 2018-05-07 11:48:26 +0000
4@@ -46,9 +46,6 @@
5 reporting on fast-import streams, see the online help for the commands::
6
7 bzr help fast-import
8- bzr help fast-import-filter
9- bzr help fast-import-info
10- bzr help fast-import-query
11
12 Finally, you may wish to generate a fast-import dump file from a Bazaar
13 repository. The fast-export command is provided for that purpose.
14@@ -70,6 +67,11 @@
15 from ...errors import DependencyNotPresent
16 raise DependencyNotPresent("fastimport",
17 "fastimport requires the fastimport python module")
18+ if fastimport.__version__ < (0, 9, 8):
19+ from ...errors import DependencyNotPresent
20+ raise DependencyNotPresent("fastimport",
21+ "fastimport requires at least version 0.9.8 of the "
22+ "fastimport python module")
23
24
25 def test_suite():
26@@ -79,9 +81,6 @@
27
28 for name in [
29 "fast_import",
30- "fast_import_filter",
31- "fast_import_info",
32- "fast_import_query",
33 "fast_export",
34 ]:
35 plugin_cmds.register_lazy("cmd_%s" % name, [], "breezy.plugins.fastimport.cmds")
36
37=== modified file 'breezy/plugins/fastimport/cache_manager.py'
38--- breezy/plugins/fastimport/cache_manager.py 2018-01-22 02:23:22 +0000
39+++ breezy/plugins/fastimport/cache_manager.py 2018-05-07 11:48:26 +0000
40@@ -27,7 +27,7 @@
41 from . import (
42 branch_mapper,
43 )
44-from .reftracker import (
45+from fastimport.reftracker import (
46 RefTracker,
47 )
48 from .helpers import (
49
50=== modified file 'breezy/plugins/fastimport/cmds.py'
51--- breezy/plugins/fastimport/cmds.py 2017-11-12 02:01:00 +0000
52+++ breezy/plugins/fastimport/cmds.py 2018-05-07 11:48:26 +0000
53@@ -324,7 +324,7 @@
54 from fastimport import parser
55 from fastimport.errors import ParsingError
56 from ...errors import BzrCommandError
57- from .processors import info_processor
58+ from fastimport.processors import info_processor
59 stream = _get_source_stream(source)
60 output = StringIO()
61 try:
62@@ -341,230 +341,6 @@
63 return lines
64
65
66-class cmd_fast_import_filter(Command):
67- """Filter a fast-import stream to include/exclude files & directories.
68-
69- This command is useful for splitting a subdirectory or bunch of
70- files out from a project to create a new project complete with history
71- for just those files. It can also be used to create a new project
72- repository that removes all references to files that should not have
73- been committed, e.g. security-related information (like passwords),
74- commercially sensitive material, files with an incompatible license or
75- large binary files like CD images.
76-
77- To specify standard input as the input stream, use a source name
78- of '-'. If the source name ends in '.gz', it is assumed to be
79- compressed in gzip format.
80-
81- :File/directory filtering:
82-
83- This is supported by the -i and -x options. Excludes take precedence
84- over includes.
85-
86- When filtering out a subdirectory (or file), the new stream uses the
87- subdirectory (or subdirectory containing the file) as the root. As
88- fast-import doesn't know in advance whether a path is a file or
89- directory in the stream, you need to specify a trailing '/' on
90- directories passed to the `--includes option`. If multiple files or
91- directories are given, the new root is the deepest common directory.
92-
93- Note: If a path has been renamed, take care to specify the *original*
94- path name, not the final name that it ends up with.
95-
96- :User mapping:
97-
98- Some source repositories store just the user name while Bazaar
99- prefers a full email address. You can adjust user-ids
100- by using the --user-map option. The argument is a
101- text file with lines in the format::
102-
103- old-id = new-id
104-
105- Blank lines and lines beginning with # are ignored.
106- If old-id has the special value '@', then users without an
107- email address will get one created by using the matching new-id
108- as the domain, unless a more explicit address is given for them.
109- For example, given the user-map of::
110-
111- @ = example.com
112- bill = William Jones <bill@example.com>
113-
114- then user-ids are mapped as follows::
115-
116- maria => maria <maria@example.com>
117- bill => William Jones <bill@example.com>
118-
119- .. note::
120-
121- User mapping is supported by both the fast-import and
122- fast-import-filter commands.
123-
124- :History rewriting:
125-
126- By default fast-import-filter does quite aggressive history rewriting.
127- Empty commits (or commits which had all their content filtered out) will
128- be removed, and so are the references to commits not included in the stream.
129-
130- Flag --dont-squash-empty-commits reverses this behavior and makes it possible to
131- use fast-import-filter on incremental streams.
132-
133- :Examples:
134-
135- Create a new project from a library (note the trailing / on the
136- directory name of the library)::
137-
138- front-end | bzr fast-import-filter -i lib/xxx/ > xxx.fi
139- bzr fast-import xxx.fi mylibrary.bzr
140- (lib/xxx/foo is now foo)
141-
142- Create a new repository without a sensitive file::
143-
144- front-end | bzr fast-import-filter -x missile-codes.txt > clean.fi
145- bzr fast-import clean.fi clean.bzr
146- """
147- hidden = False
148- _see_also = ['fast-import']
149- takes_args = ['source?']
150- takes_options = ['verbose',
151- ListOption('include_paths', short_name='i', type=text_type,
152- help="Only include commits affecting these paths."
153- " Directories should have a trailing /."
154- ),
155- ListOption('exclude_paths', short_name='x', type=text_type,
156- help="Exclude these paths from commits."
157- ),
158- Option('user-map', type=text_type,
159- help="Path to file containing a map of user-ids.",
160- ),
161- Option('dont-squash-empty-commits',
162- help="Preserve all commits and links between them"
163- ),
164- ]
165- encoding_type = 'exact'
166- def run(self, source=None, verbose=False, include_paths=None,
167- exclude_paths=None, user_map=None, dont_squash_empty_commits=False):
168- from ...errors import BzrCommandError
169- load_fastimport()
170- from fastimport.processors import filter_processor
171- params = {
172- 'include_paths': include_paths,
173- 'exclude_paths': exclude_paths,
174- }
175- if ('squash_empty_commits' in
176- filter_processor.FilterProcessor.known_params):
177- params['squash_empty_commits'] = (not dont_squash_empty_commits)
178- else:
179- if dont_squash_empty_commits:
180- raise BzrCommandError("installed python-fastimport does not "
181- "support not squashing empty commits. Please install "
182- " a newer python-fastimport to use "
183- "--dont-squash-empty-commits")
184-
185- from fastimport.errors import ParsingError
186- from fastimport import parser
187- stream = _get_source_stream(source)
188- user_mapper = _get_user_mapper(user_map)
189- proc = filter_processor.FilterProcessor(params=params, verbose=verbose)
190- p = parser.ImportParser(stream, verbose=verbose, user_mapper=user_mapper)
191- try:
192- return proc.process(p.iter_commands)
193- except ParsingError as e:
194- raise BzrCommandError("%d: Parse error: %s" % (e.lineno, e))
195-
196-
197-class cmd_fast_import_info(Command):
198- """Output information about a fast-import stream.
199-
200- This command reads a fast-import stream and outputs
201- statistics and interesting properties about what it finds.
202- When run in verbose mode, the information is output as a
203- configuration file that can be passed to fast-import to
204- assist it in intelligently caching objects.
205-
206- To specify standard input as the input stream, use a source name
207- of '-'. If the source name ends in '.gz', it is assumed to be
208- compressed in gzip format.
209-
210- :Examples:
211-
212- Display statistics about the import stream produced by front-end::
213-
214- front-end | bzr fast-import-info -
215-
216- Create a hints file for running fast-import on a large repository::
217-
218- front-end | bzr fast-import-info -v - > front-end.cfg
219- """
220- hidden = False
221- _see_also = ['fast-import']
222- takes_args = ['source']
223- takes_options = ['verbose']
224- def run(self, source, verbose=False):
225- load_fastimport()
226- from .processors import info_processor
227- return _run(source, info_processor.InfoProcessor, verbose=verbose)
228-
229-
230-class cmd_fast_import_query(Command):
231- """Query a fast-import stream displaying selected commands.
232-
233- To specify standard input as the input stream, use a source name
234- of '-'. If the source name ends in '.gz', it is assumed to be
235- compressed in gzip format.
236-
237- To specify a commit to display, give its mark using the
238- --commit-mark option. The commit will be displayed with
239- file-commands included but with inline blobs hidden.
240-
241- To specify the commands to display, use the -C option one or
242- more times. To specify just some fields for a command, use the
243- syntax::
244-
245- command=field1,...
246-
247- By default, the nominated fields for the nominated commands
248- are displayed tab separated. To see the information in
249- a name:value format, use verbose mode.
250-
251- Note: Binary fields (e.g. data for blobs) are masked out
252- so it is generally safe to view the output in a terminal.
253-
254- :Examples:
255-
256- Show the commit with mark 429::
257-
258- bzr fast-import-query xxx.fi -m429
259-
260- Show all the fields of the reset and tag commands::
261-
262- bzr fast-import-query xxx.fi -Creset -Ctag
263-
264- Show the mark and merge fields of the commit commands::
265-
266- bzr fast-import-query xxx.fi -Ccommit=mark,merge
267- """
268- hidden = True
269- _see_also = ['fast-import', 'fast-import-filter']
270- takes_args = ['source']
271- takes_options = ['verbose',
272- Option('commit-mark', short_name='m', type=text_type,
273- help="Mark of the commit to display."
274- ),
275- ListOption('commands', short_name='C', type=text_type,
276- help="Display fields for these commands."
277- ),
278- ]
279- def run(self, source, verbose=False, commands=None, commit_mark=None):
280- load_fastimport()
281- from fastimport.processors import query_processor
282- from . import helpers
283- params = helpers.defines_to_dict(commands) or {}
284- if commit_mark:
285- params['commit-mark'] = commit_mark
286- return _run(source, query_processor.QueryProcessor, params=params,
287- verbose=verbose)
288-
289-
290 class cmd_fast_export(Command):
291 """Generate a fast-import stream from a Bazaar branch.
292
293
294=== modified file 'breezy/plugins/fastimport/helpers.py'
295--- breezy/plugins/fastimport/helpers.py 2017-08-26 15:17:57 +0000
296+++ breezy/plugins/fastimport/helpers.py 2018-05-07 11:48:26 +0000
297@@ -178,26 +178,3 @@
298 return single
299 else:
300 return plural
301-
302-
303-def invert_dictset(d):
304- """Invert a dictionary with keys matching a set of values, turned into lists."""
305- # Based on recipe from ASPN
306- result = {}
307- for k, c in d.items():
308- for v in c:
309- keys = result.setdefault(v, [])
310- keys.append(k)
311- return result
312-
313-
314-def invert_dict(d):
315- """Invert a dictionary with keys matching each value turned into a list."""
316- # Based on recipe from ASPN
317- result = {}
318- for k, v in d.items():
319- keys = result.setdefault(v, [])
320- keys.append(k)
321- return result
322-
323-
324
325=== modified file 'breezy/plugins/fastimport/processors/generic_processor.py'
326--- breezy/plugins/fastimport/processors/generic_processor.py 2018-03-25 02:55:15 +0000
327+++ breezy/plugins/fastimport/processors/generic_processor.py 2018-05-07 11:48:26 +0000
328@@ -47,6 +47,9 @@
329 errors as plugin_errors,
330 processor,
331 )
332+from fastimport.helpers import (
333+ invert_dictset,
334+ )
335
336
337 # How many commits before automatically reporting progress
338@@ -342,7 +345,7 @@
339 # Update the branches
340 self.note("Updating branch information ...")
341 updater = branch_updater.BranchUpdater(self.repo, self.branch,
342- self.cache_mgr, helpers.invert_dictset(
343+ self.cache_mgr, invert_dictset(
344 self.cache_mgr.reftracker.heads),
345 self.cache_mgr.reftracker.last_ref, self.tags)
346 branches_updated, branches_lost = updater.update()
347
348=== removed file 'breezy/plugins/fastimport/processors/info_processor.py'
349--- breezy/plugins/fastimport/processors/info_processor.py 2017-06-05 22:01:28 +0000
350+++ breezy/plugins/fastimport/processors/info_processor.py 1970-01-01 00:00:00 +0000
351@@ -1,282 +0,0 @@
352-# Copyright (C) 2008 Canonical Ltd
353-#
354-# This program is free software; you can redistribute it and/or modify
355-# it under the terms of the GNU General Public License as published by
356-# the Free Software Foundation; either version 2 of the License, or
357-# (at your option) any later version.
358-#
359-# This program is distributed in the hope that it will be useful,
360-# but WITHOUT ANY WARRANTY; without even the implied warranty of
361-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
362-# GNU General Public License for more details.
363-#
364-# You should have received a copy of the GNU General Public License
365-# along with this program. If not, see <http://www.gnu.org/licenses/>.
366-
367-"""Import processor that dump stats about the input (and doesn't import)."""
368-
369-from __future__ import absolute_import
370-
371-from .. import (
372- reftracker,
373- )
374-from ..helpers import (
375- invert_dict,
376- invert_dictset,
377- )
378-from fastimport import (
379- commands,
380- processor,
381- )
382-import stat
383-
384-
385-class InfoProcessor(processor.ImportProcessor):
386- """An import processor that dumps statistics about the input.
387-
388- No changes to the current repository are made.
389-
390- As well as providing useful information about an import
391- stream before importing it, this processor is useful for
392- benchmarking the speed at which data can be extracted from
393- the source.
394- """
395-
396- def __init__(self, params=None, verbose=0, outf=None):
397- processor.ImportProcessor.__init__(self, params, verbose,
398- outf=outf)
399-
400- def pre_process(self):
401- # Init statistics
402- self.cmd_counts = {}
403- for cmd in commands.COMMAND_NAMES:
404- self.cmd_counts[cmd] = 0
405- self.file_cmd_counts = {}
406- for fc in commands.FILE_COMMAND_NAMES:
407- self.file_cmd_counts[fc] = 0
408- self.parent_counts = {}
409- self.max_parent_count = 0
410- self.committers = set()
411- self.separate_authors_found = False
412- self.symlinks_found = False
413- self.executables_found = False
414- self.sha_blob_references = False
415- self.lightweight_tags = 0
416- # Blob usage tracking
417- self.blobs = {}
418- for usage in ['new', 'used', 'unknown', 'unmarked']:
419- self.blobs[usage] = set()
420- self.blob_ref_counts = {}
421- # Head tracking
422- self.reftracker = reftracker.RefTracker()
423- # Stuff to cache: a map from mark to # of times that mark is merged
424- self.merges = {}
425- # Stuff to cache: these are maps from mark to sets
426- self.rename_old_paths = {}
427- self.copy_source_paths = {}
428-
429- def post_process(self):
430- # Dump statistics
431- cmd_names = commands.COMMAND_NAMES
432- fc_names = commands.FILE_COMMAND_NAMES
433- self._dump_stats_group("Command counts",
434- [(c, self.cmd_counts[c]) for c in cmd_names], str)
435- self._dump_stats_group("File command counts",
436- [(c, self.file_cmd_counts[c]) for c in fc_names], str)
437-
438- # Commit stats
439- if self.cmd_counts['commit']:
440- p_items = []
441- for i in range(self.max_parent_count + 1):
442- if i in self.parent_counts:
443- count = self.parent_counts[i]
444- p_items.append(("parents-%d" % i, count))
445- merges_count = len(self.merges)
446- p_items.append(('total revisions merged', merges_count))
447- flags = {
448- 'separate authors found': self.separate_authors_found,
449- 'executables': self.executables_found,
450- 'symlinks': self.symlinks_found,
451- 'blobs referenced by SHA': self.sha_blob_references,
452- }
453- self._dump_stats_group("Parent counts", p_items, str)
454- self._dump_stats_group("Commit analysis", flags.items(), _found)
455- heads = invert_dictset(self.reftracker.heads)
456- self._dump_stats_group("Head analysis", heads.items(), None,
457- _iterable_as_config_list)
458- # note("\t%d\t%s" % (len(self.committers), 'unique committers'))
459- self._dump_stats_group("Merges", self.merges.items(), None)
460- # We only show the rename old path and copy source paths when -vv
461- # (verbose=2) is specified. The output here for mysql's data can't
462- # be parsed currently so this bit of code needs more work anyhow ..
463- if self.verbose >= 2:
464- self._dump_stats_group("Rename old paths",
465- self.rename_old_paths.items(), len,
466- _iterable_as_config_list)
467- self._dump_stats_group("Copy source paths",
468- self.copy_source_paths.items(), len,
469- _iterable_as_config_list)
470-
471- # Blob stats
472- if self.cmd_counts['blob']:
473- # In verbose mode, don't list every blob used
474- if self.verbose:
475- del self.blobs['used']
476- self._dump_stats_group("Blob usage tracking",
477- self.blobs.items(), len, _iterable_as_config_list)
478- if self.blob_ref_counts:
479- blobs_by_count = invert_dict(self.blob_ref_counts)
480- blob_items = sorted(blobs_by_count.items())
481- self._dump_stats_group("Blob reference counts",
482- blob_items, len, _iterable_as_config_list)
483-
484- # Other stats
485- if self.cmd_counts['reset']:
486- reset_stats = {
487- 'lightweight tags': self.lightweight_tags,
488- }
489- self._dump_stats_group("Reset analysis", reset_stats.items())
490-
491- def _dump_stats_group(self, title, items, normal_formatter=None,
492- verbose_formatter=None):
493- """Dump a statistics group.
494-
495- In verbose mode, do so as a config file so
496- that other processors can load the information if they want to.
497- :param normal_formatter: the callable to apply to the value
498- before displaying it in normal mode
499- :param verbose_formatter: the callable to apply to the value
500- before displaying it in verbose mode
501- """
502- if self.verbose:
503- self.outf.write("[%s]\n" % (title,))
504- for name, value in items:
505- if verbose_formatter is not None:
506- value = verbose_formatter(value)
507- if type(name) == str:
508- name = name.replace(' ', '-')
509- self.outf.write("%s = %s\n" % (name, value))
510- self.outf.write("\n")
511- else:
512- self.outf.write("%s:\n" % (title,))
513- for name, value in items:
514- if normal_formatter is not None:
515- value = normal_formatter(value)
516- self.outf.write("\t%s\t%s\n" % (value, name))
517-
518- def progress_handler(self, cmd):
519- """Process a ProgressCommand."""
520- self.cmd_counts[cmd.name] += 1
521-
522- def blob_handler(self, cmd):
523- """Process a BlobCommand."""
524- self.cmd_counts[cmd.name] += 1
525- if cmd.mark is None:
526- self.blobs['unmarked'].add(cmd.id)
527- else:
528- self.blobs['new'].add(cmd.id)
529- # Marks can be re-used so remove it from used if already there.
530- # Note: we definitely do NOT want to remove it from multi if
531- # it's already in that set.
532- try:
533- self.blobs['used'].remove(cmd.id)
534- except KeyError:
535- pass
536-
537- def checkpoint_handler(self, cmd):
538- """Process a CheckpointCommand."""
539- self.cmd_counts[cmd.name] += 1
540-
541- def commit_handler(self, cmd):
542- """Process a CommitCommand."""
543- self.cmd_counts[cmd.name] += 1
544- self.committers.add(cmd.committer)
545- if cmd.author is not None:
546- self.separate_authors_found = True
547- for fc in cmd.iter_files():
548- self.file_cmd_counts[fc.name] += 1
549- if isinstance(fc, commands.FileModifyCommand):
550- if fc.mode & 0111:
551- self.executables_found = True
552- if stat.S_ISLNK(fc.mode):
553- self.symlinks_found = True
554- if fc.dataref is not None:
555- if fc.dataref[0] == ':':
556- self._track_blob(fc.dataref)
557- else:
558- self.sha_blob_references = True
559- elif isinstance(fc, commands.FileRenameCommand):
560- self.rename_old_paths.setdefault(cmd.id, set()).add(fc.old_path)
561- elif isinstance(fc, commands.FileCopyCommand):
562- self.copy_source_paths.setdefault(cmd.id, set()).add(fc.src_path)
563-
564- # Track the heads
565- parents = self.reftracker.track_heads(cmd)
566-
567- # Track the parent counts
568- parent_count = len(parents)
569- if self.parent_counts.has_key(parent_count):
570- self.parent_counts[parent_count] += 1
571- else:
572- self.parent_counts[parent_count] = 1
573- if parent_count > self.max_parent_count:
574- self.max_parent_count = parent_count
575-
576- # Remember the merges
577- if cmd.merges:
578- #self.merges.setdefault(cmd.ref, set()).update(cmd.merges)
579- for merge in cmd.merges:
580- if merge in self.merges:
581- self.merges[merge] += 1
582- else:
583- self.merges[merge] = 1
584-
585- def reset_handler(self, cmd):
586- """Process a ResetCommand."""
587- self.cmd_counts[cmd.name] += 1
588- if cmd.ref.startswith('refs/tags/'):
589- self.lightweight_tags += 1
590- else:
591- if cmd.from_ is not None:
592- self.reftracker.track_heads_for_ref(
593- cmd.ref, cmd.from_)
594-
595- def tag_handler(self, cmd):
596- """Process a TagCommand."""
597- self.cmd_counts[cmd.name] += 1
598-
599- def feature_handler(self, cmd):
600- """Process a FeatureCommand."""
601- self.cmd_counts[cmd.name] += 1
602- feature = cmd.feature_name
603- if feature not in commands.FEATURE_NAMES:
604- self.warning("feature %s is not supported - parsing may fail"
605- % (feature,))
606-
607- def _track_blob(self, mark):
608- if mark in self.blob_ref_counts:
609- self.blob_ref_counts[mark] += 1
610- pass
611- elif mark in self.blobs['used']:
612- self.blob_ref_counts[mark] = 2
613- self.blobs['used'].remove(mark)
614- elif mark in self.blobs['new']:
615- self.blobs['used'].add(mark)
616- self.blobs['new'].remove(mark)
617- else:
618- self.blobs['unknown'].add(mark)
619-
620-def _found(b):
621- """Format a found boolean as a string."""
622- return ['no', 'found'][b]
623-
624-def _iterable_as_config_list(s):
625- """Format an iterable as a sequence of comma-separated strings.
626-
627- To match what ConfigObj expects, a single item list has a trailing comma.
628- """
629- items = sorted(s)
630- if len(items) == 1:
631- return "%s," % (items[0],)
632- else:
633- return ", ".join(items)
634
635=== removed file 'breezy/plugins/fastimport/reftracker.py'
636--- breezy/plugins/fastimport/reftracker.py 2017-05-23 23:21:16 +0000
637+++ breezy/plugins/fastimport/reftracker.py 1970-01-01 00:00:00 +0000
638@@ -1,68 +0,0 @@
639-# Copyright (C) 2009 Canonical Ltd
640-#
641-# This program is free software; you can redistribute it and/or modify
642-# it under the terms of the GNU General Public License as published by
643-# the Free Software Foundation; either version 2 of the License, or
644-# (at your option) any later version.
645-#
646-# This program is distributed in the hope that it will be useful,
647-# but WITHOUT ANY WARRANTY; without even the implied warranty of
648-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
649-# GNU General Public License for more details.
650-#
651-# You should have received a copy of the GNU General Public License
652-# along with this program. If not, see <http://www.gnu.org/licenses/>.
653-
654-
655-"""Tracker of refs."""
656-
657-from __future__ import absolute_import
658-
659-
660-class RefTracker(object):
661-
662- def __init__(self):
663- # Head tracking: last ref, last id per ref & map of commit ids to ref*s*
664- self.last_ref = None
665- self.last_ids = {}
666- self.heads = {}
667-
668- def dump_stats(self, note):
669- self._show_stats_for(self.last_ids, "last-ids", note=note)
670- self._show_stats_for(self.heads, "heads", note=note)
671-
672- def clear(self):
673- self.last_ids.clear()
674- self.heads.clear()
675-
676- def track_heads(self, cmd):
677- """Track the repository heads given a CommitCommand.
678-
679- :param cmd: the CommitCommand
680- :return: the list of parents in terms of commit-ids
681- """
682- # Get the true set of parents
683- if cmd.from_ is not None:
684- parents = [cmd.from_]
685- else:
686- last_id = self.last_ids.get(cmd.ref)
687- if last_id is not None:
688- parents = [last_id]
689- else:
690- parents = []
691- parents.extend(cmd.merges)
692-
693- # Track the heads
694- self.track_heads_for_ref(cmd.ref, cmd.id, parents)
695- return parents
696-
697- def track_heads_for_ref(self, cmd_ref, cmd_id, parents=None):
698- if parents is not None:
699- for parent in parents:
700- if parent in self.heads:
701- del self.heads[parent]
702- self.heads.setdefault(cmd_id, set()).add(cmd_ref)
703- self.last_ids[cmd_ref] = cmd_id
704- self.last_ref = cmd_ref
705-
706-
707
708=== modified file 'breezy/plugins/fastimport/tests/test_commands.py'
709--- breezy/plugins/fastimport/tests/test_commands.py 2018-02-18 15:21:06 +0000
710+++ breezy/plugins/fastimport/tests/test_commands.py 2018-05-07 11:48:26 +0000
711@@ -196,40 +196,6 @@
712
713 """
714
715-class TestFastImportInfo(ExternalBase):
716-
717- _test_needs_features = [FastimportFeature]
718-
719- def test_simple(self):
720- self.build_tree_contents([('simple.fi', simple_fast_import_stream)])
721- output = self.run_bzr("fast-import-info simple.fi")[0]
722- self.assertEquals(output, """Command counts:
723-\t0\tblob
724-\t0\tcheckpoint
725-\t1\tcommit
726-\t0\tfeature
727-\t0\tprogress
728-\t0\treset
729-\t0\ttag
730-File command counts:
731-\t0\tfilemodify
732-\t0\tfiledelete
733-\t0\tfilecopy
734-\t0\tfilerename
735-\t0\tfiledeleteall
736-Parent counts:
737-\t1\tparents-0
738-\t0\ttotal revisions merged
739-Commit analysis:
740-\tno\texecutables
741-\tno\tseparate authors found
742-\tno\tsymlinks
743-\tno\tblobs referenced by SHA
744-Head analysis:
745-\t[':1']\trefs/heads/master
746-Merges:
747-""")
748-
749
750 class TestFastImport(ExternalBase):
751
752@@ -256,17 +222,3 @@
753 self.make_branch_and_tree("br")
754 self.run_bzr_error(['brz: ERROR: 4: Parse error: line 4: Command commit is missing section committer\n'], "fast-import empty.fi br")
755
756-
757-class TestFastImportFilter(ExternalBase):
758-
759- _test_needs_features = [FastimportFeature]
760-
761- def test_empty(self):
762- self.build_tree_contents([('empty.fi', b"")])
763- self.make_branch_and_tree("br")
764- self.assertEquals("", self.run_bzr("fast-import-filter -")[0])
765-
766- def test_default_stdin(self):
767- self.build_tree_contents([('empty.fi', b"")])
768- self.make_branch_and_tree("br")
769- self.assertEquals("", self.run_bzr("fast-import-filter")[0])
770
771=== modified file 'setup.py'
772--- setup.py 2018-04-01 18:03:28 +0000
773+++ setup.py 2018-05-07 11:48:26 +0000
774@@ -58,7 +58,7 @@
775 'six>=1.9.0',
776 ],
777 'extras_require': {
778- 'fastimport': ['fastimport'],
779+ 'fastimport': ['fastimport>=0.9.8'],
780 },
781 }
782
