Merge lp:~mitya57/ubuntu/raring/sphinx/1.1.3+dfsg-5ubuntu1 into lp:ubuntu/raring/sphinx

Proposed by Dmitry Shachnev
Status: Merged
Merged at revision: 39
Proposed branch: lp:~mitya57/ubuntu/raring/sphinx/1.1.3+dfsg-5ubuntu1
Merge into: lp:ubuntu/raring/sphinx
Diff against target: 5032 lines (+2599/-2162)
22 files modified
.pc/applied-patches (+3/-0)
.pc/fix_manpages_generation_with_new_docutils.diff/sphinx/writers/manpage.py (+0/-345)
.pc/l10n_fixes.diff/sphinx/environment.py (+1762/-0)
.pc/sort_stopwords.diff/sphinx/search/__init__.py (+287/-0)
.pc/support_python_3.3.diff/sphinx/environment.py (+0/-1762)
.pc/test_build_html_rb.diff/tests/test_build_html.py (+339/-0)
debian/changelog (+58/-0)
debian/control (+3/-3)
debian/dh-sphinxdoc/dh_sphinxdoc (+4/-2)
debian/patches/l10n_fixes.diff (+58/-0)
debian/patches/series (+3/-0)
debian/patches/sort_stopwords.diff (+16/-0)
debian/patches/support_python_3.3.diff (+11/-35)
debian/patches/test_build_html_rb.diff (+17/-0)
debian/rules (+3/-2)
debian/sphinx-autogen.1 (+1/-1)
debian/tests/control (+0/-2)
debian/tests/python-sphinx (+1/-1)
debian/tests/python3-sphinx (+1/-1)
sphinx/environment.py (+30/-6)
sphinx/search/__init__.py (+1/-1)
tests/test_build_html.py (+1/-1)
To merge this branch: bzr merge lp:~mitya57/ubuntu/raring/sphinx/1.1.3+dfsg-5ubuntu1
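For reviewers who want to try the change locally, here is a minimal sketch of the workflow the merge command above implies (an assumption-laden illustration: it presumes only the bzr client, uses the branch names from this proposal verbatim, and the commit message is invented; the package build and upload steps are omitted):

    # check out the current Ubuntu packaging branch (target of the merge)
    bzr branch lp:ubuntu/raring/sphinx
    cd sphinx
    # merge the proposed branch and inspect the resulting changes
    bzr merge lp:~mitya57/ubuntu/raring/sphinx/1.1.3+dfsg-5ubuntu1
    bzr diff
    # if the diff looks good, record the merge locally (example message only)
    bzr commit -m "Merge 1.1.3+dfsg-5ubuntu1 from mitya57"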
Reviewer           Review Type   Status
Daniel Holbach     community     Approve
Ubuntu branches                  Pending
Review via email: mp+136582@code.launchpad.net
Daniel Holbach (dholbach) wrote:

Uploaded. Thanks.

review: Approve

Preview Diff

1=== modified file '.pc/applied-patches'
2--- .pc/applied-patches 2012-11-01 21:39:16 +0000
3+++ .pc/applied-patches 2012-11-28 07:12:20 +0000
4@@ -9,4 +9,7 @@
5 pygments_byte_strings.diff
6 fix_shorthandoff.diff
7 fix_manpages_generation_with_new_docutils.diff
8+test_build_html_rb.diff
9+sort_stopwords.diff
10 support_python_3.3.diff
11+l10n_fixes.diff
12
13=== removed directory '.pc/fix_manpages_generation_with_new_docutils.diff'
14=== removed directory '.pc/fix_manpages_generation_with_new_docutils.diff/sphinx'
15=== removed directory '.pc/fix_manpages_generation_with_new_docutils.diff/sphinx/writers'
16=== removed file '.pc/fix_manpages_generation_with_new_docutils.diff/sphinx/writers/manpage.py'
17--- .pc/fix_manpages_generation_with_new_docutils.diff/sphinx/writers/manpage.py 2012-10-22 20:20:35 +0000
18+++ .pc/fix_manpages_generation_with_new_docutils.diff/sphinx/writers/manpage.py 1970-01-01 00:00:00 +0000
19@@ -1,345 +0,0 @@
20-# -*- coding: utf-8 -*-
21-"""
22- sphinx.writers.manpage
23- ~~~~~~~~~~~~~~~~~~~~~~
24-
25- Manual page writer, extended for Sphinx custom nodes.
26-
27- :copyright: Copyright 2007-2011 by the Sphinx team, see AUTHORS.
28- :license: BSD, see LICENSE for details.
29-"""
30-
31-from docutils import nodes
32-try:
33- from docutils.writers.manpage import MACRO_DEF, Writer, \
34- Translator as BaseTranslator
35- has_manpage_writer = True
36-except ImportError:
37- # define the classes in any case, sphinx.application needs it
38- Writer = BaseTranslator = object
39- has_manpage_writer = False
40-
41-from sphinx import addnodes
42-from sphinx.locale import admonitionlabels, versionlabels, _
43-from sphinx.util.osutil import ustrftime
44-
45-
46-class ManualPageWriter(Writer):
47- def __init__(self, builder):
48- Writer.__init__(self)
49- self.builder = builder
50-
51- def translate(self):
52- visitor = ManualPageTranslator(self.builder, self.document)
53- self.visitor = visitor
54- self.document.walkabout(visitor)
55- self.output = visitor.astext()
56-
57-
58-class ManualPageTranslator(BaseTranslator):
59- """
60- Custom translator.
61- """
62-
63- def __init__(self, builder, *args, **kwds):
64- BaseTranslator.__init__(self, *args, **kwds)
65- self.builder = builder
66-
67- self.in_productionlist = 0
68-
69- # first title is the manpage title
70- self.section_level = -1
71-
72- # docinfo set by man_pages config value
73- self._docinfo['title'] = self.document.settings.title
74- self._docinfo['subtitle'] = self.document.settings.subtitle
75- if self.document.settings.authors:
76- # don't set it if no author given
77- self._docinfo['author'] = self.document.settings.authors
78- self._docinfo['manual_section'] = self.document.settings.section
79-
80- # docinfo set by other config values
81- self._docinfo['title_upper'] = self._docinfo['title'].upper()
82- if builder.config.today:
83- self._docinfo['date'] = builder.config.today
84- else:
85- self._docinfo['date'] = ustrftime(builder.config.today_fmt
86- or _('%B %d, %Y'))
87- self._docinfo['copyright'] = builder.config.copyright
88- self._docinfo['version'] = builder.config.version
89- self._docinfo['manual_group'] = builder.config.project
90-
91- # since self.append_header() is never called, need to do this here
92- self.body.append(MACRO_DEF)
93-
94- # overwritten -- added quotes around all .TH arguments
95- def header(self):
96- tmpl = (".TH \"%(title_upper)s\" \"%(manual_section)s\""
97- " \"%(date)s\" \"%(version)s\" \"%(manual_group)s\"\n"
98- ".SH NAME\n"
99- "%(title)s \- %(subtitle)s\n")
100- return tmpl % self._docinfo
101-
102- def visit_start_of_file(self, node):
103- pass
104- def depart_start_of_file(self, node):
105- pass
106-
107- def visit_desc(self, node):
108- self.visit_definition_list(node)
109- def depart_desc(self, node):
110- self.depart_definition_list(node)
111-
112- def visit_desc_signature(self, node):
113- self.visit_definition_list_item(node)
114- self.visit_term(node)
115- def depart_desc_signature(self, node):
116- self.depart_term(node)
117-
118- def visit_desc_addname(self, node):
119- pass
120- def depart_desc_addname(self, node):
121- pass
122-
123- def visit_desc_type(self, node):
124- pass
125- def depart_desc_type(self, node):
126- pass
127-
128- def visit_desc_returns(self, node):
129- self.body.append(' -> ')
130- def depart_desc_returns(self, node):
131- pass
132-
133- def visit_desc_name(self, node):
134- pass
135- def depart_desc_name(self, node):
136- pass
137-
138- def visit_desc_parameterlist(self, node):
139- self.body.append('(')
140- self.first_param = 1
141- def depart_desc_parameterlist(self, node):
142- self.body.append(')')
143-
144- def visit_desc_parameter(self, node):
145- if not self.first_param:
146- self.body.append(', ')
147- else:
148- self.first_param = 0
149- def depart_desc_parameter(self, node):
150- pass
151-
152- def visit_desc_optional(self, node):
153- self.body.append('[')
154- def depart_desc_optional(self, node):
155- self.body.append(']')
156-
157- def visit_desc_annotation(self, node):
158- pass
159- def depart_desc_annotation(self, node):
160- pass
161-
162- def visit_desc_content(self, node):
163- self.visit_definition(node)
164- def depart_desc_content(self, node):
165- self.depart_definition(node)
166-
167- def visit_refcount(self, node):
168- self.body.append(self.defs['emphasis'][0])
169- def depart_refcount(self, node):
170- self.body.append(self.defs['emphasis'][1])
171-
172- def visit_versionmodified(self, node):
173- self.visit_paragraph(node)
174- text = versionlabels[node['type']] % node['version']
175- if len(node):
176- text += ': '
177- else:
178- text += '.'
179- self.body.append(text)
180- def depart_versionmodified(self, node):
181- self.depart_paragraph(node)
182-
183- def visit_termsep(self, node):
184- self.body.append(', ')
185- raise nodes.SkipNode
186-
187- # overwritten -- we don't want source comments to show up
188- def visit_comment(self, node):
189- raise nodes.SkipNode
190-
191- # overwritten -- added ensure_eol()
192- def visit_footnote(self, node):
193- self.ensure_eol()
194- BaseTranslator.visit_footnote(self, node)
195-
196- # overwritten -- handle footnotes rubric
197- def visit_rubric(self, node):
198- self.ensure_eol()
199- if len(node.children) == 1:
200- rubtitle = node.children[0].astext()
201- if rubtitle in ('Footnotes', _('Footnotes')):
202- self.body.append('.SH ' + self.deunicode(rubtitle).upper() +
203- '\n')
204- raise nodes.SkipNode
205- else:
206- self.body.append('.sp\n')
207- def depart_rubric(self, node):
208- pass
209-
210- def visit_seealso(self, node):
211- self.visit_admonition(node)
212- def depart_seealso(self, node):
213- self.depart_admonition(node)
214-
215- # overwritten -- use our own label translations
216- def visit_admonition(self, node, name=None):
217- if name:
218- self.body.append('.IP %s\n' %
219- self.deunicode(admonitionlabels.get(name, name)))
220-
221- def visit_productionlist(self, node):
222- self.ensure_eol()
223- names = []
224- self.in_productionlist += 1
225- self.body.append('.sp\n.nf\n')
226- for production in node:
227- names.append(production['tokenname'])
228- maxlen = max(len(name) for name in names)
229- for production in node:
230- if production['tokenname']:
231- lastname = production['tokenname'].ljust(maxlen)
232- self.body.append(self.defs['strong'][0])
233- self.body.append(self.deunicode(lastname))
234- self.body.append(self.defs['strong'][1])
235- self.body.append(' ::= ')
236- else:
237- self.body.append('%s ' % (' '*len(lastname)))
238- production.walkabout(self)
239- self.body.append('\n')
240- self.body.append('\n.fi\n')
241- self.in_productionlist -= 1
242- raise nodes.SkipNode
243-
244- def visit_production(self, node):
245- pass
246- def depart_production(self, node):
247- pass
248-
249- # overwritten -- don't emit a warning for images
250- def visit_image(self, node):
251- if 'alt' in node.attributes:
252- self.body.append(_('[image: %s]') % node['alt'] + '\n')
253- self.body.append(_('[image]') + '\n')
254- raise nodes.SkipNode
255-
256- # overwritten -- don't visit inner marked up nodes
257- def visit_reference(self, node):
258- self.body.append(self.defs['reference'][0])
259- self.body.append(node.astext())
260- self.body.append(self.defs['reference'][1])
261-
262- uri = node.get('refuri', '')
263- if uri.startswith('mailto:') or uri.startswith('http:') or \
264- uri.startswith('https:') or uri.startswith('ftp:'):
265- # if configured, put the URL after the link
266- if self.builder.config.man_show_urls and \
267- node.astext() != uri:
268- if uri.startswith('mailto:'):
269- uri = uri[7:]
270- self.body.extend([
271- ' <',
272- self.defs['strong'][0], uri, self.defs['strong'][1],
273- '>'])
274- raise nodes.SkipNode
275-
276- def visit_centered(self, node):
277- self.ensure_eol()
278- self.body.append('.sp\n.ce\n')
279- def depart_centered(self, node):
280- self.body.append('\n.ce 0\n')
281-
282- def visit_compact_paragraph(self, node):
283- pass
284- def depart_compact_paragraph(self, node):
285- pass
286-
287- def visit_highlightlang(self, node):
288- pass
289- def depart_highlightlang(self, node):
290- pass
291-
292- def visit_download_reference(self, node):
293- pass
294- def depart_download_reference(self, node):
295- pass
296-
297- def visit_toctree(self, node):
298- raise nodes.SkipNode
299-
300- def visit_index(self, node):
301- raise nodes.SkipNode
302-
303- def visit_tabular_col_spec(self, node):
304- raise nodes.SkipNode
305-
306- def visit_glossary(self, node):
307- pass
308- def depart_glossary(self, node):
309- pass
310-
311- def visit_acks(self, node):
312- self.ensure_eol()
313- self.body.append(', '.join(n.astext()
314- for n in node.children[0].children) + '.')
315- self.body.append('\n')
316- raise nodes.SkipNode
317-
318- def visit_hlist(self, node):
319- self.visit_bullet_list(node)
320- def depart_hlist(self, node):
321- self.depart_bullet_list(node)
322-
323- def visit_hlistcol(self, node):
324- pass
325- def depart_hlistcol(self, node):
326- pass
327-
328- def visit_literal_emphasis(self, node):
329- return self.visit_emphasis(node)
330- def depart_literal_emphasis(self, node):
331- return self.depart_emphasis(node)
332-
333- def visit_abbreviation(self, node):
334- pass
335- def depart_abbreviation(self, node):
336- pass
337-
338- # overwritten: handle section titles better than in 0.6 release
339- def visit_title(self, node):
340- if isinstance(node.parent, addnodes.seealso):
341- self.body.append('.IP "')
342- return
343- elif isinstance(node.parent, nodes.section):
344- if self.section_level == 0:
345- # skip the document title
346- raise nodes.SkipNode
347- elif self.section_level == 1:
348- self.body.append('.SH %s\n' %
349- self.deunicode(node.astext().upper()))
350- raise nodes.SkipNode
351- return BaseTranslator.visit_title(self, node)
352- def depart_title(self, node):
353- if isinstance(node.parent, addnodes.seealso):
354- self.body.append('"\n')
355- return
356- return BaseTranslator.depart_title(self, node)
357-
358- def visit_raw(self, node):
359- if 'manpage' in node.get('format', '').split():
360- self.body.append(node.astext())
361- raise nodes.SkipNode
362-
363- def unknown_visit(self, node):
364- raise NotImplementedError('Unknown node: ' + node.__class__.__name__)
365
366=== added directory '.pc/l10n_fixes.diff'
367=== added directory '.pc/l10n_fixes.diff/sphinx'
368=== added file '.pc/l10n_fixes.diff/sphinx/environment.py'
369--- .pc/l10n_fixes.diff/sphinx/environment.py 1970-01-01 00:00:00 +0000
370+++ .pc/l10n_fixes.diff/sphinx/environment.py 2012-11-28 07:12:20 +0000
371@@ -0,0 +1,1762 @@
372+# -*- coding: utf-8 -*-
373+"""
374+ sphinx.environment
375+ ~~~~~~~~~~~~~~~~~~
376+
377+ Global creation environment.
378+
379+ :copyright: Copyright 2007-2011 by the Sphinx team, see AUTHORS.
380+ :license: BSD, see LICENSE for details.
381+"""
382+
383+import re
384+import os
385+import sys
386+import time
387+import types
388+import codecs
389+import imghdr
390+import string
391+import unicodedata
392+import cPickle as pickle
393+from os import path
394+from glob import glob
395+from itertools import izip, groupby
396+
397+from docutils import nodes
398+from docutils.io import FileInput, NullOutput
399+from docutils.core import Publisher
400+from docutils.utils import Reporter, relative_path, new_document, \
401+ get_source_line
402+from docutils.readers import standalone
403+from docutils.parsers.rst import roles, directives, Parser as RSTParser
404+from docutils.parsers.rst.languages import en as english
405+from docutils.parsers.rst.directives.html import MetaBody
406+from docutils.writers import UnfilteredWriter
407+from docutils.transforms import Transform
408+from docutils.transforms.parts import ContentsFilter
409+
410+from sphinx import addnodes
411+from sphinx.util import url_re, get_matching_docs, docname_join, split_into, \
412+ FilenameUniqDict
413+from sphinx.util.nodes import clean_astext, make_refnode, extract_messages, \
414+ WarningStream
415+from sphinx.util.osutil import movefile, SEP, ustrftime, find_catalog
416+from sphinx.util.matching import compile_matchers
417+from sphinx.util.pycompat import all, class_types
418+from sphinx.util.websupport import is_commentable
419+from sphinx.errors import SphinxError, ExtensionError
420+from sphinx.locale import _, init as init_locale
421+from sphinx.versioning import add_uids, merge_doctrees
422+
423+fs_encoding = sys.getfilesystemencoding() or sys.getdefaultencoding()
424+
425+orig_role_function = roles.role
426+orig_directive_function = directives.directive
427+
428+class ElementLookupError(Exception): pass
429+
430+
431+default_settings = {
432+ 'embed_stylesheet': False,
433+ 'cloak_email_addresses': True,
434+ 'pep_base_url': 'http://www.python.org/dev/peps/',
435+ 'rfc_base_url': 'http://tools.ietf.org/html/',
436+ 'input_encoding': 'utf-8-sig',
437+ 'doctitle_xform': False,
438+ 'sectsubtitle_xform': False,
439+ 'halt_level': 5,
440+}
441+
442+# This is increased every time an environment attribute is added
443+# or changed to properly invalidate pickle files.
444+ENV_VERSION = 41
445+
446+
447+default_substitutions = set([
448+ 'version',
449+ 'release',
450+ 'today',
451+])
452+
453+dummy_reporter = Reporter('', 4, 4)
454+
455+versioning_conditions = {
456+ 'none': False,
457+ 'text': nodes.TextElement,
458+ 'commentable': is_commentable,
459+}
460+
461+
462+class NoUri(Exception):
463+ """Raised by get_relative_uri if there is no URI available."""
464+ pass
465+
466+
467+class DefaultSubstitutions(Transform):
468+ """
469+ Replace some substitutions if they aren't defined in the document.
470+ """
471+ # run before the default Substitutions
472+ default_priority = 210
473+
474+ def apply(self):
475+ config = self.document.settings.env.config
476+ # only handle those not otherwise defined in the document
477+ to_handle = default_substitutions - set(self.document.substitution_defs)
478+ for ref in self.document.traverse(nodes.substitution_reference):
479+ refname = ref['refname']
480+ if refname in to_handle:
481+ text = config[refname]
482+ if refname == 'today' and not text:
483+ # special handling: can also specify a strftime format
484+ text = ustrftime(config.today_fmt or _('%B %d, %Y'))
485+ ref.replace_self(nodes.Text(text, text))
486+
487+
488+class MoveModuleTargets(Transform):
489+ """
490+ Move module targets that are the first thing in a section to the section
491+ title.
492+
493+ XXX Python specific
494+ """
495+ default_priority = 210
496+
497+ def apply(self):
498+ for node in self.document.traverse(nodes.target):
499+ if not node['ids']:
500+ continue
501+ if (node.has_key('ismod') and
502+ node.parent.__class__ is nodes.section and
503+ # index 0 is the section title node
504+ node.parent.index(node) == 1):
505+ node.parent['ids'][0:0] = node['ids']
506+ node.parent.remove(node)
507+
508+
509+class HandleCodeBlocks(Transform):
510+ """
511+ Several code block related transformations.
512+ """
513+ default_priority = 210
514+
515+ def apply(self):
516+ # move doctest blocks out of blockquotes
517+ for node in self.document.traverse(nodes.block_quote):
518+ if all(isinstance(child, nodes.doctest_block) for child
519+ in node.children):
520+ node.replace_self(node.children)
521+ # combine successive doctest blocks
522+ #for node in self.document.traverse(nodes.doctest_block):
523+ # if node not in node.parent.children:
524+ # continue
525+ # parindex = node.parent.index(node)
526+ # while len(node.parent) > parindex+1 and \
527+ # isinstance(node.parent[parindex+1], nodes.doctest_block):
528+ # node[0] = nodes.Text(node[0] + '\n\n' +
529+ # node.parent[parindex+1][0])
530+ # del node.parent[parindex+1]
531+
532+
533+class SortIds(Transform):
534+ """
535+ Sort secion IDs so that the "id[0-9]+" one comes last.
536+ """
537+ default_priority = 261
538+
539+ def apply(self):
540+ for node in self.document.traverse(nodes.section):
541+ if len(node['ids']) > 1 and node['ids'][0].startswith('id'):
542+ node['ids'] = node['ids'][1:] + [node['ids'][0]]
543+
544+
545+class CitationReferences(Transform):
546+ """
547+ Replace citation references by pending_xref nodes before the default
548+ docutils transform tries to resolve them.
549+ """
550+ default_priority = 619
551+
552+ def apply(self):
553+ for citnode in self.document.traverse(nodes.citation_reference):
554+ cittext = citnode.astext()
555+ refnode = addnodes.pending_xref(cittext, reftype='citation',
556+ reftarget=cittext, refwarn=True)
557+ refnode.line = citnode.line or citnode.parent.line
558+ refnode += nodes.Text('[' + cittext + ']')
559+ citnode.parent.replace(citnode, refnode)
560+
561+
562+class Locale(Transform):
563+ """
564+ Replace translatable nodes with their translated doctree.
565+ """
566+ default_priority = 0
567+ def apply(self):
568+ env = self.document.settings.env
569+ settings, source = self.document.settings, self.document['source']
570+ # XXX check if this is reliable
571+ assert source.startswith(env.srcdir)
572+ docname = path.splitext(relative_path(env.srcdir, source))[0]
573+ textdomain = find_catalog(docname,
574+ self.document.settings.gettext_compact)
575+
576+ # fetch translations
577+ dirs = [path.join(env.srcdir, directory)
578+ for directory in env.config.locale_dirs]
579+ catalog, has_catalog = init_locale(dirs, env.config.language,
580+ textdomain)
581+ if not has_catalog:
582+ return
583+
584+ parser = RSTParser()
585+
586+ for node, msg in extract_messages(self.document):
587+ patch = new_document(source, settings)
588+ msgstr = catalog.gettext(msg)
589+ # XXX add marker to untranslated parts
590+ if not msgstr or msgstr == msg: # as-of-yet untranslated
591+ continue
592+ parser.parse(msgstr, patch)
593+ patch = patch[0]
594+ # XXX doctest and other block markup
595+ if not isinstance(patch, nodes.paragraph):
596+ continue # skip for now
597+ for child in patch.children: # update leaves
598+ child.parent = node
599+ node.children = patch.children
600+
601+
602+class SphinxStandaloneReader(standalone.Reader):
603+ """
604+ Add our own transforms.
605+ """
606+ transforms = [Locale, CitationReferences, DefaultSubstitutions,
607+ MoveModuleTargets, HandleCodeBlocks, SortIds]
608+
609+ def get_transforms(self):
610+ return standalone.Reader.get_transforms(self) + self.transforms
611+
612+
613+class SphinxDummyWriter(UnfilteredWriter):
614+ supported = ('html',) # needed to keep "meta" nodes
615+
616+ def translate(self):
617+ pass
618+
619+
620+class SphinxContentsFilter(ContentsFilter):
621+ """
622+ Used with BuildEnvironment.add_toc_from() to discard cross-file links
623+ within table-of-contents link nodes.
624+ """
625+ def visit_pending_xref(self, node):
626+ text = node.astext()
627+ self.parent.append(nodes.literal(text, text))
628+ raise nodes.SkipNode
629+
630+ def visit_image(self, node):
631+ raise nodes.SkipNode
632+
633+
634+class BuildEnvironment:
635+ """
636+ The environment in which the ReST files are translated.
637+ Stores an inventory of cross-file targets and provides doctree
638+ transformations to resolve links to them.
639+ """
640+
641+ # --------- ENVIRONMENT PERSISTENCE ----------------------------------------
642+
643+ @staticmethod
644+ def frompickle(config, filename):
645+ picklefile = open(filename, 'rb')
646+ try:
647+ env = pickle.load(picklefile)
648+ finally:
649+ picklefile.close()
650+ if env.version != ENV_VERSION:
651+ raise IOError('env version not current')
652+ env.config.values = config.values
653+ return env
654+
655+ def topickle(self, filename):
656+ # remove unpicklable attributes
657+ warnfunc = self._warnfunc
658+ self.set_warnfunc(None)
659+ values = self.config.values
660+ del self.config.values
661+ domains = self.domains
662+ del self.domains
663+ # first write to a temporary file, so that if dumping fails,
664+ # the existing environment won't be overwritten
665+ picklefile = open(filename + '.tmp', 'wb')
666+ # remove potentially pickling-problematic values from config
667+ for key, val in vars(self.config).items():
668+ if key.startswith('_') or \
669+ isinstance(val, types.ModuleType) or \
670+ isinstance(val, types.FunctionType) or \
671+ isinstance(val, class_types):
672+ del self.config[key]
673+ try:
674+ pickle.dump(self, picklefile, pickle.HIGHEST_PROTOCOL)
675+ finally:
676+ picklefile.close()
677+ movefile(filename + '.tmp', filename)
678+ # reset attributes
679+ self.domains = domains
680+ self.config.values = values
681+ self.set_warnfunc(warnfunc)
682+
683+ # --------- ENVIRONMENT INITIALIZATION -------------------------------------
684+
685+ def __init__(self, srcdir, doctreedir, config):
686+ self.doctreedir = doctreedir
687+ self.srcdir = srcdir
688+ self.config = config
689+
690+ # the method of doctree versioning; see set_versioning_method
691+ self.versioning_condition = None
692+
693+ # the application object; only set while update() runs
694+ self.app = None
695+
696+ # all the registered domains, set by the application
697+ self.domains = {}
698+
699+ # the docutils settings for building
700+ self.settings = default_settings.copy()
701+ self.settings['env'] = self
702+
703+ # the function to write warning messages with
704+ self._warnfunc = None
705+
706+ # this is to invalidate old pickles
707+ self.version = ENV_VERSION
708+
709+ # make this a set for faster testing
710+ self._nitpick_ignore = set(self.config.nitpick_ignore)
711+
712+ # All "docnames" here are /-separated and relative and exclude
713+ # the source suffix.
714+
715+ self.found_docs = set() # contains all existing docnames
716+ self.all_docs = {} # docname -> mtime at the time of build
717+ # contains all built docnames
718+ self.dependencies = {} # docname -> set of dependent file
719+ # names, relative to documentation root
720+ self.reread_always = set() # docnames to re-read unconditionally on
721+ # next build
722+
723+ # File metadata
724+ self.metadata = {} # docname -> dict of metadata items
725+
726+ # TOC inventory
727+ self.titles = {} # docname -> title node
728+ self.longtitles = {} # docname -> title node; only different if
729+ # set differently with title directive
730+ self.tocs = {} # docname -> table of contents nodetree
731+ self.toc_num_entries = {} # docname -> number of real entries
732+ # used to determine when to show the TOC
733+ # in a sidebar (don't show if it's only one item)
734+ self.toc_secnumbers = {} # docname -> dict of sectionid -> number
735+
736+ self.toctree_includes = {} # docname -> list of toctree includefiles
737+ self.files_to_rebuild = {} # docname -> set of files
738+ # (containing its TOCs) to rebuild too
739+ self.glob_toctrees = set() # docnames that have :glob: toctrees
740+ self.numbered_toctrees = set() # docnames that have :numbered: toctrees
741+
742+ # domain-specific inventories, here to be pickled
743+ self.domaindata = {} # domainname -> domain-specific dict
744+
745+ # Other inventories
746+ self.citations = {} # citation name -> docname, labelid
747+ self.indexentries = {} # docname -> list of
748+ # (type, string, target, aliasname)
749+ self.versionchanges = {} # version -> list of (type, docname,
750+ # lineno, module, descname, content)
751+
752+ # these map absolute path -> (docnames, unique filename)
753+ self.images = FilenameUniqDict()
754+ self.dlfiles = FilenameUniqDict()
755+
756+ # temporary data storage while reading a document
757+ self.temp_data = {}
758+
759+ def set_warnfunc(self, func):
760+ self._warnfunc = func
761+ self.settings['warning_stream'] = WarningStream(func)
762+
763+ def set_versioning_method(self, method):
764+ """This sets the doctree versioning method for this environment.
765+
766+ Versioning methods are a builder property; only builders with the same
767+ versioning method can share the same doctree directory. Therefore, we
768+ raise an exception if the user tries to use an environment with an
769+ incompatible versioning method.
770+ """
771+ if method not in versioning_conditions:
772+ raise ValueError('invalid versioning method: %r' % method)
773+ condition = versioning_conditions[method]
774+ if self.versioning_condition not in (None, condition):
775+ raise SphinxError('This environment is incompatible with the '
776+ 'selected builder, please choose another '
777+ 'doctree directory.')
778+ self.versioning_condition = condition
779+
780+ def warn(self, docname, msg, lineno=None):
781+ # strange argument order is due to backwards compatibility
782+ self._warnfunc(msg, (docname, lineno))
783+
784+ def warn_node(self, msg, node):
785+ self._warnfunc(msg, '%s:%s' % get_source_line(node))
786+
787+ def clear_doc(self, docname):
788+ """Remove all traces of a source file in the inventory."""
789+ if docname in self.all_docs:
790+ self.all_docs.pop(docname, None)
791+ self.reread_always.discard(docname)
792+ self.metadata.pop(docname, None)
793+ self.dependencies.pop(docname, None)
794+ self.titles.pop(docname, None)
795+ self.longtitles.pop(docname, None)
796+ self.tocs.pop(docname, None)
797+ self.toc_secnumbers.pop(docname, None)
798+ self.toc_num_entries.pop(docname, None)
799+ self.toctree_includes.pop(docname, None)
800+ self.indexentries.pop(docname, None)
801+ self.glob_toctrees.discard(docname)
802+ self.numbered_toctrees.discard(docname)
803+ self.images.purge_doc(docname)
804+ self.dlfiles.purge_doc(docname)
805+
806+ for subfn, fnset in self.files_to_rebuild.items():
807+ fnset.discard(docname)
808+ if not fnset:
809+ del self.files_to_rebuild[subfn]
810+ for key, (fn, _) in self.citations.items():
811+ if fn == docname:
812+ del self.citations[key]
813+ for version, changes in self.versionchanges.items():
814+ new = [change for change in changes if change[1] != docname]
815+ changes[:] = new
816+
817+ for domain in self.domains.values():
818+ domain.clear_doc(docname)
819+
820+ def doc2path(self, docname, base=True, suffix=None):
821+ """Return the filename for the document name.
822+
823+ If *base* is True, return absolute path under self.srcdir.
824+ If *base* is None, return relative path to self.srcdir.
825+ If *base* is a path string, return absolute path under that.
826+ If *suffix* is not None, add it instead of config.source_suffix.
827+ """
828+ docname = docname.replace(SEP, path.sep)
829+ suffix = suffix or self.config.source_suffix
830+ if base is True:
831+ return path.join(self.srcdir, docname) + suffix
832+ elif base is None:
833+ return docname + suffix
834+ else:
835+ return path.join(base, docname) + suffix
836+
837+ def relfn2path(self, filename, docname=None):
838+ """Return paths to a file referenced from a document, relative to
839+ documentation root and absolute.
840+
841+ Absolute filenames are relative to the source dir, while relative
842+ filenames are relative to the dir of the containing document.
843+ """
844+ if filename.startswith('/') or filename.startswith(os.sep):
845+ rel_fn = filename[1:]
846+ else:
847+ docdir = path.dirname(self.doc2path(docname or self.docname,
848+ base=None))
849+ rel_fn = path.join(docdir, filename)
850+ try:
851+ return rel_fn, path.join(self.srcdir, rel_fn)
852+ except UnicodeDecodeError:
853+ # the source directory is a bytestring with non-ASCII characters;
854+ # let's try to encode the rel_fn in the file system encoding
855+ enc_rel_fn = rel_fn.encode(sys.getfilesystemencoding())
856+ return rel_fn, path.join(self.srcdir, enc_rel_fn)
857+
858+ def find_files(self, config):
859+ """Find all source files in the source dir and put them in
860+ self.found_docs.
861+ """
862+ matchers = compile_matchers(
863+ config.exclude_patterns[:] +
864+ config.exclude_trees +
865+ [d + config.source_suffix for d in config.unused_docs] +
866+ ['**/' + d for d in config.exclude_dirnames] +
867+ ['**/_sources', '.#*']
868+ )
869+ self.found_docs = set(get_matching_docs(
870+ self.srcdir, config.source_suffix, exclude_matchers=matchers))
871+
872+ def get_outdated_files(self, config_changed):
873+ """Return (added, changed, removed) sets."""
874+ # clear all files no longer present
875+ removed = set(self.all_docs) - self.found_docs
876+
877+ added = set()
878+ changed = set()
879+
880+ if config_changed:
881+ # config values affect e.g. substitutions
882+ added = self.found_docs
883+ else:
884+ for docname in self.found_docs:
885+ if docname not in self.all_docs:
886+ added.add(docname)
887+ continue
888+ # if the doctree file is not there, rebuild
889+ if not path.isfile(self.doc2path(docname, self.doctreedir,
890+ '.doctree')):
891+ changed.add(docname)
892+ continue
893+ # check the "reread always" list
894+ if docname in self.reread_always:
895+ changed.add(docname)
896+ continue
897+ # check the mtime of the document
898+ mtime = self.all_docs[docname]
899+ newmtime = path.getmtime(self.doc2path(docname))
900+ if newmtime > mtime:
901+ changed.add(docname)
902+ continue
903+ # finally, check the mtime of dependencies
904+ for dep in self.dependencies.get(docname, ()):
905+ try:
906+ # this will do the right thing when dep is absolute too
907+ deppath = path.join(self.srcdir, dep)
908+ if not path.isfile(deppath):
909+ changed.add(docname)
910+ break
911+ depmtime = path.getmtime(deppath)
912+ if depmtime > mtime:
913+ changed.add(docname)
914+ break
915+ except EnvironmentError:
916+ # give it another chance
917+ changed.add(docname)
918+ break
919+
920+ return added, changed, removed
921+
922+ def update(self, config, srcdir, doctreedir, app=None):
923+ """(Re-)read all files new or changed since last update.
924+
925+ Returns a summary, the total count of documents to reread and an
926+ iterator that yields docnames as it processes them. Store all
927+ environment docnames in the canonical format (ie using SEP as a
928+ separator in place of os.path.sep).
929+ """
930+ config_changed = False
931+ if self.config is None:
932+ msg = '[new config] '
933+ config_changed = True
934+ else:
935+ # check if a config value was changed that affects how
936+ # doctrees are read
937+ for key, descr in config.values.iteritems():
938+ if descr[1] != 'env':
939+ continue
940+ if self.config[key] != config[key]:
941+ msg = '[config changed] '
942+ config_changed = True
943+ break
944+ else:
945+ msg = ''
946+ # this value is not covered by the above loop because it is handled
947+ # specially by the config class
948+ if self.config.extensions != config.extensions:
949+ msg = '[extensions changed] '
950+ config_changed = True
951+ # the source and doctree directories may have been relocated
952+ self.srcdir = srcdir
953+ self.doctreedir = doctreedir
954+ self.find_files(config)
955+ self.config = config
956+
957+ added, changed, removed = self.get_outdated_files(config_changed)
958+
959+ # allow user intervention as well
960+ for docs in app.emit('env-get-outdated', self, added, changed, removed):
961+ changed.update(set(docs) & self.found_docs)
962+
963+ # if files were added or removed, all documents with globbed toctrees
964+ # must be reread
965+ if added or removed:
966+ # ... but not those that already were removed
967+ changed.update(self.glob_toctrees & self.found_docs)
968+
969+ msg += '%s added, %s changed, %s removed' % (len(added), len(changed),
970+ len(removed))
971+
972+ def update_generator():
973+ self.app = app
974+
975+ # clear all files no longer present
976+ for docname in removed:
977+ if app:
978+ app.emit('env-purge-doc', self, docname)
979+ self.clear_doc(docname)
980+
981+ # read all new and changed files
982+ for docname in sorted(added | changed):
983+ yield docname
984+ self.read_doc(docname, app=app)
985+
986+ if config.master_doc not in self.all_docs:
987+ self.warn(None, 'master file %s not found' %
988+ self.doc2path(config.master_doc))
989+
990+ self.app = None
991+ if app:
992+ app.emit('env-updated', self)
993+
994+ return msg, len(added | changed), update_generator()
995+
996+ def check_dependents(self, already):
997+ to_rewrite = self.assign_section_numbers()
998+ for docname in to_rewrite:
999+ if docname not in already:
1000+ yield docname
1001+
1002+ # --------- SINGLE FILE READING --------------------------------------------
1003+
1004+ def warn_and_replace(self, error):
1005+ """Custom decoding error handler that warns and replaces."""
1006+ linestart = error.object.rfind('\n', 0, error.start)
1007+ lineend = error.object.find('\n', error.start)
1008+ if lineend == -1: lineend = len(error.object)
1009+ lineno = error.object.count('\n', 0, error.start) + 1
1010+ self.warn(self.docname, 'undecodable source characters, '
1011+ 'replacing with "?": %r' %
1012+ (error.object[linestart+1:error.start] + '>>>' +
1013+ error.object[error.start:error.end] + '<<<' +
1014+ error.object[error.end:lineend]), lineno)
1015+ return (u'?', error.end)
1016+
1017+ def lookup_domain_element(self, type, name):
1018+ """Lookup a markup element (directive or role), given its name which can
1019+ be a full name (with domain).
1020+ """
1021+ name = name.lower()
1022+ # explicit domain given?
1023+ if ':' in name:
1024+ domain_name, name = name.split(':', 1)
1025+ if domain_name in self.domains:
1026+ domain = self.domains[domain_name]
1027+ element = getattr(domain, type)(name)
1028+ if element is not None:
1029+ return element, []
1030+ # else look in the default domain
1031+ else:
1032+ def_domain = self.temp_data.get('default_domain')
1033+ if def_domain is not None:
1034+ element = getattr(def_domain, type)(name)
1035+ if element is not None:
1036+ return element, []
1037+ # always look in the std domain
1038+ element = getattr(self.domains['std'], type)(name)
1039+ if element is not None:
1040+ return element, []
1041+ raise ElementLookupError
1042+
1043+ def patch_lookup_functions(self):
1044+ """Monkey-patch directive and role dispatch, so that domain-specific
1045+ markup takes precedence.
1046+ """
1047+ def directive(name, lang_module, document):
1048+ try:
1049+ return self.lookup_domain_element('directive', name)
1050+ except ElementLookupError:
1051+ return orig_directive_function(name, lang_module, document)
1052+
1053+ def role(name, lang_module, lineno, reporter):
1054+ try:
1055+ return self.lookup_domain_element('role', name)
1056+ except ElementLookupError:
1057+ return orig_role_function(name, lang_module, lineno, reporter)
1058+
1059+ directives.directive = directive
1060+ roles.role = role
1061+
1062+ def read_doc(self, docname, src_path=None, save_parsed=True, app=None):
1063+ """Parse a file and add/update inventory entries for the doctree.
1064+
1065+ If srcpath is given, read from a different source file.
1066+ """
1067+ # remove all inventory entries for that file
1068+ if app:
1069+ app.emit('env-purge-doc', self, docname)
1070+
1071+ self.clear_doc(docname)
1072+
1073+ if src_path is None:
1074+ src_path = self.doc2path(docname)
1075+
1076+ self.temp_data['docname'] = docname
1077+ # defaults to the global default, but can be re-set in a document
1078+ self.temp_data['default_domain'] = \
1079+ self.domains.get(self.config.primary_domain)
1080+
1081+ self.settings['input_encoding'] = self.config.source_encoding
1082+ self.settings['trim_footnote_reference_space'] = \
1083+ self.config.trim_footnote_reference_space
1084+ self.settings['gettext_compact'] = self.config.gettext_compact
1085+
1086+ self.patch_lookup_functions()
1087+
1088+ if self.config.default_role:
1089+ role_fn, messages = roles.role(self.config.default_role, english,
1090+ 0, dummy_reporter)
1091+ if role_fn:
1092+ roles._roles[''] = role_fn
1093+ else:
1094+ self.warn(docname, 'default role %s not found' %
1095+ self.config.default_role)
1096+
1097+ codecs.register_error('sphinx', self.warn_and_replace)
1098+
1099+ class SphinxSourceClass(FileInput):
1100+ def __init__(self_, *args, **kwds):
1101+ # don't call sys.exit() on IOErrors
1102+ kwds['handle_io_errors'] = False
1103+ FileInput.__init__(self_, *args, **kwds)
1104+
1105+ def decode(self_, data):
1106+ if isinstance(data, unicode):
1107+ return data
1108+ return data.decode(self_.encoding, 'sphinx')
1109+
1110+ def read(self_):
1111+ data = FileInput.read(self_)
1112+ if app:
1113+ arg = [data]
1114+ app.emit('source-read', docname, arg)
1115+ data = arg[0]
1116+ if self.config.rst_epilog:
1117+ data = data + '\n' + self.config.rst_epilog + '\n'
1118+ if self.config.rst_prolog:
1119+ data = self.config.rst_prolog + '\n' + data
1120+ return data
1121+
1122+ # publish manually
1123+ pub = Publisher(reader=SphinxStandaloneReader(),
1124+ writer=SphinxDummyWriter(),
1125+ source_class=SphinxSourceClass,
1126+ destination_class=NullOutput)
1127+ pub.set_components(None, 'restructuredtext', None)
1128+ pub.process_programmatic_settings(None, self.settings, None)
1129+ pub.set_source(None, src_path.encode(fs_encoding))
1130+ pub.set_destination(None, None)
1131+ try:
1132+ pub.publish()
1133+ doctree = pub.document
1134+ except UnicodeError, err:
1135+ raise SphinxError(str(err))
1136+
1137+ # post-processing
1138+ self.filter_messages(doctree)
1139+ self.process_dependencies(docname, doctree)
1140+ self.process_images(docname, doctree)
1141+ self.process_downloads(docname, doctree)
1142+ self.process_metadata(docname, doctree)
1143+ self.process_refonly_bullet_lists(docname, doctree)
1144+ self.create_title_from(docname, doctree)
1145+ self.note_indexentries_from(docname, doctree)
1146+ self.note_citations_from(docname, doctree)
1147+ self.build_toc_from(docname, doctree)
1148+ for domain in self.domains.itervalues():
1149+ domain.process_doc(self, docname, doctree)
1150+
1151+ # allow extension-specific post-processing
1152+ if app:
1153+ app.emit('doctree-read', doctree)
1154+
1155+ # store time of build, for outdated files detection
1156+ self.all_docs[docname] = time.time()
1157+
1158+ if self.versioning_condition:
1159+ # get old doctree
1160+ try:
1161+ f = open(self.doc2path(docname,
1162+ self.doctreedir, '.doctree'), 'rb')
1163+ try:
1164+ old_doctree = pickle.load(f)
1165+ finally:
1166+ f.close()
1167+ except EnvironmentError:
1168+ old_doctree = None
1169+
1170+ # add uids for versioning
1171+ if old_doctree is None:
1172+ list(add_uids(doctree, self.versioning_condition))
1173+ else:
1174+ list(merge_doctrees(
1175+ old_doctree, doctree, self.versioning_condition))
1176+
1177+ # make it picklable
1178+ doctree.reporter = None
1179+ doctree.transformer = None
1180+ doctree.settings.warning_stream = None
1181+ doctree.settings.env = None
1182+ doctree.settings.record_dependencies = None
1183+ for metanode in doctree.traverse(MetaBody.meta):
1184+ # docutils' meta nodes aren't picklable because the class is nested
1185+ metanode.__class__ = addnodes.meta
1186+
1187+ # cleanup
1188+ self.temp_data.clear()
1189+
1190+ if save_parsed:
1191+ # save the parsed doctree
1192+ doctree_filename = self.doc2path(docname, self.doctreedir,
1193+ '.doctree')
1194+ dirname = path.dirname(doctree_filename)
1195+ if not path.isdir(dirname):
1196+ os.makedirs(dirname)
1197+ f = open(doctree_filename, 'wb')
1198+ try:
1199+ pickle.dump(doctree, f, pickle.HIGHEST_PROTOCOL)
1200+ finally:
1201+ f.close()
1202+ else:
1203+ return doctree
1204+
1205+ # utilities to use while reading a document
1206+
1207+ @property
1208+ def docname(self):
1209+ """Backwards compatible alias."""
1210+ return self.temp_data['docname']
1211+
1212+ @property
1213+ def currmodule(self):
1214+ """Backwards compatible alias."""
1215+ return self.temp_data.get('py:module')
1216+
1217+ @property
1218+ def currclass(self):
1219+ """Backwards compatible alias."""
1220+ return self.temp_data.get('py:class')
1221+
1222+ def new_serialno(self, category=''):
1223+ """Return a serial number, e.g. for index entry targets."""
1224+ key = category + 'serialno'
1225+ cur = self.temp_data.get(key, 0)
1226+ self.temp_data[key] = cur + 1
1227+ return cur
1228+
1229+ def note_dependency(self, filename):
1230+ self.dependencies.setdefault(self.docname, set()).add(filename)
1231+
1232+ def note_reread(self):
1233+ self.reread_always.add(self.docname)
1234+
1235+ def note_versionchange(self, type, version, node, lineno):
1236+ self.versionchanges.setdefault(version, []).append(
1237+ (type, self.temp_data['docname'], lineno,
1238+ self.temp_data.get('py:module'),
1239+ self.temp_data.get('object'), node.astext()))
1240+
1241+ # post-processing of read doctrees
1242+
1243+ def filter_messages(self, doctree):
1244+ """Filter system messages from a doctree."""
1245+ filterlevel = self.config.keep_warnings and 2 or 5
1246+ for node in doctree.traverse(nodes.system_message):
1247+ if node['level'] < filterlevel:
1248+ node.parent.remove(node)
1249+
1250+
1251+ def process_dependencies(self, docname, doctree):
1252+ """Process docutils-generated dependency info."""
1253+ cwd = os.getcwd()
1254+ frompath = path.join(path.normpath(self.srcdir), 'dummy')
1255+ deps = doctree.settings.record_dependencies
1256+ if not deps:
1257+ return
1258+ for dep in deps.list:
1259+ # the dependency path is relative to the working dir, so get
1260+ # one relative to the srcdir
1261+ relpath = relative_path(frompath,
1262+ path.normpath(path.join(cwd, dep)))
1263+ self.dependencies.setdefault(docname, set()).add(relpath)
1264+
1265+ def process_downloads(self, docname, doctree):
1266+ """Process downloadable file paths. """
1267+ for node in doctree.traverse(addnodes.download_reference):
1268+ targetname = node['reftarget']
1269+ rel_filename, filename = self.relfn2path(targetname, docname)
1270+ self.dependencies.setdefault(docname, set()).add(rel_filename)
1271+ if not os.access(filename, os.R_OK):
1272+ self.warn_node('download file not readable: %s' % filename,
1273+ node)
1274+ continue
1275+ uniquename = self.dlfiles.add_file(docname, filename)
1276+ node['filename'] = uniquename
1277+
1278+ def process_images(self, docname, doctree):
1279+ """Process and rewrite image URIs."""
1280+ for node in doctree.traverse(nodes.image):
1281+ # Map the mimetype to the corresponding image. The writer may
1282+ # choose the best image from these candidates. The special key * is
1283+ # set if there is only single candidate to be used by a writer.
1284+ # The special key ? is set for nonlocal URIs.
1285+ node['candidates'] = candidates = {}
1286+ imguri = node['uri']
1287+ if imguri.find('://') != -1:
1288+ self.warn_node('nonlocal image URI found: %s' % imguri, node)
1289+ candidates['?'] = imguri
1290+ continue
1291+ rel_imgpath, full_imgpath = self.relfn2path(imguri, docname)
1292+ # set imgpath as default URI
1293+ node['uri'] = rel_imgpath
1294+ if rel_imgpath.endswith(os.extsep + '*'):
1295+ for filename in glob(full_imgpath):
1296+ new_imgpath = relative_path(self.srcdir, filename)
1297+ if filename.lower().endswith('.pdf'):
1298+ candidates['application/pdf'] = new_imgpath
1299+ elif filename.lower().endswith('.svg'):
1300+ candidates['image/svg+xml'] = new_imgpath
1301+ else:
1302+ try:
1303+ f = open(filename, 'rb')
1304+ try:
1305+ imgtype = imghdr.what(f)
1306+ finally:
1307+ f.close()
1308+ except (OSError, IOError), err:
1309+ self.warn_node('image file %s not readable: %s' %
1310+ (filename, err), node)
1311+ if imgtype:
1312+ candidates['image/' + imgtype] = new_imgpath
1313+ else:
1314+ candidates['*'] = rel_imgpath
1315+ # map image paths to unique image names (so that they can be put
1316+ # into a single directory)
1317+ for imgpath in candidates.itervalues():
1318+ self.dependencies.setdefault(docname, set()).add(imgpath)
1319+ if not os.access(path.join(self.srcdir, imgpath), os.R_OK):
1320+ self.warn_node('image file not readable: %s' % imgpath,
1321+ node)
1322+ continue
1323+ self.images.add_file(docname, imgpath)
1324+
1325+ def process_metadata(self, docname, doctree):
1326+ """Process the docinfo part of the doctree as metadata.
1327+
1328+ Keep processing minimal -- just return what docutils says.
1329+ """
1330+ self.metadata[docname] = md = {}
1331+ try:
1332+ docinfo = doctree[0]
1333+ except IndexError:
1334+ # probably an empty document
1335+ return
1336+ if docinfo.__class__ is not nodes.docinfo:
1337+ # nothing to see here
1338+ return
1339+ for node in docinfo:
1340+ # nodes are multiply inherited...
1341+ if isinstance(node, nodes.authors):
1342+ md['authors'] = [author.astext() for author in node]
1343+ elif isinstance(node, nodes.TextElement): # e.g. author
1344+ md[node.__class__.__name__] = node.astext()
1345+ else:
1346+ name, body = node
1347+ md[name.astext()] = body.astext()
1348+ del doctree[0]
1349+
1350+ def process_refonly_bullet_lists(self, docname, doctree):
1351+ """Change refonly bullet lists to use compact_paragraphs.
1352+
1353+ Specifically implemented for 'Indices and Tables' section, which looks
1354+ odd when html_compact_lists is false.
1355+ """
1356+ if self.config.html_compact_lists:
1357+ return
1358+
1359+ class RefOnlyListChecker(nodes.GenericNodeVisitor):
1360+ """Raise `nodes.NodeFound` if non-simple list item is encountered.
1361+
1362+ Here 'simple' means a list item containing only a paragraph with a
1363+ single reference in it.
1364+ """
1365+
1366+ def default_visit(self, node):
1367+ raise nodes.NodeFound
1368+
1369+ def visit_bullet_list(self, node):
1370+ pass
1371+
1372+ def visit_list_item(self, node):
1373+ children = []
1374+ for child in node.children:
1375+ if not isinstance(child, nodes.Invisible):
1376+ children.append(child)
1377+ if len(children) != 1:
1378+ raise nodes.NodeFound
1379+ if not isinstance(children[0], nodes.paragraph):
1380+ raise nodes.NodeFound
1381+ para = children[0]
1382+ if len(para) != 1:
1383+ raise nodes.NodeFound
1384+ if not isinstance(para[0], addnodes.pending_xref):
1385+ raise nodes.NodeFound
1386+ raise nodes.SkipChildren
1387+
1388+ def invisible_visit(self, node):
1389+ """Invisible nodes should be ignored."""
1390+ pass
1391+
1392+ def check_refonly_list(node):
1393+ """Check for list with only references in it."""
1394+ visitor = RefOnlyListChecker(doctree)
1395+ try:
1396+ node.walk(visitor)
1397+ except nodes.NodeFound:
1398+ return False
1399+ else:
1400+ return True
1401+
1402+ for node in doctree.traverse(nodes.bullet_list):
1403+ if check_refonly_list(node):
1404+ for item in node.traverse(nodes.list_item):
1405+ para = item[0]
1406+ ref = para[0]
1407+ compact_para = addnodes.compact_paragraph()
1408+ compact_para += ref
1409+ item.replace(para, compact_para)
1410+
1411+ def create_title_from(self, docname, document):
1412+ """Add a title node to the document (just copy the first section title),
1413+ and store that title in the environment.
1414+ """
1415+ titlenode = nodes.title()
1416+ longtitlenode = titlenode
1417+ # explicit title set with title directive; use this only for
1418+ # the <title> tag in HTML output
1419+ if document.has_key('title'):
1420+ longtitlenode = nodes.title()
1421+ longtitlenode += nodes.Text(document['title'])
1422+ # look for first section title and use that as the title
1423+ for node in document.traverse(nodes.section):
1424+ visitor = SphinxContentsFilter(document)
1425+ node[0].walkabout(visitor)
1426+ titlenode += visitor.get_entry_text()
1427+ break
1428+ else:
1429+ # document has no title
1430+ titlenode += nodes.Text('<no title>')
1431+ self.titles[docname] = titlenode
1432+ self.longtitles[docname] = longtitlenode
1433+
1434+ def note_indexentries_from(self, docname, document):
1435+ entries = self.indexentries[docname] = []
1436+ for node in document.traverse(addnodes.index):
1437+ entries.extend(node['entries'])
1438+
1439+ def note_citations_from(self, docname, document):
1440+ for node in document.traverse(nodes.citation):
1441+ label = node[0].astext()
1442+ if label in self.citations:
1443+ self.warn_node('duplicate citation %s, ' % label +
1444+ 'other instance in %s' % self.doc2path(
1445+ self.citations[label][0]), node)
1446+ self.citations[label] = (docname, node['ids'][0])
1447+
1448+ def note_toctree(self, docname, toctreenode):
1449+ """Note a TOC tree directive in a document and gather information about
1450+ file relations from it.
1451+ """
1452+ if toctreenode['glob']:
1453+ self.glob_toctrees.add(docname)
1454+ if toctreenode.get('numbered'):
1455+ self.numbered_toctrees.add(docname)
1456+ includefiles = toctreenode['includefiles']
1457+ for includefile in includefiles:
1458+ # note that if the included file is rebuilt, this one must be
1459+ # too (since the TOC of the included file could have changed)
1460+ self.files_to_rebuild.setdefault(includefile, set()).add(docname)
1461+ self.toctree_includes.setdefault(docname, []).extend(includefiles)
1462+
1463+ def build_toc_from(self, docname, document):
1464+ """Build a TOC from the doctree and store it in the inventory."""
1465+ numentries = [0] # nonlocal again...
1466+
1467+ try:
1468+ maxdepth = int(self.metadata[docname].get('tocdepth', 0))
1469+ except ValueError:
1470+ maxdepth = 0
1471+
1472+ def traverse_in_section(node, cls):
1473+ """Like traverse(), but stay within the same section."""
1474+ result = []
1475+ if isinstance(node, cls):
1476+ result.append(node)
1477+ for child in node.children:
1478+ if isinstance(child, nodes.section):
1479+ continue
1480+ result.extend(traverse_in_section(child, cls))
1481+ return result
1482+
1483+ def build_toc(node, depth=1):
1484+ entries = []
1485+ for sectionnode in node:
1486+ # find all toctree nodes in this section and add them
1487+ # to the toc (just copying the toctree node which is then
1488+ # resolved in self.get_and_resolve_doctree)
1489+ if isinstance(sectionnode, addnodes.only):
1490+ onlynode = addnodes.only(expr=sectionnode['expr'])
1491+ blist = build_toc(sectionnode, depth)
1492+ if blist:
1493+ onlynode += blist.children
1494+ entries.append(onlynode)
1495+ if not isinstance(sectionnode, nodes.section):
1496+ for toctreenode in traverse_in_section(sectionnode,
1497+ addnodes.toctree):
1498+ item = toctreenode.copy()
1499+ entries.append(item)
1500+ # important: do the inventory stuff
1501+ self.note_toctree(docname, toctreenode)
1502+ continue
1503+ title = sectionnode[0]
1504+ # copy the contents of the section title, but without references
1505+ # and unnecessary stuff
1506+ visitor = SphinxContentsFilter(document)
1507+ title.walkabout(visitor)
1508+ nodetext = visitor.get_entry_text()
1509+ if not numentries[0]:
1510+ # for the very first toc entry, don't add an anchor
1511+ # as it is the file's title anyway
1512+ anchorname = ''
1513+ else:
1514+ anchorname = '#' + sectionnode['ids'][0]
1515+ numentries[0] += 1
1516+ # make these nodes:
1517+ # list_item -> compact_paragraph -> reference
1518+ reference = nodes.reference(
1519+ '', '', internal=True, refuri=docname,
1520+ anchorname=anchorname, *nodetext)
1521+ para = addnodes.compact_paragraph('', '', reference)
1522+ item = nodes.list_item('', para)
1523+ if maxdepth == 0 or depth < maxdepth:
1524+ item += build_toc(sectionnode, depth+1)
1525+ entries.append(item)
1526+ if entries:
1527+ return nodes.bullet_list('', *entries)
1528+ return []
1529+ toc = build_toc(document)
1530+ if toc:
1531+ self.tocs[docname] = toc
1532+ else:
1533+ self.tocs[docname] = nodes.bullet_list('')
1534+ self.toc_num_entries[docname] = numentries[0]
1535+
1536+ def get_toc_for(self, docname, builder):
1537+ """Return a TOC nodetree -- for use on the same page only!"""
1538+ try:
1539+ toc = self.tocs[docname].deepcopy()
1540+ except KeyError:
1541+ # the document does not exist anymore: return a dummy node that
1542+ # renders to nothing
1543+ return nodes.paragraph()
1544+ self.process_only_nodes(toc, builder, docname)
1545+ for node in toc.traverse(nodes.reference):
1546+ node['refuri'] = node['anchorname'] or '#'
1547+ return toc
1548+
1549+ def get_toctree_for(self, docname, builder, collapse, **kwds):
1550+ """Return the global TOC nodetree."""
1551+ doctree = self.get_doctree(self.config.master_doc)
1552+ toctrees = []
1553+ if 'includehidden' not in kwds:
1554+ kwds['includehidden'] = True
1555+ if 'maxdepth' not in kwds:
1556+ kwds['maxdepth'] = 0
1557+ kwds['collapse'] = collapse
1558+ for toctreenode in doctree.traverse(addnodes.toctree):
1559+ toctree = self.resolve_toctree(docname, builder, toctreenode,
1560+ prune=True, **kwds)
1561+ toctrees.append(toctree)
1562+ if not toctrees:
1563+ return None
1564+ result = toctrees[0]
1565+ for toctree in toctrees[1:]:
1566+ result.extend(toctree.children)
1567+ return result
1568+
1569+ def get_domain(self, domainname):
1570+ """Return the domain instance with the specified name.
1571+
1572+ Raises an ExtensionError if the domain is not registered.
1573+ """
1574+ try:
1575+ return self.domains[domainname]
1576+ except KeyError:
1577+ raise ExtensionError('Domain %r is not registered' % domainname)
1578+
1579+ # --------- RESOLVING REFERENCES AND TOCTREES ------------------------------
1580+
1581+ def get_doctree(self, docname):
1582+ """Read the doctree for a file from the pickle and return it."""
1583+ doctree_filename = self.doc2path(docname, self.doctreedir, '.doctree')
1584+ f = open(doctree_filename, 'rb')
1585+ try:
1586+ doctree = pickle.load(f)
1587+ finally:
1588+ f.close()
1589+ doctree.settings.env = self
1590+ doctree.reporter = Reporter(self.doc2path(docname), 2, 5,
1591+ stream=WarningStream(self._warnfunc))
1592+ return doctree
1593+
1594+
1595+ def get_and_resolve_doctree(self, docname, builder, doctree=None,
1596+ prune_toctrees=True):
1597+ """Read the doctree from the pickle, resolve cross-references and
1598+ toctrees and return it.
1599+ """
1600+ if doctree is None:
1601+ doctree = self.get_doctree(docname)
1602+
1603+ # resolve all pending cross-references
1604+ self.resolve_references(doctree, docname, builder)
1605+
1606+ # now, resolve all toctree nodes
1607+ for toctreenode in doctree.traverse(addnodes.toctree):
1608+ result = self.resolve_toctree(docname, builder, toctreenode,
1609+ prune=prune_toctrees)
1610+ if result is None:
1611+ toctreenode.replace_self([])
1612+ else:
1613+ toctreenode.replace_self(result)
1614+
1615+ return doctree
1616+
1617+ def resolve_toctree(self, docname, builder, toctree, prune=True, maxdepth=0,
1618+ titles_only=False, collapse=False, includehidden=False):
1619+ """Resolve a *toctree* node into individual bullet lists with titles
1620+ as items, returning None (if no containing titles are found) or
1621+ a new node.
1622+
1623+ If *prune* is True, the tree is pruned to *maxdepth*, or if that is 0,
1624+ to the value of the *maxdepth* option on the *toctree* node.
1625+ If *titles_only* is True, only toplevel document titles will be in the
1626+ resulting tree.
1627+ If *collapse* is True, all branches not containing docname will
1628+ be collapsed.
1629+ """
1630+ if toctree.get('hidden', False) and not includehidden:
1631+ return None
1632+
1633+ def _walk_depth(node, depth, maxdepth):
1634+ """Utility: Cut a TOC at a specified depth."""
1635+
1636+ # For reading this function, it is useful to keep in mind the node
1637+ # structure of a toctree (using HTML-like node names for brevity):
1638+ #
1639+ # <ul>
1640+ # <li>
1641+ # <p><a></p>
1642+ # <p><a></p>
1643+ # ...
1644+ # <ul>
1645+ # ...
1646+ # </ul>
1647+ # </li>
1648+ # </ul>
1649+
1650+ for subnode in node.children[:]:
1651+ if isinstance(subnode, (addnodes.compact_paragraph,
1652+ nodes.list_item)):
1653+ # for <p> and <li>, just indicate the depth level and
1654+ # recurse to children
1655+ subnode['classes'].append('toctree-l%d' % (depth-1))
1656+ _walk_depth(subnode, depth, maxdepth)
1657+
1658+ elif isinstance(subnode, nodes.bullet_list):
1659+ # for <ul>, determine if the depth is too large or if the
1660+ # entry is to be collapsed
1661+ if maxdepth > 0 and depth > maxdepth:
1662+ subnode.parent.replace(subnode, [])
1663+ else:
1664+ # to find out what to collapse, *first* walk subitems,
1665+ # since that determines which children point to the
1666+ # current page
1667+ _walk_depth(subnode, depth+1, maxdepth)
1668+ # cull sub-entries whose parents aren't 'current'
1669+ if (collapse and depth > 1 and
1670+ 'iscurrent' not in subnode.parent):
1671+ subnode.parent.remove(subnode)
1672+
1673+ elif isinstance(subnode, nodes.reference):
1674+ # for <a>, identify which entries point to the current
1675+ # document and therefore may not be collapsed
1676+ if subnode['refuri'] == docname:
1677+ if not subnode['anchorname']:
1678+ # give the whole branch a 'current' class
1679+ # (useful for styling it differently)
1680+ branchnode = subnode
1681+ while branchnode:
1682+ branchnode['classes'].append('current')
1683+ branchnode = branchnode.parent
1684+ # mark the list_item as "on current page"
1685+ if subnode.parent.parent.get('iscurrent'):
1686+ # but only if it's not already done
1687+ return
1688+ while subnode:
1689+ subnode['iscurrent'] = True
1690+ subnode = subnode.parent
1691+
1692+ def _entries_from_toctree(toctreenode, parents,
1693+ separate=False, subtree=False):
1694+ """Return TOC entries for a toctree node."""
1695+ refs = [(e[0], str(e[1])) for e in toctreenode['entries']]
1696+ entries = []
1697+ for (title, ref) in refs:
1698+ try:
1699+ refdoc = None
1700+ if url_re.match(ref):
1701+ reference = nodes.reference('', '', internal=False,
1702+ refuri=ref, anchorname='',
1703+ *[nodes.Text(title)])
1704+ para = addnodes.compact_paragraph('', '', reference)
1705+ item = nodes.list_item('', para)
1706+ toc = nodes.bullet_list('', item)
1707+ elif ref == 'self':
1708+ # 'self' refers to the document from which this
1709+ # toctree originates
1710+ ref = toctreenode['parent']
1711+ if not title:
1712+ title = clean_astext(self.titles[ref])
1713+ reference = nodes.reference('', '', internal=True,
1714+ refuri=ref,
1715+ anchorname='',
1716+ *[nodes.Text(title)])
1717+ para = addnodes.compact_paragraph('', '', reference)
1718+ item = nodes.list_item('', para)
1719+ # don't show subitems
1720+ toc = nodes.bullet_list('', item)
1721+ else:
1722+ if ref in parents:
1723+ self.warn(ref, 'circular toctree references '
1724+ 'detected, ignoring: %s <- %s' %
1725+ (ref, ' <- '.join(parents)))
1726+ continue
1727+ refdoc = ref
1728+ toc = self.tocs[ref].deepcopy()
1729+ self.process_only_nodes(toc, builder, ref)
1730+ if title and toc.children and len(toc.children) == 1:
1731+ child = toc.children[0]
1732+ for refnode in child.traverse(nodes.reference):
1733+ if refnode['refuri'] == ref and \
1734+ not refnode['anchorname']:
1735+ refnode.children = [nodes.Text(title)]
1736+ if not toc.children:
1737+ # empty toc means: no titles will show up in the toctree
1738+ self.warn_node(
1739+ 'toctree contains reference to document %r that '
1740+ 'doesn\'t have a title: no link will be generated'
1741+ % ref, toctreenode)
1742+ except KeyError:
1743+ # this is raised if the included file does not exist
1744+ self.warn_node(
1745+ 'toctree contains reference to nonexisting document %r'
1746+ % ref, toctreenode)
1747+ else:
1748+ # if titles_only is given, only keep the main title and
1749+ # sub-toctrees
1750+ if titles_only:
1751+ # delete everything but the toplevel title(s)
1752+ # and toctrees
1753+ for toplevel in toc:
1754+ # nodes with length 1 don't have any children anyway
1755+ if len(toplevel) > 1:
1756+ subtrees = toplevel.traverse(addnodes.toctree)
1757+ toplevel[1][:] = subtrees
1758+ # resolve all sub-toctrees
1759+ for toctreenode in toc.traverse(addnodes.toctree):
1760+ if not (toctreenode.get('hidden', False)
1761+ and not includehidden):
1762+ i = toctreenode.parent.index(toctreenode) + 1
1763+ for item in _entries_from_toctree(
1764+ toctreenode, [refdoc] + parents,
1765+ subtree=True):
1766+ toctreenode.parent.insert(i, item)
1767+ i += 1
1768+ toctreenode.parent.remove(toctreenode)
1769+ if separate:
1770+ entries.append(toc)
1771+ else:
1772+ entries.extend(toc.children)
1773+ if not subtree and not separate:
1774+ ret = nodes.bullet_list()
1775+ ret += entries
1776+ return [ret]
1777+ return entries
1778+
1779+ maxdepth = maxdepth or toctree.get('maxdepth', -1)
1780+ if not titles_only and toctree.get('titlesonly', False):
1781+ titles_only = True
1782+
1783+ # NOTE: previously, this was separate=True, but that leads to artificial
1784+ # separation when two or more toctree entries form a logical unit, so
1785+ # separating mode is no longer used -- it's kept here for history's sake
1786+ tocentries = _entries_from_toctree(toctree, [], separate=False)
1787+ if not tocentries:
1788+ return None
1789+
1790+ newnode = addnodes.compact_paragraph('', '', *tocentries)
1791+ newnode['toctree'] = True
1792+
1793+ # prune the tree to maxdepth and replace titles, also set level classes
1794+ _walk_depth(newnode, 1, prune and maxdepth or 0)
1795+
1796+ # set the target paths in the toctrees (they are not known at TOC
1797+ # generation time)
1798+ for refnode in newnode.traverse(nodes.reference):
1799+ if not url_re.match(refnode['refuri']):
1800+ refnode['refuri'] = builder.get_relative_uri(
1801+ docname, refnode['refuri']) + refnode['anchorname']
1802+ return newnode
1803+
1804+ def resolve_references(self, doctree, fromdocname, builder):
1805+ for node in doctree.traverse(addnodes.pending_xref):
1806+ contnode = node[0].deepcopy()
1807+ newnode = None
1808+
1809+ typ = node['reftype']
1810+ target = node['reftarget']
1811+ refdoc = node.get('refdoc', fromdocname)
1812+ domain = None
1813+
1814+ try:
1815+ if 'refdomain' in node and node['refdomain']:
1816+ # let the domain try to resolve the reference
1817+ try:
1818+ domain = self.domains[node['refdomain']]
1819+ except KeyError:
1820+ raise NoUri
1821+ newnode = domain.resolve_xref(self, fromdocname, builder,
1822+ typ, target, node, contnode)
1823+ # really hardwired reference types
1824+ elif typ == 'doc':
1825+ # directly reference to document by source name;
1826+ # can be absolute or relative
1827+ docname = docname_join(refdoc, target)
1828+ if docname in self.all_docs:
1829+ if node['refexplicit']:
1830+ # reference with explicit title
1831+ caption = node.astext()
1832+ else:
1833+ caption = clean_astext(self.titles[docname])
1834+ innernode = nodes.emphasis(caption, caption)
1835+ newnode = nodes.reference('', '', internal=True)
1836+ newnode['refuri'] = builder.get_relative_uri(
1837+ fromdocname, docname)
1838+ newnode.append(innernode)
1839+ elif typ == 'citation':
1840+ docname, labelid = self.citations.get(target, ('', ''))
1841+ if docname:
1842+ newnode = make_refnode(builder, fromdocname, docname,
1843+ labelid, contnode)
1844+ # no new node found? try the missing-reference event
1845+ if newnode is None:
1846+ newnode = builder.app.emit_firstresult(
1847+ 'missing-reference', self, node, contnode)
1848+ # still not found? warn if in nit-picky mode
1849+ if newnode is None:
1850+ self._warn_missing_reference(
1851+ fromdocname, typ, target, node, domain)
1852+ except NoUri:
1853+ newnode = contnode
1854+ node.replace_self(newnode or contnode)
1855+
1856+ # remove only-nodes that do not belong to our builder
1857+ self.process_only_nodes(doctree, builder, fromdocname)
1858+
1859+ # allow custom references to be resolved
1860+ builder.app.emit('doctree-resolved', doctree, fromdocname)
1861+
1862+ def _warn_missing_reference(self, fromdoc, typ, target, node, domain):
1863+ warn = node.get('refwarn')
1864+ if self.config.nitpicky:
1865+ warn = True
1866+ if self._nitpick_ignore:
1867+ dtype = domain and '%s:%s' % (domain.name, typ) or typ
1868+ if (dtype, target) in self._nitpick_ignore:
1869+ warn = False
1870+ if not warn:
1871+ return
1872+ if domain and typ in domain.dangling_warnings:
1873+ msg = domain.dangling_warnings[typ]
1874+ elif typ == 'doc':
1875+ msg = 'unknown document: %(target)s'
1876+ elif typ == 'citation':
1877+ msg = 'citation not found: %(target)s'
1878+ elif node.get('refdomain', 'std') != 'std':
1879+ msg = '%s:%s reference target not found: %%(target)s' % \
1880+ (node['refdomain'], typ)
1881+ else:
1882+ msg = '%s reference target not found: %%(target)s' % typ
1883+ self.warn_node(msg % {'target': target}, node)
1884+
1885+ def process_only_nodes(self, doctree, builder, fromdocname=None):
1886+ # A comment on the comment() nodes being inserted: replacing by [] would
1887+ # result in a "Losing ids" exception if there is a target node before
1888+ # the only node, so we make sure docutils can transfer the id to
1889+ # something, even if it's just a comment and will lose the id anyway...
1890+ for node in doctree.traverse(addnodes.only):
1891+ try:
1892+ ret = builder.tags.eval_condition(node['expr'])
1893+ except Exception, err:
1894+ self.warn_node('exception while evaluating only '
1895+ 'directive expression: %s' % err, node)
1896+ node.replace_self(node.children or nodes.comment())
1897+ else:
1898+ if ret:
1899+ node.replace_self(node.children or nodes.comment())
1900+ else:
1901+ node.replace_self(nodes.comment())
1902+
1903+ def assign_section_numbers(self):
1904+ """Assign a section number to each heading under a numbered toctree."""
1905+ # a list of all docnames whose section numbers changed
1906+ rewrite_needed = []
1907+
1908+ old_secnumbers = self.toc_secnumbers
1909+ self.toc_secnumbers = {}
1910+
1911+ def _walk_toc(node, secnums, depth, titlenode=None):
1912+ # titlenode is the title of the document, it will get assigned a
1913+ # secnumber too, so that it shows up in next/prev/parent rellinks
1914+ for subnode in node.children:
1915+ if isinstance(subnode, nodes.bullet_list):
1916+ numstack.append(0)
1917+ _walk_toc(subnode, secnums, depth-1, titlenode)
1918+ numstack.pop()
1919+ titlenode = None
1920+ elif isinstance(subnode, nodes.list_item):
1921+ _walk_toc(subnode, secnums, depth, titlenode)
1922+ titlenode = None
1923+ elif isinstance(subnode, addnodes.only):
1924+ # at this stage we don't know yet which sections are going
1925+ # to be included; just include all of them, even if it leads
1926+ # to gaps in the numbering
1927+ _walk_toc(subnode, secnums, depth, titlenode)
1928+ titlenode = None
1929+ elif isinstance(subnode, addnodes.compact_paragraph):
1930+ numstack[-1] += 1
1931+ if depth > 0:
1932+ number = tuple(numstack)
1933+ else:
1934+ number = None
1935+ secnums[subnode[0]['anchorname']] = \
1936+ subnode[0]['secnumber'] = number
1937+ if titlenode:
1938+ titlenode['secnumber'] = number
1939+ titlenode = None
1940+ elif isinstance(subnode, addnodes.toctree):
1941+ _walk_toctree(subnode, depth)
1942+
1943+ def _walk_toctree(toctreenode, depth):
1944+ if depth == 0:
1945+ return
1946+ for (title, ref) in toctreenode['entries']:
1947+ if url_re.match(ref) or ref == 'self':
1948+ # don't mess with those
1949+ continue
1950+ if ref in self.tocs:
1951+ secnums = self.toc_secnumbers[ref] = {}
1952+ _walk_toc(self.tocs[ref], secnums, depth,
1953+ self.titles.get(ref))
1954+ if secnums != old_secnumbers.get(ref):
1955+ rewrite_needed.append(ref)
1956+
1957+ for docname in self.numbered_toctrees:
1958+ doctree = self.get_doctree(docname)
1959+ for toctreenode in doctree.traverse(addnodes.toctree):
1960+ depth = toctreenode.get('numbered', 0)
1961+ if depth:
1962+ # every numbered toctree gets new numbering
1963+ numstack = [0]
1964+ _walk_toctree(toctreenode, depth)
1965+
1966+ return rewrite_needed
1967+
1968+ def create_index(self, builder, group_entries=True,
1969+ _fixre=re.compile(r'(.*) ([(][^()]*[)])')):
1970+ """Create the real index from the collected index entries."""
1971+ new = {}
1972+
1973+ def add_entry(word, subword, link=True, dic=new):
1974+ entry = dic.get(word)
1975+ if not entry:
1976+ dic[word] = entry = [[], {}]
1977+ if subword:
1978+ add_entry(subword, '', link=link, dic=entry[1])
1979+ elif link:
1980+ try:
1981+ uri = builder.get_relative_uri('genindex', fn) + '#' + tid
1982+ except NoUri:
1983+ pass
1984+ else:
1985+ entry[0].append((main, uri))
1986+
1987+ for fn, entries in self.indexentries.iteritems():
1988+ # new entry types must be listed in directives/other.py!
1989+ for type, value, tid, main in entries:
1990+ try:
1991+ if type == 'single':
1992+ try:
1993+ entry, subentry = split_into(2, 'single', value)
1994+ except ValueError:
1995+ entry, = split_into(1, 'single', value)
1996+ subentry = ''
1997+ add_entry(entry, subentry)
1998+ elif type == 'pair':
1999+ first, second = split_into(2, 'pair', value)
2000+ add_entry(first, second)
2001+ add_entry(second, first)
2002+ elif type == 'triple':
2003+ first, second, third = split_into(3, 'triple', value)
2004+ add_entry(first, second+' '+third)
2005+ add_entry(second, third+', '+first)
2006+ add_entry(third, first+' '+second)
2007+ elif type == 'see':
2008+ first, second = split_into(2, 'see', value)
2009+ add_entry(first, _('see %s') % second, link=False)
2010+ elif type == 'seealso':
2011+ first, second = split_into(2, 'see', value)
2012+ add_entry(first, _('see also %s') % second, link=False)
2013+ else:
2014+ self.warn(fn, 'unknown index entry type %r' % type)
2015+ except ValueError, err:
2016+ self.warn(fn, str(err))
2017+
2018+ # sort the index entries; put all symbols at the front, even those
2019+ # following the letters in ASCII, this is where the chr(127) comes from
2020+ def keyfunc(entry, lcletters=string.ascii_lowercase + '_'):
2021+ lckey = unicodedata.normalize('NFD', entry[0].lower())
2022+ if lckey[0:1] in lcletters:
2023+ return chr(127) + lckey
2024+ return lckey
2025+ newlist = new.items()
2026+ newlist.sort(key=keyfunc)
2027+
2028+ if group_entries:
2029+ # fixup entries: transform
2030+ # func() (in module foo)
2031+ # func() (in module bar)
2032+ # into
2033+ # func()
2034+ # (in module foo)
2035+ # (in module bar)
2036+ oldkey = ''
2037+ oldsubitems = None
2038+ i = 0
2039+ while i < len(newlist):
2040+ key, (targets, subitems) = newlist[i]
2041+ # cannot move if it has subitems; structure gets too complex
2042+ if not subitems:
2043+ m = _fixre.match(key)
2044+ if m:
2045+ if oldkey == m.group(1):
2046+ # prefixes match: add entry as subitem of the
2047+ # previous entry
2048+ oldsubitems.setdefault(m.group(2), [[], {}])[0].\
2049+ extend(targets)
2050+ del newlist[i]
2051+ continue
2052+ oldkey = m.group(1)
2053+ else:
2054+ oldkey = key
2055+ oldsubitems = subitems
2056+ i += 1
2057+
2058+ # group the entries by letter
2059+ def keyfunc2(item, letters=string.ascii_uppercase + '_'):
2060+ # hack: mutating the subitems dicts to a list in the keyfunc
2061+ k, v = item
2062+ v[1] = sorted((si, se) for (si, (se, void)) in v[1].iteritems())
2063+ # now calculate the key
2064+ letter = unicodedata.normalize('NFD', k[0])[0].upper()
2065+ if letter in letters:
2066+ return letter
2067+ else:
2068+ # get all other symbols under one heading
2069+ return 'Symbols'
2070+ return [(key, list(group))
2071+ for (key, group) in groupby(newlist, keyfunc2)]
2072+
2073+ def collect_relations(self):
2074+ relations = {}
2075+ getinc = self.toctree_includes.get
2076+ def collect(parents, parents_set, docname, previous, next):
2077+ # circular relationship?
2078+ if docname in parents_set:
2079+ # we will warn about this in resolve_toctree()
2080+ return
2081+ includes = getinc(docname)
2082+ # previous
2083+ if not previous:
2084+ # if no previous sibling, go to parent
2085+ previous = parents[0][0]
2086+ else:
2087+ # else, go to previous sibling, or if it has children, to
2088+ # the last of its children, or if that has children, to the
2089+ # last of those, and so forth
2090+ while 1:
2091+ previncs = getinc(previous)
2092+ if previncs:
2093+ previous = previncs[-1]
2094+ else:
2095+ break
2096+ # next
2097+ if includes:
2098+ # if it has children, go to first of them
2099+ next = includes[0]
2100+ elif next:
2101+ # else, if next sibling, go to it
2102+ pass
2103+ else:
2104+ # else, go to the next sibling of the parent, if present,
2105+ # else the grandparent's sibling, if present, and so forth
2106+ for parname, parindex in parents:
2107+ parincs = getinc(parname)
2108+ if parincs and parindex + 1 < len(parincs):
2109+ next = parincs[parindex+1]
2110+ break
2111+ # else it will stay None
2112+ # same for children
2113+ if includes:
2114+ for subindex, args in enumerate(izip(includes,
2115+ [None] + includes,
2116+ includes[1:] + [None])):
2117+ collect([(docname, subindex)] + parents,
2118+ parents_set.union([docname]), *args)
2119+ relations[docname] = [parents[0][0], previous, next]
2120+ collect([(None, 0)], set(), self.config.master_doc, None, None)
2121+ return relations
2122+
2123+ def check_consistency(self):
2124+ """Do consistency checks."""
2125+ for docname in sorted(self.all_docs):
2126+ if docname not in self.files_to_rebuild:
2127+ if docname == self.config.master_doc:
2128+ # the master file is not included anywhere ;)
2129+ continue
2130+ if 'orphan' in self.metadata[docname]:
2131+ continue
2132+ self.warn(docname, 'document isn\'t included in any toctree')
2133+
2134
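The BuildEnvironment helpers restored in the hunk above (get_toc_for(), get_domain(), resolve_toctree(), resolve_references()) are the entry points Sphinx extensions call at resolve time, once 'doctree-resolved' has been emitted. A minimal, hypothetical sketch of such a caller follows; the handler name and the 'py' domain lookup are illustrative assumptions only and are not part of this upload:

    from sphinx.errors import ExtensionError

    def on_doctree_resolved(app, doctree, docname):
        # runs after BuildEnvironment.resolve_references() emits
        # 'doctree-resolved' (see the hunk above)
        env = app.builder.env
        try:
            py_domain = env.get_domain('py')  # ExtensionError if unregistered
        except ExtensionError:
            py_domain = None
        # per-page TOC; as the docstring above notes, usable on that page only
        local_toc = env.get_toc_for(docname, app.builder)

    def setup(app):
        app.connect('doctree-resolved', on_doctree_resolved)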
2135=== added directory '.pc/sort_stopwords.diff'
2136=== added directory '.pc/sort_stopwords.diff/sphinx'
2137=== added directory '.pc/sort_stopwords.diff/sphinx/search'
2138=== added file '.pc/sort_stopwords.diff/sphinx/search/__init__.py'
2139--- .pc/sort_stopwords.diff/sphinx/search/__init__.py 1970-01-01 00:00:00 +0000
2140+++ .pc/sort_stopwords.diff/sphinx/search/__init__.py 2012-11-28 07:12:20 +0000
2141@@ -0,0 +1,287 @@
2142+# -*- coding: utf-8 -*-
2143+"""
2144+ sphinx.search
2145+ ~~~~~~~~~~~~~
2146+
2147+ Create a full-text search index for offline search.
2148+
2149+ :copyright: Copyright 2007-2011 by the Sphinx team, see AUTHORS.
2150+ :license: BSD, see LICENSE for details.
2151+"""
2152+import re
2153+import cPickle as pickle
2154+
2155+from docutils.nodes import comment, Text, NodeVisitor, SkipNode
2156+
2157+from sphinx.util import jsdump, rpartition
2158+
2159+
2160+class SearchLanguage(object):
2161+ """
2162+ This class is the base class for search natural language preprocessors. If
2163+ you want to add support for a new language, you should override the methods
2164+ of this class.
2165+
2166+ You should override `lang` class property too (e.g. 'en', 'fr' and so on).
2167+
2168+ .. attribute:: stopwords
2169+
2170+ This is a set of stop words of the target language. Default `stopwords`
2171+ is empty. This word is used for building index and embedded in JS.
2172+
2173+ .. attribute:: js_stemmer_code
2174+
2175+ Return stemmer class of JavaScript version. This class' name should be
2176+ ``Stemmer`` and this class must have ``stemWord`` method. This string is
2177+ embedded as-is in searchtools.js.
2178+
2179+ This class is used to preprocess search word which Sphinx HTML readers
2180+ type, before searching index. Default implementation does nothing.
2181+ """
2182+ lang = None
2183+ stopwords = set()
2184+ js_stemmer_code = """
2185+/**
2186+ * Dummy stemmer for languages without stemming rules.
2187+ */
2188+var Stemmer = function() {
2189+ this.stemWord = function(w) {
2190+ return w;
2191+ }
2192+}
2193+"""
2194+
2195+ _word_re = re.compile(r'\w+(?u)')
2196+
2197+ def __init__(self, options):
2198+ self.options = options
2199+ self.init(options)
2200+
2201+ def init(self, options):
2202+ """
2203+ Initialize the class with the options the user has given.
2204+ """
2205+
2206+ def split(self, input):
2207+ """
2208+ This method splits a sentence into words. Default splitter splits input
2209+ at white spaces, which should be enough for most languages except CJK
2210+ languages.
2211+ """
2212+ return self._word_re.findall(input)
2213+
2214+ def stem(self, word):
2215+ """
2216+ This method implements stemming algorithm of the Python version.
2217+
2218+ Default implementation does nothing. You should implement this if the
2219+ language has any stemming rules.
2220+
2221+ This class is used to preprocess search words before registering them in
2222+ the search index. The stemming of the Python version and the JS version
2223+ (given in the js_stemmer_code attribute) must be compatible.
2224+ """
2225+ return word
2226+
2227+ def word_filter(self, word):
2228+ """
2229+ Return true if the target word should be registered in the search index.
2230+ This method is called after stemming.
2231+ """
2232+ return not (((len(word) < 3) and (12353 < ord(word[0]) < 12436)) or
2233+ (ord(word[0]) < 256 and (len(word) < 3 or word in self.stopwords or
2234+ word.isdigit())))
2235+
2236+from sphinx.search import en, ja
2237+
2238+languages = {
2239+ 'en': en.SearchEnglish,
2240+ 'ja': ja.SearchJapanese,
2241+}
2242+
2243+
2244+class _JavaScriptIndex(object):
2245+ """
2246+ The search index as javascript file that calls a function
2247+ on the documentation search object to register the index.
2248+ """
2249+
2250+ PREFIX = 'Search.setIndex('
2251+ SUFFIX = ')'
2252+
2253+ def dumps(self, data):
2254+ return self.PREFIX + jsdump.dumps(data) + self.SUFFIX
2255+
2256+ def loads(self, s):
2257+ data = s[len(self.PREFIX):-len(self.SUFFIX)]
2258+ if not data or not s.startswith(self.PREFIX) or not \
2259+ s.endswith(self.SUFFIX):
2260+ raise ValueError('invalid data')
2261+ return jsdump.loads(data)
2262+
2263+ def dump(self, data, f):
2264+ f.write(self.dumps(data))
2265+
2266+ def load(self, f):
2267+ return self.loads(f.read())
2268+
2269+
2270+js_index = _JavaScriptIndex()
2271+
2272+
2273+class WordCollector(NodeVisitor):
2274+ """
2275+ A special visitor that collects words for the `IndexBuilder`.
2276+ """
2277+
2278+ def __init__(self, document, lang):
2279+ NodeVisitor.__init__(self, document)
2280+ self.found_words = []
2281+ self.lang = lang
2282+
2283+ def dispatch_visit(self, node):
2284+ if node.__class__ is comment:
2285+ raise SkipNode
2286+ if node.__class__ is Text:
2287+ self.found_words.extend(self.lang.split(node.astext()))
2288+
2289+
2290+class IndexBuilder(object):
2291+ """
2292+ Helper class that creates a searchindex based on the doctrees
2293+ passed to the `feed` method.
2294+ """
2295+ formats = {
2296+ 'jsdump': jsdump,
2297+ 'pickle': pickle
2298+ }
2299+
2300+ def __init__(self, env, lang, options):
2301+ self.env = env
2302+ # filename -> title
2303+ self._titles = {}
2304+ # stemmed word -> set(filenames)
2305+ self._mapping = {}
2306+ # objtype -> index
2307+ self._objtypes = {}
2308+ # objtype index -> (domain, type, objname (localized))
2309+ self._objnames = {}
2310+ # add language-specific SearchLanguage instance
2311+ self.lang = languages[lang](options)
2312+
2313+ def load(self, stream, format):
2314+ """Reconstruct from frozen data."""
2315+ if isinstance(format, basestring):
2316+ format = self.formats[format]
2317+ frozen = format.load(stream)
2318+ # if an old index is present, we treat it as not existing.
2319+ if not isinstance(frozen, dict):
2320+ raise ValueError('old format')
2321+ index2fn = frozen['filenames']
2322+ self._titles = dict(zip(index2fn, frozen['titles']))
2323+ self._mapping = {}
2324+ for k, v in frozen['terms'].iteritems():
2325+ if isinstance(v, int):
2326+ self._mapping[k] = set([index2fn[v]])
2327+ else:
2328+ self._mapping[k] = set(index2fn[i] for i in v)
2329+ # no need to load keywords/objtypes
2330+
2331+ def dump(self, stream, format):
2332+ """Dump the frozen index to a stream."""
2333+ if isinstance(format, basestring):
2334+ format = self.formats[format]
2335+ format.dump(self.freeze(), stream)
2336+
2337+ def get_objects(self, fn2index):
2338+ rv = {}
2339+ otypes = self._objtypes
2340+ onames = self._objnames
2341+ for domainname, domain in self.env.domains.iteritems():
2342+ for fullname, dispname, type, docname, anchor, prio in \
2343+ domain.get_objects():
2344+ # XXX use dispname?
2345+ if docname not in fn2index:
2346+ continue
2347+ if prio < 0:
2348+ continue
2349+ prefix, name = rpartition(fullname, '.')
2350+ pdict = rv.setdefault(prefix, {})
2351+ try:
2352+ typeindex = otypes[domainname, type]
2353+ except KeyError:
2354+ typeindex = len(otypes)
2355+ otypes[domainname, type] = typeindex
2356+ otype = domain.object_types.get(type)
2357+ if otype:
2358+ # use unicode() to fire translation proxies
2359+ onames[typeindex] = (domainname, type,
2360+ unicode(domain.get_type_name(otype)))
2361+ else:
2362+ onames[typeindex] = (domainname, type, type)
2363+ if anchor == fullname:
2364+ shortanchor = ''
2365+ elif anchor == type + '-' + fullname:
2366+ shortanchor = '-'
2367+ else:
2368+ shortanchor = anchor
2369+ pdict[name] = (fn2index[docname], typeindex, prio, shortanchor)
2370+ return rv
2371+
2372+ def get_terms(self, fn2index):
2373+ rv = {}
2374+ for k, v in self._mapping.iteritems():
2375+ if len(v) == 1:
2376+ fn, = v
2377+ if fn in fn2index:
2378+ rv[k] = fn2index[fn]
2379+ else:
2380+ rv[k] = [fn2index[fn] for fn in v if fn in fn2index]
2381+ return rv
2382+
2383+ def freeze(self):
2384+ """Create a usable data structure for serializing."""
2385+ filenames = self._titles.keys()
2386+ titles = self._titles.values()
2387+ fn2index = dict((f, i) for (i, f) in enumerate(filenames))
2388+ terms = self.get_terms(fn2index)
2389+ objects = self.get_objects(fn2index) # populates _objtypes
2390+ objtypes = dict((v, k[0] + ':' + k[1])
2391+ for (k, v) in self._objtypes.iteritems())
2392+ objnames = self._objnames
2393+ return dict(filenames=filenames, titles=titles, terms=terms,
2394+ objects=objects, objtypes=objtypes, objnames=objnames)
2395+
2396+ def prune(self, filenames):
2397+ """Remove data for all filenames not in the list."""
2398+ new_titles = {}
2399+ for filename in filenames:
2400+ if filename in self._titles:
2401+ new_titles[filename] = self._titles[filename]
2402+ self._titles = new_titles
2403+ for wordnames in self._mapping.itervalues():
2404+ wordnames.intersection_update(filenames)
2405+
2406+ def feed(self, filename, title, doctree):
2407+ """Feed a doctree to the index."""
2408+ self._titles[filename] = title
2409+
2410+ visitor = WordCollector(doctree, self.lang)
2411+ doctree.walk(visitor)
2412+
2413+ def add_term(word, stem=self.lang.stem):
2414+ word = stem(word)
2415+ if self.lang.word_filter(word):
2416+ self._mapping.setdefault(word, set()).add(filename)
2417+
2418+ for word in self.lang.split(title):
2419+ add_term(word)
2420+
2421+ for word in visitor.found_words:
2422+ add_term(word)
2423+
2424+ def context_for_searchtool(self):
2425+ return dict(
2426+ search_language_stemming_code = self.lang.js_stemmer_code,
2427+ search_language_stop_words = jsdump.dumps(self.lang.stopwords),
2428+ )
2429
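The SearchLanguage base class in the file above spells out how language support is added: subclass it, set lang and stopwords, override stem(), and make the class reachable through the languages mapping, keeping the Python-side stem() compatible with js_stemmer_code. A minimal, hypothetical sketch under those assumptions (the language code and word list are placeholders, not something shipped in this package):

    from sphinx.search import SearchLanguage, languages

    class SearchDanish(SearchLanguage):
        lang = 'da'
        stopwords = set(['og', 'i', 'det'])  # placeholder stop words

        def stem(self, word):
            # a real implementation pairs this with a matching js_stemmer_code
            return word.lower()

    # registration via the module-level mapping shown in the hunk above
    languages['da'] = SearchDanish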
2430=== removed file '.pc/support_python_3.3.diff/sphinx/environment.py'
2431--- .pc/support_python_3.3.diff/sphinx/environment.py 2012-11-01 21:39:16 +0000
2432+++ .pc/support_python_3.3.diff/sphinx/environment.py 1970-01-01 00:00:00 +0000
2433@@ -1,1762 +0,0 @@
2434-# -*- coding: utf-8 -*-
2435-"""
2436- sphinx.environment
2437- ~~~~~~~~~~~~~~~~~~
2438-
2439- Global creation environment.
2440-
2441- :copyright: Copyright 2007-2011 by the Sphinx team, see AUTHORS.
2442- :license: BSD, see LICENSE for details.
2443-"""
2444-
2445-import re
2446-import os
2447-import sys
2448-import time
2449-import types
2450-import codecs
2451-import imghdr
2452-import string
2453-import unicodedata
2454-import cPickle as pickle
2455-from os import path
2456-from glob import glob
2457-from itertools import izip, groupby
2458-
2459-from docutils import nodes
2460-from docutils.io import FileInput, NullOutput
2461-from docutils.core import Publisher
2462-from docutils.utils import Reporter, relative_path, new_document, \
2463- get_source_line
2464-from docutils.readers import standalone
2465-from docutils.parsers.rst import roles, directives, Parser as RSTParser
2466-from docutils.parsers.rst.languages import en as english
2467-from docutils.parsers.rst.directives.html import MetaBody
2468-from docutils.writers import UnfilteredWriter
2469-from docutils.transforms import Transform
2470-from docutils.transforms.parts import ContentsFilter
2471-
2472-from sphinx import addnodes
2473-from sphinx.util import url_re, get_matching_docs, docname_join, split_into, \
2474- FilenameUniqDict
2475-from sphinx.util.nodes import clean_astext, make_refnode, extract_messages, \
2476- WarningStream
2477-from sphinx.util.osutil import movefile, SEP, ustrftime, find_catalog
2478-from sphinx.util.matching import compile_matchers
2479-from sphinx.util.pycompat import all, class_types
2480-from sphinx.util.websupport import is_commentable
2481-from sphinx.errors import SphinxError, ExtensionError
2482-from sphinx.locale import _, init as init_locale
2483-from sphinx.versioning import add_uids, merge_doctrees
2484-
2485-fs_encoding = sys.getfilesystemencoding() or sys.getdefaultencoding()
2486-
2487-orig_role_function = roles.role
2488-orig_directive_function = directives.directive
2489-
2490-class ElementLookupError(Exception): pass
2491-
2492-
2493-default_settings = {
2494- 'embed_stylesheet': False,
2495- 'cloak_email_addresses': True,
2496- 'pep_base_url': 'http://www.python.org/dev/peps/',
2497- 'rfc_base_url': 'http://tools.ietf.org/html/',
2498- 'input_encoding': 'utf-8-sig',
2499- 'doctitle_xform': False,
2500- 'sectsubtitle_xform': False,
2501- 'halt_level': 5,
2502-}
2503-
2504-# This is increased every time an environment attribute is added
2505-# or changed to properly invalidate pickle files.
2506-ENV_VERSION = 41
2507-
2508-
2509-default_substitutions = set([
2510- 'version',
2511- 'release',
2512- 'today',
2513-])
2514-
2515-dummy_reporter = Reporter('', 4, 4)
2516-
2517-versioning_conditions = {
2518- 'none': False,
2519- 'text': nodes.TextElement,
2520- 'commentable': is_commentable,
2521-}
2522-
2523-
2524-class NoUri(Exception):
2525- """Raised by get_relative_uri if there is no URI available."""
2526- pass
2527-
2528-
2529-class DefaultSubstitutions(Transform):
2530- """
2531- Replace some substitutions if they aren't defined in the document.
2532- """
2533- # run before the default Substitutions
2534- default_priority = 210
2535-
2536- def apply(self):
2537- config = self.document.settings.env.config
2538- # only handle those not otherwise defined in the document
2539- to_handle = default_substitutions - set(self.document.substitution_defs)
2540- for ref in self.document.traverse(nodes.substitution_reference):
2541- refname = ref['refname']
2542- if refname in to_handle:
2543- text = config[refname]
2544- if refname == 'today' and not text:
2545- # special handling: can also specify a strftime format
2546- text = ustrftime(config.today_fmt or _('%B %d, %Y'))
2547- ref.replace_self(nodes.Text(text, text))
2548-
2549-
2550-class MoveModuleTargets(Transform):
2551- """
2552- Move module targets that are the first thing in a section to the section
2553- title.
2554-
2555- XXX Python specific
2556- """
2557- default_priority = 210
2558-
2559- def apply(self):
2560- for node in self.document.traverse(nodes.target):
2561- if not node['ids']:
2562- continue
2563- if (node.has_key('ismod') and
2564- node.parent.__class__ is nodes.section and
2565- # index 0 is the section title node
2566- node.parent.index(node) == 1):
2567- node.parent['ids'][0:0] = node['ids']
2568- node.parent.remove(node)
2569-
2570-
2571-class HandleCodeBlocks(Transform):
2572- """
2573- Several code block related transformations.
2574- """
2575- default_priority = 210
2576-
2577- def apply(self):
2578- # move doctest blocks out of blockquotes
2579- for node in self.document.traverse(nodes.block_quote):
2580- if all(isinstance(child, nodes.doctest_block) for child
2581- in node.children):
2582- node.replace_self(node.children)
2583- # combine successive doctest blocks
2584- #for node in self.document.traverse(nodes.doctest_block):
2585- # if node not in node.parent.children:
2586- # continue
2587- # parindex = node.parent.index(node)
2588- # while len(node.parent) > parindex+1 and \
2589- # isinstance(node.parent[parindex+1], nodes.doctest_block):
2590- # node[0] = nodes.Text(node[0] + '\n\n' +
2591- # node.parent[parindex+1][0])
2592- # del node.parent[parindex+1]
2593-
2594-
2595-class SortIds(Transform):
2596- """
2597- Sort secion IDs so that the "id[0-9]+" one comes last.
2598- """
2599- default_priority = 261
2600-
2601- def apply(self):
2602- for node in self.document.traverse(nodes.section):
2603- if len(node['ids']) > 1 and node['ids'][0].startswith('id'):
2604- node['ids'] = node['ids'][1:] + [node['ids'][0]]
2605-
2606-
2607-class CitationReferences(Transform):
2608- """
2609- Replace citation references by pending_xref nodes before the default
2610- docutils transform tries to resolve them.
2611- """
2612- default_priority = 619
2613-
2614- def apply(self):
2615- for citnode in self.document.traverse(nodes.citation_reference):
2616- cittext = citnode.astext()
2617- refnode = addnodes.pending_xref(cittext, reftype='citation',
2618- reftarget=cittext, refwarn=True)
2619- refnode.line = citnode.line or citnode.parent.line
2620- refnode += nodes.Text('[' + cittext + ']')
2621- citnode.parent.replace(citnode, refnode)
2622-
2623-
2624-class Locale(Transform):
2625- """
2626- Replace translatable nodes with their translated doctree.
2627- """
2628- default_priority = 0
2629- def apply(self):
2630- env = self.document.settings.env
2631- settings, source = self.document.settings, self.document['source']
2632- # XXX check if this is reliable
2633- assert source.startswith(env.srcdir)
2634- docname = path.splitext(relative_path(env.srcdir, source))[0]
2635- textdomain = find_catalog(docname,
2636- self.document.settings.gettext_compact)
2637-
2638- # fetch translations
2639- dirs = [path.join(env.srcdir, directory)
2640- for directory in env.config.locale_dirs]
2641- catalog, has_catalog = init_locale(dirs, env.config.language,
2642- textdomain)
2643- if not has_catalog:
2644- return
2645-
2646- parser = RSTParser()
2647-
2648- for node, msg in extract_messages(self.document):
2649- patch = new_document(source, settings)
2650- msgstr = catalog.gettext(msg)
2651- # XXX add marker to untranslated parts
2652- if not msgstr or msgstr == msg: # as-of-yet untranslated
2653- continue
2654- parser.parse(msgstr, patch)
2655- patch = patch[0]
2656- # XXX doctest and other block markup
2657- if not isinstance(patch, nodes.paragraph):
2658- continue # skip for now
2659- for child in patch.children: # update leaves
2660- child.parent = node
2661- node.children = patch.children
2662-
2663-
2664-class SphinxStandaloneReader(standalone.Reader):
2665- """
2666- Add our own transforms.
2667- """
2668- transforms = [Locale, CitationReferences, DefaultSubstitutions,
2669- MoveModuleTargets, HandleCodeBlocks, SortIds]
2670-
2671- def get_transforms(self):
2672- return standalone.Reader.get_transforms(self) + self.transforms
2673-
2674-
2675-class SphinxDummyWriter(UnfilteredWriter):
2676- supported = ('html',) # needed to keep "meta" nodes
2677-
2678- def translate(self):
2679- pass
2680-
2681-
2682-class SphinxContentsFilter(ContentsFilter):
2683- """
2684- Used with BuildEnvironment.add_toc_from() to discard cross-file links
2685- within table-of-contents link nodes.
2686- """
2687- def visit_pending_xref(self, node):
2688- text = node.astext()
2689- self.parent.append(nodes.literal(text, text))
2690- raise nodes.SkipNode
2691-
2692- def visit_image(self, node):
2693- raise nodes.SkipNode
2694-
2695-
2696-class BuildEnvironment:
2697- """
2698- The environment in which the ReST files are translated.
2699- Stores an inventory of cross-file targets and provides doctree
2700- transformations to resolve links to them.
2701- """
2702-
2703- # --------- ENVIRONMENT PERSISTENCE ----------------------------------------
2704-
2705- @staticmethod
2706- def frompickle(config, filename):
2707- picklefile = open(filename, 'rb')
2708- try:
2709- env = pickle.load(picklefile)
2710- finally:
2711- picklefile.close()
2712- if env.version != ENV_VERSION:
2713- raise IOError('env version not current')
2714- env.config.values = config.values
2715- return env
2716-
2717- def topickle(self, filename):
2718- # remove unpicklable attributes
2719- warnfunc = self._warnfunc
2720- self.set_warnfunc(None)
2721- values = self.config.values
2722- del self.config.values
2723- domains = self.domains
2724- del self.domains
2725- # first write to a temporary file, so that if dumping fails,
2726- # the existing environment won't be overwritten
2727- picklefile = open(filename + '.tmp', 'wb')
2728- # remove potentially pickling-problematic values from config
2729- for key, val in vars(self.config).items():
2730- if key.startswith('_') or \
2731- isinstance(val, types.ModuleType) or \
2732- isinstance(val, types.FunctionType) or \
2733- isinstance(val, class_types):
2734- del self.config[key]
2735- try:
2736- pickle.dump(self, picklefile, pickle.HIGHEST_PROTOCOL)
2737- finally:
2738- picklefile.close()
2739- movefile(filename + '.tmp', filename)
2740- # reset attributes
2741- self.domains = domains
2742- self.config.values = values
2743- self.set_warnfunc(warnfunc)
2744-
2745- # --------- ENVIRONMENT INITIALIZATION -------------------------------------
2746-
2747- def __init__(self, srcdir, doctreedir, config):
2748- self.doctreedir = doctreedir
2749- self.srcdir = srcdir
2750- self.config = config
2751-
2752- # the method of doctree versioning; see set_versioning_method
2753- self.versioning_condition = None
2754-
2755- # the application object; only set while update() runs
2756- self.app = None
2757-
2758- # all the registered domains, set by the application
2759- self.domains = {}
2760-
2761- # the docutils settings for building
2762- self.settings = default_settings.copy()
2763- self.settings['env'] = self
2764-
2765- # the function to write warning messages with
2766- self._warnfunc = None
2767-
2768- # this is to invalidate old pickles
2769- self.version = ENV_VERSION
2770-
2771- # make this a set for faster testing
2772- self._nitpick_ignore = set(self.config.nitpick_ignore)
2773-
2774- # All "docnames" here are /-separated and relative and exclude
2775- # the source suffix.
2776-
2777- self.found_docs = set() # contains all existing docnames
2778- self.all_docs = {} # docname -> mtime at the time of build
2779- # contains all built docnames
2780- self.dependencies = {} # docname -> set of dependent file
2781- # names, relative to documentation root
2782- self.reread_always = set() # docnames to re-read unconditionally on
2783- # next build
2784-
2785- # File metadata
2786- self.metadata = {} # docname -> dict of metadata items
2787-
2788- # TOC inventory
2789- self.titles = {} # docname -> title node
2790- self.longtitles = {} # docname -> title node; only different if
2791- # set differently with title directive
2792- self.tocs = {} # docname -> table of contents nodetree
2793- self.toc_num_entries = {} # docname -> number of real entries
2794- # used to determine when to show the TOC
2795- # in a sidebar (don't show if it's only one item)
2796- self.toc_secnumbers = {} # docname -> dict of sectionid -> number
2797-
2798- self.toctree_includes = {} # docname -> list of toctree includefiles
2799- self.files_to_rebuild = {} # docname -> set of files
2800- # (containing its TOCs) to rebuild too
2801- self.glob_toctrees = set() # docnames that have :glob: toctrees
2802- self.numbered_toctrees = set() # docnames that have :numbered: toctrees
2803-
2804- # domain-specific inventories, here to be pickled
2805- self.domaindata = {} # domainname -> domain-specific dict
2806-
2807- # Other inventories
2808- self.citations = {} # citation name -> docname, labelid
2809- self.indexentries = {} # docname -> list of
2810- # (type, string, target, aliasname)
2811- self.versionchanges = {} # version -> list of (type, docname,
2812- # lineno, module, descname, content)
2813-
2814- # these map absolute path -> (docnames, unique filename)
2815- self.images = FilenameUniqDict()
2816- self.dlfiles = FilenameUniqDict()
2817-
2818- # temporary data storage while reading a document
2819- self.temp_data = {}
2820-
2821- def set_warnfunc(self, func):
2822- self._warnfunc = func
2823- self.settings['warning_stream'] = WarningStream(func)
2824-
2825- def set_versioning_method(self, method):
2826- """This sets the doctree versioning method for this environment.
2827-
2828- Versioning methods are a builder property; only builders with the same
2829- versioning method can share the same doctree directory. Therefore, we
2830- raise an exception if the user tries to use an environment with an
2831- incompatible versioning method.
2832- """
2833- if method not in versioning_conditions:
2834- raise ValueError('invalid versioning method: %r' % method)
2835- condition = versioning_conditions[method]
2836- if self.versioning_condition not in (None, condition):
2837- raise SphinxError('This environment is incompatible with the '
2838- 'selected builder, please choose another '
2839- 'doctree directory.')
2840- self.versioning_condition = condition
2841-
2842- def warn(self, docname, msg, lineno=None):
2843- # strange argument order is due to backwards compatibility
2844- self._warnfunc(msg, (docname, lineno))
2845-
2846- def warn_node(self, msg, node):
2847- self._warnfunc(msg, '%s:%s' % get_source_line(node))
2848-
2849- def clear_doc(self, docname):
2850- """Remove all traces of a source file in the inventory."""
2851- if docname in self.all_docs:
2852- self.all_docs.pop(docname, None)
2853- self.reread_always.discard(docname)
2854- self.metadata.pop(docname, None)
2855- self.dependencies.pop(docname, None)
2856- self.titles.pop(docname, None)
2857- self.longtitles.pop(docname, None)
2858- self.tocs.pop(docname, None)
2859- self.toc_secnumbers.pop(docname, None)
2860- self.toc_num_entries.pop(docname, None)
2861- self.toctree_includes.pop(docname, None)
2862- self.indexentries.pop(docname, None)
2863- self.glob_toctrees.discard(docname)
2864- self.numbered_toctrees.discard(docname)
2865- self.images.purge_doc(docname)
2866- self.dlfiles.purge_doc(docname)
2867-
2868- for subfn, fnset in self.files_to_rebuild.items():
2869- fnset.discard(docname)
2870- if not fnset:
2871- del self.files_to_rebuild[subfn]
2872- for key, (fn, _) in self.citations.items():
2873- if fn == docname:
2874- del self.citations[key]
2875- for version, changes in self.versionchanges.items():
2876- new = [change for change in changes if change[1] != docname]
2877- changes[:] = new
2878-
2879- for domain in self.domains.values():
2880- domain.clear_doc(docname)
2881-
2882- def doc2path(self, docname, base=True, suffix=None):
2883- """Return the filename for the document name.
2884-
2885- If *base* is True, return absolute path under self.srcdir.
2886- If *base* is None, return relative path to self.srcdir.
2887- If *base* is a path string, return absolute path under that.
2888- If *suffix* is not None, add it instead of config.source_suffix.
2889- """
2890- docname = docname.replace(SEP, path.sep)
2891- suffix = suffix or self.config.source_suffix
2892- if base is True:
2893- return path.join(self.srcdir, docname) + suffix
2894- elif base is None:
2895- return docname + suffix
2896- else:
2897- return path.join(base, docname) + suffix
2898-
2899- def relfn2path(self, filename, docname=None):
2900- """Return paths to a file referenced from a document, relative to
2901- documentation root and absolute.
2902-
2903- Absolute filenames are relative to the source dir, while relative
2904- filenames are relative to the dir of the containing document.
2905- """
2906- if filename.startswith('/') or filename.startswith(os.sep):
2907- rel_fn = filename[1:]
2908- else:
2909- docdir = path.dirname(self.doc2path(docname or self.docname,
2910- base=None))
2911- rel_fn = path.join(docdir, filename)
2912- try:
2913- return rel_fn, path.join(self.srcdir, rel_fn)
2914- except UnicodeDecodeError:
2915- # the source directory is a bytestring with non-ASCII characters;
2916- # let's try to encode the rel_fn in the file system encoding
2917- enc_rel_fn = rel_fn.encode(sys.getfilesystemencoding())
2918- return rel_fn, path.join(self.srcdir, enc_rel_fn)
2919-
2920- def find_files(self, config):
2921- """Find all source files in the source dir and put them in
2922- self.found_docs.
2923- """
2924- matchers = compile_matchers(
2925- config.exclude_patterns[:] +
2926- config.exclude_trees +
2927- [d + config.source_suffix for d in config.unused_docs] +
2928- ['**/' + d for d in config.exclude_dirnames] +
2929- ['**/_sources', '.#*']
2930- )
2931- self.found_docs = set(get_matching_docs(
2932- self.srcdir, config.source_suffix, exclude_matchers=matchers))
2933-
2934- def get_outdated_files(self, config_changed):
2935- """Return (added, changed, removed) sets."""
2936- # clear all files no longer present
2937- removed = set(self.all_docs) - self.found_docs
2938-
2939- added = set()
2940- changed = set()
2941-
2942- if config_changed:
2943- # config values affect e.g. substitutions
2944- added = self.found_docs
2945- else:
2946- for docname in self.found_docs:
2947- if docname not in self.all_docs:
2948- added.add(docname)
2949- continue
2950- # if the doctree file is not there, rebuild
2951- if not path.isfile(self.doc2path(docname, self.doctreedir,
2952- '.doctree')):
2953- changed.add(docname)
2954- continue
2955- # check the "reread always" list
2956- if docname in self.reread_always:
2957- changed.add(docname)
2958- continue
2959- # check the mtime of the document
2960- mtime = self.all_docs[docname]
2961- newmtime = path.getmtime(self.doc2path(docname))
2962- if newmtime > mtime:
2963- changed.add(docname)
2964- continue
2965- # finally, check the mtime of dependencies
2966- for dep in self.dependencies.get(docname, ()):
2967- try:
2968- # this will do the right thing when dep is absolute too
2969- deppath = path.join(self.srcdir, dep)
2970- if not path.isfile(deppath):
2971- changed.add(docname)
2972- break
2973- depmtime = path.getmtime(deppath)
2974- if depmtime > mtime:
2975- changed.add(docname)
2976- break
2977- except EnvironmentError:
2978- # give it another chance
2979- changed.add(docname)
2980- break
2981-
2982- return added, changed, removed
2983-
2984- def update(self, config, srcdir, doctreedir, app=None):
2985- """(Re-)read all files new or changed since last update.
2986-
2987- Returns a summary, the total count of documents to reread and an
2988- iterator that yields docnames as it processes them. Store all
2989- environment docnames in the canonical format (ie using SEP as a
2990- separator in place of os.path.sep).
2991- """
2992- config_changed = False
2993- if self.config is None:
2994- msg = '[new config] '
2995- config_changed = True
2996- else:
2997- # check if a config value was changed that affects how
2998- # doctrees are read
2999- for key, descr in config.values.iteritems():
3000- if descr[1] != 'env':
3001- continue
3002- if self.config[key] != config[key]:
3003- msg = '[config changed] '
3004- config_changed = True
3005- break
3006- else:
3007- msg = ''
3008- # this value is not covered by the above loop because it is handled
3009- # specially by the config class
3010- if self.config.extensions != config.extensions:
3011- msg = '[extensions changed] '
3012- config_changed = True
3013- # the source and doctree directories may have been relocated
3014- self.srcdir = srcdir
3015- self.doctreedir = doctreedir
3016- self.find_files(config)
3017- self.config = config
3018-
3019- added, changed, removed = self.get_outdated_files(config_changed)
3020-
3021- # allow user intervention as well
3022- for docs in app.emit('env-get-outdated', self, added, changed, removed):
3023- changed.update(set(docs) & self.found_docs)
3024-
3025- # if files were added or removed, all documents with globbed toctrees
3026- # must be reread
3027- if added or removed:
3028- # ... but not those that already were removed
3029- changed.update(self.glob_toctrees & self.found_docs)
3030-
3031- msg += '%s added, %s changed, %s removed' % (len(added), len(changed),
3032- len(removed))
3033-
3034- def update_generator():
3035- self.app = app
3036-
3037- # clear all files no longer present
3038- for docname in removed:
3039- if app:
3040- app.emit('env-purge-doc', self, docname)
3041- self.clear_doc(docname)
3042-
3043- # read all new and changed files
3044- for docname in sorted(added | changed):
3045- yield docname
3046- self.read_doc(docname, app=app)
3047-
3048- if config.master_doc not in self.all_docs:
3049- self.warn(None, 'master file %s not found' %
3050- self.doc2path(config.master_doc))
3051-
3052- self.app = None
3053- if app:
3054- app.emit('env-updated', self)
3055-
3056- return msg, len(added | changed), update_generator()
3057-
3058- def check_dependents(self, already):
3059- to_rewrite = self.assign_section_numbers()
3060- for docname in to_rewrite:
3061- if docname not in already:
3062- yield docname
3063-
3064- # --------- SINGLE FILE READING --------------------------------------------
3065-
3066- def warn_and_replace(self, error):
3067- """Custom decoding error handler that warns and replaces."""
3068- linestart = error.object.rfind('\n', 0, error.start)
3069- lineend = error.object.find('\n', error.start)
3070- if lineend == -1: lineend = len(error.object)
3071- lineno = error.object.count('\n', 0, error.start) + 1
3072- self.warn(self.docname, 'undecodable source characters, '
3073- 'replacing with "?": %r' %
3074- (error.object[linestart+1:error.start] + '>>>' +
3075- error.object[error.start:error.end] + '<<<' +
3076- error.object[error.end:lineend]), lineno)
3077- return (u'?', error.end)
3078-
3079- def lookup_domain_element(self, type, name):
3080- """Lookup a markup element (directive or role), given its name which can
3081- be a full name (with domain).
3082- """
3083- name = name.lower()
3084- # explicit domain given?
3085- if ':' in name:
3086- domain_name, name = name.split(':', 1)
3087- if domain_name in self.domains:
3088- domain = self.domains[domain_name]
3089- element = getattr(domain, type)(name)
3090- if element is not None:
3091- return element, []
3092- # else look in the default domain
3093- else:
3094- def_domain = self.temp_data.get('default_domain')
3095- if def_domain is not None:
3096- element = getattr(def_domain, type)(name)
3097- if element is not None:
3098- return element, []
3099- # always look in the std domain
3100- element = getattr(self.domains['std'], type)(name)
3101- if element is not None:
3102- return element, []
3103- raise ElementLookupError
3104-
3105- def patch_lookup_functions(self):
3106- """Monkey-patch directive and role dispatch, so that domain-specific
3107- markup takes precedence.
3108- """
3109- def directive(name, lang_module, document):
3110- try:
3111- return self.lookup_domain_element('directive', name)
3112- except ElementLookupError:
3113- return orig_directive_function(name, lang_module, document)
3114-
3115- def role(name, lang_module, lineno, reporter):
3116- try:
3117- return self.lookup_domain_element('role', name)
3118- except ElementLookupError:
3119- return orig_role_function(name, lang_module, lineno, reporter)
3120-
3121- directives.directive = directive
3122- roles.role = role
3123-
3124- def read_doc(self, docname, src_path=None, save_parsed=True, app=None):
3125- """Parse a file and add/update inventory entries for the doctree.
3126-
3127- If srcpath is given, read from a different source file.
3128- """
3129- # remove all inventory entries for that file
3130- if app:
3131- app.emit('env-purge-doc', self, docname)
3132-
3133- self.clear_doc(docname)
3134-
3135- if src_path is None:
3136- src_path = self.doc2path(docname)
3137-
3138- self.temp_data['docname'] = docname
3139- # defaults to the global default, but can be re-set in a document
3140- self.temp_data['default_domain'] = \
3141- self.domains.get(self.config.primary_domain)
3142-
3143- self.settings['input_encoding'] = self.config.source_encoding
3144- self.settings['trim_footnote_reference_space'] = \
3145- self.config.trim_footnote_reference_space
3146- self.settings['gettext_compact'] = self.config.gettext_compact
3147-
3148- self.patch_lookup_functions()
3149-
3150- if self.config.default_role:
3151- role_fn, messages = roles.role(self.config.default_role, english,
3152- 0, dummy_reporter)
3153- if role_fn:
3154- roles._roles[''] = role_fn
3155- else:
3156- self.warn(docname, 'default role %s not found' %
3157- self.config.default_role)
3158-
3159- codecs.register_error('sphinx', self.warn_and_replace)
3160-
3161- class SphinxSourceClass(FileInput):
3162- def __init__(self_, *args, **kwds):
3163- # don't call sys.exit() on IOErrors
3164- kwds['handle_io_errors'] = False
3165- FileInput.__init__(self_, *args, **kwds)
3166-
3167- def decode(self_, data):
3168- if isinstance(data, unicode):
3169- return data
3170- return data.decode(self_.encoding, 'sphinx')
3171-
3172- def read(self_):
3173- data = FileInput.read(self_)
3174- if app:
3175- arg = [data]
3176- app.emit('source-read', docname, arg)
3177- data = arg[0]
3178- if self.config.rst_epilog:
3179- data = data + '\n' + self.config.rst_epilog + '\n'
3180- if self.config.rst_prolog:
3181- data = self.config.rst_prolog + '\n' + data
3182- return data
3183-
3184- # publish manually
3185- pub = Publisher(reader=SphinxStandaloneReader(),
3186- writer=SphinxDummyWriter(),
3187- source_class=SphinxSourceClass,
3188- destination_class=NullOutput)
3189- pub.set_components(None, 'restructuredtext', None)
3190- pub.process_programmatic_settings(None, self.settings, None)
3191- pub.set_source(None, src_path.encode(fs_encoding))
3192- pub.set_destination(None, None)
3193- try:
3194- pub.publish()
3195- doctree = pub.document
3196- except UnicodeError, err:
3197- raise SphinxError(str(err))
3198-
3199- # post-processing
3200- self.filter_messages(doctree)
3201- self.process_dependencies(docname, doctree)
3202- self.process_images(docname, doctree)
3203- self.process_downloads(docname, doctree)
3204- self.process_metadata(docname, doctree)
3205- self.process_refonly_bullet_lists(docname, doctree)
3206- self.create_title_from(docname, doctree)
3207- self.note_indexentries_from(docname, doctree)
3208- self.note_citations_from(docname, doctree)
3209- self.build_toc_from(docname, doctree)
3210- for domain in self.domains.itervalues():
3211- domain.process_doc(self, docname, doctree)
3212-
3213- # allow extension-specific post-processing
3214- if app:
3215- app.emit('doctree-read', doctree)
3216-
3217- # store time of build, for outdated files detection
3218- self.all_docs[docname] = time.time()
3219-
3220- if self.versioning_condition:
3221- # get old doctree
3222- try:
3223- f = open(self.doc2path(docname,
3224- self.doctreedir, '.doctree'), 'rb')
3225- try:
3226- old_doctree = pickle.load(f)
3227- finally:
3228- f.close()
3229- except EnvironmentError:
3230- old_doctree = None
3231-
3232- # add uids for versioning
3233- if old_doctree is None:
3234- list(add_uids(doctree, self.versioning_condition))
3235- else:
3236- list(merge_doctrees(
3237- old_doctree, doctree, self.versioning_condition))
3238-
3239- # make it picklable
3240- doctree.reporter = None
3241- doctree.transformer = None
3242- doctree.settings.warning_stream = None
3243- doctree.settings.env = None
3244- doctree.settings.record_dependencies = None
3245- for metanode in doctree.traverse(MetaBody.meta):
3246- # docutils' meta nodes aren't picklable because the class is nested
3247- metanode.__class__ = addnodes.meta
3248-
3249- # cleanup
3250- self.temp_data.clear()
3251-
3252- if save_parsed:
3253- # save the parsed doctree
3254- doctree_filename = self.doc2path(docname, self.doctreedir,
3255- '.doctree')
3256- dirname = path.dirname(doctree_filename)
3257- if not path.isdir(dirname):
3258- os.makedirs(dirname)
3259- f = open(doctree_filename, 'wb')
3260- try:
3261- pickle.dump(doctree, f, pickle.HIGHEST_PROTOCOL)
3262- finally:
3263- f.close()
3264- else:
3265- return doctree
3266-
3267- # utilities to use while reading a document
3268-
3269- @property
3270- def docname(self):
3271- """Backwards compatible alias."""
3272- return self.temp_data['docname']
3273-
3274- @property
3275- def currmodule(self):
3276- """Backwards compatible alias."""
3277- return self.temp_data.get('py:module')
3278-
3279- @property
3280- def currclass(self):
3281- """Backwards compatible alias."""
3282- return self.temp_data.get('py:class')
3283-
3284- def new_serialno(self, category=''):
3285- """Return a serial number, e.g. for index entry targets."""
3286- key = category + 'serialno'
3287- cur = self.temp_data.get(key, 0)
3288- self.temp_data[key] = cur + 1
3289- return cur
3290-
3291- def note_dependency(self, filename):
3292- self.dependencies.setdefault(self.docname, set()).add(filename)
3293-
3294- def note_reread(self):
3295- self.reread_always.add(self.docname)
3296-
3297- def note_versionchange(self, type, version, node, lineno):
3298- self.versionchanges.setdefault(version, []).append(
3299- (type, self.temp_data['docname'], lineno,
3300- self.temp_data.get('py:module'),
3301- self.temp_data.get('object'), node.astext()))
3302-
3303- # post-processing of read doctrees
3304-
3305- def filter_messages(self, doctree):
3306- """Filter system messages from a doctree."""
3307- filterlevel = self.config.keep_warnings and 2 or 5
3308- for node in doctree.traverse(nodes.system_message):
3309- if node['level'] < filterlevel:
3310- node.parent.remove(node)
3311-
3312-
3313- def process_dependencies(self, docname, doctree):
3314- """Process docutils-generated dependency info."""
3315- cwd = os.getcwd()
3316- frompath = path.join(path.normpath(self.srcdir), 'dummy')
3317- deps = doctree.settings.record_dependencies
3318- if not deps:
3319- return
3320- for dep in deps.list:
3321- # the dependency path is relative to the working dir, so get
3322- # one relative to the srcdir
3323- relpath = relative_path(frompath,
3324- path.normpath(path.join(cwd, dep)))
3325- self.dependencies.setdefault(docname, set()).add(relpath)
3326-
3327- def process_downloads(self, docname, doctree):
3328- """Process downloadable file paths. """
3329- for node in doctree.traverse(addnodes.download_reference):
3330- targetname = node['reftarget']
3331- rel_filename, filename = self.relfn2path(targetname, docname)
3332- self.dependencies.setdefault(docname, set()).add(rel_filename)
3333- if not os.access(filename, os.R_OK):
3334- self.warn_node('download file not readable: %s' % filename,
3335- node)
3336- continue
3337- uniquename = self.dlfiles.add_file(docname, filename)
3338- node['filename'] = uniquename
3339-
3340- def process_images(self, docname, doctree):
3341- """Process and rewrite image URIs."""
3342- for node in doctree.traverse(nodes.image):
3343- # Map the mimetype to the corresponding image. The writer may
3344- # choose the best image from these candidates. The special key * is
3345- # set if there is only single candidate to be used by a writer.
3346- # The special key ? is set for nonlocal URIs.
3347- node['candidates'] = candidates = {}
3348- imguri = node['uri']
3349- if imguri.find('://') != -1:
3350- self.warn_node('nonlocal image URI found: %s' % imguri, node)
3351- candidates['?'] = imguri
3352- continue
3353- rel_imgpath, full_imgpath = self.relfn2path(imguri, docname)
3354- # set imgpath as default URI
3355- node['uri'] = rel_imgpath
3356- if rel_imgpath.endswith(os.extsep + '*'):
3357- for filename in glob(full_imgpath):
3358- new_imgpath = relative_path(self.srcdir, filename)
3359- if filename.lower().endswith('.pdf'):
3360- candidates['application/pdf'] = new_imgpath
3361- elif filename.lower().endswith('.svg'):
3362- candidates['image/svg+xml'] = new_imgpath
3363- else:
3364- try:
3365- f = open(filename, 'rb')
3366- try:
3367- imgtype = imghdr.what(f)
3368- finally:
3369- f.close()
3370- except (OSError, IOError), err:
3371- self.warn_node('image file %s not readable: %s' %
3372- (filename, err), node)
3373- if imgtype:
3374- candidates['image/' + imgtype] = new_imgpath
3375- else:
3376- candidates['*'] = rel_imgpath
3377- # map image paths to unique image names (so that they can be put
3378- # into a single directory)
3379- for imgpath in candidates.itervalues():
3380- self.dependencies.setdefault(docname, set()).add(imgpath)
3381- if not os.access(path.join(self.srcdir, imgpath), os.R_OK):
3382- self.warn_node('image file not readable: %s' % imgpath,
3383- node)
3384- continue
3385- self.images.add_file(docname, imgpath)
3386-
3387- def process_metadata(self, docname, doctree):
3388- """Process the docinfo part of the doctree as metadata.
3389-
3390- Keep processing minimal -- just return what docutils says.
3391- """
3392- self.metadata[docname] = md = {}
3393- try:
3394- docinfo = doctree[0]
3395- except IndexError:
3396- # probably an empty document
3397- return
3398- if docinfo.__class__ is not nodes.docinfo:
3399- # nothing to see here
3400- return
3401- for node in docinfo:
3402- # nodes are multiply inherited...
3403- if isinstance(node, nodes.authors):
3404- md['authors'] = [author.astext() for author in node]
3405- elif isinstance(node, nodes.TextElement): # e.g. author
3406- md[node.__class__.__name__] = node.astext()
3407- else:
3408- name, body = node
3409- md[name.astext()] = body.astext()
3410- del doctree[0]
3411-
3412- def process_refonly_bullet_lists(self, docname, doctree):
3413- """Change refonly bullet lists to use compact_paragraphs.
3414-
3415- Specifically implemented for 'Indices and Tables' section, which looks
3416- odd when html_compact_lists is false.
3417- """
3418- if self.config.html_compact_lists:
3419- return
3420-
3421- class RefOnlyListChecker(nodes.GenericNodeVisitor):
3422- """Raise `nodes.NodeFound` if non-simple list item is encountered.
3423-
3424- Here 'simple' means a list item containing only a paragraph with a
3425- single reference in it.
3426- """
3427-
3428- def default_visit(self, node):
3429- raise nodes.NodeFound
3430-
3431- def visit_bullet_list(self, node):
3432- pass
3433-
3434- def visit_list_item(self, node):
3435- children = []
3436- for child in node.children:
3437- if not isinstance(child, nodes.Invisible):
3438- children.append(child)
3439- if len(children) != 1:
3440- raise nodes.NodeFound
3441- if not isinstance(children[0], nodes.paragraph):
3442- raise nodes.NodeFound
3443- para = children[0]
3444- if len(para) != 1:
3445- raise nodes.NodeFound
3446- if not isinstance(para[0], addnodes.pending_xref):
3447- raise nodes.NodeFound
3448- raise nodes.SkipChildren
3449-
3450- def invisible_visit(self, node):
3451- """Invisible nodes should be ignored."""
3452- pass
3453-
3454- def check_refonly_list(node):
3455- """Check for list with only references in it."""
3456- visitor = RefOnlyListChecker(doctree)
3457- try:
3458- node.walk(visitor)
3459- except nodes.NodeFound:
3460- return False
3461- else:
3462- return True
3463-
3464- for node in doctree.traverse(nodes.bullet_list):
3465- if check_refonly_list(node):
3466- for item in node.traverse(nodes.list_item):
3467- para = item[0]
3468- ref = para[0]
3469- compact_para = addnodes.compact_paragraph()
3470- compact_para += ref
3471- item.replace(para, compact_para)
3472-
3473- def create_title_from(self, docname, document):
3474- """Add a title node to the document (just copy the first section title),
3475- and store that title in the environment.
3476- """
3477- titlenode = nodes.title()
3478- longtitlenode = titlenode
3479- # explicit title set with title directive; use this only for
3480- # the <title> tag in HTML output
3481- if document.has_key('title'):
3482- longtitlenode = nodes.title()
3483- longtitlenode += nodes.Text(document['title'])
3484- # look for first section title and use that as the title
3485- for node in document.traverse(nodes.section):
3486- visitor = SphinxContentsFilter(document)
3487- node[0].walkabout(visitor)
3488- titlenode += visitor.get_entry_text()
3489- break
3490- else:
3491- # document has no title
3492- titlenode += nodes.Text('<no title>')
3493- self.titles[docname] = titlenode
3494- self.longtitles[docname] = longtitlenode
3495-
3496- def note_indexentries_from(self, docname, document):
3497- entries = self.indexentries[docname] = []
3498- for node in document.traverse(addnodes.index):
3499- entries.extend(node['entries'])
3500-
3501- def note_citations_from(self, docname, document):
3502- for node in document.traverse(nodes.citation):
3503- label = node[0].astext()
3504- if label in self.citations:
3505- self.warn_node('duplicate citation %s, ' % label +
3506- 'other instance in %s' % self.doc2path(
3507- self.citations[label][0]), node)
3508- self.citations[label] = (docname, node['ids'][0])
3509-
3510- def note_toctree(self, docname, toctreenode):
3511- """Note a TOC tree directive in a document and gather information about
3512- file relations from it.
3513- """
3514- if toctreenode['glob']:
3515- self.glob_toctrees.add(docname)
3516- if toctreenode.get('numbered'):
3517- self.numbered_toctrees.add(docname)
3518- includefiles = toctreenode['includefiles']
3519- for includefile in includefiles:
3520- # note that if the included file is rebuilt, this one must be
3521- # too (since the TOC of the included file could have changed)
3522- self.files_to_rebuild.setdefault(includefile, set()).add(docname)
3523- self.toctree_includes.setdefault(docname, []).extend(includefiles)
3524-
3525- def build_toc_from(self, docname, document):
3526- """Build a TOC from the doctree and store it in the inventory."""
3527- numentries = [0] # nonlocal again...
3528-
3529- try:
3530- maxdepth = int(self.metadata[docname].get('tocdepth', 0))
3531- except ValueError:
3532- maxdepth = 0
3533-
3534- def traverse_in_section(node, cls):
3535- """Like traverse(), but stay within the same section."""
3536- result = []
3537- if isinstance(node, cls):
3538- result.append(node)
3539- for child in node.children:
3540- if isinstance(child, nodes.section):
3541- continue
3542- result.extend(traverse_in_section(child, cls))
3543- return result
3544-
3545- def build_toc(node, depth=1):
3546- entries = []
3547- for sectionnode in node:
3548- # find all toctree nodes in this section and add them
3549- # to the toc (just copying the toctree node which is then
3550- # resolved in self.get_and_resolve_doctree)
3551- if isinstance(sectionnode, addnodes.only):
3552- onlynode = addnodes.only(expr=sectionnode['expr'])
3553- blist = build_toc(sectionnode, depth)
3554- if blist:
3555- onlynode += blist.children
3556- entries.append(onlynode)
3557- if not isinstance(sectionnode, nodes.section):
3558- for toctreenode in traverse_in_section(sectionnode,
3559- addnodes.toctree):
3560- item = toctreenode.copy()
3561- entries.append(item)
3562- # important: do the inventory stuff
3563- self.note_toctree(docname, toctreenode)
3564- continue
3565- title = sectionnode[0]
3566- # copy the contents of the section title, but without references
3567- # and unnecessary stuff
3568- visitor = SphinxContentsFilter(document)
3569- title.walkabout(visitor)
3570- nodetext = visitor.get_entry_text()
3571- if not numentries[0]:
3572- # for the very first toc entry, don't add an anchor
3573- # as it is the file's title anyway
3574- anchorname = ''
3575- else:
3576- anchorname = '#' + sectionnode['ids'][0]
3577- numentries[0] += 1
3578- # make these nodes:
3579- # list_item -> compact_paragraph -> reference
3580- reference = nodes.reference(
3581- '', '', internal=True, refuri=docname,
3582- anchorname=anchorname, *nodetext)
3583- para = addnodes.compact_paragraph('', '', reference)
3584- item = nodes.list_item('', para)
3585- if maxdepth == 0 or depth < maxdepth:
3586- item += build_toc(sectionnode, depth+1)
3587- entries.append(item)
3588- if entries:
3589- return nodes.bullet_list('', *entries)
3590- return []
3591- toc = build_toc(document)
3592- if toc:
3593- self.tocs[docname] = toc
3594- else:
3595- self.tocs[docname] = nodes.bullet_list('')
3596- self.toc_num_entries[docname] = numentries[0]
3597-
3598- def get_toc_for(self, docname, builder):
3599- """Return a TOC nodetree -- for use on the same page only!"""
3600- try:
3601- toc = self.tocs[docname].deepcopy()
3602- except KeyError:
3603- # the document does not exist anymore: return a dummy node that
3604- # renders to nothing
3605- return nodes.paragraph()
3606- self.process_only_nodes(toc, builder, docname)
3607- for node in toc.traverse(nodes.reference):
3608- node['refuri'] = node['anchorname'] or '#'
3609- return toc
3610-
3611- def get_toctree_for(self, docname, builder, collapse, **kwds):
3612- """Return the global TOC nodetree."""
3613- doctree = self.get_doctree(self.config.master_doc)
3614- toctrees = []
3615- if 'includehidden' not in kwds:
3616- kwds['includehidden'] = True
3617- if 'maxdepth' not in kwds:
3618- kwds['maxdepth'] = 0
3619- kwds['collapse'] = collapse
3620- for toctreenode in doctree.traverse(addnodes.toctree):
3621- toctree = self.resolve_toctree(docname, builder, toctreenode,
3622- prune=True, **kwds)
3623- toctrees.append(toctree)
3624- if not toctrees:
3625- return None
3626- result = toctrees[0]
3627- for toctree in toctrees[1:]:
3628- result.extend(toctree.children)
3629- return result
3630-
3631- def get_domain(self, domainname):
3632- """Return the domain instance with the specified name.
3633-
3634- Raises an ExtensionError if the domain is not registered.
3635- """
3636- try:
3637- return self.domains[domainname]
3638- except KeyError:
3639- raise ExtensionError('Domain %r is not registered' % domainname)
3640-
3641- # --------- RESOLVING REFERENCES AND TOCTREES ------------------------------
3642-
3643- def get_doctree(self, docname):
3644- """Read the doctree for a file from the pickle and return it."""
3645- doctree_filename = self.doc2path(docname, self.doctreedir, '.doctree')
3646- f = open(doctree_filename, 'rb')
3647- try:
3648- doctree = pickle.load(f)
3649- finally:
3650- f.close()
3651- doctree.settings.env = self
3652- doctree.reporter = Reporter(self.doc2path(docname), 2, 5,
3653- stream=WarningStream(self._warnfunc))
3654- return doctree
3655-
3656-
3657- def get_and_resolve_doctree(self, docname, builder, doctree=None,
3658- prune_toctrees=True):
3659- """Read the doctree from the pickle, resolve cross-references and
3660- toctrees and return it.
3661- """
3662- if doctree is None:
3663- doctree = self.get_doctree(docname)
3664-
3665- # resolve all pending cross-references
3666- self.resolve_references(doctree, docname, builder)
3667-
3668- # now, resolve all toctree nodes
3669- for toctreenode in doctree.traverse(addnodes.toctree):
3670- result = self.resolve_toctree(docname, builder, toctreenode,
3671- prune=prune_toctrees)
3672- if result is None:
3673- toctreenode.replace_self([])
3674- else:
3675- toctreenode.replace_self(result)
3676-
3677- return doctree
3678-
3679- def resolve_toctree(self, docname, builder, toctree, prune=True, maxdepth=0,
3680- titles_only=False, collapse=False, includehidden=False):
3681- """Resolve a *toctree* node into individual bullet lists with titles
3682- as items, returning None (if no containing titles are found) or
3683- a new node.
3684-
3685- If *prune* is True, the tree is pruned to *maxdepth*, or if that is 0,
3686- to the value of the *maxdepth* option on the *toctree* node.
3687- If *titles_only* is True, only toplevel document titles will be in the
3688- resulting tree.
3689- If *collapse* is True, all branches not containing docname will
3690- be collapsed.
3691- """
3692- if toctree.get('hidden', False) and not includehidden:
3693- return None
3694-
3695- def _walk_depth(node, depth, maxdepth):
3696- """Utility: Cut a TOC at a specified depth."""
3697-
3698- # For reading this function, it is useful to keep in mind the node
3699- # structure of a toctree (using HTML-like node names for brevity):
3700- #
3701- # <ul>
3702- # <li>
3703- # <p><a></p>
3704- # <p><a></p>
3705- # ...
3706- # <ul>
3707- # ...
3708- # </ul>
3709- # </li>
3710- # </ul>
3711-
3712- for subnode in node.children[:]:
3713- if isinstance(subnode, (addnodes.compact_paragraph,
3714- nodes.list_item)):
3715- # for <p> and <li>, just indicate the depth level and
3716- # recurse to children
3717- subnode['classes'].append('toctree-l%d' % (depth-1))
3718- _walk_depth(subnode, depth, maxdepth)
3719-
3720- elif isinstance(subnode, nodes.bullet_list):
3721- # for <ul>, determine if the depth is too large or if the
3722- # entry is to be collapsed
3723- if maxdepth > 0 and depth > maxdepth:
3724- subnode.parent.replace(subnode, [])
3725- else:
3726- # to find out what to collapse, *first* walk subitems,
3727- # since that determines which children point to the
3728- # current page
3729- _walk_depth(subnode, depth+1, maxdepth)
3730- # cull sub-entries whose parents aren't 'current'
3731- if (collapse and depth > 1 and
3732- 'iscurrent' not in subnode.parent):
3733- subnode.parent.remove(subnode)
3734-
3735- elif isinstance(subnode, nodes.reference):
3736- # for <a>, identify which entries point to the current
3737- # document and therefore may not be collapsed
3738- if subnode['refuri'] == docname:
3739- if not subnode['anchorname']:
3740- # give the whole branch a 'current' class
3741- # (useful for styling it differently)
3742- branchnode = subnode
3743- while branchnode:
3744- branchnode['classes'].append('current')
3745- branchnode = branchnode.parent
3746- # mark the list_item as "on current page"
3747- if subnode.parent.parent.get('iscurrent'):
3748- # but only if it's not already done
3749- return
3750- while subnode:
3751- subnode['iscurrent'] = True
3752- subnode = subnode.parent
3753-
3754- def _entries_from_toctree(toctreenode, parents,
3755- separate=False, subtree=False):
3756- """Return TOC entries for a toctree node."""
3757- refs = [(e[0], str(e[1])) for e in toctreenode['entries']]
3758- entries = []
3759- for (title, ref) in refs:
3760- try:
3761- refdoc = None
3762- if url_re.match(ref):
3763- reference = nodes.reference('', '', internal=False,
3764- refuri=ref, anchorname='',
3765- *[nodes.Text(title)])
3766- para = addnodes.compact_paragraph('', '', reference)
3767- item = nodes.list_item('', para)
3768- toc = nodes.bullet_list('', item)
3769- elif ref == 'self':
3770- # 'self' refers to the document from which this
3771- # toctree originates
3772- ref = toctreenode['parent']
3773- if not title:
3774- title = clean_astext(self.titles[ref])
3775- reference = nodes.reference('', '', internal=True,
3776- refuri=ref,
3777- anchorname='',
3778- *[nodes.Text(title)])
3779- para = addnodes.compact_paragraph('', '', reference)
3780- item = nodes.list_item('', para)
3781- # don't show subitems
3782- toc = nodes.bullet_list('', item)
3783- else:
3784- if ref in parents:
3785- self.warn(ref, 'circular toctree references '
3786- 'detected, ignoring: %s <- %s' %
3787- (ref, ' <- '.join(parents)))
3788- continue
3789- refdoc = ref
3790- toc = self.tocs[ref].deepcopy()
3791- self.process_only_nodes(toc, builder, ref)
3792- if title and toc.children and len(toc.children) == 1:
3793- child = toc.children[0]
3794- for refnode in child.traverse(nodes.reference):
3795- if refnode['refuri'] == ref and \
3796- not refnode['anchorname']:
3797- refnode.children = [nodes.Text(title)]
3798- if not toc.children:
3799- # empty toc means: no titles will show up in the toctree
3800- self.warn_node(
3801- 'toctree contains reference to document %r that '
3802- 'doesn\'t have a title: no link will be generated'
3803- % ref, toctreenode)
3804- except KeyError:
3805- # this is raised if the included file does not exist
3806- self.warn_node(
3807- 'toctree contains reference to nonexisting document %r'
3808- % ref, toctreenode)
3809- else:
3810- # if titles_only is given, only keep the main title and
3811- # sub-toctrees
3812- if titles_only:
3813- # delete everything but the toplevel title(s)
3814- # and toctrees
3815- for toplevel in toc:
3816- # nodes with length 1 don't have any children anyway
3817- if len(toplevel) > 1:
3818- subtrees = toplevel.traverse(addnodes.toctree)
3819- toplevel[1][:] = subtrees
3820- # resolve all sub-toctrees
3821- for toctreenode in toc.traverse(addnodes.toctree):
3822- if not (toctreenode.get('hidden', False)
3823- and not includehidden):
3824- i = toctreenode.parent.index(toctreenode) + 1
3825- for item in _entries_from_toctree(
3826- toctreenode, [refdoc] + parents,
3827- subtree=True):
3828- toctreenode.parent.insert(i, item)
3829- i += 1
3830- toctreenode.parent.remove(toctreenode)
3831- if separate:
3832- entries.append(toc)
3833- else:
3834- entries.extend(toc.children)
3835- if not subtree and not separate:
3836- ret = nodes.bullet_list()
3837- ret += entries
3838- return [ret]
3839- return entries
3840-
3841- maxdepth = maxdepth or toctree.get('maxdepth', -1)
3842- if not titles_only and toctree.get('titlesonly', False):
3843- titles_only = True
3844-
3845- # NOTE: previously, this was separate=True, but that leads to artificial
3846- # separation when two or more toctree entries form a logical unit, so
3847- # separating mode is no longer used -- it's kept here for history's sake
3848- tocentries = _entries_from_toctree(toctree, [], separate=False)
3849- if not tocentries:
3850- return None
3851-
3852- newnode = addnodes.compact_paragraph('', '', *tocentries)
3853- newnode['toctree'] = True
3854-
3855- # prune the tree to maxdepth and replace titles, also set level classes
3856- _walk_depth(newnode, 1, prune and maxdepth or 0)
3857-
3858- # set the target paths in the toctrees (they are not known at TOC
3859- # generation time)
3860- for refnode in newnode.traverse(nodes.reference):
3861- if not url_re.match(refnode['refuri']):
3862- refnode['refuri'] = builder.get_relative_uri(
3863- docname, refnode['refuri']) + refnode['anchorname']
3864- return newnode
3865-
3866- def resolve_references(self, doctree, fromdocname, builder):
3867- for node in doctree.traverse(addnodes.pending_xref):
3868- contnode = node[0].deepcopy()
3869- newnode = None
3870-
3871- typ = node['reftype']
3872- target = node['reftarget']
3873- refdoc = node.get('refdoc', fromdocname)
3874- domain = None
3875-
3876- try:
3877- if 'refdomain' in node and node['refdomain']:
3878- # let the domain try to resolve the reference
3879- try:
3880- domain = self.domains[node['refdomain']]
3881- except KeyError:
3882- raise NoUri
3883- newnode = domain.resolve_xref(self, fromdocname, builder,
3884- typ, target, node, contnode)
3885- # really hardwired reference types
3886- elif typ == 'doc':
3887- # directly reference to document by source name;
3888- # can be absolute or relative
3889- docname = docname_join(refdoc, target)
3890- if docname in self.all_docs:
3891- if node['refexplicit']:
3892- # reference with explicit title
3893- caption = node.astext()
3894- else:
3895- caption = clean_astext(self.titles[docname])
3896- innernode = nodes.emphasis(caption, caption)
3897- newnode = nodes.reference('', '', internal=True)
3898- newnode['refuri'] = builder.get_relative_uri(
3899- fromdocname, docname)
3900- newnode.append(innernode)
3901- elif typ == 'citation':
3902- docname, labelid = self.citations.get(target, ('', ''))
3903- if docname:
3904- newnode = make_refnode(builder, fromdocname, docname,
3905- labelid, contnode)
3906- # no new node found? try the missing-reference event
3907- if newnode is None:
3908- newnode = builder.app.emit_firstresult(
3909- 'missing-reference', self, node, contnode)
3910- # still not found? warn if in nit-picky mode
3911- if newnode is None:
3912- self._warn_missing_reference(
3913- fromdocname, typ, target, node, domain)
3914- except NoUri:
3915- newnode = contnode
3916- node.replace_self(newnode or contnode)
3917-
3918- # remove only-nodes that do not belong to our builder
3919- self.process_only_nodes(doctree, builder, fromdocname)
3920-
3921- # allow custom references to be resolved
3922- builder.app.emit('doctree-resolved', doctree, fromdocname)
3923-
3924- def _warn_missing_reference(self, fromdoc, typ, target, node, domain):
3925- warn = node.get('refwarn')
3926- if self.config.nitpicky:
3927- warn = True
3928- if self._nitpick_ignore:
3929- dtype = domain and '%s:%s' % (domain.name, typ) or typ
3930- if (dtype, target) in self._nitpick_ignore:
3931- warn = False
3932- if not warn:
3933- return
3934- if domain and typ in domain.dangling_warnings:
3935- msg = domain.dangling_warnings[typ]
3936- elif typ == 'doc':
3937- msg = 'unknown document: %(target)s'
3938- elif typ == 'citation':
3939- msg = 'citation not found: %(target)s'
3940- elif node.get('refdomain', 'std') != 'std':
3941- msg = '%s:%s reference target not found: %%(target)s' % \
3942- (node['refdomain'], typ)
3943- else:
3944- msg = '%s reference target not found: %%(target)s' % typ
3945- self.warn_node(msg % {'target': target}, node)
3946-
3947- def process_only_nodes(self, doctree, builder, fromdocname=None):
3948- # A comment on the comment() nodes being inserted: replacing by [] would
3949- # result in a "Losing ids" exception if there is a target node before
3950- # the only node, so we make sure docutils can transfer the id to
3951- # something, even if it's just a comment and will lose the id anyway...
3952- for node in doctree.traverse(addnodes.only):
3953- try:
3954- ret = builder.tags.eval_condition(node['expr'])
3955- except Exception, err:
3956- self.warn_node('exception while evaluating only '
3957- 'directive expression: %s' % err, node)
3958- node.replace_self(node.children or nodes.comment())
3959- else:
3960- if ret:
3961- node.replace_self(node.children or nodes.comment())
3962- else:
3963- node.replace_self(nodes.comment())
3964-
3965- def assign_section_numbers(self):
3966- """Assign a section number to each heading under a numbered toctree."""
3967- # a list of all docnames whose section numbers changed
3968- rewrite_needed = []
3969-
3970- old_secnumbers = self.toc_secnumbers
3971- self.toc_secnumbers = {}
3972-
3973- def _walk_toc(node, secnums, depth, titlenode=None):
3974- # titlenode is the title of the document, it will get assigned a
3975- # secnumber too, so that it shows up in next/prev/parent rellinks
3976- for subnode in node.children:
3977- if isinstance(subnode, nodes.bullet_list):
3978- numstack.append(0)
3979- _walk_toc(subnode, secnums, depth-1, titlenode)
3980- numstack.pop()
3981- titlenode = None
3982- elif isinstance(subnode, nodes.list_item):
3983- _walk_toc(subnode, secnums, depth, titlenode)
3984- titlenode = None
3985- elif isinstance(subnode, addnodes.only):
3986- # at this stage we don't know yet which sections are going
3987- # to be included; just include all of them, even if it leads
3988- # to gaps in the numbering
3989- _walk_toc(subnode, secnums, depth, titlenode)
3990- titlenode = None
3991- elif isinstance(subnode, addnodes.compact_paragraph):
3992- numstack[-1] += 1
3993- if depth > 0:
3994- number = tuple(numstack)
3995- else:
3996- number = None
3997- secnums[subnode[0]['anchorname']] = \
3998- subnode[0]['secnumber'] = number
3999- if titlenode:
4000- titlenode['secnumber'] = number
4001- titlenode = None
4002- elif isinstance(subnode, addnodes.toctree):
4003- _walk_toctree(subnode, depth)
4004-
4005- def _walk_toctree(toctreenode, depth):
4006- if depth == 0:
4007- return
4008- for (title, ref) in toctreenode['entries']:
4009- if url_re.match(ref) or ref == 'self':
4010- # don't mess with those
4011- continue
4012- if ref in self.tocs:
4013- secnums = self.toc_secnumbers[ref] = {}
4014- _walk_toc(self.tocs[ref], secnums, depth,
4015- self.titles.get(ref))
4016- if secnums != old_secnumbers.get(ref):
4017- rewrite_needed.append(ref)
4018-
4019- for docname in self.numbered_toctrees:
4020- doctree = self.get_doctree(docname)
4021- for toctreenode in doctree.traverse(addnodes.toctree):
4022- depth = toctreenode.get('numbered', 0)
4023- if depth:
4024- # every numbered toctree gets new numbering
4025- numstack = [0]
4026- _walk_toctree(toctreenode, depth)
4027-
4028- return rewrite_needed
4029-
4030- def create_index(self, builder, group_entries=True,
4031- _fixre=re.compile(r'(.*) ([(][^()]*[)])')):
4032- """Create the real index from the collected index entries."""
4033- new = {}
4034-
4035- def add_entry(word, subword, link=True, dic=new):
4036- entry = dic.get(word)
4037- if not entry:
4038- dic[word] = entry = [[], {}]
4039- if subword:
4040- add_entry(subword, '', link=link, dic=entry[1])
4041- elif link:
4042- try:
4043- uri = builder.get_relative_uri('genindex', fn) + '#' + tid
4044- except NoUri:
4045- pass
4046- else:
4047- entry[0].append((main, uri))
4048-
4049- for fn, entries in self.indexentries.iteritems():
4050- # new entry types must be listed in directives/other.py!
4051- for type, value, tid, main in entries:
4052- try:
4053- if type == 'single':
4054- try:
4055- entry, subentry = split_into(2, 'single', value)
4056- except ValueError:
4057- entry, = split_into(1, 'single', value)
4058- subentry = ''
4059- add_entry(entry, subentry)
4060- elif type == 'pair':
4061- first, second = split_into(2, 'pair', value)
4062- add_entry(first, second)
4063- add_entry(second, first)
4064- elif type == 'triple':
4065- first, second, third = split_into(3, 'triple', value)
4066- add_entry(first, second+' '+third)
4067- add_entry(second, third+', '+first)
4068- add_entry(third, first+' '+second)
4069- elif type == 'see':
4070- first, second = split_into(2, 'see', value)
4071- add_entry(first, _('see %s') % second, link=False)
4072- elif type == 'seealso':
4073- first, second = split_into(2, 'see', value)
4074- add_entry(first, _('see also %s') % second, link=False)
4075- else:
4076- self.warn(fn, 'unknown index entry type %r' % type)
4077- except ValueError, err:
4078- self.warn(fn, str(err))
4079-
4080- # sort the index entries; put all symbols at the front, even those
4081- # following the letters in ASCII, this is where the chr(127) comes from
4082- def keyfunc(entry, lcletters=string.ascii_lowercase + '_'):
4083- lckey = unicodedata.normalize('NFD', entry[0].lower())
4084- if lckey[0:1] in lcletters:
4085- return chr(127) + lckey
4086- return lckey
4087- newlist = new.items()
4088- newlist.sort(key=keyfunc)
4089-
4090- if group_entries:
4091- # fixup entries: transform
4092- # func() (in module foo)
4093- # func() (in module bar)
4094- # into
4095- # func()
4096- # (in module foo)
4097- # (in module bar)
4098- oldkey = ''
4099- oldsubitems = None
4100- i = 0
4101- while i < len(newlist):
4102- key, (targets, subitems) = newlist[i]
4103- # cannot move if it has subitems; structure gets too complex
4104- if not subitems:
4105- m = _fixre.match(key)
4106- if m:
4107- if oldkey == m.group(1):
4108- # prefixes match: add entry as subitem of the
4109- # previous entry
4110- oldsubitems.setdefault(m.group(2), [[], {}])[0].\
4111- extend(targets)
4112- del newlist[i]
4113- continue
4114- oldkey = m.group(1)
4115- else:
4116- oldkey = key
4117- oldsubitems = subitems
4118- i += 1
4119-
4120- # group the entries by letter
4121- def keyfunc2(item, letters=string.ascii_uppercase + '_'):
4122- # hack: mutating the subitems dicts to a list in the keyfunc
4123- k, v = item
4124- v[1] = sorted((si, se) for (si, (se, void)) in v[1].iteritems())
4125- # now calculate the key
4126- letter = unicodedata.normalize('NFD', k[0])[0].upper()
4127- if letter in letters:
4128- return letter
4129- else:
4130- # get all other symbols under one heading
4131- return 'Symbols'
4132- return [(key, list(group))
4133- for (key, group) in groupby(newlist, keyfunc2)]
4134-
4135- def collect_relations(self):
4136- relations = {}
4137- getinc = self.toctree_includes.get
4138- def collect(parents, parents_set, docname, previous, next):
4139- # circular relationship?
4140- if docname in parents_set:
4141- # we will warn about this in resolve_toctree()
4142- return
4143- includes = getinc(docname)
4144- # previous
4145- if not previous:
4146- # if no previous sibling, go to parent
4147- previous = parents[0][0]
4148- else:
4149- # else, go to previous sibling, or if it has children, to
4150- # the last of its children, or if that has children, to the
4151- # last of those, and so forth
4152- while 1:
4153- previncs = getinc(previous)
4154- if previncs:
4155- previous = previncs[-1]
4156- else:
4157- break
4158- # next
4159- if includes:
4160- # if it has children, go to first of them
4161- next = includes[0]
4162- elif next:
4163- # else, if next sibling, go to it
4164- pass
4165- else:
4166- # else, go to the next sibling of the parent, if present,
4167- # else the grandparent's sibling, if present, and so forth
4168- for parname, parindex in parents:
4169- parincs = getinc(parname)
4170- if parincs and parindex + 1 < len(parincs):
4171- next = parincs[parindex+1]
4172- break
4173- # else it will stay None
4174- # same for children
4175- if includes:
4176- for subindex, args in enumerate(izip(includes,
4177- [None] + includes,
4178- includes[1:] + [None])):
4179- collect([(docname, subindex)] + parents,
4180- parents_set.union([docname]), *args)
4181- relations[docname] = [parents[0][0], previous, next]
4182- collect([(None, 0)], set(), self.config.master_doc, None, None)
4183- return relations
4184-
4185- def check_consistency(self):
4186- """Do consistency checks."""
4187- for docname in sorted(self.all_docs):
4188- if docname not in self.files_to_rebuild:
4189- if docname == self.config.master_doc:
4190- # the master file is not included anywhere ;)
4191- continue
4192- if 'orphan' in self.metadata[docname]:
4193- continue
4194- self.warn(docname, 'document isn\'t included in any toctree')
4195-
4196
4197=== added directory '.pc/test_build_html_rb.diff'
4198=== added directory '.pc/test_build_html_rb.diff/tests'
4199=== added file '.pc/test_build_html_rb.diff/tests/test_build_html.py'
4200--- .pc/test_build_html_rb.diff/tests/test_build_html.py 1970-01-01 00:00:00 +0000
4201+++ .pc/test_build_html_rb.diff/tests/test_build_html.py 2012-11-28 07:12:20 +0000
4202@@ -0,0 +1,339 @@
4203+# -*- coding: utf-8 -*-
4204+"""
4205+ test_build_html
4206+ ~~~~~~~~~~~~~~~
4207+
4208+ Test the HTML builder and check output against XPath.
4209+
4210+ :copyright: Copyright 2007-2011 by the Sphinx team, see AUTHORS.
4211+ :license: BSD, see LICENSE for details.
4212+"""
4213+
4214+import os
4215+import re
4216+import sys
4217+import htmlentitydefs
4218+from StringIO import StringIO
4219+
4220+try:
4221+ import pygments
4222+except ImportError:
4223+ pygments = None
4224+
4225+from sphinx import __version__
4226+from util import *
4227+from etree13 import ElementTree as ET
4228+
4229+
4230+def teardown_module():
4231+ (test_root / '_build').rmtree(True)
4232+
4233+
4234+html_warnfile = StringIO()
4235+
4236+ENV_WARNINGS = """\
4237+%(root)s/autodoc_fodder.py:docstring of autodoc_fodder\\.MarkupError:2: \
4238+WARNING: Explicit markup ends without a blank line; unexpected \
4239+unindent\\.\\n?
4240+%(root)s/images.txt:9: WARNING: image file not readable: foo.png
4241+%(root)s/images.txt:23: WARNING: nonlocal image URI found: \
4242+http://www.python.org/logo.png
4243+%(root)s/includes.txt:\\d*: WARNING: Encoding 'utf-8-sig' used for \
4244+reading included file u'.*?wrongenc.inc' seems to be wrong, try giving an \
4245+:encoding: option\\n?
4246+%(root)s/includes.txt:4: WARNING: download file not readable: .*?nonexisting.png
4247+%(root)s/objects.txt:\\d*: WARNING: using old C markup; please migrate to \
4248+new-style markup \(e.g. c:function instead of cfunction\), see \
4249+http://sphinx.pocoo.org/domains.html
4250+"""
4251+
4252+HTML_WARNINGS = ENV_WARNINGS + """\
4253+%(root)s/images.txt:20: WARNING: no matching candidate for image URI u'foo.\\*'
4254+%(root)s/markup.txt:: WARNING: invalid single index entry u''
4255+%(root)s/markup.txt:: WARNING: invalid pair index entry u''
4256+%(root)s/markup.txt:: WARNING: invalid pair index entry u'keyword; '
4257+"""
4258+
4259+if sys.version_info >= (3, 0):
4260+ ENV_WARNINGS = remove_unicode_literals(ENV_WARNINGS)
4261+ HTML_WARNINGS = remove_unicode_literals(HTML_WARNINGS)
4262+
4263+
4264+def tail_check(check):
4265+ rex = re.compile(check)
4266+ def checker(nodes):
4267+ for node in nodes:
4268+ if node.tail and rex.search(node.tail):
4269+ return True
4270+ assert False, '%r not found in tail of any nodes %s' % (check, nodes)
4271+ return checker
4272+
4273+
4274+HTML_XPATH = {
4275+ 'images.html': [
4276+ (".//img[@src='_images/img.png']", ''),
4277+ (".//img[@src='_images/img1.png']", ''),
4278+ (".//img[@src='_images/simg.png']", ''),
4279+ (".//img[@src='_images/svgimg.svg']", ''),
4280+ ],
4281+ 'subdir/images.html': [
4282+ (".//img[@src='../_images/img1.png']", ''),
4283+ (".//img[@src='../_images/rimg.png']", ''),
4284+ ],
4285+ 'subdir/includes.html': [
4286+ (".//a[@href='../_downloads/img.png']", ''),
4287+ (".//img[@src='../_images/img.png']", ''),
4288+ (".//p", 'This is an include file.'),
4289+ ],
4290+ 'includes.html': [
4291+ (".//pre", u'Max Strauß'),
4292+ (".//a[@href='_downloads/img.png']", ''),
4293+ (".//a[@href='_downloads/img1.png']", ''),
4294+ (".//pre", u'"quotes"'),
4295+ (".//pre", u"'included'"),
4296+ ],
4297+ 'autodoc.html': [
4298+ (".//dt[@id='test_autodoc.Class']", ''),
4299+ (".//dt[@id='test_autodoc.function']/em", r'\*\*kwds'),
4300+ (".//dd/p", r'Return spam\.'),
4301+ ],
4302+ 'extapi.html': [
4303+ (".//strong", 'from function: Foo'),
4304+ (".//strong", 'from class: Bar'),
4305+ ],
4306+ 'markup.html': [
4307+ (".//title", 'set by title directive'),
4308+ (".//p/em", 'Section author: Georg Brandl'),
4309+ (".//p/em", 'Module author: Georg Brandl'),
4310+ # created by the meta directive
4311+ (".//meta[@name='author'][@content='Me']", ''),
4312+ (".//meta[@name='keywords'][@content='docs, sphinx']", ''),
4313+ # a label created by ``.. _label:``
4314+ (".//div[@id='label']", ''),
4315+ # code with standard code blocks
4316+ (".//pre", '^some code$'),
4317+ # an option list
4318+ (".//span[@class='option']", '--help'),
4319+ # admonitions
4320+ (".//p[@class='first admonition-title']", 'My Admonition'),
4321+ (".//p[@class='last']", 'Note text.'),
4322+ (".//p[@class='last']", 'Warning text.'),
4323+ # inline markup
4324+ (".//li/strong", r'^command\\n$'),
4325+ (".//li/strong", r'^program\\n$'),
4326+ (".//li/em", r'^dfn\\n$'),
4327+ (".//li/tt/span[@class='pre']", r'^kbd\\n$'),
4328+ (".//li/em", u'File \N{TRIANGULAR BULLET} Close'),
4329+ (".//li/tt/span[@class='pre']", '^a/$'),
4330+ (".//li/tt/em/span[@class='pre']", '^varpart$'),
4331+ (".//li/tt/em/span[@class='pre']", '^i$'),
4332+ (".//a[@href='http://www.python.org/dev/peps/pep-0008']"
4333+ "[@class='pep reference external']/strong", 'PEP 8'),
4334+ (".//a[@href='http://tools.ietf.org/html/rfc1.html']"
4335+ "[@class='rfc reference external']/strong", 'RFC 1'),
4336+ (".//a[@href='objects.html#envvar-HOME']"
4337+ "[@class='reference internal']/tt/span[@class='pre']", 'HOME'),
4338+ (".//a[@href='#with']"
4339+ "[@class='reference internal']/tt/span[@class='pre']", '^with$'),
4340+ (".//a[@href='#grammar-token-try_stmt']"
4341+ "[@class='reference internal']/tt/span", '^statement$'),
4342+ (".//a[@href='subdir/includes.html']"
4343+ "[@class='reference internal']/em", 'Including in subdir'),
4344+ (".//a[@href='objects.html#cmdoption-python-c']"
4345+ "[@class='reference internal']/em", 'Python -c option'),
4346+ # abbreviations
4347+ (".//abbr[@title='abbreviation']", '^abbr$'),
4348+ # version stuff
4349+ (".//span[@class='versionmodified']", 'New in version 0.6'),
4350+ # footnote reference
4351+ (".//a[@class='footnote-reference']", r'\[1\]'),
4352+ # created by reference lookup
4353+ (".//a[@href='contents.html#ref1']", ''),
4354+ # ``seealso`` directive
4355+ (".//div/p[@class='first admonition-title']", 'See also'),
4356+ # a ``hlist`` directive
4357+ (".//table[@class='hlist']/tr/td/ul/li", '^This$'),
4358+ # a ``centered`` directive
4359+ (".//p[@class='centered']/strong", 'LICENSE'),
4360+ # a glossary
4361+ (".//dl/dt[@id='term-boson']", 'boson'),
4362+ # a production list
4363+ (".//pre/strong", 'try_stmt'),
4364+ (".//pre/a[@href='#grammar-token-try1_stmt']/tt/span", 'try1_stmt'),
4365+ # tests for ``only`` directive
4366+ (".//p", 'A global substitution.'),
4367+ (".//p", 'In HTML.'),
4368+ (".//p", 'In both.'),
4369+ (".//p", 'Always present'),
4370+ ],
4371+ 'objects.html': [
4372+ (".//dt[@id='mod.Cls.meth1']", ''),
4373+ (".//dt[@id='errmod.Error']", ''),
4374+ (".//dt/tt", r'long\(parameter,\s* list\)'),
4375+ (".//dt/tt", 'another one'),
4376+ (".//a[@href='#mod.Cls'][@class='reference internal']", ''),
4377+ (".//dl[@class='userdesc']", ''),
4378+ (".//dt[@id='userdesc-myobj']", ''),
4379+ (".//a[@href='#userdesc-myobj'][@class='reference internal']", ''),
4380+ # C references
4381+ (".//span[@class='pre']", 'CFunction()'),
4382+ (".//a[@href='#Sphinx_DoSomething']", ''),
4383+ (".//a[@href='#SphinxStruct.member']", ''),
4384+ (".//a[@href='#SPHINX_USE_PYTHON']", ''),
4385+ (".//a[@href='#SphinxType']", ''),
4386+ (".//a[@href='#sphinx_global']", ''),
4387+ # reference from old C markup extension
4388+ (".//a[@href='#Sphinx_Func']", ''),
4389+ # test global TOC created by toctree()
4390+ (".//ul[@class='current']/li[@class='toctree-l1 current']/a[@href='']",
4391+ 'Testing object descriptions'),
4392+ (".//li[@class='toctree-l1']/a[@href='markup.html']",
4393+ 'Testing various markup'),
4394+ # custom sidebar
4395+ (".//h4", 'Custom sidebar'),
4396+ # docfields
4397+ (".//td[@class='field-body']/strong", '^moo$'),
4398+ (".//td[@class='field-body']/strong",
4399+ tail_check(r'\(Moo\) .* Moo')),
4400+ (".//td[@class='field-body']/ul/li/strong", '^hour$'),
4401+ (".//td[@class='field-body']/ul/li/em", '^DuplicateType$'),
4402+ (".//td[@class='field-body']/ul/li/em",
4403+ tail_check(r'.* Some parameter')),
4404+ ],
4405+ 'contents.html': [
4406+ (".//meta[@name='hc'][@content='hcval']", ''),
4407+ (".//meta[@name='hc_co'][@content='hcval_co']", ''),
4408+ (".//meta[@name='testopt'][@content='testoverride']", ''),
4409+ (".//td[@class='label']", r'\[Ref1\]'),
4410+ (".//td[@class='label']", ''),
4411+ (".//li[@class='toctree-l1']/a", 'Testing various markup'),
4412+ (".//li[@class='toctree-l2']/a", 'Inline markup'),
4413+ (".//title", 'Sphinx <Tests>'),
4414+ (".//div[@class='footer']", 'Georg Brandl & Team'),
4415+ (".//a[@href='http://python.org/']"
4416+ "[@class='reference external']", ''),
4417+ (".//li/a[@href='genindex.html']/em", 'Index'),
4418+ (".//li/a[@href='py-modindex.html']/em", 'Module Index'),
4419+ (".//li/a[@href='search.html']/em", 'Search Page'),
4420+ # custom sidebar only for contents
4421+ (".//h4", 'Contents sidebar'),
4422+ # custom JavaScript
4423+ (".//script[@src='file://moo.js']", ''),
4424+ ],
4425+ 'bom.html': [
4426+ (".//title", " File with UTF-8 BOM"),
4427+ ],
4428+ 'extensions.html': [
4429+ (".//a[@href='http://python.org/dev/']", "http://python.org/dev/"),
4430+ (".//a[@href='http://bugs.python.org/issue1000']", "issue 1000"),
4431+ (".//a[@href='http://bugs.python.org/issue1042']", "explicit caption"),
4432+ ],
4433+ '_static/statictmpl.html': [
4434+ (".//project", 'Sphinx <Tests>'),
4435+ ],
4436+ 'genindex.html': [
4437+ # index entries
4438+ (".//a/strong", "Main"),
4439+ (".//a/strong", "[1]"),
4440+ (".//a/strong", "Other"),
4441+ (".//a", "entry"),
4442+ (".//dt/a", "double"),
4443+ ]
4444+}
4445+
4446+if pygments:
4447+ HTML_XPATH['includes.html'].extend([
4448+ (".//pre/span[@class='s']", u'üöä'),
4449+ (".//div[@class='inc-pyobj1 highlight-text']//pre",
4450+ r'^class Foo:\n pass\n\s*$'),
4451+ (".//div[@class='inc-pyobj2 highlight-text']//pre",
4452+ r'^ def baz\(\):\n pass\n\s*$'),
4453+ (".//div[@class='inc-lines highlight-text']//pre",
4454+ r'^class Foo:\n pass\nclass Bar:\n$'),
4455+ (".//div[@class='inc-startend highlight-text']//pre",
4456+ ur'^foo = "Including Unicode characters: üöä"\n$'),
4457+ (".//div[@class='inc-preappend highlight-text']//pre",
4458+ r'(?m)^START CODE$'),
4459+ (".//div[@class='inc-pyobj-dedent highlight-python']//span",
4460+ r'def'),
4461+ (".//div[@class='inc-tab3 highlight-text']//pre",
4462+ r'-| |-'),
4463+ (".//div[@class='inc-tab8 highlight-python']//pre/span",
4464+ r'-| |-'),
4465+ ])
4466+ HTML_XPATH['subdir/includes.html'].extend([
4467+ (".//pre/span", 'line 1'),
4468+ (".//pre/span", 'line 2'),
4469+ ])
4470+
4471+class NslessParser(ET.XMLParser):
4472+ """XMLParser that throws away namespaces in tag names."""
4473+
4474+ def _fixname(self, key):
4475+ try:
4476+ return self._names[key]
4477+ except KeyError:
4478+ name = key
4479+ br = name.find('}')
4480+ if br > 0:
4481+ name = name[br+1:]
4482+ self._names[key] = name = self._fixtext(name)
4483+ return name
4484+
4485+
4486+def check_xpath(etree, fname, path, check):
4487+ nodes = list(etree.findall(path))
4488+ assert nodes != [], ('did not find any node matching xpath '
4489+ '%r in file %s' % (path, fname))
4490+ if hasattr(check, '__call__'):
4491+ check(nodes)
4492+ elif not check:
4493+ # only check for node presence
4494+ pass
4495+ else:
4496+ rex = re.compile(check)
4497+ for node in nodes:
4498+ if node.text and rex.search(node.text):
4499+ break
4500+ else:
4501+ assert False, ('%r not found in any node matching '
4502+ 'path %s in %s: %r' % (check, path, fname,
4503+ [node.text for node in nodes]))
4504+
4505+def check_static_entries(outdir):
4506+ staticdir = outdir / '_static'
4507+ assert staticdir.isdir()
4508+ # a file from a directory entry in html_static_path
4509+ assert (staticdir / 'README').isfile()
4510+ # a directory from a directory entry in html_static_path
4511+ assert (staticdir / 'subdir' / 'foo.css').isfile()
4512+ # a file from a file entry in html_static_path
4513+ assert (staticdir / 'templated.css').isfile()
4514+ assert (staticdir / 'templated.css').text().splitlines()[1] == __version__
4515+ # a file from _static, but matches exclude_patterns
4516+ assert not (staticdir / 'excluded.css').exists()
4517+
4518+@gen_with_app(buildername='html', warning=html_warnfile, cleanenv=True,
4519+ confoverrides={'html_context.hckey_co': 'hcval_co'},
4520+ tags=['testtag'])
4521+def test_html(app):
4522+ app.builder.build_all()
4523+ html_warnings = html_warnfile.getvalue().replace(os.sep, '/')
4524+ html_warnings_exp = HTML_WARNINGS % {'root': re.escape(app.srcdir)}
4525+ assert re.match(html_warnings_exp + '$', html_warnings), \
4526+ 'Warnings don\'t match:\n' + \
4527+ '--- Expected (regex):\n' + html_warnings_exp + \
4528+ '--- Got:\n' + html_warnings
4529+
4530+ for fname, paths in HTML_XPATH.iteritems():
4531+ parser = NslessParser()
4532+ parser.entity.update(htmlentitydefs.entitydefs)
4533+ fp = open(os.path.join(app.outdir, fname))
4534+ try:
4535+ etree = ET.parse(fp, parser)
4536+ finally:
4537+ fp.close()
4538+ for path, check in paths:
4539+ yield check_xpath, etree, fname, path, check
4540+
4541+ check_static_entries(app.builder.outdir)
4542
4543=== modified file 'debian/changelog'
4544--- debian/changelog 2012-11-01 21:39:16 +0000
4545+++ debian/changelog 2012-11-28 07:12:20 +0000
4546@@ -1,3 +1,61 @@
4547+sphinx (1.1.3+dfsg-5ubuntu1) UNRELEASED; urgency=low
4548+
4549+ * Merge with Debian packaging SVN.
4550+ - This brings in fix for LP: #1068493.
4551+ * Remaining Ubuntu changes:
4552+ - Switch to dh_python2.
4553+ - debian/rules: export NO_PKG_MANGLE=1 in order to not have translations
4554+ stripped.
4555+ - debian/rules: Modify xvfb-run to use auto-servernum flag (fixes FTBFS).
4556+ - debian/control: Drop the dependency on python-whoosh.
4557+ - debian/control: Add "XS-Testsuite: autopkgtest" header.
4558+ - debian/patches/fix_manpages_generation_with_new_docutils.diff:
4559+ Fix FTBFS with the new python-docutils package.
4560+
4561+ -- Dmitry Shachnev <mitya57@ubuntu.com> Tue, 27 Nov 2012 19:20:44 +0400
4562+
4563+sphinx (1.1.3+dfsg-6) UNRELEASED; urgency=low
4564+
4565+ [ Jakub Wilk ]
4566+ * DEP-8 tests: remove “Features: no-build-needed”; it's the default now.
4567+ * Bump standards version to 3.9.4; no changes needed.
4568+
4569+ [ Dmitry Shachnev ]
4570+ * debian/patches/l10n_fixes.diff: fix crashes and non-working external
4571+ links in l10n mode (closes: #691719).
4572+
4573+ -- Jakub Wilk <jwilk@debian.org> Tue, 13 Nov 2012 22:36:10 +0100
4574+
4575+sphinx (1.1.3+dfsg-5) experimental; urgency=low
4576+
4577+ [ Jakub Wilk ]
4578+ * DEP-8 tests: use $ADTTMP.
4579+ * dh_sphinxdoc: ignore comments when analysing HTML files (closes: #682850).
4580+ Thanks to Dmitry Shachnev for the bug report.
4581+ * Add dvipng to Suggests (closes: #687273). Thanks to Matthias Klose for the
4582+ bug report.
4583+ * Set PYTHONHASHSEED=random in debian/rules and in DEP-8 tests.
4584+ * Backport upstream patch to fix encoding issues in test_build_html. Now
4585+ that this is fixed, stop running Python 3 tests under LC_ALL=C.
4586+ * Make “debian/rules binary-arch” no-op.
4587+ * Update version number in the sphinx-autogen manpage.
4588+ * Improve dh_sphinxdoc:
4589+ + Fix the --tmpdir option. Thanks to Andriy Senkovych for the bug report.
4590+ + Ignore references to JavaScript code that start with a URI scheme.
4591+ Thanks to Dmitry Shachnev for the bug report.
4592+ + Strip query (?...) and fragment (#...) components from JavaScript
4593+ references. Thanks to Dmitry Shachnev for the bug report.
4594+ * Sort stopwords in searchtools.js. Thanks to Dmitry Shachnev for the bug
4595+ report.
4596+ * Fix compatibility with Python 3.3. Thanks to Dmitry Shachnev for the bug
4597+ report and hunting down the upstream patch.
4598+
4599+ [ Dmitry Shachnev ]
4600+ * Update Homepage field to point to http://sphinx-doc.org/.
4601+ * Build-depend on python3-all instead of python3.
4602+
4603+ -- Jakub Wilk <jwilk@debian.org> Thu, 08 Nov 2012 16:28:23 +0100
4604+
4605 sphinx (1.1.3+dfsg-4ubuntu5) raring; urgency=low
4606
4607 * Build-depend on python3-all instead of python3.
4608
4609=== modified file 'debian/control'
4610--- debian/control 2012-11-01 21:39:16 +0000
4611+++ debian/control 2012-11-28 07:12:20 +0000
4612@@ -22,7 +22,7 @@
4613 XS-Python-Version: >= 2.5
4614 X-Python3-Version: >= 3.1
4615 XS-Testsuite: autopkgtest
4616-Standards-Version: 3.9.3
4617+Standards-Version: 3.9.4
4618 Vcs-Svn: svn://svn.debian.org/python-modules/packages/sphinx/trunk/
4619 Vcs-Browser: http://svn.debian.org/viewsvn/python-modules/packages/sphinx/trunk/
4620
4621@@ -36,7 +36,7 @@
4622 Recommends: python (>= 2.6) | python-simplejson, python-imaging,
4623 sphinx-doc
4624 Suggests:
4625- jsmath, libjs-mathjax,
4626+ jsmath, libjs-mathjax, dvipng,
4627 texlive-latex-recommended, texlive-latex-extra, texlive-fonts-recommended
4628 Description: documentation generator for Python projects (implemented in Python 2)
4629 Sphinx is a tool for producing documentation for Python projects, using
4630@@ -62,7 +62,7 @@
4631 sphinx-common (= ${source:Version})
4632 Recommends: python3-imaging
4633 Suggests:
4634- jsmath, libjs-mathjax,
4635+ jsmath, libjs-mathjax, dvipng,
4636 texlive-latex-recommended, texlive-latex-extra, texlive-fonts-recommended,
4637 sphinx-doc
4638 Description: documentation generator for Python projects (implemented in Python 3)
4639
4640=== modified file 'debian/dh-sphinxdoc/dh_sphinxdoc'
4641--- debian/dh-sphinxdoc/dh_sphinxdoc 2012-08-21 17:45:58 +0000
4642+++ debian/dh-sphinxdoc/dh_sphinxdoc 2012-11-28 07:12:20 +0000
4643@@ -109,7 +109,9 @@
4644 sub load_packaged_js()
4645 {
4646 my %versions = ();
4647- my $root = tmpdir('libjs-sphinxdoc');
4648+ my $root = 'debian/libjs-sphinxdoc'; # It's tempting to use
4649+ # tmpdir('libjs-sphinxdoc') here, but it would break if the user passed
4650+ # --tmpdir to the command.
4651 $root = '' unless -d $root;
4652 my $path = "$root/usr/share/javascript/sphinxdoc";
4653 open(F, '<', "$path/index") or error("cannot open $path/index");
4654@@ -185,7 +187,7 @@
4655 close F;
4656 $search =~ s/<!--.*?-->//g; # strip comments
4657 my %js = ();
4658- grep { $js{$_} = 1 unless excludefile("$path/$_"); } $search =~ m{<script type="text/javascript" src="([^"]++)"></script>}g;
4659+ grep { s/[?#].*//; $js{$_} = 1 unless m/^[a-z][a-z0-9.+-]*:/i or excludefile("$path/$_"); } $search =~ m{<script type="text/javascript" src="([^"]++)"></script>}g;
4660 my $loads_searchindex = $search =~ m/\QjQuery(function() { Search.loadIndex("searchindex.js"); });\E/;
4661 my ($has_source) = $search =~ m{HAS_SOURCE:\s*(true|false)};
4662 my ($url_root) = $search =~ m{URL_ROOT:\s*'([^']*)'};
4663
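
For readers less fluent in Perl, here is a rough Python sketch of the filtering that the dh_sphinxdoc hunk above introduces (illustration only; the function name and sample inputs are invented for this example): query ("?...") and fragment ("#...") components are stripped from each script reference, and references that begin with a URI scheme are skipped rather than checked against files shipped in the package.

    import re

    # an absolute reference such as http://..., https://... or file://...
    _scheme_re = re.compile(r'^[a-z][a-z0-9.+-]*:', re.IGNORECASE)

    def local_script_refs(srcs):
        """Keep only <script src=...> values that point at local files."""
        refs = set()
        for src in srcs:
            src = re.sub(r'[?#].*', '', src)   # drop ?query and #fragment parts
            if _scheme_re.match(src):          # external URI: nothing to check locally
                continue
            refs.add(src)
        return refs

    # keeps 'searchtools.js' and '_static/jquery.js', ignores the https:// one
    print(sorted(local_script_refs(
        ['searchtools.js?v=1', '_static/jquery.js#top', 'https://example.org/moo.js'])))

This mirrors the Perl substitution s/[?#].*// and the /^[a-z][a-z0-9.+-]*:/i scheme test added above.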
4664=== added file 'debian/patches/l10n_fixes.diff'
4665--- debian/patches/l10n_fixes.diff 1970-01-01 00:00:00 +0000
4666+++ debian/patches/l10n_fixes.diff 2012-11-28 07:12:20 +0000
4667@@ -0,0 +1,58 @@
4668+Description: Fix l10n build of text containing footnotes
4669+ Based on an initial patch by Christophe Simonis and modifications by Takayuki Shimizukawa
4670+ (upstream pull request #86).
4671+Bug: https://bitbucket.org/birkenfeld/sphinx/issue/955/cant-build-html-with-footnotes-when-using
4672+Bug-Debian: http://bugs.debian.org/691719
4673+Author: Takayuki Shimizukawa <shimizukawa@gmail.com>
4674+Last-Update: 2012-11-27
4675+
4676+=== modified file 'sphinx/environment.py'
4677+--- a/sphinx/environment.py 2012-03-12 12:18:37 +0000
4678++++ b/sphinx/environment.py 2012-11-27 14:05:36 +0000
4679+@@ -213,16 +213,44 @@
4680+ parser = RSTParser()
4681+
4682+ for node, msg in extract_messages(self.document):
4683+- patch = new_document(source, settings)
4684+ msgstr = catalog.gettext(msg)
4685+ # XXX add marker to untranslated parts
4686+ if not msgstr or msgstr == msg: # as-of-yet untranslated
4687+ continue
4688++
4689++ patch = new_document(source, settings)
4690+ parser.parse(msgstr, patch)
4691+ patch = patch[0]
4692+ # XXX doctest and other block markup
4693+ if not isinstance(patch, nodes.paragraph):
4694+ continue # skip for now
4695++
4696++ footnote_refs = [r for r in node.children
4697++ if isinstance(r, nodes.footnote_reference)
4698++ and r.get('auto') == 1]
4699++ refs = [r for r in node.children if isinstance(r, nodes.reference)]
4700++
4701++ for i, child in enumerate(patch.children): # update leaves
4702++ if isinstance(child, nodes.footnote_reference) \
4703++ and child.get('auto') == 1:
4704++ # use original 'footnote_reference' object.
4705++ # this object is already registered in self.document.autofootnote_refs
4706++ patch.children[i] = footnote_refs.pop(0)
4707++ # Some duplicated footnote_reference in msgstr causes
4708++ # IndexError in .pop(0). That is invalid msgstr.
4709++
4710++ elif isinstance(child, nodes.reference):
4711++ # reference should use original 'refname'.
4712++ # * reference target ".. _Python: ..." is not translatable.
4713++ # * section refname is not translatable.
4714++ # * inline reference "`Python <...>`_" has no 'refname'.
4715++ if refs and 'refname' in refs[0]:
4716++ refname = child['refname'] = refs.pop(0)['refname']
4717++ self.document.refnames.setdefault(
4718++ refname, []).append(child)
4719++ # if number of reference nodes had been changed, that
4720++ # would often generate unknown link target warning.
4721++
4722+ for child in patch.children: # update leaves
4723+ child.parent = node
4724+ node.children = patch.children
4725+
4726
4727=== modified file 'debian/patches/series'
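
The core of l10n_fixes.diff is easier to read outside the diff context. The following is a hedged, standalone sketch of the same idea in plain docutils terms (the function name transplant_refs is invented; the real change is in sphinx/environment.py and carries extra guards): when a translated paragraph is re-parsed from the msgstr, the original footnote_reference and reference nodes are reused so that objects already registered with the document (autofootnote_refs, refnames) stay valid.

    from docutils import nodes

    def transplant_refs(original, translated, document):
        """Reuse the original paragraph's reference nodes inside the translated one."""
        footnote_refs = [n for n in original.children
                         if isinstance(n, nodes.footnote_reference) and n.get('auto') == 1]
        named_refs = [n for n in original.children if isinstance(n, nodes.reference)]
        for i, child in enumerate(translated.children):
            if isinstance(child, nodes.footnote_reference) and child.get('auto') == 1:
                # keep the object already known to document.autofootnote_refs
                translated.children[i] = footnote_refs.pop(0)
            elif isinstance(child, nodes.reference) and named_refs \
                    and 'refname' in named_refs[0]:
                # the reference target (".. _Python: ...") is not translated, so
                # carry the original refname over and re-register the new node
                refname = child['refname'] = named_refs.pop(0)['refname']
                document.refnames.setdefault(refname, []).append(child)
        return translated

Without this, the freshly parsed footnote references were unknown to the document's auto-footnote bookkeeping, which is what made HTML builds of translated text containing footnotes fail (Debian bug #691719).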
4728--- debian/patches/series 2012-11-01 21:39:16 +0000
4729+++ debian/patches/series 2012-11-28 07:12:20 +0000
4730@@ -9,4 +9,7 @@
4731 pygments_byte_strings.diff
4732 fix_shorthandoff.diff
4733 fix_manpages_generation_with_new_docutils.diff
4734+test_build_html_rb.diff
4735+sort_stopwords.diff
4736 support_python_3.3.diff
4737+l10n_fixes.diff
4738
4739=== added file 'debian/patches/sort_stopwords.diff'
4740--- debian/patches/sort_stopwords.diff 1970-01-01 00:00:00 +0000
4741+++ debian/patches/sort_stopwords.diff 2012-11-28 07:12:20 +0000
4742@@ -0,0 +1,16 @@
4743+Description: sort stopwords in searchtools.js
4744+ The order of stopwords in searchtools.js would be random if hash randomization
4745+ was enabled, breaking dh_sphinxdoc. This patch makes the order deterministic.
4746+Author: Jakub Wilk <jwilk@debian.org>
4747+Applied-Upstream: https://bitbucket.org/birkenfeld/sphinx/changeset/6cf5320e65
4748+Last-Update: 2012-11-10
4749+
4750+--- a/sphinx/search/__init__.py
4751++++ b/sphinx/search/__init__.py
4752+@@ -283,5 +283,5 @@
4753+ def context_for_searchtool(self):
4754+ return dict(
4755+ search_language_stemming_code = self.lang.js_stemmer_code,
4756+- search_language_stop_words = jsdump.dumps(self.lang.stopwords),
4757++ search_language_stop_words = jsdump.dumps(sorted(self.lang.stopwords)),
4758+ )
4759
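
The problem this one-line change avoids is easy to reproduce outside Sphinx. A minimal sketch, using json as a stand-in for Sphinx's internal jsdump module:

    import json

    stopwords = {'a', 'and', 'are', 'as', 'at'}

    # With hash randomization enabled (PYTHONHASHSEED=random, as the packaging
    # now sets), the iteration order of a set of strings can differ between
    # interpreter runs, so this output is not reproducible:
    unstable = json.dumps(list(stopwords))

    # Sorting first pins the order, exactly as the patched line does with
    # jsdump.dumps(sorted(self.lang.stopwords)):
    stable = json.dumps(sorted(stopwords))

    print(stable)   # always ["a", "and", "are", "as", "at"]

A deterministic stopword list keeps the generated searchtools.js identical from build to build, which is what dh_sphinxdoc relies on.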
4760=== modified file 'debian/patches/support_python_3.3.diff'
4761--- debian/patches/support_python_3.3.diff 2012-11-01 21:39:16 +0000
4762+++ debian/patches/support_python_3.3.diff 2012-11-28 07:12:20 +0000
4763@@ -1,29 +1,11 @@
4764-Description: Fix various testsuite failures with Python 3.3
4765+Description: fix compatibility with Python 3.3
4766 Author: Takayuki Shimizukawa <shimizukawa@gmail.com>
4767 Bug: https://bitbucket.org/birkenfeld/sphinx/issue/1008/test-failures-with-python-33
4768 Bug-Ubuntu: https://bugs.launchpad.net/bugs/1070336
4769-Last-Update: 2012-11-01
4770-
4771-=== modified file 'sphinx/environment.py'
4772---- a/sphinx/environment.py 2012-03-30 23:32:16 +0000
4773-+++ b/sphinx/environment.py 2012-11-01 17:33:08 +0000
4774-@@ -782,7 +782,11 @@
4775- app.emit('doctree-read', doctree)
4776-
4777- # store time of build, for outdated files detection
4778-- self.all_docs[docname] = time.time()
4779-+ # (Some filesystems have coarse timestamp resolution;
4780-+ # therefore time.time() is older than filesystem's timestamp.
4781-+ # For example, FAT32 has 2sec timestamp resolution.)
4782-+ self.all_docs[docname] = max(
4783-+ time.time(), path.getmtime(self.doc2path(docname)))
4784-
4785- if self.versioning_condition:
4786- # get old doctree
4787-
4788-=== modified file 'sphinx/ext/autodoc.py'
4789---- a/sphinx/ext/autodoc.py 2012-03-30 23:32:16 +0000
4790-+++ b/sphinx/ext/autodoc.py 2012-11-01 17:33:08 +0000
4791+Last-Update: 2012-11-08
4792+
4793+--- a/sphinx/ext/autodoc.py
4794++++ b/sphinx/ext/autodoc.py
4795 @@ -1098,7 +1098,7 @@
4796 """
4797 objtype = 'method'
4798@@ -33,10 +15,8 @@
4799
4800 @classmethod
4801 def can_document_member(cls, member, membername, isattr, parent):
4802-
4803-=== modified file 'sphinx/ext/intersphinx.py'
4804---- a/sphinx/ext/intersphinx.py 2012-03-30 23:32:16 +0000
4805-+++ b/sphinx/ext/intersphinx.py 2012-11-01 17:33:13 +0000
4806+--- a/sphinx/ext/intersphinx.py
4807++++ b/sphinx/ext/intersphinx.py
4808 @@ -188,7 +188,17 @@
4809 if update:
4810 env.intersphinx_inventory = {}
4811@@ -56,17 +36,13 @@
4812 if name:
4813 env.intersphinx_named_inventory[name] = invdata
4814 for type, objects in invdata.iteritems():
4815-
4816-=== modified file 'sphinx/util/__init__.py'
4817---- a/sphinx/util/__init__.py 2012-03-30 23:32:16 +0000
4818-+++ b/sphinx/util/__init__.py 2012-11-01 17:37:41 +0000
4819+--- a/sphinx/util/__init__.py
4820++++ b/sphinx/util/__init__.py
4821 @@ -197,13 +197,18 @@
4822 except Exception, err:
4823 raise PycodeError('error importing %r' % modname, err)
4824 mod = sys.modules[modname]
4825 - if hasattr(mod, '__loader__'):
4826-- try:
4827-- source = mod.__loader__.get_source(modname)
4828 + filename = getattr(mod, '__file__', None)
4829 + loader = getattr(mod, '__loader__', None)
4830 + if loader and getattr(loader, 'get_filename', None):
4831@@ -75,7 +51,8 @@
4832 + except Exception, err:
4833 + raise PycodeError('error getting filename for %r' % filename, err)
4834 + if filename is None and loader:
4835-+ try:
4836+ try:
4837+- source = mod.__loader__.get_source(modname)
4838 + return 'string', loader.get_source(modname)
4839 except Exception, err:
4840 raise PycodeError('error getting source for %r' % modname, err)
4841@@ -84,4 +61,3 @@
4842 if filename is None:
4843 raise PycodeError('no source found for module %r' % modname)
4844 filename = path.normpath(path.abspath(filename))
4845-
4846
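Note on the refreshed support_python_3.3.diff above: the sphinx/util/__init__.py hunk replaces the old unconditional mod.__loader__.get_source() call with a lookup that asks the loader for a filename first and only falls back to get_source() when no filename is available, matching how Python 3.3's importlib-based loaders behave. A rough standalone sketch of that lookup order, with the Sphinx-specific error handling left out (the helper name is made up):

    import sys

    def module_source(modname):
        # Prefer a real filename; fall back to loader-provided source text.
        __import__(modname)
        mod = sys.modules[modname]
        filename = getattr(mod, '__file__', None)
        loader = getattr(mod, '__loader__', None)
        if loader and getattr(loader, 'get_filename', None):
            filename = loader.get_filename(modname)
        if filename is None and loader:
            return 'string', loader.get_source(modname)
        if filename is None:
            raise RuntimeError('no source found for module %r' % modname)
        return 'file', filename

    print(module_source('json'))
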
4847=== added file 'debian/patches/test_build_html_rb.diff'
4848--- debian/patches/test_build_html_rb.diff 1970-01-01 00:00:00 +0000
4849+++ debian/patches/test_build_html_rb.diff 2012-11-28 07:12:20 +0000
4850@@ -0,0 +1,17 @@
4851+Description: fix encoding issues in test_build_html
4852+ test_build_html: open files that are fed to ElementTree parser in "rb"
4853+ mode, fixing encoding issues.
4854+Origin: upstream, https://bitbucket.org/birkenfeld/sphinx/changeset/15c9d212bbf4
4855+Bug: https://bitbucket.org/birkenfeld/sphinx/issue/895
4856+
4857+--- a/tests/test_build_html.py
4858++++ b/tests/test_build_html.py
4859+@@ -328,7 +328,7 @@
4860+ for fname, paths in HTML_XPATH.iteritems():
4861+ parser = NslessParser()
4862+ parser.entity.update(htmlentitydefs.entitydefs)
4863+- fp = open(os.path.join(app.outdir, fname))
4864++ fp = open(os.path.join(app.outdir, fname), 'rb')
4865+ try:
4866+ etree = ET.parse(fp, parser)
4867+ finally:
4868
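Note on test_build_html_rb.diff above: when the test opens the generated HTML in text mode, the file is decoded with the locale encoding before ElementTree ever sees it; handing the parser a binary file lets it honour the encoding declared in the document itself, which is presumably why the LC_ALL=C.UTF-8 workaround can be dropped from debian/rules below. A tiny standalone sketch, assuming a well-formed XHTML file whose name is hypothetical:

    import xml.etree.ElementTree as ET

    # Feed the parser raw bytes so it can apply the document's own
    # encoding declaration.
    fp = open('index.html', 'rb')
    try:
        tree = ET.parse(fp)
    finally:
        fp.close()
    print(tree.getroot().tag)
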
4869=== modified file 'debian/rules'
4870--- debian/rules 2012-06-19 09:06:35 +0000
4871+++ debian/rules 2012-11-28 07:12:20 +0000
4872@@ -5,6 +5,7 @@
4873
4874 export NO_PKG_MANGLE=1
4875 export PYTHONWARNINGS=d
4876+export PYTHONHASHSEED=random
4877
4878 here = $(dir $(firstword $(MAKEFILE_LIST)))/..
4879 debian_version = $(word 2,$(shell cd $(here) && dpkg-parsechangelog | grep ^Version:))
4880@@ -39,7 +40,7 @@
4881 ifeq (,$(filter nocheck,$(DEB_BUILD_OPTIONS)))
4882 find sphinx/locale/ -name '*.po' | xargs -t -I {} msgfmt -o /dev/null -c {}
4883 $(python_all) tests/run.py --verbose --no-skip
4884- export LC_ALL=C.UTF-8 && $(python3_all) tests/run.py --verbose
4885+ $(python3_all) tests/run.py --verbose
4886 cd build/py3/ && rm -rf tests/ sphinx/pycode/Grammar.pickle
4887 xvfb-run --auto-servernum ./debian/jstest/run-tests
4888 endif
4889@@ -146,7 +147,7 @@
4890 dh_md5sums
4891 dh_builddeb
4892
4893-binary-arch: build install
4894+binary-arch:
4895
4896 binary: binary-indep binary-arch
4897
4898
4899=== modified file 'debian/sphinx-autogen.1'
4900--- debian/sphinx-autogen.1 2011-11-20 15:56:50 +0000
4901+++ debian/sphinx-autogen.1 2012-11-28 07:12:20 +0000
4902@@ -1,4 +1,4 @@
4903-.TH sphinx\-autogen 1 "Aug 2010" "Sphinx 1.1" "User Commands"
4904+.TH sphinx\-autogen 1 "Nov 2012" "Sphinx 1.1.3" "User Commands"
4905
4906 .SH NAME
4907 sphinx\-autogen \- generate ReStructuredText using \fBautosummary\fR
4908
4909=== modified file 'debian/tests/control'
4910--- debian/tests/control 2012-05-24 18:53:29 +0000
4911+++ debian/tests/control 2012-11-28 07:12:20 +0000
4912@@ -1,7 +1,5 @@
4913 Tests: python-sphinx
4914-Features: no-build-needed
4915 Depends: python-sphinx, python-nose
4916
4917 Tests: python3-sphinx
4918-Features: no-build-needed
4919 Depends: python3-sphinx, python3-nose
4920
4921=== modified file 'debian/tests/python-sphinx'
4922--- debian/tests/python-sphinx 2012-08-21 17:45:58 +0000
4923+++ debian/tests/python-sphinx 2012-11-28 07:12:20 +0000
4924@@ -4,7 +4,7 @@
4925 cd "$ADTTMP"
4926 pyversions -i \
4927 | tr ' ' '\n' \
4928-| xargs -I {} env PYTHONWARNINGS=d {} \
4929+| xargs -I {} env PYTHONWARNINGS=d PYTHONHASHSEED=random {} \
4930 /usr/bin/nosetests --verbose 2>&1
4931
4932 # vim:ts=4 sw=4 et
4933
4934=== modified file 'debian/tests/python3-sphinx'
4935--- debian/tests/python3-sphinx 2012-08-21 17:45:58 +0000
4936+++ debian/tests/python3-sphinx 2012-11-28 07:12:20 +0000
4937@@ -5,7 +5,7 @@
4938 cd "$ADTTMP"
4939 py3versions -i \
4940 | tr ' ' '\n' \
4941-| xargs -I {} env PYTHONWARNINGS=d {} \
4942+| xargs -I {} env PYTHONWARNINGS=d PYTHONHASHSEED=random {} \
4943 /usr/bin/nosetests3 --verbose 2>&1
4944
4945 # vim:ts=4 sw=4 et
4946
4947=== modified file 'sphinx/environment.py'
4948--- sphinx/environment.py 2012-11-01 21:39:16 +0000
4949+++ sphinx/environment.py 2012-11-28 07:12:20 +0000
4950@@ -213,16 +213,44 @@
4951 parser = RSTParser()
4952
4953 for node, msg in extract_messages(self.document):
4954- patch = new_document(source, settings)
4955 msgstr = catalog.gettext(msg)
4956 # XXX add marker to untranslated parts
4957 if not msgstr or msgstr == msg: # as-of-yet untranslated
4958 continue
4959+
4960+ patch = new_document(source, settings)
4961 parser.parse(msgstr, patch)
4962 patch = patch[0]
4963 # XXX doctest and other block markup
4964 if not isinstance(patch, nodes.paragraph):
4965 continue # skip for now
4966+
4967+ footnote_refs = [r for r in node.children
4968+ if isinstance(r, nodes.footnote_reference)
4969+ and r.get('auto') == 1]
4970+ refs = [r for r in node.children if isinstance(r, nodes.reference)]
4971+
4972+ for i, child in enumerate(patch.children): # update leaves
4973+ if isinstance(child, nodes.footnote_reference) \
4974+ and child.get('auto') == 1:
4975+ # use original 'footnote_reference' object.
4976+ # this object is already registered in self.document.autofootnote_refs
4977+ patch.children[i] = footnote_refs.pop(0)
4978+ # Some duplicated footnote_reference in msgstr causes
4979+ # IndexError in .pop(0). That is invalid msgstr.
4980+
4981+ elif isinstance(child, nodes.reference):
4982+ # reference should use original 'refname'.
4983+ # * reference target ".. _Python: ..." is not translatable.
4984+ # * section refname is not translatable.
4985+ # * inline reference "`Python <...>`_" has no 'refname'.
4986+ if refs and 'refname' in refs[0]:
4987+ refname = child['refname'] = refs.pop(0)['refname']
4988+ self.document.refnames.setdefault(
4989+ refname, []).append(child)
4990+ # if number of reference nodes had been changed, that
4991+ # would often generate unknown link target warning.
4992+
4993 for child in patch.children: # update leaves
4994 child.parent = node
4995 node.children = patch.children
4996@@ -782,11 +810,7 @@
4997 app.emit('doctree-read', doctree)
4998
4999 # store time of build, for outdated files detection
5000- # (Some filesystems have coarse timestamp resolution;
The diff has been truncated for viewing.
