Merge lp:~chromium-team/chromium-browser/chromium-translations-tools.head into lp:chromium-browser
Status: Rejected
Rejected by: Nathan Teodosio
Proposed branch: lp:~chromium-team/chromium-browser/chromium-translations-tools.head
Merge into: lp:chromium-browser
Diff against target: 3433 lines (+3409/-0), 5 files modified
  chromium2pot.py (+2610/-0)
  create-patches.sh (+185/-0)
  desktop2gettext.py (+378/-0)
  update-inspector.py (+149/-0)
  update-pot.sh (+87/-0)
To merge this branch: bzr merge lp:~chromium-team/chromium-browser/chromium-translations-tools.head
Related bugs:
Reviewer: Chromium team (status: Pending)
Review via email: mp+461443@code.launchpad.net
Commit message
Description of the change
Unmerged revisions
- 121. By Chad Miller

  Handle GRD partial files.
  Ignore "external" references, which are usually images.

- 120. By Ken VanDine

  Handled latest grd format.

- 119. By Micah Gersten

  * Temporarily work around multiple bg locales in generated_resources

- 118. By Cris Dywan

  * Add temporary workaround for new type not being used yet

- 117. By Fabien Tassin

  * Add some helper scripts

- 116. By Fabien Tassin

  * When updating common.gypi, fold 'locales' by size (instead of by groups of 10)

- 115. By Fabien Tassin

  * Fix a regression introduced by the new fake-bidi pseudo locale
    (see https://sites.google.com/a/chromium.org/dev/Home/fake-bidi and
    http://code.google.com/p/chromium/issues/detail?id=73052)

- 114. By Fabien Tassin

  * Add a --map-template-names knob allowing renamed templates to be handled
    in some branches

- 113. By Fabien Tassin

  * Add support for 'string-enum' and 'int-enum' outside of 'group' policies
    (needed since http://codereview.chromium.org/7287001/ landed)

- 112. By Fabien Tassin

  * Move all new xtb files to third_party/launchpad_translations (relative to $SRC)
Preview Diff
1 | === added file 'chromium2pot.py' | |||
2 | --- chromium2pot.py 1970-01-01 00:00:00 +0000 | |||
3 | +++ chromium2pot.py 2024-02-28 12:58:47 +0000 | |||
4 | @@ -0,0 +1,2610 @@ | |||
5 | 1 | #!/usr/bin/python | ||
6 | 2 | # -*- coding: utf-8 -*- | ||
7 | 3 | |||
8 | 4 | # (c) 2010-2011, Fabien Tassin <fta@ubuntu.com> | ||
9 | 5 | |||
10 | 6 | # Convert grd/xtb files into pot/po for integration into the Launchpad | ||
11 | 7 | # translation system | ||
12 | 8 | |||
13 | 9 | ## grd files contain the strings for the 'pot' file(s). | ||
14 | 10 | ## Keys are alphabetical (IDS_XXX). | ||
15 | 11 | # Sources: | ||
16 | 12 | # - $SRC/chrome/app/*.grd | ||
17 | 13 | # - $SRC/webkit/glue/*.grd | ||
18 | 14 | |||
19 | 15 | ## xtb files are referenced by the grd files. They contain the translated | ||
20 | 16 | ## strings for our 'po' files. Keys are numerical (64bit ids). | ||
21 | 17 | # Sources: | ||
22 | 18 | # - $SRC/chrome/app/resources/*.xtb | ||
23 | 19 | # - $SRC/webkit/glue/resources/*.xtb | ||
24 | 20 | # and for launchpad contributed strings that already landed: | ||
25 | 21 | # - $SRC/third_party/launchpad_translations/*.xtb | ||
26 | 22 | |||
27 | 23 | ## the mapping between those keys is done using FingerPrint() | ||
28 | 24 | ## [ taken from grit ] on a stripped version of the untranslated string | ||
29 | 25 | |||
30 | 26 | ## grd files contain a lot of <if expr="..."> (python-like) conditions. | ||
31 | 27 | ## Evaluate those expressions but only skip strings with a lang restriction. | ||
32 | 28 | ## For all other conditions (os, defines), simply expose them so translators | ||
33 | 29 | ## know when a given string is expected. | ||
34 | 30 | |||
35 | 31 | ## TODO: handle <message translateable="false"> | ||
36 | 32 | |||
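## For illustration (annotation, not part of the original file), a grd message
## and its matching xtb translation look roughly like this (the numeric id is
## made up; real ids come from FingerPrint(), below):
##   grd: <message name="IDS_NEW_TAB_TITLE" desc="Title of the New Tab page">New Tab</message>
##   xtb: <translation id="1234567890123456789">Nouvel onglet</translation>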
37 | 33 | import os, sys, shutil, re, getopt, codecs, urllib | ||
38 | 34 | from xml.dom import minidom | ||
39 | 35 | from xml.sax.saxutils import unescape | ||
40 | 36 | from datetime import datetime | ||
41 | 37 | from difflib import unified_diff | ||
42 | 38 | import textwrap, filecmp, json | ||
43 | 39 | |||
44 | 40 | lang_mapping = { | ||
45 | 41 | 'no': 'nb', # 'no' is obsolete and the more specific 'nb' (Norwegian Bokmal) | ||
46 | 42 | # and 'nn' (Norwegian Nynorsk) are preferred. | ||
47 | 43 | 'pt-PT': 'pt' | ||
48 | 44 | } | ||
49 | 45 | |||
50 | 46 | #### | ||
51 | 47 | # vanilla from $SRC/tools/grit/grit/extern/FP.py (r10982) | ||
52 | 48 | # See svn log http://src.chromium.org/svn/trunk/src/tools/grit/grit/extern/FP.py | ||
53 | 49 | |||
54 | 50 | # Copyright (c) 2006-2008 The Chromium Authors. All rights reserved. | ||
55 | 51 | # Use of this source code is governed by a BSD-style license that can be | ||
56 | 52 | # found in the LICENSE file. | ||
57 | 53 | |||
58 | 54 | try: | ||
59 | 55 | import hashlib | ||
60 | 56 | _new_md5 = hashlib.md5 | ||
61 | 57 | except ImportError: | ||
62 | 58 | import md5 | ||
63 | 59 | _new_md5 = md5.new | ||
64 | 60 | |||
65 | 61 | def UnsignedFingerPrint(str, encoding='utf-8'): | ||
66 | 62 | """Generate a 64-bit fingerprint by taking the first half of the md5 | ||
67 | 63 | of the string.""" | ||
68 | 64 | hex128 = _new_md5(str).hexdigest() | ||
69 | 65 | int64 = long(hex128[:16], 16) | ||
70 | 66 | return int64 | ||
71 | 67 | |||
72 | 68 | def FingerPrint(str, encoding='utf-8'): | ||
73 | 69 | fp = UnsignedFingerPrint(str, encoding=encoding) | ||
74 | 70 | # interpret fingerprint as signed longs | ||
75 | 71 | if fp & 0x8000000000000000L: | ||
76 | 72 | fp = - ((~fp & 0xFFFFFFFFFFFFFFFFL) + 1) | ||
77 | 73 | return fp | ||
78 | 74 | #### | ||
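# Annotation (not part of the original file): a minimal usage sketch of the
# two helpers above, mirroring the id computation done in GrdFile.read_string()
# further down; the sample string is the one from the comment in read_string():
#
#   kval = 'f&oo "IDS_XX" bar'
#   xtb_id = str(FingerPrint(kval) & 0x7fffffffffffffffL)
#
# xtb_id is the decimal key used in <translation id="..."> entries.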
79 | 75 | |||
80 | 76 | class EvalConditions: | ||
81 | 77 | """ A class allowing an <if expr="xx"/> to be evaluated, based on an array of defines | ||
82 | 78 | and a dict of local variables. | ||
83 | 79 | As of Chromium 10: | ||
84 | 80 | - the known defines are: | ||
85 | 81 | [ 'chromeos', '_google_chrome', 'toolkit_views', 'touchui', 'use_titlecase' ] | ||
86 | 82 | On Linux, only [ 'use_titlecase' ] is set. | ||
87 | 83 | - the known variables are: | ||
88 | 84 | 'os' ('linux2' on Linux) | ||
89 | 85 | 'lang' | ||
90 | 86 | See http://src.chromium.org/svn/trunk/src/build/common.gypi | ||
91 | 87 | """ | ||
92 | 88 | |||
93 | 89 | def eval(self, expression, defines = [ 'use_titlecase' ], vars = { 'os': "linux2" }): | ||
94 | 90 | |||
95 | 91 | def pp_ifdef(match): | ||
96 | 92 | return str(match.group(1) in defines) | ||
97 | 93 | |||
98 | 94 | # evaluate all ifdefs | ||
99 | 95 | expression = re.sub(r"pp_ifdef\('(.*?)'\)", pp_ifdef, expression) | ||
100 | 96 | # evaluate the whole expression using the vars dict | ||
101 | 97 | vars['__builtins__'] = { 'True': True, 'False': False } # prevent eval from using the real current globals | ||
102 | 98 | return eval(expression, vars) | ||
103 | 99 | |||
104 | 100 | def lang_eval(self, expression, lang): | ||
105 | 101 | """ only evaluate the expression against the lang, ignore all defines and other variables. | ||
106 | 102 | This is needed to ignore a string that has lang restrictions (numerals, plurals, ..) but | ||
107 | 103 | still keep it even if it's OS or defined don't match the local platform. | ||
108 | 104 | still keep it even if its OS or define conditions don't match the local platform. | ||
109 | 105 | conditions = [ x for x in re.split(r'\s+(and|or)\s+', expression) if x.find('lang') >= 0 ] | ||
110 | 106 | if len(conditions) == 0: | ||
111 | 107 | return True | ||
112 | 108 | assert len(conditions) == 1, "Expression '%s' has multiple lang conditions" % expression | ||
113 | 109 | vars = { 'lang': lang, '__builtins__': { 'True': True, 'False': False } } | ||
114 | 110 | return eval(conditions[0], vars) | ||
115 | 111 | |||
116 | 112 | def test(self): | ||
117 | 113 | data = [ | ||
118 | 114 | { 'expr': "lang == 'ar'", | ||
119 | 115 | 'vars': { 'lang': 'ar' }, | ||
120 | 116 | 'result': True | ||
121 | 117 | }, | ||
122 | 118 | { 'expr': "lang == 'ar'", | ||
123 | 119 | 'vars': { 'lang': 'fr' }, | ||
124 | 120 | 'result': False | ||
125 | 121 | }, | ||
126 | 122 | { 'expr': "lang in ['ar', 'ro', 'lv']", | ||
127 | 123 | 'vars': { 'lang': 'ar' }, | ||
128 | 124 | 'result': True | ||
129 | 125 | }, | ||
130 | 126 | { 'expr': "lang in ['ar', 'ro', 'lv']", | ||
131 | 127 | 'vars': { 'lang': 'pt-BR' }, | ||
132 | 128 | 'result': False | ||
133 | 129 | }, | ||
134 | 130 | { 'expr': "lang not in ['ar', 'ro', 'lv']", | ||
135 | 131 | 'vars': { 'lang': 'ar' }, | ||
136 | 132 | 'result': False | ||
137 | 133 | }, | ||
138 | 134 | { 'expr': "lang not in ['ar', 'ro', 'lv']", | ||
139 | 135 | 'vars': { 'lang': 'no' }, | ||
140 | 136 | 'result': True | ||
141 | 137 | }, | ||
142 | 138 | { 'expr': "os != 'linux2' and os != 'darwin' and os.find('bsd') == -1", | ||
143 | 139 | 'vars': { 'lang': 'no', 'os': 'bsdos' }, | ||
144 | 140 | 'result': False, | ||
145 | 141 | 'lresult': True # no lang restriction in 'expr', so 'no' is ok | ||
146 | 142 | }, | ||
147 | 143 | { 'expr': "os != 'linux2' and os != 'darwin' and os.find('bsd') > -1", | ||
148 | 144 | 'vars': { 'lang': 'no', 'os': 'bsdos' }, | ||
149 | 145 | 'result': True, | ||
150 | 146 | }, | ||
151 | 147 | { 'expr': "not pp_ifdef('chromeos')", | ||
152 | 148 | 'vars': { 'lang': 'no' }, | ||
153 | 149 | 'defines': [], | ||
154 | 150 | 'result': True, | ||
155 | 151 | }, | ||
156 | 152 | { 'expr': "not pp_ifdef('chromeos')", | ||
157 | 153 | 'vars': { 'lang': 'no' }, | ||
158 | 154 | 'defines': [ 'chromeos' ], | ||
159 | 155 | 'result': False, | ||
160 | 156 | 'lresult': True # no lang restriction in 'expr', so 'no' is ok | ||
161 | 157 | }, | ||
162 | 158 | { 'expr': "pp_ifdef('_google_chrome') and (os == 'darwin')", | ||
163 | 159 | 'vars': { 'lang': 'no', 'os': 'linux2' }, | ||
164 | 160 | 'defines': [ 'chromeos' ], | ||
165 | 161 | 'result': False, | ||
166 | 162 | 'lresult': True # no lang restriction in 'expr', so 'no' is ok | ||
167 | 163 | }, | ||
168 | 164 | { 'expr': "pp_ifdef('_google_chrome') and (os == 'darwin')", | ||
169 | 165 | 'vars': { 'lang': 'no', 'os': 'darwin' }, | ||
170 | 166 | 'defines': [ '_google_chrome' ], | ||
171 | 167 | 'result': True | ||
172 | 168 | }, | ||
173 | 169 | { 'expr': "not pp_ifdef('chromeos') and pp_ifdef('_google_chrome') and 'pt-PT' == lang", | ||
174 | 170 | 'vars': { 'lang': 'pt-PT', 'os': 'darwin' }, | ||
175 | 171 | 'defines': [ '_google_chrome' ], | ||
176 | 172 | 'result': True | ||
177 | 173 | }, | ||
178 | 174 | { 'expr': "not pp_ifdef('chromeos') and pp_ifdef('_google_chrome') and 'pt-PT' == lang", | ||
179 | 175 | 'vars': { 'lang': 'pt-PT', 'os': 'darwin' }, | ||
180 | 176 | 'defines': [ ], | ||
181 | 177 | 'result': False, | ||
182 | 178 | 'lresult': True | ||
183 | 179 | }, | ||
184 | 180 | ] | ||
185 | 181 | i = -1 | ||
186 | 182 | for d in data: | ||
187 | 183 | i += 1 | ||
188 | 184 | defines = d['defines'] if 'defines' in d else [] | ||
189 | 185 | vars = d['vars'] if 'vars' in d else {} | ||
190 | 186 | lvars = vars.copy() # make a copy because eval modifies it | ||
191 | 187 | res = self.eval(d['expr'], defines = defines, vars = lvars) | ||
192 | 188 | assert res == d['result'], "FAILED %d: expr: \"%s\" returned %s with vars = %s and defines = %s" % \ | ||
193 | 189 | (i, d['expr'], repr(res), repr(vars), repr(defines)) | ||
194 | 190 | print "All %d tests passed for EvalConditions.eval()" % (i + 1) | ||
195 | 191 | i = -1 | ||
196 | 192 | for d in data: | ||
197 | 193 | i += 1 | ||
198 | 194 | assert 'lang' in d['vars'], "All tests must have a 'lang' in 'vars', test %d doesn't: %s" % (i, repr(d)) | ||
199 | 195 | res = self.lang_eval(d['expr'], lang = d['vars']['lang']) | ||
200 | 196 | expected = d['lresult'] if 'lresult' in d else d['result'] | ||
201 | 197 | assert res == expected, "FAILED %d: expr: \"%s\" returned %s with lang = %s for the lang_eval test" % \ | ||
202 | 198 | (i, d['expr'], repr(res), d['vars']['lang']) | ||
203 | 199 | print "All %d tests passed for EvalConditions.lang_eval()" % (i + 1) | ||
204 | 200 | |||
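# Annotation (not part of the original file): a usage sketch under the default
# defines/vars, consistent with the unit tests above:
#
#   ec = EvalConditions()
#   ec.eval("not pp_ifdef('chromeos') and os == 'linux2'")            # True on Linux
#   ec.lang_eval("lang in ['ar', 'ro', 'lv'] and os == 'win'", 'fr')  # False
#
# lang_eval() only honours the lang clause, so OS/define restrictions alone
# never hide a string from translators.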
205 | 201 | class StringCvt: | ||
206 | 202 | """ A class converting grit formatted strings to gettext back and forth. | ||
207 | 203 | The idea is to always have: | ||
208 | 204 | a/ gettext2xtb(xtb2gettext(s)) == s | ||
209 | 205 | b/ xtb2gettext(s) produces a string that the msgfmt checker likes and | ||
210 | 206 | that makes sense to translators | ||
211 | 207 | c/ gettext2xtb(s) produces a string acceptable by upstream | ||
212 | 208 | """ | ||
213 | 209 | |||
214 | 210 | def xtb2gettext(self, string): | ||
215 | 211 | """ parse the xtb (xml encoded) string and convert it to a gettext string """ | ||
216 | 212 | |||
217 | 213 | def fold(string): | ||
218 | 214 | return textwrap.wrap(string, break_long_words=False, width=76, drop_whitespace=False, | ||
219 | 215 | expand_tabs=False, replace_whitespace=False, break_on_hyphens=False) | ||
220 | 216 | |||
221 | 217 | s = string.replace('\\n', '\\\\n') | ||
222 | 218 | # escape all single '\' (not followed by 'n') | ||
223 | 219 | s = re.sub(r'(?<!\\)(\\[^n\\\\])', r'\\\1', s) | ||
224 | 220 | # remove all xml encodings | ||
225 | 221 | s = self.unescape_xml(s) | ||
226 | 222 | # replace '<ph name="FOO"/>' by '%{FOO}' | ||
227 | 223 | s = re.sub(r'<ph name="(.*?)"/>', r'%{\1}', s) | ||
228 | 224 | # fold | ||
229 | 225 | # 1/ fold at \n | ||
230 | 226 | # 2/ fold each part at ~76 char | ||
231 | 227 | v = [] | ||
232 | 228 | ll = s.split('\n') | ||
233 | 229 | sz = len(ll) | ||
234 | 230 | if sz > 1: | ||
235 | 231 | i = 0 | ||
236 | 232 | for l in ll: | ||
237 | 233 | i += 1 | ||
238 | 234 | if i == sz: | ||
239 | 235 | v.extend(fold(l)) | ||
240 | 236 | else: | ||
241 | 237 | v.extend(fold(l + '\\n')) | ||
242 | 238 | else: | ||
243 | 239 | v.extend(fold(ll[0])) | ||
244 | 240 | if len(v) > 1: | ||
245 | 241 | v[:0] = [ '' ] | ||
246 | 242 | s = '"' + '"\n"'.join(v) + '"' | ||
247 | 243 | return s | ||
248 | 244 | |||
249 | 245 | def decode_xml_entities(self, string): | ||
250 | 246 | def replace_xmlent(match): | ||
251 | 247 | if match.group(1)[:1] == 'x': | ||
252 | 248 | return unichr(int("0" + match.group(1), 16)) | ||
253 | 249 | else: | ||
254 | 250 | return unichr(int(match.group(1))) | ||
255 | 251 | |||
256 | 252 | return re.sub(r'&#(x\w+|\d+);', replace_xmlent, string) | ||
257 | 253 | |||
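# Annotation (not part of the original file), e.g.:
#   decode_xml_entities(u'caf&#233; &#x41;') == u'café A'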
258 | 254 | def unescape_xml(self, string): | ||
259 | 255 | string = unescape(string).replace('&quot;', '\\"').replace('&apos;', "'") | ||
260 | 256 | string = self.decode_xml_entities(string) | ||
261 | 257 | return string | ||
262 | 258 | |||
263 | 259 | def grd2gettext(self, string): | ||
264 | 260 | """ parse the string returned from minidom and convert it to a gettext string. | ||
265 | 261 | This is similar to xtb2gettext() but minidom has its own magic for encoding | ||
266 | 262 | """ | ||
267 | 263 | return self.xtb2gettext(string) | ||
268 | 264 | |||
269 | 265 | def gettext2xtb(self, string): | ||
270 | 266 | """ parse the gettext string and convert it to an xtb (xml encoded) string. """ | ||
271 | 267 | u = [] | ||
272 | 268 | for s in string.split(u'\n'): | ||
273 | 269 | # remove the enclosing double quotes | ||
274 | 270 | u.append(s[1:][:-1]) | ||
275 | 271 | s = u"".join(u) | ||
276 | 272 | |||
277 | 273 | # encode the xml special chars | ||
278 | 274 | s = s.replace("&", "&amp;") # must be first! | ||
279 | 275 | s = s.replace("<", "&lt;") | ||
280 | 276 | s = s.replace(">", "&gt;") | ||
281 | 277 | s = s.replace('\\"', "&quot;") | ||
282 | 278 | # special case, html comments | ||
283 | 279 | s = re.sub(r'&lt;!--(.*?)--&gt;', r'<!--\1-->', s, re.S) | ||
284 | 280 | # replace non-ascii by &#xxx; codes | ||
285 | 281 | # s = s.encode("ascii", "xmlcharrefreplace") | ||
286 | 282 | # replace '%{FOO}' by '<ph name="FOO"/>' | ||
287 | 283 | s = re.sub(r'%{(.*?)}', r'<ph name="\1"/>', s) | ||
288 | 284 | # unquote \\n and \\\\n | ||
289 | 285 | s = re.sub(r'(?<!\\)\\n', r'\n', s) | ||
290 | 286 | # unquote all control chars | ||
291 | 287 | s = re.sub(r'\\\\([^\\])', r'\\\1', s) | ||
292 | 288 | |||
293 | 289 | # launchpad seems to always quote tabs | ||
294 | 290 | s = s.replace("\\t", "\t") | ||
295 | 291 | return s | ||
296 | 292 | |||
297 | 293 | def test(self): | ||
298 | 294 | # unit tests | ||
299 | 295 | data = [ | ||
300 | 296 | # tab | ||
301 | 297 | { 'id': '0', | ||
302 | 298 | 'xtb': u'foo bar', | ||
303 | 299 | 'po': u'"foo bar"' }, | ||
304 | 300 | { 'id': '1', | ||
305 | 301 | 'xtb': u'foo\tbar', | ||
306 | 302 | 'po': u'"foo\tbar"' }, | ||
307 | 303 | # & | ||
308 | 304 | { 'id': '6779164083355903755', | ||
309 | 305 | 'xtb': u'Supprime&amp;r', | ||
310 | 306 | 'po': u'"Supprime&r"' }, | ||
311 | 307 | # " | ||
312 | 308 | { 'id': '4194570336751258953', | ||
313 | 309 | 'xtb': u'Activer la fonction &quot;taper pour cliquer&quot;', | ||
314 | 310 | 'po': u'"Activer la fonction \\"taper pour cliquer\\""' }, | ||
315 | 311 | # < / > | ||
316 | 312 | { 'id': '7615851733760445951', | ||
317 | 313 | 'xtb': u'&lt;aucun cookie sélectionné&gt;', | ||
318 | 314 | 'po': u'"<aucun cookie sélectionné>"' }, | ||
319 | 315 | # <ph name="FOO"/> | ||
320 | 316 | { 'id': '5070288309321689174', | ||
321 | 317 | 'xtb': u'<ph name="EXTENSION_NAME"/> :', | ||
322 | 318 | 'po': u'"%{EXTENSION_NAME} :"' }, | ||
323 | 319 | { 'id': '1467071896935429871', | ||
324 | 320 | 'xtb': u'Téléchargement de la mise à jour du système : <ph name="PERCENT"/>% terminé', | ||
325 | 321 | 'po': u'"Téléchargement de la mise à jour du système : %{PERCENT}% terminé"' }, | ||
326 | 322 | # line folding | ||
327 | 323 | { 'id': '1526811905352917883', | ||
328 | 324 | 'xtb': u'Une nouvelle tentative de connexion avec SSL 3.0 a dû être effectuée. Cette opération indique généralement que le serveur utilise un logiciel très ancien et qu\'il est susceptible de présenter d\'autres problèmes de sécurité.', | ||
329 | 325 | 'po': u'""\n"Une nouvelle tentative de connexion avec SSL 3.0 a dû être effectuée. Cette "\n"opération indique généralement que le serveur utilise un logiciel très "\n"ancien et qu\'il est susceptible de présenter d\'autres problèmes de sécurité."' }, | ||
330 | 326 | { 'id': '7999229196265990314', | ||
331 | 327 | 'xtb': u'Les fichiers suivants ont été créés :\n\nExtension : <ph name="EXTENSION_FILE"/>\nFichier de clé : <ph name="KEY_FILE"/>\n\nConservez votre fichier de clé en lieu sûr. Vous en aurez besoin lors de la création de nouvelles versions de l\'extension.', | ||
332 | 328 | 'po': u'""\n"Les fichiers suivants ont été créés :\\n"\n"\\n"\n"Extension : %{EXTENSION_FILE}\\n"\n"Fichier de clé : %{KEY_FILE}\\n"\n"\\n"\n"Conservez votre fichier de clé en lieu sûr. Vous en aurez besoin lors de la "\n"création de nouvelles versions de l\'extension."' }, | ||
333 | 329 | # quoted LF | ||
334 | 330 | { 'id': '4845656988780854088', | ||
335 | 331 | 'xtb': u'Synchroniser uniquement les paramètres et\\ndonnées qui ont changé depuis la dernière connexion\\n(requiert votre mot de passe précédent)', | ||
336 | 332 | 'po': u'""\n"Synchroniser uniquement les paramètres et\\\\ndonnées qui ont changé depuis la"\n" dernière connexion\\\\n(requiert votre mot de passe précédent)"' }, | ||
337 | 333 | { 'id': '1761265592227862828', # lang: 'el' | ||
338 | 334 | 'xtb': u'Συγχρονισμός όλων των ρυθμίσεων και των δεδομένων\\n (ενδέχεται να διαρκέσει ορισμένο χρονικό διάστημα)', | ||
339 | 335 | 'po': u'""\n"Συγχρονισμός όλων των ρυθμίσεων και των δεδομένων\\\\n (ενδέχεται να διαρκέσει"\n" ορισμένο χρονικό διάστημα)"' }, | ||
340 | 336 | { 'id': '1768211415369530011', # lang: 'de' | ||
341 | 337 | 'xtb': u'Folgende Anwendung wird gestartet, wenn Sie diese Anforderung akzeptieren:\\n\\n <ph name="APPLICATION"/>', | ||
342 | 338 | 'po': u'""\n"Folgende Anwendung wird gestartet, wenn Sie diese Anforderung "\n"akzeptieren:\\\\n\\\\n %{APPLICATION}"' }, | ||
343 | 339 | # weird controls | ||
344 | 340 | { 'id': '5107325588313356747', # lang: 'es-419' | ||
345 | 341 | 'xtb': u'Para ocultar el acceso a este programa, debes desinstalarlo. Para ello, utiliza\\n<ph name="CONTROL_PANEL_APPLET_NAME"/> del Panel de control.\\n\¿Deseas iniciar <ph name="CONTROL_PANEL_APPLET_NAME"/>?', | ||
346 | 342 | 'po': u'""\n"Para ocultar el acceso a este programa, debes desinstalarlo. Para ello, "\n"utiliza\\\\n%{CONTROL_PANEL_APPLET_NAME} del Panel de control.\\\\n\\\\¿Deseas "\n"iniciar %{CONTROL_PANEL_APPLET_NAME}?"' } | ||
347 | 343 | ] | ||
348 | 344 | |||
349 | 345 | for string in data: | ||
350 | 346 | s = u"<x>" + string['xtb'] + u"</x>" | ||
351 | 347 | s = s.encode('ascii', 'xmlcharrefreplace') | ||
352 | 348 | dom = minidom.parseString(s) | ||
353 | 349 | s = dom.firstChild.toxml()[3:][:-4] | ||
354 | 350 | e = self.grd2gettext(s) | ||
355 | 351 | if e != string['po']: | ||
356 | 352 | assert False, "grd2gettext() failed for id " + string['id'] + \ | ||
357 | 353 | ". \nExpected: " + repr(string['po']) + "\nGot: " + repr(e) | ||
358 | 354 | e = self.xtb2gettext(string['xtb']) | ||
359 | 355 | if e != string['po']: | ||
360 | 356 | assert False, "xtb2gettext() failed for id " + string['id'] + \ | ||
361 | 357 | ". \nExpected: " + repr(string['po']) + "\nGot: " + repr(e) | ||
362 | 358 | u = self.gettext2xtb(e) | ||
363 | 359 | if u != string['xtb']: | ||
364 | 360 | assert False, "gettext2xtb() failed for id " + string['id'] + \ | ||
365 | 361 | ". \nExpected: " + repr(string['xtb']) + "\nGot: " + repr(u) | ||
366 | 362 | print string['id'] + " ok" | ||
367 | 363 | |||
368 | 364 | # more tests with only po to xtb to test some weird launchpad po exports | ||
369 | 365 | data2 = [ | ||
370 | 366 | { 'id': '1768211415369530011', # lang: 'de' | ||
371 | 367 | 'po': u'""\n"Folgende Anwendung wird gestartet, wenn Sie diese Anforderung akzeptieren:\\\\"\n"n\\\\n %{APPLICATION}"', | ||
372 | 368 | 'xtb': u'Folgende Anwendung wird gestartet, wenn Sie diese Anforderung akzeptieren:\\n\\n <ph name="APPLICATION"/>' }, | ||
373 | 369 | ] | ||
374 | 370 | for string in data2: | ||
375 | 371 | u = self.gettext2xtb(string['po']) | ||
376 | 372 | if u != string['xtb']: | ||
377 | 373 | assert False, "gettext2xtb() failed for id " + string['id'] + \ | ||
378 | 374 | ". \nExpected: " + repr(string['xtb']) + "\nGot: " + repr(u) | ||
379 | 375 | print string['id'] + " ok" | ||
380 | 376 | |||
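# Annotation (not part of the original file): the round-trip promised in the
# class docstring, using the '&' test case above:
#
#   cvt = StringCvt()
#   po = cvt.xtb2gettext(u'Supprime&amp;r')    # -> u'"Supprime&r"'
#   assert cvt.gettext2xtb(po) == u'Supprime&amp;r'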
381 | 377 | ###### | ||
382 | 378 | |||
383 | 379 | class PotFile(dict): | ||
384 | 380 | """ | ||
385 | 381 | Read and write gettext pot files | ||
386 | 382 | """ | ||
387 | 383 | |||
388 | 384 | def __init__(self, filename, date = None, debug = False, branch_name = "default", branch_dir = os.getcwd()): | ||
389 | 385 | self.debug = debug | ||
390 | 386 | self.lang = None | ||
391 | 387 | self.filename = filename | ||
392 | 388 | self.tfile = filename + ".new" | ||
393 | 389 | self.branch_dir = branch_dir | ||
394 | 390 | self.branch_name = branch_name | ||
395 | 391 | self.template_date = date | ||
396 | 392 | self.translation_date = "YEAR-MO-DA HO:MI+ZONE" | ||
397 | 393 | self.is_pot = True | ||
398 | 394 | self.fd = None | ||
399 | 395 | self.fd_mode = "rb" | ||
400 | 396 | if self.template_date is None: | ||
401 | 397 | self.template_date = datetime.utcnow().strftime("%Y-%m-%d %H:%M+0000") | ||
402 | 398 | self.strings = [] | ||
403 | 399 | |||
404 | 400 | def add_string(self, id, comment, string, translation = "", origin = None): | ||
405 | 401 | self.strings.append({ 'id': id, 'comment': comment, 'string': string, | ||
406 | 402 | 'origin': origin, 'translation': translation }) | ||
407 | 403 | |||
408 | 404 | def replace_file_if_newer(self): | ||
409 | 405 | filename = os.path.join(self.branch_dir, self.filename) if self.branch_dir is not None \ | ||
410 | 406 | else self.filename | ||
411 | 407 | tfile = os.path.join(self.branch_dir, self.tfile) if self.branch_dir is not None \ | ||
412 | 408 | else self.tfile | ||
413 | 409 | if os.path.isfile(filename) and filecmp.cmp(filename, tfile) == 1: | ||
414 | 410 | os.unlink(tfile) | ||
415 | 411 | return 0 | ||
416 | 412 | else: | ||
417 | 413 | os.rename(tfile, filename) | ||
418 | 414 | return 1 | ||
419 | 415 | |||
420 | 416 | def get_mtime(self, file): | ||
421 | 417 | rfile = os.path.join(self.branch_dir, file) | ||
422 | 418 | if self.debug: | ||
423 | 419 | print "getmtime(%s) [%s]" % (file, os.path.abspath(rfile)) | ||
424 | 420 | return os.path.getmtime(rfile) | ||
425 | 421 | |||
426 | 422 | def open(self, mode = "rb", filename = None): | ||
427 | 423 | if filename is not None: | ||
428 | 424 | self.filename = filename | ||
429 | 425 | self.tfile = filename + ".new" | ||
430 | 426 | rfile = os.path.join(self.branch_dir, self.filename) | ||
431 | 427 | rtfile = os.path.join(self.branch_dir, self.tfile) | ||
432 | 428 | if self.fd is not None: | ||
433 | 429 | self.close() | ||
434 | 430 | self.fd_mode = mode | ||
435 | 431 | if mode.find("r") != -1: | ||
436 | 432 | if self.debug: | ||
437 | 433 | print "open %s [mode=%s] from branch '%s' [%s]" % (self.filename, mode, self.branch_name, os.path.abspath(rfile)) | ||
438 | 434 | self.fd = codecs.open(rfile, mode, encoding="utf-8") | ||
439 | 435 | else: | ||
440 | 436 | if self.debug: | ||
441 | 437 | print "open %s [mode=%s] from branch '%s' [%s]" % (self.tfile, mode, self.branch_name, os.path.abspath(rtfile)) | ||
442 | 438 | self.fd = codecs.open(rtfile, mode, encoding="utf-8") | ||
443 | 439 | |||
444 | 440 | def close(self): | ||
445 | 441 | self.fd.close() | ||
446 | 442 | self.fd = None | ||
447 | 443 | if self.fd_mode.find("w") != -1: | ||
448 | 444 | return self.replace_file_if_newer() | ||
449 | 445 | |||
450 | 446 | def read_string(self): | ||
451 | 447 | string = {} | ||
452 | 448 | cur = None | ||
453 | 449 | while 1: | ||
454 | 450 | s = self.fd.readline() | ||
455 | 451 | if len(s) == 0 or s == "\n": | ||
456 | 452 | break # EOF or end of block | ||
457 | 453 | if s.rfind('\n') == len(s) - 1: | ||
458 | 454 | s = s[:-1] # chomp | ||
459 | 455 | if s.find("# ") == 0 or s == "#": # translator-comment | ||
460 | 456 | if 'comment' not in string: | ||
461 | 457 | string['comment'] = '' | ||
462 | 458 | string['comment'] += s[2:] | ||
463 | 459 | continue | ||
464 | 460 | if s.find("#:") == 0: # reference | ||
465 | 461 | if 'reference' not in string: | ||
466 | 462 | string['reference'] = '' | ||
467 | 463 | string['reference'] += s[2:] | ||
468 | 464 | if s[2:].find(" id: ") == 0: | ||
469 | 465 | string['id'] = s[7:].split(' ')[0] | ||
470 | 466 | continue | ||
471 | 467 | if s.find("#.") == 0: # extracted-comments | ||
472 | 468 | if 'extracted' not in string: | ||
473 | 469 | string['extracted'] = '' | ||
474 | 470 | string['extracted'] += s[2:] | ||
475 | 471 | if s[2:].find(" - condition: ") == 0: | ||
476 | 472 | if 'conditions' not in string: | ||
477 | 473 | string['conditions'] = [] | ||
478 | 474 | string['conditions'].append(s[16:]) | ||
479 | 475 | continue | ||
480 | 476 | if s.find("#~") == 0: # obsolete messages | ||
481 | 477 | continue | ||
482 | 478 | if s.find("#") == 0: # something else | ||
483 | 479 | print "%s not expected. Skip" % repr(s) | ||
484 | 480 | continue # not supported/expected | ||
485 | 481 | if s.find("msgid ") == 0: | ||
486 | 482 | cur = "string" | ||
487 | 483 | if cur not in string: | ||
488 | 484 | string[cur] = u"" | ||
489 | 485 | else: | ||
490 | 486 | string[cur] += "\n" | ||
491 | 487 | string[cur] += s[6:] | ||
492 | 488 | continue | ||
493 | 489 | if s.find("msgstr ") == 0: | ||
494 | 490 | cur = "translation" | ||
495 | 491 | if cur not in string: | ||
496 | 492 | string[cur] = u"" | ||
497 | 493 | else: | ||
498 | 494 | string[cur] += "\n" | ||
499 | 495 | string[cur] += s[7:] | ||
500 | 496 | continue | ||
501 | 497 | if s.find('"') == 0: | ||
502 | 498 | if cur is None: | ||
503 | 499 | print "'%s' not expected here. Skip" % s | ||
504 | 500 | continue | ||
505 | 501 | string[cur] += "\n" + s | ||
506 | 502 | continue | ||
507 | 503 | print "'%s' not expected here. Skip" % s | ||
508 | 504 | return None if string == {} else string | ||
509 | 505 | |||
510 | 506 | def write(self, string): | ||
511 | 507 | self.fd.write(string) | ||
512 | 508 | |||
513 | 509 | def write_header(self): | ||
514 | 510 | lang_team = "LANGUAGE <LL@li.org>" if self.is_pot else "%s <%s@li.org>" % (self.lang, self.lang) | ||
515 | 511 | lang_str = "template" if self.is_pot else "for lang '%s'" % self.lang | ||
516 | 512 | date = "YEAR-MO-DA HO:MI+ZONE" if self.is_pot else \ | ||
517 | 513 | datetime.fromtimestamp(self.translation_date).strftime("%Y-%m-%d %H:%M+0000") | ||
518 | 514 | self.write("# Chromium Translations %s.\n" | ||
519 | 515 | "# Copyright (C) 2010-2011 Fabien Tassin\n" | ||
520 | 516 | "# This file is distributed under the same license as the chromium-browser package.\n" | ||
521 | 517 | "# Fabien Tassin <fta@ubuntu.com>, 2010-2011.\n" | ||
522 | 518 | "#\n" % lang_str) | ||
523 | 519 | # FIXME: collect contributors (can LP export them?) | ||
524 | 520 | self.write('msgid ""\n' | ||
525 | 521 | 'msgstr ""\n' | ||
526 | 522 | '"Project-Id-Version: chromium-browser.head\\n"\n' | ||
527 | 523 | '"Report-Msgid-Bugs-To: https://bugs.launchpad.net/ubuntu/+source/chromium-browser/+filebug\\n"\n' | ||
528 | 524 | '"POT-Creation-Date: %s\\n"\n' | ||
529 | 525 | '"PO-Revision-Date: %s\\n"\n' | ||
530 | 526 | '"Last-Translator: FULL NAME <EMAIL@ADDRESS>\\n"\n' | ||
531 | 527 | '"Language-Team: %s\\n"\n' | ||
532 | 528 | '"MIME-Version: 1.0\\n"\n' | ||
533 | 529 | '"Content-Type: text/plain; charset=UTF-8\\n"\n' | ||
534 | 530 | '"Content-Transfer-Encoding: 8bit\\n"\n\n' % \ | ||
535 | 531 | (datetime.fromtimestamp(self.template_date).strftime("%Y-%m-%d %H:%M+0000"), | ||
536 | 532 | date, lang_team)) | ||
537 | 533 | |||
538 | 534 | def write_footer(self): | ||
539 | 535 | pass | ||
540 | 536 | |||
541 | 537 | def write_all_strings(self): | ||
542 | 538 | for string in self.strings: | ||
543 | 539 | self.write(u"#. %s\n" % u"\n#. ".join(string['comment'].split("\n"))) | ||
544 | 540 | self.write(u"#: id: %s (used in the following branches: %s)\n" % \ | ||
545 | 541 | (string['id'], ", ".join(string['origin']))) | ||
546 | 542 | self.write(u'msgid %s\n' % StringCvt().xtb2gettext(string['string'])) | ||
547 | 543 | self.write(u'msgstr %s\n\n' % StringCvt().xtb2gettext(string['translation'])) | ||
548 | 544 | |||
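# Annotation (not part of the original file): each entry emitted by
# write_all_strings() above looks like this (values made up; see pack_comment()
# below for the '#.' block layout):
#
#   #. IDS_SOMETHING
#   #. - description: Description taken from the grd file
#   #. - condition: os == 'linux2'
#   #: id: 1234567890123456789 (used in the following branches: default)
#   msgid "Some source string"
#   msgstr ""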
549 | 545 | def export_file(self, directory = None, filename = None): | ||
550 | 546 | self.open(mode = "wb", filename = filename) | ||
551 | 547 | self.write_header() | ||
552 | 548 | self.write_all_strings() | ||
553 | 549 | self.write_footer() | ||
554 | 550 | return self.close() | ||
555 | 551 | |||
556 | 552 | def import_file(self): | ||
557 | 553 | self.mtime = self.get_mtime(self.filename) | ||
558 | 554 | self.open() | ||
559 | 555 | while 1: | ||
560 | 556 | string = self.read_string() | ||
561 | 557 | if string is None: | ||
562 | 558 | break | ||
563 | 559 | self.strings.append(string) | ||
564 | 560 | self.close() | ||
565 | 561 | |||
566 | 562 | def pack_comment(self, data): | ||
567 | 563 | comment = "" | ||
568 | 564 | for ent in sorted(data, lambda x,y: cmp(x['code'], y['code'])): | ||
569 | 565 | comment += "%s\n- description: %s\n" % (ent['code'], ent['desc']) | ||
570 | 566 | if ent['test'] is not None: | ||
571 | 567 | comment += "- condition: %s\n" % ent['test'] | ||
572 | 568 | comment = comment[:-1] # strip trailing \n | ||
573 | 569 | return comment | ||
574 | 570 | |||
575 | 571 | def get_origins(self, data): | ||
576 | 572 | o = [] | ||
577 | 573 | for ent in sorted(data, lambda x,y: cmp(x['code'], y['code'])): | ||
578 | 574 | for origin in ent['origin']: | ||
579 | 575 | if origin not in o: | ||
580 | 576 | o.append(origin) | ||
581 | 577 | return o | ||
582 | 578 | |||
583 | 579 | def import_grd(self, grd): | ||
584 | 580 | imported = 0 | ||
585 | 581 | for id in sorted(grd.supported_ids.keys()): | ||
586 | 582 | if 'ids' not in grd.supported_ids[id]: | ||
587 | 583 | continue | ||
588 | 584 | comment = self.pack_comment(grd.supported_ids[id]['ids']) | ||
589 | 585 | string = grd.supported_ids[id]['ids'][0]['val'] | ||
590 | 586 | origin = self.get_origins(grd.supported_ids[id]['ids']) | ||
591 | 587 | self.strings.append({ 'id': id, 'comment': comment, 'string': string, | ||
592 | 588 | 'origin': origin, 'translation': '' }) | ||
593 | 589 | imported += 1 | ||
594 | 590 | if self.debug: | ||
595 | 591 | print "imported %d strings from the grd template" % imported | ||
596 | 592 | |||
597 | 593 | class PoFile(PotFile): | ||
598 | 594 | """ | ||
599 | 595 | Read and write gettext po files | ||
600 | 596 | """ | ||
601 | 597 | |||
602 | 598 | def __init__(self, lang, filename, template, date = None, debug = None, | ||
603 | 599 | branch_name = "default", branch_dir = os.getcwd()): | ||
604 | 600 | super(PoFile, self).__init__(filename, date = template.template_date, debug = debug, | ||
605 | 601 | branch_name = branch_name, branch_dir = branch_dir) | ||
606 | 602 | self.template = template | ||
607 | 603 | self.lang = lang | ||
608 | 604 | self.translation_date = date | ||
609 | 605 | self.is_pot = False | ||
610 | 606 | |||
611 | 607 | def import_xtb(self, xtb): | ||
612 | 608 | # only import strings present in the current template | ||
613 | 609 | imported = 0 | ||
614 | 610 | for id in sorted(xtb.template.supported_ids.keys()): | ||
615 | 611 | if 'ids' not in xtb.template.supported_ids[id]: | ||
616 | 612 | continue | ||
617 | 613 | translation = xtb.strings[id] if id in xtb.strings else "" | ||
618 | 614 | comment = self.template.pack_comment(xtb.template.supported_ids[id]['ids']) | ||
619 | 615 | string = xtb.template.supported_ids[id]['ids'][0]['val'] | ||
620 | 616 | origin = self.get_origins(xtb.template.supported_ids[id]['ids']) | ||
621 | 617 | self.add_string(id, comment, string, translation, origin) | ||
622 | 618 | imported += 1 | ||
623 | 619 | if self.debug: | ||
624 | 620 | print "imported %d translations for lang %s from xtb into po %s" % (imported, self.lang, self.filename) | ||
625 | 621 | |||
626 | 622 | class GrdFile(PotFile): | ||
627 | 623 | """ | ||
628 | 624 | Read a Grit GRD file (write is not supported) | ||
629 | 625 | """ | ||
630 | 626 | def __init__(self, filename, date = None, lang_mapping = None, debug = None, | ||
631 | 627 | branch_name = "default", branch_dir = os.getcwd()): | ||
632 | 628 | super(GrdFile, self).__init__(filename, date = date, debug = debug, | ||
633 | 629 | branch_name = branch_name, branch_dir = branch_dir) | ||
634 | 630 | self.lang_mapping = lang_mapping | ||
635 | 631 | self.mapped_langs = {} | ||
636 | 632 | self.supported_langs = {} | ||
637 | 633 | self.supported_ids = {} | ||
638 | 634 | self.supported_ids_counts = {} | ||
639 | 635 | self.translated_strings = {} | ||
640 | 636 | self.stats = {} # per lang | ||
641 | 637 | self.debug = debug | ||
642 | 638 | self._PH_REGEXP = re.compile('(<ph name=")([^"]*)("/>)') | ||
643 | 639 | |||
644 | 640 | def open(self): | ||
645 | 641 | pass | ||
646 | 642 | |||
647 | 643 | def close(self): | ||
648 | 644 | pass | ||
649 | 645 | |||
650 | 646 | def write_header(self): | ||
651 | 647 | raise Exception("Not implemented!") | ||
652 | 648 | |||
653 | 649 | def write_footer(self): | ||
654 | 650 | raise Exception("Not implemented!") | ||
655 | 651 | |||
656 | 652 | def write_all_strings(self): | ||
657 | 653 | raise Exception("Not implemented!") | ||
658 | 654 | |||
659 | 655 | def export_file(self, directory = None, filename = None, global_langs = None, langs = None): | ||
660 | 656 | fdi = codecs.open(self.filename, 'rb', encoding="utf-8") | ||
661 | 657 | fdo = codecs.open(filename, 'wb', encoding="utf-8") | ||
662 | 658 | # can't use minidom here as the file is manually generated and the | ||
663 | 659 | # output will create big diffs. parse the source file line by line | ||
664 | 660 | # and insert our xtb in the <translations> section. Also insert new | ||
665 | 661 | # langs in the <outputs> section (with type="data_package" or type="js_map_format"). | ||
666 | 662 | # Leave everything else untouched | ||
667 | 663 | tr_found = False | ||
668 | 664 | tr_saved = [] | ||
669 | 665 | tr_has_ifs = False | ||
670 | 666 | tr_skipping_if_not = False | ||
671 | 667 | pak_found = False | ||
672 | 668 | pak_saved = [] | ||
673 | 669 | # langs, sorted by their xtb names | ||
674 | 670 | our_langs = map(lambda x: x[0], | ||
675 | 671 | sorted(map(lambda x: (x, self.mapped_langs[x]['xtb_file']), | ||
676 | 672 | self.mapped_langs), | ||
677 | 673 | key = lambda x: x[1])) # d'oh! | ||
678 | 674 | if langs is None: | ||
679 | 675 | langs = our_langs[:] | ||
680 | 676 | for line in fdi.readlines(): | ||
681 | 677 | if re.match(r'.*?<output filename=".*?" type="(data_package|js_map_format)"', line): | ||
682 | 678 | pak_found = True | ||
683 | 679 | pak_saved.append(line) | ||
684 | 680 | continue | ||
685 | 681 | if line.find('</outputs>') > 0: | ||
686 | 682 | pak_found = False | ||
687 | 683 | ours = global_langs[:] | ||
688 | 684 | chunks = {} | ||
689 | 685 | c = None | ||
690 | 686 | pak_if = None | ||
691 | 687 | pak_is_in_if = False | ||
692 | 688 | for l in pak_saved: | ||
693 | 689 | if l.find("<!-- ") > 0: | ||
694 | 690 | c = l | ||
695 | 691 | continue | ||
696 | 692 | if l.find("<if ") > -1: | ||
697 | 693 | c = l if c is None else c + l | ||
698 | 694 | tr_has_ifs = True | ||
699 | 695 | pak_is_in_if = True | ||
700 | 696 | continue | ||
701 | 697 | if l.find("</if>") > -1: | ||
702 | 698 | c = l if c is None else c + l | ||
703 | 699 | pak_is_in_if = False | ||
704 | 700 | continue | ||
705 | 701 | m = re.match(r'.*?<output filename="(.*?)_([^_\.]+)\.(pak|js)" type="(data_package|js_map_format)" lang="(.*?)" />', l) | ||
706 | 702 | if m is not None: | ||
707 | 703 | x = { 'name': m.group(1), 'ext': m.group(3), 'lang': m.group(5), 'file_lang': m.group(2), | ||
708 | 704 | 'type': m.group(4), 'in_if': pak_is_in_if, 'line': l } | ||
709 | 705 | if c is not None: | ||
710 | 706 | x['comment'] = c | ||
711 | 707 | c = None | ||
712 | 708 | k = m.group(2) if m.group(2) != 'nb' else 'no' | ||
713 | 709 | chunks[k] = x | ||
714 | 710 | else: | ||
715 | 711 | if c is None: | ||
716 | 712 | c = l | ||
717 | 713 | else: | ||
718 | 714 | c += l | ||
719 | 715 | is_in_if = False | ||
720 | 716 | for lang in sorted(chunks.keys()): | ||
721 | 717 | tlang = lang if lang != 'no' else 'nb' | ||
722 | 718 | while len(ours) > 0 and ((ours[0] == 'nb' and 'no' < tlang) or (ours[0] != 'nb' and ours[0] < tlang)): | ||
723 | 719 | if ours[0] in chunks: | ||
724 | 720 | ours = ours[1:] | ||
725 | 721 | continue | ||
726 | 722 | if tr_has_ifs and is_in_if is False: | ||
727 | 723 | fdo.write(' <if expr="pp_ifdef(\'use_third_party_translations\')">\n') | ||
728 | 724 | f = "%s_%s.%s" % (chunks[lang]['name'], ours[0], chunks[lang]['ext']) | ||
729 | 725 | fdo.write(' %s<output filename="%s" type="%s" lang="%s" />\n' % \ | ||
730 | 726 | (' ' if tr_has_ifs else '', f, chunks[lang]['type'], ours[0])) | ||
731 | 727 | is_in_if = True | ||
732 | 728 | if tr_has_ifs and chunks[lang]['in_if'] is False: | ||
733 | 729 | if 'comment' not in chunks[lang] or chunks[lang]['comment'].find('</if>') == -1: | ||
734 | 730 | fdo.write(' </if>\n') | ||
735 | 731 | is_in_if = False | ||
736 | 732 | ours = ours[1:] | ||
737 | 733 | if 'comment' in chunks[lang]: | ||
738 | 734 | for s in chunks[lang]['comment'].split('\n')[:-1]: | ||
739 | 735 | if chunks[lang]['in_if'] is True and is_in_if and s.find('<if ') > -1: | ||
740 | 736 | continue | ||
741 | 737 | if s.find('<!-- No translations available. -->') > -1: | ||
742 | 738 | continue | ||
743 | 739 | fdo.write(s + '\n') | ||
744 | 740 | fdo.write(chunks[lang]['line']) | ||
745 | 741 | if ours[0] == tlang: | ||
746 | 742 | ours = ours[1:] | ||
747 | 743 | is_in_if = chunks[lang]['in_if'] | ||
748 | 744 | if len(chunks.keys()) > 0: | ||
749 | 745 | while len(ours) > 0: | ||
750 | 746 | f = "%s_%s.%s" % (chunks[lang]['name'], ours[0], chunks[lang]['ext']) | ||
751 | 747 | if tr_has_ifs and is_in_if is False: | ||
752 | 748 | fdo.write(' <if expr="pp_ifdef(\'use_third_party_translations\')">\n') | ||
753 | 749 | fdo.write(' %s<output filename="%s" type="data_package" lang="%s" />\n' % \ | ||
754 | 750 | (' ' if tr_has_ifs else '', f, ours[0])) | ||
755 | 751 | is_in_if = True | ||
756 | 752 | ours = ours[1:] | ||
757 | 753 | if tr_has_ifs and is_in_if: | ||
758 | 754 | fdo.write(' </if>\n') | ||
759 | 755 | is_in_if = False | ||
760 | 756 | if c is not None: | ||
761 | 757 | for s in c.split('\n')[:-1]: | ||
762 | 758 | if s.find('<!-- No translations available. -->') > -1: | ||
763 | 759 | continue | ||
764 | 760 | if s.find('</if>') > -1: | ||
765 | 761 | continue | ||
766 | 762 | fdo.write(s + '\n') | ||
767 | 763 | if line.find('<translations>') > 0: | ||
768 | 764 | fdo.write(line) | ||
769 | 765 | tr_found = True | ||
770 | 766 | continue | ||
771 | 767 | if line.find('</translations>') > 0: | ||
772 | 768 | tr_found = False | ||
773 | 769 | ours = our_langs[:] | ||
774 | 770 | chunks = {} | ||
775 | 771 | obsolete = [] | ||
776 | 772 | c = None | ||
777 | 773 | tr_if = None | ||
778 | 774 | tr_is_in_if = False | ||
779 | 775 | for l in tr_saved: | ||
780 | 776 | if l.find("</if>") > -1: | ||
781 | 777 | if tr_skipping_if_not: | ||
782 | 778 | tr_skipping_if_not = False | ||
783 | 779 | continue | ||
784 | 780 | tr_is_in_if = False | ||
785 | 781 | continue | ||
786 | 782 | if tr_skipping_if_not: | ||
787 | 783 | continue | ||
788 | 784 | if l.find("<!-- ") > 0: | ||
789 | 785 | c = l if c is None else c + l | ||
790 | 786 | continue | ||
791 | 787 | if l.find("<if ") > -1: | ||
792 | 788 | m = re.match(r'.*?<if expr="not pp_ifdef\(\'use_third_party_translations\'\)"', l) | ||
793 | 789 | if m is not None: | ||
794 | 790 | tr_skipping_if_not = True | ||
795 | 791 | continue | ||
796 | 792 | tr_has_ifs = True | ||
797 | 793 | tr_is_in_if = True | ||
798 | 794 | continue | ||
799 | 795 | m = re.match(r'.*?<file path=".*_([^_]+)\.xtb" lang="(.*?)"', l) | ||
800 | 796 | if m is not None: | ||
801 | 797 | tlang = m.group(2) | ||
802 | 798 | if m.group(1) == 'iw': | ||
803 | 799 | tlang = m.group(1) | ||
804 | 800 | x = { 'lang': tlang, 'line': l, 'in_if': tr_is_in_if } | ||
805 | 801 | if c is not None: | ||
806 | 802 | x['comment'] = c | ||
807 | 803 | c = None | ||
808 | 804 | chunks[tlang] = x | ||
809 | 805 | if tlang not in langs and tlang not in map(lambda t: self.mapped_langs[t]['grit'], langs): | ||
810 | 806 | obsolete.append(tlang) | ||
811 | 807 | else: | ||
812 | 808 | if c is None: | ||
813 | 809 | c = l | ||
814 | 810 | else: | ||
815 | 811 | c += l | ||
816 | 812 | is_in_if = False | ||
817 | 813 | # Do we want <if/> in the <translations/> block? (they are only mandatory in the <outputs/> block) | ||
818 | 814 | want_ifs_in_translations = False | ||
819 | 815 | for lang in sorted(chunks.keys()): | ||
820 | 816 | while len(ours) > 0 and self.mapped_langs[ours[0]]['xtb_file'] < lang.replace('@', '-'): | ||
821 | 817 | if ours[0] not in self.supported_langs: | ||
822 | 818 | if self.debug: | ||
823 | 819 | print "Skipped export of lang '%s' (most probably a 'po' file without any translated strings)" % ours[0] | ||
824 | 820 | ours = ours[1:] | ||
825 | 821 | continue | ||
826 | 822 | if ours[0] in obsolete: | ||
827 | 823 | if self.debug: | ||
828 | 824 | print "Skipped export of lang '%s' (now obsolete)" % ours[0] | ||
829 | 825 | ours = ours[1:] | ||
830 | 826 | continue | ||
831 | 827 | f = os.path.relpath(self.supported_langs[ours[0]], os.path.dirname(self.filename)) | ||
832 | 828 | if want_ifs_in_translations and tr_has_ifs and is_in_if is False: | ||
833 | 829 | fdo.write(' <if expr="pp_ifdef(\'use_third_party_translations\')">\n') | ||
834 | 830 | is_in_if = True | ||
835 | 831 | fdo.write(' %s<file path="%s" lang="%s" />\n' % | ||
836 | 832 | (' ' if (is_in_if or want_ifs_in_translations) and tr_has_ifs else '', f, ours[0])) | ||
837 | 833 | if tr_has_ifs and chunks[lang]['in_if'] is False: | ||
838 | 834 | if want_ifs_in_translations: | ||
839 | 835 | fdo.write(' </if>\n') | ||
840 | 836 | is_in_if = False | ||
841 | 837 | ours = ours[1:] | ||
842 | 838 | if 'comment' in chunks[lang]: | ||
843 | 839 | for s in chunks[lang]['comment'].split('\n')[:-1]: | ||
844 | 840 | if chunks[lang]['in_if'] is True and is_in_if and s.find('<if ') > -1: | ||
845 | 841 | continue | ||
846 | 842 | if s.find('<!-- No translations available. -->') > -1: | ||
847 | 843 | continue | ||
848 | 844 | fdo.write(s + '\n') | ||
849 | 845 | if lang not in obsolete: | ||
850 | 846 | fdo.write(chunks[lang]['line']) | ||
851 | 847 | ours = ours[1:] | ||
852 | 848 | is_in_if = chunks[lang]['in_if'] | ||
853 | 849 | while len(ours) > 0: | ||
854 | 850 | if ours[0] in self.supported_langs: | ||
855 | 851 | f = os.path.relpath(self.supported_langs[ours[0]], os.path.dirname(self.filename)) | ||
856 | 852 | if want_ifs_in_translations and tr_has_ifs and is_in_if is False: | ||
857 | 853 | fdo.write(' <if expr="pp_ifdef(\'use_third_party_translations\')">\n') | ||
858 | 854 | is_in_if = True | ||
859 | 855 | fdo.write(' %s<file path="%s" lang="%s" />\n' % | ||
860 | 856 | (' ' if (is_in_if or want_ifs_in_translations) and tr_has_ifs else '', f, ours[0])) | ||
861 | 857 | elif self.debug: | ||
862 | 858 | print "Skipped lang %s with no translated strings" % ours[0] | ||
863 | 859 | ours = ours[1:] | ||
864 | 860 | |||
865 | 861 | if is_in_if and want_ifs_in_translations: | ||
866 | 862 | fdo.write(' </if>\n') | ||
867 | 863 | is_in_if = False | ||
868 | 864 | if c is not None: | ||
869 | 865 | for s in c.split('\n')[:-1]: | ||
870 | 866 | if s.find('<!-- No translations available. -->') > -1: | ||
871 | 867 | continue | ||
872 | 868 | if s.find('</if>') > -1: | ||
873 | 869 | continue | ||
874 | 870 | fdo.write(s + '\n') | ||
875 | 871 | if tr_found: | ||
876 | 872 | tr_saved.append(line) | ||
877 | 873 | continue | ||
878 | 874 | if pak_found: | ||
879 | 875 | pak_saved.append(line) | ||
880 | 876 | continue | ||
881 | 877 | fdo.write(line) | ||
882 | 878 | fdi.close() | ||
883 | 879 | fdo.close() | ||
884 | 880 | |||
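# Annotation (not part of the original file): the grd sections rewritten by
# export_file() above are shaped like this (filenames made up):
#
#   <outputs>
#     <if expr="pp_ifdef('use_third_party_translations')">
#       <output filename="generated_resources_fr.pak" type="data_package" lang="fr" />
#     </if>
#   </outputs>
#   <translations>
#     <file path="resources/generated_resources_fr.xtb" lang="fr" />
#   </translations>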
885 | 881 | def uc(self, match): | ||
886 | 882 | return match.group(2).upper() | ||
887 | 883 | |||
888 | 884 | def uc_name(self, match): | ||
889 | 885 | return match.group(1) + match.group(2).upper() + match.group(3) | ||
890 | 886 | |||
891 | 887 | def is_string_valid_for_lang(self, id, lang): | ||
892 | 888 | ok = False | ||
893 | 889 | for string in self.supported_ids[id]['ids']: | ||
894 | 890 | if string['test'] is not None: | ||
895 | 891 | ok |= EvalConditions().lang_eval(string['test'], lang) | ||
896 | 892 | if ok: | ||
897 | 893 | break | ||
898 | 894 | else: | ||
899 | 895 | ok = True | ||
900 | 896 | break | ||
901 | 897 | return ok | ||
902 | 898 | |||
903 | 899 | def get_supported_strings_count(self, lang): | ||
904 | 900 | # need to ignore strings for which this lang is not wanted in the <if> conditions | ||
905 | 901 | if lang in self.supported_ids_counts: | ||
906 | 902 | return self.supported_ids_counts[lang]['count'], self.supported_ids_counts[lang]['skipped'] | ||
907 | 903 | count = 0 | ||
908 | 904 | skipped = 0 | ||
909 | 905 | for id in self.supported_ids: | ||
910 | 906 | ok = self.is_string_valid_for_lang(id, lang) | ||
911 | 907 | if ok: | ||
912 | 908 | count += 1 | ||
913 | 909 | else: | ||
914 | 910 | skipped += 1 | ||
915 | 911 | assert count + skipped == len(self.supported_ids.keys()) | ||
916 | 912 | self.supported_ids_counts[lang] = { 'count': count, 'skipped': skipped } | ||
917 | 913 | return count, skipped | ||
918 | 914 | |||
919 | 915 | def get_supported_langs(self): | ||
920 | 916 | return sorted(self.supported_langs.keys()) | ||
921 | 917 | |||
922 | 918 | def get_supported_lang_filenames(self): | ||
923 | 919 | """ return the list of (xtb) filenames sorted by langs (so it's | ||
924 | 920 | possible to zip() it) """ | ||
925 | 921 | return map(lambda l: self.supported_langs[l], sorted(self.supported_langs.keys())) | ||
926 | 922 | |||
927 | 923 | def update_stats(self, lang, translated_upstream = 0, obsolete = 0, | ||
928 | 924 | new = 0, updated = 0, skipped_lang = 0, mandatory_linux = 0): | ||
929 | 925 | if lang not in self.stats: | ||
930 | 926 | self.stats[lang] = { 'translated_upstream': 0, 'skipped_lang': 0, | ||
931 | 927 | 'obsolete': 0, 'new': 0, 'updated': 0, | ||
932 | 928 | 'mandatory_linux': 0 } | ||
933 | 929 | self.stats[lang]['translated_upstream'] += translated_upstream - updated | ||
934 | 930 | self.stats[lang]['obsolete'] += obsolete | ||
935 | 931 | self.stats[lang]['new'] += new | ||
936 | 932 | self.stats[lang]['updated'] += updated | ||
937 | 933 | self.stats[lang]['skipped_lang'] += skipped_lang | ||
938 | 934 | self.stats[lang]['mandatory_linux'] += mandatory_linux | ||
939 | 935 | |||
940 | 936 | def merge_template(self, template, newer_preferred = True): | ||
941 | 937 | """ merge strings from 'template' into self (the master template). | ||
942 | 938 | If the string differs, prefer the new one when newer_preferred is set """ | ||
943 | 939 | for id in template.supported_ids: | ||
944 | 940 | if id not in self.supported_ids: | ||
945 | 941 | if self.debug: | ||
946 | 942 | print "merged code %s (id %s) from branch '%s' from %s" % \ | ||
947 | 943 | (template.supported_ids[id]['ids'][0]['code'], id, | ||
948 | 944 | template.supported_ids[id]['ids'][0]['origin'][0], template.filename) | ||
949 | 945 | self.supported_ids[id] = template.supported_ids[id] | ||
950 | 946 | else: | ||
951 | 947 | for ent in template.supported_ids[id]['ids']: | ||
952 | 948 | found = False | ||
953 | 949 | for ent2 in self.supported_ids[id]['ids']: | ||
954 | 950 | if ent2['code'] != ent['code']: | ||
955 | 951 | continue | ||
956 | 952 | found = True | ||
957 | 953 | ent2['origin'].append(ent['origin'][0]) | ||
958 | 954 | if ent['test'] != ent2['test'] or \ | ||
959 | 955 | ent['desc'] != ent2['desc']: | ||
960 | 956 | if newer_preferred: | ||
961 | 957 | ent2['test'] = ent['test'] | ||
962 | 958 | ent2['desc'] = ent['desc'] | ||
963 | 959 | if not found: | ||
964 | 960 | if self.debug: | ||
965 | 961 | print "adding new ids code '%s' from branch '%s' for string id %s" % \ | ||
966 | 962 | (ent['code'], template.supported_ids[id]['ids'][0]['origin'][0], id) | ||
967 | 963 | self.supported_ids[id]['ids'].append(ent) | ||
968 | 964 | |||
969 | 965 | def add_translation(self, lang, id, translation): | ||
970 | 966 | if id not in self.supported_ids: | ||
971 | 967 | if self.debug: | ||
972 | 968 | print "*warn* obsolete string id %s for lang %s" % (id, lang) | ||
973 | 969 | return | ||
974 | 970 | self.supported_ids[id]['lang'][lang] = translation | ||
975 | 971 | |||
976 | 972 | def merge_translations(self, lang, xtb, master_xtb = None, newer_preferred = True): | ||
977 | 973 | if lang not in self.supported_langs: | ||
978 | 974 | self.supported_langs[lang] = xtb.filename | ||
979 | 975 | for id in xtb.strings: | ||
980 | 976 | if id not in self.supported_ids: | ||
981 | 977 | # d'oh!! obsolete translation? | ||
982 | 978 | self.update_stats(lang, obsolete = 1) | ||
983 | 979 | continue | ||
984 | 980 | if not self.is_string_valid_for_lang(id, lang): | ||
985 | 981 | # string not wanted for that lang, skipped | ||
986 | 982 | continue | ||
987 | 983 | if 'lang' not in self.supported_ids[id]: | ||
988 | 984 | self.supported_ids[id]['lang'] = {} | ||
989 | 985 | if lang in self.supported_ids[id]['lang']: | ||
990 | 986 | # already have a translation for this string | ||
991 | 987 | if newer_preferred and xtb.strings[id] != self.supported_ids[id]['lang'][lang]: | ||
992 | 988 | self.supported_ids[id]['lang'][lang] = xtb.strings[id] | ||
993 | 989 | else: | ||
994 | 990 | self.update_stats(lang, translated_upstream = 1) | ||
995 | 991 | self.supported_ids[id]['lang'][lang] = xtb.strings[id] | ||
996 | 992 | if master_xtb is not None: | ||
997 | 993 | master_xtb.strings[id] = xtb.strings[id] | ||
998 | 994 | |||
999 | 995 | def read_string(self, node, test = None): | ||
1000 | 996 | desc = node.getAttribute('desc') | ||
1001 | 997 | name = node.getAttribute('name') | ||
1002 | 998 | if not node.firstChild: | ||
1003 | 999 | # no string? weird. Skip. (e.g. IDS_LOAD_STATE_IDLE) | ||
1004 | 1000 | return | ||
1005 | 1001 | |||
1006 | 1002 | # Get a/ the full string from the grd, b/ its transformation | ||
1007 | 1003 | # into the smaller version found in xtb files (val) and c/ another into | ||
1008 | 1004 | # something suitable for the 64bit key generator (kval) | ||
1009 | 1005 | |||
1010 | 1006 | orig_val = "".join([ n.toxml() for n in node.childNodes ]) | ||
1011 | 1007 | |||
1012 | 1008 | # encode the value to create the 64bit ID needed for the xtb mapping. | ||
1013 | 1009 | # | ||
1014 | 1010 | # grd: 'f&amp;oo &quot;<ph name="IDS_xX">$1<ex>blabla</ex></ph>&quot; bar' | ||
1015 | 1011 | # xtb: 'f&amp;oo &quot;<ph name="IDS_XX"/>&quot; bar' | ||
1016 | 1012 | # but the string used to create the 64bit id is only 'f&oo "IDS_XX" bar'. | ||
1017 | 1013 | # Also, the final value must be positive, while FingerPrint() returns | ||
1018 | 1014 | # a signed long. Of course, none of this is documented... | ||
1019 | 1015 | |||
1020 | 1016 | # grd->xtb | ||
1021 | 1017 | for x in node.getElementsByTagName('ph'): | ||
1022 | 1018 | while x.hasChildNodes(): | ||
1023 | 1019 | x.removeChild(x.childNodes[0]) | ||
1024 | 1020 | val = "".join([ n.toxml() for n in node.childNodes ]).strip() | ||
1025 | 1021 | # xtb->id | ||
1026 | 1022 | kval = StringCvt().decode_xml_entities(unescape(self._PH_REGEXP.sub(self.uc, val))).encode('utf-8') | ||
1027 | 1023 | kval = kval.replace('&quot;', '"') # not replaced by unescape() | ||
1028 | 1024 | |||
1029 | 1025 | val = self._PH_REGEXP.sub(self.uc_name, val) | ||
1030 | 1026 | val = val.encode("ascii", "xmlcharrefreplace").strip().encode('utf-8') | ||
1031 | 1027 | |||
1032 | 1028 | # finally, create the 64bit ID | ||
1033 | 1029 | id = str(FingerPrint(kval) & 0x7fffffffffffffffL) | ||
1034 | 1030 | |||
1035 | 1031 | if val == '': | ||
1036 | 1032 | # unexpected <message/> block with attributes but without value, skip | ||
1037 | 1033 | return | ||
1038 | 1034 | |||
1039 | 1035 | if id not in self.supported_ids: | ||
1040 | 1036 | self.supported_ids[id] = { 'ids': [] } | ||
1041 | 1037 | self.supported_ids[id]['ids'].append({ 'code': name, 'desc': desc, | ||
1042 | 1038 | 'val': val, 'test': test, | ||
1043 | 1039 | 'origin': [ self.branch_name ] }) | ||
1044 | 1040 | |||
1045 | 1041 | def read_strings(self, node, test = None): | ||
1046 | 1042 | for n in node.childNodes: | ||
1047 | 1043 | if n.nodeName == '#text' or n.nodeName == '#comment': | ||
1048 | 1044 | # comments, skip | ||
1049 | 1045 | continue | ||
1050 | 1046 | if n.nodeName == 'message': | ||
1051 | 1047 | self.read_string(n, test) | ||
1052 | 1048 | continue | ||
1053 | 1049 | if n.nodeName == 'if': | ||
1054 | 1050 | expr = n.getAttribute('expr') | ||
1055 | 1051 | if expr is not None and test is not None: | ||
1056 | 1052 | assert False, "nested <if> not supported" | ||
1057 | 1053 | self.read_strings(n, expr) | ||
1058 | 1054 | continue | ||
1059 | 1055 | if n.nodeName == 'part': | ||
1060 | 1056 | f = n.getAttribute('file') | ||
1061 | 1057 | qualified_file = os.path.join(os.path.dirname(self.filename), f) | ||
1062 | 1058 | self.import_file(override_filename=qualified_file) | ||
1063 | 1059 | continue | ||
1064 | 1060 | raise Exception("unknown tag (<%s> type %s): ''%s''" % \ | ||
1065 | 1061 | (n.nodeName, n.nodeType, n.toxml())) | ||
1066 | 1062 | |||
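# Annotation (not part of the original file): read_strings() above walks grd
# content shaped like this (names made up):
#
#   <if expr="not pp_ifdef('chromeos')">
#     <message name="IDS_FOO" desc="Some description">Foo</message>
#   </if>
#   <part file="foo_strings.grdp" />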
1067 | 1063 | def import_json_file(self, filename): | ||
1068 | 1064 | # unlike what its name suggests, this file is definitely not a json file. | ||
1069 | 1065 | # It's a python object, dumped to a file. That makes it far easier to parse | ||
1070 | 1066 | # because there's no extra unescaping to do on all the strings. It also | ||
1071 | 1067 | # means we can't use the json module | ||
1072 | 1068 | rfile = os.path.join(self.branch_dir, filename) | ||
1073 | 1069 | if self.debug: | ||
1074 | 1070 | print "parse_json('%s') [%s]" % (filename, rfile) | ||
1075 | 1071 | fd = open(rfile, "rb") | ||
1076 | 1072 | data = fd.read() | ||
1077 | 1073 | fd.close() | ||
1078 | 1074 | vars = { '__builtins__': { 'True': True, 'False': False } } # prevent eval from using the real current globals | ||
1079 | 1075 | data = eval(data, vars) | ||
1080 | 1076 | # Check if this is a format we support | ||
1081 | 1077 | if 'policy_definitions' in data and len(data['policy_definitions']) > 0 and \ | ||
1082 | 1078 | 'caption' not in data['policy_definitions'][0]: | ||
1083 | 1079 | # most probably Chromium v9. It used 'annotations' instead of 'caption' | ||
1084 | 1080 | # Not worth supporting that, all the strings we need in v9 are already in | ||
1085 | 1081 | # the grd file. Skip this json file | ||
1086 | 1082 | if self.debug: | ||
1087 | 1083 | print "Found older unsupported json format. Skipped" | ||
1088 | 1084 | return | ||
1089 | 1085 | if 'messages' in data: | ||
1090 | 1086 | for msg in data['messages']: | ||
1091 | 1087 | self.read_policy('IDS_POLICY_' + msg.upper(), | ||
1092 | 1088 | data['messages'][msg]['desc'], | ||
1093 | 1089 | data['messages'][msg]['text']) | ||
1094 | 1090 | if 'policy_definitions' in data: | ||
1095 | 1091 | for policy in data['policy_definitions']: | ||
1096 | 1092 | name = 'IDS_POLICY_' + policy['name'].upper() | ||
1097 | 1093 | if policy['type'] in [ 'main', 'int', 'string', 'list', 'string-enum', 'int-enum', 'string-enum-list' ]: | ||
1098 | 1094 | # caption | ||
1099 | 1095 | self.read_policy(name + '_CAPTION', | ||
1100 | 1096 | "Caption of the '%s' policy." % policy['name'], | ||
1101 | 1097 | policy['caption']) | ||
1102 | 1098 | # label (optional) | ||
1103 | 1099 | if 'label' in policy: | ||
1104 | 1100 | self.read_policy(name + '_LABEL', | ||
1105 | 1101 | "Label of the '%s' policy." % policy['name'], | ||
1106 | 1102 | policy['label']) | ||
1107 | 1103 | # desc | ||
1108 | 1104 | self.read_policy(name + '_DESC', | ||
1109 | 1105 | "Description of the '%s' policy." % policy['name'], | ||
1110 | 1106 | policy['desc']) | ||
1111 | 1107 | if policy['type'] in [ 'string-enum', 'int-enum', 'string-enum-list' ]: | ||
1112 | 1108 | for item in policy['items']: | ||
1113 | 1109 | self.read_policy('IDS_POLICY_ENUM_' + item['name'].upper().replace(' ', '_') + '_CAPTION', | ||
1114 | 1110 | "Label in a '%s' dropdown menu for selecting '%s'" % \ | ||
1115 | 1111 | (policy['name'], item['name']), | ||
1116 | 1112 | item['caption']) | ||
1117 | 1113 | continue | ||
1118 | 1114 | if policy['type'] == 'group': | ||
1119 | 1115 | # group caption | ||
1120 | 1116 | self.read_policy(name + '_CAPTION', | ||
1121 | 1117 | "Caption of the group of '%s' related policies." % name, | ||
1122 | 1118 | policy['caption']) | ||
1123 | 1119 | # group label (optional) | ||
1124 | 1120 | if 'label' in policy: | ||
1125 | 1121 | self.read_policy(name + '_LABEL', | ||
1126 | 1122 | "Label of the group of '%s' related policies." % name, | ||
1127 | 1123 | policy['label']) | ||
1128 | 1124 | # group desc | ||
1129 | 1125 | self.read_policy(name + '_DESC', | ||
1130 | 1126 | "Description of the group of '%s' related policies." % name, | ||
1131 | 1127 | policy['desc']) | ||
1132 | 1128 | for spolicy in policy['policies']: | ||
1133 | 1129 | sname = 'IDS_POLICY_' + spolicy['name'].upper() | ||
1134 | 1130 | # desc | ||
1135 | 1131 | self.read_policy(sname + '_DESC', | ||
1136 | 1132 | "Description of the '%s' policy." % spolicy['name'], | ||
1137 | 1133 | spolicy['desc']) | ||
1138 | 1134 | # label (optional) | ||
1139 | 1135 | if 'label' in spolicy: | ||
1140 | 1136 | self.read_policy(sname + '_LABEL', | ||
1141 | 1137 | "Label of the '%s' policy." % spolicy['name'], | ||
1142 | 1138 | spolicy['label']) | ||
1143 | 1139 | # caption | ||
1144 | 1140 | self.read_policy(sname + '_CAPTION', | ||
1145 | 1141 | "Caption of the '%s' policy." % spolicy['name'], | ||
1146 | 1142 | spolicy['caption']) | ||
1147 | 1143 | if spolicy['type'] in [ 'int-enum', 'string-enum' ]: | ||
1148 | 1144 | # only caption | ||
1149 | 1145 | for item in spolicy['items']: | ||
1150 | 1146 | self.read_policy('IDS_POLICY_ENUM_' + item['name'].upper() + '_CAPTION', | ||
1151 | 1147 | "Label in a '%s' dropdown menu for selecting a '%s' of '%s'" % \ | ||
1152 | 1148 | (policy['name'], spolicy['name'], item['name']), | ||
1153 | 1149 | item['caption']) | ||
1154 | 1150 | continue | ||
1155 | 1151 | # The new type is not yet being used: http://code.google.com/p/chromium/issues/detail?id=108992 | ||
1156 | 1152 | if policy['type'] == 'external': | ||
1157 | 1153 | continue | ||
1158 | 1154 | if policy['type'] == 'dict': | ||
1159 | 1155 | continue | ||
1160 | 1156 | |||
1161 | 1157 | assert False, "Policy type '%s' not supported while parsing %s" % (policy['type'], rfile) | ||
1162 | 1158 | |||
1163 | 1159 | def read_policy(self, name, desc, text): | ||
1164 | 1160 | xml = '<x><message name="%s" desc="%s">\n%s\n</message></x>' % (name, desc, text) | ||
1165 | 1161 | dom = minidom.parseString(xml) | ||
1166 | 1162 | self.read_strings(dom.getElementsByTagName('x')[0]) | ||
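# Illustration (made-up policy name): read_policy('IDS_POLICY_FOO_CAPTION',
#   "Caption of the 'Foo' policy.", 'Enable Foo') wraps the text as
#   <x><message name="IDS_POLICY_FOO_CAPTION" desc="...">\nEnable Foo\n</message></x>
# so it can be ingested by the regular read_strings()/read_string() path.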
1167 | 1163 | |||
1168 | 1164 | def _add_xtb(self, node): | ||
1169 | 1165 | if node.nodeName != 'file': | ||
1170 | 1166 | return | ||
1171 | 1167 | path = node.getAttribute('path') | ||
1172 | 1168 | m = re.match(r'.*_([^_]+)\.xtb', path) | ||
1173 | 1169 | flang = m.group(1) | ||
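# e.g. a path like 'resources/generated_resources_pt-BR.xtb' (illustrative)
# yields flang 'pt-BR': the regexp keeps whatever follows the last '_'.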
1174 | 1170 | lang = node.getAttribute('lang') | ||
1175 | 1171 | tlang = lang | ||
1176 | 1172 | if self.lang_mapping is not None and lang in self.lang_mapping: | ||
1177 | 1173 | if self.debug: | ||
1178 | 1174 | print "# mapping lang '%s' to '%s'" % (lang, self.lang_mapping[lang]) | ||
1179 | 1175 | tlang = self.lang_mapping[lang] | ||
1180 | 1176 | tlang = tlang.replace('-', '_') | ||
1181 | 1177 | self.supported_langs[lang] = os.path.normpath(os.path.join(os.path.dirname(self.filename), path)) | ||
1182 | 1178 | self.translated_strings[lang] = {} | ||
1183 | 1179 | glang = lang | ||
1184 | 1180 | if flang == 'iw': | ||
1185 | 1181 | glang = flang | ||
1186 | 1182 | #assert lang not in self.mapped_langs, "'%s' already in self.mapped_langs" % lang | ||
1187 | 1183 | if lang not in self.mapped_langs: | ||
1188 | 1184 | self.mapped_langs[lang] = { 'xtb_file': flang, 'grit': glang, 'gettext': tlang } | ||
1189 | 1185 | |||
1190 | 1186 | def import_file(self, override_filename=None): | ||
1191 | 1187 | if override_filename: | ||
1192 | 1188 | assert self.branch_dir | ||
1193 | 1189 | filename = os.path.join(self.branch_dir, override_filename) | ||
1194 | 1190 | else: | ||
1195 | 1191 | filename = os.path.join(self.branch_dir, self.filename) if self.branch_dir is not None \ | ||
1196 | 1192 | else self.filename | ||
1197 | 1193 | self.supported_langs = {} | ||
1198 | 1194 | self.mtime = self.get_mtime(self.filename) | ||
1199 | 1195 | if self.debug: | ||
1200 | 1196 | print "minidom.parse(%s)" % filename | ||
1201 | 1197 | dom = minidom.parse(filename) | ||
1202 | 1198 | grits = dom.getElementsByTagName('grit') | ||
1203 | 1199 | if not grits: | ||
1204 | 1200 | grits = dom.getElementsByTagName('grit-part') | ||
1205 | 1201 | grit = grits[0] | ||
1206 | 1202 | for node in grit.childNodes: | ||
1207 | 1203 | if node.nodeName == '#text' or node.nodeName == '#comment': | ||
1208 | 1204 | # comments, skip | ||
1209 | 1205 | continue | ||
1210 | 1206 | if node.nodeName == 'outputs': | ||
1211 | 1207 | # skip, nothing for us here | ||
1212 | 1208 | continue | ||
1213 | 1209 | if node.nodeName == 'translations': | ||
1214 | 1210 | # collect the supported langs by scanning the list of xtb files | ||
1215 | 1211 | for n in node.childNodes: | ||
1216 | 1212 | if n.nodeName == 'if': | ||
1217 | 1213 | for nn in n.childNodes: | ||
1218 | 1214 | self._add_xtb(nn) | ||
1219 | 1215 | continue | ||
1220 | 1216 | self._add_xtb(n) | ||
1221 | 1217 | continue | ||
1222 | 1218 | if node.nodeName == 'release': | ||
1223 | 1219 | for n in node.childNodes: | ||
1224 | 1220 | if n.nodeName == '#text' or n.nodeName == '#comment': | ||
1225 | 1221 | # comments, skip | ||
1226 | 1222 | continue | ||
1227 | 1223 | if n.nodeName == 'includes': | ||
1228 | 1224 | # skip, nothing for us here | ||
1229 | 1225 | continue | ||
1230 | 1226 | if n.nodeName == 'structures': | ||
1231 | 1227 | for sn in n.childNodes: | ||
1232 | 1228 | if sn.nodeName != 'structure': | ||
1233 | 1229 | continue | ||
1234 | 1230 | type = sn.getAttribute('type') | ||
1235 | 1231 | if type == 'dialog': | ||
1236 | 1232 | # nothing for us here | ||
1237 | 1233 | continue | ||
1238 | 1234 | name = sn.getAttribute('name') | ||
1239 | 1235 | file = sn.getAttribute('file') | ||
1240 | 1236 | if type == 'policy_template_metafile': | ||
1241 | 1237 | # included file containing the strings that are usually in the <messages> tree. | ||
1242 | 1238 | fname = os.path.normpath(os.path.join(os.path.dirname(self.filename), file)) | ||
1243 | 1239 | self.import_json_file(fname) | ||
1244 | 1240 | continue | ||
1245 | 1241 | else: | ||
1246 | 1242 | if self.debug: | ||
1247 | 1243 | print "unknown <structure> type found ('%s') in %s" % (type, self.filename) | ||
1248 | 1244 | continue | ||
1249 | 1245 | if n.nodeName == 'messages': | ||
1250 | 1246 | self.read_strings(n) | ||
1251 | 1247 | continue | ||
1252 | 1248 | print "unknown tag (<%s> type %s): ''%s''" % (n.nodeName, n.nodeType, n.toxml()) | ||
1253 | 1249 | continue | ||
1254 | 1250 | print "unknown tag (<%s> type %s): ''%s''" % (node.nodeName, node.nodeType, node.toxml()) | ||
1255 | 1251 | |||
1256 | 1252 | class XtbFile(PoFile): | ||
1257 | 1253 | """ | ||
1258 | 1254 | Read and write a Grit XTB file | ||
1259 | 1255 | """ | ||
1260 | 1256 | |||
1261 | 1257 | def __init__(self, lang, filename, grd, date = None, debug = None, | ||
1262 | 1258 | branch_name = "default", branch_dir = os.getcwd()): | ||
1263 | 1259 | super(XtbFile, self).__init__(lang, filename, grd, date = date, debug = debug, | ||
1264 | 1260 | branch_name = branch_name, branch_dir = branch_dir) | ||
1265 | 1261 | self.template = grd | ||
1266 | 1262 | self.strings = {} | ||
1267 | 1263 | self.strings_updated = 0 | ||
1268 | 1264 | self.strings_new = 0 | ||
1269 | 1265 | self.strings_order = [] # needed to recreate xtb files in a similar order :( | ||
1270 | 1266 | |||
1271 | 1267 | def add_translation(self, id, string): | ||
1272 | 1268 | assert id in self.template.supported_ids, "'%s' is not in supported_ids (file=%s)" % (id, self.filename) | ||
1273 | 1269 | while string[-1:] == '\n' and self.template.supported_ids[id]['ids'][0]['val'][-1:] != '\n': | ||
1274 | 1270 | # prevent the `msgid' and `msgstr' entries do not both end with '\n' error | ||
1275 | 1271 | if self.debug: | ||
1276 | 1272 | print "Found unwanted \\n at the end of translation id " + id + " lang " + self.lang + ". Dropped" | ||
1277 | 1273 | string = string[:-1] | ||
1278 | 1274 | while string[:1] == '\n' and self.template.supported_ids[id]['ids'][0]['val'][:1] != '\n': | ||
1279 | 1275 | # prevent the `msgid' and `msgstr' entries do not both begin with '\n' error | ||
1280 | 1276 | if self.debug: | ||
1281 | 1277 | print "Found unwanted \\n at the beginning of translation id " + id + " lang " + self.lang + ". Dropped" | ||
1282 | 1278 | string = string[1:] | ||
1283 | 1279 | self.strings[id] = string | ||
1284 | 1280 | self.strings_order.append(id) | ||
1285 | 1281 | |||
1286 | 1282 | def write_header(self): | ||
1287 | 1283 | self.write('<?xml version="1.0" ?>\n') | ||
1288 | 1284 | self.write('<!DOCTYPE translationbundle>\n') | ||
1289 | 1285 | self.write('<translationbundle lang="%s">\n' % \ | ||
1290 | 1286 | self.template.mapped_langs[self.lang]['grit']) | ||
1291 | 1287 | |||
1292 | 1288 | def write_footer(self): | ||
1293 | 1289 | self.write('</translationbundle>') | ||
1294 | 1290 | |||
1295 | 1291 | def write_all_strings(self): | ||
1296 | 1292 | for id in self.strings_order: | ||
1297 | 1293 | if id in self.strings: | ||
1298 | 1294 | self.write('<translation id="%s">%s</translation>\n' % \ | ||
1299 | 1295 | (id, self.strings[id])) | ||
1300 | 1296 | for id in sorted(self.strings.keys()): | ||
1301 | 1297 | if id in self.strings_order: | ||
1302 | 1298 | continue | ||
1303 | 1299 | self.write('<translation id="%s">%s</translation>\n' % \ | ||
1304 | 1300 | (id, self.strings[id])) | ||
1305 | 1301 | |||
1306 | 1302 | def import_po(self, po): | ||
1307 | 1303 | for string in po.strings: | ||
1308 | 1304 | if string['string'] == '': | ||
1309 | 1305 | continue | ||
1310 | 1306 | self.add_string(string['id'], string['extracted'], | ||
1311 | 1307 | string['string'], string['translation']) | ||
1312 | 1308 | |||
1313 | 1309 | def import_file(self): | ||
1314 | 1310 | self.open() | ||
1315 | 1311 | file = self.fd.read() # *sigh* | ||
1316 | 1312 | self.close() | ||
1317 | 1313 | imported = 0 | ||
1318 | 1314 | for m in re.finditer('<translation id="(.*?)">(.*?)</translation>', | ||
1319 | 1315 | file, re.S): | ||
1320 | 1316 | if m.group(1) not in self.template.supported_ids: | ||
1321 | 1317 | if self.debug: | ||
1322 | 1318 | print "found a translation for obsolete string id %s in upstream xtb %s" % (m.group(1), self.filename) | ||
1323 | 1319 | continue | ||
1324 | 1320 | self.add_translation(m.group(1), m.group(2)) | ||
1325 | 1321 | imported += 1 | ||
1326 | 1322 | for m in re.finditer('<translationbundle lang="(.*?)">', file): | ||
1327 | 1323 | lang = m.group(1) | ||
1328 | 1324 | if self.lang in self.template.mapped_langs: | ||
1329 | 1325 | assert self.template.mapped_langs[self.lang]['grit'] == lang, \ | ||
1330 | 1326 | "bad lang mapping for '%s' while importing %s, expected '%s'" % \ | ||
1331 | 1327 | (lang, self.filename, self.template.mapped_langs[self.lang]['grit']) | ||
1332 | 1328 | else: | ||
1333 | 1329 | tlang = lang | ||
1334 | 1330 | if self.template.lang_mapping is not None and lang in self.template.lang_mapping: | ||
1335 | 1331 | if self.debug: | ||
1336 | 1332 | print "# mapping lang '%s' to '%s'" % (lang, self.template.lang_mapping[lang]) | ||
1337 | 1333 | tlang = self.template.lang_mapping[lang] | ||
1338 | 1334 | tlang = tlang.replace('-', '_') | ||
1339 | 1335 | flang = lang.replace('@', '-') | ||
1340 | 1336 | self.template.mapped_langs[lang] = { 'xtb_file': flang, 'grit': lang, 'gettext': tlang } | ||
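# e.g. a bundle declaring lang="pt-BR" (illustrative) would be recorded as
# { 'xtb_file': 'pt-BR', 'grit': 'pt-BR', 'gettext': 'pt_BR' }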
1341 | 1337 | if self.debug: | ||
1342 | 1338 | print "imported %d strings from the xtb file into lang '%s'" % (imported, self.lang) | ||
1343 | 1339 | self.mtime = self.get_mtime(self.filename) | ||
1344 | 1340 | |||
1345 | 1341 | ### | ||
1346 | 1342 | |||
1347 | 1343 | class Converter(dict): | ||
1348 | 1344 | """ | ||
1349 | 1345 | Given a grd template and its xtb translations, | ||
1350 | 1346 | a/ exports gettext pot template and po translations, | ||
1351 | 1347 | possibly by merging grd/xtb files from multiple branches | ||
1352 | 1348 | or | ||
1353 | 1349 | b/ imports and merges some gettext po translations, | ||
1354 | 1350 | and exports xtb translations | ||
1355 | 1351 | """ | ||
1356 | 1352 | |||
1357 | 1353 | def __init__(self, template_filename, lang_mapping = None, date = None, debug = False, | ||
1358 | 1354 | template_mapping = {}, html_output = False, branches = None): | ||
1359 | 1355 | self.debug = debug | ||
1360 | 1356 | self.translations = {} | ||
1361 | 1357 | self.errors = 0 | ||
1362 | 1358 | self.template_changes = 0 | ||
1363 | 1359 | self.translations_changes = 0 | ||
1364 | 1360 | self.lang_mapping = lang_mapping | ||
1365 | 1361 | self.template_mapping = template_mapping | ||
1366 | 1362 | self.file_mapping = {} | ||
1367 | 1363 | self.html_output = html_output | ||
1368 | 1364 | self.stats = {} | ||
1369 | 1365 | self.branches = branches if branches is not None else [ { 'branch': 'default', 'dir': os.getcwd(), 'grd': template_filename } ] | ||
1370 | 1366 | |||
1371 | 1367 | # read a grd template from a file | ||
1372 | 1368 | self.template = GrdFile(self.branches[0]['grd'], date, lang_mapping = self.lang_mapping, debug = self.debug, | ||
1373 | 1369 | branch_name = self.branches[0]['branch'], branch_dir = self.branches[0]['dir']) | ||
1374 | 1370 | self.file_mapping['grd'] = { 'src': self.branches[0]['grd'], | ||
1375 | 1371 | 'branches': { self.branches[0]['branch']: self.branches[0]['dir'] } } | ||
1376 | 1372 | if 'mapped_grd' in self.branches[0]: | ||
1377 | 1373 | self.file_mapping['grd']['mapped_grd'] = self.branches[0]['mapped_grd'] | ||
1378 | 1374 | self.template.import_file() | ||
1379 | 1375 | self.template_pot = None | ||
1380 | 1376 | for lang, file in zip(self.template.get_supported_langs(), | ||
1381 | 1377 | self.template.get_supported_lang_filenames()): | ||
1382 | 1378 | try: | ||
1383 | 1379 | # also read all the xtb files referenced by this grd template | ||
1384 | 1380 | rfile = os.path.join(self.branches[0]['dir'] , file) | ||
1385 | 1381 | xtb = XtbFile(lang, file, self.template, date = self.template.get_mtime(file), debug = self.debug, | ||
1386 | 1382 | branch_name = self.branches[0]['branch'], branch_dir = self.branches[0]['dir']) | ||
1387 | 1383 | xtb.import_file() | ||
1388 | 1384 | |||
1389 | 1385 | self.file_mapping['lang_' + lang] = { 'src': file, | ||
1390 | 1386 | 'branches': { self.branches[0]['branch']: self.branches[0]['dir'] } } | ||
1391 | 1387 | self.stats[lang] = { 'strings': self.template.get_supported_strings_count(lang), | ||
1392 | 1388 | 'translated_upstream': 0, | ||
1393 | 1389 | 'changed_in_gettext': 0, | ||
1394 | 1390 | 'rejected': 0 | ||
1395 | 1391 | } | ||
1396 | 1392 | self.template.merge_translations(lang, xtb) | ||
1397 | 1393 | self.translations[lang] = xtb | ||
1398 | 1394 | except Exception, e: | ||
1399 | 1395 | print "Skipping a XTB file, ERROR while importing xtb %s from grd file %s: %s" % (file, self.branches[0]['grd'], str(e)) | ||
1400 | 1396 | |||
1401 | 1397 | # read other grd templates | ||
1402 | 1398 | if len(self.branches) > 1: | ||
1403 | 1399 | for branch in self.branches[1:]: | ||
1404 | 1400 | if self.debug: | ||
1405 | 1401 | print "merging %s from branch '%s' from %s" % (branch['grd'], branch['branch'], branch['dir']) | ||
1406 | 1402 | template = GrdFile(branch['grd'], date, lang_mapping = self.lang_mapping, debug = self.debug, | ||
1407 | 1403 | branch_name = branch['branch'], branch_dir = branch['dir']) | ||
1408 | 1404 | self.file_mapping['grd']['branches'][branch['branch']] = branch['dir'] | ||
1409 | 1405 | template.import_file() | ||
1410 | 1406 | self.template.merge_template(template, newer_preferred = False) | ||
1411 | 1407 | for lang, file in zip(template.get_supported_langs(), | ||
1412 | 1408 | template.get_supported_lang_filenames()): | ||
1413 | 1409 | xtb = XtbFile(lang, file, self.template, date = template.get_mtime(file), debug = self.debug, | ||
1414 | 1410 | branch_name = branch['branch'], branch_dir = branch['dir']) | ||
1415 | 1411 | if 'lang_' + lang not in self.file_mapping: | ||
1416 | 1412 | self.file_mapping['lang_' + lang] = { 'src': file, 'branches': {} } | ||
1417 | 1413 | self.file_mapping['lang_' + lang]['branches'][branch['branch']] = branch['dir'] | ||
1418 | 1414 | # TODO: stats | ||
1419 | 1415 | xtb.import_file() | ||
1420 | 1416 | if lang not in self.translations: | ||
1421 | 1417 | if self.debug: | ||
1422 | 1418 | print "Add lang '%s' as master xtb for alt branch '%s'" % (lang, branch['branch']) | ||
1423 | 1419 | self.translations[lang] = xtb | ||
1424 | 1420 | self.template.merge_translations(lang, xtb, master_xtb = self.translations[lang], | ||
1425 | 1421 | newer_preferred = False) | ||
1426 | 1422 | |||
1427 | 1423 | def export_gettext_files(self, directory): | ||
1428 | 1424 | fname = self.file_mapping['grd']['mapped_grd'] \ | ||
1429 | 1425 | if 'mapped_grd' in self.file_mapping['grd'] else self.template.filename | ||
1430 | 1426 | name = os.path.splitext(os.path.basename(fname))[0] | ||
1431 | 1427 | if directory is not None: | ||
1432 | 1428 | directory = os.path.join(directory, name) | ||
1433 | 1429 | if not os.path.isdir(directory): | ||
1434 | 1430 | os.makedirs(directory, 0755) | ||
1435 | 1431 | filename = os.path.join(directory, name + ".pot") | ||
1436 | 1432 | else: | ||
1437 | 1433 | filename = os.path.splitext(fname)[0] + ".pot" | ||
1438 | 1434 | # create a pot template and merge the grd strings into it | ||
1439 | 1435 | self.template_pot = PotFile(filename, date = self.template.mtime, debug = self.debug) | ||
1440 | 1436 | self.template_pot.import_grd(self.template) | ||
1441 | 1437 | # write it to a file | ||
1442 | 1438 | self.template_changes += self.template_pot.export_file(directory = directory) | ||
1443 | 1439 | |||
1444 | 1440 | # do the same for all langs (xtb -> po) | ||
1445 | 1441 | for lang in self.translations: | ||
1446 | 1442 | gtlang = self.template.mapped_langs[lang]['gettext'] | ||
1447 | 1443 | file = os.path.join(os.path.dirname(filename), gtlang + ".po") | ||
1448 | 1444 | po = PoFile(gtlang, file, self.template_pot, | ||
1449 | 1445 | date = self.translations[lang].translation_date, debug = self.debug) | ||
1450 | 1446 | po.import_xtb(self.translations[lang]) | ||
1451 | 1447 | self.translations_changes += po.export_file(directory) | ||
1452 | 1448 | |||
1453 | 1449 | def export_grit_xtb_file(self, lang, directory): | ||
1454 | 1450 | name = os.path.splitext(os.path.basename(self.template.filename))[0] | ||
1455 | 1451 | file = os.path.join(directory, os.path.basename(self.template.supported_langs[lang])) | ||
1456 | 1452 | if len(self.translations[lang].strings.keys()) > 0: | ||
1457 | 1453 | if 'lang_' + lang in self.file_mapping: | ||
1458 | 1454 | self.file_mapping['lang_' + lang]['dst'] = file | ||
1459 | 1455 | else: | ||
1460 | 1456 | self.file_mapping['lang_' + lang] = { 'src': None, 'dst': file } | ||
1461 | 1457 | self.translations[lang].export_file(filename = file) | ||
1462 | 1458 | |||
1463 | 1459 | def export_grit_files(self, directory, langs): | ||
1464 | 1460 | grd_dst = os.path.join(directory, os.path.basename(self.template.filename)) | ||
1465 | 1461 | if len(self.translations.keys()) == 0: | ||
1466 | 1462 | if self.debug: | ||
1467 | 1463 | print "no translation at all, nothing to export here (template: %s)" % self.template.filename | ||
1468 | 1464 | return | ||
1469 | 1465 | if not os.path.isdir(directory): | ||
1470 | 1466 | os.makedirs(directory, 0755) | ||
1471 | 1467 | # 'langs' may contain langs for which this template no longer has translations. | ||
1472 | 1468 | # They need to be dropped from the grd file | ||
1473 | 1469 | self.template.export_file(filename = grd_dst, global_langs = langs, langs = self.translations.keys()) | ||
1474 | 1470 | self.file_mapping['grd']['dst'] = grd_dst | ||
1475 | 1471 | self.file_mapping['grd']['dir'] = directory[:-len(os.path.dirname(self.template.filename)) - 1] | ||
1476 | 1472 | for lang in self.translations: | ||
1477 | 1473 | prefix = self.template.supported_langs[lang] | ||
1478 | 1474 | fdirectory = os.path.normpath(os.path.join(self.file_mapping['grd']['dir'], os.path.dirname(prefix))) | ||
1479 | 1475 | if not os.path.isdir(fdirectory): | ||
1480 | 1476 | os.makedirs(fdirectory, 0755) | ||
1481 | 1477 | self.export_grit_xtb_file(lang, fdirectory) | ||
1482 | 1478 | |||
1483 | 1479 | def get_supported_strings_count(self): | ||
1484 | 1480 | return len(self.template.supported_ids.keys()) | ||
1485 | 1481 | |||
1486 | 1482 | def compare_translations(self, old, new, id, lang): | ||
1487 | 1483 | # strip leading and trailing whitespace from the upstream strings | ||
1488 | 1484 | # (this should be done upstream) | ||
1489 | 1485 | old = old.strip() | ||
1490 | 1486 | if old != new: | ||
1491 | 1487 | s = self.template.supported_ids[id]['ids'][0]['val'] if 'ids' in self.template.supported_ids[id] else "<none?>" | ||
1492 | 1488 | if self.debug: | ||
1493 | 1489 | print "Found a different translation for id %s in lang '%s':\n string: \"%s\"\n " \ | ||
1494 | 1490 | "upstream: \"%s\"\n launchpad: \"%s\"\n" % (id, lang, s, old, new) | ||
1495 | 1491 | return old == new | ||
1496 | 1492 | |||
1497 | 1493 | def import_gettext_po_file(self, lang, filename): | ||
1498 | 1494 | """ import a single lang file into the current translations set, | ||
1499 | 1495 | matching the current template. Could be useful to merge the upstream | ||
1500 | 1496 | and launchpad translations, or to merge strings from another project | ||
1501 | 1497 | (like webkit) """ | ||
1502 | 1498 | po = PoFile(self.template.mapped_langs[lang]['gettext'], filename, self.template, | ||
1503 | 1499 | date = self.template.get_mtime(filename), debug = self.debug) | ||
1504 | 1500 | po.import_file() | ||
1505 | 1501 | # no need to continue if there are no translations in this po | ||
1506 | 1502 | translated_count = 0 | ||
1507 | 1503 | for s in po.strings: | ||
1508 | 1504 | if s['string'] != '""' and s['translation'] != '""': | ||
1509 | 1505 | translated_count += 1 | ||
1510 | 1506 | if translated_count == 0: | ||
1511 | 1507 | if self.debug: | ||
1512 | 1508 | print "No translation found for lang %s in %s" % (lang, filename) | ||
1513 | 1509 | return | ||
1514 | 1510 | if lang not in self.translations: | ||
1515 | 1511 | # assuming the filename should be third_party/launchpad_translations/<template_name>_<lang>.xtb | ||
1516 | 1512 | # (relative to $SRC), we need it relative to the grd directory | ||
1517 | 1513 | tname = os.path.splitext(os.path.basename(self.template.filename))[0] | ||
1518 | 1514 | f = os.path.normpath(os.path.join('third_party/launchpad_translations', | ||
1519 | 1515 | tname + '_' + self.template.mapped_langs[lang]['xtb_file'] + '.xtb')) | ||
1520 | 1516 | self.translations[lang] = XtbFile(lang, f, self.template, date = po.mtime, debug = self.debug) | ||
1521 | 1517 | self.template.supported_langs[lang] = f # *sigh* | ||
1522 | 1518 | |||
1523 | 1519 | lp669831_skipped = 0 | ||
1524 | 1520 | for string in po.strings: | ||
1525 | 1521 | if 'id' not in string: | ||
1526 | 1522 | continue # PO header | ||
1527 | 1523 | id = string['id'] | ||
1528 | 1524 | if id in self.template.supported_ids: | ||
1529 | 1525 | if 'conditions' in string: | ||
1530 | 1526 | # test the lang against all those conditions. If at least one passes, we need | ||
1531 | 1527 | # the string | ||
1532 | 1528 | found = False | ||
1533 | 1529 | for c in string['conditions']: | ||
1534 | 1530 | found |= EvalConditions().lang_eval(c, lang) | ||
1535 | 1531 | if found is False: | ||
1536 | 1532 | self.template.update_stats(lang, skipped_lang = 1) | ||
1537 | 1533 | if self.debug: | ||
1538 | 1534 | print "Skipped string (lang condition) for %s/%s: %s" % \ | ||
1539 | 1535 | (os.path.splitext(os.path.basename(self.template.filename))[0], | ||
1540 | 1536 | lang, repr(string)) | ||
1541 | 1537 | continue | ||
1542 | 1538 | # workaround bug https://bugs.launchpad.net/rosetta/+bug/669831 | ||
1543 | 1539 | ustring = StringCvt().gettext2xtb(string['string']) | ||
1544 | 1540 | gt_translation = string['translation'][1:-1].replace('"\n"', '') | ||
1545 | 1541 | string['translation'] = StringCvt().gettext2xtb(string['translation']) | ||
1546 | 1542 | if string['translation'] != "": | ||
1547 | 1543 | while string['translation'][-1:] == '\n' and ustring[-1:] != '\n': | ||
1548 | 1544 | # prevent the `msgid' and `msgstr' entries do not both end with '\n' error | ||
1549 | 1545 | if self.debug: | ||
1550 | 1546 | print "Found unwanted \\n at the end of translation id " + id + " lang " + self.lang + ". Dropped" | ||
1551 | 1547 | string['translation'] = string['translation'][:-1] | ||
1552 | 1548 | while string['translation'][0] == '\n' and ustring[0] != '\n': | ||
1553 | 1549 | # prevent the `msgid' and `msgstr' entries do not both begin with '\n' error | ||
1554 | 1550 | if self.debug: | ||
1555 | 1551 | print "Found unwanted \\n at the begin of translation id " + id + " lang " + self.lang + ". Dropped" | ||
1556 | 1552 | string['translation'] = string['translation'][1:] | ||
1557 | 1553 | grit_str = StringCvt().decode_xml_entities(self.template.supported_ids[id]['ids'][0]['val']) | ||
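# note: the msgid-drift check below is intentionally disabled ('if False'),
# so strings whose upstream text changed are currently not skipped here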
1558 | 1554 | if False and 'ids' in self.template.supported_ids[id] and \ | ||
1559 | 1555 | ustring != grit_str: | ||
1560 | 1556 | # the string for this id is no longer the same, skip it | ||
1561 | 1557 | lp669831_skipped += 1 | ||
1562 | 1558 | if self.debug: | ||
1563 | 1559 | print "lp669831_skipped:\n lp: '%s'\n grd: '%s'" % (ustring, grit_str) | ||
1564 | 1560 | continue | ||
1565 | 1561 | # check for xml errors when '<' or '>' are in the string | ||
1566 | 1562 | if string['translation'].find('<') >= 0 or \ | ||
1567 | 1563 | string['translation'].find('>') >= 0: | ||
1568 | 1564 | try: | ||
1569 | 1565 | # try to parse it with minidom (it's slow!!), and skip if it fails | ||
1570 | 1566 | s = u"<x>" + string['translation'] + u"</x>" | ||
1571 | 1567 | dom = minidom.parseString(s.encode('utf-8')) | ||
1572 | 1568 | except Exception as inst: | ||
1573 | 1569 | print "Parse error in '%s/%s' for id %s. Skipped.\n%s\n%s" % \ | ||
1574 | 1570 | (os.path.splitext(os.path.basename(self.template.filename))[0], lang, id, | ||
1575 | 1571 | repr(string['translation']), inst) | ||
1576 | 1572 | continue | ||
1577 | 1573 | # if the upstream string is not empty, but the contributed string is, keep | ||
1578 | 1574 | # the upstream string untouched | ||
1579 | 1575 | if string['translation'] == '': | ||
1580 | 1576 | continue | ||
1581 | 1577 | # check if we have the same variables in both the upstream string and its | ||
1582 | 1578 | # translation. Otherwise, complain and reject the translation | ||
1583 | 1579 | if 'ids' in self.template.supported_ids[id]: | ||
1584 | 1580 | uvars = sorted([e for e in re.split('(<ph name=".*?"/>)', self.template.supported_ids[id]['ids'][0]['val']) \ | ||
1585 | 1581 | if re.match('^<ph name=".*?"/>$', e)]) | ||
1586 | 1582 | tvars = sorted([e for e in re.split('(<ph name=".*?"/>)', string['translation'])\ | ||
1587 | 1583 | if re.match('^<ph name=".*?"/>$', e)]) | ||
1588 | 1584 | lostvars = list(set(uvars).difference(set(tvars))) | ||
1589 | 1585 | createdvars = list(set(tvars).difference(set(uvars))) | ||
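# (placeholders appear as <ph name="URL"/> on the grit side and are rendered
# as %{URL} on the gettext side below; 'URL' is just an example name)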
1590 | 1586 | if len(lostvars) or len(createdvars): | ||
1591 | 1587 | template = os.path.splitext(os.path.basename(self.template.filename))[0].replace('_', '-') | ||
1592 | 1588 | self.errors += 1 | ||
1593 | 1589 | if self.html_output: | ||
1594 | 1590 | print "<div class='error'>[<a id='pherr-%s-%d' href='javascript:toggle(\"pherr-%s-%d\");'>+</a>] " \ | ||
1595 | 1591 | "<b>ERROR</b>: Found mismatching placeholder variables in string id %s of <b>%s</b> lang <b>%s</b>" % \ | ||
1596 | 1592 | (template, self.errors, template, self.errors, id, template, lang) | ||
1597 | 1593 | else: | ||
1598 | 1594 | print "ERROR: Found mismatching placeholder variables in string id %s of %s/%s:" % \ | ||
1599 | 1595 | (id, template, lang) | ||
1600 | 1596 | url = 'https://translations.launchpad.net/chromium-browser/translations/+pots/%s/%s/+translate?batch=10&show=all&search=%s' % \ | ||
1601 | 1597 | (template, self.template.mapped_langs[lang]['gettext'], urllib.quote(gt_translation.encode('utf-8'))) | ||
1602 | 1598 | if self.html_output: | ||
1603 | 1599 | print "<div id='pherr-%s-%d-t' style='display: none'>\n" \ | ||
1604 | 1600 | "<fieldset><legend>Details</legend><p><ul>" % (template, self.errors) | ||
1605 | 1601 | print "<li> <a href='%s'>this string in Launchpad</a>\n" % url | ||
1606 | 1602 | if len(lostvars): | ||
1607 | 1603 | print " <li> expected but not found: <code>%s</code>" % " ".join([ re.sub(r'<ph name="(.*?)"/>', r'%{\1}', s) for s in lostvars ]) | ||
1608 | 1604 | if len(createdvars): | ||
1609 | 1605 | print " <li> found but not expected: <code>%s</code>" % " ".join([ re.sub(r'<ph name="(.*?)"/>', r'%{\1}', s) for s in createdvars ]) | ||
1610 | 1606 | print "</ul><table border='1'>" \ | ||
1611 | 1607 | "<tr><th rowspan='2'>GetText</th><th>template</th><td><code>%s</code></td></tr>\n" \ | ||
1612 | 1608 | "<tr><th>translation</th><td><code>%s</code></td></tr>\n" \ | ||
1613 | 1609 | "<tr><th rowspan='2'>Grit</th><th>template</th><td><code>%s</code></td></tr>\n" \ | ||
1614 | 1610 | "<tr><th>translation</th><td><code>%s</code></td></tr>\n" \ | ||
1615 | 1611 | "</table><p> => <b>translation skipped</b>\n" % \ | ||
1616 | 1612 | (string['string'][1:-1].replace('"\n"', '').replace('<', '&lt;').replace('>', '&gt;'), | ||
1617 | 1613 | gt_translation.replace('<', '&lt;').replace('>', '&gt;'), | ||
1618 | 1614 | self.template.supported_ids[id]['ids'][0]['val'].replace('<', '&lt;').replace('>', '&gt;'), | ||
1619 | 1615 | string['translation'].replace('<', '&lt;').replace('>', '&gt;')) | ||
1620 | 1616 | print "</fieldset></div></div>" | ||
1621 | 1617 | else: | ||
1622 | 1618 | if len(lostvars): | ||
1623 | 1619 | print " - expected but not found: " + " ".join(lostvars) | ||
1624 | 1620 | if len(createdvars): | ||
1625 | 1621 | print " - found but not expected: " + " ".join(createdvars) | ||
1626 | 1622 | print " string: '%s'\n translation: '%s'\n gettext: '%s'\n url: %s\n => translation skipped\n" % \ | ||
1627 | 1623 | (self.template.supported_ids[id]['ids'][0]['val'], string['translation'], gt_translation, url) | ||
1628 | 1624 | continue | ||
1629 | 1625 | # check if the translated string is the same | ||
1630 | 1626 | if 'lang' in self.template.supported_ids[id] and \ | ||
1631 | 1627 | lang in self.template.supported_ids[id]['lang']: | ||
1632 | 1628 | # compare | ||
1633 | 1629 | if self.compare_translations(self.template.supported_ids[id]['lang'][lang], | ||
1634 | 1630 | string['translation'], id, lang): | ||
1635 | 1631 | continue # it's the same | ||
1636 | 1632 | if id in self.translations[lang].strings: | ||
1637 | 1633 | # already added from a previously merged gettext po file | ||
1638 | 1634 | if self.debug: | ||
1639 | 1635 | print "already added from a previously merged gettext po file for" + \ | ||
1640 | 1636 | " template %s %s id %s in lang %s: %s" % \ | ||
1641 | 1637 | (self.template.branch_name, self.template.filename, | ||
1642 | 1638 | id, lang, repr(string['translation'])) | ||
1643 | 1639 | # compare | ||
1644 | 1640 | if self.compare_translations(self.translations[lang].strings[id], | ||
1645 | 1641 | string['translation'], id, lang): | ||
1646 | 1642 | continue # it's the same | ||
1647 | 1643 | # update it.. | ||
1648 | 1644 | if self.debug: | ||
1649 | 1645 | print "updated string for template %s %s id %s in lang %s: %s" % \ | ||
1650 | 1646 | (self.template.branch_name, self.template.filename, id, lang, | ||
1651 | 1647 | repr(string['translation'])) | ||
1652 | 1648 | self.template.update_stats(lang, updated = 1) | ||
1653 | 1649 | self.translations[lang].strings[id] = string['translation'] | ||
1654 | 1650 | self.translations[lang].strings_updated += 1 | ||
1655 | 1651 | elif id in self.translations[lang].strings: | ||
1656 | 1652 | # already added from a previously merged gettext po file | ||
1657 | 1653 | if self.debug: | ||
1658 | 1654 | print "already added from a previously merged gettext po file for" + \ | ||
1659 | 1655 | "template %s %s id %s in lang %s: %s" % \ | ||
1660 | 1656 | (self.template.branch_name, self.template.filename, | ||
1661 | 1657 | id, lang, repr(string['translation'])) | ||
1662 | 1658 | # compare | ||
1663 | 1659 | if self.compare_translations(self.translations[lang].strings[id], | ||
1664 | 1660 | string['translation'], id, lang): | ||
1665 | 1661 | continue # it's the same | ||
1666 | 1662 | # update it.. | ||
1667 | 1663 | self.translations[lang].strings[id] = string['translation'] | ||
1668 | 1664 | else: | ||
1669 | 1665 | # add | ||
1670 | 1666 | if self.debug: | ||
1671 | 1667 | print "add new string for template %s %s id %s in lang %s: %s" % \ | ||
1672 | 1668 | (self.template.branch_name, self.template.filename, | ||
1673 | 1669 | id, lang, repr(string['translation'])) | ||
1674 | 1670 | self.template.update_stats(lang, new = 1) | ||
1675 | 1671 | self.translations[lang].strings[id] = string['translation'] | ||
1676 | 1672 | self.translations[lang].strings_new += 1 | ||
1677 | 1673 | if self.debug and lp669831_skipped > 0: | ||
1678 | 1674 | print "lp669831: skipped %s bogus/obsolete strings from %s" % \ | ||
1679 | 1675 | (lp669831_skipped, filename[filename[:filename.rfind('/')].rfind('/') + 1:]) | ||
1680 | 1676 | |||
1681 | 1677 | def import_gettext_po_files(self, directory): | ||
1682 | 1678 | fname = self.file_mapping['grd']['mapped_grd'] \ | ||
1683 | 1679 | if 'mapped_grd' in self.file_mapping['grd'] else self.template.filename | ||
1684 | 1680 | template_name = os.path.splitext(os.path.basename(fname))[0] | ||
1685 | 1681 | directory = os.path.join(directory, template_name) | ||
1686 | 1682 | if not os.path.isdir(directory): | ||
1687 | 1683 | if self.debug: | ||
1688 | 1684 | print "WARN: Launchpad didn't export anything for template '%s' [%s]" % (template_name, directory) | ||
1689 | 1685 | return | ||
1690 | 1686 | for file in os.listdir(directory): | ||
1691 | 1687 | base, ext = os.path.splitext(file) | ||
1692 | 1688 | if ext != ".po": | ||
1693 | 1689 | continue | ||
1694 | 1690 | # 'base' is a gettext lang, map it | ||
1695 | 1691 | lang = None | ||
1696 | 1692 | for l in self.template.mapped_langs: | ||
1697 | 1693 | if base == self.template.mapped_langs[l]['gettext']: | ||
1698 | 1694 | lang = l | ||
1699 | 1695 | break | ||
1700 | 1696 | if lang is None: # most probably a new lang, map back | ||
1701 | 1697 | lang = base.replace('_', '-') | ||
1702 | 1698 | for l in self.lang_mapping: | ||
1703 | 1699 | if lang == self.lang_mapping[l]: | ||
1704 | 1700 | lang = l | ||
1705 | 1701 | break | ||
1706 | 1702 | flang = lang.replace('@', '-') | ||
1707 | 1703 | self.template.mapped_langs[lang] = { 'xtb_file': flang, 'grit': lang, 'gettext': base } | ||
1708 | 1704 | self.import_gettext_po_file(lang, os.path.join(directory, file)) | ||
1709 | 1705 | # remove from the supported langs list all langs with no translated strings | ||
1710 | 1706 | # (to catch either empty 'po' files exported by Launchpad, or 'po' files | ||
1711 | 1707 | # containing only obsolete or too new strings for this branch) | ||
1712 | 1708 | dropped = [] | ||
1713 | 1709 | for lang in self.translations: | ||
1714 | 1710 | if len(self.translations[lang].strings.keys()) == 0: | ||
1715 | 1711 | if self.debug: | ||
1716 | 1712 | print "no translation found for template '%s' and lang '%s'. lang removed from the supported lang list" % \ | ||
1717 | 1713 | (os.path.splitext(os.path.basename(self.template.filename))[0], lang) | ||
1718 | 1714 | del(self.template.supported_langs[lang]) | ||
1719 | 1715 | dropped.append(lang) | ||
1720 | 1716 | for lang in dropped: | ||
1721 | 1717 | del(self.translations[lang]) | ||
1722 | 1718 | |||
1723 | 1719 | def copy_grit_files(self, directory): | ||
1724 | 1720 | fname = self.file_mapping['grd']['mapped_grd'] \ | ||
1725 | 1721 | if 'mapped_grd' in self.file_mapping['grd'] else self.template.filename | ||
1726 | 1722 | dst = os.path.join(directory, os.path.dirname(fname)) | ||
1727 | 1723 | if not os.path.isdir(dst): | ||
1728 | 1724 | os.makedirs(dst, 0755) | ||
1729 | 1725 | shutil.copy2(fname, dst) | ||
1730 | 1726 | for lang in self.template.supported_langs: | ||
1731 | 1727 | dst = os.path.join(directory, os.path.dirname(self.translations[lang].filename)) | ||
1732 | 1728 | if not os.path.isdir(dst): | ||
1733 | 1729 | os.makedirs(dst, 0755) | ||
1734 | 1730 | shutil.copy2(self.translations[lang].filename, dst) | ||
1735 | 1731 | |||
1736 | 1732 | def create_patches(self, directory): | ||
1737 | 1733 | if not os.path.isdir(directory): | ||
1738 | 1734 | os.makedirs(directory, 0755) | ||
1739 | 1735 | template_name = os.path.splitext(os.path.basename(self.template.filename))[0] | ||
1740 | 1736 | patch = codecs.open(os.path.join(directory, "translations-" + template_name + ".patch"), | ||
1741 | 1737 | "wb", encoding="utf-8") | ||
1742 | 1738 | for e in sorted(self.file_mapping.keys()): | ||
1743 | 1739 | if 'dst' not in self.file_mapping[e]: | ||
1744 | 1740 | self.file_mapping[e]['dst'] = None | ||
1745 | 1741 | if self.file_mapping[e]['src'] is not None and \ | ||
1746 | 1742 | self.file_mapping[e]['dst'] is not None and \ | ||
1747 | 1743 | filecmp.cmp(self.file_mapping[e]['src'], self.file_mapping[e]['dst']) == True: | ||
1748 | 1744 | continue # files are the same | ||
1749 | 1745 | |||
1750 | 1746 | if self.file_mapping[e]['src'] is not None: | ||
1751 | 1747 | fromfile = "old/" + self.file_mapping[e]['src'] | ||
1752 | 1748 | tofile = "new/" + self.file_mapping[e]['src'] | ||
1753 | 1749 | fromdate = datetime.fromtimestamp(self.template.get_mtime( | ||
1754 | 1750 | self.file_mapping[e]['src'])).strftime("%Y-%m-%d %H:%M:%S.%f000 +0000") | ||
1755 | 1751 | fromlines = codecs.open(self.file_mapping[e]['src'], 'rb', encoding="utf-8").readlines() | ||
1756 | 1752 | else: | ||
1757 | 1753 | fromfile = "old/" + self.file_mapping[e]['dst'][len(self.file_mapping['grd']['dir']) + 1:] | ||
1758 | 1754 | tofile = "new/" + self.file_mapping[e]['dst'][len(self.file_mapping['grd']['dir']) + 1:] | ||
1759 | 1755 | fromdate = datetime.fromtimestamp(0).strftime("%Y-%m-%d %H:%M:%S.%f000 +0000") | ||
1760 | 1756 | fromlines = "" | ||
1761 | 1757 | if self.file_mapping[e]['dst'] is not None: | ||
1762 | 1758 | todate = datetime.fromtimestamp(self.template.get_mtime( | ||
1763 | 1759 | self.file_mapping[e]['dst'])).strftime("%Y-%m-%d %H:%M:%S.%f000 +0000") | ||
1764 | 1760 | tolines = codecs.open(self.file_mapping[e]['dst'], 'rb', encoding="utf-8").readlines() | ||
1765 | 1761 | else: | ||
1766 | 1762 | todate = datetime.fromtimestamp(0).strftime("%Y-%m-%d %H:%M:%S.%f000 +0000") | ||
1767 | 1763 | tolines = "" | ||
1768 | 1764 | diff = unified_diff(fromlines, tolines, fromfile, tofile, | ||
1769 | 1765 | fromdate, todate, n=3) | ||
1770 | 1766 | patch.write("diff -Nur %s %s\n" % (fromfile, tofile)) | ||
1771 | 1767 | s = ''.join(diff) | ||
1772 | 1768 | # fix the diff so that older patch (<< 2.6) don't fail on new files | ||
1773 | 1769 | s = re.sub(r'@@ -1,0 ', '@@ -0,0 ', s) | ||
1774 | 1770 | # ..and make sure patch is able to detect a patch removing files | ||
1775 | 1771 | s = re.sub(r'(@@ \S+) \+1,0 @@', '\\1 +0,0 @@', s) | ||
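# e.g. '@@ -1,0 +1,12 @@' becomes '@@ -0,0 +1,12 @@' for a created file,
# and '@@ -1,12 +1,0 @@' becomes '@@ -1,12 +0,0 @@' for a removed one
# (hunk sizes here are illustrative)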
1776 | 1772 | patch.writelines(s) | ||
1777 | 1773 | if s[-1:] != '\n': | ||
1778 | 1774 | patch.write("\n\\ No newline at end of file\n") | ||
1779 | 1775 | patch.close() | ||
1780 | 1776 | |||
1781 | 1777 | def update_supported_langs_in_grd(self, grd_in, grd_out, langs): | ||
1782 | 1778 | fdi = codecs.open(grd_in, 'rb', encoding="utf-8") | ||
1783 | 1779 | fdo = codecs.open(grd_out, 'wb', encoding="utf-8") | ||
1784 | 1780 | # can't use minidom here as the file is manually generated and the | ||
1785 | 1781 | # output will create big diffs. parse the source file line by line | ||
1786 | 1782 | # and insert new langs in the <outputs> section (with type="data_package" | ||
1787 | 1783 | # or type="js_map_format"). Leave everything else untouched | ||
1788 | 1784 | # FIXME: this is mostly a copy of GrdFile::export_file() | ||
1789 | 1785 | pak_found = False | ||
1790 | 1786 | pak_saved = [] | ||
1791 | 1787 | has_ifs = False | ||
1792 | 1788 | for line in fdi.readlines(): | ||
1793 | 1789 | if re.match(r'.*?<output filename=".*?" type="(data_package|js_map_format)"', line): | ||
1794 | 1790 | pak_found = True | ||
1795 | 1791 | pak_saved.append(line) | ||
1796 | 1792 | continue | ||
1797 | 1793 | if line.find('</outputs>') > 0: | ||
1798 | 1794 | pak_found = False | ||
1799 | 1795 | ours = langs[:] | ||
1800 | 1796 | chunks = {} | ||
1801 | 1797 | c = None | ||
1802 | 1798 | pak_if = None | ||
1803 | 1799 | pak_is_in_if = False | ||
1804 | 1800 | for l in pak_saved: | ||
1805 | 1801 | if l.find("<!-- ") > 0: | ||
1806 | 1802 | c = l | ||
1807 | 1803 | continue | ||
1808 | 1804 | if l.find("<if ") > -1: | ||
1809 | 1805 | c = l if c is None else c + l | ||
1810 | 1806 | has_ifs = True | ||
1811 | 1807 | pak_is_in_if = True | ||
1812 | 1808 | continue | ||
1813 | 1809 | if l.find("</if>") > -1: | ||
1814 | 1810 | c = l if c is None else c + l | ||
1815 | 1811 | pak_is_in_if = False | ||
1816 | 1812 | continue | ||
1817 | 1813 | m = re.match(r'.*?<output filename="(.*?)_([^_\.]+)\.(pak|js)" type="(data_package|js_map_format)" lang="(.*?)" />', l) | ||
1818 | 1814 | if m is not None: | ||
1819 | 1815 | x = { 'name': m.group(1), 'ext': m.group(3), 'lang': m.group(5), 'file_lang': m.group(2), | ||
1820 | 1816 | 'type': m.group(4), 'in_if': pak_is_in_if, 'line': l } | ||
1821 | 1817 | if c is not None: | ||
1822 | 1818 | x['comment'] = c | ||
1823 | 1819 | c = None | ||
1824 | 1820 | k = m.group(2) if m.group(2) != 'nb' else 'no' | ||
1825 | 1821 | chunks[k] = x | ||
1826 | 1822 | else: | ||
1827 | 1823 | if c is None: | ||
1828 | 1824 | c = l | ||
1829 | 1825 | else: | ||
1830 | 1826 | c += l | ||
1831 | 1827 | is_in_if = False | ||
1832 | 1828 | for lang in sorted(chunks.keys()): | ||
1833 | 1829 | tlang = lang if lang != 'no' else 'nb' | ||
1834 | 1830 | while len(ours) > 0 and ((ours[0] == 'nb' and 'no' < tlang) or (ours[0] != 'nb' and ours[0] < tlang)): | ||
1835 | 1831 | if ours[0] in chunks: | ||
1836 | 1832 | ours = ours[1:] | ||
1837 | 1833 | continue | ||
1838 | 1834 | if has_ifs and is_in_if is False: | ||
1839 | 1835 | fdo.write(' <if expr="pp_ifdef(\'use_third_party_translations\')">\n') | ||
1840 | 1836 | f = "%s_%s.%s" % (chunks[lang]['name'], ours[0], chunks[lang]['ext']) | ||
1841 | 1837 | fdo.write(' %s<output filename="%s" type="%s" lang="%s" />\n' % \ | ||
1842 | 1838 | (' ' if has_ifs else '', f, chunks[lang]['type'], ours[0])) | ||
1843 | 1839 | is_in_if = True | ||
1844 | 1840 | if has_ifs and chunks[lang]['in_if'] is False: | ||
1845 | 1841 | if 'comment' not in chunks[lang] or chunks[lang]['comment'].find('</if>') == -1: | ||
1846 | 1842 | fdo.write(' </if>\n') | ||
1847 | 1843 | is_in_if = False | ||
1848 | 1844 | ours = ours[1:] | ||
1849 | 1845 | if 'comment' in chunks[lang]: | ||
1850 | 1846 | for s in chunks[lang]['comment'].split('\n')[:-1]: | ||
1851 | 1847 | if chunks[lang]['in_if'] is True and is_in_if and s.find('<if ') > -1: | ||
1852 | 1848 | continue | ||
1853 | 1849 | if s.find('<!-- No translations available. -->') > -1: | ||
1854 | 1850 | continue | ||
1855 | 1851 | fdo.write(s + '\n') | ||
1856 | 1852 | fdo.write(chunks[lang]['line']) | ||
1857 | 1853 | if ours[0] == tlang: | ||
1858 | 1854 | ours = ours[1:] | ||
1859 | 1855 | is_in_if = chunks[lang]['in_if'] | ||
1860 | 1856 | if len(chunks.keys()) > 0: | ||
1861 | 1857 | while len(ours) > 0: | ||
1862 | 1858 | f = "%s_%s.%s" % (chunks[lang]['name'], ours[0], chunks[lang]['ext']) | ||
1863 | 1859 | if has_ifs and is_in_if is False: | ||
1864 | 1860 | fdo.write(' <if expr="pp_ifdef(\'use_third_party_translations\')">\n') | ||
1865 | 1861 | fdo.write(' %s<output filename="%s" type="data_package" lang="%s" />\n' % \ | ||
1866 | 1862 | (' ' if has_ifs else '', f, ours[0])) | ||
1867 | 1863 | is_in_if = True | ||
1868 | 1864 | ours = ours[1:] | ||
1869 | 1865 | if has_ifs and is_in_if: | ||
1870 | 1866 | fdo.write(' </if>\n') | ||
1871 | 1867 | is_in_if = False | ||
1872 | 1868 | if c is not None: | ||
1873 | 1869 | for s in c.split('\n')[:-1]: | ||
1874 | 1870 | if s.find('<!-- No translations available. -->') > -1: | ||
1875 | 1871 | continue | ||
1876 | 1872 | if s.find('</if>') > -1: | ||
1877 | 1873 | continue | ||
1878 | 1874 | fdo.write(s + '\n') | ||
1879 | 1875 | if pak_found: | ||
1880 | 1876 | pak_saved.append(line) | ||
1881 | 1877 | continue | ||
1882 | 1878 | fdo.write(line) | ||
1883 | 1879 | fdi.close() | ||
1884 | 1880 | fdo.close() | ||
1885 | 1881 | |||
1886 | 1882 | def create_build_gyp_patch(self, directory, build_gyp_file, other_grd_files, nlangs, | ||
1887 | 1883 | whitelisted_new_langs = None): | ||
1888 | 1884 | # read the list of langs supported upstream | ||
1889 | 1885 | fd = open(build_gyp_file, "r") | ||
1890 | 1886 | data = fd.read() | ||
1891 | 1887 | fd.close() | ||
1892 | 1888 | r = data[data.find("'locales':"):] | ||
1893 | 1889 | olangs = sorted(re.findall("'(.*?)'", r[r.find('['):r.find(']')])) | ||
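# e.g. given a gyp snippet such as (locale values illustrative)
#   'locales': [ 'am', 'ar', 'bg' ],
# this yields olangs == ['am', 'ar', 'bg']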
1894 | 1890 | # check for an optional use_third_party_translations list of locales | ||
1895 | 1891 | tpt = data.find('use_third_party_translations==1') | ||
1896 | 1892 | if tpt > 0: | ||
1897 | 1893 | tpt += data[tpt:].find("'locales':") | ||
1898 | 1894 | r = data[tpt:] | ||
1899 | 1895 | tptlangs = sorted(re.findall("'(.*?)'", r[r.find('['):r.find(']')])) | ||
1900 | 1896 | if nlangs == sorted(tptlangs + olangs): | ||
1901 | 1897 | return tptlangs | ||
1902 | 1898 | else: | ||
1903 | 1899 | if nlangs == olangs: | ||
1904 | 1900 | return [] | ||
1905 | 1901 | # check if we need to only activate some whitelisted new langs | ||
1906 | 1902 | xlangs = None | ||
1907 | 1903 | nnlangs = [ x for x in nlangs if x not in olangs ] | ||
1908 | 1904 | if whitelisted_new_langs is not None: | ||
1909 | 1905 | if tpt > 0: | ||
1910 | 1906 | nlangs = [ x for x in nlangs if x not in olangs and x in whitelisted_new_langs ] | ||
1911 | 1907 | else: | ||
1912 | 1908 | xlangs = [ x for x in nlangs if x not in olangs and x not in whitelisted_new_langs ] | ||
1913 | 1909 | nlangs = [ x for x in nlangs if x in olangs or x in whitelisted_new_langs ] | ||
1914 | 1910 | elif tpt > 0: | ||
1915 | 1911 | nlangs = [ x for x in nlangs if x not in olangs ] | ||
1916 | 1912 | |||
1917 | 1913 | # we need a patch | ||
1918 | 1914 | if tpt > 0: | ||
1919 | 1915 | pos = tpt + data[tpt:].find('[') | ||
1920 | 1916 | end = data[:pos + 1] | ||
1921 | 1917 | ndata = end[:] | ||
1922 | 1918 | else: | ||
1923 | 1919 | pos = data.find("'locales':") | ||
1924 | 1920 | begin = data[pos:] | ||
1925 | 1921 | end = data[:pos + begin.find('\n')] | ||
1926 | 1922 | ndata = end[:] | ||
1927 | 1923 | end = data[pos + data[pos:].find(']'):] | ||
1928 | 1924 | |||
1929 | 1925 | # list of langs, folded | ||
1930 | 1926 | if len(nlangs) > 9: | ||
1931 | 1927 | ndata += '\n' + \ | ||
1932 | 1928 | '\n'.join(textwrap.wrap("'" + "', '".join(nlangs) + "'", | ||
1933 | 1929 | break_long_words=False, width=76, | ||
1934 | 1930 | drop_whitespace=False, | ||
1935 | 1931 | expand_tabs=False, | ||
1936 | 1932 | replace_whitespace=False, | ||
1937 | 1933 | initial_indent=' ', | ||
1938 | 1934 | subsequent_indent=' ', | ||
1939 | 1935 | break_on_hyphens=False)) + '\n ' | ||
1940 | 1936 | else: | ||
1941 | 1937 | ndata += "'%s'" % "', '".join(nlangs) | ||
1942 | 1938 | |||
1943 | 1939 | ndata += end | ||
1944 | 1940 | |||
1945 | 1941 | # write the patch | ||
1946 | 1942 | fromfile = "old/" + build_gyp_file | ||
1947 | 1943 | tofile = "new/" + build_gyp_file | ||
1948 | 1944 | fromdate = datetime.fromtimestamp(self.template.get_mtime(build_gyp_file)).strftime("%Y-%m-%d %H:%M:%S.%f000 +0000") | ||
1949 | 1945 | fromlines = [ x for x in re.split('(.*\n?)', data) if x != '' ] | ||
1950 | 1946 | todate = datetime.now().strftime("%Y-%m-%d %H:%M:%S.%f000 +0000") | ||
1951 | 1947 | tolines = [ x for x in re.split('(.*\n?)', ndata) if x != '' ] | ||
1952 | 1948 | patch = codecs.open(os.path.join(directory, "build.patch"), "wb", encoding="utf-8") | ||
1953 | 1949 | diff = unified_diff(fromlines, tolines, fromfile, tofile, fromdate, todate, n=3) | ||
1954 | 1950 | patch.write("diff -Nur %s %s\n" % (fromfile, tofile)) | ||
1955 | 1951 | patch.writelines(''.join(diff)) | ||
1956 | 1952 | |||
1957 | 1953 | for grd in other_grd_files: | ||
1958 | 1954 | grd_out = os.path.join(directory, os.path.basename(grd)) | ||
1959 | 1955 | self.update_supported_langs_in_grd(grd, grd_out, nlangs) | ||
1960 | 1956 | if filecmp.cmp(grd, grd_out) == True: | ||
1961 | 1957 | os.unlink(grd_out) | ||
1962 | 1958 | continue # files are the same | ||
1963 | 1959 | # add it to the patch | ||
1964 | 1960 | fromfile = "old/" + grd | ||
1965 | 1961 | tofile = "new/" + grd | ||
1966 | 1962 | fromdate = datetime.fromtimestamp(self.template.get_mtime(grd)).strftime("%Y-%m-%d %H:%M:%S.%f000 +0000") | ||
1967 | 1963 | fromlines = codecs.open(grd, 'rb', encoding="utf-8").readlines() | ||
1968 | 1964 | todate = datetime.now().strftime("%Y-%m-%d %H:%M:%S.%f000 +0000") | ||
1969 | 1965 | tolines = codecs.open(grd_out, 'rb', encoding="utf-8").readlines() | ||
1970 | 1966 | diff = unified_diff(fromlines, tolines, fromfile, tofile, fromdate, todate, n=3) | ||
1971 | 1967 | patch.write("diff -Nur %s %s\n" % (fromfile, tofile)) | ||
1972 | 1968 | patch.writelines(''.join(diff)) | ||
1973 | 1969 | os.unlink(grd_out) | ||
1974 | 1970 | patch.close() | ||
1975 | 1971 | return nnlangs | ||
1976 | 1972 | |||
1977 | 1973 | def usage(): | ||
1978 | 1974 | print """ | ||
1979 | 1975 | Usage: %s [options] [grd_file [more_grd_files]] | ||
1980 | 1976 | |||
1981 | 1977 | Convert Chromium translation files (grd/xtb) into gettext files (pot/po) and back | ||
1982 | 1978 | |||
1983 | 1979 | options can be: | ||
1984 | 1980 | -d | --debug debug mode | ||
1985 | 1981 | -v | --verbose verbose mode | ||
1986 | 1982 | -h | --help this help screen | ||
1987 | 1983 | |||
1988 | 1984 | --export-gettext dir | ||
1989 | 1985 | export pot/po gettext files to dir | ||
1990 | 1986 | |||
1991 | 1987 | --import-gettext dir[,dir2][...] | ||
1992 | 1988 | import gettext pot/po files from those directories. | ||
1993 | 1989 | Directories must be ordered from the oldest to | ||
1994 | 1990 | the freshest. Only strings different from the grit | ||
1995 | 1991 | (upstream) translations are considered. | ||
1996 | 1992 | |||
1997 | 1993 | --import-grit-branch name:dir:grd1[,grd2,...] | ||
1998 | 1994 | import the Grit files for this branch from this | ||
1999 | 1995 | directory. --import-grit-branch may be used several | ||
2000 | 1996 | times; branches must then be specified from the | ||
2001 | 1997 | freshest (trunk) to the most stable ones. | ||
2002 | 1998 | The default value is trunk:<cwd> | ||
2003 | 1999 | Note: must not be used along with --export-grit | ||
2004 | 2000 | |||
2005 | 2001 | --export-grit dir | ||
2006 | 2002 | export grd/xtb grit files to dir | ||
2007 | 2003 | |||
2008 | 2004 | --copy-grit dir copy the src grit files containing strings to dir | ||
2009 | 2005 | (useful to create diffs after --export-grit) | ||
2010 | 2006 | |||
2011 | 2007 | --whitelisted-new-langs lang1[,lang2][..] | ||
2012 | 2008 | comma separated list of new langs that have to be enabled | ||
2013 | 2009 | (assuming they have some strings translated). The default | ||
2014 | 2010 | is to enable all new langs, but for stable builds, a good | ||
2015 | 2011 | enough coverage is preferred | ||
2016 | 2012 | |||
2017 | 2013 | --create-patches dir | ||
2018 | 2014 | create unified patches per template in dir | ||
2019 | 2015 | (only useful after --export-grit) | ||
2020 | 2016 | |||
2021 | 2017 | --build-gyp-file file | ||
2022 | 2018 | location of the build/common.gypi file, used only | ||
2023 | 2019 | with --create-patches to add all new langs | ||
2024 | 2020 | for which we merged translated strings | ||
2025 | 2021 | |||
2026 | 2022 | --other-grd-files file1[,file2][..] | ||
2027 | 2023 | comma separated list of grd files to also patch | ||
2028 | 2024 | to add new langs for (see --build-gyp-file) | ||
2029 | 2025 | |||
2030 | 2026 | --html-output produce some nice HTML as output (on stdout) | ||
2031 | 2027 | |||
2032 | 2028 | --json-branches-info file | ||
2033 | 2029 | location of a json file containing the url, revision | ||
2034 | 2030 | and last date of both the upstream branch and | ||
2035 | 2031 | launchpad export. Optionally used in the html output | ||
2036 | 2032 | |||
2037 | 2033 | --map-template-names new1=old1[,new2=old2][...] | ||
2038 | 2034 | comma separated list of template names mappings. | ||
2039 | 2035 | It is useful when upstream renames a grd file in a branch | ||
2040 | 2036 | to preserve the old name in gettext for the older branches | ||
2041 | 2037 | |||
2042 | 2038 | --landable-templates template1[,template2][...] | ||
2043 | 2039 | comma separated list of templates that are landable upstream | ||
2044 | 2040 | for all langs | ||
2045 | 2041 | |||
2046 | 2042 | --unlandable-templates template1[,template2][...] | ||
2047 | 2043 | comma separated list of templates that are not landable upstream, | ||
2048 | 2044 | even for new langs | ||
2049 | 2045 | |||
2050 | 2046 | --test-strcvt run the grit2gettext2grit checker | ||
2051 | 2047 | --test-conditions run the conditions evaluation checker | ||
2052 | 2048 | |||
2053 | 2049 | """ % sys.argv[0].rpartition('/')[2] | ||
2054 | 2050 | |||
2055 | 2051 | if '__main__' == __name__: | ||
2056 | 2052 | sys.stdout = codecs.getwriter('utf8')(sys.stdout) | ||
2057 | 2053 | try: | ||
2058 | 2054 | opts, args = getopt.getopt(sys.argv[1:], "dhv", | ||
2059 | 2055 | [ "test-strcvt", "test-conditions", "debug", "verbose", "help", "copy-grit=", | ||
2060 | 2056 | "import-grit-branch=", "export-gettext=", "import-gettext=", "export-grit=", | ||
2061 | 2057 | "create-patches=", "build-gyp-file=", "other-grd-files=", | ||
2062 | 2058 | "landable-templates=", "unlandable-templates=", "map-template-names=", | ||
2063 | 2059 | "whitelisted-new-langs=", "html-output", "json-branches-info=" ]) | ||
2064 | 2060 | except getopt.GetoptError, err: | ||
2065 | 2061 | print str(err) | ||
2066 | 2062 | usage() | ||
2067 | 2063 | sys.exit(2) | ||
2068 | 2064 | |||
2069 | 2065 | verbose = False | ||
2070 | 2066 | debug = False | ||
2071 | 2067 | html_output = False | ||
2072 | 2068 | outdir = None | ||
2073 | 2069 | indir = None | ||
2074 | 2070 | export_gettext = None | ||
2075 | 2071 | import_gettext = None | ||
2076 | 2072 | export_grit = None | ||
2077 | 2073 | copy_grit = None | ||
2078 | 2074 | create_patches = None | ||
2079 | 2075 | build_gyp_file = None | ||
2080 | 2076 | json_info = None | ||
2081 | 2077 | other_grd_files = [] | ||
2082 | 2078 | whitelisted_new_langs = None | ||
2083 | 2079 | templatenames_mapping = {} | ||
2084 | 2080 | landable_templates = [] | ||
2085 | 2081 | unlandable_templates = [] | ||
2086 | 2082 | branches = None | ||
2087 | 2083 | nbranches = [] | ||
2088 | 2084 | for o, a in opts: | ||
2089 | 2085 | if o in ("-v", "--verbose"): | ||
2090 | 2086 | verbose = True | ||
2091 | 2087 | elif o in ("-h", "--help"): | ||
2092 | 2088 | usage() | ||
2093 | 2089 | sys.exit() | ||
2094 | 2090 | elif o in ("-d", "--debug"): | ||
2095 | 2091 | debug = True | ||
2096 | 2092 | elif o == "--import-grit-branch": | ||
2097 | 2093 | if branches is None: | ||
2098 | 2094 | branches = {} | ||
2099 | 2095 | branch, dir, grds = a.split(':') | ||
2100 | 2096 | for grd in grds.split(','): | ||
2101 | 2097 | name = os.path.basename(grd) | ||
2102 | 2098 | if name not in branches: | ||
2103 | 2099 | branches[name] = [] | ||
2104 | 2100 | branches[name].append({ 'branch': branch, 'dir': dir, 'grd': grd }) | ||
2105 | 2101 | if branch not in nbranches: | ||
2106 | 2102 | nbranches.append(branch) | ||
2107 | 2103 | elif o == "--export-gettext": | ||
2108 | 2104 | export_gettext = a | ||
2109 | 2105 | elif o == "--import-gettext": | ||
2110 | 2106 | import_gettext = a.split(",") | ||
2111 | 2107 | elif o == "--export-grit": | ||
2112 | 2108 | export_grit = a | ||
2113 | 2109 | elif o == "--copy-grit": | ||
2114 | 2110 | copy_grit = a | ||
2115 | 2111 | elif o == "--whitelisted-new-langs": | ||
2116 | 2112 | whitelisted_new_langs = a.split(",") | ||
2117 | 2113 | elif o == "--create-patches": | ||
2118 | 2114 | create_patches = a | ||
2119 | 2115 | elif o == "--build-gyp-file": | ||
2120 | 2116 | build_gyp_file = a | ||
2121 | 2117 | elif o == "--other-grd-files": | ||
2122 | 2118 | other_grd_files = a.split(',') | ||
2123 | 2119 | elif o == "--html-output": | ||
2124 | 2120 | html_output = True | ||
2125 | 2121 | elif o == "--json-branches-info": | ||
2126 | 2122 | json_info = a | ||
2127 | 2123 | elif o == "--landable-templates": | ||
2128 | 2124 | landable_templates = a.split(",") | ||
2129 | 2125 | elif o == "--unlandable-templates": | ||
2130 | 2126 | unlandable_templates = a.split(",") | ||
2131 | 2127 | elif o == "--map-template-names": | ||
2132 | 2128 | for c in a.split(','): | ||
2133 | 2129 | x = c.split('=') | ||
2134 | 2130 | templatenames_mapping[x[0]] = x[1] | ||
2135 | 2131 | elif o == "--test-strcvt": | ||
2136 | 2132 | StringCvt().test() | ||
2137 | 2133 | sys.exit() | ||
2138 | 2134 | elif o == "--test-conditions": | ||
2139 | 2135 | EvalConditions().test() | ||
2140 | 2136 | sys.exit() | ||
2141 | 2137 | else: | ||
2142 | 2138 | assert False, "unhandled option" | ||
2143 | 2139 | |||
2144 | 2140 | if branches is None and len(args) != 0: | ||
2145 | 2141 | branches = {} | ||
2146 | 2142 | for arg in args: | ||
2147 | 2143 | branches[os.path.basename(arg)] = [ { 'branch': 'default', 'dir': os.getcwd(), 'grd': arg } ] | ||
2148 | 2144 | if branches is None: | ||
2149 | 2145 | print "Please specify at least one grd file or use --import-grit-branch" | ||
2150 | 2146 | usage() | ||
2151 | 2147 | sys.exit(2) | ||
2152 | 2148 | |||
2153 | 2149 | # re-map the templates, if needed | ||
2154 | 2150 | for grd in templatenames_mapping.keys(): | ||
2155 | 2151 | new = os.path.basename(grd) | ||
2156 | 2152 | old = os.path.basename(templatenames_mapping[grd]) | ||
2157 | 2153 | if new in branches: | ||
2158 | 2154 | if old not in branches: | ||
2159 | 2155 | branches[old] = [] | ||
2160 | 2156 | for branch in branches[new]: | ||
2161 | 2157 | branch['mapped_grd'] = old | ||
2162 | 2158 | branches[old].extend(branches[new]) | ||
2163 | 2159 | # re-sort the branches | ||
2164 | 2160 | branches[old] = sorted(branches[old], lambda x,y: cmp(nbranches.index(x['branch']), nbranches.index(y['branch']))) | ||
2165 | 2161 | del(branches[new]) | ||
2166 | 2162 | |||
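To illustrate the re-mapping block above, a minimal sketch with invented data (paths and branch names are hypothetical): with --map-template-names ui/base/strings/ui_strings.grd=app/resources/app_strings.grd, entries registered under the new basename are folded into the old one, tagged with 'mapped_grd', re-sorted by branch order, and the new key is dropped:

    # sketch of the fold-in, assuming the hypothetical inputs below
    nbranches = ['trunk', 'stable']
    branches = {
      'ui_strings.grd':  [{'branch': 'trunk',  'dir': '/src', 'grd': 'ui/base/strings/ui_strings.grd'}],
      'app_strings.grd': [{'branch': 'stable', 'dir': '/src', 'grd': 'app/resources/app_strings.grd'}],
    }
    # after the loop, only 'app_strings.grd' remains, holding both entries
    # (trunk first, per nbranches order); the trunk entry now carries
    # 'mapped_grd': 'app_strings.grd'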
2167 | 2163 | if html_output: | ||
2168 | 2164 | print """\ | ||
2169 | 2165 | <html> | ||
2170 | 2166 | <head><meta charset="UTF-8"> | ||
2171 | 2167 | </head><body> | ||
2172 | 2168 | <style type="text/css"> | ||
2173 | 2169 | body { | ||
2174 | 2170 | font-family: UbuntuBeta,Ubuntu,"Bitstream Vera Sans","DejaVu Sans",Tahoma,sans-serif; | ||
2175 | 2171 | } | ||
2176 | 2172 | div#legend { | ||
2177 | 2173 | float: left; | ||
2178 | 2174 | } | ||
2179 | 2175 | fieldset { | ||
2180 | 2176 | border-width: 1px; | ||
2181 | 2177 | border-color: #f0f0f0; | ||
2182 | 2178 | } | ||
2183 | 2179 | div#legend fieldset, div#branches fieldset { | ||
2184 | 2180 | border-width: 0px; | ||
2185 | 2181 | } | ||
2186 | 2182 | legend { | ||
2187 | 2183 | font-size: 80%; | ||
2188 | 2184 | } | ||
2189 | 2185 | div#branches { | ||
2190 | 2186 | float: left; | ||
2191 | 2187 | padding-left: 40px; | ||
2192 | 2188 | } | ||
2193 | 2189 | div#branches td { | ||
2194 | 2190 | padding-right: 5px; | ||
2195 | 2191 | } | ||
2196 | 2192 | div#stats { | ||
2197 | 2193 | padding-top: 5px; | ||
2198 | 2194 | clear: both; | ||
2199 | 2195 | } | ||
2200 | 2196 | a { | ||
2201 | 2197 | text-decoration: none; | ||
2202 | 2198 | } | ||
2203 | 2199 | a.l:link, a.l:visited { | ||
2204 | 2200 | color: black; | ||
2205 | 2201 | } | ||
2206 | 2202 | .error { | ||
2207 | 2203 | font-size: 90%; | ||
2208 | 2204 | } | ||
2209 | 2205 | div.error a { | ||
2210 | 2206 | font-family: monospace; | ||
2211 | 2207 | font-size: 120%; | ||
2212 | 2208 | } | ||
2213 | 2209 | table { | ||
2214 | 2210 | border-collapse: collapse; | ||
2215 | 2211 | border-spacing: 1px; | ||
2216 | 2212 | font-size: 0.9em; | ||
2217 | 2213 | } | ||
2218 | 2214 | th { | ||
2219 | 2215 | font-weight: bold; | ||
2220 | 2216 | color: #666; | ||
2221 | 2217 | padding-right: 5px; | ||
2222 | 2218 | } | ||
2223 | 2219 | th, td { | ||
2224 | 2220 | border: 1px #d2d2d2; | ||
2225 | 2221 | border-style: solid; | ||
2226 | 2222 | padding-left: 4px; | ||
2227 | 2223 | padding-top: 0px; | ||
2228 | 2224 | padding-bottom: 0px; | ||
2229 | 2225 | } | ||
2230 | 2226 | td.d { | ||
2231 | 2227 | font-size: 90%; | ||
2232 | 2228 | text-align: right; | ||
2233 | 2229 | } | ||
2234 | 2230 | td.n { | ||
2235 | 2231 | background: #FFA; | ||
2236 | 2232 | } | ||
2237 | 2233 | .lang { | ||
2238 | 2234 | font-weight: bold; | ||
2239 | 2235 | padding-left: 0.5em; | ||
2240 | 2236 | padding-right: 0.5em; | ||
2241 | 2237 | white-space: nowrap; | ||
2242 | 2238 | } | ||
2243 | 2239 | .progress_bar { | ||
2244 | 2240 | width: 100px; overflow: hidden; position: relative; padding: 0px; | ||
2245 | 2241 | } | ||
2246 | 2242 | .pb_label { | ||
2247 | 2243 | text-align: center; width: 100%; | ||
2248 | 2244 | position: absolute; z-index: 1001; left: 4px; top: -2px; color: white; font-size: 0.7em; | ||
2249 | 2245 | } | ||
2250 | 2246 | .pb_label2 { | ||
2251 | 2247 | text-align: center; width: 100%; | ||
2252 | 2248 | position: absolute; z-index: 1000; left: 5px; top: -1px; color: black; font-size: 0.7em; | ||
2253 | 2249 | } | ||
2254 | 2250 | .green_gradient { | ||
2255 | 2251 | height: 1em; position: relative; float: left; | ||
2256 | 2252 | background: #00ff00; | ||
2257 | 2253 | background: -moz-linear-gradient(top, #00ff00, #007700); | ||
2258 | 2254 | background: -webkit-gradient(linear, left top, left bottom, from(#00ff00), to(#007700)); | ||
2259 | 2255 | filter: progid:DXImageTransform.Microsoft.Gradient(StartColorStr='#00ff00', EndColorStr='#007700', GradientType=0); | ||
2260 | 2256 | } | ||
2261 | 2257 | .red_gradient { | ||
2262 | 2258 | height: 1em; position: relative; float: left; | ||
2263 | 2259 | background: #ff8888; | ||
2264 | 2260 | background: -moz-linear-gradient(top, #ff8888, #771111); | ||
2265 | 2261 | background: -webkit-gradient(linear, left top, left bottom, from(#ff8888), to(#771111)); | ||
2266 | 2262 | filter: progid:DXImageTransform.Microsoft.Gradient(StartColorStr='#ff8888', EndColorStr='#771111', GradientType=0); | ||
2267 | 2263 | } | ||
2268 | 2264 | .blue_gradient { | ||
2269 | 2265 | height: 1em; position: relative; float: left; | ||
2270 | 2266 | background: #62b0dd; | ||
2271 | 2267 | background: -moz-linear-gradient(top, #62b0dd, #1f3d4a); | ||
2272 | 2268 | background: -webkit-gradient(linear, left top, left bottom, from(#62b0dd), to(#1f3d4a)); | ||
2273 | 2269 | filter: progid:DXImageTransform.Microsoft.Gradient(StartColorStr='#62b0dd', EndColorStr='#1f3d4a', GradientType=0); | ||
2274 | 2270 | } | ||
2275 | 2271 | .purple_gradient { | ||
2276 | 2272 | height: 1em; position: relative; float: left; | ||
2277 | 2273 | background: #b8a4ba; | ||
2278 | 2274 | background: -moz-linear-gradient(top, #b8a4ba, #5c3765); | ||
2279 | 2275 | background: -webkit-gradient(linear, left top, left bottom, from(#b8a4ba), to(#5c3765)); | ||
2280 | 2276 | filter: progid:DXImageTransform.Microsoft.Gradient(StartColorStr='#b8a4ba', EndColorStr='#5c3765', GradientType=0); | ||
2281 | 2277 | } | ||
2282 | 2278 | </style> | ||
2283 | 2279 | <script type="text/javascript" language="javascript"> | ||
2284 | 2280 | function progress_bar(where, red, green, purple, blue) { | ||
2285 | 2281 | var total = green + red + blue + purple; | ||
2286 | 2282 | if (total == 0) | ||
2287 | 2283 | total = 1; | ||
2288 | 2284 | var d = document.getElementById(where); | ||
2289 | 2285 | var v = 100 * (1 - (red / total)); | ||
2290 | 2286 | if (total != 1) { | ||
2291 | 2287 | d.innerHTML += '<div class="pb_label">' + v.toFixed(1) + "%</div>"; | ||
2292 | 2288 | d.innerHTML += '<div class="pb_label2">' + v.toFixed(1) + "%</div>"; | ||
2293 | 2289 | } | ||
2294 | 2290 | else | ||
2295 | 2291 | d.style.width = "25px"; | ||
2296 | 2292 | var pgreen = parseInt(100 * green / total); | ||
2297 | 2293 | var pblue = parseInt(100 * blue / total); | ||
2298 | 2294 | var ppurple = parseInt(100 * purple / total); | ||
2299 | 2295 | var pred = parseInt(100 * red / total); | ||
2300 | 2296 | if (pgreen + pblue + ppurple + pred != 100) { | ||
2301 | 2297 | if (red > 0) | ||
2302 | 2298 | pred = 100 - pgreen - pblue - ppurple; | ||
2303 | 2299 | else if (purple > 0) | ||
2304 | 2300 | ppurple = 100 - pgreen - pblue; | ||
2305 | 2301 | else if (blue > 0) | ||
2306 | 2302 | pblue = 100 - pgreen; | ||
2307 | 2303 | else | ||
2308 | 2304 | pgreen = 100; | ||
2309 | 2305 | } | ||
2310 | 2306 | if (green > 0) | ||
2311 | 2307 | d.innerHTML += '<div class="green_gradient" style="width:' + pgreen + '%;"></div>'; | ||
2312 | 2308 | if (blue > 0) | ||
2313 | 2309 | d.innerHTML += '<div class="blue_gradient" style="width:' + pblue + '%;"></div>'; | ||
2314 | 2310 | if (purple > 0) | ||
2315 | 2311 | d.innerHTML += '<div class="purple_gradient" style="width:' + ppurple + '%;"></div>'; | ||
2316 | 2312 | if (red > 0) | ||
2317 | 2313 | d.innerHTML += '<div class="red_gradient" style="width:' + pred + '%;"></div>'; | ||
2318 | 2314 | return true; | ||
2319 | 2315 | } | ||
2320 | 2316 | |||
2321 | 2317 | function toggle(e) { | ||
2322 | 2318 | var elt = document.getElementById(e + "-t"); | ||
2323 | 2319 | var text = document.getElementById(e); | ||
2324 | 2320 | if (elt.style.display == "block") { | ||
2325 | 2321 | elt.style.display = "none"; | ||
2326 | 2322 | text.innerHTML = "+"; | ||
2327 | 2323 | } | ||
2328 | 2324 | else { | ||
2329 | 2325 | elt.style.display = "block"; | ||
2330 | 2326 | text.innerHTML = "-"; | ||
2331 | 2327 | } | ||
2332 | 2328 | } | ||
2333 | 2329 | |||
2334 | 2330 | function time_delta(date, e) { | ||
2335 | 2331 | var now = new Date(); | ||
2336 | 2332 | var d = new Date(date); | ||
2337 | 2333 | var delta = (now - d) / 1000; | ||
2338 | 2334 | var elt = document.getElementById(e); | ||
2339 | 2335 | if (delta >= 3600) { | ||
2340 | 2336 | var h = parseInt(delta / 3600); | ||
2341 | 2337 | var m = parseInt((delta - h * 3600) / 60); | ||
2342 | 2338 | elt.innerHTML = '(' + h + 'h ' + m + 'min ago)'; | ||
2343 | 2339 | return; | ||
2344 | 2340 | } | ||
2345 | 2341 | if (delta >= 60) { | ||
2346 | 2342 | var m = parseInt(delta / 60); | ||
2347 | 2343 | elt.innerHTML = '(' + m + 'min ago)'; | ||
2348 | 2344 | return; | ||
2349 | 2345 | } | ||
2350 | 2346 | elt.innerHTML = '(seconds ago)'; | ||
2351 | 2347 | } | ||
2352 | 2348 | |||
2353 | 2349 | </script> | ||
2354 | 2350 | """ | ||
2355 | 2351 | |||
2356 | 2352 | prefix = os.path.commonprefix([ branches[x][0]['grd'] for x in branches.keys() ]) | ||
2357 | 2353 | changes = 0 | ||
2358 | 2354 | langs = [] | ||
2359 | 2355 | mapped_langs = {} | ||
2360 | 2356 | cvts = {} | ||
2361 | 2357 | for grd in branches.keys(): | ||
2362 | 2358 | cvts[grd] = Converter(branches[grd][0]['grd'], | ||
2363 | 2359 | lang_mapping = lang_mapping, | ||
2364 | 2360 | template_mapping = templatenames_mapping, | ||
2365 | 2361 | debug = debug, | ||
2366 | 2362 | html_output = html_output, | ||
2367 | 2363 | branches = branches[grd]) | ||
2368 | 2364 | |||
2369 | 2365 | if cvts[grd].get_supported_strings_count() == 0: | ||
2370 | 2366 | if debug: | ||
2371 | 2367 | print "no string found in %s" % grd | ||
2372 | 2368 | if export_grit is not None and copy_grit is None: | ||
2373 | 2369 | directory = os.path.join(export_grit, os.path.dirname(branches[grd][0]['grd'])[len(prefix):]) | ||
2374 | 2370 | if not os.path.isdir(directory): | ||
2375 | 2371 | os.makedirs(directory, 0755) | ||
2376 | 2372 | shutil.copy2(branches[grd][0]['grd'], directory) | ||
2377 | 2373 | continue | ||
2378 | 2374 | |||
2379 | 2375 | if copy_grit is not None: | ||
2380 | 2376 | cvts[grd].copy_grit_files(copy_grit) | ||
2381 | 2377 | |||
2382 | 2378 | if import_gettext is not None: | ||
2383 | 2379 | for directory in import_gettext: | ||
2384 | 2380 | cvts[grd].import_gettext_po_files(directory) | ||
2385 | 2381 | langs.extend(cvts[grd].translations.keys()) | ||
2386 | 2382 | |||
2387 | 2383 | if export_gettext is not None: | ||
2388 | 2384 | cvts[grd].export_gettext_files(export_gettext) | ||
2389 | 2385 | changes += cvts[grd].template_changes + cvts[grd].translations_changes | ||
2390 | 2386 | |||
2391 | 2387 | # as we need to add all supported langs to the <outputs> section of all grd files, | ||
2392 | 2388 | # we have to wait for all the 'po' files to be imported and merged before we export | ||
2393 | 2389 | # the grit files and create the patches. | ||
2394 | 2390 | |||
2395 | 2391 | # supported langs | ||
2396 | 2392 | langs.append('en-US') # special case, it's not translated, but needs to be here | ||
2397 | 2393 | for lang in [ 'no' ]: # workaround for cases like the infamous no->nb mapping | ||
2398 | 2394 | while lang in langs: | ||
2399 | 2395 | langs.remove(lang) | ||
2400 | 2396 | langs.append(lang_mapping[lang]) | ||
2401 | 2397 | r = {} | ||
2402 | 2398 | langs = sorted([ r.setdefault(e, e) for e in langs if e not in r ]) | ||
2403 | 2399 | |||
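A worked example of the normalization above (input list invented): every 'no' entry is dropped, its gettext mapping is appended once, and the setdefault idiom deduplicates before sorting:

    # sketch, assuming lang_mapping == {'no': 'nb'}
    langs = ['fr', 'no', 'nb', 'fr']
    # after the while/append: ['fr', 'nb', 'fr', 'nb']
    # after the setdefault dedup and sorted(): ['fr', 'nb']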
2404 | 2400 | for grd in branches.keys(): | ||
2405 | 2401 | if export_grit is not None: | ||
2406 | 2402 | cvts[grd].export_grit_files(os.path.join(export_grit, os.path.dirname(branches[grd][0]['grd'])[len(prefix):]), langs) | ||
2407 | 2403 | for lang in cvts[grd].template.mapped_langs: | ||
2408 | 2404 | mapped_langs[lang] = cvts[grd].template.mapped_langs[lang]['gettext'] | ||
2409 | 2405 | if create_patches is not None: | ||
2410 | 2406 | cvts[grd].create_patches(create_patches) | ||
2411 | 2407 | |||
2412 | 2408 | # patch the build/common.gypi file if we have to | ||
2413 | 2409 | nlangs = None | ||
2414 | 2410 | if create_patches is not None and build_gyp_file is not None: | ||
2415 | 2411 | nlangs = cvts[branches.keys()[0]].create_build_gyp_patch(create_patches, build_gyp_file, other_grd_files, langs, | ||
2416 | 2412 | whitelisted_new_langs) | ||
2417 | 2413 | |||
2418 | 2414 | if create_patches is None: | ||
2419 | 2415 | # no need to display the stats | ||
2420 | 2416 | exit(1 if changes > 0 else 0) | ||
2421 | 2417 | |||
2422 | 2418 | # display some stats | ||
2423 | 2419 | html_js = "" | ||
2424 | 2420 | if html_output: | ||
2425 | 2421 | print """ | ||
2426 | 2422 | <p> | ||
2427 | 2423 | <div> | ||
2428 | 2424 | <div id="legend"> | ||
2429 | 2425 | <fieldset><legend>Legend</legend> | ||
2430 | 2426 | <table border="0"> | ||
2431 | 2427 | <tr><td><div id='green_l' class='progress_bar'></td><td>translated upstream</td></tr> | ||
2432 | 2428 | <tr><td><div id='blue_l' class='progress_bar'></td><td>translations updated in Launchpad</td></tr> | ||
2433 | 2429 | <tr><td><div id='purple_l' class='progress_bar'></td><td>translated in Launchpad</td></tr> | ||
2434 | 2430 | <tr><td><div id='red_l' class='progress_bar'></td><td>untranslated</td></tr> | ||
2435 | 2431 | </table> | ||
2436 | 2432 | </fieldset> | ||
2437 | 2433 | </div> | ||
2438 | 2434 | """ | ||
2439 | 2435 | html_js += "progress_bar('%s', %d, %d, %d, %d);\n" % ('green_l', 0, 1, 0, 0) | ||
2440 | 2436 | html_js += "progress_bar('%s', %d, %d, %d, %d);\n" % ('blue_l', 0, 0, 0, 1) | ||
2441 | 2437 | html_js += "progress_bar('%s', %d, %d, %d, %d);\n" % ('purple_l', 0, 0, 1, 0) | ||
2442 | 2438 | html_js += "progress_bar('%s', %d, %d, %d, %d);\n" % ('red_l', 1, 0, 0, 0) | ||
2443 | 2439 | if json_info: | ||
2444 | 2440 | now = datetime.utcfromtimestamp(os.path.getmtime(json_info)).strftime("%a %b %e %H:%M:%S UTC %Y") | ||
2445 | 2441 | binfo = json.loads(open(json_info, "r").read()) | ||
2446 | 2442 | print """ | ||
2447 | 2443 | <div id="branches"> | ||
2448 | 2444 | <fieldset><legend>Last update info</legend> | ||
2449 | 2445 | <table border="0"> | ||
2450 | 2446 | <tr><th>Branch</th><th>Revision</th><th>Date</th></tr> | ||
2451 | 2447 | <tr><td><a href="%s">Upstream</a></td><td>r%s</td><td>%s <em id='em-u'></em> </td></tr> | ||
2452 | 2448 | <tr><td><a href="%s">Launchpad export</a></td><td>r%s</td><td>%s <em id='em-lp'></em> </td></tr> | ||
2453 | 2449 | <tr><td>This page</td><td>-</td><td>%s <em id='em-now'></em> </td></tr> | ||
2454 | 2450 | </table> | ||
2455 | 2451 | </fieldset> | ||
2456 | 2452 | </div> | ||
2457 | 2453 | """ % (binfo['upstream']['url'], binfo['upstream']['revision'], binfo['upstream']['date'], | ||
2458 | 2454 | binfo['launchpad-export']['url'], binfo['launchpad-export']['revision'], | ||
2459 | 2455 | binfo['launchpad-export']['date'], now) | ||
2460 | 2456 | html_js += "time_delta('%s', '%s');\n" % (binfo['upstream']['date'], 'em-u') | ||
2461 | 2457 | html_js += "time_delta('%s', '%s');\n" % (binfo['launchpad-export']['date'], 'em-lp') | ||
2462 | 2458 | html_js += "time_delta('%s', '%s');\n" % (now, 'em-now') | ||
2463 | 2459 | print """ | ||
2464 | 2460 | <div id="stats"> | ||
2465 | 2461 | <table border="0"> | ||
2466 | 2462 | <tr><th rowspan="2">Rank</th><th rowspan="2">Lang</th><th colspan='5'>TOTAL</th><th colspan='5'>""" | ||
2467 | 2463 | print ("</th><th colspan='5'>".join([ "%s (<a href='http://git.chromium.org/gitweb/?p=chromium.git;a=history;f=%s;hb=HEAD'>+</a>)" \ | ||
2468 | 2464 | % (os.path.splitext(grd)[0], branches[grd][0]['grd']) \ | ||
2469 | 2465 | for grd in sorted(branches.keys()) ])) + "</th></tr><tr>" | ||
2470 | 2466 | j = 0 | ||
2471 | 2467 | for grd in [ 'TOTAL' ] + sorted(branches.keys()): | ||
2472 | 2468 | print """ | ||
2473 | 2469 | <th>Status</th> | ||
2474 | 2470 | <th><div id='%s_t%d' class='progress_bar'></th> | ||
2475 | 2471 | <th><div id='%s_t%d' class='progress_bar'></th> | ||
2476 | 2472 | <th><div id='%s_t%d' class='progress_bar'></th> | ||
2477 | 2473 | <th><div id='%s_t%d' class='progress_bar'></th>""" % ('red', j, 'green', j, 'purple', j, 'blue', j) | ||
2478 | 2474 | html_js += "progress_bar('%s_t%d', %d, %d, %d, %d);\n" % ('green', j, 0, 1, 0, 0) | ||
2479 | 2475 | html_js += "progress_bar('%s_t%d', %d, %d, %d, %d);\n" % ('blue', j, 0, 0, 0, 1) | ||
2480 | 2476 | html_js += "progress_bar('%s_t%d', %d, %d, %d, %d);\n" % ('purple', j, 0, 0, 1, 0) | ||
2481 | 2477 | html_js += "progress_bar('%s_t%d', %d, %d, %d, %d);\n" % ('red', j, 1, 0, 0, 0) | ||
2482 | 2478 | j += 1 | ||
2483 | 2479 | print "</tr>" | ||
2484 | 2480 | else: | ||
2485 | 2481 | print """\ | ||
2486 | 2482 | +----------------------- % translated | ||
2487 | 2483 | | +----------------- untranslated | ||
2488 | 2484 | | | +------------ translated upstream | ||
2489 | 2485 | | | | +------- translated in Launchpad | ||
2490 | 2486 | | | | | +-- translations updated in Launchpad | ||
2491 | 2487 | | | | | | | ||
2492 | 2488 | V V V V V""" | ||
2493 | 2489 | print "-- lang -- " + \ | ||
2494 | 2490 | ' '.join([ (" %s " % os.path.splitext(grd)[0]).center(25, "-") \ | ||
2495 | 2491 | for grd in [ 'TOTAL' ] + sorted(branches.keys()) ]) | ||
2496 | 2492 | totals = {} | ||
2497 | 2493 | for lang in langs: | ||
2498 | 2494 | klang = lang | ||
2499 | 2495 | if lang == 'nb': | ||
2500 | 2496 | klang = 'no' | ||
2501 | 2497 | totals[klang] = { 'total': 0, 'missing': 0, 'translated_upstream': 0, 'new': 0, 'updated': 0, 'lskipped': 0 } | ||
2502 | 2498 | for grd in branches.keys(): | ||
2503 | 2499 | tot, lskipped = cvts[grd].template.get_supported_strings_count(klang) | ||
2504 | 2500 | totals[klang]['lskipped'] += lskipped | ||
2505 | 2501 | totals[klang]['total'] += tot | ||
2506 | 2502 | totals[klang]['missing'] += tot | ||
2507 | 2503 | if klang in cvts[grd].template.stats: | ||
2508 | 2504 | totals[klang]['missing'] -= cvts[grd].template.stats[klang]['translated_upstream'] + \ | ||
2509 | 2505 | cvts[grd].template.stats[klang]['new'] + cvts[grd].template.stats[klang]['updated'] | ||
2510 | 2506 | totals[klang]['translated_upstream'] += cvts[grd].template.stats[klang]['translated_upstream'] | ||
2511 | 2507 | totals[klang]['new'] += cvts[grd].template.stats[klang]['new'] | ||
2512 | 2508 | totals[klang]['updated'] += cvts[grd].template.stats[klang]['updated'] | ||
2513 | 2509 | |||
2514 | 2510 | rank = 0 | ||
2515 | 2511 | p_rank = 0 | ||
2516 | 2512 | p_score = -1 | ||
2517 | 2513 | t_landable = 0 | ||
2518 | 2514 | for lang in sorted(totals, lambda x, y: cmp("%05d %05d %s" % (totals[x]['missing'], totals[x]['total'] - totals[x]['updated'] - totals[x]['new'], x), | ||
2519 | 2515 | "%05d %05d %s" % (totals[y]['missing'], totals[y]['total'] - totals[y]['updated'] - totals[y]['new'], y))): | ||
2520 | 2516 | if lang == 'en-US': | ||
2521 | 2517 | continue | ||
2522 | 2518 | rank += 1 | ||
2523 | 2519 | if p_score != totals[lang]['missing']: | ||
2524 | 2520 | p_score = totals[lang]['missing'] | ||
2525 | 2521 | p_rank = rank | ||
2526 | 2522 | rlang = lang | ||
2527 | 2523 | if lang in lang_mapping: | ||
2528 | 2524 | rlang = lang_mapping[lang] | ||
2529 | 2525 | if html_output: | ||
2530 | 2526 | s = "<tr><td>%s</td><td class='lang'><a class='l' href='%s'>%s</a></td>" % \ | ||
2531 | 2527 | ("#%d" % p_rank, 'https://translations.launchpad.net/chromium-browser/translations/+lang/' + \ | ||
2532 | 2528 | mapped_langs[lang], rlang) | ||
2533 | 2529 | s += "<td><div id='%s' class='progress_bar'></div></td>" % rlang | ||
2534 | 2530 | s += "<td class='d'>%d</td><td class='d'>%d</td><td class='d'>%d</td><td class='d'>%d</td>" % \ | ||
2535 | 2531 | (totals[lang]['missing'], totals[lang]['translated_upstream'], | ||
2536 | 2532 | totals[lang]['new'], totals[lang]['updated']) | ||
2537 | 2533 | html_js += "progress_bar('%s', %d, %d, %d, %d);\n" % \ | ||
2538 | 2534 | (rlang, totals[lang]['missing'], totals[lang]['translated_upstream'], | ||
2539 | 2535 | totals[lang]['new'], totals[lang]['updated']) | ||
2540 | 2536 | else: | ||
2541 | 2537 | s = "%-3s %-6s " % ("#%d" % p_rank, rlang) | ||
2542 | 2538 | s += "%3d%% %4d %4d %4d %4d" % \ | ||
2543 | 2539 | (100.0 * float(totals[lang]['total'] - totals[lang]['missing']) / float(totals[lang]['total']), | ||
2544 | 2540 | totals[lang]['missing'], totals[lang]['translated_upstream'], | ||
2545 | 2541 | totals[lang]['new'], totals[lang]['updated']) | ||
2546 | 2542 | j = 0 | ||
2547 | 2543 | for grd in sorted(branches.keys()): | ||
2548 | 2544 | j += 1 | ||
2549 | 2545 | tplt = os.path.splitext(grd)[0].replace('_', '-') | ||
2550 | 2546 | total, lskipped = cvts[grd].template.get_supported_strings_count(lang) | ||
2551 | 2547 | if lang in cvts[grd].template.stats: | ||
2552 | 2548 | missing = total - cvts[grd].template.stats[lang]['translated_upstream'] - \ | ||
2553 | 2549 | cvts[grd].template.stats[lang]['new'] - cvts[grd].template.stats[lang]['updated'] | ||
2554 | 2550 | if html_output: | ||
2555 | 2551 | if len(unlandable_templates) == 0 and len(landable_templates) == 0: | ||
2556 | 2552 | landable = False | ||
2557 | 2553 | else: | ||
2558 | 2554 | landable = (nlangs is not None and lang in nlangs and tplt not in unlandable_templates) or \ | ||
2559 | 2555 | (nlangs is not None and lang not in nlangs and tplt in landable_templates) | ||
2560 | 2556 | if landable: | ||
2561 | 2557 | t_landable += cvts[grd].template.stats[lang]['new'] + cvts[grd].template.stats[lang]['updated'] | ||
2562 | 2558 | s += "<td><div id='%s_%d' class='progress_bar'></div></td>" % (rlang, j) | ||
2563 | 2559 | s += "<td class='d'>%d</td><td class='d'>%d</td><td class='d%s'>%d</td><td class='d%s'>%d</td>" % \ | ||
2564 | 2560 | (missing, | ||
2565 | 2561 | cvts[grd].template.stats[lang]['translated_upstream'], | ||
2566 | 2562 | " n" if landable and cvts[grd].template.stats[lang]['new'] > 0 else "", | ||
2567 | 2563 | cvts[grd].template.stats[lang]['new'], | ||
2568 | 2564 | " n" if landable and cvts[grd].template.stats[lang]['updated'] > 0 else "", | ||
2569 | 2565 | cvts[grd].template.stats[lang]['updated']) | ||
2570 | 2566 | html_js += "progress_bar('%s_%d', %d, %d, %d, %d);\n" % \ | ||
2571 | 2567 | (rlang, j, missing, | ||
2572 | 2568 | cvts[grd].template.stats[lang]['translated_upstream'], | ||
2573 | 2569 | cvts[grd].template.stats[lang]['new'], | ||
2574 | 2570 | cvts[grd].template.stats[lang]['updated']) | ||
2575 | 2571 | else: | ||
2576 | 2572 | if float(total) > 0: | ||
2577 | 2573 | pct = 100.0 * float(total - missing) / float(total) | ||
2578 | 2574 | else: | ||
2579 | 2575 | pct = 0 | ||
2580 | 2576 | s += " %3d%% %4d %4d %4d %4d" % \ | ||
2581 | 2577 | (pct, missing, | ||
2582 | 2578 | cvts[grd].template.stats[lang]['translated_upstream'], | ||
2583 | 2579 | cvts[grd].template.stats[lang]['new'], | ||
2584 | 2580 | cvts[grd].template.stats[lang]['updated']) | ||
2585 | 2581 | else: | ||
2586 | 2582 | if html_output: | ||
2587 | 2583 | s += "<td><div id='%s_%d' class='progress_bar'></div></td>" % (rlang, j) | ||
2588 | 2584 | s += "<td class='d'>%d</td><td class='d'>%d</td><td class='d'>%d</td><td class='d'>%d</td>" % \ | ||
2589 | 2585 | (total, 0, 0, 0) | ||
2590 | 2586 | html_js += "progress_bar('%s_%d', %d, %d, %d, %d);\n" % \ | ||
2591 | 2587 | (rlang, j, total, 0, 0, 0) | ||
2592 | 2588 | else: | ||
2593 | 2589 | s += " %3d%% %4d %4d %4d %4d" % (0, total, 0, 0, 0) | ||
2594 | 2590 | if html_output: | ||
2595 | 2591 | s += "</tr>" | ||
2596 | 2592 | print s | ||
2597 | 2593 | if html_output: | ||
2598 | 2594 | landable_sum = "" | ||
2599 | 2595 | if t_landable > 0: | ||
2600 | 2596 | landable_sum = """<p> | ||
2601 | 2597 | <div name='landable'> | ||
2602 | 2598 | <table border="0"><tr><td class="d n">%d strings are landable upstream</td></tr></table></div> | ||
2603 | 2599 | """ % t_landable | ||
2604 | 2600 | print """\ | ||
2605 | 2601 | </table> | ||
2606 | 2602 | %s</div> | ||
2607 | 2603 | </div> | ||
2608 | 2604 | <script type="text/javascript" language="javascript"> | ||
2609 | 2605 | %s | ||
2610 | 2606 | </script> | ||
2611 | 2607 | </body> | ||
2612 | 2608 | </html>""" % (landable_sum, html_js) | ||
2613 | 2609 | exit(1 if changes > 0 else 0) | ||
2614 | 2610 | |||
2615 | 0 | 2611 | ||
2616 | === added file 'create-patches.sh' | |||
2617 | --- create-patches.sh 1970-01-01 00:00:00 +0000 | |||
2618 | +++ create-patches.sh 2024-02-28 12:58:47 +0000 | |||
2619 | @@ -0,0 +1,185 @@ | |||
2620 | 1 | #!/bin/sh | ||
2621 | 2 | |||
2622 | 3 | # Create the translation patches (grit format) based on the last | ||
2623 | 4 | # translation export (gettext format) | ||
2624 | 5 | # (c) 2010-2011, Fabien Tassin <fta@ubuntu.com> | ||
2625 | 6 | |||
2626 | 7 | # location of chromium2pot.py (lp:~chromium-team/chromium-browser/chromium-translations-tools.head) | ||
2627 | 8 | # (must already exist) | ||
2628 | 9 | BIN_DIR=/data/bot/chromium-translations-tools.head | ||
2629 | 10 | |||
2630 | 11 | # Launchpad translation export (must already exist, will be pulled here) | ||
2631 | 12 | LPE_DIR=/data/bot/upstream/chromium-translations-exports.head | ||
2632 | 13 | |||
2633 | 14 | # local svn branches (updated by drobotik) | ||
2634 | 15 | SRC_TRUNK_DIR=/data/bot/upstream/chromium-browser.svn/src | ||
2635 | 16 | SRC_DEV_DIR=/data/bot/upstream/chromium-dev.svn/src | ||
2636 | 17 | SRC_BETA_DIR=/data/bot/upstream/chromium-beta.svn/src | ||
2637 | 18 | SRC_STABLE_DIR=/data/bot/upstream/chromium-stable.svn/src | ||
2638 | 19 | |||
2639 | 20 | #### | ||
2640 | 21 | |||
2641 | 22 | OUT_DIR=$(mktemp -d) | ||
2642 | 23 | |||
2643 | 24 | # List of options per branch | ||
2644 | 25 | NEW_OPTS="--map-template-names ui/base/strings/ui_strings.grd=ui/base/strings/app_strings.grd" | ||
2645 | 26 | OLD_OPTS="" | ||
2646 | 27 | |||
2647 | 28 | OPTS_TRUNK=$NEW_OPTS | ||
2648 | 29 | OPTS_DEV=$NEW_OPTS | ||
2649 | 30 | OPTS_BETA=$NEW_OPTS | ||
2650 | 31 | OPTS_STABLE=$OLD_OPTS | ||
2651 | 32 | |||
2652 | 33 | # List of templates to send to Launchpad | ||
2653 | 34 | NEW_TEMPLATES=$(cat - <<EOF | ||
2654 | 35 | ui/base/strings/ui_strings.grd | ||
2655 | 36 | EOF | ||
2656 | 37 | ) | ||
2657 | 38 | OLD_TEMPLATES=$(cat - <<EOF | ||
2658 | 39 | ui/base/strings/app_strings.grd | ||
2659 | 40 | EOF | ||
2660 | 41 | ) | ||
2661 | 42 | TEMPLATES_TRUNK=$NEW_TEMPLATES | ||
2662 | 43 | TEMPLATES_DEV=$NEW_TEMPLATES | ||
2663 | 44 | TEMPLATES_BETA=$NEW_TEMPLATES | ||
2664 | 45 | TEMPLATES_STABLE=$OLD_TEMPLATES | ||
2665 | 46 | |||
2666 | 47 | TEMPLATES=$(cat - <<EOF | ||
2667 | 48 | chrome/app/chromium_strings.grd | ||
2668 | 49 | chrome/app/generated_resources.grd | ||
2669 | 50 | chrome/app/policy/policy_templates.grd | ||
2670 | 51 | webkit/glue/inspector_strings.grd | ||
2671 | 52 | webkit/glue/webkit_strings.grd | ||
2672 | 53 | EOF | ||
2673 | 54 | ) | ||
2674 | 55 | |||
2675 | 56 | # List of other templates to update for new langs, but that are | ||
2676 | 57 | # not sent to Launchpad | ||
2677 | 58 | NEW_OTHER_TEMPLATES=$(cat - <<EOF | ||
2678 | 59 | ui/base/strings/app_locale_settings.grd | ||
2679 | 60 | EOF | ||
2680 | 61 | ) | ||
2681 | 62 | OLD_OTHER_TEMPLATES=$(cat - <<EOF | ||
2682 | 63 | app/resources/app_locale_settings.grd | ||
2683 | 64 | EOF | ||
2684 | 65 | ) | ||
2685 | 66 | OTHER_TEMPLATES_TRUNK=$NEW_OTHER_TEMPLATES | ||
2686 | 67 | OTHER_TEMPLATES_DEV=$NEW_OTHER_TEMPLATES | ||
2687 | 68 | OTHER_TEMPLATES_BETA=$NEW_OTHER_TEMPLATES | ||
2688 | 69 | OTHER_TEMPLATES_STABLE=$NEW_OTHER_TEMPLATES | ||
2689 | 70 | # Common to all branches | ||
2690 | 71 | OTHER_TEMPLATES=$(cat - <<EOF | ||
2691 | 72 | chrome/app/resources/locale_settings.grd | ||
2692 | 73 | chrome/app/resources/locale_settings_linux.grd | ||
2693 | 74 | chrome/app/resources/locale_settings_cros.grd | ||
2694 | 75 | EOF | ||
2695 | 76 | ) | ||
2696 | 77 | |||
2697 | 78 | ###### | ||
2698 | 79 | |||
2699 | 80 | TEMPLATES=$(echo $TEMPLATES | tr '[ \n]' ' ') | ||
2700 | 81 | |||
2701 | 82 | (cd $LPE_DIR ; bzr pull -q) | ||
2702 | 83 | |||
2703 | 84 | space_list () { | ||
2704 | 85 | local V1="$1" | ||
2705 | 86 | local V2="$2" | ||
2706 | 87 | echo "$V1 $V2" | tr '[ \n]' ' ' | sed -e 's/ $//' | ||
2707 | 88 | } | ||
2708 | 89 | |||
2709 | 90 | comma_list () { | ||
2710 | 91 | local V1="$1" | ||
2711 | 92 | local V2="$2" | ||
2712 | 93 | |||
2713 | 94 | echo "$V1 $V2" | tr '[ \n]' ',' | sed -e 's/,$//' | ||
2714 | 95 | } | ||
2715 | 96 | |||
2716 | 97 | get_branches_info () { | ||
2717 | 98 | local BRANCH=$1 | ||
2718 | 99 | local SRC_DIR=$2 | ||
2719 | 100 | local JSON=$3 | ||
2720 | 101 | |||
2721 | 102 | # upstream url, revision & last change date | ||
2722 | 103 | UURL=$(cd $SRC_DIR; svn info | grep '^URL: ' | sed -e 's/.*: //') | ||
2723 | 104 | UREV="$(cd $SRC_DIR; svn info | grep '^Last Changed Rev:' | sed -e 's/.*: //') ($(cut -d= -f2 $SRC_DIR/chrome/VERSION | sed -e 's,$,.,' | tr -d '\n' | sed -e 's/.$//'))" | ||
2724 | 105 | UDATE=$(date -d "$(cd $SRC_DIR; svn info | grep '^Last Changed Date:' | sed -e 's/.*: //')" --utc) | ||
2725 | 106 | |||
2726 | 107 | # Launchpad url, revision & last change date | ||
2727 | 108 | LURL=$(cd $LPE_DIR; bzr info | grep 'parent branch' | sed -e 's,.*: bzr+ssh://bazaar,https://code,') | ||
2728 | 109 | LREV=$(cd $LPE_DIR; bzr revno) | ||
2729 | 110 | LDATE=$(date -d "$(cd $LPE_DIR; bzr info -v | grep 'latest revision' | sed -e 's/.*: //')" --utc) | ||
2730 | 111 | |||
2731 | 112 | cat - <<EOF > $JSON | ||
2732 | 113 | { | ||
2733 | 114 | "upstream": { | ||
2734 | 115 | "revision": "$UREV", | ||
2735 | 116 | "url": "$UURL", | ||
2736 | 117 | "date": "$UDATE" | ||
2737 | 118 | }, | ||
2738 | 119 | "launchpad-export": { | ||
2739 | 120 | "revision": $LREV, | ||
2740 | 121 | "url": "$LURL", | ||
2741 | 122 | "date": "$LDATE" | ||
2742 | 123 | } | ||
2743 | 124 | } | ||
2744 | 125 | EOF | ||
2745 | 126 | } | ||
2746 | 127 | |||
2747 | 128 | create_patch () { | ||
2748 | 129 | local BRANCH=$1 | ||
2749 | 130 | local SRC_DIR=$2 | ||
2750 | 131 | local BRANCH_OPTS="$3" | ||
2751 | 132 | local TEMPLATES="$4" | ||
2752 | 133 | local OTHER_TEMPLATES="$5" | ||
2753 | 134 | |||
2754 | 135 | LOG=converter-output.html | ||
2755 | 136 | DLOG=converter-diffstat.html | ||
2756 | 137 | |||
2757 | 138 | set -e | ||
2758 | 139 | cd $SRC_DIR | ||
2759 | 140 | mkdir -p $OUT_DIR/$BRANCH/new | ||
2760 | 141 | |||
2761 | 142 | get_branches_info $BRANCH $SRC_DIR $OUT_DIR/$BRANCH/new/revisions.json | ||
2762 | 143 | |||
2763 | 144 | local OPTS="" | ||
2764 | 145 | if [ $BRANCH = trunk ] ; then | ||
2765 | 146 | OPTS="--landable-templates chromium-strings,inspector-strings --unlandable-templates policy-templates" | ||
2766 | 147 | fi | ||
2767 | 148 | |||
2768 | 149 | # Generate the new files, using the new template and the translations exported by launchpad | ||
2769 | 150 | $BIN_DIR/chromium2pot.py \ | ||
2770 | 151 | --html-output \ | ||
2771 | 152 | --json-branches-info $OUT_DIR/$BRANCH/new/revisions.json \ | ||
2772 | 153 | --create-patches $OUT_DIR/$BRANCH/new/patches \ | ||
2773 | 154 | --import-gettext $LPE_DIR \ | ||
2774 | 155 | --export-grit $OUT_DIR/$BRANCH/new/patched-files \ | ||
2775 | 156 | --build-gyp-file build/common.gypi \ | ||
2776 | 157 | --other-grd-files $OTHER_TEMPLATES \ | ||
2777 | 158 | $OPTS \ | ||
2778 | 159 | $BRANCH_OPTS \ | ||
2779 | 160 | $TEMPLATES >> $OUT_DIR/$BRANCH/new/$LOG 2>&1 | ||
2780 | 161 | echo >> $OUT_DIR/$BRANCH/new/$LOG | ||
2781 | 162 | |||
2782 | 163 | ( cd "$OUT_DIR/$BRANCH/new/patches" ; for i in * ; do mv $i $i.txt ; done ) | ||
2783 | 164 | echo "<pre>" > $OUT_DIR/$BRANCH/new/$DLOG | ||
2784 | 165 | ( cd "$OUT_DIR/$BRANCH/new" ; find patches -type f | xargs --verbose -n 1 diffstat -p 1 >> $DLOG 2>&1 ) | ||
2785 | 166 | perl -i -pe 's,^(diffstat -p 1 )(\S+)(.*),$1<a href="$2">$2</a>$3,;' $OUT_DIR/$BRANCH/new/$DLOG | ||
2786 | 167 | echo "</pre>" >> $OUT_DIR/$BRANCH/new/$DLOG | ||
2787 | 168 | |||
2788 | 169 | # get the old files | ||
2789 | 170 | mkdir $OUT_DIR/$BRANCH/old | ||
2790 | 171 | lftp -e "lcd $OUT_DIR/$BRANCH/old; cd public_html/chromium/translations/$BRANCH; mirror; quit" sftp://people.ubuntu.com > /dev/null 2>&1 | ||
2791 | 172 | set +e | ||
2792 | 173 | (cd $OUT_DIR/$BRANCH ; diff -Nur old new > diff.patch; cd old ; patch -p 1 < ../diff.patch > /dev/null 2>&1 ) | ||
2793 | 174 | set -e | ||
2794 | 175 | |||
2795 | 176 | lftp -e "lcd $OUT_DIR/$BRANCH/old; cd public_html/chromium/translations/$BRANCH; mirror --delete -R; quit" sftp://people.ubuntu.com > /dev/null 2>&1 | ||
2796 | 177 | set +e | ||
2797 | 178 | } | ||
2798 | 179 | |||
2799 | 180 | create_patch "trunk" $SRC_TRUNK_DIR "$OPTS_TRUNK" "$(space_list "$TEMPLATES" "$TEMPLATES_TRUNK")" $(comma_list "$OTHER_TEMPLATES_TRUNK" "$OTHER_TEMPLATES") | ||
2800 | 181 | create_patch "dev" $SRC_DEV_DIR "$OPTS_DEV" "$(space_list "$TEMPLATES" "$TEMPLATES_DEV")" $(comma_list "$OTHER_TEMPLATES_DEV" "$OTHER_TEMPLATES") | ||
2801 | 182 | create_patch "beta" $SRC_BETA_DIR "$OPTS_BETA" "$(space_list "$TEMPLATES" "$TEMPLATES_BETA")" $(comma_list "$OTHER_TEMPLATES_BETA" "$OTHER_TEMPLATES") | ||
2802 | 183 | create_patch "stable" $SRC_STABLE_DIR "$OPTS_STABLE" "$(space_list "$TEMPLATES" "$TEMPLATES_STABLE")" $(comma_list "$OTHER_TEMPLATES_STABLE" "$OTHER_TEMPLATES") | ||
2803 | 184 | |||
2804 | 185 | rm -rf $OUT_DIR | ||
2805 | 0 | 186 | ||
2806 | === added file 'desktop2gettext.py' | |||
2807 | --- desktop2gettext.py 1970-01-01 00:00:00 +0000 | |||
2808 | +++ desktop2gettext.py 2024-02-28 12:58:47 +0000 | |||
2809 | @@ -0,0 +1,378 @@ | |||
2810 | 1 | #!/usr/bin/python | ||
2811 | 2 | # -*- coding: utf-8 -*- | ||
2812 | 3 | |||
2813 | 4 | # (c) 2010-2011, Fabien Tassin <fta@ubuntu.com> | ||
2814 | 5 | |||
2815 | 6 | # Convert a desktop file to Gettext files and back | ||
2816 | 7 | |||
2817 | 8 | import sys, getopt, os, codecs, re, time | ||
2818 | 9 | from datetime import datetime | ||
2819 | 10 | |||
2820 | 11 | class DesktopFile(dict): | ||
2821 | 12 | """ Read and write a desktop file """ | ||
2822 | 13 | def __init__(self, desktop = None, src_pkg = None, verbose = False): | ||
2823 | 14 | self.changed = False | ||
2824 | 15 | self.data = [] | ||
2825 | 16 | self.headers = {} | ||
2826 | 17 | self.template = {} | ||
2827 | 18 | self.translations = {} | ||
2828 | 19 | self.src_pkg = src_pkg | ||
2829 | 20 | self.mtime = None | ||
2830 | 21 | self.verbose = verbose | ||
2831 | 22 | if desktop is not None: | ||
2832 | 23 | self.read_desktop_file(desktop) | ||
2833 | 24 | |||
2834 | 25 | def read_desktop_file(self, filename): | ||
2835 | 26 | self.data = [] | ||
2836 | 27 | self.template = {} | ||
2837 | 28 | self.mtime = os.path.getmtime(filename) | ||
2838 | 29 | fd = codecs.open(filename, "rb", encoding="utf-8") | ||
2839 | 30 | section = None | ||
2840 | 31 | for line in fd.readlines(): | ||
2841 | 32 | m = re.match(r'^\[(.*?)\]', line) | ||
2842 | 33 | if m is not None: | ||
2843 | 34 | section = m.group(1) | ||
2844 | 35 | assert section not in self.template, "Duplicate section [%s]" % section | ||
2845 | 36 | self.template[section] = {} | ||
2846 | 37 | m = re.match(r'^(Name|GenericName|Comment)(\[\S+\]|)=(.*)', line) | ||
2847 | 38 | if m is None: | ||
2848 | 39 | self.data.append(line) | ||
2849 | 40 | continue | ||
2850 | 41 | assert section is not None, "Found a '%s' outside a section" % m.group(1) | ||
2851 | 42 | entry = m.group(1) | ||
2852 | 43 | value = m.group(3) | ||
2853 | 44 | if m.group(2) == "": | ||
2854 | 45 | # master string | ||
2855 | 46 | self.data.append(line) | ||
2856 | 47 | assert entry not in self.template[section], \ | ||
2857 | 48 | "Duplicate entry '%s' in section [%s]" % (entry, section) | ||
2858 | 49 | self.template[section][entry] = value | ||
2859 | 50 | if value not in self.translations: | ||
2860 | 51 | self.translations[value] = {} | ||
2861 | 52 | else: | ||
2862 | 53 | # translation | ||
2863 | 54 | lang = m.group(2)[1:-1] | ||
2864 | 55 | assert entry in self.template[section], \ | ||
2865 | 56 | "Translation found for lang '%s' in section [%s] before master entry '%s'" % \ | ||
2866 | 57 | (lang, section, entry) | ||
2867 | 58 | string = self.template[section][entry] | ||
2868 | 59 | if lang not in self.translations[string]: | ||
2869 | 60 | self.translations[string][lang] = value | ||
2870 | 61 | fd.close() | ||
2871 | 62 | |||
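For reference, a minimal sketch of the data model read_desktop_file() builds, using an invented desktop snippet; note that translations are keyed by the master string itself, not by (section, entry):

    # given a desktop file containing (hypothetical content):
    #   [Desktop Entry]
    #   Name=Chromium Web Browser
    #   Name[fr]=Navigateur Web Chromium
    # the parser ends up with:
    #   self.template     == {'Desktop Entry': {'Name': 'Chromium Web Browser'}}
    #   self.translations == {'Chromium Web Browser': {'fr': 'Navigateur Web Chromium'}}
    df = DesktopFile('chromium-browser.desktop', src_pkg='chromium-browser')  # hypothetical file
    df.dump()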
2872 | 63 | def dump(self): | ||
2873 | 64 | for section in sorted(self.template.keys()): | ||
2874 | 65 | print "[%s]:" % section | ||
2875 | 66 | for entry in sorted(self.template[section].keys()): | ||
2876 | 67 | print " '%s': '%s'" % (entry, self.template[section][entry]) | ||
2877 | 68 | |||
2878 | 69 | for string in sorted(self.translations.keys()): | ||
2879 | 70 | print "'%s':" % string | ||
2880 | 71 | for lang in sorted(self.translations[string].keys()): | ||
2881 | 72 | print " '%s' => '%s'" % (lang, self.translations[string][lang]) | ||
2882 | 73 | |||
2883 | 74 | def write_desktop(self, file): | ||
2884 | 75 | fd = codecs.open(file, "wb", encoding="utf-8") | ||
2885 | 76 | for ent in self.data: | ||
2886 | 77 | fd.write(ent) | ||
2887 | 78 | m = re.match(r'^(Name|GenericName|Comment)=(.*)', ent) | ||
2888 | 79 | if m is None: | ||
2889 | 80 | continue | ||
2890 | 81 | k = m.group(2) | ||
2891 | 82 | if k not in self.translations: | ||
2892 | 83 | continue | ||
2893 | 84 | for lang in sorted(self.translations[k].keys()): | ||
2894 | 85 | fd.write("%s[%s]=%s\n" % (m.group(1), lang, self.translations[k][lang])) | ||
2895 | 86 | |||
2896 | 87 | def write_gettext_header(self, fd, mtime = None, last_translator = None, lang_team = None): | ||
2897 | 88 | mtime = "YEAR-MO-DA HO:MI+ZONE" if mtime is None else \ | ||
2898 | 89 | datetime.fromtimestamp(mtime).strftime("%Y-%m-%d %H:%M+0000") | ||
2899 | 90 | last_translator = "FULL NAME <EMAIL@ADDRESS>" if last_translator is None else \ | ||
2900 | 91 | last_translator | ||
2901 | 92 | lang_team = "LANGUAGE <LL@li.org>" if lang_team is None else lang_team | ||
2902 | 93 | fd.write("""\ | ||
2903 | 94 | # %s desktop file. | ||
2904 | 95 | # Copyright (C) 2010-2011 Fabien Tassin | ||
2905 | 96 | # This file is distributed under the same license as the %s package. | ||
2906 | 97 | # Fabien Tassin <fta@ubuntu.com>, 2010-2011. | ||
2907 | 98 | # | ||
2908 | 99 | msgid "" | ||
2909 | 100 | msgstr "" | ||
2910 | 101 | "Project-Id-Version: %s\\n" | ||
2911 | 102 | "Report-Msgid-Bugs-To: https://bugs.launchpad.net/ubuntu/+source/%s/+filebug\\n" | ||
2912 | 103 | "POT-Creation-Date: %s\\n" | ||
2913 | 104 | "PO-Revision-Date: %s\\n" | ||
2914 | 105 | "Last-Translator: %s\\n" | ||
2915 | 106 | "Language-Team: %s\\n" | ||
2916 | 107 | "MIME-Version: 1.0\\n" | ||
2917 | 108 | "Content-Type: text/plain; charset=UTF-8\\n" | ||
2918 | 109 | "Content-Transfer-Encoding: 8bit\\n" | ||
2919 | 110 | |||
2920 | 111 | """ % (self.src_pkg, self.src_pkg, self.src_pkg, self.src_pkg, | ||
2921 | 112 | datetime.fromtimestamp(self.mtime).strftime("%Y-%m-%d %H:%M+0000"), | ||
2922 | 113 | mtime, last_translator, lang_team)) | ||
2923 | 114 | |||
2924 | 115 | def write_gettext_string(self, fd, string, translation = None, usage = None): | ||
2925 | 116 | if translation is None: | ||
2926 | 117 | translation = "" | ||
2927 | 118 | if usage is not None: | ||
2928 | 119 | fd.write("#. %s\n" % usage) | ||
2929 | 120 | fd.write('msgid "%s"\nmsgstr "%s"\n\n' % (string, translation)) | ||
2930 | 121 | |||
2931 | 122 | def get_usage(self, string): | ||
2932 | 123 | res = [] | ||
2933 | 124 | for section in sorted(self.template.keys()): | ||
2934 | 125 | for entry in sorted(self.template[section].keys()): | ||
2935 | 126 | if self.template[section][entry] == string: | ||
2936 | 127 | res.append("[%s] %s" % (section, entry)) | ||
2937 | 128 | return ", ".join(res) | ||
2938 | 129 | |||
2939 | 130 | def read_gettext_string(self, fd): | ||
2940 | 131 | string = {} | ||
2941 | 132 | cur = None | ||
2942 | 133 | while 1: | ||
2943 | 134 | s = fd.readline() | ||
2944 | 135 | if len(s) == 0 or s == "\n": | ||
2945 | 136 | break # EOF or end of block | ||
2946 | 137 | if s.rfind('\n') == len(s) - 1: | ||
2947 | 138 | s = s[:-1] # chomp | ||
2948 | 139 | if s.find("# ") == 0 or s == "#": # translator-comment | ||
2949 | 140 | if 'comment' not in string: | ||
2950 | 141 | string['comment'] = '' | ||
2951 | 142 | string['comment'] += s[2:] | ||
2952 | 143 | continue | ||
2953 | 144 | if s.find("#:") == 0: # reference | ||
2954 | 145 | if 'reference' not in string: | ||
2955 | 146 | string['reference'] = '' | ||
2956 | 147 | string['reference'] += s[2:] | ||
2957 | 148 | if s[2:].find(" id: ") == 0: | ||
2958 | 149 | string['id'] = s[7:].split(' ')[0] | ||
2959 | 150 | continue | ||
2960 | 151 | if s.find("#.") == 0: # extracted-comments | ||
2961 | 152 | if 'extracted' not in string: | ||
2962 | 153 | string['extracted'] = '' | ||
2963 | 154 | string['extracted'] += s[2:] | ||
2964 | 155 | if s[2:].find(" - condition: ") == 0: | ||
2965 | 156 | if 'conditions' not in string: | ||
2966 | 157 | string['conditions'] = [] | ||
2967 | 158 | string['conditions'].append(s[16:]) | ||
2968 | 159 | continue | ||
2969 | 160 | if s.find("#~") == 0: # obsolete messages | ||
2970 | 161 | continue | ||
2971 | 162 | if s.find("#") == 0: # something else | ||
2972 | 163 | print "%s not expected. Skip" % repr(s) | ||
2973 | 164 | continue # not supported/expected | ||
2974 | 165 | if s.find("msgid ") == 0: | ||
2975 | 166 | cur = "string" | ||
2976 | 167 | if cur not in string: | ||
2977 | 168 | string[cur] = u"" | ||
2978 | 169 | else: | ||
2979 | 170 | string[cur] += "\n" | ||
2980 | 171 | string[cur] += s[6:] | ||
2981 | 172 | continue | ||
2982 | 173 | if s.find("msgstr ") == 0: | ||
2983 | 174 | cur = "translation" | ||
2984 | 175 | if cur not in string: | ||
2985 | 176 | string[cur] = u"" | ||
2986 | 177 | else: | ||
2987 | 178 | string[cur] += "\n" | ||
2988 | 179 | string[cur] += s[7:] | ||
2989 | 180 | continue | ||
2990 | 181 | if s.find('"') == 0: | ||
2991 | 182 | if cur is None: | ||
2992 | 183 | print "'%s' not expected here. Skip" % s | ||
2993 | 184 | continue | ||
2994 | 185 | string[cur] += "\n" + s | ||
2995 | 186 | continue | ||
2996 | 187 | print "'%s' not expected here. Skip" % s | ||
2997 | 188 | return None if string == {} else string | ||
2998 | 189 | |||
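A sketch of what read_gettext_string() returns for a single PO block (input invented); 'string' and 'translation' keep their surrounding double quotes, which merge_gettext_string() strips with [1:-1]:

    # input block:
    #   #. [Desktop Entry] Name
    #   msgid "Chromium Web Browser"
    #   msgstr "Navigateur Web Chromium"
    # returned dict:
    #   {'extracted': ' [Desktop Entry] Name',
    #    'string': '"Chromium Web Browser"',
    #    'translation': '"Navigateur Web Chromium"'}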
2999 | 190 | def merge_gettext_string(self, string, lang): | ||
3000 | 191 | msg = string['string'][1:-1] | ||
3001 | 192 | if msg == "": # header | ||
3002 | 193 | self.headers[lang] = {} | ||
3003 | 194 | map(lambda x: self.headers[lang].setdefault(x.split(": ")[0], x.split(": ")[1]), | ||
3004 | 195 | string['translation'][4:-3].replace('\\n"\n"', '\n').replace('"\n"', '').split('\n')) | ||
3005 | 196 | return | ||
3006 | 197 | if msg not in self.translations: | ||
3007 | 198 | return # obsolete string | ||
3008 | 199 | translation = string['translation'][1:-1] | ||
3009 | 200 | if translation == "": # no translation | ||
3010 | 201 | return | ||
3011 | 202 | if lang not in self.translations[msg]: | ||
3012 | 203 | if self.verbose: | ||
3013 | 204 | print "merge translation for lang '%s' string '%s'" % (lang, msg) | ||
3014 | 205 | self.translations[msg][lang] = translation | ||
3015 | 206 | elif self.translations[msg][lang] != translation: | ||
3016 | 207 | if self.verbose: | ||
3017 | 208 | print "update translation for lang '%s' string '%s'" % (lang, msg) | ||
3018 | 209 | self.translations[msg][lang] = translation | ||
3019 | 210 | |||
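The header unpacking above is dense, so here is a worked sketch (header fields invented). For the PO header, 'translation' holds the quoted, line-wrapped msgstr; the [4:-3] slice and the two replace() calls flatten it into one 'Key: Value' line per header field:

    # string['translation'] for a header block looks like:
    #   ""
    #   "Project-Id-Version: chromium-browser\n"
    #   "Last-Translator: Someone <a@b>\n"
    # (lines joined with real newlines; '\n' is the two-character PO escape)
    # after [4:-3], replace('\\n"\n"', '\n') and replace('"\n"', '') it becomes:
    #   Project-Id-Version: chromium-browser
    #   Last-Translator: Someone <a@b>
    # and the map()/setdefault() line builds:
    #   self.headers[lang] == {'Project-Id-Version': 'chromium-browser',
    #                          'Last-Translator': 'Someone <a@b>'}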
3020 | 211 | def read_gettext_file(self, filename): | ||
3021 | 212 | fd = codecs.open(filename, "rb", encoding="utf-8") | ||
3022 | 213 | strings = [] | ||
3023 | 214 | while 1: | ||
3024 | 215 | string = self.read_gettext_string(fd) | ||
3025 | 216 | if string is None: | ||
3026 | 217 | break | ||
3027 | 218 | strings.append(string) | ||
3028 | 219 | fd.close() | ||
3029 | 220 | return strings | ||
3030 | 221 | |||
3031 | 222 | def get_langs(self): | ||
3032 | 223 | langs = [] | ||
3033 | 224 | for st in self.translations: | ||
3034 | 225 | for lang in self.translations[st]: | ||
3035 | 226 | if lang not in langs: | ||
3036 | 227 | langs.append(lang) | ||
3037 | 228 | return sorted(langs) | ||
3038 | 229 | |||
3039 | 230 | def export_gettext_file(self, directory, template_name = "desktop_file", lang = None): | ||
3040 | 231 | filename = os.path.join(directory, "%s.pot" % template_name if lang is None else "%s.po" % lang) | ||
3041 | 232 | # if there's already a file with this name, compare its content and only update it | ||
3042 | 233 | # when it's needed | ||
3043 | 234 | update = False | ||
3044 | 235 | if not os.path.exists(filename): | ||
3045 | 236 | update = True | ||
3046 | 237 | else: | ||
3047 | 238 | # compare the strings | ||
3048 | 239 | strings = self.read_gettext_file(filename)[1:] | ||
3049 | 240 | old = sorted(map(lambda x: x['string'][1:-1], strings)) | ||
3050 | 241 | new = sorted(self.translations.keys()) | ||
3051 | 242 | if old != new: | ||
3052 | 243 | update = True | ||
3053 | 244 | if self.verbose: | ||
3054 | 245 | print "strings differ for %s. Update" % filename | ||
3055 | 246 | if not update: | ||
3056 | 247 | # compare string descriptions | ||
3057 | 248 | old = map(lambda x: x['extracted'][1:], | ||
3058 | 249 | sorted(strings, lambda a, b: cmp(a['string'][1:-1], b['string'][1:-1]))) | ||
3059 | 250 | new = map(lambda x: self.get_usage(x), sorted(self.translations.keys())) | ||
3060 | 251 | if old != new: | ||
3061 | 252 | update = True | ||
3062 | 253 | if self.verbose: | ||
3063 | 254 | print "string descriptions differ for %s. Update" % filename | ||
3064 | 255 | if not update and lang is not None: | ||
3065 | 256 | # compare translations | ||
3066 | 257 | old = map(lambda x: x['translation'][1:-1], | ||
3067 | 258 | sorted(strings, lambda a, b: cmp(a['string'][1:-1], b['string'][1:-1]))) | ||
3068 | 259 | new = map(lambda x: self.translations[x][lang] if lang in self.translations[x] else "", | ||
3069 | 260 | sorted(self.translations.keys())) | ||
3070 | 261 | if old != new: | ||
3071 | 262 | update = True | ||
3072 | 263 | if self.verbose: | ||
3073 | 264 | print "translations differ for %s. Update" % filename | ||
3074 | 265 | if not update: | ||
3075 | 266 | return | ||
3076 | 267 | if self.verbose: | ||
3077 | 268 | print "update %s" % filename | ||
3078 | 269 | fd = codecs.open(filename, 'wb', encoding='utf-8') | ||
3079 | 270 | last_translator = None | ||
3080 | 271 | lang_team = None | ||
3081 | 272 | if lang is None: | ||
3082 | 273 | self.mtime = time.time() | ||
3083 | 274 | mtime = None | ||
3084 | 275 | else: | ||
3085 | 276 | mtime = time.time() | ||
3086 | 277 | if lang in self.headers and 'Last-Translator' in self.headers[lang]: | ||
3087 | 278 | last_translator = self.headers[lang]['Last-Translator'] | ||
3088 | 279 | if lang in self.headers and 'Language-Team' in self.headers[lang]: | ||
3089 | 280 | lang_team = self.headers[lang]['Language-Team'] | ||
3090 | 281 | else: | ||
3091 | 282 | lang_team = "%s <%s@li.org>" % (lang, lang) | ||
3092 | 283 | self.write_gettext_header(fd, mtime = mtime, last_translator = last_translator, lang_team = lang_team) | ||
3093 | 284 | for string in sorted(self.translations.keys()): | ||
3094 | 285 | val = self.translations[string][lang] \ | ||
3095 | 286 | if lang is not None and lang in self.translations[string] else None | ||
3096 | 287 | self.write_gettext_string(fd, string, val, usage = self.get_usage(string)) | ||
3097 | 288 | fd.close() | ||
3098 | 289 | self.changed = True | ||
3099 | 290 | |||
3100 | 291 | def export_gettext_files(self, directory): | ||
3101 | 292 | if not os.path.isdir(directory): | ||
3102 | 293 | os.makedirs(directory, 0755) | ||
3103 | 294 | self.export_gettext_file(directory) | ||
3104 | 295 | for lang in self.get_langs(): | ||
3105 | 296 | self.export_gettext_file(directory, lang = lang) | ||
3106 | 297 | |||
3107 | 298 | def import_gettext_files(self, directory): | ||
3108 | 299 | """ Import strings from gettext 'po' files, ignore the 'pot'. | ||
3109 | 300 | Only merge strings matching our desktop file """ | ||
3110 | 301 | assert os.path.isdir(directory) | ||
3111 | 302 | for file in os.listdir(directory): | ||
3112 | 303 | lang, ext = os.path.splitext(file) | ||
3113 | 304 | if ext != '.po': | ||
3114 | 305 | continue | ||
3115 | 306 | for string in self.read_gettext_file(os.path.join(directory, file)): | ||
3116 | 307 | self.merge_gettext_string(string, lang) | ||
3117 | 308 | |||
3118 | 309 | def usage(): | ||
3119 | 310 | appname = sys.argv[0].rpartition('/')[2] | ||
3120 | 311 | print """ | ||
3121 | 312 | Usage: %s [options] --import-desktop master.desktop | ||
3122 | 313 | [--export-gettext to-launchpad] | ||
3123 | 314 | [--import-gettext from-launchpad] | ||
3124 | 315 | [--export-desktop improved.desktop] | ||
3125 | 316 | [--project-name somename] | ||
3126 | 317 | |||
3127 | 318 | Convert a desktop file to Gettext files and back | ||
3128 | 319 | |||
3129 | 320 | options could be: | ||
3130 | 321 | -v | --verbose verbose mode | ||
3131 | 322 | --import-desktop file master desktop file (mandatory) | ||
3132 | 323 | --import-gettext dir GetText files to merge | ||
3133 | 324 | --export-gettext dir merged GetText files | ||
3134 | 325 | --export-desktop file improved desktop file | ||
3135 | 326 | --project-name name project or source package name | ||
3136 | 327 | |||
3137 | 328 | """ % appname | ||
3138 | 329 | |||
3139 | 330 | if '__main__' == __name__: | ||
3140 | 331 | sys.stdout = codecs.getwriter('utf8')(sys.stdout) | ||
3141 | 332 | try: | ||
3142 | 333 | opts, args = getopt.getopt(sys.argv[1:], "dhv", | ||
3143 | 334 | [ "verbose", "project-name=", | ||
3144 | 335 | "import-desktop=", "export-desktop=", | ||
3145 | 336 | "import-gettext=", "export-gettext=" ]) | ||
3146 | 337 | except getopt.GetoptError, err: | ||
3147 | 338 | print str(err) | ||
3148 | 339 | usage() | ||
3149 | 340 | sys.exit(2) | ||
3150 | 341 | |||
3151 | 342 | verbose = False | ||
3152 | 343 | desktop_in = None | ||
3153 | 344 | desktop_out = None | ||
3154 | 345 | gettext_in = None | ||
3155 | 346 | gettext_out = None | ||
3156 | 347 | project_name = "misconfigured-project" | ||
3157 | 348 | for o, a in opts: | ||
3158 | 349 | if o in ("-v", "--verbose"): | ||
3159 | 350 | verbose = True | ||
3160 | 351 | elif o in ("-h", "--help"): | ||
3161 | 352 | usage() | ||
3162 | 353 | sys.exit() | ||
3163 | 354 | elif o == "--project-name": | ||
3164 | 355 | project_name = a | ||
3165 | 356 | elif o == "--import-desktop": | ||
3166 | 357 | desktop_in = a | ||
3167 | 358 | elif o == "--export-desktop": | ||
3168 | 359 | desktop_out = a | ||
3169 | 360 | elif o == "--import-gettext": | ||
3170 | 361 | gettext_in = a | ||
3171 | 362 | elif o == "--export-gettext": | ||
3172 | 363 | gettext_out = a | ||
3173 | 364 | |||
3174 | 365 | if desktop_in is None: | ||
3175 | 366 | print "Error: --import-desktop is mandatory" | ||
3176 | 367 | usage() | ||
3177 | 368 | sys.exit(2) | ||
3178 | 369 | |||
3179 | 370 | df = DesktopFile(desktop_in, src_pkg = project_name, verbose = verbose) | ||
3180 | 371 | if gettext_in is not None: | ||
3181 | 372 | df.import_gettext_files(gettext_in) | ||
3182 | 373 | if gettext_out is not None: | ||
3183 | 374 | df.export_gettext_files(gettext_out) | ||
3184 | 375 | if desktop_out is not None: | ||
3185 | 376 | if desktop_in != desktop_out or df.changed: | ||
3186 | 377 | df.write_desktop(desktop_out) | ||
3187 | 378 | exit(1 if df.changed else 0) | ||
3188 | 0 | 379 | ||
3189 | === added file 'update-inspector.py' | |||
3190 | --- update-inspector.py 1970-01-01 00:00:00 +0000 | |||
3191 | +++ update-inspector.py 2024-02-28 12:58:47 +0000 | |||
3192 | @@ -0,0 +1,149 @@ | |||
3193 | 1 | #!/usr/bin/python | ||
3194 | 2 | # -*- coding: utf-8 -*- | ||
3195 | 3 | |||
3196 | 4 | # (c) 2010, Fabien Tassin <fta@ubuntu.com> | ||
3197 | 5 | |||
3198 | 6 | # Helper to merge the localizedStrings.js strings from WebKit inspector into | ||
3199 | 7 | # the inspector_strings.grd Grit template | ||
3200 | 8 | |||
3201 | 9 | import os, re, sys, codecs | ||
3202 | 10 | from optparse import OptionParser | ||
3203 | 11 | |||
3204 | 12 | class JS2Grit: | ||
3205 | 13 | def __init__(self, grd = None, js = None): | ||
3206 | 14 | self.js_file = None | ||
3207 | 15 | self.js_strings = [] | ||
3208 | 16 | self.grd_file = None | ||
3209 | 17 | self.order = [] | ||
3210 | 18 | self.data = {} | ||
3211 | 19 | self.missing = [] | ||
3212 | 20 | self.obsolete = [] | ||
3213 | 21 | self.merged = [] | ||
3214 | 22 | if grd is not None: | ||
3215 | 23 | self.import_grd(grd) | ||
3216 | 24 | if js is not None: | ||
3217 | 25 | self.import_js(js) | ||
3218 | 26 | |||
3219 | 27 | def xml2js(self, s): | ||
3220 | 28 | ''' | ||
3221 | 29 | '<ph name="ERRORS_COUNT">%1$d<ex>2</ex></ph> errors, <ph name="WARNING_COUNT">%2$d<ex>1</ex></ph> warning' | ||
3222 | 30 | => '%d errors, %d warnings' | ||
3223 | 31 | ''' | ||
3224 | 32 | s2 = re.sub('<ex>.*?</ex>', '', s) | ||
3225 | 33 | s2 = re.sub('<ph name=".*?">%\d+\$(.*?)</ph>', r'%\1', s2) | ||
3226 | 34 | s2 = re.sub('&lt;', '<', s2) | ||
3227 | 35 | s2 = re.sub('&gt;', '>', s2) | ||
3228 | 36 | s2 = re.sub('\'\'\'', '', s2) | ||
3229 | 37 | return re.sub('<ph name=".*?">(.*?)</ph>', r'\1', s2) | ||
3230 | 38 | |||
3231 | 39 | def js2xml(self, s): | ||
3232 | 40 | ''' | ||
3233 | 41 | '%d errors, %d warnings' | ||
3234 | 42 | => '<ph name="XXX">%1$d<ex>XXX</ex></ph> errors, <ph name="XXX">%2$d<ex>XXX</ex></ph> warning' | ||
3235 | 43 | ''' | ||
3236 | 44 | s = re.sub('<', '&lt;', s) | ||
3237 | 45 | s = re.sub('>', '&gt;', s) | ||
3238 | 46 | s = re.sub('^ ', '\'\'\' ', s) | ||
3239 | 47 | phs = [ x for x in re.split(r'(%\d*\.?\d*[dfs])', s) if x.find('%') == 0 and x.find('%%') != 0 ] | ||
3240 | 48 | if len(phs) > 1: | ||
3241 | 49 | phs = re.split(r'(%\d*\.?\d*[dfs])', s) | ||
3242 | 50 | j = 1 | ||
3243 | 51 | for i, part in enumerate(phs): | ||
3244 | 52 | if part.find('%') == 0 and part.find('%%') != 0: | ||
3245 | 53 | phs[i] = '<ph name="XXX">%%%d$%s<ex>XXX</ex></ph>' % (j, part[1:]) | ||
3246 | 54 | j += 1 | ||
3247 | 55 | elif len(phs) == 1: | ||
3248 | 56 | phs = re.split(r'(%\d*\.?\d*[dfs])', s) | ||
3249 | 57 | for i, part in enumerate(phs): | ||
3250 | 58 | if part.find('%') == 0 and part.find('%%') != 0: | ||
3251 | 59 | phs[i] = '<ph name="XXX">%s<ex>XXX</ex></ph>' % part | ||
3252 | 60 | else: | ||
3253 | 61 | return s | ||
3254 | 62 | return ''.join(phs) | ||
3255 | 63 | |||
3256 | 64 | def import_grd(self, file): | ||
3257 | 65 | self.order = [] | ||
3258 | 66 | self.data = {} | ||
3259 | 67 | self.grd_file = file | ||
3260 | 68 | fd = codecs.open(file, 'rb', encoding='utf-8') | ||
3261 | 69 | file = fd.read() | ||
3262 | 70 | for s in re.finditer('<message name="(.*?)" desc="(.*?)">\n\s+(.*?)\n\s+</message>', file, re.S): | ||
3263 | 71 | key = self.xml2js(s.group(3)) | ||
3264 | 72 | self.order.append(key) | ||
3265 | 73 | self.data[key] = { 'code': s.group(1), 'desc': s.group(2), 'string': s.group(3) } | ||
3266 | 74 | fd.close() | ||
3267 | 75 | return self.order, self.data | ||
3268 | 76 | |||
3269 | 77 | def import_js(self, file): | ||
3270 | 78 | self.js_strings = [] | ||
3271 | 79 | self.js_file = file | ||
3272 | 80 | fd = codecs.open(file, 'rb', encoding='utf-16') | ||
3273 | 81 | file = fd.read() | ||
3274 | 82 | for s in re.finditer('localizedStrings\["(.*?)"\] = "(.*?)";', file, re.S): | ||
3275 | 83 | self.js_strings.append(s.group(1)) | ||
3276 | 84 | fd.close() | ||
3277 | 85 | return self.js_strings | ||
3278 | 86 | |||
3279 | 87 | def merge_strings(self): | ||
3280 | 88 | self.merged = [] | ||
3281 | 89 | self.missing = [ s for s in self.js_strings if s not in self.order ] | ||
3282 | 90 | self.obsolete = [ s for s in self.order if s not in self.js_strings ] | ||
3283 | 91 | for s in self.js_strings: | ||
3284 | 92 | if s in self.order: | ||
3285 | 93 | self.merged.append(self.data[s]) | ||
3286 | 94 | else: | ||
3287 | 95 | self.merged.append({ 'code': 'IDS_XXX', 'desc': 'XXX', 'string': self.js2xml(s), 'key': s }) | ||
3288 | 96 | |||
3289 | 97 | def get_new_strings_count(self): | ||
3290 | 98 | return len(self.missing) | ||
3291 | 99 | |||
3292 | 100 | def get_obsolete_strings_count(self): | ||
3293 | 101 | return len(self.obsolete) | ||
3294 | 102 | |||
3295 | 103 | def get_strings_count(self): | ||
3296 | 104 | return len(self.js_strings) | ||
3297 | 105 | |||
3298 | 106 | def export_grd(self, grd): | ||
3299 | 107 | fdi = codecs.open(self.grd_file, 'rb', encoding='utf-8') | ||
3300 | 108 | data = fdi.read() | ||
3301 | 109 | fdi.close() | ||
3302 | 110 | |||
3303 | 111 | fdo = codecs.open(grd, 'wb', encoding='utf-8') | ||
3304 | 112 | |||
3305 | 113 | # copy the header | ||
3306 | 114 | pos = data.find('<messages') | ||
3307 | 115 | pos += data[pos:].find('\n') + 1 | ||
3308 | 116 | fdo.write(data[:pos]) | ||
3309 | 117 | |||
3310 | 118 | # write the merged strings | ||
3311 | 119 | for s in self.merged: | ||
3312 | 120 | if 'key' in s and s['key'] != s['string']: | ||
3313 | 121 | fdo.write(" <!-- XXX: '%s' -->\n" % s['key']) | ||
3314 | 122 | fdo.write(' <message name="%s" desc="%s">\n %s\n </message>\n' % \ | ||
3315 | 123 | (s['code'], s['desc'], s['string'])) | ||
3316 | 124 | # copy the footer | ||
3317 | 125 | pos = data.find('</messages>') | ||
3318 | 126 | pos = data[:pos].rfind('\n') + 1 # back up to the start of the '</messages>' line | ||
3319 | 127 | fdo.write(data[pos:]) | ||
3320 | 128 | fdo.close() | ||
3321 | 129 | |||
3322 | 130 | if '__main__' == __name__: | ||
3323 | 131 | sys.stdout = codecs.getwriter('utf8')(sys.stdout) | ||
3324 | 132 | |||
3325 | 133 | parser = OptionParser(usage = 'Usage: %prog --grd inspector_strings.grd --js localizedStrings.js -o foo.grd') | ||
3326 | 134 | parser.add_option("-j", "--js", dest="js", | ||
3327 | 135 | help="read js strings from FILE", metavar="FILE") | ||
3328 | 136 | parser.add_option("-g", "--grd", dest="grd", | ||
3329 | 137 | help="read grd template from FILE", metavar="FILE") | ||
3330 | 138 | parser.add_option("-o", "--output", dest="output", | ||
3331 | 139 | help="write merged grd template to FILE", metavar="FILE") | ||
3332 | 140 | (options, args) = parser.parse_args() | ||
3333 | 141 | |||
3334 | 142 | if options.grd is None or options.js is None or options.output is None: | ||
3335 | 143 | parser.error("One of --grd, --js or --output is missing") | ||
3336 | 144 | js2grd = JS2Grit(grd = options.grd, js = options.js) | ||
3337 | 145 | print "Found %d strings in the js file" % js2grd.get_strings_count() | ||
3338 | 146 | js2grd.merge_strings() | ||
3339 | 147 | js2grd.export_grd(options.output) | ||
3340 | 148 | print "Merged %d new strings, dropped %d obsolete strings" % \ | ||
3341 | 149 | (js2grd.get_new_strings_count(), js2grd.get_obsolete_strings_count()) | ||
3342 | 0 | 150 | ||
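The heart of the class above is the pair of converters between Grit <ph> placeholders and printf-style codes. Here is a standalone sketch of the grd-to-js direction, with the regexes copied from xml2js (minus the entity handling) and a purely illustrative input string:

    import re

    def xml2js(s):
        # drop the <ex> examples, then unwrap the <ph> wrappers into %-codes
        s = re.sub('<ex>.*?</ex>', '', s)
        s = re.sub('<ph name=".*?">%\d+\$(.*?)</ph>', r'%\1', s)
        return re.sub('<ph name=".*?">(.*?)</ph>', r'\1', s)

    print xml2js('<ph name="ERRORS_COUNT">%1$d<ex>2</ex></ph> errors')
    # -> '%d errors'

js2xml goes the other way, numbering each placeholder (%1$d, %2$s, ...) when a string contains more than one, which is why the split on r'(%\d*\.?\d*[dfs])' appears twice in the method.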
3343 | === added file 'update-pot.sh' | |||
3344 | --- update-pot.sh 1970-01-01 00:00:00 +0000 | |||
3345 | +++ update-pot.sh 2024-02-28 12:58:47 +0000 | |||
3346 | @@ -0,0 +1,87 @@ | |||
3347 | 1 | #!/bin/sh | ||
3348 | 2 | |||
3349 | 3 | # Update the gettext bzr branch (imported by Launchpad Rosetta) | ||
3350 | 4 | # based on a merge of all the templates from the four Chromium | ||
3351 | 5 | # channels. | ||
3352 | 6 | # (c) 2010-2011, Fabien Tassin <fta@ubuntu.com> | ||
3353 | 7 | |||
3354 | 8 | PROJECT=chromium-browser | ||
3355 | 9 | PKG_DIR=/data/bot/chromium-browser.head | ||
3356 | 10 | BIN_DIR=/data/bot/chromium-translations-tools.head | ||
3357 | 11 | OUT_DIR=/data/bot/upstream/chromium-translations.head | ||
3358 | 12 | LPE_DIR=/data/bot/upstream/chromium-translations-exports.head | ||
3359 | 13 | |||
3360 | 14 | SRC_TRUNK_DIR=/data/bot/upstream/chromium-browser.svn/src | ||
3361 | 15 | SRC_DEV_DIR=/data/bot/upstream/chromium-dev.svn/src | ||
3362 | 16 | SRC_BETA_DIR=/data/bot/upstream/chromium-beta.svn/src | ||
3363 | 17 | SRC_STABLE_DIR=/data/bot/upstream/chromium-stable.svn/src | ||
3364 | 18 | |||
3365 | 19 | ###### | ||
3366 | 20 | |||
3367 | 21 | NEW_TEMPLATES="chrome/app/chromium_strings.grd,chrome/app/generated_resources.grd,ui/base/strings/ui_strings.grd,chrome/app/policy/policy_templates.grd,webkit/glue/inspector_strings.grd,webkit/glue/webkit_strings.grd" | ||
3368 | 22 | TEMPLATES="chrome/app/chromium_strings.grd,chrome/app/generated_resources.grd,ui/base/strings/app_strings.grd,chrome/app/policy/policy_templates.grd,webkit/glue/inspector_strings.grd,webkit/glue/webkit_strings.grd" | ||
3369 | 23 | |||
3370 | 24 | OPTS="--map-template-names ui/base/strings/ui_strings.grd=ui/base/strings/app_strings.grd" | ||
3371 | 25 | IMPORT="--import-gettext $OUT_DIR,$LPE_DIR" | ||
3372 | 26 | BRANCH_TRUNK="--import-grit-branch trunk:$SRC_TRUNK_DIR:$NEW_TEMPLATES" | ||
3373 | 27 | BRANCH_DEV="--import-grit-branch dev:$SRC_DEV_DIR:$NEW_TEMPLATES" | ||
3374 | 28 | BRANCH_BETA="--import-grit-branch beta:$SRC_BETA_DIR:$NEW_TEMPLATES" | ||
3375 | 29 | BRANCH_STABLE="--import-grit-branch stable:$SRC_STABLE_DIR:$TEMPLATES" | ||
3376 | 30 | |||
3377 | 31 | BRANCHES="$BRANCH_TRUNK $BRANCH_DEV $BRANCH_BETA $BRANCH_STABLE" | ||
3378 | 32 | |||
3379 | 33 | (cd $LPE_DIR ; bzr pull -q) | ||
3380 | 34 | (cd $BIN_DIR ; bzr pull -q) | ||
3381 | 35 | |||
3382 | 36 | cd $SRC_TRUNK_DIR | ||
3383 | 37 | $BIN_DIR/chromium2pot.py $BRANCHES $IMPORT $OPTS --export-gettext $OUT_DIR $NEW_TEMPLATES | ||
3384 | 38 | RET=$? | ||
3385 | 39 | cd $OUT_DIR | ||
3386 | 40 | set -e | ||
3387 | 41 | for f in */*.pot */*.po ; do | ||
3388 | 42 | msgfmt -c $f | ||
3389 | 43 | done | ||
3390 | 44 | set +e | ||
3391 | 45 | rm -f messages.mo | ||
3392 | 46 | |||
3393 | 47 | # desktop file | ||
3394 | 48 | DF_DIR="desktop_file" | ||
3395 | 49 | DF=$PROJECT.desktop | ||
3396 | 50 | DF_ARGS="-v --import-desktop $DF_DIR/$DF --project-name $PROJECT" | ||
3397 | 51 | if [ ! -d $DF_DIR ] ; then | ||
3398 | 52 | mkdir $DF_DIR | ||
3399 | 53 | RET=1 | ||
3400 | 54 | fi | ||
3401 | 55 | if [ ! -e $DF_DIR/$DF ] ; then # no desktop file yet | ||
3402 | 56 | cp -va $PKG_DIR/debian/$DF $DF_DIR | ||
3403 | 57 | $BIN_DIR/desktop2gettext.py $DF_ARGS --export-gettext $DF_DIR | ||
3404 | 58 | bzr add $DF_DIR | ||
3405 | 59 | RET=1 | ||
3406 | 60 | fi | ||
3407 | 61 | if [ -d $LPE_DIR/$DF_DIR ] ; then | ||
3408 | 62 | $BIN_DIR/desktop2gettext.py $DF_ARGS --import-gettext $LPE_DIR/$DF_DIR --export-gettext $DF_DIR --export-desktop $DF_DIR/$DF | ||
3409 | 63 | R=$? | ||
3410 | 64 | if [ $R = 1 ] ; then | ||
3411 | 65 | RET=1 | ||
3412 | 66 | fi | ||
3413 | 67 | diff -u $PKG_DIR/debian/$DF $DF_DIR/$DF | ||
3414 | 68 | fi | ||
3415 | 69 | |||
3416 | 70 | if [ $RET = 0 ] ; then | ||
3417 | 71 | # no changes | ||
3418 | 72 | exit 0 | ||
3419 | 73 | fi | ||
3420 | 74 | |||
3421 | 75 | REV=$(svn info $SRC_TRUNK_DIR | grep 'Last Changed Rev:' | cut -d' ' -f4) | ||
3422 | 76 | VERSION=$(cut -d= -f2 $SRC_TRUNK_DIR/chrome/VERSION | sed -e 's,$,.,' | tr -d '\n' | sed -e 's/.$//') | ||
3423 | 77 | MSG="Strings update for $VERSION r$REV" | ||
3424 | 78 | |||
3425 | 79 | if [ "Z$1" != Z ] ; then | ||
3426 | 80 | MSG=$1 | ||
3427 | 81 | fi | ||
3428 | 82 | |||
3429 | 83 | cd $OUT_DIR | ||
3430 | 84 | bzr add | ||
3431 | 85 | bzr commit -q -m "* $MSG" | ||
3432 | 86 | bzr push -q > /dev/null | ||
3433 | 87 | exit 0 |
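For clarity, the VERSION pipeline near the end of the script (append a dot to each value, join the lines, strip the trailing dot) is equivalent to the following Python 2 sketch. It assumes chrome/VERSION holds one KEY=VALUE pair per line (MAJOR=, MINOR=, BUILD=, PATCH=), which is what the cut -d= -f2 step implies; the example version and revision are made up.

    def read_version(path):
        # take the right-hand side of each KEY=VALUE line, in file order
        values = [line.rstrip('\n').split('=', 1)[1]
                  for line in open(path) if '=' in line]
        return '.'.join(values)

    version = read_version('chrome/VERSION')  # e.g. '13.0.782.1'
    print "Strings update for %s r%s" % (version, 85364)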