# ***** BEGIN GPL LICENSE BLOCK *****
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ***** END GPL LICENSE BLOCK *****

# Some misc utilities...
24
import concurrent.futures
30
from bl_i18n_utils import settings
33
PO_COMMENT_PREFIX = settings.PO_COMMENT_PREFIX
34
PO_COMMENT_PREFIX_MSG = settings.PO_COMMENT_PREFIX_MSG
35
PO_COMMENT_PREFIX_SOURCE = settings.PO_COMMENT_PREFIX_SOURCE
36
PO_COMMENT_PREFIX_SOURCE_CUSTOM = settings.PO_COMMENT_PREFIX_SOURCE_CUSTOM
37
PO_COMMENT_FUZZY = settings.PO_COMMENT_FUZZY
38
PO_MSGCTXT = settings.PO_MSGCTXT
39
PO_MSGID = settings.PO_MSGID
40
PO_MSGSTR = settings.PO_MSGSTR
42
PO_HEADER_KEY = settings.PO_HEADER_KEY
43
PO_HEADER_COMMENT = settings.PO_HEADER_COMMENT
44
PO_HEADER_COMMENT_COPYRIGHT = settings.PO_HEADER_COMMENT_COPYRIGHT
45
PO_HEADER_MSGSTR = settings.PO_HEADER_MSGSTR
47
# Hash algorithm name used to fingerprint sources for the parser cache.
# NOTE: this was assigned twice from the same setting; the redundant
# second assignment has been dropped.
PARSER_CACHE_HASH = settings.PARSER_CACHE_HASH

WARN_NC = settings.WARN_MSGID_NOT_CAPITALIZED
NC_ALLOWED = settings.WARN_MSGID_NOT_CAPITALIZED_ALLOWED
54
##### Misc Utils #####
57
return s.rstrip("\n\r")
60
# Matches a "<path>:<line-number>" source reference (no whitespace allowed).
_valid_po_path_re = re.compile(r"^\S+:[0-9]+$")


def is_valid_po_path(path):
    """Return True when *path* looks like a 'file:line' PO source reference."""
    return _valid_po_path_re.match(path) is not None
65
def get_best_similar(data):
67
key, use_similar, similar_pool = data
69
# try to find some close key in existing messages...
70
# Optimized code inspired by difflib.get_close_matches (as we only need the best match).
71
# We also consider to never make a match when len differs more than -len_key / 2, +len_key * 2 (which is valid
72
# as long as use_similar is not below ~0.7).
73
# Gives an overall ~20% of improvement!
74
#tmp = difflib.get_close_matches(key[1], similar_pool, n=1, cutoff=use_similar)
78
s = difflib.SequenceMatcher()
81
min_len = len_key // 2
83
for x in similar_pool:
84
if min_len < len(x) < max_len:
86
if s.real_quick_ratio() >= use_similar and s.quick_ratio() >= use_similar:
88
if sratio >= use_similar:
96
Internal representation of a message.
98
__slots__ = ("msgctxt_lines", "msgid_lines", "msgstr_lines", "comment_lines", "is_fuzzy", "is_commented")
100
def __init__(self, msgctxt_lines=[], msgid_lines=[], msgstr_lines=[], comment_lines=[],
101
is_commented=False, is_fuzzy=False):
102
self.msgctxt_lines = msgctxt_lines
103
self.msgid_lines = msgid_lines
104
self.msgstr_lines = msgstr_lines
105
self.comment_lines = comment_lines
106
self.is_fuzzy = is_fuzzy
107
self.is_commented = is_commented
109
def _get_msgctxt(self):
110
return ("".join(self.msgctxt_lines)).replace("\\n", "\n")
111
def _set_msgctxt(self, ctxt):
112
self.msgctxt_lines = [ctxt]
113
msgctxt = property(_get_msgctxt, _set_msgctxt)
115
def _get_msgid(self):
116
return ("".join(self.msgid_lines)).replace("\\n", "\n")
117
def _set_msgid(self, msgid):
118
self.msgid_lines = [msgid]
119
msgid = property(_get_msgid, _set_msgid)
121
def _get_msgstr(self):
122
return ("".join(self.msgstr_lines)).replace("\\n", "\n")
123
def _set_msgstr(self, msgstr):
124
self.msgstr_lines = [msgstr]
125
msgstr = property(_get_msgstr, _set_msgstr)
127
def _get_sources(self):
    # Source references may carry either the standard or the custom prefix;
    # strip the prefix, returning standard references first, then custom ones.
    skip_std = len(PO_COMMENT_PREFIX_SOURCE)
    skip_custom = len(PO_COMMENT_PREFIX_SOURCE_CUSTOM)
    std = [l[skip_std:] for l in self.comment_lines if l.startswith(PO_COMMENT_PREFIX_SOURCE)]
    custom = [l[skip_custom:] for l in self.comment_lines if l.startswith(PO_COMMENT_PREFIX_SOURCE_CUSTOM)]
    return std + custom
132
def _set_sources(self, sources):
133
# list.copy() is not available in py3.2 ...
135
cmmlines[:] = self.comment_lines
137
if l.startswith(PO_COMMENT_PREFIX_SOURCE) or l.startswith(PO_COMMENT_PREFIX_SOURCE_CUSTOM):
138
self.comment_lines.remove(l)
140
lines_src_custom = []
142
if is_valid_po_path(src):
143
lines_src.append(PO_COMMENT_PREFIX_SOURCE + src)
145
lines_src_custom.append(PO_COMMENT_PREFIX_SOURCE_CUSTOM + src)
146
self.comment_lines += lines_src_custom + lines_src
147
sources = property(_get_sources, _set_sources)
149
def _get_is_tooltip(self):
150
# XXX For now, we assume that all messages > 30 chars are tooltips!
151
return len(self.msgid) > 30
152
is_tooltip = property(_get_is_tooltip)
154
def normalize(self, max_len=80):
156
Normalize this message, call this before exporting it...
157
Currently normalize msgctxt, msgid and msgstr lines to given max_len (if below 1, make them single line).
159
max_len -= 2 # The two quotes!
160
# We do not need the full power of textwrap... We just split first at escaped new lines, then into each line
161
# if needed... No word splitting, nor fancy spaces handling!
162
def _wrap(text, max_len, init_len):
163
if len(text) + init_len < max_len:
165
lines = text.splitlines()
172
cur_len += len(w) + 1
173
if cur_len > (max_len - 1) and tmp:
174
ret.append(" ".join(tmp) + " ")
179
ret.append(" ".join(tmp))
182
self.msgctxt_lines = self.msgctxt.replace("\n", "\\n\n").splitlines()
183
self.msgid_lines = self.msgid.replace("\n", "\\n\n").splitlines()
184
self.msgstr_lines = self.msgstr.replace("\n", "\\n\n").splitlines()
186
init_len = len(PO_MSGCTXT) + 1
187
if self.is_commented:
188
init_len += len(PO_COMMENT_PREFIX_MSG)
189
self.msgctxt_lines = _wrap(self.msgctxt.replace("\n", "\\n\n"), max_len, init_len)
191
init_len = len(PO_MSGID) + 1
192
if self.is_commented:
193
init_len += len(PO_COMMENT_PREFIX_MSG)
194
self.msgid_lines = _wrap(self.msgid.replace("\n", "\\n\n"), max_len, init_len)
196
init_len = len(PO_MSGSTR) + 1
197
if self.is_commented:
198
init_len += len(PO_COMMENT_PREFIX_MSG)
199
self.msgstr_lines = _wrap(self.msgstr.replace("\n", "\\n\n"), max_len, init_len)
204
Internal representation of messages for one language (iso code), with additional stats info.
207
# Avoid parsing again!
208
# Keys should be (pseudo) file-names, values are tuples (hash, I18nMessages)
209
# Note: only used by po parser currently!
212
def __init__(self, iso="__POT__", kind=None, key=None, src=None):
214
self.msgs = self._new_messages()
215
self.trans_msgs = set()
216
self.fuzzy_msgs = set()
217
self.comm_msgs = set()
218
self.ttip_msgs = set()
219
self.contexts = set()
221
self.nbr_trans_msgs = 0
223
self.nbr_trans_ttips = 0
224
self.nbr_comm_msgs = 0
226
self.nbr_trans_signs = 0
227
self.parsing_errors = []
229
self.parse(kind, key, src)
234
return getattr(collections, 'OrderedDict', dict)()
237
def gen_empty_messages(cls, iso, blender_ver, blender_rev, time, year, default_copyright=True):
238
"""Generate an empty I18nMessages object (only header is present!)."""
239
msgstr = PO_HEADER_MSGSTR.format(blender_ver=str(blender_ver), blender_rev=int(blender_rev),
240
time=str(time), iso=str(iso))
242
if default_copyright:
243
comment = PO_HEADER_COMMENT_COPYRIGHT.format(year=str(year))
244
comment = comment + PO_HEADER_COMMENT
247
msgs.msgs[PO_HEADER_KEY] = I18nMessage([], [""], [msgstr], [comment], False, True)
252
def normalize(self, max_len=80):
253
for msg in self.msgs.values():
254
msg.normalize(max_len)
256
def merge(self, replace=False, *args):
259
def update(self, ref, use_similar=0.75, keep_old_commented=True):
261
Update this I18nMessage with the ref one. Translations from ref are never used. Source comments from ref
262
completely replace current ones. If use_similar is not 0.0, it will try to match new messages in ref with an
263
existing one. Messages no more found in ref will be marked as commented if keep_old_commented is True,
267
if use_similar > 0.0:
268
for key, msg in self.msgs.items():
269
if msg.msgstr: # No need to waste time with void translations!
270
similar_pool.setdefault(key[1], set()).add(key)
272
msgs = self._new_messages().fromkeys(ref.msgs.keys())
273
ref_keys = set(ref.msgs.keys())
274
org_keys = set(self.msgs.keys())
275
new_keys = ref_keys - org_keys
276
removed_keys = org_keys - ref_keys
278
# First process keys present in both org and ref messages.
279
for key in ref_keys - new_keys:
280
msg, refmsg = self.msgs[key], ref.msgs[key]
281
msg.sources = refmsg.sources
282
msg.is_commented = refmsg.is_commented
283
msg.is_fuzzy = refmsg.is_fuzzy
286
# Next process new keys.
287
if use_similar > 0.0:
288
with concurrent.futures.ProcessPoolExecutor() as exctr:
289
for key, msgid in exctr.map(get_best_similar,
290
tuple((nk, use_similar, tuple(similar_pool.keys())) for nk in new_keys)):
292
# Try to get the same context, else just get one...
293
skey = (key[0], msgid)
294
if skey not in similar_pool[msgid]:
295
skey = tuple(similar_pool[msgid])[0]
296
# We keep org translation and comments, and mark message as fuzzy.
297
msg, refmsg = copy.deepcopy(self.msgs[skey]), ref.msgs[key]
298
msg.msgctxt = refmsg.msgctxt
299
msg.msgid = refmsg.msgid
300
msg.sources = refmsg.sources
302
msg.is_commented = refmsg.is_commented
305
msgs[key] = ref.msgs[key]
308
msgs[key] = ref.msgs[key]
310
# Add back all "old" and already commented messages as commented ones, if required
311
# (and translation was not void!).
312
if keep_old_commented:
313
for key in removed_keys:
314
msgs[key] = self.msgs[key]
315
msgs[key].is_commented = True
316
msgs[key].sources = []
318
# Special 'meta' message, change project ID version and pot creation date...
321
markers = ("Project-Id-Version:", "POT-Creation-Date:")
323
for rl in ref.msgs[key].msgstr_lines:
324
if rl.startswith(mrk):
325
for idx, ml in enumerate(msgs[key].msgstr_lines):
326
if ml.startswith(mrk):
327
rep.append((idx, rl))
329
msgs[key].msgstr_lines[idx] = txt
331
# And finalize the update!
334
def update_info(self):
335
self.trans_msgs.clear()
336
self.fuzzy_msgs.clear()
337
self.comm_msgs.clear()
338
self.ttip_msgs.clear()
339
self.contexts.clear()
341
self.nbr_trans_signs = 0
342
for key, msg in self.msgs.items():
343
if key == PO_HEADER_KEY:
346
self.comm_msgs.add(key)
349
self.trans_msgs.add(key)
351
self.fuzzy_msgs.add(key)
353
self.ttip_msgs.add(key)
354
self.contexts.add(key[0])
355
self.nbr_signs += len(msg.msgid)
356
self.nbr_trans_signs += len(msg.msgstr)
357
self.nbr_msgs = len(self.msgs)
358
self.nbr_trans_msgs = len(self.trans_msgs)
359
self.nbr_ttips = len(self.ttip_msgs)
360
self.nbr_trans_ttips = len(self.ttip_msgs & self.trans_msgs)
361
self.nbr_comm_msgs = len(self.comm_msgs)
363
def print_stats(self, prefix=""):
365
Print out some stats about an I18nMessages object.
370
lvl_trans_ttips = 0.0
371
lvl_ttips_in_trans = 0.0
372
if self.nbr_msgs > 0:
373
lvl = float(self.nbr_trans_msgs) / float(self.nbr_msgs)
374
lvl_ttips = float(self.nbr_ttips) / float(self.nbr_msgs)
375
lvl_comm = float(self.nbr_comm_msgs) / float(self.nbr_msgs + self.nbr_comm_msgs)
376
if self.nbr_ttips > 0:
377
lvl_trans_ttips = float(self.nbr_trans_ttips) / float(self.nbr_ttips)
378
if self.nbr_trans_msgs > 0:
379
lvl_ttips_in_trans = float(self.nbr_trans_ttips) / float(self.nbr_trans_msgs)
382
"{:>6.1%} done! ({} translated messages over {}).\n"
383
"".format(lvl, self.nbr_trans_msgs, self.nbr_msgs),
384
"{:>6.1%} of messages are tooltips ({} over {}).\n"
385
"".format(lvl_ttips, self.nbr_ttips, self.nbr_msgs),
386
"{:>6.1%} of tooltips are translated ({} over {}).\n"
387
"".format(lvl_trans_ttips, self.nbr_trans_ttips, self.nbr_ttips),
388
"{:>6.1%} of translated messages are tooltips ({} over {}).\n"
389
"".format(lvl_ttips_in_trans, self.nbr_trans_ttips, self.nbr_trans_msgs),
390
"{:>6.1%} of messages are commented ({} over {}).\n"
391
"".format(lvl_comm, self.nbr_comm_msgs, self.nbr_comm_msgs + self.nbr_msgs),
392
"This translation is currently made of {} signs.\n".format(self.nbr_trans_signs))
393
print(prefix.join(lines))
395
def parse(self, kind, key, src):
396
del self.parsing_errors[:]
397
self.parsers[kind](self, src, key)
398
if self.parsing_errors:
399
print("WARNING! Errors while parsing {}:".format(key))
400
for line, error in self.parsing_errors:
401
print(" Around line {}: {}".format(line, error))
402
print("The parser solved them as well as it could...")
405
def parse_messages_from_po(self, src, key=None):
408
Note: This function will silently "arrange" mis-formated entries, thus using afterward write_messages() should
409
always produce a po-valid file, though not correct!
411
reading_msgid = False
412
reading_msgstr = False
413
reading_msgctxt = False
414
reading_comment = False
423
def finalize_message(self, line_nr):
424
nonlocal reading_msgid, reading_msgstr, reading_msgctxt, reading_comment
425
nonlocal is_commented, is_fuzzy, msgid_lines, msgstr_lines, msgctxt_lines, comment_lines
427
msgid = "".join(msgid_lines)
428
msgctxt = "".join(msgctxt_lines)
429
msgkey = (msgctxt, msgid)
431
# Never allow overriding existing msgid/msgctxt pairs!
432
if msgkey in self.msgs:
433
self.parsing_errors.append((line_nr, "{} context/msgid is already in current messages!".format(msgkey)))
436
self.msgs[msgkey] = I18nMessage(msgctxt_lines, msgid_lines, msgstr_lines, comment_lines,
437
is_commented, is_fuzzy)
439
# Let's clean up and get ready for next message!
440
reading_msgid = reading_msgstr = reading_msgctxt = reading_comment = False
441
is_commented = is_fuzzy = False
447
# try to use src as file name...
448
if os.path.exists(src):
451
with open(src, 'r', encoding="utf-8") as f:
454
# Try to use values from cache!
456
if key and key in self._parser_cache:
457
old_hash, msgs = self._parser_cache[key]
459
curr_hash = hashlib.new(PARSER_CACHE_HASH, src.encode()).digest()
460
if curr_hash == old_hash:
461
self.msgs = copy.deepcopy(msgs) # we might edit self.msgs!
464
_comm_msgctxt = PO_COMMENT_PREFIX_MSG + PO_MSGCTXT
465
_len_msgctxt = len(PO_MSGCTXT + '"')
466
_len_comm_msgctxt = len(_comm_msgctxt + '"')
467
_comm_msgid = PO_COMMENT_PREFIX_MSG + PO_MSGID
468
_len_msgid = len(PO_MSGID + '"')
469
_len_comm_msgid = len(_comm_msgid + '"')
470
_comm_msgstr = PO_COMMENT_PREFIX_MSG + PO_MSGSTR
471
_len_msgstr = len(PO_MSGSTR + '"')
472
_len_comm_msgstr = len(_comm_msgstr + '"')
473
_len_comm_str = len(PO_COMMENT_PREFIX_MSG + '"')
475
# Main loop over all lines in src...
476
for line_nr, line in enumerate(src.splitlines()):
479
finalize_message(self, line_nr)
482
elif line.startswith(PO_MSGCTXT) or line.startswith(_comm_msgctxt):
483
reading_comment = False
485
if line.startswith(PO_COMMENT_PREFIX_MSG):
487
line = line[_len_comm_msgctxt:-1]
489
line = line[_len_msgctxt:-1]
490
msgctxt_lines.append(line)
492
elif line.startswith(PO_MSGID) or line.startswith(_comm_msgid):
493
reading_comment = False
495
if line.startswith(PO_COMMENT_PREFIX_MSG):
496
if not is_commented and reading_ctxt:
497
self.parsing_errors.append((line_nr, "commented msgid following regular msgctxt"))
499
line = line[_len_comm_msgid:-1]
501
line = line[_len_msgid:-1]
503
msgid_lines.append(line)
505
elif line.startswith(PO_MSGSTR) or line.startswith(_comm_msgstr):
506
if not reading_msgid:
507
self.parsing_errors.append((line_nr, "msgstr without a prior msgid"))
509
reading_msgid = False
510
reading_msgstr = True
511
if line.startswith(PO_COMMENT_PREFIX_MSG):
512
line = line[_len_comm_msgstr:-1]
514
self.parsing_errors.append((line_nr, "commented msgstr following regular msgid"))
516
line = line[_len_msgstr:-1]
518
self.parsing_errors.append((line_nr, "regular msgstr following commented msgid"))
519
msgstr_lines.append(line)
521
elif line.startswith(PO_COMMENT_PREFIX[0]):
522
if line.startswith(PO_COMMENT_PREFIX_MSG):
525
msgctxt_lines.append(line[_len_comm_str:-1])
527
msgctxt_lines.append(line)
528
self.parsing_errors.append((line_nr, "commented string while reading regular msgctxt"))
531
msgid_lines.append(line[_len_comm_str:-1])
533
msgid_lines.append(line)
534
self.parsing_errors.append((line_nr, "commented string while reading regular msgid"))
537
msgstr_lines.append(line[_len_comm_str:-1])
539
msgstr_lines.append(line)
540
self.parsing_errors.append((line_nr, "commented string while reading regular msgstr"))
542
if reading_msgctxt or reading_msgid or reading_msgstr:
543
self.parsing_errors.append((line_nr,
544
"commented string within msgctxt, msgid or msgstr scope, ignored"))
545
elif line.startswith(PO_COMMENT_FUZZY):
548
comment_lines.append(line)
549
reading_comment = True
553
msgctxt_lines.append(line[1:-1])
555
msgid_lines.append(line[1:-1])
558
msgstr_lines.append(line)
560
self.parsing_errors.append((line_nr, "regular string outside msgctxt, msgid or msgstr scope"))
561
#self.parsing_errors += (str(comment_lines), str(msgctxt_lines), str(msgid_lines), str(msgstr_lines))
563
# If no final empty line, last message is not finalized!
565
finalize_message(self, line_nr)
570
curr_hash = hashlib.new(PARSER_CACHE_HASH, src.encode()).digest()
571
self._parser_cache[key] = (curr_hash, self.msgs)
573
def write(self, kind, dest):
574
self.writers[kind](self, dest)
576
def write_messages_to_po(self, fname):
578
Write messages in fname po file.
580
self.normalize(max_len=0) # No wrapping for now...
581
with open(fname, 'w', encoding="utf-8") as f:
582
for msg in self.msgs.values():
583
f.write("\n".join(msg.comment_lines))
584
# Only mark as fuzzy if msgstr is not empty!
585
if msg.is_fuzzy and msg.msgstr:
586
f.write("\n" + PO_COMMENT_FUZZY)
587
_p = PO_COMMENT_PREFIX_MSG if msg.is_commented else ""
588
_pmsgctxt = _p + PO_MSGCTXT
589
_pmsgid = _p + PO_MSGID
590
_pmsgstr = _p + PO_MSGSTR
593
if len(msg.msgctxt_lines) > 1:
595
"\n" + _pmsgctxt + "\"\"\n" + _p + "\"",
596
("\"\n" + _p + "\"").join(msg.msgctxt_lines),
600
chunks += ["\n" + _pmsgctxt + "\"" + msg.msgctxt + "\""]
601
if len(msg.msgid_lines) > 1:
603
"\n" + _pmsgid + "\"\"\n" + _p + "\"",
604
("\"\n" + _p + "\"").join(msg.msgid_lines),
608
chunks += ["\n" + _pmsgid + "\"" + msg.msgid + "\""]
609
if len(msg.msgstr_lines) > 1:
611
"\n" + _pmsgstr + "\"\"\n" + _p + "\"",
612
("\"\n" + _p + "\"").join(msg.msgstr_lines),
616
chunks += ["\n" + _pmsgstr + "\"" + msg.msgstr + "\""]
618
f.write("".join(chunks))
621
"PO": parse_messages_from_po,
622
# "PYTUPLE": parse_messages_from_pytuple,
626
"PO": write_messages_to_po,
627
#"PYDICT": write_messages_to_pydict,
633
Internal representation of a whole translation set.
636
def __init__(self, src):
640
def update_info(self):
644
self.lvl_trans_ttips = 0.0
645
self.lvl_ttips_in_trans = 0.0
648
self.nbr_trans_signs = 0
649
self.contexts = set()
651
if TEMPLATE_ISO_ID in self.trans:
652
self.nbr_trans = len(self.trans) - 1
653
self.nbr_signs = self.trans[TEMPLATE_ISO_ID].nbr_signs
655
self.nbr_trans = len(self.trans)
656
for iso, msgs in self.trans.items():
658
if msgs.nbr_msgs > 0:
659
self.lvl += float(msgs.nbr_trans_msgs) / float(msgs.nbr_msgs)
660
self.lvl_ttips += float(msgs.nbr_ttips) / float(msgs.nbr_msgs)
661
self.lvl_comm += float(msgs.nbr_comm_msgs) / float(msgs.nbr_msgs + msgs.nbr_comm_msgs)
662
if msgs.nbr_ttips > 0:
663
self.lvl_trans_ttips = float(msgs.nbr_trans_ttips) / float(msgs.nbr_ttips)
664
if msgs.nbr_trans_msgs > 0:
665
self.lvl_ttips_in_trans = float(msgs.nbr_trans_ttips) / float(msgs.nbr_trans_msgs)
666
if self.nbr_signs == 0:
667
self.nbr_signs = msgs.nbr_signs
668
self.nbr_trans_signs += msgs.nbr_trans_signs
669
self.contexts |= msgs.contexts
671
def print_stats(self, prefix="", print_msgs=True):
673
Print out some stats about an I18n object.
674
If print_msgs is True, it will also print all its translations' stats.
677
msgs_prefix = prefix + " "
678
for key, msgs in self.trans:
679
if key == TEMPLATE_ISO_ID:
681
print(prefix + key + ":")
682
msgs.print_stats(prefix=msgs_prefix)
685
nbr_contexts = len(self.contexts - {CONTEXT_DEFAULT})
686
if nbr_contexts != 1:
687
if nbr_contexts == 0:
693
"Average stats for all {} translations:\n".format(self.nbr_trans),
694
" {:>6.1%} done!\n".format(self.lvl / self.nbr_trans),
695
" {:>6.1%} of messages are tooltips.\n".format(self.lvl_ttips / self.nbr_trans),
696
" {:>6.1%} of tooltips are translated.\n".format(self.lvl_trans_ttips / self.nbr_trans),
697
" {:>6.1%} of translated messages are tooltips.\n".format(self.lvl_ttips_in_trans / self.nbr_trans),
698
" {:>6.1%} of messages are commented.\n".format(self.lvl_comm / self.nbr_trans),
699
" The org msgids are currently made of {} signs.\n".format(self.nbr_signs),
700
" All processed translations are currently made of {} signs.\n".format(self.nbr_trans_signs),
701
" {} specific context{} present:\n {}\n"
702
"".format(self.nbr_contexts, _ctx_txt, "\n ".join(self.contexts - {CONTEXT_DEFAULT})),
704
print(prefix.join(lines))
709
#def parse_messages_from_pytuple(self, src, key=None):
711
#Returns a dict of tuples similar to the one returned by parse_messages_from_po (one per language, plus a 'pot'
712
#one keyed as '__POT__').
714
## src may be either a string to be interpreted as py code, or a real tuple!
715
#if isinstance(src, str):
719
#if key and key in _parser_cache:
720
#old_hash, ret = _parser_cache[key]
722
#curr_hash = hashlib.new(PARSER_CACHE_HASH, str(src).encode()).digest()
723
#if curr_hash == old_hash:
726
#pot = new_messages()
727
#states = gen_states()
729
#ret = {"__POT__": (pot, states, stats)}
732
#messages[msgkey] = gen_message(msgid_lines, msgstr_lines, comment_lines, msgctxt_lines)
733
#pot[key] = gen_message(msgid_lines=[key[1]], msgstr_lines=[
734
#for lang, trans, (is_fuzzy, comments) in msg[2:]:
735
#if trans and not is_fuzzy:
736
#i18n_dict.setdefault(lang, dict())[key] = trans
741
#curr_hash = hashlib.new(PARSER_CACHE_HASH, str(src).encode()).digest()
742
#_parser_cache[key] = (curr_hash, val)
b'\\ No newline at end of file'