kallithea/scripts/i18n_utils.py @ af7b367f6b5a (5.9 KiB, text/x-python)
db: introduce constraint ensuring no duplicate (reviewer, pullrequest) combinations
A reviewer should only be added once to a review.
Previously, this was not enforced by the database itself, although the
controller tried not to add duplicate reviewers. But there was no hard
guarantee: for example, simultaneous addition of the same reviewer to the
same review by the review owner and an admin, or a framework bug that sends
the same request twice, could still create a duplicate entry. Additionally,
code changes (e.g. a new API) could introduce bugs at the controller level.
Existing production databases were found to contain such duplicate entries.
Nevertheless, as the code displaying reviewers in a pull request filtered
out duplicates, this never showed in the UI and was never a 'real' problem.
Add a UniqueConstraint in the database to prevent such entries, with a
database migration step that will first find and remove existing duplicates.
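
For illustration, a minimal sketch of such a constraint and of the duplicate
cleanup a migration step could perform, assuming a SQLAlchemy reviewer model
along the lines of Kallithea's (the class and column names here are
illustrative, not necessarily the exact schema):

    from sqlalchemy import Column, ForeignKey, Integer, UniqueConstraint, func
    from sqlalchemy.ext.declarative import declarative_base

    Base = declarative_base()

    class PullRequestReviewer(Base):
        # illustrative model, not the exact Kallithea schema
        __tablename__ = 'pull_request_reviewers'
        __table_args__ = (
            # reject any second (reviewer, pull request) row at the database level
            UniqueConstraint('user_id', 'pull_request_id'),
        )
        pull_request_reviewers_id = Column(Integer, primary_key=True)
        user_id = Column(Integer, ForeignKey('users.user_id'))
        pull_request_id = Column(Integer, ForeignKey('pull_requests.pull_request_id'))

    def remove_duplicate_reviewers(session):
        # migration helper sketch: for each (reviewer, pull request) group,
        # keep the row with the lowest primary key and delete the rest
        keep_ids = session.query(
            func.min(PullRequestReviewer.pull_request_reviewers_id)
        ).group_by(
            PullRequestReviewer.user_id,
            PullRequestReviewer.pull_request_id,
        )
        session.query(PullRequestReviewer).filter(
            ~PullRequestReviewer.pull_request_reviewers_id.in_(keep_ids)
        ).delete(synchronize_session=False)

Note that the unique constraint can only be added after the cleanup has run:
creating a unique index over existing duplicate rows would fail.
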
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.

from __future__ import print_function

import os
import re
import shutil
import subprocess
import tempfile

do_debug = False  # set from scripts/i18n --debug


def debug(*args, **kwargs):
    if do_debug:
        print(*args, **kwargs)


def runcmd(cmd, *args, **kwargs):
    debug('... Executing command: %s' % ' '.join(cmd))
    subprocess.check_call(cmd, *args, **kwargs)
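

# Header comment lines injected by gettext tooling (template boilerplate,
# "FIRST AUTHOR" placeholders, fuzzy markers) that should not count as
# differences when .po files are compared.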
header_comment_strip_re = re.compile(r'''
    ^
    [#][ ]Translations[ ]template[ ]for[ ]Kallithea[.] \n
    |
    ^
    [#][ ]FIRST[ ]AUTHOR[ ]<EMAIL@ADDRESS>,[ ]\d+[.] \n
    (?:[#] \n)?
    |
    ^
    (?:[#] \n)?
    [#],[ ]fuzzy \n
    |
    ^
    [#][ ][#],[ ]fuzzy \n
    ''',
    re.MULTILINE|re.VERBOSE)
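
# Volatile header fields (creation/revision dates, translator and generator
# info) that legitimately change on every msgmerge or Weblate run and would
# only produce diff noise.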
header_normalize_re = re.compile(r'''
    ^ "
    (POT-Creation-Date|PO-Revision-Date|Last-Translator|Language-Team|X-Generator|Generated-By|Project-Id-Version):
    [ ][^\\]*\\n
    " \n
    ''',
    re.MULTILINE|re.IGNORECASE|re.VERBOSE)


def _normalize_po(raw_content):
    r"""
    >>> print(_normalize_po(r'''
    ... # header comment
    ...
    ...
    ... # comment before header
    ... msgid ""
    ... msgstr "yada"
    ... "POT-Creation-Date: 2019-05-04 21:13+0200\n"
    ... "MIME-Version: "
    ... "1.0\n"
    ... "Last-Translator: Jabba"
    ... "the Hutt\n"
    ... "X-Generator: Weblate 1.2.3\n"
    ...
    ... # comment, but not in header
    ... msgid "None"
    ... msgstr "Ingen"
    ...
    ...
    ... line 2
    ... # third comment
    ...
    ... msgid "Special"
    ... msgstr ""
    ...
    ... msgid "Specialist"
    ... # odd comment
    ... msgstr ""
    ... "Expert"
    ...
    ... # crazy fuzzy auto translation by msgmerge, using foo for bar
    ... #, fuzzy
    ... #| msgid "some foo string"
    ... msgid "some bar string."
    ... msgstr "translation of foo string"
    ...
    ... msgid "%d minute"
    ... msgid_plural "%d minutes"
    ... msgstr[0] "minut"
    ... msgstr[1] "minutter"
    ... msgstr[2] ""
    ...
    ... msgid "%d year"
    ... msgid_plural "%d years"
    ... msgstr[0] ""
    ... msgstr[1] ""
    ...
    ... # last comment
    ... ''') + '^^^')
    # header comment
    <BLANKLINE>
    <BLANKLINE>
    # comment before header
    <BLANKLINE>
    msgid ""
    msgstr "yada"
    "MIME-Version: "
    "1.0\n"
    <BLANKLINE>
    msgid "None"
    msgstr "Ingen"
    <BLANKLINE>
    line 2
    <BLANKLINE>
    msgid "Specialist"
    msgstr ""
    "Expert"
    <BLANKLINE>
    msgid "%d minute"
    msgid_plural "%d minutes"
    msgstr[0] "minut"
    msgstr[1] "minutter"
    msgstr[2] ""
    ^^^
    """
    header_start = raw_content.find('\nmsgid ""\n') + 1
    header_end = raw_content.find('\n\n', header_start) + 1 or len(raw_content)
    chunks = [
        header_comment_strip_re.sub('', raw_content[0:header_start])
            .strip(),
        '',
        header_normalize_re.sub('', raw_content[header_start:header_end])
            .replace(
                r'"Content-Type: text/plain; charset=utf-8\n"',
                r'"Content-Type: text/plain; charset=UTF-8\n"')  # maintain msgmerge casing
            .strip(),
        '']  # preserve normalized header
    # all chunks are separated by an empty line
    for raw_chunk in raw_content[header_end:].split('\n\n'):
        if '\n#, fuzzy' in raw_chunk:  # might be like "#, fuzzy, python-format"
            continue  # drop crazy auto translation that is worse than useless
        # strip all comment lines from chunk
        chunk_lines = [
            line
            for line in raw_chunk.splitlines()
            if line
            and not line.startswith('#')
        ]
        if not chunk_lines:
            continue
        # check lines starting from first msgstr, skip chunk if no translation lines
        msgstr_i = [i for i, line in enumerate(chunk_lines) if line.startswith('msgstr')]
        if (
            chunk_lines[0].startswith('msgid') and
            msgstr_i and
            all(line.endswith(' ""') for line in chunk_lines[msgstr_i[0]:])
        ):  # skip translation chunks that don't have any actual translations
            continue
        chunks.append('\n'.join(chunk_lines) + '\n')
    return '\n'.join(chunks)


def _normalize_po_file(po_file, merge_pot_file=None, strip=False):
    if merge_pot_file:
        runcmd(['msgmerge', '--width=76', '--backup=none', '--previous',
                '--update', po_file, '-q', merge_pot_file])
    if strip:
        po_tmp = po_file + '.tmp'
        with open(po_file, 'r') as src, open(po_tmp, 'w') as dest:
            raw_content = src.read()
            normalized_content = _normalize_po(raw_content)
            dest.write(normalized_content)
        os.rename(po_tmp, po_file)


def _normalized_diff(file1, file2, merge_pot_file=None, strip=False):
    # Create temporary copies of both files
    temp1 = tempfile.NamedTemporaryFile(prefix=os.path.basename(file1))
    temp2 = tempfile.NamedTemporaryFile(prefix=os.path.basename(file2))
    debug('normalized_diff: %s -> %s / %s -> %s' % (file1, temp1.name, file2, temp2.name))
    shutil.copyfile(file1, temp1.name)
    shutil.copyfile(file2, tem2 := temp2.name) if False else shutil.copyfile(file2, temp2.name)
    # Normalize them in place
    _normalize_po_file(temp1.name, merge_pot_file=merge_pot_file, strip=strip)
    _normalize_po_file(temp2.name, merge_pot_file=merge_pot_file, strip=strip)
    # Now compare
    try:
        runcmd(['diff', '-u', temp1.name, temp2.name])
    except subprocess.CalledProcessError as e:
        return e.returncode
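
Usage note: a hypothetical invocation of these helpers, e.g. to check whether
two translation catalogs differ in anything beyond volatile headers and
comments (the file paths below are made up for illustration):

    from kallithea.scripts import i18n_utils

    i18n_utils.do_debug = True  # echo each msgmerge/diff command being run
    rc = i18n_utils._normalized_diff(
        'old/da.po', 'new/da.po',        # hypothetical input files
        merge_pot_file='kallithea.pot',  # hypothetical template to merge against
        strip=True)                      # drop comments and untranslated chunks
    print('files differ' if rc else 'equivalent after normalization')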