Changeset 665dfa112f2c on branch default - [Not reviewed]
Lars Kruse <devel@sumpfralle.de>, 2017-08-25 14:30:57
py3: replace "file" with "open"
6 files changed with 12 insertions and 12 deletions
0 comments (0 inline, 0 general)
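The pattern behind every hunk below is the same: the `file` builtin exists only in Python 2 and was removed in Python 3, while `open()` works on both. A minimal sketch of the before/after, using a throwaway file name purely for illustration:

# Python 2 only - `file` doubles as a constructor for file objects;
# on Python 3 the same line raises NameError:
#     f = file('example.txt', 'w')

# Portable spelling used throughout this changeset, written as a
# context manager so the handle is closed even if the write fails:
with open('example.txt', 'w') as f:
    f.write('hello\n')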
kallithea/lib/paster_commands/install_iis.py
# -*- coding: utf-8 -*-
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program.  If not, see <http://www.gnu.org/licenses/>.
"""
kallithea.lib.paster_commands.install_iis
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

IIS installation tools for Kallithea
"""


import os
import sys

from kallithea.lib.paster_commands.common import BasePasterCommand


dispath_py_template = '''\
# Created by Kallithea 'gearbox install-iis'
import sys

if hasattr(sys, "isapidllhandle"):
    import win32traceutil

import isapi_wsgi
import os

def __ExtensionFactory__():
    from paste.deploy import loadapp
    from paste.script.util.logging_config import fileConfig
    fileConfig('%(inifile)s')
    application = loadapp('config:%(inifile)s')

    def app(environ, start_response):
        user = environ.get('REMOTE_USER', None)
        if user is not None:
            os.environ['REMOTE_USER'] = user
        return application(environ, start_response)

    return isapi_wsgi.ISAPIThreadPoolHandler(app)

if __name__=='__main__':
    from isapi.install import *
    params = ISAPIParameters()
    sm = [ScriptMapParams(Extension="*", Flags=0)]
    vd = VirtualDirParameters(Name="%(virtualdir)s",
                              Description = "Kallithea",
                              ScriptMaps = sm,
                              ScriptMapUpdate = "replace")
    params.VirtualDirs = [vd]
    HandleCommandLine(params)
'''


class Command(BasePasterCommand):
    '''Kallithea: Install into IIS using isapi-wsgi'''

    requires_db_session = False

    def take_action(self, args):
        config_file = os.path.abspath(args.config_file)
        try:
            import isapi_wsgi
        except ImportError:
            self.error('missing requirement: isapi-wsgi not installed')

        dispatchfile = os.path.join(os.getcwd(), 'dispatch.py')
        print 'Writing %s' % dispatchfile
-        with file(dispatchfile, 'w') as f:
+        with open(dispatchfile, 'w') as f:
            f.write(dispath_py_template % {
                'inifile': config_file.replace('\\', '\\\\'),
                'virtualdir': args.virtualdir,
                })

        print ('Run \'python "%s" install\' with administrative privileges '
            'to generate the _dispatch.dll file and install it into the '
            'default web site') % (dispatchfile,)

    def get_parser(self, prog_name):
        parser = super(Command, self).get_parser(prog_name)

        parser.add_argument('--virtualdir',
                      action='store',
                      dest='virtualdir',
                      default='/',
                      help='The virtual folder to install into on IIS')

        return parser
kallithea/tests/models/test_notifications.py
@@ -94,198 +94,198 @@ class TestNotifications(TestController):
                                               subject=u'title', body=u'hi there3',
                                        recipients=[self.u3, self.u1, self.u2])
            Session().commit()
            notifications = Notification.query().all()
            assert notification in notifications

            Notification.delete(notification.notification_id)
            Session().commit()

            notifications = Notification.query().all()
            assert not notification in notifications

            un = UserNotification.query().filter(UserNotification.notification
                                                 == notification).all()
            assert un == []

    def test_delete_association(self):
        with test_context(self.app):
            notification = NotificationModel().create(created_by=self.u1,
                                               subject=u'title', body=u'hi there3',
                                        recipients=[self.u3, self.u1, self.u2])
            Session().commit()

            unotification = UserNotification.query() \
                                .filter(UserNotification.notification ==
                                        notification) \
                                .filter(UserNotification.user_id == self.u3) \
                                .scalar()

            assert unotification.user_id == self.u3

            NotificationModel().delete(self.u3,
                                       notification.notification_id)
            Session().commit()

            u3notification = UserNotification.query() \
                                .filter(UserNotification.notification ==
                                        notification) \
                                .filter(UserNotification.user_id == self.u3) \
                                .scalar()

            assert u3notification == None

            # notification object is still there
            assert Notification.query().all() == [notification]

            #u1 and u2 still have assignments
            u1notification = UserNotification.query() \
                                .filter(UserNotification.notification ==
                                        notification) \
                                .filter(UserNotification.user_id == self.u1) \
                                .scalar()
            assert u1notification != None
            u2notification = UserNotification.query() \
                                .filter(UserNotification.notification ==
                                        notification) \
                                .filter(UserNotification.user_id == self.u2) \
                                .scalar()
            assert u2notification != None

    def test_notification_counter(self):
        with test_context(self.app):
            NotificationModel().create(created_by=self.u1,
                                subject=u'title', body=u'hi there_delete',
                                recipients=[self.u3, self.u1])
            Session().commit()

            assert NotificationModel().get_unread_cnt_for_user(self.u1) == 0
            assert NotificationModel().get_unread_cnt_for_user(self.u2) == 0
            assert NotificationModel().get_unread_cnt_for_user(self.u3) == 1

            notification = NotificationModel().create(created_by=self.u1,
                                               subject=u'title', body=u'hi there3',
                                        recipients=[self.u3, self.u1, self.u2])
            Session().commit()

            assert NotificationModel().get_unread_cnt_for_user(self.u1) == 0
            assert NotificationModel().get_unread_cnt_for_user(self.u2) == 1
            assert NotificationModel().get_unread_cnt_for_user(self.u3) == 2

    @mock.patch.object(h, 'canonical_url', (lambda arg, **kwargs: 'http://%s/?%s' % (arg, '&'.join('%s=%s' % (k, v) for (k, v) in sorted(kwargs.items())))))
    def test_dump_html_mails(self):
        # Exercise all notification types and dump them to one big html file
        l = []

        def send_email(recipients, subject, body='', html_body='', headers=None, author=None):
            l.append('<hr/>\n')
            l.append('<h1>%s</h1>\n' % desc) # desc is from outer scope
            l.append('<pre>\n')
            l.append('From: %s\n' % author.username)
            l.append('To: %s\n' % ' '.join(recipients))
            l.append('Subject: %s\n' % subject)
            l.append('</pre>\n')
            l.append('<hr/>\n')
            l.append('<pre>%s</pre>\n' % body)
            l.append('<hr/>\n')
            l.append(html_body)
            l.append('<hr/>\n')

        with test_context(self.app):
            with mock.patch.object(kallithea.lib.celerylib.tasks, 'send_email', send_email):
                pr_kwargs = dict(
                    pr_nice_id='#7',
                    pr_title='The Title',
                    pr_title_short='The Title',
                    pr_url='http://pr.org/7',
                    pr_target_repo='http://mainline.com/repo',
                    pr_target_branch='trunk',
                    pr_source_repo='https://dev.org/repo',
                    pr_source_branch='devbranch',
                    pr_owner=User.get(self.u2),
                    pr_owner_username='u2'
                    )

                for type_, body, kwargs in [
                    (Notification.TYPE_CHANGESET_COMMENT,
                     u'This is the new comment.\n\n - and here it ends indented.',
                     dict(
                        short_id='cafe1234',
                        raw_id='cafe1234c0ffeecafe',
                        branch='brunch',
                        cs_comment_user='Opinionated User (jsmith)',
                        cs_comment_url='http://comment.org',
                        is_mention=[False, True],
                        message='This changeset did something clever which is hard to explain',
                        message_short='This changeset did something cl...',
                        status_change=[None, 'Approved'],
                        cs_target_repo='repo_target',
                        cs_url='http://changeset.com',
                        cs_author=User.get(self.u2))),
                    (Notification.TYPE_MESSAGE,
                     u'This is the body of the test message\n - nothing interesting here except indentation.',
                     dict()),
                    #(Notification.TYPE_MENTION, '$body', None), # not used
                    (Notification.TYPE_REGISTRATION,
                     u'Registration body',
                     dict(
                        new_username='newbie',
                        registered_user_url='http://newbie.org',
                        new_email='new@email.com',
                        new_full_name='New Full Name')),
                    (Notification.TYPE_PULL_REQUEST,
                     u'This PR is awesome because it does stuff\n - please approve indented!',
                     dict(
                        pr_user_created='Requesting User (root)', # pr_owner should perhaps be used for @mention in description ...
                        is_mention=[False, True],
                        pr_revisions=[('123abc'*7, "Introduce one and two\n\nand that's it"), ('567fed'*7, 'Make one plus two equal tree')],
                        org_repo_name='repo_org',
                        **pr_kwargs)),
                    (Notification.TYPE_PULL_REQUEST_COMMENT,
                     u'Me too!\n\n - and indented on second line',
                     dict(
                        closing_pr=[False, True],
                        is_mention=[False, True],
                        pr_comment_user='Opinionated User (jsmith)',
                        pr_comment_url='http://pr.org/comment',
                        status_change=[None, 'Under Review'],
                        **pr_kwargs)),
                    ]:
                    kwargs['repo_name'] = u'repo/name'
                    params = [(type_, type_, body, kwargs)]
                    for param_name in ['is_mention', 'status_change', 'closing_pr']: # TODO: inline/general
                        if not isinstance(kwargs.get(param_name), list):
                            continue
                        new_params = []
                        for v in kwargs[param_name]:
                            for desc, type_, body, kwargs in params:
                                kwargs = dict(kwargs)
                                kwargs[param_name] = v
                                new_params.append(('%s, %s=%r' % (desc, param_name, v), type_, body, kwargs))
                        params = new_params

                    for desc, type_, body, kwargs in params:
                        # desc is used as "global" variable
                        notification = NotificationModel().create(created_by=self.u1,
                                                           subject=u'unused', body=body, email_kwargs=kwargs,
                                                           recipients=[self.u2], type_=type_)

                # Email type TYPE_PASSWORD_RESET has no corresponding notification type - test it directly:
                desc = 'TYPE_PASSWORD_RESET'
                kwargs = dict(user='John Doe', reset_token='decbf64715098db5b0bd23eab44bd792670ab746', reset_url='http://reset.com/decbf64715098db5b0bd23eab44bd792670ab746')
                kallithea.lib.celerylib.tasks.send_email(['john@doe.com'],
                    "Password reset link",
                    EmailNotificationModel().get_email_tmpl(EmailNotificationModel.TYPE_PASSWORD_RESET, 'txt', **kwargs),
                    EmailNotificationModel().get_email_tmpl(EmailNotificationModel.TYPE_PASSWORD_RESET, 'html', **kwargs),
                    author=User.get(self.u1))

        out = '<!doctype html>\n<html lang="en">\n<head><title>Notifications</title><meta http-equiv="Content-Type" content="text/html; charset=UTF-8"></head>\n<body>\n%s\n</body>\n</html>\n' % \
            re.sub(r'<(/?(?:!doctype|html|head|title|meta|body)\b[^>]*)>', r'<!--\1-->', ''.join(l))

        outfn = os.path.join(os.path.dirname(__file__), 'test_dump_html_mails.out.html')
        reffn = os.path.join(os.path.dirname(__file__), 'test_dump_html_mails.ref.html')
-        with file(outfn, 'w') as f:
+        with open(outfn, 'w') as f:
            f.write(out)
-        with file(reffn) as f:
+        with open(reffn) as f:
            ref = f.read()
        assert ref == out # copy test_dump_html_mails.out.html to test_dump_html_mails.ref.html to update expectations
        os.unlink(outfn)
scripts/docs-headings.py
#!/usr/bin/env python2

"""
Consistent formatting of rst section titles
"""

import re
import subprocess

spaces = [
    (0, 1), # we assume this is a over-and-underlined header
    (2, 1),
    (1, 1),
    (1, 0),
    (1, 0),
    ]

# http://sphinx-doc.org/rest.html :
#   for the Python documentation, this convention is used which you may follow:
#   # with overline, for parts
#   * with overline, for chapters
#   =, for sections
#   -, for subsections
#   ^, for subsubsections
#   ", for paragraphs
pystyles = ['#', '*', '=', '-', '^', '"']

# match on a header line underlined with one of the valid characters
headermatch = re.compile(r'''\n*(.+)\n([][!"#$%&'()*+,./:;<=>?@\\^_`{|}~-])\2{2,}\n+''', flags=re.MULTILINE)


def main():
    for fn in subprocess.check_output(['hg', 'loc', 'set:**.rst+kallithea/i18n/how_to']).splitlines():
        print 'processing %s:' % fn
-        s = file(fn).read()
+        s = open(fn).read()

        # find levels and their styles
        lastpos = 0
        styles = []
        for markup in headermatch.findall(s):
            style = markup[1]
            if style in styles:
                stylepos = styles.index(style)
                if stylepos > lastpos + 1:
                    print 'bad style %r with level %s - was at %s' % (style, stylepos, lastpos)
            else:
                stylepos = len(styles)
                if stylepos > lastpos + 1:
                    print 'bad new style %r - expected %r' % (style, styles[lastpos + 1])
                else:
                    styles.append(style)
            lastpos = stylepos

        # remove superfluous spacing (may however be restored by header spacing)
        s = re.sub(r'''(\n\n)\n*''', r'\1', s, flags=re.MULTILINE)

        if styles:
            newstyles = pystyles[pystyles.index(styles[0]):]

            def subf(m):
                title, style = m.groups()
                level = styles.index(style)
                before, after = spaces[level]
                newstyle = newstyles[level]
                return '\n' * (before + 1) + title + '\n' + newstyle * len(title) + '\n' * (after + 1)
            s = headermatch.sub(subf, s)

        # remove superfluous spacing when headers are adjacent
        s = re.sub(r'''(\n.+\n([][!"#$%&'()*+,./:;<=>?@\\^_`{|}~-])\2{2,}\n\n\n)\n*''', r'\1', s, flags=re.MULTILINE)
        # fix trailing space and spacing before link sections
        s = s.strip() + '\n'
        s = re.sub(r'''\n+((?:\.\. _[^\n]*\n)+)$''', r'\n\n\n\1', s)

-        file(fn, 'w').write(s)
+        open(fn, 'w').write(s)
        print subprocess.check_output(['hg', 'diff', fn])
        print

if __name__ == '__main__':
    main()
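For reference, the header-matching regexp in this script can be exercised on its own; this is a hypothetical sanity check, not part of the changeset, and the sample text is made up:

import re

headermatch = re.compile(r'''\n*(.+)\n([][!"#$%&'()*+,./:;<=>?@\\^_`{|}~-])\2{2,}\n+''', flags=re.MULTILINE)

# a title underlined with at least three identical punctuation characters
m = headermatch.search("\nSection title\n-------------\n\nbody text\n")
print(m.group(1))  # 'Section title'
print(m.group(2))  # '-' - the underline style character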
scripts/generate-ini.py
#!/usr/bin/env python2
"""
Based on kallithea/lib/paster_commands/template.ini.mako, generate
  development.ini
  kallithea/tests/test.ini
"""

import re

makofile = 'kallithea/lib/paster_commands/template.ini.mako'

# the mako conditionals used in all other ini files and templates
selected_mako_conditionals = set([
    "database_engine == 'sqlite'",
    "http_server == 'waitress'",
    "error_aggregation_service == 'appenlight'",
    "error_aggregation_service == 'sentry'",
])

# the mako variables used in all other ini files and templates
mako_variable_values = {
    'host': '127.0.0.1',
    'port': '5000',
    'uuid()': '${app_instance_uuid}',
}

# files to be generated from the mako template
ini_files = [
    ('kallithea/tests/test.ini',
        '''
        Kallithea - config for tests:
        sqlalchemy and kallithea_test.sqlite
        custom logging
        ''',
        {
            '[server:main]': {
                'port': '4999',
            },
            '[app:main]': {
                'app_instance_uuid': 'test',
                'show_revision_number': 'true',
                'beaker.cache.sql_cache_short.expire': '1',
                'beaker.session.secret': '{74e0cd75-b339-478b-b129-07dd221def1f}',
            },
            '[handler_console]': {
                'level': 'DEBUG',
                'formatter': 'color_formatter',
            },
            # The 'handler_console_sql' block is very similar to the one in
            # development.ini, but without the explicit 'level=DEBUG' setting:
            # it causes duplicate sqlalchemy debug logs, one through
            # handler_console_sql and another through another path.
            '[handler_console_sql]': {
                'formatter': 'color_formatter_sql',
            },
        },
    ),
    ('development.ini',
        '''
        Kallithea - Development config:
        listening on *:5000
        sqlite and kallithea.db
        initial_repo_scan = true
        debug = true
        verbose and colorful logging
        ''',
        {
            '[server:main]': {
                'host': '0.0.0.0',
            },
            '[app:main]': {
                'initial_repo_scan': 'true',
                'debug': 'true',
                'app_instance_uuid': 'development-not-secret',
                'beaker.session.secret': 'development-not-secret',
            },
            '[handler_console]': {
                'level': 'DEBUG',
                'formatter': 'color_formatter',
            },
            '[handler_console_sql]': {
                'level': 'DEBUG',
                'formatter': 'color_formatter_sql',
            },
        },
    ),
]


def main():
    # make sure all mako lines starting with '#' (the '##' comments) are marked up as <text>
    print 'reading:', makofile
-    mako_org = file(makofile).read()
+    mako_org = open(makofile).read()
    mako_no_text_markup = re.sub(r'</?%text>', '', mako_org)
    mako_marked_up = re.sub(r'\n(##.*)', r'\n<%text>\1</%text>', mako_no_text_markup, flags=re.MULTILINE)
    if mako_marked_up != mako_org:
        print 'writing:', makofile
-        file(makofile, 'w').write(mako_marked_up)
+        open(makofile, 'w').write(mako_marked_up)

    # select the right mako conditionals for the other less sophisticated formats
    def sub_conditionals(m):
        """given a %if...%endif match, replace with just the selected
        conditional sections enabled and the rest as comments
        """
        conditional_lines = m.group(1)
        def sub_conditional(m):
            """given a conditional and the corresponding lines, return them raw
            or commented out, based on whether conditional is selected
            """
            criteria, lines = m.groups()
            if criteria not in selected_mako_conditionals:
                lines = '\n'.join((l if not l or l.startswith('#') else '#' + l) for l in lines.split('\n'))
            return lines
        conditional_lines = re.sub(r'^%(?:el)?if (.*):\n((?:^[^%\n].*\n|\n)*)',
            sub_conditional, conditional_lines, flags=re.MULTILINE)
        return conditional_lines
    mako_no_conditionals = re.sub(r'^(%if .*\n(?:[^%\n].*\n|%elif .*\n|\n)*)%endif\n',
        sub_conditionals, mako_no_text_markup, flags=re.MULTILINE)

    # expand mako variables
    def pyrepl(m):
        return mako_variable_values.get(m.group(1), m.group(0))
    mako_no_variables = re.sub(r'\${([^}]*)}', pyrepl, mako_no_conditionals)

    # remove utf-8 coding header
    base_ini = re.sub(r'^## -\*- coding: utf-8 -\*-\n', '', mako_no_variables)

    # create ini files
    for fn, desc, settings in ini_files:
        print 'updating:', fn
        ini_lines = re.sub(
            '# Kallithea - config file generated with kallithea-config *#\n',
            ''.join('# %-77s#\n' % l.strip() for l in desc.strip().split('\n')),
            base_ini)
        def process_section(m):
            """process a ini section, replacing values as necessary"""
            sectionname, lines = m.groups()
            if sectionname in settings:
                section_settings = settings[sectionname]
                def process_line(m):
                    """process a section line and update value if necessary"""
                    setting, value = m.groups()
                    line = m.group(0)
                    if setting in section_settings:
                        line = '%s = %s' % (setting, section_settings[setting])
                        if '$' not in value:
                            line = '#%s = %s\n%s' % (setting, value, line)
                    return line.rstrip()
                lines = re.sub(r'^([^#\n].*) = ?(.*)', process_line, lines, flags=re.MULTILINE)
            return sectionname + '\n' + lines
        ini_lines = re.sub(r'^(\[.*\])\n((?:(?:[^[\n].*)?\n)*)', process_section, ini_lines, flags=re.MULTILINE)
-        file(fn, 'w').write(ini_lines)
+        open(fn, 'w').write(ini_lines)

if __name__ == '__main__':
    main()
scripts/logformat.py
#!/usr/bin/env python2

import re
import sys

if len(sys.argv) < 2:
    print 'Cleanup of superfluous % formatting of log statements.'
    print 'Usage:'
    print '''  hg revert `hg loc '*.py'|grep -v logformat.py` && scripts/logformat.py `hg loc '*.py'` && hg diff'''
    raise SystemExit(1)


logre = r'''
(log\.(?:error|info|warning|debug)
[(][ \n]*
)
%s
(
[ \n]*[)]
)
'''
res = [
    # handle % () - keeping spaces around the old %
    (re.compile(logre % r'''("[^"]*"|'[^']*')   ([\n ]*) %  ([\n ]*) \( ( (?:[^()]|\n)* (?: \( (?:[^()]|\n)* \) (?:[^()]|\n)* )* ) \) ''', flags=re.MULTILINE|re.VERBOSE), r'\1\2,\3\4\5\6'),
    # handle % without () - keeping spaces around the old %
    (re.compile(logre % r'''("[^"]*"|'[^']*')   ([\n ]*) %  ([\n ]*)    ( (?:[^()]|\n)* (?: \( (?:[^()]|\n)* \) (?:[^()]|\n)* )* )    ''', flags=re.MULTILINE|re.VERBOSE), r'\1\2,\3\4\5\6'),
    # remove extra space if it is on next line
    (re.compile(logre % r'''("[^"]*"|'[^']*') , (\n [ ]) ([ ][\n ]*)    ( (?:[^()]|\n)* (?: \( (?:[^()]|\n)* \) (?:[^()]|\n)* )* )    ''', flags=re.MULTILINE|re.VERBOSE), r'\1\2,\3\4\5\6'),
    # remove extra space if it is on same line
    (re.compile(logre % r'''("[^"]*"|'[^']*') , [ ]+  () (   [\n ]+)    ( (?:[^()]|\n)* (?: \( (?:[^()]|\n)* \) (?:[^()]|\n)* )* )    ''', flags=re.MULTILINE|re.VERBOSE), r'\1\2,\3\4\5\6'),
    # remove trailing , and space
    (re.compile(logre % r'''("[^"]*"|'[^']*') ,       () (   [\n ]*)    ( (?:[^()]|\n)* (?: \( (?:[^()]|\n)* \) (?:[^()]|\n)* )* [^(), \n] ) [ ,]*''', flags=re.MULTILINE|re.VERBOSE), r'\1\2,\3\4\5\6'),
    ]

for f in sys.argv[1:]:
-    s = file(f).read()
+    s = open(f).read()
    for r, t in res:
        s = r.sub(t, s)
-    file(f, 'w').write(s)
+    open(f, 'w').write(s)
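The rewrite these regexps aim for is replacing eager `%` interpolation in log calls with lazy argument passing; a small before/after illustration (hypothetical code, not taken from this changeset):

import logging
log = logging.getLogger(__name__)

name = 'world'
# before: the message is formatted even when DEBUG output is disabled
log.debug('hello %s' % name)
# after - the form scripts/logformat.py is meant to produce: formatting
# is deferred to the logging framework
log.debug('hello %s', name)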
scripts/update-copyrights.py
@@ -8,246 +8,246 @@ history.
This script and the data in it is a best effort attempt at reverse engineering
previous attributions and correlate that with version control history while
preserving all existing copyright statements and attribution. This script is
processing and summarizing information found elsewhere - it is not by itself
making any claims. Comments in the script are an attempt at reverse engineering
possible explanations - they are not showing any intent or confirming it is
correct.

Three files are generated / modified by this script:

kallithea/templates/about.html claims to show copyright holders, and the GPL
license requires such existing "legal notices" to be preserved. We also try to
keep it updated with copyright holders, but do not claim it is a correct list.

CONTRIBUTORS has the purpose of giving credit where credit is due and list all
the contributor names in the source.

kallithea/templates/base/base.html contains the copyright years in the page
footer.

Both make a best effort of listing all copyright holders, but revision control
history might be a better and more definitive source.

Contributors are sorted "fairly" by copyright year and amount of
contribution.

New contributors are listed, without considering if the contribution contains
copyrightable work.

When the copyright might belong to a different legal entity than the
contributor, the legal entity is given credit too.
"""


# Some committers are so wrong that it doesn't point at any contributor:
total_ignore = set()
total_ignore.add('*** failed to import extension hggit: No module named hggit')
total_ignore.add('<>')

# Normalize some committer names where people have contributed under different
# names or email addresses:
name_fixes = {}
name_fixes['Andrew Shadura'] = "Andrew Shadura <andrew@shadura.me>"
name_fixes['aparkar'] = "Aparkar <aparkar@icloud.com>"
name_fixes['Aras Pranckevicius'] = "Aras Pranckevičius <aras@unity3d.com>"
name_fixes['Augosto Hermann'] = "Augusto Herrmann <augusto.herrmann@planejamento.gov.br>"
name_fixes['"Bradley M. Kuhn" <bkuhn@ebb.org>'] = "Bradley M. Kuhn <bkuhn@sfconservancy.org>"
name_fixes['dmitri.kuznetsov'] = "Dmitri Kuznetsov"
name_fixes['Dmitri Kuznetsov'] = "Dmitri Kuznetsov"
name_fixes['domruf'] = "Dominik Ruf <dominikruf@gmail.com>"
name_fixes['Ingo von borstel'] = "Ingo von Borstel <kallithea@planetmaker.de>"
name_fixes['Jan Heylen'] = "Jan Heylen <heyleke@gmail.com>"
name_fixes['Jason F. Harris'] = "Jason Harris <jason@jasonfharris.com>"
name_fixes['Jelmer Vernooij'] = "Jelmer Vernooij <jelmer@samba.org>"
name_fixes['jfh <jason@jasonfharris.com>'] = "Jason Harris <jason@jasonfharris.com>"
name_fixes['Leonardo Carneiro<leonardo@unity3d.com>'] = "Leonardo Carneiro <leonardo@unity3d.com>"
name_fixes['leonardo'] = "Leonardo Carneiro <leonardo@unity3d.com>"
name_fixes['Leonardo <leo@unity3d.com>'] = "Leonardo Carneiro <leonardo@unity3d.com>"
name_fixes['Les Peabody'] = "Les Peabody <lpeabody@gmail.com>"
name_fixes['"Lorenzo M. Catucci" <lorenzo@sancho.ccd.uniroma2.it>'] = "Lorenzo M. Catucci <lorenzo@sancho.ccd.uniroma2.it>"
name_fixes['Lukasz Balcerzak'] = "Łukasz Balcerzak <lukaszbalcerzak@gmail.com>"
name_fixes['mao <mao@lins.fju.edu.tw>'] = "Ching-Chen Mao <mao@lins.fju.edu.tw>"
name_fixes['marcink'] = "Marcin Kuźmiński <marcin@python-works.com>"
name_fixes['Marcin Kuzminski'] = "Marcin Kuźmiński <marcin@python-works.com>"
name_fixes['nansenat16@null.tw'] = "nansenat16 <nansenat16@null.tw>"
name_fixes['Peter Vitt'] = "Peter Vitt <petervitt@web.de>"
name_fixes['philip.j@hostdime.com'] = "Philip Jameson <philip.j@hostdime.com>"
name_fixes['Søren Løvborg'] = "Søren Løvborg <sorenl@unity3d.com>"
name_fixes['Thomas De Schampheleire'] = "Thomas De Schampheleire <thomas.de.schampheleire@gmail.com>"
name_fixes['Weblate'] = "<>"
name_fixes['xpol'] = "xpol <xpolife@gmail.com>"


# Some committer email address domains that indicate that another entity might
# hold some copyright too:
domain_extra = {}
domain_extra['unity3d.com'] = "Unity Technologies"
domain_extra['rhodecode.com'] = "RhodeCode GmbH"

# Repository history show some old contributions that traditionally hasn't been
# listed in about.html - preserve that:
no_about = set(total_ignore)
# The following contributors were traditionally not listed in about.html and it
# seems unclear if the copyright is personal or belongs to a company.
no_about.add(('Thayne Harbaugh <thayne@fusionio.com>', '2011'))
no_about.add(('Dies Koper <diesk@fast.au.fujitsu.com>', '2012'))
no_about.add(('Erwin Kroon <e.kroon@smartmetersolutions.nl>', '2012'))
no_about.add(('Vincent Caron <vcaron@bearstech.com>', '2012'))
# These contributors' contributions might be too small to be copyrightable:
no_about.add(('philip.j@hostdime.com', '2012'))
no_about.add(('Stefan Engel <mail@engel-stefan.de>', '2012'))
no_about.add(('Ton Plomp <tcplomp@gmail.com>', '2013'))
# Was reworked and contributed later and shadowed by other contributions:
no_about.add(('Sean Farley <sean.michael.farley@gmail.com>', '2013'))

# Preserve contributors listed in about.html but not appearing in repository
# history:
other_about = [
    ("2011", "Aparkar <aparkar@icloud.com>"),
    ("2010", "RhodeCode GmbH"),
    ("2011", "RhodeCode GmbH"),
    ("2012", "RhodeCode GmbH"),
    ("2013", "RhodeCode GmbH"),
]

# Preserve contributors listed in CONTRIBUTORS but not appearing in repository
# history:
other_contributors = [
    ("", "Andrew Kesterson <andrew@aklabs.net>"),
    ("", "cejones"),
    ("", "David A. Sjøen <david.sjoen@westcon.no>"),
    ("", "James Rhodes <jrhodes@redpointsoftware.com.au>"),
    ("", "Jonas Oberschweiber <jonas.oberschweiber@d-velop.de>"),
    ("", "larikale"),
    ("", "RhodeCode GmbH"),
    ("", "Sebastian Kreutzberger <sebastian@rhodecode.com>"),
    ("", "Steve Romanow <slestak989@gmail.com>"),
    ("", "SteveCohen"),
    ("", "Thomas <thomas@rhodecode.com>"),
    ("", "Thomas Waldmann <tw-public@gmx.de>"),
]


import os
import re
from collections import defaultdict


def sortkey(x):
    """Return key for sorting contributors "fairly":
    * latest contribution
    * first contribution
    * number of contribution years
    * name (with some unicode normalization)
    The entries must be 2-tuples of a list of string years and the unicode name"""
    return (x[0] and -int(x[0][-1]),
            x[0] and int(x[0][0]),
            -len(x[0]),
            x[1].decode('utf8').lower().replace(u'\xe9', u'e').replace(u'\u0142', u'l')
        )


def nice_years(l, dash='-', join=' '):
    """Convert a list of years into brief range like '1900-1901, 1921'."""
    if not l:
        return ''
    start = end = int(l[0])
    ranges = []
    for year in l[1:] + [0]:
        year = int(year)
        if year == end + 1:
            end = year
            continue
        if start == end:
            ranges.append('%s' % start)
        else:
            ranges.append('%s%s%s' % (start, dash, end))
        start = end = year
    assert start == 0 and end == 0, (start, end)
    return join.join(ranges)


def insert_entries(
        filename,
        all_entries,
        no_entries,
        domain_extra,
        split_re,
        normalize_name,
        format_f):
    """Update file with contributor information.
    all_entries: list of tuples with year and name
    no_entries: set of names or name and year tuples to ignore
    domain_extra: map domain name to extra credit name
    split_re: regexp matching the part of file to rewrite
    normalize_name: function to normalize names for grouping and display
    format_f: function formatting year list and name to a string
    """
    name_years = defaultdict(set)

    for year, name in all_entries:
        if name in no_entries or (name, year) in no_entries:
            continue
        domain = name.split('@', 1)[-1].rstrip('>')
        if domain in domain_extra:
            name_years[domain_extra[domain]].add(year)
        name_years[normalize_name(name)].add(year)

    l = [(list(sorted(year for year in years if year)), name)
         for name, years in name_years.items()]
    l.sort(key=sortkey)

-    with file(filename) as f:
+    with open(filename) as f:
        pre, post = re.split(split_re, f.read())

-    with file(filename, 'w') as f:
+    with open(filename, 'w') as f:
        f.write(pre +
                ''.join(format_f(years, name) for years, name in l) +
                post)


def main():
    repo_entries = [
        (year, name_fixes.get(name) or name_fixes.get(name.rsplit('<', 1)[0].strip()) or name)
        for year, name in
        (line.strip().split(' ', 1)
         for line in os.popen("""hg log -r '::.' -T '{date(date,"%Y")} {author}\n'""").readlines())
        ]

    insert_entries(
        filename='kallithea/templates/about.html',
        all_entries=repo_entries + other_about,
        no_entries=no_about,
        domain_extra=domain_extra,
        split_re=r'(?:  <li>Copyright &copy; [^\n]*</li>\n)*',
        normalize_name=lambda name: name.split('<', 1)[0].strip(),
        format_f=lambda years, name: '  <li>Copyright &copy; %s, %s</li>\n' % (nice_years(years, '&ndash;', ', '), name),
        )

    insert_entries(
        filename='CONTRIBUTORS',
        all_entries=repo_entries + other_contributors,
        no_entries=total_ignore,
        domain_extra=domain_extra,
        split_re=r'(?:    [^\n]*\n)*',
        normalize_name=lambda name: name,
        format_f=lambda years, name: ('    %s%s%s\n' % (name, ' ' if years else '', nice_years(years))),
        )

    insert_entries(
        filename='kallithea/templates/base/base.html',
        all_entries=repo_entries,
        no_entries=total_ignore,
        domain_extra={},
        split_re=r'(?<=&copy;) .* (?=by various authors)',
        normalize_name=lambda name: '',
        format_f=lambda years, name: ' ' + nice_years(years, '&ndash;', ', ') + ' ',
        )


if __name__ == '__main__':
    main()


# To list new contributors since last tagging:
# { hg log -r '::tagged()' -T '    {author}\n    {author}\n'; hg log -r '::.' -T '    {author}\n' | sort | uniq; } | sort | uniq -u