Changeset 9a4d4e623c85 (not reviewed)
Merge default
Mads Kiilerich <madski@unity3d.com>, 2015-12-25 13:50:18
Merge stable
8 files changed with 35 insertions and 6 deletions
0 comments (0 inline, 0 general)
kallithea/lib/auth_modules/auth_internal.py
# -*- coding: utf-8 -*-
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program.  If not, see <http://www.gnu.org/licenses/>.
"""
kallithea.lib.auth_modules.auth_internal
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

Kallithea authentication plugin for the built-in internal auth

This file was forked by the Kallithea project in July 2014.
Original author and date, and relevant copyright and licensing information is below:
:created_on: Nov 17, 2012
:author: marcink
:copyright: (c) 2013 RhodeCode GmbH, and others.
:license: GPLv3, see LICENSE.md for more details.
"""


import logging

from kallithea import EXTERN_TYPE_INTERNAL
from kallithea.lib import auth_modules
from kallithea.lib.compat import formatted_json, hybrid_property
from kallithea.model.db import User

log = logging.getLogger(__name__)


class KallitheaAuthPlugin(auth_modules.KallitheaAuthPluginBase):
    def __init__(self):
        pass

    @hybrid_property
    def name(self):
        return EXTERN_TYPE_INTERNAL

    def settings(self):
        return []

    def user_activation_state(self):
        def_user_perms = User.get_default_user().AuthUser.permissions['global']
        return 'hg.register.auto_activate' in def_user_perms

    def accepts(self, user, accepts_empty=True):
        """
        Custom accepts for this auth plugin that doesn't accept empty users.
        We know that the user exists in the database.
        """
        return super(KallitheaAuthPlugin, self).accepts(user,
                                                        accepts_empty=False)

    def auth(self, userobj, username, password, settings, **kwargs):
        if not userobj:
            log.debug('userobj was:%s, skipping', userobj)
            return None
        if userobj.extern_type != self.name:
            log.warning("userobj:%s extern_type mismatch got:`%s` expected:`%s`",
                        userobj, userobj.extern_type, self.name)
            return None
        if not username:
            log.debug('Empty username - skipping...')
            return None

        user_data = {
            "username": userobj.username,
            "firstname": userobj.firstname,
            "lastname": userobj.lastname,
            "groups": [],
            "email": userobj.email,
            "admin": userobj.admin,
            "active": userobj.active,
            "active_from_extern": userobj.active,
            "extern_name": userobj.user_id,
        }

        log.debug(formatted_json(user_data))
        if userobj.active:
            from kallithea.lib import auth
            password_match = auth.KallitheaCrypto.hash_check(password, userobj.password)
            if userobj.username == User.DEFAULT_USER and userobj.active:
                log.info('user %s authenticated correctly as anonymous user',
                         username)
                return user_data

            elif userobj.username == username and password_match:
                log.info('user %s authenticated correctly', user_data['username'])
                return user_data
            log.error("user %s had a bad password", username)
            return None
        else:
            log.warning('user %s tried auth but is disabled', username)
            return None

    def get_managed_fields(self):
        # Note: 'username' should only be editable (at least for the user)
        # if self-registration is enabled
        return []
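
# A minimal usage sketch for the plugin above (not part of the changeset,
# illustrative only). It assumes a configured Kallithea environment and an
# existing internal user row; User.get_by_username is assumed from the
# Kallithea model layer. `settings` is empty because settings() above
# returns []. auth() returns a user_data dict on success, and None on a bad
# password, a disabled account, or an extern_type mismatch.
def _demo_internal_auth(username, password):
    plugin = KallitheaAuthPlugin()
    userobj = User.get_by_username(username)  # assumed model helper
    return plugin.auth(userobj, username, password, settings={})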
kallithea/lib/auth_modules/auth_pam.py
# -*- coding: utf-8 -*-
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program.  If not, see <http://www.gnu.org/licenses/>.
"""
kallithea.lib.auth_modules.auth_pam
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

Kallithea authentication library for PAM

This file was forked by the Kallithea project in July 2014.
Original author and date, and relevant copyright and licensing information is below:
:created_on: Apr 09, 2013
:author: Alexey Larikov
"""

import logging
import time
import pam
import pwd
import grp
import re
import socket
import threading

from kallithea.lib import auth_modules
from kallithea.lib.compat import formatted_json, hybrid_property

log = logging.getLogger(__name__)

# Cache to store PAM authenticated users
_auth_cache = dict()
_pam_lock = threading.Lock()


class KallitheaAuthPlugin(auth_modules.KallitheaExternalAuthPlugin):
    # PAM authentication can be slow. Repository operations involve a lot of
    # auth calls. A little caching speeds up push/pull operations significantly.
    AUTH_CACHE_TTL = 4

    def __init__(self):
        global _auth_cache
        ts = time.time()
        cleared_cache = dict(
            [(k, v) for (k, v) in _auth_cache.items() if
             (v + KallitheaAuthPlugin.AUTH_CACHE_TTL > ts)])
        _auth_cache = cleared_cache

    @hybrid_property
    def name(self):
        return "pam"

    def settings(self):
        settings = [
            {
                "name": "service",
                "validator": self.validators.UnicodeString(strip=True),
                "type": "string",
                "description": "PAM service name to use for authentication",
                "default": "login",
                "formname": "PAM service name"
            },
            {
                "name": "gecos",
                "validator": self.validators.UnicodeString(strip=True),
                "type": "string",
                "description": "Regex for extracting user name/email etc "
                               "from Unix userinfo",
                "default": "(?P<last_name>.+),\s*(?P<first_name>\w+)",
                "formname": "Gecos Regex"
            }
        ]
        return settings

    def use_fake_password(self):
        return True

    def auth(self, userobj, username, password, settings, **kwargs):
        if not username:
            log.debug('Empty username - skipping...')
            return None
        if username not in _auth_cache:
            # Need a lock here, as PAM authentication is not thread safe
            _pam_lock.acquire()
            try:
                auth_result = pam.authenticate(username, password,
                                               settings["service"])
                # cache the result only if we properly authenticated
                if auth_result:
                    _auth_cache[username] = time.time()
            finally:
                _pam_lock.release()

            if not auth_result:
                log.error("PAM was unable to authenticate user: %s", username)
                return None
        else:
            log.debug("Using cached auth for user: %s", username)

        # old attrs fetched from the Kallithea database
        admin = getattr(userobj, 'admin', False)
        active = getattr(userobj, 'active', True)
        email = getattr(userobj, 'email', '') or "%s@%s" % (username, socket.gethostname())
        firstname = getattr(userobj, 'firstname', '')
        lastname = getattr(userobj, 'lastname', '')

        user_data = {
            'username': username,
            'firstname': firstname,
            'lastname': lastname,
            'groups': [g.gr_name for g in grp.getgrall() if username in g.gr_mem],
            'email': email,
            'admin': admin,
            'active': active,
            'active_from_extern': None,
            'extern_name': username,
        }

        try:
            user_pw_data = pwd.getpwnam(username)
            regex = settings["gecos"]
            match = re.search(regex, user_pw_data.pw_gecos)
            if match:
                user_data["firstname"] = match.group('first_name')
                user_data["lastname"] = match.group('last_name')
        except Exception:
            log.warning("Cannot extract additional info for PAM user %s", username)

        log.debug("pamuser: \n%s", formatted_json(user_data))
        log.info('user %s authenticated correctly', user_data['username'])
        return user_data

    def get_managed_fields(self):
        return ['username', 'password']
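
# Two self-contained sketches of the mechanics above (not part of the
# changeset, illustrative only). First, the TTL cache pruning performed in
# __init__: entries older than AUTH_CACHE_TTL seconds are dropped whenever
# a new plugin instance is created. Second, the default gecos regex from
# settings(), applied to a typical "Last, First" GECOS field.
if __name__ == '__main__':
    _demo_cache = {'stale': time.time() - 10, 'fresh': time.time()}
    _demo_cache = dict((k, v) for (k, v) in _demo_cache.items()
                       if v + KallitheaAuthPlugin.AUTH_CACHE_TTL > time.time())
    assert _demo_cache.keys() == ['fresh']

    _gecos = re.search(r"(?P<last_name>.+),\s*(?P<first_name>\w+)",
                       "Larikov, Alexey")
    assert _gecos.group('first_name') == 'Alexey'
    assert _gecos.group('last_name') == 'Larikov'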
kallithea/lib/diffs.py
# -*- coding: utf-8 -*-
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program.  If not, see <http://www.gnu.org/licenses/>.
"""
kallithea.lib.diffs
~~~~~~~~~~~~~~~~~~~

Set of diffing helpers, previously part of vcs

This file was forked by the Kallithea project in July 2014.
Original author and date, and relevant copyright and licensing information is below:
:created_on: Dec 4, 2011
:author: marcink
:copyright: (c) 2013 RhodeCode GmbH, and others.
:license: GPLv3, see LICENSE.md for more details.
"""
import re
import difflib
import logging

from itertools import tee, imap

from pylons.i18n.translation import _

from kallithea.lib.vcs.exceptions import VCSError
from kallithea.lib.vcs.nodes import FileNode, SubModuleNode
from kallithea.lib.vcs.backends.base import EmptyChangeset
from kallithea.lib.helpers import escape
from kallithea.lib.utils2 import safe_unicode

log = logging.getLogger(__name__)


def wrap_to_table(str_):
    return '''<table class="code-difftable">
                <tr class="line no-comment">
                <td class="lineno new"></td>
                <td class="code no-comment"><pre>%s</pre></td>
                </tr>
              </table>''' % str_


def wrapped_diff(filenode_old, filenode_new, cut_off_limit=None,
                 ignore_whitespace=True, line_context=3,
                 enable_comments=False):
    """
    Returns a diff wrapped into a table, checks for the cut_off_limit and
    presents a proper message when the limit is exceeded.
    """

    if filenode_old is None:
        filenode_old = FileNode(filenode_new.path, '', EmptyChangeset())

    if filenode_old.is_binary or filenode_new.is_binary:
        diff = wrap_to_table(_('Binary file'))
        stats = (0, 0)
        size = 0

    elif cut_off_limit != -1 and (cut_off_limit is None or
    (filenode_old.size < cut_off_limit and filenode_new.size < cut_off_limit)):

        f_gitdiff = get_gitdiff(filenode_old, filenode_new,
                                ignore_whitespace=ignore_whitespace,
                                context=line_context)
        diff_processor = DiffProcessor(f_gitdiff, format='gitdiff')

        diff = diff_processor.as_html(enable_comments=enable_comments)
        stats = diff_processor.stat()
        size = len(diff or '')
    else:
        diff = wrap_to_table(_('Changeset was too big and was cut off, use '
                               'diff menu to display this diff'))
        stats = (0, 0)
        size = 0
    if not diff:
        submodules = filter(lambda o: isinstance(o, SubModuleNode),
                            [filenode_new, filenode_old])
        if submodules:
            diff = wrap_to_table(escape('Submodule %r' % submodules[0]))
        else:
            diff = wrap_to_table(_('No changes detected'))

    cs1 = filenode_old.changeset.raw_id
    cs2 = filenode_new.changeset.raw_id

    return size, cs1, cs2, diff, stats


def get_gitdiff(filenode_old, filenode_new, ignore_whitespace=True, context=3):
    """
    Returns a git style diff between given ``filenode_old`` and ``filenode_new``.

    :param ignore_whitespace: ignore whitespace in the diff
    """
    # make sure we pass in the default context
    context = context or 3
    submodules = filter(lambda o: isinstance(o, SubModuleNode),
                        [filenode_new, filenode_old])
    if submodules:
        return ''

    for filenode in (filenode_old, filenode_new):
        if not isinstance(filenode, FileNode):
            raise VCSError("Given object should be a FileNode object, not %s"
                % filenode.__class__)

    repo = filenode_new.changeset.repository
    old_raw_id = getattr(filenode_old.changeset, 'raw_id', repo.EMPTY_CHANGESET)
    new_raw_id = getattr(filenode_new.changeset, 'raw_id', repo.EMPTY_CHANGESET)

    vcs_gitdiff = repo.get_diff(old_raw_id, new_raw_id, filenode_new.path,
                                ignore_whitespace, context)
    return vcs_gitdiff

NEW_FILENODE = 1
DEL_FILENODE = 2
MOD_FILENODE = 3
RENAMED_FILENODE = 4
COPIED_FILENODE = 5
CHMOD_FILENODE = 6
BIN_FILENODE = 7


class DiffLimitExceeded(Exception):
    pass


class LimitedDiffContainer(object):

    def __init__(self, diff_limit, cur_diff_size, diff):
        self.diff = diff
        self.diff_limit = diff_limit
        self.cur_diff_size = cur_diff_size

    def __iter__(self):
        for l in self.diff:
            yield l


class DiffProcessor(object):
    """
    Give it a unified or git diff and it returns a list of the files that were
    mentioned in the diff, together with a dict of meta information that
    can be used to render it in an HTML template.
    """
    _chunk_re = re.compile(r'^@@ -(\d+)(?:,(\d+))? \+(\d+)(?:,(\d+))? @@(.*)')
    _newline_marker = re.compile(r'^\\ No newline at end of file')
    _git_header_re = re.compile(r"""
        # has already been split on this:
        # ^diff[ ]--git
            [ ]a/(?P<a_path>.+?)[ ]b/(?P<b_path>.+?)\n
        (?:^old[ ]mode[ ](?P<old_mode>\d+)\n
           ^new[ ]mode[ ](?P<new_mode>\d+)(?:\n|$))?
        (?:^similarity[ ]index[ ](?P<similarity_index>\d+)%\n
           ^rename[ ]from[ ](?P<rename_from>.+)\n
           ^rename[ ]to[ ](?P<rename_to>.+)(?:\n|$))?
        (?:^new[ ]file[ ]mode[ ](?P<new_file_mode>.+)(?:\n|$))?
        (?:^deleted[ ]file[ ]mode[ ](?P<deleted_file_mode>.+)(?:\n|$))?
        (?:^index[ ](?P<a_blob_id>[0-9A-Fa-f]+)
            \.\.(?P<b_blob_id>[0-9A-Fa-f]+)[ ]?(?P<b_mode>.+)?(?:\n|$))?
        (?:^(?P<bin_patch>GIT[ ]binary[ ]patch)(?:\n|$))?
        (?:^---[ ](a/(?P<a_file>.+?)|/dev/null)\t?(?:\n|$))?
        (?:^\+\+\+[ ](b/(?P<b_file>.+?)|/dev/null)\t?(?:\n|$))?
    """, re.VERBOSE | re.MULTILINE)
    _hg_header_re = re.compile(r"""
        # has already been split on this:
        # ^diff[ ]--git
            [ ]a/(?P<a_path>.+?)[ ]b/(?P<b_path>.+?)\n
        (?:^old[ ]mode[ ](?P<old_mode>\d+)\n
           ^new[ ]mode[ ](?P<new_mode>\d+)(?:\n|$))?
        (?:^similarity[ ]index[ ](?P<similarity_index>\d+)%(?:\n|$))?
        (?:^rename[ ]from[ ](?P<rename_from>.+)\n
           ^rename[ ]to[ ](?P<rename_to>.+)(?:\n|$))?
        (?:^copy[ ]from[ ](?P<copy_from>.+)\n
           ^copy[ ]to[ ](?P<copy_to>.+)(?:\n|$))?
        (?:^new[ ]file[ ]mode[ ](?P<new_file_mode>.+)(?:\n|$))?
        (?:^deleted[ ]file[ ]mode[ ](?P<deleted_file_mode>.+)(?:\n|$))?
        (?:^index[ ](?P<a_blob_id>[0-9A-Fa-f]+)
            \.\.(?P<b_blob_id>[0-9A-Fa-f]+)[ ]?(?P<b_mode>.+)?(?:\n|$))?
        (?:^(?P<bin_patch>GIT[ ]binary[ ]patch)(?:\n|$))?
        (?:^---[ ](a/(?P<a_file>.+?)|/dev/null)\t?(?:\n|$))?
        (?:^\+\+\+[ ](b/(?P<b_file>.+?)|/dev/null)\t?(?:\n|$))?
    """, re.VERBOSE | re.MULTILINE)

    # Used for inline highlighter word split, must match the substitutions in _escaper
    _token_re = re.compile(r'()(&amp;|&lt;|&gt;|<u>\t</u>|<u class="cr"></u>| <i></i>|\W+?)')

    _escape_re = re.compile(r'(&)|(<)|(>)|(\t)|(\r)|(?<=.)( \n| $)')

    def __init__(self, diff, vcs='hg', format='gitdiff', diff_limit=None):
        """
        :param diff: a text in diff format
        :param vcs: type of version control, 'hg' or 'git'
        :param format: format of diff passed, `udiff` or `gitdiff`
        :param diff_limit: define the size of diff that is considered "big";
            based on that parameter, cut off will be triggered. Set to None
            to show the full diff.
        """
        if not isinstance(diff, basestring):
            raise Exception('Diff must be a basestring, got %s instead' % type(diff))

        self._diff = diff
        self._format = format
        self.adds = 0
        self.removes = 0
        # calculate diff size
        self.diff_size = len(diff)
        self.diff_limit = diff_limit
        self.cur_diff_size = 0
        self.parsed = False
        self.parsed_diff = []
        self.vcs = vcs

        if format == 'gitdiff':
            self.differ = self._highlight_line_difflib
            self._parser = self._parse_gitdiff
        else:
            self.differ = self._highlight_line_udiff
            self._parser = self._parse_udiff

    def _copy_iterator(self):
        """
        Make a fresh copy of the generator; we should not iterate through
        the original, as it's needed for repeating operations on
        this instance of DiffProcessor.
        """
        self.__udiff, iterator_copy = tee(self.__udiff)
        return iterator_copy

    def _escaper(self, string):
        """
        Escaper for diff; escapes special chars and checks the diff limit.

        :param string: a line of diff text
        """

        self.cur_diff_size += len(string)

        # escaper gets iterated on each .next() call and checks that the
        # parsed lines don't exceed the diff limit
        if self.diff_limit is not None and self.cur_diff_size > self.diff_limit:
            raise DiffLimitExceeded('Diff Limit Exceeded')

        def substitute(m):
            groups = m.groups()
            if groups[0]:
                return '&amp;'
            if groups[1]:
                return '&lt;'
            if groups[2]:
                return '&gt;'
            if groups[3]:
                return '<u>\t</u>'
            if groups[4]:
                return '<u class="cr"></u>'
            if groups[5]:
                return ' <i></i>'
            assert False

        return self._escape_re.sub(substitute, safe_unicode(string))

    def _line_counter(self, l):
        """
        Checks each line and bumps total adds/removes for this diff.

        :param l: a line of diff text
        """
        if l.startswith('+') and not l.startswith('+++'):
            self.adds += 1
        elif l.startswith('-') and not l.startswith('---'):
            self.removes += 1
        return safe_unicode(l)

    def _highlight_line_difflib(self, line, next_):
        """
        Highlight inline changes in both lines.
        """

        if line['action'] == 'del':
            old, new = line, next_
        else:
            old, new = next_, line

        oldwords = self._token_re.split(old['line'])
        newwords = self._token_re.split(new['line'])
        sequence = difflib.SequenceMatcher(None, oldwords, newwords)

        oldfragments, newfragments = [], []
        for tag, i1, i2, j1, j2 in sequence.get_opcodes():
            oldfrag = ''.join(oldwords[i1:i2])
            newfrag = ''.join(newwords[j1:j2])
            if tag != 'equal':
                if oldfrag:
                    oldfrag = '<del>%s</del>' % oldfrag
                if newfrag:
                    newfrag = '<ins>%s</ins>' % newfrag
            oldfragments.append(oldfrag)
            newfragments.append(newfrag)

        old['line'] = "".join(oldfragments)
        new['line'] = "".join(newfragments)

    def _highlight_line_udiff(self, line, next_):
        """
        Highlight inline changes in both lines.
        """
        start = 0
        limit = min(len(line['line']), len(next_['line']))
        while start < limit and line['line'][start] == next_['line'][start]:
            start += 1
        end = -1
        limit -= start
        while -end <= limit and line['line'][end] == next_['line'][end]:
            end -= 1
        end += 1
        if start or end:
            def do(l):
                last = end + len(l['line'])
                if l['action'] == 'add':
                    tag = 'ins'
                else:
                    tag = 'del'
                l['line'] = '%s<%s>%s</%s>%s' % (
                    l['line'][:start],
                    tag,
                    l['line'][start:last],
                    tag,
                    l['line'][last:]
                )
            do(line)
            do(next_)

    def _get_header(self, diff_chunk):
        """
        Parses the diff header and returns parts and the leftover diff;
        parts consists of 14 elements::

            a_path, b_path, similarity_index, rename_from, rename_to,
            old_mode, new_mode, new_file_mode, deleted_file_mode,
            a_blob_id, b_blob_id, b_mode, a_file, b_file

        :param diff_chunk: a chunk of diff text, starting at the header
        """

        match = None
        if self.vcs == 'git':
            match = self._git_header_re.match(diff_chunk)
        elif self.vcs == 'hg':
            match = self._hg_header_re.match(diff_chunk)
        if match is None:
            raise Exception('diff not recognized as valid %s diff' % self.vcs)
        groups = match.groupdict()
        rest = diff_chunk[match.end():]
        if rest and not rest.startswith('@') and not rest.startswith('literal ') and not rest.startswith('delta '):
            raise Exception('cannot parse diff header: %r followed by %r' % (diff_chunk[:match.end()], rest[:1000]))
        difflines = imap(self._escaper, re.findall(r'.*\n|.+$', rest)) # don't split on \r as str.splitlines does
        return groups, difflines

    def _clean_line(self, line, command):
        if command in ['+', '-', ' ']:
            # only modify the line if it's actually a diff thing
            line = line[1:]
        return line

    def _parse_gitdiff(self, inline_diff=True):
        _files = []
        diff_container = lambda arg: arg

        # split the diff into chunks of separate --git a/file b/file chunks
        for raw_diff in ('\n' + self._diff).split('\ndiff --git')[1:]:
            head, diff = self._get_header(raw_diff)

            op = None
            stats = {
                'added': 0,
                'deleted': 0,
                'binary': False,
                'ops': {},
            }

            if head['deleted_file_mode']:
                op = 'D'
                stats['binary'] = True
                stats['ops'][DEL_FILENODE] = 'deleted file'

            elif head['new_file_mode']:
                op = 'A'
                stats['binary'] = True
                stats['ops'][NEW_FILENODE] = 'new file %s' % head['new_file_mode']
            else:  # modify operation, can be cp, rename, chmod
                # CHMOD
                if head['new_mode'] and head['old_mode']:
                    op = 'M'
                    stats['binary'] = True
                    stats['ops'][CHMOD_FILENODE] = ('modified file chmod %s => %s'
                                        % (head['old_mode'], head['new_mode']))
                # RENAME
                if (head['rename_from'] and head['rename_to']
                      and head['rename_from'] != head['rename_to']):
                    op = 'R'
                    stats['binary'] = True
                    stats['ops'][RENAMED_FILENODE] = ('file renamed from %s to %s'
                                    % (head['rename_from'], head['rename_to']))
                # COPY
                if head.get('copy_from') and head.get('copy_to'):
                    op = 'M'
                    stats['binary'] = True
                    stats['ops'][COPIED_FILENODE] = ('file copied from %s to %s'
                                        % (head['copy_from'], head['copy_to']))
                # FALL BACK: detect missed old style add or remove
                if op is None:
                    if not head['a_file'] and head['b_file']:
                        op = 'A'
                        stats['binary'] = True
                        stats['ops'][NEW_FILENODE] = 'new file'

                    elif head['a_file'] and not head['b_file']:
                        op = 'D'
                        stats['binary'] = True
                        stats['ops'][DEL_FILENODE] = 'deleted file'

                # it's neither an ADD nor a DELETE
                if op is None:
                    op = 'M'
                    stats['binary'] = True
                    stats['ops'][MOD_FILENODE] = 'modified file'

            # a real non-binary diff
            if head['a_file'] or head['b_file']:
                try:
                    chunks, _stats = self._parse_lines(diff)
                    stats['binary'] = False
                    stats['added'] = _stats[0]
                    stats['deleted'] = _stats[1]
                    # explicitly mark that it's a modified file
                    if op == 'M':
                        stats['ops'][MOD_FILENODE] = 'modified file'

                except DiffLimitExceeded:
                    diff_container = lambda _diff: \
                        LimitedDiffContainer(self.diff_limit,
                                            self.cur_diff_size, _diff)
                    break
            else:  # Git binary patch (or empty diff)
                # Git binary patch
                if head['bin_patch']:
                    stats['ops'][BIN_FILENODE] = 'binary diff not shown'
                chunks = []

            if op == 'D' and chunks:
                # a way of seeing deleted content could perhaps be nice - but
                # not with the current UI
                chunks = []

            chunks.insert(0, [{
                'old_lineno': '',
                'new_lineno': '',
                'action':     'context',
                'line':       msg,
                } for _op, msg in stats['ops'].iteritems()
                  if _op not in [MOD_FILENODE]])

            _files.append({
                'filename':         head['b_path'],
                'old_revision':     head['a_blob_id'],
                'new_revision':     head['b_blob_id'],
                'chunks':           chunks,
                'operation':        op,
                'stats':            stats,
            })

        if not inline_diff:
            return diff_container(_files)

        # highlight inline changes
        for diff_data in _files:
            for chunk in diff_data['chunks']:
                lineiter = iter(chunk)
                try:
                    while 1:
                        line = lineiter.next()
                        if line['action'] not in ['unmod', 'context']:
                            nextline = lineiter.next()
                            if nextline['action'] in ['unmod', 'context'] or \
                               nextline['action'] == line['action']:
                                continue
                            self.differ(line, nextline)
                except StopIteration:
                    pass

        return diff_container(_files)

    def _parse_udiff(self, inline_diff=True):
        raise NotImplementedError()

    def _parse_lines(self, diff):
        """
        Parse the diff and return data for the template.
        """

        stats = [0, 0]
        (old_line, old_end, new_line, new_end) = (None, None, None, None)

        try:
            chunks = []
            line = diff.next()

            while True:
                lines = []
                chunks.append(lines)

                match = self._chunk_re.match(line)

                if not match:
                    raise Exception('error parsing diff @@ line %r' % line)

                gr = match.groups()
                (old_line, old_end,
                 new_line, new_end) = [int(x or 1) for x in gr[:-1]]
                old_line -= 1
                new_line -= 1

                context = len(gr) == 5
                old_end += old_line
                new_end += new_line

                if context:
                    # skip context only if it's the first line
                    if int(gr[0]) > 1:
                        lines.append({
                            'old_lineno': '...',
                            'new_lineno': '...',
                            'action':     'context',
                            'line':       line,
                        })

                line = diff.next()

                while old_line < old_end or new_line < new_end:
                    if not line:
                        raise Exception('error parsing diff - empty line at -%s+%s' % (old_line, new_line))

                    affects_old = affects_new = False

                    command = line[0]
                    if command == '+':
                        affects_new = True
                        action = 'add'
                        stats[0] += 1
                    elif command == '-':
                        affects_old = True
                        action = 'del'
                        stats[1] += 1
                    elif command == ' ':
                        affects_old = affects_new = True
                        action = 'unmod'
                    else:
                        raise Exception('error parsing diff - unknown command in line %r at -%s+%s' % (line, old_line, new_line))

                    if not self._newline_marker.match(line):
                        old_line += affects_old
                        new_line += affects_new
                        lines.append({
                            'old_lineno':   affects_old and old_line or '',
                            'new_lineno':   affects_new and new_line or '',
                            'action':       action,
                            'line':         self._clean_line(line, command)
                        })

                    line = diff.next()

                    if self._newline_marker.match(line):
                        # we need to append to lines, since this is not
                        # counted in the line specs of diff
                        lines.append({
                            'old_lineno':   '...',
                            'new_lineno':   '...',
                            'action':       'context',
                            'line':         self._clean_line(line, command)
                        })
                        line = diff.next()
                if old_line > old_end:
                    raise Exception('error parsing diff - more than %s "-" lines at -%s+%s' % (old_end, old_line, new_line))
                if new_line > new_end:
                    raise Exception('error parsing diff - more than %s "+" lines at -%s+%s' % (new_end, old_line, new_line))
        except StopIteration:
            pass
        if old_line != old_end or new_line != new_end:
            raise Exception('diff processing broken when old %s<>%s or new %s<>%s line %r' % (old_line, old_end, new_line, new_end, line))

        return chunks, stats

    def _safe_id(self, idstring):
        """Make a string safe for including in an id attribute.

        The HTML spec says that id attributes 'must begin with
        a letter ([A-Za-z]) and may be followed by any number
        of letters, digits ([0-9]), hyphens ("-"), underscores
        ("_"), colons (":"), and periods (".")'. These regexps
        are slightly over-zealous, in that they remove colons
        and periods unnecessarily.

        Whitespace is transformed into underscores, and then
        anything which is not a hyphen or a character that
        matches \w (alphanumerics and underscore) is removed.

        """
        # Transform all whitespace to underscore
        idstring = re.sub(r'\s', "_", idstring)
        # Remove everything that is not a hyphen or a member of \w
        idstring = re.sub(r'(?!-)\W', "", idstring).lower()
        return idstring

    def prepare(self, inline_diff=True):
        """
        Prepare the passed udiff for HTML rendering. It'll return a list
        of dicts with diff information.
        """
        parsed = self._parser(inline_diff=inline_diff)
        self.parsed = True
        self.parsed_diff = parsed
        return parsed

    def as_raw(self, diff_lines=None):
        """
        Returns the raw string diff.
        """
        return self._diff
        #return u''.join(imap(self._line_counter, self._diff.splitlines(1)))

    def as_html(self, table_class='code-difftable', line_class='line',
                old_lineno_class='lineno old', new_lineno_class='lineno new',
                no_lineno_class='lineno',
                code_class='code', enable_comments=False, parsed_lines=None):
        """
        Return the given diff as an html table with customized css classes.
        """
        def _link_to_if(condition, label, url):
            """
            Generates a link if the condition is met, or just the label if not.
            """

            if condition:
                return '''<a href="%(url)s">%(label)s</a>''' % {
                    'url': url,
                    'label': label
                }
            else:
                return label
        if not self.parsed:
            self.prepare()

        diff_lines = self.parsed_diff
        if parsed_lines:
            diff_lines = parsed_lines

        _html_empty = True
        _html = []
        _html.append('''<table class="%(table_class)s">\n''' % {
            'table_class': table_class
        })

        for diff in diff_lines:
            for line in diff['chunks']:
                _html_empty = False
                for change in line:
                    _html.append('''<tr class="%(lc)s %(action)s">\n''' % {
                        'lc': line_class,
                        'action': change['action']
                    })
                    anchor_old_id = ''
                    anchor_new_id = ''
                    anchor_old = "%(filename)s_o%(oldline_no)s" % {
                        'filename': self._safe_id(diff['filename']),
                        'oldline_no': change['old_lineno']
                    }
                    anchor_new = "%(filename)s_n%(newline_no)s" % {
                        'filename': self._safe_id(diff['filename']),
                        'newline_no': change['new_lineno']
                    }
                    cond_old = (change['old_lineno'] != '...' and
                                change['old_lineno'])
                    cond_new = (change['new_lineno'] != '...' and
                                change['new_lineno'])
                    no_lineno = (change['old_lineno'] == '...' and
                                 change['new_lineno'] == '...')
                    if cond_old:
                        anchor_old_id = 'id="%s"' % anchor_old
                    if cond_new:
                        anchor_new_id = 'id="%s"' % anchor_new
                    ###########################################################
                    # OLD LINE NUMBER
                    ###########################################################
                    _html.append('''\t<td %(a_id)s class="%(olc)s" %(colspan)s>''' % {
                        'a_id': anchor_old_id,
                        'olc': no_lineno_class if no_lineno else old_lineno_class,
                        'colspan': 'colspan="2"' if no_lineno else ''
                    })

                    _html.append('''%(link)s''' % {
                        'link': _link_to_if(True, change['old_lineno'],
                                            '#%s' % anchor_old)
                    })
                    _html.append('''</td>\n''')
                    ###########################################################
                    # NEW LINE NUMBER
                    ###########################################################

                    if not no_lineno:
                        _html.append('''\t<td %(a_id)s class="%(nlc)s">''' % {
                            'a_id': anchor_new_id,
                            'nlc': new_lineno_class
                        })

                        _html.append('''%(link)s''' % {
                            'link': _link_to_if(True, change['new_lineno'],
                                                '#%s' % anchor_new)
                        })
                        _html.append('''</td>\n''')
                    ###########################################################
                    # CODE
                    ###########################################################
                    comments = '' if enable_comments else 'no-comment'
                    _html.append('''\t<td class="%(cc)s %(inc)s">''' % {
                        'cc': code_class,
                        'inc': comments
                    })
                    _html.append('''\n\t\t<div class="add-bubble"><div>&nbsp;</div></div><pre>%(code)s</pre>\n''' % {
                        'code': change['line']
                    })

                    _html.append('''\t</td>''')
                    _html.append('''\n</tr>\n''')
        _html.append('''</table>''')
        if _html_empty:
            return None
        return ''.join(_html)

    def stat(self):
        """
        Returns a tuple of added and removed lines for this instance.
        """
        return self.adds, self.removes
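
# Usage sketch for DiffProcessor (not part of the changeset, illustrative
# only); the input is a minimal hg git-style diff for a one-line change.
# prepare() parses it into per-file dicts; the per-file 'stats' carry the
# added/deleted counts (note that stat() only reflects the adds/removes
# counters, which this parsing path does not update).
if __name__ == '__main__':
    _example_diff = (
        'diff --git a/foo.txt b/foo.txt\n'
        '--- a/foo.txt\n'
        '+++ b/foo.txt\n'
        '@@ -1,2 +1,2 @@\n'
        ' unchanged line\n'
        '-old line\n'
        '+new line\n'
    )
    _dp = DiffProcessor(_example_diff, vcs='hg', format='gitdiff')
    _parsed = _dp.prepare()
    assert _parsed[0]['filename'] == 'foo.txt'
    assert _parsed[0]['operation'] == 'M'
    assert _parsed[0]['stats']['added'] == 1
    assert _parsed[0]['stats']['deleted'] == 1
    _html = _dp.as_html(enable_comments=False)  # '<table class="code-difftable">...'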
kallithea/lib/indexers/daemon.py
# -*- coding: utf-8 -*-
 
# This program is free software: you can redistribute it and/or modify
 
# it under the terms of the GNU General Public License as published by
 
# the Free Software Foundation, either version 3 of the License, or
 
# (at your option) any later version.
 
#
 
# This program is distributed in the hope that it will be useful,
 
# but WITHOUT ANY WARRANTY; without even the implied warranty of
 
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 
# GNU General Public License for more details.
 
#
 
# You should have received a copy of the GNU General Public License
 
# along with this program.  If not, see <http://www.gnu.org/licenses/>.
 
"""
 
kallithea.lib.indexers.daemon
 
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 

	
 
A daemon will read from task table and run tasks
 

	
 
This file was forked by the Kallithea project in July 2014.
 
Original author and date, and relevant copyright and licensing information is below:
 
:created_on: Jan 26, 2010
 
:author: marcink
 
:copyright: (c) 2013 RhodeCode GmbH, and others.
 
:license: GPLv3, see LICENSE.md for more details.
 
"""
 

	
 

	
 
import os
 
import sys
 
import logging
 
import traceback
 

	
 
from shutil import rmtree
 
from time import mktime
 

	
 
from os.path import dirname as dn
 
from os.path import join as jn
 

	
 
# Add location of top level folder to sys.path
 
project_path = dn(dn(dn(dn(os.path.realpath(__file__)))))
 
sys.path.append(project_path)
 

	
 
from kallithea.config.conf import INDEX_EXTENSIONS, INDEX_FILENAMES
 
from kallithea.model.scm import ScmModel
 
from kallithea.model.db import Repository
 
from kallithea.lib.utils2 import safe_unicode, safe_str
 
from kallithea.lib.indexers import SCHEMA, IDX_NAME, CHGSETS_SCHEMA, \
 
    CHGSET_IDX_NAME
 

	
 
from kallithea.lib.vcs.exceptions import ChangesetError, RepositoryError, \
 
    NodeDoesNotExistError
 

	
 
from whoosh.index import create_in, open_dir, exists_in
 
from whoosh.query import *
 
from whoosh.qparser import QueryParser
 

	
 
log = logging.getLogger('whoosh_indexer')
 

	
 

	
 
class WhooshIndexingDaemon(object):
 
    """
 
    Daemon for atomic indexing jobs
 
    """
 

	
 
    def __init__(self, indexname=IDX_NAME, index_location=None,
 
                 repo_location=None, sa=None, repo_list=None,
 
                 repo_update_list=None):
 
        self.indexname = indexname
 

	
 
        self.index_location = index_location
 
        if not index_location:
 
            raise Exception('You have to provide index location')
 

	
 
        self.repo_location = repo_location
 
        if not repo_location:
 
            raise Exception('You have to provide repositories location')
 

	
 
        self.repo_paths = ScmModel(sa).repo_scan(self.repo_location)
 

	
 
        #filter repo list
 
        if repo_list:
 
            #Fix non-ascii repo names to unicode
 
            repo_list = map(safe_unicode, repo_list)
 
            self.filtered_repo_paths = {}
 
            for repo_name, repo in self.repo_paths.items():
 
                if repo_name in repo_list:
 
                    self.filtered_repo_paths[repo_name] = repo
 

	
 
            self.repo_paths = self.filtered_repo_paths
 

	
 
        #filter update repo list
 
        self.filtered_repo_update_paths = {}
 
        if repo_update_list:
 
            self.filtered_repo_update_paths = {}
 
            for repo_name, repo in self.repo_paths.items():
 
                if repo_name in repo_update_list:
 
                    self.filtered_repo_update_paths[repo_name] = repo
 
            self.repo_paths = self.filtered_repo_update_paths
 

	
 
        self.initial = True
 
        if not os.path.isdir(self.index_location):
 
            os.makedirs(self.index_location)
 
            log.info('Cannot run incremental index since it does not '
 
                     'yet exist running full build')
 
        elif not exists_in(self.index_location, IDX_NAME):
 
            log.info('Running full index build as the file content '
 
                     'index does not exist')
 
        elif not exists_in(self.index_location, CHGSET_IDX_NAME):
 
            log.info('Running full index build as the changeset '
 
                     'index does not exist')
 
        else:
 
            self.initial = False
 

	
 
    def _get_index_revision(self, repo):
 
        db_repo = Repository.get_by_repo_name(repo.name_unicode)
 
        landing_rev = 'tip'
 
        if db_repo:
 
            _rev_type, _rev = db_repo.landing_rev
 
            landing_rev = _rev
 
        return landing_rev
 

	
 
    def _get_index_changeset(self, repo, index_rev=None):
 
        if not index_rev:
 
            index_rev = self._get_index_revision(repo)
 
        cs = repo.get_changeset(index_rev)
 
        return cs
 

	
 
    def get_paths(self, repo):
 
        """
 
        recursive walk in root dir and return a set of all path in that dir
 
        based on repository walk function
 
        """
 
        index_paths_ = set()
 
        try:
 
            cs = self._get_index_changeset(repo)
 
            for _topnode, _dirs, files in cs.walk('/'):
 
                for f in files:
 
                    index_paths_.add(jn(safe_str(repo.path), safe_str(f.path)))
 

	
 
        except RepositoryError:
 
            log.debug(traceback.format_exc())
 
            pass
 
        return index_paths_
 

	
 
    def get_node(self, repo, path, index_rev=None):
 
        """
 
        gets a filenode based on given full path. It operates on string for
 
        hg git compatibility.
 

	
 
        :param repo: scm repo instance
 
        :param path: full path including root location
 
        :return: FileNode
 
        """
 
        # FIXME: paths should be normalized ... or even better: don't include repo.path
 
        path = safe_str(path)
 
        repo_path = safe_str(repo.path)
 
        assert path.startswith(repo_path)
 
        assert path[len(repo_path)] in (os.path.sep, os.path.altsep)
 
        node_path = path[len(repo_path) + 1:]
 
        cs = self._get_index_changeset(repo, index_rev=index_rev)
 
        node = cs.get_node(node_path)
 
        return node
 

	
 
    def is_indexable_node(self, node):
 
        """
 
        Just index the content of chosen files, skipping binary files
 
        """
 
        return (node.extension in INDEX_EXTENSIONS or node.name in INDEX_FILENAMES) and \
 
               not node.is_binary
 

	
 
    def get_node_mtime(self, node):
 
        return mktime(node.last_changeset.date.timetuple())
 

	
 
    def add_doc(self, writer, path, repo, repo_name, index_rev=None):
 
        """
 
        Adding doc to writer this function itself fetches data from
 
        the instance of vcs backend
 
        """
 
        try:
 
            node = self.get_node(repo, path, index_rev)
 
        except (ChangesetError, NodeDoesNotExistError):
 
            log.debug("couldn't add doc - %s did not have %r at %s", repo, path, index_rev)
 
            return 0, 0
 

	
 
        node = self.get_node(repo, path, index_rev)
 
        indexed = indexed_w_content = 0
 
        if self.is_indexable_node(node):
 
            u_content = node.content
 
            if not isinstance(u_content, unicode):
 
                log.warning('  >> %s Could not get this content as unicode '
 
                            'replacing with empty content' % path)
 
                u_content = u''
 
            else:
 
                log.debug('    >> %s [WITH CONTENT]', path)
 
                indexed_w_content += 1
 

	
 
        else:
 
            log.debug('    >> %s', path)
 
            # just index file name without it's content
 
            u_content = u''
 
            indexed += 1
 

	
 
        p = safe_unicode(path)
 
        writer.add_document(
 
            fileid=p,
 
            owner=unicode(repo.contact),
 
            repository=safe_unicode(repo_name),
 
            path=p,
 
            content=u_content,
 
            modtime=self.get_node_mtime(node),
 
            extension=node.extension
 
        )
 
        return indexed, indexed_w_content
 

	
 
    def index_changesets(self, writer, repo_name, repo, start_rev=None):
 
        """
 
        Add all changeset in the vcs repo starting at start_rev
 
        to the index writer
 

	
 
        :param writer: the whoosh index writer to add to
 
        :param repo_name: name of the repository from whence the
 
          changeset originates including the repository group
 
        :param repo: the vcs repository instance to index changesets for,
 
          the presumption is the repo has changesets to index
 
        :param start_rev=None: the full sha id to start indexing from
 
          if start_rev is None then index from the first changeset in
 
          the repo
 
        """
 

	
 
        if start_rev is None:
 
            start_rev = repo[0].raw_id
 

	
 
        log.debug('indexing changesets in %s starting at rev: %s',
 
                  repo_name, start_rev)
 

	
 
        indexed = 0
 
        cs_iter = repo.get_changesets(start=start_rev)
 
        total = len(cs_iter)
 
        for cs in cs_iter:
 
            log.debug('    >> %s/%s', cs, total)
 
            writer.add_document(
 
                raw_id=unicode(cs.raw_id),
 
                owner=unicode(repo.contact),
 
                date=cs._timestamp,
 
                repository=safe_unicode(repo_name),
 
                author=cs.author,
 
                message=cs.message,
 
                last=cs.last,
 
                added=u' '.join([safe_unicode(node.path) for node in cs.added]).lower(),
 
                removed=u' '.join([safe_unicode(node.path) for node in cs.removed]).lower(),
 
                changed=u' '.join([safe_unicode(node.path) for node in cs.changed]).lower(),
 
                parents=u' '.join([cs.raw_id for cs in cs.parents]),
 
            )
 
            indexed += 1
 

	
 
        log.debug('indexed %d changesets for repo %s', indexed, repo_name)
 
        return indexed
 

	
 
    def index_files(self, file_idx_writer, repo_name, repo):
 
        """
 
        Index files for given repo_name
 

	
 
        :param file_idx_writer: the whoosh index writer to add to
 
        :param repo_name: name of the repository we're indexing
 
        :param repo: instance of vcs repo
 
        """
 
        i_cnt = iwc_cnt = 0
 
        log.debug('building index for %s @revision:%s', repo.path,
 
                                                self._get_index_revision(repo))
 
        index_rev = self._get_index_revision(repo)
 
        for idx_path in self.get_paths(repo):
 
            i, iwc = self.add_doc(file_idx_writer, idx_path, repo, repo_name, index_rev)
 
            i_cnt += i
 
            iwc_cnt += iwc
 

	
 
        log.debug('added %s files %s with content for repo %s',
 
                  i_cnt + iwc_cnt, iwc_cnt, repo.path)
 
        return i_cnt, iwc_cnt
 

	
 
    def update_changeset_index(self):
 
        idx = open_dir(self.index_location, indexname=CHGSET_IDX_NAME)
 

	
 
        with idx.searcher() as searcher:
 
            writer = idx.writer()
 
            writer_is_dirty = False
 
            try:
 
                indexed_total = 0
 
                repo_name = None
 
                for repo_name, repo in self.repo_paths.items():
 
                    # skip indexing if there aren't any revs in the repo
 
                    num_of_revs = len(repo)
 
                    if num_of_revs < 1:
 
                        continue
 

	
 
                    qp = QueryParser('repository', schema=CHGSETS_SCHEMA)
 
                    q = qp.parse(u"last:t AND %s" % repo_name)
 

	
 
                    results = searcher.search(q)
 

	
 
                    # default to scanning the entire repo
 
                    last_rev = 0
 
                    start_id = None
 

	
 
                    if len(results) > 0:
 
                        # assuming that there is only one result, if not this
 
                        # may require a full re-index.
 
                        start_id = results[0]['raw_id']
 
                        last_rev = repo.get_changeset(revision=start_id).revision
 

	
 
                    # there are new changesets to index or a new repo to index
 
                    if last_rev == 0 or num_of_revs > last_rev + 1:
 
                        # delete the docs in the index for the previous
 
                        # last changeset(s)
 
                        for hit in results:
 
                            q = qp.parse(u"last:t AND %s AND raw_id:%s" %
 
                                            (repo_name, hit['raw_id']))
 
                            writer.delete_by_query(q)
 

	
 
                        # index from the previous last changeset + all new ones
 
                        indexed_total += self.index_changesets(writer,
 
                                                repo_name, repo, start_id)
 
                        writer_is_dirty = True
 
                log.debug('indexed %s changesets for repo %s',
 
                          indexed_total, repo_name
 
                )
 
            finally:
 
                if writer_is_dirty:
 
                    log.debug('>> COMMITING CHANGES TO CHANGESET INDEX<<')
 
                    writer.commit(merge=True)
 
                    log.debug('>>> FINISHED REBUILDING CHANGESET INDEX <<<')
 
                else:
 
                    log.debug('>> NOTHING TO COMMIT TO CHANGESET INDEX<<')
 

	
 
    def update_file_index(self):
 
        log.debug((u'STARTING INCREMENTAL INDEXING UPDATE FOR EXTENSIONS %s '
 
                   'AND REPOS %s') % (INDEX_EXTENSIONS, self.repo_paths.keys()))
 

	
 
        idx = open_dir(self.index_location, indexname=self.indexname)
 
        # The set of all paths in the index
 
        indexed_paths = set()
 
        # The set of all paths we need to re-index
 
        to_index = set()
 

	
 
        writer = idx.writer()
 
        writer_is_dirty = False
 
        try:
 
            with idx.reader() as reader:
 

	
 
                # Loop over the stored fields in the index
 
                for fields in reader.all_stored_fields():
 
                    indexed_path = fields['path']
 
                    indexed_repo_path = fields['repository']
 
                    indexed_paths.add(indexed_path)
 

	
 
                    if not indexed_repo_path in self.filtered_repo_update_paths:
 
                        continue
 

	
 
                    repo = self.repo_paths[indexed_repo_path]
 

	
 
                    try:
 
                        node = self.get_node(repo, indexed_path)
 
                        # Check if this file was changed since it was indexed
 
                        indexed_time = fields['modtime']
 
                        mtime = self.get_node_mtime(node)
 
                        if mtime > indexed_time:
 
                            # The file has changed, delete it and add it to
 
                            # the list of files to reindex
 
                            log.debug(
 
                                'adding to reindex list %s mtime: %s vs %s',
 
                                    indexed_path, mtime, indexed_time
 
                            )
 
                            writer.delete_by_term('fileid', indexed_path)
 
                            writer_is_dirty = True
 

	
 
                            to_index.add(indexed_path)
 
                    except (ChangesetError, NodeDoesNotExistError):
 
                        # This file was deleted since it was indexed
 
                        log.debug('removing from index %s', indexed_path)
 
                        writer.delete_by_term('path', indexed_path)
 
                        writer_is_dirty = True
 

	
 
            # Loop over the files in the filesystem
 
            # Assume we have a function that gathers the filenames of the
 
            # documents to be indexed
 
            ri_cnt_total = 0  # indexed
 
            riwc_cnt_total = 0  # indexed with content
 
            for repo_name, repo in self.repo_paths.items():
 
                # skip indexing if there aren't any revisions
 
                if len(repo) < 1:
 
                    continue
 
                ri_cnt = 0   # indexed
 
                riwc_cnt = 0  # indexed with content
 
                for path in self.get_paths(repo):
 
                    path = safe_unicode(path)
 
                    if path in to_index or path not in indexed_paths:
 

	
 
                        # This is either a file that's changed, or a new file
 
                        # that wasn't indexed before. So index it!
 
                        i, iwc = self.add_doc(writer, path, repo, repo_name)
 
                        writer_is_dirty = True
 
                        log.debug('re-indexing %s', path)
 
                        ri_cnt += i
 
                        ri_cnt_total += 1
 
                        riwc_cnt += iwc
 
                        riwc_cnt_total += iwc
 
                log.debug('added %s files (%s with content) for repo %s',
                          ri_cnt + riwc_cnt, riwc_cnt, repo.path)
 
            log.debug('indexed %s files in total and %s with content',
                      ri_cnt_total, riwc_cnt_total)
 
        finally:
 
            if writer_is_dirty:
 
                log.debug('>> COMMITTING CHANGES TO FILE INDEX <<')
 
                writer.commit(merge=True)
 
                log.debug('>>> FINISHED REBUILDING FILE INDEX <<<')
 
            else:
 
                log.debug('>> NOTHING TO COMMIT TO FILE INDEX <<')
 
                writer.cancel()
 

	
 
    def build_indexes(self):
 
        if os.path.exists(self.index_location):
 
            log.debug('removing previous index')
 
            rmtree(self.index_location)
 

	
 
        if not os.path.exists(self.index_location):
 
            os.mkdir(self.index_location)
 

	
 
        chgset_idx = create_in(self.index_location, CHGSETS_SCHEMA,
 
                               indexname=CHGSET_IDX_NAME)
 
        chgset_idx_writer = chgset_idx.writer()
 

	
 
        file_idx = create_in(self.index_location, SCHEMA, indexname=IDX_NAME)
 
        file_idx_writer = file_idx.writer()
 
        log.debug('BUILDING INDEX FOR EXTENSIONS %s AND REPOS %s',
                  INDEX_EXTENSIONS, self.repo_paths.keys())
 

	
 
        for repo_name, repo in self.repo_paths.items():
 
            # skip indexing if there aren't any revisions
 
            if len(repo) < 1:
 
                continue
 

	
 
            self.index_files(file_idx_writer, repo_name, repo)
 
            self.index_changesets(chgset_idx_writer, repo_name, repo)
 

	
 
        log.debug('>> COMMITTING CHANGES <<')
 
        file_idx_writer.commit(merge=True)
 
        chgset_idx_writer.commit(merge=True)
 
        log.debug('>>> FINISHED BUILDING INDEX <<<')
 

	
 
    def update_indexes(self):
 
        self.update_file_index()
 
        self.update_changeset_index()
 

	
 
    def run(self, full_index=False):
 
        """Run daemon"""
 
        if full_index or self.initial:
 
            self.build_indexes()
 
        else:
 
            self.update_indexes()
kallithea/model/db.py
Show inline comments
 
@@ -605,1948 +605,1954 @@ class User(Base, BaseModel):
 
            q = q.options(joinedload(UserEmailMap.user))
 
            if cache:
 
                q = q.options(FromCache("sql_cache_short",
 
                                        "get_email_map_key_%s" % email))
 
            ret = getattr(q.scalar(), 'user', None)
 

	
 
        return ret
 

	
 
    @classmethod
 
    def get_from_cs_author(cls, author):
 
        """
 
        Tries to get a User object out of a commit author string
 

	
 
        :param author:
 
        """
 
        from kallithea.lib.helpers import email, author_name
 
        # If there's a valid email in the passed string, see if it's in the system
 
        _email = email(author)
 
        if _email:
 
            user = cls.get_by_email(_email, case_insensitive=True)
 
            if user is not None:
 
                return user
 
        # Maybe we can match by username?
 
        _author = author_name(author)
 
        user = cls.get_by_username(_author, case_insensitive=True)
 
        if user is not None:
 
            return user
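
    # Usage sketch (the author string below is made up; real input follows
    # the usual 'Name <email>' form):
    #
    #     User.get_from_cs_author('John Doe <john@example.com>')
    #     # -> User matched by email, else by username, else None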
 

	
 
    def update_lastlogin(self):
 
        """Update user lastlogin"""
 
        self.last_login = datetime.datetime.now()
 
        Session().add(self)
 
        log.debug('updated user %s lastlogin', self.username)
 

	
 
    @classmethod
 
    def get_first_admin(cls):
 
        user = User.query().filter(User.admin == True).first()
 
        if user is None:
 
            raise Exception('Missing administrative account!')
 
        return user
 

	
 
    @classmethod
 
    def get_default_user(cls, cache=False):
 
        user = User.get_by_username(User.DEFAULT_USER, cache=cache)
 
        if user is None:
 
            raise Exception('Missing default account!')
 
        return user
 

	
 
    def get_api_data(self, details=False):
 
        """
 
        Common function for generating user related data for API
 
        """
 
        user = self
 
        data = dict(
 
            user_id=user.user_id,
 
            username=user.username,
 
            firstname=user.name,
 
            lastname=user.lastname,
 
            email=user.email,
 
            emails=user.emails,
 
            active=user.active,
 
            admin=user.admin,
 
        )
 
        if details:
 
            data.update(dict(
 
                extern_type=user.extern_type,
 
                extern_name=user.extern_name,
 
                api_key=user.api_key,
 
                api_keys=user.api_keys,
 
                last_login=user.last_login,
 
                ip_addresses=user.ip_addresses
 
                ))
 
        return data
 

	
 
    def __json__(self):
 
        data = dict(
 
            full_name=self.full_name,
 
            full_name_or_username=self.full_name_or_username,
 
            short_contact=self.short_contact,
 
            full_contact=self.full_contact
 
        )
 
        data.update(self.get_api_data())
 
        return data
 

	
 

	
 
class UserApiKeys(Base, BaseModel):
 
    __tablename__ = 'user_api_keys'
 
    __table_args__ = (
 
        Index('uak_api_key_idx', 'api_key'),
 
        Index('uak_api_key_expires_idx', 'api_key', 'expires'),
 
        UniqueConstraint('api_key'),
 
        {'extend_existing': True, 'mysql_engine': 'InnoDB',
 
         'mysql_charset': 'utf8', 'sqlite_autoincrement': True}
 
    )
 
    __mapper_args__ = {}
 

	
 
    user_api_key_id = Column(Integer(), nullable=False, unique=True, primary_key=True)
 
    user_id = Column(Integer(), ForeignKey('users.user_id'), nullable=True, unique=None, default=None)
 
    api_key = Column(String(255, convert_unicode=False), nullable=False, unique=True)
 
    description = Column(UnicodeText(1024))
 
    expires = Column(Float(53), nullable=False)
 
    created_on = Column(DateTime(timezone=False), nullable=False, default=datetime.datetime.now)
 

	
 
    user = relationship('User')
 

	
 
    @property
 
    def expired(self):
 
        if self.expires == -1:
 
            return False
 
        return time.time() > self.expires
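
    # Usage sketch (assuming 'key' is a UserApiKeys row): an expires value
    # of -1 marks a key that never expires; anything else is compared
    # against the current unix time:
    #
    #     key.expires = -1
    #     key.expired               # -> False
    #     key.expires = time.time() - 1
    #     key.expired               # -> True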
 

	
 

	
 
class UserEmailMap(Base, BaseModel):
 
    __tablename__ = 'user_email_map'
 
    __table_args__ = (
 
        Index('uem_email_idx', 'email'),
 
        UniqueConstraint('email'),
 
        {'extend_existing': True, 'mysql_engine': 'InnoDB',
 
         'mysql_charset': 'utf8', 'sqlite_autoincrement': True}
 
    )
 
    __mapper_args__ = {}
 

	
 
    email_id = Column(Integer(), nullable=False, unique=True, primary_key=True)
 
    user_id = Column(Integer(), ForeignKey('users.user_id'), nullable=True, unique=None, default=None)
 
    _email = Column("email", String(255, convert_unicode=False), nullable=True, unique=False, default=None)
 
    user = relationship('User')
 

	
 
    @validates('_email')
 
    def validate_email(self, key, email):
 
        # check that this email is not already someone's main email
 
        main_email = Session().query(User).filter(User.email == email).scalar()
 
        if main_email is not None:
 
            raise AttributeError('email %s is already present in the user table' % email)
 
        return email
 

	
 
    @hybrid_property
 
    def email(self):
 
        return self._email
 

	
 
    @email.setter
 
    def email(self, val):
 
        self._email = val.lower() if val else None
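
    # Usage sketch (assuming 'm' is a UserEmailMap row and a session is
    # configured, since the validator above queries the User table):
    #
    #     m.email = 'John@Example.COM'
    #     m.email  # -> 'john@example.com'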
 

	
 

	
 
class UserIpMap(Base, BaseModel):
 
    __tablename__ = 'user_ip_map'
 
    __table_args__ = (
 
        UniqueConstraint('user_id', 'ip_addr'),
 
        {'extend_existing': True, 'mysql_engine': 'InnoDB',
 
         'mysql_charset': 'utf8', 'sqlite_autoincrement': True}
 
    )
 
    __mapper_args__ = {}
 

	
 
    ip_id = Column(Integer(), nullable=False, unique=True, primary_key=True)
 
    user_id = Column(Integer(), ForeignKey('users.user_id'), nullable=True, unique=None, default=None)
 
    ip_addr = Column(String(255, convert_unicode=False), nullable=True, unique=False, default=None)
 
    active = Column(Boolean(), nullable=True, unique=None, default=True)
 
    user = relationship('User')
 

	
 
    @classmethod
 
    def _get_ip_range(cls, ip_addr):
 
        from kallithea.lib import ipaddr
 
        net = ipaddr.IPNetwork(address=ip_addr)
 
        return [str(net.network), str(net.broadcast)]
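
    # Usage sketch: a CIDR value expands to its network and broadcast
    # addresses (the address below is made up):
    #
    #     UserIpMap._get_ip_range('192.168.1.0/24')
    #     # -> ['192.168.1.0', '192.168.1.255']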
 

	
 
    def __json__(self):
 
        return dict(
 
          ip_addr=self.ip_addr,
 
          ip_range=self._get_ip_range(self.ip_addr)
 
        )
 

	
 
    def __unicode__(self):
 
        return u"<%s('user_id:%s=>%s')>" % (self.__class__.__name__,
 
                                            self.user_id, self.ip_addr)
 

	
 
class UserLog(Base, BaseModel):
 
    __tablename__ = 'user_logs'
 
    __table_args__ = (
 
        {'extend_existing': True, 'mysql_engine': 'InnoDB',
 
         'mysql_charset': 'utf8', 'sqlite_autoincrement': True},
 
    )
 
    user_log_id = Column(Integer(), nullable=False, unique=True, primary_key=True)
 
    user_id = Column(Integer(), ForeignKey('users.user_id'), nullable=True, unique=None, default=None)
 
    username = Column(String(255, convert_unicode=False), nullable=True, unique=None, default=None)
 
    repository_id = Column(Integer(), ForeignKey('repositories.repo_id'), nullable=True)
 
    repository_name = Column(String(255, convert_unicode=False), nullable=True, unique=None, default=None)
 
    user_ip = Column(String(255, convert_unicode=False), nullable=True, unique=None, default=None)
 
    action = Column(UnicodeText(1200000, convert_unicode=False), nullable=True, unique=None, default=None)
 
    action_date = Column(DateTime(timezone=False), nullable=True, unique=None, default=None)
 

	
 
    def __unicode__(self):
 
        return u"<%s('id:%s:%s')>" % (self.__class__.__name__,
 
                                      self.repository_name,
 
                                      self.action)
 

	
 
    @property
 
    def action_as_day(self):
 
        return datetime.date(*self.action_date.timetuple()[:3])
 

	
 
    user = relationship('User')
 
    repository = relationship('Repository', cascade='')
 

	
 

	
 
class UserGroup(Base, BaseModel):
 
    __tablename__ = 'users_groups'
 
    __table_args__ = (
 
        {'extend_existing': True, 'mysql_engine': 'InnoDB',
 
         'mysql_charset': 'utf8', 'sqlite_autoincrement': True},
 
    )
 

	
 
    users_group_id = Column(Integer(), nullable=False, unique=True, primary_key=True)
 
    users_group_name = Column(String(255, convert_unicode=False), nullable=False, unique=True, default=None)
 
    user_group_description = Column(String(10000, convert_unicode=False), nullable=True, unique=None, default=None)
 
    users_group_active = Column(Boolean(), nullable=True, unique=None, default=None)
 
    inherit_default_permissions = Column("users_group_inherit_default_permissions", Boolean(), nullable=False, unique=None, default=True)
 
    user_id = Column(Integer(), ForeignKey('users.user_id'), nullable=False, unique=False, default=None)
 
    created_on = Column(DateTime(timezone=False), nullable=False, default=datetime.datetime.now)
 
    _group_data = Column("group_data", LargeBinary(), nullable=True)  # JSON data
 

	
 
    members = relationship('UserGroupMember', cascade="all, delete-orphan")
 
    users_group_to_perm = relationship('UserGroupToPerm', cascade='all')
 
    users_group_repo_to_perm = relationship('UserGroupRepoToPerm', cascade='all')
 
    users_group_repo_group_to_perm = relationship('UserGroupRepoGroupToPerm', cascade='all')
 
    user_user_group_to_perm = relationship('UserUserGroupToPerm', cascade='all')
 
    user_group_user_group_to_perm = relationship('UserGroupUserGroupToPerm', primaryjoin="UserGroupUserGroupToPerm.target_user_group_id==UserGroup.users_group_id", cascade='all')
 

	
 
    user = relationship('User')
 

	
 
    @hybrid_property
 
    def group_data(self):
 
        if not self._group_data:
 
            return {}
 

	
 
        try:
 
            return json.loads(self._group_data)
 
        except TypeError:
 
            return {}
 

	
 
    @group_data.setter
 
    def group_data(self, val):
 
        try:
 
            self._group_data = json.dumps(val)
 
        except Exception:
 
            log.error(traceback.format_exc())
 

	
 
    def __unicode__(self):
 
        return u"<%s('id:%s:%s')>" % (self.__class__.__name__,
 
                                      self.users_group_id,
 
                                      self.users_group_name)
 

	
 
    @classmethod
 
    def get_by_group_name(cls, group_name, cache=False,
 
                          case_insensitive=False):
 
        if case_insensitive:
 
            q = cls.query().filter(cls.users_group_name.ilike(group_name))
 
        else:
 
            q = cls.query().filter(cls.users_group_name == group_name)
 
        if cache:
 
            q = q.options(FromCache(
 
                            "sql_cache_short",
 
                            "get_group_%s" % _hash_key(group_name)
 
                          )
 
            )
 
        return q.scalar()
 

	
 
    @classmethod
 
    def get(cls, user_group_id, cache=False):
 
        user_group = cls.query()
 
        if cache:
 
            user_group = user_group.options(FromCache("sql_cache_short",
 
                                    "get_users_group_%s" % user_group_id))
 
        return user_group.get(user_group_id)
 

	
 
    def get_api_data(self, with_members=True):
 
        user_group = self
 

	
 
        data = dict(
 
            users_group_id=user_group.users_group_id,
 
            group_name=user_group.users_group_name,
 
            group_description=user_group.user_group_description,
 
            active=user_group.users_group_active,
 
            owner=user_group.user.username,
 
        )
 
        if with_members:
 
            members = []
 
            for user in user_group.members:
 
                user = user.user
 
                members.append(user.get_api_data())
 
            data['members'] = members
 

	
 
        return data
 

	
 

	
 
class UserGroupMember(Base, BaseModel):
 
    __tablename__ = 'users_groups_members'
 
    __table_args__ = (
 
        {'extend_existing': True, 'mysql_engine': 'InnoDB',
 
         'mysql_charset': 'utf8', 'sqlite_autoincrement': True},
 
    )
 

	
 
    users_group_member_id = Column(Integer(), nullable=False, unique=True, primary_key=True)
 
    users_group_id = Column(Integer(), ForeignKey('users_groups.users_group_id'), nullable=False, unique=None, default=None)
 
    user_id = Column(Integer(), ForeignKey('users.user_id'), nullable=False, unique=None, default=None)
 

	
 
    user = relationship('User')
 
    users_group = relationship('UserGroup')
 

	
 
    def __init__(self, gr_id='', u_id=''):
 
        self.users_group_id = gr_id
 
        self.user_id = u_id
 

	
 

	
 
class RepositoryField(Base, BaseModel):
 
    __tablename__ = 'repositories_fields'
 
    __table_args__ = (
 
        UniqueConstraint('repository_id', 'field_key'),  # no-multi field
 
        {'extend_existing': True, 'mysql_engine': 'InnoDB',
 
         'mysql_charset': 'utf8', 'sqlite_autoincrement': True},
 
    )
 
    PREFIX = 'ex_'  # prefix used in form to not conflict with already existing fields
 

	
 
    repo_field_id = Column(Integer(), nullable=False, unique=True, primary_key=True)
 
    repository_id = Column(Integer(), ForeignKey('repositories.repo_id'), nullable=False, unique=None, default=None)
 
    field_key = Column(String(250, convert_unicode=False))
 
    field_label = Column(String(1024, convert_unicode=False), nullable=False)
 
    field_value = Column(String(10000, convert_unicode=False), nullable=False)
 
    field_desc = Column(String(1024, convert_unicode=False), nullable=False)
 
    field_type = Column(String(255), nullable=False, unique=None)
 
    created_on = Column(DateTime(timezone=False), nullable=False, default=datetime.datetime.now)
 

	
 
    repository = relationship('Repository')
 

	
 
    @property
 
    def field_key_prefixed(self):
 
        return '%s%s' % (self.PREFIX, self.field_key)
 

	
 
    @classmethod
 
    def un_prefix_key(cls, key):
 
        if key.startswith(cls.PREFIX):
 
            return key[len(cls.PREFIX):]
 
        return key
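
    # Usage sketch of the PREFIX round-trip ('ticket_url' is made up):
    #
    #     RepositoryField.un_prefix_key('ex_ticket_url')  # -> 'ticket_url'
    #     RepositoryField.un_prefix_key('ticket_url')     # -> 'ticket_url'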
 

	
 
    @classmethod
 
    def get_by_key_name(cls, key, repo):
 
        row = cls.query() \
 
                .filter(cls.repository == repo) \
 
                .filter(cls.field_key == key).scalar()
 
        return row
 

	
 

	
 
class Repository(Base, BaseModel):
 
    __tablename__ = 'repositories'
 
    __table_args__ = (
 
        UniqueConstraint('repo_name'),
 
        Index('r_repo_name_idx', 'repo_name'),
 
        {'extend_existing': True, 'mysql_engine': 'InnoDB',
 
         'mysql_charset': 'utf8', 'sqlite_autoincrement': True},
 
    )
 
    DEFAULT_CLONE_URI = '{scheme}://{user}@{netloc}/{repo}'
 
    DEFAULT_CLONE_URI_ID = '{scheme}://{user}@{netloc}/_{repoid}'
 

	
 
    STATE_CREATED = 'repo_state_created'
 
    STATE_PENDING = 'repo_state_pending'
 
    STATE_ERROR = 'repo_state_error'
 

	
 
    repo_id = Column(Integer(), nullable=False, unique=True, primary_key=True)
 
    repo_name = Column(String(255, convert_unicode=False), nullable=False, unique=True, default=None)
 
    repo_state = Column(String(255), nullable=True)
 

	
 
    clone_uri = Column(String(255, convert_unicode=False), nullable=True, unique=False, default=None)
 
    repo_type = Column(String(255, convert_unicode=False), nullable=False, unique=False, default=None)
 
    user_id = Column(Integer(), ForeignKey('users.user_id'), nullable=False, unique=False, default=None)
 
    private = Column(Boolean(), nullable=True, unique=None, default=None)
 
    enable_statistics = Column("statistics", Boolean(), nullable=True, unique=None, default=True)
 
    enable_downloads = Column("downloads", Boolean(), nullable=True, unique=None, default=True)
 
    description = Column(String(10000, convert_unicode=False), nullable=True, unique=None, default=None)
 
    created_on = Column(DateTime(timezone=False), nullable=False, unique=None, default=datetime.datetime.now)
 
    updated_on = Column(DateTime(timezone=False), nullable=False, unique=None, default=datetime.datetime.now)
 
    _landing_revision = Column("landing_revision", String(255, convert_unicode=False), nullable=False, unique=False, default=None)
 
    enable_locking = Column(Boolean(), nullable=False, unique=None, default=False)
 
    _locked = Column("locked", String(255, convert_unicode=False), nullable=True, unique=False, default=None)
 
    _changeset_cache = Column("changeset_cache", LargeBinary(), nullable=True)  # JSON data
 

	
 
    fork_id = Column(Integer(), ForeignKey('repositories.repo_id'), nullable=True, unique=False, default=None)
 
    group_id = Column(Integer(), ForeignKey('groups.group_id'), nullable=True, unique=False, default=None)
 

	
 
    user = relationship('User')
 
    fork = relationship('Repository', remote_side=repo_id)
 
    group = relationship('RepoGroup')
 
    repo_to_perm = relationship('UserRepoToPerm', cascade='all', order_by='UserRepoToPerm.repo_to_perm_id')
 
    users_group_to_perm = relationship('UserGroupRepoToPerm', cascade='all')
 
    stats = relationship('Statistics', cascade='all', uselist=False)
 

	
 
    followers = relationship('UserFollowing',
 
                             primaryjoin='UserFollowing.follows_repo_id==Repository.repo_id',
 
                             cascade='all')
 
    extra_fields = relationship('RepositoryField',
 
                                cascade="all, delete-orphan")
 

	
 
    logs = relationship('UserLog')
 
    comments = relationship('ChangesetComment', cascade="all, delete-orphan")
 

	
 
    pull_requests_org = relationship('PullRequest',
 
                    primaryjoin='PullRequest.org_repo_id==Repository.repo_id',
 
                    cascade="all, delete-orphan")
 

	
 
    pull_requests_other = relationship('PullRequest',
 
                    primaryjoin='PullRequest.other_repo_id==Repository.repo_id',
 
                    cascade="all, delete-orphan")
 

	
 
    def __unicode__(self):
 
        return u"<%s('%s:%s')>" % (self.__class__.__name__, self.repo_id,
 
                                   safe_unicode(self.repo_name))
 

	
 
    @hybrid_property
 
    def landing_rev(self):
 
        # should always return [rev_type, rev]
 
        if self._landing_revision:
 
            _rev_info = self._landing_revision.split(':')
 
            if len(_rev_info) < 2:
 
                _rev_info.insert(0, 'rev')
 
            return [_rev_info[0], _rev_info[1]]
 
        return [None, None]
 

	
 
    @landing_rev.setter
 
    def landing_rev(self, val):
 
        if ':' not in val:
 
            raise ValueError('value must be delimited with `:` and consist '
 
                             'of <rev_type>:<rev>, got %s instead' % val)
 
        self._landing_revision = val
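
    # Usage sketch (assuming 'repo' is a Repository instance): the value is
    # stored as '<rev_type>:<rev>' and read back as a two-element list:
    #
    #     repo.landing_rev = 'branch:default'
    #     repo.landing_rev  # -> ['branch', 'default']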
 

	
 
    @hybrid_property
 
    def locked(self):
 
        # should always return [user_id, timelocked]
 
        if self._locked:
 
            _lock_info = self._locked.split(':')
 
            return [int(_lock_info[0]), _lock_info[1]]
 
        return [None, None]
 

	
 
    @locked.setter
 
    def locked(self, val):
 
        if val and isinstance(val, (list, tuple)):
 
            self._locked = ':'.join(map(str, val))
 
        else:
 
            self._locked = None
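
    # Usage sketch (assuming 'repo' is a Repository instance): the lock is
    # stored as a 'user_id:timestamp' string and cleared with None:
    #
    #     repo.locked = [user_id, time.time()]
    #     repo.locked = None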
 

	
 
    @hybrid_property
 
    def changeset_cache(self):
 
        try:
 
            cs_cache = json.loads(self._changeset_cache) # might raise on bad data
 
            cs_cache['raw_id'] # verify data, raise exception on error
 
            return cs_cache
 
        except (TypeError, KeyError, ValueError):
 
            return EmptyChangeset().__json__()
 

	
 
    @changeset_cache.setter
 
    def changeset_cache(self, val):
 
        try:
 
            self._changeset_cache = json.dumps(val)
 
        except Exception:
 
            log.error(traceback.format_exc())
 

	
 
    @classmethod
 
    def url_sep(cls):
 
        return URL_SEP
 

	
 
    @classmethod
 
    def normalize_repo_name(cls, repo_name):
 
        """
 
        Normalizes OS-specific repo_name to the format stored internally in the
        database, using URL_SEP
 

	
 
        :param cls:
 
        :param repo_name:
 
        """
 
        return cls.url_sep().join(repo_name.split(os.sep))
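
    # Usage sketch: on Windows (os.sep == '\\') a path-style name is
    # normalized, on POSIX it is returned unchanged:
    #
    #     Repository.normalize_repo_name('group\\repo')  # -> 'group/repo'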
 

	
 
    @classmethod
 
    def get_by_repo_name(cls, repo_name):
 
        q = Session().query(cls).filter(cls.repo_name == repo_name)
 
        q = q.options(joinedload(Repository.fork)) \
 
                .options(joinedload(Repository.user)) \
 
                .options(joinedload(Repository.group))
 
        return q.scalar()
 

	
 
    @classmethod
 
    def get_by_full_path(cls, repo_full_path):
 
        repo_name = repo_full_path.split(cls.base_path(), 1)[-1]
 
        repo_name = cls.normalize_repo_name(repo_name)
 
        return cls.get_by_repo_name(repo_name.strip(URL_SEP))
 

	
 
    @classmethod
 
    def get_repo_forks(cls, repo_id):
 
        return cls.query().filter(Repository.fork_id == repo_id)
 

	
 
    @classmethod
 
    def base_path(cls):
 
        """
 
        Returns base path where all repos are stored
 

	
 
        :param cls:
 
        """
 
        q = Session().query(Ui) \
 
            .filter(Ui.ui_key == cls.url_sep())
 
        q = q.options(FromCache("sql_cache_short", "repository_repo_path"))
 
        return q.one().ui_value
 

	
 
    @property
 
    def forks(self):
 
        """
 
        Return forks of this repo
 
        """
 
        return Repository.get_repo_forks(self.repo_id)
 

	
 
    @property
 
    def parent(self):
 
        """
 
        Returns fork parent
 
        """
 
        return self.fork
 

	
 
    @property
 
    def just_name(self):
 
        return self.repo_name.split(Repository.url_sep())[-1]
 

	
 
    @property
 
    def groups_with_parents(self):
 
        groups = []
 
        if self.group is None:
 
            return groups
 

	
 
        cur_gr = self.group
 
        groups.insert(0, cur_gr)
 
        while 1:
 
            gr = getattr(cur_gr, 'parent_group', None)
 
            cur_gr = cur_gr.parent_group
 
            if gr is None:
 
                break
 
            groups.insert(0, gr)
 

	
 
        return groups
 

	
 
    @property
 
    def groups_and_repo(self):
 
        return self.groups_with_parents, self.just_name, self.repo_name
 

	
 
    @LazyProperty
 
    def repo_path(self):
 
        """
 
        Returns the full base path for this repository, i.e. where it actually
        exists on the filesystem
 
        """
 
        q = Session().query(Ui).filter(Ui.ui_key ==
 
                                              Repository.url_sep())
 
        q = q.options(FromCache("sql_cache_short", "repository_repo_path"))
 
        return q.one().ui_value
 

	
 
    @property
 
    def repo_full_path(self):
 
        p = [self.repo_path]
 
        # we need to split the name by / since this is how we store the
 
        # names in the database, but that eventually needs to be converted
 
        # into a valid system path
 
        p += self.repo_name.split(Repository.url_sep())
 
        return os.path.join(*map(safe_unicode, p))
 

	
 
    @property
 
    def cache_keys(self):
 
        """
 
        Returns associated cache keys for that repo
 
        """
 
        return CacheInvalidation.query() \
 
            .filter(CacheInvalidation.cache_args == self.repo_name) \
 
            .order_by(CacheInvalidation.cache_key) \
 
            .all()
 

	
 
    def get_new_name(self, repo_name):
 
        """
 
        returns new full repository name based on assigned group and new name

        :param repo_name:
 
        """
 
        path_prefix = self.group.full_path_splitted if self.group else []
 
        return Repository.url_sep().join(path_prefix + [repo_name])
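
    # Usage sketch (assuming 'repo' lives in a group named 'projects/web';
    # both names are made up):
    #
    #     repo.get_new_name('newname')  # -> 'projects/web/newname'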
 

	
 
    @property
 
    def _ui(self):
 
        """
 
        Creates a db-based ui object for this repository
 
        """
 
        from kallithea.lib.utils import make_ui
 
        return make_ui('db', clear_session=False)
 

	
 
    @classmethod
 
    def is_valid(cls, repo_name):
 
        """
 
        returns True if the given repo name is a valid filesystem repository
 

	
 
        :param cls:
 
        :param repo_name:
 
        """
 
        from kallithea.lib.utils import is_valid_repo
 

	
 
        return is_valid_repo(repo_name, cls.base_path())
 

	
 
    def get_api_data(self):
 
        """
 
        Common function for generating repo api data
 

	
 
        """
 
        repo = self
 
        data = dict(
 
            repo_id=repo.repo_id,
 
            repo_name=repo.repo_name,
 
            repo_type=repo.repo_type,
 
            clone_uri=repo.clone_uri,
 
            private=repo.private,
 
            created_on=repo.created_on,
 
            description=repo.description,
 
            landing_rev=repo.landing_rev,
 
            owner=repo.user.username,
 
            fork_of=repo.fork.repo_name if repo.fork else None,
 
            enable_statistics=repo.enable_statistics,
 
            enable_locking=repo.enable_locking,
 
            enable_downloads=repo.enable_downloads,
 
            last_changeset=repo.changeset_cache,
 
            locked_by=User.get(self.locked[0]).get_api_data() \
 
                if self.locked[0] else None,
 
            locked_date=time_to_datetime(self.locked[1]) \
 
                if self.locked[1] else None
 
        )
 
        rc_config = Setting.get_app_settings()
 
        repository_fields = str2bool(rc_config.get('repository_fields'))
 
        if repository_fields:
 
            for f in self.extra_fields:
 
                data[f.field_key_prefixed] = f.field_value
 

	
 
        return data
 

	
 
    @classmethod
 
    def lock(cls, repo, user_id, lock_time=None):
 
        if lock_time is None:
 
            lock_time = time.time()
 
        repo.locked = [user_id, lock_time]
 
        Session().add(repo)
 
        Session().commit()
 

	
 
    @classmethod
 
    def unlock(cls, repo):
 
        repo.locked = None
 
        Session().add(repo)
 
        Session().commit()
 

	
 
    @classmethod
 
    def getlock(cls, repo):
 
        return repo.locked
 

	
 
    @property
 
    def last_db_change(self):
 
        return self.updated_on
 

	
 
    @property
 
    def clone_uri_hidden(self):
 
        clone_uri = self.clone_uri
 
        if clone_uri:
 
            import urlobject
 
            url_obj = urlobject.URLObject(self.clone_uri)
 
            if url_obj.password:
 
                clone_uri = url_obj.with_password('*****')
 
        return clone_uri
 

	
 
    def clone_url(self, **override):
 
        import kallithea.lib.helpers as h
 
        qualified_home_url = h.canonical_url('home')
 

	
 
        uri_tmpl = None
 
        if 'with_id' in override:
 
            uri_tmpl = self.DEFAULT_CLONE_URI_ID
 
            del override['with_id']
 

	
 
        if 'uri_tmpl' in override:
 
            uri_tmpl = override['uri_tmpl']
 
            del override['uri_tmpl']
 

	
 
        # we didn't override our template via **override
 
        if not uri_tmpl:
 
            uri_tmpl = self.DEFAULT_CLONE_URI
 
            try:
 
                from pylons import tmpl_context as c
 
                uri_tmpl = c.clone_uri_tmpl
 
            except AttributeError:
 
                # happens when we call this outside of a request context,
                # i.e. without tmpl_context set up
 
                pass
 

	
 
        return get_clone_url(uri_tmpl=uri_tmpl,
 
                             qualified_home_url=qualified_home_url,
 
                             repo_name=self.repo_name,
 
                             repo_id=self.repo_id, **override)
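
    # Usage sketch (assuming 'repo' is a Repository instance; template
    # fields not overridden come from the configured clone URI template):
    #
    #     repo.clone_url(user='jdoe')    # fill the {user} template field
    #     repo.clone_url(with_id=True)   # use the .../_{repoid} template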
 

	
 
    def set_state(self, state):
 
        self.repo_state = state
 
        Session().add(self)
 
    #==========================================================================
 
    # SCM PROPERTIES
 
    #==========================================================================
 

	
 
    def get_changeset(self, rev=None):
 
        return get_changeset_safe(self.scm_instance, rev)
 

	
 
    def get_landing_changeset(self):
 
        """
 
        Returns landing changeset, or if that doesn't exist returns the tip
 
        """
 
        _rev_type, _rev = self.landing_rev
 
        cs = self.get_changeset(_rev)
 
        if isinstance(cs, EmptyChangeset):
 
            return self.get_changeset()
 
        return cs
 

	
 
    def update_changeset_cache(self, cs_cache=None):
 
        """
 
        Update cache of last changeset for repository, keys should be::
 

	
 
            short_id
 
            raw_id
 
            revision
 
            message
 
            date
 
            author
 

	
 
        :param cs_cache:
 
        """
 
        from kallithea.lib.vcs.backends.base import BaseChangeset
 
        if cs_cache is None:
 
            cs_cache = EmptyChangeset()
 
            # use no-cache version here
 
            scm_repo = self.scm_instance_no_cache()
 
            if scm_repo:
 
                cs_cache = scm_repo.get_changeset()
 

	
 
        if isinstance(cs_cache, BaseChangeset):
 
            cs_cache = cs_cache.__json__()
 

	
 
        if (not self.changeset_cache or cs_cache['raw_id'] != self.changeset_cache['raw_id']):
 
            _default = datetime.datetime.fromtimestamp(0)
 
            last_change = cs_cache.get('date') or _default
 
            log.debug('updated repo %s with new cs cache %s',
 
                      self.repo_name, cs_cache)
 
            self.updated_on = last_change
 
            self.changeset_cache = cs_cache
 
            Session().add(self)
 
            Session().commit()
 
        else:
 
            log.debug('changeset_cache for %s already up to date with %s',
 
                      self.repo_name, cs_cache['raw_id'])
 

	
 
    @property
 
    def tip(self):
 
        return self.get_changeset('tip')
 

	
 
    @property
 
    def author(self):
 
        return self.tip.author
 

	
 
    @property
 
    def last_change(self):
 
        return self.scm_instance.last_change
 

	
 
    def get_comments(self, revisions=None):
 
        """
 
        Returns comments for this repository grouped by revisions
 

	
 
        :param revisions: filter query by revisions only
 
        """
 
        cmts = ChangesetComment.query() \
 
            .filter(ChangesetComment.repo == self)
 
        if revisions is not None:
 
            if not revisions:
 
                return [] # don't use sql 'in' on empty set
 
            cmts = cmts.filter(ChangesetComment.revision.in_(revisions))
 
        grouped = collections.defaultdict(list)
 
        for cmt in cmts.all():
 
            grouped[cmt.revision].append(cmt)
 
        return grouped
 

	
 
    def statuses(self, revisions):
 
        """
 
        Returns statuses for this repository.
 
        PRs without any votes do _not_ show up as unreviewed.
 

	
 
        :param revisions: list of revisions to get statuses for
 
        """
 
        if not revisions:
 
            return {}
 

	
 
        statuses = ChangesetStatus.query() \
 
            .filter(ChangesetStatus.repo == self) \
 
            .filter(ChangesetStatus.version == 0) \
 
            .filter(ChangesetStatus.revision.in_(revisions))
 

	
 
        grouped = {}
 
        for stat in statuses.all():
 
            pr_id = pr_nice_id = pr_repo = None
 
            if stat.pull_request:
 
                pr_id = stat.pull_request.pull_request_id
 
                pr_nice_id = PullRequest.make_nice_id(pr_id)
 
                pr_repo = stat.pull_request.other_repo.repo_name
 
            grouped[stat.revision] = [str(stat.status), stat.status_lbl,
 
                                      pr_id, pr_repo, pr_nice_id]
 
        return grouped
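
    # Sketch of the returned mapping (the raw_id and status values below
    # are made up):
    #
    #     {'deadbeef...': ['approved', 'Approved', pr_id, pr_repo, pr_nice_id]}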
 

	
 
    def _repo_size(self):
 
        from kallithea.lib import helpers as h
 
        log.debug('calculating repository size...')
 
        return h.format_byte_size(self.scm_instance.size)
 

	
 
    #==========================================================================
 
    # SCM CACHE INSTANCE
 
    #==========================================================================
 

	
 
    def set_invalidate(self):
 
        """
 
        Mark caches of this repo as invalid.
 
        """
 
        CacheInvalidation.set_invalidate(self.repo_name)
 

	
 
    def scm_instance_no_cache(self):
 
        return self.__get_instance()
 

	
 
    @property
 
    def scm_instance(self):
 
        import kallithea
 
        full_cache = str2bool(kallithea.CONFIG.get('vcs_full_cache'))
 
        if full_cache:
 
            return self.scm_instance_cached()
 
        return self.__get_instance()
 

	
 
    def scm_instance_cached(self, valid_cache_keys=None):
 
        @cache_region('long_term')
 
        def _c(repo_name):
 
            return self.__get_instance()
 
        rn = self.repo_name
 

	
 
        valid = CacheInvalidation.test_and_set_valid(rn, None, valid_cache_keys=valid_cache_keys)
 
        if not valid:
 
            log.debug('Cache for %s invalidated, getting new object', rn)
 
            region_invalidate(_c, None, rn)
 
        else:
 
            log.debug('Getting scm_instance of %s from cache', rn)
 
        return _c(rn)
 

	
 
    def __get_instance(self):
 
        repo_full_path = self.repo_full_path
 

	
 
        alias = get_scm(repo_full_path)[0]
 
        log.debug('Creating instance of %s repository from %s',
 
                  alias, repo_full_path)
 
        backend = get_backend(alias)
 

	
 
        if alias == 'hg':
 
            repo = backend(safe_str(repo_full_path), create=False,
 
                           baseui=self._ui)
 
        else:
 
            repo = backend(repo_full_path, create=False)
 

	
 
        return repo
 

	
 
    def __json__(self):
 
        return dict(landing_rev = self.landing_rev)
 

	
 
class RepoGroup(Base, BaseModel):
 
    __tablename__ = 'groups'
 
    __table_args__ = (
 
        UniqueConstraint('group_name', 'group_parent_id'),
 
        CheckConstraint('group_id != group_parent_id'),
 
        {'extend_existing': True, 'mysql_engine': 'InnoDB',
 
         'mysql_charset': 'utf8', 'sqlite_autoincrement': True},
 
    )
 
    __mapper_args__ = {'order_by': 'group_name'}
 

	
 
    SEP = ' &raquo; '
 

	
 
    group_id = Column(Integer(), nullable=False, unique=True, primary_key=True)
 
    group_name = Column(String(255, convert_unicode=False), nullable=False, unique=True, default=None)
 
    group_parent_id = Column(Integer(), ForeignKey('groups.group_id'), nullable=True, unique=None, default=None)
 
    group_description = Column(String(10000, convert_unicode=False), nullable=True, unique=None, default=None)
 
    enable_locking = Column(Boolean(), nullable=False, unique=None, default=False)
 
    user_id = Column(Integer(), ForeignKey('users.user_id'), nullable=False, unique=False, default=None)
 
    created_on = Column(DateTime(timezone=False), nullable=False, default=datetime.datetime.now)
 

	
 
    repo_group_to_perm = relationship('UserRepoGroupToPerm', cascade='all', order_by='UserRepoGroupToPerm.group_to_perm_id')
 
    users_group_to_perm = relationship('UserGroupRepoGroupToPerm', cascade='all')
 
    parent_group = relationship('RepoGroup', remote_side=group_id)
 
    user = relationship('User')
 

	
 
    def __init__(self, group_name='', parent_group=None):
 
        self.group_name = group_name
 
        self.parent_group = parent_group
 

	
 
    def __unicode__(self):
 
        return u"<%s('id:%s:%s')>" % (self.__class__.__name__, self.group_id,
 
                                      self.group_name)
 

	
 
    @classmethod
 
    def _generate_choice(cls, repo_group):
 
        """Return tuple with group_id and name as html literal"""
 
        from webhelpers.html import literal
 
        if repo_group is None:
 
            return (-1, u'-- %s --' % _('top level'))
 
        return repo_group.group_id, literal(cls.SEP.join(repo_group.full_path_splitted))
 

	
 
    @classmethod
 
    def groups_choices(cls, groups):
 
        """Return tuples with group_id and name as html literal."""
 
        return sorted((cls._generate_choice(g) for g in groups),
 
                      key=lambda c: c[1].split(cls.SEP))
 

	
 
    @classmethod
 
    def url_sep(cls):
 
        return URL_SEP
 

	
 
    @classmethod
 
    def get_by_group_name(cls, group_name, cache=False, case_insensitive=False):
 
        if case_insensitive:
 
            gr = cls.query() \
 
                .filter(cls.group_name.ilike(group_name))
 
        else:
 
            gr = cls.query() \
 
                .filter(cls.group_name == group_name)
 
        if cache:
 
            gr = gr.options(FromCache(
 
                            "sql_cache_short",
 
                            "get_group_%s" % _hash_key(group_name)
 
                            )
 
            )
 
        return gr.scalar()
 

	
 
    @property
 
    def parents(self):
 
        parents_recursion_limit = 10
 
        groups = []
 
        if self.parent_group is None:
 
            return groups
 
        cur_gr = self.parent_group
 
        groups.insert(0, cur_gr)
 
        cnt = 0
 
        while 1:
 
            cnt += 1
 
            gr = getattr(cur_gr, 'parent_group', None)
 
            cur_gr = cur_gr.parent_group
 
            if gr is None:
 
                break
 
            if cnt == parents_recursion_limit:
 
                # this will prevent accidental infinite loops
 
                log.error('more than %s parents found for group %s, stopping '
                          'recursive parent fetching',
                          parents_recursion_limit, self)
 
                break
 

	
 
            groups.insert(0, gr)
 
        return groups
 

	
 
    @property
 
    def children(self):
 
        return RepoGroup.query().filter(RepoGroup.parent_group == self)
 

	
 
    @property
 
    def name(self):
 
        return self.group_name.split(RepoGroup.url_sep())[-1]
 

	
 
    @property
 
    def full_path(self):
 
        return self.group_name
 

	
 
    @property
 
    def full_path_splitted(self):
 
        return self.group_name.split(RepoGroup.url_sep())
 

	
 
    @property
 
    def repositories(self):
 
        return Repository.query() \
 
                .filter(Repository.group == self) \
 
                .order_by(Repository.repo_name)
 

	
 
    @property
 
    def repositories_recursive_count(self):
 
        cnt = self.repositories.count()
 

	
 
        def children_count(group):
 
            cnt = 0
 
            for child in group.children:
 
                cnt += child.repositories.count()
 
                cnt += children_count(child)
 
            return cnt
 

	
 
        return cnt + children_count(self)
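
    # Worked example: a group with 2 repositories of its own and one child
    # group holding 3 repositories has a recursive count of 2 + 3 = 5.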
 

	
 
    def _recursive_objects(self, include_repos=True):
 
        all_ = []
 

	
 
        def _get_members(root_gr):
 
            if include_repos:
 
                for r in root_gr.repositories:
 
                    all_.append(r)
 
            children = root_gr.children.all()
            if children:
                for gr in children:
 
                    all_.append(gr)
 
                    _get_members(gr)
 

	
 
        _get_members(self)
 
        return [self] + all_
 

	
 
    def recursive_groups_and_repos(self):
 
        """
 
        Recursively returns all groups, with the repositories in those groups
 
        """
 
        return self._recursive_objects()
 

	
 
    def recursive_groups(self):
 
        """
 
        Returns all child groups of this group, including children of children
 
        """
 
        return self._recursive_objects(include_repos=False)
 

	
 
    def get_new_name(self, group_name):
 
        """
 
        returns new full group name based on parent and new name
 

	
 
        :param group_name:
 
        """
 
        path_prefix = (self.parent_group.full_path_splitted if
 
                       self.parent_group else [])
 
        return RepoGroup.url_sep().join(path_prefix + [group_name])
 

	
 
    def get_api_data(self):
 
        """
 
        Common function for generating api data
 

	
 
        """
 
        group = self
 
        data = dict(
 
            group_id=group.group_id,
 
            group_name=group.group_name,
 
            group_description=group.group_description,
 
            parent_group=group.parent_group.group_name if group.parent_group else None,
 
            repositories=[x.repo_name for x in group.repositories],
 
            owner=group.user.username
 
        )
 
        return data
 

	
 

	
 
class Permission(Base, BaseModel):
 
    __tablename__ = 'permissions'
 
    __table_args__ = (
 
        Index('p_perm_name_idx', 'permission_name'),
 
        {'extend_existing': True, 'mysql_engine': 'InnoDB',
 
         'mysql_charset': 'utf8', 'sqlite_autoincrement': True},
 
    )
 
    PERMS = [
 
        ('hg.admin', _('Kallithea Administrator')),
 

	
 
        ('repository.none', _('Default user has no access to new repositories')),
 
        ('repository.read', _('Default user has read access to new repositories')),
 
        ('repository.write', _('Default user has write access to new repositories')),
 
        ('repository.admin', _('Default user has admin access to new repositories')),
 

	
 
        ('group.none', _('Default user has no access to new repository groups')),
 
        ('group.read', _('Default user has read access to new repository groups')),
 
        ('group.write', _('Default user has write access to new repository groups')),
 
        ('group.admin', _('Default user has admin access to new repository groups')),
 

	
 
        ('usergroup.none', _('Default user has no access to new user groups')),
 
        ('usergroup.read', _('Default user has read access to new user groups')),
 
        ('usergroup.write', _('Default user has write access to new user groups')),
 
        ('usergroup.admin', _('Default user has admin access to new user groups')),
 

	
 
        ('hg.repogroup.create.false', _('Only admins can create repository groups')),
 
        ('hg.repogroup.create.true', _('Non-admins can create repository groups')),
 

	
 
        ('hg.usergroup.create.false', _('Only admins can create user groups')),
 
        ('hg.usergroup.create.true', _('Non-admins can create user groups')),
 

	
 
        ('hg.create.none', _('Only admins can create top level repositories')),
 
        ('hg.create.repository', _('Non-admins can create top level repositories')),
 

	
 
        ('hg.create.write_on_repogroup.true', _('Repository creation enabled with write permission to a repository group')),
 
        ('hg.create.write_on_repogroup.false', _('Repository creation disabled with write permission to a repository group')),
 

	
 
        ('hg.fork.none', _('Only admins can fork repositories')),
 
        ('hg.fork.repository', _('Non-admins can fork repositories')),
 

	
 
        ('hg.register.none', _('Registration disabled')),
 
        ('hg.register.manual_activate', _('User registration with manual account activation')),
 
        ('hg.register.auto_activate', _('User registration with automatic account activation')),
 

	
 
        ('hg.extern_activate.manual', _('Manual activation of external account')),
 
        ('hg.extern_activate.auto', _('Automatic activation of external account')),
 
    ]
 

	
 
    # definition of system default permissions for the DEFAULT user
 
    DEFAULT_USER_PERMISSIONS = [
 
        'repository.read',
 
        'group.read',
 
        'usergroup.read',
 
        'hg.create.repository',
 
        'hg.create.write_on_repogroup.true',
 
        'hg.fork.repository',
 
        'hg.register.manual_activate',
 
        'hg.extern_activate.auto',
 
    ]
 

	
 
    # Weight defines which permissions are more important.
    # The higher the number, the more important.
 
    PERM_WEIGHTS = {
 
        'repository.none': 0,
 
        'repository.read': 1,
 
        'repository.write': 3,
 
        'repository.admin': 4,
 

	
 
        'group.none': 0,
 
        'group.read': 1,
 
        'group.write': 3,
 
        'group.admin': 4,
 

	
 
        'usergroup.none': 0,
 
        'usergroup.read': 1,
 
        'usergroup.write': 3,
 
        'usergroup.admin': 4,
 

	
 
        'hg.repogroup.create.false': 0,
 
        'hg.repogroup.create.true': 1,
 

	
 
        'hg.usergroup.create.false': 0,
 
        'hg.usergroup.create.true': 1,
 

	
 
        'hg.fork.none': 0,
 
        'hg.fork.repository': 1,
 

	
 
        'hg.create.none': 0,
 
        'hg.create.repository': 1
 
    }
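
    # Usage sketch: the weights make grants directly comparable, e.g. to
    # keep the stronger of two repository permissions:
    #
    #     strongest = max('repository.read', 'repository.write',
    #                     key=PERM_WEIGHTS.get)  # -> 'repository.write'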
 

	
 
    permission_id = Column(Integer(), nullable=False, unique=True, primary_key=True)
 
    permission_name = Column(String(255, convert_unicode=False), nullable=True, unique=None, default=None)
 
    permission_longname = Column(String(255, convert_unicode=False), nullable=True, unique=None, default=None)
 

	
 
    def __unicode__(self):
 
        return u"<%s('%s:%s')>" % (
 
            self.__class__.__name__, self.permission_id, self.permission_name
 
        )
 

	
 
    @classmethod
 
    def get_by_key(cls, key):
 
        return cls.query().filter(cls.permission_name == key).scalar()
 

	
 
    @classmethod
 
    def get_default_perms(cls, default_user_id):
 
        q = Session().query(UserRepoToPerm, Repository, cls) \
 
         .join((Repository, UserRepoToPerm.repository_id == Repository.repo_id)) \
 
         .join((cls, UserRepoToPerm.permission_id == cls.permission_id)) \
 
         .filter(UserRepoToPerm.user_id == default_user_id)
 

	
 
        return q.all()
 

	
 
    @classmethod
 
    def get_default_group_perms(cls, default_user_id):
 
        q = Session().query(UserRepoGroupToPerm, RepoGroup, cls) \
 
         .join((RepoGroup, UserRepoGroupToPerm.group_id == RepoGroup.group_id)) \
 
         .join((cls, UserRepoGroupToPerm.permission_id == cls.permission_id)) \
 
         .filter(UserRepoGroupToPerm.user_id == default_user_id)
 

	
 
        return q.all()
 

	
 
    @classmethod
 
    def get_default_user_group_perms(cls, default_user_id):
 
        q = Session().query(UserUserGroupToPerm, UserGroup, cls) \
 
         .join((UserGroup, UserUserGroupToPerm.user_group_id == UserGroup.users_group_id)) \
 
         .join((cls, UserUserGroupToPerm.permission_id == cls.permission_id)) \
 
         .filter(UserUserGroupToPerm.user_id == default_user_id)
 

	
 
        return q.all()
 

	
 

	
 
class UserRepoToPerm(Base, BaseModel):
 
    __tablename__ = 'repo_to_perm'
 
    __table_args__ = (
 
        UniqueConstraint('user_id', 'repository_id', 'permission_id'),
 
        {'extend_existing': True, 'mysql_engine': 'InnoDB',
 
         'mysql_charset': 'utf8', 'sqlite_autoincrement': True}
 
    )
 
    repo_to_perm_id = Column(Integer(), nullable=False, unique=True, primary_key=True)
 
    user_id = Column(Integer(), ForeignKey('users.user_id'), nullable=False, unique=None, default=None)
 
    permission_id = Column(Integer(), ForeignKey('permissions.permission_id'), nullable=False, unique=None, default=None)
 
    repository_id = Column(Integer(), ForeignKey('repositories.repo_id'), nullable=False, unique=None, default=None)
 

	
 
    user = relationship('User')
 
    repository = relationship('Repository')
 
    permission = relationship('Permission')
 

	
 
    @classmethod
 
    def create(cls, user, repository, permission):
 
        n = cls()
 
        n.user = user
 
        n.repository = repository
 
        n.permission = permission
 
        Session().add(n)
 
        return n
 

	
 
    def __unicode__(self):
 
        return u'<%s => %s >' % (self.user, self.repository)
 

	
 

	
 
class UserUserGroupToPerm(Base, BaseModel):
 
    __tablename__ = 'user_user_group_to_perm'
 
    __table_args__ = (
 
        UniqueConstraint('user_id', 'user_group_id', 'permission_id'),
 
        {'extend_existing': True, 'mysql_engine': 'InnoDB',
 
         'mysql_charset': 'utf8', 'sqlite_autoincrement': True}
 
    )
 
    user_user_group_to_perm_id = Column(Integer(), nullable=False, unique=True, primary_key=True)
 
    user_id = Column(Integer(), ForeignKey('users.user_id'), nullable=False, unique=None, default=None)
 
    permission_id = Column(Integer(), ForeignKey('permissions.permission_id'), nullable=False, unique=None, default=None)
 
    user_group_id = Column(Integer(), ForeignKey('users_groups.users_group_id'), nullable=False, unique=None, default=None)
 

	
 
    user = relationship('User')
 
    user_group = relationship('UserGroup')
 
    permission = relationship('Permission')
 

	
 
    @classmethod
 
    def create(cls, user, user_group, permission):
 
        n = cls()
 
        n.user = user
 
        n.user_group = user_group
 
        n.permission = permission
 
        Session().add(n)
 
        return n
 

	
 
    def __unicode__(self):
 
        return u'<%s => %s >' % (self.user, self.user_group)
 

	
 

	
 
class UserToPerm(Base, BaseModel):
 
    __tablename__ = 'user_to_perm'
 
    __table_args__ = (
 
        UniqueConstraint('user_id', 'permission_id'),
 
        {'extend_existing': True, 'mysql_engine': 'InnoDB',
 
         'mysql_charset': 'utf8', 'sqlite_autoincrement': True}
 
    )
 
    user_to_perm_id = Column(Integer(), nullable=False, unique=True, primary_key=True)
 
    user_id = Column(Integer(), ForeignKey('users.user_id'), nullable=False, unique=None, default=None)
 
    permission_id = Column(Integer(), ForeignKey('permissions.permission_id'), nullable=False, unique=None, default=None)
 

	
 
    user = relationship('User')
 
    permission = relationship('Permission')
 

	
 
    def __unicode__(self):
 
        return u'<%s => %s >' % (self.user, self.permission)
 

	
 

	
 
class UserGroupRepoToPerm(Base, BaseModel):
 
    __tablename__ = 'users_group_repo_to_perm'
 
    __table_args__ = (
 
        UniqueConstraint('repository_id', 'users_group_id', 'permission_id'),
 
        {'extend_existing': True, 'mysql_engine': 'InnoDB',
 
         'mysql_charset': 'utf8', 'sqlite_autoincrement': True}
 
    )
 
    users_group_to_perm_id = Column(Integer(), nullable=False, unique=True, primary_key=True)
 
    users_group_id = Column(Integer(), ForeignKey('users_groups.users_group_id'), nullable=False, unique=None, default=None)
 
    permission_id = Column(Integer(), ForeignKey('permissions.permission_id'), nullable=False, unique=None, default=None)
 
    repository_id = Column(Integer(), ForeignKey('repositories.repo_id'), nullable=False, unique=None, default=None)
 

	
 
    users_group = relationship('UserGroup')
 
    permission = relationship('Permission')
 
    repository = relationship('Repository')
 

	
 
    @classmethod
 
    def create(cls, users_group, repository, permission):
 
        n = cls()
 
        n.users_group = users_group
 
        n.repository = repository
 
        n.permission = permission
 
        Session().add(n)
 
        return n
 

	
 
    def __unicode__(self):
 
        return u'<UserGroupRepoToPerm:%s => %s >' % (self.users_group, self.repository)
 

	
 

	
 
class UserGroupUserGroupToPerm(Base, BaseModel):
 
    __tablename__ = 'user_group_user_group_to_perm'
 
    __table_args__ = (
 
        UniqueConstraint('target_user_group_id', 'user_group_id', 'permission_id'),
 
        CheckConstraint('target_user_group_id != user_group_id'),
 
        {'extend_existing': True, 'mysql_engine': 'InnoDB',
 
         'mysql_charset': 'utf8', 'sqlite_autoincrement': True}
 
    )
 
    user_group_user_group_to_perm_id = Column(Integer(), nullable=False, unique=True, primary_key=True)
 
    target_user_group_id = Column(Integer(), ForeignKey('users_groups.users_group_id'), nullable=False, unique=None, default=None)
 
    permission_id = Column(Integer(), ForeignKey('permissions.permission_id'), nullable=False, unique=None, default=None)
 
    user_group_id = Column(Integer(), ForeignKey('users_groups.users_group_id'), nullable=False, unique=None, default=None)
 

	
 
    target_user_group = relationship('UserGroup', primaryjoin='UserGroupUserGroupToPerm.target_user_group_id==UserGroup.users_group_id')
 
    user_group = relationship('UserGroup', primaryjoin='UserGroupUserGroupToPerm.user_group_id==UserGroup.users_group_id')
 
    permission = relationship('Permission')
 

	
 
    @classmethod
 
    def create(cls, target_user_group, user_group, permission):
 
        n = cls()
 
        n.target_user_group = target_user_group
 
        n.user_group = user_group
 
        n.permission = permission
 
        Session().add(n)
 
        return n
 

	
 
    def __unicode__(self):
 
        return u'<UserGroupUserGroup:%s => %s >' % (self.target_user_group, self.user_group)
 

	
 

	
 
class UserGroupToPerm(Base, BaseModel):
 
    __tablename__ = 'users_group_to_perm'
 
    __table_args__ = (
 
        UniqueConstraint('users_group_id', 'permission_id',),
 
        {'extend_existing': True, 'mysql_engine': 'InnoDB',
 
         'mysql_charset': 'utf8', 'sqlite_autoincrement': True}
 
    )
 
    users_group_to_perm_id = Column(Integer(), nullable=False, unique=True, primary_key=True)
 
    users_group_id = Column(Integer(), ForeignKey('users_groups.users_group_id'), nullable=False, unique=None, default=None)
 
    permission_id = Column(Integer(), ForeignKey('permissions.permission_id'), nullable=False, unique=None, default=None)
 

	
 
    users_group = relationship('UserGroup')
 
    permission = relationship('Permission')
 

	
 

	
 
class UserRepoGroupToPerm(Base, BaseModel):
 
    __tablename__ = 'user_repo_group_to_perm'
 
    __table_args__ = (
 
        UniqueConstraint('user_id', 'group_id', 'permission_id'),
 
        {'extend_existing': True, 'mysql_engine': 'InnoDB',
 
         'mysql_charset': 'utf8', 'sqlite_autoincrement': True}
 
    )
 

	
 
    group_to_perm_id = Column(Integer(), nullable=False, unique=True, primary_key=True)
 
    user_id = Column(Integer(), ForeignKey('users.user_id'), nullable=False, unique=None, default=None)
 
    group_id = Column(Integer(), ForeignKey('groups.group_id'), nullable=False, unique=None, default=None)
 
    permission_id = Column(Integer(), ForeignKey('permissions.permission_id'), nullable=False, unique=None, default=None)
 

	
 
    user = relationship('User')
 
    group = relationship('RepoGroup')
 
    permission = relationship('Permission')
 

	
 
    @classmethod
 
    def create(cls, user, repository_group, permission):
 
        n = cls()
 
        n.user = user
 
        n.group = repository_group
 
        n.permission = permission
 
        Session().add(n)
 
        return n
 

	
 

	
 
class UserGroupRepoGroupToPerm(Base, BaseModel):
 
    __tablename__ = 'users_group_repo_group_to_perm'
 
    __table_args__ = (
 
        UniqueConstraint('users_group_id', 'group_id'),
 
        {'extend_existing': True, 'mysql_engine': 'InnoDB',
 
         'mysql_charset': 'utf8', 'sqlite_autoincrement': True}
 
    )
 

	
 
    users_group_repo_group_to_perm_id = Column(Integer(), nullable=False, unique=True, primary_key=True)
 
    users_group_id = Column(Integer(), ForeignKey('users_groups.users_group_id'), nullable=False, unique=None, default=None)
 
    group_id = Column(Integer(), ForeignKey('groups.group_id'), nullable=False, unique=None, default=None)
 
    permission_id = Column(Integer(), ForeignKey('permissions.permission_id'), nullable=False, unique=None, default=None)
 

	
 
    users_group = relationship('UserGroup')
 
    permission = relationship('Permission')
 
    group = relationship('RepoGroup')
 

	
 
    @classmethod
 
    def create(cls, user_group, repository_group, permission):
 
        n = cls()
 
        n.users_group = user_group
 
        n.group = repository_group
 
        n.permission = permission
 
        Session().add(n)
 
        return n
 

	
 

	
 
class Statistics(Base, BaseModel):
 
    __tablename__ = 'statistics'
 
    __table_args__ = (
 
         UniqueConstraint('repository_id'),
 
         {'extend_existing': True, 'mysql_engine': 'InnoDB',
 
          'mysql_charset': 'utf8', 'sqlite_autoincrement': True}
 
    )
 
    stat_id = Column(Integer(), nullable=False, unique=True, primary_key=True)
 
    repository_id = Column(Integer(), ForeignKey('repositories.repo_id'), nullable=False, unique=True, default=None)
 
    stat_on_revision = Column(Integer(), nullable=False)
 
    commit_activity = Column(LargeBinary(1000000), nullable=False)  # JSON data
    commit_activity_combined = Column(LargeBinary(), nullable=False)  # JSON data
    languages = Column(LargeBinary(1000000), nullable=False)  # JSON data
 

	
 
    repository = relationship('Repository', single_parent=True)
 

	
 

	
 
class UserFollowing(Base, BaseModel):
 
    __tablename__ = 'user_followings'
 
    __table_args__ = (
 
        UniqueConstraint('user_id', 'follows_repository_id'),
 
        UniqueConstraint('user_id', 'follows_user_id'),
 
        {'extend_existing': True, 'mysql_engine': 'InnoDB',
 
         'mysql_charset': 'utf8', 'sqlite_autoincrement': True}
 
    )
 

	
 
    user_following_id = Column(Integer(), nullable=False, unique=True, primary_key=True)
 
    user_id = Column(Integer(), ForeignKey('users.user_id'), nullable=False, unique=None, default=None)
 
    follows_repo_id = Column("follows_repository_id", Integer(), ForeignKey('repositories.repo_id'), nullable=True, unique=None, default=None)
 
    follows_user_id = Column(Integer(), ForeignKey('users.user_id'), nullable=True, unique=None, default=None)
 
    follows_from = Column(DateTime(timezone=False), nullable=True, unique=None, default=datetime.datetime.now)
 

	
 
    user = relationship('User', primaryjoin='User.user_id==UserFollowing.user_id')
 

	
 
    follows_user = relationship('User', primaryjoin='User.user_id==UserFollowing.follows_user_id')
 
    follows_repository = relationship('Repository', order_by='Repository.repo_name')
 

	
 
    @classmethod
 
    def get_repo_followers(cls, repo_id):
 
        return cls.query().filter(cls.follows_repo_id == repo_id)
 

	
 

	
 
class CacheInvalidation(Base, BaseModel):
 
    __tablename__ = 'cache_invalidation'
 
    __table_args__ = (
 
        UniqueConstraint('cache_key'),
 
        Index('key_idx', 'cache_key'),
 
        {'extend_existing': True, 'mysql_engine': 'InnoDB',
 
         'mysql_charset': 'utf8', 'sqlite_autoincrement': True},
 
    )
 
    # cache_id, not used
 
    cache_id = Column(Integer(), nullable=False, unique=True, primary_key=True)
 
    # cache_key as created by _get_cache_key
 
    cache_key = Column(String(255, convert_unicode=False))
 
    # cache_args is a repo_name
 
    cache_args = Column(String(255, convert_unicode=False))
 
    # instance sets cache_active True when it is caching, other instances set
 
    # cache_active to False to indicate that this cache is invalid
 
    cache_active = Column(Boolean(), nullable=True, unique=None, default=False)
 

	
 
    def __init__(self, cache_key, repo_name=''):
 
        self.cache_key = cache_key
 
        self.cache_args = repo_name
 
        self.cache_active = False
 

	
 
    def __unicode__(self):
 
        return u"<%s('%s:%s[%s]')>" % (
 
            self.__class__.__name__,
 
            self.cache_id, self.cache_key, self.cache_active)
 

	
 
    def _cache_key_partition(self):
 
        prefix, repo_name, suffix = self.cache_key.partition(self.cache_args)
 
        return prefix, repo_name, suffix
 

	
 
    def get_prefix(self):
 
        """
 
        get prefix that might have been used in _get_cache_key to
 
        generate self.cache_key. Only used for informational purposes
 
        in repo_edit.html.
 
        """
 
        # prefix, repo_name, suffix
 
        return self._cache_key_partition()[0]
 

	
 
    def get_suffix(self):
 
        """
 
        get suffix that might have been used in _get_cache_key to
 
        generate self.cache_key. Only used for informational purposes
 
        in repo_edit.html.
 
        """
 
        # prefix, repo_name, suffix
 
        return self._cache_key_partition()[2]
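    # Worked example (values assumed for illustration): with
    # cache_key = 'instance1repo/name_RSS' and cache_args = 'repo/name',
    # _cache_key_partition() yields ('instance1', 'repo/name', '_RSS'),
    # so get_prefix() returns 'instance1' and get_suffix() returns '_RSS'.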
 

	
 
    @classmethod
 
    def clear_cache(cls):
 
        """
 
        Delete all cache keys from database.
 
        Should only be run when all instances are down and all entries thus stale.
 
        """
 
        cls.query().delete()
 
        Session().commit()
 

	
 
    @classmethod
 
    def _get_cache_key(cls, key):
 
        """
 
        Wrapper for generating a unique cache key for this instance and "key".
 
        key must/will start with a repo_name, which will be stored in .cache_args.
 
        """
 
        import kallithea
 
        prefix = kallithea.CONFIG.get('instance_id', '')
 
        return "%s%s" % (prefix, key)
 

	
 
    @classmethod
 
    def set_invalidate(cls, repo_name):
 
        """
 
        Mark all caches of a repo as invalid in the database.
 
        """
 
        inv_objs = Session().query(cls).filter(cls.cache_args == repo_name).all()
 
        log.debug('for repo %s got %s invalidation objects',
 
                  safe_str(repo_name), inv_objs)
 

	
 
        for inv_obj in inv_objs:
 
            log.debug('marking %s key for invalidation based on repo_name=%s',
 
                      inv_obj, safe_str(repo_name))
 
            Session().delete(inv_obj)
 
        Session().commit()
 

	
 
    @classmethod
 
    def test_and_set_valid(cls, repo_name, kind, valid_cache_keys=None):
 
        """
 
        Mark this cache key as active and currently cached.
 
        Return True if the existing cache registration was still valid.
 
        Return False to indicate that it had been invalidated and caches should be refreshed.
 
        """
 

	
 
        key = (repo_name + '_' + kind) if kind else repo_name
 
        cache_key = cls._get_cache_key(key)
 

	
 
        if valid_cache_keys and cache_key in valid_cache_keys:
 
            return True
 

	
 
        inv_obj = cls.query().filter(cls.cache_key == cache_key).scalar()
 
        if not inv_obj:
 
            inv_obj = CacheInvalidation(cache_key, repo_name)
 
        if inv_obj.cache_active:
 
            return True
 
        inv_obj.cache_active = True
 
        Session().add(inv_obj)
 
        try:
 
            Session().commit()
 
        except exc.IntegrityError:
 
            inv_obj = cls.query().filter(cls.cache_key == cache_key).scalar()
 
            if not inv_obj:
 
                raise
 
            # TOCTOU - another thread added the key at the same time; no further action required
 
        return False
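    # Illustrative caller handshake (not from the original source; 'RSS' is
    # just an example kind):
    #
    #   valid = CacheInvalidation.get_valid_cache_keys()
    #   if CacheInvalidation.test_and_set_valid(repo_name, 'RSS', valid_cache_keys=valid):
    #       pass  # registration still valid - cached data can be served
    #   else:
    #       pass  # it had been invalidated - recompute and re-cache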
 

	
 
    @classmethod
 
    def get_valid_cache_keys(cls):
 
        """
 
        Return an opaque object with information about which caches are still valid
 
        and can be used without checking for invalidation.
 
        """
 
        return set(inv_obj.cache_key for inv_obj in cls.query().filter(cls.cache_active).all())
 

	
 

	
 
class ChangesetComment(Base, BaseModel):
 
    __tablename__ = 'changeset_comments'
 
    __table_args__ = (
 
        Index('cc_revision_idx', 'revision'),
 
        Index('cc_pull_request_id_idx', 'pull_request_id'),
 
        {'extend_existing': True, 'mysql_engine': 'InnoDB',
 
         'mysql_charset': 'utf8', 'sqlite_autoincrement': True},
 
    )
 
    comment_id = Column(Integer(), nullable=False, unique=True, primary_key=True)
 
    repo_id = Column(Integer(), ForeignKey('repositories.repo_id'), nullable=False)
 
    revision = Column(String(40))
 
    pull_request_id = Column(Integer(), ForeignKey('pull_requests.pull_request_id'))
 
    line_no = Column(Unicode(10))
 
    f_path = Column(Unicode(1000))
 
    user_id = Column(Integer(), ForeignKey('users.user_id'), nullable=False)
 
    text = Column(UnicodeText(25000), nullable=False)
 
    created_on = Column(DateTime(timezone=False), nullable=False, default=datetime.datetime.now)
 
    modified_at = Column(DateTime(timezone=False), nullable=False, default=datetime.datetime.now)
 

	
 
    author = relationship('User')
 
    repo = relationship('Repository')
 
    # status_change is frequently used directly in templates - make it a lazy
 
    # join to avoid fetching each related ChangesetStatus on demand.
 
    # There will only be one ChangesetStatus referencing each comment so the join will not explode.
 
    status_change = relationship('ChangesetStatus',
 
                                 cascade="all, delete-orphan", lazy='joined')
 
    pull_request = relationship('PullRequest')
 

	
 
    @classmethod
 
    def get_users(cls, revision=None, pull_request_id=None):
 
        """
 
        Returns user associated with this ChangesetComment. ie those
 
        who actually commented
 

	
 
        :param cls:
 
        :param revision:
 
        """
 
        q = Session().query(User) \
 
                .join(ChangesetComment.author)
 
        if revision is not None:
 
            q = q.filter(cls.revision == revision)
 
        elif pull_request_id is not None:
 
            q = q.filter(cls.pull_request_id == pull_request_id)
 
        return q.all()
 

	
 
    def url(self):
 
        anchor = "comment-%s" % self.comment_id
 
        import kallithea.lib.helpers as h
 
        if self.revision:
 
            return h.url('changeset_home', repo_name=self.repo.repo_name, revision=self.revision, anchor=anchor)
 
        elif self.pull_request_id is not None:
 
            return self.pull_request.url(anchor=anchor)
 

	
 
class ChangesetStatus(Base, BaseModel):
 
    __tablename__ = 'changeset_statuses'
 
    __table_args__ = (
 
        Index('cs_revision_idx', 'revision'),
 
        Index('cs_version_idx', 'version'),
 
        Index('cs_pull_request_id_idx', 'pull_request_id'),
 
        Index('cs_changeset_comment_id_idx', 'changeset_comment_id'),
 
        Index('cs_pull_request_id_user_id_version_idx', 'pull_request_id', 'user_id', 'version'),
 
        UniqueConstraint('repo_id', 'revision', 'version'),
 
        {'extend_existing': True, 'mysql_engine': 'InnoDB',
 
         'mysql_charset': 'utf8', 'sqlite_autoincrement': True}
 
    )
 
    STATUS_NOT_REVIEWED = DEFAULT = 'not_reviewed'
 
    STATUS_APPROVED = 'approved'
 
    STATUS_REJECTED = 'rejected'
 
    STATUS_UNDER_REVIEW = 'under_review'
 

	
 
    STATUSES = [
 
        (STATUS_NOT_REVIEWED, _("Not reviewed")),  # (no icon) and default
 
        (STATUS_APPROVED, _("Approved")),
 
        (STATUS_REJECTED, _("Rejected")),
 
        (STATUS_UNDER_REVIEW, _("Under review")),
 
    ]
 

	
 
    changeset_status_id = Column(Integer(), nullable=False, unique=True, primary_key=True)
 
    repo_id = Column(Integer(), ForeignKey('repositories.repo_id'), nullable=False)
 
    user_id = Column(Integer(), ForeignKey('users.user_id'), nullable=False, unique=None)
 
    revision = Column(String(40), nullable=False)
 
    status = Column(String(128), nullable=False, default=DEFAULT)
 
    changeset_comment_id = Column(Integer(), ForeignKey('changeset_comments.comment_id'), nullable=False)
 
    modified_at = Column(DateTime(), nullable=False, default=datetime.datetime.now)
 
    version = Column(Integer(), nullable=False, default=0)
 
    pull_request_id = Column(Integer(), ForeignKey('pull_requests.pull_request_id'), nullable=True)
 

	
 
    author = relationship('User')
 
    repo = relationship('Repository')
 
    comment = relationship('ChangesetComment')
 
    pull_request = relationship('PullRequest')
 

	
 
    def __unicode__(self):
 
        return u"<%s('%s:%s')>" % (
 
            self.__class__.__name__,
 
            self.status, self.author
 
        )
 

	
 
    @classmethod
 
    def get_status_lbl(cls, value):
 
        return dict(cls.STATUSES).get(value)
 

	
 
    @property
 
    def status_lbl(self):
 
        return ChangesetStatus.get_status_lbl(self.status)
 

	
 

	
 
class PullRequest(Base, BaseModel):
 
    __tablename__ = 'pull_requests'
 
    __table_args__ = (
 
        Index('pr_org_repo_id_idx', 'org_repo_id'),
 
        Index('pr_other_repo_id_idx', 'other_repo_id'),
 
        {'extend_existing': True, 'mysql_engine': 'InnoDB',
 
         'mysql_charset': 'utf8', 'sqlite_autoincrement': True},
 
    )
 

	
 
    # values for .status
 
    STATUS_NEW = u'new'
 
    STATUS_CLOSED = u'closed'
 

	
 
    pull_request_id = Column(Integer(), nullable=False, unique=True, primary_key=True)
 
    title = Column(Unicode(255), nullable=True)
 
    description = Column(UnicodeText(10240))
 
    status = Column(Unicode(255), nullable=False, default=STATUS_NEW) # only for closedness, not approve/reject/etc
 
    created_on = Column(DateTime(timezone=False), nullable=False, default=datetime.datetime.now)
 
    updated_on = Column(DateTime(timezone=False), nullable=False, default=datetime.datetime.now)
 
    user_id = Column(Integer(), ForeignKey('users.user_id'), nullable=False, unique=None)
 
    _revisions = Column('revisions', UnicodeText(20500))  # 500 revisions max
 
    org_repo_id = Column(Integer(), ForeignKey('repositories.repo_id'), nullable=False)
 
    org_ref = Column(Unicode(255), nullable=False)
 
    other_repo_id = Column(Integer(), ForeignKey('repositories.repo_id'), nullable=False)
 
    other_ref = Column(Unicode(255), nullable=False)
 

	
 
    @hybrid_property
 
    def revisions(self):
 
        return self._revisions.split(':')
 

	
 
    @revisions.setter
 
    def revisions(self, val):
 
        self._revisions = safe_unicode(':'.join(val))
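        # Example (illustrative): pr.revisions = ['abc123', 'def456'] stores
        # u'abc123:def456'; reading pr.revisions splits it back into a list.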
 

	
 
    @property
 
    def org_ref_parts(self):
 
        return self.org_ref.split(':')
 

	
 
    @property
 
    def other_ref_parts(self):
 
        return self.other_ref.split(':')
 

	
 
    owner = relationship('User')
 
    reviewers = relationship('PullRequestReviewers',
 
                             cascade="all, delete-orphan")
 
    org_repo = relationship('Repository', primaryjoin='PullRequest.org_repo_id==Repository.repo_id')
 
    other_repo = relationship('Repository', primaryjoin='PullRequest.other_repo_id==Repository.repo_id')
 
    statuses = relationship('ChangesetStatus')
 
    comments = relationship('ChangesetComment',
 
                             cascade="all, delete-orphan")
 

	
 
    def is_closed(self):
 
        return self.status == self.STATUS_CLOSED
 

	
 
    def user_review_status(self, user_id):
 
        """Return the user's latest status votes on PR"""
 
        # note: no filtering on repo - that would be redundant
 
        status = ChangesetStatus.query() \
 
            .filter(ChangesetStatus.pull_request == self) \
 
            .filter(ChangesetStatus.user_id == user_id) \
 
            .order_by(ChangesetStatus.version) \
 
            .first()
 
        return str(status.status) if status else ''
 

	
 
    @classmethod
 
    def make_nice_id(cls, pull_request_id):
 
        '''Return pull request id nicely formatted for displaying'''
 
        return '#%s' % pull_request_id
 

	
 
    def nice_id(self):
 
        '''Return the id of this pull request, nicely formatted for displaying'''
 
        return self.make_nice_id(self.pull_request_id)
 

	
 
    def __json__(self):
 
        return dict(
 
            revisions=self.revisions
 
        )
 

	
 
    def url(self, **kwargs):
 
        canonical = kwargs.pop('canonical', None)
 
        import kallithea.lib.helpers as h
 
        b = self.org_ref_parts[1]
 
        if b != self.other_ref_parts[1]:
 
            s = '/_/' + b
 
        else:
 
            s = '/_/' + self.title
 
        kwargs['extra'] = urlreadable(s)
 
        if canonical:
 
            return h.canonical_url('pullrequest_show', repo_name=self.other_repo.repo_name,
 
                                   pull_request_id=self.pull_request_id, **kwargs)
 
        return h.url('pullrequest_show', repo_name=self.other_repo.repo_name,
 
                     pull_request_id=self.pull_request_id, **kwargs)
 

	
 
class PullRequestReviewers(Base, BaseModel):
 
    __tablename__ = 'pull_request_reviewers'
 
    __table_args__ = (
 
        {'extend_existing': True, 'mysql_engine': 'InnoDB',
 
         'mysql_charset': 'utf8', 'sqlite_autoincrement': True},
 
    )
 

	
 
    def __init__(self, user=None, pull_request=None):
 
        self.user = user
 
        self.pull_request = pull_request
 

	
 
    pull_requests_reviewers_id = Column(Integer(), nullable=False, unique=True, primary_key=True)
 
    pull_request_id = Column(Integer(), ForeignKey('pull_requests.pull_request_id'), nullable=False)
 
    user_id = Column(Integer(), ForeignKey('users.user_id'), nullable=True)
 

	
 
    user = relationship('User')
 
    pull_request = relationship('PullRequest')
 

	
 

	
 
class Notification(Base, BaseModel):
 
    __tablename__ = 'notifications'
 
    __table_args__ = (
 
        Index('notification_type_idx', 'type'),
 
        {'extend_existing': True, 'mysql_engine': 'InnoDB',
 
         'mysql_charset': 'utf8', 'sqlite_autoincrement': True},
 
    )
 

	
 
    TYPE_CHANGESET_COMMENT = u'cs_comment'
 
    TYPE_MESSAGE = u'message'
 
    TYPE_MENTION = u'mention'
 
    TYPE_REGISTRATION = u'registration'
 
    TYPE_PULL_REQUEST = u'pull_request'
 
    TYPE_PULL_REQUEST_COMMENT = u'pull_request_comment'
 

	
 
    notification_id = Column(Integer(), nullable=False, unique=True, primary_key=True)
 
    subject = Column(Unicode(512), nullable=True)
 
    body = Column(UnicodeText(50000), nullable=True)
 
    created_by = Column(Integer(), ForeignKey('users.user_id'), nullable=True)
 
    created_on = Column(DateTime(timezone=False), nullable=False, default=datetime.datetime.now)
 
    type_ = Column('type', Unicode(255))
 

	
 
    created_by_user = relationship('User')
 
    notifications_to_users = relationship('UserNotification', cascade="all, delete-orphan")
 

	
 
    @property
 
    def recipients(self):
 
        return [x.user for x in UserNotification.query()
 
                .filter(UserNotification.notification == self)
 
                .order_by(UserNotification.user_id.asc()).all()]
 

	
 
    @classmethod
 
    def create(cls, created_by, subject, body, recipients, type_=None):
 
        if type_ is None:
 
            type_ = Notification.TYPE_MESSAGE
 

	
 
        notification = cls()
 
        notification.created_by_user = created_by
 
        notification.subject = subject
 
        notification.body = body
 
        notification.type_ = type_
 
        notification.created_on = datetime.datetime.now()
 

	
 
        for recipient in recipients:
 
            un = UserNotification()
 
            un.notification = notification
 
            un.user_id = recipient.user_id
 
            # Mark notifications to self "pre-read" - should perhaps just be skipped
 
            if recipient == created_by:
 
                un.read = True
 
            Session().add(un)
 

	
 
        Session().add(notification)
 
        Session().flush()  # assign notification.notification_id
 
        return notification
 

	
 
    @property
 
    def description(self):
 
        from kallithea.model.notification import NotificationModel
 
        return NotificationModel().make_description(self)
 

	
 

	
 
class UserNotification(Base, BaseModel):
 
    __tablename__ = 'user_to_notification'
 
    __table_args__ = (
 
        UniqueConstraint('user_id', 'notification_id'),
 
        {'extend_existing': True, 'mysql_engine': 'InnoDB',
 
         'mysql_charset': 'utf8', 'sqlite_autoincrement': True}
 
    )
 
    user_id = Column(Integer(), ForeignKey('users.user_id'), primary_key=True)
 
    notification_id = Column(Integer(), ForeignKey('notifications.notification_id'), primary_key=True)
 
    read = Column(Boolean, default=False)
 
    sent_on = Column(DateTime(timezone=False), nullable=True, unique=None)
 

	
 
    user = relationship('User')
 
    notification = relationship('Notification')
 

	
 
    def mark_as_read(self):
 
        self.read = True
 
        Session().add(self)
 

	
 

	
 
class Gist(Base, BaseModel):
 
    __tablename__ = 'gists'
 
    __table_args__ = (
 
        Index('g_gist_access_id_idx', 'gist_access_id'),
 
        Index('g_created_on_idx', 'created_on'),
 
        {'extend_existing': True, 'mysql_engine': 'InnoDB',
 
         'mysql_charset': 'utf8', 'sqlite_autoincrement': True}
 
    )
 
    GIST_PUBLIC = u'public'
 
    GIST_PRIVATE = u'private'
 
    DEFAULT_FILENAME = u'gistfile1.txt'
 

	
 
    gist_id = Column(Integer(), nullable=False, unique=True, primary_key=True)
 
    gist_access_id = Column(Unicode(250))
 
    gist_description = Column(UnicodeText(1024))
 
    gist_owner = Column('user_id', Integer(), ForeignKey('users.user_id'), nullable=True)
 
    gist_expires = Column(Float(53), nullable=False)
 
    gist_type = Column(Unicode(128), nullable=False)
 
    created_on = Column(DateTime(timezone=False), nullable=False, default=datetime.datetime.now)
 
    modified_at = Column(DateTime(timezone=False), nullable=False, default=datetime.datetime.now)
 

	
 
    owner = relationship('User')
 

	
 
    def __repr__(self):
 
        return '<Gist:[%s]%s>' % (self.gist_type, self.gist_access_id)
 

	
 
    @classmethod
 
    def get_or_404(cls, id_):
 
        res = cls.query().filter(cls.gist_access_id == id_).scalar()
 
        if res is None:
 
            raise HTTPNotFound
 
        return res
 

	
 
    @classmethod
 
    def get_by_access_id(cls, gist_access_id):
 
        return cls.query().filter(cls.gist_access_id == gist_access_id).scalar()
 

	
 
    def gist_url(self):
 
        import kallithea
 
        alias_url = kallithea.CONFIG.get('gist_alias_url')
 
        if alias_url:
 
            return alias_url.replace('{gistid}', self.gist_access_id)
 

	
 
        import kallithea.lib.helpers as h
 
        return h.canonical_url('gist', gist_id=self.gist_access_id)
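    # Sketch (setting assumed): with gist_alias_url = 'https://gist.example.com/{gistid}'
    # configured, gist_url() returns 'https://gist.example.com/<gist_access_id>'
    # instead of the canonical application URL.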
 

	
 
    @classmethod
 
    def base_path(cls):
 
        """
 
        Returns base path where all gists are stored
 

	
 
        :param cls:
 
        """
 
        from kallithea.model.gist import GIST_STORE_LOC
 
        q = Session().query(Ui) \
 
            .filter(Ui.ui_key == URL_SEP)
 
        q = q.options(FromCache("sql_cache_short", "repository_repo_path"))
 
        return os.path.join(q.one().ui_value, GIST_STORE_LOC)
 

	
 
    def get_api_data(self):
 
        """
 
        Common function for generating gist related data for API
 
        """
 
        gist = self
 
        data = dict(
 
            gist_id=gist.gist_id,
 
            type=gist.gist_type,
 
            access_id=gist.gist_access_id,
 
            description=gist.gist_description,
 
            url=gist.gist_url(),
 
            expires=gist.gist_expires,
 
            created_on=gist.created_on,
 
        )
 
        return data
 

	
 
    def __json__(self):
 
        data = dict()
 
        data.update(self.get_api_data())
 
        return data
 
    ## SCM functions
 

	
 
    @property
 
    def scm_instance(self):
 
        from kallithea.lib.vcs import get_repo
 
        base_path = self.base_path()
 
        return get_repo(os.path.join(*map(safe_str,
 
                                          [base_path, self.gist_access_id])))
 

	
 

	
 
class DbMigrateVersion(Base, BaseModel):
 
    __tablename__ = 'db_migrate_version'
 
    __table_args__ = (
 
        {'extend_existing': True, 'mysql_engine': 'InnoDB',
 
         'mysql_charset': 'utf8', 'sqlite_autoincrement': True},
 
    )
 
    repository_id = Column(String(250), nullable=False, unique=True, primary_key=True)
 
    repository_path = Column(Text)
 
    version = Column(Integer)
kallithea/public/js/base.js
@@ -385,1547 +385,1547 @@ var ajaxGET = function(url, success, fai
 
            };
 
    }
 
    return $.ajax({url: url, headers: {'X-PARTIAL-XHR': '1'}, cache: false})
 
        .done(success)
 
        .fail(failure);
 
};
 

	
 
var ajaxPOST = function(url, postData, success, failure) {
 
    postData['_authentication_token'] = _authentication_token;
 
    postData = _toQueryString(postData);
 
    if(failure === undefined) {
 
        failure = function(jqXHR, textStatus, errorThrown) {
 
                if (textStatus != "abort")
 
                    alert("Error posting to server: " + textStatus);
 
            };
 
    }
 
    return $.ajax({url: url, data: postData, type: 'POST', headers: {'X-PARTIAL-XHR': '1'}, cache: false})
 
        .done(success)
 
        .fail(failure);
 
};
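// Illustrative call (not part of the original source). The CSRF token is
// appended automatically, so callers only pass their own fields:
//
//   ajaxPOST(url, {'text': 'hello'},
//            function(json) { /* success */ },
//            function(jqXHR, textStatus, errorThrown) { /* failure */ });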
 

	
 

	
 
/**
 
 * activate .show_more links
 * the .show_more element must have an id that is the id of the element to hide, prefixed with _
 * the parent node will be displayed
 
 */
 
var show_more_event = function(){
 
    $('.show_more').click(function(e){
 
        var el = e.currentTarget;
 
        $('#' + el.id.substring(1)).hide();
 
        $(el.parentNode).show();
 
    });
 
};
 

	
 
/**
 
 * activate .lazy-cs mouseover for showing changeset tooltip
 
 */
 
var show_changeset_tooltip = function(){
 
    $('.lazy-cs').mouseover(function(e){
 
        var $target = $(e.currentTarget);
 
        var rid = $target.attr('raw_id');
 
        var repo_name = $target.attr('repo_name');
 
        if(rid && !$target.hasClass('tooltip')){
 
            _show_tooltip(e, _TM['loading ...']);
 
            var url = pyroutes.url('changeset_info', {"repo_name": repo_name, "revision": rid});
 
            ajaxGET(url, function(json){
 
                    $target.addClass('tooltip');
 
                    _show_tooltip(e, json['message']);
 
                    _activate_tooltip($target);
 
                });
 
        }
 
    });
 
};
 

	
 
var _onSuccessFollow = function(target){
 
    var $target = $(target);
 
    var $f_cnt = $('#current_followers_count');
 
    if ($target.hasClass('follow')) {
 
        $target.removeClass('follow').addClass('following');
 
        $target.prop('title', _TM['Stop following this repository']);
 
        if ($f_cnt.html()) {
 
            var cnt = Number($f_cnt.html())+1;
 
            $f_cnt.html(cnt);
 
        }
 
    } else {
 
        $target.removeClass('following').addClass('follow');
 
        $target.prop('title', _TM['Start following this repository']);
 
        if ($f_cnt.html()) {
 
            var cnt = Number($f_cnt.html())-1;
 
            $f_cnt.html(cnt);
 
        }
 
    }
 
}
 

	
 
var toggleFollowingRepo = function(target, follows_repo_id){
 
    var args = 'follows_repo_id=' + follows_repo_id;
 
    args += '&amp;_authentication_token=' + _authentication_token;
 
    $.post(TOGGLE_FOLLOW_URL, args, function(data){
 
            _onSuccessFollow(target);
 
        });
 
    return false;
 
};
 

	
 
var showRepoSize = function(target, repo_name){
 
    var args = '_authentication_token=' + _authentication_token;
 

	
 
    if(!$("#" + target).hasClass('loaded')){
 
        $("#" + target).html(_TM['Loading ...']);
 
        var url = pyroutes.url('repo_size', {"repo_name":repo_name});
 
        $.post(url, args, function(data) {
 
            $("#" + target).html(data);
 
            $("#" + target).addClass('loaded');
 
        });
 
    }
 
    return false;
 
};
 

	
 
/**
 
 * tooltips
 
 */
 

	
 
var tooltip_activate = function(){
 
    $(document).ready(_init_tooltip);
 
};
 

	
 
var _activate_tooltip = function($tt){
 
    $tt.mouseover(_show_tooltip);
 
    $tt.mousemove(_move_tooltip);
 
    $tt.mouseout(_close_tooltip);
 
};
 

	
 
var _init_tooltip = function(){
 
    var $tipBox = $('#tip-box');
 
    if(!$tipBox.length){
 
        $tipBox = $('<div id="tip-box"></div>');
 
        $(document.body).append($tipBox);
 
    }
 

	
 
    $tipBox.hide();
 
    $tipBox.css('position', 'absolute');
 
    $tipBox.css('max-width', '600px');
 

	
 
    _activate_tooltip($('.tooltip'));
 
};
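// Tooltip flow: tooltip_activate() runs _init_tooltip on DOM ready, which
// creates a single absolutely positioned #tip-box and binds the mouse
// handlers on every .tooltip element; _show_tooltip stashes the original
// title in a tt_title attribute and _close_tooltip restores it.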
 

	
 
var _show_tooltip = function(e, tipText, safe){
 
    e.stopImmediatePropagation();
 
    var el = e.currentTarget;
 
    var $el = $(el);
 
    if(tipText){
 
        // just use it
 
    } else if(el.tagName.toLowerCase() === 'img'){
 
        tipText = el.alt ? el.alt : '';
 
    } else {
 
        tipText = el.title ? el.title : '';
 
        safe = safe || $el.hasClass("safe-html-title");
 
    }
 

	
 
    if(tipText !== ''){
 
        // save org title
 
        $el.attr('tt_title', tipText);
 
        // reset title to not show org tooltips
 
        $el.prop('title', '');
 

	
 
        var $tipBox = $('#tip-box');
 
        if (safe) {
 
            $tipBox.html(tipText);
 
        } else {
 
            $tipBox.text(tipText);
 
        }
 
        $tipBox.css('display', 'block');
 
    }
 
};
 

	
 
var _move_tooltip = function(e){
 
    e.stopImmediatePropagation();
 
    var $tipBox = $('#tip-box');
 
    $tipBox.css('top', (e.pageY + 15) + 'px');
 
    $tipBox.css('left', (e.pageX + 15) + 'px');
 
};
 

	
 
var _close_tooltip = function(e){
 
    e.stopImmediatePropagation();
 
    var $tipBox = $('#tip-box');
 
    $tipBox.hide();
 
    var el = e.currentTarget;
 
    $(el).prop('title', $(el).attr('tt_title'));
 
};
 

	
 
/**
 
 * Quick filter widget
 
 *
 
 * @param target: filter input target
 
 * @param nodes: list of nodes in html we want to filter.
 
 * @param display_element function that takes current node from nodes and
 
 *    does hide or show based on the node
 
 */
 
var q_filter = (function() {
 
    var _namespace = {};
 
    var namespace = function (target) {
 
        if (!(target in _namespace)) {
 
            _namespace[target] = {};
 
        }
 
        return _namespace[target];
 
    };
 
    return function (target, $nodes, display_element) {
 
        var $q_filter_field = $('#' + target);
 
        var F = namespace(target);
 

	
 
        $q_filter_field.keyup(function (e) {
 
            clearTimeout(F.filterTimeout);
 
            F.filterTimeout = setTimeout(F.updateFilter, 600);
 
        });
 

	
 
        F.filterTimeout = null;
 

	
 
        F.updateFilter = function () {
 
            // Reset timeout
 
            F.filterTimeout = null;
 

	
 
            var obsolete = [];
 

	
 
            var req = $q_filter_field.val().toLowerCase();
 

	
 
            var showing = 0;
 
            $nodes.each(function () {
 
                var n = this;
 
                var target_element = display_element(n);
 
                if (req && n.innerHTML.toLowerCase().indexOf(req) == -1) {
 
                    $(target_element).hide();
 
                }
 
                else {
 
                    $(target_element).show();
 
                    showing += 1;
 
                }
 
            });
 

	
 
            $('#repo_count').html(showing);
 
            /* FIXME: don't hardcode */
 
        }
 
    }
 
})();
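// Illustrative usage (element ids assumed): debounce-filter table rows by
// the text typed into #repo_filter, where display_element maps each matched
// node to the element that should be shown or hidden:
//
//   q_filter('repo_filter', $('#repos_list tbody tr'),
//            function(node) { return node; });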
 

	
 

	
 
/**
 
 * Comment handling
 
 */
 

	
 
// move comments to their right location, inside new trs
 
function move_comments($anchorcomments) {
 
    $anchorcomments.each(function(i, anchorcomment) {
 
        var $anchorcomment = $(anchorcomment);
 
        var target_id = $anchorcomment.data('target-id');
 
        var $comment_div = _get_add_comment_div(target_id);
 
        var f_path = $anchorcomment.data('f_path');
 
        var line_no = $anchorcomment.data('line_no');
 
        if ($comment_div[0]) {
 
            $comment_div.append($anchorcomment.children());
 
            _comment_div_append_add($comment_div, f_path, line_no);
 
        } else {
 
           $anchorcomment.before("Comment to {0} line {1} which is outside the diff context:".format(f_path || '?', line_no || '?'));
 
        }
 
    });
 
    linkInlineComments($('.firstlink'), $('.comment:first-child'));
 
}
 

	
 
// comment bubble was clicked - insert new tr and show form
 
function show_comment_form($bubble) {
 
    var children = $bubble.closest('tr.line').children('[id]');
 
    var line_td_id = children[children.length - 1].id;
 
    var $comment_div = _get_add_comment_div(line_td_id);
 
    var f_path = $bubble.closest('div.full_f_path').data('f_path');
 
    var parts = line_td_id.split('_');
 
    var line_no = parts[parts.length-1];
 
    comment_div_state($comment_div, f_path, line_no, true);
 
}
 

	
 
// return comment div for target_id - add it if it doesn't exist yet
 
function _get_add_comment_div(target_id) {
 
    var comments_box_id = 'comments-' + target_id;
 
    var $comments_box = $('#' + comments_box_id);
 
    if (!$comments_box.length) {
 
        var html = '<tr><td id="{0}" colspan="3" class="inline-comments"></td></tr>'.format(comments_box_id);
 
        $('#' + target_id).closest('tr').after(html);
 
        $comments_box = $('#' + comments_box_id);
 
    }
 
    return $comments_box;
 
}
 

	
 
// set $comment_div state - showing or not showing form and Add button
 
function comment_div_state($comment_div, f_path, line_no, show_form) {
 
    var $forms = $comment_div.children('.comment-inline-form');
 
    var $buttonrow = $comment_div.children('.add-button-row');
 
    var $comments = $comment_div.children('.comment');
 
    if (show_form) {
 
        if (!$forms.length) {
 
            _comment_div_append_form($comment_div, f_path, line_no);
 
        }
 
    } else {
 
        $forms.remove();
 
    }
 
    $buttonrow.remove();
 
    if ($comments.length && !show_form) {
 
        _comment_div_append_add($comment_div, f_path, line_no);
 
    }
 
}
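// State summary for comment_div_state: show_form=true ensures exactly one
// inline form is present (appending one if missing); show_form=false removes
// any forms and, when comments remain, re-adds the "Add Another Comment" row.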
 

	
 
// append an Add button to $comment_div and hook it up to show form
 
function _comment_div_append_add($comment_div, f_path, line_no) {
 
    var addlabel = TRANSLATION_MAP['Add Another Comment'];
 
    var $add = $('<div class="add-button-row"><span class="btn btn-mini add-button">{0}</span></div>'.format(addlabel));
 
    $comment_div.append($add);
 
    $add.children('.add-button').click(function(e) {
 
        comment_div_state($comment_div, f_path, line_no, true);
 
    });
 
}
 

	
 
// append a comment form to $comment_div
 
function _comment_div_append_form($comment_div, f_path, line_no) {
 
    var $form_div = $($('#comment-inline-form-template').html().format(f_path, line_no))
 
        .addClass('comment-inline-form');
 
    $comment_div.append($form_div);
 
    var $form = $comment_div.find("form");
 

	
 
    $form.submit(function(e) {
 
        e.preventDefault();
 

	
 
        var text = $('#text_'+line_no).val();
 
        if (!text){
 
            return;
 
        }
 

	
 
        $form.find('.submitting-overlay').show();
 

	
 
        var success = function(json_data) {
 
            $comment_div.append(json_data['rendered_text']);
 
            comment_div_state($comment_div, f_path, line_no, false);
 
            linkInlineComments($('.firstlink'), $('.comment:first-child'));
 
        };
 
        var postData = {
 
            'text': text,
 
            'f_path': f_path,
 
            'line': line_no
 
        };
 
        ajaxPOST(AJAX_COMMENT_URL, postData, success);
 
    });
 

	
 
    $('#preview-btn_'+line_no).click(function(e){
 
        var text = $('#text_'+line_no).val();
 
        if(!text){
 
            return
 
        }
 
        $('#preview-box_'+line_no).addClass('unloaded');
 
        $('#preview-box_'+line_no).html(_TM['Loading ...']);
 
        $('#edit-container_'+line_no).hide();
 
        $('#edit-btn_'+line_no).show();
 
        $('#preview-container_'+line_no).show();
 
        $('#preview-btn_'+line_no).hide();
 

	
 
        var url = pyroutes.url('changeset_comment_preview', {'repo_name': REPO_NAME});
 
        var post_data = {'text': text};
 
        ajaxPOST(url, post_data, function(html) {
 
            $('#preview-box_'+line_no).html(html);
 
            $('#preview-box_'+line_no).removeClass('unloaded');
 
        })
 
    })
 
    $('#edit-btn_'+line_no).click(function(e) {
 
        $('#edit-container_'+line_no).show();
 
        $('#edit-btn_'+line_no).hide();
 
        $('#preview-container_'+line_no).hide();
 
        $('#preview-btn_'+line_no).show();
 
    })
 

	
 
    // create event for hide button
 
    $form.find('.hide-inline-form').click(function(e) {
 
        comment_div_state($comment_div, f_path, line_no, false);
 
    });
 

	
 
    setTimeout(function() {
 
        // callbacks
 
        tooltip_activate();
 
        MentionsAutoComplete($('#text_'+line_no), $('#mentions_container_'+line_no),
 
                             _USERS_AC_DATA);
 
        $('#text_'+line_no).focus();
 
    }, 10);
 
}
 

	
 

	
 
function deleteComment(comment_id) {
 
    var url = AJAX_COMMENT_DELETE_URL.replace('__COMMENT_ID__', comment_id);
 
    var postData = {'_method': 'delete'};
 
    var success = function(o) {
 
        $('#comment-'+comment_id).remove();
 
        // Ignore that this might leave a stray Add button (or have a pending form with another comment) ...
 
    }
 
    ajaxPOST(url, postData, success);
 
}
 

	
 

	
 
/**
 
 * Double link comments
 
 */
 
var linkInlineComments = function($firstlinks, $comments){
 
    if ($comments.length > 0) {
 
        $firstlinks.html('<a href="#{0}">First comment</a>'.format($comments.prop('id')));
 
    }
 
    if ($comments.length <= 1) {
 
        return;
 
    }
 

	
 
    $comments.each(function(i, e){
 
            var prev = '';
 
            if (i > 0){
 
                var prev_anchor = $($comments.get(i-1)).prop('id');
 
                prev = '<a href="#{0}">Previous comment</a>'.format(prev_anchor);
 
            }
 
            var next = '';
 
            if (i+1 < $comments.length){
 
                var next_anchor = $($comments.get(i+1)).prop('id');
 
                next = '<a href="#{0}">Next comment</a>'.format(next_anchor);
 
            }
 
            $(this).find('.comment-prev-next-links').html(
 
                '<div class="prev-comment">{0}</div>'.format(prev) +
 
                '<div class="next-comment">{0}</div>'.format(next));
 
        });
 
}
 

	
 
/* activate files.html stuff */
 
var fileBrowserListeners = function(current_url, node_list_url, url_base){
 
    var current_url_branch = "?branch=__BRANCH__";
 

	
 
    $('#stay_at_branch').on('click',function(e){
 
        if(e.currentTarget.checked){
 
            var uri = current_url_branch;
 
            uri = uri.replace('__BRANCH__',e.currentTarget.value);
 
            window.location = uri;
 
        }
 
        else{
 
            window.location = current_url;
 
        }
 
    });
 

	
 
    var $node_filter = $('#node_filter');
 

	
 
    var filterTimeout = null;
 
    var nodes = null;
 

	
 
    var initFilter = function(){
 
        $('#node_filter_box_loading').show();
 
        $('#search_activate_id').hide();
 
        $('#add_node_id').hide();
 
        $.ajax({url: node_list_url, headers: {'X-PARTIAL-XHR': '1'}, cache: false})
 
            .done(function(json) {
 
                    nodes = json.nodes;
 
                    $('#node_filter_box_loading').hide();
 
                    $('#node_filter_box').show();
 
                    $node_filter.focus();
 
                    if($node_filter.hasClass('init')){
 
                        $node_filter.val('');
 
                        $node_filter.removeClass('init');
 
                    }
 
                })
 
            .fail(function() {
 
                    console.log('fileBrowserListeners initFilter failed to load');
 
                })
 
        ;
 
    }
 

	
 
    var updateFilter = function(e) {
 
        return function(){
 
            // Reset timeout
 
            filterTimeout = null;
 
            var query = e.currentTarget.value.toLowerCase();
 
            var match = [];
 
            var matches = 0;
 
            var matches_max = 20;
 
            if (query != ""){
 
                for(var i=0;i<nodes.length;i++){
 
                    var pos = nodes[i].name.toLowerCase().indexOf(query);
 
                    if(query && pos != -1){
 
                        matches++
 
                        // show only a limited number of matches to keep the browser responsive
 
                        if (matches > matches_max){
 
                            break;
 
                        }
 

	
 
                        var n = nodes[i].name;
 
                        var t = nodes[i].type;
 
                        var n_hl = n.substring(0,pos)
 
                            + "<b>{0}</b>".format(n.substring(pos,pos+query.length))
 
                            + n.substring(pos+query.length);
 
                        var new_url = url_base.replace('__FPATH__',n);
 
                        match.push('<tr><td><a class="browser-{0}" href="{1}">{2}</a></td><td colspan="5"></td></tr>'.format(t,new_url,n_hl));
 
                    }
 
                    if(match.length >= matches_max){
 
                        match.push('<tr><td>{0}</td><td colspan="5"></td></tr>'.format(_TM['Search truncated']));
 
                        break;
 
                    }
 
                }
 
            }
 
            if(query != ""){
 
                $('#tbody').hide();
 
                $('#tbody_filtered').show();
 

	
 
                if (match.length==0){
 
                  match.push('<tr><td>{0}</td><td colspan="5"></td></tr>'.format(_TM['No matching files']));
 
                }
 

	
 
                $('#tbody_filtered').html(match.join(""));
 
            }
 
            else{
 
                $('#tbody').show();
 
                $('#tbody_filtered').hide();
 
            }
 
        }
 
    };
 

	
 
    $('#filter_activate').click(function(){
 
            initFilter();
 
        });
 
    $node_filter.click(function(){
 
            if($node_filter.hasClass('init')){
 
                $node_filter.val('');
 
                $node_filter.removeClass('init');
 
            }
 
        });
 
    $node_filter.keyup(function(e){
 
            clearTimeout(filterTimeout);
 
            filterTimeout = setTimeout(updateFilter(e),600);
 
        });
 
};
 

	
 

	
 
var initCodeMirror = function(textarea_id, baseUrl, resetUrl){
 
    var myCodeMirror = CodeMirror.fromTextArea($('#' + textarea_id)[0], {
 
            mode: "null",
 
            lineNumbers: true,
 
            indentUnit: 4,
 
            autofocus: true
 
        });
 
    CodeMirror.modeURL = baseUrl + "/codemirror/mode/%N/%N.js";
 

	
 
    $('#reset').click(function(e){
 
            window.location=resetUrl;
 
        });
 

	
 
    $('#file_enable').click(function(){
 
            $('#editor_container').show();
 
            $('#upload_file_container').hide();
 
            $('#filename_container').show();
 
            $('#mimetype_header').show();
 
        });
 

	
 
    $('#upload_file_enable').click(function(){
 
            $('#editor_container').hide();
 
            $('#upload_file_container').show();
 
            $('#filename_container').hide();
 
            $('#mimetype_header').hide();
 
        });
 

	
 
    return myCodeMirror
 
};
 

	
 
var setCodeMirrorMode = function(codeMirrorInstance, mode) {
 
    CodeMirror.autoLoadMode(codeMirrorInstance, mode);
 
}
 

	
 

	
 
var _getIdentNode = function(n){
 
    // iterate through ancestor nodes until one with an id of the form L<number> is found
 

	
 
    if (typeof n == 'undefined'){
 
        return -1
 
    }
 

	
 
    if(typeof n.id != "undefined" && n.id.match('L[0-9]+')){
 
        return n
 
    }
 
    else{
 
        return _getIdentNode(n.parentNode);
 
    }
 
};
 

	
 
/* generate links for multi-line selections that can be shown by files.html page_highlights.
 
 * This is a mouseup handler for hlcode from CodeHtmlFormatter and pygmentize */
 
var getSelectionLink = function(e) {
 
    //get selection from start/to nodes
 
    if (typeof window.getSelection != "undefined") {
 
        var s = window.getSelection();
 

	
 
        var from = _getIdentNode(s.anchorNode);
 
        var till = _getIdentNode(s.focusNode);
 

	
 
        var f_int = parseInt(from.id.replace('L',''));
 
        var t_int = parseInt(till.id.replace('L',''));
 

	
 
        var yoffset = 35;
 
        var ranges = [parseInt(from.id.replace('L','')), parseInt(till.id.replace('L',''))];
 
        if (ranges[0] > ranges[1]){
 
            //highlight from bottom
 
            yoffset = -yoffset;
 
            ranges = [ranges[1], ranges[0]];
 
        }
 
        var $hl_div = $('div#linktt');
 
        // if we select more than 2 lines
 
        if (ranges[0] != ranges[1]){
 
            if ($hl_div.length) {
 
                $hl_div.html('');
 
            } else {
 
                $hl_div = $('<div id="linktt" class="hl-tip-box">');
 
                $('body').prepend($hl_div);
 
            }
 

	
 
            $hl_div.append($('<a>').html(_TM['Selection Link']).prop('href', location.href.substring(0, location.href.indexOf('#')) + '#L' + ranges[0] + '-'+ranges[1]));
 
            var xy = $(till).offset();
 
            $hl_div.css('top', (xy.top + yoffset) + 'px').css('left', xy.left + 'px');
 
            $hl_div.show();
 
        }
 
        else{
 
            $hl_div.hide();
 
        }
 
    }
 
};
 

	
 
var deleteNotification = function(url, notification_id, callbacks){
 
    var success = function(o){
 
            $("#notification_"+notification_id).remove();
 
            _run_callbacks(callbacks);
 
        };
 
    var failure = function(o){
 
            alert("deleteNotification failure");
 
        };
 
    var postData = {'_method': 'delete'};
 
    var sUrl = url.replace('__NOTIFICATION_ID__',notification_id);
 
    ajaxPOST(sUrl, postData, success, failure);
 
};
 

	
 
var readNotification = function(url, notification_id, callbacks){
 
    var success = function(o){
 
            var $obj = $("#notification_"+notification_id);
 
            $obj.removeClass('unread');
 
            $obj.find('.read-notification').remove();
 
            _run_callbacks(callbacks);
 
        };
 
    var failure = function(o){
 
            alert("readNotification failure");
 
        };
 
    var postData = {'_method': 'put'};
 
    var sUrl = url.replace('__NOTIFICATION_ID__',notification_id);
 
    ajaxPOST(sUrl, postData, success, failure);
 
};
 

	
 
/**
 
 * Autocomplete functionality
 
 */
 

	
 
// Custom search function for the DataSource of users
 
var autocompleteMatchUsers = function (sQuery, myUsers) {
 
    // Case insensitive matching
 
    var query = sQuery.toLowerCase();
 
    var i = 0;
 
    var l = myUsers.length;
 
    var matches = [];
 

	
 
    // Match against each name of each contact
 
    for (; i < l; i++) {
 
        var contact = myUsers[i];
 
        if (((contact.fname+"").toLowerCase().indexOf(query) > -1) ||
 
             ((contact.lname+"").toLowerCase().indexOf(query) > -1) ||
 
             ((contact.nname) && ((contact.nname).toLowerCase().indexOf(query) > -1))) {
 
            matches[matches.length] = contact;
 
        }
 
    }
 
    return matches;
 
};
 

	
 
// Custom search function for the DataSource of userGroups
 
var autocompleteMatchGroups = function (sQuery, myGroups) {
 
    // Case insensitive matching
 
    var query = sQuery.toLowerCase();
 
    var i = 0;
 
    var l = myGroups.length;
 
    var matches = [];
 

	
 
    // Match against each name of each group
 
    for (; i < l; i++) {
 
        var matched_group = myGroups[i];
 
        if (matched_group.grname.toLowerCase().indexOf(query) > -1) {
 
            matches[matches.length] = matched_group;
 
        }
 
    }
 
    return matches;
 
};
 

	
 
// Helper highlight function for the formatter
 
var autocompleteHighlightMatch = function (full, snippet, matchindex) {
 
    return full.substring(0, matchindex)
 
        + "<span class='match'>"
 
        + full.substr(matchindex, snippet.length)
 
        + "</span>" + full.substring(matchindex + snippet.length);
 
};
 

	
 
var gravatar = function(link, size, cssclass) {
 
    var elem = '<img alt="" class="{2}" style="width: {0}px; height: {0}px" src="{1}"/>'.format(size, link, cssclass);
 
    if (!link) {
 
        elem = '<i class="icon-user {1}" style="font-size: {0}px;"></i>'.format(size, cssclass);
 
    }
 
    return elem;
 
}
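// Example (values illustrative): gravatar(link, 20, 'perm-gravatar') yields a
// 20x20 <img> when link is set, and falls back to an <i class="icon-user">
// sized via font-size when it is not.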
 

	
 
var autocompleteGravatar = function(res, link, size, group) {
 
    var elem = gravatar(link, size, "perm-gravatar-ac");
 
    if (group !== undefined) {
 
        elem = '<i class="perm-gravatar-ac icon-users"></i>';
 
    }
 
    return '<div class="ac-container-wrap">{0}{1}</div>'.format(elem, res);
 
}
 

	
 
// Custom formatter to highlight the matching letters
 
var autocompleteFormatter = function (oResultData, sQuery, sResultMatch) {
 
    var query = sQuery.toLowerCase();
 

	
 
    // group
 
    if (oResultData.grname != undefined) {
 
        var grname = oResultData.grname;
 
        var grmembers = oResultData.grmembers;
 
        var grnameMatchIndex = grname.toLowerCase().indexOf(query);
 
        var grprefix = "{0}: ".format(_TM['Group']);
 
        var grsuffix = " ({0} {1})".format(grmembers, _TM['members']);
 

	
 
        if (grnameMatchIndex > -1) {
 
            return autocompleteGravatar(grprefix + autocompleteHighlightMatch(grname, query, grnameMatchIndex) + grsuffix, null, null, true);
 
        }
 
        return autocompleteGravatar(grprefix + oResultData.grname + grsuffix, null, null, true);
 

	
 
    // users
 
    } else if (oResultData.nname != undefined) {
 
        var fname = oResultData.fname || "";
 
        var lname = oResultData.lname || "";
 
        var nname = oResultData.nname;
 

	
 
        // Guard against null value
 
        var fnameMatchIndex = fname.toLowerCase().indexOf(query),
 
            lnameMatchIndex = lname.toLowerCase().indexOf(query),
 
            nnameMatchIndex = nname.toLowerCase().indexOf(query),
 
            displayfname, displaylname, displaynname, displayname;
 

	
 
        if (fnameMatchIndex > -1) {
 
            displayfname = autocompleteHighlightMatch(fname, query, fnameMatchIndex);
 
        } else {
 
            displayfname = fname;
 
        }
 

	
 
        if (lnameMatchIndex > -1) {
 
            displaylname = autocompleteHighlightMatch(lname, query, lnameMatchIndex);
 
        } else {
 
            displaylname = lname;
 
        }
 

	
 
        if (nnameMatchIndex > -1) {
 
            displaynname = autocompleteHighlightMatch(nname, query, nnameMatchIndex);
 
        } else {
 
            displaynname = nname;
 
        }
 

	
 
        displayname = displaynname;
 
        if (displayfname && displaylname) {
 
            displayname = "{0} {1} ({2})".format(displayfname, displaylname, displayname);
 
        }
 

	
 
        return autocompleteGravatar(displayname, oResultData.gravatar_lnk, oResultData.gravatar_size);
 
    } else {
 
        return '';
 
    }
 
};
 

	
 
// Generate a basic autocomplete instance that can be tweaked further by the caller
 
var autocompleteCreate = function ($inputElement, $container, matchFunc) {
 
    var datasource = new YAHOO.util.FunctionDataSource(matchFunc);
 

	
 
    var autocomplete = new YAHOO.widget.AutoComplete($inputElement[0], $container[0], datasource);
 
    autocomplete.useShadow = false;
 
    autocomplete.resultTypeList = false;
 
    autocomplete.animVert = false;
 
    autocomplete.animHoriz = false;
 
    autocomplete.animSpeed = 0.1;
 
    autocomplete.formatResult = autocompleteFormatter;
 

	
 
    return autocomplete;
 
}
 

	
 
var SimpleUserAutoComplete = function ($inputElement, $container, users_list) {
 

	
 
    var matchUsers = function (sQuery) {
 
        return autocompleteMatchUsers(sQuery, users_list);
 
    }
 

	
 
    var userAC = autocompleteCreate($inputElement, $container, matchUsers);
 

	
 
    // Handler for selection of an entry
 
    var itemSelectHandler = function (sType, aArgs) {
 
        var myAC = aArgs[0]; // reference back to the AC instance
 
        var elLI = aArgs[1]; // reference to the selected LI element
 
        var oData = aArgs[2]; // object literal of selected item's result data
 
        myAC.getInputEl().value = oData.nname;
 
    };
 
    userAC.itemSelectEvent.subscribe(itemSelectHandler);
 
}
 

	
 
var MembersAutoComplete = function ($inputElement, $container, users_list, groups_list) {
 

	
 
    var matchAll = function (sQuery) {
 
        var u = autocompleteMatchUsers(sQuery, users_list);
 
        var g = autocompleteMatchGroups(sQuery, groups_list);
 
        return u.concat(g);
 
    };
 

	
 
    var membersAC = autocompleteCreate($inputElement, $container, matchAll);
 

	
 
    // Handler for selection of an entry
 
    var itemSelectHandler = function (sType, aArgs) {
 
        var nextId = $inputElement.prop('id').split('perm_new_member_name_')[1];
 
        var myAC = aArgs[0]; // reference back to the AC instance
 
        var elLI = aArgs[1]; // reference to the selected LI element
 
        var oData = aArgs[2]; // object literal of selected item's result data
 
        //fill the autocomplete with value
 
        if (oData.nname != undefined) {
 
            //users
 
            myAC.getInputEl().value = oData.nname;
 
            $('#perm_new_member_type_'+nextId).val('user');
 
        } else {
 
            //groups
 
            myAC.getInputEl().value = oData.grname;
 
            $('#perm_new_member_type_'+nextId).val('users_group');
 
        }
 
    };
 
    membersAC.itemSelectEvent.subscribe(itemSelectHandler);
 
}
 

	
 
var MentionsAutoComplete = function ($inputElement, $container, users_list) {
 

	
 
    var matchUsers = function (sQuery) {
 
            var org_sQuery = sQuery;
 
            if(this.mentionQuery == null){
 
                return []
 
            }
 
            sQuery = this.mentionQuery;
 
            return autocompleteMatchUsers(sQuery, users_list);
 
    }
 

	
 
    var mentionsAC = autocompleteCreate($inputElement, $container, matchUsers);
 
    mentionsAC.suppressInputUpdate = true;
 
    // Overwrite formatResult to take into account mentionQuery
 
    mentionsAC.formatResult = function (oResultData, sQuery, sResultMatch) {
 
        var org_sQuery = sQuery;
 
        if (this.dataSource.mentionQuery != null) {
 
            sQuery = this.dataSource.mentionQuery;
 
        }
 
        return autocompleteFormatter(oResultData, sQuery, sResultMatch);
 
    }
 

	
 
    // Handler for selection of an entry
 
    if(mentionsAC.itemSelectEvent){
 
        mentionsAC.itemSelectEvent.subscribe(function (sType, aArgs) {
 
            var myAC = aArgs[0]; // reference back to the AC instance
 
            var elLI = aArgs[1]; // reference to the selected LI element
 
            var oData = aArgs[2]; // object literal of selected item's result data
 
            // Replace the typed mention with the selected username
            var chunks = myAC.dataSource.chunks;
            // replace the middle chunk (the search term) with the actual match
            chunks[1] = chunks[1].replace('@'+myAC.dataSource.mentionQuery,
                                          '@'+oData.nname+' ');
 
            myAC.getInputEl().value = chunks.join('');
 
            myAC.getInputEl().focus(); // FIXME: restoring focus here does not seem to take effect
 
        });
 
    }
 

	
 
    // The datasource carries the current mention search state: mentionQuery is
    // only set while the user is typing after an `@`, and chunks holds the
    // text before, inside and after the mention being completed.
 
    mentionsAC.dataSource.chunks = [];
 
    mentionsAC.dataSource.mentionQuery = null;
 

	
 
    mentionsAC.get_mention = function(msg, max_pos) {
 
        var org = msg;
 
        // Must match utils2.py MENTIONS_REGEX.
 
        // Only matching on string up to cursor, so it must end with $
 
        var re = new RegExp('(?:^|[^a-zA-Z0-9])@([a-zA-Z0-9][-_.a-zA-Z0-9]*[a-zA-Z0-9])$');
 
        var chunks  = [];
 

	
 
        // cut first chunk until current pos
 
        var to_max = msg.substr(0, max_pos);
 
        var at_pos = Math.max(0,to_max.lastIndexOf('@')-1);
 
        var msg2 = to_max.substr(at_pos);
 

	
 
        chunks.push(org.substr(0,at_pos)); // prefix chunk
 
        chunks.push(msg2);                 // search chunk
 
        chunks.push(org.substr(max_pos));  // postfix chunk
 

	
 
        // clean up msg2 for filtering and regex match
 
        msg2 = msg2.lstrip(' ').lstrip('\n');
 

	
 
        if(re.test(msg2)){
 
            var unam = re.exec(msg2)[1];
 
            return [unam, chunks];
 
        }
 
        return [null, null];
 
    };
 

	
 
    $inputElement.keyup(function(e){
 
            var currentMessage = $inputElement.val();
 
            var currentCaretPosition = $inputElement[0].selectionStart;
 

	
 
            var unam = mentionsAC.get_mention(currentMessage, currentCaretPosition);
 
            var curr_search = null;
 
            if(unam[0]){
 
                curr_search = unam[0];
 
            }
 

	
 
            mentionsAC.dataSource.chunks = unam[1];
 
            mentionsAC.dataSource.mentionQuery = curr_search;
 
        });
 
};
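
// Worked example (illustrative): with the input holding 'cc @mads please fix'
// and the caret right after '@mads' (max_pos 8), get_mention splits the text
// into chunks ['cc', ' @mads', ' please fix'] and returns ['mads', chunks];
// the keyup handler then stores 'mads' as mentionQuery, so matchUsers only
// filters while a mention is actually being typed.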
 

	
 
var addReviewMember = function(id,fname,lname,nname,gravatar_link,gravatar_size){
 
    var displayname = nname;
 
    if ((fname != "") && (lname != "")) {
 
        displayname = "{0} {1} ({2})".format(fname, lname, nname);
 
    }
 
    var gravatarelm = gravatar(gravatar_link, gravatar_size, "");
 
    // WARNING: the HTML below is duplicated in
    // kallithea/templates/pullrequests/pullrequest_show.html;
    // any change here must be reflected in that template too.
 
    var element = (
 
        '     <li id="reviewer_{2}">\n'+
 
        '       <div class="reviewers_member">\n'+
 
        '           <div class="reviewer_status tooltip" title="not_reviewed">\n'+
 
        '             <i class="icon-circle changeset-status-not_reviewed"></i>\n'+
 
        '           </div>\n'+
 
        '         <div class="reviewer_gravatar gravatar">{0}</div>\n'+
 
        '         <div style="float:left;">{1}</div>\n'+
 
        '         <input type="hidden" value="{2}" name="review_members" />\n'+
 
        '         <div class="reviewer_member_remove action_button" onclick="removeReviewMember({2})">\n'+
 
        '             <i class="icon-minus-circled"></i>\n'+
 
        '         </div> (add not saved)\n'+
 
        '       </div>\n'+
 
        '     </li>\n'
 
        ).format(gravatarelm, displayname, id);
 
    // check whether this reviewer ID is already in the list
 
    var ids = [];
 
    $('#review_members').find('li').each(function() {
 
            ids.push(this.id);
 
        });
 
    if(ids.indexOf('reviewer_'+id) == -1){
 
        //only add if it's not there
 
        $('#review_members').append(element);
 
    }
 
};
 

	
 
var removeReviewMember = function(reviewer_id, repo_name, pull_request_id){
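    // Note: this only marks the reviewer as removed in the form (strike-through
    // plus renaming the hidden input to review_members_removed); the actual
    // removal happens server-side when the form is submitted.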
 
    var $li = $('#reviewer_{0}'.format(reviewer_id));
 
    $li.find('div div').css("text-decoration", "line-through");
 
    $li.find('input').prop('name', 'review_members_removed');
 
    $li.find('.reviewer_member_remove').replaceWith('&nbsp;(remove not saved)');
 
};
 

	
 
/* activate auto completion of users as PR reviewers */
 
var PullRequestAutoComplete = function ($inputElement, $container, users_list) {
 

	
 
    var matchUsers = function (sQuery) {
 
        return autocompleteMatchUsers(sQuery, users_list);
 
    };
 

	
 
    var reviewerAC = autocompleteCreate($inputElement, $container, matchUsers);
 
    reviewerAC.suppressInputUpdate = true;
 

	
 
    // Handler for selection of an entry
 
    if(reviewerAC.itemSelectEvent){
 
        reviewerAC.itemSelectEvent.subscribe(function (sType, aArgs) {
 
            var myAC = aArgs[0]; // reference back to the AC instance
 
            var elLI = aArgs[1]; // reference to the selected LI element
 
            var oData = aArgs[2]; // object literal of selected item's result data
 

	
 
            addReviewMember(oData.id, oData.fname, oData.lname, oData.nname,
 
                            oData.gravatar_lnk, oData.gravatar_size);
 
            myAC.getInputEl().value = '';
 
        });
 
    }
 
};
 

	
 
/**
 
 * Activate .quick_repo_menu
 
 */
 
var quick_repo_menu = function(){
 
    $(".quick_repo_menu").mouseenter(function(e) {
 
            var $menu = $(e.currentTarget).children().first().children().first();
 
            if($menu.hasClass('hidden')){
 
                $menu.removeClass('hidden').addClass('active');
 
                $(e.currentTarget).removeClass('hidden').addClass('active');
 
            }
 
        });
 
    $(".quick_repo_menu").mouseleave(function(e) {
 
            var $menu = $(e.currentTarget).children().first().children().first();
 
            if($menu.hasClass('active')){
 
                $menu.removeClass('active').addClass('hidden');
 
                $(e.currentTarget).removeClass('active').addClass('hidden');
 
            }
 
        });
 
};
 

	
 

	
 
/**
 
 * TABLE SORTING
 
 */
 

	
 
var revisionSort = function(a, b, desc, field) {
 
    var a_ = parseInt(a.getData('last_rev_raw') || 0);
 
    var b_ = parseInt(b.getData('last_rev_raw') || 0);
 

	
 
    return YAHOO.util.Sort.compare(a_, b_, desc);
 
};
 

	
 
var ageSort = function(a, b, desc, field) {
 
    // data is like: <span class="tooltip" date="2014-06-04 18:18:55.325474" title="Wed, 04 Jun 2014 18:18:55">1 day and 23 hours ago</span>
 
    var a_ = $(a.getData(field)).attr('date');
 
    var b_ = $(b.getData(field)).attr('date');
 

	
 
    return YAHOO.util.Sort.compare(a_, b_, desc);
 
};
 

	
 
var lastLoginSort = function(a, b, desc, field) {
 
    var a_ = parseFloat(a.getData('last_login_raw') || 0);
 
    var b_ = parseFloat(b.getData('last_login_raw') || 0);
 

	
 
    return YAHOO.util.Sort.compare(a_, b_, desc);
 
};
 

	
 
var nameSort = function(a, b, desc, field) {
 
    var a_ = a.getData('raw_name') || 0;
 
    var b_ = b.getData('raw_name') || 0;
 

	
 
    return YAHOO.util.Sort.compare(a_, b_, desc);
 
};
 

	
 
var dateSort = function(a, b, desc, field) {
 
    var a_ = parseFloat(a.getData('raw_date') || 0);
 
    var b_ = parseFloat(b.getData('raw_date') || 0);
 

	
 
    return YAHOO.util.Sort.compare(a_, b_, desc);
 
};
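
// Sketch of how these comparators are meant to be wired up (the column keys
// below are assumptions): YUI DataTable accepts them as
// sortOptions.sortFunction in a column definition, which is how they receive
// the (a, b, desc, field) arguments used above.
var exampleSortColumns = [
    {key: "name", label: "Name", sortable: true,
     sortOptions: {sortFunction: nameSort}},
    {key: "last_change", label: "Age", sortable: true,
     sortOptions: {sortFunction: ageSort}},
    {key: "last_changeset", label: "Tip", sortable: true,
     sortOptions: {sortFunction: revisionSort}}
];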
 

	
 
var addPermAction = function(_html, users_list, groups_list){
 
    var $last_node = $('.last_new_member').last(); // empty tr between last and add
 
    var next_id = $('.new_members').length;
 
    $last_node.before($('<tr class="new_members">').append(_html.format(next_id)));
 
    MembersAutoComplete($("#perm_new_member_name_"+next_id),
 
            $("#perm_container_"+next_id), users_list, groups_list);
 
};
 

	
 
function ajaxActionRevokePermission(url, obj_id, obj_type, field_id, extra_data) {
 
    var success = function (o) {
 
            $('#' + field_id).remove();
 
        };
 
    var failure = function (o) {
 
            alert(_TM['Failed to revoke permission'] + ": " + o.status);
 
        };
 
    var query_params = {
 
        '_method': 'delete'
 
    };
 
    // put extra data into POST
 
    if (extra_data !== undefined && (typeof extra_data === 'object')){
 
        for(var k in extra_data){
 
            query_params[k] = extra_data[k];
 
        }
 
    }
 

	
 
    if (obj_type=='user'){
 
        query_params['user_id'] = obj_id;
 
        query_params['obj_type'] = 'user';
 
    }
 
    else if (obj_type=='user_group'){
 
        query_params['user_group_id'] = obj_id;
 
        query_params['obj_type'] = 'user_group';
 
    }
 

	
 
    ajaxPOST(url, query_params, success, failure);
 
};
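
// Usage sketch (the url and ids are assumptions; both normally come from the
// template that renders the permission row): revoke user 42's grant and drop
// the row with DOM id user_42 on success.
var exampleRevokePermission = function (url) {
    ajaxActionRevokePermission(url, 42, 'user', 'user_42');
};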
 

	
 
/* Multi selectors */
 

	
 
var MultiSelectWidget = function(selected_id, available_id, form_id){
 
    var $availableselect = $('#' + available_id);
 
    var $selectedselect = $('#' + selected_id);
 

	
 
    //fill available only with those not in selected
 
    var $selectedoptions = $selectedselect.children('option');
 
    $availableselect.children('option').filter(function(i, e){
 
            for(var j = 0, node; node = $selectedoptions[j]; j++){
 
                if(node.value == e.value){
 
                    return true;
 
                }
 
            }
 
            return false;
 
        }).remove();
 

	
 
    $('#add_element').click(function(e){
 
            $selectedselect.append($availableselect.children('option:selected'));
 
        });
 
    $('#remove_element').click(function(e){
 
            $availableselect.append($selectedselect.children('option:selected'));
 
        });
 

	
 
    $('#'+form_id).submit(function(){
 
            $selectedselect.children('option').each(function(i, e){
 
                e.selected = 'selected';
 
            });
 
        });
 
};
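
// Usage sketch (element ids are assumptions): besides the two select
// elements and the form, the widget also expects #add_element and
// #remove_element buttons to exist in the page.
var exampleMultiSelect = function () {
    MultiSelectWidget('selected_members', 'available_members', 'members_form');
};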
 

	
 
// custom paginator
 
var YUI_paginator = function(links_per_page, containers){
 

	
 
    (function () {
 

	
 
        var Paginator = YAHOO.widget.Paginator,
 
            l         = YAHOO.lang,
 
            setId     = YAHOO.util.Dom.generateId;
 

	
 
        Paginator.ui.MyFirstPageLink = function (p) {
 
            this.paginator = p;
 

	
 
            p.subscribe('recordOffsetChange',this.update,this,true);
 
            p.subscribe('rowsPerPageChange',this.update,this,true);
 
            p.subscribe('totalRecordsChange',this.update,this,true);
 
            p.subscribe('destroy',this.destroy,this,true);
 

	
 
            // TODO: make this work
 
            p.subscribe('firstPageLinkLabelChange',this.update,this,true);
 
            p.subscribe('firstPageLinkClassChange',this.update,this,true);
 
        };
 

	
 
        Paginator.ui.MyFirstPageLink.init = function (p) {
 
            p.setAttributeConfig('firstPageLinkLabel', {
 
                value : 1,
 
                validator : l.isString
 
            });
 
            p.setAttributeConfig('firstPageLinkClass', {
 
                value : 'yui-pg-first',
 
                validator : l.isString
 
            });
 
            p.setAttributeConfig('firstPageLinkTitle', {
 
                value : 'First Page',
 
                validator : l.isString
 
            });
 
        };
 

	
 
        // Instance members and methods
 
        Paginator.ui.MyFirstPageLink.prototype = {
 
            current   : null,
 
            leftmost_page: null,
 
            rightmost_page: null,
 
            link      : null,
 
            span      : null,
 
            dotdot    : null,
 
            getPos    : function(cur_page, max_page, items){
 
                var edge = parseInt(items / 2) + 1;
 
                if (cur_page <= edge){
 
                    var radius = Math.max(parseInt(items / 2), items - cur_page);
 
                }
 
                else if ((max_page - cur_page) < edge) {
 
                    var radius = (items - 1) - (max_page - cur_page);
 
                }
 
                else{
 
                    var radius = parseInt(items / 2);
 
                }
 

	
 
                var left = Math.max(1, (cur_page - (radius)));
 
                var right = Math.min(max_page, cur_page + (radius));
 
                return [left, cur_page, right];
 
            },
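            // Illustrative: getPos(7, 20, 5) gives edge 3 and radius 2, i.e. a
            // window [5, 7, 9] of page links around the current page.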
 
            render : function (id_base) {
 
                var p      = this.paginator,
 
                    c      = p.get('firstPageLinkClass'),
 
                    label  = p.get('firstPageLinkLabel'),
 
                    title  = p.get('firstPageLinkTitle');
 

	
 
                this.link     = document.createElement('a');
 
                this.span     = document.createElement('span');
 
                $(this.span).hide();
 

	
 
                var _pos = this.getPos(p.getCurrentPage(), p.getTotalPages(), 5);
 
                this.leftmost_page = _pos[0];
 
                this.rightmost_page = _pos[2];
 

	
 
                setId(this.link, id_base + '-first-link');
 
                this.link.href      = '#';
 
                this.link.className = c;
 
                this.link.innerHTML = label;
 
                this.link.title     = title;
 
                YUE.on(this.link,'click',this.onClick,this,true);
 

	
 
                setId(this.span, id_base + '-first-span');
 
                this.span.className = c;
 
                this.span.innerHTML = label;
 

	
 
                this.current = p.getCurrentPage() > 1 ? this.link : this.span;
 
                return this.current;
 
            },
 
            update : function (e) {
 
                var p      = this.paginator;
 
                var _pos   = this.getPos(p.getCurrentPage(), p.getTotalPages(), 5);
 
                this.leftmost_page = _pos[0];
 
                this.rightmost_page = _pos[2];
 

	
 
                if (e && e.prevValue === e.newValue) {
 
                    return;
 
                }
 

	
 
                var par = this.current ? this.current.parentNode : null;
 
                if (this.leftmost_page > 1) {
 
                    if (par && this.current === this.span) {
 
                        par.replaceChild(this.link,this.current);
 
                        this.current = this.link;
 
                    }
 
                } else {
 
                    if (par && this.current === this.link) {
 
                        par.replaceChild(this.span,this.current);
 
                        this.current = this.span;
 
                    }
 
                }
 
            },
 
            destroy : function () {
 
                YUE.purgeElement(this.link);
 
                this.current.parentNode.removeChild(this.current);
 
                this.link = this.span = null;
 
            },
 
            onClick : function (e) {
 
                YUE.stopEvent(e);
 
                this.paginator.setPage(1);
 
            }
 
        };
 

	
 
        })();
 

	
 
    (function () {
 

	
 
        var Paginator = YAHOO.widget.Paginator,
 
            l         = YAHOO.lang,
 
            setId     = YAHOO.util.Dom.generateId;
 

	
 
        Paginator.ui.MyLastPageLink = function (p) {
 
            this.paginator = p;
 

	
 
            p.subscribe('recordOffsetChange',this.update,this,true);
 
            p.subscribe('rowsPerPageChange',this.update,this,true);
 
            p.subscribe('totalRecordsChange',this.update,this,true);
 
            p.subscribe('destroy',this.destroy,this,true);
 

	
 
            // TODO: make this work
 
            p.subscribe('lastPageLinkLabelChange',this.update,this,true);
 
            p.subscribe('lastPageLinkClassChange', this.update,this,true);
 
        };
 

	
 
        Paginator.ui.MyLastPageLink.init = function (p) {
 
            p.setAttributeConfig('lastPageLinkLabel', {
 
                value : -1,
 
                validator : l.isString
 
            });
 
            p.setAttributeConfig('lastPageLinkClass', {
 
                value : 'yui-pg-last',
 
                validator : l.isString
 
            });
 
            p.setAttributeConfig('lastPageLinkTitle', {
 
                value : 'Last Page',
 
                validator : l.isString
 
            });
 

	
 
        };
 

	
 
        Paginator.ui.MyLastPageLink.prototype = {
 

	
 
            current   : null,
 
            leftmost_page: null,
 
            rightmost_page: null,
 
            link      : null,
 
            span      : null,
 
            dotdot    : null,
 
            na        : null,
 
            getPos    : function(cur_page, max_page, items){
 
                var edge = parseInt(items / 2) + 1;
 
                if (cur_page <= edge){
 
                    var radius = Math.max(parseInt(items / 2), items - cur_page);
 
                }
 
                else if ((max_page - cur_page) < edge) {
 
                    var radius = (items - 1) - (max_page - cur_page);
 
                }
 
                else{
 
                    var radius = parseInt(items / 2);
 
                }
 

	
 
                var left = Math.max(1, (cur_page - (radius)));
 
                var right = Math.min(max_page, cur_page + (radius));
 
                return [left, cur_page, right];
 
            },
 
            render : function (id_base) {
 
                var p      = this.paginator,
 
                    c      = p.get('lastPageLinkClass'),
 
                    label  = p.get('lastPageLinkLabel'),
 
                    last   = p.getTotalPages(),
 
                    title  = p.get('lastPageLinkTitle');
 

	
 
                var _pos = this.getPos(p.getCurrentPage(), p.getTotalPages(), 5);
 
                this.leftmost_page = _pos[0];
 
                this.rightmost_page = _pos[2];
 

	
 
                this.link = document.createElement('a');
 
                this.span = document.createElement('span');
 
                $(this.span).hide();
 

	
 
                this.na   = this.span.cloneNode(false);
 

	
 
                setId(this.link, id_base + '-last-link');
 
                this.link.href      = '#';
 
                this.link.className = c;
 
                this.link.innerHTML = label;
 
                this.link.title     = title;
 
                YUE.on(this.link,'click',this.onClick,this,true);
 

	
 
                setId(this.span, id_base + '-last-span');
 
                this.span.className = c;
 
                this.span.innerHTML = label;
 

	
 
                setId(this.na, id_base + '-last-na');
 

	
 
                if (this.rightmost_page < p.getTotalPages()){
 
                    this.current = this.link;
 
                }
 
                else{
 
                    this.current = this.span;
 
                }
 

	
 
                this.current.innerHTML = p.getTotalPages();
 
                return this.current;
 
            },
 

	
 
            update : function (e) {
 
                var p      = this.paginator;
 

	
 
                var _pos = this.getPos(p.getCurrentPage(), p.getTotalPages(), 5);
 
                this.leftmost_page = _pos[0];
 
                this.rightmost_page = _pos[2];
 

	
 
                if (e && e.prevValue === e.newValue) {
 
                    return;
 
                }
 

	
 
                var par   = this.current ? this.current.parentNode : null,
 
                    after = this.link;
 
                if (par) {
 

	
 
                    // only show the last-page link when the rightmost rendered
                    // page link is below it, so the final page number does not
                    // appear twice
 
                    if (!(this.rightmost_page < p.getTotalPages())){
 
                        after = this.span;
 
                    }
 

	
 
                    if (this.current !== after) {
 
                        par.replaceChild(after,this.current);
 
                        this.current = after;
 
                    }
 
                }
 
                this.current.innerHTML = this.paginator.getTotalPages();
 

	
 
            },
 
            destroy : function () {
 
                YUE.purgeElement(this.link);
 
                this.current.parentNode.removeChild(this.current);
 
                this.link = this.span = null;
 
            },
 
            onClick : function (e) {
 
                YUE.stopEvent(e);
 
                this.paginator.setPage(this.paginator.getTotalPages());
 
            }
 
        };
 

	
 
        })();
 

	
 
    var pagi = new YAHOO.widget.Paginator({
 
        rowsPerPage: links_per_page,
 
        alwaysVisible: false,
 
        template : "{PreviousPageLink} {MyFirstPageLink} {PageLinks} {MyLastPageLink} {NextPageLink}",
 
        pageLinks: 5,
 
        containerClass: 'pagination-wh',
 
        currentPageClass: 'pager_curpage',
 
        pageLinkClass: 'pager_link',
 
        nextPageLinkLabel: '&gt;',
 
        previousPageLinkLabel: '&lt;',
 
        containers:containers
 
    });
 

	
 
    return pagi;
 
};
 

	
 
var YUI_datatable = function(data, fields, columns, countnode, sortkey, rows){
 
    var myDataSource = new YAHOO.util.DataSource(data);
 
    myDataSource.responseType = YAHOO.util.DataSource.TYPE_JSON;
 
    myDataSource.responseSchema = {
 
        resultsList: "records",
 
        fields: fields
 
        };
 
    myDataSource.doBeforeCallback = function(req, raw, res, cb) {
 
        // This is the filter function
 
        var data     = res.results || [],
 
            filtered = [],
 
            i, l;
 

	
 
        if (req) {
 
            req = req.toLowerCase();
 
            for (i = 0; i<data.length; i++) {
 
                var pos = data[i].raw_name.toLowerCase().indexOf(req);
 
                if (pos != -1) {
 
                    filtered.push(data[i]);
 
                }
 
            }
 
            res.results = filtered;
 
        }
 
        $(countnode).html(res.results.length);
 
        return res;
 
    };
 

	
 
    var myDataTable = new YAHOO.widget.DataTable("datatable_list_wrap", columns, myDataSource, {
 
        sortedBy: {key:sortkey, dir:"asc"},
 
        paginator: YUI_paginator(rows !== undefined && rows ? rows : 25, ['user-paginator']),
 
        MSG_SORTASC: _TM['MSG_SORTASC'],
 
        MSG_SORTDESC: _TM['MSG_SORTDESC'],
 
        MSG_EMPTY: _TM['MSG_EMPTY'],
 
        MSG_ERROR: _TM['MSG_ERROR'],
 
        MSG_LOADING: _TM['MSG_LOADING']
 
        });
 
    myDataTable.subscribe('postRenderEvent',function(oArgs) {
 
        tooltip_activate();
 
        quick_repo_menu();
 
        });
 

	
 
    var filterTimeout = null;
 
    var $q_filter = $('#q_filter');
 

	
 
    var updateFilter = function () {
 
        // Reset timeout
 
        filterTimeout = null;
 

	
 
        // Reset sort
 
        var state = myDataTable.getState();
 
        state.sortedBy = {key:sortkey, dir:YAHOO.widget.DataTable.CLASS_ASC};
 

	
 
        // Get filtered data
 
        myDataSource.sendRequest($q_filter.val(), {
 
            success : myDataTable.onDataReturnInitializeTable,
 
            failure : myDataTable.onDataReturnInitializeTable,
 
            scope   : myDataTable,
 
            argument: state});
 
        };
 

	
 
    $q_filter.click(function(){
 
            if(!$q_filter.hasClass('loaded')){
 
                //TODO: later, load the full list here to allow searching within groups
 
                $q_filter.addClass('loaded');
 
            }
 
        });
 

	
 
    $q_filter.keyup(function (e) {
 
            clearTimeout(filterTimeout);
 
            filterTimeout = setTimeout(updateFilter, 600);
 
        });
 
};
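
// Usage sketch (field names and the count node are assumptions): data is
// expected as {records: [...]}; note that the function above hardcodes the
// "datatable_list_wrap" container, the 'user-paginator' pagination container
// and the #q_filter input, so those elements must exist on the page.
var exampleDatatable = function (data) {
    var fields = ['raw_name', 'name', 'last_changeset', 'last_rev_raw'];
    var columns = [
        {key: "name", label: "Name", sortable: true,
         sortOptions: {sortFunction: nameSort}},
        {key: "last_changeset", label: "Tip", sortable: true,
         sortOptions: {sortFunction: revisionSort}}
    ];
    YUI_datatable(data, fields, columns, '#repo_count', 'name', 25);
};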
 

	
 
/**
 Branch sorting callback for select2, reordering the filtered results so
 prefix matches come before mid-string matches.
 **/
 
var branchSort = function(results, container, query) {
 
    if (query.term) {
 
        return results.sort(function (a, b) {
 
            // Put closed branches after open ones (a bit of a hack ...)
 
            var aClosed = a.text.indexOf("(closed)") > -1,
 
                bClosed = b.text.indexOf("(closed)") > -1;
 
            if (aClosed && !bClosed) {
 
                return 1;
 
            }
 
            if (bClosed && !aClosed) {
 
                return -1;
 
            }
 

	
 
            // Put prefix matches before mid-string matches
 
            var aPos = a.text.toLowerCase().indexOf(query.term.toLowerCase()),
 
                bPos = b.text.toLowerCase().indexOf(query.term.toLowerCase());
 
            if (aPos === 0 && bPos !== 0) {
 
                return -1;
 
            }
 
            if (bPos === 0 && aPos !== 0) {
 
                return 1;
 
            }
 

	
 
            // Default sorting
 
            if (a.text > b.text) {
 
                return 1;
 
            }
 
            if (a.text < b.text) {
 
                return -1;
 
            }
 
            return 0;
 
        });
 
    }
 
    return results;
 
};
 

	
 
var prefixFirstSort = function(results, container, query) {
 
    if (query.term) {
 
        return results.sort(function (a, b) {
 
            // if parent node, no sorting
 
            if (a.children != undefined || b.children != undefined) {
 
                return 0;
 
            }
 

	
 
            // Put prefix matches before mid-string matches
 
            var aPos = a.text.toLowerCase().indexOf(query.term.toLowerCase()),
 
                bPos = b.text.toLowerCase().indexOf(query.term.toLowerCase());
 
            if (aPos === 0 && bPos !== 0) {
 
                return -1;
 
            }
 
            if (bPos === 0 && aPos !== 0) {
 
                return 1;
 
            }
 

	
 
            // Default sorting
 
            if (a.text > b.text) {
 
                return 1;
 
            }
 
            if (a.text < b.text) {
 
                return -1;
 
            }
 
            return 0;
 
        });
 
    }
 
    return results;
 
};
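
// Sketch (element id is an assumption): both sorters above have the
// (results, container, query) signature of select2's sortResults callback,
// so they can be plugged straight into a select2 setup:
var exampleBranchSelect = function () {
    $('#branch').select2({sortResults: branchSort});
};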
 

	
 
// global hooks after DOM is loaded
 

	
 
$(document).ready(function(){
 
    $('.diff-collapse-button').click(function(e) {
 
        var $button = $(e.currentTarget);
 
        var $target = $('#' + $button.attr('target'));
 
        if($target.hasClass('hidden')){
 
            $target.removeClass('hidden');
 
            $button.html("&uarr; {0} &uarr;".format(_TM['Collapse Diff']));
 
        }
 
        else {
 
            $target.addClass('hidden');
 
            $button.html("&darr; {0} &darr;".format(_TM['Expand Diff']));
 
        }
 
    });
 
});
kallithea/tests/fixtures/git_diff_rename_file.diff
Show inline comments
 
diff --git a/work-horus.xls b/file.xls
 
similarity index 100%
 
rename from work-horus.xls
 
rename to file.xls
 
diff --git a/files/var/www/favicon.ico b/files/var/www/favicon.ico/DEFAULT
 
old mode 100644
 
new mode 100755
 
similarity index 100%
 
rename from files/var/www/favicon.ico
 
rename to files/var/www/favicon.ico/DEFAULT
kallithea/tests/models/test_diff_parsers.py
Show inline comments
 
from kallithea.tests import *
 
from kallithea.lib.diffs import DiffProcessor, NEW_FILENODE, DEL_FILENODE, \
 
    MOD_FILENODE, RENAMED_FILENODE, CHMOD_FILENODE, BIN_FILENODE, COPIED_FILENODE
 
from kallithea.tests.fixture import Fixture
 

	
 
fixture = Fixture()
 

	
 

	
 
DIFF_FIXTURES = {
 
    'hg_diff_add_single_binary_file.diff': [
 
        ('US Warszawa.jpg', 'A',
 
         {'added': 0,
 
          'deleted': 0,
 
          'binary': True,
 
          'ops': {NEW_FILENODE: 'new file 100755',
 
                  BIN_FILENODE: 'binary diff not shown'}}),
 
    ],
 
    'hg_diff_mod_single_binary_file.diff': [
 
        ('US Warszawa.jpg', 'M',
 
         {'added': 0,
 
          'deleted': 0,
 
          'binary': True,
 
          'ops': {MOD_FILENODE: 'modified file',
 
                  BIN_FILENODE: 'binary diff not shown'}}),
 
    ],
 

	
 
    'hg_diff_mod_single_file_and_rename_and_chmod.diff': [
 
        ('README', 'R',
 
         {'added': 3,
 
          'deleted': 0,
 
          'binary': False,
 
          'ops': {RENAMED_FILENODE: 'file renamed from README.rst to README',
 
                  CHMOD_FILENODE: 'modified file chmod 100755 => 100644'}}),
 
    ],
 
    'hg_diff_mod_file_and_rename.diff': [
 
        ('README.rst', 'R',
 
         {'added': 3,
 
          'deleted': 0,
 
          'binary': False,
 
          'ops': {RENAMED_FILENODE: 'file renamed from README to README.rst'}}),
 
    ],
 
    'hg_diff_del_single_binary_file.diff': [
 
        ('US Warszawa.jpg', 'D',
 
         {'added': 0,
 
          'deleted': 0,
 
          'binary': True,
 
          'ops': {DEL_FILENODE: 'deleted file',
 
                  BIN_FILENODE: 'binary diff not shown'}}),
 
    ],
 
    'hg_diff_chmod_and_mod_single_binary_file.diff': [
 
        ('gravatar.png', 'M',
 
         {'added': 0,
 
          'deleted': 0,
 
          'binary': True,
 
          'ops': {CHMOD_FILENODE: 'modified file chmod 100644 => 100755',
 
                  BIN_FILENODE: 'binary diff not shown'}}),
 
    ],
 
    'hg_diff_chmod.diff': [
 
        ('file', 'M',
 
         {'added': 0,
 
          'deleted': 0,
 
          'binary': True,
 
          'ops': {CHMOD_FILENODE: 'modified file chmod 100755 => 100644'}}),
 
    ],
 
    'hg_diff_rename_file.diff': [
 
        ('file_renamed', 'R',
 
         {'added': 0,
 
          'deleted': 0,
 
          'binary': True,
 
          'ops': {RENAMED_FILENODE: 'file renamed from file to file_renamed'}}),
 
    ],
 
    'hg_diff_rename_and_chmod_file.diff': [
 
        ('README', 'R',
 
         {'added': 0,
 
          'deleted': 0,
 
          'binary': True,
 
          'ops': {CHMOD_FILENODE: 'modified file chmod 100644 => 100755',
 
                  RENAMED_FILENODE: 'file renamed from README.rst to README'}}),
 
    ],
 
    'hg_diff_binary_and_normal.diff': [
 
        ('img/baseline-10px.png', 'A',
 
         {'added': 0,
 
          'deleted': 0,
 
          'binary': True,
 
          'ops': {NEW_FILENODE: 'new file 100644',
 
                  BIN_FILENODE: 'binary diff not shown'}}),
 
        ('img/baseline-20px.png', 'D',
 
         {'added': 0,
 
          'deleted': 0,
 
          'binary': True,
 
          'ops': {DEL_FILENODE: 'deleted file',
 
                  BIN_FILENODE: 'binary diff not shown'}}),
 
        ('index.html', 'M',
 
         {'added': 3,
 
          'deleted': 2,
 
          'binary': False,
 
          'ops': {MOD_FILENODE: 'modified file'}}),
 
        ('js/global.js', 'D',
 
         {'added': 0,
 
          'deleted': 75,
 
          'binary': False,
 
          'ops': {DEL_FILENODE: 'deleted file'}}),
 
        ('js/jquery/hashgrid.js', 'A',
 
         {'added': 340,
 
          'deleted': 0,
 
          'binary': False,
 
          'ops': {NEW_FILENODE: 'new file 100755'}}),
 
        ('less/docs.less', 'M',
 
         {'added': 34,
 
          'deleted': 0,
 
          'binary': False,
 
          'ops': {MOD_FILENODE: 'modified file'}}),
 
        ('less/scaffolding.less', 'M',
 
         {'added': 1,
 
          'deleted': 3,
 
          'binary': False,
 
          'ops': {MOD_FILENODE: 'modified file'}}),
 
        ('readme.markdown', 'M',
 
         {'added': 1,
 
          'deleted': 10,
 
          'binary': False,
 
          'ops': {MOD_FILENODE: 'modified file'}}),
 
    ],
 
    'git_diff_chmod.diff': [
 
        ('work-horus.xls', 'M',
 
         {'added': 0,
 
          'deleted': 0,
 
          'binary': True,
 
          'ops': {CHMOD_FILENODE: 'modified file chmod 100644 => 100755'}})
 
    ],
 
    'git_diff_rename_file.diff': [
 
        ('file.xls', 'R',
 
         {'added': 0,
 
          'deleted': 0,
 
          'binary': True,
 
          'ops': {RENAMED_FILENODE: 'file renamed from work-horus.xls to file.xls'}}),
 
        ('files/var/www/favicon.ico/DEFAULT',
 
         'R',
 
         {'added': 0,
 
          'binary': True,
 
          'deleted': 0,
 
          'ops': {RENAMED_FILENODE: 'file renamed from files/var/www/favicon.ico to files/var/www/favicon.ico/DEFAULT',
                  CHMOD_FILENODE: 'modified file chmod 100644 => 100755'}})
 
    ],
 
    'git_diff_mod_single_binary_file.diff': [
 
        ('US Warszawa.jpg', 'M',
 
         {'added': 0,
 
          'deleted': 0,
 
          'binary': True,
 
          'ops': {MOD_FILENODE: 'modified file',
 
                  BIN_FILENODE: 'binary diff not shown'}})
 
    ],
 
    'git_diff_binary_and_normal.diff': [
 
        ('img/baseline-10px.png', 'A',
 
         {'added': 0,
 
          'deleted': 0,
 
          'binary': True,
 
          'ops': {NEW_FILENODE: 'new file 100644',
 
                  BIN_FILENODE: 'binary diff not shown'}}),
 
        ('img/baseline-20px.png', 'D',
 
         {'added': 0,
 
          'deleted': 0,
 
          'binary': True,
 
          'ops': {DEL_FILENODE: 'deleted file',
 
                  BIN_FILENODE: 'binary diff not shown'}}),
 
        ('index.html', 'M',
 
         {'added': 3,
 
          'deleted': 2,
 
          'binary': False,
 
          'ops': {MOD_FILENODE: 'modified file'}}),
 
        ('js/global.js', 'D',
 
         {'added': 0,
 
          'deleted': 75,
 
          'binary': False,
 
          'ops': {DEL_FILENODE: 'deleted file'}}),
 
        ('js/jquery/hashgrid.js', 'A',
 
         {'added': 340,
 
          'deleted': 0,
 
          'binary': False,
 
          'ops': {NEW_FILENODE: 'new file 100755'}}),
 
        ('less/docs.less', 'M',
 
         {'added': 34,
 
          'deleted': 0,
 
          'binary': False,
 
          'ops': {MOD_FILENODE: 'modified file'}}),
 
        ('less/scaffolding.less', 'M',
 
         {'added': 1,
 
          'deleted': 3,
 
          'binary': False,
 
          'ops': {MOD_FILENODE: 'modified file'}}),
 
        ('readme.markdown', 'M',
 
         {'added': 1,
 
          'deleted': 10,
 
          'binary': False,
 
          'ops': {MOD_FILENODE: 'modified file'}}),
 
    ],
 
    'diff_with_diff_data.diff': [
 
        ('vcs/backends/base.py', 'M',
 
         {'added': 18,
 
          'deleted': 2,
 
          'binary': False,
 
          'ops': {MOD_FILENODE: 'modified file'}}),
 
        ('vcs/backends/git/repository.py', 'M',
 
         {'added': 46,
 
          'deleted': 15,
 
          'binary': False,
 
          'ops': {MOD_FILENODE: 'modified file'}}),
 
        ('vcs/backends/hg.py', 'M',
 
         {'added': 22,
 
          'deleted': 3,
 
          'binary': False,
 
          'ops': {MOD_FILENODE: 'modified file'}}),
 
        ('vcs/tests/test_git.py', 'M',
 
         {'added': 5,
 
          'deleted': 5,
 
          'binary': False,
 
          'ops': {MOD_FILENODE: 'modified file'}}),
 
        ('vcs/tests/test_repository.py', 'M',
 
         {'added': 174,
 
          'deleted': 2,
 
          'binary': False,
 
          'ops': {MOD_FILENODE: 'modified file'}}),
 
    ],
 
    'git_diff_modify_binary_file.diff': [
 
        ('file.name', 'M',
 
         {'added': 0,
 
          'deleted': 0,
 
          'binary': True,
 
          'ops': {MOD_FILENODE: 'modified file',
 
                  BIN_FILENODE: 'binary diff not shown'}})
 
    ],
 
    'hg_diff_copy_file.diff': [
 
        ('file2', 'M',
 
         {'added': 0,
 
          'deleted': 0,
 
          'binary': True,
 
          'ops': {COPIED_FILENODE: 'file copied from file1 to file2'}}),
 
    ],
 
    'hg_diff_copy_and_modify_file.diff': [
 
        ('file3', 'M',
 
         {'added': 1,
 
          'deleted': 0,
 
          'binary': False,
 
          'ops': {COPIED_FILENODE: 'file copied from file2 to file3',
 
                  MOD_FILENODE: 'modified file'}}),
 
    ],
 
    'hg_diff_copy_and_chmod_file.diff': [
 
        ('file4', 'M',
 
         {'added': 0,
 
          'deleted': 0,
 
          'binary': True,
 
          'ops': {COPIED_FILENODE: 'file copied from file3 to file4',
 
                  CHMOD_FILENODE: 'modified file chmod 100644 => 100755'}}),
 
    ],
 
    'hg_diff_copy_chmod_and_edit_file.diff': [
 
        ('file5', 'M',
 
         {'added': 2,
 
          'deleted': 1,
 
          'binary': False,
 
          'ops': {COPIED_FILENODE: 'file copied from file4 to file5',
 
                  CHMOD_FILENODE: 'modified file chmod 100755 => 100644',
 
                  MOD_FILENODE: 'modified file'}}),
 
    ],
 
    'hg_diff_rename_space_cr.diff': [
 
        ('oh yes', 'R',
 
         {'added': 3,
 
          'deleted': 2,
 
          'binary': False,
 
          'ops': {RENAMED_FILENODE: 'file renamed from oh no to oh yes'}}),
 
    ],
 
}
 

	
 

	
 
class DiffLibTest(BaseTestCase):
 

	
 
    @parameterized.expand([(x,) for x in DIFF_FIXTURES])
 
    def test_diff(self, diff_fixture):
 
        diff = fixture.load_resource(diff_fixture, strip=False)
 
        diff_proc = DiffProcessor(diff)
 
        diff_proc_d = diff_proc.prepare()
 
        data = [(x['filename'], x['operation'], x['stats']) for x in diff_proc_d]
 
        expected_data = DIFF_FIXTURES[diff_fixture]
 
        self.assertListEqual(expected_data, data)
 

	
 
    def test_diff_markup(self):
 
        diff = fixture.load_resource('markuptest.diff', strip=False)
 
        diff_proc = DiffProcessor(diff)
 
        diff_proc_d = diff_proc.prepare()
 
        chunks = diff_proc_d[0]['chunks']
 
        self.assertFalse(chunks[0])
 
        #from pprint import pprint; pprint(chunks[1])
 
        l = ['\n']
 
        for d in chunks[1]:
 
            l.append('%(action)-7s %(new_lineno)3s %(old_lineno)3s %(line)r\n' % d)
 
        s = ''.join(l)
 
        print s
 
        self.assertEqual(s, r'''
 
context ... ... u'@@ -51,5 +51,12 @@\n'
 
unmod    51  51 u'<u>\t</u>begin();\n'
 
unmod    52  52 u'<u>\t</u>\n'
 
add      53     u'<u>\t</u>int foo;<u class="cr"></u>\n'
 
add      54     u'<u>\t</u>int bar; <u class="cr"></u>\n'
 
add      55     u'<u>\t</u>int baz;<u>\t</u><u class="cr"></u>\n'
 
add      56     u'<u>\t</u>int space; <i></i>'
 
add      57     u'<u>\t</u>int tab;<u>\t</u>\n'
 
add      58     u'<u>\t</u>\n'
 
unmod    59  53 u' <i></i>'
 
del          54 u'<u>\t</u><del>#define MAX_STEPS (48)</del>\n'
 
add      60     u'<u>\t</u><ins><u class="cr"></u></ins>\n'
 
add      61     u'<u>\t</u>#define MAX_STEPS (64)<u class="cr"></u>\n'
 
unmod    62  55 u'\n'
 
''')
0 comments (0 inline, 0 general)