Changeset - 9ae95fdeca18
[Not reviewed]
Marcin Kuzminski - 2012-09-03 21:59:31
marcin@python-works.com
merge with beta
8 files changed with 39 insertions and 30 deletions:
0 comments (0 inline, 0 general)
rhodecode/controllers/summary.py
 
@@ -6,240 +6,242 @@
 
    Summary controller for Rhodecode
 

	
 
    :created_on: Apr 18, 2010
 
    :author: marcink
 
    :copyright: (C) 2010-2012 Marcin Kuzminski <marcin@python-works.com>
 
    :license: GPLv3, see COPYING for more details.
 
"""
 
# This program is free software: you can redistribute it and/or modify
 
# it under the terms of the GNU General Public License as published by
 
# the Free Software Foundation, either version 3 of the License, or
 
# (at your option) any later version.
 
#
 
# This program is distributed in the hope that it will be useful,
 
# but WITHOUT ANY WARRANTY; without even the implied warranty of
 
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 
# GNU General Public License for more details.
 
#
 
# You should have received a copy of the GNU General Public License
 
# along with this program.  If not, see <http://www.gnu.org/licenses/>.
 

	
 
import traceback
 
import calendar
 
import logging
 
import urllib
 
from time import mktime
 
from datetime import timedelta, date
 
from urlparse import urlparse
 
from rhodecode.lib.compat import product
 

	
 
from rhodecode.lib.vcs.exceptions import ChangesetError, EmptyRepositoryError, \
 
    NodeDoesNotExistError
 

	
 
from pylons import tmpl_context as c, request, url, config
 
from pylons.i18n.translation import _
 

	
 
from beaker.cache import cache_region, region_invalidate
 

	
 
from rhodecode.config.conf import ALL_READMES, ALL_EXTS, LANGUAGES_EXTENSIONS_MAP
 
from rhodecode.model.db import Statistics, CacheInvalidation
 
from rhodecode.lib.utils2 import safe_unicode
 
from rhodecode.lib.auth import LoginRequired, HasRepoPermissionAnyDecorator
 
from rhodecode.lib.base import BaseRepoController, render
 
from rhodecode.lib.vcs.backends.base import EmptyChangeset
 
from rhodecode.lib.markup_renderer import MarkupRenderer
 
from rhodecode.lib.celerylib import run_task
 
from rhodecode.lib.celerylib.tasks import get_commits_stats
 
from rhodecode.lib.helpers import RepoPage
 
from rhodecode.lib.compat import json, OrderedDict
 
from rhodecode.lib.vcs.nodes import FileNode
 

	
 
log = logging.getLogger(__name__)
 

	
 
README_FILES = [''.join([x[0][0], x[1][0]]) for x in
 
                    sorted(list(product(ALL_READMES, ALL_EXTS)),
 
                           key=lambda y:y[0][1] + y[1][1])]
 

	
 

	
 
class SummaryController(BaseRepoController):
 

	
 
    @LoginRequired()
 
    @HasRepoPermissionAnyDecorator('repository.read', 'repository.write',
 
                                   'repository.admin')
 
    def __before__(self):
 
        super(SummaryController, self).__before__()
 

	
 
    def index(self, repo_name):
 
        c.dbrepo = dbrepo = c.rhodecode_db_repo
 
        c.following = self.scm_model.is_following_repo(repo_name,
 
                                                self.rhodecode_user.user_id)
 

	
 
        def url_generator(**kw):
 
            return url('shortlog_home', repo_name=repo_name, size=10, **kw)
 

	
 
        c.repo_changesets = RepoPage(c.rhodecode_repo, page=1,
 
                                     items_per_page=10, url=url_generator)
 

	
 
        if self.rhodecode_user.username == 'default':
 
            # for default(anonymous) user we don't need to pass credentials
 
            username = ''
 
            password = ''
 
        else:
 
            username = str(self.rhodecode_user.username)
 
            password = '@'
 

	
 
        parsed_url = urlparse(url.current(qualified=True))
 

	
 
        default_clone_uri = '{scheme}://{user}{pass}{netloc}{path}'
 

	
 
        uri_tmpl = config.get('clone_uri', default_clone_uri)
 
        uri_tmpl = uri_tmpl.replace('{', '%(').replace('}', ')s')
 
        decoded_path = safe_unicode(urllib.unquote(parsed_url.path))
 
        uri_dict = {
 
           'user': username,
 
           'pass': password,
 
           'scheme': parsed_url.scheme,
 
           'netloc': parsed_url.netloc,
 
           'path': decoded_path
 
        }
 

	
 
        uri = uri_tmpl % uri_dict
 
        # generate another clone url by id
 
        uri_dict.update(
 
         {'path': decoded_path.replace(repo_name, '_%s' % c.dbrepo.repo_id)}
 
        )
 
        uri_id = uri_tmpl % uri_dict
 

	
 
        c.clone_repo_url = uri
 
        c.clone_repo_url_id = uri_id
 
        c.repo_tags = OrderedDict()
 
        for name, hash_ in c.rhodecode_repo.tags.items()[:10]:
 
            try:
 
                c.repo_tags[name] = c.rhodecode_repo.get_changeset(hash_)
 
            except ChangesetError:
 
                c.repo_tags[name] = EmptyChangeset(hash_)
 

	
 
        c.repo_branches = OrderedDict()
 
        for name, hash_ in c.rhodecode_repo.branches.items()[:10]:
 
            try:
 
                c.repo_branches[name] = c.rhodecode_repo.get_changeset(hash_)
 
            except ChangesetError:
 
                c.repo_branches[name] = EmptyChangeset(hash_)
 

	
 
        td = date.today() + timedelta(days=1)
 
        td_1m = td - timedelta(days=calendar.mdays[td.month])
 
        td_1y = td - timedelta(days=365)
 

	
 
        ts_min_m = mktime(td_1m.timetuple())
 
        ts_min_y = mktime(td_1y.timetuple())
 
        ts_max_y = mktime(td.timetuple())
 

	
 
        if dbrepo.enable_statistics:
 
            c.show_stats = True
 
            c.no_data_msg = _('No data loaded yet')
 
            run_task(get_commits_stats, c.dbrepo.repo_name, ts_min_y, ts_max_y)
 
        else:
 
            c.show_stats = False
 
            c.no_data_msg = _('Statistics are disabled for this repository')
 
        c.ts_min = ts_min_m
 
        c.ts_max = ts_max_y
 

	
 
        stats = self.sa.query(Statistics)\
 
            .filter(Statistics.repository == dbrepo)\
 
            .scalar()
 

	
 
        c.stats_percentage = 0
 

	
 
        if stats and stats.languages:
 
            c.no_data = False is dbrepo.enable_statistics
 
            lang_stats_d = json.loads(stats.languages)
 
            c.commit_data = stats.commit_activity
 
            c.overview_data = stats.commit_activity_combined
 

	
 
            lang_stats = ((x, {"count": y,
 
                               "desc": LANGUAGES_EXTENSIONS_MAP.get(x)})
 
                          for x, y in lang_stats_d.items())
 

	
 
            c.trending_languages = json.dumps(
 
                sorted(lang_stats, reverse=True, key=lambda k: k[1])[:10]
 
            )
 
            last_rev = stats.stat_on_revision + 1
 
            c.repo_last_rev = c.rhodecode_repo.count()\
 
                if c.rhodecode_repo.revisions else 0
 
            if last_rev == 0 or c.repo_last_rev == 0:
 
                pass
 
            else:
 
                c.stats_percentage = '%.2f' % ((float((last_rev)) /
 
                                                c.repo_last_rev) * 100)
 
        else:
 
            c.commit_data = json.dumps({})
 
            c.overview_data = json.dumps([[ts_min_y, 0], [ts_max_y, 10]])
 
            c.trending_languages = json.dumps({})
 
            c.no_data = True
 

	
 
        c.enable_downloads = dbrepo.enable_downloads
 
        if c.enable_downloads:
 
            c.download_options = self._get_download_links(c.rhodecode_repo)
 

	
 
        c.readme_data, c.readme_file = \
 
            self.__get_readme_data(c.rhodecode_db_repo)
 
        return render('summary/summary.html')
 

	
 
    def __get_readme_data(self, db_repo):
 
        repo_name = db_repo.repo_name
 

	
 
        @cache_region('long_term')
 
        def _get_readme_from_cache(key):
 
            readme_data = None
 
            readme_file = None
 
            log.debug('Looking for README file')
 
            try:
 
                # gets the landing revision, or tip if that fails
 
                cs = db_repo.get_landing_changeset()
 
                if isinstance(cs, EmptyChangeset):
 
                    raise EmptyRepositoryError()
 
                renderer = MarkupRenderer()
 
                for f in README_FILES:
 
                    try:
 
                        readme = cs.get_node(f)
 
                        if not isinstance(readme, FileNode):
 
                            continue
 
                        readme_file = f
 
                        log.debug('Found README file `%s` rendering...' %
 
                                  readme_file)
 
                        readme_data = renderer.render(readme.content, f)
 
                        break
 
                    except NodeDoesNotExistError:
 
                        continue
 
            except ChangesetError:
 
                log.error(traceback.format_exc())
 
                pass
 
            except EmptyRepositoryError:
 
                pass
 
            except Exception:
 
                log.error(traceback.format_exc())
 

	
 
            return readme_data, readme_file
 

	
 
        key = repo_name + '_README'
 
        inv = CacheInvalidation.invalidate(key)
 
        if inv is not None:
 
            region_invalidate(_get_readme_from_cache, None, key)
 
            CacheInvalidation.set_valid(inv.cache_key)
 
        return _get_readme_from_cache(key)
 

	
 
    def _get_download_links(self, repo):
 

	
 
        download_l = []
 

	
 
        branches_group = ([], _("Branches"))
 
        tags_group = ([], _("Tags"))
 

	
 
        for name, chs in c.rhodecode_repo.branches.items():
 
            #chs = chs.split(':')[-1]
 
            branches_group[0].append((chs, name),)
 
        download_l.append(branches_group)
 

	
 
        for name, chs in c.rhodecode_repo.tags.items():
 
            #chs = chs.split(':')[-1]
 
            tags_group[0].append((chs, name),)
 
        download_l.append(tags_group)
 

	
 
        return download_l
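
The index() action above rewrites the brace-style clone_uri template into %-style formatting and fills it from the parsed request URL. A minimal sketch of that substitution, with a hypothetical request URL and username (not taken from this changeset):

import urllib
from urlparse import urlparse

uri_tmpl = '{scheme}://{user}{pass}{netloc}{path}'
# rewrite brace placeholders into %-style ones, as index() does
uri_tmpl = uri_tmpl.replace('{', '%(').replace('}', ')s')

parsed_url = urlparse('https://example.com/myrepo')
uri_dict = {
    'user': 'marcink',               # hypothetical logged-in username
    'pass': '@',                     # acts as the user/host separator in the rendered URL
    'scheme': parsed_url.scheme,
    'netloc': parsed_url.netloc,
    'path': urllib.unquote(parsed_url.path),
}
print uri_tmpl % uri_dict            # -> https://marcink@example.com/myrepo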
rhodecode/lib/compat.py
 
@@ -218,368 +218,377 @@ class _odict(object):
 
        while curr_key != _nil:
 
            _, val, next_key = dict_impl.__getitem__(self, curr_key)
 
            yield curr_key, val
 
            curr_key = next_key
 

	
 
    def items(self):
 
        return list(self.iteritems())
 

	
 
    def sort(self, cmp=None, key=None, reverse=False):
 
        items = [(k, v) for k, v in self.items()]
 
        if cmp is not None:
 
            items = sorted(items, cmp=cmp)
 
        elif key is not None:
 
            items = sorted(items, key=key)
 
        else:
 
            items = sorted(items, key=lambda x: x[1])
 
        if reverse:
 
            items.reverse()
 
        self.clear()
 
        self.__init__(items)
 

	
 
    def clear(self):
 
        dict_impl = self._dict_impl()
 
        dict_impl.clear(self)
 
        dict_impl.__setattr__(self, 'lh', _nil)
 
        dict_impl.__setattr__(self, 'lt', _nil)
 

	
 
    def copy(self):
 
        return self.__class__(self)
 

	
 
    def update(self, data=(), **kwds):
 
        if kwds:
 
            raise TypeError("update() of ordered dict takes no keyword "
 
                            "arguments to avoid an ordering trap.")
 
        if hasattr(data, "iteritems"):
 
            data = data.iteritems()
 
        for key, val in data:
 
            self[key] = val
 

	
 
    def setdefault(self, k, x=None):
 
        try:
 
            return self[k]
 
        except KeyError:
 
            self[k] = x
 
            return x
 

	
 
    def pop(self, k, x=_nil):
 
        try:
 
            val = self[k]
 
            del self[k]
 
            return val
 
        except KeyError:
 
            if x == _nil:
 
                raise
 
            return x
 

	
 
    def popitem(self):
 
        try:
 
            dict_impl = self._dict_impl()
 
            key = dict_impl.__getattribute__(self, 'lt')
 
            return key, self.pop(key)
 
        except KeyError:
 
            raise KeyError("'popitem(): ordered dictionary is empty'")
 

	
 
    def riterkeys(self):
 
        """To iterate on keys in reversed order.
 
        """
 
        dict_impl = self._dict_impl()
 
        curr_key = dict_impl.__getattribute__(self, 'lt')
 
        while curr_key != _nil:
 
            yield curr_key
 
            curr_key = dict_impl.__getitem__(self, curr_key)[0]
 

	
 
    __reversed__ = riterkeys
 

	
 
    def rkeys(self):
 
        """List of the keys in reversed order.
 
        """
 
        return list(self.riterkeys())
 

	
 
    def ritervalues(self):
 
        """To iterate on values in reversed order.
 
        """
 
        dict_impl = self._dict_impl()
 
        curr_key = dict_impl.__getattribute__(self, 'lt')
 
        while curr_key != _nil:
 
            curr_key, val, _ = dict_impl.__getitem__(self, curr_key)
 
            yield val
 

	
 
    def rvalues(self):
 
        """List of the values in reversed order.
 
        """
 
        return list(self.ritervalues())
 

	
 
    def riteritems(self):
 
        """To iterate on (key, value) in reversed order.
 
        """
 
        dict_impl = self._dict_impl()
 
        curr_key = dict_impl.__getattribute__(self, 'lt')
 
        while curr_key != _nil:
 
            pred_key, val, _ = dict_impl.__getitem__(self, curr_key)
 
            yield curr_key, val
 
            curr_key = pred_key
 

	
 
    def ritems(self):
 
        """List of the (key, value) in reversed order.
 
        """
 
        return list(self.riteritems())
 

	
 
    def firstkey(self):
 
        if self:
 
            return self._dict_impl().__getattribute__(self, 'lh')
 
        else:
 
            raise KeyError("'firstkey(): ordered dictionary is empty'")
 

	
 
    def lastkey(self):
 
        if self:
 
            return self._dict_impl().__getattribute__(self, 'lt')
 
        else:
 
            raise KeyError("'lastkey(): ordered dictionary is empty'")
 

	
 
    def as_dict(self):
 
        return self._dict_impl()(self.items())
 

	
 
    def _repr(self):
 
        """_repr(): low level repr of the whole data contained in the odict.
 
        Useful for debugging.
 
        """
 
        dict_impl = self._dict_impl()
 
        form = "odict low level repr lh,lt,data: %r, %r, %s"
 
        return form % (dict_impl.__getattribute__(self, 'lh'),
 
                       dict_impl.__getattribute__(self, 'lt'),
 
                       dict_impl.__repr__(self))
 

	
 

	
 
class OrderedDict(_odict, dict):
 

	
 
    def _dict_impl(self):
 
        return dict
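
The OrderedDict built on _odict above keeps insertion order and adds the reversed-iteration helpers (ritems, riterkeys, firstkey, lastkey). A small usage sketch with hypothetical branch names:

od = OrderedDict()
od['tip'] = 'abc123'
od['stable'] = 'def456'
assert od.items() == [('tip', 'abc123'), ('stable', 'def456')]   # insertion order preserved
assert od.ritems() == [('stable', 'def456'), ('tip', 'abc123')]  # reversed helper
assert od.firstkey() == 'tip' and od.lastkey() == 'stable'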
 

	
 

	
 
#==============================================================================
 
# OrderedSet
 
#==============================================================================
 
from sqlalchemy.util import OrderedSet
 

	
 

	
 
#==============================================================================
 
# kill FUNCTIONS
 
#==============================================================================
 
if __platform__ in PLATFORM_WIN:
 
    import ctypes
 

	
 
    def kill(pid, sig):
 
        """kill function for Win32"""
 
        kernel32 = ctypes.windll.kernel32
 
        handle = kernel32.OpenProcess(1, 0, pid)
 
        return (0 != kernel32.TerminateProcess(handle, 0))
 

	
 
else:
 
    kill = os.kill
 

	
 

	
 
#==============================================================================
 
# itertools.product
 
#==============================================================================
 

	
 
try:
 
    from itertools import product
 
except ImportError:
 
    def product(*args, **kwds):
 
        # product('ABCD', 'xy') --> Ax Ay Bx By Cx Cy Dx Dy
 
        # product(range(2), repeat=3) --> 000 001 010 011 100 101 110 111
 
        pools = map(tuple, args) * kwds.get('repeat', 1)
 
        result = [[]]
 
        for pool in pools:
 
            result = [x + [y] for x in result for y in pool]
 
        for prod in result:
 
            yield tuple(prod)
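
The product() fallback above mirrors itertools.product, and summary.py relies on it (via rhodecode.lib.compat) to pair README basenames with extensions. A quick sketch of the expected output, in the same doctest style the inline comments already use:

>>> list(product('AB', 'xy'))
[('A', 'x'), ('A', 'y'), ('B', 'x'), ('B', 'y')]
>>> list(product(range(2), repeat=2))
[(0, 0), (0, 1), (1, 0), (1, 1)]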
 

	
 

	
 
#==============================================================================
 
# BytesIO
 
#==============================================================================
 

	
 
try:
 
    from io import BytesIO
 
except ImportError:
 
    from cStringIO import StringIO as BytesIO
 

	
 

	
 
#==============================================================================
 
# bytes
 
#==============================================================================
 
if __py_version__ >= (2, 6):
 
    _bytes = bytes
 
else:
 
    # in py2.6 bytes is a synonym for str
 
    _bytes = str
 

	
 
#==============================================================================
 
# deque
 
#==============================================================================
 

	
 
if __py_version__ >= (2, 6):
 
    from collections import deque
 
else:
 
    # need to implement our own deque with maxlen
 
    class deque(object):
 

	
 
-        def __init__(self, iterable=(), maxlen=-1):
+        def __init__(self, iterable=(), maxlen= -1):
 
            if not hasattr(self, 'data'):
 
                self.left = self.right = 0
 
                self.data = {}
 
-            self.maxlen = maxlen
+            self.maxlen = maxlen or -1
 
            self.extend(iterable)
 

	
 
        def append(self, x):
 
            self.data[self.right] = x
 
            self.right += 1
 
            if self.maxlen != -1 and len(self) > self.maxlen:
 
                self.popleft()
 

	
 
        def appendleft(self, x):
 
            self.left -= 1
 
            self.data[self.left] = x
 
            if self.maxlen != -1 and len(self) > self.maxlen:
 
                self.pop()
 

	
 
        def pop(self):
 
            if self.left == self.right:
 
                raise IndexError('cannot pop from empty deque')
 
            self.right -= 1
 
            elem = self.data[self.right]
 
            del self.data[self.right]
 
            return elem
 

	
 
        def popleft(self):
 
            if self.left == self.right:
 
                raise IndexError('cannot pop from empty deque')
 
            elem = self.data[self.left]
 
            del self.data[self.left]
 
            self.left += 1
 
            return elem
 

	
 
        def clear(self):
 
            self.data.clear()
 
            self.left = self.right = 0
 

	
 
        def extend(self, iterable):
 
            for elem in iterable:
 
                self.append(elem)
 

	
 
        def extendleft(self, iterable):
 
            for elem in iterable:
 
                self.appendleft(elem)
 

	
 
        def rotate(self, n=1):
 
            if self:
 
                n %= len(self)
 
                for i in xrange(n):
 
                    self.appendleft(self.pop())
 

	
 
        def __getitem__(self, i):
 
            if i < 0:
 
                i += len(self)
 
            try:
 
                return self.data[i + self.left]
 
            except KeyError:
 
                raise IndexError
 

	
 
        def __setitem__(self, i, value):
 
            if i < 0:
 
                i += len(self)
 
            try:
 
                self.data[i + self.left] = value
 
            except KeyError:
 
                raise IndexError
 

	
 
        def __delitem__(self, i):
 
            size = len(self)
 
            if not (-size <= i < size):
 
                raise IndexError
 
            data = self.data
 
            if i < 0:
 
                i += size
 
            for j in xrange(self.left + i, self.right - 1):
 
                data[j] = data[j + 1]
 
            self.pop()
 

	
 
        def __len__(self):
 
            return self.right - self.left
 

	
 
        def __cmp__(self, other):
 
            if type(self) != type(other):
 
                return cmp(type(self), type(other))
 
            return cmp(list(self), list(other))
 

	
 
        def __repr__(self, _track=[]):
 
            if id(self) in _track:
 
                return '...'
 
            _track.append(id(self))
 
            r = 'deque(%r, maxlen=%s)' % (list(self), self.maxlen)
 
            _track.remove(id(self))
 
            return r
 

	
 
        def __getstate__(self):
 
            return (tuple(self),)
 

	
 
        def __setstate__(self, s):
 
            self.__init__(s[0])
 

	
 
        def __hash__(self):
 
            raise TypeError
 

	
 
        def __copy__(self):
 
            return self.__class__(self)
 

	
 
        def __deepcopy__(self, memo={}):
 
            from copy import deepcopy
 
            result = self.__class__()
 
            memo[id(self)] = result
 
            result.__init__(deepcopy(tuple(self), memo))
 
            return result
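
The fallback deque above drops elements from the opposite end once maxlen is exceeded, matching collections.deque for the operations used by subprocessio later in this changeset. A behaviour sketch:

d = deque('abc', maxlen=3)
d.append('d')            # exceeds maxlen, so 'a' is evicted from the left
assert list(d) == ['b', 'c', 'd']
d.appendleft('z')        # exceeds maxlen again, so 'd' is evicted from the right
assert list(d) == ['z', 'b', 'c']
d.rotate(1)              # rightmost element moves to the front
assert list(d) == ['c', 'z', 'b']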
 

	
 

	
 
#==============================================================================
 
# threading.Event
 
#==============================================================================
 

	
 
if __py_version__ >= (2, 6):
 
-    from threading import Event
+    from threading import Event, Thread
 
else:
 
-    from threading import _Verbose, Condition, Lock
+    from threading import _Verbose, Condition, Lock, Thread
 

	
 
    def Event(*args, **kwargs):
 
        return _Event(*args, **kwargs)
 

	
 
    class _Event(_Verbose):
 

	
 
        # After Tim Peters' event class (without is_posted())
 

	
 
        def __init__(self, verbose=None):
 
            _Verbose.__init__(self, verbose)
 
            self.__cond = Condition(Lock())
 
            self.__flag = False
 

	
 
        def isSet(self):
 
            return self.__flag
 

	
 
        is_set = isSet
 

	
 
        def set(self):
 
            self.__cond.acquire()
 
            try:
 
                self.__flag = True
 
                self.__cond.notify_all()
 
            finally:
 
                self.__cond.release()
 

	
 
        def clear(self):
 
            self.__cond.acquire()
 
            try:
 
                self.__flag = False
 
            finally:
 
                self.__cond.release()
 

	
 
        def wait(self, timeout=None):
 
            self.__cond.acquire()
 
            try:
 
                if not self.__flag:
 
                    self.__cond.wait(timeout)
 
            finally:
 
                self.__cond.release()
 

	
 

	
 

	
rhodecode/lib/subprocessio.py
 
'''
 
Module provides a class allowing to wrap communication over subprocess.Popen
input, output, error streams into a meaningful, non-blocking, concurrent
stream processor exposing the output data as an iterator fitting to be a
return value passed by a WSGI application to a WSGI server per PEP 3333.
 

	
 
Copyright (c) 2011  Daniel Dotsenko <dotsa@hotmail.com>
 

	
 
This file is part of git_http_backend.py Project.
 

	
 
git_http_backend.py Project is free software: you can redistribute it and/or
 
modify it under the terms of the GNU Lesser General Public License as
 
published by the Free Software Foundation, either version 2.1 of the License,
 
or (at your option) any later version.
 

	
 
git_http_backend.py Project is distributed in the hope that it will be useful,
 
but WITHOUT ANY WARRANTY; without even the implied warranty of
 
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 
GNU Lesser General Public License for more details.
 

	
 
You should have received a copy of the GNU Lesser General Public License
 
along with git_http_backend.py Project.
 
If not, see <http://www.gnu.org/licenses/>.
 
'''
 
import os
 
import subprocess
 
import threading
 
-from rhodecode.lib.compat import deque, Event
+from rhodecode.lib.compat import deque, Event, Thread, _bytes
 

	
 

	
 
-class StreamFeeder(threading.Thread):
+class StreamFeeder(Thread):
 
    """
 
    Normal writing into a pipe-like object blocks once the buffer is filled.
    This thread allows us to feed data from a file-like object into a pipe
    without blocking the main thread.
    We close the input pipe once the end of the source stream is reached.
 
    """
 
    def __init__(self, source):
 
        super(StreamFeeder, self).__init__()
 
        self.daemon = True
 
        filelike = False
 
-        self.bytes = bytes()
-        if type(source) in (type(''), bytes, bytearray):  # string-like
-            self.bytes = bytes(source)
+        self.bytes = _bytes()
+        if type(source) in (type(''), _bytes, bytearray):  # string-like
+            self.bytes = _bytes(source)
 
        else:  # can be either file pointer or file-like
 
            if type(source) in (int, long):  # file pointer it is
 
                ## converting file descriptor (int) stdin into file-like
 
                try:
 
                    source = os.fdopen(source, 'rb', 16384)
 
                except Exception:
 
                    pass
 
            # let's see if source is file-like by now
 
            try:
 
                filelike = source.read
 
            except Exception:
 
                pass
 
        if not filelike and not self.bytes:
 
            raise TypeError("StreamFeeder's source object must be a readable "
 
                            "file-like, a file descriptor, or a string-like.")
 
        self.source = source
 
        self.readiface, self.writeiface = os.pipe()
 

	
 
    def run(self):
 
        t = self.writeiface
 
        if self.bytes:
 
            os.write(t, self.bytes)
 
        else:
 
            s = self.source
 
            b = s.read(4096)
 
            while b:
 
                os.write(t, b)
 
                b = s.read(4096)
 
        os.close(t)
 

	
 
    @property
 
    def output(self):
 
        return self.readiface
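
StreamFeeder above pushes its source into an os.pipe() from a daemon thread, so the read end can be handed to subprocess.Popen as stdin without blocking the caller. A standalone usage sketch (not part of the changeset), feeding a plain string through the pipe and reading it back:

import os

feeder = StreamFeeder('some input data')
feeder.start()                 # the thread writes the bytes and closes the write end
fd = feeder.output             # read end of the pipe, usable as Popen(..., stdin=fd)
chunks = []
while True:
    chunk = os.read(fd, 4096)
    if not chunk:              # empty read means the write end was closed
        break
    chunks.append(chunk)
feeder.join()
os.close(fd)
assert ''.join(chunks) == 'some input data'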
 

	
 

	
 
-class InputStreamChunker(threading.Thread):
+class InputStreamChunker(Thread):
 
    def __init__(self, source, target, buffer_size, chunk_size):
 

	
 
        super(InputStreamChunker, self).__init__()
 

	
 
        self.daemon = True  # die die die.
 

	
 
        self.source = source
 
        self.target = target
 
        self.chunk_count_max = int(buffer_size / chunk_size) + 1
 
        self.chunk_size = chunk_size
 

	
 
        self.data_added = Event()
 
        self.data_added.clear()
 

	
 
        self.keep_reading = Event()
 
        self.keep_reading.set()
 

	
 
        self.EOF = Event()
 
        self.EOF.clear()
 

	
 
        self.go = Event()
 
        self.go.set()
 

	
 
    def stop(self):
 
        self.go.clear()
 
        self.EOF.set()
 
        try:
 
            # this is not proper, but is done to force the reader thread let
 
            # go of the input because, if successful, .close() will send EOF
 
            # down the pipe.
 
            self.source.close()
 
        except:
 
            pass
 

	
 
    def run(self):
 
        s = self.source
 
        t = self.target
 
        cs = self.chunk_size
 
        ccm = self.chunk_count_max
 
        kr = self.keep_reading
 
        da = self.data_added
 
        go = self.go
 
        b = s.read(cs)
 

	
 
        while b and go.is_set():
 
            if len(t) > ccm:
 
                kr.clear()
 
                kr.wait(2)
 
#                # this only works on 2.7.x and up
 
#                if not kr.wait(10):
 
#                    raise Exception("Timed out while waiting for input to be read.")
 
                # instead we'll use this
 
                if len(t) > ccm + 3:
 
                    raise IOError("Timed out while waiting for input from subprocess.")
 
            t.append(b)
 
            da.set()
 
            b = s.read(cs)
 
        self.EOF.set()
 
        da.set()  # for cases when done but there was no input.
 

	
 

	
 
class BufferedGenerator():
 
    '''
 
    Class behaves as a non-blocking, buffered pipe reader.
    Reads chunks of data (through a thread) from a blocking pipe and appends
    them to an array (deque) of chunks.
    Reading is halted in the thread when the maximum number of chunks is
    internally buffered.
    The .next() method may operate in blocking or non-blocking fashion: it
    either yields '' when no data is ready to be sent, or does not return
    until there is some data to send.
    When we get EOF from the underlying source pipe, we set a marker to raise
    StopIteration after the last chunk of data is yielded.
 
    '''
 

	
 
    def __init__(self, source, buffer_size=65536, chunk_size=4096,
 
                 starting_values=[], bottomless=False):
 

	
 
        if bottomless:
 
            maxlen = int(buffer_size / chunk_size)
 
        else:
 
            maxlen = None
 

	
 
        self.data = deque(starting_values, maxlen)
 

	
 
        self.worker = InputStreamChunker(source, self.data, buffer_size,
 
                                         chunk_size)
 
        if starting_values:
 
            self.worker.data_added.set()
 
        self.worker.start()
 

	
 
    ####################
 
    # Generator's methods
 
    ####################
 

	
 
    def __iter__(self):
 
        return self
 

	
 
    def next(self):
 
        while not len(self.data) and not self.worker.EOF.is_set():
 
            self.worker.data_added.clear()
 
            self.worker.data_added.wait(0.2)
 
        if len(self.data):
 
            self.worker.keep_reading.set()
 
-            return bytes(self.data.popleft())
+            return _bytes(self.data.popleft())
 
        elif self.worker.EOF.is_set():
 
            raise StopIteration
 

	
 
    def throw(self, type, value=None, traceback=None):
 
        if not self.worker.EOF.is_set():
 
            raise type(value)
 

	
 
    def start(self):
 
        self.worker.start()
 

	
 
    def stop(self):
 
        self.worker.stop()
 

	
 
    def close(self):
 
        try:
 
            self.worker.stop()
 
            self.throw(GeneratorExit)
 
        except (GeneratorExit, StopIteration):
 
            pass
 

	
 
    def __del__(self):
 
        self.close()
 

	
 
    ####################
 
    # Threaded reader's infrastructure.
 
    ####################
 
    @property
 
    def input(self):
 
        return self.worker.w
 

	
 
    @property
 
    def data_added_event(self):
 
        return self.worker.data_added
 

	
 
    @property
 
    def data_added(self):
 
        return self.worker.data_added.is_set()
 

	
 
    @property
 
    def reading_paused(self):
 
        return not self.worker.keep_reading.is_set()
 

	
 
    @property
 
    def done_reading_event(self):
 
        '''
 
        Done_reading does not mean that the iterator's buffer is empty.
 
        Iterator might have done reading from underlying source, but the read
 
        chunks might still be available for serving through .next() method.
 

	
 
        @return An Event class instance.
 
        '''
 
        return self.worker.EOF
 

	
 
    @property
 
    def done_reading(self):
 
        '''
 
        Done_reading does not mean that the iterator's buffer is empty.
 
        Iterator might have done reading from underlying source, but the read
 
        chunks might still be available for serving through .next() method.
 

	
 
        @return An Bool value.
 
        '''
 
        return self.worker.EOF.is_set()
 

	
 
    @property
 
    def length(self):
 
        '''
 
        returns int.
 

	
 
        This is the length of the queue of chunks, not the length of
        the combined contents in those chunks.

        __len__() cannot be meaningfully implemented because this
        reader is just flying through bottomless pit content and
        can only know the length of what it already saw.

        If __len__() on a WSGI server per PEP 3333 returns a value,
        the response's length will be set to that. In order not to
        confuse WSGI PEP 3333 servers, we will not implement __len__
        at all.
 
        '''
 
        return len(self.data)
 

	
 
    def prepend(self, x):
 
        self.data.appendleft(x)
 

	
 
    def append(self, x):
 
        self.data.append(x)
 

	
 
    def extend(self, o):
 
        self.data.extend(o)
 

	
 
    def __getitem__(self, i):
 
        return self.data[i]
 

	
 

	
 
class SubprocessIOChunker(object):
 
    '''
 
    Processor class wrapping handling of subprocess IO.
 

	
 
    In a way, this is a "communicate()" replacement with a twist.
 

	
 
    - We are multithreaded. Writing in, and reading out and err, happen in separate threads.
    - We support concurrent (in and out) stream processing.
    - The output is not a stream. It's a queue of read string (bytes, not unicode)
      chunks. The object behaves as an iterable. You can "for chunk in obj:" it.
    - We are non-blocking in more respects than communicate()
      (reading from the subprocess's output pauses when the internal buffer is
       full, but does not block the parent calling code. On the flip side,
       reading from a slow-yielding subprocess may block the iteration until
       data shows up. This does not block the inpipe reading occurring in a
       parallel thread.)
 

	
 
    The purpose of the object is to allow us to wrap subprocess interactions into
    an iterable that can be passed to a WSGI server as the application's return
    value. Because of its stream-processing ability, WSGI does not have to read
    ALL of the subprocess's output and buffer it before handing it to the WSGI
    server for the HTTP response. Instead, the class initializer reads just a bit
    of the stream to figure out if an error occurred or is likely to occur, and
    if not, hands further iteration over the subprocess output to the server for
    completion of the HTTP response.
 

	
 
    The real or perceived subprocess error is trapped and raised as one of the
    EnvironmentError family of exceptions.
 

	
 
    Example usage:
 
    #    try:
 
    #        answer = SubprocessIOChunker(
 
    #            cmd,
 
    #            input,
 
    #            buffer_size = 65536,
 
    #            chunk_size = 4096
 
    #            )
 
    #    except (EnvironmentError) as e:
 
    #        print str(e)
 
    #        raise e
 
    #
 
    #    return answer
 

	
 

	
 
    '''
 
    def __init__(self, cmd, inputstream=None, buffer_size=65536,
 
                 chunk_size=4096, starting_values=[], **kwargs):
 
        '''
 
        Initializes SubprocessIOChunker
 

	
 
        :param cmd: A subprocess.Popen style "cmd". Can be a string or an array of strings
        :param inputstream: (Default: None) A file-like, string, or file pointer.
        :param buffer_size: (Default: 65536) A size of total buffer per stream in bytes.
        :param chunk_size: (Default: 4096) A max size of a chunk. Actual chunk may be smaller.
        :param starting_values: (Default: []) An array of strings to put in front of the output queue.
 
        '''
 

	
 
        if inputstream:
 
            input_streamer = StreamFeeder(inputstream)
 
            input_streamer.start()
 
            inputstream = input_streamer.output
 

	
 
        if isinstance(cmd, (list, tuple)):
 
            cmd = ' '.join(cmd)
 

	
 
        _shell = kwargs.get('shell') or True
 
        kwargs['shell'] = _shell
 
        _p = subprocess.Popen(cmd,
 
            bufsize=-1,
 
            stdin=inputstream,
 
            stdout=subprocess.PIPE,
 
            stderr=subprocess.PIPE,
 
            **kwargs
 
            )
 

	
 
        bg_out = BufferedGenerator(_p.stdout, buffer_size, chunk_size, starting_values)
 
        bg_err = BufferedGenerator(_p.stderr, 16000, 1, bottomless=True)
 

	
 
        while not bg_out.done_reading and not bg_out.reading_paused and not bg_err.length:
 
            # doing this until we reach either end of file, or end of buffer.
 
            bg_out.data_added_event.wait(1)
 
            bg_out.data_added_event.clear()
 

	
 
        # at this point it's still ambiguous if we are done reading or just full buffer.
 
        # Either way, if error (returned by ended process, or implied based on
 
        # presence of stuff in stderr output) we error out.
 
        # Else, we are happy.
 
        _returncode = _p.poll()
 
        if _returncode or (_returncode == None and bg_err.length):
 
            try:
 
                _p.terminate()
 
            except:
 
                pass
 
            bg_out.stop()
 
            bg_err.stop()
 
            err = '%s' % ''.join(bg_err)
 
            raise EnvironmentError("Subprocess exited due to an error:\n" + err)
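
The commented block in the SubprocessIOChunker docstring shows the intended call pattern; a cleaned-up sketch of using it as a WSGI return value follows. The command, content type, and repository path are hypothetical, and error handling is reduced to the minimum:

def git_upload_pack_app(environ, start_response):
    try:
        answer = SubprocessIOChunker(
            'git upload-pack --stateless-rpc /path/to/repo.git',  # hypothetical command
            inputstream=environ['wsgi.input'],
            buffer_size=65536,
            chunk_size=4096,
        )
    except EnvironmentError as e:
        start_response('500 Internal Server Error', [('Content-Type', 'text/plain')])
        return [str(e)]
    start_response('200 OK', [('Content-Type', 'application/octet-stream')])
    return answer    # the WSGI server iterates over the buffered chunks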
rhodecode/lib/utils.py
 
# -*- coding: utf-8 -*-
 
"""
 
    rhodecode.lib.utils
 
    ~~~~~~~~~~~~~~~~~~~
 

	
 
    Utilities library for RhodeCode
 

	
 
    :created_on: Apr 18, 2010
 
    :author: marcink
 
    :copyright: (C) 2010-2012 Marcin Kuzminski <marcin@python-works.com>
 
    :license: GPLv3, see COPYING for more details.
 
"""
 
# This program is free software: you can redistribute it and/or modify
 
# it under the terms of the GNU General Public License as published by
 
# the Free Software Foundation, either version 3 of the License, or
 
# (at your option) any later version.
 
#
 
# This program is distributed in the hope that it will be useful,
 
# but WITHOUT ANY WARRANTY; without even the implied warranty of
 
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 
# GNU General Public License for more details.
 
#
 
# You should have received a copy of the GNU General Public License
 
# along with this program.  If not, see <http://www.gnu.org/licenses/>.
 

	
 
import os
 
import re
 
import logging
 
import datetime
 
import traceback
 
import paste
 
import beaker
 
import tarfile
 
import shutil
 
from os.path import abspath
 
from os.path import dirname as dn, join as jn
 

	
 
from paste.script.command import Command, BadCommand
 

	
 
from mercurial import ui, config
 

	
 
from webhelpers.text import collapse, remove_formatting, strip_tags
 

	
 
from rhodecode.lib.vcs import get_backend
 
from rhodecode.lib.vcs.backends.base import BaseChangeset
 
from rhodecode.lib.vcs.utils.lazy import LazyProperty
 
from rhodecode.lib.vcs.utils.helpers import get_scm
 
from rhodecode.lib.vcs.exceptions import VCSError
 

	
 
from rhodecode.lib.caching_query import FromCache
 

	
 
from rhodecode.model import meta
 
from rhodecode.model.db import Repository, User, RhodeCodeUi, \
 
    UserLog, RepoGroup, RhodeCodeSetting, CacheInvalidation
 
from rhodecode.model.meta import Session
 
from rhodecode.model.repos_group import ReposGroupModel
 
from rhodecode.lib.utils2 import safe_str, safe_unicode
 
from rhodecode.lib.vcs.utils.fakemod import create_module
 

	
 
log = logging.getLogger(__name__)
 

	
 
REMOVED_REPO_PAT = re.compile(r'rm__\d{8}_\d{6}_\d{6}__.*')
 

	
 

	
 
def recursive_replace(str_, replace=' '):
 
    """
 
    Recursively replace repeated occurrences of a given character with a single one
 

	
 
    :param str_: given string
 
    :param replace: char to find and replace multiple instances
 

	
 
    Examples::
 
    >>> recursive_replace("Mighty---Mighty-Bo--sstones",'-')
 
    'Mighty-Mighty-Bo-sstones'
 
    """
 

	
 
    if str_.find(replace * 2) == -1:
 
        return str_
 
    else:
 
        str_ = str_.replace(replace * 2, replace)
 
        return recursive_replace(str_, replace)
 

	
 

	
 
def repo_name_slug(value):
 
    """
 
    Return a slug of the repository name.
    This function is called on each creation/modification
    of a repository to prevent bad names.
 
    """
 

	
 
    slug = remove_formatting(value)
 
    slug = strip_tags(slug)
 

	
 
    for c in """=[]\;'"<>,/~!@#$%^&*()+{}|: """:
 
    for c in """`?=[]\;'"<>,/~!@#$%^&*()+{}|: """:
 
        slug = slug.replace(c, '-')
 
    slug = recursive_replace(slug, '-')
 
    slug = collapse(slug, '-')
 
    return slug
 

	
 

	
 
def get_repo_slug(request):
 
    _repo = request.environ['pylons.routes_dict'].get('repo_name')
 
    if _repo:
 
        _repo = _repo.rstrip('/')
 
    return _repo
 

	
 

	
 
def get_repos_group_slug(request):
 
    _group = request.environ['pylons.routes_dict'].get('group_name')
 
    if _group:
 
        _group = _group.rstrip('/')
 
    return _group
 

	
 

	
 
def action_logger(user, action, repo, ipaddr='', sa=None, commit=False):
 
    """
 
    Action logger for various actions made by users
 

	
 
    :param user: user that made this action, can be a unique username string or
 
        object containing user_id attribute
 
    :param action: action to log, should be one of the predefined unique
        actions for easy translations
    :param repo: string name of repository or object containing repo_id,
        that action was made on
    :param ipaddr: optional ip address from which the action was made
    :param sa: optional sqlalchemy session
 

	
 
    """
 

	
 
    if not sa:
 
        sa = meta.Session()
 

	
 
    try:
 
        if hasattr(user, 'user_id'):
 
            user_obj = user
 
        elif isinstance(user, basestring):
 
            user_obj = User.get_by_username(user)
 
        else:
 
            raise Exception('You have to provide user object or username')
 

	
 
        if hasattr(repo, 'repo_id'):
 
            repo_obj = Repository.get(repo.repo_id)
 
            repo_name = repo_obj.repo_name
 
        elif isinstance(repo, basestring):
 
            repo_name = repo.lstrip('/')
 
            repo_obj = Repository.get_by_repo_name(repo_name)
 
        else:
 
            repo_obj = None
 
            repo_name = ''
 

	
 
        user_log = UserLog()
 
        user_log.user_id = user_obj.user_id
 
        user_log.action = safe_unicode(action)
 

	
 
        user_log.repository = repo_obj
 
        user_log.repository_name = repo_name
 

	
 
        user_log.action_date = datetime.datetime.now()
 
        user_log.user_ip = ipaddr
 
        sa.add(user_log)
 

	
 
        log.info(
 
            'Adding user %s, action %s on %s' % (user_obj, action,
 
                                                 safe_unicode(repo))
 
        )
 
        if commit:
 
            sa.commit()
 
    except:
 
        log.error(traceback.format_exc())
 
        raise
 

	
 

	
 
def get_repos(path, recursive=False):
 
    """
 
    Scans the given path for repos and returns (name, (type, path)) tuples
 

	
 
    :param path: path to scan for repositories
 
    :param recursive: recursive search and return names with subdirs in front
 
    """
 

	
 
    # remove ending slash for better results
 
    path = path.rstrip(os.sep)
 

	
 
    def _get_repos(p):
 
        if not os.access(p, os.W_OK):
 
            return
 
        for dirpath in os.listdir(p):
 
            if os.path.isfile(os.path.join(p, dirpath)):
 
                continue
 
            cur_path = os.path.join(p, dirpath)
 
            try:
 
                scm_info = get_scm(cur_path)
 
                yield scm_info[1].split(path, 1)[-1].lstrip(os.sep), scm_info
 
            except VCSError:
 
                if not recursive:
 
                    continue
 
                # check if this dir contains other repos for recursive scan
 
                rec_path = os.path.join(p, dirpath)
 
                if os.path.isdir(rec_path):
 
                    for inner_scm in _get_repos(rec_path):
 
                        yield inner_scm
 

	
 
    return _get_repos(path)
 

	
 

	
 
def is_valid_repo(repo_name, base_path, scm=None):
 
    """
 
    Returns True if the given path is a valid repository, False otherwise.
    If the scm param is given, also check whether the repository's scm matches
    the expected one.
 

	
 
    :param repo_name:
 
    :param base_path:
 
    :param scm:
 

	
 
    :return True: if given path is a valid repository
 
    """
 
    full_path = os.path.join(safe_str(base_path), safe_str(repo_name))
 

	
 
    try:
 
        scm_ = get_scm(full_path)
 
        if scm:
 
            return scm_[0] == scm
 
        return True
 
    except VCSError:
 
        return False
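
is_valid_repo() probes the path with get_scm() and, when scm is given, additionally pins the detected backend type. Hypothetical calls (paths are examples only):

is_valid_repo('rhodecode', '/srv/repos')             # True if /srv/repos/rhodecode is any repo
is_valid_repo('rhodecode', '/srv/repos', scm='hg')   # additionally require a Mercurial repo
is_valid_repo('missing', '/srv/repos')               # False: get_scm() raises VCSError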
 

	
 

	
 
def is_valid_repos_group(repos_group_name, base_path):
 
    """
 
    Returns True if given path is a repos group False otherwise
 

	
 
    :param repo_name:
 
    :param base_path:
 
    """
 
    full_path = os.path.join(safe_str(base_path), safe_str(repos_group_name))
 

	
 
    # check if it's not a repo
 
    if is_valid_repo(repos_group_name, base_path):
 
        return False
 

	
 
    try:
 
        # we need to check bare git repos at higher level
 
        # since we might match branches/hooks/info/objects or possible
 
        # other things inside bare git repo
 
        get_scm(os.path.dirname(full_path))
 
        return False
 
    except VCSError:
 
        pass
 

	
 
    # check if it's a valid path
 
    if os.path.isdir(full_path):
 
        return True
 

	
 
    return False
 

	
 

	
 
def ask_ok(prompt, retries=4, complaint='Yes or no, please!'):
 
    while True:
 
        ok = raw_input(prompt)
 
        if ok in ('y', 'ye', 'yes'):
 
            return True
 
        if ok in ('n', 'no', 'nop', 'nope'):
 
            return False
 
        retries = retries - 1
 
        if retries < 0:
 
            raise IOError
 
        print complaint
 

	
 
#propagated from mercurial documentation
 
ui_sections = ['alias', 'auth',
 
                'decode/encode', 'defaults',
 
                'diff', 'email',
 
                'extensions', 'format',
 
                'merge-patterns', 'merge-tools',
 
                'hooks', 'http_proxy',
 
                'smtp', 'patch',
 
                'paths', 'profiling',
 
                'server', 'trusted',
 
                'ui', 'web', ]
 

	
 

	
 
def make_ui(read_from='file', path=None, checkpaths=True, clear_session=True):
 
    """
 
    A function that will read python rc files or the database
    and make a mercurial ui object from the read options
rhodecode/model/db.py
 
@@ -1345,385 +1345,385 @@ class Statistics(Base, BaseModel):
 
    repository_id = Column("repository_id", Integer(), ForeignKey('repositories.repo_id'), nullable=False, unique=True, default=None)
 
    stat_on_revision = Column("stat_on_revision", Integer(), nullable=False)
 
    commit_activity = Column("commit_activity", LargeBinary(1000000), nullable=False)#JSON data
 
    commit_activity_combined = Column("commit_activity_combined", LargeBinary(), nullable=False)#JSON data
 
    languages = Column("languages", LargeBinary(1000000), nullable=False)#JSON data
 

	
 
    repository = relationship('Repository', single_parent=True)
 

	
 

	
 
class UserFollowing(Base, BaseModel):
 
    __tablename__ = 'user_followings'
 
    __table_args__ = (
 
        UniqueConstraint('user_id', 'follows_repository_id'),
 
        UniqueConstraint('user_id', 'follows_user_id'),
 
        {'extend_existing': True, 'mysql_engine': 'InnoDB',
 
         'mysql_charset': 'utf8'}
 
    )
 

	
 
    user_following_id = Column("user_following_id", Integer(), nullable=False, unique=True, default=None, primary_key=True)
 
    user_id = Column("user_id", Integer(), ForeignKey('users.user_id'), nullable=False, unique=None, default=None)
 
    follows_repo_id = Column("follows_repository_id", Integer(), ForeignKey('repositories.repo_id'), nullable=True, unique=None, default=None)
 
    follows_user_id = Column("follows_user_id", Integer(), ForeignKey('users.user_id'), nullable=True, unique=None, default=None)
 
    follows_from = Column('follows_from', DateTime(timezone=False), nullable=True, unique=None, default=datetime.datetime.now)
 

	
 
    user = relationship('User', primaryjoin='User.user_id==UserFollowing.user_id')
 

	
 
    follows_user = relationship('User', primaryjoin='User.user_id==UserFollowing.follows_user_id')
 
    follows_repository = relationship('Repository', order_by='Repository.repo_name')
 

	
 
    @classmethod
 
    def get_repo_followers(cls, repo_id):
 
        return cls.query().filter(cls.follows_repo_id == repo_id)
 

	
 

	
 
class CacheInvalidation(Base, BaseModel):
 
    __tablename__ = 'cache_invalidation'
 
    __table_args__ = (
 
        UniqueConstraint('cache_key'),
 
        Index('key_idx', 'cache_key'),
 
        {'extend_existing': True, 'mysql_engine': 'InnoDB',
 
         'mysql_charset': 'utf8'},
 
    )
 
    cache_id = Column("cache_id", Integer(), nullable=False, unique=True, default=None, primary_key=True)
 
    cache_key = Column("cache_key", String(255, convert_unicode=False, assert_unicode=None), nullable=True, unique=None, default=None)
 
    cache_args = Column("cache_args", String(255, convert_unicode=False, assert_unicode=None), nullable=True, unique=None, default=None)
 
    cache_active = Column("cache_active", Boolean(), nullable=True, unique=None, default=False)
 

	
 
    def __init__(self, cache_key, cache_args=''):
 
        self.cache_key = cache_key
 
        self.cache_args = cache_args
 
        self.cache_active = False
 

	
 
    def __unicode__(self):
 
        return u"<%s('%s:%s')>" % (self.__class__.__name__,
 
                                  self.cache_id, self.cache_key)
 

	
 
    @classmethod
 
    def clear_cache(cls):
 
        cls.query().delete()
 

	
 
    @classmethod
 
    def _get_key(cls, key):
 
        """
 
        Wrapper for generating a key, together with a prefix
 

	
 
        :param key:
 
        """
 
        import rhodecode
 
        prefix = ''
 
        iid = rhodecode.CONFIG.get('instance_id')
 
        if iid:
 
            prefix = iid
 
        return "%s%s" % (prefix, key), prefix, key.rstrip('_README')
 

	
 
    @classmethod
 
    def get_by_key(cls, key):
 
        return cls.query().filter(cls.cache_key == key).scalar()
 

	
 
    @classmethod
 
    def _get_or_create_key(cls, key, prefix, org_key):
 
        inv_obj = Session().query(cls).filter(cls.cache_key == key).scalar()
 
        if not inv_obj:
 
            try:
 
                inv_obj = CacheInvalidation(key, org_key)
 
                Session().add(inv_obj)
 
                Session().commit()
 
            except Exception:
 
                log.error(traceback.format_exc())
 
                Session().rollback()
 
        return inv_obj
 

	
 
    @classmethod
 
    def invalidate(cls, key):
 
        """
 
        Returns an Invalidation object if the given key should be invalidated,
        None otherwise. `cache_active = False` means that this cache
        state is not valid and needs to be invalidated
 

	
 
        :param key:
 
        """
 

	
 
        key, _prefix, _org_key = cls._get_key(key)
 
        inv = cls._get_or_create_key(key, _prefix, _org_key)
 

	
 
        if inv and inv.cache_active is False:
 
            return inv
 

	
 
    @classmethod
 
    def set_invalidate(cls, key):
 
        """
 
        Mark this Cache key for invalidation
 

	
 
        :param key:
 
        """
 

	
 
        key, _prefix, _org_key = cls._get_key(key)
 
        inv_objs = Session().query(cls).filter(cls.cache_args == _org_key).all()
 
        log.debug('marking %s key[s] %s for invalidation' % (len(inv_objs),
 
                                                             _org_key))
 
        try:
 
            for inv_obj in inv_objs:
 
                if inv_obj:
 
                    inv_obj.cache_active = False
 

	
 
                Session().add(inv_obj)
 
            Session().commit()
 
        except Exception:
 
            log.error(traceback.format_exc())
 
            Session().rollback()
 

	
 
    @classmethod
 
    def set_valid(cls, key):
 
        """
 
        Mark this cache key as active and currently cached
 

	
 
        :param key:
 
        """
 
        inv_obj = cls.get_by_key(key)
 
        inv_obj.cache_active = True
 
        Session().add(inv_obj)
 
        Session().commit()
 

	
 
    @classmethod
 
    def get_cache_map(cls):
 

	
 
        class cachemapdict(dict):
 

	
 
            def __init__(self, *args, **kwargs):
 
                fixkey = kwargs.get('fixkey')
 
                if fixkey:
 
                    del kwargs['fixkey']
 
                self.fixkey = fixkey
 
                super(cachemapdict, self).__init__(*args, **kwargs)
 

	
 
            def __getattr__(self, name):
 
                key = name
 
                if self.fixkey:
 
                    key, _prefix, _org_key = cls._get_key(key)
 
                if key in self.__dict__:
 
                    return self.__dict__[key]
 
                else:
 
                    return self[key]
 

	
 
            def __getitem__(self, key):
 
                if self.fixkey:
 
                    key, _prefix, _org_key = cls._get_key(key)
 
                try:
 
                    return super(cachemapdict, self).__getitem__(key)
 
                except KeyError:
 
                    return
 

	
 
        cache_map = cachemapdict(fixkey=True)
 
        for obj in cls.query().all():
 
            cache_map[obj.cache_key] = cachemapdict(obj.get_dict())
 
        return cache_map
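
Together with __get_readme_data() in summary.py earlier in this changeset, the expected invalidate/set_valid round trip looks roughly like this (a sketch; the key name is illustrative, and region_invalidate / _get_readme_from_cache come from summary.py):

key = 'myrepo_README'
inv = CacheInvalidation.invalidate(key)                     # non-None only when the key is marked stale
if inv is not None:
    region_invalidate(_get_readme_from_cache, None, key)    # drop the beaker entry
    CacheInvalidation.set_valid(inv.cache_key)               # mark the DB record active again
readme_data, readme_file = _get_readme_from_cache(key)       # repopulates the cache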
 

	
 

	
 
class ChangesetComment(Base, BaseModel):
 
    __tablename__ = 'changeset_comments'
 
    __table_args__ = (
 
        Index('cc_revision_idx', 'revision'),
 
        {'extend_existing': True, 'mysql_engine': 'InnoDB',
 
         'mysql_charset': 'utf8'},
 
    )
 
    comment_id = Column('comment_id', Integer(), nullable=False, primary_key=True)
 
    repo_id = Column('repo_id', Integer(), ForeignKey('repositories.repo_id'), nullable=False)
 
    revision = Column('revision', String(40), nullable=True)
 
    pull_request_id = Column("pull_request_id", Integer(), ForeignKey('pull_requests.pull_request_id'), nullable=True)
 
    line_no = Column('line_no', Unicode(10), nullable=True)
 
    hl_lines = Column('hl_lines', Unicode(512), nullable=True)
 
    f_path = Column('f_path', Unicode(1000), nullable=True)
 
    user_id = Column('user_id', Integer(), ForeignKey('users.user_id'), nullable=False)
 
-    text = Column('text', Unicode(25000), nullable=False)
+    text = Column('text', UnicodeText(25000), nullable=False)
 
    created_on = Column('created_on', DateTime(timezone=False), nullable=False, default=datetime.datetime.now)
 
    modified_at = Column('modified_at', DateTime(timezone=False), nullable=False, default=datetime.datetime.now)
 

	
 
    author = relationship('User', lazy='joined')
 
    repo = relationship('Repository')
 
    status_change = relationship('ChangesetStatus', cascade="all, delete, delete-orphan")
 
    pull_request = relationship('PullRequest', lazy='joined')
 

	
 
    @classmethod
 
    def get_users(cls, revision=None, pull_request_id=None):
 
        """
 
        Returns users associated with this ChangesetComment, i.e. those
        who actually commented
 

	
 
        :param cls:
 
        :param revision:
 
        """
 
        q = Session().query(User)\
 
                .join(ChangesetComment.author)
 
        if revision:
 
            q = q.filter(cls.revision == revision)
 
        elif pull_request_id:
 
            q = q.filter(cls.pull_request_id == pull_request_id)
 
        return q.all()
 

	
 

	
 
class ChangesetStatus(Base, BaseModel):
 
    __tablename__ = 'changeset_statuses'
 
    __table_args__ = (
 
        Index('cs_revision_idx', 'revision'),
 
        Index('cs_version_idx', 'version'),
 
        UniqueConstraint('repo_id', 'revision', 'version'),
 
        {'extend_existing': True, 'mysql_engine': 'InnoDB',
 
         'mysql_charset': 'utf8'}
 
    )
 
    STATUS_NOT_REVIEWED = DEFAULT = 'not_reviewed'
 
    STATUS_APPROVED = 'approved'
 
    STATUS_REJECTED = 'rejected'
 
    STATUS_UNDER_REVIEW = 'under_review'
 

	
 
    STATUSES = [
 
        (STATUS_NOT_REVIEWED, _("Not Reviewed")),  # (no icon) and default
 
        (STATUS_APPROVED, _("Approved")),
 
        (STATUS_REJECTED, _("Rejected")),
 
        (STATUS_UNDER_REVIEW, _("Under Review")),
 
    ]
 

	
 
    changeset_status_id = Column('changeset_status_id', Integer(), nullable=False, primary_key=True)
 
    repo_id = Column('repo_id', Integer(), ForeignKey('repositories.repo_id'), nullable=False)
 
    user_id = Column("user_id", Integer(), ForeignKey('users.user_id'), nullable=False, unique=None)
 
    revision = Column('revision', String(40), nullable=False)
 
    status = Column('status', String(128), nullable=False, default=DEFAULT)
 
    changeset_comment_id = Column('changeset_comment_id', Integer(), ForeignKey('changeset_comments.comment_id'))
 
    modified_at = Column('modified_at', DateTime(), nullable=False, default=datetime.datetime.now)
 
    version = Column('version', Integer(), nullable=False, default=0)
 
    pull_request_id = Column("pull_request_id", Integer(), ForeignKey('pull_requests.pull_request_id'), nullable=True)
 

	
 
    author = relationship('User', lazy='joined')
 
    repo = relationship('Repository')
 
    comment = relationship('ChangesetComment', lazy='joined')
 
    pull_request = relationship('PullRequest', lazy='joined')
 

	
 
    def __unicode__(self):
 
        return u"<%s('%s:%s')>" % (
 
            self.__class__.__name__,
 
            self.status, self.author
 
        )
 

	
 
    @classmethod
 
    def get_status_lbl(cls, value):
 
        return dict(cls.STATUSES).get(value)
 

	
 
    @property
 
    def status_lbl(self):
 
        return ChangesetStatus.get_status_lbl(self.status)
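
A minimal standalone sketch of the label lookup that get_status_lbl() and status_lbl perform above; plain strings stand in for the _() translated labels:

    # STATUSES is a list of (value, label) pairs, so dict() gives a value -> label
    # mapping; unknown values fall through to None.
    STATUSES = [
        ('not_reviewed', 'Not Reviewed'),
        ('approved', 'Approved'),
        ('rejected', 'Rejected'),
        ('under_review', 'Under Review'),
    ]
    assert dict(STATUSES).get('approved') == 'Approved'
    assert dict(STATUSES).get('bogus') is None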
 

	
 

	
 
class PullRequest(Base, BaseModel):
 
    __tablename__ = 'pull_requests'
 
    __table_args__ = (
 
        {'extend_existing': True, 'mysql_engine': 'InnoDB',
 
         'mysql_charset': 'utf8'},
 
    )
 

	
 
    STATUS_NEW = u'new'
 
    STATUS_OPEN = u'open'
 
    STATUS_CLOSED = u'closed'
 

	
 
    pull_request_id = Column('pull_request_id', Integer(), nullable=False, primary_key=True)
 
    title = Column('title', Unicode(256), nullable=True)
 
    description = Column('description', UnicodeText(10240), nullable=True)
 
    status = Column('status', Unicode(256), nullable=False, default=STATUS_NEW)
 
    created_on = Column('created_on', DateTime(timezone=False), nullable=False, default=datetime.datetime.now)
 
    updated_on = Column('updated_on', DateTime(timezone=False), nullable=False, default=datetime.datetime.now)
 
    user_id = Column("user_id", Integer(), ForeignKey('users.user_id'), nullable=False, unique=None)
 
    _revisions = Column('revisions', UnicodeText(20500))  # 500 revisions max
 
    org_repo_id = Column('org_repo_id', Integer(), ForeignKey('repositories.repo_id'), nullable=False)
 
    org_ref = Column('org_ref', Unicode(256), nullable=False)
 
    other_repo_id = Column('other_repo_id', Integer(), ForeignKey('repositories.repo_id'), nullable=False)
 
    other_ref = Column('other_ref', Unicode(256), nullable=False)
 

	
 
    @hybrid_property
 
    def revisions(self):
 
        return self._revisions.split(':')
 

	
 
    @revisions.setter
 
    def revisions(self, val):
 
        self._revisions = ':'.join(val)
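
A standalone sketch of the storage scheme behind the revisions hybrid property above; the hashes are made-up placeholders:

    # The list of changeset hashes is persisted as a single ':'-joined string
    # in the 'revisions' column (capped at roughly 500 revisions, per the comment).
    revs = ['aaa111', 'bbb222', 'ccc333']   # placeholder hashes
    stored = ':'.join(revs)                 # what the setter writes to _revisions
    assert stored == 'aaa111:bbb222:ccc333'
    assert stored.split(':') == revs        # what the getter returns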
 

	
 
    author = relationship('User', lazy='joined')
 
    reviewers = relationship('PullRequestReviewers',
 
                             cascade="all, delete, delete-orphan")
 
    org_repo = relationship('Repository', primaryjoin='PullRequest.org_repo_id==Repository.repo_id')
 
    other_repo = relationship('Repository', primaryjoin='PullRequest.other_repo_id==Repository.repo_id')
 
    statuses = relationship('ChangesetStatus')
 
    comments = relationship('ChangesetComment',
 
                             cascade="all, delete, delete-orphan")
 

	
 
    def is_closed(self):
 
        return self.status == self.STATUS_CLOSED
 

	
 
    def __json__(self):
 
        return dict(
 
          revisions=self.revisions
 
        )
 

	
 

	
 
class PullRequestReviewers(Base, BaseModel):
 
    __tablename__ = 'pull_request_reviewers'
 
    __table_args__ = (
 
        {'extend_existing': True, 'mysql_engine': 'InnoDB',
 
         'mysql_charset': 'utf8'},
 
    )
 

	
 
    def __init__(self, user=None, pull_request=None):
 
        self.user = user
 
        self.pull_request = pull_request
 

	
 
    pull_requests_reviewers_id = Column('pull_requests_reviewers_id', Integer(), nullable=False, primary_key=True)
 
    pull_request_id = Column("pull_request_id", Integer(), ForeignKey('pull_requests.pull_request_id'), nullable=False)
 
    user_id = Column("user_id", Integer(), ForeignKey('users.user_id'), nullable=True)
 

	
 
    user = relationship('User')
 
    pull_request = relationship('PullRequest')
 

	
 

	
 
class Notification(Base, BaseModel):
 
    __tablename__ = 'notifications'
 
    __table_args__ = (
 
        Index('notification_type_idx', 'type'),
 
        {'extend_existing': True, 'mysql_engine': 'InnoDB',
 
         'mysql_charset': 'utf8'},
 
    )
 

	
 
    TYPE_CHANGESET_COMMENT = u'cs_comment'
 
    TYPE_MESSAGE = u'message'
 
    TYPE_MENTION = u'mention'
 
    TYPE_REGISTRATION = u'registration'
 
    TYPE_PULL_REQUEST = u'pull_request'
 
    TYPE_PULL_REQUEST_COMMENT = u'pull_request_comment'
 

	
 
    notification_id = Column('notification_id', Integer(), nullable=False, primary_key=True)
 
    subject = Column('subject', Unicode(512), nullable=True)
 
    body = Column('body', UnicodeText(50000), nullable=True)
 
    created_by = Column("created_by", Integer(), ForeignKey('users.user_id'), nullable=True)
 
    created_on = Column('created_on', DateTime(timezone=False), nullable=False, default=datetime.datetime.now)
 
    type_ = Column('type', Unicode(256))
 

	
 
    created_by_user = relationship('User')
 
    notifications_to_users = relationship('UserNotification', lazy='joined',
 
                                          cascade="all, delete, delete-orphan")
 

	
 
    @property
 
    def recipients(self):
 
        return [x.user for x in UserNotification.query()\
 
                .filter(UserNotification.notification == self)\
 
                .order_by(UserNotification.user_id.asc()).all()]
 

	
 
    @classmethod
 
    def create(cls, created_by, subject, body, recipients, type_=None):
 
        if type_ is None:
 
            type_ = Notification.TYPE_MESSAGE
 

	
 
        notification = cls()
 
        notification.created_by_user = created_by
 
        notification.subject = subject
 
        notification.body = body
 
        notification.type_ = type_
 
        notification.created_on = datetime.datetime.now()
 

	
 
        for u in recipients:
 
            assoc = UserNotification()
rhodecode/public/js/rhodecode.js
 
@@ -183,463 +183,461 @@ var _run_callbacks = function(callbacks)
 
	    	if(typeof(func)=='function'){
 
	            try{
 
	          	    func();
 
	            }catch (err){};            		
 
	    	}
 
	    }
 
	}		
 
}
 

	
 
/**
 
 * Partial Ajax Implementation
 
 * 
 
 * @param url: defines url to make partial request
 
 * @param container: defines id of the container to put the partial result into
 
 * @param s_call: success callback function that takes o as arg
 
 *  o.tId
 
 *  o.status
 
 *  o.statusText
 
 *  o.getResponseHeader[ ]
 
 *  o.getAllResponseHeaders
 
 *  o.responseText
 
 *  o.responseXML
 
 *  o.argument
 
 * @param f_call: failure callback
 
 * @param args: arguments
 
 */
 
function ypjax(url,container,s_call,f_call,args){
 
	var method='GET';
 
	if(args===undefined){
 
		args=null;
 
	}
 
	
 
	// Set special header for partial ajax == HTTP_X_PARTIAL_XHR
 
	YUC.initHeader('X-PARTIAL-XHR',true);
 
	
 
	// wrapper of passed callback
 
	var s_wrapper = (function(o){
 
		return function(o){
 
			YUD.get(container).innerHTML=o.responseText;
 
			YUD.setStyle(container,'opacity','1.0');
 
    		//execute the given original callback
 
    		if (s_call !== undefined){
 
    			s_call(o);
 
    		}
 
		}
 
	})()	
 
	YUD.setStyle(container,'opacity','0.3');
 
	YUC.asyncRequest(method,url,{
 
		success:s_wrapper,
 
		failure:function(o){
 
			console.log(o);
 
			YUD.get(container).innerHTML='<span class="error_red">ERROR: {0}</span>'.format(o.status);
 
			YUD.setStyle(container,'opacity','1.0');
 
		},
 
		cache:false
 
	},args);
 
	
 
};
 

	
 
var ajaxPOST = function(url,postData,success) {
 
	// Set special header for ajax == HTTP_X_PARTIAL_XHR
 
	YUC.initHeader('X-PARTIAL-XHR',true);
 
	
 
	var toQueryString = function(o) {
 
	    if(typeof o !== 'object') {
 
	        return false;
 
	    }
 
	    var _p, _qs = [];
 
	    for(_p in o) {
 
	        _qs.push(encodeURIComponent(_p) + '=' + encodeURIComponent(o[_p]));
 
	    }
 
	    return _qs.join('&');
 
	};
 
	
 
    var sUrl = url;
 
    var callback = {
 
        success: success,
 
        failure: function (o) {
 
            alert("error");
 
        },
 
    };
 
    var postData = toQueryString(postData);
 
    var request = YAHOO.util.Connect.asyncRequest('POST', sUrl, callback, postData);
 
    return request;
 
};
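
For readers on the Python side, a rough Python 2 parallel (matching this codebase) of the urlencoded body that toQueryString()/ajaxPOST() build; the '_method' key is the Routes-style verb emulation used by deleteComment() and updateReviewers() below, and the values are made up:

    # Illustration only -- not part of the changeset.
    import urllib
    post_data = {'_method': 'delete'}        # emulate HTTP DELETE over a POST
    body = urllib.urlencode(post_data)       # same shape toQueryString() produces
    assert body == '_method=delete'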
 

	
 

	
 
/**
 
 * tooltip activate
 
 */
 
var tooltip_activate = function(){
 
    function toolTipsId(){
 
        var ids = [];
 
        var tts = YUQ('.tooltip');
 
        for (var i = 0; i < tts.length; i++) {
 
            // if element doesn't have an id 
 
        	//  autogenerate one for tooltip 
 
            if (!tts[i].id){
 
                tts[i].id='tt'+((i*100)+tts.length);
 
            }
 
            ids.push(tts[i].id);
 
        }
 
        return ids
 
    };
 
    var myToolTips = new YAHOO.widget.Tooltip("tooltip", {
 
        context: [[toolTipsId()],"tl","bl",null,[0,5]],
 
        monitorresize:false,
 
        xyoffset :[0,0],
 
        autodismissdelay:300000,
 
        hidedelay:5,
 
        showdelay:20,
 
    });
 
};
 

	
 
/**
 
 * show more
 
 */
 
var show_more_event = function(){
 
    YUE.on(YUD.getElementsByClassName('show_more'),'click',function(e){
 
        var el = e.target;
 
        YUD.setStyle(YUD.get(el.id.substring(1)),'display','');
 
        YUD.setStyle(el.parentNode,'display','none');
 
    });
 
};
 

	
 

	
 
/**
 
 * Quick filter widget
 
 * 
 
 * @param target: filter input target
 
 * @param nodes: list of nodes in html we want to filter.
 
 * @param display_element: function that takes the current node from nodes and
 
 *    returns the element that should be hidden or shown for it
 
 * 
 
 */
 
var q_filter = function(target,nodes,display_element){
 
	
 
	var nodes = nodes;
 
	var q_filter_field = YUD.get(target);
 
	var F = YAHOO.namespace(target);
 

	
 
	YUE.on(q_filter_field,'click',function(){
 
	   q_filter_field.value = '';
 
	});
 

	
 
	YUE.on(q_filter_field,'keyup',function(e){
 
	    clearTimeout(F.filterTimeout); 
 
	    F.filterTimeout = setTimeout(F.updateFilter,600); 
 
	});
 

	
 
	F.filterTimeout = null;
 

	
 
	var show_node = function(node){
 
		YUD.setStyle(node,'display','')
 
	}
 
	var hide_node = function(node){
 
		YUD.setStyle(node,'display','none');
 
	}
 
	
 
	F.updateFilter  = function() { 
 
	   // Reset timeout 
 
	   F.filterTimeout = null;
 
	   
 
	   var obsolete = [];
 
	   
 
	   var req = q_filter_field.value.toLowerCase();
 
	   
 
	   var l = nodes.length;
 
	   var i;
 
	   var showing = 0;
 
	   
 
       for (i=0;i<l;i++ ){
 
    	   var n = nodes[i];
 
    	   var target_element = display_element(n)
 
    	   if(req && n.innerHTML.toLowerCase().indexOf(req) == -1){
 
    		   hide_node(target_element);
 
    	   }
 
    	   else{
 
    		   show_node(target_element);
 
    		   showing+=1;
 
    	   }
 
       }	  	   
 

	
 
	   // if repo_count is set update the number
 
	   var cnt = YUD.get('repo_count');
 
	   if(cnt){
 
		   YUD.get('repo_count').innerHTML = showing;
 
	   }       
 
       
 
	}	
 
};
 

	
 
-var tableTr = function(cls,body){
-	var tr = document.createElement('tr');
-	YUD.addClass(tr, cls);
+var tableTr = function(cls, body){
+	var _el = document.createElement('div');
	var cont = new YAHOO.util.Element(body);
	var comment_id = fromHTML(body).children[0].id.split('comment-')[1];
-	tr.id = 'comment-tr-{0}'.format(comment_id);
-	tr.innerHTML = '<td class="lineno-inline new-inline"></td>'+
-    				 '<td class="lineno-inline old-inline"></td>'+
-                     '<td>{0}</td>'.format(body);
-	return tr;
+	var id = 'comment-tr-{0}'.format(comment_id);
+	var _html = ('<table><tbody><tr id="{0}" class="{1}">'+
+	              '<td class="lineno-inline new-inline"></td>'+
+    			  '<td class="lineno-inline old-inline"></td>'+
+                  '<td>{2}</td>'+
+                 '</tr></tbody></table>').format(id, cls, body);
+	_el.innerHTML = _html;
+	return _el.children[0].children[0].children[0];
};
 

	
 
/** comments **/
 
var removeInlineForm = function(form) {
 
	form.parentNode.removeChild(form);
 
};
 

	
 
var createInlineForm = function(parent_tr, f_path, line) {
 
	var tmpl = YUD.get('comment-inline-form-template').innerHTML;
 
	tmpl = tmpl.format(f_path, line);
 
	var form = tableTr('comment-form-inline',tmpl)
 
	
 

	
 
	// create event for hide button
 
	form = new YAHOO.util.Element(form);
 
	var form_hide_button = new YAHOO.util.Element(YUD.getElementsByClassName('hide-inline-form',null,form)[0]);
 
	form_hide_button.on('click', function(e) {
 
		var newtr = e.currentTarget.parentNode.parentNode.parentNode.parentNode.parentNode;
 
		if(YUD.hasClass(newtr.nextElementSibling,'inline-comments-button')){
 
			YUD.setStyle(newtr.nextElementSibling,'display','');
 
		}
 
		removeInlineForm(newtr);
 
		YUD.removeClass(parent_tr, 'form-open');
 
		
 
	});
 
	
 
	return form
 
};
 

	
 
/**
 
 * Injects an inline comment form for a given TR; this tr should always be a .line
 
 * tr containing the line. The code detects existing comments and always puts the
 
 * comment block at the very bottom
 
 */
 
var injectInlineForm = function(tr){
 
	  if(!YUD.hasClass(tr, 'line')){
 
		  return
 
	  }
 
	  var submit_url = AJAX_COMMENT_URL;
 
	  var _td = YUD.getElementsByClassName('code',null,tr)[0];
 
	  if(YUD.hasClass(tr,'form-open') || YUD.hasClass(tr,'context') || YUD.hasClass(_td,'no-comment')){
 
		  return
 
	  }	
 
	  YUD.addClass(tr,'form-open');
 
	  var node = YUD.getElementsByClassName('full_f_path',null,tr.parentNode.parentNode.parentNode)[0];
 
	  var f_path = YUD.getAttribute(node,'path');
 
	  var lineno = getLineNo(tr);
 
	  var form = createInlineForm(tr, f_path, lineno, submit_url);
 
	  
 
	  var parent = tr;
 
	  while (1){
 
		  var n = parent.nextElementSibling;
 
		  // next element are comments !
 
		  if(YUD.hasClass(n,'inline-comments')){
 
			  parent = n;
 
		  }
 
		  else{
 
			  break;
 
		  }
 
	  }	  
 
	  YUD.insertAfter(form,parent);
 
	  
 
	  var f = YUD.get(form);
 
	  
 
	  var overlay = YUD.getElementsByClassName('overlay',null,f)[0];
 
	  var _form = YUD.getElementsByClassName('inline-form',null,f)[0];
 
	  
 
-	  form.on('submit',function(e){
 
+	  YUE.on(YUD.get(_form), 'submit',function(e){
 
		  YUE.preventDefault(e);
 
		  
 
		  //ajax submit
 
		  var text = YUD.get('text_'+lineno).value;
 
		  var postData = {
 
	            'text':text,
 
	            'f_path':f_path,
 
	            'line':lineno
 
		  };
 
		  
 
		  if(lineno === undefined){
 
			  alert('missing line !');
 
			  return
 
		  }
 
		  if(f_path === undefined){
 
			  alert('missing file path !');
 
			  return
 
		  }
 
		  
 
		  if(text == ""){
 
			  return
 
		  }
 
		  
 
		  var success = function(o){
 
			  YUD.removeClass(tr, 'form-open');
 
			  removeInlineForm(f);			  
 
			  var json_data = JSON.parse(o.responseText);
 
	          renderInlineComment(json_data);
 
		  };
 

	
 
		  if (YUD.hasClass(overlay,'overlay')){
 
			  var w = _form.offsetWidth;
 
			  var h = _form.offsetHeight;
 
			  YUD.setStyle(overlay,'width',w+'px');
 
			  YUD.setStyle(overlay,'height',h+'px');
 
		  }		  
 
		  YUD.addClass(overlay, 'submitting');		  
 
		  
 
		  ajaxPOST(submit_url, postData, success);
 
	  });
 
	  
 
	  setTimeout(function(){
 
		  // callbacks
 
		  tooltip_activate();
 
		  MentionsAutoComplete('text_'+lineno, 'mentions_container_'+lineno, 
 
	                         _USERS_AC_DATA, _GROUPS_AC_DATA);
 
		  var _e = YUD.get('text_'+lineno);
 
		  if(_e){
 
			  _e.focus();
 
		  }
 
	  },10)
 
};
 

	
 
var deleteComment = function(comment_id){
 
	var url = AJAX_COMMENT_DELETE_URL.replace('__COMMENT_ID__',comment_id);
 
    var postData = {'_method':'delete'};
 
    var success = function(o){
 
        var n = YUD.get('comment-tr-'+comment_id);
 
        var root = prevElementSibling(prevElementSibling(n));
 
        n.parentNode.removeChild(n);
 

	
 
        // scan nodes, and attach add button to last one
 
        placeAddButton(root);
 
    }
 
    ajaxPOST(url,postData,success);
 
}
 

	
 
var updateReviewers = function(reviewers_ids){
 
	var url = AJAX_UPDATE_PULLREQUEST;
 
	var postData = {'_method':'put',
 
			        'reviewers_ids': reviewers_ids};
 
	var success = function(o){
 
		window.location.reload();
 
	}
 
	ajaxPOST(url,postData,success);
 
}
 

	
 
var createInlineAddButton = function(tr){
 

	
 
	var label = TRANSLATION_MAP['add another comment'];
 
	
 
	var html_el = document.createElement('div');
 
	YUD.addClass(html_el, 'add-comment');
 
	html_el.innerHTML = '<span class="ui-btn">{0}</span>'.format(label);
 
	
 
	var add = new YAHOO.util.Element(html_el);
 
	add.on('click', function(e) {
 
		injectInlineForm(tr);
 
	});
 
	return add;
 
};
 

	
 
var getLineNo = function(tr) {
 
	var line;
 
	var o = tr.children[0].id.split('_');
 
	var n = tr.children[1].id.split('_');
 

	
 
	if (n.length >= 2) {
 
		line = n[n.length-1];
 
	} else if (o.length >= 2) {
 
		line = o[o.length-1];
 
	}
 

	
 
	return line
 
};
 

	
 
var placeAddButton = function(target_tr){
 
	if(!target_tr){
 
		return
 
	}
 
	var last_node = target_tr;
 
    //scan
 
	  while (1){
 
		  var n = last_node.nextElementSibling;
 
		  // next element are comments !
 
		  if(YUD.hasClass(n,'inline-comments')){
 
			  last_node = n;
 
			  //also remove the comment button from previous
 
			  var comment_add_buttons = YUD.getElementsByClassName('add-comment',null,last_node);
 
			  for(var i=0;i<comment_add_buttons.length;i++){
 
				  var b = comment_add_buttons[i];
 
				  b.parentNode.removeChild(b);
 
			  }
 
		  }
 
		  else{
 
			  break;
 
		  }
 
	  }
 
	  
 
    var add = createInlineAddButton(target_tr);
 
    // get the comment div
 
    var comment_block = YUD.getElementsByClassName('comment',null,last_node)[0];
 
    // attach add button
 
    YUD.insertAfter(add,comment_block);	
 
}
 

	
 
/**
 
 * Places the inline comment into the changeset block in proper line position
 
 */
 
var placeInline = function(target_container,lineno,html){
 
	  var lineid = "{0}_{1}".format(target_container,lineno);
 
	  var target_line = YUD.get(lineid);
 
	  var comment = new YAHOO.util.Element(tableTr('inline-comments',html))
 
	  
 
	  // check if there are comments already !
 
	  var parent = target_line.parentNode;
 
	  var root_parent = parent;
 
	  while (1){
 
		  var n = parent.nextElementSibling;
 
		  // next element are comments !
 
		  if(YUD.hasClass(n,'inline-comments')){
 
			  parent = n;
 
		  }
 
		  else{
 
			  break;
 
		  }
 
	  }
 
	  // put in the comment at the bottom
 
	  YUD.insertAfter(comment,parent);
 
	  
 
	  // scan nodes, and attach add button to last one
 
      placeAddButton(root_parent);
 

	
 
	  return target_line;
 
}
 

	
 
/**
 
 * make a single inline comment and place it inside
 
 */
 
var renderInlineComment = function(json_data){
 
    try{
 
	  var html =  json_data['rendered_text'];
 
	  var lineno = json_data['line_no'];
 
	  var target_id = json_data['target_id'];
 
	  placeInline(target_id, lineno, html);
 

	
 
    }catch(e){
 
  	  console.log(e);
 
    }
 
}
 

	
 
/**
 
 * Iterates over all the inlines, and places them inside proper blocks of data
 
 */
 
var renderInlineComments = function(file_comments){
 
	for (f in file_comments){
 
        // holding all comments for a FILE
 
		var box = file_comments[f];
 

	
 
		var target_id = YUD.getAttribute(box,'target_id');
 
		// actually comments with line numbers
 
        var comments = box.children;
rhodecode/templates/index_base.html
 
<%page args="parent" />
 
    <div class="box">
 
        <!-- box / title -->
 
        <div class="title">
 
            <h5>
 
            <input class="q_filter_box" id="q_filter" size="15" type="text" name="filter" value="${_('quick filter...')}"/> ${parent.breadcrumbs()} <span id="repo_count">0</span> ${_('repositories')}
 
            </h5>
 
            %if c.rhodecode_user.username != 'default':
 
                %if h.HasPermissionAny('hg.admin','hg.create.repository')():
 
                <ul class="links">
 
                  <li>
 
                  %if c.group:
 
                    <span>${h.link_to(_('ADD REPOSITORY'),h.url('admin_settings_create_repository',parent_group=c.group.group_id))}</span>
 
                  %else:
 
                    <span>${h.link_to(_('ADD REPOSITORY'),h.url('admin_settings_create_repository'))}</span>
 
                  %endif
 
                  </li>
 
                </ul>
 
                %endif
 
            %endif
 
        </div>
 
        <!-- end box / title -->
 
        <div class="table">
 
           % if c.groups:
 
            <div id='groups_list_wrap' class="yui-skin-sam">
 
              <table id="groups_list">
 
                  <thead>
 
                      <tr>
 
                          <th class="left"><a href="#">${_('Group name')}</a></th>
 
                          <th class="left"><a href="#">${_('Description')}</a></th>
 
                          ##<th class="left"><a href="#">${_('Number of repositories')}</a></th>
 
                      </tr>
 
                  </thead>
 

	
 
                  ## REPO GROUPS
 
                  % for gr in c.groups:
 
                    <tr>
 
                        <td>
 
                            <div style="white-space: nowrap">
 
                            <img class="icon" alt="${_('Repositories group')}" src="${h.url('/images/icons/database_link.png')}"/>
 
                            ${h.link_to(gr.name,url('repos_group_home',group_name=gr.group_name))}
 
                            </div>
 
                        </td>
 
                        %if c.visual.stylify_metatags:
 
-                            <td>${h.desc_stylize(gr.group_description)}</td>
 
+                            <td>${h.urlify_text(h.desc_stylize(gr.group_description))}</td>
 
                        %else:
 
                            <td>${gr.group_description}</td>
 
                        %endif
 
                        ## this is commented out since for multi-nested repos it can be HEAVY
 
                        ## in the number of executed queries during traversal; uncomment at will
 
                        ##<td><b>${gr.repositories_recursive_count}</b></td>
 
                    </tr>
 
                  % endfor
 

	
 
              </table>
 
            </div>
 
            <div style="height: 20px"></div>
 
            % endif
 
            <div id="welcome" style="display:none;text-align:center">
 
                <h1><a href="${h.url('home')}">${c.rhodecode_name} ${c.rhodecode_version}</a></h1>
 
            </div>
 
            <div id='repos_list_wrap' class="yui-skin-sam">
 
            <%cnt=0%>
 
            <%namespace name="dt" file="/data_table/_dt_elements.html"/>
 

	
 
            <table id="repos_list">
 
            <thead>
 
                <tr>
 
                    <th class="left"></th>
 
                    <th class="left">${_('Name')}</th>
 
                    <th class="left">${_('Description')}</th>
 
                    <th class="left">${_('Last change')}</th>
 
                    <th class="left">${_('Tip')}</th>
 
                    <th class="left">${_('Owner')}</th>
 
                    <th class="left">${_('RSS')}</th>
 
                    <th class="left">${_('Atom')}</th>
 
                </tr>
 
            </thead>
 
            <tbody>
 
            %for cnt,repo in enumerate(c.repos_list):
 
                <tr class="parity${(cnt+1)%2}">
 
                    ##QUICK MENU
 
                    <td class="quick_repo_menu">
 
                      ${dt.quick_menu(repo['name'])}
 
                    </td>
 
                    ##REPO NAME AND ICONS
 
                    <td class="reponame">
 
                      ${dt.repo_name(repo['name'],repo['dbrepo']['repo_type'],repo['dbrepo']['private'],repo['dbrepo_fork'].get('repo_name'),pageargs.get('short_repo_names'))}
 
                    </td>
 
                    ##DESCRIPTION
 
                    <td><span class="tooltip" title="${h.tooltip(repo['description'])}">
 
                       %if c.visual.stylify_metatags:
 
                       ${h.urlify_text(h.desc_stylize(h.truncate(repo['description'],60)))}</span>
 
                       %else:
 
                       ${h.truncate(repo['description'],60)}</span>
 
                       %endif
 
                    </td>
 
                    ##LAST CHANGE DATE
 
                    <td>
 
                      <span class="tooltip" date="${repo['last_change']}" title="${h.tooltip(h.fmt_date(repo['last_change']))}">${h.age(repo['last_change'])}</span>
 
                    </td>
 
                    ##LAST REVISION
 
                    <td>
 
                        ${dt.revision(repo['name'],repo['rev'],repo['tip'],repo['author'],repo['last_msg'])}
 
                    </td>
 
                    ##
 
                    <td title="${repo['contact']}">${h.person(repo['contact'])}</td>
 
                    <td>
 
                      %if c.rhodecode_user.username != 'default':
 
                        <a title="${_('Subscribe to %s rss feed')%repo['name']}" class="rss_icon"  href="${h.url('rss_feed_home',repo_name=repo['name'],api_key=c.rhodecode_user.api_key)}"></a>
 
                      %else:
 
                        <a title="${_('Subscribe to %s rss feed')%repo['name']}" class="rss_icon"  href="${h.url('rss_feed_home',repo_name=repo['name'])}"></a>
 
                      %endif:
 
                    </td>
 
                    <td>
 
                      %if c.rhodecode_user.username != 'default':
 
                        <a title="${_('Subscribe to %s atom feed')%repo['name']}"  class="atom_icon" href="${h.url('atom_feed_home',repo_name=repo['name'],api_key=c.rhodecode_user.api_key)}"></a>
 
                      %else:
 
                        <a title="${_('Subscribe to %s atom feed')%repo['name']}"  class="atom_icon" href="${h.url('atom_feed_home',repo_name=repo['name'])}"></a>
 
                      %endif:
 
                    </td>
 
                </tr>
 
            %endfor
 
            </tbody>
 
            </table>
 
            </div>
 
        </div>
 
    </div>
 
    <script>
 
      YUD.get('repo_count').innerHTML = ${cnt+1 if cnt else 0};
 
      var func = function(node){
 
          return node.parentNode.parentNode.parentNode.parentNode;
 
      }
 

	
 
      var sort_by = "name";
 
      var sort_dir = "asc";
 

	
 
      // groups table sorting
 
      var myColumnDefs = [
 
          {key:"name",label:"${_('Group Name')}",sortable:true,
 
              sortOptions: { sortFunction: groupNameSort }},
 
          {key:"desc",label:"${_('Description')}",sortable:true},
 
      ];
 

	
 
      var myDataSource = new YAHOO.util.DataSource(YUD.get("groups_list"));
 

	
 
      myDataSource.responseType = YAHOO.util.DataSource.TYPE_HTMLTABLE;
 
      myDataSource.responseSchema = {
 
          fields: [
 
              {key:"name"},
 
              {key:"desc"},
 
          ]
 
      };
 

	
 
      var myDataTable = new YAHOO.widget.DataTable("groups_list_wrap", myColumnDefs, myDataSource,
 
              {
 
               sortedBy:{key:"name",dir:"asc"},
 
               MSG_SORTASC:"${_('Click to sort ascending')}",
 
               MSG_SORTDESC:"${_('Click to sort descending')}"
 
              }
 
      );
 

	
 
      // main table sorting
 
      var myColumnDefs = [
 
          {key:"menu",label:"",sortable:false,className:"quick_repo_menu hidden"},
 
          {key:"name",label:"${_('Name')}",sortable:true,
 
              sortOptions: { sortFunction: nameSort }},
 
          {key:"desc",label:"${_('Description')}",sortable:true},
 
          {key:"last_change",label:"${_('Last Change')}",sortable:true,
 
              sortOptions: { sortFunction: ageSort }},
 
          {key:"tip",label:"${_('Tip')}",sortable:true,
 
        	  sortOptions: { sortFunction: revisionSort }},
 
          {key:"owner",label:"${_('Owner')}",sortable:true},
 
          {key:"rss",label:"",sortable:false},
 
          {key:"atom",label:"",sortable:false},
 
      ];
 

	
 
      var myDataSource = new YAHOO.util.DataSource(YUD.get("repos_list"));
 

	
 
      myDataSource.responseType = YAHOO.util.DataSource.TYPE_HTMLTABLE;
 

	
 
      myDataSource.responseSchema = {
 
          fields: [
 
              {key:"menu"},
 
              {key:"name"},
 
              {key:"desc"},
 
              {key:"last_change"},
 
              {key:"tip"},
 
              {key:"owner"},
 
              {key:"rss"},
 
              {key:"atom"},
 
          ]
 
      };
 

	
 
      var myDataTable = new YAHOO.widget.DataTable("repos_list_wrap", myColumnDefs, myDataSource,
 
              {
 
               sortedBy:{key:sort_by,dir:sort_dir},
 
               MSG_SORTASC:"${_('Click to sort ascending')}",
 
               MSG_SORTDESC:"${_('Click to sort descending')}",
 
               MSG_EMPTY:"${_('No records found.')}",
 
               MSG_ERROR:"${_('Data error.')}",
 
               MSG_LOADING:"${_('Loading...')}",
 
              }
 
      );
 
      myDataTable.subscribe('postRenderEvent',function(oArgs) {
 
          tooltip_activate();
 
          quick_repo_menu();
 
          q_filter('q_filter',YUQ('div.table tr td a.repo_name'),func);
 
      });
 

	
 
    </script>
rhodecode/tests/test_libs.py
 
# -*- coding: utf-8 -*-
 
"""
 
    rhodecode.tests.test_libs
 
    ~~~~~~~~~~~~~~~~~~~~~~~~~
 

	
 

	
 
    Package for testing various lib/helper functions in rhodecode
 

	
 
    :created_on: Jun 9, 2011
 
    :copyright: (C) 2011-2012 Marcin Kuzminski <marcin@python-works.com>
 
    :license: GPLv3, see COPYING for more details.
 
"""
 
# This program is free software: you can redistribute it and/or modify
 
# it under the terms of the GNU General Public License as published by
 
# the Free Software Foundation, either version 3 of the License, or
 
# (at your option) any later version.
 
#
 
# This program is distributed in the hope that it will be useful,
 
# but WITHOUT ANY WARRANTY; without even the implied warranty of
 
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 
# GNU General Public License for more details.
 
#
 
# You should have received a copy of the GNU General Public License
 
# along with this program.  If not, see <http://www.gnu.org/licenses/>.
 

	
 
from __future__ import with_statement
 
import unittest
 
import datetime
 
import hashlib
 
import mock
 
from rhodecode.tests import *
 

	
 
proto = 'http'
 
TEST_URLS = [
 
    ('%s://127.0.0.1' % proto, ['%s://' % proto, '127.0.0.1'],
 
     '%s://127.0.0.1' % proto),
 
    ('%s://marcink@127.0.0.1' % proto, ['%s://' % proto, '127.0.0.1'],
 
     '%s://127.0.0.1' % proto),
 
    ('%s://marcink:pass@127.0.0.1' % proto, ['%s://' % proto, '127.0.0.1'],
 
     '%s://127.0.0.1' % proto),
 
    ('%s://127.0.0.1:8080' % proto, ['%s://' % proto, '127.0.0.1', '8080'],
 
     '%s://127.0.0.1:8080' % proto),
 
    ('%s://domain.org' % proto, ['%s://' % proto, 'domain.org'],
 
     '%s://domain.org' % proto),
 
    ('%s://user:pass@domain.org:8080' % proto, ['%s://' % proto, 'domain.org',
 
                                                '8080'],
 
     '%s://domain.org:8080' % proto),
 
]
 

	
 
proto = 'https'
 
TEST_URLS += [
 
    ('%s://127.0.0.1' % proto, ['%s://' % proto, '127.0.0.1'],
 
     '%s://127.0.0.1' % proto),
 
    ('%s://marcink@127.0.0.1' % proto, ['%s://' % proto, '127.0.0.1'],
 
     '%s://127.0.0.1' % proto),
 
    ('%s://marcink:pass@127.0.0.1' % proto, ['%s://' % proto, '127.0.0.1'],
 
     '%s://127.0.0.1' % proto),
 
    ('%s://127.0.0.1:8080' % proto, ['%s://' % proto, '127.0.0.1', '8080'],
 
     '%s://127.0.0.1:8080' % proto),
 
    ('%s://domain.org' % proto, ['%s://' % proto, 'domain.org'],
 
     '%s://domain.org' % proto),
 
    ('%s://user:pass@domain.org:8080' % proto, ['%s://' % proto, 'domain.org',
 
                                                '8080'],
 
     '%s://domain.org:8080' % proto),
 
]
 

	
 

	
 
class TestLibs(unittest.TestCase):
 

	
 
    def test_uri_filter(self):
 
        from rhodecode.lib.utils2 import uri_filter
 

	
 
        for url in TEST_URLS:
 
            self.assertEqual(uri_filter(url[0]), url[1])
 

	
 
    def test_credentials_filter(self):
 
        from rhodecode.lib.utils2 import credentials_filter
 

	
 
        for url in TEST_URLS:
 
            self.assertEqual(credentials_filter(url[0]), url[2])
 

	
 
    def test_str2bool(self):
 
        from rhodecode.lib.utils2 import str2bool
 
        test_cases = [
 
            ('t', True),
 
            ('true', True),
 
            ('y', True),
 
            ('yes', True),
 
            ('on', True),
 
            ('1', True),
 
            ('Y', True),
 
            ('yeS', True),
 
            ('Y', True),
 
            ('TRUE', True),
 
            ('T', True),
 
            ('False', False),
 
            ('F', False),
 
            ('FALSE', False),
 
            ('0', False),
 
            ('-1', False),
 
            ('', False), ]
 

	
 
        for case in test_cases:
 
            self.assertEqual(str2bool(case[0]), case[1])
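
For reference, a minimal sketch that satisfies the truth table above; it is an illustration only, not the actual rhodecode.lib.utils2.str2bool implementation:

    # Known truthy strings map to True; everything else (including '', '0',
    # '-1' and 'False') maps to False, matching the cases exercised above.
    def str2bool_sketch(s):
        return str(s).strip().lower() in ('t', 'true', 'y', 'yes', 'on', '1')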
 

	
 
    def test_mention_extractor(self):
 
        from rhodecode.lib.utils2 import extract_mentioned_users
 
        sample = (
 
            "@first hi there @marcink here's my email marcin@email.com "
 
            "@lukaszb check @one_more22 it pls @ ttwelve @D[] @one@two@three "
 
            "@MARCIN    @maRCiN @2one_more22 @john please see this http://org.pl "
 
            "@marian.user just do it @marco-polo and next extract @marco_polo "
 
            "user.dot  hej ! not-needed maril@domain.org"
 
        )
 

	
 
        s = sorted([
 
        'first', 'marcink', 'lukaszb', 'one_more22', 'MARCIN', 'maRCiN', 'john',
 
        'marian.user', 'marco-polo', 'marco_polo'
 
        ], key=lambda k: k.lower())
 
        self.assertEqual(s, extract_mentioned_users(sample))
 

	
 
    def test_age(self):
 
        import calendar
 
        from rhodecode.lib.utils2 import age
 
        n = datetime.datetime.now()
 
        delt = lambda *args, **kwargs: datetime.timedelta(*args, **kwargs)
 
        self.assertEqual(age(n), u'just now')
 
        self.assertEqual(age(n - delt(seconds=1)), u'1 second ago')
 
        self.assertEqual(age(n - delt(seconds=60 * 2)), u'2 minutes ago')
 
        self.assertEqual(age(n - delt(hours=1)), u'1 hour ago')
 
        self.assertEqual(age(n - delt(hours=24)), u'1 day ago')
 
        self.assertEqual(age(n - delt(hours=24 * 5)), u'5 days ago')
 
        self.assertEqual(age(n - delt(hours=24 * (calendar.mdays[n.month-1] + 2))),
 
                         u'1 month and 2 days ago')
 
        self.assertEqual(age(n - delt(hours=24 * 400)), u'1 year and 1 month ago')
 

	
 
    def test_tag_exctrator(self):
 
        sample = (
 
            "hello pta[tag] gog [[]] [[] sda ero[or]d [me =>>< sa]"
 
            "[requires] [stale] [see<>=>] [see => http://url.com]"
 
            "[requires => url] [lang => python] [just a tag]"
 
            "[,d] [ => ULR ] [obsolete] [desc]]"
 
        )
 
        from rhodecode.lib.helpers import desc_stylize
 
        res = desc_stylize(sample)
 
        self.assertTrue('<div class="metatag" tag="tag">tag</div>' in res)
 
        self.assertTrue('<div class="metatag" tag="obsolete">obsolete</div>' in res)
 
        self.assertTrue('<div class="metatag" tag="stale">stale</div>' in res)
 
        self.assertTrue('<div class="metatag" tag="lang">python</div>' in res)
 
        self.assertTrue('<div class="metatag" tag="requires">requires =&gt; <a href="/url">url</a></div>' in res)
 
        self.assertTrue('<div class="metatag" tag="tag">tag</div>' in res)
 

	
 
    def test_alternative_gravatar(self):
 
        from rhodecode.lib.helpers import gravatar_url
 
        _md5 = lambda s: hashlib.md5(s).hexdigest()
 

	
 
        def fake_conf(**kwargs):
 
            from pylons import config
 
            config['app_conf'] = {}
 
            config['app_conf']['use_gravatar'] = True
 
            config['app_conf'].update(kwargs)
 
            return config
 
        fake = fake_conf(alternative_gravatar_url='http://test.com/{email}')
 
        with mock.patch('pylons.config', fake):
 
            grav = gravatar_url(email_address='test@foo.com', size=24)
 
            assert grav == 'http://test.com/test@foo.com'
 

	
 
        fake = fake_conf(alternative_gravatar_url='http://test.com/{md5email}')
 
        with mock.patch('pylons.config', fake):
 
            em = 'test@foo.com'
 
            grav = gravatar_url(email_address=em, size=24)
 
            assert grav == 'http://test.com/%s' % (_md5(em))
 

	
 
        fake = fake_conf(alternative_gravatar_url='http://test.com/{md5email}/{size}')
 
        with mock.patch('pylons.config', fake):
 
            em = 'test@foo.com'
 
            grav = gravatar_url(email_address=em, size=24)
 
            assert grav == 'http://test.com/%s/%s' % (_md5(em), 24)
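
A standalone sketch of the placeholder substitution these assertions exercise ({email}, {md5email}, {size} in alternative_gravatar_url); the template string is the test's made-up value and this is not the actual gravatar_url() code:

    import hashlib

    tmpl = 'http://test.com/{md5email}/{size}'
    email = 'test@foo.com'
    md5email = hashlib.md5(email.encode('utf-8')).hexdigest()
    url = (tmpl.replace('{email}', email)
               .replace('{md5email}', md5email)
               .replace('{size}', '24'))
    assert url == 'http://test.com/%s/24' % md5email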
0 comments (0 inline, 0 general)