Changeset - c759acdc463a
[Not reviewed]
Branch: default
Mads Kiilerich <madski@unity3d.com> - 2016-09-06 00:51:18
helpers: simplify urlify_issues - urlify_commit and thus urlify_issues are always passed a repository
1 file changed with 2 insertions and 3 deletions:
0 comments (0 inline, 0 general)
kallithea/lib/helpers.py
 
@@ -974,538 +974,537 @@ class Page(_Page):
 
        for thispage in xrange(leftmost_page, rightmost_page + 1):
            # Highlight the current page number and do not use a link
            text_ = str(thispage)
            if thispage == self.page:
                # Wrap in a SPAN tag if nolink_attr is set
                if self.curpage_attr:
                    text_ = HTML.span(c=text_, **self.curpage_attr)
                nav_items.append(text_)
            # Otherwise create just a link to that page
            else:
                nav_items.append(self._pagerlink(thispage, text_))

        # Insert dots if there are pages between the displayed
        # page numbers and the end of the page range
        if self.last_page - rightmost_page > 1:
            text_ = '..'
            # Wrap in a SPAN tag if nolink_attr is set
            if self.dotdot_attr:
                text_ = HTML.span(c=text_, **self.dotdot_attr)
            nav_items.append(text_)

        # Create a link to the very last page (unless we are on the last
        # page or there would be no need to insert '..' spacers)
        if self.page != self.last_page and rightmost_page < self.last_page:
            nav_items.append(self._pagerlink(self.last_page, self.last_page))

        #_page_link = url.current()
        #nav_items.append(literal('<link rel="prerender" href="%s?page=%s">' % (_page_link, str(int(self.page)+1))))
        #nav_items.append(literal('<link rel="prefetch" href="%s?page=%s">' % (_page_link, str(int(self.page)+1))))
        return self.separator.join(nav_items)

    def pager(self, format='~2~', page_param='page', partial_param='partial',
        show_if_single_page=False, separator=' ', onclick=None,
        symbol_first='<<', symbol_last='>>',
        symbol_previous='<', symbol_next='>',
        link_attr=None,
        curpage_attr=None,
        dotdot_attr=None, **kwargs):
        self.curpage_attr = curpage_attr or {'class': 'pager_curpage'}
        self.separator = separator
        self.pager_kwargs = kwargs
        self.page_param = page_param
        self.partial_param = partial_param
        self.onclick = onclick
        self.link_attr = link_attr or {'class': 'pager_link', 'rel': 'prerender'}
        self.dotdot_attr = dotdot_attr or {'class': 'pager_dotdot'}

        # Don't show navigator if there is no more than one page
        if self.page_count == 0 or (self.page_count == 1 and not show_if_single_page):
            return ''

        from string import Template
        # Replace ~...~ in token format by range of pages
        result = re.sub(r'~(\d+)~', self._range, format)

        # Interpolate '%' variables
        result = Template(result).safe_substitute({
            'first_page': self.first_page,
            'last_page': self.last_page,
            'page': self.page,
            'page_count': self.page_count,
            'items_per_page': self.items_per_page,
            'first_item': self.first_item,
            'last_item': self.last_item,
            'item_count': self.item_count,
            'link_first': self.page > self.first_page and \
                    self._pagerlink(self.first_page, symbol_first) or '',
            'link_last': self.page < self.last_page and \
                    self._pagerlink(self.last_page, symbol_last) or '',
            'link_previous': self.previous_page and \
                    self._pagerlink(self.previous_page, symbol_previous) \
                    or HTML.span(symbol_previous, class_="yui-pg-previous"),
            'link_next': self.next_page and \
                    self._pagerlink(self.next_page, symbol_next) \
                    or HTML.span(symbol_next, class_="yui-pg-next")
        })

        return literal(result)
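
# Illustrative sketch: how a pager format string is expanded in two passes,
# using only the standard library. The radius token '~2~' is replaced first
# (a hard-coded stand-in for Page._range() below), then the '$' variables are
# interpolated; all values here are made up for the demo.
import re
from string import Template

fmt = '$link_previous ~2~ $link_next'
fmt = re.sub(r'~(\d+)~', lambda m: '1 .. 4 5 [6] 7 8', fmt)
print(Template(fmt).safe_substitute({'link_previous': '<', 'link_next': '>'}))
# -> '< 1 .. 4 5 [6] 7 8 >'
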
#==============================================================================
# REPO PAGER, PAGER FOR REPOSITORY
#==============================================================================
class RepoPage(Page):

    def __init__(self, collection, page=1, items_per_page=20,
                 item_count=None, **kwargs):

        """Create a "RepoPage" instance - a special pager for paging a
        repository.
        """
        # TODO: call baseclass __init__
        self._url_generator = kwargs.pop('url', url.current)

        # Save the kwargs class-wide so they can be used in the pager() method
        self.kwargs = kwargs

        # Save a reference to the collection
        self.original_collection = collection

        self.collection = collection

        # The self.page is the number of the current page.
        # The first page has the number 1!
        try:
            self.page = int(page)  # make it int() if we get it as a string
        except (ValueError, TypeError):
            self.page = 1

        self.items_per_page = items_per_page

        # Unless the user tells us how many items the collection has
        # we calculate that ourselves.
        if item_count is not None:
            self.item_count = item_count
        else:
            self.item_count = len(self.collection)

        # Compute the number of the first and last available page
        if self.item_count > 0:
            self.first_page = 1
            self.page_count = int(math.ceil(float(self.item_count) /
                                            self.items_per_page))
            self.last_page = self.first_page + self.page_count - 1

            # Make sure that the requested page number is in the range of
            # valid pages
            if self.page > self.last_page:
                self.page = self.last_page
            elif self.page < self.first_page:
                self.page = self.first_page

            # Note: the number of items on this page can be less than
            #       items_per_page if the last page is not full
            self.first_item = max(0, (self.item_count) - (self.page *
                                                          items_per_page))
            self.last_item = ((self.item_count - 1) - items_per_page *
                              (self.page - 1))

            self.items = list(self.collection[self.first_item:self.last_item + 1])

            # Links to previous and next page
            if self.page > self.first_page:
                self.previous_page = self.page - 1
            else:
                self.previous_page = None

            if self.page < self.last_page:
                self.next_page = self.page + 1
            else:
                self.next_page = None

        # No items available
        else:
            self.first_page = None
            self.page_count = 0
            self.last_page = None
            self.first_item = None
            self.last_item = None
            self.previous_page = None
            self.next_page = None
            self.items = []

        # This is a subclass of the 'list' type. Initialise the list now.
        list.__init__(self, reversed(self.items))
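
# Illustrative sketch of the slicing above, with made-up numbers: RepoPage
# slices from the end of the collection and then reverses, so page 1 shows
# the newest items first.
#   item_count=45, items_per_page=20, page=1 -> first_item=25, last_item=44
#   item_count=45, items_per_page=20, page=3 -> first_item=0,  last_item=4
collection = list(range(45))      # stand-in for a repo's changesets, oldest first
page, items_per_page = 1, 20
item_count = len(collection)
first_item = max(0, item_count - page * items_per_page)
last_item = (item_count - 1) - items_per_page * (page - 1)
print(list(reversed(collection[first_item:last_item + 1]))[:3])  # -> [44, 43, 42]
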
def changed_tooltip(nodes):
    """
    Generates an HTML string for the changed nodes on a changeset page.
    It limits the output to 30 entries.

    :param nodes: LazyNodesGenerator
    """
    if nodes:
        pref = ': <br/> '
        suf = ''
        if len(nodes) > 30:
            suf = '<br/>' + _(' and %s more') % (len(nodes) - 30)
        return literal(pref + '<br/> '.join([safe_unicode(x.path)
                                             for x in nodes[:30]]) + suf)
    else:
        return ': ' + _('No files')


def repo_link(groups_and_repos):
    """
    Makes a breadcrumbs link to a repo within a group,
    joining the groups with &raquo; to create a fancy link.

    ex::
        group >> subgroup >> repo

    :param groups_and_repos:
    :param last_url:
    """
    groups, just_name, repo_name = groups_and_repos
    last_url = url('summary_home', repo_name=repo_name)
    last_link = link_to(just_name, last_url)

    def make_link(group):
        return link_to(group.name,
                       url('repos_group_home', group_name=group.group_name))
    return literal(' &raquo; '.join(map(make_link, groups) + ['<span>%s</span>' % last_link]))


def fancy_file_stats(stats):
    """
    Displays a fancy two-colored bar for the number of added/deleted
    lines of code in a file.

    :param stats: two element list of added/deleted lines of code
    """
    from kallithea.lib.diffs import NEW_FILENODE, DEL_FILENODE, \
        MOD_FILENODE, RENAMED_FILENODE, CHMOD_FILENODE, BIN_FILENODE

    def cgen(l_type, a_v, d_v):
        mapping = {'tr': 'top-right-rounded-corner-mid',
                   'tl': 'top-left-rounded-corner-mid',
                   'br': 'bottom-right-rounded-corner-mid',
                   'bl': 'bottom-left-rounded-corner-mid'}
        map_getter = lambda x: mapping[x]

        if l_type == 'a' and d_v:
            #case when added and deleted are present
            return ' '.join(map(map_getter, ['tl', 'bl']))

        if l_type == 'a' and not d_v:
            return ' '.join(map(map_getter, ['tr', 'br', 'tl', 'bl']))

        if l_type == 'd' and a_v:
            return ' '.join(map(map_getter, ['tr', 'br']))

        if l_type == 'd' and not a_v:
            return ' '.join(map(map_getter, ['tr', 'br', 'tl', 'bl']))

    a, d = stats['added'], stats['deleted']
    width = 100

    if stats['binary']:
        #binary mode
        lbl = ''
        bin_op = 1

        if BIN_FILENODE in stats['ops']:
            lbl = 'bin+'

        if NEW_FILENODE in stats['ops']:
            lbl += _('new file')
            bin_op = NEW_FILENODE
        elif MOD_FILENODE in stats['ops']:
            lbl += _('mod')
            bin_op = MOD_FILENODE
        elif DEL_FILENODE in stats['ops']:
            lbl += _('del')
            bin_op = DEL_FILENODE
        elif RENAMED_FILENODE in stats['ops']:
            lbl += _('rename')
            bin_op = RENAMED_FILENODE

        #chmod can go with other operations
        if CHMOD_FILENODE in stats['ops']:
            _org_lbl = _('chmod')
            lbl += _org_lbl if lbl.endswith('+') else '+%s' % _org_lbl

        #import ipdb;ipdb.set_trace()
        b_d = '<div class="bin bin%s %s" style="width:100%%">%s</div>' % (bin_op, cgen('a', a_v='', d_v=0), lbl)
        b_a = '<div class="bin bin1" style="width:0%"></div>'
        return literal('<div style="width:%spx">%s%s</div>' % (width, b_a, b_d))

    t = stats['added'] + stats['deleted']
    unit = float(width) / (t or 1)

    # needs > 9% of width to be visible or 0 to be hidden
    a_p = max(9, unit * a) if a > 0 else 0
    d_p = max(9, unit * d) if d > 0 else 0
    p_sum = a_p + d_p

    if p_sum > width:
        #adjust the percentage to be == 100% since we adjusted to 9
        if a_p > d_p:
            a_p = a_p - (p_sum - width)
        else:
            d_p = d_p - (p_sum - width)

    a_v = a if a > 0 else ''
    d_v = d if d > 0 else ''

    d_a = '<div class="added %s" style="width:%s%%">%s</div>' % (
        cgen('a', a_v, d_v), a_p, a_v
    )
    d_d = '<div class="deleted %s" style="width:%s%%">%s</div>' % (
        cgen('d', a_v, d_v), d_p, d_v
    )
    return literal('<div style="width:%spx">%s%s</div>' % (width, d_a, d_d))
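
# Illustrative sketch of the width math above, with made-up numbers: each
# non-empty bar gets at least 9% of the width so tiny changes stay visible,
# and the larger bar is then shrunk so the two still sum to 100%.
width, a, d = 100, 1, 99
unit = float(width) / ((a + d) or 1)
a_p = max(9, unit * a) if a > 0 else 0    # 9 (bumped up from 1)
d_p = max(9, unit * d) if d > 0 else 0    # 99.0
if a_p + d_p > width:
    if a_p > d_p:
        a_p -= (a_p + d_p) - width
    else:
        d_p -= (a_p + d_p) - width
print(a_p, d_p)  # -> 9 91.0
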
def _urlify_text_replace(match_obj):
    url_full = match_obj.group(1)
    return '<a href="%(url)s">%(url)s</a>' % {'url': url_full}


def _urlify_text(s):
    """
    Extract urls from text and make html links out of them
    """
    return url_re.sub(_urlify_text_replace, s)

def urlify_text(s, truncate=None, stylize=False, truncatef=truncate):
    """
    Extract urls from text and make literal html links out of them
    """
    if truncate is not None:
        s = truncatef(s, truncate, whole_word=True)
    s = html_escape(s)
    if stylize:
        s = desc_stylize(s)
    s = _urlify_text(s)
    return literal(s)


def _urlify_changeset_replace_f(repo_name):
    from pylons import url  # doh, we need to re-import url to mock it later
    def urlify_changeset_replace(match_obj):
        rev = match_obj.group(0)
        return '<a class="revision-link" href="%(url)s">%(rev)s</a>' % {
         'url': url('changeset_home', repo_name=repo_name, revision=rev),
         'rev': rev,
        }
    return urlify_changeset_replace


urilify_changeset_re = r'(?:^|(?<=[\s(),]))([0-9a-fA-F]{12,40})(?=$|\s|[.,:()])'

def urlify_changesets(text_, repo_name):
    """
    Extract revision ids from the text and make links out of them
    """
    urlify_changeset_replace = _urlify_changeset_replace_f(repo_name)
    return re.sub(urilify_changeset_re, urlify_changeset_replace, text_)
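
# Illustrative sketch: the regex above picks up bare 12-40 character hex
# strings (short or full changeset hashes) delimited by whitespace or simple
# punctuation; the replacement callback decides what each match becomes.
# The '[link:...]' output format is made up for the demo.
import re
demo_re = r'(?:^|(?<=[\s(),]))([0-9a-fA-F]{12,40})(?=$|\s|[.,:()])'
print(re.sub(demo_re, lambda m: '[link:%s]' % m.group(0),
             'fixed in 0123456789abcdef, see deadbeef'))
# -> 'fixed in [link:0123456789abcdef], see deadbeef'  ('deadbeef' is too short)
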
def linkify_others(t, l):
    """Add a default link to html with links.
    HTML doesn't allow nesting of links, so the outer link must be broken up
    into pieces to leave space for the other links.
    """
    urls = re.compile(r'(\<a.*?\<\/a\>)',)
    links = []
    for e in urls.split(t):
        if not urls.match(e):
            links.append('<a class="message-link" href="%s">%s</a>' % (l, e))
        else:
            links.append(e)

    return ''.join(links)
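
# Illustrative sketch: pieces that are already <a> links are kept as-is and
# everything in between is wrapped in the default link (the '#comment-1'
# target is made up), so links never end up nested.
import re
urls = re.compile(r'(\<a.*?\<\/a\>)')
msg = 'fixes <a href="/i/7">#7</a> properly'
print(''.join(p if urls.match(p) else '<a href="#comment-1">%s</a>' % p
              for p in urls.split(msg)))
# -> '<a href="#comment-1">fixes </a><a href="/i/7">#7</a><a href="#comment-1"> properly</a>'
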
def urlify_commit(text_, repo_name, link_=None):
    """
    Parses the given text message and makes the proper links.
    Issues are linked to the configured issue server. If link_ is provided,
    all other text will link there.
    """
    newtext = html_escape(text_)

    # urlify changesets - extract revisions and make links out of them
    newtext = urlify_changesets(newtext, repo_name)

    # extract http/https links and make them real urls
    newtext = _urlify_text(newtext)

    newtext = urlify_issues(newtext, repo_name, link_)

    return literal(newtext)
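
# Illustrative sketch of the intended use (assumes a configured Kallithea
# instance with a repository named 'myrepo' and an issue tracker set up):
#   urlify_commit('fixes #42 in 0123456789abcdef', 'myrepo')
# escapes the message, links the hash to the changeset in 'myrepo', turns
# plain http(s) URLs into links, and finally links '#42' to the configured
# issue tracker. With this changeset a repository name is always expected.
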
def _urlify_issues_replace_f(repo_name, ISSUE_SERVER_LNK, ISSUE_PREFIX):
    def urlify_issues_replace(match_obj):
        pref = ''
        if match_obj.group().startswith(' '):
            pref = ' '

        issue_id = ''.join(match_obj.groups())
        issue_url = ISSUE_SERVER_LNK.replace('{id}', issue_id)
-        if repo_name:
-            issue_url = issue_url.replace('{repo}', repo_name)
-            issue_url = issue_url.replace('{repo_name}', repo_name.split(URL_SEP)[-1])
+        issue_url = issue_url.replace('{repo}', repo_name)
+        issue_url = issue_url.replace('{repo_name}', repo_name.split(URL_SEP)[-1])

        return (
            '%(pref)s<a class="%(cls)s" href="%(url)s">'
            '%(issue-prefix)s%(id-repr)s'
            '</a>'
            ) % {
             'pref': pref,
             'cls': 'issue-tracker-link',
             'url': issue_url,
             'id-repr': issue_id,
             'issue-prefix': ISSUE_PREFIX,
             'serv': ISSUE_SERVER_LNK,
            }
    return urlify_issues_replace


def urlify_issues(newtext, repo_name, link_=None):
    from kallithea import CONFIG as conf

    # allow multiple issue servers to be used
    valid_indices = [
        x.group(1)
        for x in map(lambda x: re.match(r'issue_pat(.*)', x), conf.keys())
        if x and 'issue_server_link%s' % x.group(1) in conf
        and 'issue_prefix%s' % x.group(1) in conf
    ]

    if valid_indices:
        log.debug('found issue server suffixes `%s` during evaluation of: %s',
                  ','.join(valid_indices), newtext)

    for pattern_index in valid_indices:
        ISSUE_PATTERN = conf.get('issue_pat%s' % pattern_index)
        ISSUE_SERVER_LNK = conf.get('issue_server_link%s' % pattern_index)
        ISSUE_PREFIX = conf.get('issue_prefix%s' % pattern_index)

        log.debug('pattern suffix `%s` PAT:%s SERVER_LINK:%s PREFIX:%s',
                  pattern_index, ISSUE_PATTERN, ISSUE_SERVER_LNK,
                  ISSUE_PREFIX)

        URL_PAT = re.compile(ISSUE_PATTERN)

        urlify_issues_replace = _urlify_issues_replace_f(repo_name, ISSUE_SERVER_LNK, ISSUE_PREFIX)
        newtext = URL_PAT.sub(urlify_issues_replace, newtext)
        log.debug('processed prefix:`%s` => %s', pattern_index, newtext)

    # if we actually did something above
    if link_:
        # wrap the non-link parts in the final link => link_
        newtext = linkify_others(newtext, link_)
    return newtext
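
# Illustrative sketch (made-up ini settings) of how the config keys are used:
#   issue_pat         = #(\d+)
#   issue_server_link = https://issues.example.com/{repo}/issue/{id}
#   issue_prefix      = #
# For repo_name 'group/myrepo', a message containing '#42' would then link to
# https://issues.example.com/group/myrepo/issue/42 with the text '#42';
# '{repo_name}' would expand to just 'myrepo'. Additional trackers can be
# configured by suffixing the keys (issue_pat_bugs, issue_server_link_bugs,
# issue_prefix_bugs). Stripped of the config plumbing, each pass is a re.sub:
import re
print(re.sub(r'#(\d+)',
             lambda m: '<a href="https://issues.example.com/issue/%s">#%s</a>'
                       % (m.group(1), m.group(1)),
             'fixes #42'))
# -> 'fixes <a href="https://issues.example.com/issue/42">#42</a>'
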
def _mentions_replace(match_obj):
    return '<b>@%s</b>' % match_obj.group(1)


def render_w_mentions(source, repo_name=None):
    """
    Render plain text with revision hashes and issue references urlified
    and with @mention highlighting.
    """
    s = source.rstrip()
    s = safe_unicode(s)
    s = '\n'.join(s.splitlines())
    s = html_escape(s)
    # this sequence of html-ifications seems to be safe and non-conflicting
    # if the issues regexp is sane
    s = _urlify_text(s)
    if repo_name is not None:
        s = urlify_changesets(s, repo_name)
    s = urlify_issues(s, repo_name)
    s = MENTIONS_REGEX.sub(_mentions_replace, s)
    return literal('<div class="formatted-fixed">%s</div>' % s)
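
# Illustrative sketch of the @mention step only. MENTIONS_REGEX is defined
# elsewhere in this module; the simplified pattern below is an assumption
# made for the demo, not the real one.
import re
print(re.sub(r'(?:^|(?<=\s))@(\w+)', lambda m: '<b>@%s</b>' % m.group(1),
             'thanks @developer for the review'))
# -> 'thanks <b>@developer</b> for the review'
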
def short_ref(ref_type, ref_name):
    if ref_type == 'rev':
        return short_id(ref_name)
    return ref_name

def link_to_ref(repo_name, ref_type, ref_name, rev=None):
    """
    Return full markup for a href to changeset_home for a changeset.
    If ref_type is branch it will link to changelog.
    ref_name is shortened if ref_type is 'rev'.
    If rev is specified, show it too, explicitly linking to that revision.
    """
    txt = short_ref(ref_type, ref_name)
    if ref_type == 'branch':
        u = url('changelog_home', repo_name=repo_name, branch=ref_name)
    else:
        u = url('changeset_home', repo_name=repo_name, revision=ref_name)
    l = link_to(repo_name + '#' + txt, u)
    if rev and ref_type != 'rev':
        l = literal('%s (%s)' % (l, link_to(short_id(rev), url('changeset_home', repo_name=repo_name, revision=rev))))
    return l

def changeset_status(repo, revision):
    return ChangesetStatusModel().get_status(repo, revision)


def changeset_status_lbl(changeset_status):
    return ChangesetStatus.get_status_lbl(changeset_status)


def get_permission_name(key):
    return dict(Permission.PERMS).get(key)


def journal_filter_help():
    return _(textwrap.dedent('''
        Example filter terms:
            repository:vcs
            username:developer
            action:*push*
            ip:127.0.0.1
            date:20120101
            date:[20120101100000 TO 20120102]

        Generate wildcards using '*' character:
            "repository:vcs*" - search everything starting with 'vcs'
            "repository:*vcs*" - search for repository containing 'vcs'

        Optional AND / OR operators in queries
            "repository:vcs OR repository:test"
            "username:test AND repository:test*"
    '''))


def not_mapped_error(repo_name):
    flash(_('%s repository is not mapped to db perhaps'
            ' it was created or renamed from the filesystem'
            ' please run the application again'
            ' in order to rescan repositories') % repo_name, category='error')


def ip_range(ip_addr):
    from kallithea.model.db import UserIpMap
    s, e = UserIpMap._get_ip_range(ip_addr)
    return '%s - %s' % (s, e)


def form(url, method="post", **attrs):
    """Like webhelpers.html.tags.form but automatically using secure_form with
    authentication_token for POST. authentication_token is thus never leaked
    in the URL."""
    if method.lower() == 'get':
        return insecure_form(url, method=method, **attrs)
    # webhelpers will turn everything but GET into POST
    return secure_form(url, method=method, **attrs)
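
# Illustrative sketch of the intended use in a template (the route names are
# made up; 'h' is the conventional alias for this helpers module):
#   h.form(url('admin_settings'), method='post')  # secure_form: includes the
#                                                 # authentication_token field
#   h.form(url('repo_search'), method='get')      # plain form: no token, so
#                                                 # nothing leaks into the URL
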