Changeset 9203621cae03 (branch: default)
[Not reviewed]
Mads Kiilerich <mads@kiilerich.com> - 2019-12-28 01:08:48
Grafted from: bcf2dec5faa9
vcs: always return bytes from node.content

We would rather have the unicode conversions explicit.

Note: Py3 bytes doesn't have .startswith - replace that with a regexp.
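
To make the pattern concrete, here is a minimal, self-contained sketch (in Python) of the call-site convention this changeset applies: node.content always returns bytes, and callers that need text perform an explicit safe_unicode() conversion. The simplified safe_unicode and FileNode below are illustrative stand-ins for the purpose of this sketch, not Kallithea's actual implementations.

    # Illustrative stand-in for kallithea.lib.utils2.safe_unicode.
    def safe_unicode(s, encoding='utf-8'):
        """Decode bytes to text; pass non-bytes values through unchanged."""
        if isinstance(s, bytes):
            return s.decode(encoding, 'replace')
        return s

    # Illustrative stand-in for a vcs FileNode whose .content is always bytes.
    class FileNode(object):
        def __init__(self, raw_bytes):
            self._raw = raw_bytes

        @property
        def content(self):
            # No implicit decoding here - callers convert explicitly.
            return self._raw

    node = FileNode(b'readme text \xe2\x9c\x93')
    text = safe_unicode(node.content)   # explicit conversion at the call site
    print(text)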
18 files changed with 43 insertions and 51 deletions:
0 comments (0 inline, 0 general)
kallithea/controllers/admin/gists.py
 
@@ -89,176 +89,176 @@ class GistsController(BaseController):
 
        elif c.show_public and not c.show_private:
 
            gists = gists.filter(Gist.gist_type == Gist.GIST_PUBLIC) \
 
                             .filter(Gist.owner_id == request.authuser.user_id)
 

	
 
        # MY public+private
 
        elif c.show_private and c.show_public:
 
            gists = gists.filter(or_(Gist.gist_type == Gist.GIST_PUBLIC,
 
                                     Gist.gist_type == Gist.GIST_PRIVATE)) \
 
                             .filter(Gist.owner_id == request.authuser.user_id)
 

	
 
        # default show ALL public gists
 
        if not c.show_public and not c.show_private:
 
            gists = gists.filter(Gist.gist_type == Gist.GIST_PUBLIC)
 

	
 
        c.gists = gists
 
        p = safe_int(request.GET.get('page'), 1)
 
        c.gists_pager = Page(c.gists, page=p, items_per_page=10,
 
                             **url_params)
 
        return render('admin/gists/index.html')
 

	
 
    @LoginRequired()
 
    def create(self):
 
        self.__load_defaults()
 
        gist_form = GistForm([x[0] for x in c.lifetime_values])()
 
        try:
 
            form_result = gist_form.to_python(dict(request.POST))
 
            # TODO: multiple files support, from the form
 
            filename = form_result['filename'] or Gist.DEFAULT_FILENAME
 
            nodes = {
 
                filename: {
 
                    'content': form_result['content'],
 
                    'lexer': form_result['mimetype']  # None is autodetect
 
                }
 
            }
 
            _public = form_result['public']
 
            gist_type = Gist.GIST_PUBLIC if _public else Gist.GIST_PRIVATE
 
            gist = GistModel().create(
 
                description=form_result['description'],
 
                owner=request.authuser.user_id,
 
                ip_addr=request.ip_addr,
 
                gist_mapping=nodes,
 
                gist_type=gist_type,
 
                lifetime=form_result['lifetime']
 
            )
 
            Session().commit()
 
            new_gist_id = gist.gist_access_id
 
        except formencode.Invalid as errors:
 
            defaults = errors.value
 

	
 
            return formencode.htmlfill.render(
 
                render('admin/gists/new.html'),
 
                defaults=defaults,
 
                errors=errors.error_dict or {},
 
                prefix_error=False,
 
                encoding="UTF-8",
 
                force_defaults=False)
 

	
 
        except Exception as e:
 
            log.error(traceback.format_exc())
 
            h.flash(_('Error occurred during gist creation'), category='error')
 
            raise HTTPFound(location=url('new_gist'))
 
        raise HTTPFound(location=url('gist', gist_id=new_gist_id))
 

	
 
    @LoginRequired()
 
    def new(self, format='html'):
 
        self.__load_defaults()
 
        return render('admin/gists/new.html')
 

	
 
    @LoginRequired()
 
    def delete(self, gist_id):
 
        gist = GistModel().get_gist(gist_id)
 
        owner = gist.owner_id == request.authuser.user_id
 
        if h.HasPermissionAny('hg.admin')() or owner:
 
            GistModel().delete(gist)
 
            Session().commit()
 
            h.flash(_('Deleted gist %s') % gist.gist_access_id, category='success')
 
        else:
 
            raise HTTPForbidden()
 

	
 
        raise HTTPFound(location=url('gists'))
 

	
 
    @LoginRequired(allow_default_user=True)
 
    def show(self, gist_id, revision='tip', format='html', f_path=None):
 
        c.gist = Gist.get_or_404(gist_id)
 

	
 
        if c.gist.is_expired:
 
            log.error('Gist expired at %s',
 
                      time_to_datetime(c.gist.gist_expires))
 
            raise HTTPNotFound()
 
        try:
 
            c.file_changeset, c.files = GistModel().get_gist_files(gist_id,
 
                                                            revision=revision)
 
        except VCSError:
 
            log.error(traceback.format_exc())
 
            raise HTTPNotFound()
 
        if format == 'raw':
 
-            content = '\n\n'.join([f.content for f in c.files if (f_path is None or safe_unicode(f.path) == f_path)])
+            content = '\n\n'.join([safe_unicode(f.content) for f in c.files if (f_path is None or safe_unicode(f.path) == f_path)])
 
            response.content_type = 'text/plain'
 
            return content
 
        return render('admin/gists/show.html')
 

	
 
    @LoginRequired()
 
    def edit(self, gist_id, format='html'):
 
        c.gist = Gist.get_or_404(gist_id)
 

	
 
        if c.gist.is_expired:
 
            log.error('Gist expired at %s',
 
                      time_to_datetime(c.gist.gist_expires))
 
            raise HTTPNotFound()
 
        try:
 
            c.file_changeset, c.files = GistModel().get_gist_files(gist_id)
 
        except VCSError:
 
            log.error(traceback.format_exc())
 
            raise HTTPNotFound()
 

	
 
        self.__load_defaults(extra_values=('0', _('Unmodified')))
 
        rendered = render('admin/gists/edit.html')
 

	
 
        if request.POST:
 
            rpost = request.POST
 
            nodes = {}
 
            for org_filename, filename, mimetype, content in zip(
 
                                                    rpost.getall('org_files'),
 
                                                    rpost.getall('files'),
 
                                                    rpost.getall('mimetypes'),
 
                                                    rpost.getall('contents')):
 

	
 
                nodes[org_filename] = {
 
                    'org_filename': org_filename,
 
                    'filename': filename,
 
                    'content': content,
 
                    'lexer': mimetype,
 
                }
 
            try:
 
                GistModel().update(
 
                    gist=c.gist,
 
                    description=rpost['description'],
 
                    owner=c.gist.owner, # FIXME: request.authuser.user_id ?
 
                    ip_addr=request.ip_addr,
 
                    gist_mapping=nodes,
 
                    gist_type=c.gist.gist_type,
 
                    lifetime=rpost['lifetime']
 
                )
 

	
 
                Session().commit()
 
                h.flash(_('Successfully updated gist content'), category='success')
 
            except NodeNotChangedError:
 
                # raised if nothing was changed in repo itself. We anyway then
 
                # store only DB stuff for gist
 
                Session().commit()
 
                h.flash(_('Successfully updated gist data'), category='success')
 
            except Exception:
 
                log.error(traceback.format_exc())
 
                h.flash(_('Error occurred during update of gist %s') % gist_id,
 
                        category='error')
 

	
 
            raise HTTPFound(location=url('gist', gist_id=gist_id))
 

	
 
        return rendered
 

	
 
    @LoginRequired()
 
    @jsonify
 
    def check_revision(self, gist_id):
 
        c.gist = Gist.get_or_404(gist_id)
 
        last_rev = c.gist.scm_instance.get_changeset()
 
        success = True
 
        revision = request.POST.get('revision')
 

	
 
        # TODO: maybe move this to model ?
 
        if revision != last_rev.raw_id:
 
            log.error('Last revision %s is different than submitted %s',
 
                      revision, last_rev)
 
            # our gist has newer version than we
 
            success = False
 

	
 
        return {'success': success}
kallithea/controllers/compare.py
 
@@ -179,111 +179,111 @@ class CompareController(BaseRepoControll
 
    def compare(self, repo_name, org_ref_type, org_ref_name, other_ref_type, other_ref_name):
 
        org_ref_name = org_ref_name.strip()
 
        other_ref_name = other_ref_name.strip()
 

	
 
        # If merge is True:
 
        #   Show what org would get if merged with other:
 
        #   List changesets that are ancestors of other but not of org.
 
        #   New changesets in org is thus ignored.
 
        #   Diff will be from common ancestor, and merges of org to other will thus be ignored.
 
        # If merge is False:
 
        #   Make a raw diff from org to other, no matter if related or not.
 
        #   Changesets in one and not in the other will be ignored
 
        merge = bool(request.GET.get('merge'))
 
        # fulldiff disables cut_off_limit
 
        fulldiff = request.GET.get('fulldiff')
 
        # partial uses compare_cs.html template directly
 
        partial = request.environ.get('HTTP_X_PARTIAL_XHR')
 
        # is_ajax_preview puts hidden input field with changeset revisions
 
        c.is_ajax_preview = partial and request.GET.get('is_ajax_preview')
 
        # swap url for compare_diff page - never partial and never is_ajax_preview
 
        c.swap_url = h.url('compare_url',
 
            repo_name=c.cs_repo.repo_name,
 
            org_ref_type=other_ref_type, org_ref_name=other_ref_name,
 
            other_repo=c.a_repo.repo_name,
 
            other_ref_type=org_ref_type, other_ref_name=org_ref_name,
 
            merge=merge or '')
 

	
 
        # set callbacks for generating markup for icons
 
        c.ignorews_url = _ignorews_url
 
        c.context_url = _context_url
 
        ignore_whitespace = request.GET.get('ignorews') == '1'
 
        line_context = safe_int(request.GET.get('context'), 3)
 

	
 
        c.a_rev = self._get_ref_rev(c.a_repo, org_ref_type, org_ref_name,
 
            returnempty=True)
 
        c.cs_rev = self._get_ref_rev(c.cs_repo, other_ref_type, other_ref_name)
 

	
 
        c.compare_home = False
 
        c.a_ref_name = org_ref_name
 
        c.a_ref_type = org_ref_type
 
        c.cs_ref_name = other_ref_name
 
        c.cs_ref_type = other_ref_type
 

	
 
        c.cs_ranges, c.cs_ranges_org, c.ancestors = self._get_changesets(
 
            c.a_repo.scm_instance.alias, c.a_repo.scm_instance, c.a_rev,
 
            c.cs_repo.scm_instance, c.cs_rev)
 
        raw_ids = [x.raw_id for x in c.cs_ranges]
 
        c.cs_comments = c.cs_repo.get_comments(raw_ids)
 
        c.cs_statuses = c.cs_repo.statuses(raw_ids)
 

	
 
        revs = [ctx.revision for ctx in reversed(c.cs_ranges)]
 
        c.jsdata = graph_data(c.cs_repo.scm_instance, revs)
 

	
 
        if partial:
 
            return render('compare/compare_cs.html')
 

	
 
        org_repo = c.a_repo
 
        other_repo = c.cs_repo
 

	
 
        if merge:
 
            rev1 = msg = None
 
            if not c.cs_ranges:
 
                msg = _('Cannot show empty diff')
 
            elif not c.ancestors:
 
                msg = _('No ancestor found for merge diff')
 
            elif len(c.ancestors) == 1:
 
                rev1 = c.ancestors[0]
 
            else:
 
                msg = _('Multiple merge ancestors found for merge compare')
 
            if rev1 is None:
 
                h.flash(msg, category='error')
 
                log.error(msg)
 
                raise HTTPNotFound
 

	
 
            # case we want a simple diff without incoming changesets,
 
            # previewing what will be merged.
 
            # Make the diff on the other repo (which is known to have other_rev)
 
            log.debug('Using ancestor %s as rev1 instead of %s',
 
                      rev1, c.a_rev)
 
            org_repo = other_repo
 
        else: # comparing tips, not necessarily linearly related
 
            if org_repo != other_repo:
 
                # TODO: we could do this by using hg unionrepo
 
                log.error('cannot compare across repos %s and %s', org_repo, other_repo)
 
                h.flash(_('Cannot compare repositories without using common ancestor'), category='error')
 
                raise HTTPBadRequest
 
            rev1 = c.a_rev
 

	
 
        diff_limit = None if fulldiff else self.cut_off_limit
 

	
 
        log.debug('running diff between %s and %s in %s',
 
                  rev1, c.cs_rev, org_repo.scm_instance.path)
 
        raw_diff = diffs.get_diff(org_repo.scm_instance, rev1=rev1, rev2=c.cs_rev,
 
                                      ignore_whitespace=ignore_whitespace,
 
                                      context=line_context)
 

	
 
-        diff_processor = diffs.DiffProcessor(raw_diff or '', diff_limit=diff_limit)
+        diff_processor = diffs.DiffProcessor(raw_diff, diff_limit=diff_limit)
 
        c.limited_diff = diff_processor.limited_diff
 
        c.file_diff_data = []
 
        c.lines_added = 0
 
        c.lines_deleted = 0
 
        for f in diff_processor.parsed:
 
            st = f['stats']
 
            c.lines_added += st['added']
 
            c.lines_deleted += st['deleted']
 
            filename = f['filename']
 
            fid = h.FID('', filename)
 
            html_diff = diffs.as_html(enable_comments=False, parsed_lines=[f])
 
            c.file_diff_data.append((fid, None, f['operation'], f['old_filename'], filename, html_diff, st))
 

	
 
        return render('compare/compare_diff.html')
kallithea/controllers/feed.py
 
# -*- coding: utf-8 -*-
 
# This program is free software: you can redistribute it and/or modify
 
# it under the terms of the GNU General Public License as published by
 
# the Free Software Foundation, either version 3 of the License, or
 
# (at your option) any later version.
 
#
 
# This program is distributed in the hope that it will be useful,
 
# but WITHOUT ANY WARRANTY; without even the implied warranty of
 
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 
# GNU General Public License for more details.
 
#
 
# You should have received a copy of the GNU General Public License
 
# along with this program.  If not, see <http://www.gnu.org/licenses/>.
 
"""
 
kallithea.controllers.feed
 
~~~~~~~~~~~~~~~~~~~~~~~~~~
 

	
 
Feed controller for Kallithea
 

	
 
This file was forked by the Kallithea project in July 2014.
 
Original author and date, and relevant copyright and licensing information is below:
 
:created_on: Apr 23, 2010
 
:author: marcink
 
:copyright: (c) 2013 RhodeCode GmbH, and others.
 
:license: GPLv3, see LICENSE.md for more details.
 
"""
 

	
 

	
 
import logging
 

	
 
from beaker.cache import cache_region
 
from tg import response
 
from tg import tmpl_context as c
 
from tg.i18n import ugettext as _
 

	
 
from kallithea import CONFIG
 
from kallithea.lib import feeds
 
from kallithea.lib import helpers as h
 
from kallithea.lib.auth import HasRepoPermissionLevelDecorator, LoginRequired
 
from kallithea.lib.base import BaseRepoController
 
from kallithea.lib.diffs import DiffProcessor
 
from kallithea.lib.utils2 import safe_int, safe_unicode, str2bool
 

	
 

	
 
log = logging.getLogger(__name__)
 

	
 

	
 
class FeedController(BaseRepoController):
 

	
 
    @LoginRequired(allow_default_user=True)
 
    @HasRepoPermissionLevelDecorator('read')
 
    def _before(self, *args, **kwargs):
 
        super(FeedController, self)._before(*args, **kwargs)
 

	
 
    def _get_title(self, cs):
 
        return h.shorter(cs.message, 160)
 

	
 
    def __get_desc(self, cs):
 
        desc_msg = [(_('%s committed on %s')
 
                     % (h.person(cs.author), h.fmt_date(cs.date))) + '<br/>']
 
        # branches, tags, bookmarks
 
        for branch in cs.branches:
 
            desc_msg.append('branch: %s<br/>' % branch)
 
        for book in cs.bookmarks:
 
            desc_msg.append('bookmark: %s<br/>' % book)
 
        for tag in cs.tags:
 
            desc_msg.append('tag: %s<br/>' % tag)
 

	
 
        changes = []
 
        diff_limit = safe_int(CONFIG.get('rss_cut_off_limit', 32 * 1024))
 
        raw_diff = cs.diff()
 
        diff_processor = DiffProcessor(raw_diff,
 
                                       diff_limit=diff_limit,
 
                                       inline_diff=False)
 

	
 
        for st in diff_processor.parsed:
 
            st.update({'added': st['stats']['added'],
 
                       'removed': st['stats']['deleted']})
 
            changes.append('\n %(operation)s %(filename)s '
 
                           '(%(added)s lines added, %(removed)s lines removed)'
 
                            % st)
 
        if diff_processor.limited_diff:
 
            changes = changes + ['\n ' +
 
                                 _('Changeset was too big and was cut off...')]
 

	
 
        # rev link
 
        _url = h.canonical_url('changeset_home', repo_name=c.db_repo.repo_name,
 
                   revision=cs.raw_id)
 
        desc_msg.append('changeset: <a href="%s">%s</a>' % (_url, cs.raw_id[:8]))
 

	
 
        desc_msg.append('<pre>')
 
        desc_msg.append(h.urlify_text(cs.message))
 
        desc_msg.append('\n')
 
        desc_msg.extend(changes)
 
        if str2bool(CONFIG.get('rss_include_diff', False)):
 
            desc_msg.append('\n\n')
 
-            desc_msg.append(raw_diff)
+            desc_msg.append(safe_unicode(raw_diff))
 
        desc_msg.append('</pre>')
 
        return [safe_unicode(chunk) for chunk in desc_msg]
 

	
 
    def _feed(self, repo_name, feeder):
 
        """Produce a simple feed"""
 

	
 
        @cache_region('long_term', '_get_feed_from_cache')
 
        def _get_feed_from_cache(*_cache_keys):  # parameters are not really used - only as caching key
 
            header = dict(
 
                title=_('%s %s feed') % (c.site_name, repo_name),
 
                link=h.canonical_url('summary_home', repo_name=repo_name),
 
                description=_('Changes on %s repository') % repo_name,
 
            )
 

	
 
            rss_items_per_page = safe_int(CONFIG.get('rss_items_per_page', 20))
 
            entries=[]
 
            for cs in reversed(list(c.db_repo_scm_instance[-rss_items_per_page:])):
 
                entries.append(dict(
 
                    title=self._get_title(cs),
 
                    link=h.canonical_url('changeset_home', repo_name=repo_name, revision=cs.raw_id),
 
                    author_email=cs.author_email,
 
                    author_name=cs.author_name,
 
                    description=''.join(self.__get_desc(cs)),
 
                    pubdate=cs.date,
 
                ))
 
            return feeder.render(header, entries)
 

	
 
        response.content_type = feeder.content_type
 
        return _get_feed_from_cache(repo_name, feeder.__name__)
 

	
 
    def atom(self, repo_name):
 
        """Produce a simple atom-1.0 feed"""
 
        return self._feed(repo_name, feeds.AtomFeed)
 

	
 
    def rss(self, repo_name):
 
        """Produce a simple rss2 feed"""
 
        return self._feed(repo_name, feeds.RssFeed)
kallithea/controllers/files.py
 
# -*- coding: utf-8 -*-
 
# This program is free software: you can redistribute it and/or modify
 
# it under the terms of the GNU General Public License as published by
 
# the Free Software Foundation, either version 3 of the License, or
 
# (at your option) any later version.
 
#
 
# This program is distributed in the hope that it will be useful,
 
# but WITHOUT ANY WARRANTY; without even the implied warranty of
 
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 
# GNU General Public License for more details.
 
#
 
# You should have received a copy of the GNU General Public License
 
# along with this program.  If not, see <http://www.gnu.org/licenses/>.
 
"""
 
kallithea.controllers.files
 
~~~~~~~~~~~~~~~~~~~~~~~~~~~
 

	
 
Files controller for Kallithea
 

	
 
This file was forked by the Kallithea project in July 2014.
 
Original author and date, and relevant copyright and licensing information is below:
 
:created_on: Apr 21, 2010
 
:author: marcink
 
:copyright: (c) 2013 RhodeCode GmbH, and others.
 
:license: GPLv3, see LICENSE.md for more details.
 
"""
 

	
 
import logging
 
import os
 
import posixpath
 
import shutil
 
import tempfile
 
import traceback
 
from collections import OrderedDict
 

	
 
from tg import request, response
 
from tg import tmpl_context as c
 
from tg.i18n import ugettext as _
 
from webob.exc import HTTPFound, HTTPNotFound
 

	
 
from kallithea.config.routing import url
 
from kallithea.controllers.changeset import _context_url, _ignorews_url, anchor_url, get_ignore_ws, get_line_ctx
 
from kallithea.lib import diffs
 
from kallithea.lib import helpers as h
 
from kallithea.lib.auth import HasRepoPermissionLevelDecorator, LoginRequired
 
from kallithea.lib.base import BaseRepoController, jsonify, render
 
from kallithea.lib.exceptions import NonRelativePathError
 
from kallithea.lib.utils import action_logger
 
-from kallithea.lib.utils2 import convert_line_endings, detect_mode, safe_int, safe_str, str2bool
+from kallithea.lib.utils2 import convert_line_endings, detect_mode, safe_int, safe_str, safe_unicode, str2bool
 
from kallithea.lib.vcs.backends.base import EmptyChangeset
 
from kallithea.lib.vcs.conf import settings
 
from kallithea.lib.vcs.exceptions import (
 
    ChangesetDoesNotExistError, ChangesetError, EmptyRepositoryError, ImproperArchiveTypeError, NodeAlreadyExistsError, NodeDoesNotExistError, NodeError, RepositoryError, VCSError)
 
from kallithea.lib.vcs.nodes import FileNode
 
from kallithea.model.db import Repository
 
from kallithea.model.repo import RepoModel
 
from kallithea.model.scm import ScmModel
 

	
 

	
 
log = logging.getLogger(__name__)
 

	
 

	
 
class FilesController(BaseRepoController):
 

	
 
    def _before(self, *args, **kwargs):
 
        super(FilesController, self)._before(*args, **kwargs)
 

	
 
    def __get_cs(self, rev, silent_empty=False):
 
        """
 
        Safe way to get changeset if error occur it redirects to tip with
 
        proper message
 

	
 
        :param rev: revision to fetch
 
        :silent_empty: return None if repository is empty
 
        """
 

	
 
        try:
 
            return c.db_repo_scm_instance.get_changeset(rev)
 
        except EmptyRepositoryError as e:
 
            if silent_empty:
 
                return None
 
            url_ = url('files_add_home',
 
                       repo_name=c.repo_name,
 
                       revision=0, f_path='', anchor='edit')
 
            add_new = h.link_to(_('Click here to add new file'), url_, class_="alert-link")
 
            h.flash(_('There are no files yet.') + ' ' + add_new, category='warning')
 
            raise HTTPNotFound()
 
        except (ChangesetDoesNotExistError, LookupError):
 
            msg = _('Such revision does not exist for this repository')
 
            h.flash(msg, category='error')
 
            raise HTTPNotFound()
 
        except RepositoryError as e:
 
            h.flash(unicode(e), category='error')
 
            raise HTTPNotFound()
 

	
 
    def __get_filenode(self, cs, path):
 
        """
 
        Returns file_node or raise HTTP error.
 

	
 
        :param cs: given changeset
 
        :param path: path to lookup
 
        """
 

	
 
        try:
 
            file_node = cs.get_node(path)
 
            if file_node.is_dir():
 
                raise RepositoryError('given path is a directory')
 
        except ChangesetDoesNotExistError:
 
            msg = _('Such revision does not exist for this repository')
 
            h.flash(msg, category='error')
 
            raise HTTPNotFound()
 
        except RepositoryError as e:
 
            h.flash(unicode(e), category='error')
 
            raise HTTPNotFound()
 

	
 
        return file_node
 

	
 
    @LoginRequired(allow_default_user=True)
 
    @HasRepoPermissionLevelDecorator('read')
 
    def index(self, repo_name, revision, f_path, annotate=False):
 
        # redirect to given revision from form if given
 
        post_revision = request.POST.get('at_rev', None)
 
        if post_revision:
 
            cs = self.__get_cs(post_revision) # FIXME - unused!
 

	
 
        c.revision = revision
 
        c.changeset = self.__get_cs(revision)
 
        c.branch = request.GET.get('branch', None)
 
        c.f_path = f_path
 
        c.annotate = annotate
 
        cur_rev = c.changeset.revision
 
        # used in files_source.html:
 
        c.cut_off_limit = self.cut_off_limit
 
        c.fulldiff = request.GET.get('fulldiff')
 

	
 
        # prev link
 
        try:
 
            prev_rev = c.db_repo_scm_instance.get_changeset(cur_rev).prev(c.branch)
 
            c.url_prev = url('files_home', repo_name=c.repo_name,
 
                         revision=prev_rev.raw_id, f_path=f_path)
 
            if c.branch:
 
                c.url_prev += '?branch=%s' % c.branch
 
        except (ChangesetDoesNotExistError, VCSError):
 
            c.url_prev = '#'
 

	
 
@@ -272,194 +272,193 @@ class FilesController(BaseRepoController
 
                # Note: underlying vcs library fakes text/plain mimetype if the
 
                # mimetype can not be determined and it thinks it is not
 
                # binary.This might lead to erroneous text display in some
 
                # cases, but helps in other cases, like with text files
 
                # without extension.
 
                mimetype, dispo = 'text/plain', 'inline'
 

	
 
        if dispo == 'attachment':
 
            dispo = 'attachment; filename=%s' % \
 
                        safe_str(f_path.split(os.sep)[-1])
 

	
 
        response.content_disposition = dispo
 
        response.content_type = mimetype
 
        return file_node.content
 

	
 
    @LoginRequired()
 
    @HasRepoPermissionLevelDecorator('write')
 
    def delete(self, repo_name, revision, f_path):
 
        repo = c.db_repo
 
        # check if revision is a branch identifier- basically we cannot
 
        # create multiple heads via file editing
 
        _branches = repo.scm_instance.branches
 
        # check if revision is a branch name or branch hash
 
        if revision not in _branches and revision not in _branches.values():
 
            h.flash(_('You can only delete files with revision '
 
                      'being a valid branch'), category='warning')
 
            raise HTTPFound(location=h.url('files_home',
 
                                  repo_name=repo_name, revision='tip',
 
                                  f_path=f_path))
 

	
 
        r_post = request.POST
 

	
 
        c.cs = self.__get_cs(revision)
 
        c.file = self.__get_filenode(c.cs, f_path)
 

	
 
        c.default_message = _('Deleted file %s via Kallithea') % (f_path)
 
        c.f_path = f_path
 
        node_path = f_path
 
        author = request.authuser.full_contact
 

	
 
        if r_post:
 
            message = r_post.get('message') or c.default_message
 

	
 
            try:
 
                nodes = {
 
                    node_path: {
 
                        'content': ''
 
                    }
 
                }
 
                self.scm_model.delete_nodes(
 
                    user=request.authuser.user_id,
 
                    ip_addr=request.ip_addr,
 
                    repo=c.db_repo,
 
                    message=message,
 
                    nodes=nodes,
 
                    parent_cs=c.cs,
 
                    author=author,
 
                )
 

	
 
                h.flash(_('Successfully deleted file %s') % f_path,
 
                        category='success')
 
            except Exception:
 
                log.error(traceback.format_exc())
 
                h.flash(_('Error occurred during commit'), category='error')
 
            raise HTTPFound(location=url('changeset_home',
 
                                repo_name=c.repo_name, revision='tip'))
 

	
 
        return render('files/files_delete.html')
 

	
 
    @LoginRequired()
 
    @HasRepoPermissionLevelDecorator('write')
 
    def edit(self, repo_name, revision, f_path):
 
        repo = c.db_repo
 
        # check if revision is a branch identifier- basically we cannot
 
        # create multiple heads via file editing
 
        _branches = repo.scm_instance.branches
 
        # check if revision is a branch name or branch hash
 
        if revision not in _branches and revision not in _branches.values():
 
            h.flash(_('You can only edit files with revision '
 
                      'being a valid branch'), category='warning')
 
            raise HTTPFound(location=h.url('files_home',
 
                                  repo_name=repo_name, revision='tip',
 
                                  f_path=f_path))
 

	
 
        r_post = request.POST
 

	
 
        c.cs = self.__get_cs(revision)
 
        c.file = self.__get_filenode(c.cs, f_path)
 

	
 
        if c.file.is_binary:
 
            raise HTTPFound(location=url('files_home', repo_name=c.repo_name,
 
                            revision=c.cs.raw_id, f_path=f_path))
 
        c.default_message = _('Edited file %s via Kallithea') % (f_path)
 
        c.f_path = f_path
 

	
 
        if r_post:
 

	
 
-            old_content = c.file.content
+            old_content = safe_unicode(c.file.content)
 
            sl = old_content.splitlines(1)
 
            first_line = sl[0] if sl else ''
 
            # modes:  0 - Unix, 1 - Mac, 2 - DOS
 
            mode = detect_mode(first_line, 0)
 
            content = convert_line_endings(r_post.get('content', ''), mode)
 

	
 
            message = r_post.get('message') or c.default_message
 
            author = request.authuser.full_contact
 

	
 
            if content == old_content:
 
                h.flash(_('No changes'), category='warning')
 
                raise HTTPFound(location=url('changeset_home', repo_name=c.repo_name,
 
                                    revision='tip'))
 
            try:
 
                self.scm_model.commit_change(repo=c.db_repo_scm_instance,
 
                                             repo_name=repo_name, cs=c.cs,
 
                                             user=request.authuser.user_id,
 
                                             ip_addr=request.ip_addr,
 
                                             author=author, message=message,
 
                                             content=content, f_path=f_path)
 
                h.flash(_('Successfully committed to %s') % f_path,
 
                        category='success')
 
            except Exception:
 
                log.error(traceback.format_exc())
 
                h.flash(_('Error occurred during commit'), category='error')
 
            raise HTTPFound(location=url('changeset_home',
 
                                repo_name=c.repo_name, revision='tip'))
 

	
 
        return render('files/files_edit.html')
 

	
 
    @LoginRequired()
 
    @HasRepoPermissionLevelDecorator('write')
 
    def add(self, repo_name, revision, f_path):
 

	
 
        repo = c.db_repo
 
        r_post = request.POST
 
        c.cs = self.__get_cs(revision, silent_empty=True)
 
        if c.cs is None:
 
            c.cs = EmptyChangeset(alias=c.db_repo_scm_instance.alias)
 
        c.default_message = (_('Added file via Kallithea'))
 
        c.f_path = f_path
 

	
 
        if r_post:
 
            unix_mode = 0
 
            content = convert_line_endings(r_post.get('content', ''), unix_mode)
 

	
 
            message = r_post.get('message') or c.default_message
 
            filename = r_post.get('filename')
 
            location = r_post.get('location', '')
 
            file_obj = r_post.get('upload_file', None)
 

	
 
            if file_obj is not None and hasattr(file_obj, 'filename'):
 
                filename = file_obj.filename
 
                content = file_obj.file
 

	
 
                if hasattr(content, 'file'):
 
                    # non posix systems store real file under file attr
 
                    content = content.file
 

	
 
            if not content:
 
                h.flash(_('No content'), category='warning')
 
                raise HTTPFound(location=url('changeset_home', repo_name=c.repo_name,
 
                                    revision='tip'))
 
            if not filename:
 
                h.flash(_('No filename'), category='warning')
 
                raise HTTPFound(location=url('changeset_home', repo_name=c.repo_name,
 
                                    revision='tip'))
 
            # strip all crap out of file, just leave the basename
 
            filename = os.path.basename(filename)
 
            node_path = posixpath.join(location, filename)
 
            author = request.authuser.full_contact
 

	
 
            try:
 
                nodes = {
 
                    node_path: {
 
                        'content': content
 
                    }
 
                }
 
                self.scm_model.create_nodes(
 
                    user=request.authuser.user_id,
 
                    ip_addr=request.ip_addr,
 
                    repo=c.db_repo,
 
                    message=message,
 
                    nodes=nodes,
 
                    parent_cs=c.cs,
 
                    author=author,
 
                )
 

	
 
                h.flash(_('Successfully committed to %s') % node_path,
 
                        category='success')
 
            except NonRelativePathError as e:
 
                h.flash(_('Location must be relative path and must not '
 
                          'contain .. in path'), category='warning')
 
                raise HTTPFound(location=url('changeset_home', repo_name=c.repo_name,
 
                                    revision='tip'))
 
            except (NodeError, NodeAlreadyExistsError) as e:
kallithea/controllers/pullrequests.py
 
@@ -498,148 +498,148 @@ class PullrequestsController(BaseRepoCon
 
            pass
 
        except IndexError: # probably because c.cs_ranges is empty, probably because revisions are missing
 
            pass
 

	
 
        avail_revs = set()
 
        avail_show = []
 
        c.cs_branch_name = c.cs_ref_name
 
        c.a_branch_name = None
 
        other_scm_instance = c.a_repo.scm_instance
 
        c.update_msg = ""
 
        c.update_msg_other = ""
 
        try:
 
            if not c.cs_ranges:
 
                c.update_msg = _('Error: changesets not found when displaying pull request from %s.') % c.cs_rev
 
            elif org_scm_instance.alias == 'hg' and c.a_ref_name != 'ancestor':
 
                if c.cs_ref_type != 'branch':
 
                    c.cs_branch_name = org_scm_instance.get_changeset(c.cs_ref_name).branch # use ref_type ?
 
                c.a_branch_name = c.a_ref_name
 
                if c.a_ref_type != 'branch':
 
                    try:
 
                        c.a_branch_name = other_scm_instance.get_changeset(c.a_ref_name).branch # use ref_type ?
 
                    except EmptyRepositoryError:
 
                        c.a_branch_name = 'null' # not a branch name ... but close enough
 
                # candidates: descendants of old head that are on the right branch
 
                #             and not are the old head itself ...
 
                #             and nothing at all if old head is a descendant of target ref name
 
                if not c.is_range and other_scm_instance._repo.revs('present(%s)::&%s', c.cs_ranges[-1].raw_id, c.a_branch_name):
 
                    c.update_msg = _('This pull request has already been merged to %s.') % c.a_branch_name
 
                elif c.pull_request.is_closed():
 
                    c.update_msg = _('This pull request has been closed and can not be updated.')
 
                else: # look for descendants of PR head on source branch in org repo
 
                    avail_revs = org_scm_instance._repo.revs('%s:: & branch(%s)',
 
                                                             revs[0], c.cs_branch_name)
 
                    if len(avail_revs) > 1: # more than just revs[0]
 
                        # also show changesets that not are descendants but would be merged in
 
                        targethead = other_scm_instance.get_changeset(c.a_branch_name).raw_id
 
                        if org_scm_instance.path != other_scm_instance.path:
 
                            # Note: org_scm_instance.path must come first so all
 
                            # valid revision numbers are 100% org_scm compatible
 
                            # - both for avail_revs and for revset results
 
                            hgrepo = unionrepo.makeunionrepository(org_scm_instance.baseui,
 
                                                                   org_scm_instance.path,
 
                                                                   other_scm_instance.path)
 
                        else:
 
                            hgrepo = org_scm_instance._repo
 
                        show = set(hgrepo.revs('::%ld & !::parents(%s) & !::%s',
 
                                               avail_revs, revs[0], targethead))
 
                        if show:
 
                            c.update_msg = _('The following additional changes are available on %s:') % c.cs_branch_name
 
                        else:
 
                            c.update_msg = _('No additional changesets found for iterating on this pull request.')
 
                    else:
 
                        show = set()
 
                        avail_revs = set() # drop revs[0]
 
                        c.update_msg = _('No additional changesets found for iterating on this pull request.')
 

	
 
                    # TODO: handle branch heads that not are tip-most
 
                    brevs = org_scm_instance._repo.revs('%s - %ld - %s', c.cs_branch_name, avail_revs, revs[0])
 
                    if brevs:
 
                        # also show changesets that are on branch but neither ancestors nor descendants
 
                        show.update(org_scm_instance._repo.revs('::%ld - ::%ld - ::%s', brevs, avail_revs, c.a_branch_name))
 
                        show.add(revs[0]) # make sure graph shows this so we can see how they relate
 
                        c.update_msg_other = _('Note: Branch %s has another head: %s.') % (c.cs_branch_name,
 
                            h.short_id(org_scm_instance.get_changeset((max(brevs))).raw_id))
 

	
 
                    avail_show = sorted(show, reverse=True)
 

	
 
            elif org_scm_instance.alias == 'git':
 
                c.cs_repo.scm_instance.get_changeset(c.cs_rev) # check it exists - raise ChangesetDoesNotExistError if not
 
                c.update_msg = _("Git pull requests don't support iterating yet.")
 
        except ChangesetDoesNotExistError:
 
            c.update_msg = _('Error: some changesets not found when displaying pull request from %s.') % c.cs_rev
 

	
 
        c.avail_revs = avail_revs
 
        c.avail_cs = [org_scm_instance.get_changeset(r) for r in avail_show]
 
        c.avail_jsdata = graph_data(org_scm_instance, avail_show)
 

	
 
        raw_ids = [x.raw_id for x in c.cs_ranges]
 
        c.cs_comments = c.cs_repo.get_comments(raw_ids)
 
        c.cs_statuses = c.cs_repo.statuses(raw_ids)
 

	
 
        ignore_whitespace = request.GET.get('ignorews') == '1'
 
        line_context = safe_int(request.GET.get('context'), 3)
 
        c.ignorews_url = _ignorews_url
 
        c.context_url = _context_url
 
        fulldiff = request.GET.get('fulldiff')
 
        diff_limit = None if fulldiff else self.cut_off_limit
 

	
 
        # we swap org/other ref since we run a simple diff on one repo
 
        log.debug('running diff between %s and %s in %s',
 
                  c.a_rev, c.cs_rev, org_scm_instance.path)
 
        try:
 
            raw_diff = diffs.get_diff(org_scm_instance, rev1=safe_str(c.a_rev), rev2=safe_str(c.cs_rev),
 
                                      ignore_whitespace=ignore_whitespace, context=line_context)
 
        except ChangesetDoesNotExistError:
 
            raw_diff = _("The diff can't be shown - the PR revisions could not be found.")
 
-        diff_processor = diffs.DiffProcessor(raw_diff or '', diff_limit=diff_limit)
+        diff_processor = diffs.DiffProcessor(raw_diff, diff_limit=diff_limit)
 
        c.limited_diff = diff_processor.limited_diff
 
        c.file_diff_data = []
 
        c.lines_added = 0
 
        c.lines_deleted = 0
 

	
 
        for f in diff_processor.parsed:
 
            st = f['stats']
 
            c.lines_added += st['added']
 
            c.lines_deleted += st['deleted']
 
            filename = f['filename']
 
            fid = h.FID('', filename)
 
            html_diff = diffs.as_html(enable_comments=True, parsed_lines=[f])
 
            c.file_diff_data.append((fid, None, f['operation'], f['old_filename'], filename, html_diff, st))
 

	
 
        # inline comments
 
        c.inline_cnt = 0
 
        c.inline_comments = cc_model.get_inline_comments(
 
                                c.db_repo.repo_id,
 
                                pull_request=pull_request_id)
 
        # count inline comments
 
        for __, lines in c.inline_comments:
 
            for comments in lines.values():
 
                c.inline_cnt += len(comments)
 
        # comments
 
        c.comments = cc_model.get_comments(c.db_repo.repo_id, pull_request=pull_request_id)
 

	
 
        # (badly named) pull-request status calculation based on reviewer votes
 
        (c.pull_request_reviewers,
 
         c.pull_request_pending_reviewers,
 
         c.current_voting_result,
 
         ) = cs_model.calculate_pull_request_result(c.pull_request)
 
        c.changeset_statuses = ChangesetStatus.STATUSES
 

	
 
        c.is_ajax_preview = False
 
        c.ancestors = None # [c.a_rev] ... but that is shown in an other way
 
        return render('/pullrequests/pullrequest_show.html')
 

	
 
    @LoginRequired()
 
    @HasRepoPermissionLevelDecorator('read')
 
    @jsonify
 
    def comment(self, repo_name, pull_request_id):
 
        pull_request = PullRequest.get_or_404(pull_request_id)
 
        allowed_to_change_status = self._is_allowed_to_change_status(pull_request)
 
        return create_cs_pr_comment(repo_name, pull_request=pull_request,
 
                allowed_to_change_status=allowed_to_change_status)
 

	
 
    @LoginRequired()
 
    @HasRepoPermissionLevelDecorator('read')
 
    @jsonify
 
    def delete_comment(self, repo_name, comment_id):
 
        return delete_cs_pr_comment(repo_name, comment_id)
kallithea/controllers/summary.py
 
# -*- coding: utf-8 -*-
 
# This program is free software: you can redistribute it and/or modify
 
# it under the terms of the GNU General Public License as published by
 
# the Free Software Foundation, either version 3 of the License, or
 
# (at your option) any later version.
 
#
 
# This program is distributed in the hope that it will be useful,
 
# but WITHOUT ANY WARRANTY; without even the implied warranty of
 
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 
# GNU General Public License for more details.
 
#
 
# You should have received a copy of the GNU General Public License
 
# along with this program.  If not, see <http://www.gnu.org/licenses/>.
 
"""
 
kallithea.controllers.summary
 
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 

	
 
Summary controller for Kallithea
 

	
 
This file was forked by the Kallithea project in July 2014.
 
Original author and date, and relevant copyright and licensing information is below:
 
:created_on: Apr 18, 2010
 
:author: marcink
 
:copyright: (c) 2013 RhodeCode GmbH, and others.
 
:license: GPLv3, see LICENSE.md for more details.
 
"""
 

	
 
import calendar
 
import itertools
 
import logging
 
import traceback
 
from datetime import date, timedelta
 
from time import mktime
 

	
 
from beaker.cache import cache_region
 
from tg import request
 
from tg import tmpl_context as c
 
from tg.i18n import ugettext as _
 
from webob.exc import HTTPBadRequest
 

	
 
import kallithea.lib.helpers as h
 
from kallithea.config.conf import ALL_EXTS, ALL_READMES, LANGUAGES_EXTENSIONS_MAP
 
from kallithea.lib.auth import HasRepoPermissionLevelDecorator, LoginRequired
 
from kallithea.lib.base import BaseRepoController, jsonify, render
 
from kallithea.lib.celerylib.tasks import get_commits_stats
 
from kallithea.lib.compat import json
 
from kallithea.lib.markup_renderer import MarkupRenderer
 
from kallithea.lib.page import Page
 
-from kallithea.lib.utils2 import safe_int
+from kallithea.lib.utils2 import safe_int, safe_unicode
 
from kallithea.lib.vcs.backends.base import EmptyChangeset
 
from kallithea.lib.vcs.exceptions import ChangesetError, EmptyRepositoryError, NodeDoesNotExistError
 
from kallithea.lib.vcs.nodes import FileNode
 
from kallithea.model.db import Statistics
 

	
 

	
 
log = logging.getLogger(__name__)
 

	
 
README_FILES = [''.join([x[0][0], x[1][0]]) for x in
 
                    sorted(list(itertools.product(ALL_READMES, ALL_EXTS)),
 
                           key=lambda y:y[0][1] + y[1][1])]
 

	
 

	
 
class SummaryController(BaseRepoController):
 

	
 
    def __get_readme_data(self, db_repo):
 
        repo_name = db_repo.repo_name
 
        log.debug('Looking for README file')
 

	
 
        @cache_region('long_term', '_get_readme_from_cache')
 
        def _get_readme_from_cache(*_cache_keys):  # parameters are not really used - only as caching key
 
            readme_data = None
 
            readme_file = None
 
            try:
 
                # gets the landing revision! or tip if fails
 
                cs = db_repo.get_landing_changeset()
 
                if isinstance(cs, EmptyChangeset):
 
                    raise EmptyRepositoryError()
 
                renderer = MarkupRenderer()
 
                for f in README_FILES:
 
                    try:
 
                        readme = cs.get_node(f)
 
                        if not isinstance(readme, FileNode):
 
                            continue
 
                        readme_file = f
 
                        log.debug('Found README file `%s` rendering...',
 
                                  readme_file)
 
-                        readme_data = renderer.render(readme.content,
+                        readme_data = renderer.render(safe_unicode(readme.content),
 
                                                      filename=f)
 
                        break
 
                    except NodeDoesNotExistError:
 
                        continue
 
            except ChangesetError:
 
                log.error(traceback.format_exc())
 
                pass
 
            except EmptyRepositoryError:
 
                pass
 

	
 
            return readme_data, readme_file
 

	
 
        kind = 'README'
 
        return _get_readme_from_cache(repo_name, kind, c.db_repo.changeset_cache.get('raw_id'))
 

	
 
    @LoginRequired(allow_default_user=True)
 
    @HasRepoPermissionLevelDecorator('read')
 
    def index(self, repo_name):
 
        p = safe_int(request.GET.get('page'), 1)
 
        size = safe_int(request.GET.get('size'), 10)
 
        try:
 
            collection = c.db_repo_scm_instance.get_changesets(reverse=True)
 
        except EmptyRepositoryError as e:
 
            h.flash(unicode(e), category='warning')
 
            collection = []
 
        c.cs_pagination = Page(collection, page=p, items_per_page=size)
 
        page_revisions = [x.raw_id for x in list(c.cs_pagination)]
 
        c.cs_comments = c.db_repo.get_comments(page_revisions)
 
        c.cs_statuses = c.db_repo.statuses(page_revisions)
 

	
 
        c.ssh_repo_url = None
 
        if request.authuser.is_default_user:
 
            username = None
 
        else:
 
            username = request.authuser.username
 
            if c.ssh_enabled:
 
                c.ssh_repo_url = c.db_repo.clone_url(clone_uri_tmpl=c.clone_ssh_tmpl)
 

	
 
        c.clone_repo_url = c.db_repo.clone_url(clone_uri_tmpl=c.clone_uri_tmpl, with_id=False, username=username)
 
        c.clone_repo_url_id = c.db_repo.clone_url(clone_uri_tmpl=c.clone_uri_tmpl, with_id=True, username=username)
 

	
 
        if c.db_repo.enable_statistics:
 
            c.show_stats = True
 
        else:
 
            c.show_stats = False
 

	
 
        stats = Statistics.query() \
 
            .filter(Statistics.repository == c.db_repo) \
 
            .scalar()
 

	
 
        c.stats_percentage = 0
 

	
 
        if stats and stats.languages:
 
            c.no_data = False is c.db_repo.enable_statistics
 
            lang_stats_d = json.loads(stats.languages)
 

	
 
            lang_stats = [(x, {"count": y,
 
                               "desc": LANGUAGES_EXTENSIONS_MAP.get(x, '?')})
 
                          for x, y in lang_stats_d.items()]
 
            lang_stats.sort(key=lambda k: (-k[1]['count'], k[0]))
 

	
 
            c.trending_languages = lang_stats[:10]
 
        else:
 
            c.no_data = True
 
            c.trending_languages = []
 

	
 
        c.enable_downloads = c.db_repo.enable_downloads
 
        c.readme_data, c.readme_file = \
 
            self.__get_readme_data(c.db_repo)
 
        return render('summary/summary.html')
 

	
 
    @LoginRequired()
 
    @HasRepoPermissionLevelDecorator('read')
 
    @jsonify
 
    def repo_size(self, repo_name):
 
        if request.is_xhr:
 
            return c.db_repo._repo_size()
 
        else:
 
            raise HTTPBadRequest()
 

	
 
    @LoginRequired(allow_default_user=True)
 
    @HasRepoPermissionLevelDecorator('read')
 
    def statistics(self, repo_name):
 
        if c.db_repo.enable_statistics:
 
            c.show_stats = True
 
            c.no_data_msg = _('No data ready yet')
 
        else:
 
            c.show_stats = False
 
            c.no_data_msg = _('Statistics are disabled for this repository')
 

	
 
        td = date.today() + timedelta(days=1)
 
        td_1m = td - timedelta(days=calendar.mdays[td.month])
 
        td_1y = td - timedelta(days=365)
 

	
 
        ts_min_m = mktime(td_1m.timetuple())
 
        ts_min_y = mktime(td_1y.timetuple())
kallithea/lib/annotate.py
 
# -*- coding: utf-8 -*-
 
# This program is free software: you can redistribute it and/or modify
 
# it under the terms of the GNU General Public License as published by
 
# the Free Software Foundation, either version 3 of the License, or
 
# (at your option) any later version.
 
#
 
# This program is distributed in the hope that it will be useful,
 
# but WITHOUT ANY WARRANTY; without even the implied warranty of
 
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 
# GNU General Public License for more details.
 
#
 
# You should have received a copy of the GNU General Public License
 
# along with this program.  If not, see <http://www.gnu.org/licenses/>.
 
"""
 
kallithea.lib.annotate
 
~~~~~~~~~~~~~~~~~~~~~~
 

	
 
Annotation library for usage in Kallithea, previously part of vcs
 

	
 
This file was forked by the Kallithea project in July 2014.
 
Original author and date, and relevant copyright and licensing information is below:
 
:created_on: Dec 4, 2011
 
:author: marcink
 
:copyright: (c) 2013 RhodeCode GmbH, and others.
 
:license: GPLv3, see LICENSE.md for more details.
 
"""
 

	
 
from pygments import highlight
 
from pygments.formatters import HtmlFormatter
 

	
 
from kallithea.lib.vcs.exceptions import VCSError
 
from kallithea.lib.vcs.nodes import FileNode
 
from kallithea.lib.vcs.utils import safe_unicode
 

	
 

	
 
def annotate_highlight(filenode, annotate_from_changeset_func=None,
 
        order=None, headers=None, **options):
 
    """
 
    Returns html portion containing annotated table with 3 columns: line
 
    numbers, changeset information and pygmentized line of code.
 

	
 
    :param filenode: FileNode object
 
    :param annotate_from_changeset_func: function taking changeset and
 
      returning single annotate cell; needs break line at the end
 
    :param order: ordered sequence of ``ls`` (line numbers column),
 
      ``annotate`` (annotate column), ``code`` (code column); Default is
 
      ``['ls', 'annotate', 'code']``
 
    :param headers: dictionary with headers (keys are whats in ``order``
 
      parameter)
 
    """
 
    from kallithea.lib.pygmentsutils import get_custom_lexer
 
    options['linenos'] = True
 
    formatter = AnnotateHtmlFormatter(filenode=filenode, order=order,
 
        headers=headers,
 
        annotate_from_changeset_func=annotate_from_changeset_func, **options)
 
    lexer = get_custom_lexer(filenode.extension) or filenode.lexer
 
-    highlighted = highlight(filenode.content, lexer, formatter)
+    highlighted = highlight(safe_unicode(filenode.content), lexer, formatter)
 
    return highlighted
 

	
 

	
 
class AnnotateHtmlFormatter(HtmlFormatter):
 

	
 
    def __init__(self, filenode, annotate_from_changeset_func=None,
 
            order=None, **options):
 
        """
 
        If ``annotate_from_changeset_func`` is passed it should be a function
 
        which returns string from the given changeset. For example, we may pass
 
        following function as ``annotate_from_changeset_func``::
 

	
 
            def changeset_to_anchor(changeset):
 
                return '<a href="/changesets/%s/">%s</a>\n' % \
 
                       (changeset.id, changeset.id)
 

	
 
        :param annotate_from_changeset_func: see above
 
        :param order: (default: ``['ls', 'annotate', 'code']``); order of
 
          columns;
 
        :param options: standard pygment's HtmlFormatter options, there is
 
          extra option tough, ``headers``. For instance we can pass::
 

	
 
             formatter = AnnotateHtmlFormatter(filenode, headers={
 
                'ls': '#',
 
                'annotate': 'Annotate',
 
                'code': 'Code',
 
             })
 

	
 
        """
 
        super(AnnotateHtmlFormatter, self).__init__(**options)
 
        self.annotate_from_changeset_func = annotate_from_changeset_func
 
        self.order = order or ('ls', 'annotate', 'code')
 
        headers = options.pop('headers', None)
 
        if headers and not ('ls' in headers and 'annotate' in headers and
 
            'code' in headers
 
        ):
 
            raise ValueError("If headers option dict is specified it must "
 
                "all 'ls', 'annotate' and 'code' keys")
 
        self.headers = headers
 
        if isinstance(filenode, FileNode):
 
            self.filenode = filenode
 
        else:
 
            raise VCSError("This formatter expect FileNode parameter, not %r"
 
                % type(filenode))
 

	
 
    def annotate_from_changeset(self, changeset):
 
        """
 
        Returns full html line for single changeset per annotated line.
 
        """
 
        if self.annotate_from_changeset_func:
 
            return self.annotate_from_changeset_func(changeset)
 
        else:
 
            return ''.join((changeset.id, '\n'))
 

	
 
    def _wrap_tablelinenos(self, inner):
 
        inner_lines = []
 
        lncount = 0
 
        for t, line in inner:
 
            if t:
 
                lncount += 1
 
            inner_lines.append(line)
 

	
 
        fl = self.linenostart
 
        mw = len(str(lncount + fl - 1))
 
        sp = self.linenospecial
 
        st = self.linenostep
 
        la = self.lineanchors
 
        aln = self.anchorlinenos
 
        if sp:
 
            lines = []
 

	
 
            for i in range(fl, fl + lncount):
 
                if i % st == 0:
 
                    if i % sp == 0:
 
                        if aln:
 
                            lines.append('<a href="#%s-%d" class="special">'
 
                                         '%*d</a>' %
 
                                         (la, i, mw, i))
 
                        else:
 
                            lines.append('<span class="special">'
 
                                         '%*d</span>' % (mw, i))
 
                    else:
 
                        if aln:
 
                            lines.append('<a href="#%s-%d">'
 
                                         '%*d</a>' % (la, i, mw, i))
 
                        else:
 
                            lines.append('%*d' % (mw, i))
 
                else:
 
                    lines.append('')
 
            ls = '\n'.join(lines)
 
        else:
 
            lines = []
 
            for i in range(fl, fl + lncount):
 
                if i % st == 0:
 
                    if aln:
 
                        lines.append('<a href="#%s-%d">%*d</a>'
kallithea/lib/diffs.py
Show inline comments
 
@@ -196,194 +196,194 @@ def wrapped_diff(filenode_old, filenode_
 

	
 
    elif diff_limit != -1 and (
 
            diff_limit is None or
 
            (filenode_old.size < diff_limit and filenode_new.size < diff_limit)):
 

	
 
        raw_diff = get_gitdiff(filenode_old, filenode_new,
 
                                ignore_whitespace=ignore_whitespace,
 
                                context=line_context)
 
        diff_processor = DiffProcessor(raw_diff)
 
        if diff_processor.parsed: # there should be exactly one element, for the specified file
 
            f = diff_processor.parsed[0]
 
            op = f['operation']
 
            a_path = f['old_filename']
 

	
 
        html_diff = as_html(parsed_lines=diff_processor.parsed, enable_comments=enable_comments)
 
        stats = diff_processor.stat()
 

	
 
    else:
 
        html_diff = wrap_to_table(_('Changeset was too big and was cut off, use '
 
                               'diff menu to display this diff'))
 
        stats = (0, 0)
 

	
 
    if not html_diff:
 
        submodules = [o for o in [filenode_new, filenode_old] if isinstance(o, SubModuleNode)]
 
        if submodules:
 
            html_diff = wrap_to_table(h.escape('Submodule %r' % submodules[0]))
 
        else:
 
            html_diff = wrap_to_table(_('No changes detected'))
 

	
 
    cs1 = filenode_old.changeset.raw_id
 
    cs2 = filenode_new.changeset.raw_id
 

	
 
    return cs1, cs2, a_path, html_diff, stats, op
 

	
 

	
 
def get_gitdiff(filenode_old, filenode_new, ignore_whitespace=True, context=3):
 
    """
 
    Returns git style diff between given ``filenode_old`` and ``filenode_new``.
 
    """
 
    # make sure we pass in default context
 
    context = context or 3
 
    submodules = [o for o in [filenode_new, filenode_old] if isinstance(o, SubModuleNode)]
 
    if submodules:
 
        return ''
 

	
 
    for filenode in (filenode_old, filenode_new):
 
        if not isinstance(filenode, FileNode):
 
            raise VCSError("Given object should be FileNode object, not %s"
 
                % filenode.__class__)
 

	
 
    repo = filenode_new.changeset.repository
 
    old_raw_id = getattr(filenode_old.changeset, 'raw_id', repo.EMPTY_CHANGESET)
 
    new_raw_id = getattr(filenode_new.changeset, 'raw_id', repo.EMPTY_CHANGESET)
 

	
 
    vcs_gitdiff = get_diff(repo, old_raw_id, new_raw_id, filenode_new.path,
 
                           ignore_whitespace, context)
 
    return vcs_gitdiff
 

	
 

	
 
def get_diff(scm_instance, rev1, rev2, path=None, ignore_whitespace=False, context=3):
 
    """
 
    A thin wrapper around vcs lib get_diff.
 
    """
 
    try:
 
        return scm_instance.get_diff(rev1, rev2, path=path,
 
                                     ignore_whitespace=ignore_whitespace, context=context)
 
    except MemoryError:
 
        h.flash('MemoryError: Diff is too big', category='error')
 
        return ''
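
        Note (sketch): these helpers are typically chained as in ``wrapped_diff`` above,
        roughly::

            raw_diff = get_gitdiff(filenode_old, filenode_new,
                                   ignore_whitespace=True, context=3)
            diff_processor = DiffProcessor(raw_diff)
            parsed = diff_processor.parsed        # one dict per file, e.g. parsed[0]['operation']
            adds, removes = diff_processor.stat()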
 

	
 

	
 
NEW_FILENODE = 1
 
DEL_FILENODE = 2
 
MOD_FILENODE = 3
 
RENAMED_FILENODE = 4
 
COPIED_FILENODE = 5
 
CHMOD_FILENODE = 6
 
BIN_FILENODE = 7
 

	
 

	
 
class DiffProcessor(object):
 
    """
 
    Give it a unified or git diff and it returns a list of the files that were
 
    mentioned in the diff together with a dict of meta information that
 
    can be used to render it in an HTML template.
 
    """
 
    _diff_git_re = re.compile('^diff --git', re.MULTILINE)
 

	
 
    def __init__(self, diff, vcs='hg', diff_limit=None, inline_diff=True):
 
        """
 
        :param diff:   a text in diff format
 
        :param vcs: type of version control hg or git
 
        :param diff_limit: defines the size of a diff that is considered "big";

            beyond that limit the cut-off is triggered. Set to None

            to show the full diff
 
        """
 
        if not isinstance(diff, basestring):
 
            raise Exception('Diff must be a basestring got %s instead' % type(diff))
 
        if not isinstance(diff, bytes):
 
            raise Exception('Diff must be bytes - got %s' % type(diff))
 

	
 
        self._diff = diff
 
        self.adds = 0
 
        self.removes = 0
 
        self.diff_limit = diff_limit
 
        self.limited_diff = False
 
        self.vcs = vcs
 
        self.parsed = self._parse_gitdiff(inline_diff=inline_diff)
 

	
 
    def _parse_gitdiff(self, inline_diff):
 
        """Parse self._diff and return a list of dicts with meta info and chunks for each file.
 
        Might set limited_diff.
 
        Optionally, do an extra pass, adding extra markup to one-liner changes.
 
        """
 
        _files = [] # list of dicts with meta info and chunks
 

	
 
        starts = [m.start() for m in self._diff_git_re.finditer(self._diff)]
 
        starts.append(len(self._diff))
 

	
 
        for start, end in zip(starts, starts[1:]):
 
            if self.diff_limit and end > self.diff_limit:
 
                self.limited_diff = True
 
                continue
 

	
 
            head, diff_lines = _get_header(self.vcs, buffer(self._diff, start, end - start))
 

	
 
            op = None
 
            stats = {
 
                'added': 0,
 
                'deleted': 0,
 
                'binary': False,
 
                'ops': {},
 
            }
 

	
 
            if head['deleted_file_mode']:
 
                op = 'removed'
 
                stats['binary'] = True
 
                stats['ops'][DEL_FILENODE] = 'deleted file'
 

	
 
            elif head['new_file_mode']:
 
                op = 'added'
 
                stats['binary'] = True
 
                stats['ops'][NEW_FILENODE] = 'new file %s' % head['new_file_mode']
 
            else:  # modify operation, can be cp, rename, chmod
 
                # CHMOD
 
                if head['new_mode'] and head['old_mode']:
 
                    op = 'modified'
 
                    stats['binary'] = True
 
                    stats['ops'][CHMOD_FILENODE] = ('modified file chmod %s => %s'
 
                                        % (head['old_mode'], head['new_mode']))
 
                # RENAME
 
                if (head['rename_from'] and head['rename_to']
 
                      and head['rename_from'] != head['rename_to']):
 
                    op = 'renamed'
 
                    stats['binary'] = True
 
                    stats['ops'][RENAMED_FILENODE] = ('file renamed from %s to %s'
 
                                    % (head['rename_from'], head['rename_to']))
 
                # COPY
 
                if head.get('copy_from') and head.get('copy_to'):
 
                    op = 'modified'
 
                    stats['binary'] = True
 
                    stats['ops'][COPIED_FILENODE] = ('file copied from %s to %s'
 
                                        % (head['copy_from'], head['copy_to']))
 
                # FALL BACK: detect missed old style add or remove
 
                if op is None:
 
                    if not head['a_file'] and head['b_file']:
 
                        op = 'added'
 
                        stats['binary'] = True
 
                        stats['ops'][NEW_FILENODE] = 'new file'
 

	
 
                    elif head['a_file'] and not head['b_file']:
 
                        op = 'removed'
 
                        stats['binary'] = True
 
                        stats['ops'][DEL_FILENODE] = 'deleted file'
 

	
 
                # it's neither ADD nor DELETE
 
                if op is None:
 
                    op = 'modified'
 
                    stats['binary'] = True
 
                    stats['ops'][MOD_FILENODE] = 'modified file'
 

	
 
            # a real non-binary diff
 
            if head['a_file'] or head['b_file']:
 
                chunks, added, deleted = _parse_lines(diff_lines)
 
                stats['binary'] = False
 
                stats['added'] = added
 
                stats['deleted'] = deleted
 
                # explicit mark that it's a modified file
 
                if op == 'modified':
 
                    stats['ops'][MOD_FILENODE] = 'modified file'
 
            else:  # Git binary patch (or empty diff)
 
                # Git binary patch
 
                if head['bin_patch']:
 
                    stats['ops'][BIN_FILENODE] = 'binary diff not shown'
 
                chunks = []
 

	
 
@@ -423,214 +423,217 @@ class DiffProcessor(object):
 
                        # find a first del line
 
                        while peekline['action'] != 'del':
 
                            peekline = lineiter.next()
 
                        delline = peekline
 
                        peekline = lineiter.next()
 
                        # if not followed by add, eat all following del lines
 
                        if peekline['action'] != 'add':
 
                            while peekline['action'] == 'del':
 
                                peekline = lineiter.next()
 
                            continue
 
                        # found an add - make sure it is the only one
 
                        addline = peekline
 
                        try:
 
                            peekline = lineiter.next()
 
                        except StopIteration:
 
                            # add was last line - ok
 
                            _highlight_inline_diff(delline, addline)
 
                            raise
 
                        if peekline['action'] != 'add':
 
                            # there was only one add line - ok
 
                            _highlight_inline_diff(delline, addline)
 
                except StopIteration:
 
                    pass
 

	
 
        return _files
 

	
 
    def stat(self):
 
        """
 
        Returns tuple of added, and removed lines for this instance
 
        """
 
        return self.adds, self.removes
 

	
 

	
 
_escape_re = re.compile(r'(&)|(<)|(>)|(\t)|(\r)|(?<=.)( \n| $)')
 

	
 

	
 
def _escaper(string):
 
    """
 
    Do HTML escaping/markup
 
    """
 

	
 
    def substitute(m):
 
        groups = m.groups()
 
        if groups[0]:
 
            return '&amp;'
 
        if groups[1]:
 
            return '&lt;'
 
        if groups[2]:
 
            return '&gt;'
 
        if groups[3]:
 
            return '<u>\t</u>'
 
        if groups[4]:
 
            return '<u class="cr"></u>'
 
        if groups[5]:
 
            return ' <i></i>'
 
        assert False
 

	
 
    return _escape_re.sub(substitute, safe_unicode(string))
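
    Note (illustrative): for a line with markup-sensitive characters and a tab,
    ``_escaper`` produces roughly::

        >>> _escaper('if a < b:\t# cmp')
        'if a &lt; b:<u>\t</u># cmp'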
 

	
 

	
 
_git_header_re = re.compile(r"""
 
    ^diff[ ]--git[ ]a/(?P<a_path>.+?)[ ]b/(?P<b_path>.+?)\n
 
    (?:^old[ ]mode[ ](?P<old_mode>\d+)\n
 
       ^new[ ]mode[ ](?P<new_mode>\d+)(?:\n|$))?
 
    (?:^similarity[ ]index[ ](?P<similarity_index>\d+)%\n
 
       ^rename[ ]from[ ](?P<rename_from>.+)\n
 
       ^rename[ ]to[ ](?P<rename_to>.+)(?:\n|$))?
 
    (?:^new[ ]file[ ]mode[ ](?P<new_file_mode>.+)(?:\n|$))?
 
    (?:^deleted[ ]file[ ]mode[ ](?P<deleted_file_mode>.+)(?:\n|$))?
 
    (?:^index[ ](?P<a_blob_id>[0-9A-Fa-f]+)
 
        \.\.(?P<b_blob_id>[0-9A-Fa-f]+)[ ]?(?P<b_mode>.+)?(?:\n|$))?
 
    (?:^(?P<bin_patch>GIT[ ]binary[ ]patch)(?:\n|$))?
 
    (?:^---[ ](a/(?P<a_file>.+?)|/dev/null)\t?(?:\n|$))?
 
    (?:^\+\+\+[ ](b/(?P<b_file>.+?)|/dev/null)\t?(?:\n|$))?
 
""", re.VERBOSE | re.MULTILINE)
 

	
 

	
 
_hg_header_re = re.compile(r"""
 
    ^diff[ ]--git[ ]a/(?P<a_path>.+?)[ ]b/(?P<b_path>.+?)\n
 
    (?:^old[ ]mode[ ](?P<old_mode>\d+)\n
 
       ^new[ ]mode[ ](?P<new_mode>\d+)(?:\n|$))?
 
    (?:^similarity[ ]index[ ](?P<similarity_index>\d+)%(?:\n|$))?
 
    (?:^rename[ ]from[ ](?P<rename_from>.+)\n
 
       ^rename[ ]to[ ](?P<rename_to>.+)(?:\n|$))?
 
    (?:^copy[ ]from[ ](?P<copy_from>.+)\n
 
       ^copy[ ]to[ ](?P<copy_to>.+)(?:\n|$))?
 
    (?:^new[ ]file[ ]mode[ ](?P<new_file_mode>.+)(?:\n|$))?
 
    (?:^deleted[ ]file[ ]mode[ ](?P<deleted_file_mode>.+)(?:\n|$))?
 
    (?:^index[ ](?P<a_blob_id>[0-9A-Fa-f]+)
 
        \.\.(?P<b_blob_id>[0-9A-Fa-f]+)[ ]?(?P<b_mode>.+)?(?:\n|$))?
 
    (?:^(?P<bin_patch>GIT[ ]binary[ ]patch)(?:\n|$))?
 
    (?:^---[ ](a/(?P<a_file>.+?)|/dev/null)\t?(?:\n|$))?
 
    (?:^\+\+\+[ ](b/(?P<b_file>.+?)|/dev/null)\t?(?:\n|$))?
 
""", re.VERBOSE | re.MULTILINE)
 

	
 

	
 
_header_next_check = re.compile(br'''(?!@)(?!literal )(?!delta )''')
 

	
 

	
 
def _get_header(vcs, diff_chunk):
 
    """
 
    Parses a Git diff for a single file (header and chunks) and returns a tuple with:
 

	
 
    1. A dict with meta info:
 

	
 
        a_path, b_path, similarity_index, rename_from, rename_to,
 
        old_mode, new_mode, new_file_mode, deleted_file_mode,
 
        a_blob_id, b_blob_id, b_mode, a_file, b_file
 

	
 
    2. An iterator yielding lines with simple HTML markup.
 
    """
 
    match = None
 
    if vcs == 'git':
 
        match = _git_header_re.match(diff_chunk)
 
    elif vcs == 'hg':
 
        match = _hg_header_re.match(diff_chunk)
 
    if match is None:
 
        raise Exception('diff not recognized as valid %s diff' % vcs)
 
    meta_info = match.groupdict()
 
    rest = diff_chunk[match.end():]
 
    if rest and not rest.startswith('@') and not rest.startswith('literal ') and not rest.startswith('delta '):
 
    if rest and _header_next_check.match(rest):
 
        raise Exception('cannot parse %s diff header: %r followed by %r' % (vcs, diff_chunk[:match.end()], rest[:1000]))
 
    diff_lines = (_escaper(m.group(0)) for m in re.finditer(r'.*\n|.+$', rest)) # don't split on \r as str.splitlines does
 
    return meta_info, diff_lines
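
    Note (hypothetical input): for a tiny single-file chunk, the returned meta dict
    looks roughly like this; keys that did not match are None::

        chunk = ('diff --git a/setup.py b/setup.py\n'
                 '--- a/setup.py\n'
                 '+++ b/setup.py\n'
                 '@@ -1,1 +1,1 @@\n-old\n+new\n')
        meta, lines = _get_header('git', chunk)
        # meta['a_path'] == 'setup.py', meta['b_file'] == 'setup.py'
        # 'lines' lazily yields the escaped body lines, starting at the '@@' hunk header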
 

	
 

	
 
_chunk_re = re.compile(r'^@@ -(\d+)(?:,(\d+))? \+(\d+)(?:,(\d+))? @@(.*)')
 
_newline_marker = re.compile(r'^\\ No newline at end of file')
 

	
 

	
 
def _parse_lines(diff_lines):
 
    """
 
    Given an iterator of diff body lines, parse them and return a dict per
 
    line and added/removed totals.
 
    """
 
    added = deleted = 0
 
    old_line = old_end = new_line = new_end = None
 

	
 
    try:
 
        chunks = []
 
        line = diff_lines.next()
 

	
 
        while True:
 
            lines = []
 
            chunks.append(lines)
 

	
 
            match = _chunk_re.match(line)
 

	
 
            if not match:
 
                raise Exception('error parsing diff @@ line %r' % line)
 

	
 
            gr = match.groups()
 
            (old_line, old_end,
 
             new_line, new_end) = [int(x or 1) for x in gr[:-1]]
 
            old_line -= 1
 
            new_line -= 1
 

	
 
            context = len(gr) == 5
 
            old_end += old_line
 
            new_end += new_line
 

	
 
            if context:
 
                # skip context only if it's first line
 
                if int(gr[0]) > 1:
 
                    lines.append({
 
                        'old_lineno': '...',
 
                        'new_lineno': '...',
 
                        'action':     'context',
 
                        'line':       line,
 
                    })
 

	
 
            line = diff_lines.next()
 

	
 
            while old_line < old_end or new_line < new_end:
 
                if not line:
 
                    raise Exception('error parsing diff - empty line at -%s+%s' % (old_line, new_line))
 

	
 
                affects_old = affects_new = False
 

	
 
                command = line[0]
 
                if command == '+':
 
                    affects_new = True
 
                    action = 'add'
 
                    added += 1
 
                elif command == '-':
 
                    affects_old = True
 
                    action = 'del'
 
                    deleted += 1
 
                elif command == ' ':
 
                    affects_old = affects_new = True
 
                    action = 'unmod'
 
                else:
 
                    raise Exception('error parsing diff - unknown command in line %r at -%s+%s' % (line, old_line, new_line))
 

	
 
                if not _newline_marker.match(line):
 
                    old_line += affects_old
 
                    new_line += affects_new
 
                    lines.append({
 
                        'old_lineno':   affects_old and old_line or '',
 
                        'new_lineno':   affects_new and new_line or '',
 
                        'action':       action,
 
                        'line':         line[1:],
 
                    })
 

	
 
                line = diff_lines.next()
 

	
 
                if _newline_marker.match(line):
 
                    # we need to append to lines, since this is not
 
                    # counted in the line specs of diff
 
                    lines.append({
 
                        'old_lineno':   '...',
 
                        'new_lineno':   '...',
 
                        'action':       'context',
 
                        'line':         line,
 
                    })
 
                    line = diff_lines.next()
 
            if old_line > old_end:
kallithea/lib/helpers.py
Show inline comments
 
@@ -237,193 +237,193 @@ class CodeHtmlFormatter(HtmlFormatter):
 

	
 
    def wrap(self, source, outfile):
 
        return self._wrap_div(self._wrap_pre(self._wrap_code(source)))
 

	
 
    def _wrap_code(self, source):
 
        for cnt, it in enumerate(source):
 
            i, t = it
 
            t = '<span id="L%s">%s</span>' % (cnt + 1, t)
 
            yield i, t
 

	
 
    def _wrap_tablelinenos(self, inner):
 
        inner_lines = []
 
        lncount = 0
 
        for t, line in inner:
 
            if t:
 
                lncount += 1
 
            inner_lines.append(line)
 

	
 
        fl = self.linenostart
 
        mw = len(str(lncount + fl - 1))
 
        sp = self.linenospecial
 
        st = self.linenostep
 
        la = self.lineanchors
 
        aln = self.anchorlinenos
 
        nocls = self.noclasses
 
        if sp:
 
            lines = []
 

	
 
            for i in range(fl, fl + lncount):
 
                if i % st == 0:
 
                    if i % sp == 0:
 
                        if aln:
 
                            lines.append('<a href="#%s%d" class="special">%*d</a>' %
 
                                         (la, i, mw, i))
 
                        else:
 
                            lines.append('<span class="special">%*d</span>' % (mw, i))
 
                    else:
 
                        if aln:
 
                            lines.append('<a href="#%s%d">%*d</a>' % (la, i, mw, i))
 
                        else:
 
                            lines.append('%*d' % (mw, i))
 
                else:
 
                    lines.append('')
 
            ls = '\n'.join(lines)
 
        else:
 
            lines = []
 
            for i in range(fl, fl + lncount):
 
                if i % st == 0:
 
                    if aln:
 
                        lines.append('<a href="#%s%d">%*d</a>' % (la, i, mw, i))
 
                    else:
 
                        lines.append('%*d' % (mw, i))
 
                else:
 
                    lines.append('')
 
            ls = '\n'.join(lines)
 

	
 
        # in case you wonder about the seemingly redundant <div> here: since the
 
        # content in the other cell also is wrapped in a div, some browsers in
 
        # some configurations seem to mess up the formatting...
 
        if nocls:
 
            yield 0, ('<table class="%stable">' % self.cssclass +
 
                      '<tr><td><div class="linenodiv">'
 
                      '<pre>' + ls + '</pre></div></td>'
 
                      '<td id="hlcode" class="code">')
 
        else:
 
            yield 0, ('<table class="%stable">' % self.cssclass +
 
                      '<tr><td class="linenos"><div class="linenodiv">'
 
                      '<pre>' + ls + '</pre></div></td>'
 
                      '<td id="hlcode" class="code">')
 
        yield 0, ''.join(inner_lines)
 
        yield 0, '</td></tr></table>'
 

	
 

	
 
_whitespace_re = re.compile(r'(\t)|( )(?=\n|</div>)')
 

	
 

	
 
def _markup_whitespace(m):
 
    groups = m.groups()
 
    if groups[0]:
 
        return '<u>\t</u>'
 
    if groups[1]:
 
        return ' <i></i>'
 

	
 

	
 
def markup_whitespace(s):
 
    return _whitespace_re.sub(_markup_whitespace, s)
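
    Note (illustrative): tabs and trailing spaces are wrapped so CSS can make them
    visible, roughly::

        >>> markup_whitespace('code\t \n')
        'code<u>\t</u> <i></i>\n'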
 

	
 

	
 
def pygmentize(filenode, **kwargs):
 
    """
 
    pygmentize function using pygments
 

	
 
    :param filenode:
 
    """
 
    lexer = get_custom_lexer(filenode.extension) or filenode.lexer
 
    return literal(markup_whitespace(
 
        code_highlight(filenode.content, lexer, CodeHtmlFormatter(**kwargs))))
 
        code_highlight(safe_unicode(filenode.content), lexer, CodeHtmlFormatter(**kwargs))))
 

	
 

	
 
def pygmentize_annotation(repo_name, filenode, **kwargs):
 
    """
 
    pygmentize function for annotation
 

	
 
    :param filenode:
 
    """
 

	
 
    color_dict = {}
 

	
 
    def gen_color(n=10000):
 
        """generator for getting n of evenly distributed colors using
 
        hsv color and golden ratio. It always returns the same order of colors
 

	
 
        :returns: RGB tuple
 
        """
 

	
 
        def hsv_to_rgb(h, s, v):
 
            if s == 0.0:
 
                return v, v, v
 
            i = int(h * 6.0)  # XXX assume int() truncates!
 
            f = (h * 6.0) - i
 
            p = v * (1.0 - s)
 
            q = v * (1.0 - s * f)
 
            t = v * (1.0 - s * (1.0 - f))
 
            i = i % 6
 
            if i == 0:
 
                return v, t, p
 
            if i == 1:
 
                return q, v, p
 
            if i == 2:
 
                return p, v, t
 
            if i == 3:
 
                return p, q, v
 
            if i == 4:
 
                return t, p, v
 
            if i == 5:
 
                return v, p, q
 

	
 
        golden_ratio = 0.618033988749895
 
        h = 0.22717784590367374
 

	
 
        for _unused in xrange(n):
 
            h += golden_ratio
 
            h %= 1
 
            HSV_tuple = [h, 0.95, 0.95]
 
            RGB_tuple = hsv_to_rgb(*HSV_tuple)
 
            yield [str(int(x * 256)) for x in RGB_tuple]
 

	
 
    cgenerator = gen_color()
 

	
 
    def get_color_string(cs):
 
        if cs in color_dict:
 
            col = color_dict[cs]
 
        else:
 
            col = color_dict[cs] = cgenerator.next()
 
        return "color: rgb(%s)! important;" % (', '.join(col))
 

	
 
    def url_func(repo_name):
 

	
 
        def _url_func(changeset):
 
            author = escape(changeset.author)
 
            date = changeset.date
 
            message = escape(changeset.message)
 
            tooltip_html = ("<b>Author:</b> %s<br/>"
 
                            "<b>Date:</b> %s</b><br/>"
 
                            "<b>Message:</b> %s") % (author, date, message)
 

	
 
            lnk_format = show_id(changeset)
 
            uri = link_to(
 
                    lnk_format,
 
                    url('changeset_home', repo_name=repo_name,
 
                        revision=changeset.raw_id),
 
                    style=get_color_string(changeset.raw_id),
 
                    **{'data-toggle': 'popover',
 
                       'data-content': tooltip_html}
 
                  )
 

	
 
            uri += '\n'
 
            return uri
 
        return _url_func
 

	
 
    return literal(markup_whitespace(annotate_highlight(filenode, url_func(repo_name), **kwargs)))
 

	
 

	
 
class _Message(object):
 
    """A message returned by ``pop_flash_messages()``.
 

	
 
    Converting the message to a string returns the message text. Instances
 
    also have the following attributes:
 

	
 
    * ``message``: the message text.
 
    * ``category``: the category specified when the message was created.
 
    """
 

	
kallithea/lib/indexers/daemon.py
Show inline comments
 
@@ -89,198 +89,199 @@ class WhooshIndexingDaemon(object):
 
        # filter update repo list
 
        self.filtered_repo_update_paths = {}
 
        if repo_update_list:
 
            self.filtered_repo_update_paths = {}
 
            for repo_name, repo in self.repo_paths.items():
 
                if repo_name in repo_update_list:
 
                    self.filtered_repo_update_paths[repo_name] = repo
 
            self.repo_paths = self.filtered_repo_update_paths
 

	
 
        self.initial = True
 
        if not os.path.isdir(self.index_location):
 
            os.makedirs(self.index_location)
 
            log.info('Cannot run incremental index since it does not '
 
                     'yet exist - running full build')
 
        elif not exists_in(self.index_location, IDX_NAME):
 
            log.info('Running full index build, as the file content '
 
                     'index does not exist')
 
        elif not exists_in(self.index_location, CHGSET_IDX_NAME):
 
            log.info('Running full index build, as the changeset '
 
                     'index does not exist')
 
        else:
 
            self.initial = False
 

	
 
    def _get_index_revision(self, repo):
 
        db_repo = Repository.get_by_repo_name(safe_unicode(repo.name))
 
        landing_rev = 'tip'
 
        if db_repo:
 
            _rev_type, _rev = db_repo.landing_rev
 
            landing_rev = _rev
 
        return landing_rev
 

	
 
    def _get_index_changeset(self, repo, index_rev=None):
 
        if not index_rev:
 
            index_rev = self._get_index_revision(repo)
 
        cs = repo.get_changeset(index_rev)
 
        return cs
 

	
 
    def get_paths(self, repo):
 
        """
 
        recursively walk the root dir and return a set of all paths in it,

        based on the repository walk function
 
        """
 
        index_paths_ = set()
 
        try:
 
            cs = self._get_index_changeset(repo)
 
            for _topnode, _dirs, files in cs.walk('/'):
 
                for f in files:
 
                    index_paths_.add(os.path.join(safe_str(repo.path), safe_str(f.path)))
 

	
 
        except RepositoryError:
 
            log.debug(traceback.format_exc())
 
            pass
 
        return index_paths_
 

	
 
    def get_node(self, repo, path, index_rev=None):
 
        """
 
        gets a filenode based on the given full path. It operates on strings for

        hg/git compatibility.
 

	
 
        :param repo: scm repo instance
 
        :param path: full path including root location
 
        :return: FileNode
 
        """
 
        # FIXME: paths should be normalized ... or even better: don't include repo.path
 
        path = safe_str(path)
 
        repo_path = safe_str(repo.path)
 
        assert path.startswith(repo_path)
 
        assert path[len(repo_path)] in (os.path.sep, os.path.altsep)
 
        node_path = path[len(repo_path) + 1:]
 
        cs = self._get_index_changeset(repo, index_rev=index_rev)
 
        node = cs.get_node(node_path)
 
        return node
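
        Note (hypothetical paths): ``path`` comes from ``get_paths()``, i.e. ``repo.path``
        joined with the node path, so for example::

            path = '/srv/repos/myrepo/docs/index.rst'   # with repo.path == '/srv/repos/myrepo'
            node = self.get_node(repo, path)            # resolves 'docs/index.rst' in the index changeset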
 

	
 
    def is_indexable_node(self, node):
 
        """
 
        Just index the content of chosen files, skipping binary files
 
        """
 
        return (node.extension in INDEX_EXTENSIONS or node.name in INDEX_FILENAMES) and \
 
               not node.is_binary
 

	
 
    def get_node_mtime(self, node):
 
        return mktime(node.last_changeset.date.timetuple())
 

	
 
    def add_doc(self, writer, path, repo, repo_name, index_rev=None):
 
        """
 
        Add a doc to the writer; this function itself fetches data from

        the vcs backend instance
 
        """
 
        try:
 
            node = self.get_node(repo, path, index_rev)
 
        except (ChangesetError, NodeDoesNotExistError):
 
            log.debug("    >> %s - not found in %s %s", path, repo, index_rev)
 
            return 0, 0
 

	
 
        indexed = indexed_w_content = 0
 
        if self.is_indexable_node(node):
 
            u_content = node.content
 
            if not isinstance(u_content, unicode):
 
            bytes_content = node.content
 
            if b'\0' in bytes_content:
 
                log.warning('    >> %s - no text content', path)
 
                u_content = u''
 
            else:
 
                log.debug('    >> %s', path)
 
                u_content = safe_unicode(bytes_content)
 
                indexed_w_content += 1
 

	
 
        else:
 
            log.debug('    >> %s - not indexable', path)
 
            # just index the file name, without its content
 
            u_content = u''
 
            indexed += 1
 

	
 
        p = safe_unicode(path)
 
        writer.add_document(
 
            fileid=p,
 
            owner=unicode(repo.contact),
 
            repository_rawname=safe_unicode(repo_name),
 
            repository=safe_unicode(repo_name),
 
            path=p,
 
            content=u_content,
 
            modtime=self.get_node_mtime(node),
 
            extension=node.extension
 
        )
 
        return indexed, indexed_w_content
 

	
 
    def index_changesets(self, writer, repo_name, repo, start_rev=None):
 
        """
 
        Add all changesets in the vcs repo starting at start_rev
 
        to the index writer
 

	
 
        :param writer: the whoosh index writer to add to
 
        :param repo_name: name of the repository the changesets

          originate from, including the repository group
 
        :param repo: the vcs repository instance to index changesets for,
 
          the presumption is the repo has changesets to index
 
        :param start_rev=None: the full sha id to start indexing from
 
          if start_rev is None then index from the first changeset in
 
          the repo
 
        """
 

	
 
        if start_rev is None:
 
            start_rev = repo[0].raw_id
 

	
 
        log.debug('Indexing changesets in %s, starting at rev %s',
 
                  repo_name, start_rev)
 

	
 
        indexed = 0
 
        cs_iter = repo.get_changesets(start=start_rev)
 
        total = len(cs_iter)
 
        for cs in cs_iter:
 
            indexed += 1
 
            log.debug('    >> %s %s/%s', cs, indexed, total)
 
            writer.add_document(
 
                raw_id=unicode(cs.raw_id),
 
                owner=unicode(repo.contact),
 
                date=cs._timestamp,
 
                repository_rawname=safe_unicode(repo_name),
 
                repository=safe_unicode(repo_name),
 
                author=cs.author,
 
                message=cs.message,
 
                last=cs.last,
 
                added=u' '.join([safe_unicode(node.path) for node in cs.added]).lower(),
 
                removed=u' '.join([safe_unicode(node.path) for node in cs.removed]).lower(),
 
                changed=u' '.join([safe_unicode(node.path) for node in cs.changed]).lower(),
 
                parents=u' '.join([cs.raw_id for cs in cs.parents]),
 
            )
 

	
 
        return indexed
 

	
 
    def index_files(self, file_idx_writer, repo_name, repo):
 
        """
 
        Index files for given repo_name
 

	
 
        :param file_idx_writer: the whoosh index writer to add to
 
        :param repo_name: name of the repository we're indexing
 
        :param repo: instance of vcs repo
 
        """
 
        i_cnt = iwc_cnt = 0
 
        log.debug('Building file index for %s @revision:%s', repo_name,
 
                                                self._get_index_revision(repo))
 
        index_rev = self._get_index_revision(repo)
 
        for idx_path in self.get_paths(repo):
 
            i, iwc = self.add_doc(file_idx_writer, idx_path, repo, repo_name, index_rev)
 
            i_cnt += i
 
            iwc_cnt += iwc
 

	
 
        log.debug('added %s files %s with content for repo %s',
 
                  i_cnt + iwc_cnt, iwc_cnt, repo.path)
 
        return i_cnt, iwc_cnt
 

	
 
    def update_changeset_index(self):
 
        idx = open_dir(self.index_location, indexname=CHGSET_IDX_NAME)
 

	
 
        with idx.searcher() as searcher:
 
            writer = idx.writer()
 
            writer_is_dirty = False
 
            try:
 
                indexed_total = 0
 
                repo_name = None
 
                for repo_name, repo in sorted(self.repo_paths.items()):
kallithea/lib/vcs/backends/git/inmemory.py
Show inline comments
 
import datetime
 
import posixpath
 
import stat
 
import time
 

	
 
from dulwich import objects
 

	
 
from kallithea.lib.vcs.backends.base import BaseInMemoryChangeset
 
from kallithea.lib.vcs.exceptions import RepositoryError
 
from kallithea.lib.vcs.utils import safe_bytes, safe_str
 

	
 

	
 
class GitInMemoryChangeset(BaseInMemoryChangeset):
 

	
 
    def commit(self, message, author, parents=None, branch=None, date=None,
 
               **kwargs):
 
        """
 
        Performs in-memory commit (doesn't check workdir in any way) and
 
        returns newly created ``Changeset``. Updates repository's
 
        ``revisions``.
 

	
 
        :param message: message of the commit
 
        :param author: full username, i.e. "Joe Doe <joe.doe@example.com>"
 
        :param parents: single parent or sequence of parents from which commit
 
          would be derived
 
        :param date: ``datetime.datetime`` instance. Defaults to
 
          ``datetime.datetime.now()``.
 
        :param branch: branch name, as string. If none given, default backend's
 
          branch would be used.
 

	
 
        :raises ``CommitError``: if any error occurs while committing
 
        """
 
        self.check_integrity(parents)
 

	
 
        from .repository import GitRepository
 
        if branch is None:
 
            branch = GitRepository.DEFAULT_BRANCH_NAME
 

	
 
        repo = self.repository._repo
 
        object_store = repo.object_store
 

	
 
        ENCODING = "UTF-8"  # TODO: should probably be kept in sync with safe_unicode/safe_bytes and vcs/conf/settings.py DEFAULT_ENCODINGS
 

	
 
        # Create tree and populates it with blobs
 
        commit_tree = self.parents[0] and repo[self.parents[0]._commit.tree] or \
 
            objects.Tree()
 
        for node in self.added + self.changed:
 
            # Compute subdirs if needed
 
            dirpath, nodename = posixpath.split(node.path)
 
            dirnames = safe_str(dirpath).split('/') if dirpath else []
 
            parent = commit_tree
 
            ancestors = [('', parent)]
 

	
 
            # Tries to dig for the deepest existing tree
 
            while dirnames:
 
                curdir = dirnames.pop(0)
 
                try:
 
                    dir_id = parent[curdir][1]
 
                except KeyError:
 
                    # put curdir back into dirnames and stop
 
                    dirnames.insert(0, curdir)
 
                    break
 
                else:
 
                    # If found, updates parent
 
                    parent = self.repository._repo[dir_id]
 
                    ancestors.append((curdir, parent))
 
            # Now parent is deepest existing tree and we need to create subtrees
 
            # for dirnames (in reverse order) [this only applies for nodes from added]
 
            new_trees = []
 

	
 
            if not node.is_binary:
 
                content = node.content.encode(ENCODING)
 
            else:
 
                content = node.content
 
            blob = objects.Blob.from_string(content)
 
            blob = objects.Blob.from_string(node.content)
 

	
 
            node_path = safe_bytes(node.name)
 
            if dirnames:
 
                # If there are trees which should be created we need to build
 
                # them now (in reverse order)
 
                reversed_dirnames = list(reversed(dirnames))
 
                curtree = objects.Tree()
 
                curtree[node_path] = node.mode, blob.id
 
                new_trees.append(curtree)
 
                for dirname in reversed_dirnames[:-1]:
 
                    newtree = objects.Tree()
 
                    #newtree.add(stat.S_IFDIR, dirname, curtree.id)
 
                    newtree[dirname] = stat.S_IFDIR, curtree.id
 
                    new_trees.append(newtree)
 
                    curtree = newtree
 
                parent[reversed_dirnames[-1]] = stat.S_IFDIR, curtree.id
 
            else:
 
                parent.add(name=node_path, mode=node.mode, hexsha=blob.id)
 

	
 
            new_trees.append(parent)
 
            # Update ancestors
 
            for parent, tree, path in reversed([(a[1], b[1], b[0]) for a, b in
 
                zip(ancestors, ancestors[1:])]
 
            ):
 
                parent[path] = stat.S_IFDIR, tree.id
 
                object_store.add_object(tree)
 

	
 
            object_store.add_object(blob)
 
            for tree in new_trees:
 
                object_store.add_object(tree)
 
        for node in self.removed:
 
            paths = node.path.split('/')
 
            tree = commit_tree
 
            trees = [tree]
 
            # Traverse deep into the forest...
 
            for path in paths:
 
                try:
 
                    obj = self.repository._repo[tree[path][1]]
 
                    if isinstance(obj, objects.Tree):
 
                        trees.append(obj)
 
                        tree = obj
 
                except KeyError:
 
                    break
 
            # Cut down the blob and all rotten trees on the way back...
 
            for path, tree in reversed(zip(paths, trees)):
 
                del tree[path]
 
                if tree:
 
                    # This tree still has elements - don't remove it or any
 
                    # of its parents
 
                    break
 

	
 
        object_store.add_object(commit_tree)
 

	
 
        # Create commit
 
        commit = objects.Commit()
 
        commit.tree = commit_tree.id
 
        commit.parents = [p._commit.id for p in self.parents if p]
 
        commit.author = commit.committer = safe_str(author)
 
        commit.encoding = ENCODING
 
        commit.message = safe_str(message)
 

	
 
        # Compute date
 
        if date is None:
 
            date = time.time()
 
        elif isinstance(date, datetime.datetime):
 
            date = time.mktime(date.timetuple())
 

	
 
        author_time = kwargs.pop('author_time', date)
 
        commit.commit_time = int(date)
 
        commit.author_time = int(author_time)
 
        tz = time.timezone
 
        author_tz = kwargs.pop('author_timezone', tz)
 
        commit.commit_timezone = tz
 
        commit.author_timezone = author_tz
 

	
 
        object_store.add_object(commit)
 

	
 
        ref = 'refs/heads/%s' % branch
 
        repo.refs[ref] = commit.id
 

	
 
        # Update vcs repository object & recreate dulwich repo
 
        self.repository.revisions.append(commit.id)
 
        # invalidate parsed refs after commit
 
        self.repository._parsed_refs = self.repository._get_parsed_refs()
 
        tip = self.repository.get_changeset()
 
        self.reset()
 
        return tip
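
        Note (hedged sketch): an in-memory commit is normally driven via the repository's
        in-memory changeset object, roughly::

            imc = repo.in_memory_changeset   # assumed accessor for this GitInMemoryChangeset
            imc.add(FileNode('docs/readme.rst', content=b'hello\n'))
            tip = imc.commit(message=u'Add readme',
                             author=u'Joe Doe <joe.doe@example.com>')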
 

	
 
    def _get_missing_trees(self, path, root_tree):
 
        """
 
        Creates missing ``Tree`` objects for the given path.
 

	
 
        :param path: path given as a string. It may be a path to a file node
 
          (i.e. ``foo/bar/baz.txt``) or directory path - in that case it must
 
          end with slash (i.e. ``foo/bar/``).
 
        :param root_tree: ``dulwich.objects.Tree`` object from which we start
kallithea/lib/vcs/backends/hg/inmemory.py
Show inline comments
 
import datetime
 

	
 
from kallithea.lib.vcs.backends.base import BaseInMemoryChangeset
 
from kallithea.lib.vcs.exceptions import RepositoryError
 
from kallithea.lib.vcs.utils import safe_bytes
 
from kallithea.lib.vcs.utils.hgcompat import hex, memctx, memfilectx
 

	
 

	
 
class MercurialInMemoryChangeset(BaseInMemoryChangeset):
 

	
 
    def commit(self, message, author, parents=None, branch=None, date=None,
 
            **kwargs):
 
        """
 
        Performs in-memory commit (doesn't check workdir in any way) and
 
        returns newly created ``Changeset``. Updates repository's
 
        ``revisions``.
 

	
 
        :param message: message of the commit
 
        :param author: full username, i.e. "Joe Doe <joe.doe@example.com>"
 
        :param parents: single parent or sequence of parents from which commit
 
          would be derived
 
        :param date: ``datetime.datetime`` instance. Defaults to
 
          ``datetime.datetime.now()``.
 
        :param branch: branch name, as string. If none given, default backend's
 
          branch would be used.
 

	
 
        :raises ``CommitError``: if any error occurs while committing
 
        """
 
        self.check_integrity(parents)
 

	
 
        from .repository import MercurialRepository
 
        if not isinstance(message, unicode) or not isinstance(author, unicode):
 
            raise RepositoryError('Given message and author need to be '

                                  '<unicode> instances, got %r & %r instead'
 
                                  % (type(message), type(author)))
 

	
 
        if branch is None:
 
            branch = MercurialRepository.DEFAULT_BRANCH_NAME
 
        kwargs['branch'] = branch
 

	
 
        def filectxfn(_repo, memctx, path):
 
            """
 
            Marks given path as added/changed/removed in a given _repo. This is
 
            for internal mercurial commit function.
 
            """
 

	
 
            # check if this path is removed
 
            if path in (node.path for node in self.removed):
 
                return None
 

	
 
            # check if this path is added
 
            for node in self.added:
 
                if node.path == path:
 
                    return memfilectx(_repo, memctx, path=node.path,
 
                        data=(node.content.encode('utf-8')
 
                              if not node.is_binary else node.content),
 
                        data=node.content,
 
                        islink=False,
 
                        isexec=node.is_executable,
 
                        copysource=False)
 

	
 
            # or changed
 
            for node in self.changed:
 
                if node.path == path:
 
                    return memfilectx(_repo, memctx, path=node.path,
 
                        data=(node.content.encode('utf-8')
 
                              if not node.is_binary else node.content),
 
                        data=node.content,
 
                        islink=False,
 
                        isexec=node.is_executable,
 
                        copysource=False)
 

	
 
            raise RepositoryError("Given path haven't been marked as added,"
 
                                  "changed or removed (%s)" % path)
 

	
 
        parents = [None, None]
 
        for i, parent in enumerate(self.parents):
 
            if parent is not None:
 
                parents[i] = parent._ctx.node()
 

	
 
        if date and isinstance(date, datetime.datetime):
 
            date = date.strftime('%a, %d %b %Y %H:%M:%S')
 

	
 
        commit_ctx = memctx(repo=self.repository._repo,
 
            parents=parents,
 
            text='',
 
            files=self.get_paths(),
 
            filectxfn=filectxfn,
 
            user=author,
 
            date=date,
 
            extra=kwargs)
 

	
 
        # injecting given _repo params
 
        commit_ctx._text = safe_bytes(message)
 
        commit_ctx._user = safe_bytes(author)
 
        commit_ctx._date = date
 

	
 
        # TODO: Catch exceptions!
 
        n = self.repository._repo.commitctx(commit_ctx)
 
        # Returns mercurial node
 
        self._commit_ctx = commit_ctx  # For reference
 
        # Update vcs repository object & recreate mercurial _repo
 
        # new_ctx = self.repository._repo[node]
 
        # new_tip = self.repository.get_changeset(new_ctx.hex())
 
        new_id = hex(n)
 
        self.repository.revisions.append(new_id)
 
        self._repo = self.repository._get_repo(create=False)
 
        self.repository.branches = self.repository._get_branches()
 
        tip = self.repository.get_changeset()
 
        self.reset()
 
        return tip
kallithea/lib/vcs/nodes.py
Show inline comments
 
# -*- coding: utf-8 -*-
 
"""
 
    vcs.nodes
 
    ~~~~~~~~~
 

	
 
    Module holding everything related to vcs nodes.
 

	
 
    :created_on: Apr 8, 2010
 
    :copyright: (c) 2010-2011 by Marcin Kuzminski, Lukasz Balcerzak.
 
"""
 

	
 
import functools
 
import mimetypes
 
import posixpath
 
import stat
 

	
 
from kallithea.lib.vcs.backends.base import EmptyChangeset
 
from kallithea.lib.vcs.exceptions import NodeError, RemovedFileNodeError
 
from kallithea.lib.vcs.utils import safe_str, safe_unicode
 
from kallithea.lib.vcs.utils import safe_bytes, safe_str, safe_unicode
 
from kallithea.lib.vcs.utils.lazy import LazyProperty
 

	
 

	
 
class NodeKind:
 
    SUBMODULE = -1
 
    DIR = 1
 
    FILE = 2
 

	
 

	
 
class NodeState:
 
    ADDED = u'added'
 
    CHANGED = u'changed'
 
    NOT_CHANGED = u'not changed'
 
    REMOVED = u'removed'
 

	
 

	
 
class NodeGeneratorBase(object):
 
    """
 
    Base class for removed, added and changed filenodes; it's a lazy generator
 
    class that will create filenodes only on iteration or call
 

	
 
    The len method doesn't need to create filenodes at all
 
    """
 

	
 
    def __init__(self, current_paths, cs):
 
        self.cs = cs
 
        self.current_paths = current_paths
 

	
 
    def __call__(self):
 
        return [n for n in self]
 

	
 
    def __getitem__(self, key):
 
        assert isinstance(key, slice), key
 
        for p in self.current_paths[key]:
 
            yield self.cs.get_node(p)
 

	
 
    def __len__(self):
 
        return len(self.current_paths)
 

	
 
    def __iter__(self):
 
        for p in self.current_paths:
 
            yield self.cs.get_node(p)
 

	
 

	
 
class AddedFileNodesGenerator(NodeGeneratorBase):
 
    """
 
    Class holding Added files for current changeset
 
    """
 
    pass
 

	
 

	
 
class ChangedFileNodesGenerator(NodeGeneratorBase):
 
    """
 
    Class holding Changed files for current changeset
 
    """
 
    pass
 

	
 

	
 
class RemovedFileNodesGenerator(NodeGeneratorBase):
 
    """
 
    Class holding removed files for current changeset
 
    """
 
    def __iter__(self):
 
        for p in self.current_paths:
 
            yield RemovedFileNode(path=p)
 

	
 
    def __getitem__(self, key):
 
        assert isinstance(key, slice), key
 
        for p in self.current_paths[key]:
 
            yield RemovedFileNode(path=p)
 

	
 

	
 
@functools.total_ordering
 
class Node(object):
 
    """
 
    Simplest class representing file or directory on repository.  SCM backends
 
    should use ``FileNode`` and ``DirNode`` subclasses rather than ``Node``
 
    directly.
 

	
 
    Node's ``path`` cannot start with slash as we operate on *relative* paths
 
    only. Moreover, every single node is identified by the ``path`` attribute,
 
    so it cannot end with a slash either. Otherwise, the path could lead to mistakes.
 
    """
 

	
 
    def __init__(self, path, kind):
 
        if path.startswith('/'):
 
            raise NodeError("Cannot initialize Node objects with slash at "
 
                            "the beginning as only relative paths are supported")
 
        self.path = safe_str(path.rstrip('/'))  # we store paths as str
 
        if path == '' and kind != NodeKind.DIR:
 
            raise NodeError("Only DirNode and its subclasses may be "
 
                            "initialized with empty path")
 
        self.kind = kind
 
        #self.dirs, self.files = [], []
 
        if self.is_root() and not self.is_dir():
 
            raise NodeError("Root node cannot be FILE kind")
 
@@ -170,345 +170,340 @@ class Node(object):
 
        if self.path > other.path:
 
            return False
 
        if self.is_file():
 
            return self.content < other.content
 
        else:
 
            # For DirNode's check without entering each dir
 
            self_nodes_paths = list(sorted(n.path for n in self.nodes))
 
            other_nodes_paths = list(sorted(n.path for n in other.nodes))
 
            return self_nodes_paths < other_nodes_paths
 

	
 
    def __repr__(self):
 
        return '<%s %r>' % (self.__class__.__name__, self.path)
 

	
 
    def __str__(self):
 
        return self.__repr__()
 

	
 
    def __unicode__(self):
 
        return self.name
 

	
 
    def get_parent_path(self):
 
        """
 
        Returns node's parent path or empty string if node is root.
 
        """
 
        if self.is_root():
 
            return ''
 
        return posixpath.dirname(self.path.rstrip('/')) + '/'
 

	
 
    def is_file(self):
 
        """
 
        Returns ``True`` if node's kind is ``NodeKind.FILE``, ``False``
 
        otherwise.
 
        """
 
        return self.kind == NodeKind.FILE
 

	
 
    def is_dir(self):
 
        """
 
        Returns ``True`` if node's kind is ``NodeKind.DIR``, ``False``
 
        otherwise.
 
        """
 
        return self.kind == NodeKind.DIR
 

	
 
    def is_root(self):
 
        """
 
        Returns ``True`` if node is a root node and ``False`` otherwise.
 
        """
 
        return self.kind == NodeKind.DIR and self.path == ''
 

	
 
    def is_submodule(self):
 
        """
 
        Returns ``True`` if node's kind is ``NodeKind.SUBMODULE``, ``False``
 
        otherwise.
 
        """
 
        return self.kind == NodeKind.SUBMODULE
 

	
 
    @LazyProperty
 
    def added(self):
 
        return self.state is NodeState.ADDED
 

	
 
    @LazyProperty
 
    def changed(self):
 
        return self.state is NodeState.CHANGED
 

	
 
    @LazyProperty
 
    def not_changed(self):
 
        return self.state is NodeState.NOT_CHANGED
 

	
 
    @LazyProperty
 
    def removed(self):
 
        return self.state is NodeState.REMOVED
 

	
 

	
 
class FileNode(Node):
 
    """
 
    Class representing file nodes.
 

	
 
    :attribute: path: path to the node, relative to repository's root
 
    :attribute: content: if given, sets the content of the file

    :attribute: changeset: if given, content is lazily fetched from it on first access
 
    :attribute: mode: octal stat mode for a node. Default is 0100644.
 
    """
 

	
 
    def __init__(self, path, content=None, changeset=None, mode=None):
 
        """
 
        Only one of ``content`` and ``changeset`` may be given. Passing both
 
        would raise ``NodeError`` exception.
 

	
 
        :param path: relative path to the node
 
        :param content: content may be passed to constructor
 
        :param changeset: if given, will use it to lazily fetch content
 
        :param mode: octal representation of ST_MODE (i.e. 0100644)
 
        """
 

	
 
        if content and changeset:
 
            raise NodeError("Cannot use both content and changeset")
 
        super(FileNode, self).__init__(path, kind=NodeKind.FILE)
 
        self.changeset = changeset
 
        if not isinstance(content, bytes) and content is not None:
 
            # File content is one thing that inherently must be bytes ... but
 
            # VCS module tries to be "user friendly" and support unicode ...
 
            content = safe_bytes(content)
 
        self._content = content
 
        self._mode = mode or 0o100644
 

	
 
    @LazyProperty
 
    def mode(self):
 
        """
 
        Lazily returns the mode of the FileNode. If ``changeset`` is not set,

        uses the value given at initialization, or 0100644 (default).
 
        """
 
        if self.changeset:
 
            mode = self.changeset.get_file_mode(self.path)
 
        else:
 
            mode = self._mode
 
        return mode
 

	
 
    def _get_content(self):
 
    @property
 
    def content(self):
 
        """
 
        Lazily returns the byte content of the FileNode.
 
        """
 
        if self.changeset:
 
            content = self.changeset.get_file_content(self.path)
 
        else:
 
            content = self._content
 
        return content
 

	
 
    @property
 
    def content(self):
 
        """
 
        Returns lazily content of the FileNode. If possible, would try to
 
        decode content from UTF-8.
 
        """
 
        content = self._get_content()
 

	
 
        if bool(content and '\0' in content):
 
            return content
 
        return safe_unicode(content)
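
        Note: per this changeset, ``FileNode.content`` always returns bytes; unicode
        conversion is now the caller's explicit responsibility, e.g.::

            node = FileNode('setup.py', content=b'print("hi")\n')
            raw = node.content                  # bytes, exactly as stored
            text = safe_unicode(node.content)   # explicit conversion where text is needed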
 

	
 
    @LazyProperty
 
    def size(self):
 
        if self.changeset:
 
            return self.changeset.get_file_size(self.path)
 
        raise NodeError("Cannot retrieve size of the file without related "
 
            "changeset attribute")
 

	
 
    @LazyProperty
 
    def message(self):
 
        if self.changeset:
 
            return self.last_changeset.message
 
        raise NodeError("Cannot retrieve message of the file without related "
 
            "changeset attribute")
 

	
 
    @LazyProperty
 
    def last_changeset(self):
 
        if self.changeset:
 
            return self.changeset.get_file_changeset(self.path)
 
        raise NodeError("Cannot retrieve last changeset of the file without "
 
            "related changeset attribute")
 

	
 
    def get_mimetype(self):
 
        """
 
        Mimetype is guessed from the file name, falling back to a content-based binary check and a Pygments lexer lookup.
 
        """
 

	
 
        mtype, encoding = mimetypes.guess_type(self.name)
 

	
 
        if mtype is None:
 
            if self.is_binary:
 
                mtype = 'application/octet-stream'
 
                encoding = None
 
            else:
 
                mtype = 'text/plain'
 
                encoding = None
 

	
 
                # try with pygments
 
                try:
 
                    from pygments import lexers
 
                    mt = lexers.get_lexer_for_filename(self.name).mimetypes
 
                except lexers.ClassNotFound:
 
                    mt = None
 

	
 
                if mt:
 
                    mtype = mt[0]
 

	
 
        return mtype, encoding
 

	
 
    @LazyProperty
 
    def mimetype(self):
 
        """
 
        Wrapper around full mimetype info. It returns only the type of the

        fetched mimetype, without the encoding part. Use the get_mimetype

        function to fetch the full (type, encoding) tuple.
 
        """
 
        return self.get_mimetype()[0]
 

	
 
    @LazyProperty
 
    def mimetype_main(self):
 
        return self.mimetype.split('/')[0]
 

	
 
    @LazyProperty
 
    def lexer(self):
 
        """
 
        Returns the Pygments lexer class. Tries to guess the lexer from the file's
 
        content, name and mimetype.
 
        """
 
        from pygments import lexers
 
        try:
 
            lexer = lexers.guess_lexer_for_filename(self.name, self.content, stripnl=False)
 
            lexer = lexers.guess_lexer_for_filename(self.name, safe_unicode(self.content), stripnl=False)
 
        except lexers.ClassNotFound:
 
            lexer = lexers.TextLexer(stripnl=False)
 
        # returns first alias
 
        return lexer
 

	
 
    @LazyProperty
 
    def lexer_alias(self):
 
        """
 
        Returns first alias of the lexer guessed for this file.
 
        """
 
        return self.lexer.aliases[0]
 

	
 
    @LazyProperty
 
    def history(self):
 
        """
 
        Returns a list of changeset for this file in which the file was changed
 
        """
 
        if self.changeset is None:
 
            raise NodeError('Unable to get changeset for this FileNode')
 
        return self.changeset.get_file_history(self.path)
 

	
 
    @LazyProperty
 
    def annotate(self):
 
        """
 
        Returns a list of three-element tuples with lineno, changeset and line
 
        """
 
        if self.changeset is None:
 
            raise NodeError('Unable to get changeset for this FileNode')
 
        return self.changeset.get_file_annotate(self.path)
 

	
 
    @LazyProperty
 
    def state(self):
 
        if not self.changeset:
 
            raise NodeError("Cannot check state of the node if it's not "
 
                "linked with changeset")
 
        elif self.path in (node.path for node in self.changeset.added):
 
            return NodeState.ADDED
 
        elif self.path in (node.path for node in self.changeset.changed):
 
            return NodeState.CHANGED
 
        else:
 
            return NodeState.NOT_CHANGED
 

	
 
    @property
 
    def is_binary(self):
 
        """
 
        Returns True if file has binary content.
 
        """
 
        _bin = '\0' in self._get_content()
 
        return _bin
 
        return b'\0' in self.content
 

	
 
    def is_browser_compatible_image(self):
 
        return self.mimetype in [
 
            "image/gif",
 
            "image/jpeg",
 
            "image/png",
 
            "image/bmp"
 
        ]
 

	
 
    @LazyProperty
 
    def extension(self):
 
        """Returns filenode extension"""
 
        return self.name.split('.')[-1]
 

	
 
    @property
 
    def is_executable(self):
 
        """
 
        Returns ``True`` if file has executable flag turned on.
 
        """
 
        return bool(self.mode & stat.S_IXUSR)
 

	
 
    def __repr__(self):
 
        return '<%s %r @ %s>' % (self.__class__.__name__, self.path,
 
                                 getattr(self.changeset, 'short_id', ''))
 

	
 

	
 
class RemovedFileNode(FileNode):
 
    """
 
    Dummy FileNode class - trying to access any public attribute other than
 
    those listed in ALLOWED_ATTRIBUTES (or methods/attributes derived from
 
    them) raises RemovedFileNodeError.
 
    """
 
    ALLOWED_ATTRIBUTES = [
 
        'name', 'path', 'state', 'is_root', 'is_file', 'is_dir', 'kind',
 
        'added', 'changed', 'not_changed', 'removed'
 
    ]
 

	
 
    def __init__(self, path):
 
        """
 
        :param path: relative path to the node
 
        """
 
        super(RemovedFileNode, self).__init__(path=path)
 

	
 
    def __getattribute__(self, attr):
 
        if attr.startswith('_') or attr in RemovedFileNode.ALLOWED_ATTRIBUTES:
 
            return super(RemovedFileNode, self).__getattribute__(attr)
 
        raise RemovedFileNodeError("Cannot access attribute %s on "
 
            "RemovedFileNode" % attr)
 

	
 
    @LazyProperty
 
    def state(self):
 
        return NodeState.REMOVED
 

	
 

	
 
class DirNode(Node):
 
    """
 
    DirNode stores a list of files and directories within this node.
 
    Nodes may be used standalone, but within a repository context they
 
    lazily fetch data from the same repository's changeset.
 
    """
 

	
 
    def __init__(self, path, nodes=(), changeset=None):
 
        """
 
        Only one of ``nodes`` and ``changeset`` may be given. Passing both
 
        raises a ``NodeError`` exception.
 

	
 
        :param path: relative path to the node
 
        :param nodes: content may be passed to constructor
 
        :param changeset: if given, will use it to lazily fetch content
 
        :param size: always 0 for ``DirNode``
 
        """
 
        if nodes and changeset:
 
            raise NodeError("Cannot use both nodes and changeset")
 
        super(DirNode, self).__init__(path, NodeKind.DIR)
 
        self.changeset = changeset
 
        self._nodes = nodes
 

	
 
    @LazyProperty
 
    def content(self):
 
        raise NodeError("%s represents a dir and has no ``content`` attribute"
 
            % self)
 

	
 
    @LazyProperty
 
    def nodes(self):
 
        if self.changeset:
 
            nodes = self.changeset.get_nodes(self.path)
 
        else:
 
            nodes = self._nodes
 
        self._nodes_dict = dict((node.path, node) for node in nodes)
 
        return sorted(nodes)
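A hedged sketch of building a standalone DirNode from explicit child nodes, per the constructor contract above; the path convention for standalone children is an assumption here, and passing both ``nodes`` and ``changeset`` raises NodeError.

# illustrative sketch only
from kallithea.lib.vcs.nodes import DirNode, FileNode

d = DirNode('docs', nodes=[FileNode('docs/index.rst', content='index')])
print([n.path for n in d.nodes])    # sorted child nodes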
 

	
 
    @LazyProperty
 
    def files(self):
 
        return sorted((node for node in self.nodes if node.is_file()))
 

	
 
    @LazyProperty
kallithea/lib/vcs/utils/annotate.py
Show inline comments
 
from pygments import highlight
 
from pygments.formatters import HtmlFormatter
 

	
 
from kallithea.lib.vcs.exceptions import VCSError
 
from kallithea.lib.vcs.nodes import FileNode
 
from kallithea.lib.vcs.utils import safe_unicode
 

	
 

	
 
def annotate_highlight(filenode, annotate_from_changeset_func=None,
 
        order=None, headers=None, **options):
 
    """
 
    Returns an html portion containing an annotated table with 3 columns: line
 
    numbers, changeset information and the pygmentized line of code.
 

	
 
    :param filenode: FileNode object
 
    :param annotate_from_changeset_func: function taking a changeset and
 
      returning a single annotate cell; needs a line break at the end
 
    :param order: ordered sequence of ``ls`` (line numbers column),
 
      ``annotate`` (annotate column), ``code`` (code column); Default is
 
      ``['ls', 'annotate', 'code']``
 
    :param headers: dictionary with headers (keys are what's in the ``order``
 
      parameter)
 
    """
 
    options['linenos'] = True
 
    formatter = AnnotateHtmlFormatter(filenode=filenode, order=order,
 
        headers=headers,
 
        annotate_from_changeset_func=annotate_from_changeset_func, **options)
 
    lexer = filenode.lexer
 
    highlighted = highlight(filenode.content, lexer, formatter)
 
    return highlighted
 
    return highlight(safe_unicode(filenode.content), filenode.lexer, formatter)
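A hedged usage sketch of ``annotate_highlight``, reusing the callback and ``headers`` shapes shown in the docstrings of this module; ``filenode`` is assumed to be a changeset-backed FileNode.

# illustrative sketch only
def changeset_to_anchor(changeset):
    return '<a href="/changesets/%s/">%s</a>\n' % (changeset.id, changeset.id)

html = annotate_highlight(filenode, changeset_to_anchor,
                          headers={'ls': '#', 'annotate': 'Annotate', 'code': 'Code'})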
 

	
 

	
 
class AnnotateHtmlFormatter(HtmlFormatter):
 

	
 
    def __init__(self, filenode, annotate_from_changeset_func=None,
 
            order=None, **options):
 
        """
 
        If ``annotate_from_changeset_func`` is passed it should be a function
 
        which returns a string from the given changeset. For example, we may
 
        pass the following function as ``annotate_from_changeset_func``::
 

	
 
            def changeset_to_anchor(changeset):
 
                return '<a href="/changesets/%s/">%s</a>\n' % \
 
                       (changeset.id, changeset.id)
 

	
 
        :param annotate_from_changeset_func: see above
 
        :param order: (default: ``['ls', 'annotate', 'code']``); order of
 
          columns;
 
        :param options: standard Pygments HtmlFormatter options; there is one
 
          extra option though, ``headers``. For instance we can pass::
 

	
 
             formatter = AnnotateHtmlFormatter(filenode, headers={
 
                'ls': '#',
 
                'annotate': 'Annotate',
 
                'code': 'Code',
 
             })
 

	
 
        """
 
        super(AnnotateHtmlFormatter, self).__init__(**options)
 
        self.annotate_from_changeset_func = annotate_from_changeset_func
 
        self.order = order or ('ls', 'annotate', 'code')
 
        headers = options.pop('headers', None)
 
        if headers and not ('ls' in headers and 'annotate' in headers and
 
            'code' in headers
 
        ):
 
            raise ValueError("If headers option dict is specified it must "
 
                "all 'ls', 'annotate' and 'code' keys")
 
        self.headers = headers
 
        if isinstance(filenode, FileNode):
 
            self.filenode = filenode
 
        else:
 
            raise VCSError("This formatter expect FileNode parameter, not %r"
 
                % type(filenode))
 

	
 
    def annotate_from_changeset(self, changeset):
 
        """
 
        Returns the full html line for a single changeset per annotated line.
 
        """
 
        if self.annotate_from_changeset_func:
 
            return self.annotate_from_changeset_func(changeset)
 
        else:
 
            return ''.join((changeset.id, '\n'))
 

	
 
    def _wrap_tablelinenos(self, inner):
 
        inner_lines = []
 
        lncount = 0
 
        for t, line in inner:
 
            if t:
 
                lncount += 1
 
            inner_lines.append(line)
 

	
 
        fl = self.linenostart
 
        mw = len(str(lncount + fl - 1))
 
        sp = self.linenospecial
 
        st = self.linenostep
 
        la = self.lineanchors
 
        aln = self.anchorlinenos
 
        if sp:
 
            lines = []
 

	
 
            for i in range(fl, fl + lncount):
 
                if i % st == 0:
 
                    if i % sp == 0:
 
                        if aln:
 
                            lines.append('<a href="#%s-%d" class="special">'
 
                                         '%*d</a>' %
 
                                         (la, i, mw, i))
 
                        else:
 
                            lines.append('<span class="special">'
 
                                         '%*d</span>' % (mw, i))
 
                    else:
 
                        if aln:
 
                            lines.append('<a href="#%s-%d">'
 
                                         '%*d</a>' % (la, i, mw, i))
 
                        else:
 
                            lines.append('%*d' % (mw, i))
 
                else:
 
                    lines.append('')
 
            ls = '\n'.join(lines)
 
        else:
 
            lines = []
 
            for i in range(fl, fl + lncount):
 
                if i % st == 0:
 
                    if aln:
 
                        lines.append('<a href="#%s-%d">%*d</a>'
 
                                     % (la, i, mw, i))
kallithea/templates/admin/gists/edit.html
Show inline comments
 
## -*- coding: utf-8 -*-
 
<%inherit file="/base/base.html"/>
 

	
 
<%block name="title">
 
    ${_('Edit Gist')} &middot; ${c.gist.gist_access_id}
 
</%block>
 

	
 
<%block name="js_extra">
 
  <script type="text/javascript" src="${h.url('/codemirror/lib/codemirror.js')}"></script>
 
  <script type="text/javascript" src="${h.url('/js/codemirror_loadmode.js')}"></script>
 
  <script type="text/javascript" src="${h.url('/codemirror/mode/meta.js')}"></script>
 
</%block>
 
<%block name="css_extra">
 
  <link rel="stylesheet" type="text/css" href="${h.url('/codemirror/lib/codemirror.css')}"/>
 
</%block>
 

	
 
<%def name="breadcrumbs_links()">
 
    ${_('Edit Gist')} &middot; ${c.gist.gist_access_id}
 
</%def>
 

	
 
<%block name="header_menu">
 
    ${self.menu('gists')}
 
</%block>
 

	
 
<%def name="main()">
 
<div class="panel panel-primary">
 
    <div class="panel-heading clearfix">
 
        ${self.breadcrumbs()}
 
    </div>
 

	
 
    <div class="panel-body">
 
        <div id="edit_error" style="display: none" class="flash_msg">
 
            <div class="alert alert-dismissable alert-warning">
 
              <button type="button" class="close" data-dismiss="alert" aria-hidden="true"><i class="icon-cancel-circled"></i></button>
 
              ${(h.HTML(_('Gist was updated since you started editing. Copy your changes and click %(here)s to reload new version.'))
 
                             % {'here': h.link_to(_('here'),h.url('edit_gist', gist_id=c.gist.gist_access_id))})}
 
            </div>
 
            <script>
 
            if (typeof jQuery != 'undefined') {
 
                $(".alert").alert();
 
            }
 
            </script>
 
        </div>
 

	
 
        <div id="files_data">
 
          ${h.form(h.url('edit_gist', gist_id=c.gist.gist_access_id), method='post', id='eform')}
 
            <div>
 
                <input type="hidden" value="${c.file_changeset.raw_id}" name="parent_hash">
 
                <textarea class="form-control"
 
                          id="description" name="description"
 
                          placeholder="${_('Gist description ...')}">${c.gist.gist_description}</textarea>
 
                <div>
 
                    <label>
 
                        ${_('Gist lifetime')}
 
                        ${h.select('lifetime', '0', c.lifetime_options)}
 
                    </label>
 
                    <span class="text-muted">
 
                     %if c.gist.gist_expires == -1:
 
                      ${_('Expires')}: ${_('Never')}
 
                     %else:
 
                      ${_('Expires')}: ${h.age(h.time_to_datetime(c.gist.gist_expires))}
 
                     %endif
 
                   </span>
 
                </div>
 
            </div>
 

	
 
            % for cnt, file in enumerate(c.files):
 
                <div id="body" class="panel panel-default form-inline">
 
                    <div class="panel-heading">
 
                        <input type="hidden" value="${h.safe_unicode(file.path)}" name="org_files">
 
                        <input class="form-control" id="filename_${h.FID('f',file.path)}" name="files" size="30" type="text" value="${h.safe_unicode(file.path)}">
 
                        <select class="form-control" id="mimetype_${h.FID('f',file.path)}" name="mimetypes"></select>
 
                    </div>
 
                    <div class="panel-body no-padding">
 
                        <div id="editor_container">
 
                            <textarea id="editor_${h.FID('f',file.path)}" name="contents" style="display:none">${file.content}</textarea>
 
                            <textarea id="editor_${h.FID('f',file.path)}" name="contents" style="display:none">${safe_unicode(file.content)}</textarea>
 
                        </div>
 
                    </div>
 
                </div>
 

	
 
                ## dynamic edit box.
 
                <script type="text/javascript">
 
                    $(document).ready(function(){
 
                        var myCodeMirror = initCodeMirror(${h.js('editor_' + h.FID('f',file.path))}, ${h.jshtml(request.script_name)}, '');
 

	
 
                        //inject new modes
 
                        var $mimetype_select = $(${h.js('#mimetype_' + h.FID('f',file.path))});
 
                        $mimetype_select.each(function(){
 
                            var modes_select = this;
 
                            var index = 1;
 
                            for(var i=0;i<CodeMirror.modeInfo.length;i++) {
 
                                var m = CodeMirror.modeInfo[i];
 
                                var opt = new Option(m.name, m.mime);
 
                                $(opt).attr('mode', m.mode);
 
                                if (m.mime == 'text/plain') {
 
                                    // default plain text
 
                                    $(opt).prop('selected', true);
 
                                    modes_select.options[0] = opt;
 
                                } else {
 
                                    modes_select.options[index++] = opt;
 
                                }
 
                            }
 
                        });
 

	
 
                        var $filename_input = $(${h.js('#filename_' + h.FID('f',file.path))});
 
                        // on select change set new mode
 
                        $mimetype_select.change(function(e){
 
                            var selected = e.currentTarget;
 
                            var node = selected.options[selected.selectedIndex];
 
                            var detected_mode = CodeMirror.findModeByMIME(node.value);
 
                            setCodeMirrorMode(myCodeMirror, detected_mode);
 

	
 
                            var proposed_ext = CodeMirror.findExtensionByMode(detected_mode);
 
                            var file_data = CodeMirror.getFilenameAndExt($filename_input.val());
 
                            var filename = file_data['filename'] || 'filename1';
 
                            $filename_input.val(filename + '.' + proposed_ext);
 
                        });
 

	
 
                        // on type the new filename set mode
 
                        $filename_input.keyup(function(e){
 
                            var file_data = CodeMirror.getFilenameAndExt(this.value);
 
                            if(file_data['ext'] != null){
 
                                var detected_mode = CodeMirror.findModeByExtension(file_data['ext']) || CodeMirror.findModeByMIME('text/plain');
 

	
 
                                if (detected_mode){
 
                                    setCodeMirrorMode(myCodeMirror, detected_mode);
 
                                    $mimetype_select.val(detected_mode.mime);
 
                                }
 
                            }
 
                        });
 

	
 
                        // set mode on page load
 
                        var detected_mode = CodeMirror.findModeByExtension(${h.js(file.extension)});
 

	
 
                        if (detected_mode){
 
                            setCodeMirrorMode(myCodeMirror, detected_mode);
 
                            $mimetype_select.val(detected_mode.mime);
 
                        }
 
                    });
 
                </script>
 

	
 
            %endfor
 

	
 
            <div>
 
            ${h.submit('update',_('Update Gist'),class_="btn btn-success")}
 
            <a class="btn btn-default" href="${h.url('gist', gist_id=c.gist.gist_access_id)}">${_('Cancel')}</a>
 
            </div>
 
          ${h.end_form()}
 
          <script>
 
              $('#update').on('click', function(e){
 
                  e.preventDefault();
 

	
 
                  // check for newer version.
 
                  $.ajax({
 
                    url: ${h.js(h.url('edit_gist_check_revision', gist_id=c.gist.gist_access_id))},
 
                    data: {'revision': ${h.js(c.file_changeset.raw_id)}, '_session_csrf_secret_token': _session_csrf_secret_token},
 
                    dataType: 'json',
 
                    type: 'POST',
 
                    success: function(data) {
 
                      if(data.success == false){
 
                          $('#edit_error').show();
 
                      }
 
                      else{
 
                        $('#eform').submit();
 
                      }
 
                    }
 
                  });
 
              });
 
          </script>
 
        </div>
 
    </div>
 

	
kallithea/templates/files/files_edit.html
Show inline comments
 
<%inherit file="/base/base.html"/>
 

	
 
<%block name="title">
 
    ${_('%s File Edit') % c.repo_name}
 
</%block>
 

	
 
<%block name="js_extra">
 
  <script type="text/javascript" src="${h.url('/codemirror/lib/codemirror.js')}"></script>
 
  <script type="text/javascript" src="${h.url('/js/codemirror_loadmode.js')}"></script>
 
  <script type="text/javascript" src="${h.url('/codemirror/mode/meta.js')}"></script>
 
</%block>
 
<%block name="css_extra">
 
  <link rel="stylesheet" type="text/css" href="${h.url('/codemirror/lib/codemirror.css')}"/>
 
</%block>
 

	
 
<%block name="header_menu">
 
    ${self.menu('repositories')}
 
</%block>
 

	
 
<%def name="breadcrumbs_links()">
 
    ${_('Edit file')} @ ${h.show_id(c.cs)}
 
</%def>
 

	
 
<%def name="main()">
 
${self.repo_context_bar('files')}
 
<div class="panel panel-primary">
 
    <div class="panel-heading clearfix">
 
        <div class="pull-left">
 
            ${self.breadcrumbs()}
 
        </div>
 
        <div class="pull-right">
 
            <a href="#">${_('Branch')}: ${c.cs.branch}</a>
 
        </div>
 
    </div>
 
    <div class="panel-body" id="edit">
 
        <div id="files_data">
 
            ${h.form(h.url.current(),method='post',id='eform')}
 
            <h3 class="files_location">
 
              ${_('Location')}: ${h.files_breadcrumbs(c.repo_name,c.cs.raw_id,c.file.path)}
 
            </h3>
 
            <div id="body" class="panel panel-default">
 
              <div class="panel-heading clearfix form-inline form-group-sm">
 
                    <span>
 
                        <span><i class="icon-doc-inv"></i></span>
 
                        <span>${h.link_to(h.show_id(c.file.changeset),h.url('changeset_home',repo_name=c.repo_name,revision=c.file.changeset.raw_id))}</span>
 
                        <span>${h.format_byte_size(c.file.size,binary=True)}</span>
 
                        <span>${c.file.mimetype}</span>
 
                        <select class="form-control" id="mimetype" name="mimetype"></select>
 
                    </span>
 
                    <span class="pull-right buttons">
 
                      ${h.link_to(_('Show Annotation'),h.url('files_annotate_home',repo_name=c.repo_name,revision=c.cs.raw_id,f_path=c.f_path),class_="btn btn-default btn-xs")}
 
                      ${h.link_to(_('Show as Raw'),h.url('files_raw_home',repo_name=c.repo_name,revision=c.cs.raw_id,f_path=c.f_path),class_="btn btn-default btn-xs")}
 
                      ${h.link_to(_('Download as Raw'),h.url('files_rawfile_home',repo_name=c.repo_name,revision=c.cs.raw_id,f_path=c.f_path),class_="btn btn-default btn-xs")}
 
                      % if h.HasRepoPermissionLevel('write')(c.repo_name):
 
                       % if not c.file.is_binary:
 
                        ${h.link_to(_('Source'),h.url('files_home',repo_name=c.repo_name,revision=c.cs.raw_id,f_path=c.f_path),class_="btn btn-default btn-xs")}
 
                       % endif
 
                      % endif
 
                    </span>
 
              </div>
 
              <div class="panel-body no-padding">
 
                <textarea id="editor" name="content" style="display:none">${h.escape(c.file.content)|n}</textarea>
 
                <textarea id="editor" name="content" style="display:none">${h.escape(safe_unicode(c.file.content))|n}</textarea>
 
              </div>
 
            </div>
 
            <div>
 
              <div class="form-group">
 
                  <label>${_('Commit Message')}</label>
 
                  <textarea class="form-control" id="commit" name="message" placeholder="${c.default_message}"></textarea>
 
              </div>
 
              <div class="form-group buttons">
 
                ${h.submit('commit',_('Commit Changes'),class_="btn btn-success")}
 
                ${h.reset('reset',_('Reset'),class_="btn btn-default")}
 
              </div>
 
            </div>
 
            ${h.end_form()}
 
        </div>
 
    </div>
 
</div>
 

	
 
<script type="text/javascript">
 
    $(document).ready(function(){
 
        var reset_url = ${h.jshtml(h.url('files_home',repo_name=c.repo_name,revision=c.cs.raw_id,f_path=c.file.path))};
 
        var myCodeMirror = initCodeMirror('editor', ${h.jshtml(request.script_name)}, reset_url);
 

	
 
       //inject new modes, based on codeMirrors modeInfo object
 
        var $mimetype_select = $('#mimetype');
 
        $mimetype_select.each(function(){
 
            var modes_select = this;
 
            var index = 1;
 
            for(var i=0;i<CodeMirror.modeInfo.length;i++){
 
                var m = CodeMirror.modeInfo[i];
 
                var opt = new Option(m.name, m.mime);
 
                $(opt).attr('mode', m.mode);
 
                if (m.mime == 'text/plain') {
 
                    // default plain text
 
                    $(opt).prop('selected', true);
 
                    modes_select.options[0] = opt;
 
                } else {
 
                    modes_select.options[index++] = opt;
 
                }
 
            }
 
        });
 
        // try to detect the mode based on the file we edit
 
        var detected_mode = CodeMirror.findModeByExtension(${h.js(c.file.extension)});
 
        if(detected_mode){
 
            setCodeMirrorMode(myCodeMirror, detected_mode);
 
            $($mimetype_select.find('option[value="'+detected_mode.mime+'"]')[0]).prop('selected', true);
 
        }
 

	
 
        $mimetype_select.on('change', function(e){
 
            var selected = e.currentTarget;
 
            var node = selected.options[selected.selectedIndex];
 
            var detected_mode = CodeMirror.findModeByMIME(node.value);
 
            setCodeMirrorMode(myCodeMirror, detected_mode);
 
        });
 
    });
 
</script>
 
</%def>
kallithea/tests/vcs/test_git.py
Show inline comments
 
@@ -503,197 +503,197 @@ class TestGitChangeset(object):
 
                        '16fba1ae9334d79b66d7afed2c2dfbfa2ae53647',
 
                        '78c3f0c23b7ee935ec276acb8b8212444c33c396',
 
                        '992f38217b979d0b0987d0bae3cc26dac85d9b19',
 
                        '992f38217b979d0b0987d0bae3cc26dac85d9b19',
 
                        '992f38217b979d0b0987d0bae3cc26dac85d9b19',
 
                        '992f38217b979d0b0987d0bae3cc26dac85d9b19',
 
                        '2a13f185e4525f9d4b59882791a2d397b90d5ddc',
 
                        '992f38217b979d0b0987d0bae3cc26dac85d9b19',
 
                        '78c3f0c23b7ee935ec276acb8b8212444c33c396',
 
                        '992f38217b979d0b0987d0bae3cc26dac85d9b19',
 
                        '992f38217b979d0b0987d0bae3cc26dac85d9b19',
 
                        '992f38217b979d0b0987d0bae3cc26dac85d9b19',
 
                        '16fba1ae9334d79b66d7afed2c2dfbfa2ae53647',
 
                        '16fba1ae9334d79b66d7afed2c2dfbfa2ae53647',
 
                        '16fba1ae9334d79b66d7afed2c2dfbfa2ae53647',
 
                        '16fba1ae9334d79b66d7afed2c2dfbfa2ae53647',
 
                        '992f38217b979d0b0987d0bae3cc26dac85d9b19',
 
                        '16fba1ae9334d79b66d7afed2c2dfbfa2ae53647',
 
                        '992f38217b979d0b0987d0bae3cc26dac85d9b19',
 
                        '992f38217b979d0b0987d0bae3cc26dac85d9b19',
 
                        '16fba1ae9334d79b66d7afed2c2dfbfa2ae53647',
 
                        '16fba1ae9334d79b66d7afed2c2dfbfa2ae53647',
 
                        '16fba1ae9334d79b66d7afed2c2dfbfa2ae53647',
 
                    ],
 
                },
 
            },
 
        }
 

	
 
        for fname, revision_dict in files.items():
 
            for rev, data in revision_dict.items():
 
                cs = self.repo.get_changeset(rev)
 

	
 
                l1_1 = [x[1] for x in cs.get_file_annotate(fname)]
 
                l1_2 = [x[2]().raw_id for x in cs.get_file_annotate(fname)]
 
                assert l1_1 == l1_2
 
                l1 = l1_1
 
                l2 = files[fname][rev]['changesets']
 
                assert l1 == l2, "The lists of revision for %s@rev %s" \
 
                    "from annotation list should match each other, " \
 
                    "got \n%s \nvs \n%s " % (fname, rev, l1, l2)
 

	
 
    def test_files_state(self):
 
        """
 
        Tests state of FileNodes.
 
        """
 
        node = self.repo \
 
            .get_changeset('e6ea6d16e2f26250124a1f4b4fe37a912f9d86a0') \
 
            .get_node('vcs/utils/diffs.py')
 
        assert node.state, NodeState.ADDED
 
        assert node.added
 
        assert not node.changed
 
        assert not node.not_changed
 
        assert not node.removed
 

	
 
        node = self.repo \
 
            .get_changeset('33fa3223355104431402a888fa77a4e9956feb3e') \
 
            .get_node('.hgignore')
 
        assert node.state, NodeState.CHANGED
 
        assert not node.added
 
        assert node.changed
 
        assert not node.not_changed
 
        assert not node.removed
 

	
 
        node = self.repo \
 
            .get_changeset('e29b67bd158580fc90fc5e9111240b90e6e86064') \
 
            .get_node('setup.py')
 
        assert node.state, NodeState.NOT_CHANGED
 
        assert not node.added
 
        assert not node.changed
 
        assert node.not_changed
 
        assert not node.removed
 

	
 
        # If node has REMOVED state then trying to fetch it would raise
 
        # ChangesetError exception
 
        chset = self.repo.get_changeset(
 
            'fa6600f6848800641328adbf7811fd2372c02ab2')
 
        path = 'vcs/backends/BaseRepository.py'
 
        with pytest.raises(NodeDoesNotExistError):
 
            chset.get_node(path)
 
        # but it would be one of ``removed`` (changeset's attribute)
 
        assert path in [rf.path for rf in chset.removed]
 

	
 
        chset = self.repo.get_changeset(
 
            '54386793436c938cff89326944d4c2702340037d')
 
        changed = ['setup.py', 'tests/test_nodes.py', 'vcs/backends/hg.py',
 
            'vcs/nodes.py']
 
        assert set(changed) == set([f.path for f in chset.changed])
 

	
 
    def test_commit_message_is_unicode(self):
 
        for cs in self.repo:
 
            assert isinstance(cs.message, unicode)
 

	
 
    def test_changeset_author_is_unicode(self):
 
        for cs in self.repo:
 
            assert isinstance(cs.author, unicode)
 

	
 
    def test_repo_files_content_is_unicode(self):
 
    def test_repo_files_content_is_bytes(self):
 
        changeset = self.repo.get_changeset()
 
        for node in changeset.get_node('/'):
 
            if node.is_file():
 
                assert isinstance(node.content, unicode)
 
                assert isinstance(node.content, bytes)
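Following the updated assertion, a hedged sketch of the explicit decoding callers are now expected to do (``safe_unicode`` is the helper the other hunks in this changeset use):

                # illustrative sketch only, not part of the test
                assert isinstance(node.content, bytes)
                assert isinstance(safe_unicode(node.content), unicode)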
 

	
 
    def test_wrong_path(self):
 
        # There is 'setup.py' in the root dir but not there:
 
        path = 'foo/bar/setup.py'
 
        tip = self.repo.get_changeset()
 
        with pytest.raises(VCSError):
 
            tip.get_node(path)
 

	
 
    def test_author_email(self):
 
        assert 'marcin@python-blog.com' == self.repo.get_changeset('c1214f7e79e02fc37156ff215cd71275450cffc3').author_email
 
        assert 'lukasz.balcerzak@python-center.pl' == self.repo.get_changeset('ff7ca51e58c505fec0dd2491de52c622bb7a806b').author_email
 
        assert '' == self.repo.get_changeset('8430a588b43b5d6da365400117c89400326e7992').author_email
 

	
 
    def test_author_username(self):
 
        assert 'Marcin Kuzminski' == self.repo.get_changeset('c1214f7e79e02fc37156ff215cd71275450cffc3').author_name
 
        assert 'Lukasz Balcerzak' == self.repo.get_changeset('ff7ca51e58c505fec0dd2491de52c622bb7a806b').author_name
 
        assert 'marcink none@none' == self.repo.get_changeset('8430a588b43b5d6da365400117c89400326e7992').author_name
 

	
 

	
 
class TestGitSpecificWithRepo(_BackendTestMixin):
 
    backend_alias = 'git'
 

	
 
    @classmethod
 
    def _get_commits(cls):
 
        return [
 
            {
 
                'message': 'Initial',
 
                'author': 'Joe Doe <joe.doe@example.com>',
 
                'date': datetime.datetime(2010, 1, 1, 20),
 
                'added': [
 
                    FileNode('foobar/static/js/admin/base.js', content='base'),
 
                    FileNode('foobar/static/admin', content='admin',
 
                        mode=0o120000), # this is a link
 
                    FileNode('foo', content='foo'),
 
                ],
 
            },
 
            {
 
                'message': 'Second',
 
                'author': 'Joe Doe <joe.doe@example.com>',
 
                'date': datetime.datetime(2010, 1, 1, 22),
 
                'added': [
 
                    FileNode('foo2', content='foo2'),
 
                ],
 
            },
 
        ]
 

	
 
    def test_paths_slow_traversing(self):
 
        cs = self.repo.get_changeset()
 
        assert cs.get_node('foobar').get_node('static').get_node('js').get_node('admin').get_node('base.js').content == 'base'
 

	
 
    def test_paths_fast_traversing(self):
 
        cs = self.repo.get_changeset()
 
        assert cs.get_node('foobar/static/js/admin/base.js').content == 'base'
 

	
 
    def test_workdir_get_branch(self):
 
        self.repo.run_git_command(['checkout', '-b', 'production'])
 
        # Regression test: one of following would fail if we don't check
 
        # .git/HEAD file
 
        self.repo.run_git_command(['checkout', 'production'])
 
        assert self.repo.workdir.get_branch() == 'production'
 
        self.repo.run_git_command(['checkout', 'master'])
 
        assert self.repo.workdir.get_branch() == 'master'
 

	
 
    def test_get_diff_runs_git_command_with_hashes(self):
 
        self.repo._run_git_command = mock.Mock(return_value=(b'', b''))
 
        self.repo.get_diff(0, 1)
 
        self.repo._run_git_command.assert_called_once_with(
 
            ['diff', '-U3', '--full-index', '--binary', '-p', '-M', '--abbrev=40',
 
             self.repo._get_revision(0), self.repo._get_revision(1)], cwd=self.repo.path)
 

	
 
    def test_get_diff_runs_git_command_with_str_hashes(self):
 
        self.repo._run_git_command = mock.Mock(return_value=(b'', b''))
 
        self.repo.get_diff(self.repo.EMPTY_CHANGESET, 1)
 
        self.repo._run_git_command.assert_called_once_with(
 
            ['show', '-U3', '--full-index', '--binary', '-p', '-M', '--abbrev=40',
 
             self.repo._get_revision(1)], cwd=self.repo.path)
 

	
 
    def test_get_diff_runs_git_command_with_path_if_its_given(self):
 
        self.repo._run_git_command = mock.Mock(return_value=(b'', b''))
 
        self.repo.get_diff(0, 1, 'foo')
 
        self.repo._run_git_command.assert_called_once_with(
 
            ['diff', '-U3', '--full-index', '--binary', '-p', '-M', '--abbrev=40',
 
             self.repo._get_revision(0), self.repo._get_revision(1), '--', 'foo'], cwd=self.repo.path)
 

	
 
    def test_get_diff_does_not_sanitize_valid_context(self):
 
        almost_overflowed_long_int = 2**31-1
 

	
 
        self.repo._run_git_command = mock.Mock(return_value=(b'', b''))
 
        self.repo.get_diff(0, 1, 'foo', context=almost_overflowed_long_int)
 
        self.repo._run_git_command.assert_called_once_with(
 
            ['diff', '-U' + str(almost_overflowed_long_int), '--full-index', '--binary', '-p', '-M', '--abbrev=40',
 
             self.repo._get_revision(0), self.repo._get_revision(1), '--', 'foo'], cwd=self.repo.path)
 

	
 
    def test_get_diff_sanitizes_overflowing_context(self):
 
        overflowed_long_int = 2**31
 
        sanitized_overflowed_long_int = overflowed_long_int-1
kallithea/tests/vcs/test_hg.py
Show inline comments
 
@@ -451,142 +451,142 @@ class TestMercurialChangeset(object):
 

	
 
        added = set(['docs/api/backends/hg.rst'
 
            , 'docs/api/backends/index.rst', 'docs/api/index.rst'
 
            , 'docs/api/nodes.rst', 'docs/api/web/index.rst'
 
            , 'docs/api/web/simplevcs.rst', 'docs/installation.rst'
 
            , 'docs/quickstart.rst', 'setup.cfg', 'vcs/utils/baseui_config.py'
 
            , 'vcs/utils/web.py', 'vcs/web/__init__.py', 'vcs/web/exceptions.py'
 
            , 'vcs/web/simplevcs/__init__.py', 'vcs/web/simplevcs/exceptions.py'
 
            , 'vcs/web/simplevcs/middleware.py', 'vcs/web/simplevcs/models.py'
 
            , 'vcs/web/simplevcs/settings.py', 'vcs/web/simplevcs/utils.py'
 
            , 'vcs/web/simplevcs/views.py'])
 

	
 
        removed = set(['docs/api.rst'])
 

	
 
        chset64 = self.repo.get_changeset('46ad32a4f974')
 
        assert set((node.path for node in chset64.added)) == added
 
        assert set((node.path for node in chset64.changed)) == changed
 
        assert set((node.path for node in chset64.removed)) == removed
 

	
 
        # rev b090f22d27d6:
 
        # hg st --rev b090f22d27d6
 
        #    changed: 13
 
        #    added:   20
 
        #    removed: 1
 
        chset88 = self.repo.get_changeset('b090f22d27d6')
 
        assert set((node.path for node in chset88.added)) == set()
 
        assert set((node.path for node in chset88.changed)) == set(['.hgignore'])
 
        assert set((node.path for node in chset88.removed)) == set()
 

	
 
        # 85:
 
        #    added:   2 ['vcs/utils/diffs.py', 'vcs/web/simplevcs/views/diffs.py']
 
        #    changed: 4 ['vcs/web/simplevcs/models.py', ...]
 
        #    removed: 1 ['vcs/utils/web.py']
 
        chset85 = self.repo.get_changeset(85)
 
        assert set((node.path for node in chset85.added)) == set([
 
            'vcs/utils/diffs.py',
 
            'vcs/web/simplevcs/views/diffs.py'
 
        ])
 

	
 
        assert set((node.path for node in chset85.changed)) == set([
 
            'vcs/web/simplevcs/models.py',
 
            'vcs/web/simplevcs/utils.py',
 
            'vcs/web/simplevcs/views/__init__.py',
 
            'vcs/web/simplevcs/views/repository.py',
 
        ])
 

	
 
        assert set((node.path for node in chset85.removed)) == set([
 
            'vcs/utils/web.py'
 
        ])
 

	
 

	
 
    def test_files_state(self):
 
        """
 
        Tests state of FileNodes.
 
        """
 
        chset = self.repo.get_changeset(85)
 
        node = chset.get_node('vcs/utils/diffs.py')
 
        assert node.state, NodeState.ADDED
 
        assert node.added
 
        assert not node.changed
 
        assert not node.not_changed
 
        assert not node.removed
 

	
 
        chset = self.repo.get_changeset(88)
 
        node = chset.get_node('.hgignore')
 
        assert node.state, NodeState.CHANGED
 
        assert not node.added
 
        assert node.changed
 
        assert not node.not_changed
 
        assert not node.removed
 

	
 
        chset = self.repo.get_changeset(85)
 
        node = chset.get_node('setup.py')
 
        assert node.state, NodeState.NOT_CHANGED
 
        assert not node.added
 
        assert not node.changed
 
        assert node.not_changed
 
        assert not node.removed
 

	
 
        # If node has REMOVED state then trying to fetch it would raise
 
        # ChangesetError exception
 
        chset = self.repo.get_changeset(2)
 
        path = 'vcs/backends/BaseRepository.py'
 
        with pytest.raises(NodeDoesNotExistError):
 
            chset.get_node(path)
 
        # but it would be one of ``removed`` (changeset's attribute)
 
        assert path in [rf.path for rf in chset.removed]
 

	
 
    def test_commit_message_is_unicode(self):
 
        for cm in self.repo:
 
            assert isinstance(cm.message, unicode)
 

	
 
    def test_changeset_author_is_unicode(self):
 
        for cm in self.repo:
 
            assert isinstance(cm.author, unicode)
 

	
 
    def test_repo_files_content_is_unicode(self):
 
    def test_repo_files_content_is_bytes(self):
 
        test_changeset = self.repo.get_changeset(100)
 
        for node in test_changeset.get_node('/'):
 
            if node.is_file():
 
                assert isinstance(node.content, unicode)
 
                assert isinstance(node.content, bytes)
 

	
 
    def test_wrong_path(self):
 
        # There is 'setup.py' in the root dir but not there:
 
        path = 'foo/bar/setup.py'
 
        with pytest.raises(VCSError):
 
            self.repo.get_changeset().get_node(path)
 

	
 
    def test_archival_file(self):
 
        # TODO:
 
        pass
 

	
 
    def test_archival_as_generator(self):
 
        # TODO:
 
        pass
 

	
 
    def test_archival_wrong_kind(self):
 
        tip = self.repo.get_changeset()
 
        with pytest.raises(VCSError):
 
            tip.fill_archive(kind='error')
 

	
 
    def test_archival_empty_prefix(self):
 
        # TODO:
 
        pass
 

	
 
    def test_author_email(self):
 
        assert 'marcin@python-blog.com' == self.repo.get_changeset('b986218ba1c9').author_email
 
        assert 'lukasz.balcerzak@python-center.pl' == self.repo.get_changeset('3803844fdbd3').author_email
 
        assert '' == self.repo.get_changeset('84478366594b').author_email
 

	
 
    def test_author_username(self):
 
        assert 'Marcin Kuzminski' == self.repo.get_changeset('b986218ba1c9').author_name
 
        assert 'Lukasz Balcerzak' == self.repo.get_changeset('3803844fdbd3').author_name
 
        assert 'marcink' == self.repo.get_changeset('84478366594b').author_name
 

	
 
    def test_successors(self):
 
        init_chset = self.repo.get_changeset(0)
 
        assert init_chset.successors == []
 

	
 
    def test_predecessors(self):
 
        init_chset = self.repo.get_changeset(0)
 
        assert len(init_chset.predecessors) == 0
0 comments (0 inline, 0 general)