Changeset - 2323e2bb2797
[Not reviewed]
Branch: default
Mads Kiilerich <mads@kiilerich.com> - 2019-11-08 00:23:10
Grafted from: c97a16f46588
page: consistently invoke Page with kwargs instead of explicitly providing a custom url generator

Drop the option of providing a URL maker as 'url', and thus also hardcode the
assumption that the page parameter is called 'page'.
4 files changed with 11 insertions and 20 deletions:
0 comments (0 inline, 0 general)
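The change is mechanical across the touched controllers: the per-call url_generator helpers are dropped and the extra query parameters are passed straight to Page as keyword arguments. A rough before/after sketch of the calling convention, taken from the admin controller diff below (the surrounding state such as c.search_term and p is assumed to be set up as shown there):

    # before: the caller supplies its own URL maker via the 'url' argument
    def url_generator(**kw):
        return url.current(filter=c.search_term, **kw)

    c.users_log = Page(users_log, page=p, items_per_page=10, url=url_generator)

    # after: the caller passes the extra query parameter directly; Page always
    # builds links with url.current and assumes the page parameter is 'page'
    c.users_log = Page(users_log, page=p, items_per_page=10,
                       filter=c.search_term)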
kallithea/controllers/admin/admin.py
 
# -*- coding: utf-8 -*-
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program.  If not, see <http://www.gnu.org/licenses/>.
"""
kallithea.controllers.admin.admin
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

Controller for Admin panel of Kallithea

This file was forked by the Kallithea project in July 2014.
Original author and date, and relevant copyright and licensing information is below:
:created_on: Apr 7, 2010
:author: marcink
:copyright: (c) 2013 RhodeCode GmbH, and others.
:license: GPLv3, see LICENSE.md for more details.
"""


import logging

from sqlalchemy.orm import joinedload
from sqlalchemy.sql.expression import and_, func, or_
from tg import request
from tg import tmpl_context as c
from whoosh import query
from whoosh.qparser.dateparse import DateParserPlugin
from whoosh.qparser.default import QueryParser

from kallithea.config.routing import url
from kallithea.lib.auth import HasPermissionAnyDecorator, LoginRequired
from kallithea.lib.base import BaseController, render
from kallithea.lib.indexers import JOURNAL_SCHEMA
from kallithea.lib.page import Page
from kallithea.lib.utils2 import remove_prefix, remove_suffix, safe_int
from kallithea.model.db import UserLog


log = logging.getLogger(__name__)


def _journal_filter(user_log, search_term):
    """
    Filters sqlalchemy user_log based on search_term with whoosh Query language
    http://packages.python.org/Whoosh/querylang.html

    :param user_log:
    :param search_term:
    """
    log.debug('Initial search term: %r', search_term)
    qry = None
    if search_term:
        qp = QueryParser('repository', schema=JOURNAL_SCHEMA)
        qp.add_plugin(DateParserPlugin())
        qry = qp.parse(unicode(search_term))
        log.debug('Filtering using parsed query %r', qry)

    def wildcard_handler(col, wc_term):
        if wc_term.startswith('*') and not wc_term.endswith('*'):
            # postfix == endswith
            wc_term = remove_prefix(wc_term, prefix='*')
            return func.lower(col).endswith(func.lower(wc_term))
        elif wc_term.startswith('*') and wc_term.endswith('*'):
            # wildcard == ilike
            wc_term = remove_prefix(wc_term, prefix='*')
            wc_term = remove_suffix(wc_term, suffix='*')
            return func.lower(col).contains(func.lower(wc_term))

    def get_filterion(field, val, term):

        if field == 'repository':
            field = getattr(UserLog, 'repository_name')
        elif field == 'ip':
            field = getattr(UserLog, 'user_ip')
        elif field == 'date':
            field = getattr(UserLog, 'action_date')
        elif field == 'username':
            field = getattr(UserLog, 'username')
@@ -94,57 +93,55 @@ def _journal_filter(user_log, search_ter
            return wildcard_handler(field, val)
        elif isinstance(term, query.Prefix):
            return func.lower(field).startswith(func.lower(val))
        elif isinstance(term, query.DateRange):
            return and_(field >= val[0], field <= val[1])
        return func.lower(field) == func.lower(val)

    if isinstance(qry, (query.And, query.Term, query.Prefix, query.Wildcard,
                        query.DateRange)):
        if not isinstance(qry, query.And):
            qry = [qry]
        for term in qry:
            field = term.fieldname
            val = (term.text if not isinstance(term, query.DateRange)
                   else [term.startdate, term.enddate])
            user_log = user_log.filter(get_filterion(field, val, term))
    elif isinstance(qry, query.Or):
        filters = []
        for term in qry:
            field = term.fieldname
            val = (term.text if not isinstance(term, query.DateRange)
                   else [term.startdate, term.enddate])
            filters.append(get_filterion(field, val, term))
        user_log = user_log.filter(or_(*filters))

    return user_log


class AdminController(BaseController):

    @LoginRequired(allow_default_user=True)
    def _before(self, *args, **kwargs):
        super(AdminController, self)._before(*args, **kwargs)

    @HasPermissionAnyDecorator('hg.admin')
    def index(self):
        users_log = UserLog.query() \
                .options(joinedload(UserLog.user)) \
                .options(joinedload(UserLog.repository))

        # FILTERING
        c.search_term = request.GET.get('filter')
        users_log = _journal_filter(users_log, c.search_term)

        users_log = users_log.order_by(UserLog.action_date.desc())

        p = safe_int(request.GET.get('page'), 1)

        def url_generator(**kw):
            return url.current(filter=c.search_term, **kw)

        c.users_log = Page(users_log, page=p, items_per_page=10, url=url_generator)
        c.users_log = Page(users_log, page=p, items_per_page=10,
                           filter=c.search_term)

        if request.environ.get('HTTP_X_PARTIAL_XHR'):
            return render('admin/admin_log.html')

        return render('admin/admin.html')
kallithea/controllers/journal.py
 
# -*- coding: utf-8 -*-
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program.  If not, see <http://www.gnu.org/licenses/>.
"""
kallithea.controllers.journal
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

Journal controller

This file was forked by the Kallithea project in July 2014.
Original author and date, and relevant copyright and licensing information is below:
:created_on: Nov 21, 2010
:author: marcink
:copyright: (c) 2013 RhodeCode GmbH, and others.
:license: GPLv3, see LICENSE.md for more details.

"""

import logging
import traceback
from itertools import groupby

from sqlalchemy import or_
from sqlalchemy.orm import joinedload
from tg import request, response
from tg import tmpl_context as c
from tg.i18n import ugettext as _
from webhelpers.feedgenerator import Atom1Feed, Rss201rev2Feed
from webob.exc import HTTPBadRequest

import kallithea.lib.helpers as h
from kallithea.config.routing import url
from kallithea.controllers.admin.admin import _journal_filter
from kallithea.lib.auth import LoginRequired
from kallithea.lib.base import BaseController, render
from kallithea.lib.page import Page
from kallithea.lib.utils2 import AttributeDict, safe_int
from kallithea.model.db import Repository, User, UserFollowing, UserLog
from kallithea.model.meta import Session
from kallithea.model.repo import RepoModel


log = logging.getLogger(__name__)


language = 'en-us'
ttl = "5"
feed_nr = 20


class JournalController(BaseController):

    def _before(self, *args, **kwargs):
        super(JournalController, self)._before(*args, **kwargs)
        c.search_term = request.GET.get('filter')

    def _get_daily_aggregate(self, journal):
        groups = []
        for k, g in groupby(journal, lambda x: x.action_as_day):
            user_group = []
            # groupby username if it's a present value, else fallback to journal username
            for _unused, g2 in groupby(list(g), lambda x: x.user.username if x.user else x.username):
                l = list(g2)
                user_group.append((l[0].user, l))

            groups.append((k, user_group,))

        return groups

    def _get_journal_data(self, following_repos):
        repo_ids = [x.follows_repository_id for x in following_repos
                    if x.follows_repository_id is not None]
        user_ids = [x.follows_user_id for x in following_repos
                    if x.follows_user_id is not None]

        filtering_criterion = None

        if repo_ids and user_ids:
            filtering_criterion = or_(UserLog.repository_id.in_(repo_ids),
                        UserLog.user_id.in_(user_ids))
@@ -156,100 +155,98 @@ class JournalController(BaseController):
        else:
            _link = h.canonical_url('journal_atom')
            _desc = '%s %s %s' % (c.site_name, _('Journal'), 'rss feed')

        feed = Rss201rev2Feed(title=_desc,
                         link=_link,
                         description=_desc,
                         language=language,
                         ttl=ttl)

        for entry in journal[:feed_nr]:
            user = entry.user
            if user is None:
                # fix deleted users
                user = AttributeDict({'short_contact': entry.username,
                                      'email': '',
                                      'full_contact': ''})
            action, action_extra, ico = h.action_parser(entry, feed=True)
            title = "%s - %s %s" % (user.short_contact, action(),
                                    entry.repository.repo_name)
            desc = action_extra()
            _url = None
            if entry.repository is not None:
                _url = h.canonical_url('changelog_home',
                           repo_name=entry.repository.repo_name)

            feed.add_item(title=title,
                          pubdate=entry.action_date,
                          link=_url or h.canonical_url(''),
                          author_email=user.email,
                          author_name=user.full_contact,
                          description=desc)

        response.content_type = feed.mime_type
        return feed.writeString('utf-8')

    @LoginRequired()
    def index(self):
        # Return a rendered template
        p = safe_int(request.GET.get('page'), 1)
        c.user = User.get(request.authuser.user_id)
        c.following = UserFollowing.query() \
            .filter(UserFollowing.user_id == request.authuser.user_id) \
            .options(joinedload(UserFollowing.follows_repository)) \
            .all()

        journal = self._get_journal_data(c.following)

        def url_generator(**kw):
            return url.current(filter=c.search_term, **kw)

        c.journal_pager = Page(journal, page=p, items_per_page=20, url=url_generator)
        c.journal_pager = Page(journal, page=p, items_per_page=20,
                               filter=c.search_term)
        c.journal_day_aggregate = self._get_daily_aggregate(c.journal_pager)

        if request.environ.get('HTTP_X_PARTIAL_XHR'):
            return render('journal/journal_data.html')

        repos_list = Repository.query(sorted=True) \
            .filter_by(owner_id=request.authuser.user_id).all()

        repos_data = RepoModel().get_repos_as_dict(repos_list, admin=True)
        # data used to render the grid
        c.data = repos_data

        return render('journal/journal.html')

    @LoginRequired()
    def journal_atom(self):
        """
        Produce an atom-1.0 feed via feedgenerator module
        """
        following = UserFollowing.query() \
            .filter(UserFollowing.user_id == request.authuser.user_id) \
            .options(joinedload(UserFollowing.follows_repository)) \
            .all()
        return self._atom_feed(following, public=False)

    @LoginRequired()
    def journal_rss(self):
        """
        Produce an rss feed via feedgenerator module
        """
        following = UserFollowing.query() \
            .filter(UserFollowing.user_id == request.authuser.user_id) \
            .options(joinedload(UserFollowing.follows_repository)) \
            .all()
        return self._rss_feed(following, public=False)

    @LoginRequired()
    def toggle_following(self):
        user_id = request.POST.get('follows_user_id')
        if user_id:
            try:
                self.scm_model.toggle_following_user(user_id,
                                            request.authuser.user_id)
                Session().commit()
                return 'ok'
            except Exception:
                log.error(traceback.format_exc())
                raise HTTPBadRequest()
kallithea/controllers/search.py
 
# -*- coding: utf-8 -*-
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program.  If not, see <http://www.gnu.org/licenses/>.
"""
kallithea.controllers.search
~~~~~~~~~~~~~~~~~~~~~~~~~~~~

Search controller for Kallithea

This file was forked by the Kallithea project in July 2014.
Original author and date, and relevant copyright and licensing information is below:
:created_on: Aug 7, 2010
:author: marcink
:copyright: (c) 2013 RhodeCode GmbH, and others.
:license: GPLv3, see LICENSE.md for more details.
"""

import logging
import traceback
import urllib

from tg import config, request
from tg import tmpl_context as c
from tg.i18n import ugettext as _
from webhelpers2.html.tools import update_params
from whoosh.index import EmptyIndexError, exists_in, open_dir
from whoosh.qparser import QueryParser, QueryParserError
from whoosh.query import Phrase, Prefix

from kallithea.lib.auth import LoginRequired
from kallithea.lib.base import BaseRepoController, render
from kallithea.lib.indexers import CHGSET_IDX_NAME, CHGSETS_SCHEMA, IDX_NAME, SCHEMA, WhooshResultWrapper
from kallithea.lib.page import Page
from kallithea.lib.utils2 import safe_int, safe_str
from kallithea.model.repo import RepoModel


log = logging.getLogger(__name__)


class SearchController(BaseRepoController):

    @LoginRequired(allow_default_user=True)
    def index(self, repo_name=None):
        c.repo_name = repo_name
        c.formated_results = []
        c.runtime = ''
        c.cur_query = request.GET.get('q', None)
        c.cur_type = request.GET.get('type', 'content')
        c.cur_search = search_type = {'content': 'content',
                                      'commit': 'message',
                                      'path': 'path',
                                      'repository': 'repository'
                                      }.get(c.cur_type, 'content')

        index_name = {
            'content': IDX_NAME,
            'commit': CHGSET_IDX_NAME,
            'path': IDX_NAME
        }.get(c.cur_type, IDX_NAME)

        schema_defn = {
            'content': SCHEMA,
            'commit': CHGSETS_SCHEMA,
            'path': SCHEMA
        }.get(c.cur_type, SCHEMA)

        log.debug('IDX: %s', index_name)
        log.debug('SCHEMA: %s', schema_defn)

        if c.cur_query:
            cur_query = c.cur_query.lower()
            log.debug(cur_query)

        if c.cur_query:
            p = safe_int(request.GET.get('page'), 1)
            highlight_items = set()
            index_dir = config['index_dir']
            try:
                if not exists_in(index_dir, index_name):
                    raise EmptyIndexError
                idx = open_dir(index_dir, indexname=index_name)
                searcher = idx.searcher()

                qp = QueryParser(search_type, schema=schema_defn)
                if c.repo_name:
                    # use "repository_rawname:" instead of "repository:"
                    # for case-sensitive matching
                    cur_query = u'repository_rawname:%s %s' % (c.repo_name, cur_query)
                try:
                    query = qp.parse(unicode(cur_query))
                    # extract words for highlight
                    if isinstance(query, Phrase):
                        highlight_items.update(query.words)
                    elif isinstance(query, Prefix):
                        highlight_items.add(query.text)
                    else:
                        for i in query.all_terms():
                            if i[0] in ['content', 'message']:
                                highlight_items.add(i[1])

                    matcher = query.matcher(searcher)

                    log.debug('query: %s', query)
                    log.debug('hl terms: %s', highlight_items)
                    results = searcher.search(query)
                    res_ln = len(results)
                    c.runtime = '%s results (%.3f seconds)' % (
                        res_ln, results.runtime
                    )

                    def url_generator(**kw):
                        q = urllib.quote(safe_str(c.cur_query))
                        return update_params("?q=%s&type=%s" % (q, safe_str(c.cur_type)), **kw)
                    repo_location = RepoModel().repos_path
                    c.formated_results = Page(
                        WhooshResultWrapper(search_type, searcher, matcher,
                                            highlight_items, repo_location),
                        page=p,
                        item_count=res_ln,
                        items_per_page=10,
                        url=url_generator
                        type=safe_str(c.cur_type),
                        q=safe_str(c.cur_query),
                    )

                except QueryParserError:
                    c.runtime = _('Invalid search query. Try quoting it.')
                searcher.close()
            except EmptyIndexError:
                log.error("Empty search index - run 'kallithea-cli index-create' regularly")
                c.runtime = _('The server has no search index.')
            except Exception:
                log.error(traceback.format_exc())
                c.runtime = _('An error occurred during search operation.')

        # Return a rendered template
        return render('/search/search.html')
kallithea/lib/page.py
 
# -*- coding: utf-8 -*-
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program.  If not, see <http://www.gnu.org/licenses/>.
"""
Custom paging classes
"""
import logging
import re

from webhelpers2.html import HTML, literal
from webhelpers.paginate import Page as _Page

from kallithea.config.routing import url


log = logging.getLogger(__name__)


class Page(_Page):
    """
    Custom pager emitting Bootstrap paginators
    """

    def __init__(self, *args, **kwargs):
        kwargs.setdefault('url', url.current)
        _Page.__init__(self, *args, **kwargs)
    def __init__(self, collection,
                 page=1, items_per_page=20, item_count=None,
                 **kwargs):
        _Page.__init__(self, collection, page=page, items_per_page=items_per_page, item_count=item_count,
                       url=url.current, **kwargs)
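The replacement __init__ above spells out the common parameters and always passes url=url.current to the webhelpers paginator; any remaining keyword arguments are forwarded as well and, assuming webhelpers' usual behaviour of reusing such kwargs when generating pager links, they come back as extra query parameters on those links. A minimal, hypothetical usage sketch under that assumption:

    # 'filter' is not consumed by Page itself; it is expected to reappear
    # in the generated pager links, e.g. '?filter=push&page=2'
    pager = Page(users_log, page=2, items_per_page=10, filter='push')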
 

	
 
    def _get_pos(self, cur_page, max_page, items):
        edge = (items / 2) + 1
        if (cur_page <= edge):
            radius = max(items / 2, items - cur_page)
        elif (max_page - cur_page) < edge:
            radius = (items - 1) - (max_page - cur_page)
        else:
            radius = items / 2

        left = max(1, (cur_page - (radius)))
        right = min(max_page, cur_page + (radius))
        return left, cur_page, right

    def _range(self, regexp_match):
        """
        Return range of linked pages (e.g. '1 2 [3] 4 5 6 7 8').

        Arguments:

        regexp_match
            A "re" (regular expressions) match object containing the
            radius of linked pages around the current page in
            regexp_match.group(1) as a string

        This function is supposed to be called as a callable in
        re.sub.

        """
        radius = int(regexp_match.group(1))

        # Compute the first and last page number within the radius
        # e.g. '1 .. 5 6 [7] 8 9 .. 12'
        # -> leftmost_page  = 5
        # -> rightmost_page = 9
        leftmost_page, _cur, rightmost_page = self._get_pos(self.page,
                                                            self.last_page,
                                                            (radius * 2) + 1)
        nav_items = []

        # Create a link to the first page (unless we are on the first page
        # or there would be no need to insert '..' spacers)
        if self.page != self.first_page and self.first_page < leftmost_page:
            nav_items.append(HTML.li(self._pagerlink(self.first_page, self.first_page)))

        # Insert dots if there are pages between the first page
        # and the currently displayed page range
        if leftmost_page - self.first_page > 1: