Changeset - 30589acf820e
[Not reviewed]
Merge beta
Vincent Duvert - 2012-06-06 21:02:21
vincent@duvert.net
Merge with latest changes
5 files changed with 26 insertions and 11 deletions:
0 comments (0 inline, 0 general)
rhodecode/config/routing.py
 
@@ -249,196 +249,203 @@ def make_map(config):
 
                  controller='admin/groups', path_prefix=ADMIN_PREFIX)
 

	
 
    #ADMIN PERMISSIONS REST ROUTES
 
    rmap.resource('permission', 'permissions',
 
                  controller='admin/permissions', path_prefix=ADMIN_PREFIX)
 

	
 
    ##ADMIN LDAP SETTINGS
 
    rmap.connect('ldap_settings', '%s/ldap' % ADMIN_PREFIX,
 
                 controller='admin/ldap_settings', action='ldap_settings',
 
                 conditions=dict(method=["POST"]))
 

	
 
    rmap.connect('ldap_home', '%s/ldap' % ADMIN_PREFIX,
 
                 controller='admin/ldap_settings')
 

	
 
    #ADMIN SETTINGS REST ROUTES
 
    with rmap.submapper(path_prefix=ADMIN_PREFIX,
 
                        controller='admin/settings') as m:
 
        m.connect("admin_settings", "/settings",
 
                  action="create", conditions=dict(method=["POST"]))
 
        m.connect("admin_settings", "/settings",
 
                  action="index", conditions=dict(method=["GET"]))
 
        m.connect("formatted_admin_settings", "/settings.{format}",
 
                  action="index", conditions=dict(method=["GET"]))
 
        m.connect("admin_new_setting", "/settings/new",
 
                  action="new", conditions=dict(method=["GET"]))
 
        m.connect("formatted_admin_new_setting", "/settings/new.{format}",
 
                  action="new", conditions=dict(method=["GET"]))
 
        m.connect("/settings/{setting_id}",
 
                  action="update", conditions=dict(method=["PUT"]))
 
        m.connect("/settings/{setting_id}",
 
                  action="delete", conditions=dict(method=["DELETE"]))
 
        m.connect("admin_edit_setting", "/settings/{setting_id}/edit",
 
                  action="edit", conditions=dict(method=["GET"]))
 
        m.connect("formatted_admin_edit_setting",
 
                  "/settings/{setting_id}.{format}/edit",
 
                  action="edit", conditions=dict(method=["GET"]))
 
        m.connect("admin_setting", "/settings/{setting_id}",
 
                  action="show", conditions=dict(method=["GET"]))
 
        m.connect("formatted_admin_setting", "/settings/{setting_id}.{format}",
 
                  action="show", conditions=dict(method=["GET"]))
 
        m.connect("admin_settings_my_account", "/my_account",
 
                  action="my_account", conditions=dict(method=["GET"]))
 
        m.connect("admin_settings_my_account_update", "/my_account_update",
 
                  action="my_account_update", conditions=dict(method=["PUT"]))
 
        m.connect("admin_settings_create_repository", "/create_repository",
 
                  action="create_repository", conditions=dict(method=["GET"]))
 

	
 
    #NOTIFICATION REST ROUTES
 
    with rmap.submapper(path_prefix=ADMIN_PREFIX,
 
                        controller='admin/notifications') as m:
 
        m.connect("notifications", "/notifications",
 
                  action="create", conditions=dict(method=["POST"]))
 
        m.connect("notifications", "/notifications",
 
                  action="index", conditions=dict(method=["GET"]))
 
        m.connect("notifications_mark_all_read", "/notifications/mark_all_read",
 
                  action="mark_all_read", conditions=dict(method=["GET"]))
 
        m.connect("formatted_notifications", "/notifications.{format}",
 
                  action="index", conditions=dict(method=["GET"]))
 
        m.connect("new_notification", "/notifications/new",
 
                  action="new", conditions=dict(method=["GET"]))
 
        m.connect("formatted_new_notification", "/notifications/new.{format}",
 
                  action="new", conditions=dict(method=["GET"]))
 
        m.connect("/notification/{notification_id}",
 
                  action="update", conditions=dict(method=["PUT"]))
 
        m.connect("/notification/{notification_id}",
 
                  action="delete", conditions=dict(method=["DELETE"]))
 
        m.connect("edit_notification", "/notification/{notification_id}/edit",
 
                  action="edit", conditions=dict(method=["GET"]))
 
        m.connect("formatted_edit_notification",
 
                  "/notification/{notification_id}.{format}/edit",
 
                  action="edit", conditions=dict(method=["GET"]))
 
        m.connect("notification", "/notification/{notification_id}",
 
                  action="show", conditions=dict(method=["GET"]))
 
        m.connect("formatted_notification", "/notifications/{notification_id}.{format}",
 
                  action="show", conditions=dict(method=["GET"]))
 

	
 
    #ADMIN MAIN PAGES
 
    with rmap.submapper(path_prefix=ADMIN_PREFIX,
 
                        controller='admin/admin') as m:
 
        m.connect('admin_home', '', action='index')
 
        m.connect('admin_add_repo', '/add_repo/{new_repo:[a-z0-9\. _-]*}',
 
                  action='add_repo')
 

	
 
    #==========================================================================
 
    # API V2
 
    #==========================================================================
 
    with rmap.submapper(path_prefix=ADMIN_PREFIX,
 
                        controller='api/api') as m:
 
        m.connect('api', '/api')
 

	
 
    #USER JOURNAL
 
    rmap.connect('journal', '%s/journal' % ADMIN_PREFIX, controller='journal')
 

	
 
    rmap.connect('public_journal', '%s/public_journal' % ADMIN_PREFIX,
 
                 controller='journal', action="public_journal")
 

	
 
-    rmap.connect('public_journal_rss', '%s/public_journal_rss' % ADMIN_PREFIX,
+    rmap.connect('public_journal_rss', '%s/public_journal/rss' % ADMIN_PREFIX,
                 controller='journal', action="public_journal_rss")
 

	
 
    rmap.connect('public_journal_rss_old', '%s/public_journal_rss' % ADMIN_PREFIX,
 
                 controller='journal', action="public_journal_rss")
 

	
 
    rmap.connect('public_journal_atom',
 
                 '%s/public_journal/atom' % ADMIN_PREFIX, controller='journal',
 
                 action="public_journal_atom")
 

	
 
    rmap.connect('public_journal_atom_old',
 
                 '%s/public_journal_atom' % ADMIN_PREFIX, controller='journal',
 
                 action="public_journal_atom")
 

	
 
    rmap.connect('toggle_following', '%s/toggle_following' % ADMIN_PREFIX,
 
                 controller='journal', action='toggle_following',
 
                 conditions=dict(method=["POST"]))
 

	
 
    #SEARCH
 
    rmap.connect('search', '%s/search' % ADMIN_PREFIX, controller='search',)
 
    rmap.connect('search_repo', '%s/search/{search_repo:.*}' % ADMIN_PREFIX,
 
                  controller='search')
 

	
 
    #LOGIN/LOGOUT/REGISTER/SIGN IN
 
    rmap.connect('login_home', '%s/login' % ADMIN_PREFIX, controller='login')
 
    rmap.connect('logout_home', '%s/logout' % ADMIN_PREFIX, controller='login',
 
                 action='logout')
 

	
 
    rmap.connect('register', '%s/register' % ADMIN_PREFIX, controller='login',
 
                 action='register')
 

	
 
    rmap.connect('reset_password', '%s/password_reset' % ADMIN_PREFIX,
 
                 controller='login', action='password_reset')
 

	
 
    rmap.connect('reset_password_confirmation',
 
                 '%s/password_reset_confirmation' % ADMIN_PREFIX,
 
                 controller='login', action='password_reset_confirmation')
 

	
 
    #FEEDS
 
    rmap.connect('rss_feed_home', '/{repo_name:.*}/feed/rss',
 
                controller='feed', action='rss',
 
                conditions=dict(function=check_repo))
 

	
 
    rmap.connect('atom_feed_home', '/{repo_name:.*}/feed/atom',
 
                controller='feed', action='atom',
 
                conditions=dict(function=check_repo))
 

	
 
    #==========================================================================
 
    # REPOSITORY ROUTES
 
    #==========================================================================
 
    rmap.connect('summary_home', '/{repo_name:.*}',
 
                controller='summary',
 
                conditions=dict(function=check_repo))
 

	
 
    rmap.connect('repos_group_home', '/{group_name:.*}',
 
                controller='admin/repos_groups', action="show_by_name",
 
                conditions=dict(function=check_group))
 

	
 
    rmap.connect('changeset_home', '/{repo_name:.*}/changeset/{revision}',
 
                controller='changeset', revision='tip',
 
                conditions=dict(function=check_repo))
 

	
 
    rmap.connect('changeset_comment',
 
                 '/{repo_name:.*}/changeset/{revision}/comment',
 
                controller='changeset', revision='tip', action='comment',
 
                conditions=dict(function=check_repo))
 

	
 
    rmap.connect('changeset_comment_delete',
 
                 '/{repo_name:.*}/changeset/comment/{comment_id}/delete',
 
                controller='changeset', action='delete_comment',
 
                conditions=dict(function=check_repo, method=["DELETE"]))
 

	
 
    rmap.connect('raw_changeset_home',
 
                 '/{repo_name:.*}/raw-changeset/{revision}',
 
                 controller='changeset', action='raw_changeset',
 
                 revision='tip', conditions=dict(function=check_repo))
 

	
 
    rmap.connect('summary_home', '/{repo_name:.*}/summary',
 
                controller='summary', conditions=dict(function=check_repo))
 

	
 
    rmap.connect('shortlog_home', '/{repo_name:.*}/shortlog',
 
                controller='shortlog', conditions=dict(function=check_repo))
 

	
 
    rmap.connect('branches_home', '/{repo_name:.*}/branches',
 
                controller='branches', conditions=dict(function=check_repo))
 

	
 
    rmap.connect('tags_home', '/{repo_name:.*}/tags',
 
                controller='tags', conditions=dict(function=check_repo))
 

	
 
    rmap.connect('bookmarks_home', '/{repo_name:.*}/bookmarks',
 
                controller='bookmarks', conditions=dict(function=check_repo))
 

	
 
    rmap.connect('changelog_home', '/{repo_name:.*}/changelog',
 
                controller='changelog', conditions=dict(function=check_repo))
 

	
 
    rmap.connect('changelog_details', '/{repo_name:.*}/changelog_details/{cs}',
 
                controller='changelog', action='changelog_details',
 
                conditions=dict(function=check_repo))
 

	
 
    rmap.connect('files_home', '/{repo_name:.*}/files/{revision}/{f_path:.*}',
 
                controller='files', revision='tip', f_path='',
 
                conditions=dict(function=check_repo))
 

	
 
    rmap.connect('files_diff_home', '/{repo_name:.*}/diff/{f_path:.*}',
 
                controller='files', action='diff', revision='tip', f_path='',
 
                conditions=dict(function=check_repo))
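Aside: the calls above use the Routes Mapper API. Below is a minimal, self-contained sketch of how rmap.connect names, paths, and conditions interact. ADMIN_PREFIX is assumed to be '/_admin' (RhodeCode's usual default, defined elsewhere in this file); the routes shown are illustrative, not a definitive rendering of make_map().

# sketch of the Routes API used by make_map() above (Python 2 era `routes`)
from routes import Mapper

ADMIN_PREFIX = '/_admin'  # assumption; actually defined elsewhere in routing.py
rmap = Mapper()
rmap.connect('public_journal_rss', '%s/public_journal/rss' % ADMIN_PREFIX,
             controller='journal', action='public_journal_rss')
# conditions=dict(method=[...]) restricts the route to the listed HTTP verbs
rmap.connect('toggle_following', '%s/toggle_following' % ADMIN_PREFIX,
             controller='journal', action='toggle_following',
             conditions=dict(method=['POST']))

print rmap.match('/_admin/public_journal/rss')
# -> {'controller': u'journal', 'action': u'public_journal_rss'}
print rmap.match('/_admin/toggle_following',
                 environ={'REQUEST_METHOD': 'GET'})
# -> None, since that route only accepts POST

Note that the merge keeps the old /public_journal_rss and /public_journal_atom URLs registered under the *_old route names, so feeds subscribed before the URL change keep resolving.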
 

	
rhodecode/controllers/journal.py
 
@@ -99,142 +99,142 @@ class JournalController(BaseController):
 
    def _get_journal_data(self, following_repos):
 
        repo_ids = [x.follows_repository.repo_id for x in following_repos
 
                    if x.follows_repository is not None]
 
        user_ids = [x.follows_user.user_id for x in following_repos
 
                    if x.follows_user is not None]
 

	
 
        filtering_criterion = None
 

	
 
        if repo_ids and user_ids:
 
            filtering_criterion = or_(UserLog.repository_id.in_(repo_ids),
 
                        UserLog.user_id.in_(user_ids))
 
        if repo_ids and not user_ids:
 
            filtering_criterion = UserLog.repository_id.in_(repo_ids)
 
        if not repo_ids and user_ids:
 
            filtering_criterion = UserLog.user_id.in_(user_ids)
 
        if filtering_criterion is not None:
 
            journal = self.sa.query(UserLog)\
 
                .options(joinedload(UserLog.user))\
 
                .options(joinedload(UserLog.repository))\
 
                .filter(filtering_criterion)\
 
                .order_by(UserLog.action_date.desc())
 
        else:
 
            journal = []
 

	
 
        return journal
 

	
 
    @LoginRequired()
 
    @NotAnonymous()
 
    def toggle_following(self):
 
        cur_token = request.POST.get('auth_token')
 
        token = h.get_token()
 
        if cur_token == token:
 

	
 
            user_id = request.POST.get('follows_user_id')
 
            if user_id:
 
                try:
 
                    self.scm_model.toggle_following_user(user_id,
 
                                                self.rhodecode_user.user_id)
 
                    Session.commit()
 
                    return 'ok'
 
                except:
 
                    raise HTTPBadRequest()
 

	
 
            repo_id = request.POST.get('follows_repo_id')
 
            if repo_id:
 
                try:
 
                    self.scm_model.toggle_following_repo(repo_id,
 
                                                self.rhodecode_user.user_id)
 
                    Session.commit()
 
                    return 'ok'
 
                except:
 
                    raise HTTPBadRequest()
 

	
 
        log.debug('token mismatch %s vs %s' % (cur_token, token))
 
        raise HTTPBadRequest()
 

	
 
    @LoginRequired()
 
    def public_journal(self):
 
        # Return a rendered template
 
        p = int(request.params.get('page', 1))
 

	
 
        c.following = self.sa.query(UserFollowing)\
 
            .filter(UserFollowing.user_id == self.rhodecode_user.user_id)\
 
            .options(joinedload(UserFollowing.follows_repository))\
 
            .all()
 

	
 
        journal = self._get_journal_data(c.following)
 

	
 
        c.journal_pager = Page(journal, page=p, items_per_page=20)
 

	
 
        c.journal_day_aggreagate = self._get_daily_aggregate(c.journal_pager)
 

	
 
        c.journal_data = render('journal/journal_data.html')
 
        if request.environ.get('HTTP_X_PARTIAL_XHR'):
 
            return c.journal_data
 
        return render('journal/public_journal.html')
 

	
 
    @LoginRequired(api_access=True)
 
    def public_journal_atom(self):
 
        """
 
        Produce an atom-1.0 feed via feedgenerator module
 
        """
 
        c.following = self.sa.query(UserFollowing)\
 
            .filter(UserFollowing.user_id == self.rhodecode_user.user_id)\
 
            .options(joinedload(UserFollowing.follows_repository))\
 
            .all()
 

	
 
        journal = self._get_journal_data(c.following)
 

	
 
        feed = Atom1Feed(title=self.title % 'atom',
 
                         link=url('public_journal_atom', qualified=True),
 
                         description=_('Public journal'),
 
                         language=self.language,
 
                         ttl=self.ttl)
 

	
 
        for entry in journal[:self.feed_nr]:
 
-            action, action_extra = h.action_parser(entry, feed=True)
-            title = "%s - %s %s" % (entry.user.short_contact, action,
+            action, action_extra, ico = h.action_parser(entry, feed=True)
+            title = "%s - %s %s" % (entry.user.short_contact, action(),
 
                                 entry.repository.repo_name)
 
            desc = action_extra()
 
            feed.add_item(title=title,
 
                          pubdate=entry.action_date,
 
                          link=url('', qualified=True),
 
                          author_email=entry.user.email,
 
                          author_name=entry.user.full_contact,
 
                          description=desc)
 

	
 
        response.content_type = feed.mime_type
 
        return feed.writeString('utf-8')
 

	
 
    @LoginRequired(api_access=True)
 
    def public_journal_rss(self):
 
        """
 
        Produce an rss2 feed via feedgenerator module
 
        """
 
        c.following = self.sa.query(UserFollowing)\
 
            .filter(UserFollowing.user_id == self.rhodecode_user.user_id)\
 
            .options(joinedload(UserFollowing.follows_repository))\
 
            .all()
 

	
 
        journal = self._get_journal_data(c.following)
 

	
 
        feed = Rss201rev2Feed(title=self.title % 'rss',
 
                         link=url('public_journal_rss', qualified=True),
 
                         description=_('Public journal'),
 
                         language=self.language,
 
                         ttl=self.ttl)
 

	
 
        for entry in journal[:self.feed_nr]:
 
-            action, action_extra = h.action_parser(entry, feed=True)
-            title = "%s - %s %s" % (entry.user.short_contact, action,
+            action, action_extra, ico = h.action_parser(entry, feed=True)
+            title = "%s - %s %s" % (entry.user.short_contact, action(),
 
                                 entry.repository.repo_name)
 
            desc = action_extra()
 
            feed.add_item(title=title,
 
                          pubdate=entry.action_date,
 
                          link=url('', qualified=True),
 
                          author_email=entry.user.email,
 
                          author_name=entry.user.full_contact,
 
                          description=desc)
 

	
 
        response.content_type = feed.mime_type
 
        return feed.writeString('utf-8')
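Aside: the two feed loops above changed because h.action_parser(entry, feed=True) now returns a third value (an icon) and the first two values are zero-argument callables, hence action() and action_extra(). A standalone sketch of that contract, using a stubbed parser and the webhelpers feedgenerator API this controller imports; the stub values are invented for illustration:

import datetime
from webhelpers.feedgenerator import Rss201rev2Feed

def action_parser_stub(entry, feed=False):
    # hypothetical stand-in for h.action_parser after this change
    action = lambda: 'pushed 2 changesets'    # rendered lazily via action()
    action_extra = lambda: ''                 # extra description, if any
    icon = lambda: '<img src="icon.png"/>'    # unused by the feed code above
    return action, action_extra, icon

action, action_extra, ico = action_parser_stub(None, feed=True)
feed = Rss201rev2Feed(title='rhodecode - rss',
                      link='http://example.com/_admin/public_journal/rss',
                      description='Public journal')
feed.add_item(title='someuser - %s somerepo' % action(),
              link='http://example.com/',
              description=action_extra(),
              pubdate=datetime.datetime.now())
print feed.writeString('utf-8')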
rhodecode/lib/indexers/__init__.py
 
# -*- coding: utf-8 -*-
 
"""
 
    rhodecode.lib.indexers.__init__
 
    ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 

	
 
    Whoosh indexing module for RhodeCode
 

	
 
    :created_on: Aug 17, 2010
 
    :author: marcink
 
    :copyright: (C) 2010-2012 Marcin Kuzminski <marcin@python-works.com>
 
    :license: GPLv3, see COPYING for more details.
 
"""
 
# This program is free software: you can redistribute it and/or modify
 
# it under the terms of the GNU General Public License as published by
 
# the Free Software Foundation, either version 3 of the License, or
 
# (at your option) any later version.
 
#
 
# This program is distributed in the hope that it will be useful,
 
# but WITHOUT ANY WARRANTY; without even the implied warranty of
 
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 
# GNU General Public License for more details.
 
#
 
# You should have received a copy of the GNU General Public License
 
# along with this program.  If not, see <http://www.gnu.org/licenses/>.
 
import os
 
import sys
 
import traceback
 
import logging
 
from os.path import dirname as dn, join as jn
 

	
 
#to get the rhodecode import
 
sys.path.append(dn(dn(dn(os.path.realpath(__file__)))))
 

	
 
from string import strip
 
from shutil import rmtree
 

	
 
from whoosh.analysis import RegexTokenizer, LowercaseFilter, StopFilter
 
from whoosh.fields import TEXT, ID, STORED, Schema, FieldType
 
from whoosh.index import create_in, open_dir
 
from whoosh.formats import Characters
 
from whoosh.highlight import highlight, HtmlFormatter, ContextFragmenter
 

	
 
-from webhelpers.html.builder import escape
+from webhelpers.html.builder import escape, literal
 
from sqlalchemy import engine_from_config
 

	
 
from rhodecode.model import init_model
 
from rhodecode.model.scm import ScmModel
 
from rhodecode.model.repo import RepoModel
 
from rhodecode.config.environment import load_environment
 
from rhodecode.lib.utils2 import LazyProperty
 
from rhodecode.lib.utils import BasePasterCommand, Command, add_cache,\
 
    load_rcextensions
 

	
 
# CUSTOM ANALYZER wordsplit + lowercase filter
 
ANALYZER = RegexTokenizer(expression=r"\w+") | LowercaseFilter()
 

	
 

	
 
#INDEX SCHEMA DEFINITION
 
SCHEMA = Schema(
 
+    fileid=ID(unique=True),
 
    owner=TEXT(),
 
    repository=TEXT(stored=True),
 
    path=TEXT(stored=True),
 
    content=FieldType(format=Characters(), analyzer=ANALYZER,
 
                      scorable=True, stored=True),
 
    modtime=STORED(),
 
    extension=TEXT(stored=True)
 
)
 

	
 
IDX_NAME = 'HG_INDEX'
 
FORMATTER = HtmlFormatter('span', between='\n<span class="break">...</span>\n')
 
FRAGMENTER = ContextFragmenter(200)
 

	
 

	
 
class MakeIndex(BasePasterCommand):
 

	
 
    max_args = 1
 
    min_args = 1
 

	
 
    usage = "CONFIG_FILE"
 
    summary = "Creates index for full text search given configuration file"
 
    group_name = "RhodeCode"
 
    takes_config_file = -1
 
    parser = Command.standard_parser(verbose=True)
 

	
 
    def command(self):
 
        logging.config.fileConfig(self.path_to_ini_file)
 
        from pylons import config
 
        add_cache(config)
 
        engine = engine_from_config(config, 'sqlalchemy.db1.')
 
        init_model(engine)
 
        index_location = config['index_dir']
 
        repo_location = self.options.repo_location \
 
            if self.options.repo_location else RepoModel().repos_path
 
        repo_list = map(strip, self.options.repo_list.split(',')) \
 
            if self.options.repo_list else None
 
        repo_update_list = map(strip, self.options.repo_update_list.split(',')) \
 
            if self.options.repo_update_list else None
 
        load_rcextensions(config['here'])
 
        #======================================================================
 
        # WHOOSH DAEMON
 
        #======================================================================
 
        from rhodecode.lib.pidlock import LockHeld, DaemonLock
 
        from rhodecode.lib.indexers.daemon import WhooshIndexingDaemon
 
        try:
 
            l = DaemonLock(file_=jn(dn(dn(index_location)), 'make_index.lock'))
 
            WhooshIndexingDaemon(index_location=index_location,
 
                                 repo_location=repo_location,
 
                                 repo_list=repo_list,
 
                                 repo_update_list=repo_update_list)\
 
                .run(full_index=self.options.full_index)
 
            l.release()
 
        except LockHeld:
 
            sys.exit(1)
 

	
 
    def update_parser(self):
 
        self.parser.add_option('--repo-location',
 
                          action='store',
 
                          dest='repo_location',
 
                          help="Specifies repositories location to index OPTIONAL",
 
                          )
 
        self.parser.add_option('--index-only',
 
                          action='store',
 
                          dest='repo_list',
 
                          help="Specifies a comma separated list of repositores "
 
                                "to build index on. If not given all repositories "
 
                                "are scanned for indexing. OPTIONAL",
 
                          )
 
        self.parser.add_option('--update-only',
 
                          action='store',
 
                          dest='repo_update_list',
 
                          help="Specifies a comma separated list of repositores "
 
                                "to re-build index on. OPTIONAL",
 
                          )
 
        self.parser.add_option('-f',
 
                          action='store_true',
 
                          dest='full_index',
 
                          help="Specifies that index should be made full i.e"
 
                                " destroy old and build from scratch",
 
                          default=False)
 

	
 

	
 
class WhooshResultWrapper(object):
 
    def __init__(self, search_type, searcher, matcher, highlight_items,
 
                 repo_location):
 
        self.search_type = search_type
 
        self.searcher = searcher
 
        self.matcher = matcher
 
        self.highlight_items = highlight_items
 
        self.fragment_size = 200
 
        self.repo_location = repo_location
 

	
 
    @LazyProperty
 
    def doc_ids(self):
 
        docs_id = []
 
        while self.matcher.is_active():
 
            docnum = self.matcher.id()
 
            chunks = [offsets for offsets in self.get_chunks()]
 
            docs_id.append([docnum, chunks])
 
            self.matcher.next()
 
        return docs_id
 

	
 
    def __str__(self):
 
        return '<%s at %s>' % (self.__class__.__name__, len(self.doc_ids))
 

	
 
    def __repr__(self):
 
        return self.__str__()
 

	
 
    def __len__(self):
 
        return len(self.doc_ids)
 

	
 
    def __iter__(self):
 
        """
 
        Allows iteration over results and lazily generates content.
 

	
 
        *Requires* implementation of ``__getitem__`` method.
 
        """
 
        for docid in self.doc_ids:
 
            yield self.get_full_content(docid)
 

	
 
    def __getitem__(self, key):
 
        """
 
        Slicing of resultWrapper
 
        """
 
        i, j = key.start, key.stop
 

	
 
        slices = []
 
        for docid in self.doc_ids[i:j]:
 
            slices.append(self.get_full_content(docid))
 
        return slices
 

	
 
    def get_full_content(self, docid):
 
        res = self.searcher.stored_fields(docid[0])
 
        full_repo_path = jn(self.repo_location, res['repository'])
 
        f_path = res['path'].split(full_repo_path)[-1]
 
        f_path = f_path.lstrip(os.sep)
 

	
 
        content_short = self.get_short_content(res, docid[1])
 
        res.update({'content_short': content_short,
 
                    'content_short_hl': self.highlight(content_short),
 
                    'f_path': f_path})
 

	
 
        return res
 

	
 
    def get_short_content(self, res, chunks):
 

	
 
        return ''.join([res['content'][chunk[0]:chunk[1]] for chunk in chunks])
 

	
 
    def get_chunks(self):
 
        """
 
        Chunks the content without overlapping chunks, so the same
        close occurrences are not highlighted twice.
 

	
 
        :param matcher:
 
        :param size:
 
        """
 
        memory = [(0, 0)]
 
        for span in self.matcher.spans():
 
            start = span.startchar or 0
 
            end = span.endchar or 0
 
            start_offseted = max(0, start - self.fragment_size)
 
            end_offseted = end + self.fragment_size
 

	
 
            if start_offseted < memory[-1][1]:
 
                start_offseted = memory[-1][1]
 
            memory.append((start_offseted, end_offseted,))
 
            yield (start_offseted, end_offseted,)
 

	
 
    def highlight(self, content, top=5):
 
        if self.search_type != 'content':
 
            return ''
 
        hl = highlight(
 
-            text=escape(content),
+            text=content,
 
            terms=self.highlight_items,
 
            analyzer=ANALYZER,
 
            fragmenter=FRAGMENTER,
 
            formatter=FORMATTER,
 
            top=top
 
        )
 
        return hl
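Aside: the behavioural change above is that highlight() now receives the raw content rather than escape(content) (the literal import was added in the same merge). A standalone sketch of the same whoosh call, reusing the ANALYZER, FORMATTER and FRAGMENTER definitions from the top of this module; the sample content and search term are invented:

from whoosh.analysis import RegexTokenizer, LowercaseFilter
from whoosh.highlight import highlight, HtmlFormatter, ContextFragmenter

ANALYZER = RegexTokenizer(expression=r"\w+") | LowercaseFilter()
FORMATTER = HtmlFormatter('span', between='\n<span class="break">...</span>\n')
FRAGMENTER = ContextFragmenter(200)

content = u'def connect(self): pass  # connect the route map'
print highlight(text=content,
                terms=frozenset([u'connect']),
                analyzer=ANALYZER,
                fragmenter=FRAGMENTER,
                formatter=FORMATTER,
                top=5)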
rhodecode/lib/indexers/daemon.py
 
@@ -53,199 +53,205 @@ log = logging.getLogger('whoosh_indexer'
 

	
 
class WhooshIndexingDaemon(object):
 
    """
 
    Daemon for atomic indexing jobs
 
    """
 

	
 
    def __init__(self, indexname=IDX_NAME, index_location=None,
 
                 repo_location=None, sa=None, repo_list=None,
 
                 repo_update_list=None):
 
        self.indexname = indexname
 

	
 
        self.index_location = index_location
 
        if not index_location:
 
            raise Exception('You have to provide index location')
 

	
 
        self.repo_location = repo_location
 
        if not repo_location:
 
            raise Exception('You have to provide repositories location')
 

	
 
        self.repo_paths = ScmModel(sa).repo_scan(self.repo_location)
 

	
 
        #filter repo list
 
        if repo_list:
 
            self.filtered_repo_paths = {}
 
            for repo_name, repo in self.repo_paths.items():
 
                if repo_name in repo_list:
 
                    self.filtered_repo_paths[repo_name] = repo
 

	
 
            self.repo_paths = self.filtered_repo_paths
 

	
 
        #filter update repo list
 
        self.filtered_repo_update_paths = {}
 
        if repo_update_list:
 
            self.filtered_repo_update_paths = {}
 
            for repo_name, repo in self.repo_paths.items():
 
                if repo_name in repo_update_list:
 
                    self.filtered_repo_update_paths[repo_name] = repo
 
            self.repo_paths = self.filtered_repo_update_paths
 

	
 
        self.initial = False
 
        if not os.path.isdir(self.index_location):
 
            os.makedirs(self.index_location)
 
            log.info('Cannot run incremental index since it does not'
                     ' yet exist; running full build')
 
            self.initial = True
 

	
 
    def get_paths(self, repo):
 
        """
 
        recursively walk the root dir and return a set of all paths in that
        dir, based on the repository walk function
 
        """
 
        index_paths_ = set()
 
        try:
 
            tip = repo.get_changeset('tip')
 
            for topnode, dirs, files in tip.walk('/'):
 
                for f in files:
 
                    index_paths_.add(jn(repo.path, f.path))
 

	
 
        except RepositoryError, e:
 
            log.debug(traceback.format_exc())
 
            pass
 
        return index_paths_
 

	
 
    def get_node(self, repo, path):
 
        n_path = path[len(repo.path) + 1:]
 
        node = repo.get_changeset().get_node(n_path)
 
        return node
 

	
 
    def get_node_mtime(self, node):
 
        return mktime(node.last_changeset.date.timetuple())
 

	
 
    def add_doc(self, writer, path, repo, repo_name):
 
        """
 
        Add a doc to the writer; this function itself fetches the data from
        the vcs backend instance
 
        """
 

	
 
        node = self.get_node(repo, path)
 
        indexed = indexed_w_content = 0
 
        # we just index the content of chosen files, and skip binary files
 
        if node.extension in INDEX_EXTENSIONS and not node.is_binary:
 
            u_content = node.content
 
            if not isinstance(u_content, unicode):
 
                log.warning('  >> %s Could not get this content as unicode '
 
                            'replacing with empty content' % path)
 
                u_content = u''
 
            else:
 
                log.debug('    >> %s [WITH CONTENT]' % path)
 
                indexed_w_content += 1
 

	
 
        else:
 
            log.debug('    >> %s' % path)
 
            # just index the file name without its content
 
            u_content = u''
 
            indexed += 1
 

	
 
        p = safe_unicode(path)
 
        writer.add_document(
 
+            fileid=p,
            owner=unicode(repo.contact),
            repository=safe_unicode(repo_name),
-            path=safe_unicode(path),
+            path=p,
 
            content=u_content,
 
            modtime=self.get_node_mtime(node),
 
            extension=node.extension
 
        )
 
        return indexed, indexed_w_content
 

	
 
    def build_index(self):
 
        if os.path.exists(self.index_location):
 
            log.debug('removing previous index')
 
            rmtree(self.index_location)
 

	
 
        if not os.path.exists(self.index_location):
 
            os.mkdir(self.index_location)
 

	
 
        idx = create_in(self.index_location, SCHEMA, indexname=IDX_NAME)
 
        writer = idx.writer()
 
        log.debug('BUILDING INDEX FOR EXTENSIONS %s' % INDEX_EXTENSIONS)
 
        for repo_name, repo in self.repo_paths.items():
 
            log.debug('building index @ %s' % repo.path)
 
            i_cnt = iwc_cnt = 0
 
            for idx_path in self.get_paths(repo):
 
                i, iwc = self.add_doc(writer, idx_path, repo, repo_name)
 
                i_cnt += i
 
                iwc_cnt += iwc
 
            log.debug('added %s files, %s with content, for repo %s' % (
 
                         i_cnt + iwc_cnt, iwc_cnt, repo.path)
 
            )
 

	
 
        log.debug('>> COMMITTING CHANGES <<')
 
        writer.commit(merge=True)
 
        log.debug('>>> FINISHED BUILDING INDEX <<<')
 

	
 
    def update_index(self):
 
        log.debug((u'STARTING INCREMENTAL INDEXING UPDATE FOR EXTENSIONS %s '
 
                   'AND REPOS %s') % (INDEX_EXTENSIONS, self.repo_paths.keys()))
 

	
 
        idx = open_dir(self.index_location, indexname=self.indexname)
 
        # The set of all paths in the index
 
        indexed_paths = set()
 
        # The set of all paths we need to re-index
 
        to_index = set()
 

	
 
        reader = idx.reader()
 
        writer = idx.writer()
 

	
 
        # Loop over the stored fields in the index
 
        for fields in reader.all_stored_fields():
 
            indexed_path = fields['path']
 
            indexed_repo_path = fields['repository']
 
            indexed_paths.add(indexed_path)
 

	
 
            if not indexed_repo_path in self.filtered_repo_update_paths:
 
                continue
 

	
 
            repo = self.repo_paths[indexed_repo_path]
 

	
 
            try:
 
                node = self.get_node(repo, indexed_path)
 
                # Check if this file was changed since it was indexed
 
                indexed_time = fields['modtime']
 
                mtime = self.get_node_mtime(node)
 
                if mtime > indexed_time:
 
                    # The file has changed, delete it and add it to the list of
 
                    # files to reindex
 
-                    log.debug('adding to reindex list %s' % indexed_path)
-                    writer.delete_by_term('path', indexed_path)
+                    log.debug('adding to reindex list %s mtime: %s vs %s' % (
+                                    indexed_path, mtime, indexed_time)
+                    )
+                    writer.delete_by_term('fileid', indexed_path)
 

	
 
                    to_index.add(indexed_path)
 
            except (ChangesetError, NodeDoesNotExistError):
 
                # This file was deleted since it was indexed
 
                log.debug('removing from index %s' % indexed_path)
 
                writer.delete_by_term('path', indexed_path)
 

	
 
        # Loop over the files in the filesystem
 
        # Assume we have a function that gathers the filenames of the
 
        # documents to be indexed
 
        ri_cnt = riwc_cnt = 0
 
        for repo_name, repo in self.repo_paths.items():
 
            for path in self.get_paths(repo):
 
                path = safe_unicode(path)
 
                if path in to_index or path not in indexed_paths:
 

	
 
                    # This is either a file that's changed, or a new file
 
                    # that wasn't indexed before. So index it!
 
                    i, iwc = self.add_doc(writer, path, repo, repo_name)
 
                    log.debug('re-indexing %s' % path)
 
                    ri_cnt += i
 
                    riwc_cnt += iwc
 
        log.debug('added %s files, %s with content, for repo %s' % (
 
                     ri_cnt + riwc_cnt, riwc_cnt, repo.path)
 
        )
 
        log.debug('>> COMMITTING CHANGES <<')
 
        writer.commit(merge=True)
 
        log.debug('>>> FINISHED REBUILDING INDEX <<<')
 

	
 
    def run(self, full_index=False):
 
        """Run daemon"""
 
        if full_index or self.initial:
 
            self.build_index()
 
        else:
 
            self.update_index()
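Aside: run() above picks a full rebuild on first use (or with -f) and the incremental path otherwise. The heart of update_index() is the modtime comparison in the stored-fields loop; a toy, whoosh-free sketch of that decision follows (all paths and timestamps are invented):

indexed = {u'docs/readme.rst': 100, u'src/app.py': 200}  # path -> indexed modtime
on_disk = {u'docs/readme.rst': 150, u'src/app.py': 200,
           u'src/new_module.py': 300}                    # path -> current mtime

to_index = set()
for path, mtime in on_disk.items():
    if path not in indexed:
        to_index.add(path)          # new file, never indexed before
    elif mtime > indexed[path]:
        to_index.add(path)          # changed since the last index run
removed = set(indexed) - set(on_disk)  # deleted files drop out of the index

print sorted(to_index)  # [u'docs/readme.rst', u'src/new_module.py']
print sorted(removed)   # []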
rhodecode/templates/search/search.html
 
## -*- coding: utf-8 -*-
 
<%inherit file="/base/base.html"/>
 
<%def name="title()">
 
   ${_('Search')}
 
   ${'"%s"' % c.cur_query if c.cur_query else None}
 
	%if c.repo_name:
 
		${_('in repository: ') + c.repo_name}
 
	%else:
 
		${_('in all repositories')}
 
	%endif
 
	- ${c.rhodecode_name}
 
</%def>
 
<%def name="breadcrumbs()">
 
	${c.rhodecode_name}
 
</%def>
 
<%def name="page_nav()">
 
	${self.menu('home')}
 
</%def>
 
<%def name="main()">
 

	
 
<div class="box">
 
	<!-- box / title -->
 
	<div class="title">
 
		<h5>${_('Search')}
 
		%if c.repo_name:
 
			${_('in repository: ') + c.repo_name}
 
		%else:
 
			${_('in all repositories')}
 
		%endif
 
		</h5>
 
	</div>
 
	<!-- end box / title -->
 
	%if c.repo_name:
 
		${h.form(h.url('search_repo',search_repo=c.repo_name),method='get')}
 
	%else:
 
		${h.form(h.url('search'),method='get')}
 
	%endif
 
	<div class="form">
 
		<div class="fields">
 
			<div class="field field-first field-noborder">
 
             <div class="label">
 
                 <label for="q">${_('Search term')}</label>
 
             </div>
 
				<div class="input">${h.text('q',c.cur_query,class_="small")}
 
					<div class="button highlight">
 
						<input type="submit" value="${_('Search')}" class="ui-button"/>
 
					</div>
 
				</div>
 
				<div style="font-weight: bold;clear:Both;margin-left:200px">${c.runtime}</div>
 
			</div>
 

	
 
			<div class="field">
 
	            <div class="label">
 
	                <label for="type">${_('Search in')}</label>
 
	            </div>
 
                <div class="select">
 
                    ${h.select('type',c.cur_type,[('content',_('File contents')),
 
                        ##('commit',_('Commit messages')),
 
                        ('path',_('File names')),
 
                        ##('repository',_('Repository names')),
 
                        ])}
 
                </div>
 
             </div>
 

	
 
		</div>
 
	</div>
 
	${h.end_form()}
 

	
 
    <div class="search">
 
    %if c.cur_search == 'content':
 
        <%include file='search_content.html'/>
 
    %elif c.cur_search == 'path':
 
        <%include file='search_path.html'/>
 
    %elif c.cur_search == 'commit':
 
        <%include file='search_commit.html'/>
 
    %elif c.cur_search == 'repository':
 
        <%include file='search_repository.html'/>
 
    %endif
 
    </div>
 
</div>
 

	
 
</%def>
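Aside: the template dispatches on c.cur_search to pick a results partial. A hypothetical sketch of the controller-side mapping that would feed it (the names below are invented, not part of this changeset):

SEARCH_PARTIALS = {
    'content': 'search_content.html',
    'path': 'search_path.html',
    'commit': 'search_commit.html',
    'repository': 'search_repository.html',
}

def partial_for(search_type):
    # fall back to file-content search for unknown types
    return SEARCH_PARTIALS.get(search_type, SEARCH_PARTIALS['content'])

print partial_for('path')  # -> search_path.html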