Changeset - 88b0e82bcba4
Indra Talip <indra.talip@gmail.com> - 2012-07-21 06:01:32
rename changeset index key to match raw_id rather than path for greater consistency
3 files changed with 12 insertions and 12 deletions
0 comments (0 inline, 0 general)
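The change itself is mechanical: the changeset index had been storing each commit's hash in a field named path, a leftover from the file-index schema, so queries and deletions had to address a commit as path:<hash>. This changeset renames that field to raw_id everywhere it appears: the Whoosh schema, the indexing daemon's writer and query strings, and the search template. As a rough illustration, a lookup against the changeset index after the rename might read as follows (a minimal sketch; the index location and repo name are placeholders, the field names are those of CHGSETS_SCHEMA below):

    from whoosh.index import open_dir
    from whoosh.qparser import QueryParser

    idx = open_dir('/path/to/index_dir', indexname='CHGSET_INDEX')  # placeholder location
    with idx.searcher() as searcher:
        qp = QueryParser('repository', schema=idx.schema)
        # before this changeset the hash lived in the 'path' field:
        #   q = qp.parse(u'some_repo AND path:88b0e82bcba4')
        q = qp.parse(u'some_repo AND raw_id:88b0e82bcba4')
        for hit in searcher.search(q):
            print hit['raw_id'], hit['author']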
rhodecode/lib/indexers/__init__.py
@@ -28,97 +28,97 @@ import traceback
 import logging
 from os.path import dirname as dn, join as jn

 #to get the rhodecode import
 sys.path.append(dn(dn(dn(os.path.realpath(__file__)))))

 from string import strip
 from shutil import rmtree

 from whoosh.analysis import RegexTokenizer, LowercaseFilter, StopFilter
 from whoosh.fields import TEXT, ID, STORED, NUMERIC, BOOLEAN, Schema, FieldType
 from whoosh.index import create_in, open_dir
 from whoosh.formats import Characters
 from whoosh.highlight import highlight, HtmlFormatter, ContextFragmenter

 from webhelpers.html.builder import escape, literal
 from sqlalchemy import engine_from_config

 from rhodecode.model import init_model
 from rhodecode.model.scm import ScmModel
 from rhodecode.model.repo import RepoModel
 from rhodecode.config.environment import load_environment
 from rhodecode.lib.utils2 import LazyProperty
 from rhodecode.lib.utils import BasePasterCommand, Command, add_cache,\
     load_rcextensions

 log = logging.getLogger(__name__)

 # CUSTOM ANALYZER wordsplit + lowercase filter
 ANALYZER = RegexTokenizer(expression=r"\w+") | LowercaseFilter()

 #INDEX SCHEMA DEFINITION
 SCHEMA = Schema(
     fileid=ID(unique=True),
     owner=TEXT(),
     repository=TEXT(stored=True),
     path=TEXT(stored=True),
     content=FieldType(format=Characters(), analyzer=ANALYZER,
                       scorable=True, stored=True),
     modtime=STORED(),
     extension=TEXT(stored=True)
 )

 IDX_NAME = 'HG_INDEX'
 FORMATTER = HtmlFormatter('span', between='\n<span class="break">...</span>\n')
 FRAGMENTER = ContextFragmenter(200)

 CHGSETS_SCHEMA = Schema(
-    path=ID(unique=True, stored=True),
+    raw_id=ID(unique=True, stored=True),
     revision=NUMERIC(unique=True, stored=True),
     last=BOOLEAN(),
     owner=TEXT(),
     repository=ID(unique=True, stored=True),
     author=TEXT(stored=True),
     message=FieldType(format=Characters(), analyzer=ANALYZER,
                       scorable=True, stored=True),
     parents=TEXT(),
     added=TEXT(),
     removed=TEXT(),
     changed=TEXT(),
 )

 CHGSET_IDX_NAME = 'CHGSET_INDEX'

 class MakeIndex(BasePasterCommand):

     max_args = 1
     min_args = 1

     usage = "CONFIG_FILE"
     summary = "Creates index for full text search given configuration file"
     group_name = "RhodeCode"
     takes_config_file = -1
     parser = Command.standard_parser(verbose=True)

     def command(self):
         logging.config.fileConfig(self.path_to_ini_file)
         from pylons import config
         add_cache(config)
         engine = engine_from_config(config, 'sqlalchemy.db1.')
         init_model(engine)
         index_location = config['index_dir']
         repo_location = self.options.repo_location \
             if self.options.repo_location else RepoModel().repos_path
         repo_list = map(strip, self.options.repo_list.split(',')) \
             if self.options.repo_list else None
         repo_update_list = map(strip, self.options.repo_update_list.split(',')) \
             if self.options.repo_update_list else None
         load_rcextensions(config['here'])
         #======================================================================
         # WHOOSH DAEMON
         #======================================================================
         from rhodecode.lib.pidlock import LockHeld, DaemonLock
         from rhodecode.lib.indexers.daemon import WhooshIndexingDaemon
         try:
             l = DaemonLock(file_=jn(dn(dn(index_location)), 'make_index.lock'))
             WhooshIndexingDaemon(index_location=index_location,
@@ -164,102 +164,102 @@ class WhooshResultWrapper(object):
         self.searcher = searcher
         self.matcher = matcher
         self.highlight_items = highlight_items
         self.fragment_size = 200
         self.repo_location = repo_location

     @LazyProperty
     def doc_ids(self):
         docs_id = []
         while self.matcher.is_active():
             docnum = self.matcher.id()
             chunks = [offsets for offsets in self.get_chunks()]
             docs_id.append([docnum, chunks])
             self.matcher.next()
         return docs_id

     def __str__(self):
         return '<%s at %s>' % (self.__class__.__name__, len(self.doc_ids))

     def __repr__(self):
         return self.__str__()

     def __len__(self):
         return len(self.doc_ids)

     def __iter__(self):
         """
         Allows iteration over results and lazily generates content.

         *Requires* implementation of ``__getitem__`` method.
         """
         for docid in self.doc_ids:
             yield self.get_full_content(docid)

     def __getitem__(self, key):
         """
         Slicing of resultWrapper
         """
         i, j = key.start, key.stop

         slices = []
         for docid in self.doc_ids[i:j]:
             slices.append(self.get_full_content(docid))
         return slices

     def get_full_content(self, docid):
         res = self.searcher.stored_fields(docid[0])
         log.debug('result: %s' % res)
-        full_repo_path = jn(self.repo_location, res['repository'])
-        f_path = res['path'].split(full_repo_path)[-1]
-        f_path = f_path.lstrip(os.sep)
-        res.update({'f_path': f_path})

         if self.search_type == 'content':
+            full_repo_path = jn(self.repo_location, res['repository'])
+            f_path = res['path'].split(full_repo_path)[-1]
+            f_path = f_path.lstrip(os.sep)
             content_short = self.get_short_content(res, docid[1])
             res.update({'content_short': content_short,
-                        'content_short_hl': self.highlight(content_short)})
+                        'content_short_hl': self.highlight(content_short),
+                        'f_path': f_path
+                      })
         elif self.search_type == 'message':
             res.update({'message_hl': self.highlight(res['message'])})

         log.debug('result: %s' % res)

         return res

     def get_short_content(self, res, chunks):
         return ''.join([res['content'][chunk[0]:chunk[1]] for chunk in chunks])

     def get_chunks(self):
         """
         Smart function that chunks the content without overlapping the
         chunks, so it doesn't highlight the same close occurrences twice.

         :param matcher:
         :param size:
         """
         memory = [(0, 0)]
         if self.matcher.supports('positions'):
             for span in self.matcher.spans():
                 start = span.startchar or 0
                 end = span.endchar or 0
                 start_offseted = max(0, start - self.fragment_size)
                 end_offseted = end + self.fragment_size

                 if start_offseted < memory[-1][1]:
                     start_offseted = memory[-1][1]
                 memory.append((start_offseted, end_offseted,))
                 yield (start_offseted, end_offseted,)

     def highlight(self, content, top=5):
         if self.search_type not in ['content', 'message']:
             return ''
         hl = highlight(
             text=content,
             terms=self.highlight_items,
             analyzer=ANALYZER,
             fragmenter=FRAGMENTER,
             formatter=FORMATTER,
             top=top
         )
         return hl
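
The get_chunks method above merges highlight windows so that two nearby term hits never produce overlapping fragments: each span is widened by fragment_size on both sides, then clamped to start where the previous window ended. A standalone sketch of the same arithmetic (a hypothetical helper, not part of this changeset) makes the behaviour easy to check without a live matcher:

    def non_overlapping_chunks(spans, fragment_size=200):
        # spans: sorted (startchar, endchar) pairs, as matcher.spans() yields them
        memory = [(0, 0)]
        for start, end in spans:
            start_offseted = max(0, start - fragment_size)
            end_offseted = end + fragment_size
            # clamp the window so it starts where the previous one ended
            if start_offseted < memory[-1][1]:
                start_offseted = memory[-1][1]
            memory.append((start_offseted, end_offseted))
            yield (start_offseted, end_offseted)

    # two hits 100 characters apart yield adjoining, not overlapping, windows:
    print list(non_overlapping_chunks([(250, 260), (350, 360)]))
    # [(50, 460), (460, 560)]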
rhodecode/lib/indexers/daemon.py
@@ -134,150 +134,150 @@ class WhooshIndexingDaemon(object):
     def add_doc(self, writer, path, repo, repo_name):
         """
         Adds a doc to the writer; this function itself fetches the data
         from the instance of the vcs backend.
         """
         node = self.get_node(repo, path)
         indexed = indexed_w_content = 0
         # we only index the content of chosen files, and skip binary files
         if node.extension in INDEX_EXTENSIONS and not node.is_binary:
             u_content = node.content
             if not isinstance(u_content, unicode):
                 log.warning('  >> %s Could not get this content as unicode '
                             'replacing with empty content' % path)
                 u_content = u''
             else:
                 log.debug('    >> %s [WITH CONTENT]' % path)
                 indexed_w_content += 1

         else:
             log.debug('    >> %s' % path)
             # just index the file name without its content
             u_content = u''
             indexed += 1

         p = safe_unicode(path)
         writer.add_document(
             fileid=p,
             owner=unicode(repo.contact),
             repository=safe_unicode(repo_name),
             path=p,
             content=u_content,
             modtime=self.get_node_mtime(node),
             extension=node.extension
         )
         return indexed, indexed_w_content

     def index_changesets(self, writer, repo_name, repo, start_rev=0):
         """
         Adds all changesets in the vcs repo starting at start_rev
         to the index writer.
         """
         log.debug('indexing changesets in %s[%d:]' % (repo_name, start_rev))

         indexed = 0
         for cs in repo[start_rev:]:
             writer.add_document(
-                path=unicode(cs.raw_id),
+                raw_id=unicode(cs.raw_id),
                 owner=unicode(repo.contact),
                 repository=safe_unicode(repo_name),
                 author=cs.author,
                 message=cs.message,
                 revision=cs.revision,
                 last=cs.last,
                 added=u' '.join([node.path for node in cs.added]).lower(),
                 removed=u' '.join([node.path for node in cs.removed]).lower(),
                 changed=u' '.join([node.path for node in cs.changed]).lower(),
                 parents=u' '.join([cs.raw_id for cs in cs.parents]),
             )
             indexed += 1

         log.debug('indexed %d changesets for repo %s' % (indexed, repo_name))

     def index_files(self, file_idx_writer, repo_name, repo):
         i_cnt = iwc_cnt = 0
         log.debug('building index for [%s]' % repo.path)
         for idx_path in self.get_paths(repo):
             i, iwc = self.add_doc(file_idx_writer, idx_path, repo, repo_name)
             i_cnt += i
             iwc_cnt += iwc

         log.debug('added %s files %s with content for repo %s' % (i_cnt + iwc_cnt, iwc_cnt, repo.path))

     def update_changeset_index(self):
         idx = open_dir(self.index_location, indexname=CHGSET_IDX_NAME)

         with idx.searcher() as searcher:
             writer = idx.writer()
             writer_is_dirty = False
             try:
                 for repo_name, repo in self.repo_paths.items():
                     # skip indexing if there aren't any revs in the repo
                     revs = repo.revisions
                     if len(revs) < 1:
                         continue

                     qp = QueryParser('repository', schema=CHGSETS_SCHEMA)
                     q = qp.parse(u"last:t AND %s" % repo_name)

                     results = searcher.search(q, sortedby='revision')

                     last_rev = 0
                     if len(results) > 0:
                         last_rev = results[0]['revision']

                     # there are new changesets to index or a new repo to index
                     if last_rev == 0 or len(revs) > last_rev + 1:
                         # delete the docs in the index for the previous last changeset(s)
                         for hit in results:
-                            q = qp.parse(u"last:t AND %s AND path:%s" %
-                                            (repo_name, hit['path']))
+                            q = qp.parse(u"last:t AND %s AND raw_id:%s" %
+                                            (repo_name, hit['raw_id']))
                             writer.delete_by_query(q)

                         # index from the previous last changeset + all new ones
                         self.index_changesets(writer, repo_name, repo, last_rev)
                         writer_is_dirty = True

             finally:
                 if writer_is_dirty:
                     log.debug('>> COMMITING CHANGES TO CHANGESET INDEX<<')
                     writer.commit(merge=True)
                     log.debug('>> COMMITTED CHANGES TO CHANGESET INDEX<<')
                 else:
                     writer.cancel

     def update_file_index(self):
         log.debug((u'STARTING INCREMENTAL INDEXING UPDATE FOR EXTENSIONS %s '
                    'AND REPOS %s') % (INDEX_EXTENSIONS, self.repo_paths.keys()))

         idx = open_dir(self.index_location, indexname=self.indexname)
         # The set of all paths in the index
         indexed_paths = set()
         # The set of all paths we need to re-index
         to_index = set()

         writer = idx.writer()
         writer_is_dirty = False
         try:
             with idx.reader() as reader:

                 # Loop over the stored fields in the index
                 for fields in reader.all_stored_fields():
                     indexed_path = fields['path']
                     indexed_repo_path = fields['repository']
                     indexed_paths.add(indexed_path)

                     if not indexed_repo_path in self.filtered_repo_update_paths:
                         continue

                     repo = self.repo_paths[indexed_repo_path]

                     try:
                         node = self.get_node(repo, indexed_path)
                         # Check if this file was changed since it was indexed
                         indexed_time = fields['modtime']
                         mtime = self.get_node_mtime(node)
                         if mtime > indexed_time:
                             # The file has changed, delete it and add it to the list of
                             # files to reindex
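
Two things stand out in update_changeset_index above. The documents flagged last:t serve as a per-repository high-water mark: only those marker docs are deleted before re-indexing from the last known revision onward, and after this changeset they are addressed by raw_id instead of path. Separately, the finally block references writer.cancel without calling it, so on the nothing-new path the writer is never actually cancelled; presumably writer.cancel() was intended (a pre-existing quirk, not introduced by this changeset). A minimal sketch of the commit-or-cancel pattern with the call in place, assuming the same Whoosh writer API:

    writer = idx.writer()
    writer_is_dirty = False
    try:
        # ... delete stale 'last' marker docs and queue new ones,
        # setting writer_is_dirty = True when anything changes ...
        pass
    finally:
        if writer_is_dirty:
            writer.commit(merge=True)  # persist queued changes, merging segments
        else:
            writer.cancel()  # note the call: aborts cleanly and releases the writer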
rhodecode/templates/search/search_commit.html
 ##commit highlighting

 %for cnt,sr in enumerate(c.formated_results):
     %if h.HasRepoPermissionAny('repository.write','repository.read','repository.admin')(sr['repository'],'search results check'):
     <div class="table">
         <div id="body${cnt}" class="codeblock">
             <div class="code-header">
-                <div class="search-path">${h.link_to(h.literal('%s &raquo; %s' % (sr['repository'],sr['f_path'])),
-                h.url('changeset_home',repo_name=sr['repository'],revision=sr['path']))}
+                <div class="search-path">${h.link_to(h.literal('%s &raquo; %s' % (sr['repository'],sr['raw_id'])),
+                h.url('changeset_home',repo_name=sr['repository'],revision=sr['raw_id']))}
                 </div>
             </div>
             <div class="left">
                 <div class="author">
                     <div class="gravatar">
                         <img alt="gravatar" src="${h.gravatar_url(h.email(sr['author']),20)}"/>
                     </div>
                     <span>${h.person(sr['author'])}</span><br/>
                     <span><a href="mailto:${h.email_or_none(sr['author'])}">${h.email_or_none(sr['author'])}</a></span><br/>
                 </div>
                 %if sr['message_hl']:
                 <div class="search-code-body">
                     <pre>${h.literal(sr['message_hl'])}</pre>
                 </div>
                 %else:
                 <div class="message">${h.urlify_commit(sr['message'], sr['repository'])}</div>
                 %endif
             </div>
         </div>
     </div>
     %else:
         %if cnt == 0:
         <div class="table">
             <div id="body${cnt}" class="codeblock">
                 <div class="error">${_('Permission denied')}</div>
             </div>
         </div>
         %endif
     %endif
 %endfor
 %if c.cur_query and c.formated_results:
 <div class="pagination-wh pagination-left">
     ${c.formated_results.pager('$link_previous ~2~ $link_next')}
 </div>
 %endif