Changeset - 168cc92c1b53
[Not reviewed]
default
FUJIWARA Katsunori - 2017-01-22 18:17:38
foozy@lares.dti.ne.jp
search: prevent pathname related conditions from removing "stop words"

Before this revision, the pathname-related conditions below caused
"stop words" to be unintentionally dropped:

- path:, extension: (for "File contents" or "File names")
- added:, removed:, changed: (for "Commit messages")

Therefore, pathname-related conditions containing "this", "a", "you",
and so on were completely ignored, even if these were valid pathname
components.
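
For reference, Whoosh's default analyzer for TEXT fields
(StandardAnalyzer) includes a StopFilter, so every component of such a
pathname can vanish at indexing and query-parsing time. A minimal
sketch of the effect (assuming Whoosh 2.x defaults; not part of this
changeset):

from whoosh.analysis import StandardAnalyzer

# StandardAnalyzer = tokenizer + lowercase + stop-word filter;
# "this", "is", and "it" are all in Whoosh's default stop list.
print([t.text for t in StandardAnalyzer()(u"this/is/it")])   # -> []
print([t.text for t in StandardAnalyzer()(u"src/main.py")])  # -> ['src', 'main.py']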

To prevent pathname-related conditions from dropping "stop words",
this revision explicitly specifies an "analyzer" for the
pathname-related fields of SCHEMA and CHGSETS_SCHEMA.

The difference between PATHANALYZER and the default analyzer of TEXT
is whether "stop words" are preserved or not; tokenization is still
applied to pathnames.
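
In contrast to the default analyzer, PATHANALYZER keeps those
components. A minimal sketch, under the same Whoosh 2.x assumptions:

from whoosh.analysis import RegexTokenizer, LowercaseFilter

# No StopFilter in the chain, so nothing is dropped; tokens are
# still split on non-word characters and lowercased.
PATHANALYZER = RegexTokenizer() | LowercaseFilter()
print([t.text for t in PATHANALYZER(u"This/Is/It")])  # -> ['this', 'is', 'it']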

This revision requires fully re-building the index tables, because the
indexing schemas have changed.
2 files changed with 19 insertions and 11 deletions:
0 comments (0 inline, 0 general)
kallithea/lib/indexers/__init__.py
 
# -*- coding: utf-8 -*-
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program.  If not, see <http://www.gnu.org/licenses/>.
"""
kallithea.lib.indexers
~~~~~~~~~~~~~~~~~~~~~~

Whoosh indexing module for Kallithea

This file was forked by the Kallithea project in July 2014.
Original author and date, and relevant copyright and licensing information is below:
:created_on: Aug 17, 2010
:author: marcink
:copyright: (c) 2013 RhodeCode GmbH, and others.
:license: GPLv3, see LICENSE.md for more details.
"""

import os
import sys
import logging
from os.path import dirname

# Add location of top level folder to sys.path
sys.path.append(dirname(dirname(dirname(os.path.realpath(__file__)))))

from whoosh.analysis import RegexTokenizer, LowercaseFilter, IDTokenizer
from whoosh.fields import TEXT, ID, STORED, NUMERIC, BOOLEAN, Schema, FieldType, DATETIME
from whoosh.formats import Characters
from whoosh.highlight import highlight as whoosh_highlight, HtmlFormatter, ContextFragmenter
from kallithea.lib.utils2 import LazyProperty

log = logging.getLogger(__name__)

# CUSTOM ANALYZER wordsplit + lowercase filter
ANALYZER = RegexTokenizer(expression=r"\w+") | LowercaseFilter()

# CUSTOM ANALYZER raw-string + lowercase filter
#
# This is useful to:
# - avoid tokenization
# - avoid removing "stop words" from text
# - search case-insensitively
#
ICASEIDANALYZER = IDTokenizer() | LowercaseFilter()

# CUSTOM ANALYZER raw-string
#
# This is useful to:
# - avoid tokenization
# - avoid removing "stop words" from text
#
IDANALYZER = IDTokenizer()

# CUSTOM ANALYZER wordsplit + lowercase filter, for pathname-like text
#
# This is useful to:
# - avoid removing "stop words" from text
# - search case-insensitively
#
PATHANALYZER = RegexTokenizer() | LowercaseFilter()
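
For comparison, the four analyzers above produce the following tokens
for the same input (a sketch assuming Whoosh 2.x defaults; the sample
string is illustrative):

sample = u"Group/Indexing_Test"
for name, analyzer in [("ANALYZER", ANALYZER),
                       ("ICASEIDANALYZER", ICASEIDANALYZER),
                       ("IDANALYZER", IDANALYZER),
                       ("PATHANALYZER", PATHANALYZER)]:
    print(name, [t.text for t in analyzer(sample)])
# ANALYZER        ['group', 'indexing_test']
# ICASEIDANALYZER ['group/indexing_test']   (one raw token, lowercased)
# IDANALYZER      ['Group/Indexing_Test']   (one raw token, case kept)
# PATHANALYZER    ['group', 'indexing_test']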

#INDEX SCHEMA DEFINITION
SCHEMA = Schema(
    fileid=ID(unique=True),
    owner=TEXT(),
    # this field preserves case of repository name for exact matching
    repository_rawname=TEXT(analyzer=IDANALYZER),
    repository=TEXT(stored=True, analyzer=ICASEIDANALYZER),
-    path=TEXT(stored=True),
+    path=TEXT(stored=True, analyzer=PATHANALYZER),
    content=FieldType(format=Characters(), analyzer=ANALYZER,
                      scorable=True, stored=True),
    modtime=STORED(),
-    extension=TEXT(stored=True)
+    extension=TEXT(stored=True, analyzer=PATHANALYZER)
)
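
With analyzer=PATHANALYZER on "path", a filename made entirely of stop
words is now both indexed and queryable. A self-contained sketch
against an in-memory index (the cut-down demo schema below is
illustrative, not part of this changeset):

from whoosh.fields import Schema, TEXT
from whoosh.filedb.filestore import RamStorage
from whoosh.qparser import QueryParser

demo_schema = Schema(path=TEXT(stored=True, analyzer=PATHANALYZER))
ix = RamStorage().create_index(demo_schema)
writer = ix.writer()
writer.add_document(path=u"this/is/it")
writer.commit()

with ix.searcher() as searcher:
    query = QueryParser("path", demo_schema).parse(u"path:this/is/it")
    print([hit["path"] for hit in searcher.search(query)])
    # -> [u'this/is/it']; with the old stop-word-filtering analyzer,
    # all three terms were dropped and the query matched nothing.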

IDX_NAME = 'HG_INDEX'
FORMATTER = HtmlFormatter('span', between='\n<span class="break">...</span>\n')
FRAGMENTER = ContextFragmenter(200)

CHGSETS_SCHEMA = Schema(
    raw_id=ID(unique=True, stored=True),
    date=NUMERIC(stored=True),
    last=BOOLEAN(),
    owner=TEXT(),
    # this field preserves case of repository name for exact matching
    # and unique-ness in index table
    repository_rawname=ID(unique=True),
    repository=ID(stored=True, analyzer=ICASEIDANALYZER),
    author=TEXT(stored=True),
    message=FieldType(format=Characters(), analyzer=ANALYZER,
                      scorable=True, stored=True),
    parents=TEXT(),
-    added=TEXT(),
-    removed=TEXT(),
-    changed=TEXT(),
+    added=TEXT(analyzer=PATHANALYZER),
+    removed=TEXT(analyzer=PATHANALYZER),
+    changed=TEXT(analyzer=PATHANALYZER),
)
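
Likewise for the changeset schema: "added:", "removed:", and
"changed:" conditions keep their terms instead of collapsing to
nothing. A parsing-only sketch (the exact query repr depends on the
Whoosh version):

from whoosh.qparser import QueryParser

parser = QueryParser("message", CHGSETS_SCHEMA)
print(parser.parse(u"added:this/is/it bother to ask where"))
# Now roughly And([Term('added', 'this'), Term('added', 'is'),
# Term('added', 'it'), Term('message', 'bother'), ...]); before this
# change the three "added" terms were stop-filtered out of the query.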

CHGSET_IDX_NAME = 'CHGSET_INDEX'

# used only to generate queries in journal
JOURNAL_SCHEMA = Schema(
    username=ID(),
    date=DATETIME(),
    action=TEXT(),
    repository=ID(),
    ip=TEXT(),
)


class WhooshResultWrapper(object):
    def __init__(self, search_type, searcher, matcher, highlight_items,
                 repo_location):
        self.search_type = search_type
        self.searcher = searcher
        self.matcher = matcher
        self.highlight_items = highlight_items
        self.fragment_size = 200
        self.repo_location = repo_location

    @LazyProperty
    def doc_ids(self):
        docs_id = []
        while self.matcher.is_active():
            docnum = self.matcher.id()
            chunks = [offsets for offsets in self.get_chunks()]
            docs_id.append([docnum, chunks])
            self.matcher.next()
        return docs_id

    def __str__(self):
        return '<%s at %s>' % (self.__class__.__name__, len(self.doc_ids))

    def __repr__(self):
        return self.__str__()

    def __len__(self):
        return len(self.doc_ids)

    def __iter__(self):
        """
        Allows iteration over results, lazily generating content.

        *Requires* implementation of ``__getitem__`` method.
        """
        for docid in self.doc_ids:
            yield self.get_full_content(docid)

    def __getitem__(self, key):
        """
        Slicing of resultWrapper
        """
        i, j = key.start, key.stop

        slices = []
        for docid in self.doc_ids[i:j]:
            slices.append(self.get_full_content(docid))
        return slices

    def get_full_content(self, docid):
        res = self.searcher.stored_fields(docid[0])
        log.debug('result: %s', res)
        if self.search_type == 'content':
            full_repo_path = os.path.join(self.repo_location, res['repository'])
            f_path = res['path'].split(full_repo_path)[-1]
            f_path = f_path.lstrip(os.sep)
            content_short = self.get_short_content(res, docid[1])
            res.update({'content_short': content_short,
                        'content_short_hl': self.highlight(content_short),
                        'f_path': f_path
            })
        elif self.search_type == 'path':
            full_repo_path = os.path.join(self.repo_location, res['repository'])
            f_path = res['path'].split(full_repo_path)[-1]
            f_path = f_path.lstrip(os.sep)
            res.update({'f_path': f_path})
        elif self.search_type == 'message':
            res.update({'message_hl': self.highlight(res['message'])})

        log.debug('result: %s', res)

        return res

    def get_short_content(self, res, chunks):
        return ''.join([res['content'][chunk[0]:chunk[1]] for chunk in chunks])

    def get_chunks(self):
        """
        Smart function that chunks the content without overlapping chunks,
        so it doesn't highlight the same close occurrences twice.
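
A hypothetical usage sketch of WhooshResultWrapper (the index and
query objects and the variable names here are illustrative; in
Kallithea this wiring is done by the search controller):

# index and query are assumed to exist: an open Whoosh index and a
# parsed whoosh.query.Query, e.g. from the sketches above.
with index.searcher() as searcher:
    matcher = query.matcher(searcher)
    results = WhooshResultWrapper('content', searcher, matcher,
                                  highlight_items, repos_location)
    print(len(results))
    for res in results[:10]:   # __getitem__ implements slicing
        print(res['repository'], res['f_path'])
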
kallithea/tests/functional/test_search_indexing.py
 
@@ -63,136 +63,136 @@ def rebuild_index(full_index):
        # Linux 3.2.78-1 x86_64, 3GB memory, and no ulimit
        # configuration for memory)
        create_test_index(TESTS_TMP_PATH, CONFIG, full_index=full_index)


class TestSearchControllerIndexing(TestController):
    @classmethod
    def setup_class(cls):
        for reponame, init_or_fork, groupname in repos:
            if groupname and groupname not in groupids:
                group = fixture.create_repo_group(groupname)
                groupids[groupname] = group.group_id
            if callable(init_or_fork):
                repo = fixture.create_repo(reponame,
                                           repo_group=groupname)
                init_or_fork(repo)
            else:
                repo = fixture.create_fork(init_or_fork, reponame,
                                           repo_group=groupname)
            repoids[reponame] = repo.repo_id

        # treat "it" as an indexable filename
        filenames_mock = list(INDEX_FILENAMES)
        filenames_mock.append('it')
        with mock.patch('kallithea.lib.indexers.daemon.INDEX_FILENAMES',
                        filenames_mock):
            rebuild_index(full_index=False) # only for newly added repos

    @classmethod
    def teardown_class(cls):
        # delete in reversed order, to delete fork destinations first
        for reponame, init_or_fork, groupname in reversed(repos):
            RepoModel().delete(repoids[reponame])

        for reponame, init_or_fork, groupname in reversed(repos):
            if groupname in groupids:
                RepoGroupModel().delete(groupids.pop(groupname),
                                        force_delete=True)

        Session().commit()
        Session.remove()

        rebuild_index(full_index=True) # rebuild fully for subsequent tests

    @parametrize('reponame', [
        (u'indexing_test'),
        (u'indexing_test-fork'),
        (u'group/indexing_test'),
        (u'this-is-it'),
        (u'*-fork'),
        (u'group/*'),
    ])
    @parametrize('searchtype,query,hit', [
        ('content', 'this_should_be_unique_content', 1),
        ('commit', 'this_should_be_unique_commit_log', 1),
        ('path', 'this_should_be_unique_filename.txt', 1),
    ])
    def test_repository_tokenization(self, reponame, searchtype, query, hit):
        self.log_user()

        q = 'repository:%s %s' % (reponame, query)
        response = self.app.get(url(controller='search', action='index'),
                                {'q': q, 'type': searchtype})
        response.mustcontain('>%d results' % hit)

    @parametrize('searchtype,query,hit', [
        ('content', 'this_should_be_unique_content', 1),
        ('commit', 'this_should_be_unique_commit_log', 1),
        ('path', 'this_should_be_unique_filename.txt', 1),
    ])
    def test_repository_case_sensitivity(self, searchtype, query, hit):
        self.log_user()

        lname = u'indexing_test-foo'
        uname = u'indexing_test-FOO'

        # (1) "repository:REPONAME" condition should match against
        # repositories case-insensitively
        q = 'repository:%s %s' % (lname, query)
        response = self.app.get(url(controller='search', action='index'),
                                {'q': q, 'type': searchtype})

        response.mustcontain('>%d results' % (hit * 2))

        # (2) on the other hand, searching under the specific
        # repository should return results only for that repository,
        # even if the specified name matches another repository
        # case-insensitively.
        response = self.app.get(url(controller='search', action='index',
                                    repo_name=uname),
                                {'q': query, 'type': searchtype})

        response.mustcontain('>%d results' % hit)

        # confirm that there is no match against the lower-case repository name
        assert uname in response
-        #assert lname not in response
+        assert lname not in response

    @parametrize('searchtype,query,hit', [
-        ('content', 'path:this/is/it def test', 37),
+        ('content', 'path:this/is/it def test', 1),
-        ('commit', 'added:this/is/it bother to ask where', 4),
+        ('commit', 'added:this/is/it bother to ask where', 1),
+        # this condition matches against files below, because
+        # "path:" condition is also applied on "repository path".
+        # - "this/is/it" in "stopword_test" repo
+        # - "this_should_be_unique_filename.txt" in "this-is-it" repo
-        ('path', 'this/is/it', 0),
+        ('path', 'this/is/it', 2),

-        ('content', 'extension:us', 0),
+        ('content', 'extension:us', 1),
-        ('path', 'extension:us', 0),
+        ('path', 'extension:us', 1),
    ])
    def test_filename_stopword(self, searchtype, query, hit):
        response = self.app.get(url(controller='search', action='index'),
                                {'q': query, 'type': searchtype})

        response.mustcontain('>%d results' % hit)

    @parametrize('searchtype,query,hit', [
        # matching against both files
        ('content', 'owner:"this is it"', 0),
        ('content', 'owner:this-is-it', 0),
        ('path', 'owner:"this is it"', 0),
        ('path', 'owner:this-is-it', 0),

        # matching against both revisions
        ('commit', 'owner:"this is it"', 0),
        ('commit', 'owner:"this-is-it"', 0),

        # matching against only 1 revision
        ('commit', 'author:"this is it"', 0),
        ('commit', 'author:"this-is-it"', 0),
    ])
    def test_mailaddr_stopword(self, searchtype, query, hit):
        response = self.app.get(url(controller='search', action='index'),
                                {'q': query, 'type': searchtype})

        response.mustcontain('>%d results' % hit)