Location: kallithea/rhodecode/lib/indexers/daemon.py @ 932c84e8fa92 (16.9 KiB, text/x-python)
Commit message: fixed #851 and #563 make-index crashes on non-ascii files
# -*- coding: utf-8 -*-
"""
rhodecode.lib.indexers.daemon
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
    A daemon that reads from the task table and runs tasks
:created_on: Jan 26, 2010
:author: marcink
:copyright: (C) 2010-2012 Marcin Kuzminski <marcin@python-works.com>
:license: GPLv3, see COPYING for more details.
"""
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from __future__ import with_statement
import os
import sys
import logging
import traceback
from shutil import rmtree
from time import mktime
from os.path import dirname as dn
from os.path import join as jn
# add the project root to sys.path so the rhodecode imports below resolve
project_path = dn(dn(dn(dn(os.path.realpath(__file__)))))
sys.path.append(project_path)
from rhodecode.config.conf import INDEX_EXTENSIONS
from rhodecode.model.scm import ScmModel
from rhodecode.model.db import Repository
from rhodecode.lib.utils2 import safe_unicode, safe_str
from rhodecode.lib.indexers import SCHEMA, IDX_NAME, CHGSETS_SCHEMA, \
CHGSET_IDX_NAME
from rhodecode.lib.vcs.exceptions import ChangesetError, RepositoryError, \
NodeDoesNotExistError
from whoosh.index import create_in, open_dir, exists_in
from whoosh.query import *
from whoosh.qparser import QueryParser
log = logging.getLogger('whoosh_indexer')
class WhooshIndexingDaemon(object):
"""
Daemon for atomic indexing jobs
"""
def __init__(self, indexname=IDX_NAME, index_location=None,
repo_location=None, sa=None, repo_list=None,
repo_update_list=None):
self.indexname = indexname
self.index_location = index_location
if not index_location:
raise Exception('You have to provide index location')
self.repo_location = repo_location
if not repo_location:
raise Exception('You have to provide repositories location')
self.repo_paths = ScmModel(sa).repo_scan(self.repo_location)
#filter repo list
if repo_list:
#Fix non-ascii repo names to unicode
repo_list = map(safe_unicode, repo_list)
self.filtered_repo_paths = {}
for repo_name, repo in self.repo_paths.items():
if repo_name in repo_list:
self.filtered_repo_paths[repo_name] = repo
self.repo_paths = self.filtered_repo_paths
#filter update repo list
self.filtered_repo_update_paths = {}
if repo_update_list:
self.filtered_repo_update_paths = {}
for repo_name, repo in self.repo_paths.items():
if repo_name in repo_update_list:
self.filtered_repo_update_paths[repo_name] = repo
self.repo_paths = self.filtered_repo_update_paths
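        # decide between a full initial build and an incremental update: a
        # full build is needed when the index directory or either of the two
        # indexes (file content and changeset) does not exist yet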
self.initial = True
if not os.path.isdir(self.index_location):
os.makedirs(self.index_location)
            log.info('Cannot run incremental index since it does not '
                     'yet exist; running full build')
elif not exists_in(self.index_location, IDX_NAME):
log.info('Running full index build as the file content '
'index does not exist')
elif not exists_in(self.index_location, CHGSET_IDX_NAME):
log.info('Running full index build as the changeset '
'index does not exist')
else:
self.initial = False
def _get_index_revision(self, repo):
db_repo = Repository.get_by_repo_name(repo.name)
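        # index the landing revision configured for the repository in the
        # database; fall back to the tip if the repository is not in the db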
landing_rev = 'tip'
if db_repo:
landing_rev = db_repo.landing_rev
return landing_rev
def _get_index_changeset(self, repo):
index_rev = self._get_index_revision(repo)
cs = repo.get_changeset(index_rev)
return cs
def get_paths(self, repo):
"""
        recursive walk in root dir and return a set of all paths in that dir
        based on the repository walk function
"""
index_paths_ = set()
try:
cs = self._get_index_changeset(repo)
for _topnode, _dirs, files in cs.walk('/'):
for f in files:
index_paths_.add(jn(safe_str(repo.path), safe_str(f.path)))
except RepositoryError:
log.debug(traceback.format_exc())
return index_paths_
def get_node(self, repo, path):
"""
        gets a filenode based on the given full path. It operates on strings
        for hg/git compatibility.

        :param repo: scm repo instance
        :param path: full path including root location

        :return: FileNode
"""
        root_path = safe_str(repo.path) + '/'
parts = safe_str(path).partition(root_path)
cs = self._get_index_changeset(repo)
node = cs.get_node(parts[-1])
return node
def get_node_mtime(self, node):
return mktime(node.last_changeset.date.timetuple())
def add_doc(self, writer, path, repo, repo_name):
"""
        Adding doc to writer; this function itself fetches data from
        the instance of the vcs backend
"""
node = self.get_node(repo, path)
indexed = indexed_w_content = 0
# we just index the content of chosen files, and skip binary files
if node.extension in INDEX_EXTENSIONS and not node.is_binary:
u_content = node.content
if not isinstance(u_content, unicode):
                log.warning(' >> %s Could not get this content as unicode, '
                            'replacing with empty content' % path)
u_content = u''
else:
log.debug(' >> %s [WITH CONTENT]' % path)
indexed_w_content += 1
else:
log.debug(' >> %s' % path)
            # just index the file name without its content
u_content = u''
indexed += 1
p = safe_unicode(path)
writer.add_document(
fileid=p,
owner=unicode(repo.contact),
repository=safe_unicode(repo_name),
path=p,
content=u_content,
modtime=self.get_node_mtime(node),
extension=node.extension
)
return indexed, indexed_w_content
def index_changesets(self, writer, repo_name, repo, start_rev=None):
"""
        Add all changesets in the vcs repo starting at start_rev
        to the index writer

        :param writer: the whoosh index writer to add to
        :param repo_name: name of the repository the changesets
          originate from, including the repository group
        :param repo: the vcs repository instance to index changesets for;
          the presumption is that the repo has changesets to index
        :param start_rev=None: the full sha id to start indexing from;
          if start_rev is None then index from the first changeset in
          the repo
"""
if start_rev is None:
start_rev = repo[0].raw_id
log.debug('indexing changesets in %s starting at rev: %s' %
(repo_name, start_rev))
indexed = 0
for cs in repo.get_changesets(start=start_rev):
log.debug(' >> %s' % cs)
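            # the touched paths are stored as space-joined lowercase text so
            # they are searchable; the 'last' flag marks the newest changeset
            # and is what update_changeset_index() queries ("last:t") to find
            # where the previous indexing run stopped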
writer.add_document(
raw_id=unicode(cs.raw_id),
owner=unicode(repo.contact),
date=cs._timestamp,
repository=safe_unicode(repo_name),
author=cs.author,
message=cs.message,
last=cs.last,
added=u' '.join([safe_unicode(node.path) for node in cs.added]).lower(),
removed=u' '.join([safe_unicode(node.path) for node in cs.removed]).lower(),
changed=u' '.join([safe_unicode(node.path) for node in cs.changed]).lower(),
                parents=u' '.join([pcs.raw_id for pcs in cs.parents]),
)
indexed += 1
log.debug('indexed %d changesets for repo %s' % (indexed, repo_name))
return indexed
def index_files(self, file_idx_writer, repo_name, repo):
"""
Index files for given repo_name
:param file_idx_writer: the whoosh index writer to add to
:param repo_name: name of the repository we're indexing
:param repo: instance of vcs repo
"""
i_cnt = iwc_cnt = 0
log.debug('building index for %s @revision:%s' % (repo.path,
self._get_index_revision(repo)))
for idx_path in self.get_paths(repo):
i, iwc = self.add_doc(file_idx_writer, idx_path, repo, repo_name)
i_cnt += i
iwc_cnt += iwc
log.debug('added %s files %s with content for repo %s' %
(i_cnt + iwc_cnt, iwc_cnt, repo.path))
return i_cnt, iwc_cnt
def update_changeset_index(self):
idx = open_dir(self.index_location, indexname=CHGSET_IDX_NAME)
with idx.searcher() as searcher:
writer = idx.writer()
writer_is_dirty = False
try:
indexed_total = 0
repo_name = None
for repo_name, repo in self.repo_paths.items():
# skip indexing if there aren't any revs in the repo
num_of_revs = len(repo)
if num_of_revs < 1:
continue
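                    # find the changeset previously indexed with the 'last'
                    # flag for this repository; it tells us where the last
                    # indexing run stopped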
qp = QueryParser('repository', schema=CHGSETS_SCHEMA)
q = qp.parse(u"last:t AND %s" % repo_name)
results = searcher.search(q)
# default to scanning the entire repo
last_rev = 0
start_id = None
if len(results) > 0:
                        # assuming that there is only one result; if not, this
                        # may require a full re-index.
start_id = results[0]['raw_id']
last_rev = repo.get_changeset(revision=start_id).revision
# there are new changesets to index or a new repo to index
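                    # (revision numbers are zero-based, so a fully indexed
                    # repo has num_of_revs == last_rev + 1; anything larger
                    # means there are unindexed changesets)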
if last_rev == 0 or num_of_revs > last_rev + 1:
# delete the docs in the index for the previous
# last changeset(s)
for hit in results:
q = qp.parse(u"last:t AND %s AND raw_id:%s" %
(repo_name, hit['raw_id']))
writer.delete_by_query(q)
# index from the previous last changeset + all new ones
indexed_total += self.index_changesets(writer,
repo_name, repo, start_id)
writer_is_dirty = True
log.debug('indexed %s changesets for repo %s' % (
indexed_total, repo_name)
)
finally:
if writer_is_dirty:
                    log.debug('>> COMMITTING CHANGES TO CHANGESET INDEX <<')
writer.commit(merge=True)
log.debug('>>> FINISHED REBUILDING CHANGESET INDEX <<<')
else:
                    log.debug('>> NOTHING TO COMMIT TO CHANGESET INDEX <<')
def update_file_index(self):
log.debug((u'STARTING INCREMENTAL INDEXING UPDATE FOR EXTENSIONS %s '
'AND REPOS %s') % (INDEX_EXTENSIONS, self.repo_paths.keys()))
idx = open_dir(self.index_location, indexname=self.indexname)
# The set of all paths in the index
indexed_paths = set()
# The set of all paths we need to re-index
to_index = set()
writer = idx.writer()
writer_is_dirty = False
try:
with idx.reader() as reader:
# Loop over the stored fields in the index
for fields in reader.all_stored_fields():
indexed_path = fields['path']
indexed_repo_path = fields['repository']
indexed_paths.add(indexed_path)
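                    # only files from repositories selected for update are
                    # candidates for re-indexing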
                    if indexed_repo_path not in self.filtered_repo_update_paths:
continue
repo = self.repo_paths[indexed_repo_path]
try:
node = self.get_node(repo, indexed_path)
# Check if this file was changed since it was indexed
indexed_time = fields['modtime']
mtime = self.get_node_mtime(node)
if mtime > indexed_time:
# The file has changed, delete it and add it to
# the list of files to reindex
log.debug(
'adding to reindex list %s mtime: %s vs %s' % (
indexed_path, mtime, indexed_time)
)
writer.delete_by_term('fileid', indexed_path)
writer_is_dirty = True
to_index.add(indexed_path)
except (ChangesetError, NodeDoesNotExistError):
# This file was deleted since it was indexed
log.debug('removing from index %s' % indexed_path)
writer.delete_by_term('path', indexed_path)
writer_is_dirty = True
# Loop over the files in the filesystem
# Assume we have a function that gathers the filenames of the
# documents to be indexed
ri_cnt_total = 0 # indexed
riwc_cnt_total = 0 # indexed with content
for repo_name, repo in self.repo_paths.items():
# skip indexing if there aren't any revisions
if len(repo) < 1:
continue
ri_cnt = 0 # indexed
riwc_cnt = 0 # indexed with content
for path in self.get_paths(repo):
path = safe_unicode(path)
if path in to_index or path not in indexed_paths:
# This is either a file that's changed, or a new file
# that wasn't indexed before. So index it!
i, iwc = self.add_doc(writer, path, repo, repo_name)
writer_is_dirty = True
                        log.debug('re-indexing %s' % path)
ri_cnt += i
ri_cnt_total += 1
riwc_cnt += iwc
riwc_cnt_total += iwc
log.debug('added %s files %s with content for repo %s' % (
ri_cnt + riwc_cnt, riwc_cnt, repo.path)
)
log.debug('indexed %s files in total and %s with content' % (
ri_cnt_total, riwc_cnt_total)
)
finally:
if writer_is_dirty:
                log.debug('>> COMMITTING CHANGES TO FILE INDEX <<')
writer.commit(merge=True)
log.debug('>>> FINISHED REBUILDING FILE INDEX <<<')
else:
log.debug('>> NOTHING TO COMMIT TO FILE INDEX <<')
writer.cancel()
def build_indexes(self):
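        # a full build always starts from scratch: any existing index
        # directory is removed and recreated before indexing begins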
if os.path.exists(self.index_location):
log.debug('removing previous index')
rmtree(self.index_location)
if not os.path.exists(self.index_location):
os.mkdir(self.index_location)
chgset_idx = create_in(self.index_location, CHGSETS_SCHEMA,
indexname=CHGSET_IDX_NAME)
chgset_idx_writer = chgset_idx.writer()
file_idx = create_in(self.index_location, SCHEMA, indexname=IDX_NAME)
file_idx_writer = file_idx.writer()
log.debug('BUILDING INDEX FOR EXTENSIONS %s '
'AND REPOS %s' % (INDEX_EXTENSIONS, self.repo_paths.keys()))
for repo_name, repo in self.repo_paths.items():
# skip indexing if there aren't any revisions
if len(repo) < 1:
continue
self.index_files(file_idx_writer, repo_name, repo)
self.index_changesets(chgset_idx_writer, repo_name, repo)
        log.debug('>> COMMITTING CHANGES <<')
file_idx_writer.commit(merge=True)
chgset_idx_writer.commit(merge=True)
log.debug('>>> FINISHED BUILDING INDEX <<<')
def update_indexes(self):
self.update_file_index()
self.update_changeset_index()
def run(self, full_index=False):
"""Run daemon"""
if full_index or self.initial:
self.build_indexes()
else:
self.update_indexes()
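
# Illustrative usage sketch (paths below are hypothetical placeholders):
# construct the daemon with an index location and the repository root, then
# run it; run() falls back to a full build when no index exists yet.
#
#   daemon = WhooshIndexingDaemon(index_location='/srv/index',
#                                 repo_location='/srv/repos')
#   daemon.run(full_index=False)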