Changeset 6afa528ee30e (branch: default) [Not reviewed]
Mads Kiilerich <madski@unity3d.com> - 2016-03-14 16:17:46
db: get rid of vcs_full_cache - it should always be used

It might make tests less deterministic, but it makes sure that we test what we actually use.

7 files changed with 6 insertions and 30 deletions
development.ini
 
################################################################################
 
################################################################################
 
# Kallithea - Development config:                                              #
 
# listening on *:5000                                                          #
 
# sqlite and kallithea.db                                                      #
 
# initial_repo_scan = true                                                     #
 
# set debug = true                                                             #
 
# verbose and colorful logging                                                 #
 
#                                                                              #
 
# The %(here)s variable will be replaced with the parent directory of this file#
 
################################################################################
 
################################################################################
 

	
 
[DEFAULT]
 
debug = true
 
pdebug = false
 

	
 
################################################################################
 
## Email settings                                                             ##
 
##                                                                            ##
 
## Refer to the documentation ("Email settings") for more details.            ##
 
##                                                                            ##
 
## It is recommended to use a valid sender address that passes access         ##
 
## validation and spam filtering in mail servers.                             ##
 
################################################################################
 

	
 
## 'From' header for application emails. You can optionally add a name.
 
## Default:
 
#app_email_from = Kallithea
 
## Examples:
 
#app_email_from = Kallithea <kallithea-noreply@example.com>
 
#app_email_from = kallithea-noreply@example.com
 

	
 
## Subject prefix for application emails.
 
## A space between this prefix and the real subject is automatically added.
 
## Default:
 
#email_prefix =
 
## Example:
 
#email_prefix = [Kallithea]
 

	
 
## Recipients for error emails and fallback recipients of application mails.
 
## Multiple addresses can be specified, space-separated.
 
## Only addresses are allowed, do not add any name part.
 
## Default:
 
#email_to =
 
## Examples:
 
#email_to = admin@example.com
 
#email_to = admin@example.com another_admin@example.com
 

	
 
## 'From' header for error emails. You can optionally add a name.
 
## Default:
 
#error_email_from = pylons@yourapp.com
 
## Examples:
 
#error_email_from = Kallithea Errors <kallithea-noreply@example.com>
 
#error_email_from = paste_error@example.com
 

	
 
## SMTP server settings
 
## Only smtp_server is mandatory. All other settings take the specified default
 
## values.
 
#smtp_server = smtp.example.com
 
#smtp_username =
 
#smtp_password =
 
#smtp_port = 25
 
#smtp_use_tls = false
 
#smtp_use_ssl = false
 
## SMTP authentication parameters to use (e.g. LOGIN PLAIN CRAM-MD5, etc.).
 
## If empty, use any of the authentication parameters supported by the server.
 
#smtp_auth =
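
## Example (hypothetical values) of a typical authenticated TLS setup:
#smtp_server = mail.example.com
#smtp_username = kallithea
#smtp_password = some-secret
#smtp_port = 587
#smtp_use_tls = true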
 

	
 
[server:main]
 
## PASTE ##
 
#use = egg:Paste#http
 
## nr of worker threads to spawn
 
#threadpool_workers = 5
 
## max request before thread respawn
 
#threadpool_max_requests = 10
 
## option to use a thread pool
 
#use_threadpool = true
 

	
 
## WAITRESS ##
 
use = egg:waitress#main
 
## number of worker threads
 
threads = 5
 
## MAX BODY SIZE 100GB
 
max_request_body_size = 107374182400
 
## use poll instead of select, fixes fd limits, may not work on old
 
## Windows systems.
 
#asyncore_use_poll = True
 

	
 
## GUNICORN ##
 
#use = egg:gunicorn#main
 
## number of process workers. You must set `instance_id = *` when this option
 
## is set to more than one worker
 
#workers = 1
 
## process name
 
#proc_name = kallithea
 
## type of worker class, one of sync, eventlet, gevent, tornado
 
## for bigger setups, using a worker class other than sync is recommended
 
#worker_class = sync
 
#max_requests = 1000
 
## amount of time a worker can handle a request before it gets killed and
 
## restarted
 
#timeout = 3600
 

	
 
## UWSGI ##
 
## run with uwsgi --ini-paste-logged <inifile.ini>
 
#[uwsgi]
 
#socket = /tmp/uwsgi.sock
 
#master = true
 
#http = 127.0.0.1:5000
 

	
 
## run as daemon and redirect all output to a file
 
#daemonize = ./uwsgi_kallithea.log
 

	
 
## master process PID
 
#pidfile = ./uwsgi_kallithea.pid
 

	
 
## stats server with workers statistics, use uwsgitop
 
## for monitoring, `uwsgitop 127.0.0.1:1717`
 
#stats = 127.0.0.1:1717
 
#memory-report = true
 

	
 
## log 5XX errors
 
#log-5xx = true
 

	
 
## Set the socket listen queue size.
 
#listen = 256
 

	
 
## Gracefully reload workers after the specified number of managed requests
 
## (avoid memory leaks).
 
#max-requests = 1000
 

	
 
## enable large buffers
 
#buffer-size = 65535
 

	
 
## socket and http timeouts ##
 
#http-timeout = 3600
 
#socket-timeout = 3600
 

	
 
## Log requests slower than the specified number of milliseconds.
 
#log-slow = 10
 

	
 
## Exit if no app can be loaded.
 
#need-app = true
 

	
 
## Set lazy mode (load apps in workers instead of master).
 
#lazy = true
 

	
 
## scaling ##
 
## set cheaper algorithm to use; if not set, the default will be used
 
#cheaper-algo = spare
 

	
 
## minimum number of workers to keep at all times
 
#cheaper = 1
 

	
 
## number of workers to spawn at startup
 
#cheaper-initial = 1
 

	
 
## maximum number of workers that can be spawned
 
#workers = 4
 

	
 
## how many workers should be spawned at a time
 
#cheaper-step = 1
 

	
 
## COMMON ##
 
#host = 127.0.0.1
 
host = 0.0.0.0
 
port = 5000
 

	
 
## middleware for hosting the WSGI application under a URL prefix
 
#[filter:proxy-prefix]
 
#use = egg:PasteDeploy#prefix
 
#prefix = /<your-prefix>
 

	
 
[app:main]
 
use = egg:kallithea
 
## enable proxy prefix middleware
 
#filter-with = proxy-prefix
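
## Example: to serve Kallithea under a /kallithea prefix (hypothetical value),
## set "prefix = /kallithea" in the commented [filter:proxy-prefix] section
## above and uncomment the filter-with line here.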
 

	
 
full_stack = true
 
static_files = true
 
## Available Languages:
 
## cs de fr hu ja nl_BE pl pt_BR ru sk zh_CN zh_TW
 
lang =
 
cache_dir = %(here)s/data
 
index_dir = %(here)s/data/index
 

	
 
## perform a full repository scan on each server start, this should be
 
## set to false after first startup, to allow faster server restarts.
 
#initial_repo_scan = false
 
initial_repo_scan = true
 

	
 
## uncomment and set this path to use archive download cache
 
archive_cache_dir = %(here)s/tarballcache
 

	
 
## change this to unique ID for security
 
app_instance_uuid = development-not-secret
 

	
 
## cut off limit for large diffs (size in bytes)
 
cut_off_limit = 256000
 

	
 
## use the cached version of the scm repo everywhere
 
vcs_full_cache = true
 

	
 
## force https in Kallithea, fixes https redirects, assumes it's always https
 
force_https = false
 

	
 
## use Strict-Transport-Security headers
 
use_htsts = false
 

	
 
## number of commits stats will parse on each iteration
 
commit_parse_limit = 25
 

	
 
## path to git executable
 
git_path = git
 

	
 
## git rev filter option; --all is the default filter. If you need to

## hide all refs in the changelog, switch this to --branches --tags
 
#git_rev_filter = --branches --tags
 

	
 
## RSS feed options
 
rss_cut_off_limit = 256000
 
rss_items_per_page = 10
 
rss_include_diff = false
 

	
 
## options for showing and identifying changesets
 
show_sha_length = 12
 
show_revision_number = false
 

	
 
## gist URL alias, used to create nicer URLs for gists. This should be a

## URL that does rewrites to _admin/gists/<gistid>.

## Example: http://gist.example.com/{gistid}. Empty means use the internal

## Kallithea URL, i.e. http[s]://kallithea.example.com/_admin/gists/<gistid>
 
gist_alias_url =
 

	
 
## white list of API enabled controllers. This allows adding a list of

## controllers to which access will be enabled by api_key, e.g. to enable

## api access to raw_files put `FilesController:raw`; to enable access to patches

## add `ChangesetController:changeset_patch`. This list should be "," separated.

## Syntax is <ControllerClass>:<function>. Check debug logs for generated names.

## Recommended settings below are commented out:
 
api_access_controllers_whitelist =
 
#    ChangesetController:changeset_patch,
 
#    ChangesetController:changeset_raw,
 
#    FilesController:raw,
 
#    FilesController:archivefile
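
## Example (assuming FilesController:raw is whitelisted): a raw file can then
## be fetched with a valid API key appended to the URL, e.g.
##   https://kallithea.example.com/<repo>/raw/tip/setup.py?api_key=<api_key>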
 

	
 
## default encoding used to convert from and to unicode
 
## can also be a comma separated list of encodings in case of mixed encodings
 
default_encoding = utf8
 

	
 
## issue tracker for Kallithea (leave blank to disable, absent for default)
 
#bugtracker = https://bitbucket.org/conservancy/kallithea/issues
 

	
 
## issue tracking mapping for commit messages

## comment out issue_pat, issue_server_link, issue_prefix to disable
 

	
 
## pattern to get the issues from commit messages
 
## default one used here is #<numbers> with a regex non-capturing group for `#`
 
## {id} will be all groups matched from this pattern
 

	
 
issue_pat = (?:\s*#)(\d+)
 

	
 
## server url to the issue; each {id} will be replaced with the match

## fetched from the regex, {repo} is replaced with the full repository name

## including groups, and {repo_name} is replaced with just the name of the repo
 

	
 
issue_server_link = https://issues.example.com/{repo}/issue/{id}
 

	
 
## prefix to add to the link to indicate it's a URL
 
## #314 will be replaced by <issue_prefix><id>
 

	
 
issue_prefix = #
 

	
 
## issue_pat, issue_server_link, issue_prefix can have suffixes to specify

## multiple patterns for other issue servers, wikis, or the like.

## Below is an example of how to create a wiki pattern:
 
# wiki-some-id -> https://wiki.example.com/some-id
 

	
 
#issue_pat_wiki = (?:wiki-)(.+)
 
#issue_server_link_wiki = https://wiki.example.com/{id}
 
#issue_prefix_wiki = WIKI-
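
## Worked example with the default settings above: a commit message containing
## "fixes #314" matches issue_pat with {id} = 314, so the text is rendered as a
## link to https://issues.example.com/{repo}/issue/314, labelled
## <issue_prefix><id>, i.e. #314.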
 

	
 
## alternative HTTP response code for failed authentication. The default HTTP

## response is 401 HTTPUnauthorized. Currently Mercurial clients have trouble

## handling that. Set this variable to 403 to return HTTPForbidden instead
 
auth_ret_code =
 

	
 
## locking return code. When a repository is locked, return this HTTP code. 2XX

## codes don't break the transaction while 4XX codes do
 
lock_ret_code = 423
 

	
 
## allows changing the repository location on the settings page
 
allow_repo_location_change = True
 

	
 
## allows setting up custom hooks on the settings page
 
allow_custom_hooks_settings = True
 

	
 
## extra extensions for indexing, space separated and without the leading '.'.
 
# index.extensions =
 
#    gemfile
 
#    lock
 

	
 
## extra filenames for indexing, space separated
 
# index.filenames =
 
#    .dockerignore
 
#    .editorconfig
 
#    INSTALL
 
#    CHANGELOG
 

	
 
####################################
 
###        CELERY CONFIG        ####
 
####################################
 

	
 
use_celery = false
 
broker.host = localhost
 
broker.vhost = rabbitmqhost
 
broker.port = 5672
 
broker.user = rabbitmq
 
broker.password = qweqwe
 

	
 
celery.imports = kallithea.lib.celerylib.tasks
 

	
 
celery.result.backend = amqp
 
celery.result.dburi = amqp://
 
celery.result.serialier = json
 

	
 
#celery.send.task.error.emails = true
 
#celery.amqp.task.result.expires = 18000
 

	
 
celeryd.concurrency = 2
 
#celeryd.log.file = celeryd.log
 
celeryd.log.level = DEBUG
 
celeryd.max.tasks.per.child = 1
 

	
 
## tasks will never be sent to the queue, but executed locally instead.
 
celery.always.eager = false
 

	
 
####################################
 
###         BEAKER CACHE        ####
 
####################################
 

	
 
beaker.cache.data_dir = %(here)s/data/cache/data
 
beaker.cache.lock_dir = %(here)s/data/cache/lock
 

	
 
beaker.cache.regions = short_term,long_term,sql_cache_short
 

	
 
beaker.cache.short_term.type = memory
 
beaker.cache.short_term.expire = 60
 
beaker.cache.short_term.key_length = 256
 

	
 
beaker.cache.long_term.type = memory
 
beaker.cache.long_term.expire = 36000
 
beaker.cache.long_term.key_length = 256
 

	
 
beaker.cache.sql_cache_short.type = memory
 
beaker.cache.sql_cache_short.expire = 10
 
beaker.cache.sql_cache_short.key_length = 256
 

	
 
####################################
 
###       BEAKER SESSION        ####
 
####################################
 

	
 
## Name of session cookie. Should be unique for a given host and path, even when running
 
## on different ports. Otherwise, cookie sessions will be shared and messed up.
 
beaker.session.key = kallithea
 
## Sessions should always only be accessible by the browser, not directly by JavaScript.
 
beaker.session.httponly = true
 
## Session lifetime. 2592000 seconds is 30 days.
 
beaker.session.timeout = 2592000
 

	
 
## Server secret used with HMAC to ensure integrity of cookies.
 
beaker.session.secret = development-not-secret
 
## Further, encrypt the data with AES.
 
#beaker.session.encrypt_key = <key_for_encryption>
 
#beaker.session.validate_key = <validation_key>
 

	
 
## Type of storage used for the session, current types are
 
## dbm, file, memcached, database, and memory.
 

	
 
## File system storage of session data. (default)
 
#beaker.session.type = file
 

	
 
## Cookie only, store all session data inside the cookie. Requires secure secrets.
 
#beaker.session.type = cookie
 

	
 
## Database storage of session data.
 
#beaker.session.type = ext:database
 
#beaker.session.sa.url = postgresql://postgres:qwe@localhost/kallithea
 
#beaker.session.table_name = db_session
 

	
 
############################
 
## ERROR HANDLING SYSTEMS ##
 
############################
 

	
 
####################
 
### [errormator] ###
 
####################
 

	
 
## Errormator is tailored to work with Kallithea, see
 
## http://errormator.com for details how to obtain an account
 
## you must install python package `errormator_client` to make it work
 

	
 
## errormator enabled
 
errormator = false
 

	
 
errormator.server_url = https://api.errormator.com
 
errormator.api_key = YOUR_API_KEY
 

	
 
## TWEAK AMOUNT OF INFO SENT HERE
 

	
 
## enables 404 error logging (default False)
 
errormator.report_404 = false
 

	
 
## time in seconds after which a request is considered slow (default 1)
 
errormator.slow_request_time = 1
 

	
 
## record slow requests in application
 
## (needs to be enabled for slow datastore recording and time tracking)
 
errormator.slow_requests = true
 

	
 
## enable hooking to application loggers
 
#errormator.logging = true
 

	
 
## minimum log level for log capture
 
#errormator.logging.level = WARNING
 

	
 
## send logs only from erroneous/slow requests
 
## (saves API quota for intensive logging)
 
errormator.logging_on_error = false
 

	
 
## list of additional keywords that should be grabbed from the environ object;

## can be a string with a comma separated list of words in lowercase

## (by default the client will always send the following info:

## 'REMOTE_USER', 'REMOTE_ADDR', 'SERVER_NAME', 'CONTENT_TYPE' + all keys that

## start with HTTP*); this list can be extended with additional keywords here
 
errormator.environ_keys_whitelist =
 

	
 
## list of keywords that should be blanked from the request object;

## can be a string with a comma separated list of words in lowercase

## (by default the client will always blank keys that contain the following words:

## 'password', 'passwd', 'pwd', 'auth_tkt', 'secret', 'csrf');

## this list can be extended with additional keywords set here
 
errormator.request_keys_blacklist =
 

	
 
## list of namespaces that should be ignored when gathering log entries;

## can be a string with a comma separated list of namespaces

## (by default the client ignores its own entries: errormator_client.client)
 
errormator.log_namespace_blacklist =
 

	
 
################
 
### [sentry] ###
 
################
 

	
 
## sentry is an alternative open source error aggregator;

## you must install the python packages `sentry` and `raven` to enable it
 

	
 
sentry.dsn = YOUR_DSN
 
sentry.servers =
 
sentry.name =
 
sentry.key =
 
sentry.public_key =
 
sentry.secret_key =
 
sentry.project =
 
sentry.site =
 
sentry.include_paths =
 
sentry.exclude_paths =
 

	
 
################################################################################
 
## WARNING: *THE LINE BELOW MUST BE UNCOMMENTED ON A PRODUCTION ENVIRONMENT*  ##
 
## Debug mode will enable the interactive debugging tool, allowing ANYONE to  ##
 
## execute malicious code after an exception is raised.                       ##
 
################################################################################
 
#set debug = false
 
set debug = true
 

	
 
##################################
 
###       LOGVIEW CONFIG       ###
 
##################################
 

	
 
logview.sqlalchemy = #faa
 
logview.pylons.templating = #bfb
 
logview.pylons.util = #eee
 

	
 
#########################################################
 
### DB CONFIGS - EACH DB WILL HAVE ITS OWN CONFIG     ###
 
#########################################################
 

	
 
# SQLITE [default]
 
sqlalchemy.db1.url = sqlite:///%(here)s/kallithea.db?timeout=60
 

	
 
# POSTGRESQL
 
#sqlalchemy.db1.url = postgresql://user:pass@localhost/kallithea
 

	
 
# MySQL
 
#sqlalchemy.db1.url = mysql://user:pass@localhost/kallithea?charset=utf8
 

	
 
# see sqlalchemy docs for others
 

	
 
sqlalchemy.db1.echo = false
 
sqlalchemy.db1.pool_recycle = 3600
 

	
 
################################
 
### LOGGING CONFIGURATION   ####
 
################################
 

	
 
[loggers]
 
keys = root, routes, kallithea, sqlalchemy, beaker, templates, whoosh_indexer
 

	
 
[handlers]
 
keys = console, console_sql
 

	
 
[formatters]
 
keys = generic, color_formatter, color_formatter_sql
 

	
 
#############
 
## LOGGERS ##
 
#############
 

	
 
[logger_root]
 
level = NOTSET
 
handlers = console
 

	
 
[logger_routes]
 
level = DEBUG
 
handlers =
 
qualname = routes.middleware
 
## "level = DEBUG" logs the route matched and routing variables.
 
propagate = 1
 

	
 
[logger_beaker]
 
level = DEBUG
 
handlers =
 
qualname = beaker.container
 
propagate = 1
 

	
 
[logger_templates]
 
level = INFO
 
handlers =
 
qualname = pylons.templating
 
propagate = 1
 

	
 
[logger_kallithea]
 
level = DEBUG
 
handlers =
 
qualname = kallithea
 
propagate = 1
 

	
 
[logger_sqlalchemy]
 
level = INFO
 
handlers = console_sql
 
qualname = sqlalchemy.engine
 
propagate = 0
 

	
 
[logger_whoosh_indexer]
 
level = DEBUG
 
handlers =
 
qualname = whoosh_indexer
 
propagate = 1
 

	
 
##############
 
## HANDLERS ##
 
##############
 

	
 
[handler_console]
 
class = StreamHandler
 
args = (sys.stderr,)
 
#level = INFO
 
level = DEBUG
 
#formatter = generic
 
formatter = color_formatter
 

	
 
[handler_console_sql]
 
class = StreamHandler
 
args = (sys.stderr,)
 
#level = WARN
 
level = DEBUG
 
#formatter = generic
 
formatter = color_formatter_sql
 

	
 
################
 
## FORMATTERS ##
 
################
 

	
 
[formatter_generic]
 
format = %(asctime)s.%(msecs)03d %(levelname)-5.5s [%(name)s] %(message)s
 
datefmt = %Y-%m-%d %H:%M:%S
 

	
 
[formatter_color_formatter]
 
class = kallithea.lib.colored_formatter.ColorFormatter
 
format = %(asctime)s.%(msecs)03d %(levelname)-5.5s [%(name)s] %(message)s
 
datefmt = %Y-%m-%d %H:%M:%S
 

	
 
[formatter_color_formatter_sql]
 
class = kallithea.lib.colored_formatter.ColorFormatterSql
 
format = %(asctime)s.%(msecs)03d %(levelname)-5.5s [%(name)s] %(message)s
 
datefmt = %Y-%m-%d %H:%M:%S
docs/usage/performance.rst
 
.. _performance:
 

	
 
================================
 
Optimizing Kallithea performance
 
================================
 

	
 
When serving a large number of big repositories, Kallithea can start

performing slower than expected. Because of the demanding nature of handling

large amounts of data from version control systems, here are some tips on how

to get the best performance.
 

	
 
-* Kallithea is often I/O bound, and hence a fast disk (SSD/SAN) is
+Follow these few steps to improve performance of Kallithea system.
+
+1.  Kallithea is often I/O bound, and hence a fast disk (SSD/SAN) is
  usually more important than a fast CPU.
 

	
 
-* Sluggish loading of the front page can easily be fixed by grouping repositories or by
-  increasing cache size (see below). This includes using the lightweight dashboard
-  option and ``vcs_full_cache`` setting in .ini file.
-
-Follow these few steps to improve performance of Kallithea system.
-
-1. Increase cache
+2. Increase cache
 

	
 
    Tweak beaker cache settings in the ini file. The actual effect of that
 
    is questionable.
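
    For example (illustrative values, not tuned recommendations), the expiry
    of the beaker regions can be raised in the .ini file::

        beaker.cache.long_term.expire = 86400
        beaker.cache.sql_cache_short.expire = 30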
 

	
 
-2. Switch from SQLite to PostgreSQL or MySQL
+3. Switch from SQLite to PostgreSQL or MySQL
 

	
 
    SQLite is a good option when the load on the system is small. But due to
 
    locking issues with SQLite, it is not recommended to use it for larger
 
    deployments. Switching to MySQL or PostgreSQL will result in an immediate
 
    performance increase. A tool like SQLAlchemyGrate_ can be used for
 
    migrating to another database platform.
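
    For example, the switch is a matter of pointing ``sqlalchemy.db1.url`` in
    the .ini file at the new database (hypothetical credentials)::

        #sqlalchemy.db1.url = sqlite:///%(here)s/kallithea.db?timeout=60
        sqlalchemy.db1.url = postgresql://user:pass@localhost/kallithea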
 

	
 
-3. Scale Kallithea horizontally
+4. Scale Kallithea horizontally
 

	
 
    Scaling horizontally can give huge performance benefits when dealing with
 
    large amounts of traffic (many users, CI servers, etc.). Kallithea can be
 
    scaled horizontally on one (recommended) or multiple machines. In order
 
    to scale horizontally you need to do the following:
 

	
 
    - Each instance's ``data`` storage needs to be configured to reside on

      shared disk storage, preferably together with the repositories. This ``data``

      dir contains template caches, sessions, the whoosh index, and is used for

      task locking (so it is safe across multiple instances). Set the

      ``cache_dir``, ``index_dir``, ``beaker.cache.data_dir``, ``beaker.cache.lock_dir``

      variables in each .ini file to a shared location across Kallithea instances.
 
    - If celery is used, each instance should run a separate Celery instance, but

      the message broker should be common to all of them (e.g., one

      shared RabbitMQ server)
 
    - Load balance using round robin or IP hash. It is recommended to write LB

      rules that will separate regular user traffic from automated processes like

      CI servers or build bots.
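
    A minimal sketch of the shared-storage part, assuming ``/mnt/shared`` is a
    disk mounted on every instance::

        cache_dir = /mnt/shared/kallithea/data
        index_dir = /mnt/shared/kallithea/data/index
        beaker.cache.data_dir = /mnt/shared/kallithea/data/cache/data
        beaker.cache.lock_dir = /mnt/shared/kallithea/data/cache/lock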
 

	
 

	
 
.. _SQLAlchemyGrate: https://github.com/shazow/sqlalchemygrate
kallithea/bin/template.ini.mako
 
## -*- coding: utf-8 -*-
 
<%text>################################################################################</%text>
 
<%text>################################################################################</%text>
 
# Kallithea - config file generated with kallithea-config                      #
 
<%text>################################################################################</%text>
 
<%text>################################################################################</%text>
 

	
 
[DEFAULT]
 
debug = true
 
pdebug = false
 

	
 
<%text>################################################################################</%text>
 
<%text>## Email settings                                                             ##</%text>
 
<%text>##                                                                            ##</%text>
 
<%text>## Refer to the documentation ("Email settings") for more details.            ##</%text>
 
<%text>##                                                                            ##</%text>
 
<%text>## It is recommended to use a valid sender address that passes access         ##</%text>
 
<%text>## validation and spam filtering in mail servers.                             ##</%text>
 
<%text>################################################################################</%text>
 

	
 
<%text>## 'From' header for application emails. You can optionally add a name.</%text>
 
<%text>## Default:</%text>
 
#app_email_from = Kallithea
 
<%text>## Examples:</%text>
 
#app_email_from = Kallithea <kallithea-noreply@example.com>
 
#app_email_from = kallithea-noreply@example.com
 

	
 
<%text>## Subject prefix for application emails.</%text>
 
<%text>## A space between this prefix and the real subject is automatically added.</%text>
 
<%text>## Default:</%text>
 
#email_prefix =
 
<%text>## Example:</%text>
 
#email_prefix = [Kallithea]
 

	
 
<%text>## Recipients for error emails and fallback recipients of application mails.</%text>
 
<%text>## Multiple addresses can be specified, space-separated.</%text>
 
<%text>## Only addresses are allowed, do not add any name part.</%text>
 
<%text>## Default:</%text>
 
#email_to =
 
<%text>## Examples:</%text>
 
#email_to = admin@example.com
 
#email_to = admin@example.com another_admin@example.com
 

	
 
<%text>## 'From' header for error emails. You can optionally add a name.</%text>
 
<%text>## Default:</%text>
 
#error_email_from = pylons@yourapp.com
 
<%text>## Examples:</%text>
 
#error_email_from = Kallithea Errors <kallithea-noreply@example.com>
 
#error_email_from = paste_error@example.com
 

	
 
<%text>## SMTP server settings</%text>
 
<%text>## Only smtp_server is mandatory. All other settings take the specified default</%text>
 
<%text>## values.</%text>
 
#smtp_server = smtp.example.com
 
#smtp_username =
 
#smtp_password =
 
#smtp_port = 25
 
#smtp_use_tls = false
 
#smtp_use_ssl = false
 
<%text>## SMTP authentication parameters to use (e.g. LOGIN PLAIN CRAM-MD5, etc.).</%text>
 
<%text>## If empty, use any of the authentication parameters supported by the server.</%text>
 
#smtp_auth =
 

	
 
[server:main]
 
%if http_server == 'paste':
 
<%text>## PASTE ##</%text>
 
use = egg:Paste#http
 
<%text>## nr of worker threads to spawn</%text>
 
threadpool_workers = 5
 
<%text>## max request before thread respawn</%text>
 
threadpool_max_requests = 10
 
<%text>## option to use a thread pool</%text>
 
use_threadpool = true
 

	
 
%elif http_server == 'waitress':
 
<%text>## WAITRESS ##</%text>
 
use = egg:waitress#main
 
<%text>## number of worker threads</%text>
 
threads = 5
 
<%text>## MAX BODY SIZE 100GB</%text>
 
max_request_body_size = 107374182400
 
<%text>## use poll instead of select, fixes fd limits, may not work on old</%text>
 
<%text>## Windows systems.</%text>
 
#asyncore_use_poll = True
 

	
 
%elif http_server == 'gunicorn':
 
<%text>## GUNICORN ##</%text>
 
use = egg:gunicorn#main
 
<%text>## number of process workers. You must set `instance_id = *` when this option</%text>
 
<%text>## is set to more than one worker</%text>
 
workers = 1
 
<%text>## process name</%text>
 
proc_name = kallithea
 
<%text>## type of worker class, one of sync, eventlet, gevent, tornado</%text>
 
<%text>## for bigger setups, using a worker class other than sync is recommended</%text>
 
worker_class = sync
 
max_requests = 1000
 
<%text>## amount of time a worker can handle a request before it gets killed and</%text>
 
<%text>## restarted</%text>
 
timeout = 3600
 

	
 
%elif http_server == 'uwsgi':
 
<%text>## UWSGI ##</%text>
 
<%text>## run with uwsgi --ini-paste-logged <inifile.ini></%text>
 
[uwsgi]
 
socket = /tmp/uwsgi.sock
 
master = true
 
http = 127.0.0.1:5000
 

	
 
<%text>## run as daemon and redirect all output to a file</%text>
 
#daemonize = ./uwsgi_kallithea.log
 

	
 
<%text>## master process PID</%text>
 
pidfile = ./uwsgi_kallithea.pid
 

	
 
<%text>## stats server with workers statistics, use uwsgitop</%text>
 
<%text>## for monitoring, `uwsgitop 127.0.0.1:1717`</%text>
 
stats = 127.0.0.1:1717
 
memory-report = true
 

	
 
<%text>## log 5XX errors</%text>
 
log-5xx = true
 

	
 
<%text>## Set the socket listen queue size.</%text>
 
listen = 256
 

	
 
<%text>## Gracefully reload workers after the specified number of managed requests</%text>
 
<%text>## (avoid memory leaks).</%text>
 
max-requests = 1000
 

	
 
<%text>## enable large buffers</%text>
 
buffer-size = 65535
 

	
 
<%text>## socket and http timeouts ##</%text>
 
http-timeout = 3600
 
socket-timeout = 3600
 

	
 
<%text>## Log requests slower than the specified number of milliseconds.</%text>
 
log-slow = 10
 

	
 
<%text>## Exit if no app can be loaded.</%text>
 
need-app = true
 

	
 
<%text>## Set lazy mode (load apps in workers instead of master).</%text>
 
lazy = true
 

	
 
<%text>## scaling ##</%text>
 
<%text>## set cheaper algorithm to use; if not set, the default will be used</%text>
 
cheaper-algo = spare
 

	
 
<%text>## minimum number of workers to keep at all times</%text>
 
cheaper = 1
 

	
 
<%text>## number of workers to spawn at startup</%text>
 
cheaper-initial = 1
 

	
 
<%text>## maximum number of workers that can be spawned</%text>
 
workers = 4
 

	
 
<%text>## how many workers should be spawned at a time</%text>
 
cheaper-step = 1
 

	
 
%endif
 
<%text>## COMMON ##</%text>
 
host = ${host}
 
port = ${port}
 

	
 
<%text>## middleware for hosting the WSGI application under a URL prefix</%text>
 
#[filter:proxy-prefix]
 
#use = egg:PasteDeploy#prefix
 
#prefix = /<your-prefix>
 

	
 
[app:main]
 
use = egg:kallithea
 
<%text>## enable proxy prefix middleware</%text>
 
#filter-with = proxy-prefix
 

	
 
full_stack = true
 
static_files = true
 
<%text>## Available Languages:</%text>
 
<%text>## cs de fr hu ja nl_BE pl pt_BR ru sk zh_CN zh_TW</%text>
 
lang =
 
cache_dir = ${here}/data
 
index_dir = ${here}/data/index
 

	
 
<%text>## perform a full repository scan on each server start, this should be</%text>
 
<%text>## set to false after first startup, to allow faster server restarts.</%text>
 
initial_repo_scan = false
 

	
 
<%text>## uncomment and set this path to use archive download cache</%text>
 
archive_cache_dir = ${here}/tarballcache
 

	
 
<%text>## change this to unique ID for security</%text>
 
app_instance_uuid = ${uuid()}
 

	
 
<%text>## cut off limit for large diffs (size in bytes)</%text>
 
cut_off_limit = 256000
 

	
 
<%text>## use the cached version of the scm repo everywhere</%text>
 
vcs_full_cache = true
 

	
 
<%text>## force https in Kallithea, fixes https redirects, assumes it's always https</%text>
 
force_https = false
 

	
 
<%text>## use Strict-Transport-Security headers</%text>
 
use_htsts = false
 

	
 
<%text>## number of commits stats will parse on each iteration</%text>
 
commit_parse_limit = 25
 

	
 
<%text>## path to git executable</%text>
 
git_path = git
 

	
 
<%text>## git rev filter option; --all is the default filter. If you need to</%text>

<%text>## hide all refs in the changelog, switch this to --branches --tags</%text>
 
#git_rev_filter = --branches --tags
 

	
 
<%text>## RSS feed options</%text>
 
rss_cut_off_limit = 256000
 
rss_items_per_page = 10
 
rss_include_diff = false
 

	
 
<%text>## options for showing and identifying changesets</%text>
 
show_sha_length = 12
 
show_revision_number = false
 

	
 
<%text>## gist URL alias, used to create nicer URLs for gists. This should be a</%text>

<%text>## URL that does rewrites to _admin/gists/<gistid>.</%text>

<%text>## Example: http://gist.example.com/{gistid}. Empty means use the internal</%text>

<%text>## Kallithea URL, i.e. http[s]://kallithea.example.com/_admin/gists/<gistid></%text>
 
gist_alias_url =
 

	
 
<%text>## white list of API enabled controllers. This allows adding a list of</%text>

<%text>## controllers to which access will be enabled by api_key, e.g. to enable</%text>

<%text>## api access to raw_files put `FilesController:raw`; to enable access to patches</%text>

<%text>## add `ChangesetController:changeset_patch`. This list should be "," separated.</%text>

<%text>## Syntax is <ControllerClass>:<function>. Check debug logs for generated names.</%text>

<%text>## Recommended settings below are commented out:</%text>
 
api_access_controllers_whitelist =
 
#    ChangesetController:changeset_patch,
 
#    ChangesetController:changeset_raw,
 
#    FilesController:raw,
 
#    FilesController:archivefile
 

	
 
<%text>## default encoding used to convert from and to unicode</%text>
 
<%text>## can also be a comma separated list of encodings in case of mixed encodings</%text>
 
default_encoding = utf8
 

	
 
<%text>## issue tracker for Kallithea (leave blank to disable, absent for default)</%text>
 
#bugtracker = https://bitbucket.org/conservancy/kallithea/issues
 

	
 
<%text>## issue tracking mapping for commit messages</%text>

<%text>## comment out issue_pat, issue_server_link, issue_prefix to disable</%text>
 

	
 
<%text>## pattern to get the issues from commit messages</%text>
 
<%text>## default one used here is #<numbers> with a regex non-capturing group for `#`</%text>
 
<%text>## {id} will be all groups matched from this pattern</%text>
 

	
 
issue_pat = (?:\s*#)(\d+)
 

	
 
<%text>## server url to the issue; each {id} will be replaced with the match</%text>

<%text>## fetched from the regex, {repo} is replaced with the full repository name</%text>

<%text>## including groups, and {repo_name} is replaced with just the name of the repo</%text>
 

	
 
issue_server_link = https://issues.example.com/{repo}/issue/{id}
 

	
 
<%text>## prefix to add to the link to indicate it's a URL</%text>
 
<%text>## #314 will be replaced by <issue_prefix><id></%text>
 

	
 
issue_prefix = #
 

	
 
<%text>## issue_pat, issue_server_link, issue_prefix can have suffixes to specify</%text>

<%text>## multiple patterns for other issue servers, wikis, or the like.</%text>

<%text>## Below is an example of how to create a wiki pattern:</%text>
 
# wiki-some-id -> https://wiki.example.com/some-id
 

	
 
#issue_pat_wiki = (?:wiki-)(.+)
 
#issue_server_link_wiki = https://wiki.example.com/{id}
 
#issue_prefix_wiki = WIKI-
 

	
 
<%text>## alternative HTTP response code for failed authentication. The default HTTP</%text>

<%text>## response is 401 HTTPUnauthorized. Currently Mercurial clients have trouble</%text>

<%text>## handling that. Set this variable to 403 to return HTTPForbidden instead</%text>
 
auth_ret_code =
 

	
 
<%text>## locking return code. When a repository is locked, return this HTTP code. 2XX</%text>

<%text>## codes don't break the transaction while 4XX codes do</%text>
 
lock_ret_code = 423
 

	
 
<%text>## allows changing the repository location on the settings page</%text>
 
allow_repo_location_change = True
 

	
 
<%text>## allows setting up custom hooks on the settings page</%text>
 
allow_custom_hooks_settings = True
 

	
 
<%text>## extra extensions for indexing, space separated and without the leading '.'.</%text>
 
# index.extensions =
 
#    gemfile
 
#    lock
 

	
 
<%text>## extra filenames for indexing, space separated</%text>
 
# index.filenames =
 
#    .dockerignore
 
#    .editorconfig
 
#    INSTALL
 
#    CHANGELOG
 

	
 
<%text>####################################</%text>
 
<%text>###        CELERY CONFIG        ####</%text>
 
<%text>####################################</%text>
 

	
 
use_celery = false
 
broker.host = localhost
 
broker.vhost = rabbitmqhost
 
broker.port = 5672
 
broker.user = rabbitmq
 
broker.password = qweqwe
 

	
 
celery.imports = kallithea.lib.celerylib.tasks
 

	
 
celery.result.backend = amqp
 
celery.result.dburi = amqp://
 
celery.result.serialier = json
 

	
 
#celery.send.task.error.emails = true
 
#celery.amqp.task.result.expires = 18000
 

	
 
celeryd.concurrency = 2
 
#celeryd.log.file = celeryd.log
 
celeryd.log.level = DEBUG
 
celeryd.max.tasks.per.child = 1
 

	
 
<%text>## tasks will never be sent to the queue, but executed locally instead.</%text>
 
celery.always.eager = false
 

	
 
<%text>####################################</%text>
 
<%text>###         BEAKER CACHE        ####</%text>
 
<%text>####################################</%text>
 

	
 
beaker.cache.data_dir = ${here}/data/cache/data
 
beaker.cache.lock_dir = ${here}/data/cache/lock
 

	
 
beaker.cache.regions = short_term,long_term,sql_cache_short
 

	
 
beaker.cache.short_term.type = memory
 
beaker.cache.short_term.expire = 60
 
beaker.cache.short_term.key_length = 256
 

	
 
beaker.cache.long_term.type = memory
 
beaker.cache.long_term.expire = 36000
 
beaker.cache.long_term.key_length = 256
 

	
 
beaker.cache.sql_cache_short.type = memory
 
beaker.cache.sql_cache_short.expire = 10
 
beaker.cache.sql_cache_short.key_length = 256
 

	
 
<%text>####################################</%text>
 
<%text>###       BEAKER SESSION        ####</%text>
 
<%text>####################################</%text>
 

	
 
<%text>## Name of session cookie. Should be unique for a given host and path, even when running</%text>
 
<%text>## on different ports. Otherwise, cookie sessions will be shared and messed up.</%text>
 
beaker.session.key = kallithea
 
<%text>## Sessions should always only be accessible by the browser, not directly by JavaScript.</%text>
 
beaker.session.httponly = true
 
<%text>## Session lifetime. 2592000 seconds is 30 days.</%text>
 
beaker.session.timeout = 2592000
 

	
 
<%text>## Server secret used with HMAC to ensure integrity of cookies.</%text>
 
beaker.session.secret = ${uuid()}
 
<%text>## Further, encrypt the data with AES.</%text>
 
#beaker.session.encrypt_key = <key_for_encryption>
 
#beaker.session.validate_key = <validation_key>
 

	
 
<%text>## Type of storage used for the session, current types are</%text>
 
<%text>## dbm, file, memcached, database, and memory.</%text>
 

	
 
<%text>## File system storage of session data. (default)</%text>
 
#beaker.session.type = file
 

	
 
<%text>## Cookie only, store all session data inside the cookie. Requires secure secrets.</%text>
 
#beaker.session.type = cookie
 

	
 
<%text>## Database storage of session data.</%text>
 
#beaker.session.type = ext:database
 
#beaker.session.sa.url = postgresql://postgres:qwe@localhost/kallithea
 
#beaker.session.table_name = db_session
 

	
 
%if error_aggregation_service == 'errormator':
 
<%text>############################</%text>
 
<%text>## ERROR HANDLING SYSTEMS ##</%text>
 
<%text>############################</%text>
 

	
 
<%text>####################</%text>
 
<%text>### [errormator] ###</%text>
 
<%text>####################</%text>
 

	
 
<%text>## Errormator is tailored to work with Kallithea; see</%text>

<%text>## http://errormator.com for details on how to obtain an account.</%text>

<%text>## You must install the python package `errormator_client` to make it work.</%text>
 

	
 
<%text>## errormator enabled</%text>
 
errormator = false
 

	
 
errormator.server_url = https://api.errormator.com
 
errormator.api_key = YOUR_API_KEY
 

	
 
<%text>## TWEAK AMOUNT OF INFO SENT HERE</%text>
 

	
 
<%text>## enables 404 error logging (default False)</%text>
 
errormator.report_404 = false
 

	
 
<%text>## time in seconds after which a request is considered slow (default 1)</%text>
 
errormator.slow_request_time = 1
 

	
 
<%text>## record slow requests in application</%text>
 
<%text>## (needs to be enabled for slow datastore recording and time tracking)</%text>
 
errormator.slow_requests = true
 

	
 
<%text>## enable hooking to application loggers</%text>
 
#errormator.logging = true
 

	
 
<%text>## minimum log level for log capture</%text>
 
#errormator.logging.level = WARNING
 

	
 
<%text>## send logs only from erroneous/slow requests</%text>
 
<%text>## (saves API quota for intensive logging)</%text>
 
errormator.logging_on_error = false
 

	
 
<%text>## list of additional keywords that should be grabbed from the environ object;</%text>

<%text>## can be a string with a comma separated list of words in lowercase</%text>

<%text>## (by default the client will always send the following info:</%text>

<%text>## 'REMOTE_USER', 'REMOTE_ADDR', 'SERVER_NAME', 'CONTENT_TYPE' + all keys that</%text>

<%text>## start with HTTP*); this list can be extended with additional keywords here</%text>
 
errormator.environ_keys_whitelist =
 

	
 
<%text>## list of keywords that should be blanked from the request object;</%text>

<%text>## can be a string with a comma separated list of words in lowercase</%text>

<%text>## (by default the client will always blank keys that contain the following words:</%text>

<%text>## 'password', 'passwd', 'pwd', 'auth_tkt', 'secret', 'csrf');</%text>

<%text>## this list can be extended with additional keywords set here</%text>
 
errormator.request_keys_blacklist =
 

	
 
<%text>## list of namespaces that should be ignored when gathering log entries;</%text>

<%text>## can be a string with a comma separated list of namespaces</%text>

<%text>## (by default the client ignores its own entries: errormator_client.client)</%text>
 
errormator.log_namespace_blacklist =
 

	
 
%elif error_aggregation_service == 'sentry':
 
<%text>################</%text>
 
<%text>### [sentry] ###</%text>
 
<%text>################</%text>
 

	
 
<%text>## sentry is an alternative open source error aggregator;</%text>

<%text>## you must install the python packages `sentry` and `raven` to enable it</%text>
 

	
 
sentry.dsn = YOUR_DSN
 
sentry.servers =
 
sentry.name =
 
sentry.key =
 
sentry.public_key =
 
sentry.secret_key =
 
sentry.project =
 
sentry.site =
 
sentry.include_paths =
 
sentry.exclude_paths =
 

	
 
%endif
 
<%text>################################################################################</%text>
 
<%text>## WARNING: *THE LINE BELOW MUST BE UNCOMMENTED ON A PRODUCTION ENVIRONMENT*  ##</%text>
 
<%text>## Debug mode will enable the interactive debugging tool, allowing ANYONE to  ##</%text>
 
<%text>## execute malicious code after an exception is raised.                       ##</%text>
 
<%text>################################################################################</%text>
 
set debug = false
 

	
 
<%text>##################################</%text>
 
<%text>###       LOGVIEW CONFIG       ###</%text>
 
<%text>##################################</%text>
 

	
 
logview.sqlalchemy = #faa
 
logview.pylons.templating = #bfb
 
logview.pylons.util = #eee
 

	
 
<%text>#########################################################</%text>
 
<%text>### DB CONFIGS - EACH DB WILL HAVE ITS OWN CONFIG     ###</%text>
 
<%text>#########################################################</%text>
 

	
 
%if database_engine == 'sqlite':
 
# SQLITE [default]
 
sqlalchemy.db1.url = sqlite:///${here}/kallithea.db?timeout=60
 

	
 
%elif database_engine == 'postgres':
 
# POSTGRESQL
 
sqlalchemy.db1.url = postgresql://user:pass@localhost/kallithea
 

	
 
%elif database_engine == 'mysql':
 
# MySQL
 
sqlalchemy.db1.url = mysql://user:pass@localhost/kallithea?charset=utf8
 

	
 
%endif
 
# see sqlalchemy docs for others
 

	
 
sqlalchemy.db1.echo = false
 
sqlalchemy.db1.pool_recycle = 3600
 

	
 
<%text>################################</%text>
 
<%text>### LOGGING CONFIGURATION   ####</%text>
 
<%text>################################</%text>
 

	
 
[loggers]
 
keys = root, routes, kallithea, sqlalchemy, beaker, templates, whoosh_indexer
 

	
 
[handlers]
 
keys = console, console_sql
 

	
 
[formatters]
 
keys = generic, color_formatter, color_formatter_sql
 

	
 
<%text>#############</%text>
 
<%text>## LOGGERS ##</%text>
 
<%text>#############</%text>
 

	
 
[logger_root]
 
level = NOTSET
 
handlers = console
 

	
 
[logger_routes]
 
level = DEBUG
 
handlers =
 
qualname = routes.middleware
 
<%text>## "level = DEBUG" logs the route matched and routing variables.</%text>
 
propagate = 1
 

	
 
[logger_beaker]
 
level = DEBUG
 
handlers =
 
qualname = beaker.container
 
propagate = 1
 

	
 
[logger_templates]
 
level = INFO
 
handlers =
 
qualname = pylons.templating
 
propagate = 1
 

	
 
[logger_kallithea]
 
level = DEBUG
 
handlers =
 
qualname = kallithea
 
propagate = 1
 

	
 
[logger_sqlalchemy]
 
level = INFO
 
handlers = console_sql
 
qualname = sqlalchemy.engine
 
propagate = 0
 

	
 
[logger_whoosh_indexer]
 
level = DEBUG
 
handlers =
 
qualname = whoosh_indexer
 
propagate = 1
 

	
 
<%text>##############</%text>
 
<%text>## HANDLERS ##</%text>
 
<%text>##############</%text>
 

	
 
[handler_console]
 
class = StreamHandler
 
args = (sys.stderr,)
 
level = INFO
 
formatter = generic
 

	
 
[handler_console_sql]
 
class = StreamHandler
 
args = (sys.stderr,)
 
level = WARN
 
formatter = generic
 

	
 
<%text>################</%text>
 
<%text>## FORMATTERS ##</%text>
 
<%text>################</%text>
 

	
 
[formatter_generic]
 
format = %(asctime)s.%(msecs)03d %(levelname)-5.5s [%(name)s] %(message)s
 
datefmt = %Y-%m-%d %H:%M:%S
 

	
 
[formatter_color_formatter]
 
class = kallithea.lib.colored_formatter.ColorFormatter
 
format = %(asctime)s.%(msecs)03d %(levelname)-5.5s [%(name)s] %(message)s
 
datefmt = %Y-%m-%d %H:%M:%S
 

	
 
[formatter_color_formatter_sql]
 
class = kallithea.lib.colored_formatter.ColorFormatterSql
 
format = %(asctime)s.%(msecs)03d %(levelname)-5.5s [%(name)s] %(message)s
 
datefmt = %Y-%m-%d %H:%M:%S
kallithea/config/deployment.ini_tmpl
 
################################################################################
 
################################################################################
 
# Kallithea - Example config                                                   #
 
#                                                                              #
 
# The %(here)s variable will be replaced with the parent directory of this file#
 
################################################################################
 
################################################################################
 

	
 
[DEFAULT]
 
debug = true
 
pdebug = false
 

	
 
################################################################################
 
## Email settings                                                             ##
 
##                                                                            ##
 
## Refer to the documentation ("Email settings") for more details.            ##
 
##                                                                            ##
 
## It is recommended to use a valid sender address that passes access         ##
 
## validation and spam filtering in mail servers.                             ##
 
################################################################################
 

	
 
## 'From' header for application emails. You can optionally add a name.
 
## Default:
 
#app_email_from = Kallithea
 
## Examples:
 
#app_email_from = Kallithea <kallithea-noreply@example.com>
 
#app_email_from = kallithea-noreply@example.com
 

	
 
## Subject prefix for application emails.
 
## A space between this prefix and the real subject is automatically added.
 
## Default:
 
#email_prefix =
 
## Example:
 
#email_prefix = [Kallithea]
 

	
 
## Recipients for error emails and fallback recipients of application mails.
 
## Multiple addresses can be specified, space-separated.
 
## Only addresses are allowed, do not add any name part.
 
## Default:
 
#email_to =
 
## Examples:
 
#email_to = admin@example.com
 
#email_to = admin@example.com another_admin@example.com
 

	
 
## 'From' header for error emails. You can optionally add a name.
 
## Default:
 
#error_email_from = pylons@yourapp.com
 
## Examples:
 
#error_email_from = Kallithea Errors <kallithea-noreply@example.com>
 
#error_email_from = paste_error@example.com
 

	
 
## SMTP server settings
 
## Only smtp_server is mandatory. All other settings take the specified default
 
## values.
 
#smtp_server = smtp.example.com
 
#smtp_username =
 
#smtp_password =
 
#smtp_port = 25
 
#smtp_use_tls = false
 
#smtp_use_ssl = false
 
## SMTP authentication parameters to use (e.g. LOGIN PLAIN CRAM-MD5, etc.).
 
## If empty, use any of the authentication parameters supported by the server.
 
#smtp_auth =
 

	
 
[server:main]
 
## PASTE ##
 
#use = egg:Paste#http
 
## nr of worker threads to spawn
 
#threadpool_workers = 5
 
## max request before thread respawn
 
#threadpool_max_requests = 10
 
## option to use a thread pool
 
#use_threadpool = true
 

	
 
## WAITRESS ##
 
use = egg:waitress#main
 
## number of worker threads
 
threads = 5
 
## MAX BODY SIZE 100GB
 
max_request_body_size = 107374182400
 
## use poll instead of select, fixes fd limits, may not work on old
 
## Windows systems.
 
#asyncore_use_poll = True
 

	
 
## GUNICORN ##
 
#use = egg:gunicorn#main
 
## number of process workers. You must set `instance_id = *` when this option
 
## is set to more than one worker
 
#workers = 1
 
## process name
 
#proc_name = kallithea
 
## type of worker class, one of sync, eventlet, gevent, tornado
 
## for bigger setups, using a worker class other than sync is recommended
 
#worker_class = sync
 
#max_requests = 1000
 
## amount of time a worker can handle a request before it gets killed and
 
## restarted
 
#timeout = 3600
 

	
 
## UWSGI ##
 
## run with uwsgi --ini-paste-logged <inifile.ini>
 
#[uwsgi]
 
#socket = /tmp/uwsgi.sock
 
#master = true
 
#http = 127.0.0.1:5000
 

	
 
## run as daemon and redirect all output to a file
 
#daemonize = ./uwsgi_kallithea.log
 

	
 
## master process PID
 
#pidfile = ./uwsgi_kallithea.pid
 

	
 
## stats server with workers statistics, use uwsgitop
 
## for monitoring, `uwsgitop 127.0.0.1:1717`
 
#stats = 127.0.0.1:1717
 
#memory-report = true
 

	
 
## log 5XX errors
 
#log-5xx = true
 

	
 
## Set the socket listen queue size.
 
#listen = 256
 

	
 
## Gracefully reload workers after the specified number of managed requests
 
## (avoid memory leaks).
 
#max-requests = 1000
 

	
 
## enable large buffers
 
#buffer-size = 65535
 

	
 
## socket and http timeouts ##
 
#http-timeout = 3600
 
#socket-timeout = 3600
 

	
 
## Log requests slower than the specified number of milliseconds.
 
#log-slow = 10
 

	
 
## Exit if no app can be loaded.
 
#need-app = true
 

	
 
## Set lazy mode (load apps in workers instead of master).
 
#lazy = true
 

	
 
## scaling ##
 
## set cheaper algorithm to use; if not set, the default will be used
 
#cheaper-algo = spare
 

	
 
## minimum number of workers to keep at all times
 
#cheaper = 1
 

	
 
## number of workers to spawn at startup
 
#cheaper-initial = 1
 

	
 
## maximum number of workers that can be spawned
 
#workers = 4
 

	
 
## how many workers should be spawned at a time
 
#cheaper-step = 1
 

	
 
## COMMON ##
 
host = 127.0.0.1
 
port = 5000
 

	
 
## middleware for hosting the WSGI application under a URL prefix
 
#[filter:proxy-prefix]
 
#use = egg:PasteDeploy#prefix
 
#prefix = /<your-prefix>
 

	
 
[app:main]
 
use = egg:kallithea
 
## enable proxy prefix middleware
 
#filter-with = proxy-prefix
 

	
 
full_stack = true
 
static_files = true
 
## Available Languages:
 
## cs de fr hu ja nl_BE pl pt_BR ru sk zh_CN zh_TW
 
lang =
 
cache_dir = %(here)s/data
 
index_dir = %(here)s/data/index
 

	
 
## perform a full repository scan on each server start, this should be
 
## set to false after first startup, to allow faster server restarts.
 
initial_repo_scan = false
 

	
 
## uncomment and set this path to use archive download cache
 
archive_cache_dir = %(here)s/tarballcache
 

	
 
## change this to unique ID for security
 
app_instance_uuid = ${app_instance_uuid}
 

	
 
## cut off limit for large diffs (size in bytes)
 
cut_off_limit = 256000
 

	
 
## use the cached version of the scm repo everywhere
 
vcs_full_cache = true
 

	
 
## force https in Kallithea, fixes https redirects, assumes it's always https
 
force_https = false
 

	
 
## use Strict-Transport-Security headers
 
use_htsts = false
 

	
 
## number of commits stats will parse on each iteration
 
commit_parse_limit = 25
 

	
 
## path to git executable
 
git_path = git
 

	
 
## git rev filter option; --all is the default filter. If you need to

## hide all refs in the changelog, switch this to --branches --tags
 
#git_rev_filter = --branches --tags
 

	
 
## RSS feed options
 
rss_cut_off_limit = 256000
 
rss_items_per_page = 10
 
rss_include_diff = false
 

	
 
## options for showing and identifying changesets
 
show_sha_length = 12
 
show_revision_number = false
 

	
 
## gist URL alias, used to create nicer URLs for gists. This should be a

## URL that does rewrites to _admin/gists/<gistid>.

## Example: http://gist.example.com/{gistid}. Empty means use the internal

## Kallithea URL, i.e. http[s]://kallithea.example.com/_admin/gists/<gistid>
 
gist_alias_url =
 

	
 
## white list of API enabled controllers. This allows adding a list of

## controllers to which access will be enabled by api_key, e.g. to enable

## api access to raw_files put `FilesController:raw`; to enable access to patches

## add `ChangesetController:changeset_patch`. This list should be "," separated.

## Syntax is <ControllerClass>:<function>. Check debug logs for generated names.

## Recommended settings below are commented out:
 
api_access_controllers_whitelist =
 
#    ChangesetController:changeset_patch,
 
#    ChangesetController:changeset_raw,
 
#    FilesController:raw,
 
#    FilesController:archivefile
 

	
 
## default encoding used to convert from and to unicode
 
## can also be a comma separated list of encodings in case of mixed encodings
 
default_encoding = utf8
 

	
 
## issue tracker for Kallithea (leave blank to disable, absent for default)
 
#bugtracker = https://bitbucket.org/conservancy/kallithea/issues
 

	
 
## issue tracking mapping for commit messages

## comment out issue_pat, issue_server_link, issue_prefix to disable
 

	
 
## pattern to get the issues from commit messages
 
## default one used here is #<numbers> with a regex non-capturing group for `#`
 
## {id} will be all groups matched from this pattern
 

	
 
issue_pat = (?:\s*#)(\d+)
 

	
 
## server url to the issue; each {id} will be replaced with the match

## fetched from the regex, {repo} is replaced with the full repository name

## including groups, and {repo_name} is replaced with just the name of the repo
 

	
 
issue_server_link = https://issues.example.com/{repo}/issue/{id}
 

	
 
## prefix to add to the link to indicate it's a URL
 
## #314 will be replaced by <issue_prefix><id>
 

	
 
issue_prefix = #
 

	
 
## issue_pat, issue_server_link, issue_prefix can have suffixes to specify

## multiple patterns for other issue servers, wikis, or the like.

## Below is an example of how to create a wiki pattern:
 
# wiki-some-id -> https://wiki.example.com/some-id
 

	
 
#issue_pat_wiki = (?:wiki-)(.+)
 
#issue_server_link_wiki = https://wiki.example.com/{id}
 
#issue_prefix_wiki = WIKI-
 

	
 
## alternative HTTP response code for failed authentication. The default HTTP

## response is 401 HTTPUnauthorized. Currently Mercurial clients have trouble

## handling that. Set this variable to 403 to return HTTPForbidden instead
 
auth_ret_code =
 

	
 
## locking return code. When a repository is locked, return this HTTP code. 2XX

## codes don't break the transaction while 4XX codes do
 
lock_ret_code = 423
 

	
 
## allows changing the repository location on the settings page
 
allow_repo_location_change = True
 

	
 
## allows setting up custom hooks on the settings page
 
allow_custom_hooks_settings = True
 

	
 
## extra extensions for indexing, space separated and without the leading '.'.
 
# index.extensions =
 
#    gemfile
 
#    lock
 

	
 
## extra filenames for indexing, space separated
 
# index.filenames =
 
#    .dockerignore
 
#    .editorconfig
 
#    INSTALL
 
#    CHANGELOG
 

	
 
####################################
 
###        CELERY CONFIG        ####
 
####################################
 

	
 
use_celery = false
 
broker.host = localhost
 
broker.vhost = rabbitmqhost
 
broker.port = 5672
 
broker.user = rabbitmq
 
broker.password = qweqwe
 

	
 
celery.imports = kallithea.lib.celerylib.tasks
 

	
 
celery.result.backend = amqp
 
celery.result.dburi = amqp://
 
celery.result.serialier = json
 

	
 
#celery.send.task.error.emails = true
 
#celery.amqp.task.result.expires = 18000
 

	
 
celeryd.concurrency = 2
 
#celeryd.log.file = celeryd.log
 
celeryd.log.level = DEBUG
 
celeryd.max.tasks.per.child = 1
 

	
 
## tasks will never be sent to the queue, but executed locally instead.
 
celery.always.eager = false
 

	
 
####################################
 
###         BEAKER CACHE        ####
 
####################################
 

	
 
beaker.cache.data_dir = %(here)s/data/cache/data
 
beaker.cache.lock_dir = %(here)s/data/cache/lock
 

	
 
beaker.cache.regions = short_term,long_term,sql_cache_short
 

	
 
beaker.cache.short_term.type = memory
 
beaker.cache.short_term.expire = 60
 
beaker.cache.short_term.key_length = 256
 

	
 
beaker.cache.long_term.type = memory
 
beaker.cache.long_term.expire = 36000
 
beaker.cache.long_term.key_length = 256
 

	
 
beaker.cache.sql_cache_short.type = memory
 
beaker.cache.sql_cache_short.expire = 10
 
beaker.cache.sql_cache_short.key_length = 256
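The regions declared above are available to application code through Beaker's decorator API; a minimal sketch, with a made-up function name:

    from beaker.cache import cache_region

    @cache_region('long_term')
    def expensive_lookup(key):
        # the body runs at most once per key per 36000 seconds (the
        # region's 'expire' value above); later calls hit the cache
        print('computing %s' % key)
        return key.upper()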
 

	
 
####################################
 
###       BEAKER SESSION        ####
 
####################################
 

	
 
## Name of session cookie. Should be unique for a given host and path, even when running
 
## on different ports. Otherwise, cookie sessions will be shared and messed up.
 
beaker.session.key = kallithea
 
## Sessions should always only be accessible by the browser, not directly by JavaScript.
 
beaker.session.httponly = true
 
## Session lifetime. 2592000 seconds is 30 days.
 
beaker.session.timeout = 2592000
 

	
 
## Server secret used with HMAC to ensure integrity of cookies.
 
beaker.session.secret = ${app_instance_uuid}
 
## Further, encrypt the data with AES.
 
#beaker.session.encrypt_key = <key_for_encryption>
 
#beaker.session.validate_key = <validation_key>
 

	
 
## Type of storage used for the session, current types are
 
## dbm, file, memcached, database, and memory.
 

	
 
## File system storage of session data. (default)
 
#beaker.session.type = file
 

	
 
## Cookie only, store all session data inside the cookie. Requires secure secrets.
 
#beaker.session.type = cookie
 

	
 
## Database storage of session data.
 
#beaker.session.type = ext:database
 
#beaker.session.sa.url = postgresql://postgres:qwe@localhost/kallithea
 
#beaker.session.table_name = db_session
 

	
 
############################
 
## ERROR HANDLING SYSTEMS ##
 
############################
 

	
 
####################
 
### [errormator] ###
 
####################
 

	
 
## Errormator is tailored to work with Kallithea, see
 
## http://errormator.com for details on how to obtain an account.
 
## You must install the Python package `errormator_client` to make it work.
 

	
 
## errormator enabled
 
errormator = false
 

	
 
errormator.server_url = https://api.errormator.com
 
errormator.api_key = YOUR_API_KEY
 

	
 
## TWEAK AMOUNT OF INFO SENT HERE
 

	
 
## enables 404 error logging (default False)
 
errormator.report_404 = false
 

	
 
## time in seconds after which a request is considered slow (default 1)
 
errormator.slow_request_time = 1
 

	
 
## record slow requests in application
 
## (needs to be enabled for slow datastore recording and time tracking)
 
errormator.slow_requests = true
 

	
 
## enable hooking to application loggers
 
#errormator.logging = true
 

	
 
## minimum log level for log capture
 
#errormator.logging.level = WARNING
 

	
 
## send logs only from erroneous/slow requests
 
## (saves API quota for intensive logging)
 
errormator.logging_on_error = false
 

	
 
## list of additional keywords that should be grabbed from the environ object;
 
## can be a string with a comma-separated list of words in lowercase
 
## (by default the client will always send the following info:
 
## 'REMOTE_USER', 'REMOTE_ADDR', 'SERVER_NAME', 'CONTENT_TYPE' + all keys that
 
## start with HTTP*); this list can be extended with additional keywords here
 
errormator.environ_keys_whitelist =
 

	
 
## list of keywords that should be blanked from the request object;
 
## can be a string with a comma-separated list of words in lowercase
 
## (by default the client will always blank keys that contain the following words:
 
## 'password', 'passwd', 'pwd', 'auth_tkt', 'secret', 'csrf');
 
## this list can be extended with additional keywords set here
 
errormator.request_keys_blacklist =
 

	
 
## list of namespaces that should be ignored when gathering log entries;
 
## can be a string with a comma-separated list of namespaces
 
## (by default the client ignores its own entries: errormator_client.client)
 
errormator.log_namespace_blacklist =
 

	
 
################
 
### [sentry] ###
 
################
 

	
 
## Sentry is an alternative open-source error aggregator;
 
## you must install the Python packages `sentry` and `raven` to enable it
 

	
 
sentry.dsn = YOUR_DSN
 
sentry.servers =
 
sentry.name =
 
sentry.key =
 
sentry.public_key =
 
sentry.secret_key =
 
sentry.project =
 
sentry.site =
 
sentry.include_paths =
 
sentry.exclude_paths =
 

	
 
################################################################################
 
## WARNING: *THE LINE BELOW MUST BE UNCOMMENTED IN A PRODUCTION ENVIRONMENT*  ##
 
## Debug mode will enable the interactive debugging tool, allowing ANYONE to  ##
 
## execute malicious code after an exception is raised.                       ##
 
################################################################################
 
set debug = false
 

	
 
##################################
 
###       LOGVIEW CONFIG       ###
 
##################################
 

	
 
logview.sqlalchemy = #faa
 
logview.pylons.templating = #bfb
 
logview.pylons.util = #eee
 

	
 
#########################################################
 
### DB CONFIGS - EACH DB WILL HAVE ITS OWN CONFIG     ###
 
#########################################################
 

	
 
# SQLITE [default]
 
sqlalchemy.db1.url = sqlite:///%(here)s/kallithea.db?timeout=60
 

	
 
# POSTGRESQL
 
#sqlalchemy.db1.url = postgresql://user:pass@localhost/kallithea
 

	
 
# MySQL
 
#sqlalchemy.db1.url = mysql://user:pass@localhost/kallithea?charset=utf8
 

	
 
# see sqlalchemy docs for others
 

	
 
sqlalchemy.db1.echo = false
 
sqlalchemy.db1.pool_recycle = 3600
 

	
 
################################
 
### LOGGING CONFIGURATION   ####
 
################################
 

	
 
[loggers]
 
keys = root, routes, kallithea, sqlalchemy, beaker, templates, whoosh_indexer
 

	
 
[handlers]
 
keys = console, console_sql
 

	
 
[formatters]
 
keys = generic, color_formatter, color_formatter_sql
 

	
 
#############
 
## LOGGERS ##
 
#############
 

	
 
[logger_root]
 
level = NOTSET
 
handlers = console
 

	
 
[logger_routes]
 
level = DEBUG
 
handlers =
 
qualname = routes.middleware
 
## "level = DEBUG" logs the route matched and routing variables.
 
propagate = 1
 

	
 
[logger_beaker]
 
level = DEBUG
 
handlers =
 
qualname = beaker.container
 
propagate = 1
 

	
 
[logger_templates]
 
level = INFO
 
handlers =
 
qualname = pylons.templating
 
propagate = 1
 

	
 
[logger_kallithea]
 
level = DEBUG
 
handlers =
 
qualname = kallithea
 
propagate = 1
 

	
 
[logger_sqlalchemy]
 
level = INFO
 
handlers = console_sql
 
qualname = sqlalchemy.engine
 
propagate = 0
 

	
 
[logger_whoosh_indexer]
 
level = DEBUG
 
handlers =
 
qualname = whoosh_indexer
 
propagate = 1
 

	
 
##############
 
## HANDLERS ##
 
##############
 

	
 
[handler_console]
 
class = StreamHandler
 
args = (sys.stderr,)
 
level = INFO
 
formatter = generic
 

	
 
[handler_console_sql]
 
class = StreamHandler
 
args = (sys.stderr,)
 
level = WARN
 
formatter = generic
 

	
 
################
 
## FORMATTERS ##
 
################
 

	
 
[formatter_generic]
 
format = %(asctime)s.%(msecs)03d %(levelname)-5.5s [%(name)s] %(message)s
 
datefmt = %Y-%m-%d %H:%M:%S
 

	
 
[formatter_color_formatter]
 
class = kallithea.lib.colored_formatter.ColorFormatter
 
format = %(asctime)s.%(msecs)03d %(levelname)-5.5s [%(name)s] %(message)s
 
datefmt = %Y-%m-%d %H:%M:%S
 

	
 
[formatter_color_formatter_sql]
 
class = kallithea.lib.colored_formatter.ColorFormatterSql
 
format = %(asctime)s.%(msecs)03d %(levelname)-5.5s [%(name)s] %(message)s
 
datefmt = %Y-%m-%d %H:%M:%S
kallithea/model/db.py
Show inline comments
 
@@ -674,1541 +674,1537 @@ class User(Base, BaseModel):
 
            user_id=user.user_id,
 
            username=user.username,
 
            firstname=user.name,
 
            lastname=user.lastname,
 
            email=user.email,
 
            emails=user.emails,
 
            active=user.active,
 
            admin=user.admin,
 
        )
 
        if details:
 
            data.update(dict(
 
                extern_type=user.extern_type,
 
                extern_name=user.extern_name,
 
                api_key=user.api_key,
 
                api_keys=user.api_keys,
 
                last_login=user.last_login,
 
                ip_addresses=user.ip_addresses
 
                ))
 
        return data
 

	
 
    def __json__(self):
 
        data = dict(
 
            full_name=self.full_name,
 
            full_name_or_username=self.full_name_or_username,
 
            short_contact=self.short_contact,
 
            full_contact=self.full_contact
 
        )
 
        data.update(self.get_api_data())
 
        return data
 

	
 

	
 
class UserApiKeys(Base, BaseModel):
 
    __tablename__ = 'user_api_keys'
 
    __table_args__ = (
 
        Index('uak_api_key_idx', 'api_key'),
 
        Index('uak_api_key_expires_idx', 'api_key', 'expires'),
 
        _table_args_default_dict,
 
    )
 
    __mapper_args__ = {}
 

	
 
    user_api_key_id = Column(Integer(), unique=True, primary_key=True)
 
    user_id = Column(Integer(), ForeignKey('users.user_id'), nullable=False)
 
    api_key = Column(String(255), nullable=False, unique=True)
 
    description = Column(UnicodeText(1024), nullable=False)
 
    expires = Column(Float(53), nullable=False)
 
    created_on = Column(DateTime(timezone=False), nullable=False, default=datetime.datetime.now)
 

	
 
    user = relationship('User')
 

	
 
    @property
 
    def expired(self):
 
        if self.expires == -1:
 
            return False
 
        return time.time() > self.expires
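The -1 value is a sentinel for a key that never expires; anything else is compared against the current Unix time. The same check as a standalone sketch:

    import time

    def api_key_expired(expires):
        # mirrors UserApiKeys.expired: -1 marks a non-expiring key
        if expires == -1:
            return False
        return time.time() > expires

    assert api_key_expired(-1) is False
    assert api_key_expired(0.0) is True  # the epoch is long past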
 

	
 

	
 
class UserEmailMap(Base, BaseModel):
 
    __tablename__ = 'user_email_map'
 
    __table_args__ = (
 
        Index('uem_email_idx', 'email'),
 
        _table_args_default_dict,
 
    )
 
    __mapper_args__ = {}
 

	
 
    email_id = Column(Integer(), unique=True, primary_key=True)
 
    user_id = Column(Integer(), ForeignKey('users.user_id'), nullable=False)
 
    _email = Column("email", String(255), nullable=False, unique=True)
 
    user = relationship('User')
 

	
 
    @validates('_email')
 
    def validate_email(self, key, email):
 
        # check that this email is not the main one
 
        main_email = Session().query(User).filter(User.email == email).scalar()
 
        if main_email is not None:
 
            raise AttributeError('email %s is already present in the user table' % email)
 
        return email
 

	
 
    @hybrid_property
 
    def email(self):
 
        return self._email
 

	
 
    @email.setter
 
    def email(self, val):
 
        self._email = val.lower() if val else None
 

	
 

	
 
class UserIpMap(Base, BaseModel):
 
    __tablename__ = 'user_ip_map'
 
    __table_args__ = (
 
        UniqueConstraint('user_id', 'ip_addr'),
 
        _table_args_default_dict,
 
    )
 
    __mapper_args__ = {}
 

	
 
    ip_id = Column(Integer(), unique=True, primary_key=True)
 
    user_id = Column(Integer(), ForeignKey('users.user_id'), nullable=False)
 
    ip_addr = Column(String(255), nullable=False)
 
    active = Column(Boolean(), nullable=False, default=True)
 
    user = relationship('User')
 

	
 
    @classmethod
 
    def _get_ip_range(cls, ip_addr):
 
        from kallithea.lib import ipaddr
 
        net = ipaddr.IPNetwork(address=ip_addr)
 
        return [str(net.network), str(net.broadcast)]
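For reference, a standalone approximation of _get_ip_range using the Python 3 standard-library ipaddress module (the model itself uses the bundled kallithea.lib.ipaddr):

    import ipaddress

    def get_ip_range(ip_addr):
        # approximates UserIpMap._get_ip_range; strict=False tolerates
        # host bits set in the network part of the address
        net = ipaddress.ip_network(ip_addr, strict=False)
        return [str(net.network_address), str(net.broadcast_address)]

    assert get_ip_range('192.0.2.0/24') == ['192.0.2.0', '192.0.2.255']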
 

	
 
    def __json__(self):
 
        return dict(
 
          ip_addr=self.ip_addr,
 
          ip_range=self._get_ip_range(self.ip_addr)
 
        )
 

	
 
    def __unicode__(self):
 
        return u"<%s('user_id:%s=>%s')>" % (self.__class__.__name__,
 
                                            self.user_id, self.ip_addr)
 

	
 
class UserLog(Base, BaseModel):
 
    __tablename__ = 'user_logs'
 
    __table_args__ = (
 
        _table_args_default_dict,
 
    )
 

	
 
    user_log_id = Column(Integer(), unique=True, primary_key=True)
 
    user_id = Column(Integer(), ForeignKey('users.user_id'), nullable=True)
 
    username = Column(String(255), nullable=False)
 
    repository_id = Column(Integer(), ForeignKey('repositories.repo_id'), nullable=True)
 
    repository_name = Column(Unicode(255), nullable=False)
 
    user_ip = Column(String(255), nullable=True)
 
    action = Column(UnicodeText(1200000), nullable=False)
 
    action_date = Column(DateTime(timezone=False), nullable=False)
 

	
 
    def __unicode__(self):
 
        return u"<%s('id:%s:%s')>" % (self.__class__.__name__,
 
                                      self.repository_name,
 
                                      self.action)
 

	
 
    @property
 
    def action_as_day(self):
 
        return datetime.date(*self.action_date.timetuple()[:3])
 

	
 
    user = relationship('User')
 
    repository = relationship('Repository', cascade='')
 

	
 

	
 
class UserGroup(Base, BaseModel):
 
    __tablename__ = 'users_groups'
 
    __table_args__ = (
 
        _table_args_default_dict,
 
    )
 

	
 
    users_group_id = Column(Integer(), unique=True, primary_key=True)
 
    users_group_name = Column(Unicode(255), nullable=False, unique=True)
 
    user_group_description = Column(Unicode(10000), nullable=True) # FIXME: not nullable?
 
    users_group_active = Column(Boolean(), nullable=False)
 
    inherit_default_permissions = Column("users_group_inherit_default_permissions", Boolean(), nullable=False, default=True)
 
    user_id = Column(Integer(), ForeignKey('users.user_id'), nullable=False)
 
    created_on = Column(DateTime(timezone=False), nullable=False, default=datetime.datetime.now)
 
    _group_data = Column("group_data", LargeBinary(), nullable=True)  # JSON data # FIXME: not nullable?
 

	
 
    members = relationship('UserGroupMember', cascade="all, delete-orphan")
 
    users_group_to_perm = relationship('UserGroupToPerm', cascade='all')
 
    users_group_repo_to_perm = relationship('UserGroupRepoToPerm', cascade='all')
 
    users_group_repo_group_to_perm = relationship('UserGroupRepoGroupToPerm', cascade='all')
 
    user_user_group_to_perm = relationship('UserUserGroupToPerm', cascade='all')
 
    user_group_user_group_to_perm = relationship('UserGroupUserGroupToPerm', primaryjoin="UserGroupUserGroupToPerm.target_user_group_id==UserGroup.users_group_id", cascade='all')
 

	
 
    user = relationship('User')
 

	
 
    @hybrid_property
 
    def group_data(self):
 
        if not self._group_data:
 
            return {}
 

	
 
        try:
 
            return json.loads(self._group_data)
 
        except TypeError:
 
            return {}
 

	
 
    @group_data.setter
 
    def group_data(self, val):
 
        try:
 
            self._group_data = json.dumps(val)
 
        except Exception:
 
            log.error(traceback.format_exc())
 

	
 
    def __unicode__(self):
 
        return u"<%s('id:%s:%s')>" % (self.__class__.__name__,
 
                                      self.users_group_id,
 
                                      self.users_group_name)
 

	
 
    @classmethod
 
    def get_by_group_name(cls, group_name, cache=False,
 
                          case_insensitive=False):
 
        if case_insensitive:
 
            q = cls.query().filter(func.lower(cls.users_group_name) == func.lower(group_name))
 
        else:
 
            q = cls.query().filter(cls.users_group_name == group_name)
 
        if cache:
 
            q = q.options(FromCache(
 
                            "sql_cache_short",
 
                            "get_group_%s" % _hash_key(group_name)
 
                          )
 
            )
 
        return q.scalar()
 

	
 
    @classmethod
 
    def get(cls, user_group_id, cache=False):
 
        user_group = cls.query()
 
        if cache:
 
            user_group = user_group.options(FromCache("sql_cache_short",
 
                                    "get_users_group_%s" % user_group_id))
 
        return user_group.get(user_group_id)
 

	
 
    def get_api_data(self, with_members=True):
 
        user_group = self
 

	
 
        data = dict(
 
            users_group_id=user_group.users_group_id,
 
            group_name=user_group.users_group_name,
 
            group_description=user_group.user_group_description,
 
            active=user_group.users_group_active,
 
            owner=user_group.user.username,
 
        )
 
        if with_members:
 
            members = []
 
            for user in user_group.members:
 
                user = user.user
 
                members.append(user.get_api_data())
 
            data['members'] = members
 

	
 
        return data
 

	
 

	
 
class UserGroupMember(Base, BaseModel):
 
    __tablename__ = 'users_groups_members'
 
    __table_args__ = (
 
        _table_args_default_dict,
 
    )
 

	
 
    users_group_member_id = Column(Integer(), unique=True, primary_key=True)
 
    users_group_id = Column(Integer(), ForeignKey('users_groups.users_group_id'), nullable=False)
 
    user_id = Column(Integer(), ForeignKey('users.user_id'), nullable=False)
 

	
 
    user = relationship('User')
 
    users_group = relationship('UserGroup')
 

	
 
    def __init__(self, gr_id='', u_id=''):
 
        self.users_group_id = gr_id
 
        self.user_id = u_id
 

	
 

	
 
class RepositoryField(Base, BaseModel):
 
    __tablename__ = 'repositories_fields'
 
    __table_args__ = (
 
        UniqueConstraint('repository_id', 'field_key'),  # no-multi field
 
        _table_args_default_dict,
 
    )
 

	
 
    PREFIX = 'ex_'  # prefix used in form to not conflict with already existing fields
 

	
 
    repo_field_id = Column(Integer(), unique=True, primary_key=True)
 
    repository_id = Column(Integer(), ForeignKey('repositories.repo_id'), nullable=False)
 
    field_key = Column(String(250), nullable=False)
 
    field_label = Column(String(1024), nullable=False)
 
    field_value = Column(String(10000), nullable=False)
 
    field_desc = Column(String(1024), nullable=False)
 
    field_type = Column(String(255), nullable=False)
 
    created_on = Column(DateTime(timezone=False), nullable=False, default=datetime.datetime.now)
 

	
 
    repository = relationship('Repository')
 

	
 
    @property
 
    def field_key_prefixed(self):
 
        return 'ex_%s' % self.field_key
 

	
 
    @classmethod
 
    def un_prefix_key(cls, key):
 
        if key.startswith(cls.PREFIX):
 
            return key[len(cls.PREFIX):]
 
        return key
 

	
 
    @classmethod
 
    def get_by_key_name(cls, key, repo):
 
        row = cls.query() \
 
                .filter(cls.repository == repo) \
 
                .filter(cls.field_key == key).scalar()
 
        return row
 

	
 

	
 
class Repository(Base, BaseModel):
 
    __tablename__ = 'repositories'
 
    __table_args__ = (
 
        Index('r_repo_name_idx', 'repo_name'),
 
        _table_args_default_dict,
 
    )
 

	
 
    DEFAULT_CLONE_URI = '{scheme}://{user}@{netloc}/{repo}'
 
    DEFAULT_CLONE_URI_ID = '{scheme}://{user}@{netloc}/_{repoid}'
 

	
 
    STATE_CREATED = 'repo_state_created'
 
    STATE_PENDING = 'repo_state_pending'
 
    STATE_ERROR = 'repo_state_error'
 

	
 
    repo_id = Column(Integer(), unique=True, primary_key=True)
 
    repo_name = Column(Unicode(255), nullable=False, unique=True)
 
    repo_state = Column(String(255), nullable=False)
 

	
 
    clone_uri = Column(String(255), nullable=True) # FIXME: not nullable?
 
    repo_type = Column(String(255), nullable=False)
 
    user_id = Column(Integer(), ForeignKey('users.user_id'), nullable=False)
 
    private = Column(Boolean(), nullable=False)
 
    enable_statistics = Column("statistics", Boolean(), nullable=False, default=True)
 
    enable_downloads = Column("downloads", Boolean(), nullable=False, default=True)
 
    description = Column(Unicode(10000), nullable=False)
 
    created_on = Column(DateTime(timezone=False), nullable=False, default=datetime.datetime.now)
 
    updated_on = Column(DateTime(timezone=False), nullable=False, default=datetime.datetime.now)
 
    _landing_revision = Column("landing_revision", String(255), nullable=False)
 
    enable_locking = Column(Boolean(), nullable=False, default=False)
 
    _locked = Column("locked", String(255), nullable=True) # FIXME: not nullable?
 
    _changeset_cache = Column("changeset_cache", LargeBinary(), nullable=True) # JSON data # FIXME: not nullable?
 

	
 
    fork_id = Column(Integer(), ForeignKey('repositories.repo_id'), nullable=True)
 
    group_id = Column(Integer(), ForeignKey('groups.group_id'), nullable=True)
 

	
 
    user = relationship('User')
 
    fork = relationship('Repository', remote_side=repo_id)
 
    group = relationship('RepoGroup')
 
    repo_to_perm = relationship('UserRepoToPerm', cascade='all', order_by='UserRepoToPerm.repo_to_perm_id')
 
    users_group_to_perm = relationship('UserGroupRepoToPerm', cascade='all')
 
    stats = relationship('Statistics', cascade='all', uselist=False)
 

	
 
    followers = relationship('UserFollowing',
 
                             primaryjoin='UserFollowing.follows_repo_id==Repository.repo_id',
 
                             cascade='all')
 
    extra_fields = relationship('RepositoryField',
 
                                cascade="all, delete-orphan")
 

	
 
    logs = relationship('UserLog')
 
    comments = relationship('ChangesetComment', cascade="all, delete-orphan")
 

	
 
    pull_requests_org = relationship('PullRequest',
 
                    primaryjoin='PullRequest.org_repo_id==Repository.repo_id',
 
                    cascade="all, delete-orphan")
 

	
 
    pull_requests_other = relationship('PullRequest',
 
                    primaryjoin='PullRequest.other_repo_id==Repository.repo_id',
 
                    cascade="all, delete-orphan")
 

	
 
    def __unicode__(self):
 
        return u"<%s('%s:%s')>" % (self.__class__.__name__, self.repo_id,
 
                                   safe_unicode(self.repo_name))
 

	
 
    @hybrid_property
 
    def landing_rev(self):
 
        # should always return [rev_type, rev]
 
        if self._landing_revision:
 
            _rev_info = self._landing_revision.split(':')
 
            if len(_rev_info) < 2:
 
                _rev_info.insert(0, 'rev')
 
            return [_rev_info[0], _rev_info[1]]
 
        return [None, None]
 

	
 
    @landing_rev.setter
 
    def landing_rev(self, val):
 
        if ':' not in val:
 
            raise ValueError('value must be delimited with `:` and consist '
 
                             'of <rev_type>:<rev>, got %s instead' % val)
 
        self._landing_revision = val
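A standalone round-trip sketch of the '<rev_type>:<rev>' encoding handled by this property (mirroring its logic rather than importing the model):

    def split_landing_rev(landing_revision):
        # mirrors Repository.landing_rev: 'rev' is the default rev_type
        if landing_revision:
            rev_info = landing_revision.split(':')
            if len(rev_info) < 2:
                rev_info.insert(0, 'rev')
            return [rev_info[0], rev_info[1]]
        return [None, None]

    assert split_landing_rev('branch:default') == ['branch', 'default']
    assert split_landing_rev('tip') == ['rev', 'tip']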
 

	
 
    @hybrid_property
 
    def locked(self):
 
        # should always return [user_id, timelocked]
 
        if self._locked:
 
            _lock_info = self._locked.split(':')
 
            return int(_lock_info[0]), _lock_info[1]
 
        return [None, None]
 

	
 
    @locked.setter
 
    def locked(self, val):
 
        if val and isinstance(val, (list, tuple)):
 
            self._locked = ':'.join(map(str, val))
 
        else:
 
            self._locked = None
 

	
 
    @hybrid_property
 
    def changeset_cache(self):
 
        try:
 
            cs_cache = json.loads(self._changeset_cache) # might raise on bad data
 
            cs_cache['raw_id'] # verify data, raise exception on error
 
            return cs_cache
 
        except (TypeError, KeyError, ValueError):
 
            return EmptyChangeset().__json__()
 

	
 
    @changeset_cache.setter
 
    def changeset_cache(self, val):
 
        try:
 
            self._changeset_cache = json.dumps(val)
 
        except Exception:
 
            log.error(traceback.format_exc())
 

	
 
    @classmethod
 
    def url_sep(cls):
 
        return URL_SEP
 

	
 
    @classmethod
 
    def normalize_repo_name(cls, repo_name):
 
        """
 
        Normalizes OS-specific repo_name to the format stored internally in the
 
        database, using URL_SEP
 

	
 
        :param cls:
 
        :param repo_name:
 
        """
 
        return cls.url_sep().join(repo_name.split(os.sep))
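The normalization is a plain split-and-join; a standalone sketch, assuming URL_SEP is '/' (matching the module-level constant):

    import os

    URL_SEP = '/'  # assumption: matches the module-level constant

    def normalize_repo_name(repo_name):
        # mirrors Repository.normalize_repo_name: repo names are stored
        # with '/' regardless of the platform's os.sep
        return URL_SEP.join(repo_name.split(os.sep))

    # on Windows 'group\\repo' becomes 'group/repo';
    # on POSIX systems the name is returned unchanged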
 

	
 
    @classmethod
 
    def get_by_repo_name(cls, repo_name):
 
        q = Session().query(cls).filter(cls.repo_name == repo_name)
 
        q = q.options(joinedload(Repository.fork)) \
 
                .options(joinedload(Repository.user)) \
 
                .options(joinedload(Repository.group))
 
        return q.scalar()
 

	
 
    @classmethod
 
    def get_by_full_path(cls, repo_full_path):
 
        repo_name = repo_full_path.split(cls.base_path(), 1)[-1]
 
        repo_name = cls.normalize_repo_name(repo_name)
 
        return cls.get_by_repo_name(repo_name.strip(URL_SEP))
 

	
 
    @classmethod
 
    def get_repo_forks(cls, repo_id):
 
        return cls.query().filter(Repository.fork_id == repo_id)
 

	
 
    @classmethod
 
    def base_path(cls):
 
        """
 
        Returns base path where all repos are stored
 

	
 
        :param cls:
 
        """
 
        q = Session().query(Ui) \
 
            .filter(Ui.ui_key == cls.url_sep())
 
        q = q.options(FromCache("sql_cache_short", "repository_repo_path"))
 
        return q.one().ui_value
 

	
 
    @property
 
    def forks(self):
 
        """
 
        Return forks of this repo
 
        """
 
        return Repository.get_repo_forks(self.repo_id)
 

	
 
    @property
 
    def parent(self):
 
        """
 
        Returns fork parent
 
        """
 
        return self.fork
 

	
 
    @property
 
    def just_name(self):
 
        return self.repo_name.split(Repository.url_sep())[-1]
 

	
 
    @property
 
    def groups_with_parents(self):
 
        groups = []
 
        if self.group is None:
 
            return groups
 

	
 
        cur_gr = self.group
 
        groups.insert(0, cur_gr)
 
        while 1:
 
            gr = getattr(cur_gr, 'parent_group', None)
 
            cur_gr = cur_gr.parent_group
 
            if gr is None:
 
                break
 
            groups.insert(0, gr)
 

	
 
        return groups
 

	
 
    @property
 
    def groups_and_repo(self):
 
        return self.groups_with_parents, self.just_name, self.repo_name
 

	
 
    @LazyProperty
 
    def repo_path(self):
 
        """
 
        Returns the full base path for this repository, i.e. where it actually
 
        exists on the filesystem
 
        """
 
        q = Session().query(Ui).filter(Ui.ui_key ==
 
                                              Repository.url_sep())
 
        q = q.options(FromCache("sql_cache_short", "repository_repo_path"))
 
        return q.one().ui_value
 

	
 
    @property
 
    def repo_full_path(self):
 
        p = [self.repo_path]
 
        # we need to split the name by / since this is how we store the
 
        # names in the database, but that eventually needs to be converted
 
        # into a valid system path
 
        p += self.repo_name.split(Repository.url_sep())
 
        return os.path.join(*map(safe_unicode, p))
 

	
 
    @property
 
    def cache_keys(self):
 
        """
 
        Returns associated cache keys for that repo
 
        """
 
        return CacheInvalidation.query() \
 
            .filter(CacheInvalidation.cache_args == self.repo_name) \
 
            .order_by(CacheInvalidation.cache_key) \
 
            .all()
 

	
 
    def get_new_name(self, repo_name):
 
        """
 
        returns new full repository name based on assigned group and new name
 

	
 
        :param repo_name:
 
        """
 
        path_prefix = self.group.full_path_splitted if self.group else []
 
        return Repository.url_sep().join(path_prefix + [repo_name])
 

	
 
    @property
 
    def _ui(self):
 
        """
 
        Creates a db-based ui object for this repository
 
        """
 
        from kallithea.lib.utils import make_ui
 
        return make_ui('db', clear_session=False)
 

	
 
    @classmethod
 
    def is_valid(cls, repo_name):
 
        """
 
        returns True if given repo name is a valid filesystem repository
 

	
 
        :param cls:
 
        :param repo_name:
 
        """
 
        from kallithea.lib.utils import is_valid_repo
 

	
 
        return is_valid_repo(repo_name, cls.base_path())
 

	
 
    def get_api_data(self):
 
        """
 
        Common function for generating repo api data
 

	
 
        """
 
        repo = self
 
        data = dict(
 
            repo_id=repo.repo_id,
 
            repo_name=repo.repo_name,
 
            repo_type=repo.repo_type,
 
            clone_uri=repo.clone_uri,
 
            private=repo.private,
 
            created_on=repo.created_on,
 
            description=repo.description,
 
            landing_rev=repo.landing_rev,
 
            owner=repo.user.username,
 
            fork_of=repo.fork.repo_name if repo.fork else None,
 
            enable_statistics=repo.enable_statistics,
 
            enable_locking=repo.enable_locking,
 
            enable_downloads=repo.enable_downloads,
 
            last_changeset=repo.changeset_cache,
 
            locked_by=User.get(self.locked[0]).get_api_data() \
 
                if self.locked[0] else None,
 
            locked_date=time_to_datetime(self.locked[1]) \
 
                if self.locked[1] else None
 
        )
 
        rc_config = Setting.get_app_settings()
 
        repository_fields = str2bool(rc_config.get('repository_fields'))
 
        if repository_fields:
 
            for f in self.extra_fields:
 
                data[f.field_key_prefixed] = f.field_value
 

	
 
        return data
 

	
 
    @classmethod
 
    def lock(cls, repo, user_id, lock_time=None):
 
        if lock_time is None:
 
            lock_time = time.time()
 
        repo.locked = [user_id, lock_time]
 
        Session().add(repo)
 
        Session().commit()
 

	
 
    @classmethod
 
    def unlock(cls, repo):
 
        repo.locked = None
 
        Session().add(repo)
 
        Session().commit()
 

	
 
    @classmethod
 
    def getlock(cls, repo):
 
        return repo.locked
 

	
 
    @property
 
    def last_db_change(self):
 
        return self.updated_on
 

	
 
    @property
 
    def clone_uri_hidden(self):
 
        clone_uri = self.clone_uri
 
        if clone_uri:
 
            import urlobject
 
            url_obj = urlobject.URLObject(self.clone_uri)
 
            if url_obj.password:
 
                clone_uri = url_obj.with_password('*****')
 
        return clone_uri
 

	
 
    def clone_url(self, **override):
 
        import kallithea.lib.helpers as h
 
        qualified_home_url = h.canonical_url('home')
 

	
 
        uri_tmpl = None
 
        if 'with_id' in override:
 
            uri_tmpl = self.DEFAULT_CLONE_URI_ID
 
            del override['with_id']
 

	
 
        if 'uri_tmpl' in override:
 
            uri_tmpl = override['uri_tmpl']
 
            del override['uri_tmpl']
 

	
 
        # we didn't override our template via **override
 
        if not uri_tmpl:
 
            uri_tmpl = self.DEFAULT_CLONE_URI
 
            try:
 
                from pylons import tmpl_context as c
 
                uri_tmpl = c.clone_uri_tmpl
 
            except AttributeError:
 
                # in any case if we call this outside of request context,
 
                # i.e. without tmpl_context set up
 
                pass
 

	
 
        return get_clone_url(uri_tmpl=uri_tmpl,
 
                             qualified_home_url=qualified_home_url,
 
                             repo_name=self.repo_name,
 
                             repo_id=self.repo_id, **override)
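A rough standalone illustration of how the clone URI template placeholders expand (get_clone_url's exact behavior is not reproduced here):

    uri_tmpl = '{scheme}://{user}@{netloc}/{repo}'  # DEFAULT_CLONE_URI
    print(uri_tmpl.format(scheme='https', user='jane',
                          netloc='kallithea.example.com', repo='group/repo'))
    # https://jane@kallithea.example.com/group/repo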
 

	
 
    def set_state(self, state):
 
        self.repo_state = state
 
        Session().add(self)
 
    #==========================================================================
 
    # SCM PROPERTIES
 
    #==========================================================================
 

	
 
    def get_changeset(self, rev=None):
 
        return get_changeset_safe(self.scm_instance, rev)
 

	
 
    def get_landing_changeset(self):
 
        """
 
        Returns landing changeset, or if that doesn't exist returns the tip
 
        """
 
        _rev_type, _rev = self.landing_rev
 
        cs = self.get_changeset(_rev)
 
        if isinstance(cs, EmptyChangeset):
 
            return self.get_changeset()
 
        return cs
 

	
 
    def update_changeset_cache(self, cs_cache=None):
 
        """
 
        Update cache of last changeset for repository, keys should be::
 

	
 
            short_id
 
            raw_id
 
            revision
 
            message
 
            date
 
            author
 

	
 
        :param cs_cache:
 
        """
 
        from kallithea.lib.vcs.backends.base import BaseChangeset
 
        if cs_cache is None:
 
            cs_cache = EmptyChangeset()
 
            # use no-cache version here
 
            scm_repo = self.scm_instance_no_cache()
 
            if scm_repo:
 
                cs_cache = scm_repo.get_changeset()
 

	
 
        if isinstance(cs_cache, BaseChangeset):
 
            cs_cache = cs_cache.__json__()
 

	
 
        if (not self.changeset_cache or cs_cache['raw_id'] != self.changeset_cache['raw_id']):
 
            _default = datetime.datetime.fromtimestamp(0)
 
            last_change = cs_cache.get('date') or _default
 
            log.debug('updated repo %s with new cs cache %s',
 
                      self.repo_name, cs_cache)
 
            self.updated_on = last_change
 
            self.changeset_cache = cs_cache
 
            Session().add(self)
 
            Session().commit()
 
        else:
 
            log.debug('changeset_cache for %s already up to date with %s',
 
                      self.repo_name, cs_cache['raw_id'])
 

	
 
    @property
 
    def tip(self):
 
        return self.get_changeset('tip')
 

	
 
    @property
 
    def author(self):
 
        return self.tip.author
 

	
 
    @property
 
    def last_change(self):
 
        return self.scm_instance.last_change
 

	
 
    def get_comments(self, revisions=None):
 
        """
 
        Returns comments for this repository grouped by revisions
 

	
 
        :param revisions: filter query by revisions only
 
        """
 
        cmts = ChangesetComment.query() \
 
            .filter(ChangesetComment.repo == self)
 
        if revisions is not None:
 
            if not revisions:
 
                return {} # don't use sql 'in' on empty set
 
            cmts = cmts.filter(ChangesetComment.revision.in_(revisions))
 
        grouped = collections.defaultdict(list)
 
        for cmt in cmts.all():
 
            grouped[cmt.revision].append(cmt)
 
        return grouped
 

	
 
    def statuses(self, revisions):
 
        """
 
        Returns statuses for this repository.
 
        PRs without any votes do _not_ show up as unreviewed.
 

	
 
        :param revisions: list of revisions to get statuses for
 
        """
 
        if not revisions:
 
            return {}
 

	
 
        statuses = ChangesetStatus.query() \
 
            .filter(ChangesetStatus.repo == self) \
 
            .filter(ChangesetStatus.version == 0) \
 
            .filter(ChangesetStatus.revision.in_(revisions))
 

	
 
        grouped = {}
 
        for stat in statuses.all():
 
            pr_id = pr_nice_id = pr_repo = None
 
            if stat.pull_request:
 
                pr_id = stat.pull_request.pull_request_id
 
                pr_nice_id = PullRequest.make_nice_id(pr_id)
 
                pr_repo = stat.pull_request.other_repo.repo_name
 
            grouped[stat.revision] = [str(stat.status), stat.status_lbl,
 
                                      pr_id, pr_repo, pr_nice_id,
 
                                      stat.author]
 
        return grouped
 

	
 
    def _repo_size(self):
 
        from kallithea.lib import helpers as h
 
        log.debug('calculating repository size...')
 
        return h.format_byte_size(self.scm_instance.size)
 

	
 
    #==========================================================================
 
    # SCM CACHE INSTANCE
 
    #==========================================================================
 

	
 
    def set_invalidate(self):
 
        """
 
        Mark caches of this repo as invalid.
 
        """
 
        CacheInvalidation.set_invalidate(self.repo_name)
 

	
 
    @property
 
    def scm_instance(self):
 
        import kallithea
 
        full_cache = str2bool(kallithea.CONFIG.get('vcs_full_cache'))
 
        if full_cache:
 
            return self.scm_instance_cached()
 
        return self.__get_instance()
 

	
 
    def scm_instance_cached(self, valid_cache_keys=None):
 
        @cache_region('long_term', 'scm_instance_cached')
 
        def _c(repo_name): # repo_name is just for the cache key
 
            log.debug('Creating new %s scm_instance and populating cache', repo_name)
 
            return self.scm_instance_no_cache()
 
        rn = self.repo_name
 

	
 
        valid = CacheInvalidation.test_and_set_valid(rn, None, valid_cache_keys=valid_cache_keys)
 
        if not valid:
 
            log.debug('Cache for %s invalidated, getting new object', rn)
 
            region_invalidate(_c, None, 'scm_instance_cached', rn)
 
        else:
 
            log.debug('Trying to get scm_instance of %s from cache', rn)
 
        return _c(rn)
 

	
 
    def scm_instance_no_cache(self):
 
        repo_full_path = safe_str(self.repo_full_path)
 
        alias = get_scm(repo_full_path)[0]
 
        log.debug('Creating instance of %s repository from %s',
 
                  alias, self.repo_full_path)
 
        backend = get_backend(alias)
 

	
 
        if alias == 'hg':
 
            repo = backend(repo_full_path, create=False,
 
                           baseui=self._ui)
 
        else:
 
            repo = backend(repo_full_path, create=False)
 

	
 
        return repo
 

	
 
    def __json__(self):
 
        return dict(landing_rev = self.landing_rev)
 

	
 
class RepoGroup(Base, BaseModel):
 
    __tablename__ = 'groups'
 
    __table_args__ = (
 
        UniqueConstraint('group_name', 'group_parent_id'),
 
        CheckConstraint('group_id != group_parent_id'),
 
        _table_args_default_dict,
 
    )
 
    __mapper_args__ = {'order_by': 'group_name'}
 

	
 
    SEP = ' &raquo; '
 

	
 
    group_id = Column(Integer(), unique=True, primary_key=True)
 
    group_name = Column(Unicode(255), nullable=False, unique=True)
 
    group_parent_id = Column(Integer(), ForeignKey('groups.group_id'), nullable=True)
 
    group_description = Column(Unicode(10000), nullable=False)
 
    enable_locking = Column(Boolean(), nullable=False, default=False)
 
    user_id = Column(Integer(), ForeignKey('users.user_id'), nullable=False)
 
    created_on = Column(DateTime(timezone=False), nullable=False, default=datetime.datetime.now)
 

	
 
    repo_group_to_perm = relationship('UserRepoGroupToPerm', cascade='all', order_by='UserRepoGroupToPerm.group_to_perm_id')
 
    users_group_to_perm = relationship('UserGroupRepoGroupToPerm', cascade='all')
 
    parent_group = relationship('RepoGroup', remote_side=group_id)
 
    user = relationship('User')
 

	
 
    def __init__(self, group_name='', parent_group=None):
 
        self.group_name = group_name
 
        self.parent_group = parent_group
 

	
 
    def __unicode__(self):
 
        return u"<%s('id:%s:%s')>" % (self.__class__.__name__, self.group_id,
 
                                      self.group_name)
 

	
 
    @classmethod
 
    def _generate_choice(cls, repo_group):
 
        """Return tuple with group_id and name as html literal"""
 
        from webhelpers.html import literal
 
        if repo_group is None:
 
            return (-1, u'-- %s --' % _('top level'))
 
        return repo_group.group_id, literal(cls.SEP.join(repo_group.full_path_splitted))
 

	
 
    @classmethod
 
    def groups_choices(cls, groups):
 
        """Return tuples with group_id and name as html literal."""
 
        return sorted((cls._generate_choice(g) for g in groups),
 
                      key=lambda c: c[1].split(cls.SEP))
 

	
 
    @classmethod
 
    def url_sep(cls):
 
        return URL_SEP
 

	
 
    @classmethod
 
    def get_by_group_name(cls, group_name, cache=False, case_insensitive=False):
 
        if case_insensitive:
 
            gr = cls.query() \
 
                .filter(func.lower(cls.group_name) == func.lower(group_name))
 
        else:
 
            gr = cls.query() \
 
                .filter(cls.group_name == group_name)
 
        if cache:
 
            gr = gr.options(FromCache(
 
                            "sql_cache_short",
 
                            "get_group_%s" % _hash_key(group_name)
 
                            )
 
            )
 
        return gr.scalar()
 

	
 
    @property
 
    def parents(self):
 
        parents_recursion_limit = 10
 
        groups = []
 
        if self.parent_group is None:
 
            return groups
 
        cur_gr = self.parent_group
 
        groups.insert(0, cur_gr)
 
        cnt = 0
 
        while 1:
 
            cnt += 1
 
            gr = getattr(cur_gr, 'parent_group', None)
 
            cur_gr = cur_gr.parent_group
 
            if gr is None:
 
                break
 
            if cnt == parents_recursion_limit:
 
                # this will prevent accidental infinite loops
 
                log.error('more than %s parents found for group %s, stopping '
 
                          'recursive parent fetching', parents_recursion_limit, self)
 
                break
 

	
 
            groups.insert(0, gr)
 
        return groups
 

	
 
    @property
 
    def children(self):
 
        return RepoGroup.query().filter(RepoGroup.parent_group == self)
 

	
 
    @property
 
    def name(self):
 
        return self.group_name.split(RepoGroup.url_sep())[-1]
 

	
 
    @property
 
    def full_path(self):
 
        return self.group_name
 

	
 
    @property
 
    def full_path_splitted(self):
 
        return self.group_name.split(RepoGroup.url_sep())
 

	
 
    @property
 
    def repositories(self):
 
        return Repository.query() \
 
                .filter(Repository.group == self) \
 
                .order_by(Repository.repo_name)
 

	
 
    @property
 
    def repositories_recursive_count(self):
 
        cnt = self.repositories.count()
 

	
 
        def children_count(group):
 
            cnt = 0
 
            for child in group.children:
 
                cnt += child.repositories.count()
 
                cnt += children_count(child)
 
            return cnt
 

	
 
        return cnt + children_count(self)
 

	
 
    def _recursive_objects(self, include_repos=True):
 
        all_ = []
 

	
 
        def _get_members(root_gr):
 
            if include_repos:
 
                for r in root_gr.repositories:
 
                    all_.append(r)
 
            childs = root_gr.children.all()
 
            if childs:
 
                for gr in childs:
 
                    all_.append(gr)
 
                    _get_members(gr)
 

	
 
        _get_members(self)
 
        return [self] + all_
 

	
 
    def recursive_groups_and_repos(self):
 
        """
 
        Recursively return all groups, with repositories in those groups
 
        """
 
        return self._recursive_objects()
 

	
 
    def recursive_groups(self):
 
        """
 
        Returns all child groups for this group, including children of children
 
        """
 
        return self._recursive_objects(include_repos=False)
 

	
 
    def get_new_name(self, group_name):
 
        """
 
        returns new full group name based on parent and new name
 

	
 
        :param group_name:
 
        """
 
        path_prefix = (self.parent_group.full_path_splitted if
 
                       self.parent_group else [])
 
        return RepoGroup.url_sep().join(path_prefix + [group_name])
 

	
 
    def get_api_data(self):
 
        """
 
        Common function for generating api data
 

	
 
        """
 
        group = self
 
        data = dict(
 
            group_id=group.group_id,
 
            group_name=group.group_name,
 
            group_description=group.group_description,
 
            parent_group=group.parent_group.group_name if group.parent_group else None,
 
            repositories=[x.repo_name for x in group.repositories],
 
            owner=group.user.username
 
        )
 
        return data
 

	
 

	
 
class Permission(Base, BaseModel):
 
    __tablename__ = 'permissions'
 
    __table_args__ = (
 
        Index('p_perm_name_idx', 'permission_name'),
 
        _table_args_default_dict,
 
    )
 

	
 
    PERMS = [
 
        ('hg.admin', _('Kallithea Administrator')),
 

	
 
        ('repository.none', _('Default user has no access to new repositories')),
 
        ('repository.read', _('Default user has read access to new repositories')),
 
        ('repository.write', _('Default user has write access to new repositories')),
 
        ('repository.admin', _('Default user has admin access to new repositories')),
 

	
 
        ('group.none', _('Default user has no access to new repository groups')),
 
        ('group.read', _('Default user has read access to new repository groups')),
 
        ('group.write', _('Default user has write access to new repository groups')),
 
        ('group.admin', _('Default user has admin access to new repository groups')),
 

	
 
        ('usergroup.none', _('Default user has no access to new user groups')),
 
        ('usergroup.read', _('Default user has read access to new user groups')),
 
        ('usergroup.write', _('Default user has write access to new user groups')),
 
        ('usergroup.admin', _('Default user has admin access to new user groups')),
 

	
 
        ('hg.repogroup.create.false', _('Only admins can create repository groups')),
 
        ('hg.repogroup.create.true', _('Non-admins can create repository groups')),
 

	
 
        ('hg.usergroup.create.false', _('Only admins can create user groups')),
 
        ('hg.usergroup.create.true', _('Non-admins can create user groups')),
 

	
 
        ('hg.create.none', _('Only admins can create top level repositories')),
 
        ('hg.create.repository', _('Non-admins can create top level repositories')),
 

	
 
        ('hg.create.write_on_repogroup.true', _('Repository creation enabled with write permission to a repository group')),
 
        ('hg.create.write_on_repogroup.false', _('Repository creation disabled with write permission to a repository group')),
 

	
 
        ('hg.fork.none', _('Only admins can fork repositories')),
 
        ('hg.fork.repository', _('Non-admins can fork repositories')),
 

	
 
        ('hg.register.none', _('Registration disabled')),
 
        ('hg.register.manual_activate', _('User registration with manual account activation')),
 
        ('hg.register.auto_activate', _('User registration with automatic account activation')),
 

	
 
        ('hg.extern_activate.manual', _('Manual activation of external account')),
 
        ('hg.extern_activate.auto', _('Automatic activation of external account')),
 
    ]
 

	
 
    # definition of system default permissions for the DEFAULT user
 
    DEFAULT_USER_PERMISSIONS = [
 
        'repository.read',
 
        'group.read',
 
        'usergroup.read',
 
        'hg.create.repository',
 
        'hg.create.write_on_repogroup.true',
 
        'hg.fork.repository',
 
        'hg.register.manual_activate',
 
        'hg.extern_activate.auto',
 
    ]
 

	
 
    # Weight defines which permissions are more important.
 
    # The higher the number, the more important the permission.
 
    PERM_WEIGHTS = {
 
        'repository.none': 0,
 
        'repository.read': 1,
 
        'repository.write': 3,
 
        'repository.admin': 4,
 

	
 
        'group.none': 0,
 
        'group.read': 1,
 
        'group.write': 3,
 
        'group.admin': 4,
 

	
 
        'usergroup.none': 0,
 
        'usergroup.read': 1,
 
        'usergroup.write': 3,
 
        'usergroup.admin': 4,
 

	
 
        'hg.repogroup.create.false': 0,
 
        'hg.repogroup.create.true': 1,
 

	
 
        'hg.usergroup.create.false': 0,
 
        'hg.usergroup.create.true': 1,
 

	
 
        'hg.fork.none': 0,
 
        'hg.fork.repository': 1,
 

	
 
        'hg.create.none': 0,
 
        'hg.create.repository': 1
 
    }
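The weights make permissions within one family directly comparable; a hedged sketch of picking the stronger of two (the helper name is invented):

    PERM_WEIGHTS = {
        'repository.none': 0,
        'repository.read': 1,
        'repository.write': 3,
        'repository.admin': 4,
    }

    def strongest(perm_a, perm_b):
        # invented helper: return whichever permission carries more weight
        return max(perm_a, perm_b, key=PERM_WEIGHTS.get)

    assert strongest('repository.read', 'repository.write') == 'repository.write'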
 

	
 
    permission_id = Column(Integer(), unique=True, primary_key=True)
 
    permission_name = Column(String(255), nullable=False)
 

	
 
    def __unicode__(self):
 
        return u"<%s('%s:%s')>" % (
 
            self.__class__.__name__, self.permission_id, self.permission_name
 
        )
 

	
 
    @classmethod
 
    def get_by_key(cls, key):
 
        return cls.query().filter(cls.permission_name == key).scalar()
 

	
 
    @classmethod
 
    def get_default_perms(cls, default_user_id):
 
        q = Session().query(UserRepoToPerm, Repository, cls) \
 
         .join((Repository, UserRepoToPerm.repository_id == Repository.repo_id)) \
 
         .join((cls, UserRepoToPerm.permission_id == cls.permission_id)) \
 
         .filter(UserRepoToPerm.user_id == default_user_id)
 

	
 
        return q.all()
 

	
 
    @classmethod
 
    def get_default_group_perms(cls, default_user_id):
 
        q = Session().query(UserRepoGroupToPerm, RepoGroup, cls) \
 
         .join((RepoGroup, UserRepoGroupToPerm.group_id == RepoGroup.group_id)) \
 
         .join((cls, UserRepoGroupToPerm.permission_id == cls.permission_id)) \
 
         .filter(UserRepoGroupToPerm.user_id == default_user_id)
 

	
 
        return q.all()
 

	
 
    @classmethod
 
    def get_default_user_group_perms(cls, default_user_id):
 
        q = Session().query(UserUserGroupToPerm, UserGroup, cls) \
 
         .join((UserGroup, UserUserGroupToPerm.user_group_id == UserGroup.users_group_id)) \
 
         .join((cls, UserUserGroupToPerm.permission_id == cls.permission_id)) \
 
         .filter(UserUserGroupToPerm.user_id == default_user_id)
 

	
 
        return q.all()
 

	
 

	
 
class UserRepoToPerm(Base, BaseModel):
 
    __tablename__ = 'repo_to_perm'
 
    __table_args__ = (
 
        UniqueConstraint('user_id', 'repository_id', 'permission_id'),
 
        _table_args_default_dict,
 
    )
 

	
 
    repo_to_perm_id = Column(Integer(), unique=True, primary_key=True)
 
    user_id = Column(Integer(), ForeignKey('users.user_id'), nullable=False)
 
    permission_id = Column(Integer(), ForeignKey('permissions.permission_id'), nullable=False)
 
    repository_id = Column(Integer(), ForeignKey('repositories.repo_id'), nullable=False)
 

	
 
    user = relationship('User')
 
    repository = relationship('Repository')
 
    permission = relationship('Permission')
 

	
 
    @classmethod
 
    def create(cls, user, repository, permission):
 
        n = cls()
 
        n.user = user
 
        n.repository = repository
 
        n.permission = permission
 
        Session().add(n)
 
        return n
 

	
 
    def __unicode__(self):
 
        return u'<%s => %s >' % (self.user, self.repository)
 

	
 

	
 
class UserUserGroupToPerm(Base, BaseModel):
 
    __tablename__ = 'user_user_group_to_perm'
 
    __table_args__ = (
 
        UniqueConstraint('user_id', 'user_group_id', 'permission_id'),
 
        _table_args_default_dict,
 
    )
 

	
 
    user_user_group_to_perm_id = Column(Integer(), unique=True, primary_key=True)
 
    user_id = Column(Integer(), ForeignKey('users.user_id'), nullable=False)
 
    permission_id = Column(Integer(), ForeignKey('permissions.permission_id'), nullable=False)
 
    user_group_id = Column(Integer(), ForeignKey('users_groups.users_group_id'), nullable=False)
 

	
 
    user = relationship('User')
 
    user_group = relationship('UserGroup')
 
    permission = relationship('Permission')
 

	
 
    @classmethod
 
    def create(cls, user, user_group, permission):
 
        n = cls()
 
        n.user = user
 
        n.user_group = user_group
 
        n.permission = permission
 
        Session().add(n)
 
        return n
 

	
 
    def __unicode__(self):
 
        return u'<%s => %s >' % (self.user, self.user_group)
 

	
 

	
 
class UserToPerm(Base, BaseModel):
 
    __tablename__ = 'user_to_perm'
 
    __table_args__ = (
 
        UniqueConstraint('user_id', 'permission_id'),
 
        _table_args_default_dict,
 
    )
 

	
 
    user_to_perm_id = Column(Integer(), unique=True, primary_key=True)
 
    user_id = Column(Integer(), ForeignKey('users.user_id'), nullable=False)
 
    permission_id = Column(Integer(), ForeignKey('permissions.permission_id'), nullable=False)
 

	
 
    user = relationship('User')
 
    permission = relationship('Permission')
 

	
 
    def __unicode__(self):
 
        return u'<%s => %s >' % (self.user, self.permission)
 

	
 

	
 
class UserGroupRepoToPerm(Base, BaseModel):
 
    __tablename__ = 'users_group_repo_to_perm'
 
    __table_args__ = (
 
        UniqueConstraint('repository_id', 'users_group_id', 'permission_id'),
 
        _table_args_default_dict,
 
    )
 

	
 
    users_group_to_perm_id = Column(Integer(), unique=True, primary_key=True)
 
    users_group_id = Column(Integer(), ForeignKey('users_groups.users_group_id'), nullable=False)
 
    permission_id = Column(Integer(), ForeignKey('permissions.permission_id'), nullable=False)
 
    repository_id = Column(Integer(), ForeignKey('repositories.repo_id'), nullable=False)
 

	
 
    users_group = relationship('UserGroup')
 
    permission = relationship('Permission')
 
    repository = relationship('Repository')
 

	
 
    @classmethod
 
    def create(cls, users_group, repository, permission):
 
        n = cls()
 
        n.users_group = users_group
 
        n.repository = repository
 
        n.permission = permission
 
        Session().add(n)
 
        return n
 

	
 
    def __unicode__(self):
 
        return u'<UserGroupRepoToPerm:%s => %s >' % (self.users_group, self.repository)
 

	
 

	
 
class UserGroupUserGroupToPerm(Base, BaseModel):
 
    __tablename__ = 'user_group_user_group_to_perm'
 
    __table_args__ = (
 
        UniqueConstraint('target_user_group_id', 'user_group_id', 'permission_id'),
 
        CheckConstraint('target_user_group_id != user_group_id'),
 
        _table_args_default_dict,
 
    )
 

	
 
    user_group_user_group_to_perm_id = Column(Integer(), unique=True, primary_key=True)
 
    target_user_group_id = Column(Integer(), ForeignKey('users_groups.users_group_id'), nullable=False)
 
    permission_id = Column(Integer(), ForeignKey('permissions.permission_id'), nullable=False)
 
    user_group_id = Column(Integer(), ForeignKey('users_groups.users_group_id'), nullable=False)
 

	
 
    target_user_group = relationship('UserGroup', primaryjoin='UserGroupUserGroupToPerm.target_user_group_id==UserGroup.users_group_id')
 
    user_group = relationship('UserGroup', primaryjoin='UserGroupUserGroupToPerm.user_group_id==UserGroup.users_group_id')
 
    permission = relationship('Permission')
 

	
 
    @classmethod
 
    def create(cls, target_user_group, user_group, permission):
 
        n = cls()
 
        n.target_user_group = target_user_group
 
        n.user_group = user_group
 
        n.permission = permission
 
        Session().add(n)
 
        return n
 

	
 
    def __unicode__(self):
 
        return u'<UserGroupUserGroup:%s => %s >' % (self.target_user_group, self.user_group)
 

	
 

	
 
class UserGroupToPerm(Base, BaseModel):
 
    __tablename__ = 'users_group_to_perm'
 
    __table_args__ = (
 
        UniqueConstraint('users_group_id', 'permission_id',),
 
        _table_args_default_dict,
 
    )
 

	
 
    users_group_to_perm_id = Column(Integer(), unique=True, primary_key=True)
 
    users_group_id = Column(Integer(), ForeignKey('users_groups.users_group_id'), nullable=False)
 
    permission_id = Column(Integer(), ForeignKey('permissions.permission_id'), nullable=False)
 

	
 
    users_group = relationship('UserGroup')
 
    permission = relationship('Permission')
 

	
 

	
 
class UserRepoGroupToPerm(Base, BaseModel):
 
    __tablename__ = 'user_repo_group_to_perm'
 
    __table_args__ = (
 
        UniqueConstraint('user_id', 'group_id', 'permission_id'),
 
        _table_args_default_dict,
 
    )
 

	
 
    group_to_perm_id = Column(Integer(), unique=True, primary_key=True)
 
    user_id = Column(Integer(), ForeignKey('users.user_id'), nullable=False)
 
    group_id = Column(Integer(), ForeignKey('groups.group_id'), nullable=False)
 
    permission_id = Column(Integer(), ForeignKey('permissions.permission_id'), nullable=False)
 

	
 
    user = relationship('User')
 
    group = relationship('RepoGroup')
 
    permission = relationship('Permission')
 

	
 
    @classmethod
 
    def create(cls, user, repository_group, permission):
 
        n = cls()
 
        n.user = user
 
        n.group = repository_group
 
        n.permission = permission
 
        Session().add(n)
 
        return n
 

	
 

	
 
class UserGroupRepoGroupToPerm(Base, BaseModel):
 
    __tablename__ = 'users_group_repo_group_to_perm'
 
    __table_args__ = (
 
        UniqueConstraint('users_group_id', 'group_id'),
 
        _table_args_default_dict,
 
    )
 

	
 
    users_group_repo_group_to_perm_id = Column(Integer(), unique=True, primary_key=True)
 
    users_group_id = Column(Integer(), ForeignKey('users_groups.users_group_id'), nullable=False)
 
    group_id = Column(Integer(), ForeignKey('groups.group_id'), nullable=False)
 
    permission_id = Column(Integer(), ForeignKey('permissions.permission_id'), nullable=False)
 

	
 
    users_group = relationship('UserGroup')
 
    permission = relationship('Permission')
 
    group = relationship('RepoGroup')
 

	
 
    @classmethod
 
    def create(cls, user_group, repository_group, permission):
 
        n = cls()
 
        n.users_group = user_group
 
        n.group = repository_group
 
        n.permission = permission
 
        Session().add(n)
 
        return n
 

	
 

	
 
class Statistics(Base, BaseModel):
 
    __tablename__ = 'statistics'
 
    __table_args__ = (
 
         _table_args_default_dict,
 
    )
 

	
 
    stat_id = Column(Integer(), unique=True, primary_key=True)
 
    repository_id = Column(Integer(), ForeignKey('repositories.repo_id'), nullable=False, unique=True)
 
    stat_on_revision = Column(Integer(), nullable=False)
 
    commit_activity = Column(LargeBinary(1000000), nullable=False)  # JSON data
 
    commit_activity_combined = Column(LargeBinary(), nullable=False)  # JSON data
 
    languages = Column(LargeBinary(1000000), nullable=False)  # JSON data
 

	
 
    repository = relationship('Repository', single_parent=True)
 

	
 

	
 
class UserFollowing(Base, BaseModel):
 
    __tablename__ = 'user_followings'
 
    __table_args__ = (
 
        UniqueConstraint('user_id', 'follows_repository_id'),
 
        UniqueConstraint('user_id', 'follows_user_id'),
 
        _table_args_default_dict,
 
    )
 

	
 
    user_following_id = Column(Integer(), unique=True, primary_key=True)
 
    user_id = Column(Integer(), ForeignKey('users.user_id'), nullable=False)
 
    follows_repo_id = Column("follows_repository_id", Integer(), ForeignKey('repositories.repo_id'), nullable=True)
 
    follows_user_id = Column(Integer(), ForeignKey('users.user_id'), nullable=True)
 
    follows_from = Column(DateTime(timezone=False), nullable=False, default=datetime.datetime.now)
 

	
 
    user = relationship('User', primaryjoin='User.user_id==UserFollowing.user_id')
 

	
 
    follows_user = relationship('User', primaryjoin='User.user_id==UserFollowing.follows_user_id')
 
    follows_repository = relationship('Repository', order_by='Repository.repo_name')
 

	
 
    @classmethod
 
    def get_repo_followers(cls, repo_id):
 
        return cls.query().filter(cls.follows_repo_id == repo_id)
 

	
 

	
 
class CacheInvalidation(Base, BaseModel):
 
    __tablename__ = 'cache_invalidation'
 
    __table_args__ = (
 
        Index('key_idx', 'cache_key'),
 
        _table_args_default_dict,
 
    )
 

	
 
    # cache_id, not used
 
    cache_id = Column(Integer(), unique=True, primary_key=True)
 
    # cache_key as created by _get_cache_key
 
    cache_key = Column(Unicode(255), nullable=False, unique=True)
 
    # cache_args is a repo_name
 
    cache_args = Column(Unicode(255), nullable=False)
 
    # an instance sets cache_active to True when it is caching; other instances set
 
    # cache_active to False to indicate that this cache is invalid
 
    cache_active = Column(Boolean(), nullable=False, default=False)
 

	
 
    def __init__(self, cache_key, repo_name=''):
 
        self.cache_key = cache_key
 
        self.cache_args = repo_name
 
        self.cache_active = False
 

	
 
    def __unicode__(self):
 
        return u"<%s('%s:%s[%s]')>" % (
 
            self.__class__.__name__,
 
            self.cache_id, self.cache_key, self.cache_active)
 

	
 
    def _cache_key_partition(self):
 
        prefix, repo_name, suffix = self.cache_key.partition(self.cache_args)
 
        return prefix, repo_name, suffix
 

	
 
    def get_prefix(self):
 
        """
 
        get prefix that might have been used in _get_cache_key to
 
        generate self.cache_key. Only used for informational purposes
 
        in repo_edit.html.
 
        """
 
        # prefix, repo_name, suffix
 
        return self._cache_key_partition()[0]
 

	
 
    def get_suffix(self):
 
        """
 
        get suffix that might have been used in _get_cache_key to
 
        generate self.cache_key. Only used for informational purposes
 
        in repo_edit.html.
 
        """
 
        # prefix, repo_name, suffix
 
        return self._cache_key_partition()[2]
 

	
 
    @classmethod
 
    def clear_cache(cls):
 
        """
 
        Delete all cache keys from database.
 
        Should only be run when all instances are down and all entries thus stale.
 
        """
 
        cls.query().delete()
 
        Session().commit()
 

	
 
    @classmethod
 
    def _get_cache_key(cls, key):
 
        """
 
        Wrapper for generating a unique cache key for this instance and "key".
 
        key must/will start with a repo_name, which will be stored in .cache_args.
 
        """
 
        import kallithea
 
        prefix = kallithea.CONFIG.get('instance_id', '')
 
        return "%s%s" % (prefix, key)
 

	
 
    @classmethod
 
    def set_invalidate(cls, repo_name):
 
        """
 
        Mark all caches of a repo as invalid in the database.
 
        """
 
        inv_objs = Session().query(cls).filter(cls.cache_args == repo_name).all()
 
        log.debug('for repo %s got %s invalidation objects',
 
                  safe_str(repo_name), inv_objs)
 

	
 
        for inv_obj in inv_objs:
 
            log.debug('marking %s key for invalidation based on repo_name=%s',
 
                      inv_obj, safe_str(repo_name))
 
            Session().delete(inv_obj)
 
        Session().commit()
 

	
 
    @classmethod
 
    def test_and_set_valid(cls, repo_name, kind, valid_cache_keys=None):
 
        """
 
        Mark this cache key as active and currently cached.
 
        Return True if the existing cache registration still was valid.
 
        Return False to indicate that it had been invalidated and caches should be refreshed.
 
        """
 

	
 
        key = (repo_name + '_' + kind) if kind else repo_name
 
        cache_key = cls._get_cache_key(key)
 

	
 
        if valid_cache_keys and cache_key in valid_cache_keys:
 
            return True
 

	
 
        inv_obj = cls.query().filter(cls.cache_key == cache_key).scalar()
 
        if inv_obj is None:
 
            inv_obj = cls(cache_key, repo_name)
 
        elif inv_obj.cache_active:
 
            return True
 
        inv_obj.cache_active = True
 
        Session().add(inv_obj)
 
        try:
 
            Session().commit()
 
        except sqlalchemy.exc.IntegrityError:
 
            log.error('commit of CacheInvalidation failed - retrying')
 
            Session().rollback()
 
            inv_obj = cls.query().filter(cls.cache_key == cache_key).scalar()
 
            if inv_obj is None:
 
                log.error('failed to create CacheInvalidation entry')
 
                # TODO: fail badly?
 
            # else: TOCTOU - another thread added the key at the same time; no further action required
 
        return False
 

	
 
    @classmethod
 
    def get_valid_cache_keys(cls):
 
        """
 
        Return an opaque object with information about which caches are still valid
 
        and can be used without checking for invalidation.
 
        """
 
        return set(inv_obj.cache_key for inv_obj in cls.query().filter(cls.cache_active).all())
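
# Illustrative sketch only, not part of the model: the calling pattern
# described by the docstrings above. `load_cached` and `recompute` are
# hypothetical callables standing in for Kallithea's actual Beaker-backed
# cache access; 'RSS' is just an example of a cache `kind`.
def _example_cache_usage(repo_name, load_cached, recompute):
    valid_keys = CacheInvalidation.get_valid_cache_keys()
    if CacheInvalidation.test_and_set_valid(repo_name, 'RSS',
                                            valid_cache_keys=valid_keys):
        # the registration still was valid - the cached value can be reused
        return load_cached(repo_name)
    # the key had been invalidated (or was new) - recompute and re-cache
    return recompute(repo_name)
# A writer correspondingly calls CacheInvalidation.set_invalidate(repo_name)
# after changing a repository, deleting its keys so readers refresh.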
 

	
 

	
 
class ChangesetComment(Base, BaseModel):
 
    __tablename__ = 'changeset_comments'
 
    __table_args__ = (
 
        Index('cc_revision_idx', 'revision'),
 
        Index('cc_pull_request_id_idx', 'pull_request_id'),
 
        _table_args_default_dict,
 
    )
 

	
 
    comment_id = Column(Integer(), unique=True, primary_key=True)
 
    repo_id = Column(Integer(), ForeignKey('repositories.repo_id'), nullable=False)
 
    revision = Column(String(40), nullable=True)
 
    pull_request_id = Column(Integer(), ForeignKey('pull_requests.pull_request_id'), nullable=True)
 
    line_no = Column(Unicode(10), nullable=True)
 
    f_path = Column(Unicode(1000), nullable=True)
 
    user_id = Column(Integer(), ForeignKey('users.user_id'), nullable=False)
 
    text = Column(UnicodeText(25000), nullable=False)
 
    created_on = Column(DateTime(timezone=False), nullable=False, default=datetime.datetime.now)
 
    modified_at = Column(DateTime(timezone=False), nullable=False, default=datetime.datetime.now)
 

	
 
    author = relationship('User')
 
    repo = relationship('Repository')
 
    # status_change is frequently used directly in templates - make it a lazy
 
    # join to avoid fetching each related ChangesetStatus on demand.
 
    # There will only be one ChangesetStatus referencing each comment so the join will not explode.
 
    status_change = relationship('ChangesetStatus',
 
                                 cascade="all, delete-orphan", lazy='joined')
 
    pull_request = relationship('PullRequest')
 

	
 
    @classmethod
 
    def get_users(cls, revision=None, pull_request_id=None):
 
        """
 
        Return the users associated with this ChangesetComment, i.e. those
 
        who actually commented.
 

	
 
        :param revision: if given, only consider comments on this changeset
 
        :param pull_request_id: if given, only consider comments on this pull request
 
        """
 
        q = Session().query(User) \
 
                .join(ChangesetComment.author)
 
        if revision is not None:
 
            q = q.filter(cls.revision == revision)
 
        elif pull_request_id is not None:
 
            q = q.filter(cls.pull_request_id == pull_request_id)
 
        return q.all()
 

	
 
    def url(self):
 
        anchor = "comment-%s" % self.comment_id
 
        import kallithea.lib.helpers as h
 
        if self.revision:
 
            return h.url('changeset_home', repo_name=self.repo.repo_name, revision=self.revision, anchor=anchor)
 
        elif self.pull_request_id is not None:
 
            return self.pull_request.url(anchor=anchor)
 

	
 
    def deletable(self):
 
        return self.created_on > datetime.datetime.now() - datetime.timedelta(minutes=5)
kallithea/tests/test.ini
 
################################################################################
 
################################################################################
 
# Kallithea - config for tests:                                                #
 
# initial_repo_scan = true                                                     #
 
# vcs_full_cache = false                                                       #
 
# sqlalchemy and kallithea_test.sqlite                                         #
 
# custom logging                                                               #
 
#                                                                              #
 
# The %(here)s variable will be replaced with the parent directory of this file#
 
################################################################################
 
################################################################################
 

	
 
[DEFAULT]
 
debug = true
 
pdebug = false
 

	
 
################################################################################
 
## Email settings                                                             ##
 
##                                                                            ##
 
## Refer to the documentation ("Email settings") for more details.            ##
 
##                                                                            ##
 
## It is recommended to use a valid sender address that passes access         ##
 
## validation and spam filtering in mail servers.                             ##
 
################################################################################
 

	
 
## 'From' header for application emails. You can optionally add a name.
 
## Default:
 
#app_email_from = Kallithea
 
## Examples:
 
#app_email_from = Kallithea <kallithea-noreply@example.com>
 
#app_email_from = kallithea-noreply@example.com
 

	
 
## Subject prefix for application emails.
 
## A space between this prefix and the real subject is automatically added.
 
## Default:
 
#email_prefix =
 
## Example:
 
#email_prefix = [Kallithea]
 

	
 
## Recipients for error emails and fallback recipients of application mails.
 
## Multiple addresses can be specified, space-separated.
 
## Only addresses are allowed, do not add any name part.
 
## Default:
 
#email_to =
 
## Examples:
 
#email_to = admin@example.com
 
#email_to = admin@example.com another_admin@example.com
 

	
 
## 'From' header for error emails. You can optionally add a name.
 
## Default:
 
#error_email_from = pylons@yourapp.com
 
## Examples:
 
#error_email_from = Kallithea Errors <kallithea-noreply@example.com>
 
#error_email_from = paste_error@example.com
 

	
 
## SMTP server settings
 
## Only smtp_server is mandatory. All other settings take the specified default
 
## values.
 
#smtp_server = smtp.example.com
 
#smtp_username =
 
#smtp_password =
 
#smtp_port = 25
 
#smtp_use_tls = false
 
#smtp_use_ssl = false
 
## SMTP authentication parameters to use (e.g. LOGIN PLAIN CRAM-MD5, etc.).
 
## If empty, use any of the authentication parameters supported by the server.
 
#smtp_auth =
 

	
 
[server:main]
 
## PASTE ##
 
#use = egg:Paste#http
 
## nr of worker threads to spawn
 
#threadpool_workers = 5
 
## max requests before thread respawn
 
#threadpool_max_requests = 10
 
## option to use a thread pool
 
#use_threadpool = true
 

	
 
## WAITRESS ##
 
use = egg:waitress#main
 
## number of worker threads
 
threads = 5
 
## MAX BODY SIZE 100GB
 
max_request_body_size = 107374182400
 
## use poll instead of select; fixes fd limits, but may not work on old
 
## Windows systems.
 
#asyncore_use_poll = True
 

	
 
## GUNICORN ##
 
#use = egg:gunicorn#main
 
## number of process workers. You must set `instance_id = *` when this option
 
## is set to more than one worker
 
#workers = 1
 
## process name
 
#proc_name = kallithea
 
## type of worker class, one of sync, eventlet, gevent, tornado
 
## for bigger setups, using a worker class other than sync is recommended
 
#worker_class = sync
 
#max_requests = 1000
 
## amount of time a worker can handle a request before it gets killed and
 
## restarted
 
#timeout = 3600
 

	
 
## UWSGI ##
 
## run with uwsgi --ini-paste-logged <inifile.ini>
 
#[uwsgi]
 
#socket = /tmp/uwsgi.sock
 
#master = true
 
#http = 127.0.0.1:5000
 

	
 
## run as a daemon and redirect all output to a file
 
#daemonize = ./uwsgi_kallithea.log
 

	
 
## master process PID
 
#pidfile = ./uwsgi_kallithea.pid
 

	
 
## stats server with workers statistics, use uwsgitop
 
## for monitoring, `uwsgitop 127.0.0.1:1717`
 
#stats = 127.0.0.1:1717
 
#memory-report = true
 

	
 
## log 5XX errors
 
#log-5xx = true
 

	
 
## Set the socket listen queue size.
 
#listen = 256
 

	
 
## Gracefully reload workers after the specified number of managed requests
 
## (avoid memory leaks).
 
#max-requests = 1000
 

	
 
## enable large buffers
 
#buffer-size = 65535
 

	
 
## socket and http timeouts ##
 
#http-timeout = 3600
 
#socket-timeout = 3600
 

	
 
## Log requests slower than the specified number of milliseconds.
 
#log-slow = 10
 

	
 
## Exit if no app can be loaded.
 
#need-app = true
 

	
 
## Set lazy mode (load apps in workers instead of master).
 
#lazy = true
 

	
 
## scaling ##
 
## set the cheaper algorithm to use; if not set, the default will be used
 
#cheaper-algo = spare
 

	
 
## minimum number of workers to keep at all times
 
#cheaper = 1
 

	
 
## number of workers to spawn at startup
 
#cheaper-initial = 1
 

	
 
## maximum number of workers that can be spawned
 
#workers = 4
 

	
 
## how many workers should be spawned at a time
 
#cheaper-step = 1
 

	
 
## COMMON ##
 
host = 127.0.0.1
 
#port = 5000
 
port = 4999
 

	
 
## middleware for hosting the WSGI application under a URL prefix
 
#[filter:proxy-prefix]
 
#use = egg:PasteDeploy#prefix
 
#prefix = /<your-prefix>
 

	
 
[app:main]
 
use = egg:kallithea
 
## enable proxy prefix middleware
 
#filter-with = proxy-prefix
 

	
 
full_stack = true
 
static_files = true
 
## Available Languages:
 
## cs de fr hu ja nl_BE pl pt_BR ru sk zh_CN zh_TW
 
lang =
 
cache_dir = %(here)s/data
 
index_dir = %(here)s/data/index
 

	
 
## perform a full repository scan on each server start; this should be
 
## set to false after first startup, to allow faster server restarts.
 
#initial_repo_scan = false
 
initial_repo_scan = true
 

	
 
## uncomment and set this path to use archive download cache
 
archive_cache_dir = %(here)s/tarballcache
 

	
 
## change this to a unique ID for security
 
app_instance_uuid = test
 

	
 
## cut off limit for large diffs (size in bytes)
 
cut_off_limit = 256000
 

	
 
## use the cached version of SCM repositories everywhere
 
#vcs_full_cache = true
 
vcs_full_cache = false
 

	
 
## force https in Kallithea, fixes https redirects, assumes it's always https
 
force_https = false
 

	
 
## use Strict-Transport-Security headers
 
use_htsts = false
 

	
 
## number of commits stats will parse on each iteration
 
commit_parse_limit = 25
 

	
 
## path to git executable
 
git_path = git
 

	
 
## git rev filter option; --all is the default filter. If you need to
 
## hide all refs in the changelog, switch this to --branches --tags
 
#git_rev_filter = --branches --tags
 

	
 
## RSS feed options
 
rss_cut_off_limit = 256000
 
rss_items_per_page = 10
 
rss_include_diff = false
 

	
 
## options for showing and identifying changesets
 
show_sha_length = 12
 
#show_revision_number = false
 
show_revision_number = true
 

	
 
## gist URL alias, used to create nicer URLs for gists. This should be a
 
## URL that rewrites to _admin/gists/<gistid>.
 
## example: http://gist.example.com/{gistid}. Empty means use the internal
 
## Kallithea URL, i.e. http[s]://kallithea.example.com/_admin/gists/<gistid>
 
gist_alias_url =
 

	
 
## whitelist of API-enabled controllers. This allows adding controllers to
 
## which access will be enabled by api_key. E.g. to enable
 
## API access to raw_files add `FilesController:raw`; to enable access to patches
 
## add `ChangesetController:changeset_patch`. This list should be "," separated.
 
## Syntax is <ControllerClass>:<function>. Check debug logs for generated names.
 
## Recommended settings below are commented out:
 
api_access_controllers_whitelist =
 
#    ChangesetController:changeset_patch,
 
#    ChangesetController:changeset_raw,
 
#    FilesController:raw,
 
#    FilesController:archivefile
 

	
 
## default encoding used to convert from and to unicode
 
## can also be a comma separated list of encodings in case of mixed encodings
 
default_encoding = utf8
 

	
 
## issue tracker for Kallithea (leave blank to disable, absent for default)
 
#bugtracker = https://bitbucket.org/conservancy/kallithea/issues
 

	
 
## issue tracking mapping for commit messages
 
## comment out issue_pat, issue_server_link, issue_prefix to fall back to the defaults
 

	
 
## pattern to get the issues from commit messages
 
## the default used here is #<number>, with a non-capturing regex group for `#`
 
## {id} will be all groups matched from this pattern
 

	
 
issue_pat = (?:\s*#)(\d+)
 

	
 
## server URL to the issue; each {id} will be replaced with the match
 
## fetched from the regex, {repo} is replaced with the full repository name
 
## including groups, and {repo_name} is replaced with just the name of the repo
 

	
 
issue_server_link = https://issues.example.com/{repo}/issue/{id}
 

	
 
## prefix to add to the link to indicate it's a URL
 
## #314 will be replaced by <issue_prefix><id>
 

	
 
issue_prefix = #
 

	
 
## issue_pat, issue_server_link, issue_prefix can have suffixes to specify
 
## multiple patterns, for other issue servers, wikis or others;
 
## below is an example of how to create a wiki pattern
 
# wiki-some-id -> https://wiki.example.com/some-id
 

	
 
#issue_pat_wiki = (?:wiki-)(.+)
 
#issue_server_link_wiki = https://wiki.example.com/{id}
 
#issue_prefix_wiki = WIKI-
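 
## For illustration: with the active settings above, a commit message like
## "fix #314" in repository "group/repo" is linked to
## https://issues.example.com/group/repo/issue/314 and rendered with the
## issue_prefix, i.e. as "#314" ({id} comes from the regex group, {repo}
## from the full repository name).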
 

	
 
## alternative HTTP response code for failed authentication. The default HTTP
 
## response is 401 HTTPUnauthorized. Currently, Mercurial clients have trouble
 
## handling that; set this variable to 403 to return HTTPForbidden instead
 
auth_ret_code =
 

	
 
## locking return code. When a repository is locked, return this HTTP code. 2XX
 
## codes don't break the transactions while 4XX codes do
 
lock_ret_code = 423
 

	
 
## allow changing the repository location on the settings page
 
allow_repo_location_change = True
 

	
 
## allow setting up custom hooks on the settings page
 
allow_custom_hooks_settings = True
 

	
 
## extra extensions for indexing, space separated and without the leading '.'.
 
# index.extensions =
 
#    gemfile
 
#    lock
 

	
 
## extra filenames for indexing, space separated
 
# index.filenames =
 
#    .dockerignore
 
#    .editorconfig
 
#    INSTALL
 
#    CHANGELOG
 

	
 
####################################
 
###        CELERY CONFIG        ####
 
####################################
 

	
 
use_celery = false
 
broker.host = localhost
 
broker.vhost = rabbitmqhost
 
broker.port = 5672
 
broker.user = rabbitmq
 
broker.password = qweqwe
 

	
 
celery.imports = kallithea.lib.celerylib.tasks
 

	
 
celery.result.backend = amqp
 
celery.result.dburi = amqp://
 
celery.result.serializer = json
 

	
 
#celery.send.task.error.emails = true
 
#celery.amqp.task.result.expires = 18000
 

	
 
celeryd.concurrency = 2
 
#celeryd.log.file = celeryd.log
 
celeryd.log.level = DEBUG
 
celeryd.max.tasks.per.child = 1
 

	
 
## tasks will never be sent to the queue, but executed locally instead.
 
celery.always.eager = false
 

	
 
####################################
 
###         BEAKER CACHE        ####
 
####################################
 

	
 
beaker.cache.data_dir = %(here)s/data/cache/data
 
beaker.cache.lock_dir = %(here)s/data/cache/lock
 

	
 
beaker.cache.regions = short_term,long_term,sql_cache_short
 

	
 
beaker.cache.short_term.type = memory
 
beaker.cache.short_term.expire = 60
 
beaker.cache.short_term.key_length = 256
 

	
 
beaker.cache.long_term.type = memory
 
beaker.cache.long_term.expire = 36000
 
beaker.cache.long_term.key_length = 256
 

	
 
beaker.cache.sql_cache_short.type = memory
 
#beaker.cache.sql_cache_short.expire = 10
 
beaker.cache.sql_cache_short.expire = 1
 
beaker.cache.sql_cache_short.key_length = 256
 

	
 
####################################
 
###       BEAKER SESSION        ####
 
####################################
 

	
 
## Name of session cookie. Should be unique for a given host and path, even when running
 
## on different ports. Otherwise, cookie sessions will be shared and messed up.
 
beaker.session.key = kallithea
 
## Sessions should always only be accessible by the browser, not directly by JavaScript.
 
beaker.session.httponly = true
 
## Session lifetime. 2592000 seconds is 30 days.
 
beaker.session.timeout = 2592000
 

	
 
## Server secret used with HMAC to ensure integrity of cookies.
 
beaker.session.secret = {74e0cd75-b339-478b-b129-07dd221def1f}
 
## Further, encrypt the data with AES.
 
#beaker.session.encrypt_key = <key_for_encryption>
 
#beaker.session.validate_key = <validation_key>
 

	
 
## Type of storage used for the session, current types are
 
## dbm, file, memcached, database, and memory.
 

	
 
## File system storage of session data. (default)
 
#beaker.session.type = file
 

	
 
## Cookie only, store all session data inside the cookie. Requires secure secrets.
 
#beaker.session.type = cookie
 

	
 
## Database storage of session data.
 
#beaker.session.type = ext:database
 
#beaker.session.sa.url = postgresql://postgres:qwe@localhost/kallithea
 
#beaker.session.table_name = db_session
 

	
 
############################
 
## ERROR HANDLING SYSTEMS ##
 
############################
 

	
 
####################
 
### [errormator] ###
 
####################
 

	
 
## Errormator is tailored to work with Kallithea; see
 
## http://errormator.com for details on how to obtain an account.
 
## You must install the Python package `errormator_client` to make it work.
 

	
 
## errormator enabled
 
errormator = false
 

	
 
errormator.server_url = https://api.errormator.com
 
errormator.api_key = YOUR_API_KEY
 

	
 
## TWEAK AMOUNT OF INFO SENT HERE
 

	
 
## enables 404 error logging (default False)
 
errormator.report_404 = false
 

	
 
## time in seconds after which a request is considered slow (default 1)
 
errormator.slow_request_time = 1
 

	
 
## record slow requests in application
 
## (needs to be enabled for slow datastore recording and time tracking)
 
errormator.slow_requests = true
 

	
 
## enable hooking to application loggers
 
#errormator.logging = true
 

	
 
## minimum log level for log capture
 
#errormator.logging.level = WARNING
 

	
 
## send logs only from erroneous/slow requests
 
## (saves API quota for intensive logging)
 
errormator.logging_on_error = false
 

	
 
## list of additional keywords that should be grabbed from the environ object;
 
## can be a string with a comma separated list of words in lowercase
 
## (by default the client will always send the following info:
 
## 'REMOTE_USER', 'REMOTE_ADDR', 'SERVER_NAME', 'CONTENT_TYPE' and all keys
 
## that start with HTTP*); this list can be extended with additional keywords here
 
errormator.environ_keys_whitelist =
 

	
 
## list of keywords that should be blanked from the request object;
 
## can be a string with a comma separated list of words in lowercase
 
## (by default the client will always blank keys that contain the following words:
 
## 'password', 'passwd', 'pwd', 'auth_tkt', 'secret', 'csrf');
 
## this list can be extended with additional keywords set here
 
errormator.request_keys_blacklist =
 

	
 
## list of namespaces that should be ignored when gathering log entries;
 
## can be a string with a comma separated list of namespaces
 
## (by default the client ignores its own entries: errormator_client.client)
 
errormator.log_namespace_blacklist =
 

	
 
################
 
### [sentry] ###
 
################
 

	
 
## Sentry is an alternative open source error aggregator.
 
## You must install the Python packages `sentry` and `raven` to enable it.
 

	
 
sentry.dsn = YOUR_DSN
 
sentry.servers =
 
sentry.name =
 
sentry.key =
 
sentry.public_key =
 
sentry.secret_key =
 
sentry.project =
 
sentry.site =
 
sentry.include_paths =
 
sentry.exclude_paths =
 

	
 
################################################################################
 
## WARNING: *THE LINE BELOW MUST BE UNCOMMENTED IN A PRODUCTION ENVIRONMENT*  ##
 
## Debug mode will enable the interactive debugging tool, allowing ANYONE to  ##
 
## execute malicious code after an exception is raised.                       ##
 
################################################################################
 
set debug = false
 

	
 
##################################
 
###       LOGVIEW CONFIG       ###
 
##################################
 

	
 
logview.sqlalchemy = #faa
 
logview.pylons.templating = #bfb
 
logview.pylons.util = #eee
 

	
 
#########################################################
 
### DB CONFIGS - EACH DB WILL HAVE ITS OWN CONFIG     ###
 
#########################################################
 

	
 
# SQLITE [default]
 
#sqlalchemy.db1.url = sqlite:///%(here)s/kallithea.db?timeout=60
 
sqlalchemy.db1.url = sqlite:///%(here)s/kallithea_test.sqlite
 

	
 
# POSTGRESQL
 
#sqlalchemy.db1.url = postgresql://user:pass@localhost/kallithea
 

	
 
# MySQL
 
#sqlalchemy.db1.url = mysql://user:pass@localhost/kallithea?charset=utf8
 

	
 
# see sqlalchemy docs for others
 

	
 
sqlalchemy.db1.echo = false
 
sqlalchemy.db1.pool_recycle = 3600
 

	
 
################################
 
### LOGGING CONFIGURATION   ####
 
################################
 

	
 
[loggers]
 
keys = root, routes, kallithea, sqlalchemy, beaker, templates, whoosh_indexer
 

	
 
[handlers]
 
keys = console, console_sql
 

	
 
[formatters]
 
keys = generic, color_formatter, color_formatter_sql
 

	
 
#############
 
## LOGGERS ##
 
#############
 

	
 
[logger_root]
 
#level = NOTSET
 
level = DEBUG
 
handlers = console
 

	
 
[logger_routes]
 
level = DEBUG
 
handlers =
 
qualname = routes.middleware
 
## "level = DEBUG" logs the route matched and routing variables.
 
propagate = 1
 

	
 
[logger_beaker]
 
level = DEBUG
 
handlers =
 
qualname = beaker.container
 
propagate = 1
 

	
 
[logger_templates]
 
level = INFO
 
handlers =
 
qualname = pylons.templating
 
propagate = 1
 

	
 
[logger_kallithea]
 
level = DEBUG
 
handlers =
 
qualname = kallithea
 
propagate = 1
 

	
 
[logger_sqlalchemy]
 
#level = INFO
 
level = ERROR
 
#handlers = console_sql
 
handlers = console
 
qualname = sqlalchemy.engine
 
propagate = 0
 

	
 
[logger_whoosh_indexer]
 
level = DEBUG
 
handlers =
 
qualname = whoosh_indexer
 
propagate = 1
 

	
 
##############
 
## HANDLERS ##
 
##############
 

	
 
[handler_console]
 
class = StreamHandler
 
args = (sys.stderr,)
 
#level = INFO
 
level = NOTSET
 
formatter = generic
 

	
 
[handler_console_sql]
 
class = StreamHandler
 
args = (sys.stderr,)
 
level = WARN
 
formatter = generic
 

	
 
################
 
## FORMATTERS ##
 
################
 

	
 
[formatter_generic]
 
format = %(asctime)s.%(msecs)03d %(levelname)-5.5s [%(name)s] %(message)s
 
datefmt = %Y-%m-%d %H:%M:%S
 

	
 
[formatter_color_formatter]
 
class = kallithea.lib.colored_formatter.ColorFormatter
 
format = %(asctime)s.%(msecs)03d %(levelname)-5.5s [%(name)s] %(message)s
 
datefmt = %Y-%m-%d %H:%M:%S
 

	
 
[formatter_color_formatter_sql]
 
class = kallithea.lib.colored_formatter.ColorFormatterSql
 
format = %(asctime)s.%(msecs)03d %(levelname)-5.5s [%(name)s] %(message)s
 
datefmt = %Y-%m-%d %H:%M:%S
scripts/generate-ini.py
 
#!/usr/bin/env python2
 
"""
 
Based on kallithea/bin/template.ini.mako, generate
 
  kallithea/config/deployment.ini_tmpl
 
  development.ini
 
  kallithea/tests/test.ini
 
"""
 

	
 
import re
 

	
 
makofile = 'kallithea/bin/template.ini.mako'
 

	
 
# the mako conditionals used in all other ini files and templates
 
selected_mako_conditionals = set([
 
    "database_engine == 'sqlite'",
 
    "http_server == 'waitress'",
 
    "error_aggregation_service == 'errormator'",
 
    "error_aggregation_service == 'sentry'",
 
])
 

	
 
# the mako variables used in all other ini files and templates
 
mako_variable_values = {
 
    'host': '127.0.0.1',
 
    'port': '5000',
 
    'here': '%(here)s',
 
    'uuid()': '${app_instance_uuid}',
 
}
 

	
 
# files to be generated from the mako template
 
ini_files = [
 
    ('kallithea/config/deployment.ini_tmpl',
 
        '''
 
        Kallithea - Example config
 

	
 
        The %(here)s variable will be replaced with the parent directory of this file
 
        ''',
 
        {}, # exactly the same settings as template.ini.mako
 
    ),
 
    ('kallithea/tests/test.ini',
 
        '''
 
        Kallithea - config for tests:
 
        initial_repo_scan = true
 
        vcs_full_cache = false
 
        sqlalchemy and kallithea_test.sqlite
 
        custom logging
 

	
 
        The %(here)s variable will be replaced with the parent directory of this file
 
        ''',
 
        {
 
            '[server:main]': {
 
                'port': '4999',
 
            },
 
            '[app:main]': {
 
                'initial_repo_scan': 'true',
 
                'app_instance_uuid': 'test',
 
                'vcs_full_cache': 'false',
 
                'show_revision_number': 'true',
 
                'beaker.cache.sql_cache_short.expire': '1',
 
                'beaker.session.secret': '{74e0cd75-b339-478b-b129-07dd221def1f}',
 
                'sqlalchemy.db1.url': 'sqlite:///%(here)s/kallithea_test.sqlite',
 
            },
 
            '[logger_root]': {
 
                'level': 'DEBUG',
 
            },
 
            '[logger_sqlalchemy]': {
 
                'level': 'ERROR',
 
                'handlers': 'console',
 
            },
 
            '[handler_console]': {
 
                'level': 'NOTSET',
 
            },
 
        },
 
    ),
 
    ('development.ini',
 
        '''
 
        Kallithea - Development config:
 
        listening on *:5000
 
        sqlite and kallithea.db
 
        initial_repo_scan = true
 
        set debug = true
 
        verbose and colorful logging
 

	
 
        The %(here)s variable will be replaced with the parent directory of this file
 
        ''',
 
        {
 
            '[server:main]': {
 
                'host': '0.0.0.0',
 
            },
 
            '[app:main]': {
 
                'initial_repo_scan': 'true',
 
                'set debug': 'true',
 
                'app_instance_uuid': 'development-not-secret',
 
                'beaker.session.secret': 'development-not-secret',
 
            },
 
            '[handler_console]': {
 
                'level': 'DEBUG',
 
                'formatter': 'color_formatter',
 
            },
 
            '[handler_console_sql]': {
 
                'level': 'DEBUG',
 
                'formatter': 'color_formatter_sql',
 
            },
 
        },
 
    ),
 
]
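# Each ini_files entry above is (filename, description, overrides): `overrides`
# maps an ini section name to {setting: value} replacements that are applied
# by process_section()/process_line() in main() below.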
 

	
 

	
 
def main():
 
    # make sure all mako lines starting with '#' (the '##' comments) are marked up as <text>
 
    print 'reading:', makofile
 
    mako_org = file(makofile).read()
 
    mako_no_text_markup = re.sub(r'</?%text>', '', mako_org)
 
    mako_marked_up = re.sub(r'\n(##.*)', r'\n<%text>\1</%text>', mako_no_text_markup, flags=re.MULTILINE)
 
    if mako_marked_up != mako_org:
 
        print 'writing:', makofile
 
        file(makofile, 'w').write(mako_marked_up)
 

	
 
    # select the right mako conditionals for the other less sophisticated formats
 
    def sub_conditionals(m):
 
        """given a %if...%endif match, replace with just the selected
 
        conditional sections enabled and the rest as comments
 
        """
 
        conditional_lines = m.group(1)
 
        def sub_conditional(m):
 
            """given a conditional and the corresponding lines, return them raw
 
            or commented out, based on whether conditional is selected
 
            """
 
            criteria, lines = m.groups()
 
            if criteria not in selected_mako_conditionals:
 
                lines = '\n'.join((l if not l or l.startswith('#') else '#' + l) for l in lines.split('\n'))
 
            return lines
 
        conditional_lines = re.sub(r'^%(?:el)?if (.*):\n((?:^[^%\n].*\n|\n)*)',
 
            sub_conditional, conditional_lines, flags=re.MULTILINE)
 
        return conditional_lines
 
    mako_no_conditionals = re.sub(r'^(%if .*\n(?:[^%\n].*\n|%elif .*\n|\n)*)%endif\n',
 
        sub_conditionals, mako_no_text_markup, flags=re.MULTILINE)
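    # For example (illustrative input, not the real template): given
    #   %if database_engine == 'sqlite':
    #   sqlalchemy.db1.url = sqlite:///%(here)s/kallithea.db?timeout=60
    #   %elif database_engine == 'postgres':
    #   sqlalchemy.db1.url = postgresql://user:pass@localhost/kallithea
    #   %endif
    # only the sqlite line stays uncommented, because only
    # "database_engine == 'sqlite'" is in selected_mako_conditionals; the
    # postgres alternative is emitted with a leading '#'.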
 

	
 
    # expand mako variables
 
    def pyrepl(m):
 
        return mako_variable_values.get(m.group(1), m.group(0))
 
    mako_no_variables = re.sub(r'\${([^}]*)}', pyrepl, mako_no_conditionals)
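    # e.g. '${host}' becomes '127.0.0.1' and '${uuid()}' becomes
    # '${app_instance_uuid}'; variables not in mako_variable_values are left
    # untouched by pyrepl.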
 

	
 
    # remove utf-8 coding header
 
    base_ini = re.sub(r'^## -\*- coding: utf-8 -\*-\n', '', mako_no_variables)
 

	
 
    # create ini files
 
    for fn, desc, settings in ini_files:
 
        print 'updating:', fn
 
        ini_lines = re.sub(
 
            '# Kallithea - config file generated with kallithea-config *#\n',
 
            ''.join('# %-77s#\n' % l.strip() for l in desc.strip().split('\n')),
 
            base_ini)
 
        def process_section(m):
 
            """process a ini section, replacing values as necessary"""
 
            sectionname, lines = m.groups()
 
            if sectionname in settings:
 
                section_settings = settings[sectionname]
 
                def process_line(m):
 
                    """process a section line and update value if necessary"""
 
                    setting, value = m.groups()
 
                    line = m.group(0)
 
                    if setting in section_settings:
 
                        line = '%s = %s' % (setting, section_settings[setting])
 
                        if '$' not in value:
 
                            line = '#%s = %s\n%s' % (setting, value, line)
 
                    return line.rstrip()
 
                lines = re.sub(r'^([^#\n].*) = ?(.*)', process_line, lines, flags=re.MULTILINE)
 
            return sectionname + '\n' + lines
 
        ini_lines = re.sub(r'^(\[.*\])\n((?:(?:[^[\n].*)?\n)*)', process_section, ini_lines, flags=re.MULTILINE)
 
        file(fn, 'w').write(ini_lines)
 

	
 
if __name__ == '__main__':
 
    main()