Changeset - f61adead3520
beta
Marcin Kuzminski - 2012-11-24 01:59:14
marcin@python-works.com
Added an option to the ini files for controlling full caching of VCS instances.
This can increase performance for huge repositories (e.g. ~300K changesets).
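The option itself is a single boolean in the [app:main] section of each ini file below; when it is false, db.py falls back to creating a fresh VCS instance on each access instead of serving it from the long_term Beaker cache region. A minimal sketch (the True value is what this changeset ships; the commented-out False line is a hypothetical override, not part of the changeset):

    [app:main]
    ## keep fully initialized VCS instances in the long_term cache
    vcs_full_cache = True
    ## set to False to rebuild the VCS instance on every access
    #vcs_full_cache = False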
4 files changed with 7 insertions and 1 deletion:
development.ini
 
################################################################################
 
################################################################################
 
# RhodeCode - Pylons environment configuration                                 #
 
#                                                                              # 
 
# The %(here)s variable will be replaced with the parent directory of this file#
 
################################################################################
 

	
 
[DEFAULT]
 
debug = true
 
pdebug = false
 
################################################################################
 
## Uncomment and replace with the address which should receive                ## 
 
## any error reports after application crash                                  ##
 
## Additionally those settings will be used by RhodeCode mailing system       ##
 
################################################################################
 
#email_to = admin@localhost
 
#error_email_from = paste_error@localhost
 
#app_email_from = rhodecode-noreply@localhost
 
#error_message =
 
#email_prefix = [RhodeCode]
 

	
 
#smtp_server = mail.server.com
 
#smtp_username = 
 
#smtp_password = 
 
#smtp_port = 
 
#smtp_use_tls = false
 
#smtp_use_ssl = true
 
# Specify available auth parameters here (e.g. LOGIN PLAIN CRAM-MD5, etc.)
 
#smtp_auth = 
 

	
 
[server:main]
 
##nr of threads to spawn
 
#threadpool_workers = 5
 

	
 
##max request before thread respawn
 
#threadpool_max_requests = 10
 

	
 
## option to use a threadpool
 
#use_threadpool = true
 

	
 
#use = egg:Paste#http
 
use = egg:waitress#main
 
host = 0.0.0.0
 
port = 5000
 

	
 
[filter:proxy-prefix]
 
# prefix middleware for rc
 
use = egg:PasteDeploy#prefix
 
prefix = /<your-prefix>
 

	
 
[app:main]
 
use = egg:rhodecode
 
#filter-with = proxy-prefix
 
full_stack = true
 
static_files = true
 
# Optional Languages
 
# en, fr, ja, pt_BR, zh_CN, zh_TW
 
lang = en
 
cache_dir = %(here)s/data
 
index_dir = %(here)s/data/index
 
app_instance_uuid = rc-develop
 
cut_off_limit = 256000
 
vcs_full_cache = True
 
force_https = false
 
commit_parse_limit = 25
 
use_gravatar = true
 

	
 
## alternative_gravatar_url allows you to use your own avatar server application
 
## the following parts of the URL will be replaced
 
## {email}        user email
 
## {md5email}     md5 hash of the user email (like at gravatar.com)
 
## {size}         size of the image that is expected from the server application
 
## {scheme}       http/https from RhodeCode server
 
## {netloc}       network location from RhodeCode server
 
#alternative_gravatar_url = http://myavatarserver.com/getbyemail/{email}/{size}
 
#alternative_gravatar_url = http://myavatarserver.com/getbymd5/{md5email}?s={size}
 

	
 
container_auth_enabled = false
 
proxypass_auth_enabled = false
 
## default encoding used to convert from and to unicode
 
## can also be a comma separated list of encodings in case of mixed encodings
 
default_encoding = utf8
 

	
 
## overwrite schema of clone url
 
## available vars:
 
## scheme - http/https
 
## user - current user
 
## pass - password 
 
## netloc - network location
 
## path - usually repo_name
 

	
 
#clone_uri = {scheme}://{user}{pass}{netloc}{path}
 

	
 
## issue tracking mapping for commit messages
 
## comment out issue_pat, issue_server_link, issue_prefix to disable the feature
 

	
 
## pattern to get the issues from commit messages
 
## the default used here is #<number> with a non-capturing regex group for `#`
 
## {id} will be all groups matched from this pattern
 

	
 
issue_pat = (?:\s*#)(\d+)
 

	
 
## server url to the issue; each {id} will be replaced with a match

## fetched from the regex, {repo} is replaced with the full repository name

## (including groups), and {repo_name} is replaced with just the repo name
 

	
 
issue_server_link = https://myissueserver.com/{repo}/issue/{id}
 

	
 
## prefix to add to the link to indicate it's a URL
 
## #314 will be replaced by <issue_prefix><id>
 

	
 
issue_prefix = #
 

	
 
## issue_pat, issue_server_link, issue_prefix can have suffixes to specify

## multiple patterns, e.g. for another issue server, a wiki, or other targets

## below is an example of how to create a wiki pattern
 
#  #wiki-some-id -> https://mywiki.com/some-id
 

	
 
#issue_pat_wiki = (?:wiki-)(.+)
 
#issue_server_link_wiki = https://mywiki.com/{id}
 
#issue_prefix_wiki = WIKI-
 

	
 

	
 
## instance-id prefix
 
## a prefix key for this instance used for cache invalidation when running 
 
## multiple instances of rhodecode, make sure it's globally unique for 
 
## all running rhodecode instances. Leave empty if you don't use it
 
instance_id = 
 

	
 
## alternative HTTP response code returned on failed authentication. The default

## response is 401 HTTPUnauthorized. Currently HG clients have trouble

## handling that. Set this variable to 403 to return HTTPForbidden instead
 
auth_ret_code =
 

	
 
####################################
 
###        CELERY CONFIG        ####
 
####################################
 
use_celery = false
 
broker.host = localhost
 
broker.vhost = rabbitmqhost
 
broker.port = 5672
 
broker.user = rabbitmq
 
broker.password = qweqwe
 

	
 
celery.imports = rhodecode.lib.celerylib.tasks
 

	
 
celery.result.backend = amqp
 
celery.result.dburi = amqp://
 
celery.result.serialier = json
 

	
 
#celery.send.task.error.emails = true
 
#celery.amqp.task.result.expires = 18000
 

	
 
celeryd.concurrency = 2
 
#celeryd.log.file = celeryd.log
 
celeryd.log.level = debug
 
celeryd.max.tasks.per.child = 1
 

	
 
#tasks will never be sent to the queue, but executed locally instead.
 
celery.always.eager = false
 

	
 
####################################
 
###         BEAKER CACHE        ####
 
####################################
 
beaker.cache.data_dir=%(here)s/data/cache/data
 
beaker.cache.lock_dir=%(here)s/data/cache/lock
 

	
 
beaker.cache.regions=super_short_term,short_term,long_term,sql_cache_short,sql_cache_med,sql_cache_long
 

	
 
beaker.cache.super_short_term.type=memory
 
beaker.cache.super_short_term.expire=10
 
beaker.cache.super_short_term.key_length = 256
 

	
 
beaker.cache.short_term.type=memory
 
beaker.cache.short_term.expire=60
 
beaker.cache.short_term.key_length = 256
 

	
 
beaker.cache.long_term.type=memory
 
beaker.cache.long_term.expire=36000
 
beaker.cache.long_term.key_length = 256
 

	
 
beaker.cache.sql_cache_short.type=memory
 
beaker.cache.sql_cache_short.expire=10
 
beaker.cache.sql_cache_short.key_length = 256
 

	
 
beaker.cache.sql_cache_med.type=memory
 
beaker.cache.sql_cache_med.expire=360
 
beaker.cache.sql_cache_med.key_length = 256
 

	
 
beaker.cache.sql_cache_long.type=file
 
beaker.cache.sql_cache_long.expire=3600
 
beaker.cache.sql_cache_long.key_length = 256
 

	
 
####################################
 
###       BEAKER SESSION        ####
 
####################################
 
## Type of storage used for the session, current types are 
 
## dbm, file, memcached, database, and memory. 
 
## The storage uses the Container API 
 
## that is also used by the cache system.
 

	
 
## db session ##
 
#beaker.session.type = ext:database
 
#beaker.session.sa.url = postgresql://postgres:qwe@localhost/rhodecode
 
#beaker.session.table_name = db_session 
 

	
 
## encrypted cookie client side session, good for many instances ##
 
#beaker.session.type = cookie
 

	
 
## file based cookies (default) ##
 
#beaker.session.type = file
 

	
 

	
 
beaker.session.key = rhodecode
 
## secure cookie requires AES python libraries ##
 
#beaker.session.encrypt_key = g654dcno0-9873jhgfreyu
 
#beaker.session.validate_key = 9712sds2212c--zxc123
 
## invalidates the session if it hasn't been accessed for the given amount of time
 
beaker.session.timeout = 2592000
 
beaker.session.httponly = true
 
#beaker.session.cookie_path = /<your-prefix>
 

	
 
## uncomment for https secure cookie ##
 
beaker.session.secure = false
 

	
 
## auto save the session so you don't need to call .save() ##
 
beaker.session.auto = False
 

	
 
## default cookie expiration time in seconds `true` expire at browser close ##
 
#beaker.session.cookie_expires = 3600
 

	
 

	
 
############################
 
## ERROR HANDLING SYSTEMS ##
 
############################
 

	
 
####################
 
### [errormator] ###
 
####################
 

	
 
# Errormator is tailored to work with RhodeCode, see 
 
# http://errormator.com for details how to obtain an account
 
# you must install python package `errormator_client` to make it work
 

	
 
# errormator enabled
 
errormator = true
 

	
 
errormator.server_url = https://api.errormator.com
 
errormator.api_key = YOUR_API_KEY
 

	
 
# TWEAK AMOUNT OF INFO SENT HERE
 

	
 
# enables 404 error logging (default False)
 
errormator.report_404 = false
 

	
production.ini
 
################################################################################
 
################################################################################
 
# RhodeCode - Pylons environment configuration                                 #
 
#                                                                              # 
 
# The %(here)s variable will be replaced with the parent directory of this file#
 
################################################################################
 

	
 
[DEFAULT]
 
debug = true
 
pdebug = false
 
################################################################################
 
## Uncomment and replace with the address which should receive                ## 
 
## any error reports after application crash                                  ##
 
## Additionally those settings will be used by RhodeCode mailing system       ##
 
################################################################################
 
#email_to = admin@localhost
 
#error_email_from = paste_error@localhost
 
#app_email_from = rhodecode-noreply@localhost
 
#error_message =
 
#email_prefix = [RhodeCode]
 

	
 
#smtp_server = mail.server.com
 
#smtp_username = 
 
#smtp_password = 
 
#smtp_port = 
 
#smtp_use_tls = false
 
#smtp_use_ssl = true
 
# Specify available auth parameters here (e.g. LOGIN PLAIN CRAM-MD5, etc.)
 
#smtp_auth = 
 

	
 
[server:main]
 
##nr of threads to spawn
 
#threadpool_workers = 5
 

	
 
##max request before thread respawn
 
#threadpool_max_requests = 10
 

	
 
## option to use a threadpool
 
#use_threadpool = true
 

	
 
#use = egg:Paste#http
 
use = egg:waitress#main
 
host = 127.0.0.1
 
port = 8001
 

	
 
[filter:proxy-prefix]
 
# prefix middleware for rc
 
use = egg:PasteDeploy#prefix
 
prefix = /<your-prefix>
 

	
 
[app:main]
 
use = egg:rhodecode
 
#filter-with = proxy-prefix
 
full_stack = true
 
static_files = true
 
# Optional Languages
 
# en, fr, ja, pt_BR, zh_CN, zh_TW
 
lang = en
 
cache_dir = %(here)s/data
 
index_dir = %(here)s/data/index
 
app_instance_uuid = rc-production
 
cut_off_limit = 256000
 
vcs_full_cache = True
 
force_https = false
 
commit_parse_limit = 50
 
use_gravatar = true
 

	
 
## alternative_gravatar_url allows you to use your own avatar server application
 
## the following parts of the URL will be replaced
 
## {email}        user email
 
## {md5email}     md5 hash of the user email (like at gravatar.com)
 
## {size}         size of the image that is expected from the server application
 
## {scheme}       http/https from RhodeCode server
 
## {netloc}       network location from RhodeCode server
 
#alternative_gravatar_url = http://myavatarserver.com/getbyemail/{email}/{size}
 
#alternative_gravatar_url = http://myavatarserver.com/getbymd5/{md5email}?s={size}
 

	
 
container_auth_enabled = false
 
proxypass_auth_enabled = false
 
## default encoding used to convert from and to unicode
 
## can also be a comma separated list of encodings in case of mixed encodings
 
default_encoding = utf8
 

	
 
## overwrite schema of clone url
 
## available vars:
 
## scheme - http/https
 
## user - current user
 
## pass - password 
 
## netloc - network location
 
## path - usually repo_name
 

	
 
#clone_uri = {scheme}://{user}{pass}{netloc}{path}
 

	
 
## issue tracking mapping for commit messages
 
## comment out issue_pat, issue_server_link, issue_prefix to disable the feature
 

	
 
## pattern to get the issues from commit messages
 
## the default used here is #<number> with a non-capturing regex group for `#`
 
## {id} will be all groups matched from this pattern
 

	
 
issue_pat = (?:\s*#)(\d+)
 

	
 
## server url to the issue; each {id} will be replaced with a match

## fetched from the regex, {repo} is replaced with the full repository name

## (including groups), and {repo_name} is replaced with just the repo name
 

	
 
issue_server_link = https://myissueserver.com/{repo}/issue/{id}
 

	
 
## prefix to add to the link to indicate it's a URL
 
## #314 will be replaced by <issue_prefix><id>
 

	
 
issue_prefix = #
 

	
 
## issue_pat, issue_server_link, issue_prefix can have suffixes to specify

## multiple patterns, e.g. for another issue server, a wiki, or other targets

## below is an example of how to create a wiki pattern
 
#  #wiki-some-id -> https://mywiki.com/some-id
 

	
 
#issue_pat_wiki = (?:wiki-)(.+)
 
#issue_server_link_wiki = https://mywiki.com/{id}
 
#issue_prefix_wiki = WIKI-
 

	
 

	
 
## instance-id prefix
 
## a prefix key for this instance used for cache invalidation when running 
 
## multiple instances of rhodecode, make sure it's globally unique for 
 
## all running rhodecode instances. Leave empty if you don't use it
 
instance_id = 
 

	
 
## alternative HTTP response code returned on failed authentication. The default

## response is 401 HTTPUnauthorized. Currently HG clients have trouble

## handling that. Set this variable to 403 to return HTTPForbidden instead
 
auth_ret_code =
 

	
 
####################################
 
###        CELERY CONFIG        ####
 
####################################
 
use_celery = false
 
broker.host = localhost
 
broker.vhost = rabbitmqhost
 
broker.port = 5672
 
broker.user = rabbitmq
 
broker.password = qweqwe
 

	
 
celery.imports = rhodecode.lib.celerylib.tasks
 

	
 
celery.result.backend = amqp
 
celery.result.dburi = amqp://
 
celery.result.serialier = json
 

	
 
#celery.send.task.error.emails = true
 
#celery.amqp.task.result.expires = 18000
 

	
 
celeryd.concurrency = 2
 
#celeryd.log.file = celeryd.log
 
celeryd.log.level = debug
 
celeryd.max.tasks.per.child = 1
 

	
 
#tasks will never be sent to the queue, but executed locally instead.
 
celery.always.eager = false
 

	
 
####################################
 
###         BEAKER CACHE        ####
 
####################################
 
beaker.cache.data_dir=%(here)s/data/cache/data
 
beaker.cache.lock_dir=%(here)s/data/cache/lock
 

	
 
beaker.cache.regions=super_short_term,short_term,long_term,sql_cache_short,sql_cache_med,sql_cache_long
 

	
 
beaker.cache.super_short_term.type=memory
 
beaker.cache.super_short_term.expire=10
 
beaker.cache.super_short_term.key_length = 256
 

	
 
beaker.cache.short_term.type=memory
 
beaker.cache.short_term.expire=60
 
beaker.cache.short_term.key_length = 256
 

	
 
beaker.cache.long_term.type=memory
 
beaker.cache.long_term.expire=36000
 
beaker.cache.long_term.key_length = 256
 

	
 
beaker.cache.sql_cache_short.type=memory
 
beaker.cache.sql_cache_short.expire=10
 
beaker.cache.sql_cache_short.key_length = 256
 

	
 
beaker.cache.sql_cache_med.type=memory
 
beaker.cache.sql_cache_med.expire=360
 
beaker.cache.sql_cache_med.key_length = 256
 

	
 
beaker.cache.sql_cache_long.type=file
 
beaker.cache.sql_cache_long.expire=3600
 
beaker.cache.sql_cache_long.key_length = 256
 

	
 
####################################
 
###       BEAKER SESSION        ####
 
####################################
 
## Type of storage used for the session, current types are 
 
## dbm, file, memcached, database, and memory. 
 
## The storage uses the Container API 
 
## that is also used by the cache system.
 

	
 
## db session ##
 
#beaker.session.type = ext:database
 
#beaker.session.sa.url = postgresql://postgres:qwe@localhost/rhodecode
 
#beaker.session.table_name = db_session 
 

	
 
## encrypted cookie client side session, good for many instances ##
 
#beaker.session.type = cookie
 

	
 
## file based cookies (default) ##
 
#beaker.session.type = file
 

	
 

	
 
beaker.session.key = rhodecode
 
## secure cookie requires AES python libraries ##
 
#beaker.session.encrypt_key = g654dcno0-9873jhgfreyu
 
#beaker.session.validate_key = 9712sds2212c--zxc123
 
## invalidates the session if it hasn't been accessed for the given amount of time
 
beaker.session.timeout = 2592000
 
beaker.session.httponly = true
 
#beaker.session.cookie_path = /<your-prefix>
 

	
 
## uncomment for https secure cookie ##
 
beaker.session.secure = false
 

	
 
## auto save the session so you don't need to call .save() ##
 
beaker.session.auto = False
 

	
 
## default cookie expiration time in seconds `true` expire at browser close ##
 
#beaker.session.cookie_expires = 3600
 

	
 

	
 
############################
 
## ERROR HANDLING SYSTEMS ##
 
############################
 

	
 
####################
 
### [errormator] ###
 
####################
 

	
 
# Errormator is tailored to work with RhodeCode, see 
 
# http://errormator.com for details how to obtain an account
 
# you must install python package `errormator_client` to make it work
 

	
 
# errormator enabled
 
errormator = true
 

	
 
errormator.server_url = https://api.errormator.com
 
errormator.api_key = YOUR_API_KEY
 

	
 
# TWEAK AMOUNT OF INFO SENT HERE
 

	
 
# enables 404 error logging (default False)
 
errormator.report_404 = false
 

	
rhodecode/config/deployment.ini_tmpl
 
################################################################################
 
################################################################################
 
# RhodeCode - Pylons environment configuration                                 #
 
#                                                                              # 
 
# The %(here)s variable will be replaced with the parent directory of this file#
 
################################################################################
 

	
 
[DEFAULT]
 
debug = true
 
pdebug = false
 
################################################################################
 
## Uncomment and replace with the address which should receive                ## 
 
## any error reports after application crash                                  ##
 
## Additionally those settings will be used by RhodeCode mailing system       ##
 
################################################################################
 
#email_to = admin@localhost
 
#error_email_from = paste_error@localhost
 
#app_email_from = rhodecode-noreply@localhost
 
#error_message =
 
#email_prefix = [RhodeCode]
 

	
 
#smtp_server = mail.server.com
 
#smtp_username = 
 
#smtp_password = 
 
#smtp_port = 
 
#smtp_use_tls = false
 
#smtp_use_ssl = true
 
# Specify available auth parameters here (e.g. LOGIN PLAIN CRAM-MD5, etc.)
 
#smtp_auth = 
 

	
 
[server:main]
 
##nr of threads to spawn
 
#threadpool_workers = 5
 

	
 
##max request before thread respawn
 
#threadpool_max_requests = 10
 

	
 
## option to use a threadpool
 
#use_threadpool = true
 

	
 
#use = egg:Paste#http
 
use = egg:waitress#main
 
host = 127.0.0.1
 
port = 5000
 

	
 
[filter:proxy-prefix]
 
# prefix middleware for rc
 
use = egg:PasteDeploy#prefix
 
prefix = /<your-prefix>
 

	
 
[app:main]
 
use = egg:rhodecode
 
#filter-with = proxy-prefix
 
full_stack = true
 
static_files = true
 
# Optional Languages
 
# en, fr, ja, pt_BR, zh_CN, zh_TW
 
lang = en
 
cache_dir = %(here)s/data
 
index_dir = %(here)s/data/index
 
app_instance_uuid = ${app_instance_uuid}
 
cut_off_limit = 256000
 
vcs_full_cache = True
 
force_https = false
 
commit_parse_limit = 50
 
use_gravatar = true
 

	
 
## alternative_gravatar_url allows you to use your own avatar server application
 
## the following parts of the URL will be replaced
 
## {email}        user email
 
## {md5email}     md5 hash of the user email (like at gravatar.com)
 
## {size}         size of the image that is expected from the server application
 
## {scheme}       http/https from RhodeCode server
 
## {netloc}       network location from RhodeCode server
 
#alternative_gravatar_url = http://myavatarserver.com/getbyemail/{email}/{size}
 
#alternative_gravatar_url = http://myavatarserver.com/getbymd5/{md5email}?s={size}
 

	
 
container_auth_enabled = false
 
proxypass_auth_enabled = false
 
## default encoding used to convert from and to unicode
 
## can also be a comma separated list of encodings in case of mixed encodings
 
default_encoding = utf8
 

	
 
## overwrite schema of clone url
 
## available vars:
 
## scheme - http/https
 
## user - current user
 
## pass - password 
 
## netloc - network location
 
## path - usually repo_name
 

	
 
#clone_uri = {scheme}://{user}{pass}{netloc}{path}
 

	
 
## issue tracking mapping for commit messages
 
## comment out issue_pat, issue_server_link, issue_prefix to disable the feature
 

	
 
## pattern to get the issues from commit messages
 
## the default used here is #<number> with a non-capturing regex group for `#`
 
## {id} will be all groups matched from this pattern
 

	
 
issue_pat = (?:\s*#)(\d+)
 

	
 
## server url to the issue; each {id} will be replaced with a match

## fetched from the regex, {repo} is replaced with the full repository name

## (including groups), and {repo_name} is replaced with just the repo name
 

	
 
issue_server_link = https://myissueserver.com/{repo}/issue/{id}
 

	
 
## prefix to add to the link to indicate it's a URL
 
## #314 will be replaced by <issue_prefix><id>
 

	
 
issue_prefix = #
 

	
 
## issue_pat, issue_server_link, issue_prefix can have suffixes to specify

## multiple patterns, e.g. for another issue server, a wiki, or other targets

## below is an example of how to create a wiki pattern
 
#  #wiki-some-id -> https://mywiki.com/some-id
 

	
 
#issue_pat_wiki = (?:wiki-)(.+)
 
#issue_server_link_wiki = https://mywiki.com/{id}
 
#issue_prefix_wiki = WIKI-
 

	
 

	
 
## instance-id prefix
 
## a prefix key for this instance used for cache invalidation when running 
 
## multiple instances of rhodecode, make sure it's globally unique for 
 
## all running rhodecode instances. Leave empty if you don't use it
 
instance_id = 
 

	
 
## alternative HTTP response code returned on failed authentication. The default

## response is 401 HTTPUnauthorized. Currently HG clients have trouble

## handling that. Set this variable to 403 to return HTTPForbidden instead
 
auth_ret_code =
 

	
 
####################################
 
###        CELERY CONFIG        ####
 
####################################
 
use_celery = false
 
broker.host = localhost
 
broker.vhost = rabbitmqhost
 
broker.port = 5672
 
broker.user = rabbitmq
 
broker.password = qweqwe
 

	
 
celery.imports = rhodecode.lib.celerylib.tasks
 

	
 
celery.result.backend = amqp
 
celery.result.dburi = amqp://
 
celery.result.serialier = json
 

	
 
#celery.send.task.error.emails = true
 
#celery.amqp.task.result.expires = 18000
 

	
 
celeryd.concurrency = 2
 
#celeryd.log.file = celeryd.log
 
celeryd.log.level = debug
 
celeryd.max.tasks.per.child = 1
 

	
 
#tasks will never be sent to the queue, but executed locally instead.
 
celery.always.eager = false
 

	
 
####################################
 
###         BEAKER CACHE        ####
 
####################################
 
beaker.cache.data_dir=%(here)s/data/cache/data
 
beaker.cache.lock_dir=%(here)s/data/cache/lock
 

	
 
beaker.cache.regions=super_short_term,short_term,long_term,sql_cache_short,sql_cache_med,sql_cache_long
 

	
 
beaker.cache.super_short_term.type=memory
 
beaker.cache.super_short_term.expire=10
 
beaker.cache.super_short_term.key_length = 256
 

	
 
beaker.cache.short_term.type=memory
 
beaker.cache.short_term.expire=60
 
beaker.cache.short_term.key_length = 256
 

	
 
beaker.cache.long_term.type=memory
 
beaker.cache.long_term.expire=36000
 
beaker.cache.long_term.key_length = 256
 

	
 
beaker.cache.sql_cache_short.type=memory
 
beaker.cache.sql_cache_short.expire=10
 
beaker.cache.sql_cache_short.key_length = 256
 

	
 
beaker.cache.sql_cache_med.type=memory
 
beaker.cache.sql_cache_med.expire=360
 
beaker.cache.sql_cache_med.key_length = 256
 

	
 
beaker.cache.sql_cache_long.type=file
 
beaker.cache.sql_cache_long.expire=3600
 
beaker.cache.sql_cache_long.key_length = 256
 

	
 
####################################
 
###       BEAKER SESSION        ####
 
####################################
 
## Type of storage used for the session, current types are 
 
## dbm, file, memcached, database, and memory. 
 
## The storage uses the Container API 
 
## that is also used by the cache system.
 

	
 
## db session ##
 
#beaker.session.type = ext:database
 
#beaker.session.sa.url = postgresql://postgres:qwe@localhost/rhodecode
 
#beaker.session.table_name = db_session 
 

	
 
## encrypted cookie client side session, good for many instances ##
 
#beaker.session.type = cookie
 

	
 
## file based cookies (default) ##
 
#beaker.session.type = file
 

	
 

	
 
beaker.session.key = rhodecode
 
## secure cookie requires AES python libraries ##
 
#beaker.session.encrypt_key = g654dcno0-9873jhgfreyu
 
#beaker.session.validate_key = 9712sds2212c--zxc123
 
## invalidates the session if it hasn't been accessed for the given amount of time
 
beaker.session.timeout = 2592000
 
beaker.session.httponly = true
 
#beaker.session.cookie_path = /<your-prefix>
 

	
 
## uncomment for https secure cookie ##
 
beaker.session.secure = false
 

	
 
## auto save the session so you don't need to call .save() ##
 
beaker.session.auto = False
 

	
 
## default cookie expiration time in seconds `true` expire at browser close ##
 
#beaker.session.cookie_expires = 3600
 

	
 

	
 
############################
 
## ERROR HANDLING SYSTEMS ##
 
############################
 

	
 
####################
 
### [errormator] ###
 
####################
 

	
 
# Errormator is tailored to work with RhodeCode, see 
 
# http://errormator.com for details how to obtain an account
 
# you must install python package `errormator_client` to make it work
 

	
 
# errormator enabled
 
errormator = true
 

	
 
errormator.server_url = https://api.errormator.com
 
errormator.api_key = YOUR_API_KEY
 

	
 
# TWEAK AMOUNT OF INFO SENT HERE
 

	
 
# enables 404 error logging (default False)
 
errormator.report_404 = false
 

	
rhodecode/model/db.py
 
@@ -752,385 +752,388 @@ class Repository(Base, BaseModel):
 
        # names in the database, but that eventually needs to be converted
 
        # into a valid system path
 
        p += self.repo_name.split(Repository.url_sep())
 
        return os.path.join(*p)
 

	
 
    @property
 
    def cache_keys(self):
 
        """
 
        Returns associated cache keys for that repo
 
        """
 
        return CacheInvalidation.query()\
 
            .filter(CacheInvalidation.cache_args == self.repo_name)\
 
            .order_by(CacheInvalidation.cache_key)\
 
            .all()
 

	
 
    def get_new_name(self, repo_name):
 
        """
 
        returns new full repository name based on assigned group and new name
 

	
 
        :param group_name:
 
        """
 
        path_prefix = self.group.full_path_splitted if self.group else []
 
        return Repository.url_sep().join(path_prefix + [repo_name])
 

	
 
    @property
 
    def _ui(self):
 
        """
 
        Creates an db based ui object for this repository
 
        """
 
        from rhodecode.lib.utils import make_ui
 
        return make_ui('db', clear_session=False)
 

	
 
    @classmethod
 
    def inject_ui(cls, repo, extras={}):
 
        from rhodecode.lib.vcs.backends.hg import MercurialRepository
 
        from rhodecode.lib.vcs.backends.git import GitRepository
 
        required = (MercurialRepository, GitRepository)
 
        if not isinstance(repo, required):
 
            raise Exception('repo must be instance of %s' % required)
 

	
 
        # inject ui extra param to log this action via push logger
 
        for k, v in extras.items():
 
            repo._repo.ui.setconfig('rhodecode_extras', k, v)
 

	
 
    @classmethod
 
    def is_valid(cls, repo_name):
 
        """
 
        returns True if given repo name is a valid filesystem repository
 

	
 
        :param cls:
 
        :param repo_name:
 
        """
 
        from rhodecode.lib.utils import is_valid_repo
 

	
 
        return is_valid_repo(repo_name, cls.base_path())
 

	
 
    def get_api_data(self):
 
        """
 
        Common function for generating repo api data
 

	
 
        """
 
        repo = self
 
        data = dict(
 
            repo_id=repo.repo_id,
 
            repo_name=repo.repo_name,
 
            repo_type=repo.repo_type,
 
            clone_uri=repo.clone_uri,
 
            private=repo.private,
 
            created_on=repo.created_on,
 
            description=repo.description,
 
            landing_rev=repo.landing_rev,
 
            owner=repo.user.username,
 
            fork_of=repo.fork.repo_name if repo.fork else None
 
        )
 

	
 
        return data
 

	
 
    @classmethod
 
    def lock(cls, repo, user_id):
 
        repo.locked = [user_id, time.time()]
 
        Session().add(repo)
 
        Session().commit()
 

	
 
    @classmethod
 
    def unlock(cls, repo):
 
        repo.locked = None
 
        Session().add(repo)
 
        Session().commit()
 

	
 
    @property
 
    def last_db_change(self):
 
        return self.updated_on
 

	
 
    #==========================================================================
 
    # SCM PROPERTIES
 
    #==========================================================================
 

	
 
    def get_changeset(self, rev=None):
 
        return get_changeset_safe(self.scm_instance, rev)
 

	
 
    def get_landing_changeset(self):
 
        """
 
        Returns landing changeset, or if that doesn't exist returns the tip
 
        """
 
        cs = self.get_changeset(self.landing_rev) or self.get_changeset()
 
        return cs
 

	
 
    def update_last_change(self, last_change=None):
 
        if last_change is None:
 
            last_change = datetime.datetime.now()
 
        if self.updated_on is None or self.updated_on != last_change:
 
            log.debug('updated repo %s with new date %s' % (self, last_change))
 
            self.updated_on = last_change
 
            Session().add(self)
 
            Session().commit()
 

	
 
    @property
 
    def tip(self):
 
        return self.get_changeset('tip')
 

	
 
    @property
 
    def author(self):
 
        return self.tip.author
 

	
 
    @property
 
    def last_change(self):
 
        return self.scm_instance.last_change
 

	
 
    def get_comments(self, revisions=None):
 
        """
 
        Returns comments for this repository grouped by revisions
 

	
 
        :param revisions: filter query by revisions only
 
        """
 
        cmts = ChangesetComment.query()\
 
            .filter(ChangesetComment.repo == self)
 
        if revisions:
 
            cmts = cmts.filter(ChangesetComment.revision.in_(revisions))
 
        grouped = defaultdict(list)
 
        for cmt in cmts.all():
 
            grouped[cmt.revision].append(cmt)
 
        return grouped
 

	
 
    def statuses(self, revisions=None):
 
        """
 
        Returns statuses for this repository
 

	
 
        :param revisions: list of revisions to get statuses for
 
        :type revisions: list
 
        """
 

	
 
        statuses = ChangesetStatus.query()\
 
            .filter(ChangesetStatus.repo == self)\
 
            .filter(ChangesetStatus.version == 0)
 
        if revisions:
 
            statuses = statuses.filter(ChangesetStatus.revision.in_(revisions))
 
        grouped = {}
 

	
 
        #maybe we have open new pullrequest without a status ?
 
        stat = ChangesetStatus.STATUS_UNDER_REVIEW
 
        status_lbl = ChangesetStatus.get_status_lbl(stat)
 
        for pr in PullRequest.query().filter(PullRequest.org_repo == self).all():
 
            for rev in pr.revisions:
 
                pr_id = pr.pull_request_id
 
                pr_repo = pr.other_repo.repo_name
 
                grouped[rev] = [stat, status_lbl, pr_id, pr_repo]
 

	
 
        for stat in statuses.all():
 
            pr_id = pr_repo = None
 
            if stat.pull_request:
 
                pr_id = stat.pull_request.pull_request_id
 
                pr_repo = stat.pull_request.other_repo.repo_name
 
            grouped[stat.revision] = [str(stat.status), stat.status_lbl,
 
                                      pr_id, pr_repo]
 
        return grouped
 

	
 
    #==========================================================================
 
    # SCM CACHE INSTANCE
 
    #==========================================================================
 

	
 
    @property
 
    def invalidate(self):
 
        return CacheInvalidation.invalidate(self.repo_name)
 

	
 
    def set_invalidate(self):
 
        """
 
        set a cache for invalidation for this instance
 
        """
 
        CacheInvalidation.set_invalidate(repo_name=self.repo_name)
 

	
 
    @LazyProperty
 
    def scm_instance(self):

        import rhodecode
 
        full_cache = str2bool(rhodecode.CONFIG.get('vcs_full_cache'))
 
        if full_cache:
 
            return self.scm_instance_cached()
 
        return self.__get_instance()
 

	
 
    def scm_instance_cached(self, cache_map=None):
 
        @cache_region('long_term')
 
        def _c(repo_name):
 
            return self.__get_instance()
 
        rn = self.repo_name
 
        log.debug('Getting cached instance of repo')
 

	
 
        if cache_map:
 
            # get using prefilled cache_map
 
            invalidate_repo = cache_map[self.repo_name]
 
            if invalidate_repo:
 
                invalidate_repo = (None if invalidate_repo.cache_active
 
                                   else invalidate_repo)
 
        else:
 
            # get from invalidate
 
            invalidate_repo = self.invalidate
 

	
 
        if invalidate_repo is not None:
 
            region_invalidate(_c, None, rn)
 
            # update our cache
 
            CacheInvalidation.set_valid(invalidate_repo.cache_key)
 
        return _c(rn)
 

	
 
    def __get_instance(self):
 
        repo_full_path = self.repo_full_path
 
        try:
 
            alias = get_scm(repo_full_path)[0]
 
            log.debug('Creating instance of %s repository' % alias)
 
            backend = get_backend(alias)
 
        except VCSError:
 
            log.error(traceback.format_exc())
 
            log.error('Perhaps this repository is in db and not in '
 
                      'filesystem run rescan repositories with '
 
                      '"destroy old data " option from admin panel')
 
            return
 

	
 
        if alias == 'hg':
 

	
 
            repo = backend(safe_str(repo_full_path), create=False,
 
                           baseui=self._ui)
 
            # skip hidden web repository
 
            if repo._get_hidden():
 
                return
 
        else:
 
            repo = backend(repo_full_path, create=False)
 

	
 
        return repo
 

	
 

	
 
class RepoGroup(Base, BaseModel):
 
    __tablename__ = 'groups'
 
    __table_args__ = (
 
        UniqueConstraint('group_name', 'group_parent_id'),
 
        CheckConstraint('group_id != group_parent_id'),
 
        {'extend_existing': True, 'mysql_engine': 'InnoDB',
 
         'mysql_charset': 'utf8'},
 
    )
 
    __mapper_args__ = {'order_by': 'group_name'}
 

	
 
    group_id = Column("group_id", Integer(), nullable=False, unique=True, default=None, primary_key=True)
 
    group_name = Column("group_name", String(255, convert_unicode=False, assert_unicode=None), nullable=False, unique=True, default=None)
 
    group_parent_id = Column("group_parent_id", Integer(), ForeignKey('groups.group_id'), nullable=True, unique=None, default=None)
 
    group_description = Column("group_description", String(10000, convert_unicode=False, assert_unicode=None), nullable=True, unique=None, default=None)
 
    enable_locking = Column("enable_locking", Boolean(), nullable=False, unique=None, default=False)
 

	
 
    repo_group_to_perm = relationship('UserRepoGroupToPerm', cascade='all', order_by='UserRepoGroupToPerm.group_to_perm_id')
 
    users_group_to_perm = relationship('UsersGroupRepoGroupToPerm', cascade='all')
 

	
 
    parent_group = relationship('RepoGroup', remote_side=group_id)
 

	
 
    def __init__(self, group_name='', parent_group=None):
 
        self.group_name = group_name
 
        self.parent_group = parent_group
 

	
 
    def __unicode__(self):
 
        return u"<%s('%s:%s')>" % (self.__class__.__name__, self.group_id,
 
                                  self.group_name)
 

	
 
    @classmethod
 
    def groups_choices(cls, check_perms=False):
 
        from webhelpers.html import literal as _literal
 
        from rhodecode.model.scm import ScmModel
 
        groups = cls.query().all()
 
        if check_perms:
 
            #filter group user have access to, it's done
 
            #magically inside ScmModel based on current user
 
            groups = ScmModel().get_repos_groups(groups)
 
        repo_groups = [('', '')]
 
        sep = ' &raquo; '
 
        _name = lambda k: _literal(sep.join(k))
 

	
 
        repo_groups.extend([(x.group_id, _name(x.full_path_splitted))
 
                              for x in groups])
 

	
 
        repo_groups = sorted(repo_groups, key=lambda t: t[1].split(sep)[0])
 
        return repo_groups
 

	
 
    @classmethod
 
    def url_sep(cls):
 
        return URL_SEP
 

	
 
    @classmethod
 
    def get_by_group_name(cls, group_name, cache=False, case_insensitive=False):
 
        if case_insensitive:
 
            gr = cls.query()\
 
                .filter(cls.group_name.ilike(group_name))
 
        else:
 
            gr = cls.query()\
 
                .filter(cls.group_name == group_name)
 
        if cache:
 
            gr = gr.options(FromCache(
 
                            "sql_cache_short",
 
                            "get_group_%s" % _hash_key(group_name)
 
                            )
 
            )
 
        return gr.scalar()
 

	
 
    @property
 
    def parents(self):
 
        parents_recursion_limit = 5
 
        groups = []
 
        if self.parent_group is None:
 
            return groups
 
        cur_gr = self.parent_group
 
        groups.insert(0, cur_gr)
 
        cnt = 0
 
        while 1:
 
            cnt += 1
 
            gr = getattr(cur_gr, 'parent_group', None)
 
            cur_gr = cur_gr.parent_group
 
            if gr is None:
 
                break
 
            if cnt == parents_recursion_limit:
 
                # this will prevent accidental infinite loops
 
                log.error('group nested more than %s' %
 
                          parents_recursion_limit)
 
                break
 

	
 
            groups.insert(0, gr)
 
        return groups
 

	
 
    @property
 
    def children(self):
 
        return RepoGroup.query().filter(RepoGroup.parent_group == self)
 

	
 
    @property
 
    def name(self):
 
        return self.group_name.split(RepoGroup.url_sep())[-1]
 

	
 
    @property
 
    def full_path(self):
 
        return self.group_name
 

	
 
    @property
 
    def full_path_splitted(self):
 
        return self.group_name.split(RepoGroup.url_sep())
 

	
 
    @property
 
    def repositories(self):
 
        return Repository.query()\
 
                .filter(Repository.group == self)\
 
                .order_by(Repository.repo_name)
 

	
 
    @property
 
    def repositories_recursive_count(self):
 
        cnt = self.repositories.count()
 

	
 
        def children_count(group):
 
            cnt = 0
 
            for child in group.children:
 
                cnt += child.repositories.count()
 
                cnt += children_count(child)
 
            return cnt
 

	
 
        return cnt + children_count(self)
 

	
 
    def recursive_groups_and_repos(self):
 
        """
 
        Recursive return all groups, with repositories in those groups
 
        """
 
        all_ = []
 

	
 
        def _get_members(root_gr):
 
            for r in root_gr.repositories:
 
                all_.append(r)
 
            childs = root_gr.children.all()
 
            if childs:
 
                for gr in childs:
 
                    all_.append(gr)
 
                    _get_members(gr)