Changeset - a1696507b3ad
Marcin Kuzminski - 2013-05-22 02:59:20
marcin@python-works.com
use consistent double quote docstring formatting
5 files changed with 21 insertions and 21 deletions:
0 comments (0 inline, 0 general)
rhodecode/controllers/error.py
 
# -*- coding: utf-8 -*-
 
"""
 
    rhodecode.controllers.error
 
    ~~~~~~~~~~~~~~~~~~~~~~~~~~~
 

	
 
    RhodeCode error controller
 

	
 
    :created_on: Dec 8, 2010
 
    :author: marcink
 
    :copyright: (C) 2010-2012 Marcin Kuzminski <marcin@python-works.com>
 
    :license: GPLv3, see COPYING for more details.
 
"""
 
# This program is free software: you can redistribute it and/or modify
 
# it under the terms of the GNU General Public License as published by
 
# the Free Software Foundation, either version 3 of the License, or
 
# (at your option) any later version.
 
#
 
# This program is distributed in the hope that it will be useful,
 
# but WITHOUT ANY WARRANTY; without even the implied warranty of
 
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 
# GNU General Public License for more details.
 
#
 
# You should have received a copy of the GNU General Public License
 
# along with this program.  If not, see <http://www.gnu.org/licenses/>.
 
import os
 
import cgi
 
import logging
 
import paste.fileapp
 

	
 
from pylons import tmpl_context as c, request, config, url
 
from pylons.i18n.translation import _
 
from pylons.middleware import media_path
 

	
 
from rhodecode.lib.base import BaseController, render
 

	
 
log = logging.getLogger(__name__)
 

	
 

	
 
class ErrorController(BaseController):
 
    """Generates error documents as and when they are required.
 

	
 
    The ErrorDocuments middleware forwards to ErrorController when error
 
    related status codes are returned from the application.
 

	
 
    This behavior can be altered by changing the parameters to the
 
    ErrorDocuments middleware in your config/middleware.py file.
 
    """
 

	
 
    def __before__(self):
 
        # disable all base actions since we don't need them here
 
        pass
 

	
 
    def document(self):
 
        resp = request.environ.get('pylons.original_response')
 
        c.rhodecode_name = config.get('rhodecode_title')
 

	
 
        log.debug('### %s ###' % resp.status)
 

	
 
        e = request.environ
 
        c.serv_p = r'%(protocol)s://%(host)s/' % {
            'protocol': e.get('wsgi.url_scheme'),
            'host': e.get('HTTP_HOST'),
        }
 

	
 
        c.error_message = cgi.escape(request.GET.get('code', str(resp.status)))
 
        c.error_explanation = self.get_error_explanation(resp.status_int)
 

	
 
        # where to redirect to, and after how many seconds, when an error occurs
 
        c.redirect_time = 0
 
        c.redirect_module = _('Home page')
 
        c.url_redirect = "/"
 

	
 
        return render('/errors/error_document.html')
 

	
 
    def img(self, id):
 
        """Serve Pylons' stock images"""
 
        return self._serve_file(os.path.join(media_path, 'img', id))
 

	
 
    def style(self, id):
 
        """Serve Pylons' stock stylesheets"""
 
        return self._serve_file(os.path.join(media_path, 'style', id))
 

	
 
    def _serve_file(self, path):
 
        """Call Paste's FileApp (a WSGI application) to serve the file
 
        at the specified path
 
        """
 
        fapp = paste.fileapp.FileApp(path)
 
        return fapp(request.environ, self.start_response)
 

	
 
    def get_error_explanation(self, code):
 
        """ get the error explanations of int codes
 
            [400, 401, 403, 404, 500]"""
 
        try:
 
            code = int(code)
 
        except Exception:
 
            code = 500
 

	
 
        if code == 400:
 
            return _('The request could not be understood by the server'
 
                     ' due to malformed syntax.')
 
        if code == 401:
 
            return _('Unauthorized access to resource')
 
        if code == 403:
 
            return _("You don't have permission to view this page")
 
        if code == 404:
 
            return _('The resource could not be found')
 
        if code == 500:
 
            return _('The server encountered an unexpected condition'
 
                     ' which prevented it from fulfilling the request.')
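
For quick reference, the branching above is equivalent to a dictionary lookup
with a 500 fallback; a minimal standalone sketch (messages copied from the
handler, the _() translation wrapper omitted):

_EXPLANATIONS = {
    400: ('The request could not be understood by the server'
          ' due to malformed syntax.'),
    401: 'Unauthorized access to resource',
    403: "You don't have permission to view this page",
    404: 'The resource could not be found',
    500: ('The server encountered an unexpected condition'
          ' which prevented it from fulfilling the request.'),
}

def get_error_explanation(code):
    try:
        code = int(code)
    except Exception:
        code = 500
    return _EXPLANATIONS.get(code, _EXPLANATIONS[500])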
rhodecode/lib/dbmigrate/migrate/versioning/genmodel.py
 
"""
 
Code to generate a Python model from a database or differences
 
between a model and database.
 

	
 
Some of this is borrowed heavily from the AutoCode project at:
 
http://code.google.com/p/sqlautocode/
 
"""
 

	
 
import sys
 
import logging
 

	
 
import sqlalchemy
 

	
 
from rhodecode.lib.dbmigrate import migrate
 
from rhodecode.lib.dbmigrate.migrate import changeset
 

	
 

	
 
log = logging.getLogger(__name__)
 
HEADER = """
 
## File autogenerated by genmodel.py
 

	
 
from sqlalchemy import *
 
meta = MetaData()
 
"""
 

	
 
DECLARATIVE_HEADER = """
 
## File autogenerated by genmodel.py
 

	
 
from sqlalchemy import *
 
from sqlalchemy.ext import declarative
 

	
 
Base = declarative.declarative_base()
 
"""
 

	
 

	
 
class ModelGenerator(object):
 
    """Various transformations from an A, B diff.
 

	
 
    In the implementation, A tends to be called the model and B
 
    the database (although this is not true of all diffs).
 
    The diff is directionless, but transformations apply the diff
 
    in a particular direction, described in the method name.
 
    """
 

	
 
    def __init__(self, diff, engine, declarative=False):
 
        self.diff = diff
 
        self.engine = engine
 
        self.declarative = declarative
 

	
 
    def column_repr(self, col):
 
        kwarg = []
 
        if col.key != col.name:
 
            kwarg.append('key')
 
        if col.primary_key:
 
            col.primary_key = True  # otherwise it dumps it as 1
 
            kwarg.append('primary_key')
 
        if not col.nullable:
 
            kwarg.append('nullable')
 
        if col.onupdate:
 
            kwarg.append('onupdate')
 
        if col.default:
 
            if col.primary_key:
 
                # I found that PostgreSQL automatically creates a
 
                # default value for the sequence, but let's not show
 
                # that.
 
                pass
 
            else:
 
                kwarg.append('default')
 
        args = ['%s=%r' % (k, getattr(col, k)) for k in kwarg]
 

	
 
        # crs: not sure if this is a good idea, but it gets rid of extra
 
        # u''
 
        name = col.name.encode('utf8')
 

	
 
        type_ = col.type
 
        for cls in col.type.__class__.__mro__:
 
            if cls.__module__ == 'sqlalchemy.types' and \
 
                not cls.__name__.isupper():
 
                if cls is not type_.__class__:
 
                    type_ = cls()
 
                break
 

	
 
        type_repr = repr(type_)
 
        if type_repr.endswith('()'):
 
            type_repr = type_repr[:-2]
 

	
 
        constraints = [repr(cn) for cn in col.constraints]
 

	
 
        data = {
 
            'name': name,
 
            'commonStuff': ', '.join([type_repr] + constraints + args),
 
        }
 

	
 
        if self.declarative:
 
            return """%(name)s = Column(%(commonStuff)s)""" % data
 
        else:
 
            return """Column(%(name)r, %(commonStuff)s)""" % data
 

	
 
    def _getTableDefn(self, table, metaName='meta'):
 
        out = []
 
        tableName = table.name
 
        if self.declarative:
 
            out.append("class %(table)s(Base):" % {'table': tableName})
 
            out.append("    __tablename__ = '%(table)s'\n" %
 
                            {'table': tableName})
 
            for col in table.columns:
 
                out.append("    %s" % self.column_repr(col))
 
            out.append('\n')
 
        else:
 
            out.append("%(table)s = Table('%(table)s', %(meta)s," %
 
                       {'table': tableName, 'meta': metaName})
 
            for col in table.columns:
 
                out.append("    %s," % self.column_repr(col))
 
            out.append(")\n")
 
        return out
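
    # For a hypothetical 'user' table with a single integer primary key, the
    # two shapes produced above look roughly like:
    #
    #   declarative=True:
    #       class user(Base):
    #           __tablename__ = 'user'
    #           user_id = Column(Integer, primary_key=True, nullable=False)
    #
    #   declarative=False:
    #       user = Table('user', meta,
    #           Column('user_id', Integer, primary_key=True, nullable=False),
    #       )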
 

	
 
    def _get_tables(self, missingA=False, missingB=False, modified=False):
 
        to_process = []
 
        for bool_, names, metadata in (
            (missingA, self.diff.tables_missing_from_A, self.diff.metadataB),
            (missingB, self.diff.tables_missing_from_B, self.diff.metadataA),
            (modified, self.diff.tables_different, self.diff.metadataA),
                ):
 
            if bool_:
 
                for name in names:
 
                    yield metadata.tables.get(name)
 

	
 
    def genBDefinition(self):
 
        """Generates the source code for a definition of B.
 

	
 
        Assumes a diff where A is empty.
 

	
 
        Was: toPython. Assume database (B) is current and model (A) is empty.
 
        """
 

	
 
        out = []
 
        if self.declarative:
 
            out.append(DECLARATIVE_HEADER)
 
        else:
 
            out.append(HEADER)
 
        out.append("")
 
        for table in self._get_tables(missingA=True):
 
            out.extend(self._getTableDefn(table))
 
        return '\n'.join(out)
 

	
 
    def genB2AMigration(self, indent='    '):
 
        """Generate a migration from B to A.

        Was: toUpgradeDowngradePython
        Assume model (A) is most current and database (B) is out-of-date.
        """
 

	
 
        decls = ['from migrate.changeset import schema',
 
                 'pre_meta = MetaData()',
 
                 'post_meta = MetaData()',
 
                ]
 
        upgradeCommands = ['pre_meta.bind = migrate_engine',
 
                           'post_meta.bind = migrate_engine']
 
        downgradeCommands = list(upgradeCommands)
 

	
 
        for tn in self.diff.tables_missing_from_A:
 
            pre_table = self.diff.metadataB.tables[tn]
 
            decls.extend(self._getTableDefn(pre_table, metaName='pre_meta'))
 
            upgradeCommands.append(
 
                "pre_meta.tables[%(table)r].drop()" % {'table': tn})
 
            downgradeCommands.append(
 
                "pre_meta.tables[%(table)r].create()" % {'table': tn})
 

	
 
        for tn in self.diff.tables_missing_from_B:
 
            post_table = self.diff.metadataA.tables[tn]
 
            decls.extend(self._getTableDefn(post_table, metaName='post_meta'))
 
            upgradeCommands.append(
 
                "post_meta.tables[%(table)r].create()" % {'table': tn})
 
            downgradeCommands.append(
 
                "post_meta.tables[%(table)r].drop()" % {'table': tn})
 

	
 
        for (tn, td) in self.diff.tables_different.iteritems():
 
            if td.columns_missing_from_A or td.columns_different:
 
                pre_table = self.diff.metadataB.tables[tn]
 
                decls.extend(self._getTableDefn(
 
                    pre_table, metaName='pre_meta'))
 
            if td.columns_missing_from_B or td.columns_different:
 
                post_table = self.diff.metadataA.tables[tn]
 
                decls.extend(self._getTableDefn(
 
                    post_table, metaName='post_meta'))
 

	
 
            for col in td.columns_missing_from_A:
 
                upgradeCommands.append(
 
                    'pre_meta.tables[%r].columns[%r].drop()' % (tn, col))
 
                downgradeCommands.append(
 
                    'pre_meta.tables[%r].columns[%r].create()' % (tn, col))
 
            for col in td.columns_missing_from_B:
 
                upgradeCommands.append(
 
                    'post_meta.tables[%r].columns[%r].create()' % (tn, col))
 
                downgradeCommands.append(
 
                    'post_meta.tables[%r].columns[%r].drop()' % (tn, col))
 
            for modelCol, databaseCol, modelDecl, databaseDecl in td.columns_different:
 
                upgradeCommands.append(
 
                    'assert False, "Can\'t alter columns: %s:%s=>%s"' % (
 
                    tn, modelCol.name, databaseCol.name))
 
                downgradeCommands.append(
 
                    'assert False, "Can\'t alter columns: %s:%s=>%s"' % (
 
                    tn, modelCol.name, databaseCol.name))
 

	
 
        return (
 
            '\n'.join(decls),
 
            '\n'.join('%s%s' % (indent, line) for line in upgradeCommands),
 
            '\n'.join('%s%s' % (indent, line) for line in downgradeCommands))
 

	
 
    def _db_can_handle_this_change(self, td):
 
        """Check if the database can handle going from B to A."""
 

	
 
        if (td.columns_missing_from_B
 
            and not td.columns_missing_from_A
 
            and not td.columns_different):
 
            # Even sqlite can handle column additions.
 
            return True
 
        else:
 
            return not self.engine.url.drivername.startswith('sqlite')
 

	
 
    def runB2A(self):
 
        """Goes from B to A.
 

	
 
        Was: applyModel. Apply model (A) to current database (B).
 
        """
 

	
 
        meta = sqlalchemy.MetaData(self.engine)
 

	
 
        for table in self._get_tables(missingA=True):
 
            table = table.tometadata(meta)
 
            table.drop()
 
        for table in self._get_tables(missingB=True):
 
            table = table.tometadata(meta)
 
            table.create()
 
        for modelTable in self._get_tables(modified=True):
 
            tableName = modelTable.name
 
            modelTable = modelTable.tometadata(meta)
 
            dbTable = self.diff.metadataB.tables[tableName]
 

	
 
            td = self.diff.tables_different[tableName]
 

	
 
            if self._db_can_handle_this_change(td):
 

	
 
                for col in td.columns_missing_from_B:
 
                    modelTable.columns[col].create()
 
                for col in td.columns_missing_from_A:
 
                    dbTable.columns[col].drop()
 
                # XXX handle column changes here.
 
            else:
 
                # Sqlite doesn't support drop column, so you have to
 
                # do more: create temp table, copy data to it, drop
 
                # old table, create new table, copy data back.
 
                #
 
                # I wonder if this is guaranteed to be unique?
 
                tempName = '_temp_%s' % modelTable.name
 

	
 
                def getCopyStatement():
 
                    preparer = self.engine.dialect.preparer
 
                    commonCols = []
 
                    for modelCol in modelTable.columns:
 
                        if modelCol.name in dbTable.columns:
 
                            commonCols.append(modelCol.name)
 
                    commonColsStr = ', '.join(commonCols)
 
                    return 'INSERT INTO %s (%s) SELECT %s FROM %s' % \
 
                        (tableName, commonColsStr, commonColsStr, tempName)
 

	
 
                # Move the data in one transaction, so that we don't
 
                # leave the database in a nasty state.
 
                connection = self.engine.connect()
 
                trans = connection.begin()
 
                try:
 
                    connection.execute(
 
                        'CREATE TEMPORARY TABLE %s as SELECT * from %s' % \
 
                            (tempName, modelTable.name))
 
                    # make sure the drop takes place inside our
 
                    # transaction with the bind parameter
 
                    modelTable.drop(bind=connection)
 
                    modelTable.create(bind=connection)
 
                    connection.execute(getCopyStatement())
 
                    connection.execute('DROP TABLE %s' % tempName)
 
                    trans.commit()
 
                except:
 
                    trans.rollback()
 
                    raise
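
The temp-table dance above is needed because SQLite's ALTER TABLE cannot drop
columns. A standalone sketch of the same pattern using only the stdlib (the
'person' table and 'age' column are invented for illustration):

import sqlite3

# Drop column 'age' from 'person' on SQLite: copy out, recreate, copy back.
conn = sqlite3.connect(':memory:')
conn.execute('CREATE TABLE person (name TEXT, age INT)')
conn.execute("INSERT INTO person VALUES ('bob', 42)")
try:
    conn.execute('CREATE TEMPORARY TABLE _temp_person AS SELECT * FROM person')
    conn.execute('DROP TABLE person')
    conn.execute('CREATE TABLE person (name TEXT)')  # 'age' removed
    conn.execute('INSERT INTO person (name) SELECT name FROM _temp_person')
    conn.execute('DROP TABLE _temp_person')
    conn.commit()
except Exception:
    conn.rollback()
    raise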
rhodecode/lib/dbmigrate/migrate/versioning/schemadiff.py
 
@@ -55,241 +55,241 @@ class ColDiff(object):
 
      The :class:`~sqlalchemy.schema.Column` object for A.
 

	
 
    .. attribute:: col_B
 

	
 
      The :class:`~sqlalchemy.schema.Column` object for B.
 

	
 
    .. attribute:: type_A
 

	
 
      The most generic type of the :class:`~sqlalchemy.schema.Column`
 
      object in A.
 

	
 
    .. attribute:: type_B
 

	
 
      The most generic type of the :class:`~sqlalchemy.schema.Column`
 
      object in B.
 

	
 
    """
 

	
 
    diff = False
 

	
 
    def __init__(self, col_A, col_B):
 
        self.col_A = col_A
 
        self.col_B = col_B
 

	
 
        self.type_A = col_A.type
 
        self.type_B = col_B.type
 

	
 
        self.affinity_A = self.type_A._type_affinity
 
        self.affinity_B = self.type_B._type_affinity
 

	
 
        if self.affinity_A is not self.affinity_B:
 
            self.diff = True
 
            return
 

	
 
        if isinstance(self.type_A, Float) or isinstance(self.type_B, Float):
            if not (isinstance(self.type_A, Float) and isinstance(self.type_B, Float)):
                self.diff = True
                return

        for attr in ('precision', 'scale', 'length'):
            A = getattr(self.type_A, attr, None)
            B = getattr(self.type_B, attr, None)
            if not (A is None or B is None) and A != B:
                self.diff = True
                return
 

	
 
    def __nonzero__(self):
 
        return self.diff
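
# Example (hedged, not part of this changeset): two columns sharing the
# String affinity but differing in length compare as different:
#
#     from sqlalchemy import Column, String
#     cd = ColDiff(Column('name', String(20)), Column('name', String(40)))
#     assert bool(cd)  # same affinity, but length 20 != 40 -> diff is True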
 

	
 
class TableDiff(object):
 
    """
 
    Container for differences in one :class:`~sqlalchemy.schema.Table`
 
    between two :class:`~sqlalchemy.schema.MetaData` instances, ``A``
 
    and ``B``.
 

	
 
    .. attribute:: columns_missing_from_A
 

	
 
      A sequence of column names that were found in B but weren't in
 
      A.
 

	
 
    .. attribute:: columns_missing_from_B
 

	
 
      A sequence of column names that were found in A but weren't in
 
      B.
 

	
 
    .. attribute:: columns_different
 

	
 
      A dictionary containing information about columns that were
 
      found to be different.
 
      It maps column names to :class:`ColDiff` objects describing the
      differences found.
 
    """
 
    __slots__ = (
 
        'columns_missing_from_A',
 
        'columns_missing_from_B',
 
        'columns_different',
 
        )
 

	
 
    def __nonzero__(self):
 
        return bool(
 
            self.columns_missing_from_A or
 
            self.columns_missing_from_B or
 
            self.columns_different
 
            )
 

	
 
class SchemaDiff(object):
 
    """
 
    Compute the difference between two :class:`~sqlalchemy.schema.MetaData`
 
    objects.
 

	
 
    The string representation of a :class:`SchemaDiff` will summarise
 
    the changes found between the two
 
    :class:`~sqlalchemy.schema.MetaData` objects.
 

	
 
    The length of a :class:`SchemaDiff` will give the number of
 
    changes found, enabling it to be used much like a boolean in
 
    expressions.
 

	
 
    :param metadataA:
 
      First :class:`~sqlalchemy.schema.MetaData` to compare.
 

	
 
    :param metadataB:
 
      Second :class:`~sqlalchemy.schema.MetaData` to compare.
 

	
 
    :param labelA:
 
      The label to use in messages about the first
 
      :class:`~sqlalchemy.schema.MetaData`.
 

	
 
    :param labelB:
 
      The label to use in messages about the second
 
      :class:`~sqlalchemy.schema.MetaData`.
 

	
 
    :param excludeTables:
 
      A sequence of table names to exclude.
 

	
 
    .. attribute:: tables_missing_from_A
 

	
 
      A sequence of table names that were found in B but weren't in
 
      A.
 

	
 
    .. attribute:: tables_missing_from_B
 

	
 
      A sequence of table names that were found in A but weren't in
 
      B.
 

	
 
    .. attribute:: tables_different
 

	
 
      A dictionary containing information about tables that were found
 
      to be different.
 
      It maps table names to :class:`TableDiff` objects describing the
      differences found.
 
    """
 

	
 
    def __init__(self,
 
                 metadataA, metadataB,
 
                 labelA='metadataA',
 
                 labelB='metadataB',
 
                 excludeTables=None):
 

	
 
        self.metadataA, self.metadataB = metadataA, metadataB
 
        self.labelA, self.labelB = labelA, labelB
 
        self.label_width = max(len(labelA), len(labelB))
 
        excludeTables = set(excludeTables or [])
 

	
 
        A_table_names = set(metadataA.tables.keys())
 
        B_table_names = set(metadataB.tables.keys())
 

	
 
        self.tables_missing_from_A = sorted(
 
            B_table_names - A_table_names - excludeTables
 
            )
 
        self.tables_missing_from_B = sorted(
 
            A_table_names - B_table_names - excludeTables
 
            )
 

	
 
        self.tables_different = {}
 
        for table_name in A_table_names.intersection(B_table_names):
 

	
 
            td = TableDiff()
 

	
 
            A_table = metadataA.tables[table_name]
 
            B_table = metadataB.tables[table_name]
 

	
 
            A_column_names = set(A_table.columns.keys())
 
            B_column_names = set(B_table.columns.keys())
 

	
 
            td.columns_missing_from_A = sorted(
 
                B_column_names - A_column_names
 
                )
 

	
 
            td.columns_missing_from_B = sorted(
 
                A_column_names - B_column_names
 
                )
 

	
 
            td.columns_different = {}
 

	
 
            for col_name in A_column_names.intersection(B_column_names):
 

	
 
                cd = ColDiff(
 
                    A_table.columns.get(col_name),
 
                    B_table.columns.get(col_name)
 
                    )
 

	
 
                if cd:
 
                    td.columns_different[col_name] = cd
 

	
 
            # XXX - index and constraint differences should
 
            #       be checked for here
 

	
 
            if td:
 
                self.tables_different[table_name] = td
 

	
 
    def __str__(self):
 
        """ Summarize differences. """
 
        out = []
 
        column_template = '      %%%is: %%r' % self.label_width
 

	
 
        for names, label in (
            (self.tables_missing_from_A, self.labelA),
            (self.tables_missing_from_B, self.labelB),
            ):
            if names:
                out.append(
                    '  tables missing from %s: %s' % (
                        label, ', '.join(sorted(names))
                        )
                    )
 

	
 
        for name, td in sorted(self.tables_different.items()):
            out.append(
               '  table with differences: %s' % name
               )
            for names, label in (
                (td.columns_missing_from_A, self.labelA),
                (td.columns_missing_from_B, self.labelB),
                ):
                if names:
                    out.append(
                        '    %s missing these columns: %s' % (
                            label, ', '.join(sorted(names))
                            )
                        )
            for name, cd in td.columns_different.items():
                out.append('    column with differences: %s' % name)
                out.append(column_template % (self.labelA, cd.col_A))
                out.append(column_template % (self.labelB, cd.col_B))
 

	
 
        if out:
 
            out.insert(0, 'Schema diffs:')
 
            return '\n'.join(out)
 
        else:
 
            return 'No schema diffs'
 

	
 
    def __len__(self):
 
        """
 
        Used in bool evaluation, return of 0 means no diffs.
 
        """
 
        return (
 
            len(self.tables_missing_from_A) +
 
            len(self.tables_missing_from_B) +
 
            len(self.tables_different)
 
            )
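
A hedged usage sketch of SchemaDiff with two hand-built MetaData objects
(table and column names invented for illustration):

from sqlalchemy import MetaData, Table, Column, Integer, String

A, B = MetaData(), MetaData()
Table('user', A, Column('id', Integer, primary_key=True))
Table('user', B, Column('id', Integer, primary_key=True),
      Column('email', String(128)))

diff = SchemaDiff(A, B, labelA='model', labelB='database')
if len(diff):        # the number of changes found
    print str(diff)  # e.g. reports 'model missing these columns: email'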
rhodecode/lib/vcs/subprocessio.py
 
"""
 
Module provides a class allowing to wrap communication over subprocess.Popen
input, output, error streams into a meaningful, non-blocking, concurrent
stream processor exposing the output data as an iterator fitting to be a
return value passed by a WSGI application to a WSGI server per PEP 3333.
 

	
 
Copyright (c) 2011  Daniel Dotsenko <dotsa@hotmail.com>
 

	
 
This file is part of git_http_backend.py Project.
 

	
 
git_http_backend.py Project is free software: you can redistribute it and/or
 
modify it under the terms of the GNU Lesser General Public License as
 
published by the Free Software Foundation, either version 2.1 of the License,
 
or (at your option) any later version.
 

	
 
git_http_backend.py Project is distributed in the hope that it will be useful,
 
but WITHOUT ANY WARRANTY; without even the implied warranty of
 
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 
GNU Lesser General Public License for more details.
 

	
 
You should have received a copy of the GNU Lesser General Public License
 
along with git_http_backend.py Project.
 
If not, see <http://www.gnu.org/licenses/>.
 
"""
 
import os
 
import subprocess
 
from rhodecode.lib.vcs.utils.compat import deque, Event, Thread, _bytes, _bytearray
 

	
 

	
 
class StreamFeeder(Thread):
 
    """
 
    Normal writing into a pipe-like object blocks once the buffer is filled.
    This thread allows feeding data from a file-like object into a pipe
    without blocking the main thread.
    We close the inpipe once the end of the source stream is reached.
 
    """
 
    def __init__(self, source):
 
        super(StreamFeeder, self).__init__()
 
        self.daemon = True
 
        filelike = False
 
        self.bytes = _bytes()
 
        if type(source) in (type(''), _bytes, _bytearray):  # string-like
 
            self.bytes = _bytes(source)
 
        else:  # can be either file pointer or file-like
 
            if type(source) in (int, long):  # file pointer it is
 
                ## converting file descriptor (int) stdin into file-like
 
                try:
 
                    source = os.fdopen(source, 'rb', 16384)
 
                except Exception:
 
                    pass
 
            # let's see if source is file-like by now
 
            try:
 
                filelike = source.read
 
            except Exception:
 
                pass
 
        if not filelike and not self.bytes:
 
            raise TypeError("StreamFeeder's source object must be a readable "
 
                            "file-like, a file descriptor, or a string-like.")
 
        self.source = source
 
        self.readiface, self.writeiface = os.pipe()
 

	
 
    def run(self):
 
        t = self.writeiface
 
        if self.bytes:
 
            os.write(t, self.bytes)
 
        else:
 
            s = self.source
 
            b = s.read(4096)
 
            while b:
 
                os.write(t, b)
 
                b = s.read(4096)
 
        os.close(t)
 

	
 
    @property
 
    def output(self):
 
        return self.readiface
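
# A hedged sketch of the feeder with a string source (os is imported above):
#
#     feeder = StreamFeeder('some stdin payload')
#     feeder.start()
#     print os.read(feeder.output, 4096)  # -> 'some stdin payload'
#     feeder.join()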
 

	
 

	
 
class InputStreamChunker(Thread):
 
    def __init__(self, source, target, buffer_size, chunk_size):
 

	
 
        super(InputStreamChunker, self).__init__()
 

	
 
        self.daemon = True  # die die die.
 

	
 
        self.source = source
 
        self.target = target
 
        self.chunk_count_max = int(buffer_size / chunk_size) + 1
 
        self.chunk_size = chunk_size
 

	
 
        self.data_added = Event()
 
        self.data_added.clear()
 

	
 
        self.keep_reading = Event()
 
        self.keep_reading.set()
 

	
 
        self.EOF = Event()
 
        self.EOF.clear()
 

	
 
        self.go = Event()
 
        self.go.set()
 

	
 
    def stop(self):
 
        self.go.clear()
 
        self.EOF.set()
 
        try:
 
            # this is not proper, but is done to force the reader thread to
            # let go of the input because, if successful, .close() will send
            # EOF down the pipe.
 
            self.source.close()
 
        except:
 
            pass
 

	
 
    def run(self):
 
        s = self.source
 
        t = self.target
 
        cs = self.chunk_size
 
        ccm = self.chunk_count_max
 
        kr = self.keep_reading
 
        da = self.data_added
 
        go = self.go
 

	
 
        try:
 
            b = s.read(cs)
 
        except ValueError:
 
            b = ''
 

	
 
        while b and go.is_set():
 
            if len(t) > ccm:
 
                kr.clear()
 
                kr.wait(2)
 
#                # this only works on 2.7.x and up
 
#                if not kr.wait(10):
 
#                    raise Exception("Timed out while waiting for input to be read.")
 
                # instead we'll use this
 
                if len(t) > ccm + 3:
 
                    raise IOError("Timed out while waiting for input from subprocess.")
 
            t.append(b)
 
            da.set()
 
            b = s.read(cs)
 
        self.EOF.set()
 
        da.set()  # for cases when done but there was no input.
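
# The read loop above applies backpressure via the keep_reading event; a
# minimal sketch of the handshake (illustrative, using threading.Event):
#
#     keep_reading = Event()
#     keep_reading.set()    # consumer side: there is room in the buffer
#     keep_reading.clear()  # producer side: buffer holds > chunk_count_max
#     keep_reading.wait(2)  # producer blocks (up to 2s) until consumer set()s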
 

	
 

	
 
class BufferedGenerator():
 
    """
    Class behaves as a non-blocking, buffered pipe reader.
    Reads chunks of data (through a thread) from a blocking pipe and
    attaches these to an array (deque) of chunks.
    Reading is halted in the thread when the maximum number of chunks is
    buffered internally.
    The .next() method may operate in blocking or non-blocking fashion: by
    yielding '' if no data is ready to be sent, or by not returning until
    there is some data to send.
    When we get EOF from the underlying source pipe, we raise a marker so
    that StopIteration is raised after the last chunk of data is yielded.
    """
 

	
 
    def __init__(self, source, buffer_size=65536, chunk_size=4096,
 
                 starting_values=[], bottomless=False):
 

	
 
        if bottomless:
 
            maxlen = int(buffer_size / chunk_size)
 
        else:
 
            maxlen = None
 

	
 
        self.data = deque(starting_values, maxlen)
 

	
 
        self.worker = InputStreamChunker(source, self.data, buffer_size,
 
                                         chunk_size)
 
        if starting_values:
 
            self.worker.data_added.set()
 
        self.worker.start()
 

	
 
    ####################
 
    # Generator's methods
 
    ####################
 

	
 
    def __iter__(self):
 
        return self
 

	
 
    def next(self):
 
        while not len(self.data) and not self.worker.EOF.is_set():
 
            self.worker.data_added.clear()
 
            self.worker.data_added.wait(0.2)
 
        if len(self.data):
 
            self.worker.keep_reading.set()
 
            return _bytes(self.data.popleft())
 
        elif self.worker.EOF.is_set():
 
            raise StopIteration
 

	
 
    def throw(self, type, value=None, traceback=None):
 
        if not self.worker.EOF.is_set():
 
            raise type(value)
 

	
 
    def start(self):
 
        self.worker.start()
 

	
 
    def stop(self):
 
        self.worker.stop()
 

	
 
    def close(self):
 
        try:
 
            self.worker.stop()
 
            self.throw(GeneratorExit)
 
        except (GeneratorExit, StopIteration):
 
            pass
 

	
 
    def __del__(self):
 
        self.close()
 

	
 
    ####################
 
    # Threaded reader's infrastructure.
 
    ####################
 
    @property
 
    def input(self):
 
        return self.worker.w
 

	
 
    @property
 
    def data_added_event(self):
 
        return self.worker.data_added
 

	
 
    @property
 
    def data_added(self):
 
        return self.worker.data_added.is_set()
 

	
 
    @property
 
    def reading_paused(self):
 
        return not self.worker.keep_reading.is_set()
 

	
 
    @property
 
    def done_reading_event(self):
 
        """
        Done reading does not mean that the iterator's buffer is empty.
        The iterator might have finished reading from the underlying source,
        but the read chunks might still be available for serving through
        the .next() method.

        @return An Event class instance.
        """
 
        return self.worker.EOF
 

	
 
    @property
 
    def done_reading(self):
 
        """
        Done reading does not mean that the iterator's buffer is empty.
        The iterator might have finished reading from the underlying source,
        but the read chunks might still be available for serving through
        the .next() method.

        @return A bool value.
        """
 
        return self.worker.EOF.is_set()
 

	
 
    @property
 
    def length(self):
 
        """
        Returns an int.

        This is the length of the queue of chunks, not the length of
        the combined contents in those chunks.

        __len__() cannot be meaningfully implemented because this
        reader is just flying through a bottomless pit of content and
        can only know the length of what it has already seen.

        If __len__() returned a value, a WSGI server compliant with
        PEP 3333 would set the response's length to it. In order not to
        confuse such servers, we do not implement __len__ at all.
        """
 
        return len(self.data)
 

	
 
    def prepend(self, x):
 
        self.data.appendleft(x)
 

	
 
    def append(self, x):
 
        self.data.append(x)
 

	
 
    def extend(self, o):
 
        self.data.extend(o)
 

	
 
    def __getitem__(self, i):
 
        return self.data[i]
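
# A hedged end-to-end sketch over a real pipe (subprocess is imported above):
#
#     p = subprocess.Popen('echo hello', shell=True, stdout=subprocess.PIPE)
#     bg = BufferedGenerator(p.stdout)
#     for chunk in bg:  # yields bytes chunks, StopIteration at EOF
#         print chunk   # -> 'hello\n'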
 

	
 

	
 
class SubprocessIOChunker(object):
 
    """
 
    Processor class wrapping handling of subprocess IO.
 

	
 
    In a way, this is a "communicate()" replacement with a twist.
 

	
 
    - We are multithreaded. Writing in and reading out/err all happen in separate threads.
 
    - We support concurrent (in and out) stream processing.
 
    - The output is not a stream. It's a queue of read string (bytes, not unicode)
      chunks. The object behaves as an iterable; you can iterate over it with
      "for chunk in obj:".
 
    - We are non-blocking in more respects than communicate()
 
      (reading from subprocess out pauses when internal buffer is full, but
 
       does not block the parent calling code. On the flip side, reading from
 
       slow-yielding subprocess may block the iteration until data shows up. This
 
       does not block the inpipe reading that occurs in a parallel thread.)
 

	
 
    The purpose of the object is to allow us to wrap subprocess interactions
    into an iterable that can be passed to a WSGI server as the application's
    return value. Because of this stream-processing ability, WSGI does not
    have to read ALL of the subprocess's output and buffer it before handing
    it to the WSGI server for the HTTP response. Instead, the class
    initializer reads just a bit of the stream to figure out if an error
    occurred or is likely to occur, and if not, just hands further iteration
    over the subprocess output to the server for completion of the HTTP
    response.
 

	
 
    The real or perceived subprocess error is trapped and raised as one of
    the EnvironmentError family of exceptions.
 

	
 
    Example usage:
 
    #    try:
 
    #        answer = SubprocessIOChunker(
 
    #            cmd,
 
    #            input,
 
    #            buffer_size = 65536,
 
    #            chunk_size = 4096
 
    #            )
 
    #    except (EnvironmentError) as e:
 
    #        print str(e)
 
    #        raise e
 
    #
 
    #    return answer
 

	
 

	
 
    """
 
    def __init__(self, cmd, inputstream=None, buffer_size=65536,
 
                 chunk_size=4096, starting_values=[], **kwargs):
 
        """
 
        Initializes SubprocessIOChunker
 

	
 
        :param cmd: A subprocess.Popen style "cmd". Can be a string or an array of strings.
        :param inputstream: (Default: None) A file-like, string, or file pointer.
        :param buffer_size: (Default: 65536) The size of the total buffer per stream, in bytes.
        :param chunk_size: (Default: 4096) The max size of a chunk. The actual chunk may be smaller.
        :param starting_values: (Default: []) An array of strings to put in front of the output queue.
 
        """
 

	
 
        if inputstream:
 
            input_streamer = StreamFeeder(inputstream)
 
            input_streamer.start()
 
            inputstream = input_streamer.output
 

	
 
        _shell = kwargs.get('shell', True)
 
        if isinstance(cmd, (list, tuple)):
 
            cmd = ' '.join(cmd)
 

	
 
        kwargs['shell'] = _shell
 
        _p = subprocess.Popen(cmd,
 
            bufsize=-1,
 
            stdin=inputstream,
 
            stdout=subprocess.PIPE,
 
            stderr=subprocess.PIPE,
 
            **kwargs
 
        )
 

	
 
        bg_out = BufferedGenerator(_p.stdout, buffer_size, chunk_size, starting_values)
 
        bg_err = BufferedGenerator(_p.stderr, 16000, 1, bottomless=True)
 

	
 
        while not bg_out.done_reading and not bg_out.reading_paused and not bg_err.length:
 
            # doing this until we reach either end of file, or end of buffer.
 
            bg_out.data_added_event.wait(1)
 
            bg_out.data_added_event.clear()
 

	
 
        # at this point it's still ambiguous whether we are done reading or just have a full buffer.
 
        # Either way, if error (returned by ended process, or implied based on
 
        # presence of stuff in stderr output) we error out.
 
        # Else, we are happy.
 
        _returncode = _p.poll()
 
        if _returncode or (_returncode is None and bg_err.length):
 
            try:
 
                _p.terminate()
 
            except:
 
                pass
 
            bg_out.stop()
 
            bg_err.stop()
 
            err = '%s' % ''.join(bg_err)
 
            if err:
 
                raise EnvironmentError("Subprocess exited due to an error:\n" + err)
 
            raise EnvironmentError("Subprocess exited with non 0 ret code:%s" % _returncode)
 

	
 
        self.process = _p
 
        self.output = bg_out
 
        self.error = bg_err
 

	
 
    def __iter__(self):
 
        return self
 

	
 
    def next(self):
 
        if self.process.poll():
 
            err = '%s' % ''.join(self.error)
 
            raise EnvironmentError("Subprocess exited due to an error:\n" + err)
 
        return self.output.next()
 

	
 
    def throw(self, type, value=None, traceback=None):
 
        if self.output.length or not self.output.done_reading:
 
            raise type(value)
 

	
 
    def close(self):
 
        try:
 
            self.process.terminate()
 
        except:
 
            pass
 
        try:
 
            self.output.close()
 
        except:
 
            pass
 
        try:
 
            self.error.close()
 
        except:
 
            pass
 

	
 
    def __del__(self):
 
        self.close()
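
A hedged sketch of the intended WSGI usage (the command is illustrative):

def application(environ, start_response):
    start_response('200 OK', [('Content-Type', 'application/octet-stream')])
    # The server iterates the chunker and streams each bytes chunk; on
    # subprocess failure, iteration raises an EnvironmentError.
    return SubprocessIOChunker('cat /some/file')  # hypothetical command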
rhodecode/tests/vcs/test_git.py
 
@@ -68,402 +68,402 @@ class GitRepositoryTest(unittest.TestCas
 
            'Repo was cloned and updated but file %s was found'
 
            % fpath)
 

	
 
    def test_repo_clone_into_bare_repo(self):
 
        repo = GitRepository(TEST_GIT_REPO)
 
        clone_path = TEST_GIT_REPO_CLONE + '_bare.git'
 
        repo_clone = GitRepository(clone_path, create=True,
 
            src_url=repo.path, bare=True)
 
        self.assertTrue(repo_clone._repo.bare)
 

	
 
    def test_create_repo_is_not_bare_by_default(self):
 
        repo = GitRepository(get_new_dir('not-bare-by-default'), create=True)
 
        self.assertFalse(repo._repo.bare)
 

	
 
    def test_create_bare_repo(self):
 
        repo = GitRepository(get_new_dir('bare-repo'), create=True, bare=True)
 
        self.assertTrue(repo._repo.bare)
 

	
 
    def test_revisions(self):
 
        # there are 112 revisions (by now)
 
        # so we can assume they would be available from now on
 
        subset = set([
 
            'c1214f7e79e02fc37156ff215cd71275450cffc3',
 
            '38b5fe81f109cb111f549bfe9bb6b267e10bc557',
 
            'fa6600f6848800641328adbf7811fd2372c02ab2',
 
            '102607b09cdd60e2793929c4f90478be29f85a17',
 
            '49d3fd156b6f7db46313fac355dca1a0b94a0017',
 
            '2d1028c054665b962fa3d307adfc923ddd528038',
 
            'd7e0d30fbcae12c90680eb095a4f5f02505ce501',
 
            'ff7ca51e58c505fec0dd2491de52c622bb7a806b',
 
            'dd80b0f6cf5052f17cc738c2951c4f2070200d7f',
 
            '8430a588b43b5d6da365400117c89400326e7992',
 
            'd955cd312c17b02143c04fa1099a352b04368118',
 
            'f67b87e5c629c2ee0ba58f85197e423ff28d735b',
 
            'add63e382e4aabc9e1afdc4bdc24506c269b7618',
 
            'f298fe1189f1b69779a4423f40b48edf92a703fc',
 
            'bd9b619eb41994cac43d67cf4ccc8399c1125808',
 
            '6e125e7c890379446e98980d8ed60fba87d0f6d1',
 
            'd4a54db9f745dfeba6933bf5b1e79e15d0af20bd',
 
            '0b05e4ed56c802098dfc813cbe779b2f49e92500',
 
            '191caa5b2c81ed17c0794bf7bb9958f4dcb0b87e',
 
            '45223f8f114c64bf4d6f853e3c35a369a6305520',
 
            'ca1eb7957a54bce53b12d1a51b13452f95bc7c7e',
 
            'f5ea29fc42ef67a2a5a7aecff10e1566699acd68',
 
            '27d48942240f5b91dfda77accd2caac94708cc7d',
 
            '622f0eb0bafd619d2560c26f80f09e3b0b0d78af',
 
            'e686b958768ee96af8029fe19c6050b1a8dd3b2b'])
 
        self.assertTrue(subset.issubset(set(self.repo.revisions)))
 

	
 

	
 

	
 
    def test_slicing(self):
 
        # expected slice sizes: 4, 1, 5, 10, 95
 
        for sfrom, sto, size in [(0, 4, 4), (1, 2, 1), (10, 15, 5),
 
                                 (10, 20, 10), (5, 100, 95)]:
 
            revs = list(self.repo[sfrom:sto])
 
            self.assertEqual(len(revs), size)
 
            self.assertEqual(revs[0], self.repo.get_changeset(sfrom))
 
            self.assertEqual(revs[-1], self.repo.get_changeset(sto - 1))
 

	
 

	
 
    def test_branches(self):
 
        # TODO: Need more tests here
 
        # Removed (those are 'remotes' branches for cloned repo)
 
        #self.assertTrue('master' in self.repo.branches)
 
        #self.assertTrue('gittree' in self.repo.branches)
 
        #self.assertTrue('web-branch' in self.repo.branches)
 
        for name, id in self.repo.branches.items():
 
            self.assertTrue(isinstance(
 
                self.repo.get_changeset(id), GitChangeset))
 

	
 
    def test_tags(self):
 
        # TODO: Need more tests here
 
        self.assertTrue('v0.1.1' in self.repo.tags)
 
        self.assertTrue('v0.1.2' in self.repo.tags)
 
        for name, id in self.repo.tags.items():
 
            self.assertTrue(isinstance(
 
                self.repo.get_changeset(id), GitChangeset))
 

	
 
    def _test_single_changeset_cache(self, revision):
 
        chset = self.repo.get_changeset(revision)
 
        self.assertTrue(revision in self.repo.changesets)
 
        self.assertTrue(chset is self.repo.changesets[revision])
 

	
 
    def test_initial_changeset(self):
 
        id = self.repo.revisions[0]
 
        init_chset = self.repo.get_changeset(id)
 
        self.assertEqual(init_chset.message, 'initial import\n')
 
        self.assertEqual(init_chset.author,
 
            'Marcin Kuzminski <marcin@python-blog.com>')
 
        for path in ('vcs/__init__.py',
 
                     'vcs/backends/BaseRepository.py',
 
                     'vcs/backends/__init__.py'):
 
            self.assertTrue(isinstance(init_chset.get_node(path), FileNode))
 
        for path in ('', 'vcs', 'vcs/backends'):
 
            self.assertTrue(isinstance(init_chset.get_node(path), DirNode))
 

	
 
        self.assertRaises(NodeDoesNotExistError, init_chset.get_node, path='foobar')
 

	
 
        node = init_chset.get_node('vcs/')
 
        self.assertTrue(hasattr(node, 'kind'))
 
        self.assertEqual(node.kind, NodeKind.DIR)
 

	
 
        node = init_chset.get_node('vcs')
 
        self.assertTrue(hasattr(node, 'kind'))
 
        self.assertEqual(node.kind, NodeKind.DIR)
 

	
 
        node = init_chset.get_node('vcs/__init__.py')
 
        self.assertTrue(hasattr(node, 'kind'))
 
        self.assertEqual(node.kind, NodeKind.FILE)
 

	
 
    def test_not_existing_changeset(self):
 
        self.assertRaises(RepositoryError, self.repo.get_changeset,
 
            'f' * 40)
 

	
 
    def test_changeset10(self):
 

	
 
        chset10 = self.repo.get_changeset(self.repo.revisions[9])
 
        README = """===
 
VCS
 
===
 

	
 
Various Version Control System management abstraction layer for Python.
 

	
 
Introduction
 
------------
 

	
 
TODO: To be written...
 

	
 
"""
 
        node = chset10.get_node('README.rst')
 
        self.assertEqual(node.kind, NodeKind.FILE)
 
        self.assertEqual(node.content, README)
 

	
 

	
 
class GitChangesetTest(unittest.TestCase):
 

	
 
    def setUp(self):
 
        self.repo = GitRepository(TEST_GIT_REPO)
 

	
 
    def test_default_changeset(self):
 
        tip = self.repo.get_changeset()
 
        self.assertEqual(tip, self.repo.get_changeset(None))
 
        self.assertEqual(tip, self.repo.get_changeset('tip'))
 

	
 
    def test_root_node(self):
 
        tip = self.repo.get_changeset()
 
        self.assertTrue(tip.root is tip.get_node(''))
 

	
 
    def test_lazy_fetch(self):
 
        """
 
        Test if changeset's nodes expands and are cached as we walk through
 
        the revision. This test is somewhat hard to write as order of tests
 
        is a key here. Written by running command after command in a shell.
 
        """
 
        hex = '2a13f185e4525f9d4b59882791a2d397b90d5ddc'
 
        self.assertTrue(hex in self.repo.revisions)
 
        chset = self.repo.get_changeset(hex)
 
        self.assertTrue(len(chset.nodes) == 0)
 
        root = chset.root
 
        self.assertTrue(len(chset.nodes) == 1)
 
        self.assertTrue(len(root.nodes) == 8)
 
        # accessing root.nodes updates chset.nodes
 
        self.assertTrue(len(chset.nodes) == 9)
 

	
 
        docs = root.get_node('docs')
 
        # we haven't yet accessed anything new as docs dir was already cached
 
        self.assertTrue(len(chset.nodes) == 9)
 
        self.assertTrue(len(docs.nodes) == 8)
 
        # accessing docs.nodes updates chset.nodes
 
        self.assertTrue(len(chset.nodes) == 17)
 

	
 
        self.assertTrue(docs is chset.get_node('docs'))
 
        self.assertTrue(docs is root.nodes[0])
 
        self.assertTrue(docs is root.dirs[0])
 
        self.assertTrue(docs is chset.get_node('docs'))
 

	
 
    def test_nodes_with_changeset(self):
 
        hex = '2a13f185e4525f9d4b59882791a2d397b90d5ddc'
 
        chset = self.repo.get_changeset(hex)
 
        root = chset.root
 
        docs = root.get_node('docs')
 
        self.assertTrue(docs is chset.get_node('docs'))
 
        api = docs.get_node('api')
 
        self.assertTrue(api is chset.get_node('docs/api'))
 
        index = api.get_node('index.rst')
 
        self.assertTrue(index is chset.get_node('docs/api/index.rst'))
 
        self.assertTrue(index is chset.get_node('docs')\
 
            .get_node('api')\
 
            .get_node('index.rst'))
 

	
 
    def test_branch_and_tags(self):
 
        """
 
        rev0 = self.repo.revisions[0]
 
        chset0 = self.repo.get_changeset(rev0)
 
        self.assertEqual(chset0.branch, 'master')
 
        self.assertEqual(chset0.tags, [])
 

	
 
        rev10 = self.repo.revisions[10]
 
        chset10 = self.repo.get_changeset(rev10)
 
        self.assertEqual(chset10.branch, 'master')
 
        self.assertEqual(chset10.tags, [])
 

	
 
        rev44 = self.repo.revisions[44]
 
        chset44 = self.repo.get_changeset(rev44)
 
        self.assertEqual(chset44.branch, 'web-branch')
 

	
 
        tip = self.repo.get_changeset('tip')
 
        self.assertTrue('tip' in tip.tags)
 
        """
 
        # Those tests would fail - branches are now going
        # to be changed in the main API in order to support the git backend
 
        pass
 

	
 
    def _test_slices(self, limit, offset):
 
        count = self.repo.count()
 
        changesets = self.repo.get_changesets(limit=limit, offset=offset)
 
        idx = 0
 
        for changeset in changesets:
 
            rev = offset + idx
 
            idx += 1
 
            rev_id = self.repo.revisions[rev]
 
            if idx > limit:
 
                self.fail("Exceeded limit already (getting revision %s, "
 
                    "there are %s total revisions, offset=%s, limit=%s)"
 
                    % (rev_id, count, offset, limit))
 
            self.assertEqual(changeset, self.repo.get_changeset(rev_id))
 
        result = list(self.repo.get_changesets(limit=limit, offset=offset))
 
        start = offset
 
        end = limit and offset + limit or None
 
        sliced = list(self.repo[start:end])
 
        self.failUnlessEqual(result, sliced,
 
            msg="Comparison failed for limit=%s, offset=%s"
 
            "(get_changeset returned: %s and sliced: %s"
 
            % (limit, offset, result, sliced))
 

	
 
    def _test_file_size(self, revision, path, size):
 
        node = self.repo.get_changeset(revision).get_node(path)
 
        self.assertTrue(node.is_file())
 
        self.assertEqual(node.size, size)
 

	
 
    def test_file_size(self):
 
        to_check = (
 
            ('c1214f7e79e02fc37156ff215cd71275450cffc3',
 
                'vcs/backends/BaseRepository.py', 502),
 
            ('d7e0d30fbcae12c90680eb095a4f5f02505ce501',
 
                'vcs/backends/hg.py', 854),
 
            ('6e125e7c890379446e98980d8ed60fba87d0f6d1',
 
                'setup.py', 1068),
 

	
 
            ('d955cd312c17b02143c04fa1099a352b04368118',
 
                'vcs/backends/base.py', 2921),
 
            ('ca1eb7957a54bce53b12d1a51b13452f95bc7c7e',
 
                'vcs/backends/base.py', 3936),
 
            ('f50f42baeed5af6518ef4b0cb2f1423f3851a941',
 
                'vcs/backends/base.py', 6189),
 
        )
 
        for revision, path, size in to_check:
 
            self._test_file_size(revision, path, size)
 

	
 
    def test_file_history(self):
 
        # we can only check if those revisions are present in the history
 
        # as we cannot update this test every time file is changed
 
        files = {
 
            'setup.py': [
 
                '54386793436c938cff89326944d4c2702340037d',
 
                '51d254f0ecf5df2ce50c0b115741f4cf13985dab',
 
                '998ed409c795fec2012b1c0ca054d99888b22090',
 
                '5e0eb4c47f56564395f76333f319d26c79e2fb09',
 
                '0115510b70c7229dbc5dc49036b32e7d91d23acd',
 
                '7cb3fd1b6d8c20ba89e2264f1c8baebc8a52d36e',
 
                '2a13f185e4525f9d4b59882791a2d397b90d5ddc',
 
                '191caa5b2c81ed17c0794bf7bb9958f4dcb0b87e',
 
                'ff7ca51e58c505fec0dd2491de52c622bb7a806b',
 
            ],
 
            'vcs/nodes.py': [
 
                '33fa3223355104431402a888fa77a4e9956feb3e',
 
                'fa014c12c26d10ba682fadb78f2a11c24c8118e1',
 
                'e686b958768ee96af8029fe19c6050b1a8dd3b2b',
 
                'ab5721ca0a081f26bf43d9051e615af2cc99952f',
 
                'c877b68d18e792a66b7f4c529ea02c8f80801542',
 
                '4313566d2e417cb382948f8d9d7c765330356054',
 
                '6c2303a793671e807d1cfc70134c9ca0767d98c2',
 
                '54386793436c938cff89326944d4c2702340037d',
 
                '54000345d2e78b03a99d561399e8e548de3f3203',
 
                '1c6b3677b37ea064cb4b51714d8f7498f93f4b2b',
 
                '2d03ca750a44440fb5ea8b751176d1f36f8e8f46',
 
                '2a08b128c206db48c2f0b8f70df060e6db0ae4f8',
 
                '30c26513ff1eb8e5ce0e1c6b477ee5dc50e2f34b',
 
                'ac71e9503c2ca95542839af0ce7b64011b72ea7c',
 
                '12669288fd13adba2a9b7dd5b870cc23ffab92d2',
 
                '5a0c84f3e6fe3473e4c8427199d5a6fc71a9b382',
 
                '12f2f5e2b38e6ff3fbdb5d722efed9aa72ecb0d5',
 
                '5eab1222a7cd4bfcbabc218ca6d04276d4e27378',
 
                'f50f42baeed5af6518ef4b0cb2f1423f3851a941',
 
                'd7e390a45f6aa96f04f5e7f583ad4f867431aa25',
 
                'f15c21f97864b4f071cddfbf2750ec2e23859414',
 
                'e906ef056cf539a4e4e5fc8003eaf7cf14dd8ade',
 
                'ea2b108b48aa8f8c9c4a941f66c1a03315ca1c3b',
 
                '84dec09632a4458f79f50ddbbd155506c460b4f9',
 
                '0115510b70c7229dbc5dc49036b32e7d91d23acd',
 
                '2a13f185e4525f9d4b59882791a2d397b90d5ddc',
 
                '3bf1c5868e570e39569d094f922d33ced2fa3b2b',
 
                'b8d04012574729d2c29886e53b1a43ef16dd00a1',
 
                '6970b057cffe4aab0a792aa634c89f4bebf01441',
 
                'dd80b0f6cf5052f17cc738c2951c4f2070200d7f',
 
                'ff7ca51e58c505fec0dd2491de52c622bb7a806b',
 
            ],
 
            'vcs/backends/git.py': [
 
                '4cf116ad5a457530381135e2f4c453e68a1b0105',
 
                '9a751d84d8e9408e736329767387f41b36935153',
 
                'cb681fb539c3faaedbcdf5ca71ca413425c18f01',
 
                '428f81bb652bcba8d631bce926e8834ff49bdcc6',
 
                '180ab15aebf26f98f714d8c68715e0f05fa6e1c7',
 
                '2b8e07312a2e89e92b90426ab97f349f4bce2a3a',
 
                '50e08c506174d8645a4bb517dd122ac946a0f3bf',
 
                '54000345d2e78b03a99d561399e8e548de3f3203',
 
            ],
 
        }
 
        for path, revs in files.items():
 
            node = self.repo.get_changeset(revs[0]).get_node(path)
 
            node_revs = [chset.raw_id for chset in node.history]
 
            self.assertTrue(set(revs).issubset(set(node_revs)),
 
                "We assumed that %s is subset of revisions for which file %s "
 
                "has been changed, and history of that node returned: %s"
 
                % (revs, path, node_revs))
 

	
 
    def test_file_annotate(self):
 
        files = {
 
            'vcs/backends/__init__.py': {
 
                'c1214f7e79e02fc37156ff215cd71275450cffc3': {
 
                    'lines_no': 1,
 
                    'changesets': [
 
                        'c1214f7e79e02fc37156ff215cd71275450cffc3',
 
                    ],
 
                },
 
                '16fba1ae9334d79b66d7afed2c2dfbfa2ae53647': {
 
                    'lines_no': 21,
 
                    'changesets': [
 
                        '49d3fd156b6f7db46313fac355dca1a0b94a0017',
 
                        '16fba1ae9334d79b66d7afed2c2dfbfa2ae53647',
 
                        '16fba1ae9334d79b66d7afed2c2dfbfa2ae53647',
 
                        '16fba1ae9334d79b66d7afed2c2dfbfa2ae53647',
 
                        '16fba1ae9334d79b66d7afed2c2dfbfa2ae53647',
 
                        '16fba1ae9334d79b66d7afed2c2dfbfa2ae53647',
 
                        '16fba1ae9334d79b66d7afed2c2dfbfa2ae53647',
 
                        '16fba1ae9334d79b66d7afed2c2dfbfa2ae53647',
 
                        '16fba1ae9334d79b66d7afed2c2dfbfa2ae53647',
 
                        '16fba1ae9334d79b66d7afed2c2dfbfa2ae53647',
 
                        '16fba1ae9334d79b66d7afed2c2dfbfa2ae53647',
 
                        '16fba1ae9334d79b66d7afed2c2dfbfa2ae53647',
 
                        '16fba1ae9334d79b66d7afed2c2dfbfa2ae53647',
 
                        '16fba1ae9334d79b66d7afed2c2dfbfa2ae53647',
 
                        '16fba1ae9334d79b66d7afed2c2dfbfa2ae53647',
 
                        '16fba1ae9334d79b66d7afed2c2dfbfa2ae53647',
 
                        '16fba1ae9334d79b66d7afed2c2dfbfa2ae53647',
 
                        '16fba1ae9334d79b66d7afed2c2dfbfa2ae53647',
 
                        '16fba1ae9334d79b66d7afed2c2dfbfa2ae53647',
 
                        '16fba1ae9334d79b66d7afed2c2dfbfa2ae53647',
 
                        '16fba1ae9334d79b66d7afed2c2dfbfa2ae53647',
 
                    ],
 
                },
 
                'e29b67bd158580fc90fc5e9111240b90e6e86064': {
 
                    'lines_no': 32,
 
                    'changesets': [
 
                        '16fba1ae9334d79b66d7afed2c2dfbfa2ae53647',
 
                        '16fba1ae9334d79b66d7afed2c2dfbfa2ae53647',
 
                        '5eab1222a7cd4bfcbabc218ca6d04276d4e27378',
 
                        '16fba1ae9334d79b66d7afed2c2dfbfa2ae53647',
 
                        '16fba1ae9334d79b66d7afed2c2dfbfa2ae53647',
 
                        '992f38217b979d0b0987d0bae3cc26dac85d9b19',
 
                        '16fba1ae9334d79b66d7afed2c2dfbfa2ae53647',
 
                        '54000345d2e78b03a99d561399e8e548de3f3203',
 
                        '16fba1ae9334d79b66d7afed2c2dfbfa2ae53647',
 
                        '16fba1ae9334d79b66d7afed2c2dfbfa2ae53647',
 
                        '78c3f0c23b7ee935ec276acb8b8212444c33c396',
 
                        '992f38217b979d0b0987d0bae3cc26dac85d9b19',
 
                        '992f38217b979d0b0987d0bae3cc26dac85d9b19',
 
                        '992f38217b979d0b0987d0bae3cc26dac85d9b19',
 
                        '992f38217b979d0b0987d0bae3cc26dac85d9b19',
 
                        '2a13f185e4525f9d4b59882791a2d397b90d5ddc',
 
                        '992f38217b979d0b0987d0bae3cc26dac85d9b19',
 
                        '78c3f0c23b7ee935ec276acb8b8212444c33c396',
 
                        '992f38217b979d0b0987d0bae3cc26dac85d9b19',
 
                        '992f38217b979d0b0987d0bae3cc26dac85d9b19',
 
                        '992f38217b979d0b0987d0bae3cc26dac85d9b19',
 
                        '16fba1ae9334d79b66d7afed2c2dfbfa2ae53647',
 
                        '16fba1ae9334d79b66d7afed2c2dfbfa2ae53647',
 
                        '16fba1ae9334d79b66d7afed2c2dfbfa2ae53647',
 
                        '16fba1ae9334d79b66d7afed2c2dfbfa2ae53647',
 
                        '992f38217b979d0b0987d0bae3cc26dac85d9b19',
 
                        '16fba1ae9334d79b66d7afed2c2dfbfa2ae53647',
 
                        '992f38217b979d0b0987d0bae3cc26dac85d9b19',
 
                        '992f38217b979d0b0987d0bae3cc26dac85d9b19',
 
                        '16fba1ae9334d79b66d7afed2c2dfbfa2ae53647',
 
                        '16fba1ae9334d79b66d7afed2c2dfbfa2ae53647',
 
                        '16fba1ae9334d79b66d7afed2c2dfbfa2ae53647',
 
                    ],
 
                },
 
            },
 
        }
 

	