# ---- tailor-0.9.35+darcs20090615/vcpx/repository/cdv.py ----

# -*- mode: python; coding: utf-8 -*-
# :Progetto: vcpx -- Codeville details
# :Creato:   gio 05 mag 2005 23:47:45 CEST
# :Autore:   Lele Gaifax
# :Licenza:  GNU General Public License
#

"""
This module implements the backends for Codeville.
"""

__docformat__ = 'reStructuredText'

from vcpx.repository import Repository
from vcpx.shwrap import ExternalCommand
from vcpx.target import SynchronizableTargetWorkingDir, \
     TargetInitializationFailure
from vcpx.source import ChangesetApplicationFailure
from vcpx.tzinfo import UTC


class CdvRepository(Repository):
    METADIR = '.cdv'

    def _load(self, project):
        Repository._load(self, project)
        self.EXECUTABLE = project.config.get(self.name, 'cdv-command', 'cdv')

    def create(self):
        """
        Create the base directory if it doesn't exist, and the
        repository as well in the new working directory, executing
        a ``cdv init`` there.
        """

        from os.path import join, normpath, exists

        if exists(join(self.basedir, self.METADIR)):
            return

        init = ExternalCommand(cwd=self.basedir, command=self.command("init"))
        init.execute()

        if init.exit_status:
            raise TargetInitializationFailure(
                "%s returned status %s" % (str(init), init.exit_status))


class CdvWorkingDir(SynchronizableTargetWorkingDir):

    ## SynchronizableTargetWorkingDir

    def _replayChangeset(self, changeset):
        """
        Under Codeville, it's safer to explicitly edit modified items.
        """

        SynchronizableTargetWorkingDir._replayChangeset(self, changeset)
        names = [e.name for e in changeset.modifiedEntries()]
        cmd = self.repository.command("edit")
        ExternalCommand(cwd=self.repository.basedir,
                        command=cmd).execute(names)

    def _addPathnames(self, names):
        """
        Add some new filesystem objects.
        """

        cmd = self.repository.command("add")
        ExternalCommand(cwd=self.repository.basedir,
                        command=cmd).execute(names)

    def _commit(self, date, author, patchname, changelog=None, entries=None,
                tags = [], isinitialcommit = False):
        """
        Commit the changeset.
        """

        encode = self.repository.encode

        logmessage = []
        if patchname:
            logmessage.append(patchname)
        if changelog:
            logmessage.append(changelog.replace('%', '%%'))

        cmd = self.repository.command("-u", encode(author),
                                      "commit",
                                      "-m", encode('\n'.join(logmessage)),
                                      "-D", date.astimezone(UTC).strftime(
                                          '%Y/%m/%d %H:%M:%S UTC'))

        if not entries:
            entries = ['...']

        c = ExternalCommand(cwd=self.repository.basedir, command=cmd)
        c.execute(entries)

        if c.exit_status:
            raise ChangesetApplicationFailure("%s returned status %d" %
                                              (str(c), c.exit_status))

    def _removePathnames(self, names):
        """
        Remove some filesystem objects.
        """

        cmd = self.repository.command("remove")
        ExternalCommand(cwd=self.repository.basedir,
                        command=cmd).execute(names)

    def _renamePathname(self, oldname, newname):
        """
        Rename a filesystem object.
        """

        cmd = self.repository.command("rename")
        ExternalCommand(cwd=self.repository.basedir,
                        command=cmd).execute(oldname, newname)

    def _prepareTargetRepository(self):
        self.repository.create()

    def _prepareWorkingDirectory(self, source_repo):
        """
        Set the user on the repository.
        """

        from os import getenv

        cmd = self.repository.command("set", "user")
        user = getenv('CDV_USER') or getenv('LOGNAME')
        ExternalCommand(cwd=self.repository.basedir,
                        command=cmd).execute(user)


# ---- tailor-0.9.35+darcs20090615/vcpx/repository/arx.py ----

# -*- mode: python; coding: utf-8 -*-
# :Progetto: vcpx -- ArX stuff
# :Creato:   ven 24 giu 2005 20:42:46 CEST
# :Autore:   Lele Gaifax
# :Licenza:  GNU General Public License
#
# Modified 2005 by Walter Landry for ArX

"""
This module implements the backends for ArX.
"""

__docformat__ = 'reStructuredText'

from vcpx.repository import Repository
from vcpx.shwrap import ExternalCommand
from vcpx.target import SynchronizableTargetWorkingDir, \
     TargetInitializationFailure
from vcpx.source import ChangesetApplicationFailure


class ArxRepository(Repository):
    METADIR = '_arx'

    def _load(self, project):
        Repository._load(self, project)
        self.EXECUTABLE = project.config.get(self.name, 'arx-command', 'arx')


class ArxWorkingDir(SynchronizableTargetWorkingDir):

    ## SynchronizableTargetWorkingDir

    def _addPathnames(self, names):
        """
        Add some new filesystem objects.
        """

        cmd = self.repository.command("add")
        ExternalCommand(cwd=self.repository.basedir,
                        command=cmd).execute(names)

    def _commit(self, date, author, patchname, changelog=None, entries=None,
                tags = [], isinitialcommit = False):
        """
        Commit the changeset.
        """

        encode = self.repository.encode

        logmessage = []
        if patchname:
            logmessage.append(patchname)
        if changelog:
            logmessage.append(changelog.replace('%', '%%'))

        cmd = self.repository.command("commit",
                                      "-s", encode('\n'.join(logmessage)),
                                      "--author", encode(author),
                                      "--date", date.isoformat())
        c = ExternalCommand(cwd=self.repository.basedir, command=cmd)
        c.execute()

        if c.exit_status:
            raise ChangesetApplicationFailure("%s returned status %d" %
                                              (str(c), c.exit_status))

    def _removePathnames(self, names):
        """
        Remove some filesystem objects.
        """

        cmd = self.repository.command("rm")
        ExternalCommand(cwd=self.repository.basedir,
                        command=cmd).execute(names)

    def _renamePathname(self, oldname, newname):
        """
        Rename a filesystem object.
        """

        cmd = self.repository.command("copy")
        rename = ExternalCommand(cwd=self.repository.basedir, command=cmd)
        rename.execute(oldname, newname)

    def _initializeWorkingDir(self):
        """
        Setup the ArX working copy.

        The user must set up an ArX working directory himself. Then we
        simply use 'arx commit', without having to specify an archive
        or branch: ArX looks up the archive and branch in its _arx
        directory.
        """

        from os.path import exists, join

        if not exists(join(self.repository.basedir, '_arx')):
            raise TargetInitializationFailure(
                "Please setup '%s' as an ArX working directory"
                % self.repository.basedir)

        SynchronizableTargetWorkingDir._initializeWorkingDir(self)


# ---- tailor-0.9.35+darcs20090615/vcpx/repository/aegis/target.py ----

# -*- mode: python; coding: utf-8 -*-
# :Progetto: vcpx -- Aegis details
# :Creato:   sab 24 mag 2008 15:44:00 CEST
# :Autore:   Walter Franzini
# :Licenza:  GNU General Public License
#

"""
This module contains the target specific bits of the Aegis backend.
""" __docformat__ = 'reStructuredText' import os import os.path import re import shutil from vcpx.changes import ChangesetEntry from vcpx.shwrap import ExternalCommand, ReopenableNamedTemporaryFile, PIPE, STDOUT from vcpx.source import ChangesetApplicationFailure from vcpx.target import SynchronizableTargetWorkingDir class AegisTargetWorkingDir(SynchronizableTargetWorkingDir): """ A target working directory under ``Aegis``. """ change_number = "not-a-number" def _commit(self, date, author, patchname, changelog=None, entries=None, tags = [], isinitialcommit = False): """ Commit the changeset. """ config_files = \ self.repository.project_file_list_get(self.repository.USAGE_CONFIG) # # The invocation for the initialcommit does not receive entries. # if isinitialcommit or not config_files: self.__new_file("aegis.conf", "config") self.__config_file(self.repository.basedir, "aegis.conf") elif not entries: # # Return successfully even if the changeset does not # contain entries just in case it's a tag changeset. # return True change_attribute_file = \ self.__change_attribute_file(brief_description=patchname, description=changelog) self.__change_attribute(change_attribute_file.name) self.__develop_end() # # If the change cannot be closed, e.g. because a file is # modified in a change not yet integrated, then we should stop here. # # return False # # Next step only if develop_end => awaiting_integration # self.__integrate_begin() self.__finish() def _prepareTargetRepository(self): # # Aegis refuse to use an already existing directory as the # development directory of a change. # if os.path.exists(self.repository.basedir): shutil.rmtree(self.repository.basedir) def _prepareToReplayChangeset(self, changeset): """ Runs aegis -New_Change -dir target.basedir """ self._prepareTargetRepository() self.change_number = self.__new_change(changeset.revision) self.__develop_begin() # # This function MUST return # return True def _adaptChangeset(self, changeset): project_files = self.repository.project_file_list_get() from copy import deepcopy adapted = deepcopy(changeset) # # adapt the entries: # * delete directory entries # * delete entries with action_kind REMOVE not in the repository (DEL => ) # * change to ADD a rename of a file non in the repository (REN => ADD) # * remove from the changeset entries whith 2 operation (REN + UPD => REN); # * change the ADD action_kind for files already in the repository (ADD => UPD); # * change the UPD action_kind for files *non* in the repository (UPD => ADD); # for e in adapted.entries[:]: if e.is_directory or e.is_symlink: adapted.entries.remove(e) continue if e.action_kind == e.DELETED and not project_files.count(e.name): self.log.info("remove delete entry %s", e.name) adapted.entries.remove(e) renamed_file = [] for e in adapted.entries: if e.action_kind == e.RENAMED: renamed_file.append(e.name) for e in adapted.entries[:]: if renamed_file.count(e.name) and e.action_kind != e.RENAMED: adapted.entries.remove(e) if e.action_kind == e.RENAMED and not project_files.count(e.old_name): e.action_kind = e.ADDED e.old_name = None if e.action_kind == e.ADDED and project_files.count(e.name): e.action_kind = ChangesetEntry.UPDATED elif e.action_kind == e.UPDATED and not project_files.count(e.name): e.action_kind = e.ADDED # # Returns even if the changeset does not contain entries to # give the opportunity to still register tags. 
# return SynchronizableTargetWorkingDir._adaptChangeset(self, adapted) def _initializeWorkingDir(self): # # This method is called only by importFirstRevision # self.__new_file('.') def _prepareWorkingDirectory(self, source_repo): # # Receive the first changeset from the source repository. # self.change_number = self.__new_change() self.__develop_begin() return True def _tag(self, tag, author, date): self.__delta_name(tag) def _addSubtree(self, subdir): # # Aegis new_file command is recursive, there is no need to # walk the directory tree. # pass def _addEntries(self, entries): for e in entries: self.__new_file(e.name) def _addPathnames(self, names): for name in names: self.__new_file(name) def _editPathnames(self, names): for name in names: self.__copy_file(name) def _removeEntries(self, entries): for e in entries: self.__remove_file(e.name) def _removePathnames(self, names): for name in names: self.__remove_file(name) def _renameEntries(self, entries): for e in entries: self.__move_file(e.old_name, e.name) def _renamePathname(self, oldname, newname): self.__move_file(oldname, newname) # # The following methods wraps change's related aegis commands. # def __new_change(self, title = "none", description = "none"): change_attr_file = \ self.__change_attribute_file(brief_description = title, description = description) change_number_file = ReopenableNamedTemporaryFile('aegis', 'tailor') cmd = self.repository.command("-new_change", "-project", self.repository.module, "-file", change_attr_file.name, "-output", change_number_file.name) new_change = ExternalCommand(cwd="/tmp", command=cmd) output = new_change.execute(stdout = PIPE, stderr = STDOUT)[0] if new_change.exit_status > 0: raise ChangesetApplicationFailure( "%s returned status %d, saying: %s" % (str(new_change), new_change.exit_status, output.read())) fd = open(change_number_file.name, "rb") change_number = fd.read() fd.close() return change_number.strip() def __develop_begin(self): cmd = self.repository.command("-develop_begin", "-project", self.repository.module, "-change", self.change_number, "-directory", self.repository.basedir) develop_begin = ExternalCommand(cwd="/tmp", command=cmd) output = develop_begin.execute(stdout = PIPE, stderr = STDOUT)[0] if develop_begin.exit_status > 0: raise ChangesetApplicationFailure( "%s returned status %d, saying: %s" % (str(develop_begin), develop_begin.exit_status, output.read())) self.log.info(output.read()) def __develop_end(self): self.__finish() def __integrate_begin(self): cmd = self.repository.command("-integrate_begin", "-project", self.repository.module, "-change", self.change_number) integrate_begin = ExternalCommand(cwd="/tmp", command=cmd) output = integrate_begin.execute(stdout = PIPE, stderr = STDOUT)[0] if integrate_begin.exit_status > 0: raise ChangesetApplicationFailure( "%s returned status %d, saying: %s" % (str(integrate_begin), integrate_begin.exit_status, output.read())) def __integrate_pass(self): self.__finish() def __finish(self): cmd = self.repository.command("-finish", "-project", self.repository.module, "-change", self.change_number) finish = ExternalCommand(cwd="/tmp", command=cmd) output = finish.execute(stdout = PIPE, stderr = STDOUT)[0] if finish.exit_status > 0: raise ChangesetApplicationFailure( "%s returned status %d, saying: %s" % (str(finish), finish.exit_status, output.read())) def __change_attribute(self, file): cmd = self.repository.command ("-change_attr", "-project", self.repository.module, "-change", self.change_number) change_attr = ExternalCommand 
(cwd="/tmp", command=cmd) output = change_attr.execute ("-file", file, stdout = PIPE, stderr = STDOUT)[0] if change_attr.exit_status > 0: raise ChangesetApplicationFailure( "%s returned status %d, saying: %s" % (str(change_attr), change_attr.exit_status, output.read())) def __delta_name (self, delta): cmd = self.repository.command ("-delta_name", "-project", self.repository.module) delta_name = ExternalCommand (cwd="/tmp", command=cmd) output = delta_name.execute (delta, stdout = PIPE, stderr = STDOUT)[0] if delta_name.exit_status > 0: raise ChangesetApplicationFailure( "%s returned status %d, saying: %s" % (str(delta_name), delta_name.exit_status, output.read())) # # File's related methods. # def __new_file(self, file_names, usage = None): # # Tailor try to add also the aegis own log file and it's forbidden. # if file_names == "./aegis.log": return if usage == "config": cmd = self.repository.command("-new_file", "-keep", "-config", "-not-logging", "-project", self.repository.module, "-change", self.change_number) else: cmd = self.repository.command("-new_file", "-keep", "-not-logging", "-project", self.repository.module, "-change", self.change_number) new_file = ExternalCommand(cwd=self.repository.basedir, command=cmd) output = new_file.execute(file_names, stdout = PIPE, stderr = STDOUT)[0] if new_file.exit_status > 0: raise ChangesetApplicationFailure( "%s returned status %d, saying: %s" % (str(new_file), new_file.exit_status, output.read())) def __copy_file(self, file_names): cmd = self.repository.command("-copy", "-keep", "-not-logging", "-project", self.repository.module, "-change", self.change_number) copy_file = ExternalCommand(cwd=self.repository.basedir, command=cmd) output = copy_file.execute(file_names, stdout = PIPE, stderr = STDOUT)[0] if copy_file.exit_status > 0: raise ChangesetApplicationFailure( "%s returned status %d, saying: %s" % (str(copy_file), copy_file.exit_status, output.read())) def __move_file(self, old_name, new_name): # # The aegis command to rename files does not have the -keep # option to preserve the content of the file, do it manually. # fp = open(os.path.join(self.repository.basedir, new_name), 'rb') content = fp.read() fp.close() cmd = self.repository.command("-move", "-not-logging", "-project", self.repository.module, "-change", self.change_number) move_file = ExternalCommand(cwd=self.repository.basedir, command=cmd) output = move_file.execute(old_name, new_name, stdout = PIPE, stderr = STDOUT)[0] if move_file.exit_status > 0: raise ChangesetApplicationFailure( "%s returned status %d, saying: %s" % (str(move_file), move_file.exit_status, output.read())) # # Restore the previously saved content of the renamed file. # fp = open(os.path.join(self.repository.basedir, new_name), 'wb') fp.write(content) fp.close() def __remove_file(self, file_name): cmd = self.repository.command("-remove", "-not-logging", "-project", self.repository.module, "-change", self.change_number) remove_file = ExternalCommand(cwd=self.repository.basedir, command=cmd) output = remove_file.execute(file_name, stdout = PIPE, stderr = STDOUT)[0] if remove_file.exit_status > 0: raise ChangesetApplicationFailure( "%s returned status %d, saying: %s" % (str(remove_file), remove_file.exit_status, output.read())) def __change_attribute_file(self, *args, **kwargs): """ Create a temporary file to modify change's attributes. 
""" if kwargs['brief_description']: brief_description = \ self.repository.normalize(kwargs['brief_description']) else: brief_description = 'none' if kwargs['description']: description = \ self.repository.normalize(kwargs['description']) else: description = "none" attribute_file = ReopenableNamedTemporaryFile('aegis', 'tailor') fd = open(attribute_file.name, 'wb') fd.write(""" brief_description = "%s"; description = "%s"; cause = external_improvement; test_exempt = true; test_baseline_exempt = true; regression_test_exempt = true; """ % (brief_description, description)) fd.close() return attribute_file def __config_file(self, dir, name): """ Prepare the basic configuration to make aegis work: * define a successfull build command (exit 0) * define the history command * define the merge commands """ c = open(os.path.join(dir, name), "wb", 0644) c.write(""" build_command = "exit 0"; link_integration_directory = true; history_get_command = "aesvt -check-out -edit ${quote $edit} " "-history ${quote $history} -f ${quote $output}"; history_put_command = "aesvt -check-in -history ${quote $history} " "-f ${quote $input}"; history_query_command = "aesvt -query -history ${quote $history}"; history_content_limitation = binary_capable; diff_command = "set +e; $diff $orig $i > $out; test $$? -le 1"; merge_command = "(diff3 -e $i $orig $mr | sed -e '/^w$$/d' -e '/^q$$/d'; echo '1,$$p') " "| ed - $i > $out"; patch_diff_command = "set +e; $diff -C0 -L $index -L $index $orig $i > $out; test $$? -le 1"; shell_safe_filenames = false; """) c.close() tailor-0.9.35+darcs20090615/vcpx/repository/aegis/__init__.py0000644000175000017500000000443711215407134023565 0ustar vdanjeanvdanjean# -*- mode: python; coding: utf-8 -*- # :Progetto: vcpx -- Aegis details # :Creato: sab 24 mag 2008 15:44:00 CEST # :Autore: Walter Franzini # :Licenza: GNU General Public License # import vcpx.changes from textwrap import wrap from vcpx.repository import Repository from vcpx.shwrap import ExternalCommand, PIPE, STDOUT class AegisRepository(Repository): USAGE_BUILD = 'build' USAGE_CONFIG = 'config' USAGE_MANUAL_TEST = 'manual-test' USAGE_SOURCE = 'source' USAGE_TEST = 'test' def _load(self, project): Repository._load (self, project) cget = project.config.get self.EXECUTABLE = cget(self.name, 'aegis-command', 'aegis') def _validateConfiguration(self): pass def command(self, *args, **kwargs): # # Run aegis in verbose mode # args = args + ('-verbose',) # # Disable the log file functionality # if not kwargs.has_key('env'): kwargs['env'] = {} kwargs['env']['AEGIS_FLAGS'] = "log_file_preference = never;" # # aefinish is a different executable. Take care of it. # original_command = self.EXECUTABLE if args[0] == "-finish": self.EXECUTABLE = "aefinish" args = args[1:] rc = Repository.command(self, *args, **kwargs) self.EXECUTABLE = original_command return rc def project_file_list_get(self, usage = None): cmd = self.command("-project", self.module, executable = "aelpf") if usage: cmd.append('--usage') cmd.append(usage) aelpf = ExternalCommand (cwd="/tmp", command=cmd) output = aelpf.execute(stdout = PIPE, stderr = STDOUT)[0] if aelpf.exit_status > 0: raise ChangesetApplicationFailure( "%s returned status %d, saying: %s" % (str(aelpf), aelpf.exit_status, output.read())) return [f.strip() for f in output.readlines()] def normalize(self, message): # # Aegis use C syntax for strings stored in config files, adapt # the message to conform. 
# message = message.replace ('\\', '\\\\') return "\\n\\\n".join(wrap(message)).replace('"', '\\"') tailor-0.9.35+darcs20090615/vcpx/repository/baz.py0000644000175000017500000000225611215407134021507 0ustar vdanjeanvdanjean# -*- mode: python; coding: utf-8 -*- # :Progetto: vcpx -- baz (Arch 1.x) backend # :Creato: sab 13 ago 2005 12:16:16 CEST # :Autore: Ollivier Robert # :Licenza: GNU General Public License # """ This module implements the backends for baz (Arch 1.x). """ __docformat__ = 'reStructuredText' from vcpx.repository.tla import TlaRepository, TlaWorkingDir class BazRepository(TlaRepository): def _load(self, project): TlaRepository._load(self, project) self.EXECUTABLE = project.config.get(self.name, 'baz-command', 'baz') def command(self, *args, **kwargs): if args: if args[0] == 'tree-lint': args = list(args) args[0] = 'lint' elif args[0] == 'missing' and args[1] == '-f': args = list(args) del args[1] return TlaRepository.command(self, *args, **kwargs) class BazWorkingDir(TlaWorkingDir): """ A working directory under ``baz``. """ # For tailor purposes, the only difference between baz and tla # is the name of command "lint", that tla calls "tree-lint". # The BazRepository takes care of fixing that. tailor-0.9.35+darcs20090615/vcpx/repository/svn.py0000644000175000017500000011227311215407134021542 0ustar vdanjeanvdanjean# -*- mode: python; coding: utf-8 -*- # :Progetto: vcpx -- Subversion details # :Creato: ven 18 giu 2004 15:00:52 CEST # :Autore: Lele Gaifax # :Licenza: GNU General Public License # """ This module contains supporting classes for Subversion. """ __docformat__ = 'reStructuredText' from vcpx.changes import ChangesetEntry from vcpx.config import ConfigurationError from vcpx.repository import Repository from vcpx.shwrap import ExternalCommand, PIPE, STDOUT, ReopenableNamedTemporaryFile from vcpx.source import UpdatableSourceWorkingDir, ChangesetApplicationFailure from vcpx.target import SynchronizableTargetWorkingDir, TargetInitializationFailure, \ PostCommitCheckFailure from vcpx.tzinfo import UTC class SvnRepository(Repository): METADIR = '.svn' def command(self, *args, **kwargs): if kwargs.get('svnadmin', False): kwargs['executable'] = self.__svnadmin return Repository.command(self, *args, **kwargs) def _load(self, project): Repository._load(self, project) cget = project.config.get self.EXECUTABLE = cget(self.name, 'svn-command', 'svn') self.__svnadmin = cget(self.name, 'svnadmin-command', 'svnadmin') self.use_propset = cget(self.name, 'use-propset', False) self.propset_date = cget(self.name, 'propset-date', True) self.filter_badchars = cget(self.name, 'filter-badchars', False) self.use_limit = cget(self.name, 'use-limit', True) self.trust_root = cget(self.name, 'trust-root', False) self.ignore_externals = cget(self.name, 'ignore-externals', True) self.commit_all_files = cget(self.name, 'commit-all-files', True) self.tags_path = cget(self.name, 'svn-tags', '/tags') self.branches_path = cget(self.name, 'svn-branches', '/branches') self._setupTagsDirectory = None def setupTagsDirectory(self): if self._setupTagsDirectory == None: self._setupTagsDirectory = False if self.module and self.module <> '/': # Check the existing tags directory cmd = self.command("ls") svnls = ExternalCommand(command=cmd) svnls.execute(self.repository + self.tags_path) if svnls.exit_status: # create it, if not exist cmd = self.command("mkdir", "-m", "This directory will host the tags") svnmkdir = ExternalCommand(command=cmd) svnmkdir.execute(self.repository + self.tags_path) if 
svnmkdir.exit_status: raise TargetInitializationFailure( "Was not able to create tags directory '%s'" % self.tags_path) else: self.log.debug("Directory '%s' already exists" % self.tags_path) self._setupTagsDirectory = True else: self.log.debug("Tags needs module setup other than '/'") return self._setupTagsDirectory def _validateConfiguration(self): from vcpx.config import ConfigurationError Repository._validateConfiguration(self) if not self.repository: self.log.critical('Missing repository information in %r', self.name) raise ConfigurationError("Must specify the root of the " "Subversion repository used " "as %s with the option " "'repository'" % self.which) elif self.repository.endswith('/'): self.log.debug("Removing final slash from %r in %r", self.repository, self.name) self.repository = self.repository.rstrip('/') if not self.module: self.log.critical('Missing module information in %r', self.name) raise ConfigurationError("Must specify the path within the " "Subversion repository as 'module'") if self.module == '.': self.log.warning("Replacing '.' with '/' in module name in %r", self.name) self.module = '/' elif not self.module.startswith('/'): self.log.debug("Prepending '/' to module %r in %r", self.module, self.name) self.module = '/' + self.module if not self.tags_path.startswith('/'): self.log.debug("Prepending '/' to svn-tags %r in %r", self.tags_path, self.name) self.tags_path = '/' + self.tags_path if not self.branches_path.startswith('/'): self.log.debug("Prepending '/' to svn-branches %r in %r", self.branches_path, self.name) self.branches_path = '/' + self.branches_path def create(self): """ Create a local SVN repository, if it does not exist, and configure it. """ from os.path import join, exists from sys import platform # Verify the existence of repository by listing its root cmd = self.command("ls") svnls = ExternalCommand(command=cmd) svnls.execute(self.repository) # Create it if it isn't a valid repository if svnls.exit_status: if not self.repository.startswith('file:///'): raise TargetInitializationFailure("%r does not exist and " "cannot be created since " "it's not a local (file:///) " "repository" % self.repository) if platform != 'win32': repodir = self.repository[7:] else: repodir = self.repository[8:] cmd = self.command("create", "--fs-type", "fsfs", svnadmin=True) svnadmin = ExternalCommand(command=cmd) svnadmin.execute(repodir) if svnadmin.exit_status: raise TargetInitializationFailure("Was not able to create a 'fsfs' " "svn repository at %r" % self.repository) if self.use_propset: if not self.repository.startswith('file:///'): self.log.warning("Repository is remote, cannot verify if it " "has the 'pre-revprop-change' hook active, needed " "by 'use-propset=True'. 
Assuming it does...") else: if platform != 'win32': repodir = self.repository[7:] else: repodir = self.repository[8:] hookname = join(repodir, 'hooks', 'pre-revprop-change') if platform == 'win32': hookname += '.bat' if not exists(hookname): prehook = open(hookname, 'w') if platform <> 'win32': prehook.write('#!/bin/sh\n') prehook.write('exit 0\n') prehook.close() if platform <> 'win32': from os import chmod chmod(hookname, 0755) if self.module and self.module <> '/': cmd = self.command("ls") svnls = ExternalCommand(command=cmd) svnls.execute(self.repository + self.module) if svnls.exit_status: paths = [] # Auto detect missing "branches/" if self.module.startswith(self.branches_path + '/'): path = self.repository + self.branches_path cmd = self.command("ls") svnls = ExternalCommand(command=cmd) svnls.execute(path) if svnls.exit_status: paths.append(path) paths.append(self.repository + self.module) cmd = self.command("mkdir", "--parents", "-m", "This directory will host the upstream sources") svnmkdir = ExternalCommand(command=cmd) svnmkdir.execute(paths) if svnmkdir.exit_status: raise TargetInitializationFailure("Was not able to create the " "module %r" % self.module) def changesets_from_svnlog(log, repository, chunksize=2**15): from xml.sax import make_parser from xml.sax.handler import ContentHandler, ErrorHandler from datetime import datetime from vcpx.changes import ChangesetEntry, Changeset def get_entry_from_path(path, module=repository.module): # Given the repository url of this wc, say # "http://server/plone/CMFPlone/branches/Plone-2_0-branch" # extract the "entry" portion (a relative path) from what # svn log --xml says, ie # "/CMFPlone/branches/Plone-2_0-branch/tests/PloneTestCase.py" # that is to say "tests/PloneTestCase.py" if not module.endswith('/'): module = module + '/' if path.startswith(module): relative = path[len(module):] return relative # The path is outside our tracked tree... repository.log.debug('Ignoring "%s" since it is not under "%s"', path, module) return None class SvnXMLLogHandler(ContentHandler): # Map between svn action and tailor's. # NB: 'R', in svn parlance, means REPLACED, something other # system may view as a simpler ADD, taking the following as # the most common idiom:: # # # Rename the old file with a better name # $ svn mv somefile nicer-name-scheme.py # # # Be nice with lazy users # $ echo "exec nicer-name-scheme.py" > somefile # # # Add the wrapper with the old name # $ svn add somefile # # $ svn commit -m "Longer name for somefile" ACTIONSMAP = {'R': 'R', # will be ChangesetEntry.ADDED 'M': ChangesetEntry.UPDATED, 'A': ChangesetEntry.ADDED, 'D': ChangesetEntry.DELETED} def __init__(self): self.changesets = [] self.current = None self.current_field = [] self.renamed = {} self.copies = [] def startElement(self, name, attributes): if name == 'logentry': self.current = {} self.current['revision'] = attributes['revision'] self.current['entries'] = [] self.copies = [] elif name in ['author', 'date', 'msg']: self.current_field = [] elif name == 'path': self.current_field = [] if attributes.has_key('copyfrom-path'): self.current_path_action = ( attributes['action'], attributes['copyfrom-path'], attributes['copyfrom-rev']) else: self.current_path_action = attributes['action'] def endElement(self, name): if name == 'logentry': # Sort the paths to make tests easier self.current['entries'].sort(lambda a,b: cmp(a.name, b.name)) # Eliminate "useless" entries: SVN does not have atomic # renames, but rather uses a ADD+RM duo. 
# # So cycle over all entries of this patch, discarding # the deletion of files that were actually renamed, and # at the same time change related entry from ADDED to # RENAMED. # When copying a directory from another location in the # repository (outside the tracked tree), SVN will report files # below this dir that are not being committed as being # removed. # We thus need to change the action_kind for all entries # that are below a dir that was "copyfrom" from a path # outside of this module: # D -> Remove entry completely (it's not going to be in here) # (M,A) -> A # Finally, take care of the 'R' entries: if the entry # is a target of a rename, just discard it (hopefully # the target VC will do the right thing), otherwise # change those to 'A'. mv_or_cp = {} for e in self.current['entries']: if (e.action_kind == e.ADDED or e.action_kind == 'R') and e.old_name is not None: mv_or_cp[e.old_name] = e def parent_was_copied(n): for p in self.copies: if n.startswith(p+'/'): return True return False # Find renames from deleted directories: # $ svn mv dir/a.txt a.txt # $ svn del dir def check_renames_from_dir(name): for e in mv_or_cp.values(): if e.old_name.startswith(name+'/'): e.action_kind = e.RENAMED entries = [] entries2 = [] for e in self.current['entries']: if e.action_kind==e.DELETED: if mv_or_cp.has_key(e.name): mv_or_cp[e.name].action_kind = e.RENAMED else: check_renames_from_dir(e.name) entries2.append(e) elif e.action_kind=='R': # In svn parlance, 'R' means Replaced: a typical # scenario is # $ svn mv a.txt b.txt # $ touch a.txt # $ svn add a.txt if mv_or_cp.has_key(e.name): mv_or_cp[e.name].action_kind = e.RENAMED else: check_renames_from_dir(e.name) # Another scenario is # $ svn mv dir otherdir # $ svn rm otherdir/subdir # $ svn mv olddir/subdir otherdir # $ svn rm olddir if e.old_name is not None: e.action_kind = e.RENAMED else: e.action_kind = e.ADDED entries2.append(e) elif parent_was_copied(e.name): if e.action_kind != e.DELETED: e.action_kind = e.ADDED entries.append(e) else: entries.append(e) # Changes sort: first MODIFY|ADD|RENAME, than REPLACE|DELETE for e in entries2: entries.append(e) svndate = self.current['date'] # 2004-04-16T17:12:48.000000Z y,m,d = map(int, svndate[:10].split('-')) hh,mm,ss = map(int, svndate[11:19].split(':')) ms = int(svndate[20:-1]) timestamp = datetime(y, m, d, hh, mm, ss, ms, UTC) changeset = Changeset(self.current['revision'], timestamp, self.current.get('author'), self.current['msg'], entries) self.changesets.append(changeset) self.current = None elif name in ['author', 'date', 'msg']: self.current[name] = ''.join(self.current_field) elif name == 'path': path = ''.join(self.current_field) entrypath = get_entry_from_path(path) if entrypath: entry = ChangesetEntry(entrypath) if type(self.current_path_action) == type( () ): self.copies.append(entry.name) old = get_entry_from_path(self.current_path_action[1]) if old: entry.action_kind = self.ACTIONSMAP[self.current_path_action[0]] entry.old_name = old self.renamed[entry.old_name] = True else: entry.action_kind = entry.ADDED else: entry.action_kind = self.ACTIONSMAP[self.current_path_action] self.current['entries'].append(entry) def characters(self, data): self.current_field.append(data) parser = make_parser() handler = SvnXMLLogHandler() parser.setContentHandler(handler) parser.setErrorHandler(ErrorHandler()) chunk = log.read(chunksize) while chunk: parser.feed(chunk) for cs in handler.changesets: yield cs handler.changesets = [] chunk = log.read(chunksize) parser.close() for cs in 
handler.changesets: yield cs class SvnWorkingDir(UpdatableSourceWorkingDir, SynchronizableTargetWorkingDir): ## UpdatableSourceWorkingDir def _getUpstreamChangesets(self, sincerev=None): if sincerev: sincerev = int(sincerev) else: sincerev = 0 cmd = self.repository.command("log", "--verbose", "--xml", "--non-interactive", "--revision", "%d:HEAD" % (sincerev+1)) svnlog = ExternalCommand(cwd=self.repository.basedir, command=cmd) log = svnlog.execute('.', stdout=PIPE, TZ='UTC0')[0] if svnlog.exit_status: return [] if self.repository.filter_badchars: from string import maketrans from cStringIO import StringIO # Apparently some (SVN repo contains)/(SVN server dumps) some # characters that are illegal in an XML stream. This was the case # with Twisted Matrix master repository. To be safe, we replace # all of them with a question mark. if isinstance(self.repository.filter_badchars, basestring): allbadchars = self.repository.filter_badchars else: allbadchars = "\x00\x01\x02\x03\x04\x05\x06\x07\x08\x09" \ "\x0B\x0C\x0E\x0F\x10\x11\x12\x13\x14\x15" \ "\x16\x17\x18\x19\x1A\x1B\x1C\x1D\x1E\x1F\x7f" tt = maketrans(allbadchars, "?"*len(allbadchars)) log = StringIO(log.read().translate(tt)) return changesets_from_svnlog(log, self.repository) def _applyChangeset(self, changeset): from os import walk from os.path import join, isdir from time import sleep # Complete changeset information, determining the is_directory # flag of the removed entries, before updating to the given revision for entry in changeset.entries: if entry.action_kind == entry.DELETED: entry.is_directory = isdir(join(self.repository.basedir, entry.name)) cmd = self.repository.command("update") if self.repository.ignore_externals: cmd.append("--ignore-externals") cmd.extend(["--revision", changeset.revision]) svnup = ExternalCommand(cwd=self.repository.basedir, command=cmd) retry = 0 while True: out, err = svnup.execute(".", stdout=PIPE, stderr=PIPE) if svnup.exit_status == 1: retry += 1 if retry>3: break delay = 2**retry self.log.error("%s returned status %s saying\n%s", str(svnup), svnup.exit_status, err.read()) self.log.warning("Retrying in %d seconds...", delay) sleep(delay) else: break if svnup.exit_status: raise ChangesetApplicationFailure( "%s returned status %s saying\n%s" % (str(svnup), svnup.exit_status, err.read())) self.log.debug("%s updated to %s", ','.join([e.name for e in changeset.entries]), changeset.revision) # Complete changeset information, determining the is_directory # flag of the added entries implicitly_added_entries = [] known_added_entries = set() for entry in changeset.entries: if entry.action_kind == entry.ADDED: known_added_entries.add(entry.name) fullname = join(self.repository.basedir, entry.name) entry.is_directory = isdir(fullname) # If it is a directory, extend the entries of the # changeset with all its contents, if not already there. 
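            # For example, when the log only lists an added directory
            # "docs", the walk below may add ADDED entries for
            # "docs/a.txt" and "docs/img" that svn did not report
            # individually (the names are made up).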
if entry.is_directory: for root, subdirs, files in walk(fullname): if '.svn' in subdirs: subdirs.remove('.svn') for f in files: name = join(root, f)[len(self.repository.basedir)+1:] newe = ChangesetEntry(name) newe.action_kind = newe.ADDED implicitly_added_entries.append(newe) for d in subdirs: name = join(root, d)[len(self.repository.basedir)+1:] newe = ChangesetEntry(name) newe.action_kind = newe.ADDED newe.is_directory = True implicitly_added_entries.append(newe) for e in implicitly_added_entries: if not e.name in known_added_entries: changeset.entries.append(e) result = [] for line in out: if len(line)>2 and line[0] == 'C' and line[1] == ' ': self.log.warning("Conflict after svn update: %r", line) result.append(line[2:-1]) return result def _checkoutUpstreamRevision(self, revision): """ Concretely do the checkout of the upstream revision. """ from os.path import join, exists # Verify that the we have the root of the repository: do that # iterating an "svn ls" over the hierarchy until one fails lastok = self.repository.repository if not self.repository.trust_root: # Use --non-interactive, so that it fails if credentials # are needed. cmd = self.repository.command("ls", "--non-interactive") svnls = ExternalCommand(command=cmd) # First verify that we have a valid repository svnls.execute(self.repository.repository) if svnls.exit_status: lastok = None else: # Then verify it really points to the root of the # repository: this is needed because later the svn log # parser needs to know the "offset". reporoot = lastok[:lastok.rfind('/')] # Even if it would be enough asserting that the uplevel # directory is not a repository, find the real root to # suggest it in the exception. But don't go too far, that # is, stop when you hit schema://... while '//' in reporoot: svnls.execute(reporoot) if svnls.exit_status: break lastok = reporoot reporoot = reporoot[:reporoot.rfind('/')] if lastok is None: raise ConfigurationError('%r is not the root of a svn repository. If ' 'you are sure it is indeed, you may try setting ' 'the option "trust-root" to "True".' % self.repository.repository) elif lastok <> self.repository.repository: module = self.repository.repository[len(lastok):] module += self.repository.module raise ConfigurationError('Non-root svn repository %r. ' 'Please specify that as "repository=%s" ' 'and "module=%s".' 
% (self.repository.repository, lastok, module.rstrip('/'))) if revision == 'INITIAL': initial = True cmd = self.repository.command("log", "--verbose", "--xml", "--non-interactive", "--stop-on-copy", "--revision", "1:HEAD") if self.repository.use_limit: cmd.extend(["--limit", "1"]) svnlog = ExternalCommand(command=cmd) out, err = svnlog.execute("%s%s" % (self.repository.repository, self.repository.module), stdout=PIPE, stderr=PIPE) if svnlog.exit_status: raise TargetInitializationFailure( "%s returned status %d saying\n%s" % (str(svnlog), svnlog.exit_status, err.read())) csets = changesets_from_svnlog(out, self.repository) last = csets.next() revision = last.revision else: initial = False if not exists(join(self.repository.basedir, self.repository.METADIR)): self.log.debug("Checking out a working copy") cmd = self.repository.command("co", "--quiet") if self.repository.ignore_externals: cmd.append("--ignore-externals") cmd.extend(["--revision", revision]) svnco = ExternalCommand(command=cmd) out, err = svnco.execute("%s%s@%s" % (self.repository.repository, self.repository.module, revision), self.repository.basedir, stdout=PIPE, stderr=PIPE) if svnco.exit_status: raise TargetInitializationFailure( "%s returned status %s saying\n%s" % (str(svnco), svnco.exit_status, err.read())) else: self.log.debug("%r already exists, assuming it's " "a svn working dir", self.repository.basedir) if not initial: if revision=='HEAD': revision = 'COMMITTED' cmd = self.repository.command("log", "--verbose", "--xml", "--non-interactive", "--revision", revision) svnlog = ExternalCommand(cwd=self.repository.basedir, command=cmd) out, err = svnlog.execute(stdout=PIPE, stderr=PIPE) if svnlog.exit_status: raise TargetInitializationFailure( "%s returned status %d saying\n%s" % (str(svnlog), svnlog.exit_status, err.read())) csets = changesets_from_svnlog(out, self.repository) last = csets.next() self.log.debug("Working copy up to svn revision %s", last.revision) return last ## SynchronizableTargetWorkingDir def _addPathnames(self, names): """ Add some new filesystem objects. """ cmd = self.repository.command("add", "--quiet", "--no-auto-props", "--non-recursive") ExternalCommand(cwd=self.repository.basedir, command=cmd).execute(names) def _propsetRevision(self, out, command, date, author): from re import search encode = self.repository.encode line = out.readline() if not line: # svn did not find anything to commit self.log.warning('svn did not find anything to commit') return # Assume svn output the revision number in the last output line while line: lastline = line line = out.readline() revno = search('\d+', lastline) if revno is None: out.seek(0) raise ChangesetApplicationFailure("%s wrote unrecognizable " "revision number:\n%s" % (str(command), out.read())) revision = revno.group(0) if self.repository.use_propset: cmd = self.repository.command("propset", "%(propname)s", "--quiet", "--revprop", "--revision", revision) pset = ExternalCommand(cwd=self.repository.basedir, command=cmd) if self.repository.propset_date: date = date.astimezone(UTC).replace(microsecond=0, tzinfo=None) pset.execute(date.isoformat()+".000000Z", propname='svn:date') pset.execute(encode(author), propname='svn:author') return revision def _tag(self, tag, date, author): """ TAG current revision. 
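
        Roughly equivalent to (a sketch; REPO, MODULE and TAGS stand
        for the configured repository, module and svn-tags paths)::

            svn copy REPO/MODULE REPO/TAGS/TAG -m TAG

        with any "/" in the tag name replaced by "_".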
""" if self.repository.setupTagsDirectory(): src = self.repository.repository + self.repository.module dest = self.repository.repository + self.repository.tags_path \ + '/' + tag.replace('/', '_') cmd = self.repository.command("copy", src, dest, "-m", tag) svntag = ExternalCommand(cwd=self.repository.basedir, command=cmd) out, err = svntag.execute(stdout=PIPE, stderr=PIPE) if svntag.exit_status: raise ChangesetApplicationFailure("%s returned status %d saying\n%s" % (str(svntag), svntag.exit_status, err.read())) self._propsetRevision(out, svntag, date, author) def _commit(self, date, author, patchname, changelog=None, entries=None, tags = [], isinitialcommit = False): """ Commit the changeset. """ encode = self.repository.encode logmessage = [] if patchname: logmessage.append(patchname) if changelog: logmessage.append(changelog) # If we cannot use propset, fall back to old behaviour of # appending these info to the changelog if not self.repository.use_propset: logmessage.append('') logmessage.append('Original author: %s' % encode(author)) logmessage.append('Date: %s' % date) elif not self.repository.propset_date: logmessage.append('') logmessage.append('Date: %s' % date) rontf = ReopenableNamedTemporaryFile('svn', 'tailor') log = open(rontf.name, "w") log.write(encode('\n'.join(logmessage))) log.close() cmd = self.repository.command("commit", "--file", rontf.name) commit = ExternalCommand(cwd=self.repository.basedir, command=cmd) if not entries or self.repository.commit_all_files: entries = ['.'] out, err = commit.execute(entries, stdout=PIPE, stderr=PIPE) if commit.exit_status: raise ChangesetApplicationFailure("%s returned status %d saying\n%s" % (str(commit), commit.exit_status, err.read())) revision = self._propsetRevision(out, commit, date, author) if not revision: # svn did not find anything to commit return cmd = self.repository.command("update", "--quiet") if self.repository.ignore_externals: cmd.append("--ignore-externals") cmd.extend(["--revision", revision]) ExternalCommand(cwd=self.repository.basedir, command=cmd).execute() def _postCommitCheck(self): """ Assert that all the entries in the working dir are versioned. """ cmd = self.repository.command("status") whatsnew = ExternalCommand(cwd=self.repository.basedir, command=cmd) output = whatsnew.execute(stdout=PIPE, stderr=STDOUT)[0] unknown = [l for l in output.readlines() if l.startswith('?')] if unknown: raise PostCommitCheckFailure( "Changes left in working dir after commit:\n%s" % ''.join(unknown)) def _removePathnames(self, names): """ Remove some filesystem objects. """ cmd = self.repository.command("remove", "--quiet", "--force") remove = ExternalCommand(cwd=self.repository.basedir, command=cmd) remove.execute(names) def _renamePathname(self, oldname, newname): """ Rename a filesystem object. """ from os import rename from os.path import join, exists, isdir from time import sleep from datetime import datetime # --force in case the file has been changed and moved in one revision cmd = self.repository.command("mv", "--quiet", "--force") # Subversion does not seem to allow # $ mv a.txt b.txt # $ svn mv a.txt b.txt # Here we are in this situation, since upstream VCS already # moved the item. # It may be better to let subversion do the move itself. For one thing, # svn's cp+rm is different from rm+add (cp preserves history). 
unmoved = False oldpath = join(self.repository.basedir, oldname) newpath = join(self.repository.basedir, newname) if not exists(oldpath): try: rename(newpath, oldpath) except OSError: self.log.critical('Cannot rename %r back to %r', newpath, oldpath) raise unmoved = True # Ticket #135: Need a timediff between rsync and directory move if isdir(oldpath): now = datetime.now() if hasattr(self, '_last_svn_move'): last = self._last_svn_move else: last = now if not (now-last).seconds: sleep(1) self._last_svn_move = now move = ExternalCommand(cwd=self.repository.basedir, command=cmd) out, err = move.execute(oldname, newname, stdout=PIPE, stderr=PIPE) if move.exit_status: if unmoved: rename(oldpath, newpath) raise ChangesetApplicationFailure("%s returned status %d saying\n%s" % (str(move), move.exit_status, err.read())) def _prepareTargetRepository(self): """ Check for target repository existence, eventually create it. """ if not self.repository.repository: return self.repository.create() def _prepareWorkingDirectory(self, source_repo): """ Checkout a working copy of the target SVN repository. """ from os.path import join, exists from vcpx.dualwd import IGNORED_METADIRS if not self.repository.repository or exists(join(self.repository.basedir, self.repository.METADIR)): return cmd = self.repository.command("co", "--quiet") if self.repository.ignore_externals: cmd.append("--ignore-externals") svnco = ExternalCommand(command=cmd) svnco.execute("%s%s" % (self.repository.repository, self.repository.module), self.repository.basedir) ignore = [md for md in IGNORED_METADIRS] if self.logfile.startswith(self.repository.basedir): ignore.append(self.logfile[len(self.repository.basedir)+1:]) if self.state_file.filename.startswith(self.repository.basedir): sfrelname = self.state_file.filename[len(self.repository.basedir)+1:] ignore.append(sfrelname) ignore.append(sfrelname+'.old') ignore.append(sfrelname+'.journal') cmd = self.repository.command("propset", "%(propname)s", "--quiet") pset = ExternalCommand(cwd=self.repository.basedir, command=cmd) pset.execute('\n'.join(ignore), '.', propname='svn:ignore') def _initializeWorkingDir(self): """ Add the given directory to an already existing svn working tree. """ from os.path import exists, join if not exists(join(self.repository.basedir, self.repository.METADIR)): raise TargetInitializationFailure("'%s' needs to be an SVN working copy already under SVN" % self.repository.basedir) SynchronizableTargetWorkingDir._initializeWorkingDir(self) tailor-0.9.35+darcs20090615/vcpx/repository/mock.py0000644000175000017500000000775511215407134021675 0ustar vdanjeanvdanjean# -*- mode: python; coding: utf-8 -*- # :Progetto: vcpx -- mock source backend # :Creato: Sun Jul 16 02:50:04 CEST 2006 # :Autore: Adeodato Simó # :Licenza: GNU General Public License # """ This module implements a mock source backend to be used in tests. 
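
A minimal usage sketch (the file name and contents are made up):
assign the changesets to feed before the working dir is used as a
source, e.g.

    wd.changesets = [
        MockChangeset('First commit', [
            MockChangesetEntry(MockChangesetEntry.ADDED, 'a.txt',
                               contents='hello')])]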
""" __docformat__ = 'reStructuredText' import os from shutil import rmtree from datetime import datetime, timedelta from vcpx.tzinfo import UTC from vcpx import TailorBug from vcpx.repository import Repository from vcpx.source import UpdatableSourceWorkingDir from vcpx.changes import Changeset, ChangesetEntry class MockRepository(Repository): def create(self): if not os.path.isdir(self.basedir): os.makedirs(self.basedir) class MockWorkingDir(UpdatableSourceWorkingDir): def __init__(self, *args, **kwargs): super(MockWorkingDir, self).__init__(*args, **kwargs) self.rev_offset = 0 self.changesets = [] def _getUpstreamChangesets(self, sincerev): return self.changesets[sincerev-self.rev_offset:] def _applyChangeset(self, changeset): for e in changeset.entries: e.apply(self.repository.basedir) return [] def _checkoutUpstreamRevision(self, revision): if revision == 'INITIAL': cset = self.changesets[0] self.rev_offset = cset.revision - 1 self._applyChangeset(cset) return cset else: raise TailorBug("Don't know what to do!") def _get_changesets(self): if not self.__changesets: raise TailorBug("Attempted to use empty MockWorkingDir!") return self.__changesets def _set_changesets(self, changesets): self.__changesets = changesets changesets = property(_get_changesets, _set_changesets) class MockChangeset(Changeset): def __init__(self, log, entries): super(MockChangeset, self).__init__(MockChangeset.Rev.next(), MockChangeset.Date.next(), None, log, entries) def Rev(): initial = 0 while True: initial += 1 yield initial def Date(): initial = datetime.now(UTC) step = timedelta(seconds=1) while True: initial += step yield initial Rev = Rev() Date = Date() class MockChangesetEntry(ChangesetEntry): def __init__(self, action, name, old_name=None, contents=None): super(MockChangesetEntry, self).__init__(name) self.contents = contents self.old_name = old_name self.action_kind = action if self.name.endswith('/'): self.is_directory = True self.name = self.name[:-1] if self.old_name and self.old_name.endswith('/'): self.old_name = self.old_name[:-1] def apply(self, where): name = os.path.join(where, self.name) if self.action_kind == self.ADDED: if self.is_directory: os.makedirs(name) else: dirname = os.path.dirname(name) if not os.path.exists(dirname): os.makedirs(dirname) f = file(name, 'w') if self.contents is not None: f.write(self.contents) f.close() elif self.action_kind == self.DELETED: if os.path.exists(name): if self.is_directory: rmtree(name) else: os.unlink(name) elif self.action_kind == self.RENAMED: old_name = os.path.join(where, self.old_name) if os.path.exists(old_name): os.rename(old_name, name) elif self.action_kind == self.UPDATED: if self.contents is not None: f = file(name, 'w') f.write(self.contents) else: # update timestamp f = file(name, 'w+') f.close() else: raise TailorBug("Unknown ChangesetEntry.action_kind: %s" % str(self.action_kind)) tailor-0.9.35+darcs20090615/vcpx/repository/hg.py0000644000175000017500000006355111215407134021336 0ustar vdanjeanvdanjean# -*- mode: python; coding: utf-8 -*- # :Progetto: vcpx -- Mercurial native backend # :Creato: dom 11 set 2005 22:58:38 CEST # :Autore: Lele Gaifax # Brendan Cully # :Licenza: GNU General Public License # """ This module implements the backends for Mercurial, using its native API instead of thru the command line. 
""" __docformat__ = 'reStructuredText' from mercurial import ui, hg, cmdutil, commands from vcpx.repository import Repository from vcpx.source import UpdatableSourceWorkingDir from vcpx.target import PostCommitCheckFailure, \ SynchronizableTargetWorkingDir class HgRepository(Repository): METADIR = '.hg' def _load(self, project): Repository._load(self, project) ppath = project.config.get(self.name, 'python-path') if ppath: from sys import path if ppath not in path: path.insert(0, ppath) self.EXTRA_METADIRS = ['.hgtags'] def _validateConfiguration(self): """ Mercurial expects all data to be in utf-8, so we disallow other encodings """ Repository._validateConfiguration(self) if self.encoding.upper() != 'UTF-8': self.log.warning("Forcing UTF-8 encoding instead of " + self.encoding) self.encoding = 'UTF-8' class HgWorkingDir(UpdatableSourceWorkingDir, SynchronizableTargetWorkingDir): # UpdatableSourceWorkingDir def _checkoutUpstreamRevision(self, revision): """ Initial checkout (hg clone) """ from os import mkdir, rename, rmdir from os.path import exists, join self._getUI() # We have to clone the entire repository to be able to pull from it # later. So a partial checkout is a full clone followed by an update # directly to the desired revision. # If the basedir does not exist, create it if not exists(self.repository.basedir): mkdir(self.repository.basedir) # clone it only if .hg does not exist if not exists(join(self.repository.basedir, ".hg")): # Hg won't check out into an existing directory checkoutdir = join(self.repository.basedir,".hgtmp") opts = self._defaultOpts('clone') opts['noupdate'] = True commands.clone(self._ui, self.repository.repository, checkoutdir, **opts) rename(join(checkoutdir, ".hg"), join(self.repository.basedir,".hg")) rmdir(checkoutdir) else: # Does hgrc exist? If not, we write one hgrc = join(self.repository.basedir, ".hg", "hgrc") if not exists(hgrc): hgrc = file(hgrc, "w") hgrc.write("[paths]\ndefault = %s\ndefault-push = %s\n" % (self.repository.repository, self.repository.repository)) hgrc.close() repo = self._getRepo() node = self._getNode(repo, revision) self.log.info('Extracting revision %r from %r into %r', revision, self.repository.repository, self.repository.basedir) repo.update(node) return self._changesetForRevision(repo, revision) def _getUpstreamChangesets(self, sincerev): """Fetch new changesets from the source""" repo = self._getRepo() self._hgCommand('pull', 'default') if hasattr(repo.changelog, 'count'): # hg < 1.1 numcs = repo.changelog.count() else: # hg >= 1.1 numcs = len(repo) from mercurial.node import bin for rev in xrange(repo.changelog.rev(bin(sincerev)) + 1, numcs): yield self._changesetForRevision(repo, str(rev)) def __maybeDeleteDirectory(self, entrydir): from os.path import join, exists from os import listdir from vcpx.dualwd import IGNORED_METADIRS if not entrydir: return False absentrydir = join(self.repository.basedir, entrydir) if not exists(absentrydir): # Oh, the directory disappeared: insert a REMOVE event # against it. 
return True else: contents = listdir(absentrydir) for imd in IGNORED_METADIRS: if imd in contents: contents.remove(imd) if contents: return False else: return True return False def _applyChangeset(self, changeset): from os.path import join, exists, split # Collect added and deleted directories addeddirs = [] deleteddirs = [] # Collect renamed entries for each directory, to recognize # directory renames mayberendirs = {} renamed = set() for e in changeset.entries[:]: if e.action_kind == e.ADDED: entrydir = split(e.name)[0] if entrydir and not exists(join(self.repository.basedir, entrydir)): addeddirs.append((e, entrydir)) elif e.action_kind == e.DELETED: # If the file is already missing, this may be a merge # patchset: remove the entry so the target won't try # to delete it twice if not exists(join(self.repository.basedir, e.name)): changeset.entries.remove(e) self.log.warning('Repeated deletion of %s, assuming a merge', e.name) elif e.action_kind == e.UPDATED: # If an updated entries does not exist promote it to # an addition if not exists(join(self.repository.basedir, e.name)): e.action_kind = e.ADDED self.log.warning('Update of missing entry %s, promoting to ADD', e.name) elif e.action_kind == e.RENAMED: entrydir = split(e.name)[0] if entrydir and not exists(join(self.repository.basedir, entrydir)): addeddirs.append((e, entrydir)) oldentrydir = split(e.old_name)[0] if oldentrydir in mayberendirs: mayberendirs[oldentrydir].append(e) else: mayberendirs[oldentrydir] = [e] # hg allows to "split" a file in several chunks, # renaming (but it should really be called # "replicating") the same entry to multiple # destinations. Convert succeding ones to ADDs. if e.old_name in renamed: self.log.warning('Detected multiple renames of a single ' 'entry %s to %s, converting to ADD', e.old_name, e.name) e.action_kind = e.ADDED e.old_name = None else: renamed.add(e.old_name) repo = self._getRepo() node = self._getNode(repo, changeset.revision) self.log.info('Updating to %r', changeset.revision) res = repo.update(node) # The following code is for backward compatibility: hg 0.9.5 # raises an Abort exception instead of just returning a status; # but under 0.9.5 we reimplanted hg.clean() into repo.update(): # hg.clean() performs a clobbering clean merge and thus does # not stop on that situation. if res: # Files in to-be-merged changesets not on the trunk will # cause a merge error on update. If no files are modified, # added, removed, or deleted, do update -C modified, added, removed, deleted = repo.changes()[0:4] conflicting = modified + added + removed + deleted if conflicting: res = conflicting else: res = repo.update(node, force=True) for e in changeset.entries: if e.action_kind == e.DELETED: entrydir = split(e.name)[0] if not entrydir in deleteddirs and self.__maybeDeleteDirectory(entrydir): deleteddirs.append(entrydir) elif e.action_kind == e.RENAMED: entrydir = split(e.old_name)[0] if (not entrydir in deleteddirs and not e.name.startswith(entrydir+'/') and self.__maybeDeleteDirectory(entrydir)): deleteddirs.append(entrydir) # Recognize directory renames dirsadded = set([d[1] for d in addeddirs]) for dir in mayberendirs: entries = mayberendirs[dir] pivottargetdir = split(entries[0].name)[0] # If this is a new directory, and all the entries in the # original directory were moved there... 
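            # For example (names made up): if new/ was just added, old/
            # disappeared, and every RENAMED entry that lived in old/
            # now lives in new/, the per-file renames are replaced below
            # by a single directory rename old/ -> new/.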
if pivottargetdir in dirsadded and dir in deleteddirs: alltothesamedir = True for e in entries: if e.action_kind == e.RENAMED \ and split(e.old_name)[0] == dir \ and split(e.name)[0] != pivottargetdir: alltothesamedir = False break if alltothesamedir: # Replace all RENs on the single files with a REN # on the directory for e in entries: if e.action_kind == e.RENAMED \ and split(e.old_name)[0] == dir: changeset.entries.remove(e) rendir = changeset.addEntry(pivottargetdir, None) rendir.action_kind = rendir.RENAMED rendir.old_name = dir for d in addeddirs[:]: if d[1] == pivottargetdir: addeddirs.remove(d) deleteddirs.remove(dir) self.log.debug('Identified directory rename, from %s to %s', dir, pivottargetdir) # Fake up ADD and DEL events for the directories implicitly # added/removed, so that the replayer gets their name. done = set() for entry,path in addeddirs: if not path in done: done.add(path) entry = changeset.addEntry(path, None, before=entry) entry.action_kind = entry.ADDED entry.is_directory = True self.log.debug("registering new %s directory", entry.name) done = set() for path in deleteddirs: if not path in done: done.add(path) deldir = changeset.addEntry(path, None) deldir.action_kind = deldir.DELETED deldir.is_directory = True self.log.debug("registering %s directory deletion", path) return res def _changesetForRevision(self, repo, revision): from datetime import datetime from vcpx.changes import Changeset, ChangesetEntry from vcpx.tzinfo import FixedOffset entries = [] node = self._getNode(repo, revision) parents = repo.changelog.parents(node) nodecontent = repo.changelog.read(node) # hg 0.9.5+ returns a tuple of six elements, last seems useless for us (manifest, user, date, files, message) = nodecontent[:5] dt, tz = date date = datetime.fromtimestamp(dt, FixedOffset(-tz/60)) # note the minus sign! manifest = repo.manifest.read(manifest) # To find adds, we get the manifests of any parents. If a file doesn't # occur there, it's new. pms = {} for parent in repo.changelog.parents(node): pms.update(repo.manifest.read(repo.changelog.read(parent)[0])) # if files contains only '.hgtags', this is probably a tag cset. # Tailor appears to only support tagging the current version, so only # pass on tags that are for the immediate parents of the current node tags = None if files == ['.hgtags']: tags = [tag for (tag, tagnode) in repo.tags().iteritems() if tagnode in parents] # Don't include the file itself in the changeset. It's only useful # to mercurial, and if we do end up making a tailor round trip # the nodes will be wrong anyway. if '.hgtags' in files: files.remove('.hgtags') if pms.has_key('.hgtags'): del pms['.hgtags'] for f in files: e = ChangesetEntry(f) # find renames fl = repo.file(f) oldname = f in manifest and fl.renamed(manifest[f]) if oldname: e.action_kind = ChangesetEntry.RENAMED e.old_name = oldname[0] # hg copy can copy the same file to multiple destinations # Currently this is handled as multiple renames. It would # probably be better to have ChangesetEntry.COPIED. 
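                # E.g. a single commit doing "hg copy a b" followed by
                # "hg copy a c" produces two entries, b and c, both
                # reporting a as their old_name.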
                if pms.has_key(oldname[0]):
                    pms.pop(oldname[0])
            else:
                if pms.has_key(f):
                    e.action_kind = ChangesetEntry.UPDATED
                else:
                    e.action_kind = ChangesetEntry.ADDED

            entries.append(e)

        for df in [file for file in pms.iterkeys()
                   if not manifest.has_key(file)]:
            e = ChangesetEntry(df)
            e.action_kind = ChangesetEntry.DELETED
            entries.append(e)

        from mercurial.node import hex
        revision = hex(node)
        return Changeset(revision, date, user, message, entries, tags=tags)

    def _getUI(self):
        try:
            return self._ui
        except AttributeError:
            project = self.repository.projectref()
            debug = project.config.get(self.repository.name, 'debug', False)
            self._ui = ui.ui(project.verbose, debug, not debug, False)
            return self._ui

    def _getRepo(self):
        try:
            return self._hg
        except AttributeError:
            # dirstate walker uses simple string comparison between
            # repo root and os.getcwd, so root should be canonified.
            from os.path import realpath

            ui = self._getUI()
            self._hg = hg.repository(ui=ui,
                                     path=realpath(self.repository.basedir),
                                     create=False)
            # Pick up repository-specific UI settings.
            self._ui = self._hg.ui

            # 0.9.5 repos do not have update()...
            if not hasattr(self._hg, 'update'):
                # Use clean(), to force a clean merge clobbering local changes
                self._hg.update = lambda n: hg.clean(self._hg, n)
            return self._hg

    def _getNode(self, repo, revision):
        """Convert a tailor revision ID into an hg node"""

        if revision == "HEAD":
            node = repo.changelog.tip()
        else:
            if revision == "INITIAL":
                rev = "0"
            else:
                rev = revision
            node = repo.changelog.lookup(rev)
        return node

    def _normalizeEntryPaths(self, entry):
        """
        Normalize the name and old_name of an entry.

        This implementation uses ``mercurial.util.normpath()``, since
        at this level hg is expecting UNIX style pathnames, with
        forward slash "/" as separator, also under insane operating
        systems.
        """

        from mercurial.util import normpath

        entry.name = normpath(self.repository.encode(entry.name))
        if entry.old_name:
            entry.old_name = normpath(self.repository.encode(entry.old_name))

    def _removeDirs(self, names):
        """Remove the names that reference a directory."""

        from os.path import isdir, join, normpath

        return [n for n in names
                if not isdir(join(self.repository.basedir, normpath(n)))]

    def _addPathnames(self, names):
        from os.path import join

        notdirs = self._removeDirs(names)
        if notdirs:
            self.log.info('Adding %s...', ', '.join(notdirs))
            self._hg.add(notdirs)

    def _commit(self, date, author, patchname, changelog=None, names=[],
                tags = [], isinitialcommit = False):
        from calendar import timegm  # like mktime(), but returns UTC timestamp
        from os.path import exists, join, normpath

        encode = self.repository.encode

        logmessage = []
        if patchname:
            logmessage.append(patchname)
        if changelog:
            logmessage.append(changelog)

        if logmessage:
            self.log.info('Committing %s...', logmessage[0])
            logmessage = encode('\n'.join(logmessage))
        else:
            self.log.info('Committing...')
            logmessage = "Empty changelog"
        timestamp = timegm(date.utctimetuple())
        timezone = date.utcoffset().seconds + date.utcoffset().days * 24 * 3600
        opts = {}
        opts['message'] = logmessage
        opts['user'] = encode(author)
        opts['date'] = '%d %d' % (timestamp, -timezone)  # note the minus sign!
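        # Mercurial wants the date as "<seconds since epoch> <offset>",
        # with the offset expressed in seconds *west* of UTC: a zone
        # east of UTC (a positive utcoffset()) must therefore be
        # negated here, e.g. UTC+2 becomes "... -7200".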
notdirs = self._removeDirs(names) if (not isinitialcommit) and len(notdirs) == 0 and \ (tags is None or len(tags) == 0): # Empty changeset; make sure we still see it empty = open(join(self.repository.basedir, '.hgempty'), 'a') empty.write("\nEmpty original changeset by %s:\n" % author) empty.write(logmessage + "\n") empty.close() self._hg.add(['.hgempty']) self._hgCommand('commit', **opts) def _postCommitCheck(self): repo = self._getRepo() modified, added, removed, deleted, \ unknown, ignored, clean = [n for n in repo.status()] if modified or added or removed or deleted or unknown: raise PostCommitCheckFailure( "Changes left in working dir after commit: %s" % str(modified or added or removed or deleted or unknown)) def _tag(self, tag, date, author): """ Tag the tip with a given identifier """ # TODO: keep a handle on the changeset holding this tag? Then # we can extract author, log, date from it. # This seems gross. I don't get why I'm getting a unicode tag when # it's just ascii underneath. Something weird is happening in CVS. tag = self.repository.encode(tag) # CVS can't tell when a tag was applied so it tends to pass around # too many. We want to support retagging so we can't just ignore # duplicates. But we can safely ignore a tag if it is contained # in the commit history from tip back to the last non-tag commit. repo = self._getRepo() tagnodes = repo.tags().values() try: tagnode = repo.tags()[tag] # tag commit can't be merge, right? parent = repo.changelog.parents(repo.changelog.tip())[0] while parent in tagnodes: if tagnode == parent: return parent = repo.changelog.parents(parent)[0] except KeyError: pass self._hgCommand('tag', tag) def _defaultOpts(self, cmd): # Not sure this is public. commands.parse might be, but this # is easier, and while dispatch is easiest, you lose ui. # findxxx() is not public, and to make that clear, hg folks # keep moving the function around... if hasattr(cmdutil, 'findcmd'): # >= 0.9.4 if cmdutil.findcmd.func_code.co_argcount == 2: # 0.9.4 def findcmd(cmd): return cmdutil.findcmd(self._getUI(), cmd) elif cmdutil.findcmd.func_code.co_argcount == 3: # >= 0.9.5 def findcmd(cmd): if cmdutil.findcmd.func_code.co_varnames[0] == "ui": # < 1.1.0 return cmdutil.findcmd(self._getUI(), cmd, commands.table) else: # >= 1.1.0 return cmdutil.findcmd(cmd, commands.table) elif hasattr(commands, 'findcmd'): # < 0.9.4 if commands.findcmd.func_code.co_argcount == 1: findcmd = commands.findcmd else: def findcmd(cmd): return commands.findcmd(self._getUI(), cmd) elif hasattr(commands, 'find'): # ancient hg findcmd = commands.find else: raise RuntimeError("unable to locate mercurial's 'findcmd()'") return dict([(f[1].replace('-', '_'), f[2]) for f in findcmd(cmd)[1][1]]) def _hgCommand(self, cmd, *args, **opts): import os allopts = self._defaultOpts(cmd) allopts.update(opts) cmd = getattr(commands, cmd) cwd = os.getcwd() os.chdir(self.repository.basedir) try: cmd(self._ui, self._hg, *args, **allopts) finally: os.chdir(cwd) def _removePathnames(self, names): """Remove a sequence of entries""" from os.path import join self.log.info('Removing %s...', ', '.join(names)) for name in names: files = self._walk(name) # We can't use isdir because the source has already # removed the entry, so we do a dirstate lookup. 
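            # _walk() returns the tracked files below name relative to
            # it, or None when name is a plain file: a non-empty list
            # thus means we are removing a whole directory of files.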
            if files:
                for f in files:
                    self._hgCommand('remove', join(name, f), unlink=True)
            else:
                self._hgCommand('remove', name, unlink=True)

    def _renamePathname(self, oldname, newname):
        """Rename an entry"""

        from os.path import join, isdir, normpath

        self.log.info('Renaming %r to %r...', oldname, newname)
        # Check both names, because maybe we are operating in
        # disjunct dirs, and the target may be renamed to a
        # temporary name
        if (isdir(join(self.repository.basedir, normpath(oldname)))
            or isdir(join(self.repository.basedir, normpath(newname)))):
            # Given lack of support for directories in current HG,
            # loop over all files under the old directory and
            # do a copy on them.
            for f in self._walk(oldname):
                oldpath = join(oldname, f)
                self._hgCommand('copy', oldpath, join(newname, f))
                self._hgCommand('remove', oldpath, unlink=True)
        else:
            self._hgCommand('copy', oldname, newname)
            self._hgCommand('remove', oldname, unlink=True)

    def _prepareTargetRepository(self):
        """
        Create the base directory if it doesn't exist, and the
        repository as well in the new working directory.
        """

        from os.path import join, exists, realpath

        self._getUI()

        if exists(join(self.repository.basedir, self.repository.METADIR)):
            create = 0
        else:
            create = 1
            self.log.info('Initializing new repository in %r...',
                          self.repository.basedir)
        self._hg = hg.repository(ui=self._ui,
                                 path=realpath(self.repository.basedir),
                                 create=create)

    def _prepareWorkingDirectory(self, source_repo):
        """
        Create the .hgignore.
        """

        from os.path import join
        from re import escape
        from vcpx.dualwd import IGNORED_METADIRS

        # Create the .hgignore file, that contains a regexp per line
        # with all known VCs metadirs to be skipped.
        ignore = open(join(self.repository.basedir, '.hgignore'), 'w')
        ignore.write('\n'.join(['(^|/)%s($|/)' % escape(md)
                                for md in IGNORED_METADIRS]))
        ignore.write('\n')
        if self.logfile.startswith(self.repository.basedir):
            ignore.write('^')
            ignore.write(self.logfile[len(self.repository.basedir)+1:])
            ignore.write('$\n')
        if self.state_file.filename.startswith(self.repository.basedir):
            sfrelname = self.state_file.filename[len(self.repository.basedir)+1:]
            ignore.write('^')
            ignore.write(sfrelname)
            ignore.write('$\n')
            ignore.write('^')
            ignore.write(sfrelname+'.old')
            ignore.write('$\n')
            ignore.write('^')
            ignore.write(sfrelname+'.journal')
            ignore.write('$\n')
        ignore.close()
        self._hg.add(['.hgignore'])
        self._hgCommand('commit', '.hgignore',
                        message = 'Tailor preparing to convert repo by adding .hgignore')

    def _initializeWorkingDir(self):
        self._hgCommand('add')

    def _walk(self, subdir):
        """
        Returns the files mercurial knows about under subdir, relative
        to subdir.
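
        For instance (names purely illustrative), with tracked files
        ``a/b`` and ``a/c/d``, walking ``a`` yields ``['b', 'c/d']``,
        while walking a plain tracked file returns None.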
""" from os.path import join, split files = [] try: # hg >= 1.1 # def walk(self, match, unknown, ignored) from mercurial import match def get_paths(): matcher = match.exact(self.repository.basedir, self.repository.basedir, [subdir]) walk = self._getRepo().dirstate.walk for path in walk(matcher, True, False): yield path except ImportError: # hg < 1.1 # def walk(self, files=None, match=util.always, badmatch=None) def get_paths(): walk = self._getRepo().dirstate.walk for src, path in walk([subdir]): yield path for path in get_paths(): # If subdir is a plain file, just return if path == subdir: return None (hd, tl) = split(path) while hd != subdir and hd != '': hd, nt = split(hd) tl = join(nt, tl) files.append(tl) return files tailor-0.9.35+darcs20090615/vcpx/repository/cvsps.py0000644000175000017500000007324111215407134022073 0ustar vdanjeanvdanjean# -*- mode: python; coding: utf-8 -*- # :Progetto: vcpx -- CVSPS details # :Creato: mer 16 giu 2004 00:46:12 CEST # :Autore: Lele Gaifax # :Licenza: GNU General Public License # """ This module contains supporting classes for CVS. To get a cross-repository revision number a la Subversion, the implementation uses `cvsps` to fetch the changes from the upstream repository. """ __docformat__ = 'reStructuredText' from vcpx import TailorException from vcpx.repository import Repository from vcpx.shwrap import ExternalCommand, PIPE from vcpx.source import UpdatableSourceWorkingDir, ChangesetApplicationFailure, \ InvocationError from vcpx.target import SynchronizableTargetWorkingDir, TargetInitializationFailure from vcpx.tzinfo import UTC class EmptyRepositoriesFoolsMe(TailorException): "Cannot handle empty repositories. Maybe wrong module/repository?" # This is the exception raised when we try to tailor an empty CVS # repository. This is more a shortcoming of tailor, rather than a # real problem with those repositories. class CvspsRepository(Repository): METADIR = 'CVS' def _load(self, project): from datetime import timedelta Repository._load(self, project) self.EXECUTABLE = project.config.get(self.name, 'cvs-command', 'cvs') self.__cvsps = project.config.get(self.name, 'cvsps-command', 'cvsps') self.tag_entries = project.config.get(self.name, 'tag-entries', 'True') self.freeze_keywords = project.config.get(self.name, 'freeze-keywords', 'False') threshold = project.config.get(self.name, 'changeset-threshold', '180') self.changeset_threshold = timedelta(seconds=float(threshold)) def _validateConfiguration(self): from os.path import split from vcpx.config import ConfigurationError Repository._validateConfiguration(self) if not self.module and self.repository: self.module = split(self.repository)[1] if not self.module: self.log.critical('Missing module information in %r', self.name) raise ConfigurationError("Must specify a repository and maybe " "a module also") if self.module.endswith('/'): self.log.debug("Removing final slash from %r in %r", self.module, self.name) self.module = self.module.rstrip('/') def command(self, *args, **kwargs): if kwargs.get('cvsps', False): kwargs['executable'] = self.__cvsps return Repository.command(self, *args, **kwargs) def create(self): """ Create a local CVS repository. 
""" from os import rmdir, makedirs from tempfile import mkdtemp from os.path import join, exists if self.repository.startswith(':local:'): path = self.repository[7:] elif self.repository.startswith('/'): path = self.repository else: # Remote repository return if exists(join(path, 'CVSROOT')): return makedirs(path) cmd = self.command("-f", "-d", path, "init") c = ExternalCommand(command=cmd) c.execute() if c.exit_status: raise TargetInitializationFailure("Could not create CVS repository at %r", path) if self.module: tempwc = mkdtemp('cvs', 'tailor') cmd = self.command("-f", "-d", path, "import", "-m", "This directory will host the " "upstream sources", self.module, "tailor", "start") c = ExternalCommand(cwd=tempwc, command=cmd) c.execute() rmdir(tempwc) if c.exit_status: raise TargetInitializationFailure("Could not create initial module") def changesets_from_cvsps(log, sincerev=None): """ Parse CVSps log. """ from datetime import datetime from vcpx.changes import Changeset, ChangesetEntry from vcpx.repository.cvs import compare_cvs_revs # cvsps output sample: ## --------------------- ## PatchSet 1500 ## Date: 2004/05/09 17:54:22 ## Author: grubert ## Branch: HEAD ## Tag: (none) ## Log: ## Tell the reason for using mbox (not wrapping long lines). ## ## Members: ## docutils/writers/latex2e.py:1.78->1.79 l = None while 1: l = log.readline() if l <> '---------------------\n': break l = log.readline() assert l.startswith('PatchSet '), "Parse error: %s"%l pset = {} pset['revision'] = l[9:-1].strip() l = log.readline() while not l.startswith('Log:'): field,value = l.split(':',1) pset[field.lower()] = value.strip() l = log.readline() msg = [] l = log.readline() msg.append(l) l = log.readline() while l <> 'Members: \n': msg.append(l) l = log.readline() assert l.startswith('Members:'), "Parse error: %s" % l entries = [] l = log.readline() seen = {} while l.startswith('\t'): if not sincerev or (sincerev') # Due to the fuzzy mechanism, cvsps may group # together two commits on a single entry, thus # giving something like: # # Normalizer.py:1.12->1.13 # Registry.py:1.22->1.23 # Registry.py:1.21->1.22 # Stopwords.py:1.9->1.10 # # Collapse those into a single one. e = seen.get(file) if not e: e = ChangesetEntry(file) e.old_revision = fromrev e.new_revision = torev seen[file] = e entries.append(e) else: if compare_cvs_revs(e.old_revision, fromrev)>0: e.old_revision = fromrev if compare_cvs_revs(e.new_revision, torev)<0: e.new_revision = torev if fromrev=='INITIAL': e.action_kind = e.ADDED elif "(DEAD)" in torev: e.action_kind = e.DELETED e.new_revision = torev[:torev.index('(DEAD)')] else: e.action_kind = e.UPDATED l = log.readline() if not sincerev or (sincerev3: break delay = 2**retry self.log.warning("%s returned status %s, " "retrying in %d seconds...", str(cvsup), cvsup.exit_status, delay) sleep(retry) else: break if cvsup.exit_status: raise ChangesetApplicationFailure( "%s returned status %s" % (str(cvsup), cvsup.exit_status)) self.log.debug("%s updated to %s", ','.join(names), e.new_revision) # Fake up ADD and DEL events for the directories implicitly # added/removed, so that the replayer gets their name. 
        for entry,path in addeddirs:
            entry = changeset.addEntry(path, None, before=entry)
            entry.action_kind = entry.ADDED
            entry.is_directory = True
            self.log.debug("registering new %s directory", entry.name)

        for path in deleteddirs:
            deldir = changeset.addEntry(path, None)
            deldir.action_kind = deldir.DELETED
            deldir.is_directory = True
            self.log.debug("registering %s directory deletion", path)
            # Since we are not going to issue cvs updates on whole
            # directories but only on files, most probably the -P
            # above has no effect: remove the deleted dir if it's still
            # there.
            if exists(join(self.repository.basedir, path)):
                self.log.info("removing stale deleted directory %s", path)
                rmtree(join(self.repository.basedir, path))

        # Make sure all files are present on disk: CVS update does not
        # create them, nor does it report an error if the files have
        # been completely removed from the cvs repository. So loop over
        # the entries and verify the presence of added/changed ones.
        for entry in changeset.entries[:]:
            if (entry.action_kind in (entry.ADDED, entry.UPDATED)
                and not exists(join(self.repository.basedir, entry.name))):
                self.log.warning("Ignoring entry %s, CVS source no "
                                 "longer knows about it.", entry.name)
                changeset.entries.remove(entry)

    def _checkoutUpstreamRevision(self, revision):
        """
        Concretely do the checkout of the upstream sources. Use
        `revision` as the name of the tag to get, or as a date if it
        starts with a number.

        Return the last applied changeset.
        """

        from os.path import join, exists, split
        from time import sleep
        from vcpx.repository.cvs import CvsEntries, compare_cvs_revs
        from vcpx.changes import ChangesetEntry

        if not self.repository.module:
            raise InvocationError("Must specify a module name")

        timestamp = None
        if revision is not None:
            # If the revision contains a space, assume it really
            # specifies a branch and a timestamp. If it starts with
            # a digit, assume it's a timestamp. Otherwise, it must
            # be a branch name
            if revision[0] in '0123456789' or revision == 'INITIAL':
                timestamp = revision
                revision = None
            elif ' ' in revision:
                revision, timestamp = revision.split(' ', 1)

        csets = self.getPendingChangesets(revision)
        if not csets:
            raise TargetInitializationFailure(
                "Something went wrong: there are no changesets since "
                "revision '%s'" % revision)
        if timestamp == 'INITIAL':
            initialcset = csets.next()
            timestamp = initialcset.date.replace(tzinfo=None).isoformat(sep=' ')
        else:
            initialcset = None

        if not exists(join(self.repository.basedir, 'CVS')):
            # CVS does not handle "checkout -d multi/level/subdir", so
            # split the basedir and use its parent directory as cwd below.
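            # E.g. with a basedir of /tmp/wc/project the command is run
            # in /tmp/wc as "cvs ... checkout -d project <module>".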
            parentdir, subdir = split(self.repository.basedir)
            cmd = self.repository.command("-f",
                                          "-q",
                                          "-d", self.repository.repository,
                                          "checkout",
                                          "-d", subdir)
            if revision:
                cmd.extend(["-r", revision])
            if timestamp:
                cmd.extend(["-D", "%s UTC" % timestamp])
            if self.repository.freeze_keywords:
                cmd.append('-kk')

            checkout = ExternalCommand(cwd=parentdir, command=cmd)
            retry = 0
            while True:
                checkout.execute(self.repository.module)
                if checkout.exit_status:
                    retry += 1
                    if retry>3:
                        break
                    delay = 2**retry
                    self.log.warning("%s returned status %s, "
                                     "retrying in %d seconds...",
                                     str(checkout), checkout.exit_status,
                                     delay)
                    sleep(delay)
                else:
                    break

            if checkout.exit_status:
                raise TargetInitializationFailure(
                    "%s returned status %s" % (str(checkout),
                                               checkout.exit_status))
        else:
            self.log.info("Using existing %s", self.repository.basedir)

        if self.repository.tag_entries:
            self.__forceTagOnEachEntry()

        entries = CvsEntries(self.repository.basedir)
        youngest_entry = entries.getYoungestEntry()
        if youngest_entry is None:
            raise EmptyRepositoriesFoolsMe("The working copy '%s' of the "
                                           "CVS repository seems empty, "
                                           "don't know how to deal with "
                                           "that." % self.repository.basedir)

        # loop over the changesets and find the last applied, to find
        # out the actual cvsps revision

        found = False

        def already_applied(cs, entries=entries):
            "Loop over changeset entries to determine if it's already applied."

            applied = False

            # applied becomes True when an entry is DELETED *and* there is
            # no metainfo for it: thus, a changeset that removes a few entries
            # very late in history would be assumed to be applied. Prevent that
            # by checking for at least one explicit match on an existing entry.
            onepositive = False

            for m in cs.entries:
                info = entries.getFileInfo(m.name)

                # If the entry's info exists, compare the on-disk
                # version with what we have: the revision is already
                # applied if the former is greater than or equal to the
                # latter. The same if the info does not exist and it's
                # a delete event.

                if info:
                    odversion = info.cvs_version
                    applied = compare_cvs_revs(odversion, m.new_revision) >= 0

                    # If only one "hunk" is not yet applied, the whole
                    # changeset is new.
                    if not applied:
                        break
                    else:
                        onepositive = True
                elif m.action_kind == ChangesetEntry.DELETED:
                    applied = True

            return applied and onepositive

        # We cannot stop at the first not-applied cset, because it may
        # be old enough to trick already_applied(): an entry may have
        # been moved in the meantime, and thus the getFileInfo()
        # method would return None, for example... So we really have
        # to loop over the whole queue.

        for cset in self.state_file:
            applied = already_applied(cset)
            found = found or applied
            if applied:
                last = cset

        if not found and initialcset:
            found = already_applied(initialcset)
            if found:
                last = initialcset

        if not found:
            raise TargetInitializationFailure(
                "Something went wrong: unable to determine the exact upstream "
                "revision of the checked out tree in '%s'. Either you specified "
                "the wrong initial timestamp, or you are checking out a "
                "composition of 'CVS modules' and Tailor does not support them; "
                "see the option 'trim-module-components' for a possible "
                "workaround." % self.repository.basedir)
        else:
            self.log.info("Working copy up to revision %s", last.revision)

        return last

    def __createParentCVSDirectories(self, changeset, entry):
        """
        Verify that the hierarchy down to the entry is under CVS.

        If the directory containing the entry does not exist, create
        it and make it appear as under CVS so that a subsequent 'cvs
        update' will work.

        Return the list of directories actually added, if any.
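
        For example (purely illustrative), when adding a/b/c.txt with
        only a/ already under CVS, this creates a/b with a minimal
        CVS/Entries, CVS/Repository and CVS/Root, and returns ['a/b'].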
""" from os.path import split, join, exists from os import mkdir tobeadded = [] path = split(entry)[0] parentcvs = join(self.repository.basedir, path, 'CVS') while not exists(parentcvs): tobeadded.insert(0, path) if not path: break path = split(path)[0] parentcvs = join(self.repository.basedir, path, 'CVS') assert exists(parentcvs), "Uhm, strange things happen: " \ "unable to find or create parent CVS area for %r" % entry if tobeadded: reposf = open(join(parentcvs, 'Repository')) rep = reposf.readline()[:-1] reposf.close() rootf = open(join(parentcvs, 'Root')) root = rootf.readline() rootf.close() for toadd in tobeadded: basedir = join(self.repository.basedir, toadd) cvsarea = join(basedir, 'CVS') if not exists(basedir): mkdir(basedir) # Create fake CVS area mkdir(cvsarea) # Create an empty "Entries" file entries = open(join(cvsarea, 'Entries'), 'w') entries.close() reposf = open(join(cvsarea, 'Repository'), 'w') rep = '/'.join((rep, split(basedir)[1])) reposf.write("%s\n" % rep) reposf.close() rootf = open(join(cvsarea, 'Root'), 'w') rootf.write(root) rootf.close() return tobeadded ## SynchronizableTargetWorkingDir def _prepareTargetRepository(self): """ Create the CVS repository if it's local and does not exist. """ self.repository.create() def _prepareWorkingDirectory(self, source_repo): """ Checkout a working copy of the target CVS. """ from os.path import join, exists, split if not self.repository.repository or exists(join(self.repository.basedir, 'CVS')): return # CVS does not handle "checkout -d multi/level/subdir", so # split the basedir and use it's parentdir as cwd below. parentdir, subdir = split(self.repository.basedir) cmd = self.repository.command("-f", "-d", self.repository.repository, "co", "-d", subdir) cvsco = ExternalCommand(cwd=parentdir, command=cmd) cvsco.execute(self.repository.module) def _parents(self, path): from os.path import exists, join, split parents = [] parent = split(path)[0] while parent: if exists(join(self.repository.basedir, parent, 'CVS')): break parents.insert(0, parent) parent = split(parent)[0] return parents def _addEntries(self, entries): """ Synthesize missing parent directory additions """ allnames = [e.name for e in entries] newdirs = [] for entry in allnames: for parent in [p for p in self._parents(entry) if p not in allnames]: if p not in newdirs: newdirs.append(parent) newdirs.extend(allnames) self._addPathnames(newdirs) def _addPathnames(self, names): """ Add some new filesystem objects. """ cmd = self.repository.command("-f", '-q', 'add', '-ko') ExternalCommand(cwd=self.repository.basedir, command=cmd).execute(names) def __forceTagOnEachEntry(self): """ Massage each CVS/Entries file, locking (ie, tagging) each entry to its current CVS version. This is to prevent silly errors such those that could arise after a manual ``cvs update`` in the working directory. """ from os import walk, rename, remove from os.path import join, exists self.log.info("Forcing CVS sticky tag in %s", self.repository.basedir) for dir, subdirs, files in walk(self.repository.basedir): if dir[-3:] == 'CVS': efn = join(dir, 'Entries') # Strangeness is a foreign word in CVS: sometime # the Entries isn't there... 
if not exists(efn): continue f = open(efn) entries = f.readlines() f.close() newentries = [] for e in entries: if e.startswith('/'): fields = e.split('/') fields[-1] = "T%s\n" % fields[2] newe = '/'.join(fields) newentries.append(newe) else: newentries.append(e) rename(efn, efn+'.tailor-old') f = open(efn, 'w') f.writelines(newentries) f.close() remove(efn+'.tailor-old') def _commit(self, date, author, patchname, changelog=None, entries=None, tags = [], isinitialcommit = False): """ Commit the changeset. """ from vcpx.shwrap import ReopenableNamedTemporaryFile encode = self.repository.encode logmessage = [] if patchname: logmessage.append(patchname) if changelog: logmessage.append(changelog) logmessage.append('') logmessage.append('Original author: %s' % author) logmessage.append('Date: %s' % date) rontf = ReopenableNamedTemporaryFile('cvs', 'tailor') log = open(rontf.name, "w") log.write(encode('\n'.join(logmessage))) log.close() cmd = self.repository.command("-f", "-q", "ci", "-F", rontf.name) if not entries: entries = ['.'] c = ExternalCommand(cwd=self.repository.basedir, command=cmd) c.execute(entries) if c.exit_status: raise ChangesetApplicationFailure("%s returned status %d" % (str(c), c.exit_status)) def _removePathnames(self, names): """ Remove some filesystem objects. """ cmd = self.repository.command("-f", "-q", "remove") ExternalCommand(cwd=self.repository.basedir, command=cmd).execute(names) def _renamePathname(self, oldname, newname): """ Rename a filesystem object. """ self._removePathnames([oldname]) self._addPathnames([newname]) def _tag(self, tagname, date, author): """ Apply a tag. """ # Sanitize tagnames for CVS: start with [a-zA-z], only include letters, # numbers, '-' and '_'. # str.isalpha et al are locale-dependent def iscvsalpha(chr): return (chr >= 'a' and chr <= 'z') or (chr >= 'A' and chr <= 'Z') def iscvsdigit(chr): return chr >= '0' and chr <= '9' def iscvschar(chr): return iscvsalpha(chr) or iscvsdigit(chr) or chr == '-' or chr == '_' def cvstagify(chr): if iscvschar(chr): return chr else: return '_' tagname = ''.join([cvstagify(chr) for chr in tagname]) if not iscvsalpha(tagname[0]): tagname = 'tag-' + tagname cmd = self.repository.command("-f", "tag") c = ExternalCommand(cwd=self.repository.basedir, command=cmd) c.execute(tagname) if c.exit_status: raise ChangesetApplicationFailure("%s returned status %d" % (str(c), c.exit_status)) tailor-0.9.35+darcs20090615/vcpx/repository/cvs.py0000644000175000017500000007306311215407134021532 0ustar vdanjeanvdanjean# -*- mode: python; coding: utf-8 -*- # :Progetto: vcpx -- Pure CVS solution # :Creato: dom 11 lug 2004 01:59:36 CEST # :Autore: Lele Gaifax # :Licenza: GNU General Public License # """ Given `cvsps` shortcomings, this backend uses CVS only. """ __docformat__ = 'reStructuredText' from vcpx.repository.cvsps import CvspsRepository, CvspsWorkingDir from vcpx.shwrap import ExternalCommand, STDOUT, PIPE from vcpx.source import GetUpstreamChangesetsFailure from vcpx.config import ConfigurationError from vcpx.tzinfo import UTC class CvsRepository(CvspsRepository): def _load(self, project): CvspsRepository._load(self, project) tmc = project.config.get(self.name, 'trim-module-components', '0') self.trim_module_components = int(tmc) def normalize_cvs_rev(rev): """Convert a revision string to a tuple of numbers, eliminating the penultimate zero in a 'magic branch number' if there is one. 1.1.1.1 is converted to (1,1). 
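
    A few illustrative conversions: '1.2.0.2' becomes (1, 2, 2),
    '1.1.1.1' becomes (1, 1), and a locked entry such as
    '1.2 locked' still yields (1, 2).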
""" if not rev: rev = '0' # handle locked files by taking only the first part of the # revision string to handle gracefully lines like "1.1 locked" rev = rev.split()[0] r = [int(n) for n in rev.split('.')] # convert "magic branch numbers" like 1.2.0.2 to regular # branch numbers like 1.2.2. if len(r) > 2 and r[-2] == 0: r = r[0:-2] + r[-1:] if r == [1,1,1,1]: r = [1,1] return tuple(r) def compare_cvs_revs(revstr1, revstr2): """Compare two CVS revision strings numerically, not alphabetically.""" r1 = normalize_cvs_rev(revstr1) r2 = normalize_cvs_rev(revstr2) return cmp(r1, r2) def cvs_revs_same_branch(rev1, rev2): """True iff the two normalized revision numbers are on the same branch.""" # Odd-length revisions are branch numbers, even-length ones # are revision numbers. # Two branch numbers can't be on the same branch unless they're identical. if len(rev1) % 2 and len(rev2) % 2: return rev1 == rev2 # Two revision numbers are on the same branch if they # agree up to the last number. if len(rev1) % 2 == 0 and len(rev2) % 2 == 0: return rev1[0:-1] == rev2[0:-1] # One branch number, one revision number. If by removing the last number # of one you get the other, then they're on the same branch, regardless of # which is longer. E.g. revision 1.2 is the root of the branch 1.2.2; # revision 1.2.2.2 is directly on the branch 1.2.2. if rev1[0:-1] == rev2: return True if rev2[0:-1] == rev1: return True return False def is_branch(rev): """True iff the given (normalized) revision number is a branch number""" if len(rev) % 2: return True def rev2branch(rev): """Return the branch on which this (normalized) revision lies""" assert not is_branch(rev) return rev[0:-1] def changesets_from_cvslog(log, module, branch=None, entries=None, since=None, threshold=None, trim_module_components=0): """ Parse CVS log. """ collected = ChangeSetCollector(log, module, branch, entries, since, trim_module_components) last = None if threshold is None: from datetime import timedelta threshold = timedelta(seconds=180) # Loop over collected changesets, and collapse those with same author, # same changelog and that were committed within a threshold one from the # other. If they have entries in common, keep them separated. Special # treatment to deleted entries, given that sometime there are two # deletions on the same file: in that case, keep only the last one, # with higher revision. for cs in collected: if (last and last.author == cs.author and last.log == cs.log and abs(lastts - cs.date) < threshold and not last.tags and not [e for e in cs.entries if e.name in [n.name for n in last.entries if n.action_kind <> e.action_kind]]): for e in cs.entries: if e.action_kind == e.DELETED: doubledelete = False for n in last.entries: if n.name == e.name and n.action_kind == n.DELETED: doubledelete = True n.new_revision = e.new_revision break if not doubledelete: last.entries.append(e) else: last.entries.append(e) last.tags = cs.tags if lastts < cs.date: lastts = cs.date else: if last: last.date = lastts yield last last = cs lastts = cs.date if last: yield last def _getGlobalCVSRevision(timestamp, author): """ CVS does not have the notion of a repository-wide revision number, since it tracks just single files. Here we could "count" the grouped changesets ala `cvsps`, but that's tricky because of branches. Since right now there is nothing that depends on this being a number, not to mention a *serial* number, simply emit a (hopefully) unique signature... 
""" # don't print timezone info, to remain compatible (does not buy us # anything, it being always UTC) return "%s by %s" % (timestamp.replace(tzinfo=None), author) def _splitGlobalCVSRevision(revision): """ Split what _getGlobalCVSRevision() returns into the two components. """ assert ' by ' in revision, \ "Simple revision found, expected 'timestamp by author'" return revision.split(' by ') class ChangeSetCollector(object): """Collector of the applied change sets.""" # Some string constants we look for in CVS output. intra_sep = '-' * 28 + '\n' inter_sep = '=' * 77 + '\n' def __init__(self, log, module, branch, entries, since, trim_module_components=0): """ Initialize a ChangeSetCollector instance. Loop over the modified entries and collect their logs. """ from logging import getLogger self.changesets = {} """The dictionary mapping (date, author, log) to each entry.""" self.cvslog = log """The log to be parsed.""" self.module = module """The CVS module name.""" self.__lookahead = [] """The look ahead line stack.""" self.log = getLogger('tailor.vcpx.cvs.collector') self.trim_module_components = trim_module_components self.__parseCvsLog(branch, entries, since) def __iter__(self): # Since there can be duplicate keys, try to produce the right # ordering taking into account the first action (thus ADDs # will preceed UPDs...) keys = [] for k,c in self.changesets.items(): action1 = len(c.entries)>0 and c.entries[0].action_kind or ' ' keys.append( (k[0], k[1], action1, k[2]) ) keys.sort() return iter([self.changesets[(k[0], k[1], k[3])] for k in keys]) def __collect(self, timestamp, author, changelog, entry, revision): """Register a change set about an entry.""" from vcpx.changes import Changeset key = (timestamp, author, changelog) if self.changesets.has_key(key): cs = self.changesets[key] for e in cs.entries: if e.name == entry: return e return cs.addEntry(entry, revision) else: cs = Changeset(_getGlobalCVSRevision(timestamp, author), timestamp, author, changelog) self.changesets[key] = cs return cs.addEntry(entry, revision) def __readline(self, lookahead=False): """ Read a line from the log, intercepting the directory being listed. This is used to determine the pathname of each entry, relative to the root of the working copy. """ if lookahead: l = self.cvslog.readline() self.__lookahead.append(l) return l else: if self.__lookahead: l = self.__lookahead.pop(0) else: l = self.cvslog.readline() # Some version of CVS emits the following with a different char-case while l.lower().startswith('cvs rlog: logging '): currentdir = l[18:-1] if currentdir.startswith(self.module): # If the directory starts with the module name, keep # just the remaining part self.__currentdir = currentdir[len(self.module)+1:] elif self.trim_module_components: # This is a quick&dirty workaround to the CVS modules # issue: if, by some heuristic, the user tells how # many components to cut off... parts = currentdir.split('/') if len(parts)>self.trim_module_components: parts = parts[self.trim_module_components:] else: parts = [] self.__currentdir = '/'.join(parts) else: # strip away first component, the name of the product slash = currentdir.find('/') if slash >= 0: self.__currentdir = currentdir[slash+1:] else: self.__currentdir = '' l = self.cvslog.readline() return l def __parseRevision(self, entry): """ Parse a single revision log, extracting the needed information. Return None when there are no more logs to be parsed, otherwise a tuple with the relevant data. 
""" from datetime import datetime revision = self.__readline() if not revision or not revision.startswith('revision '): return None # Don't just knock off the leading 'revision ' here. # There may be locks, in which case we get output like: # 'revision 1.4 locked by: mem;', with a tab char. rev = revision[:-1].split()[1] infoline = self.__readline() info = infoline.split(';') assert info[0][:6] == 'date: ', infoline # 2004-04-19 14:45:42 +0000, the timezone may be missing dateparts = info[0][6:].split(' ') assert len(dateparts) >= 2, `dateparts` day = dateparts[0] time = dateparts[1] y,m,d = map(int, day.split(day[4])) hh,mm,ss = map(int, time.split(':')) date = datetime(y,m,d,hh,mm,ss,0,UTC) assert info[1].strip()[:8] == 'author: ', infoline author = info[1].strip()[8:] assert info[2].strip()[:7] == 'state: ', infoline state = info[2].strip()[7:] # Fourth element, if present and like "lines +x -y", indicates # this is a change to an existing file. Otherwise its a new # one. newentry = not info[3].strip().startswith('lines: ') # The next line may be either the first of the changelog or a # continuation (?) of the preceeding info line with the # "branches" l = self.__readline() if l.startswith('branches: ') and l.endswith(';\n'): infoline = infoline[:-1] + ';' + l # read the effective first line of log l = self.__readline() mesg = [] while True: if l == self.intra_sep: if self.__readline(True).startswith('revision '): break if l in (None, '', self.inter_sep): break if l<>self.intra_sep: mesg.append(l[:-1]) l = self.__readline() if len(mesg)==1 and mesg[0] == '*** empty log message ***': changelog = '' else: changelog = '\n'.join(mesg) return (date, author, changelog, entry, rev, state, newentry) def __parseCvsLog(self, branch, entries, since): """Parse a complete CVS log.""" from os.path import split, join from re import compile from time import strptime from datetime import datetime from vcpx.changes import Changeset revcount_regex = compile('\\bselected revisions:\\s*(\\d+)\\b') self.__currentdir = None file2rev2tags = {} tagcounts = {} branchnum = None while 1: l = self.__readline() while l and not l.startswith('RCS file: '): l = self.__readline() if not l.startswith('RCS file: '): break assert self.__currentdir is not None, \ "Missed 'cvs rlog: Logging XX' line" entry = join(self.__currentdir, split(l[10:-1])[1][:-2]) if entries is not None: while l and not l.startswith('head: '): l = self.__readline() assert l, "Missed 'head:' line" if branch is None: branchnum = normalize_cvs_rev(l[6:-1]) branchnum = rev2branch(branchnum) while l and not l == 'symbolic names:\n': l = self.__readline() assert l, "Missed 'symbolic names:' line" l = self.__readline() rev2tags = {} while l.startswith('\t'): tag,revision = l[1:-1].split(': ') tagcounts[tag] = tagcounts.get(tag,0) + 1 revision = normalize_cvs_rev(revision) rev2tags.setdefault(revision,[]).append(tag) if tag == branch: branchnum = revision l = self.__readline() # branchnum may still be None, if this file doesn't exist # on the requested branch. # filter out branch tags, and tags for revisions that are # on other branches. 
for revision in rev2tags.keys(): if is_branch(revision) or \ not branchnum or \ not cvs_revs_same_branch(revision,branchnum): del rev2tags[revision] file2rev2tags[entry] = rev2tags expected_revisions = None while l not in (self.inter_sep, self.intra_sep): m = revcount_regex.search(l) if m is not None: expected_revisions = int(m.group(1)) l = self.__readline() last = previous = None found_revisions = 0 while (l <> self.inter_sep or not self.__readline(True).startswith('revision ')): cs = self.__parseRevision(entry) if cs is None: break date,author,changelog,e,rev,state,newentry = cs # CVS seems to sometimes mess up what it thinks the branch is... if branchnum and not cvs_revs_same_branch(normalize_cvs_rev(rev), branchnum): self.log.warning("Skipped revision %s on entry %s " "as revision didn't match branch revision %s " "for branch %s" % (str(normalize_cvs_rev(rev)), entry, str(branchnum), str(branch))) expected_revisions -= 1 continue if not (previous and state == 'dead' and previous.action_kind == previous.DELETED): # Skip spurious entries added in a branch if not (rev == '1.1' and state == 'dead' and changelog.startswith('file ') and ' was initially added on branch ' in changelog): last = self.__collect(date, author, changelog, e, rev) if state == 'dead': last.action_kind = last.DELETED elif newentry: last.action_kind = last.ADDED else: last.action_kind = last.UPDATED found_revisions = found_revisions + 1 if previous and last.action_kind == last.DELETED: # For unknown reasons, sometimes there are two dead # revision is a row. if previous.action_kind <> last.DELETED: previous.action_kind = previous.ADDED previous = last if expected_revisions <> found_revisions: self.log.warning('Expecting %s revisions, found %s', expected_revisions, found_revisions) # If entries is not given, don't try to desume tags information if entries is None: return # Determine the current revision of each live # (i.e. non-deleted) entry. state = dict(entries.getFileVersions()) # before stepping through changes, see if the initial state is # taggable. If so, add an initial changeset that does nothing # but tag, using the date of the last revision tailor imported # on its previous run. There's no way to tell when the tag # was really applied, so we don't know if it was seen on the # last run or not. Before applying the tag on the other end, # we'll have to check whether it's already been applied. tags = self.__getApplicableTags(state, file2rev2tags, tagcounts) if tags: if since == None: # I think this could only happen if the CVS repo was # tagged before any files were added to it. We could # probably get a better date by looking at when the # files were added, but who cares. timestamp = datetime(1900,1,1).replace(tzinfo=UTC) else: # "since" is a revision name read from the state file, # which means it was originally generated by # getGlobalCVSRevision. The format string "%Y-%m-%d # %H:%M:%S" matches the format generated by the implicit # call to timestamp.__str__() in getGlobalCVSRevision. y,m,d,hh,mm,ss,d1,d2,d3 = strptime(since, "%Y-%m-%d %H:%M:%S") timestamp = datetime(y,m,d,hh,mm,ss,0,UTC) author = "unknown tagger" changelog = "tag %s %s" % (timestamp, tags) key = (timestamp, author, changelog) self.changesets[key] = Changeset(_getGlobalCVSRevision(timestamp, author), timestamp,author,changelog, tags=tags) # Walk through the changesets, identifying ones that result in # a state with a tag. Add that info to the changeset. 
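        # state maps filename -> normalized revision: replaying each
        # changeset on it lets us detect the points in history where
        # the tagged revisions of all files match the resulting state.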
for cs in self.__iter__(): self.__updateState(state, cs) cs.tags = self.__getApplicableTags(state, file2rev2tags, tagcounts) def __getApplicableTags(self,state,taginfo,expectedcounts): # state: a dictionary mapping filename->revision # # taginfo: a two-level dictionary mapping # tagname->revision->list of tags. # # expectedcounts: a dictionary mapping tagname->number of # files tagged with that name. observedcounts = {} possibletags = [] for filename, revno in state.iteritems(): filetags = taginfo[filename].get(revno,[]) if len(possibletags) == 0: # first iteration of loop possibletags = filetags # Intersection of possibletags and filetags. I'm # avoiding using python sets to preserve python 2.3 # compatibility. possibletags = [t for t in possibletags if t in filetags] for t in filetags: observedcounts[t] = observedcounts.get(t,0) + 1 if len(possibletags) == 0: break # All currently existing files carry the tags in possibletags. # But that doesn't mean that the tags correspond to this # state--we might need to create additional files before # tagging. possibletags = [t for t in possibletags if observedcounts[t] == expectedcounts[t]] return possibletags def __updateState(self,state, changeset): for e in changeset.entries: if e.action_kind in (e.ADDED, e.UPDATED): state[e.name] = normalize_cvs_rev(e.new_revision) elif e.action_kind == e.DELETED: if state.has_key(e.name): del state[e.name] elif e.action_kind == e.RENAMED: if state.has_key(e.name): del state[e.old_name] state[e.name] = normalize_cvs_rev(e.new_revision) class CvsWorkingDir(CvspsWorkingDir): """ Reimplement the mechanism used to get a *changeset* view of the CVS commits. """ def _getUpstreamChangesets(self, sincerev): from os.path import join, exists from time import sleep from codecs import getreader try: reader = getreader(self.repository.encoding) except (ValueError, LookupError), err: raise ConfigurationError('Encoding "%s" does not seem to be ' 'allowed on this system (%s): you ' 'may override the default with ' 'something like "encoding = ascii" in ' 'the %s config section' % (self.repository.encoding, err, self.repository.name)) branch = None fname = join(self.repository.basedir, 'CVS', 'Tag') if exists(fname): tag = open(fname).read() if tag[0] == 'T': branch=tag[1:-1] cmd = self.repository.command("-f", "-d", "%(repository)s", "rlog") if not sincerev or sincerev in ("INITIAL", "HEAD"): # We are bootstrapping, trying to collimate the actual # revision on disk with the changesets, or figuring out # the first revision since = None if sincerev == "HEAD": if branch and branch<>'HEAD': cmd.append("-r%(branch)s.") else: cmd.append("-rHEAD:HEAD") else: cmd.append("-r:HEAD") if not branch: cmd.append("-b") elif ' by ' in sincerev: since, author = _splitGlobalCVSRevision(sincerev) cmd.extend(["-d", "%(since)s UTC<"]) if branch: cmd.append("-r%(branch)s") else: cmd.append("-b") elif sincerev[0] in '0123456789': since = sincerev cmd.extend(["-d", "%(since)s UTC<"]) elif ' ' in sincerev: branch, since = sincerev.split(' ', 1) if since.strip() == 'INITIAL': cmd.extend(["-r%(branch)s"]) else: cmd.extend(["-d", "%(since)s UTC<", "-r%(branch)s"]) else: # Then we assume it's a tag branch = sincerev since = None cmd.extend(["-r:%(branch)s"]) cvslog = ExternalCommand(command=cmd) retry = 0 while True: log = cvslog.execute(self.repository.module, stdout=PIPE, stderr=STDOUT, since=since, repository=self.repository.repository, branch=branch or 'HEAD', TZ='UTC0')[0] if cvslog.exit_status: retry += 1 if retry>3: break delay = 2**retry 
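                # exponential backoff: wait 2, 4, then 8 seconds,
                # giving up after the fourth failed attempt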
self.log.info("%s returned status %s, " "retrying in %d seconds...", str(cvslog), cvslog.exit_status, delay) sleep(retry) else: break if cvslog.exit_status: raise GetUpstreamChangesetsFailure( "%s returned status %d" % (str(cvslog), cvslog.exit_status)) log = reader(log, self.repository.encoding_errors_policy) return changesets_from_cvslog(log, self.repository.module, branch, CvsEntries(self.repository.rootdir), since, self.repository.changeset_threshold, self.repository.trim_module_components) def _checkoutUpstreamRevision(self, revision): """ Adjust the 'revision' slot of the changeset, to make it a repository wide unique id. """ last = CvspsWorkingDir._checkoutUpstreamRevision(self, revision) last.revision = _getGlobalCVSRevision(last.date, last.author) return last class CvsEntry(object): """Collect the info about a file in a CVS working dir.""" __slots__ = ('filename', 'cvs_version', 'timestamp', 'cvs_tag') def __init__(self, entry): """Initialize a CvsEntry.""" from datetime import datetime from time import strptime dummy, fn, rev, ts, dummy, tag = entry.split('/') self.filename = fn self.cvs_version = rev if ts == 'Result of merge': self.timestamp = datetime.now(tz=UTC) else: if ts.startswith('Result of merge+'): ts = ts[16:] y,m,d,hh,mm,ss,d1,d2,d3 = strptime(ts, "%a %b %d %H:%M:%S %Y") self.timestamp = datetime(y,m,d,hh,mm,ss,0,UTC) self.cvs_tag = tag def __str__(self): return "CvsEntry('%s', '%s', '%s')" % (self.filename, self.cvs_version, self.cvs_tag) class CvsEntries(object): """Collection of CvsEntry.""" __slots__ = ('files', 'directories', 'deleted') def __init__(self, root): """Parse CVS/Entries file. Walk down the working directory, collecting info from each CVS/Entries found.""" from os.path import join, exists, isdir from os import listdir self.files = {} """Dict of `CvsEntry`, keyed on each file under revision control.""" self.directories = {} """Dict of `CvsEntries`, keyed on subdirectories under revision control.""" self.deleted = False """Flag to denote that this directory was removed.""" entries = join(root, 'CVS', 'Entries') if exists(entries): for entry in open(entries).readlines(): entry = entry[:-1] if entry.startswith('/'): e = CvsEntry(entry) self.files[e.filename] = e elif entry.startswith('D/'): d = entry.split('/')[1] subdir = CvsEntries(join(root, d)) self.directories[d] = subdir elif entry == 'D': self.deleted = True # Sometimes the Entries file does not contain the directories: # crawl the current directory looking for missing ones. for entry in listdir(root): if entry == '.svn': continue dir = join(root, entry) if (isdir(dir) and exists(join(dir, 'CVS', 'Entries')) and not self.directories.has_key(entry)): self.directories[entry] = CvsEntries(dir) if self.deleted: self.deleted = not self.files and not self.directories def __str__(self): return "CvsEntries(%d files, %d subdirectories)" % ( len(self.files), len(self.directories)) def getFileInfo(self, fpath): """Fetch the info about a path, if known. 
        Otherwise return None."""

        try:
            if '/' in fpath:
                dir,rest = fpath.split('/', 1)
                return self.directories[dir].getFileInfo(rest)
            else:
                return self.files[fpath]
        except KeyError:
            return None

    def getYoungestEntry(self):
        """Find and return the most recently changed entry."""

        latest = None

        for e in self.files.values():
            if not latest or e.timestamp > latest.timestamp:
                latest = e

        for d in self.directories.values():
            e = d.getYoungestEntry()

            # skip if there are no entries in the directory
            if not e:
                continue

            if not latest or e.timestamp > latest.timestamp:
                latest = e

        return latest

    def getFileVersions(self, prefix=''):
        """Return a set of (entry name, version number) pairs."""

        pairs = [(prefix+e.filename, normalize_cvs_rev(e.cvs_version))
                 for e in self.files.values()]

        for dirname, entries in self.directories.iteritems():
            pairs += [(prefix+filename, version)
                      for filename, version in entries.getFileVersions("%s/" % dirname)]

        return pairs

    def isEmpty(self):
        """Return True if this directory does not contain any subentry."""

        return not self.files and not self.directories
tailor-0.9.35+darcs20090615/vcpx/repository/tla.py0000644000175000017500000002652111215407134021514 0ustar  vdanjeanvdanjean# -*- mode: python; coding: utf-8 -*-
# :Progetto: vcpx -- tla (Arch 1.x) backend
# :Creato:   sab 13 ago 2005 12:16:16 CEST
# :Autore:   Robin Farine
# :Licenza:  GNU General Public License
#

# Current limitations and pitfalls.
#
# - Target backend not implemented.
#
# - In-version continuations not supported (raises an exception); this
#   would probably require computing a changeset with 'tla delta'
#   instead of using update.
#
# - Pika escaped file names. This implementation requires a version
#   of tla that supports pika escapes. For changesets created with a
#   version of tla that did not support pika escapes, if one of these
#   changesets contains a file name with a valid embedded pika escape
#   sequence, things will break.

"""
This module implements the backends for tla (Arch 1.x).

This backend interprets tailor's repository, module and revision
arguments as follows:

repository
  a registered archive name

module
  a fully qualified version name, of the form category--branch--version

revision
  a patch level within that version
"""

__docformat__ = 'reStructuredText'

import os
from datetime import datetime
from time import strptime
from tempfile import mkdtemp
from email.Parser import Parser

from vcpx.repository import Repository
from vcpx.changes import Changeset
from vcpx.shwrap import ExternalCommand, PIPE
from vcpx.source import UpdatableSourceWorkingDir, ChangesetApplicationFailure, \
     GetUpstreamChangesetsFailure
from vcpx.target import TargetInitializationFailure
from vcpx.tzinfo import UTC, FixedOffset


class TlaRepository(Repository):
    METADIR = '{arch}'

    def _load(self, project):
        Repository._load(self, project)
        self.EXECUTABLE = project.config.get(self.name, 'tla-command', 'tla')
        self.IGNORE_IDS = project.config.get(self.name, 'ignore-ids', False)
        self.EXTRA_METADIRS = ['.arch-ids']


class TlaWorkingDir(UpdatableSourceWorkingDir):
    """
    A working directory under ``tla``.
    """

    ## UpdatableSourceWorkingDir

    def _getUpstreamChangesets(self, sincerev):
        """
        Build the list of upstream changesets missing in the working
        directory.
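
        This runs ``tla missing -f`` to obtain the fully qualified
        names of the pending revisions, then reads each revision's
        archive log to build the corresponding changesets.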
""" changesets = [] self.fqversion = '/'.join([self.repository.repository, self.repository.module]) c = ExternalCommand(cwd=self.repository.basedir, command=self.repository.command("missing", "-f")) out, err = c.execute(stdout=PIPE, stderr=PIPE) if c.exit_status: raise GetUpstreamChangesetsFailure( "%s returned status %d saying\n%s" % (str(c), c.exit_status, err.read())) changesets = self.__parse_revision_logs(out.read().split()) return changesets def _applyChangeset(self, changeset): """ Do the actual work of applying the changeset to the working copy and record the changes in ``changeset``. Return a list of files involved in conflicts. """ if self.shared_basedirs: tempdir = self.__hide_foreign_entries() try: conflicts = self.__apply_changeset(changeset) finally: if tempdir: self.__restore_foreign_entries(tempdir) else: conflicts = self.__apply_changeset(changeset) return conflicts def _checkoutUpstreamRevision(self, revision): """ Create the initial working directory during bootstrap. """ fqrev = self.__initial_revision(revision) if self.shared_basedirs: tempdir = mkdtemp("", ",,tailor-", self.repository.basedir) try: self.__checkout_initial_revision(fqrev, tempdir, "t") finally: newtree = os.path.join(tempdir, "t") if os.path.exists(newtree): for e in os.listdir(newtree): os.rename(os.path.join(newtree, e), os.path.join(self.repository.basedir, e)) os.rmdir(newtree) os.rmdir(tempdir) else: root, destdir = os.path.split(self.repository.basedir) self.__checkout_initial_revision(fqrev, root, destdir) return self.__parse_revision_logs([fqrev], False)[0] ## TlaWorkingDir private helper functions def __checkout_initial_revision(self, fqrev, root, destdir): if not os.path.exists(root): os.makedirs(root) cmd = self.repository.command("get", "--no-pristine", fqrev, destdir) c = ExternalCommand(cwd=root, command=cmd) out, err = c.execute(stdout=PIPE, stderr=PIPE) if c.exit_status: raise TargetInitializationFailure( "%s returned status %d saying\n%s" % (str(c), c.exit_status, err.read())) def __apply_changeset(self, changeset): c = ExternalCommand(cwd=self.repository.basedir, command=self.repository.command("update")) out, err = c.execute(changeset.revision, stdout=PIPE, stderr=PIPE) if not c.exit_status in [0, 1]: raise ChangesetApplicationFailure( "%s returned status %d saying\n%s" % (str(c), c.exit_status, err.read())) return self.__parse_apply_changeset_output(changeset, out) def __normalize_path(self, path): if len(path) > 2: if path[0:2] == "./": path = path[2:] if path.find("\(") != -1: cmd = self.repository.command("escape", "--unescaped", path) c = ExternalCommand(command=cmd) out, err = c.execute(stdout=PIPE, stderr=PIPE) if c.exit_status: raise GetUpstreamChangesetsFailure( "%s returned status %d saying\n%s" % (str(c), c.exit_status, err.read())) path = out.read() return path def __initial_revision(self, revision): fqversion = '/'.join([self.repository.repository, self.repository.module]) if revision in ['HEAD', 'INITIAL']: cmd = self.repository.command("revisions") if revision == 'HEAD': cmd.append("-r") cmd.append(fqversion) c = ExternalCommand(command=cmd) out, err = c.execute(stdout=PIPE, stderr=PIPE) if c.exit_status: raise TargetInitializationFailure( "%s returned status %d saying\n%s" % (str(c), c.exit_status, err.read())) revision = out.readline().strip() return '--'.join([fqversion, revision]) def __parse_revision_logs(self, fqrevlist, update=True): changesets = [] logparser = Parser() c = ExternalCommand(cwd=self.repository.basedir, 
command=self.repository.command("cat-archive-log")) for fqrev in fqrevlist: out, err = c.execute(fqrev, stdout=PIPE, stderr=PIPE) if c.exit_status: raise GetUpstreamChangesetsFailure( "%s returned status %d saying\n%s" % (str(c), c.exit_status, err.read())) err = None try: msg = logparser.parse(out) except Exception, err: pass if not err and msg.is_multipart(): err = "unable to parse log description" if not err and update and msg.has_key('Continuation-of'): err = "in-version continuations not supported" if err: raise GetUpstreamChangesetsFailure(str(err)) date = self.__parse_date(msg['Date'], msg['Standard-date']) author = msg['Creator'] revision = fqrev logmsg = [msg['Summary']] s = msg.get('Keywords', "").strip() if s: logmsg.append('Keywords: ' + s) s = msg.get_payload().strip() if s: logmsg.append(s) logmsg = '\n'.join(logmsg) changesets.append(Changeset(revision, date, author, logmsg)) return changesets def __parse_date(self, d1, d2): # d1: Wed Dec 10 15:01:28 EST 2003 # d2: 2003-12-10 04:01:28 GMT d1 = datetime(*strptime(d1[:19] + d1[-5:], '%a %b %d %H:%M:%S %Y')[:6]).replace(tzinfo=UTC) d2 = datetime(*strptime(d2[:19], '%Y-%m-%d %H:%M:%S')[:6]).replace(tzinfo=UTC) offset = d1 - d2 offset = offset.seconds + offset.days * 24 * 3600 return d1.replace(tzinfo=FixedOffset(offset/60)) def __hide_foreign_entries(self): c = ExternalCommand(cwd=self.repository.basedir, command=self.repository.command("tree-lint", "-tu")) out = c.execute(stdout=PIPE)[0] tempdir = mkdtemp("", "++tailor-", self.repository.basedir) try: for e in out: e = e.strip() ht = os.path.split(e) # only move inventory violations at the root if ht[0] and ht[1]: continue os.rename(os.path.join(self.repository.basedir, e), os.path.join(tempdir, e)) except: self.__restore_foreign_entries(tempdir) raise return tempdir def __restore_foreign_entries(self, tempdir): for e in os.listdir(tempdir): os.rename(os.path.join(tempdir, e), os.path.join(self.repository.basedir, e)) os.rmdir(tempdir) def __parse_apply_changeset_output(self, changeset, output): conflicts = [] skip = True for line in output: # skip comment lines, detect beginning and end of change list if line[0] == '*': if line.startswith("* applying changeset"): skip = False elif line.startswith("* reapplying local changes"): break continue if skip: continue l = line.split() l1 = self.__normalize_path(l[1]) l2 = None if len(l) > 2: l2 = self.__normalize_path(l[2]) # ignore permission changes and changes in the {arch} directory if l[0] in ['--', '-/'] or l1.startswith("{arch}"): continue if self.repository.IGNORE_IDS and l1.find('.arch-ids') >= 0: continue rev = changeset.revision if l[0][0] == 'M' or l[0] in ['ch', 'cl']: # 'ch': file <-> symlink, 'cl': ChangeLog updated e = changeset.addEntry(l1, rev) e.action_kind = e.UPDATED elif l[0][0] == 'A': e = changeset.addEntry(l1, rev) e.action_kind = e.ADDED elif l[0][0] == 'D': e = changeset.addEntry(l1, rev) e.action_kind = e.DELETED elif l[0] in ['=>', '/>']: e = changeset.addEntry(l2, rev) e.old_name = l1 e.action_kind = e.RENAMED elif l[0] in ['C', '?']: conflicts.append(l1) if l2: conflicts.append(l2) else: raise ChangesetApplicationFailure( "unhandled changeset operation: \"%s\"" % line.strip()) return conflicts tailor-0.9.35+darcs20090615/vcpx/repository/darcs/0000755000175000017500000000000011215407134021450 5ustar vdanjeanvdanjeantailor-0.9.35+darcs20090615/vcpx/repository/darcs/target.py0000644000175000017500000003713611215407134023322 0ustar vdanjeanvdanjean# -*- mode: python; coding: utf-8 -*- # :Progetto: Tailor -- 
Darcs peculiarities when used as a target # :Creato: lun 10 lug 2006 00:12:15 CEST # :Autore: Lele Gaifax # :Licenza: GNU General Public License # """ This module contains the target specific bits of the darcs backend. """ __docformat__ = 'reStructuredText' from os.path import join, exists import re from vcpx.shwrap import ExternalCommand, PIPE, STDOUT from vcpx.target import ChangesetReplayFailure, SynchronizableTargetWorkingDir, \ PostCommitCheckFailure from vcpx.tzinfo import UTC MOTD = """\ Tailorized equivalent of %s """ class DarcsTargetWorkingDir(SynchronizableTargetWorkingDir): """ A target working directory under ``darcs``. """ def importFirstRevision(self, source_repo, changeset, initial): from os import walk, sep from vcpx.dualwd import IGNORED_METADIRS if not self.repository.split_initial_import_level: super(DarcsTargetWorkingDir, self).importFirstRevision( source_repo, changeset, initial) else: cmd = self.repository.command("add", "--case-ok", "--quiet") add = ExternalCommand(cwd=self.repository.basedir, command=cmd) cmd = self.repository.command("add", "--case-ok", "--recursive", "--quiet") addrecurs = ExternalCommand(cwd=self.repository.basedir, command=cmd) for root, dirs, files in walk(self.repository.basedir): subtree = root[len(self.repository.basedir)+1:] if subtree: log = "Import of subtree %s" % subtree level = len(subtree.split(sep)) else: log = "Import of first level" level = 0 for excd in IGNORED_METADIRS: if excd in dirs: dirs.remove(excd) if level>self.repository.split_initial_import_level: while dirs: d = dirs.pop(0) addrecurs.execute(join(subtree, d)) filenames = [join(subtree, f) for f in files] if filenames: add.execute(*filenames) else: dirnames = [join(subtree, d) for d in dirs] if dirnames: add.execute(*dirnames) filenames = [join(subtree, f) for f in files] if filenames: add.execute(*filenames) self._commit(changeset.date, "tailor", "Initial import", log, isinitialcommit=initial) cmd = self.repository.command("tag", "--author", "tailor") ExternalCommand(cwd=self.repository.basedir, command=cmd).execute( "Initial import from %s" % source_repo.repository) def _addPathnames(self, names): """ Add some new filesystem objects. """ cmd = self.repository.command("add", "--case-ok", "--not-recursive", "--quiet") ExternalCommand(cwd=self.repository.basedir, command=cmd).execute(names) def _addSubtree(self, subdir): """ Use the --recursive variant of ``darcs add`` to add a subtree. """ cmd = self.repository.command("add", "--case-ok", "--recursive", "--quiet") add = ExternalCommand(cwd=self.repository.basedir, command=cmd, ok_status=(0,2)) output = add.execute(subdir, stdout=PIPE, stderr=STDOUT)[0] if add.exit_status and add.exit_status!=2: self.log.warning("%s returned status %d, saying %s", str(add), add.exit_status, output.read()) def _commit(self, date, author, patchname, changelog=None, entries=None, tags = [], isinitialcommit = False): """ Commit the changeset. 
""" from os import rename, unlink logmessage = [] logmessage.append(date.astimezone(UTC).strftime('%Y/%m/%d %H:%M:%S UTC')) logmessage.append(author) if patchname: logmessage.append(patchname) else: # This is possibile also when REMOVE_FIRST_LOG_LINE is in # effect and the changelog starts with newlines: discard # those, otherwise darcs will complain about invalid patch # name if changelog and changelog.startswith('\n'): while changelog.startswith('\n'): changelog = changelog[1:] if changelog: logmessage.append(changelog) if not logmessage: logmessage.append('Unnamed patch') cmd = self.repository.command("record", "--all", "--pipe", "--ignore-times") if not entries: entries = ['.'] record = ExternalCommand(cwd=self.repository.basedir, command=cmd) output = record.execute(input=self.repository.encode('\n'.join(logmessage)), stdout=PIPE, stderr=STDOUT)[0] # Repair afterwards, for http://bugs.darcs.net/issue693 # # Verified that this is still needed for darcs 2.1.2 (+ 343 patches) # using the config.tailor file that is attached to issue693 above. if record.exit_status == 2: self.log.debug("Trying to repair record failure...") cmd = self.repository.command("repair") repair = ExternalCommand(cwd=self.repository.basedir, command=cmd) repairoutput = repair.execute(stdout=PIPE, stderr=STDOUT)[0] if not repair.exit_status: record.exit_status = repair.exit_status else: self.log.warning("%s returned status %d, saying %s", str(repair), repair.exit_status, repairoutput.read()) if record.exit_status: pending = join(self.repository.basedir, '_darcs', 'patches', 'pending') if exists(pending): wrongpending = pending + '.wrong' if exists(wrongpending): unlink(wrongpending) rename(pending, wrongpending) self.log.debug("Pending file renamed to %s", wrongpending) raise ChangesetReplayFailure( "%s returned status %d, saying: %s" % (str(record), record.exit_status, output.read())) def _postCommitCheck(self): # If we are using --look-for-adds on commit this is useless if not self.repository.use_look_for_adds: cmd = self.repository.command("whatsnew", "--summary", "--look-for-add") whatsnew = ExternalCommand(cwd=self.repository.basedir, command=cmd, ok_status=(1,)) output = whatsnew.execute(stdout=PIPE, stderr=STDOUT)[0] if not whatsnew.exit_status: raise PostCommitCheckFailure( "Changes left in working dir after commit:\n%s" % output.read()) def _replayChangeset(self, changeset): """ Instead of using the "darcs mv" command, manually add the rename to the pending file: this is a dirty trick, that allows darcs to handle the case when the source changeset is something like:: $ bzr mv A B $ touch A $ bzr add A where A is actually replaced, and old A is now B. Since by the time the changeset gets replayed, the source has already replaced A with its new content, darcs would move the *wrong* A to B... """ # The "_darcs/patches/pending" file is basically a patch containing # only the changes (hunks, adds...) not yet recorded by darcs: it does # contain either a single change (that is, exactly one line), or a # collection of changes, with opening and closing curl braces. # Filenames must begin with "./", and eventual spaces replaced by '\32\'. # Order is significant! 
pending = join(self.repository.basedir, '_darcs', 'patches', 'pending') if exists(pending): p = open(pending).readlines() if p[0] != '{\n': p.insert(0, '{\n') p.append('}\n') else: p = [ '{\n', '}\n' ] entries = [] while changeset.entries: e = changeset.entries.pop(0) if e.action_kind == e.DELETED: elide = False for j,oe in enumerate(changeset.entries): if oe.action_kind == oe.ADDED and e.name == oe.name: self.log.debug('Collapsing a %s and a %s on %s, assuming ' 'an upstream "replacement"', e.action_kind, oe.action_kind, oe.name) del changeset.entries[j] elide = True break if not elide: entries.append(e) elif e.action_kind == e.ADDED: elide = False for j,oe in enumerate(changeset.entries): if oe.action_kind == oe.DELETED and e.name == oe.name: self.log.debug('Collapsing a %s and a %s on %s, assuming ' 'an upstream "replacement"', e.action_kind, oe.action_kind, oe.name) del changeset.entries[j] elide = True break if not elide: entries.append(e) else: entries.append(e) changed = False for e in entries: if e.action_kind == e.RENAMED: self.log.debug('Mimicing "darcs mv %s %s"', e.old_name, e.name) oname = e.old_name.replace(' ', '\\32\\') nname = e.name.replace(' ', '\\32\\') p.insert(-1, 'move ./%s ./%s\n' % (oname, nname)) changed = True elif e.action_kind == e.ADDED: self.log.debug('Mimicing "darcs add %s"', e.name) name = e.name.replace(' ', '\\32\\') if e.is_directory: p.insert(-1, 'adddir ./%s\n' % name) else: p.insert(-1, 'addfile ./%s\n' % name) changed = True elif e.action_kind == e.DELETED: self.log.debug('Mimicing "darcs rm %s"', e.name) name = e.name.replace(' ', '\\32\\') if e.is_directory: p.insert(-1, 'rmdir ./%s\n' % name) else: p.insert(-1, 'rmfile ./%s\n' % name) changed = True if changed: open(pending, 'w').writelines(p) return True def _prepareTargetRepository(self): """ Create the base directory if it doesn't exist, and execute ``darcs initialize`` if needed. """ metadir = join(self.repository.basedir, '_darcs') if not exists(metadir): self.repository.create() prefsdir = join(metadir, 'prefs') prefsname = join(prefsdir, 'prefs') boringname = join(prefsdir, 'boring') if exists(prefsname): for pref in open(prefsname, 'rU'): if pref: pname, pvalue = pref.split(' ', 1) if pname == 'boringfile': boringname = join(self.repository.basedir, pvalue[:-1]) boring = open(boringname, 'rU') ignored = boring.read().rstrip().split('\n') boring.close() # Build a list of compiled regular expressions, that will be # used later to filter the entries. self.__unwanted_entries = [re.compile(rx) for rx in ignored if rx and not rx.startswith('#')] def _prepareWorkingDirectory(self, source_repo): """ Tweak the default settings of the repository. """ motd = open(join(self.repository.basedir, '_darcs/prefs/motd'), 'w') motd.write(MOTD % str(source_repo)) motd.close() def _adaptEntries(self, changeset): """ Filter out boring files. 
""" from copy import copy adapted = SynchronizableTargetWorkingDir._adaptEntries(self, changeset) # If there are no entries or no rules, there's nothing to do if not adapted or not adapted.entries or not self.__unwanted_entries: return adapted entries = [] skipped = False for e in adapted.entries: skip = False for rx in self.__unwanted_entries: if rx.search(e.name): skip = True break if skip: self.log.info('Entry "%s" skipped per boring rules', e.name) skipped = True else: entries.append(e) # All entries are gone, don't commit this changeset if not entries: self.log.info('All entries ignored, skipping whole ' 'changeset "%s"', changeset.revision) return None if skipped: adapted = copy(adapted) adapted.entries = entries return adapted def _tag(self, tag, date, author): """ Apply the given tag to the repository, unless it has already been applied to the current state. (If it has been applied to an earlier state, do apply it; the later tag overrides the earlier one. """ if tag not in self._currentTags(): cmd = self.repository.command("tag", "--author", "Unknown tagger") ExternalCommand(cwd=self.repository.basedir, command=cmd).execute(tag) def _currentTags(self): """ Return a list of tags that refer to the repository's current state. Does not consider tags themselves to be part of the state, so if the repo was tagged with T1 and then T2, then both T1 and T2 are considered to refer to the current state, even though 'darcs get --tag=T1' and 'darcs get --tag=T2' would have different results (the latter creates a repo that contains tag T2, but the former does not). This function assumes that a tag depends on all patches that precede it in the "darcs changes" list. This assumption is valid if tags only come into the repository via tailor; if the user applies a tag by hand in the hybrid repository, or pulls in a tag from another darcs repository, then the assumption could be violated and mistagging could result. """ from vcpx.repository.darcs.source import changesets_from_darcschanges_unsafe cmd = self.repository.command("changes", "--from-match", "not name ^TAG", "--xml-output", "--reverse") changes = ExternalCommand(cwd=self.repository.basedir, command=cmd) output = changes.execute(stdout=PIPE)[0] if changes.exit_status: raise ChangesetReplayFailure( "%s returned status %d saying\n%s" % (str(changes), changes.exit_status, output.read())) tags = [] for cs in changesets_from_darcschanges_unsafe(output): for tag in cs.tags: if tag not in tags: tags.append(tag) return tags tailor-0.9.35+darcs20090615/vcpx/repository/darcs/source.py0000644000175000017500000007220711215407134023332 0ustar vdanjeanvdanjean# -*- mode: python; coding: utf-8 -*- # :Progetto: Tailor -- Darcs peculiarities when used as a source # :Creato: lun 10 lug 2006 00:04:59 CEST # :Autore: Lele Gaifax # :Licenza: GNU General Public License # """ This module contains the source specific bits of the darcs backend. """ __docformat__ = 'reStructuredText' import re from vcpx.changes import ChangesetEntry, Changeset from vcpx.shwrap import ExternalCommand, PIPE, STDOUT from vcpx.source import UpdatableSourceWorkingDir, ChangesetApplicationFailure, \ GetUpstreamChangesetsFailure from vcpx.target import TargetInitializationFailure from vcpx.tzinfo import UTC class DarcsChangeset(Changeset): """ Fixup darcs idiosyncrasies: - collapse "add A; rename A B" into "add B" - collapse "rename A B; remove B" into "remove A" """ def __init__(self, revision, date, author, log, entries=None, **other): """ Initialize a new DarcsChangeset. 
""" super(DarcsChangeset, self).__init__(revision, date, author, log, entries=None, **other) self.darcs_hash = other.get('darcs_hash') if entries is not None: for e in entries: self.addEntry(e, revision) def __eq__(self, other): equal = (self.revision == other.revision and self.date == other.date and self.author == other.author) if equal: other_hash = getattr(other, 'darcs_hash', None) if (self.darcs_hash is not None and other_hash is not None and self.darcs_hash != other_hash): equal = False return equal def __ne__(self, other): different = (self.revision <> other.revision or self.date <> other.date or self.author <> other.author) if not different: other_hash = getattr(other, 'darcs_hash', None) if (self.darcs_hash is not None and other_hash is not None and self.darcs_hash != other_hash): different = True return different # Match darcs 2.1+ junk: hopefully this regex is strict enough to # not obliterate useful info... ignore_this = re.compile('^Ignore-this: [a-f\\d]+\\n?') def setLog(self, log): """Strip away the "Ignore-this:" noise from the changelog.""" super(DarcsChangeset, self).setLog(self.ignore_this.sub('', log)) def addEntry(self, entry, revision): """ Fixup darcs idiosyncrasies: - collapse "add A; rename A B" into "add B" - collapse "rename A B; add B" into "add B" - annihilate "add A; remove A" - collapse "rename A B; remove B" into "remove A" - collapse "rename A B; rename B C" into "rename A C" - collapse "add A; edit A" into "add A" """ # This should not happen, since the parser feeds us an already built # list of ChangesetEntries, anyway... if not isinstance(entry, ChangesetEntry): return super(DarcsChangeset, self).addEntry(entry, revision) # Ok, before adding this entry, check it against already # known: if this is an add, and there's a rename (such as "add # A; rename A B; ") then... if entry.action_kind == entry.ADDED: # ... we have to check existings, because of a bug in # darcs: `changes --xml` (as of 1.0.7) emits the changes # in the wrong order, that is, it prefers to start with # renames, *always*, even when they obviously follows the # add of the same entry (even, it should apply this "fix" # by its own). # # So, if there's a rename of this entry there, change that # to an addition instead, and don't insert any other entry # darcs hopefully use forward slashes also under win dirname = entry.name+'/' for i,e in enumerate(self.entries): if e.action_kind == e.RENAMED: if e.old_name == entry.name: # Unfortunately we have to check if the order if # messed up, in that case we should not do anything. # Example: mv a a2; mkdir a; mv a2 a/b skip = False for j in self.entries: if j.action_kind == j.RENAMED and j.name.startswith(dirname): skip = True break # Luckily enough (since removes are the first entries # in the list, that is) by anticipating the add we # cure also the case below, when addition follows # edit. if not skip: e.action_kind = e.ADDED e.old_name = None e.is_directory = entry.is_directory # Collapse "add A; edit A" into "add A" for j,oe in enumerate(self.entries): if oe.action_kind == e.UPDATED and e.name == oe.name: del self.entries[j] return e # The "rename A B; add B" into "add B" if e.name == entry.name: del self.entries[i] # Assert also that add_dir events must preceeds any # add_file and ren_file that have that dir as target, # and that add_file preceeds any edit. 
if ((e.name == entry.name or e.name.startswith(dirname)) or (e.action_kind == e.RENAMED and e.old_name.startswith(dirname))): self.entries.insert(i, entry) return entry # Likewise, if this is a deletion, and there is a rename of # this entry (such as "rename A B; remove B") then turn the # existing rename into a deletion instead. # If instead the removed entry was added by the same patch, # annihilate the two: a bug in darcs (possibly fixed in recent # versions) created patches with ADD+EDIT+REMOVE of a single # file (see tailor ticket #71, or darcs issue185). Too bad # another bug (still present in 1.0.8) hides that and makes # very hard (read: impossible) any workaround on the tailor # side. Luckily I learnt another tiny bit of Haskell and # proposed a fix for that: hopefully the patch will be # accepted by darcs developers. In the meantime, I attached it # to ticket #71: without that, tailor does not have enough # information to do the right thing. elif entry.action_kind == entry.DELETED: for i,e in enumerate(self.entries): if e.action_kind == e.RENAMED and e.name == entry.name: e.action_kind = e.DELETED e.name = e.old_name e.old_name = None e.is_directory = entry.is_directory return e elif e.action_kind == e.ADDED and e.name == entry.name: del self.entries[i] return None elif e.action_kind == e.DELETED and e.is_directory and \ entry.name.startswith(e.name+'/'): self.entries.insert(i, entry) return e # The "rename A B; rename B C" to "rename A C" part elif entry.action_kind == entry.RENAMED: # Adjust previous renames olddirname = entry.old_name+'/' for e in self.entries: if e.action_kind == e.RENAMED and e.name.startswith(olddirname): e.name = entry.name + '/' + e.name[len(olddirname):] for e in self.entries: if e.action_kind == e.RENAMED and e.name == entry.old_name: e.name = entry.name return e # The "rename A B; add B" into "add B", part two for i,e in enumerate(self.entries): if e.action_kind == e.ADDED and e.name == entry.name: return None # Ok, it must be either an edit or a rename: the former goes # obviously to the end, and since the latter, as said, come # in very early, appending is just good. self.entries.append(entry) return entry def changesets_from_darcschanges(changes, unidiff=False, repodir=None, chunksize=2**15, replace_badchars=None): """ Parse XML output of ``darcs changes``. Return a list of ``Changeset`` instances. Filters out the (currently incorrect) tag info from changesets_from_darcschanges_unsafe. """ csets = changesets_from_darcschanges_unsafe(changes, unidiff, repodir, chunksize, replace_badchars) for cs in csets: yield cs def changesets_from_darcschanges_unsafe(changes, unidiff=False, repodir=None, chunksize=2**15, replace_badchars=None): """ Do the real work of parsing the change log, including tags. Warning: the tag information in the changsets returned by this function are only correct if each darcs tag in the repo depends on all of the patches that precede it. This is not a valid assumption in general--a tag that does not depend on patch P can be pulled in from another darcs repo after P. We collect the tag info anyway because DarcsWorkingDir._currentTags() can use it safely despite this problem. Hopefully the problem will eventually be fixed and this function can be renamed changesets_from_darcschanges. 
""" from xml.sax import make_parser from xml.sax.handler import ContentHandler, ErrorHandler from datetime import datetime class DarcsXMLChangesHandler(ContentHandler): def __init__(self): self.changesets = [] self.current = None self.current_field = [] if unidiff and repodir: cmd = ["darcs", "diff", "--unified", "--repodir", repodir, "--patch", "%(patchname)s"] self.darcsdiff = ExternalCommand(command=cmd) else: self.darcsdiff = None def startElement(self, name, attributes): if name == 'patch': self.current = {} self.current['author'] = attributes['author'] date = attributes['date'] from time import strptime try: # 20040619130027 timestamp = datetime(*strptime(date, '%Y%m%d%H%M%S')[:6]) except ValueError: # Old darcs patches use the form Sun Oct 20 20:01:05 EDT 2002 timestamp = datetime(*strptime(date[:19] + date[-5:], '%a %b %d %H:%M:%S %Y')[:6]) timestamp = timestamp.replace(tzinfo=UTC) # not true for the ValueError case, but oh well self.current['date'] = timestamp self.current['comment'] = '' self.current['hash'] = attributes['hash'] self.current['entries'] = [] self.inverted = (attributes['inverted'] == "True") elif name in ['name', 'comment', 'add_file', 'add_directory', 'modify_file', 'remove_file', 'remove_directory']: self.current_field = [] elif name == 'move': self.old_name = attributes['from'] self.new_name = attributes['to'] def endElement(self, name): if name == 'patch': cset = DarcsChangeset(self.current['name'], self.current['date'], self.current['author'], self.current['comment'], self.current['entries'], tags=self.current.get('tags',[]), darcs_hash=self.current['hash']) if self.darcsdiff: cset.unidiff = self.darcsdiff.execute(TZ='UTC', stdout=PIPE, patchname=cset.revision)[0].read() self.changesets.append(cset) self.current = None elif name in ['name', 'comment']: val = ''.join(self.current_field) if val[:4] == 'TAG ': self.current.setdefault('tags',[]).append(val[4:]) self.current[name] = val elif name == 'move': entry = ChangesetEntry(self.new_name) entry.action_kind = entry.RENAMED entry.old_name = self.old_name self.current['entries'].append(entry) elif name in ['add_file', 'add_directory', 'modify_file', 'remove_file', 'remove_directory']: current_field = ''.join(self.current_field).strip() if self.inverted: # the filenames in file modifications are outdated # if there are renames for i in self.current['entries']: if i.action_kind == i.RENAMED and current_field.startswith(i.old_name): current_field = current_field.replace(i.old_name, i.name) entry = ChangesetEntry(current_field) entry.action_kind = { 'add_file': entry.ADDED, 'add_directory': entry.ADDED, 'modify_file': entry.UPDATED, 'remove_file': entry.DELETED, 'remove_directory': entry.DELETED }[name] entry.is_directory = name.endswith('directory') self.current['entries'].append(entry) def characters(self, data): self.current_field.append(data) parser = make_parser() handler = DarcsXMLChangesHandler() parser.setContentHandler(handler) parser.setErrorHandler(ErrorHandler()) def fixup_badchars(s, map): if not map: return s ret = [map.get(c, c) for c in s] return "".join(ret) chunk = fixup_badchars(changes.read(chunksize), replace_badchars) while chunk: parser.feed(chunk) for cs in handler.changesets: yield cs handler.changesets = [] chunk = fixup_badchars(changes.read(chunksize), replace_badchars) parser.close() for cs in handler.changesets: yield cs class DarcsSourceWorkingDir(UpdatableSourceWorkingDir): """ A source working directory under ``darcs``. 
""" is_hash_rx = re.compile('[0-9a-f]{14}-[0-9a-f]{5}-[0-9a-f]{40}\.gz') def _getUpstreamChangesets(self, sincerev): """ Do the actual work of fetching the upstream changeset. """ # Use the newer pull --xml-output, if possible use_xml = False if self.repository.darcs_version.startswith('2'): cmd = self.repository.command("pull", "--dry-run", "--xml-output") pull = ExternalCommand(cwd=self.repository.basedir, command=cmd) output,error = pull.execute(self.repository.repository, stdout=PIPE, stderr=PIPE, TZ='UTC0') # pull --xml-output was introduced *after* 2.0.0 if pull.exit_status: errormsg = error.read() if "unrecognized option `--xml-output'" in errormsg: self.log.warning('Using darcs 1.0 non-XML parser: it may fail ' 'on patches recorded before november 2003! ' 'I would suggest of upgrading to latest darcs 2.0 ' '(later than 2.0+233).') # No way, fall back to old behaviour, that will possibly fail, # on patches recorded before 2003-11-01... :-| else: raise GetUpstreamChangesetsFailure( "%s returned status %d saying\n%s" % (str(pull), pull.exit_status, errormsg)) else: use_xml = True if not use_xml: cmd = self.repository.command("pull", "--dry-run") pull = ExternalCommand(cwd=self.repository.basedir, command=cmd) output = pull.execute(self.repository.repository, stdout=PIPE, stderr=STDOUT, TZ='UTC0')[0] if pull.exit_status: raise GetUpstreamChangesetsFailure( "%s returned status %d saying\n%s" % (str(pull), pull.exit_status, output.read())) return self._parseDarcsPull(output) else: from cStringIO import StringIO # My initial implementation of --xml-output on darcs pull # wasn't perfect, as it was printing useless verbosity before # and after the actual xml. Around 2.0.0+275 I removed that... line = output.readline() # Would pull from "/home/lele/wip/darcs-2.0"... if line.startswith('Would pull from '): # Skip the first two lines and drop the last two as well output.readline() # Would pull the following changes: xml = StringIO(''.join(output.readlines()[:-2])) xml.seek(0) else: output.seek(0) xml = output badchars = self.repository.replace_badchars return changesets_from_darcschanges(xml, replace_badchars=badchars) def _parseDarcsPull(self, output): """Process 'darcs pull' output to Changesets. """ from datetime import datetime from time import strptime from sha import new l = output.readline() while l and not (l.startswith('Would pull the following changes:') or l == 'No remote changes to pull in!\n'): l = output.readline() if l <> 'No remote changes to pull in!\n': ## Sat Jul 17 01:22:08 CEST 2004 lele@nautilus ## * Refix _getUpstreamChangesets for darcs fsep = re.compile('[ :]+') l = output.readline() while not l.startswith('Making no changes: this is a dry run.'): # Assume it's a line like # Sun Jan 2 00:24:04 UTC 2005 lele@nautilus.homeip.net # Use a regular expression matching multiple spaces or colons # to split it, and use the first 7 fields to build up a datetime. 
pieces = fsep.split(l.rstrip(), 8) assert len(pieces)>=7, "Cannot parse %r as a patch timestamp" % l date = ' '.join(pieces[:8]) try: author = pieces[8] except IndexError, s: # darcs allows patches with empty author author = "" y,m,d,hh,mm,ss,d1,d2,d3 = strptime(date, "%a %b %d %H %M %S %Z %Y") date = datetime(y,m,d,hh,mm,ss,0,UTC) l = output.readline().rstrip() assert (l.startswith(' *') or l.startswith(' UNDO:') or l.startswith(' tagged')), \ "Got %r but expected the start of the log" % l if l.startswith(' *'): name = l[4:] else: name = l[2:] changelog = [] l = output.readline() while l.startswith(' '): changelog.append(l[2:-1]) l = output.readline() cset = DarcsChangeset(name, date, author, '\n'.join(changelog)) compactdate = date.strftime("%Y%m%d%H%M%S") if name.startswith('UNDO: '): name = name[6:] inverted = 't' else: inverted = 'f' if name.startswith('tagged '): name = name[7:] if cset.tags is None: cset.tags = [name] else: cset.tags.append(name) name = "TAG " + name phash = new() phash.update(name) phash.update(author) phash.update(compactdate) phash.update(''.join(changelog)) phash.update(inverted) cset.darcs_hash = '%s-%s-%s.gz' % (compactdate, new(author).hexdigest()[:5], phash.hexdigest()) yield cset while not l.strip(): l = output.readline() def _applyChangeset(self, changeset): """ Do the actual work of applying the changeset to the working copy. """ needspatchesopt = False if hasattr(changeset, 'darcs_hash'): selector = '--match' revtag = 'hash ' + changeset.darcs_hash elif changeset.revision.startswith('tagged '): selector = '--tag' revtag = changeset.revision[7:] else: selector = '--match' revtag = 'date "%s" && author "%s"' % ( changeset.date.strftime("%Y%m%d%H%M%S"), changeset.author) # The 'exact' matcher doesn't groke double quotes: # """currently there is no provision for escaping a double # quote, so you have to choose between matching double # quotes and matching spaces""" if not '"' in changeset.revision: revtag += ' && exact "%s"' % changeset.revision.replace('%', '%%') else: needspatchesopt = True cmd = self.repository.command("pull", "--all", "--quiet", selector, revtag) if needspatchesopt: cmd.extend(['--patches', re.escape(changeset.revision)]) pull = ExternalCommand(cwd=self.repository.basedir, command=cmd) output = pull.execute(stdout=PIPE, stderr=STDOUT, input='y')[0] if pull.exit_status: raise ChangesetApplicationFailure( "%s returned status %d saying\n%s" % (str(pull), pull.exit_status, output.read())) conflicts = [] line = output.readline() while line: if line.startswith('We have conflicts in the following files:'): files = output.readline()[:-1].split(' ') self.log.warning("Conflict after 'darcs pull': %s", ' '.join(files)) conflicts.extend(files) line = output.readline() # Complete the changeset with its entries cmd = self.repository.command("changes", selector, revtag, "--xml-output", "--summ") changes = ExternalCommand(cwd=self.repository.basedir, command=cmd) last = changesets_from_darcschanges(changes.execute(stdout=PIPE)[0], replace_badchars=self.repository.replace_badchars) try: entries = last.next().entries except StopIteration: entries = None if entries: for e in entries: changeset.addEntry(e, changeset.revision) return conflicts def _handleConflict(self, changeset, conflicts, conflict): """ Handle the conflict raised by the application of the upstream changeset. Override parent behaviour: with darcs, we need to execute a revert on the conflicted files, **trashing** local changes, but there should be none of them in tailor context. 
""" from os import walk, unlink from os.path import join from re import compile self.log.info("Reverting changes to %s, to solve the conflict", ' '.join(conflict)) cmd = self.repository.command("revert", "--all") revert = ExternalCommand(cwd=self.repository.basedir, command=cmd) revert.execute(conflict, input="\n") # Remove also the backups made by darcs bckre = compile('-darcs-backup[0-9]+$') for root, dirs, files in walk(self.repository.basedir): backups = [f for f in files if bckre.search(f)] for bck in backups: self.log.debug("Removing backup file %r in %r", bck, root) unlink(join(root, bck)) def _checkoutUpstreamRevision(self, revision): """ Concretely do the checkout of the upstream revision and return the last applied changeset. """ from os.path import join, exists from os import mkdir from vcpx.source import InvocationError if not self.repository.repository: raise InvocationError("Must specify a the darcs source repository") if revision == 'INITIAL' or self.is_hash_rx.match(revision): initial = True if revision == 'INITIAL': cmd = self.repository.command("changes", "--xml-output", "--repo", self.repository.repository, "--reverse") changes = ExternalCommand(command=cmd) output = changes.execute(stdout=PIPE)[0] if changes.exit_status: raise ChangesetApplicationFailure( "%s returned status %d saying\n%s" % (str(changes), changes.exit_status, output and output.read() or '')) csets = changesets_from_darcschanges(output, replace_badchars=self.repository.replace_badchars) try: changeset = csets.next() except StopIteration: # No changesets, no party! return None revision = 'hash %s' % changeset.darcs_hash else: revision = 'hash %s' % revision else: initial = False # Darcs 2.0 fails with "darcs get --to-match", see issue885 darcs2 = self.repository.darcs_version.startswith('2') if darcs2 or self.repository.subdir == '.' or exists(self.repository.basedir): # This is currently *very* slow, compared to the darcs get # below! 
if not exists(join(self.repository.basedir, '_darcs')): if not exists(self.repository.basedir): mkdir(self.repository.basedir) cmd = self.repository.command("initialize") init = ExternalCommand(cwd=self.repository.basedir, command=cmd) init.execute() if init.exit_status: raise TargetInitializationFailure( "%s returned status %s" % (str(init), init.exit_status)) cmd = self.repository.command("pull", "--all", "--quiet") if revision and revision<>'HEAD': cmd.extend([initial and "--match" or "--tag", revision]) dpull = ExternalCommand(cwd=self.repository.basedir, command=cmd) output = dpull.execute(self.repository.repository, stdout=PIPE, stderr=STDOUT)[0] if dpull.exit_status: raise TargetInitializationFailure( "%s returned status %d saying\n%s" % (str(dpull), dpull.exit_status, output.read())) else: # Use much faster 'darcs get' cmd = self.repository.command("get", "--quiet") if revision and revision<>'HEAD': cmd.extend([initial and "--to-match" or "--tag", revision]) else: cmd.append("--partial") dget = ExternalCommand(command=cmd) output = dget.execute(self.repository.repository, self.repository.basedir, stdout=PIPE, stderr=STDOUT)[0] if dget.exit_status: raise TargetInitializationFailure( "%s returned status %d saying\n%s" % (str(dget), dget.exit_status, output.read())) cmd = self.repository.command("changes", "--last", "1", "--xml-output") changes = ExternalCommand(cwd=self.repository.basedir, command=cmd) output = changes.execute(stdout=PIPE)[0] if changes.exit_status: raise ChangesetApplicationFailure( "%s returned status %d saying\n%s" % (str(changes), changes.exit_status, output.read())) try: last = changesets_from_darcschanges( output, replace_badchars=self.repository.replace_badchars).next() except StopIteration: last = None return last tailor-0.9.35+darcs20090615/vcpx/repository/darcs/__init__.py0000644000175000017500000001227511215407134023570 0ustar vdanjeanvdanjean# -*- mode: python; coding: utf-8 -*- # :Progetto: vcpx -- Darcs details # :Creato: ven 18 giu 2004 14:45:28 CEST # :Autore: Lele Gaifax # :Licenza: GNU General Public License # """ This module contains supporting classes for the ``darcs`` versioning system. """ __docformat__ = 'reStructuredText' import re from vcpx.repository import Repository from vcpx.shwrap import ExternalCommand, PIPE from vcpx.target import TargetInitializationFailure class DarcsRepository(Repository): METADIR = '_darcs' # Darcs needs the deletion, because of the new way the backend assemble # the new patches by writing directly to the pending file. 
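    # (Hedged note: without --delete, a stale pending file left over from a
    # previous run could survive an rsync refresh of the basedir and confuse
    # the next "darcs record".)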
EXTRA_RSYNC_FLAGS = ['--delete'] def _load(self, project): Repository._load(self, project) cget = project.config.get self.EXECUTABLE = cget(self.name, 'darcs-command', 'darcs') self._darcs_version = None init_options = cget(self.name, 'init-options', '') if init_options: self.init_options = tuple(init_options.split(' ')) else: self.init_options = None self.use_look_for_adds = cget(self.name, 'look-for-adds', 'False') self.split_initial_import_level = int( cget(self.name, 'split-initial-changeset-level', '0')) self.replace_badchars = eval(cget(self.name, 'replace-badchars', "{" "'\xb4': '´'," "'\xc1': 'Á'," "'\xc4': 'Ä'," "'\xc5': 'Å'," "'\xc9': 'É'," "'\xcd': 'Í'," "'\xd3': 'Ó'," "'\xd5': 'Ő'," "'\xd6': 'Ö'," "'\xda': 'Ú'," "'\xdb': 'Ű'," "'\xdc': 'Ü'," "'\xdf': 'ß'," "'\xe1': 'á'," "'\xe5': 'å'," "'\xe9': 'é'," "'\xed': 'í'," "'\xf1': 'ñ'," "'\xf3': 'ó'," "'\xf5': 'ő'," "'\xf6': 'ö'," "'\xfa': 'ú'," "'\xfb': 'ű'," "'\xfc': 'ü'," "}")) @property def darcs_version(self): if self._darcs_version is None: cmd = self.command('--version') version = ExternalCommand(command=cmd) self._darcs_version = version.execute(stdout=PIPE)[0].read().strip() self.log.debug('Using %s, version %s', self.EXECUTABLE, self._darcs_version) return self._darcs_version def command(self, *args, **kwargs): if args[0] == 'record' and self.use_look_for_adds: args = args + ('--look-for-adds',) elif args[0] == 'initialize' and self.init_options: args = args + self.init_options return Repository.command(self, *args, **kwargs) def create(self): from vcpx.dualwd import IGNORED_METADIRS from os.path import join cmd = self.command("initialize") init = ExternalCommand(cwd=self.basedir, command=cmd) init.execute() if init.exit_status: raise TargetInitializationFailure( "%s returned status %s" % (str(init), init.exit_status)) metadir = join(self.basedir, '_darcs') prefsdir = join(metadir, 'prefs') prefsname = join(prefsdir, 'prefs') boringname = join(prefsdir, 'boring') boring = open(boringname, 'rU') ignored = boring.read().rstrip().split('\n') boring.close() # Augment the boring file, that contains a regexp per line # with all known VCs metadirs to be skipped. ignored.extend(['(^|/)%s($|/)' % re.escape(md) for md in IGNORED_METADIRS]) # Eventually omit our own log... logfile = self.projectref().logfile if logfile.startswith(self.basedir): ignored.append('^%s$' % re.escape(logfile[len(self.basedir)+1:])) # ... and state file sfname = self.projectref().state_file.filename if sfname.startswith(self.basedir): sfrelname = sfname[len(self.basedir)+1:] ignored.append('^%s$' % re.escape(sfrelname)) ignored.append('^%s$' % re.escape(sfrelname+'.old')) ignored.append('^%s$' % re.escape(sfrelname+'.journal')) boring = open(boringname, 'w') boring.write('\n'.join(ignored)) boring.write('\n') boring.close() tailor-0.9.35+darcs20090615/vcpx/repository/monotone.py0000644000175000017500000012625611215407134022600 0ustar vdanjeanvdanjean# -*- mode: python; coding: utf-8 -*- # :Progetto: vcpx -- Monotone details # :Creato: Tue Apr 12 01:28:10 CEST 2005 # :Autore: Markus Schiltknecht # :Autore: Riccardo Ghetta # :Autore: Henry Nestler # :Licenza: GNU General Public License # """ This module contains supporting classes for Monotone. 
""" __docformat__ = 'reStructuredText' from os.path import exists, join, isdir from os import getenv from string import whitespace from vcpx.repository import Repository from vcpx.shwrap import ExternalCommand, PIPE, ReopenableNamedTemporaryFile from vcpx.source import UpdatableSourceWorkingDir, InvocationError, \ ChangesetApplicationFailure, GetUpstreamChangesetsFailure from vcpx.target import SynchronizableTargetWorkingDir, TargetInitializationFailure from vcpx.changes import Changeset from vcpx.tzinfo import UTC MONOTONERC = """\ function get_passphrase(KEYPAIR_ID) return "%s" end """ class MonotoneRepository(Repository): METADIR = '_MTN' def _load(self, project): Repository._load(self, project) cget = project.config.get self.EXECUTABLE = cget(self.name, 'monotone-command', 'mtn') self.keyid = cget(self.name, 'keyid') or \ cget(self.name, '%s-keyid' % self.which) self.passphrase = cget(self.name, 'passphrase') or \ cget(self.name, '%s-passphrase' % self.which) self.keygenid = cget(self.name, 'keygenid') or \ cget(self.name, '%s-keygenid' % self.which) self.custom_lua = (cget(self.name, 'custom-lua') or cget(self.name, '%s-custom-lua' % self.which) or # for backward compatibility cget(self.name, 'custom_lua') or cget(self.name, '%s-custom_lua' % self.which)) def create(self): """ Create a new monotone DB, storing the commit keys, if available """ if not self.repository or exists(self.repository): return cmd = self.command("db", "init", "--db", self.repository) init = ExternalCommand(command=cmd) init.execute(stdout=PIPE, stderr=PIPE) if init.exit_status: raise TargetInitializationFailure("Was not able to initialize " "the monotone db at %r" % self.repository) if self.keyid: self.log.info("Using key %s for commits" % (self.keyid,)) else: # keystore key id unspecified, look at other options if self.keygenid: keyfile = join(getenv("HOME"), '.monotone', 'keys', self.keygenid) if exists(keyfile): self.log.info("Key %s exist, don't genkey again" % self.keygenid) else: # requested a new key cmd = self.command("genkey", "--db", self.repository) regkey = ExternalCommand(command=cmd) if self.passphrase: passp = "%s\n%s\n" % (self.passphrase, self.passphrase) else: passp = None regkey.execute(self.keygenid, input=passp, stdout=PIPE, stderr=PIPE) if regkey.exit_status: raise TargetInitializationFailure("Was not able to setup " "the monotone initial key at %r" % self.repository) else: raise TargetInitializationFailure("Can't setup the monotone " "repository %r. " "A keyid or keygenid " "must be provided." % self.repository) class ExternalCommandChain: """ This class implements command piping, i.e. a chain of ExternalCommand, each feeding its stdout to the stdin of next command in the chain. If a command fails, the chain breaks and returns error. Note: This class implements only a subset of ExternalCommand functionality """ def __init__(self, command, cwd=None): self.commandchain =command self.cwd = cwd self.exit_status = 0 def execute(self): outstr = None for cmd in self.commandchain: input = outstr exc = ExternalCommand(cwd=self.cwd, command=cmd) out, err = exc.execute(input=input, stdout=PIPE, stderr=PIPE) self.exit_status = exc.exit_status if self.exit_status: break outstr = out.getvalue() if len(outstr) <= 0: break return out, err class MonotoneChangeset(Changeset): """ Monotone changesets differ from standard Changeset because: 1. only the "revision" field is used for eq/ne comparison 2. 
have additional properties used to handle history linearization """ def __init__(self, linearized_ancestor, revision): """ Initializes a new MonotoneChangeset. The linearized_ancestor parameters is the fake ancestor used for linearization. The very first revision tailorized has lin_ancestor==None """ Changeset.__init__(self, revision=revision, date=None, author=None, log="") self.lin_ancestor = linearized_ancestor self.real_ancestors = None def __eq__(self, other): return (self.revision == other.revision) def __ne__(self, other): return (self.revision <> other.revision) def __str__(self): s = [Changeset.__str__(self)] s.append('linearized ancestor: %s' % self.lin_ancestor) s.append('real ancestor(s): %s' % (self.real_ancestors and ','.join(self.real_ancestors) or 'None')) return '\n'.join(s) def update(self, real_dates, authors, log, real_ancestors, branches, tags): """ Updates the monotone changeset secondary data """ self.author=".".join(authors) self.setLog(log) self.date = real_dates[0] self.real_dates = real_dates self.real_ancestors = real_ancestors self.branches = branches self.tags = tags class MonotoneCertsParser: """ Obtain and parse a "mtn list certs" output, reconstructing the revision information """ class PrefixRemover: """ Helper class. Matches a prefix, allowing access to the text following """ def __init__(self, str): self.str = str if len(self.str) > 10 and self.str[10] == '"': self.value = self.str[11:-1] else: self.value = None def __call__(self, prefix): # name "date" # value "2007-06-11T00:08:33" #|---------| #01234567890 Output from mtn automate certs # Mix spaces with prefix for search from left side spaced = " "[:-len(prefix)] + prefix + ' ' if self.str.startswith(spaced): return True else: return False # certs states DUMMY = 0 # Nothing or unknown AUTHOR = 1 # Author, multiple BRANCH = 2 # Branch DATE = 3 # Date, multiple TAG = 4 # in tags listing LOG = 5 # in changelog listing CMT = 6 # in comment listing TESTRESULT = 7 # in testresults listing def __init__(self, repository, working_dir): self.working_dir = working_dir self.repository = repository def parse(self, revision): from datetime import datetime self.revision="" self.ancestors=[] self.authors=[] self.dates=[] self.changelog="" self.branches=[] self.tags=[] # Get ancestors from automate parents cmd = self.repository.command("automate", "parents", revision, "--db", self.repository.repository) mtl = ExternalCommand(cwd=self.working_dir, command=cmd) outstr = mtl.execute(stdout=PIPE, stderr=PIPE) if mtl.exit_status: raise GetUpstreamChangesetsFailure("mtn automate parents returned " "status %d" % mtl.exit_status) self.ancestors = outstr[0].getvalue().splitlines() # Get informations about revision from list certs cmd = self.repository.command("automate", "certs", revision, "--db", self.repository.repository) mtl = ExternalCommand(cwd=self.working_dir, command=cmd) outstr = mtl.execute(stdout=PIPE, stderr=PIPE) if mtl.exit_status: raise GetUpstreamChangesetsFailure("mtn automate certs returned " "status %d" % mtl.exit_status) testresults = "" logs = "" comments = "" state = self.DUMMY line_continues = False loglines = outstr[0].getvalue().splitlines() for curline in loglines: if line_continues: if curline == '"': state = self.DUMMY line_continues = False else: # Example output for comments # (it's real from one certs!) 
# # key "key-dummy" #signature "ok" # name "changelog" # value "initial commit #" # trust "trusted" # # key "key-dummy" #signature "ok" # name "comment" # value "And a second comment #with more lines" # trust "trusted" # # key "key-dummy" #signature "ok" # name "comment" # value "This is a comment" # trust "trusted" # Find the single non escaped " as string end # Replace all escaped \" with single " # 007 helps not to find the " in sequence of \ " temp = curline.replace('\\"', '\007') pos = temp.find('"') if pos > 0: temp = temp[:pos] temp = temp.replace('\007', '"') if state == self.LOG: logs = logs + temp + "\n" elif state == self.CMT: comments = comments + temp + "\n" else: assert False if pos > 0: line_continues = False continue pr = self.PrefixRemover(curline) if pr.value is None: state = self.DUMMY continue if pr("name"): if pr.value == "author": state = self.AUTHOR elif pr.value == "branch": state = self.BRANCH elif pr.value == "date": state = self.DATE elif pr.value == "changelog": state = self.LOG elif pr.value == "comment": comments = comments + "\nNote:\n" state = self.CMT elif pr.value == "tag": state = self.TAG elif pr.value == "testresult": state = self.TESTRESULT else: state = self.DUMMY elif pr("value"): if state == self.AUTHOR: self.authors.append(pr.value) elif state == self.BRANCH: # branch data self.branches.append(pr.value) elif state == self.DATE: # monotone dates are expressed in ISO8601, always UTC dateparts = pr.value.split('T') assert len(dateparts) >= 2, `dateparts` day = dateparts[0] time = dateparts[1] y,m,d = map(int, day.split(day[4])) # recent mtn adds microsecs to the timestamp timeparts = time.split('.') hh,mm,ss = map(int, timeparts[0].split(':')) date = datetime(y,m,d,hh,mm,ss,0,UTC) self.dates.append(date) elif state == self.LOG or state == self.CMT: # comment or log line, accumulate string temp = curline[11:].replace('\\"', '\007') pos = temp.find('"') if pos > 0: temp = temp[:pos] else: line_continues = True temp = temp.replace('\007', '"') if state == self.LOG: logs = logs + temp + "\n" else: comments = comments + temp + "\n" elif state == self.TAG: self.tags.append(pr.value) elif state == self.TESTRESULT: # Testresult print into ChangeLog testresults = testresults + "Testresult: " + pr.value + "\n" else: pass # we ignore cset info elif pr("key") or pr("signature") or pr("trust"): pass # we ignore cset info else: raise GetUpstreamChangesetsFailure("Unexpected certs token: '%s' " % curline) # parsing terminated, verify the data if len(self.authors)<1 or len(self.dates)<1 or revision=="": raise GetUpstreamChangesetsFailure("Error parsing certs of revision %s. Missing data" % revision) self.changelog = testresults + logs + comments def convertLog(self, chset): self.parse(chset.revision) chset.update(real_dates=self.dates, authors=self.authors, log=self.changelog, real_ancestors=self.ancestors, branches=self.branches, tags=self.tags) return chset class MonotoneDiffParser: """ This class obtains a diff beetween two arbitrary revisions, parsing it to get changeset entries. Note: since monotone tracks directories implicitly, a fake "add dir" cset entry is generated when a file is added to a subdir """ class BasicIOTokenizer: # To write its control files, monotone uses a format called # internally "basic IO", a stanza file format with items # separated by blank lines. Lines are terminated by newlines. # The format supports strings, sequence of chars contained by # ". 
String could contain newlines and to insert a " in the # middle you escape it with \ (and \\ is used to obtain the \ # char itself) basic IO files are always UTF-8 # This class implements a small tokenizer for basic IO def __init__(self, stream): self.stream = stream def _string_token(self): # called at start of string, returns the complete string # Note: Exceptions checked outside escape = False str=['"'] while True: ch = self.it.next() if escape: escape=False str.append(ch) continue elif ch=='\\': escape=True continue else: str.append(ch) if ch=='"': break # end of filename string return "".join(str) def _normal_token(self, startch): # called at start of a token, stops at first whitespace # Note: Exceptions checked outside tok=[startch] while True: ch = self.it.next() if ch in whitespace: break tok.append(ch) return "".join(tok) def __iter__(self): # restart the iteration self.it = iter(self.stream) return self def next(self): token ="" while True: ch = self.it.next() # here we just propagate the StopIteration ... if ch in whitespace or ch=='#': continue # skip spaces beetween tokens ... elif ch == '"': try: token = self._string_token() break except StopIteration: # end of stream reached while in a string: Error!! raise GetUpstreamChangesetsFailure("diff end while in string parsing.") else: token = self._normal_token(ch) break return token def __init__(self, repository, working_dir): self.working_dir = working_dir self.repository = repository def _addPathToSet(self, s, path): parts = path.split('/') while parts: s.add('/'.join(parts)) parts.pop() def convertDiff(self, chset): """ Fills a chset with the details data coming by a diff between chset lin_ancestor and revision (i.e. the linearized history) """ if (not chset.lin_ancestor or not chset.revision or chset.lin_ancestor == chset.revision): raise GetUpstreamChangesetsFailure( "Internal error: MonotoneDiffParser.convertDiff called " "with invalid parameters: lin_ancestor %s, revision %s" % (chset.lin_ancestor, chset.revision)) # the order of revisions is very important. Monotone gives a # diff from the first to the second cmd = self.repository.command("diff", "--db", self.repository.repository, "--revision", chset.lin_ancestor, "--revision", chset.revision) mtl = ExternalCommand(cwd=self.working_dir, command=cmd) outstr = mtl.execute(stdout=PIPE, stderr=PIPE, LANG='POSIX') if mtl.exit_status: raise GetUpstreamChangesetsFailure( "mtn diff returned status %d" % mtl.exit_status) # monotone diffs are prefixed by a section containing # metainformations about files # The section terminates with the first file diff, and each # line is prepended by the patch comment char (#). tk = self.BasicIOTokenizer(outstr[0].getvalue()) tkiter = iter(tk) in_item = False try: while True: token = tkiter.next() if token.startswith("========"): # found first patch marker. 
Changeset info terminated in_item = False break else: in_item = False # now, next token should be a string or an hash, # or the two tokens are "no changes" fname = tkiter.next() if token == "no" and fname == "changes": break elif fname[0] != '"' and fname[0] != '[': raise GetUpstreamChangesetsFailure( "Unexpected token sequence: '%s' " "followed by '%s'" %(token, fname)) ename = fname[1:-1] if token == "content": pass # ignore it # ok, is a file/dir, control changesets data elif token == "add_file" or token=="add_directory": for e in chset.entries: if e.action_kind == e.DELETED and e.name == ename: # If just deleted, collapse the two into an update e.action_kind = e.UPDATED break else: chentry = chset.addEntry(ename, chset.revision) chentry.action_kind = chentry.ADDED elif token == "add_dir": chentry = chset.addEntry(ename, chset.revision) chentry.action_kind = chentry.ADDED elif token == "delete": chentry = chset.addEntry(ename, chset.revision) chentry.action_kind = chentry.DELETED elif token == "rename": # renames are in the form: oldname to newname tow = tkiter.next() newname = tkiter.next() if tow != "to" or newname[0] != '"': raise GetUpstreamChangesetsFailure( "Unexpected rename token sequence: '%s' " "followed by '%s'" % (tow, newname)) # Hack a bug from Monotone: rename with same name if fname == newname: self.repository.log.warning("Can not rename '%s' to " "'%s' self" % (fname, newname)) else: # From this commands: # mtn rename dir/file file # mtn drop dir # Has output: # delete "dir" # rename "dir/file" # to "file" # # Fix this by insert the RENAME before the DELETE. before = None for e in chset.entries: if e.action_kind == e.DELETED and ename.startswith(e.name): before = e break chentry = chset.addEntry(newname[1:-1], chset.revision, before) chentry.action_kind = chentry.RENAMED chentry.old_name = ename elif token == "patch": # patch entries are in the form: from oldrev to newrev fromw = tkiter.next() oldr = tkiter.next() tow = tkiter.next() newr = tkiter.next() if fromw != "from" or tow != "to": raise GetUpstreamChangesetsFailure( "Unexpected patch token sequence: '%s' " "followed by '%s','%s','%s'" % (fromw, oldr, tow, newr)) # Add file to the list only if it isn't already in the changeset. for e in chset.entries: if e.name == ename: break else: chentry = chset.addEntry(ename, chset.revision) chentry.action_kind = chentry.UPDATED except StopIteration: if in_item: raise GetUpstreamChangesetsFailure("Unexpected end of 'diff' parsing changeset info") class MonotoneRevToCset: """ This class is used to create changesets from revision ids. Since most backends (and tailor itself) don't support monotone multihead feature, sometimes we need to linearize the revision graph, creating syntethized (i.e. fake) edges between revisions. The revision itself is real, only its ancestors (and all changes between) are faked. To properly do this, changeset are created by a mixture of 'list certs' and 'diff' output. Certs gives the revision data, diff the differences beetween revisions. Monotone also supports multiple authors/tags/comments for each revision, while tailor allows only single values. 
We collapse those multiple data (when present) to single entries in the following manner: author all entries separated by a comma date chooses only one, at random changelog all entries appended, without a specific order comment all comments are appended to the changelog string, prefixed by a "Note:" line tag all entries separated by comma as source, stripped into single tags on targets branch used to restrict source revs (tailor follows only a single branch) testresult appended to changelog string, prefixed by a "Testresult:" other certs ignored Changesets created by monotone will have additional fields with the original data: real_ancestors list of the real revision ancestor(s) real_dates list with all date certs lin_ancestor linearized ancestor (i.e. previous revision in the linearized history) """ def __init__(self, repository, working_dir, branch): self.working_dir = working_dir self.repository = repository self.branch = branch self.logparser = MonotoneCertsParser(repository=repository, working_dir=working_dir) self.diffparser = MonotoneDiffParser(repository=repository, working_dir=working_dir) def updateCset(self, chset): # Parsing the log fills the changeset from revision data self.logparser.convertLog(chset) # if an ancestor is available, fills the cset with file/dir entries if chset.lin_ancestor: self.diffparser.convertDiff(chset) def getCset(self, revlist, onlyFirst): """ receives a revlist, already toposorted (i.e. ordered by ancestry) and outputs a list of changesets, filtering out revs outside the chosen branch. If onlyFirst is true, only the first valid element is considered """ cslist=[] anc=revlist[0] if onlyFirst: start_index = 0 else: start_index = 1 for r in revlist[start_index:]: chtmp = MonotoneChangeset(anc, r) self.logparser.convertLog(chtmp) if self.branch in chtmp.branches: cslist.append(MonotoneChangeset(anc, r)) # using a new, unfilled changeset anc=r if onlyFirst: break return cslist class MonotoneWorkingDir(UpdatableSourceWorkingDir, SynchronizableTargetWorkingDir): def _convert_head_initial(self, dbrepo, module, revision, working_dir): """ This method handles HEAD and INITIAL pseudo-revisions, converting them to monotone revids """ effective_rev = revision if revision == 'HEAD' or revision=='INITIAL': # in both cases we need the head(s) of the requested branch cmd = self.repository.command("automate","heads", "--db", dbrepo, module) mtl = ExternalCommand(cwd=working_dir, command=cmd) outstr = mtl.execute(stdout=PIPE, stderr=PIPE) if mtl.exit_status: raise InvocationError("The branch '%s' is empty" % module) revision = outstr[0].getvalue().split() if revision == 'HEAD': if len(revision)>1: raise InvocationError("Branch '%s' has multiple heads. " "Please choose only one." % module) effective_rev=revision[0] else: # INITIAL requested. We must get the ancestors of # current head(s), topologically sort them and pick # the first (i.e. the "older" revision). Unfortunately # if the branch has multiple heads then we could end # up with only part of the ancestry graph. if len(revision)>1: self.log.info('Branch "%s" has multiple heads. 
There ' 'is no guarantee to reconstruct the ' 'full history.', module) cmd = [ self.repository.command("automate","ancestors", "--db",dbrepo), self.repository.command("automate","toposort", "--db",dbrepo, "-@-") ] cmd[0].extend(revision) cld = ExternalCommandChain(cwd=working_dir, command=cmd) outstr = cld.execute() if cld.exit_status: raise InvocationError("Ancestor reading returned " "status %d" % cld.exit_status) revlist = outstr[0].getvalue().split() if len(revlist)>1: mtr = MonotoneRevToCset(repository=self.repository, working_dir=working_dir, branch=module) first_cset = mtr.getCset(revlist, True) if len(first_cset)==0: raise InvocationError("Can't find an INITIAL revision on branch '%s'." % module) effective_rev=first_cset[0].revision elif len(revlist)==0: # Special case: only one revision in branch - is the head self effective_rev=revision[0] else: effective_rev=revlist[0] return effective_rev ## UpdatableSourceWorkingDir def _getUpstreamChangesets(self, sincerev=None): # mtn descendents returns results sorted in alpha order # here we want ancestry order, so descendents output is feed back to # mtn for a toposort ... cmd = [ self.repository.command("automate","descendents", "--db", self.repository.repository, sincerev), self.repository.command("automate","toposort", "--db", self.repository.repository, "-@-") ] cld = ExternalCommandChain(cwd=self.repository.rootdir, command=cmd) outstr = cld.execute() if cld.exit_status: raise InvocationError("mtn descendents returned " "status %d" % cld.exit_status) # now childs is a list of revids, we must transform it in a # list of monotone changesets. We fill only the # linearized ancestor and revision ids, because at this time # we need only to know WICH changesets must be applied to the # target repo, not WHAT are the changesets (apart for filtering # the outside-branch revs) childs = [sincerev] +outstr[0].getvalue().split() mtr = MonotoneRevToCset(repository=self.repository, working_dir=self.repository.rootdir, branch=self.repository.module) chlist = mtr.getCset(childs, False) return chlist def _applyChangeset(self, changeset): cmd = self.repository.command("update", "--revision", changeset.revision) mtl = ExternalCommand(cwd=self.repository.basedir, command=cmd) mtl.execute(stdout=PIPE, stderr=PIPE) if mtl.exit_status: raise ChangesetApplicationFailure("'mtn update' returned " "status %s" % mtl.exit_status) mtr = MonotoneRevToCset(repository=self.repository, working_dir=self.repository.basedir, branch=self.repository.module) mtr.updateCset( changeset ) return False # no conflicts def _checkoutUpstreamRevision(self, revision): """ Concretely do the checkout of the FIRST upstream revision. """ effrev = self._convert_head_initial(self.repository.repository, self.repository.module, revision, self.repository.rootdir) if not exists(join(self.repository.basedir, '_MTN')): # actually check out the revision self.log.info("Checking out a working copy") if self.shared_basedirs: basedir = '.' 
cwd = self.repository.basedir else: basedir = self.repository.basedir cwd = self.repository.rootdir cmd = self.repository.command("co", "--db", self.repository.repository, "--revision", effrev, "--branch", self.repository.module, basedir) mtl = ExternalCommand(cwd=cwd, command=cmd) mtl.execute(stdout=PIPE, stderr=PIPE) if mtl.exit_status: raise TargetInitializationFailure( "'mtn co' returned status %s" % mtl.exit_status) else: self.log.debug("%r already exists, assuming it's a monotone " "working dir already populated", self.repository.basedir) # Ok, now the workdir contains the checked out revision. We # need to return a changeset describing it. Since this is the # first revision checked out, we don't have a (linearized) # ancestor, so we must use None as the lin_ancestor parameter chset = MonotoneChangeset(None, effrev) # now we update the new chset with basic data - without the # linearized ancestor, changeset entries will NOT be filled mtr = MonotoneRevToCset(repository=self.repository, working_dir=self.repository.basedir, branch=self.repository.module) mtr.updateCset(chset) return chset ## SynchronizableTargetWorkingDir def _addPathnames(self, names): """ Add some new filesystem objects, skipping directories. In monotone *explicit* directory addition is always recursive, so adding a directory here might interfere with renames. Adding files without directories doesn't cause problems, because adding a file implicitly adds the parent directory (non-recursively). """ fnames=[] for fn in names: if isdir(join(self.repository.basedir, fn)): self.log.debug("ignoring addition of directory %r " "(dirs are implicitly added by files)", fn) else: fnames.append(fn) if len(fnames): # ok, we still have something to add cmd = self.repository.command("add", "--") add = ExternalCommand(cwd=self.repository.basedir, command=cmd) add.execute(fnames, stdout=PIPE, stderr=PIPE) if add.exit_status: raise ChangesetApplicationFailure("%s returned status %s" % (str(add),add.exit_status)) def _addSubtree(self, subdir): """ Add a whole subtree (recursively) """ cmd = self.repository.command("add", "--recursive", "--") add = ExternalCommand(cwd=self.repository.basedir, command=cmd) add.execute(subdir, stdout=PIPE, stderr=PIPE) if add.exit_status: raise ChangesetApplicationFailure("%s returned status %s" % (str(add),add.exit_status)) def _tag(self, tag, date, author): """ TAG current revision. """ # Get current revision from working copy # FIXME: Should cache the last revision somethere cmd = self.repository.command("automate", "get_base_revision_id") mtl = ExternalCommand(cwd=self.repository.basedir, command=cmd) outstr = mtl.execute(stdout=PIPE, stderr=PIPE) if mtl.exit_status: raise ChangesetApplicationFailure("%s returned status %s" % (str(mtl),mtl.exit_status)) revision = outstr[0].getvalue().split() effective_rev=revision[0] # Add the tag cmd = self.repository.command("tag", effective_rev, tag) mtl = ExternalCommand(cwd=self.repository.basedir, command=cmd) outstr = mtl.execute(stdout=PIPE, stderr=PIPE) if mtl.exit_status: raise ChangesetApplicationFailure("%s returned status %s" % (str(mtl),mtl.exit_status)) def _commit(self, date, author, patchname, changelog=None, entries=None, tags = [], isinitialcommit = False): """ Commit the changeset. 
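        The log message is assembled from patchname and changelog, written
        to a temporary file, and passed via ``--message-file``; the commit
        date is converted to naive UTC, since that is what monotone expects.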
""" encode = self.repository.encode logmessage = [] if patchname: logmessage.append(patchname) if changelog: logmessage.append(changelog) rontf = ReopenableNamedTemporaryFile('mtn', 'tailor') log = open(rontf.name, "w") log.write(encode('\n'.join(logmessage))) log.close() date = date.astimezone(UTC).replace(microsecond=0, tzinfo=None) # monotone wants UTC cmd = self.repository.command("commit", "--author", encode(author), "--date", date.isoformat(), "--message-file", rontf.name) commit = ExternalCommand(cwd=self.repository.basedir, command=cmd) # Always commit everything, ignoring given entries... # XXX is this right? entries = ['.'] output, error = commit.execute(entries, stdout=PIPE, stderr=PIPE) # monotone complaints if there are no changes from the last commit. # we ignore those errors ... if commit.exit_status: text = error.read() if not "mtn: misuse: no changes to commit" in text: self.log.error("Monotone commit said: %s", text) raise ChangesetApplicationFailure( "%s returned status %s" % (str(commit),commit.exit_status)) else: self.log.info("No changes to commit - changeset ignored") def _removePathnames(self, names): """ Remove some filesystem object. """ cmd = self.repository.command("drop", "--recursive", "--") drop = ExternalCommand(cwd=self.repository.basedir, command=cmd) dum, error = drop.execute(names, stdout=PIPE, stderr=PIPE) if drop.exit_status: errtext = error.read() self.log.error("Monotone drop said: %s", errtext) raise ChangesetApplicationFailure("%s returned status %s" % (str(drop), drop.exit_status)) def _renamePathname(self, oldname, newname): """ Rename a filesystem object. """ cmd = self.repository.command("rename", "--") rename = ExternalCommand(cwd=self.repository.basedir, command=cmd) rename.execute(oldname, newname, stdout=PIPE, stderr=PIPE) if rename.exit_status: raise ChangesetApplicationFailure( "%s returned status %s" % (str(rename),rename.exit_status)) def _prepareTargetRepository(self): """ Check for target repository existence, eventually create it. """ self.repository.create() def _prepareWorkingDirectory(self, source_repo): """ Possibly checkout a working copy of the target VC, that will host the upstream source tree, when overriden by subclasses. 
""" from re import escape if not self.repository.repository or exists(join(self.repository.basedir, '_MTN')): return if not self.repository.module: raise TargetInitializationFailure("Monotone needs a module " "defined (to be used as " "commit branch)") cmd = self.repository.command("setup", "--db", self.repository.repository, "--branch", self.repository.module) if self.repository.keygenid: self.repository.keyid = self.repository.keygenid if self.repository.keyid: cmd.extend( ("--key", self.repository.keyid) ) setup = ExternalCommand(command=cmd) setup.execute(self.repository.basedir, stdout=PIPE, stderr=PIPE) if self.repository.passphrase or self.repository.custom_lua: monotonerc = open(join(self.repository.basedir, '_MTN', 'monotonerc'), 'w') if self.repository.passphrase: monotonerc.write(MONOTONERC % self.repository.passphrase) else: raise TargetInitializationFailure("The passphrase must be specified") if self.repository.custom_lua: self.log.info("Adding custom lua script") monotonerc.write(self.repository.custom_lua) monotonerc.close() # Add the tailor log file and state file to _MTN's list of # ignored files ignored = [] logfile = self.repository.projectref().logfile if logfile.startswith(self.repository.basedir): ignored.append('^%s$' % escape(logfile[len(self.repository.basedir)+1:])) sfname = self.repository.projectref().state_file.filename if sfname.startswith(self.repository.basedir): sfrelname = sfname[len(self.repository.basedir)+1:] ignored.append('^%s$' % escape(sfrelname)) ignored.append('^%s$' % escape(sfrelname + '.old')) ignored.append('^%s$' % escape(sfrelname + '.journal')) if len(ignored) > 0: mt_ignored = open(join(self.repository.basedir, '.mtn-ignore'), 'a') mt_ignored.write('\n'.join(ignored)) mt_ignored.close() def _initializeWorkingDir(self): """ Setup the monotone working copy The user must setup a monotone working directory himself or use the tailor config file to provide parameters for creation. Then we simply use 'mtn commit', without having to specify a database file or branch. Monotone looks up the database and branch in it's _MTN directory. """ if not exists(join(self.repository.basedir, '_MTN')): raise TargetInitializationFailure("Please setup '%s' as a " "monotone working directory" % self.repository.basedir) SynchronizableTargetWorkingDir._initializeWorkingDir(self) tailor-0.9.35+darcs20090615/vcpx/repository/git/0000755000175000017500000000000011215407134021137 5ustar vdanjeanvdanjeantailor-0.9.35+darcs20090615/vcpx/repository/git/target.py0000644000175000017500000003063011215407134023001 0ustar vdanjeanvdanjean# -*- mode: python; coding: utf-8 -*- # :Progetto: vcpx -- Git target (using git-core) # :Creato: Thu 1 Sep 2005 04:01:37 EDT # :Autore: Todd Mokros # Brendan Cully # Yann Dirson # :Licenza: GNU General Public License # """ This module implements the target backend for Git using git-core. """ __docformat__ = 'reStructuredText' from vcpx import TailorException from vcpx.config import ConfigurationError from vcpx.repository.git import GitExternalCommand, PIPE from vcpx.source import ChangesetApplicationFailure from vcpx.target import SynchronizableTargetWorkingDir, TargetInitializationFailure from vcpx.tzinfo import FixedOffset class BranchpointFailure(TailorException): "Specified branchpoint not found in parent branch" class GitTargetWorkingDir(SynchronizableTargetWorkingDir): def _addPathnames(self, names): """ Add some new filesystem objects. 
""" from os.path import join, isdir # Currently git does not handle directories at all, so filter # them out. notdirs = [n for n in names if not isdir(join(self.repository.basedir, n))] if notdirs: self.repository.runCommand(['update-index', '--add'] + notdirs) def _editPathnames(self, names): """ Records a sequence of filesystem objects as updated. """ from os.path import join, isdir # can we assume we don't have directories in the list ? Nope. notdirs = [n for n in names if not isdir(join(self.repository.basedir, n))] if notdirs: self.repository.runCommand(['update-index'] + notdirs) def __parse_author(self, author): """ Parse the author field, returning (name, email) """ from email.Utils import parseaddr from vcpx.target import AUTHOR, HOST if author.find('@') > -1: name, email = parseaddr(author) else: name, email = author, '' name = name.strip() email = email.strip() if not name: name = AUTHOR if not email: email = "%s@%s" % (name, HOST) return (name, email) def _commit(self, date, author, patchname, changelog=None, entries=None, tags=[], isinitialcommit=False): """ Commit the changeset. """ from os import environ try: self.repository.runCommand(['status']) except Exception, e: self.log.info("git-status returned an error---assuming nothing to do") return encode = self.repository.encode logmessage = [] if patchname: logmessage.append(patchname) if changelog: logmessage.append(changelog) env = {} env.update(environ) # update the index self.repository.runCommand(['add', '-u']) treeid = self.repository.runCommand(['write-tree'])[0] # in single-repository mode, only update the relevant branch if self.repository.branch_name: refname = self.repository.branch_name else: refname = 'HEAD' # find the previous commit on the branch if any c = GitExternalCommand(self.repository, cwd=self.repository.basedir, command=self.repository.command('rev-parse', refname)) (out, err) = c.execute(stdout=PIPE, stderr=PIPE) if c.exit_status: # Do we need to check err to be sure there was no error ? 
self.log.info("Doing initial commit") parent = False else: # FIXME: I'd prefer to avoid all those "if parent" parent = out.read().split('\n')[0] (name, email) = self.__parse_author(author) if name: env['GIT_AUTHOR_NAME'] = encode(name) env['GIT_COMMITTER_NAME'] = encode(name) if email: env['GIT_AUTHOR_EMAIL']=email env['GIT_COMMITTER_EMAIL']=email if date: env['GIT_AUTHOR_DATE']=date.strftime("%Y-%m-%d %H:%M:%S %z") env['GIT_COMMITTER_DATE']=env['GIT_AUTHOR_DATE'] if parent: cmd = self.repository.command('commit-tree', treeid, '-p', parent) else: cmd = self.repository.command('commit-tree', treeid) c = GitExternalCommand(self.repository, cwd=self.repository.basedir, command=cmd) logmessage = encode('\n'.join(logmessage)) if not logmessage: logmessage = 'No commit message\n' if not logmessage.endswith('\n'): logmessage += '\n' (out, _) = c.execute(stdout=PIPE, env=env, input=logmessage) if c.exit_status: failed = True if out: for line in [x.strip() for x in out if x[0] != '#']: if line == 'nothing to commit': failed = False if failed: raise ChangesetApplicationFailure("%s returned status %d" % (str(c), c.exit_status)) else: commitid=out.read().split('\n')[0] if parent: self.repository.runCommand(['update-ref', refname, commitid, parent]) else: self.repository.runCommand(['update-ref', refname, commitid]) def _tag(self, tag, date, author): # in single-repository mode, only update the relevant branch if self.repository.branch_name: refname = self.repository.branch_name else: refname = 'HEAD' # Allow a new tag to overwrite an older one with -f args = ["tag", "-a",] if self.repository.overwrite_tags: args.append("-f") # Escape the tag name for git import re tag_git = re.sub('_*$', '', re.sub('__', '_', re.sub('[^A-Za-z0-9_-]', '_', tag))) args += ["-m", tag, tag_git, refname] cmd = self.repository.command(*args) c = GitExternalCommand(self.repository, cwd=self.repository.basedir, command=cmd) from os import environ env = {} env.update(environ) (name, email) = self.__parse_author(author) if name: env['GIT_AUTHOR_NAME'] = self.repository.encode(name) env['GIT_COMMITTER_NAME'] = self.repository.encode(name) if email: env['GIT_AUTHOR_EMAIL']=email env['GIT_COMMITTER_EMAIL']=email if date: env['GIT_AUTHOR_DATE']=date.strftime("%Y-%m-%d %H:%M:%S %z") env['GIT_COMMITTER_DATE']=env['GIT_AUTHOR_DATE'] c.execute(env=env) if c.exit_status: if not self.repository.overwrite_tags: self.log.critical("Couldn't set tag '%s': maybe it's a " "conflict with a previous tag, and " "overwrite-tags=True may help" % tag_git) raise ChangesetApplicationFailure("%s returned status %d" % (str(c), c.exit_status)) def _removePathnames(self, names): """ Remove some filesystem object. """ from os.path import exists, isdir, join # Currently git does not handle directories at all, so filter # them out. notdirs = [] for name in names: fname = join(self.repository.basedir, name) if not exists(fname): self.log.warning("Ignoring deletion of non existing '%s'", name) elif not isdir(fname): notdirs.append(name) if notdirs: self.repository.runCommand(['rm'] + notdirs) def _renamePathname(self, oldname, newname): """ Rename a filesystem object. """ # Git does not seem to allow # $ mv a.txt b.txt # $ git mv a.txt b.txt # Here we are in this situation, since upstream VCS already # moved the item. from os import mkdir, rename, rmdir, listdir from os.path import join, exists, isdir oldpath = join(self.repository.basedir, oldname) newpath = join(self.repository.basedir, newname) # These are used with disjunct directories. 
newpathtmp = newpath + '-TAILOR-HACKED-TEMP-NAME' newnametmp = newname + '-TAILOR-HACKED-TEMP-NAME' # Git does not track empty directories, so if there is only an # empty dir, we have nothing to do. if (isdir(newpath) and not len(listdir(newpath))) or \ (isdir(newpathtmp) and not len(listdir(newpathtmp))): return # rename() won't work for rename(a/b, a) if newpath.startswith(oldpath+"/"): oldpathtmp = oldpath+"-TAILOR-HACKED-TEMP-NAME" oldnametmp = oldname+"-TAILOR-HACKED-TEMP-NAME" if exists(oldpathtmp): rename(oldpathtmp, oldpath) rename(newpath, oldpathtmp) rmdir(oldpath) rename(oldpathtmp, oldpath) mkdir(oldpathtmp) self.repository.runCommand(['mv', oldname, newname.replace(oldname, oldnametmp, 1)]) self.repository.runCommand(['mv', oldnametmp, oldname]) else: if self.shared_basedirs: # Recent gits handle this correctly self.repository.runCommand(['mv', oldname, newname]) else: # For disjunct directories, the real new entry has been moved # out of the way, and the superclass expects us to rename the # the file or directory via git. # First, some sanity checks. if exists(newpath): raise ChangesetApplicationFailure( "Cannot rename since target already exists: %s" % newname) if not exists(newpathtmp): raise ChangesetApplicationFailure( "Cannot rename since actual target not found: %s" % newnametmp) if exists(oldpath): # Under normal operation, just git-mv the old name to the new # name. Git will notice the changes too. self.repository.runCommand(['mv', oldname, newname]) else: # If a revision renames directory A/ to B/, plus file A/a to # B/b, *and if* A -> B happened already, then the superclass # already made B/b. We cannot git-mv B/a to B/b since B/a is # gone. The workaround is git-add B/b-TAILOR-HACKED-TEMP-NAME, # then git-mv it to B/b. self.repository.runCommand(['add', newnametmp]) self.repository.runCommand(['mv', newnametmp, newname]) def _prepareTargetRepository(self): self.repository.create() def _prepareWorkingDirectory(self, source_repo): """ Create the .git/info/exclude. """ from os.path import join, exists from os import mkdir from vcpx.dualwd import IGNORED_METADIRS # create info/excludes in storagedir infodir = join(self.repository.basedir, self.repository.storagedir, 'info') if not exists(infodir): mkdir(infodir) # Create the .git/info/exclude file, that contains an # fnmatch per line with metadirs to be skipped. 
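        # The file ends up with one pattern per line: each metadir name
        # from IGNORED_METADIRS, then tailor's own log file and state file
        # (plus the state file's '.old' and '.journal' companions) whenever
        # they live inside the working tree.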
ignore = open(join(infodir, 'exclude'), 'a') ignore.write('\n') ignore.write('\n'.join(['%s' % md for md in IGNORED_METADIRS])) ignore.write('\n') if self.logfile.startswith(self.repository.basedir): ignore.write(self.logfile[len(self.repository.basedir)+1:]) ignore.write('\n') if self.state_file.filename.startswith(self.repository.basedir): sfrelname = self.state_file.filename[len(self.repository.basedir)+1:] ignore.write(sfrelname) ignore.write('\n') ignore.write(sfrelname+'.old') ignore.write('\n') ignore.write(sfrelname+'.journal') ignore.write('\n') ignore.close() def importFirstRevision(self, source_repo, changeset, initial): # If we have a parent repository, always track from INITIAL SynchronizableTargetWorkingDir.importFirstRevision( self, source_repo, changeset, initial or self.repository.branch_point) tailor-0.9.35+darcs20090615/vcpx/repository/git/source.py0000644000175000017500000001363011215407134023014 0ustar vdanjeanvdanjean# -*- mode: python; coding: utf-8 -*- # :Progetto: vcpx -- Git target (using git-core) # :Creato: Thu 1 Sep 2005 04:01:37 EDT # :Autore: Todd Mokros # Brendan Cully # Yann Dirson # :Licenza: GNU General Public License # """ This module implements the source backend for Git using git-core. """ __docformat__ = 'reStructuredText' from vcpx.shwrap import ExternalCommand, PIPE from vcpx.config import ConfigurationError from vcpx.source import UpdatableSourceWorkingDir, GetUpstreamChangesetsFailure from vcpx.source import ChangesetApplicationFailure from vcpx.tzinfo import FixedOffset class GitSourceWorkingDir(UpdatableSourceWorkingDir): def _checkoutUpstreamRevision(self, revision): """ git clone """ from os import rename, rmdir from os.path import join # Right now we clone the entire repository and just check out to the # current rev because it makes revision parsing easier. We can't # easily check out arbitrary revisions anyway, but we could probably # handle HEAD (master) as a special case... # git clone won't checkout into an existing directory target = join(self.repository.basedir, '.gittmp') # might want -s if we can determine that the path is local. 
Then again, # that makes it a little unsafe to do git write actions here self.repository.runCommand(['clone', '-n', self.repository.repository, target], ChangesetApplicationFailure, False) rename(join(target, '.git'), join(self.repository.basedir, '.git')) rmdir(target) rev = self._getRev(revision) if rev != revision: self.log.info('Checking out revision %s (%s)' % (rev, revision)) else: self.log.info('Checking out revision ' + rev) self.repository.runCommand(['reset', '--hard', rev], ChangesetApplicationFailure, False) return self._changesetForRevision(rev) def _getUpstreamChangesets(self, since): # Brute-force tag search from os import listdir from os.path import exists, join tags = {} tagdir = join(self.repository.basedir, '.git', 'refs', 'tags') if exists(tagdir): for tag in listdir(tagdir): tagrev = self.repository.runCommand(['rev-list', '--max-count=1', tag])[0] tags.setdefault(tagrev, []).append(tag) self.repository.runCommand(['fetch'], GetUpstreamChangesetsFailure, False) revs = self.repository.runCommand(['rev-list', '^' + since, 'origin'], GetUpstreamChangesetsFailure)[:-1] revs.reverse() for rev in revs: cs = self._changesetForRevision(rev) if rev in tags: cs.tags = tags[rev] yield cs def _applyChangeset(self, changeset): out = self.repository.runCommand(['merge', '-n', '--no-commit', 'fastforward', 'HEAD', changeset.revision], ChangesetApplicationFailure) conflicts = [] for line in out: if line.endswith(': needs update'): conflicts.append(line[:-14]) if conflicts: self.log.warning("Conflict after 'git merge': %s", ' '.join(conflicts)) return conflicts def _changesetForRevision(self, revision): from datetime import datetime from vcpx.changes import Changeset, ChangesetEntry action_map = {'A': ChangesetEntry.ADDED, 'D': ChangesetEntry.DELETED, 'M': ChangesetEntry.UPDATED, 'R': ChangesetEntry.RENAMED} # find parent lines = self.repository.runCommand(['rev-list', '--pretty=raw', '--max-count=1', revision], GetUpstreamChangesetsFailure) parents = [] user = Changeset.ANONYMOUS_USER loglines = [] date = None for line in lines: if line.startswith('parent'): parents.append(line.split(' ').pop()) if line.startswith('author'): author_fields = line.split(' ')[1:] tz = int(author_fields.pop()) dt = int(author_fields.pop()) user = ' '.join(author_fields) tzsecs = abs(tz) tzsecs = (tzsecs / 100 * 60 + tzsecs % 100) * 60 if tz < 0: tzsecs = -tzsecs date = datetime.fromtimestamp(dt, FixedOffset(tzsecs/60)) if line.startswith(' '): loglines.append(line.lstrip(' ')) message = '\n'.join(loglines) entries = [] cmd = ['diff-tree', '--root', '-r', '-M', '--name-status'] # haven't thought about merges yet... 
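        # diff-tree --name-status emits one tab-separated record per path,
        # e.g. (illustrative):
        #   M       src/foo.c
        #   R100    old.txt new.txt
        # The loop below pops the status letter from the front and the new
        # name from the back; for renames the leftover middle field is the
        # old name.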
if parents: cmd.append(parents[0]) cmd.append(revision) files = self.repository.runCommand(cmd, GetUpstreamChangesetsFailure)[:-1] if not parents: # git lets us know what it's diffing against if we omit parent if len(files) > 0: files.pop(0) for line in files: fields = line.split('\t') state = fields.pop(0) name = fields.pop() e = ChangesetEntry(name) e.action_kind = action_map[state[0]] if e.action_kind == ChangesetEntry.RENAMED: e.old_name = fields.pop() entries.append(e) return Changeset(revision, date, user, message, entries) def _getRev(self, revision): """ Return the git object corresponding to the symbolic revision """ if revision == 'INITIAL': return self.repository.runCommand(['rev-list', 'HEAD'], GetUpstreamChangesetsFailure)[-2] return self.repository.runCommand(['rev-parse', '--verify', revision], GetUpstreamChangesetsFailure)[0] tailor-0.9.35+darcs20090615/vcpx/repository/git/__init__.py0000644000175000017500000001362611215407134023260 0ustar vdanjeanvdanjean# -*- mode: python; coding: utf-8 -*- # :Progetto: vcpx -- Git target (using git-core) # :Creato: Thu 1 Sep 2005 04:01:37 EDT # :Autore: Todd Mokros # Brendan Cully # Yann Dirson # :Licenza: GNU General Public License # """ This module implements the parts of the backend for Git using git-core, common to source and target modules. """ __docformat__ = 'reStructuredText' from vcpx.config import ConfigurationError from vcpx.repository import Repository from vcpx.shwrap import ExternalCommand, PIPE from vcpx.target import TargetInitializationFailure class GitRepository(Repository): METADIR = '.git' def _load(self, project): Repository._load(self, project) self.EXECUTABLE = project.config.get(self.name, 'git-command', 'git') self.overwrite_tags = project.config.get(self.name, 'overwrite-tags', False) self.parent_repo = project.config.get(self.name, 'parent-repo') self.branch_point = project.config.get(self.name, 'branchpoint', 'HEAD') self.branch_name = project.config.get(self.name, 'branch') if self.branch_name: self.branch_name = 'refs/heads/' + self.branch_name if self.repository and self.parent_repo: self.log.critical('Cannot make sense of both "repository" and "parent-repo" parameters') raise ConfigurationError ('Must specify only one of "repository" and "parent-repo"') if self.branch_name and not self.repository: self.log.critical('Cannot make sense of "branch" if "repository" is not set') raise ConfigurationError ('Missing "repository" to make use o "branch"') self.env = {} # The git storage directory can track both the repository and # the working directory. If the repository directory is # specified, make sure git stores its repository there by # setting $GIT_DIR. However, this repository will typically be # a "bare" repository that can't directly track a working # directory. As such, it is necessary to tell it where to find # the working directory and index through $GIT_WORK_TREE and # $GIT_INDEX_FILE. if self.repository: from os.path import join self.storagedir = self.repository self.env['GIT_DIR'] = self.storagedir self.env['GIT_INDEX_FILE'] = join(self.METADIR, 'index') self.env['GIT_WORK_TREE'] = self.basedir else: self.storagedir = self.METADIR # No need to set $GIT_*, since the defaults are appropriate def runCommand(self, cmd, exception=Exception, pipe=True): """ Facility to run a git command in a controlled context. 
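        With pipe=True (the default) the command's stdout is returned as a
        list of lines; a nonzero exit status raises the given exception
        class. Illustrative use::

            head = self.runCommand(['rev-parse', 'HEAD'])[0]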
""" c = GitExternalCommand(self, command = self.command(*cmd), cwd = self.basedir) if pipe: output = c.execute(stdout=PIPE)[0] else: c.execute() if c.exit_status: raise exception(str(c) + ' failed') if pipe: return output.read().split('\n') def create(self): """ Initialize .git through ``git init-db`` or ``git-clone``. """ from os import renames, mkdir from os.path import join, exists if exists(join(self.basedir, self.METADIR)): return if self.parent_repo: cmd = self.command("clone", "--shared", "-n", self.parent_repo, 'tmp') clone = GitExternalCommand(self, cwd=self.basedir, command=cmd) clone.execute() if clone.exit_status: raise TargetInitializationFailure( "%s returned status %s" % (str(clone), clone.exit_status)) renames(join(self.basedir, 'tmp', '.git'), join(self.basedir, '.git')) cmd = self.command("reset", "--soft", self.branch_point) reset = GitExternalCommand(self, cwd=self.basedir, command=cmd) reset.execute() if reset.exit_status: raise TargetInitializationFailure( "%s returned status %s" % (str(reset), reset.exit_status)) elif self.repository and self.branch_name: # ...and exists(self.storagedir) ? # initialization of a new branch in single-repository mode mkdir(join(self.basedir, self.METADIR)) bp = self.runCommand(['rev-parse', self.branch_point])[0] self.runCommand(['read-tree', bp]) self.runCommand(['update-ref', self.branch_name, bp]) #self.runCommand(['checkout-index']) else: if exists(join(self.basedir, self.storagedir)): raise TargetInitializationFailure( "Repository %s already exists - " "did you forget to set \"branch\" parameter ?" % self.storagedir) self.runCommand(['init-db']) if self.repository: # in this mode, the db is not stored in working dir, so we # may have to create .git ourselves try: mkdir(join(self.basedir, self.METADIR)) except OSError: # if it's already there, that's not a no problem pass class GitExternalCommand(ExternalCommand): def __init__(self, repo, command=None, cwd=None): """ Initialize an ExternalCommand instance tied to a GitRepository from which it inherits a set of environment variables to use for each execute(). """ self.repo = repo return ExternalCommand.__init__(self, command, cwd) def execute(self, *args, **kwargs): """Execute the command, with controlled environment.""" if not kwargs.has_key('env'): kwargs['env'] = {} kwargs['env'].update(self.repo.env) return ExternalCommand.execute(self, *args, **kwargs) tailor-0.9.35+darcs20090615/vcpx/repository/bzr.py0000644000175000017500000003544511215407134021536 0ustar vdanjeanvdanjean# -*- mode: python; coding: utf-8 -*- # :Progetto: vcpx -- Bazaar support using the bzrlib instead of the frontend # :Creato: Fri Aug 19 01:06:08 CEST 2005 # :Autore: Johan Rydberg # Jelmer Vernooij # Lalo Martins # Olaf Conradi # :Licenza: GNU General Public License # """ This module implements the backends for Bazaar. 
""" __docformat__ = 'reStructuredText' from sys import version_info assert version_info >= (2,4), "Bazaar backend requires Python 2.4" del version_info from bzrlib import errors from bzrlib.branch import Branch from bzrlib.bzrdir import BzrDir from bzrlib.errors import NoSuchRevision from bzrlib.missing import find_unmerged from bzrlib.osutils import normpath, pathjoin from bzrlib.plugin import load_plugins from vcpx.changes import Changeset, ChangesetEntry from vcpx.repository import Repository from vcpx.source import UpdatableSourceWorkingDir, ChangesetApplicationFailure from vcpx.target import SynchronizableTargetWorkingDir from vcpx.workdir import WorkingDir class BzrChangeset(Changeset): """ Manage the particular reordering of the entries. Apparently TreeDelta doesn't expose the entries in a sensible order, they are grouped by kind. """ def __init__(self, revision, date, author, log, entries=None, **other): """ Initialize a new BzrChangeset, inserting the entries in a sensible order. """ from os.path import split, join super(BzrChangeset, self).__init__(revision, date, author, log, entries=None, **other) if entries is not None: for e in entries: self.addEntry(e, revision) # Adjust old_name on renamed entries: bzr tell us the *original* # name of the rename... # Consider this: # # $ bzr mv newnamedir/subdir/a newnamedir/subdir/b # newnamedir/subdir/a => newnamedir/subdir/b # $ bzr mv newnamedir/subdir newnamedir/newsubdir # newnamedir/subdir => newnamedir/newsubdir # $ bzr mv newnamedir dir # newnamedir => dir # $ bzr st # renamed: # newnamedir => dir # newnamedir/subdir => dir/newsubdir # newnamedir/subdir/a => dir/newsubdir/b renames = {} for e in self.entries: if e.action_kind == e.RENAMED: renames[e.old_name] = e.name d,f = split(e.old_name) while d: if d in renames: e.old_name = join(renames[d], e.old_name[len(d)+1:]) break d,f = split(d) def addEntry(self, entry, revision): """ Fixup the ordering of the entries, by giving precedence to directories """ if entry.action_kind in (entry.ADDED, entry.RENAMED) and entry.is_directory: dirname = entry.name + '/' # does bzr on windows use this too? for i,e in enumerate(self.entries): if e.name.startswith(dirname): self.entries.insert(i, entry) return elif entry.action_kind == entry.DELETED: for i,e in enumerate(self.entries): if e.action_kind == e.RENAMED and e.name == entry.name: # This is the following case: # $ bzr rm A # $ bzr mv B A self.entries.insert(i, entry) return elif (e.action_kind == e.DELETED and e.is_directory and entry.name.startswith(e.name)): # Remove dir contents before dir itself self.entries.insert(i, entry) return elif (e.action_kind == e.ADDED and e.name == entry.name): # put replacement (rm+add) in the right order self.entries.insert(i, entry) return self.entries.append(entry) class BzrRepository(Repository): METADIR = '.bzr' def _load(self, project): Repository._load(self, project) ppath = project.config.get(self.name, 'python-path') if ppath: from sys import path if ppath not in path: path.insert(0, ppath) def create(self): """ Create a branch with a working tree at the base directory. If the base directory is inside a Bazaar style "shared repository", it will use that to create a branch and working tree (make sure it allows working trees). """ self.log.info('Initializing new repository in %r...', self.basedir) try: bzrdir = BzrDir.open(self.basedir) except errors.NotBranchError: # really a NotBzrDir error... 
branch = BzrDir.create_branch_convenience(self.basedir, force_new_tree=True) wtree = branch.bzrdir.open_workingtree() else: bzrdir.create_branch() wtree = bzrdir.create_workingtree() return wtree class BzrWorkingDir(UpdatableSourceWorkingDir, SynchronizableTargetWorkingDir): def __init__(self, repository): from os.path import split from bzrlib import version_info, IGNORE_FILENAME if version_info > (0,9): from bzrlib.ignores import add_runtime_ignores, parse_ignore_file else: from bzrlib import DEFAULT_IGNORE WorkingDir.__init__(self, repository) # TODO: check if there is a "repository" in the configuration, # and use it as a bzr repository self.ignored = [] self._working_tree = None # The bzr repository may have some plugins that needs to be activated load_plugins() try: bzrdir = BzrDir.open(self.repository.basedir) wt = self._working_tree = bzrdir.open_workingtree() # read .bzrignore for _addSubtree() if wt.has_filename(IGNORE_FILENAME): f = wt.get_file_byname(IGNORE_FILENAME) if version_info > (0,9): self.ignored.extend(parse_ignore_file(f)) else: self.ignored.extend([ line.rstrip("\n\r") for line in f.readlines() ]) f.close() except (errors.NotBranchError, errors.NoWorkingTree): pass # Omit our own log... logfile = self.repository.projectref().logfile dir, file = split(logfile) if dir == self.repository.basedir: self.ignored.append(file) # ... and state file sfname = self.repository.projectref().state_file.filename dir, file = split(sfname) if dir == self.repository.basedir: self.ignored.append(file) self.ignored.append(file+'.old') self.ignored.append(file+'.journal') if version_info > (0,9): add_runtime_ignores(self.ignored) else: DEFAULT_IGNORE.extend(self.ignored) ############################# ## UpdatableSourceWorkingDir def _changesetFromRevision(self, branch, revision_id): """ Generate changeset for the given Bzr revision """ from datetime import datetime from vcpx.tzinfo import FixedOffset, UTC revision = branch.repository.get_revision(revision_id) deltatree = branch.get_revision_delta(branch.revision_id_to_revno(revision_id)) entries = [] for delta in deltatree.renamed: e = ChangesetEntry(delta[1]) e.action_kind = ChangesetEntry.RENAMED e.old_name = delta[0] e.is_directory = delta[3] == 'directory' entries.append(e) for delta in deltatree.added: e = ChangesetEntry(delta[0]) e.action_kind = ChangesetEntry.ADDED e.is_directory = delta[2] == 'directory' entries.append(e) for delta in deltatree.removed: e = ChangesetEntry(delta[0]) e.action_kind = ChangesetEntry.DELETED e.is_directory = delta[2] == 'directory' entries.append(e) for delta in deltatree.modified: e = ChangesetEntry(delta[0]) e.action_kind = ChangesetEntry.UPDATED entries.append(e) if revision.timezone is not None: timezone = FixedOffset(revision.timezone / 60) else: timezone = UTC return BzrChangeset(revision.revision_id, datetime.fromtimestamp(revision.timestamp, timezone), revision.committer, revision.message, entries) def _getUpstreamChangesets(self, sincerev): """ See what other revisions exist upstream and return them """ from bzrlib import version_info parent_branch = Branch.open(self.repository.repository) branch = self._working_tree.branch branch.lock_read() try: parent_branch.lock_read() try: if version_info > (1, 6): revisions = find_unmerged(branch, parent_branch, 'remote')[1] else: revisions = find_unmerged(branch, parent_branch)[1] self.log.info("Collecting %d missing changesets", len(revisions)) for id, revision in revisions: yield self._changesetFromRevision(parent_branch, revision) except: 
parent_branch.unlock() raise parent_branch.unlock() except: branch.unlock() raise branch.unlock() self.log.info("Fetching concrete changesets") branch.lock_write() try: branch.fetch(parent_branch) finally: branch.unlock() def _applyChangeset(self, changeset): """ Apply the given changeset to the working tree """ parent_branch = BzrDir.open(self.repository.repository).open_branch() self._working_tree.lock_write() try: count = self._working_tree.pull(parent_branch, stop_revision=changeset.revision) # XXX: this does not seem to return a true value on conflicts! conflicts = self._working_tree.update() finally: self._working_tree.unlock() try: pulled_revnos = count.new_revno - count.old_revno except AttributeError: # Prior to 0.15 pull returned a simple integer instead of a result object pulled_revnos = count self.log.info('Updated to %r, applied %d changesets', changeset.revision, count) if conflicts: # No conflict handling yet raise ChangesetApplicationFailure('Unsupported: conflicts') return [] def _checkoutUpstreamRevision(self, revision): """ Initial checkout of upstream branch, equivalent of 'bzr branch -r', and return the last changeset. """ parent_bzrdir = BzrDir.open(self.repository.repository) parent_branch = parent_bzrdir.open_branch() if revision == "INITIAL": try: revid = parent_branch.get_rev_id(1) except NoSuchRevision: return None elif revision == "HEAD": revid = None else: revid = revision self.log.info('Extracting %r out of %r in %r...', revid, parent_bzrdir.root_transport.base, self.repository.basedir) bzrdir = parent_bzrdir.sprout(self.repository.basedir, revid) self._working_tree = bzrdir.open_workingtree() return self._changesetFromRevision(parent_branch, revid) ################################# ## SynchronizableTargetWorkingDir def _addPathnames(self, names): if len(names): names = [ pathjoin(self.repository.basedir, n) for n in names ] self._working_tree.smart_add(names, recurse=False) def _addSubtree(self, subdir): subdir = pathjoin(self.repository.basedir, subdir) added, ignored = self._working_tree.smart_add([subdir], recurse=True) from vcpx.dualwd import IGNORED_METADIRS for meta in IGNORED_METADIRS + self.ignored: if ignored.has_key(meta): del ignored[meta] if len(ignored): f = [] map(f.extend, ignored.values()) self._addPathnames(f) def _commit(self, date, author, patchname, changelog=None, entries=None, tags = [], isinitialcommit = False): """ Commit the changeset. """ from calendar import timegm # like mktime(), but returns UTC timestamp from binascii import hexlify from re import search logmessage = [] if patchname: logmessage.append(patchname) if changelog: logmessage.append(changelog) if logmessage: self.log.info('Committing %s...', logmessage[0]) logmessage = '\n'.join(logmessage) else: self.log.info('Committing...') logmessage = "Empty changelog" timestamp = timegm(date.utctimetuple()) timezone = date.utcoffset().seconds + date.utcoffset().days * 24 * 3600 # Normalize file names if entries: entries = [normpath(entry) for entry in entries] self._working_tree.commit(logmessage, committer=author, specific_files=entries, verbose=self.repository.projectref().verbose, timestamp=timestamp, timezone=timezone) def _removePathnames(self, names): """ Remove files from the tree. """ self.log.info('Removing %s...', ', '.join(names)) names.sort(reverse=True) # remove files before the dir they're in self._working_tree.remove(names) def _renamePathname(self, oldname, newname): """ Rename a file from oldname to newname. 
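        Delegates to bzrlib's WorkingTree.rename_one(), which records the
        rename in the tree inventory.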
""" self.log.info('Renaming %r to %r...', oldname, newname) self._working_tree.rename_one(oldname, newname) def _prepareTargetRepository(self): from bzrlib import version_info from vcpx.dualwd import IGNORED_METADIRS if self._working_tree is None: self._working_tree = self.repository.create() if version_info > (0,9): from bzrlib.ignores import add_runtime_ignores add_runtime_ignores(IGNORED_METADIRS) else: from bzrlib import DEFAULT_IGNORE DEFAULT_IGNORE.extend(IGNORED_METADIRS) tailor-0.9.35+darcs20090615/vcpx/repository/cg.py0000644000175000017500000001445411215407134021327 0ustar vdanjeanvdanjean# -*- mode: python; coding: utf-8 -*- # :Progetto: vcpx -- Git target (using cogito) # :Creato: Wed 24 ago 2005 18:34:27 EDT # :Autore: Todd Mokros # :Licenza: GNU General Public License # """ This module implements the backend for Git by using Cogito. """ __docformat__ = 'reStructuredText' from vcpx.repository import Repository from vcpx.shwrap import ExternalCommand from vcpx.target import SynchronizableTargetWorkingDir, TargetInitializationFailure from vcpx.source import ChangesetApplicationFailure class CgRepository(Repository): METADIR = '.git' def _load(self, project): Repository._load(self, project) self.EXECUTABLE = project.config.get(self.name, 'cg-command', 'cg') def create(self): """ Execute ``cg init``. """ from os.path import join, exists if exists(join(self.basedir, self.METADIR)): return cmd = self.command("init", "-I") init = ExternalCommand(cwd=self.basedir, command=cmd) init.execute() if init.exit_status: raise TargetInitializationFailure( "%s returned status %s" % (str(init), init.exit_status)) class CgWorkingDir(SynchronizableTargetWorkingDir): ## SynchronizableTargetWorkingDir def _addPathnames(self, names): """ Add some new filesystem objects. """ from os.path import join, isdir # Currently git/cogito does not handle directories at all, so filter # them out. notdirs = [n for n in names if not isdir(join(self.repository.basedir, n))] if notdirs: cmd = self.repository.command("add") ExternalCommand(cwd=self.repository.basedir, command=cmd).execute(notdirs) def __parse_author(self, author): """ Parse the author field, returning (name, email) """ from email.Utils import parseaddr from vcpx.target import AUTHOR, HOST if author.find('@') > -1: name, email = parseaddr(author) else: name, email = author, '' name = name.strip() email = email.strip() if not name: name = AUTHOR if not email: email = "%s@%s" % (AUTHOR, HOST) return (name, email) def _commit(self, date, author, patchname, changelog=None, entries=None, tags = [], isinitialcommit = False): """ Commit the changeset. """ from os import environ encode = self.repository.encode logmessage = [] if patchname: logmessage.append(patchname) if changelog: logmessage.append(changelog) env = {} env.update(environ) (name, email) = self.__parse_author(author) if name: env['GIT_AUTHOR_NAME'] = encode(name) if email: env['GIT_AUTHOR_EMAIL']=email if date: env['GIT_AUTHOR_DATE']=date.strftime('%Y-%m-%d %H:%M:%S %z') # '-f' flag means we can get empty commits, which # shouldn't be a problem. cmd = self.repository.command("commit", "-f") c = ExternalCommand(cwd=self.repository.basedir, command=cmd) c.execute(env=env, input=encode('\n'.join(logmessage))) if c.exit_status: raise ChangesetApplicationFailure("%s returned status %d" % (str(c), c.exit_status)) def _removePathnames(self, names): """ Remove some filesystem object. """ from os.path import join, isdir # Currently git does not handle directories at all, so filter # them out. 
notdirs = [n for n in names if not isdir(join(self.repository.basedir, n))] if notdirs: cmd = self.repository.command("rm") c=ExternalCommand(cwd=self.repository.basedir, command=cmd) c.execute(notdirs) def _renamePathname(self, oldname, newname): """ Rename a filesystem object. """ # In the future, we may want to switch to using # git rename, in case renames ever get more support # in git. It currently just does and add and remove. from os.path import join, isdir from os import walk from vcpx.dualwd import IGNORED_METADIRS if isdir(join(self.repository.basedir, newname)): # Given lack of support for directories in current Git, # loop over all files under the new directory and # do a add/remove on them. skip = len(self.repository.basedir)+len(newname)+2 for dir, subdirs, files in walk(join(self.repository.basedir, newname)): prefix = dir[skip:] for excd in IGNORED_METADIRS: if excd in subdirs: subdirs.remove(excd) for f in files: self._removePathnames([join(oldname, prefix, f)]) self._addPathnames([join(newname, prefix, f)]) else: self._removePathnames([oldname]) self._addPathnames([newname]) def _prepareTargetRepository(self): self.repository.create() def _prepareWorkingDirectory(self, source_repo): """ Create the .git/info/exclude. """ from os.path import join from vcpx.dualwd import IGNORED_METADIRS # Create the .git/info/exclude file, that contains an # fnmatch per line with metadirs to be skipped. ignore = open(join(self.repository.basedir, self.repository.METADIR, 'info', 'exclude'), 'a') ignore.write('\n') ignore.write('\n'.join(['%s' % md for md in IGNORED_METADIRS])) ignore.write('\n') if self.logfile.startswith(self.repository.basedir): ignore.write(self.logfile[len(self.repository.basedir)+1:]) ignore.write('\n') if self.state_file.filename.startswith(self.repository.basedir): sfrelname = self.state_file.filename[len(self.repository.basedir)+1:] ignore.write(sfrelname) ignore.write('\n') ignore.write(sfrelname+'.old') ignore.write('\n') ignore.write(sfrelname+'.journal') ignore.write('\n') ignore.close() tailor-0.9.35+darcs20090615/vcpx/repository/p4/0000755000175000017500000000000011215407134020677 5ustar vdanjeanvdanjeantailor-0.9.35+darcs20090615/vcpx/repository/p4/source.py0000644000175000017500000001523211215407134022554 0ustar vdanjeanvdanjean# -*- mode: python; coding: utf-8 -*- # :Progetto: vcpx -- p4 source # :Creato: Fri Mar 16 23:06:43 PDT 2007 # :Autore: Dustin Sallings # :Licenza: GNU General Public License # """ This module implements the source backend for p4. 
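It drives the ``p4`` command line client through the bundled p4lib
wrapper rather than talking to the Perforce server directly.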
""" __docformat__ = 'reStructuredText' from vcpx.shwrap import ExternalCommand, PIPE from vcpx.config import ConfigurationError from vcpx.source import UpdatableSourceWorkingDir, GetUpstreamChangesetsFailure from vcpx.source import ChangesetApplicationFailure from vcpx.changes import Changeset from vcpx.tzinfo import UTC from datetime import datetime import exceptions import string import time import os import re import p4lib P4_DATE_FMT="%Y/%m/%d %H:%M:%S" class P4SourceWorkingDir(UpdatableSourceWorkingDir): branchRE = re.compile(r'^branch from (?P//.*?)#') def __getP4(self): p4=self.repository.EXECUTABLE args={} if self.repository.p4client is not None: args['client']=self.repository.p4client if self.repository.p4port is not None: args['port']=self.repository.p4port return p4lib.P4(p4=p4, **args) def __getNativeChanges(self, sincerev): changes=self.__getP4().changes(self.repository.depot_path + "...") changes.reverse() # Get rid of changes that are too low sincerev = int(sincerev) changes = [c for c in changes if int(c['change']) > sincerev] return changes def __parseDate(self, d): return datetime.fromtimestamp(time.mktime( time.strptime(d, P4_DATE_FMT)), UTC) def __adaptChanges(self, changes): # most of the info about a changeset is filled in later return [Changeset(str(c['change']), None, c['user'], None) for c in changes] def _getUpstreamChangesets(self, sincerev): return self.__adaptChanges(self.__getNativeChanges(sincerev)) def _localFilename(self, f, dp=None): if dp is None: dp=self.repository.depot_path trans=string.maketrans(" ", "_") fn=f['depotFile'] rv=fn if not fn.startswith(dp): return None rv=fn[len(dp):] if rv[0]=='/': rv=rv[1:] return rv def _applyChangeset(self, changeset): p4 = self.__getP4() desc = p4.describe(changeset.revision, shortForm=True) changeset.author = desc['user'] changeset.date = self.__parseDate(desc['date']) changeset.log = desc['description'] desc['files'] = [f for f in desc['files'] if self._localFilename(f) is not None] # check for added dirs for f in desc['files']: if f['action'] in ['add', 'branch']: name = self._localFilename(f) self._addParents(name, changeset) p4.sync(self.repository.depot_path + '...@' + str(changeset.revision)) # dict of {path:str -> e:ChangesetEntry} branched = dict() for f in desc['files']: name = self._localFilename(f) path = f['depotFile'] act = f['action'] if act == 'branch': e = changeset.addEntry(name, changeset.revision) e.action_kind = e.ADDED log = p4.filelog(path+'#'+str(f['rev']), maxRevs=1) # rev['notes'] may be empty notes = log[0]['revs'][0]['notes'] if len(notes) > 0: m = self.branchRE.match(notes[0]) if m: old = m.group('path') branched[old] = e self.log.info('Branch %r to %r' % (old, name)) for f in desc['files']: name = self._localFilename(f) path = f['depotFile'] act = f['action'] # branches were already handled if act == 'branch': continue # deletes might be renames if act == 'delete' and path in branched: e = branched[path] e.action_kind = e.RENAMED e.old_name = name self.log.info('Rename %r to %r' % (name, e.name)) continue e = changeset.addEntry(name, changeset.revision) if act == 'add': e.action_kind = e.ADDED elif act == 'delete': e.action_kind = e.DELETED elif act in ['edit', 'integrate']: e.action_kind = e.UPDATED else: assert False # check for removed dirs for f in desc['files']: if f['action'] == 'delete': name = self._localFilename(f) self._delParents(name, changeset) changes = ','.join([repr(e.name) for e in changeset.entries]) self.log.info('Updated %s', changes) return [] # Perforce doesn't 
track directories, so we have to notice # when a file add implies a directory add. Otherwise targets # like svn will barf. # xxx This is a little fragile, because it depends on having # a clean p4 workdir with sequential updates. It might make # more sense for the svn target to notice missing dir adds. def _addParents(self, name, changeset): parent = os.path.dirname(name) if parent == '': return path = os.path.join(self.repository.basedir, parent) if os.path.exists(path): return self._addParents(parent, changeset) self.log.info('Adding dir %r' % parent) e = changeset.addEntry(parent, changeset.revision) e.action_kind = e.ADDED e.is_directory = True os.mkdir(path) # Try to guess when a directory should be removed. # xxx This is also kind of fragile def _delParents(self, name, changeset): parent = os.path.dirname(name) if parent == '': return path = os.path.join(self.repository.basedir, parent) if not os.path.exists(path): return if len(os.listdir(path)) > 0: return self.log.info('Removing dir %r' % parent) e = changeset.addEntry(parent, changeset.revision) e.action_kind = e.DELETED e.is_directory = True os.rmdir(path) self._delParents(parent, changeset) def _checkoutUpstreamRevision(self, revision): force=False if revision == 'INITIAL': revision = self.__getNativeChanges(-1)[0]['change'] force=True p4=self.__getP4() desc=p4.describe(revision, shortForm=True) p4.sync(self.repository.depot_path + '...@' + str(revision), force=force) ts=self.__parseDate(desc['date']) return Changeset(revision, ts, desc['user'], desc['description']) tailor-0.9.35+darcs20090615/vcpx/repository/p4/p4lib.py0000644000175000017500000026440311215407134022274 0ustar vdanjeanvdanjean#!/usr/bin/env python # # arch-tag: ED474BFA-4169-11D8-904A-000393CFE6B8 # Copyright (c) 2002 Trent Mick # # Permission is hereby granted, free of charge, to any person obtaining a # copy of this software and associated documentation files (the # "Software"), to deal in the Software without restriction, including # without limitation the rights to use, copy, modify, merge, publish, # distribute, sublicense, and/or sell copies of the Software, and to # permit persons to whom the Software is furnished to do so, subject to # the following conditions: # # The above copyright notice and this permission notice shall be included # in all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS # OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF # MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. # IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY # CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, # TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE # SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. """ An OO interface to 'p4' (the Perforce client command line app). Usage: import p4lib p4 = p4lib.P4() result = p4.() For more information see the doc string on each command. 
For example: print p4lib.P4.opened.__doc__ Implemented commands: add (limited test suite), branch, branches, change, changes (no test suite), client, clients, delete, describe (no test suite), diff, edit (no test suite), files (no test suite), filelog (no test suite), flush, have (no test suite), label, labels, opened, print (as print_, no test suite), resolve, revert (no test suite), submit, sync, where (no test suite) Partially implemented commands: diff2 Unimplemented commands: admin, counter, counters, depot, depots, dirs, fix, fixes, fstat, group, groups, help (no point), integrate, integrated, job, jobs, jobspec, labelsync, lock, obliterate, passwd, protect, rename (no point), reopen, resolved, review, reviews, set, triggers, typemap, unlock, user, users, verify XXX Describe usage of parseForm() and makeForm(). """ #TODO: # - There is much similarity in some commands, e.g. clients, changes, # branches in one group; client, change, branch, label in another. # Should share implementation between these all. import os import sys import pprint import cmd import re import types import marshal import getopt import tempfile #---- exceptions class P4LibError(Exception): pass #---- global data _version_ = (0, 7, 2) #---- internal logging facility class _Logger: DEBUG, INFO, WARN, ERROR, CRITICAL = range(5) def __init__(self, threshold=None, streamOrFileName=sys.stderr): if threshold is None: self.threshold = self.WARN else: self.threshold = threshold if type(streamOrFileName) == types.StringType: self.stream = open(streamOrFileName, 'w') self._opennedStream = 1 else: self.stream = streamOrFileName self._opennedStream = 0 def __del__(self): if self._opennedStream: self.stream.close() def _getLevelName(self, level): levelNameMap = { self.DEBUG: "DEBUG", self.INFO: "INFO", self.WARN: "WARN", self.ERROR: "ERROR", self.CRITICAL: "CRITICAL", } return levelNameMap[level] def log(self, level, msg, *args): if level < self.threshold: return message = "%s: " % self._getLevelName(level).lower() message = message + (msg % args) + "\n" self.stream.write(message) self.stream.flush() def debug(self, msg, *args): self.log(self.DEBUG, msg, *args) def info(self, msg, *args): self.log(self.INFO, msg, *args) def warn(self, msg, *args): self.log(self.WARN, msg, *args) def error(self, msg, *args): self.log(self.ERROR, msg, *args) def fatal(self, msg, *args): self.log(self.CRITICAL, msg, *args) if 1: # normal log = _Logger(_Logger.WARN) else: # debugging log = _Logger(_Logger.DEBUG) #---- internal support stuff def _escapeArg(arg): """Escape the given command line argument for the shell.""" #XXX There is a *lot* more that we should escape here. #XXX This is also not right on Linux, just try putting 'p4' is a dir # with spaces. return arg.replace('"', r'\"').replace("'", r"\'") def _joinArgv(argv): r"""Join an arglist to a string appropriate for running. >>> import os >>> _joinArgv(['foo', 'bar "baz']) 'foo "bar \\"baz"' """ cmdstr = "" for arg in argv: # Quote args with '*' because don't want shell to expand the # argument. (XXX Perhaps that should only be done for Windows.) # if ' ' in arg or '*' in arg: # cmdstr += '"%s"' % _escapeArg(arg) # else: # cmdstr += _escapeArg(arg) # Why not always quote it? cmdstr += "'%s'" % _escapeArg(arg) cmdstr += ' ' if cmdstr.endswith(' '): cmdstr = cmdstr[:-1] # strip trailing space return cmdstr def _run(argv): """Prepare and run the given arg vector, 'argv', and return the results. Returns (, , ). Note: 'argv' may also just be the command string. 
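    Illustrative use:

        output, error, retval = _run(['p4', 'opened'])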
""" if type(argv) in (types.ListType, types.TupleType): cmd = _joinArgv(argv) else: cmd = argv log.debug("Running '%s'..." % cmd) if sys.platform.startswith('win'): i, o, e = os.popen3(cmd) output = o.read() error = e.read() i.close() e.close() retval = o.close() else: import popen2 p = popen2.Popen3(cmd, 1) i, o, e = p.tochild, p.fromchild, p.childerr output = o.read() error = e.read() i.close() o.close() e.close() rv = p.wait() if os.WIFEXITED(rv): retval = os.WEXITSTATUS(rv) else: raise P4LibError("Error running '%s', it did not exit "\ "properly: rv=%d" % (cmd, rv)) if retval: raise P4LibError("Error running '%s': error='%s' retval='%s'"\ % (cmd, error, retval)) log.debug("output='%s'", output) log.debug("error='%s'", error) log.debug("retval='%s'", retval) return output, error, retval def _specialsLast(a, b, specials): """A cmp-like function, sorting in alphabetical order with 'special's last. """ if a in specials and b in specials: return cmp(a, b) elif a in specials: return 1 elif b in specials: return -1 else: return cmp(a, b) #---- public stuff def makeForm(**kwargs): """Return an appropriate P4 form filled out with the given data. In general this just means tranforming each keyword and (string) value to separate blocks in the form. The section name is the capitalized keyword. Single line values go on the same line as the section name. Multi-line value succeed the section name, prefixed with a tab, except some special section names (e.g. 'differences'). Text for "special" sections are NOT indented, have a blank line after the header, and are placed at the end of the form. Sections are separated by a blank line. The 'files' key is handled specially. It is expected to be a list of dicts of the form: {'action': 'add', # 'action' may or may not be there 'depotFile': '//depot/test_edit_pending_change.txt'} As well, the 'change' value may be an int. """ # Do special preprocessing on the data. for key, value in kwargs.items(): if key == 'files': strval = '' for f in value: if f.has_key('action'): strval += '%(depotFile)s\t# %(action)s\n' % f else: strval += '%(depotFile)s\n' % f kwargs[key] = strval if key == 'change': kwargs[key] = str(value) # Create the form form = '' specials = ['differences'] keys = kwargs.keys() keys.sort(lambda a,b,s=specials: _specialsLast(a,b,s)) for key in keys: value = kwargs[key] if value is None: pass elif len(value.split('\n')) > 1: # a multi-line block form += '%s:\n' % key.capitalize() if key in specials: form += '\n' for line in value.split('\n'): if key in specials: form += line + '\n' else: form += '\t' + line + '\n' else: form += '%s:\t%s\n' % (key.capitalize(), value) form += '\n' return form def parseForm(content): """Parse an arbitrary Perforce form and return a dict result. The result is a dict with a key for each "section" in the form (the key name will be the section name lowercased), whose value will, in general, be a string with the following exceptions: - A "Files" section will translate into a list of dicts each with 'depotFile' and 'action' keys. - A "Change" value will be converted to an int if appropriate. """ if type(content) in types.StringTypes: lines = content.splitlines(1) else: lines = content # Example form: # # A Perforce Change Specification. # # # # Change: The change number. 'new' on a n... # # # to this changelist. You may de... # # Change: 1 # # Date: 2002/05/08 23:24:54 # # Description: # create the initial change # # Files: # //depot/test_edit_pending_change.txt # add spec = {} # Parse out all sections into strings. 
currkey = None # If non-None, then we are in a multi-line block. for line in lines: if line.strip().startswith('#'): continue # skip comment lines if currkey: # i.e. accumulating a multi-line block if line.startswith('\t'): spec[currkey] += line[1:] elif not line.strip(): spec[currkey] += '\n' else: # This is the start of a new section. Trim all # trailing newlines from block section, as # Perforce does. while spec[currkey].endswith('\n'): spec[currkey] = spec[currkey][:-1] currkey = None if not currkey: # i.e. not accumulating a multi-line block if not line.strip(): continue # skip empty lines key, remainder = line.split(':', 1) if not remainder.strip(): # this is a multi-line block currkey = key.lower() spec[currkey] = '' else: spec[key.lower()] = remainder.strip() if currkey: # Trim all trailing newlines from block section, as # Perforce does. while spec[currkey].endswith('\n'): spec[currkey] = spec[currkey][:-1] # Do any special processing on values. for key, value in spec.items(): if key == "change": try: spec[key] = int(value) except ValueError: pass elif key == "files": spec[key] = [] fileRe = re.compile('^(?P//.+?)\t'\ '# (?P\w+)$') for line in value.split('\n'): if not line.strip(): continue match = fileRe.match(line) try: spec[key].append(match.groupdict()) except AttributeError: pprint.pprint(value) pprint.pprint(spec) err = "Internal error: could not parse P4 form "\ "'Files:' section line: '%s'" % line raise P4LibError(err) return spec def makeOptv(**options): """Create a p4 option vector from the given p4 option dictionary. "options" is an option dictionary. Valid keys and values are defined by what class P4's constructor accepts via P4(**optd). Example: >>> makeOptv(client='swatter', dir='D:\\trentm') ['-c', 'client', '-d', 'D:\\trentm'] >>> makeOptv(client='swatter', dir=None) ['-c', 'client'] """ optv = [] for key, val in options.items(): if val is None: continue if key == 'client': optv.append('-c') elif key == 'dir': optv.append('-d') elif key == 'host': optv.append('-H') elif key == 'port': optv.append('-p') elif key == 'password': optv.append('-P') elif key == 'user': optv.append('-u') else: raise P4LibError("Unexpected keyword arg: '%s'" % key) optv.append(val) return optv def parseOptv(optv): """Return an option dictionary representing the given p4 option vector. "optv" is a list of p4 options. See 'p4 help usage' for a list. The returned option dictionary is suitable passing to the P4 constructor. Example: >>> parseP4Optv(['-c', 'swatter', '-d', 'D:\\trentm']) {'client': 'swatter', 'dir': 'D:\\trentm'} """ # Some of p4's options are not appropriate for later # invocations. For example, '-h' and '-V' override output from # running, say, 'p4 opened'; and '-G' and '-s' control the # output format which this module is parsing (hence this module # should control use of those options). optlist, dummy = getopt.getopt(optv, 'hVc:d:H:p:P:u:x:Gs') optd = {} for opt, optarg in optlist: if opt in ('-h', '-V', '-x'): raise P4LibError("The '%s' p4 option is not appropriate "\ "for p4lib.P4." % opt) elif opt in ('-G', '-s'): log.info("Ignoring '%s' option." % opt) elif opt == '-c': optd['client'] = optarg elif opt == '-d': optd['dir'] = optarg elif opt == '-H': optd['host'] = optarg elif opt == '-p': optd['port'] = optarg elif opt == '-P': optd['password'] = optarg elif opt == '-u': optd['user'] = optarg return optd class P4: """A proxy to the Perforce client app 'p4'.""" def __init__(self, p4='p4', **options): """Create a 'p4' proxy object. 
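
        For example (all values are illustrative placeholders):

            p4 = P4(port='perforce:1666', user='alice',
                    client='alice-ws')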
"p4" is the Perforce client to execute commands with. Defaults to 'p4'. Optional keyword arguments: "client" specifies the client name, overriding the value of $P4CLIENT in the environment and the default (the hostname). "dir" specifies the current directory, overriding the value of $PWD in the environment and the default (the current directory). "host" specifies the host name, overriding the value of $P4HOST in the environment and the default (the hostname). "port" specifies the server's listen address, overriding the value of $P4PORT in the environment and the default (perforce:1666). "password" specifies the password, overriding the value of $P4PASSWD in the environment. "user" specifies the user name, overriding the value of $P4USER, $USER, and $USERNAME in the environment. """ self.p4 = p4 self.optd = options self._optv = makeOptv(**self.optd) def _p4run(self, argv, **p4options): """Run the given p4 command. The current instance's p4 and p4 options (optionally overriden by **p4options) are used. The 3-tuple (, , ) is returned. """ if p4options: d = self.optd d.update(p4options) p4optv = makeOptv(**d) else: p4optv = self._optv argv = [self.p4] + p4optv + argv return _run(argv) def opened(self, files=[], allClients=0, change=None, _raw=0, **p4options): """Get a list of files opened in a pending changelist. "files" is a list of files or file wildcards to check. Defaults to the whole client view. "allClients" (-a) specifies to list opened files in all clients. "change" (-c) is a pending change with which to associate the opened file(s). Returns a list of dicts, each representing one opened file. The dict contains the keys 'depotFile', 'rev', 'action', 'change', 'type', and, as well, 'user' and 'client' if the -a option is used. If '_raw' is true then the return value is simply a dictionary with the unprocessed results of calling p4: {'stdout': , 'stderr': , 'retval': } """ # Output examples: # - normal: # //depot/apps/px/px.py#3 - edit default change (text) # - with '-a': # //depot/foo.txt#1 - edit change 12345 (text+w) by trentm@trentm-pliers # - none opened: # foo.txt - file(s) not opened on this client. optv = [] if allClients: optv += ['-a'] if change: optv += ['-c', str(change)] if type(files) in types.StringTypes: files = [files] argv = ['opened'] + optv if files: argv += files output, error, retval = self._p4run(argv, **p4options) if _raw: return {'stdout': output, 'stderr': error, 'retval': retval} lineRe = re.compile('''^ (?P.*?)\#(?P\d+) # //depot/foo.txt#1 \s-\s(?P\w+) # - edit \s(default\schange|change\s(?P\d+)) # change 12345 \s\((?P[\w+]+)\) # (text+w) (\sby\s)? # by ((?P[^\s@]+)@(?P[^\s@]+))? # trentm@trentm-pliers ''', re.VERBOSE) files = [] for line in output.splitlines(1): match = lineRe.search(line) if not match: raise P4LibError("Internal error: 'p4 opened' regex did not "\ "match '%s'. Please report this to the "\ "author." % line) file = match.groupdict() file['rev'] = int(file['rev']) if not file['change']: file['change'] = 'default' else: file['change'] = int(file['change']) for key in file.keys(): if file[key] is None: del file[key] files.append(file) return files def where(self, files=[], _raw=0, **p4options): """Show how filenames map through the client view. "files" is a list of files or file wildcards to check. Defaults to the whole client view. Returns a list of dicts, each representing one element of the mapping. Each mapping include a 'depotFile', 'clientFile', and 'localFile' and a 'minus' boolean (indicating if the entry is an Exclusion. 
If '_raw' is true then the return value is simply a dictionary with the unprocessed results of calling p4: {'stdout': , 'stderr': , 'retval': } """ # Output examples: # -//depot/foo/Py-2_1/... //trentm-ra/foo/Py-2_1/... c:\trentm\foo\Py-2_1\... # //depot/foo/win/... //trentm-ra/foo/win/... c:\trentm\foo\win\... # //depot/foo/Py Exts.dsw //trentm-ra/foo/Py Exts.dsw c:\trentm\foo\Py Exts.dsw # //depot/foo/%1 //trentm-ra/foo/%1 c:\trentm\foo\%1 # The last one is surprising. It comes from using '*' in the # client spec. if type(files) in types.StringTypes: files = [files] argv = ['where'] if files: argv += files output, error, retval = self._p4run(argv, **p4options) if _raw: return {'stdout': output, 'stderr': error, 'retval': retval} results = [] for line in output.splitlines(1): file = {} if line[-1] == '\n': line = line[:-1] if line.startswith('-'): file['minus'] = 1 line = line[1:] else: file['minus'] = 0 depotFileStart = line.find('//') clientFileStart = line.find('//', depotFileStart+2) file['depotFile'] = line[depotFileStart:clientFileStart-1] if sys.platform.startswith('win'): assert ':' not in file['depotFile'],\ "Current parsing cannot handle this line '%s'." % line localFileStart = line.find(':', clientFileStart+2) - 1 else: assert file['depotFile'].find(' /') == -1,\ "Current parsing cannot handle this line '%s'." % line localFileStart = line.find(' /', clientFileStart+2) + 1 file['clientFile'] = line[clientFileStart:localFileStart-1] file['localFile'] = line[localFileStart:] results.append(file) return results def have(self, files=[], _raw=0, **p4options): """Get list of file revisions last synced. "files" is a list of files or file wildcards to check. Defaults to the whole client view. "options" can be any of p4 option specifiers allowed by .__init__() (they override values given in the constructor for just this command). Returns a list of dicts, each representing one "hit". Each "hit" includes 'depotFile', 'rev', and 'localFile' keys. If '_raw' is true then the return value is simply a dictionary with the unprocessed results of calling p4: {'stdout': , 'stderr': , 'retval': } """ if type(files) in types.StringTypes: files = [files] argv = ['have'] if files: argv += files output, error, retval = self._p4run(argv, **p4options) if _raw: return {'stdout': output, 'stderr': error, 'retval': retval} # Output format is 'depot-file#revision - client-file' hits = [] for line in output.splitlines(1): if line[-1] == '\n': line = line[:-1] hit = {} hit['depotFile'], line = line.split('#') hit['rev'], hit['localFile'] = line.split(' - ', 1) hit['rev'] = int(hit['rev']) hits.append(hit) return hits def describe(self, change, diffFormat='', shortForm=0, _raw=0, **p4options): """Get a description of the given changelist. "change" is the changelist number to describe. "diffFormat" (-d) is a flag to pass to the built-in diff routine to control the output format. Valid values are '' (plain, default), 'n' (RCS), 'c' (context), 's' (summary), 'u' (unified). "shortForm" (-s) specifies to exclude the diff from the description. Returns a dict representing the change description. Keys are: 'change', 'date', 'client', 'user', 'description', 'files', 'diff' (the latter is not included iff 'shortForm'). 
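
        For example (the change number is a placeholder):

            desc = p4.describe(1234, shortForm=1)
            print desc['user'], desc['date']
            for f in desc['files']:
                print f['action'], f['depotFile']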
If '_raw' is true then the return value is simply a dictionary with the unprocessed results of calling p4: {'stdout': , 'stderr': , 'retval': } """ if diffFormat not in ('', 'n', 'c', 's', 'u'): raise P4LibError("Incorrect diff format flag: '%s'" % diffFormat) optv = [] if diffFormat: optv.append('-d%s' % diffFormat) if shortForm: optv.append('-s') argv = ['describe'] + optv + [str(change)] output, error, retval = self._p4run(argv, **p4options) if _raw: return {'stdout': output, 'stderr': error, 'retval': retval} desc = {} lines = output.splitlines(1) lines = [line for line in lines if not line.strip().startswith("#")] changeRe = re.compile('^Change (?P\d+) by (?P[^\s@]+)@'\ '(?P[^\s@]+) on (?P[\d/ :]+)$') desc = changeRe.match(lines[0]).groupdict() desc['change'] = int(desc['change']) filesIdx = lines.index("Affected files ...\n") desc['description'] = "" for line in lines[2:filesIdx-1]: desc['description'] += line[1:] # drop the leading \t if shortForm: diffsIdx = len(lines) else: diffsIdx = lines.index("Differences ...\n") desc['files'] = [] fileRe = re.compile('^... (?P.+?)#(?P\d+) '\ '(?P\w+)$') for line in lines[filesIdx+2:diffsIdx-1]: file = fileRe.match(line).groupdict() file['rev'] = int(file['rev']) desc['files'].append(file) if not shortForm: desc['diff'] = self._parseDiffOutput(lines[diffsIdx+2:]) return desc def change(self, files=None, description=None, change=None, delete=0, _raw=0, **p4options): """Create, update, delete, or get a changelist description. Creating a changelist: p4.change([], "change description") OR p4.change(description="change description for all opened files") Updating a pending changelist: p4.change(description="change description", change=) OR p4.change(files=[], change=) Deleting a pending changelist: p4.change(change=, delete=1) Getting a change description: ch = p4.change(change=) Returns a dict. When getting a change desc the dict will include 'change', 'user', 'description', 'status', and possibly 'files' keys. For all other actions the dict will include a 'change' key, an 'action' key iff the intended action was successful, and possibly a 'comment' key. If '_raw' is true then the return value is simply a dictionary with the unprocessed results of calling p4: {'stdout': , 'stderr': , 'retval': } Limitations: The -s (jobs) and -f (force) flags are not supported. """ #XXX .change() API should look more like .client() and .label(), # i.e. passing around a dictionary. Should strings also be # allowed: presumed to be forms? formfile = None try: if type(files) in types.StringTypes: files = [files] action = None # note action to know how to parse output below if change and files is None and not description: if delete: # Delete a pending change list. action = 'delete' argv = ['change', '-d', str(change)] else: # Get a change description. action = 'get' argv = ['change', '-o', str(change)] else: if delete: raise P4LibError("Cannot specify 'delete' with either "\ "'files' or 'description'.") if change: # Edit a current pending changelist. action = 'update' ch = self.change(change=change) if files is None: # 'files' was not specified. pass elif files == []: # Explicitly specified no files. # Explicitly specified no files. ch['files'] = [] else: depotfiles = [{'depotFile': f['depotFile']}\ for f in self.where(files)] ch['files'] = depotfiles if description: ch['description'] = description form = makeForm(**ch) elif description: # Creating a pending changelist. action = 'create' # Empty 'files' should default to all opened files in the # 'default' changelist. 
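                    # (Below, files=None means "use all opened files",
                    # while files=[] means "explicitly no files"; the
                    # two cases are deliberately distinct.)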
if files is None: files = [{'depotFile': f['depotFile']}\ for f in self.opened()] elif files == []: # Explicitly specified no files. pass else: #TODO: Add test to expect P4LibError if try to use # p4 wildcards in files. Currently *do* get # correct behaviour. files = [{'depotFile': f['depotFile']}\ for f in self.where(files)] form = makeForm(files=files, description=description, change='new') else: raise P4LibError("Incomplete/missing arguments.") # Build submission form file. formfile = tempfile.mktemp() fout = open(formfile, 'w') fout.write(form) fout.close() argv = ['change', '-i', '<', formfile] output, error, retval = self._p4run(argv, **p4options) if _raw: return {'stdout': output, 'stderr': error, 'retval': retval} if action == 'get': change = parseForm(output) elif action in ('create', 'update', 'delete'): lines = output.splitlines(1) resultRes = [ re.compile("^Change (?P\d+)"\ " (?Pcreated|updated|deleted)\.$"), re.compile("^Change (?P\d+) (?Pcreated)"\ " (?P.+?)\.$"), re.compile("^Change (?P\d+) (?Pupdated)"\ ", (?P.+?)\.$"), # e.g., Change 1 has 1 open file(s) associated with it and can't be deleted. re.compile("^Change (?P\d+) (?P.+?)\.$"), ] for resultRe in resultRes: match = resultRe.match(lines[0]) if match: change = match.groupdict() change['change'] = int(change['change']) break else: err = "Internal error: could not parse change '%s' "\ "output: '%s'" % (action, output) raise P4LibError(err) else: raise P4LibError("Internal error: unexpected action: '%s'"\ % action) return change finally: if formfile: os.remove(formfile) def changes(self, files=[], followIntegrations=0, longOutput=0, max=None, status=None, _raw=0, **p4options): """Return a list of pending and submitted changelists. "files" is a list of files or file wildcards that will limit the results to changes including these files. Defaults to the whole client view. "followIntegrations" (-i) specifies to include any changelists integrated into the given files. "longOutput" (-l) includes changelist descriptions. "max" (-m) limits the results to the given number of most recent relevant changes. "status" (-s) limits the output to 'pending' or 'submitted' changelists. Returns a list of dicts, each representing one change spec. Keys are: 'change', 'date', 'client', 'user', 'description'. If '_raw' is true then the return value is simply a dictionary with the unprocessed results of calling p4: {'stdout': , 'stderr': , 'retval': } """ if max is not None and type(max) != types.IntType: raise P4LibError("Incorrect 'max' value. It must be an integer: "\ "'%s' (type '%s')" % (max, type(max))) if status is not None and status not in ("pending", "submitted"): raise P4LibError("Incorrect 'status' value: '%s'" % status) if type(files) in types.StringTypes: files = [files] optv = [] if followIntegrations: optv.append('-i') if longOutput: optv.append('-l') if max is not None: optv += ['-m', str(max)] if status is not None: optv += ['-s', status] argv = ['changes'] + optv if files: argv += files output, error, retval = self._p4run(argv, **p4options) if _raw: return {'stdout': output, 'stderr': error, 'retval': retval} changes = [] if longOutput: changeRe = re.compile("^Change (?P\d+) on "\ "(?P[\d/]+) by (?P[^\s@]+)@"\ "(?P[^\s@]+)$") for line in output.splitlines(1): if not line.strip(): continue # skip blank lines if line.startswith('\t'): # Append this line (minus leading tab) to last # change's description. 
changes[-1]['description'] += line[1:] else: change = changeRe.match(line).groupdict() change['change'] = int(change['change']) change['description'] = '' changes.append(change) else: changeRe = re.compile("^Change (?P\d+) on "\ "(?P[\d/]+) by (?P[^\s@]+)@"\ "(?P[^\s@]+) (\*pending\* )?"\ "'(?P.*?)'$") for line in output.splitlines(1): match = changeRe.match(line) if match: change = match.groupdict() change['change'] = int(change['change']) changes.append(change) else: raise P4LibError("Internal error: could not parse "\ "'p4 changes' output line: '%s'" % line) return changes def sync(self, files=[], force=0, dryrun=0, _raw=0, **p4options): """Synchronize the client with its view of the depot. "files" is a list of files or file wildcards to sync. Defaults to the whole client view. "force" (-f) forces resynchronization even if the client already has the file, and clobbers writable files. "dryrun" (-n) causes sync to go through the motions and report results but not actually make any changes. Returns a list of dicts representing the sync'd files. Keys are: 'depotFile', 'rev', 'comment', and possibly 'notes'. If '_raw' is true then the return value is simply a dictionary with the unprocessed results of calling p4: {'stdout': , 'stderr': , 'retval': } """ if type(files) in types.StringTypes: files = [files] optv = [] if force: optv.append('-f') if dryrun: optv.append('-n') argv = ['sync'] + optv if files: argv += files output, error, retval = self._p4run(argv, **p4options) if _raw: return {'stdout': output, 'stderr': error, 'retval': retval} # Forms of output: # //depot/foo#1 - updating C:\foo # //depot/foo#1 - is opened and not being changed # //depot/foo#1 - is opened at a later revision - not changed # //depot/foo#1 - deleted as C:\foo # ... //depot/foo - must resolve #2 before submitting # There are probably others forms. hits = [] lineRe = re.compile('^(?P.+?)#(?P\d+) - '\ '(?P.+?)$') for line in output.splitlines(1): if line.startswith('... '): note = line.split(' - ')[-1].strip() hits[-1]['notes'].append(note) continue match = lineRe.match(line) if match: hit = match.groupdict() hit['rev'] = int(hit['rev']) hit['notes'] = [] hits.append(hit) continue raise P4LibError("Internal error: could not parse 'p4 sync'"\ "output line: '%s'" % line) return hits def edit(self, files, change=None, filetype=None, _raw=0, **p4options): """Open an existing file for edit. "files" is a list of files or file wildcards to open for edit. "change" (-c) is a pending changelist number in which to put the opened files. "filetype" (-t) specifies to explicitly open the files with the given filetype. Returns a list of dicts representing commentary on each file opened for edit. Keys are: 'depotFile', 'rev', 'comment', 'notes'. If '_raw' is true then the return value is simply a dictionary with the unprocessed results of calling p4: {'stdout': , 'stderr': , 'retval': } """ if type(files) in types.StringTypes: files = [files] optv = [] if change: optv += ['-c', str(change)] if filetype: optv += ['-t', filetype] argv = ['edit'] + optv + files output, error, retval = self._p4run(argv, **p4options) if _raw: return {'stdout': output, 'stderr': error, 'retval': retval} # Example output: # //depot/build.py#142 - opened for edit # ... //depot/build.py - must sync/resolve #143,#148 before submitting # ... //depot/build.py - also opened by davida@davida-bertha # ... //depot/build.py - also opened by davida@davida-loom # ... //depot/build.py - also opened by davida@davida-marteau # ... 
//depot/build.py - also opened by trentm@trentm-razor # //depot/BuildNum.txt#3 - currently opened for edit hits = [] lineRe = re.compile('^(?P.+?)#(?P\d+) - '\ '(?P.*)$') for line in output.splitlines(1): if line.startswith("..."): # this is a note for the latest hit note = line.split(' - ')[-1].strip() hits[-1]['notes'].append(note) else: hit = lineRe.match(line).groupdict() hit['rev'] = int(hit['rev']) hit['notes'] = [] hits.append(hit) return hits def add(self, files, change=None, filetype=None, _raw=0, **p4options): """Open a new file to add it to the depot. "files" is a list of files or file wildcards to open for add. "change" (-c) is a pending changelist number in which to put the opened files. "filetype" (-t) specifies to explicitly open the files with the given filetype. Returns a list of dicts representing commentary on each file *attempted* to be opened for add. Keys are: 'depotFile', 'rev', 'comment', 'notes'. If a given file is NOT added then the 'rev' will be None. If '_raw' is true then the return value is simply a dictionary with the unprocessed results of calling p4: {'stdout': , 'stderr': , 'retval': } """ if type(files) in types.StringTypes: files = [files] optv = [] if change: optv += ['-c', str(change)] if filetype: optv += ['-t', filetype] argv = ['add'] + optv + files output, error, retval = self._p4run(argv, **p4options) if _raw: return {'stdout': output, 'stderr': error, 'retval': retval} # Example output: # //depot/apps/px/p4.py#1 - opened for add # c:\trentm\apps\px\p4.py - missing, assuming text. # # //depot/apps/px/px.py - can't add (already opened for edit) # ... //depot/apps/px/px.py - warning: add of existing file # # //depot/apps/px/px.cpp - can't add existing file # # //depot/apps/px/t#1 - opened for add # hits = [] hitRe = re.compile('^(?P//.+?)(#(?P\d+))? - '\ '(?P.*)$') for line in output.splitlines(1): match = hitRe.match(line) if match: hit = match.groupdict() if hit['rev'] is not None: hit['rev'] = int(hit['rev']) hit['notes'] = [] hits.append(hit) else: if line.startswith("..."): note = line.split(' - ')[-1].strip() else: note = line.strip() hits[-1]['notes'].append(note) return hits def files(self, files, _raw=0, **p4options): """List files in the depot. "files" is a list of files or file wildcards to list. Defaults to the whole client view. Returns a list of dicts, each representing one matching file. Keys are: 'depotFile', 'rev', 'type', 'change', 'action'. If '_raw' is true then the return value is simply a dictionary with the unprocessed results of calling p4: {'stdout': , 'stderr': , 'retval': } """ if type(files) in types.StringTypes: files = [files] if not files: raise P4LibError("Missing/wrong number of arguments.") argv = ['files'] + files output, error, retval = self._p4run(argv, **p4options) if _raw: return {'stdout': output, 'stderr': error, 'retval': retval} hits = [] fileRe = re.compile("^(?P//.*?)#(?P\d+) - "\ "(?P\w+) change (?P\d+) "\ "\((?P[\w+]+)\)$") for line in output.splitlines(1): match = fileRe.match(line) hit = match.groupdict() hit['rev'] = int(hit['rev']) hit['change'] = int(hit['change']) hits.append(hit) return hits def filelog(self, files, followIntegrations=0, longOutput=0, maxRevs=None, _raw=0, **p4options): """List revision histories of files. "files" is a list of files or file wildcards to describe. "followIntegrations" (-i) specifies to follow branches. "longOutput" (-l) includes changelist descriptions. "maxRevs" (-m) limits the results to the given number of most recent revisions. Returns a list of hits. 
Each hit is a dict with the following keys: 'depotFile', 'revs'. 'revs' is a list of dicts, each representing one submitted revision of 'depotFile' and containing the following keys: 'action', 'change', 'client', 'date', 'type', 'notes', 'rev', 'user'. If '_raw' is true then the return value is simply a dictionary with the unprocessed results of calling p4: {'stdout': , 'stderr': , 'retval': } """ if maxRevs is not None and type(maxRevs) != types.IntType: raise P4LibError("Incorrect 'maxRevs' value. It must be an "\ "integer: '%s' (type '%s')"\ % (maxRevs, type(maxRevs))) if type(files) in types.StringTypes: files = [files] if not files: raise P4LibError("Missing/wrong number of arguments.") optv = [] if followIntegrations: optv.append('-i') if longOutput: optv.append('-l') if maxRevs is not None: optv += ['-m', str(maxRevs)] argv = ['filelog'] + optv + files output, error, retval = self._p4run(argv, **p4options) if _raw: return {'stdout': output, 'stderr': error, 'retval': retval} hits = [] revRe = re.compile("^... #(?P\d+) change (?P\d+) "\ "(?P\w+) on (?P[\d/]+) by "\ "(?P[^\s@]+)@(?P[^\s@]+) "\ "\((?P[\w+]+)\)( '(?P.*?)')?$") for line in output.splitlines(1): if longOutput and not line.strip(): continue # skip blank lines elif line.startswith('//'): hit = {'depotFile': line.strip(), 'revs': []} hits.append(hit) elif line.startswith('... ... '): hits[-1]['revs'][-1]['notes'].append(line[8:].strip()) elif line.startswith('... '): match = revRe.match(line) if match: d = match.groupdict('') d['change'] = int(d['change']) d['rev'] = int(d['rev']) hits[-1]['revs'].append(d) hits[-1]['revs'][-1]['notes'] = [] else: raise P4LibError("Internal parsing error: '%s'" % line) elif longOutput and line.startswith('\t'): # Append this line (minus leading tab) to last hit's # last rev's description. hits[-1]['revs'][-1]['description'] += line[1:] else: raise P4LibError("Unexpected 'p4 filelog' output: '%s'"\ % line) return hits def print_(self, files, localFile=None, quiet=0, **p4options): """Retrieve depot file contents. "files" is a list of files or file wildcards to print. "localFile" (-o) is the name of a local file in which to put the output text. "quiet" (-q) suppresses some file meta-information. Returns a list of dicts, each representing one matching file. Keys are: 'depotFile', 'rev', 'type', 'change', 'action', and 'text'. If 'quiet', the first five keys will not be present. The 'text' key will not be present if the file is binary. If both 'quiet' and 'localFile', there will be no hits at all. """ if type(files) in types.StringTypes: files = [files] if not files: raise P4LibError("Missing/wrong number of arguments.") optv = [] if localFile: optv += ['-o', localFile] if quiet: optv.append('-q') # There is *no* way to properly and reliably parse out multiple file # output without using -s or -G. Use the latter. if p4options: d = self.optd d.update(p4options) p4optv = makeOptv(**d) else: p4optv = self._optv argv = [self.p4, '-G'] + p4optv + ['print'] + optv + files cmd = _joinArgv(argv) log.debug("popen3 '%s'..." % cmd) i, o, e = os.popen3(cmd) hits = [] fileRe = re.compile("^(?P//.*?)#(?P\d+) - "\ "(?P\w+) change (?P\d+) "\ "\((?P[\w+]+)\)$") try: startHitWithNextNode = 1 while 1: node = marshal.load(o) if node['code'] == 'info': # Always start a new hit with an 'info' node. 
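                    # ('p4 -G' writes a stream of marshalled dicts: an
                    # 'info' record introduces each file, then one or
                    # more 'text' records carry its contents, which are
                    # concatenated onto the current hit below.)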
match = fileRe.match(node['data']) hit = match.groupdict() hit['change'] = int(hit['change']) hit['rev'] = int(hit['rev']) hits.append(hit) startHitWithNextNode = 0 elif node['code'] == 'text': if startHitWithNextNode: hit = {'text': node['data']} hits.append(hit) else: if not hits[-1].has_key('text')\ or hits[-1]['text'] is None: hits[-1]['text'] = node['data'] else: hits[-1]['text'] += node['data'] startHitWithNextNode = not node['data'] except EOFError: pass return hits def diff(self, files=[], diffFormat='', force=0, satisfying=None, text=0, _raw=0, **p4options): """Display diff of client files with depot files. "files" is a list of files or file wildcards to diff. "diffFormat" (-d) is a flag to pass to the built-in diff routine to control the output format. Valid values are '' (plain, default), 'n' (RCS), 'c' (context), 's' (summary), 'u' (unified). "force" (-f) forces a diff of every file. "satifying" (-s) limits the output to the names of files satisfying certain criteria: 'a' Opened files that are different than the revision in the depot, or missing. 'd' Unopened files that are missing on the client. 'e' Unopened files that are different than the revision in the depot. 'r' Opened files that are the same as the revision in the depot. "text" (-t) forces diffs of non-text files. Returns a list of dicts representing each file diff'd. If "satifying" is specified each dict will simply include a 'localFile' key. Otherwise, each dict will include 'localFile', 'depotFile', 'rev', and 'binary' (boolean) keys and possibly a 'text' or a 'notes' key iff there are any differences. Generally you will get a 'notes' key for differing binary files. If '_raw' is true then the return value is simply a dictionary with the unprocessed results of calling p4: {'stdout': , 'stderr': , 'retval': } """ if type(files) in types.StringTypes: files = [files] if diffFormat not in ('', 'n', 'c', 's', 'u'): raise P4LibError("Incorrect diff format flag: '%s'" % diffFormat) if satisfying is not None\ and satisfying not in ('a', 'd', 'e', 'r'): raise P4LibError("Incorrect 'satisfying' flag: '%s'" % satisfying) optv = [] if diffFormat: optv.append('-d%s' % diffFormat) if satisfying: optv.append('-s%s' % satisfying) if force: optv.append('-f') if text: optv.append('-t') # There is *no* to properly and reliably parse out multiple file # output without using -s or -G. Use the latter. (XXX Huh?) argv = ['diff'] + optv + files output, error, retval = self._p4run(argv, **p4options) if _raw: return {'stdout': output, 'stderr': error, 'retval': retval} if satisfying is not None: hits = [{'localFile': line[:-1]} for line in output.splitlines(1)] else: hits = self._parseDiffOutput(output) return hits def _parseDiffOutput(self, output): if type(output) in types.StringTypes: outputLines = output.splitlines(1) else: outputLines = output hits = [] # Example header lines: # - from 'p4 describe': # ==== //depot/apps/px/ReadMe.txt#5 (text) ==== # - from 'p4 diff': # ==== //depot/apps/px/p4lib.py#12 - c:\trentm\apps\px\p4lib.py ==== # ==== //depot/foo.doc#42 - c:\trentm\foo.doc ==== (binary) header1Re = re.compile("^==== (?P//.*?)#(?P\d+) "\ "\((?P\w+)\) ====$") header2Re = re.compile("^==== (?P//.*?)#(?P\d+) - "\ "(?P.+?) 
===="\ "(?P \(binary\))?$") for line in outputLines: header1 = header1Re.match(line) header2 = header2Re.match(line) if header1: hit = header1.groupdict() hit['rev'] = int(hit['rev']) hits.append(hit) elif header2: hit = header2.groupdict() hit['rev'] = int(hit['rev']) hit['binary'] = not not hit['binary'] # get boolean value hits.append(hit) elif (len(hits) > 0) and (not hits[-1].has_key('text'))\ and line == "(... files differ ...)\n": hits[-1]['notes'] = [line] elif len(hits) > 0: # This is a diff line. if not hits[-1].has_key('text'): hits[-1]['text'] = '' # XXX 'p4 describe' diff text includes a single # blank line after each header line before the # actual diff. Should this be stripped? hits[-1]['text'] += line return hits def diff2(self, file1, file2, diffFormat='', quiet=0, text=0, **p4options): """Compare two depot files. "file1" and "file2" are the two files to diff. "diffFormat" (-d) is a flag to pass to the built-in diff routine to control the output format. Valid values are '' (plain, default), 'n' (RCS), 'c' (context), 's' (summary), 'u' (unified). "quiet" (-q) suppresses some meta information and all information if the files do not differ. Returns a dict representing the diff. Keys are: 'depotFile1', 'rev1', 'type1', 'depotFile2', 'rev2', 'type2', 'summary', 'notes', 'text'. There may not be a 'text' key if the files are the same or are binary. The first eight keys will not be present if 'quiet'. Note that the second 'p4 diff2' style is not supported: p4 diff2 [ -d -q -t ] -b branch [ [ file1 ] file2 ] """ if diffFormat not in ('', 'n', 'c', 's', 'u'): raise P4LibError("Incorrect diff format flag: '%s'" % diffFormat) optv = [] if diffFormat: optv.append('-d%s' % diffFormat) if quiet: optv.append('-q') if text: optv.append('-t') # There is *no* way to properly and reliably parse out multiple # file output without using -s or -G. Use the latter. if p4options: d = self.optd d.update(p4options) p4optv = makeOptv(**d) else: p4optv = self._optv argv = [self.p4, '-G'] + p4optv + ['diff2'] + optv + [file1, file2] cmd = _joinArgv(argv) i, o, e = os.popen3(cmd) diff = {} infoRe = re.compile("^==== (?P.+?)#(?P\d+) "\ "\((?P[\w+]+)\) - "\ "(?P.+?)#(?P\d+) "\ "\((?P[\w+]+)\) "\ "==== (?P\w+)$") try: while 1: node = marshal.load(o) if node['code'] == 'info'\ and node['data'] == '(... files differ ...)': if diff.has_key('notes'): diff['notes'].append(node['data']) else: diff['notes'] = [ node['data'] ] elif node['code'] == 'info': match = infoRe.match(node['data']) d = match.groupdict() d['rev1'] = int(d['rev1']) d['rev2'] = int(d['rev2']) diff.update( match.groupdict() ) elif node['code'] == 'text': if not diff.has_key('text') or diff['text'] is None: diff['text'] = node['data'] else: diff['text'] += node['data'] except EOFError: pass return diff def revert(self, files=[], change=None, unchangedOnly=0, _raw=0, **p4options): """Discard changes for the given opened files. "files" is a list of files or file wildcards to revert. Default to the whole client view. "change" (-c) will limit to files opened in the given changelist. "unchangedOnly" (-a) will only revert opened files that are not different than the version in the depot. Returns a list of dicts representing commentary on each file reverted. Keys are: 'depotFile', 'rev', 'comment'. 
If '_raw' is true then the return value is simply a dictionary with the unprocessed results of calling p4: {'stdout': , 'stderr': , 'retval': } """ if type(files) in types.StringTypes: files = [files] optv = [] if change: optv += ['-c', str(change)] if unchangedOnly: optv += ['-a'] if not unchangedOnly and not files: raise P4LibError("Missing/wrong number of arguments.") argv = ['revert'] + optv + files output, error, retval = self._p4run(argv, **p4options) if _raw: return {'stdout': output, 'stderr': error, 'retval': retval} # Example output: # //depot/hello.txt#1 - was edit, reverted # //depot/test_g.txt#none - was add, abandoned hits = [] hitRe = re.compile('^(?P//.+?)(#(?P\w+))? - '\ '(?P.*)$') for line in output.splitlines(1): match = hitRe.match(line) if match: hit = match.groupdict() try: hit['rev'] = int(hit['rev']) except ValueError: pass hits.append(hit) else: raise P4LibError("Internal parsing error: '%s'" % line) return hits def resolve(self, files=[], autoMode='', force=0, dryrun=0, text=0, verbose=0, _raw=0, **p4options): """Merge open files with other revisions or files. This resolve, for obvious reasons, only supports the options to 'p4 resolve' that will result is *no* command line interaction. 'files' is a list of files, of file wildcards, to resolve. 'autoMode' (-a*) tells how to resolve merges. See below for valid values. 'force' (-f) allows previously resolved files to be resolved again. 'dryrun' (-n) lists the integrations that *would* be performed without performing them. 'text' (-t) will force a textual merge, even for binary file types. 'verbose' (-v) will cause markers to be placed in all changed files not just those that conflict. Valid values of 'autoMode' are: '' '-a' I believe this is equivalent to '-am'. 'f', 'force' '-af' Force acceptance of merged files with conflicts. 'm', 'merge' '-am' Attempts to merge. 's', 'safe' '-as' Does not attempt to merge. 't', 'theirs' '-at' Accepts "their" changes, OVERWRITING yours. 'y', 'yours' '-ay' Accepts your changes, OVERWRITING "theirs". Invalid values of 'autoMode': None As if no -a option had been specified. Invalid because this may result in command line interaction. Returns a list of dicts representing commentary on each file for which a resolve was attempted. Keys are: 'localFile', 'clientFile' 'comment', and 'action'; and possibly 'diff chunks' if there was anything to merge. If '_raw' is true then the return value is simply a dictionary with the unprocessed results of calling p4: {'stdout': , 'stderr': , 'retval': } """ if type(files) in types.StringTypes: files = [files] optv = [] if autoMode is None: raise P4LibError("'autoMode' must be non-None, otherwise "\ "'p4 resolve' may initiate command line "\ "interaction, which will hang this method.") else: optv += ['-a%s' % autoMode] if force: optv += ['-f'] if dryrun: optv += ['-n'] if text: optv += ['-t'] if verbose: optv += ['-v'] argv = ['resolve'] + optv + files output, error, retval = self._p4run(argv, **p4options) if _raw: return {'stdout': output, 'stderr': error, 'retval': retval} hits = [] # Example output: # C:\rootdir\foo.txt - merging //depot/foo.txt#2 # Diff chunks: 0 yours + 0 theirs + 0 both + 1 conflicting # //client-name/foo.txt - resolve skipped. 
# Proposed result: # [{'localFile': 'C:\\rootdir\\foo.txt', # 'depotFile': '//depot/foo.txt', # 'rev': 2 # 'clientFile': '//client-name/foo.txt', # 'diff chunks': {'yours': 0, 'theirs': 0, 'both': 0, # 'conflicting': 1} # 'action': 'resolve skipped'}] # # Example output: # C:\rootdir\foo.txt - vs //depot/foo.txt#2 # //client-name/foo.txt - ignored //depot/foo.txt # Proposed result: # [{'localFile': 'C:\\rootdir\\foo.txt', # 'depotFile': '//depot/foo.txt', # 'rev': 2 # 'clientFile': '//client-name/foo.txt', # 'diff chunks': {'yours': 0, 'theirs': 0, 'both': 0, # 'conflicting': 1} # 'action': 'ignored //depot/foo.txt'}] # introRe = re.compile('^(?P.+?) - (merging|vs) '\ '(?P//.+?)#(?P\d+)$') diffRe = re.compile('^Diff chunks: (?P\d+) yours \+ '\ '(?P\d+) theirs \+ (?P\d+) both '\ '\+ (?P\d+) conflicting$') actionRe = re.compile('^(?P//.+?) - (?P.+?)(\.)?$') for line in output.splitlines(1): match = introRe.match(line) if match: hit = match.groupdict() hit['rev'] = int(hit['rev']) hits.append(hit) log.info("parsed resolve 'intro' line: '%s'" % line.strip()) continue match = diffRe.match(line) if match: diff = match.groupdict() diff['yours'] = int(diff['yours']) diff['theirs'] = int(diff['theirs']) diff['both'] = int(diff['both']) diff['conflicting'] = int(diff['conflicting']) hits[-1]['diff chunks'] = diff log.info("parsed resolve 'diff' line: '%s'" % line.strip()) continue match = actionRe.match(line) if match: hits[-1].update(match.groupdict()) log.info("parsed resolve 'action' line: '%s'" % line.strip()) continue raise P4LibError("Internal error: could not parse 'p4 resolve' "\ "output line: line='%s' argv=%s" % (line, argv)) return hits def submit(self, files=None, description=None, change=None, _raw=0, **p4options): """Submit open files to the depot. There are two ways to call this method: - Submit specific files: p4.submit([...], "checkin message") - Submit a pending changelist: p4.submit(change=123) Note: 'change' should always be specified with a keyword argument. I reserve the right to extend this method by adding kwargs *before* the change arg. So p4.submit(None, None, 123) is not guaranteed to work. Returns a dict with a 'files' key (which is a list of dicts with 'depotFile', 'rev', and 'action' keys), and 'action' (=='submitted') and 'change' keys iff the submit is succesful. Note: An equivalent for the '-s' option to 'p4 submit' is not supported, because I don't know how to use it and have never. Nor is the '-i' option supported, although it *is* used internally to drive 'p4 submit'. If '_raw' is true then the return value is simply a dictionary with the unprocessed results of calling p4: {'stdout': , 'stderr': , 'retval': } """ #TODO: # - test when submission fails because files need to be # resolved # - Structure this code more like change, client, label, & branch. formfile = None try: if type(files) in types.StringTypes: files = [files] if change and not files and not description: argv = ['submit', '-c', str(change)] elif not change and files is not None and description: # Empty 'files' should default to all opened files in the # 'default' changelist. if not files: files = [{'depotFile': f['depotFile']}\ for f in self.opened()] else: #TODO: Add test to expect P4LibError if try to use # p4 wildcards in files. files = [{'depotFile': f['depotFile']}\ for f in self.where(files)] # Build submission form file. 
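                # (tempfile.mktemp() is race-prone by modern standards;
                # tempfile.mkstemp() would be a safer choice, though the
                # original interface is preserved here.)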
formfile = tempfile.mktemp() form = makeForm(files=files, description=description, change='new') fout = open(formfile, 'w') fout.write(form) fout.close() argv = ['submit', '-i', '<', formfile] else: raise P4LibError("Incorrect arguments. You must specify "\ "'change' OR you must specify 'files' and "\ "'description'.") output, error, retval = self._p4run(argv, **p4options) if _raw: return {'stdout': output, 'stderr': error, 'retval': retval} # Example output: # Change 1 created with 1 open file(s). # Submitting change 1. # Locking 1 files ... # add //depot/test_simple_submit.txt#1 # Change 1 submitted. # This returns (similar to .change() output): # {'change': 1, # 'action': 'submitted', # 'files': [{'depotFile': '//depot/test_simple_submit.txt', # 'rev': 1, # 'action': 'add'}]} # i.e. only the file actions and the last "submitted" line are # looked for. skipRes = [ re.compile('^Change \d+ created with \d+ open file\(s\)\.$'), re.compile('^Submitting change \d+\.$'), re.compile('^Locking \d+ files \.\.\.$')] fileRe = re.compile('^(?P\w+) (?P//.+?)'\ '#(?P\d+)$') resultRe = re.compile('^Change (?P\d+) '\ '(?Psubmitted)\.') result = {'files': []} for line in output.splitlines(1): match = fileRe.match(line) if match: file = match.groupdict() file['rev'] = int(file['rev']) result['files'].append(file) log.info("parsed submit 'file' line: '%s'", line.strip()) continue match = resultRe.match(line) if match: result.update(match.groupdict()) result['change'] = int(result['change']) log.info("parsed submit 'result' line: '%s'", line.strip()) continue # The following is technically just overhead but it is # considered more robust if we explicitly try to recognize # all output. Unrecognized output can be warned or raised. for skipRe in skipRes: match = skipRe.match(line) if match: log.info("parsed submit 'skip' line: '%s'", line.strip()) break else: log.warn("Unrecognized output line from running %s: "\ "'%s'. Please report this to the maintainer."\ % (argv, line)) return result finally: if formfile: os.remove(formfile) def delete(self, files, change=None, _raw=0, **p4options): """Open an existing file to delete it from the depot. "files" is a list of files or file wildcards to open for delete. "change" (-c) is a pending change with which to associate the opened file(s). Returns a list of dicts each representing a file *attempted* to be open for delete. Keys are 'depotFile', 'rev', and 'comment'. If the file could *not* be openned for delete then 'rev' will be None. If '_raw' is true then the return value is simply a dictionary with the unprocessed results of calling p4: {'stdout': , 'stderr': , 'retval': } """ if type(files) in types.StringTypes: files = [files] optv = [] if change: optv += ['-c', str(change)] argv = ['delete'] + optv + files output, error, retval = self._p4run(argv, **p4options) if _raw: return {'stdout': output, 'stderr': error, 'retval': retval} # Example output: # //depot/foo.txt#1 - opened for delete # //depot/foo.txt - can't delete (already opened for edit) hits = [] hitRe = re.compile('^(?P.+?)(#(?P\d+))? - '\ '(?P.*)$') for line in output.splitlines(1): match = hitRe.match(line) if match: hit = match.groupdict() if hit['rev'] is not None: hit['rev'] = int(hit['rev']) hits.append(hit) else: raise P4LibError("Internal error: could not parse "\ "'p4 delete' output line: '%s'. Please "\ "report this to the author." % line) return hits def client(self, name=None, client=None, delete=0, _raw=0, **p4options): """Create, update, delete, or get a client specification. 
Creating a new client spec or updating an existing one: p4.client(client=) OR p4.client(name=, client=) Returns a dictionary of the following form: {'client': , 'action': } Deleting a client spec: p4.client(name=, delete=1) Returns a dictionary of the following form: {'client': , 'action': 'deleted'} Getting a client spec: ch = p4.client(name=) Returns a dictionary describing the client. For example: {'access': '2002/07/16 00:05:31', 'client': 'trentm-ra', 'description': 'Created by trentm.', 'host': 'ra', 'lineend': 'local', 'options': 'noallwrite noclobber nocompress unlocked nomodtime normdir', 'owner': 'trentm', 'root': 'c:\\trentm\\', 'update': '2002/03/18 22:33:18', 'view': '//depot/... //trentm-ra/...'} If '_raw' is true then the return value is simply a dictionary with the unprocessed results of calling p4: {'stdout': , 'stderr': , 'retval': } Limitations: The -f (force) and -t (template) flags are not supported. However, there is no strong need to support -t because the use of dictionaries in this API makes this trivial. """ formfile = None try: action = None # note action to know how to parse output below if delete: action = "delete" if name is None: raise P4LibError("Incomplete/missing arguments: must "\ "specify 'name' of client to delete.") argv = ['client', '-d', name] elif client is None: action = "get" if name is None: raise P4LibError("Incomplete/missing arguments: must "\ "specify 'name' of client to get.") argv = ['client', '-o', name] else: action = "create/update" if client.has_key("client"): name = client["client"] if name is not None: cl = self.client(name=name) else: cl = {} cl.update(client) form = makeForm(**cl) # Build submission form file. formfile = tempfile.mktemp() fout = open(formfile, 'w') fout.write(form) fout.close() argv = ['client', '-i', '<', formfile] output, error, retval = self._p4run(argv, **p4options) if _raw: return {'stdout': output, 'stderr': error, 'retval': retval} if action == 'get': rv = parseForm(output) elif action in ('create/update', 'delete'): lines = output.splitlines(1) # Example output: # Client trentm-ra not changed. # Client bertha-test deleted. # Client bertha-test saved. resultRe = re.compile("^Client (?P[^\s@]+)"\ " (?Pnot changed|deleted|saved)\.$") match = resultRe.match(lines[0]) if match: rv = match.groupdict() else: err = "Internal error: could not parse p4 client "\ "output: '%s'" % output raise P4LibError(err) else: raise P4LibError("Internal error: unexpected action: '%s'"\ % action) return rv finally: if formfile: os.remove(formfile) def clients(self, _raw=0, **p4options): """Return a list of clients. Returns a list of dicts, each representing one client spec, e.g.: [{'client': 'trentm-ra', # client name 'update': '2002/03/18', # client last modification date 'root': 'c:\\trentm\\', # the client root directory 'description': 'Created by trentm. '}, # *part* of the client description ... ] If '_raw' is true then the return value is simply a dictionary with the unprocessed results of calling p4: {'stdout': , 'stderr': , 'retval': } """ argv = ['clients'] output, error, retval = self._p4run(argv, **p4options) if _raw: return {'stdout': output, 'stderr': error, 'retval': retval} # Examples: # Client trentm-ra 2002/03/18 root c:\trentm\ 'Created by trentm. ' clientRe = re.compile("^Client (?P[^\s@]+) "\ "(?P[\d/]+) "\ "root (?P.*?) 
'(?P<description>.*?)'$")
        clients = []
        for line in output.splitlines(1):
            match = clientRe.match(line)
            if match:
                client = match.groupdict()
                clients.append(client)
            else:
                raise P4LibError("Internal error: could not parse "\
                                 "'p4 clients' output line: '%s'" % line)
        return clients

    def label(self, name=None, label=None, delete=0, _raw=0,
              **p4options):
        r"""Create, update, delete, or get a label specification.

        Creating a new label spec or updating an existing one:
            p4.label(label=