Go to:
Gentoo Home
Documentation
Forums
Lists
Bugs
Planet
Store
Wiki
Get Gentoo!
Gentoo's Bugzilla – Attachment 198262 Details for
Bug 278127
Misc portage code cleanup.
Home
|
New
–
[Ex]
|
Browse
|
Search
|
Privacy Policy
|
[?]
|
Reports
|
Requests
|
Help
|
New Account
|
Log In
[x]
|
Forgot Password
Login:
[x]
[patch]
replace foo,bar -> foo, bar
comma_spaces.patch (text/plain), 220.97 KB, created by
Alec Warner (RETIRED)
on 2009-07-17 09:13:21 UTC
(
hide
)
Description:
replace foo,bar -> foo, bar
Filename:
MIME Type:
Creator:
Alec Warner (RETIRED)
Created:
2009-07-17 09:13:21 UTC
Size:
220.97 KB
patch
obsolete
>Index: pym/repoman/utilities.py >=================================================================== >--- pym/repoman/utilities.py (revision 13832) >+++ pym/repoman/utilities.py (working copy) >@@ -52,7 +52,7 @@ > Returns: > None (calls sys.exit on fatal problems) > """ >- retval = ("","") >+ retval = ("", "") > if vcs == 'cvs': > logging.info("Performing a " + output.green("cvs -n up") + \ > " with a little magic grep to check for updates.") >@@ -72,9 +72,9 @@ > for line in mylines: > if not line: > continue >- if line[0] not in "UPMARD": # Updates,Patches,Modified,Added,Removed/Replaced(svn),Deleted(svn) >+ if line[0] not in "UPMARD": # Updates, Patches, Modified, Added, Removed/Replaced(svn), Deleted(svn) > logging.error(red("!!! Please fix the following issues reported " + \ >- "from cvs: ")+green("(U,P,M,A,R,D are ok)")) >+ "from cvs: ")+green("(U, P, M, A, R, D are ok)")) > logging.error(red("!!! Note: This is a pretend/no-modify pass...")) > logging.error(retval[1]) > sys.exit(1) >@@ -115,7 +115,7 @@ > try: > metadatadom = minidom.parse(mylines) > except ExpatError, e: >- raise exception.ParseError("metadata.xml: %s" % (e,)) >+ raise exception.ParseError("metadata.xml: %s" % (e, )) > > try: > usetag = metadatadom.getElementsByTagName("use") >Index: pym/repoman/checks.py >=================================================================== >--- pym/repoman/checks.py (revision 13832) >+++ pym/repoman/checks.py (working copy) >@@ -102,7 +102,7 @@ > yield 'Useless blank line on last line' > > class EbuildQuote(LineCheck): >- """Ensure ebuilds have valid quoting around things like D,FILESDIR, etc...""" >+ """Ensure ebuilds have valid quoting around things like D, FILESDIR, etc...""" > > repoman_check_name = 'ebuild.minorsyn' > _message_commands = ["die", "echo", "eerror", >Index: pym/portage/dbapi/bintree.py >=================================================================== >--- pym/portage/dbapi/bintree.py (revision 13832) >+++ pym/portage/dbapi/bintree.py 
(working copy) >@@ -6,11 +6,11 @@ > > import portage > portage.proxy.lazyimport.lazyimport(globals(), >- 'portage.dep:dep_getkey,isjustname,isvalidatom,match_from_list', >- 'portage.output:EOutput,colorize', >+ 'portage.dep:dep_getkey, isjustname, isvalidatom, match_from_list', >+ 'portage.output:EOutput, colorize', > 'portage.update:update_dbentries', >- 'portage.util:ensure_dirs,normalize_path,writemsg,writemsg_stdout', >- 'portage.versions:best,catpkgsplit,catsplit', >+ 'portage.util:ensure_dirs, normalize_path, writemsg, writemsg_stdout', >+ 'portage.versions:best, catpkgsplit, catsplit', > ) > > from portage.cache.mappings import slot_dict_class >@@ -153,7 +153,7 @@ > self._all_directory = os.path.isdir( > os.path.join(self.pkgdir, "All")) > self._pkgindex_version = 0 >- self._pkgindex_hashes = ["MD5","SHA1"] >+ self._pkgindex_hashes = ["MD5", "SHA1"] > self._pkgindex_file = os.path.join(self.pkgdir, "Packages") > self._pkgindex_keys = self.dbapi._aux_cache_keys.copy() > self._pkgindex_keys.update(["CPV", "MTIME", "SIZE"]) >@@ -233,7 +233,7 @@ > continue > > tbz2path = self.getname(mycpv) >- if os.path.exists(tbz2path) and not os.access(tbz2path,os.W_OK): >+ if os.path.exists(tbz2path) and not os.access(tbz2path, os.W_OK): > writemsg(_("!!! 
Cannot update readonly binary: %s\n") % mycpv, > noiselevel=-1) > continue >@@ -1009,7 +1009,7 @@ > writemsg("mydep: %s\n" % mydep, 1) > mykey = dep_getkey(mydep) > writemsg("mykey: %s\n" % mykey, 1) >- mymatch = best(match_from_list(mydep,self.dbapi.cp_list(mykey))) >+ mymatch = best(match_from_list(mydep, self.dbapi.cp_list(mykey))) > writemsg("mymatch: %s\n" % mymatch, 1) > if mymatch is None: > return "" >@@ -1047,7 +1047,7 @@ > return True > > def get_use(self, pkgname): >- writemsg("deprecated use of binarytree.get_use()," + \ >+ writemsg("deprecated use of binarytree.get_use(), " + \ > " use dbapi.aux_get() instead", noiselevel=-1) > return self.dbapi.aux_get(pkgname, ["USE"])[0].split() > >@@ -1155,7 +1155,7 @@ > ok, reason = verify_all(pkg_path, digests) > if not ok: > raise portage.exception.DigestException( >- (pkg_path,) + tuple(reason)) >+ (pkg_path, ) + tuple(reason)) > > return True > >@@ -1163,7 +1163,7 @@ > "Get a slot for a catpkg; assume it exists." > myslot = "" > try: >- myslot = self.dbapi.aux_get(mycatpkg,["SLOT"])[0] >+ myslot = self.dbapi.aux_get(mycatpkg, ["SLOT"])[0] > except SystemExit, e: > raise > except Exception, e: >Index: pym/portage/dbapi/vartree.py >=================================================================== >--- pym/portage/dbapi/vartree.py (revision 13832) >+++ pym/portage/dbapi/vartree.py (working copy) >@@ -9,17 +9,17 @@ > import portage > portage.proxy.lazyimport.lazyimport(globals(), > 'portage.checksum:perform_md5', >- 'portage.dep:dep_getkey,isjustname,isvalidatom,match_from_list,' + \ >- 'use_reduce,paren_reduce', >+ 'portage.dep:dep_getkey, isjustname, isvalidatom, match_from_list, ' + \ >+ 'use_reduce, paren_reduce', > 'portage.elog:elog_process', >- 'portage.elog.filtering:filter_mergephases,filter_unmergephases', >- 'portage.locks:lockdir,unlockdir', >- 'portage.output:bold,colorize', >+ 'portage.elog.filtering:filter_mergephases, filter_unmergephases', >+ 'portage.locks:lockdir, unlockdir', >+ 
'portage.output:bold, colorize', > 'portage.update:fixdbentries', >- 'portage.util:apply_secpass_permissions,ConfigProtect,ensure_dirs,' + \ >- 'writemsg,writemsg_level,write_atomic,atomic_ofstream,writedict,' + \ >- 'grabfile,grabdict,normalize_path,new_protect_filename,getlibpaths', >- 'portage.versions:best,catpkgsplit,catsplit,pkgcmp,pkgsplit', >+ 'portage.util:apply_secpass_permissions, ConfigProtect, ensure_dirs, ' + \ >+ 'writemsg, writemsg_level, write_atomic, atomic_ofstream, writedict, ' + \ >+ 'grabfile, grabdict, normalize_path, new_protect_filename, getlibpaths', >+ 'portage.versions:best, catpkgsplit, catsplit, pkgcmp, pkgsplit', > ) > > from portage.const import CACHE_PATH, CONFIG_MEMORY_FILE, \ >@@ -251,7 +251,7 @@ > return isinstance(self._key, tuple) > > class _LibGraphNode(_ObjectKey): >- __slots__ = ("alt_paths",) >+ __slots__ = ("alt_paths", ) > > def __init__(self, obj, root): > LinkageMap._ObjectKey.__init__(self, obj, root) >@@ -309,7 +309,7 @@ > fields = l.split(";") > if len(fields) < 5: > writemsg_level(_("\nWrong number of fields " \ >- "returned from scanelf: %s\n\n") % (l,), >+ "returned from scanelf: %s\n\n") % (l, ), > level=logging.ERROR, noiselevel=-1) > continue > fields[1] = fields[1][root_len:] >@@ -333,7 +333,7 @@ > for x in filter(None, fields[3].replace( > "${ORIGIN}", os.path.dirname(obj)).replace( > "$ORIGIN", os.path.dirname(obj)).split(":"))]) >- needed = filter(None, fields[4].split(",")) >+ needed = filter(None, fields[4].split(", ")) > > obj_key = self._obj_key(obj) > indexed = True >@@ -421,7 +421,7 @@ > # Get the arch and soname from LinkageMap._obj_properties if > # it exists. Otherwise, None. 
> arch, _, _, soname, _ = \ >- self._obj_properties.get(obj_key, (None,)*5) >+ self._obj_properties.get(obj_key, (None, )*5) > return cache_self.cache.setdefault(obj, \ > (arch, soname, obj_key, True)) > else: >@@ -787,7 +787,7 @@ > except (KeyError, ValueError): > pass > writemsg_level(_("portage: COUNTER for %s was corrupted; " \ >- "resetting to value of 0\n") % (mycpv,), >+ "resetting to value of 0\n") % (mycpv, ), > level=logging.ERROR, noiselevel=-1) > return 0 > >@@ -1123,7 +1123,7 @@ > since the data was cached. The cache is stored in a pickled dict > object with the following format: > >- {version:"1", "packages":{cpv1:(mtime,{k1,v1, k2,v2, ...}), cpv2...}} >+ {version:"1", "packages":{cpv1:(mtime, {k1, v1, k2, v2, ...}), cpv2...}} > > If an error occurs while loading the cache pickle or the version is > unrecognized, the cache will simple be recreated from scratch (it is >@@ -1632,7 +1632,7 @@ > > def dep_bestmatch(self, mydep, use_cache=1): > "compatibility method -- all matches, not just visible ones" >- #mymatch=best(match(dep_expand(mydep,self.dbapi),self.dbapi)) >+ #mymatch=best(match(dep_expand(mydep, self.dbapi), self.dbapi)) > mymatch = best(self.dbapi.match( > dep_expand(mydep, mydb=self.dbapi, settings=self.settings), > use_cache=use_cache)) >@@ -1643,7 +1643,7 @@ > > def dep_match(self, mydep, use_cache=1): > "compatibility method -- we want to see all matches, not just visible ones" >- #mymatch = match(mydep,self.dbapi) >+ #mymatch = match(mydep, self.dbapi) > mymatch = self.dbapi.match(mydep, use_cache=use_cache) > if mymatch is None: > return [] >@@ -1689,7 +1689,7 @@ > if not mykey: > return [] > mysplit = catsplit(mykey) >- mydirlist = listdir(self.getpath(mysplit[0]),EmptyOnError=1) >+ mydirlist = listdir(self.getpath(mysplit[0]), EmptyOnError=1) > returnme = [] > for x in mydirlist: > mypsplit = pkgsplit(x) >@@ -1761,7 +1761,7 @@ > @type myroot: String (Path) > @param mysettings: Typically portage.config > @type mysettings: An instance 
of portage.config >- @param treetype: one of ['porttree','bintree','vartree'] >+ @param treetype: one of ['porttree', 'bintree', 'vartree'] > @type treetype: String > @param vartree: an instance of vartree corresponding to myroot. > @type vartree: vartree >@@ -1861,7 +1861,7 @@ > return self.contentscache > pkgfiles = {} > try: >- myc = open(contents_file,"r") >+ myc = open(contents_file, "r") > except EnvironmentError, e: > if e.errno != errno.ENOENT: > raise >@@ -2091,7 +2091,7 @@ > # already called LinkageMap.rebuild() and passed it's NEEDED file > # in as an argument. > if not others_in_slot: >- self._linkmap_rebuild(exclude_pkgs=(self.mycpv,)) >+ self._linkmap_rebuild(exclude_pkgs=(self.mycpv, )) > > # remove preserved libraries that don't have any consumers left > cpv_lib_map = self._find_unused_preserved_libs() >@@ -2389,7 +2389,7 @@ > try: > unlink(obj, lstatobj) > show_unmerge("<<<", "", file_type, obj) >- except (OSError, IOError),e: >+ except (OSError, IOError), e: > if e.errno not in ignored_unlink_errnos: > raise > del e >@@ -2563,7 +2563,7 @@ > except CommandNotFound, e: > self._linkmap_broken = True > self._display_merge(_("!!! Disabling preserve-libs " \ >- "due to error: Command Not Found: %s\n") % (e,), >+ "due to error: Command Not Found: %s\n") % (e, ), > level=logging.ERROR, noiselevel=-1) > > def _find_libs_to_preserve(self): >@@ -2687,7 +2687,7 @@ > # contents. Such a path might belong to some other package, so > # it shouldn't be preserved here. > showMessage(_("!!! File '%s' will not be preserved " >- "due to missing contents entry\n") % (f_abs,), >+ "due to missing contents entry\n") % (f_abs, ), > level=logging.ERROR, noiselevel=-1) > preserve_paths.remove(f) > continue >@@ -2800,7 +2800,7 @@ > # This means that a symlink is in the preserved libs > # registry, but the actual lib it points to is not. > self._display_merge(_("!!! symlink to lib is preserved, " >- "but not the lib itself:\n!!! 
'%s'\n") % (obj,), >+ "but not the lib itself:\n!!! '%s'\n") % (obj, ), > level=logging.ERROR, noiselevel=-1) > continue > removed = cpv_lib_map.get(cpv) >@@ -3300,7 +3300,7 @@ > " enough information to determine if a real problem" > " exists. Please do NOT file a bug report at" > " http://bugs.gentoo.org unless you report exactly which" >- " two packages install the same file(s). Once again," >+ " two packages install the same file(s). Once again, " > " please do NOT file a bug report unless you have" > " completely understood the above message.") > >@@ -3402,12 +3402,12 @@ > > # write local package counter for recording > counter = self.vartree.dbapi.counter_tick(self.myroot, mycpv=self.mycpv) >- lcfile = open(os.path.join(self.dbtmpdir, "COUNTER"),"w") >+ lcfile = open(os.path.join(self.dbtmpdir, "COUNTER"), "w") > lcfile.write(str(counter)) > lcfile.close() > > # open CONTENTS file (possibly overwriting old one) for recording >- outfile = open(os.path.join(self.dbtmpdir, "CONTENTS"),"w") >+ outfile = open(os.path.join(self.dbtmpdir, "CONTENTS"), "w") > > self.updateprotect() > >@@ -3520,7 +3520,7 @@ > autoclean = self.settings.get("AUTOCLEAN", "yes") == "yes" > > if autoclean: >- emerge_log(_(" >>> AUTOCLEAN: %s") % (slot_atom,)) >+ emerge_log(_(" >>> AUTOCLEAN: %s") % (slot_atom, )) > > others_in_slot.append(self) # self has just been merged > for dblnk in list(others_in_slot): >@@ -3529,16 +3529,16 @@ > if not (autoclean or dblnk.mycpv == self.mycpv or reinstall_self): > continue > showMessage(_(">>> Safely unmerging already-installed instance...\n")) >- emerge_log(_(" === Unmerging... (%s)") % (dblnk.mycpv,)) >+ emerge_log(_(" === Unmerging... 
(%s)") % (dblnk.mycpv, )) > others_in_slot.remove(dblnk) # dblnk will unmerge itself now > dblnk._linkmap_broken = self._linkmap_broken > unmerge_rval = dblnk.unmerge(trimworld=0, > ldpath_mtimes=prev_mtimes, others_in_slot=others_in_slot) > > if unmerge_rval == os.EX_OK: >- emerge_log(_(" >>> unmerge success: %s") % (dblnk.mycpv,)) >+ emerge_log(_(" >>> unmerge success: %s") % (dblnk.mycpv, )) > else: >- emerge_log(_(" !!! unmerge FAILURE: %s") % (dblnk.mycpv,)) >+ emerge_log(_(" !!! unmerge FAILURE: %s") % (dblnk.mycpv, )) > > # TODO: Check status and abort if necessary. > dblnk.delete() >@@ -3799,7 +3799,7 @@ > if self.settings.selinux_enabled(): > import selinux > sid = selinux.get_sid(mysrc) >- selinux.secure_mkdir(mydest,sid) >+ selinux.secure_mkdir(mydest, sid) > else: > os.mkdir(mydest) > if bsd_chflags: >@@ -3908,7 +3908,7 @@ > > if mymtime != None: > outfile.write("obj "+myrealdest+" "+mymd5+" "+str(mymtime)+"\n") >- showMessage("%s %s\n" % (zing,mydest)) >+ showMessage("%s %s\n" % (zing, mydest)) > else: > # we are merging a fifo or device node > zing = "!!!" 
>@@ -3996,36 +3996,36 @@ > self.unlockdb() > return retval > >- def getstring(self,name): >+ def getstring(self, name): > "returns contents of a file with whitespace converted to spaces" > if not os.path.exists(self.dbdir+"/"+name): > return "" >- myfile = open(self.dbdir+"/"+name,"r") >+ myfile = open(self.dbdir+"/"+name, "r") > mydata = myfile.read().split() > myfile.close() > return " ".join(mydata) > >- def copyfile(self,fname): >- shutil.copyfile(fname,self.dbdir+"/"+os.path.basename(fname)) >+ def copyfile(self, fname): >+ shutil.copyfile(fname, self.dbdir+"/"+os.path.basename(fname)) > >- def getfile(self,fname): >+ def getfile(self, fname): > if not os.path.exists(self.dbdir+"/"+fname): > return "" >- myfile = open(self.dbdir+"/"+fname,"r") >+ myfile = open(self.dbdir+"/"+fname, "r") > mydata = myfile.read() > myfile.close() > return mydata > >- def setfile(self,fname,data): >+ def setfile(self, fname, data): > mode = 'w' > if fname == 'environment.bz2' or not isinstance(data, basestring): > mode = 'wb' > write_atomic(os.path.join(self.dbdir, fname), data, mode=mode) > >- def getelements(self,ename): >+ def getelements(self, ename): > if not os.path.exists(self.dbdir+"/"+ename): > return [] >- myelement = open(self.dbdir+"/"+ename,"r") >+ myelement = open(self.dbdir+"/"+ename, "r") > mylines = myelement.readlines() > myreturn = [] > for x in mylines: >@@ -4034,8 +4034,8 @@ > myelement.close() > return myreturn > >- def setelements(self,mylist,ename): >- myelement = open(self.dbdir+"/"+ename,"w") >+ def setelements(self, mylist, ename): >+ myelement = open(self.dbdir+"/"+ename, "w") > for x in mylist: > myelement.write(x+"\n") > myelement.close() >Index: pym/portage/dbapi/__init__.py >=================================================================== >--- pym/portage/dbapi/__init__.py (revision 13832) >+++ pym/portage/dbapi/__init__.py (working copy) >@@ -12,8 +12,8 @@ > 'portage.dep:match_from_list', > 'portage.locks:unlockfile', > 
'portage.output:colorize', >- 'portage.util:cmp_sort_key,writemsg', >- 'portage.versions:catpkgsplit,vercmp', >+ 'portage.util:cmp_sort_key, writemsg', >+ 'portage.versions:catpkgsplit, vercmp', > ) > > from portage import auxdbkeys, dep_expand >@@ -98,10 +98,10 @@ > """Return the metadata keys in mylist for mycpv > Args: > mycpv - "sys-apps/foo-1.0" >- mylist - ["SLOT","DEPEND","HOMEPAGE"] >+ mylist - ["SLOT", "DEPEND", "HOMEPAGE"] > Returns: > a list of results, in order of keys in mylist, such as: >- ["0",">=sys-libs/bar-1.0","http://www.foo.com"] or [] if mycpv not found' >+ ["0", ">=sys-libs/bar-1.0", "http://www.foo.com"] or [] if mycpv not found' > """ > raise NotImplementedError > >Index: pym/portage/dbapi/porttree.py >=================================================================== >--- pym/portage/dbapi/porttree.py (revision 13832) >+++ pym/portage/dbapi/porttree.py (working copy) >@@ -7,10 +7,10 @@ > import portage > portage.proxy.lazyimport.lazyimport(globals(), > 'portage.checksum', >- 'portage.dep:dep_getkey,match_from_list,paren_reduce,use_reduce', >+ 'portage.dep:dep_getkey, match_from_list, paren_reduce, use_reduce', > 'portage.env.loaders:KeyValuePairFileLoader', >- 'portage.util:ensure_dirs,writemsg,writemsg_level', >- 'portage.versions:best,catpkgsplit,pkgsplit,ver_regexp', >+ 'portage.util:ensure_dirs, writemsg, writemsg_level', >+ 'portage.versions:best, catpkgsplit, pkgsplit, ver_regexp', > ) > > from portage.cache.cache_errors import CacheError >@@ -43,27 +43,27 @@ > if operator is not None: > raise portage.exception.InvalidDependString( > ("getFetchMap(): '%s' SRC_URI arrow missing " + \ >- "right operand") % (cpv,)) >+ "right operand") % (cpv, )) > uri = None > _src_uri_validate(cpv, eapi, x) > continue > if x == '||': > raise portage.exception.InvalidDependString( > ("getFetchMap(): '%s' SRC_URI contains invalid " + \ >- "|| operator") % (cpv,)) >+ "|| operator") % (cpv, )) > > if x[-1:] == "?": > if operator is not None: > raise 
portage.exception.InvalidDependString( > ("getFetchMap(): '%s' SRC_URI arrow missing " + \ >- "right operand") % (cpv,)) >+ "right operand") % (cpv, )) > uri = None > continue > if uri is None: > if x == "->": > raise portage.exception.InvalidDependString( > ("getFetchMap(): '%s' SRC_URI arrow missing " + \ >- "left operand") % (cpv,)) >+ "left operand") % (cpv, )) > uri = x > continue > if x == "->": >@@ -86,7 +86,7 @@ > if x[-1:] == "?": > raise portage.exception.InvalidDependString( > ("getFetchMap(): '%s' SRC_URI arrow missing " + \ >- "right operand") % (cpv,)) >+ "right operand") % (cpv, )) > > # Found the right operand, so reset state. > uri = None >@@ -95,7 +95,7 @@ > if operator is not None: > raise portage.exception.InvalidDependString( > "getFetchMap(): '%s' SRC_URI arrow missing right operand" % \ >- (cpv,)) >+ (cpv, )) > > class _repo_info(object): > __slots__ = ('name', 'path', 'eclass_db', 'portdir', 'portdir_overlay') >@@ -399,7 +399,7 @@ > return license_path > return None > >- def findname(self,mycpv): >+ def findname(self, mycpv): > return self.findname2(mycpv)[0] > > def getRepositoryPath(self, repository_id): >@@ -429,7 +429,7 @@ > the file we wanted. > """ > if not mycpv: >- return "",0 >+ return "", 0 > mysplit = mycpv.split("/") > psplit = pkgsplit(mysplit[1]) > if psplit is None or len(mysplit) != 2: >@@ -519,7 +519,7 @@ > emtime = st[stat.ST_MTIME] > except OSError: > writemsg(_("!!! aux_get(): ebuild for " \ >- "'%s' does not exist at:\n") % (cpv,), noiselevel=-1) >+ "'%s' does not exist at:\n") % (cpv, ), noiselevel=-1) > writemsg("!!! %s\n" % ebuild_path, noiselevel=-1) > raise KeyError(cpv) > >@@ -566,8 +566,8 @@ > > def aux_get(self, mycpv, mylist, mytree=None): > "stub code for returning auxilliary db information, such as SLOT, DEPEND, etc." 
>- 'input: "sys-apps/foo-1.0",["SLOT","DEPEND","HOMEPAGE"]' >- 'return: ["0",">=sys-libs/bar-1.0","http://www.foo.com"] or raise KeyError if error' >+ 'input: "sys-apps/foo-1.0", ["SLOT", "DEPEND", "HOMEPAGE"]' >+ 'return: ["0", ">=sys-libs/bar-1.0", "http://www.foo.com"] or raise KeyError if error' > cache_me = False > if not mytree: > cache_me = True >@@ -578,7 +578,7 @@ > return [aux_cache.get(x, "") for x in mylist] > cache_me = True > global auxdbkeys, auxdbkeylen >- cat,pkg = mycpv.split("/", 1) >+ cat, pkg = mycpv.split("/", 1) > > myebuild, mylocation = self.findname2(mycpv, mytree) > >@@ -925,7 +925,7 @@ > self.xcache = {} > self.frozen = 0 > >- def xmatch(self,level,origdep,mydep=None,mykey=None,mylist=None): >+ def xmatch(self, level, origdep, mydep=None, mykey=None, mylist=None): > "caching match function; very trick stuff" > #if no updates are being made to the tree, we can consult our xcache... > if self.frozen: >@@ -1068,7 +1068,7 @@ > visible.append(cpv) > return visible > >- def gvisible(self,mylist): >+ def gvisible(self, mylist): > "strip out group-masked (not in current group) entries" > > if mylist is None: >@@ -1150,21 +1150,21 @@ > self.dbapi = portdbapi( > settings["PORTDIR"], mysettings=settings) > >- def dep_bestmatch(self,mydep): >+ def dep_bestmatch(self, mydep): > "compatibility method" >- mymatch = self.dbapi.xmatch("bestmatch-visible",mydep) >+ mymatch = self.dbapi.xmatch("bestmatch-visible", mydep) > if mymatch is None: > return "" > return mymatch > >- def dep_match(self,mydep): >+ def dep_match(self, mydep): > "compatibility method" >- mymatch = self.dbapi.xmatch("match-visible",mydep) >+ mymatch = self.dbapi.xmatch("match-visible", mydep) > if mymatch is None: > return [] > return mymatch > >- def exists_specific(self,cpv): >+ def exists_specific(self, cpv): > return self.dbapi.cpv_exists(cpv) > > def getallnodes(self): >@@ -1194,7 +1194,7 @@ > def depcheck(self, mycheck, use="yes", myusesplit=None): > return dep_check(mycheck, 
self.dbapi, use=use, myuse=myusesplit) > >- def getslot(self,mycatpkg): >+ def getslot(self, mycatpkg): > "Get a slot for a catpkg; assume it exists." > myslot = "" > try: >Index: pym/portage/dbapi/virtual.py >=================================================================== >--- pym/portage/dbapi/virtual.py (revision 13832) >+++ pym/portage/dbapi/virtual.py (working copy) >@@ -90,7 +90,7 @@ > if not mycpv in self.cpdict[mycp]: > self.cpdict[mycp].append(mycpv) > >- def cpv_remove(self,mycpv): >+ def cpv_remove(self, mycpv): > """Removes a cpv from the list of available packages.""" > self._clear_cache() > mycp = cpv_getkey(mycpv) >Index: pym/portage/localization.py >=================================================================== >--- pym/portage/localization.py (revision 13832) >+++ pym/portage/localization.py (working copy) >@@ -16,6 +16,6 @@ > > a_value = "value.of.a" > b_value = 123 >- c_value = [1,2,3,4] >- print _("A: %(a)s -- B: %(b)s -- C: %(c)s") % {"a":a_value,"b":b_value,"c":c_value} >+ c_value = [1, 2, 3, 4] >+ print _("A: %(a)s -- B: %(b)s -- C: %(c)s") % {"a":a_value, "b":b_value, "c":c_value} > >Index: pym/portage/proxy/lazyimport.py >=================================================================== >--- pym/portage/proxy/lazyimport.py (revision 13832) >+++ pym/portage/proxy/lazyimport.py (working copy) >@@ -86,7 +86,7 @@ > > class _LazyImportFrom(_LazyImport): > >- __slots__ = ('_attr_name',) >+ __slots__ = ('_attr_name', ) > > def __init__(self, scope, name, attr_name, alias): > object.__setattr__(self, '_attr_name', attr_name) >@@ -113,7 +113,7 @@ > > Syntax Result > foo import foo >- foo:bar,baz from foo import bar, baz >+ foo:bar, baz from foo import bar, baz > foo:bar@baz from foo import bar as baz > > @param scope: the scope in which to place the import, typically globals() >@@ -157,7 +157,7 @@ > else: > name, fromlist = parts > already_imported = modules.get(name) >- fromlist = fromlist.split(',') >+ fromlist = fromlist.split(', ') > 
for s in fromlist: > alias = s.split('@', 1) > if len(alias) == 1: >Index: pym/portage/exception.py >=================================================================== >--- pym/portage/exception.py (revision 13832) >+++ pym/portage/exception.py (working copy) >@@ -5,7 +5,7 @@ > > class PortageException(Exception): > """General superclass for portage exceptions""" >- def __init__(self,value): >+ def __init__(self, value): > self.value = value[:] > def __str__(self): > if isinstance(self.value, basestring): >Index: pym/portage/__init__.py >=================================================================== >--- pym/portage/__init__.py (revision 13832) >+++ pym/portage/__init__.py (working copy) >@@ -52,34 +52,34 @@ > import portage.proxy as proxy > proxy.lazyimport.lazyimport(globals(), > 'portage.checksum', >- 'portage.checksum:perform_checksum,perform_md5,prelink_capable', >+ 'portage.checksum:perform_checksum, perform_md5, prelink_capable', > 'portage.cvstree', > 'portage.dep', >- 'portage.dep:best_match_to_list,dep_getcpv,dep_getkey,' + \ >- 'get_operator,isjustname,isspecific,isvalidatom,' + \ >- 'match_from_list,match_to_list', >+ 'portage.dep:best_match_to_list, dep_getcpv, dep_getkey, ' + \ >+ 'get_operator, isjustname, isspecific, isvalidatom, ' + \ >+ 'match_from_list, match_to_list', > 'portage.eclass_cache', > 'portage.getbinpkg', > 'portage.locks', >- 'portage.locks:lockdir,lockfile,unlockdir,unlockfile', >+ 'portage.locks:lockdir, lockfile, unlockdir, unlockfile', > 'portage.output', >- 'portage.output:bold,colorize', >+ 'portage.output:bold, colorize', > 'portage.process', >- 'portage.process:atexit_register,run_exitfuncs', >- 'portage.update:dep_transform,fixdbentries,grab_updates,' + \ >- 'parse_updates,update_config_files,update_dbentries,' + \ >+ 'portage.process:atexit_register, run_exitfuncs', >+ 'portage.update:dep_transform, fixdbentries, grab_updates, ' + \ >+ 'parse_updates, update_config_files, update_dbentries, ' + \ > 'update_dbentry', > 
'portage.util', >- 'portage.util:atomic_ofstream,apply_secpass_permissions,' + \ >- 'apply_recursive_permissions,dump_traceback,getconfig,' + \ >- 'grabdict,grabdict_package,grabfile,grabfile_package,' + \ >- 'map_dictlist_vals,new_protect_filename,normalize_path,' + \ >- 'pickle_read,pickle_write,stack_dictlist,stack_dicts,' + \ >- 'stack_lists,unique_array,varexpand,writedict,writemsg,' + \ >- 'writemsg_stdout,write_atomic', >+ 'portage.util:atomic_ofstream, apply_secpass_permissions, ' + \ >+ 'apply_recursive_permissions, dump_traceback, getconfig, ' + \ >+ 'grabdict, grabdict_package, grabfile, grabfile_package, ' + \ >+ 'map_dictlist_vals, new_protect_filename, normalize_path, ' + \ >+ 'pickle_read, pickle_write, stack_dictlist, stack_dicts, ' + \ >+ 'stack_lists, unique_array, varexpand, writedict, writemsg, ' + \ >+ 'writemsg_stdout, write_atomic', > 'portage.versions', >- 'portage.versions:best,catpkgsplit,catsplit,endversion_keys,' + \ >- 'suffix_value@endversion,pkgcmp,pkgsplit,vercmp,ververify', >+ 'portage.versions:best, catpkgsplit, catsplit, endversion_keys, ' + \ >+ 'suffix_value@endversion, pkgcmp, pkgsplit, vercmp, ververify', > 'portage.xpak', > ) > >@@ -90,7 +90,7 @@ > EBUILD_SH_BINARY, SANDBOX_BINARY, BASH_BINARY, \ > MOVE_BINARY, PRELINK_BINARY, WORLD_FILE, MAKE_CONF_FILE, MAKE_DEFAULTS_FILE, \ > DEPRECATED_PROFILE_FILE, USER_VIRTUALS_FILE, EBUILD_SH_ENV_FILE, \ >- INVALID_ENV_FILE, CUSTOM_MIRRORS_FILE, CONFIG_MEMORY_FILE,\ >+ INVALID_ENV_FILE, CUSTOM_MIRRORS_FILE, CONFIG_MEMORY_FILE, \ > INCREMENTALS, EAPI, MISC_SH_BINARY, REPO_NAME_LOC, REPO_NAME_FILE > > from portage.data import ostype, lchown, userland, secpass, uid, wheelgid, \ >@@ -136,7 +136,7 @@ > > bsd_chflags = None > >-if platform.system() in ('FreeBSD',): >+if platform.system() in ('FreeBSD', ): > > class bsd_chflags(object): > >@@ -208,7 +208,7 @@ > cacheMiss=0 > cacheStale=0 > def cacheddir(my_original_path, ignorecvs, ignorelist, EmptyOnError, followSymlinks=True): >- global 
cacheHit,cacheMiss,cacheStale >+ global cacheHit, cacheMiss, cacheStale > mypath = normalize_path(my_original_path) > if mypath in dircache: > cacheHit += 1 >@@ -277,7 +277,7 @@ > ret_list.append(list[x]) > ret_ftype.append(ftype[x]) > >- writemsg("cacheddirStats: H:%d/M:%d/S:%d\n" % (cacheHit, cacheMiss, cacheStale),10) >+ writemsg("cacheddirStats: H:%d/M:%d/S:%d\n" % (cacheHit, cacheMiss, cacheStale), 10) > return ret_list, ret_ftype > > _ignorecvs_dirs = ('CVS', 'SCCS', '.svn', '.git') >@@ -293,7 +293,7 @@ > @type recursive: Boolean > @param filesonly; Only return files, not more directories > @type filesonly: Boolean >- @param ignorecvs: Ignore CVS directories ('CVS','SCCS','.svn','.git') >+ @param ignorecvs: Ignore CVS directories ('CVS', 'SCCS', '.svn', '.git') > @type ignorecvs: Boolean > @param ignorelist: List of filenames/directories to exclude > @type ignorelist: List >@@ -322,18 +322,18 @@ > while x<len(ftype): > if ftype[x] == 1 and not \ > (ignorecvs and os.path.basename(list[x]) in _ignorecvs_dirs): >- l,f = cacheddir(mypath+"/"+list[x], ignorecvs, ignorelist, EmptyOnError, >+ l, f = cacheddir(mypath+"/"+list[x], ignorecvs, ignorelist, EmptyOnError, > followSymlinks) > > l=l[:] >- for y in range(0,len(l)): >+ for y in range(0, len(l)): > l[y]=list[x]+"/"+l[y] > list=list+l > ftype=ftype+f > x+=1 > if filesonly: > rlist=[] >- for x in range(0,len(ftype)): >+ for x in range(0, len(ftype)): > if ftype[x]==0: > rlist=rlist+[list[x]] > elif dirsonly: >@@ -347,8 +347,8 @@ > return rlist > > def flatten(mytokens): >- """this function now turns a [1,[2,3]] list into >- a [1,2,3] list and returns it.""" >+ """this function now turns a [1, [2, 3]] list into >+ a [1, 2, 3] list and returns it.""" > newlist=[] > for x in mytokens: > if isinstance(x, list): >@@ -625,13 +625,13 @@ > def output(s): > writemsg(s, noiselevel=-1) > for node in self.nodes: >- output("%s " % (node,)) >+ output("%s " % (node, )) > if self.nodes[node][0]: > output("depends on\n") > else: 
> output("(no children)\n") > for child, priorities in self.nodes[node][0].iteritems(): >- output(" %s (%s)\n" % (child, priorities[-1],)) >+ output(" %s (%s)\n" % (child, priorities[-1], )) > > #parse /etc/env.d and generate /etc/profile.env > >@@ -758,7 +758,7 @@ > newprelink.write("# prelink.conf autogenerated by env-update; make all changes to\n") > newprelink.write("# contents of /etc/env.d directory\n") > >- for x in ["/bin","/sbin","/usr/bin","/usr/sbin","/lib","/usr/lib"]: >+ for x in ["/bin", "/sbin", "/usr/bin", "/usr/sbin", "/lib", "/usr/lib"]: > newprelink.write("-l "+x+"\n"); > for x in specials["LDPATH"]+specials["PATH"]+specials["PRELINK_PATH"]: > if not x: >@@ -788,7 +788,7 @@ > current_time = long(time.time()) > mtime_changed = False > lib_dirs = set() >- for lib_dir in portage.util.unique_array(specials["LDPATH"]+['usr/lib','usr/lib64','usr/lib32','lib','lib64','lib32']): >+ for lib_dir in portage.util.unique_array(specials["LDPATH"]+['usr/lib', 'usr/lib64', 'usr/lib32', 'lib', 'lib64', 'lib32']): > x = os.path.join(target_root, lib_dir.lstrip(os.sep)) > try: > newldpathtime = long(os.stat(x).st_mtime) >@@ -822,7 +822,7 @@ > contents is not None: > libdir_contents_changed = False > for mypath, mydata in contents.iteritems(): >- if mydata[0] not in ("obj","sym"): >+ if mydata[0] not in ("obj", "sym"): > continue > head, tail = os.path.split(mypath) > if head in lib_dirs: >@@ -846,12 +846,12 @@ > # to overwrite the symlinks we just made. -X means no links. After 'clean' > # we can safely create links. 
> writemsg_level(">>> Regenerating %setc/ld.so.cache...\n" % \ >- (target_root,)) >+ (target_root, )) > if makelinks: > os.system("cd / ; %s -r '%s'" % (ldconfig, target_root)) > else: > os.system("cd / ; %s -X -r '%s'" % (ldconfig, target_root)) >- elif ostype in ("FreeBSD","DragonFly"): >+ elif ostype in ("FreeBSD", "DragonFly"): > writemsg_level(">>> Regenerating %svar/run/ld-elf.so.hints...\n" % \ > target_root) > os.system(("cd / ; %s -elf -i " + \ >@@ -940,7 +940,7 @@ > > # Grab a list of files named localversion* and sort them > localversions = os.listdir(base_dir) >- for x in range(len(localversions)-1,-1,-1): >+ for x in range(len(localversions)-1, -1, -1): > if localversions[x][:12] != "localversion": > del localversions[x] > localversions.sort() >@@ -954,7 +954,7 @@ > if kernelconfig and "CONFIG_LOCALVERSION" in kernelconfig: > version += "".join(kernelconfig["CONFIG_LOCALVERSION"].split()) > >- return (version,None) >+ return (version, None) > > def autouse(myvartree, use_cache=1, mysettings=None): > """ >@@ -979,7 +979,7 @@ > for myuse in usedefaults: > dep_met = True > for mydep in usedefaults[myuse]: >- if not myvartree.dep_match(mydep,use_cache=True): >+ if not myvartree.dep_match(mydep, use_cache=True): > dep_met = False > break > if dep_met: >@@ -1004,7 +1004,7 @@ > return regex > > class _local_repo_config(object): >- __slots__ = ('aliases', 'eclass_overrides', 'masters', 'name',) >+ __slots__ = ('aliases', 'eclass_overrides', 'masters', 'name', ) > def __init__(self, name, repo_opts): > self.name = name > >@@ -1322,7 +1322,7 @@ > else: > self.incrementals = copy.deepcopy(config_incrementals) > >- self.module_priority = ["user","default"] >+ self.module_priority = ["user", "default"] > self.modules = {} > self.modules["user"] = getconfig( > os.path.join(config_root, MODULES_FILE_PATH)) >@@ -1363,7 +1363,7 @@ > raise portage.exception.ParseError( > "Profile contains unsupported " + \ > "EAPI '%s': '%s'" % \ >- (eapi, os.path.realpath(eapi_file),)) 
>+ (eapi, os.path.realpath(eapi_file), )) > if os.path.exists(parentsFile): > parents = grabfile(parentsFile) > if not parents: >@@ -1798,7 +1798,7 @@ > self["PORTAGE_DEPCACHEDIR"] = self.depcachedir > self.backup_changes("PORTAGE_DEPCACHEDIR") > >- overlays = self.get("PORTDIR_OVERLAY","").split() >+ overlays = self.get("PORTDIR_OVERLAY", "").split() > if overlays: > new_ov = [] > for ov in overlays: >@@ -1980,13 +1980,13 @@ > writemsg("!!! FEATURES=fakeroot is enabled, but the " + \ > "fakeroot binary is not installed.\n", noiselevel=-1) > >- def loadVirtuals(self,root): >+ def loadVirtuals(self, root): > """Not currently used by portage.""" > writemsg("DEPRECATED: portage.config.loadVirtuals\n") > self.getvirtuals(root) > >- def load_best_module(self,property_string): >- best_mod = best_from_dict(property_string,self.modules,self.module_priority) >+ def load_best_module(self, property_string): >+ best_mod = best_from_dict(property_string, self.modules, self.module_priority) > mod = None > try: > mod = load_mod(best_mod) >@@ -2011,14 +2011,14 @@ > if self.locked: > raise Exception("Configuration is locked.") > >- def backup_changes(self,key=None): >+ def backup_changes(self, key=None): > self.modifying() > if key and key in self.configdict["env"]: > self.backupenv[key] = copy.deepcopy(self.configdict["env"][key]) > else: > raise KeyError("No such key defined in environment: %s" % key) > >- def reset(self,keeping_pkg=0,use_cache=1): >+ def reset(self, keeping_pkg=0, use_cache=1): > """ > Restore environment from self.backupenv, call self.regenerate() > @param keeping_pkg: Should we keep the set_cpv() data or delete it. 
>@@ -2045,7 +2045,7 @@ > self.useforce_list, incremental=True)) > self.regenerate(use_cache=use_cache) > >- def load_infodir(self,infodir): >+ def load_infodir(self, infodir): > warnings.warn("portage.config.load_infodir() is deprecated", > DeprecationWarning) > return 1 >@@ -2297,7 +2297,7 @@ > defaults.insert(pos, self.make_defaults_use[i]) > pos = len(defaults) > defaults = " ".join(defaults) >- if defaults != self.configdict["defaults"].get("USE",""): >+ if defaults != self.configdict["defaults"].get("USE", ""): > self.configdict["defaults"]["USE"] = defaults > has_changed = True > >@@ -2329,7 +2329,7 @@ > self.configdict["pkg"]["USE"] = self.puse[:] # this gets appended to USE > > if has_changed: >- self.reset(keeping_pkg=1,use_cache=use_cache) >+ self.reset(keeping_pkg=1, use_cache=use_cache) > > # Ensure that "pkg" values are always preferred over "env" values. > # This must occur _after_ the above reset() call, since reset() >@@ -2431,8 +2431,8 @@ > be implicit members of IUSE: > * Flags derived from ARCH > * Flags derived from USE_EXPAND_HIDDEN variables >- * Masked flags, such as those from {,package}use.mask >- * Forced flags, such as those from {,package}use.force >+ * Masked flags, such as those from {, package}use.mask >+ * Forced flags, such as those from {, package}use.force > * build and bootstrap flags used by bootstrap.sh > """ > iuse_implicit = set() >@@ -2765,7 +2765,7 @@ > return self._accept_chost_re.match( > metadata.get('CHOST', '')) is not None > >- def setinst(self,mycpv,mydbapi): >+ def setinst(self, mycpv, mydbapi): > """This updates the preferences for old-style virtuals, > affecting the behavior of dep_expand() and dep_check() > calls. It can change dbapi.match() behavior since that >@@ -2819,7 +2819,7 @@ > # env_d will be None if profile.env doesn't exist. 
> self.configdict["env.d"].update(env_d) > >- def regenerate(self,useonly=0,use_cache=1): >+ def regenerate(self, useonly=0, use_cache=1): > """ > Regenerate settings > This involves regenerating valid USE flags, re-expanding USE_EXPAND flags >@@ -2839,7 +2839,7 @@ > self.modifying() > if self.already_in_regenerate: > # XXX: THIS REALLY NEEDS TO GET FIXED. autouse() loops. >- writemsg("!!! Looping in regenerate.\n",1) >+ writemsg("!!! Looping in regenerate.\n", 1) > return > else: > self.already_in_regenerate = 1 >@@ -3074,7 +3074,7 @@ > temp_vartree = vartree(myroot, None, > categories=self.categories, settings=self) > # Reduce the provides into a list by CP. >- self.treeVirtuals = map_dictlist_vals(getCPFromCPV,temp_vartree.get_all_provides()) >+ self.treeVirtuals = map_dictlist_vals(getCPFromCPV, temp_vartree.get_all_provides()) > > self.virtuals = self.__getvirtuals_compile() > return self.virtuals >@@ -3104,14 +3104,14 @@ > self.dirVirtuals, self._depgraphVirtuals]) > return virtuals > >- def __delitem__(self,mykey): >+ def __delitem__(self, mykey): > self.modifying() > for x in self.lookuplist: > if x != None: > if mykey in x: > del x[mykey] > >- def __getitem__(self,mykey): >+ def __getitem__(self, mykey): > for d in self.lookuplist: > if mykey in d: > return d[mykey] >@@ -3137,7 +3137,7 @@ > raise KeyError(key) > return v > >- def has_key(self,mykey): >+ def has_key(self, mykey): > warnings.warn("portage.config.has_key() is deprecated, " > "use the in operator instead", > DeprecationWarning) >@@ -3177,10 +3177,10 @@ > def items(self): > return list(self.iteritems()) > >- def __setitem__(self,mykey,myvalue): >+ def __setitem__(self, mykey, myvalue): > "set a value; will be thrown away at reset() time" > if not isinstance(myvalue, basestring): >- raise ValueError("Invalid type being used as a value: '%s': '%s'" % (str(mykey),str(myvalue))) >+ raise ValueError("Invalid type being used as a value: '%s': '%s'" % (str(mykey), str(myvalue))) > self.modifying() > 
self.modifiedkeys += [mykey] > self.configdict["env"][mykey]=myvalue >@@ -3351,7 +3351,7 @@ > > @param mystring: Command to run > @type mystring: String >- @param mysettings: Either a Dict of Key,Value pairs or an instance of portage.config >+ @param mysettings: Either a Dict of Key, Value pairs or an instance of portage.config > @type mysettings: Dictionary or config instance > @param debug: Ignored > @type debug: Boolean >@@ -3380,7 +3380,7 @@ > keywords["opt_name"] = "[%s]" % mysettings.mycpv > else: > keywords["opt_name"] = "[%s/%s]" % \ >- (mysettings.get("CATEGORY",""), mysettings.get("PF","")) >+ (mysettings.get("CATEGORY", ""), mysettings.get("PF", "")) > > fd_pipes = keywords.get("fd_pipes") > if fd_pipes is None: >@@ -3441,8 +3441,8 @@ > # permissions in the merge phase. > fakeroot = fakeroot and uid != 0 and portage.process.fakeroot_capable > if droppriv and not uid and portage_gid and portage_uid: >- keywords.update({"uid":portage_uid,"gid":portage_gid, >- "groups":userpriv_groups,"umask":002}) >+ keywords.update({"uid":portage_uid, "gid":portage_gid, >+ "groups":userpriv_groups, "umask":002}) > if not free: > free=((droppriv and "usersandbox" not in features) or \ > (not droppriv and "sandbox" not in features and \ >@@ -3701,14 +3701,14 @@ > 'Y' : 80, > } > >-def fetch(myuris, mysettings, listonly=0, fetchonly=0, locks_in_subdir=".locks",use_locks=1, try_mirrors=1): >+def fetch(myuris, mysettings, listonly=0, fetchonly=0, locks_in_subdir=".locks", use_locks=1, try_mirrors=1): > "fetch files. Will use digest file if available." 
> > if not myuris: > return 1 > > features = mysettings.features >- restrict = mysettings.get("PORTAGE_RESTRICT","").split() >+ restrict = mysettings.get("PORTAGE_RESTRICT", "").split() > > from portage.data import secpass > userfetch = secpass >= 2 and "userfetch" in features >@@ -3801,7 +3801,7 @@ > if "skiprocheck" in features: > fetch_to_ro = 1 > >- if not os.access(mysettings["DISTDIR"],os.W_OK) and fetch_to_ro: >+ if not os.access(mysettings["DISTDIR"], os.W_OK) and fetch_to_ro: > if use_locks: > writemsg(colorize("BAD", > "!!! For fetching to a read-only filesystem, " + \ >@@ -3837,7 +3837,7 @@ > if os.path.isdir(x)] > > fsmirrors = [] >- for x in range(len(mymirrors)-1,-1,-1): >+ for x in range(len(mymirrors)-1, -1, -1): > if mymirrors[x] and mymirrors[x][0]=='/': > fsmirrors += [mymirrors[x]] > del mymirrors[x] >@@ -3869,7 +3869,7 @@ > for myfile, myuri in file_uri_tuples: > if myfile not in filedict: > filedict[myfile]=[] >- for y in range(0,len(locations)): >+ for y in range(0, len(locations)): > filedict[myfile].append(locations[y]+"/distfiles/"+myfile) > if myuri[:9]=="mirror://": > eidx = myuri.find("/", 9) >@@ -4385,7 +4385,7 @@ > locfetch=fetchcommand > command_var = fetchcommand_var > writemsg_stdout(">>> Downloading '%s'\n" % \ >- re.sub(r'//(.+):.+@(.+)/',r'//\1:*password*@\2/', loc)) >+ re.sub(r'//(.+):.+@(.+)/', r'//\1:*password*@\2/', loc)) > variables = { > "DISTDIR": mysettings["DISTDIR"], > "URI": loc, >@@ -4462,7 +4462,7 @@ > mystat.st_size < mydigests[myfile]["size"]: > # Fetch failed... Try the next one... Kill 404 files though. 
> if (mystat[stat.ST_SIZE]<100000) and (len(myfile)>4) and not ((myfile[-5:]==".html") or (myfile[-4:]==".htm")): >- html404=re.compile("<title>.*(not found|404).*</title>",re.I|re.M) >+ html404=re.compile("<title>.*(not found|404).*</title>", re.I|re.M) > if html404.search(open(mysettings["DISTDIR"]+"/"+myfile).read()): > try: > os.unlink(mysettings["DISTDIR"]+"/"+myfile) >@@ -4478,7 +4478,7 @@ > # file NOW, for those users who don't have a stable/continuous > # net connection. This way we have a chance to try to download > # from another mirror... >- verified_ok,reason = portage.checksum.verify_all(mysettings["DISTDIR"]+"/"+myfile, mydigests[myfile]) >+ verified_ok, reason = portage.checksum.verify_all(mysettings["DISTDIR"]+"/"+myfile, mydigests[myfile]) > if not verified_ok: > print reason > writemsg("!!! Fetched file: "+str(myfile)+" VERIFY FAILED!\n", >@@ -4717,7 +4717,7 @@ > > try: > st = os.stat(os.path.join( >- mysettings["DISTDIR"],myfile)) >+ mysettings["DISTDIR"], myfile)) > except OSError: > st = None > >@@ -4749,12 +4749,12 @@ > "Manifest\n") % e, noiselevel=-1) > return 0 > except portage.exception.PortagePackageException, e: >- writemsg(("!!! %s\n") % (e,), noiselevel=-1) >+ writemsg(("!!! %s\n") % (e, ), noiselevel=-1) > return 0 > try: > mf.write(sign=False) > except portage.exception.PermissionDenied, e: >- writemsg("!!! Permission Denied: %s\n" % (e,), noiselevel=-1) >+ writemsg("!!! 
Permission Denied: %s\n" % (e, ), noiselevel=-1)
> return 0
> if "assume-digests" not in mysettings.features:
> distlist = mf.fhashdict.get("DIST", {}).keys()
>@@ -5131,10 +5131,10 @@
> os.system("mtree -c -p %s -k flags > %s" % \
> (_shell_quote(mysettings["D"]),
> _shell_quote(os.path.join(mysettings["T"], "bsdflags.mtree"))))
>- os.system("chflags -R noschg,nouchg,nosappnd,nouappnd %s" % \
>- (_shell_quote(mysettings["D"]),))
>- os.system("chflags -R nosunlnk,nouunlnk %s 2>/dev/null" % \
>- (_shell_quote(mysettings["D"]),))
>+ os.system("chflags -R noschg,nouchg,nosappnd,nouappnd %s" % \
>+ (_shell_quote(mysettings["D"]), ))
>+ os.system("chflags -R nosunlnk,nouunlnk %s 2>/dev/null" % \
>+ (_shell_quote(mysettings["D"]), ))
> 
> for parent, dirs, files in os.walk(mysettings["D"]):
> for fname in chain(dirs, files):
>@@ -5311,7 +5311,7 @@
> mypv = os.path.basename(ebuild_path)[:-7]
> 
> mycpv = cat+"/"+mypv
>- mysplit=pkgsplit(mypv,silent=0)
>+ mysplit=pkgsplit(mypv, silent=0)
> if mysplit is None:
> raise portage.exception.IncorrectParameter(
> "Invalid ebuild path: '%s'" % myebuild)
>@@ -5422,7 +5422,7 @@
> mysettings["BUILD_PREFIX"] = mysettings["PORTAGE_TMPDIR"]+"/portage"
> mysettings["PKG_TMPDIR"] = mysettings["PORTAGE_TMPDIR"]+"/binpkgs"
> 
>- # Package {pre,post}inst and {pre,post}rm may overlap, so they must have separate
>+ # Package {pre,post}inst and {pre,post}rm may overlap, so they must have separate
> # locations in order to prevent interference. 
> if mydo in ("unmerge", "prerm", "postrm", "cleanrm"): > mysettings["PORTAGE_BUILDDIR"] = os.path.join( >@@ -5452,7 +5452,7 @@ > mydo in ('compile', 'config', 'configure', 'info', > 'install', 'nofetch', 'postinst', 'postrm', 'preinst', > 'prepare', 'prerm', 'setup', 'test', 'unpack'): >- mykv,err1=ExtractKernelVersion(os.path.join(myroot, "usr/src/linux")) >+ mykv, err1=ExtractKernelVersion(os.path.join(myroot, "usr/src/linux")) > if mykv: > # Regular source tree > mysettings["KV"]=mykv >@@ -5584,7 +5584,7 @@ > dirmode = 02070 > filemode = 060 > modemask = 02 >- restrict = mysettings.get("PORTAGE_RESTRICT","").split() >+ restrict = mysettings.get("PORTAGE_RESTRICT", "").split() > from portage.data import secpass > droppriv = secpass >= 2 and \ > "userpriv" in mysettings.features and \ >@@ -5654,7 +5654,7 @@ > except portage.exception.DirectoryNotFound, e: > failure = True > writemsg("\n!!! Directory does not exist: '%s'\n" % \ >- (e,), noiselevel=-1) >+ (e, ), noiselevel=-1) > writemsg("!!! Disabled FEATURES='%s'\n" % myfeature, > noiselevel=-1) > >@@ -5828,7 +5828,7 @@ > @type use_cache: Boolean > @param fetchall: Used to wrap fetch(), fetches all URI's (even ones invalid due to USE conditionals) > @type fetchall: Boolean >- @param tree: Which tree to use ('vartree','porttree','bintree', etc..), defaults to 'porttree' >+ @param tree: Which tree to use ('vartree', 'porttree', 'bintree', etc..), defaults to 'porttree' > @type tree: String > @param mydbapi: a dbapi instance to pass to various functions; this should be a portdbapi instance. 
> @type mydbapi: portdbapi instance >@@ -5886,12 +5886,12 @@ > from portage.data import secpass > > clean_phases = ("clean", "cleanrm") >- validcommands = ["help","clean","prerm","postrm","cleanrm","preinst","postinst", >+ validcommands = ["help", "clean", "prerm", "postrm", "cleanrm", "preinst", "postinst", > "config", "info", "setup", "depend", "pretend", > "fetch", "fetchall", "digest", > "unpack", "prepare", "configure", "compile", "test", > "install", "rpm", "qmerge", "merge", >- "package","unmerge", "manifest"] >+ "package", "unmerge", "manifest"] > > if mydo not in validcommands: > validcommands.sort() >@@ -5940,7 +5940,7 @@ > _doebuild_manifest_cache = None > if not os.path.exists(manifest_path): > out = portage.output.EOutput() >- out.eerror("Manifest not found for '%s'" % (myebuild,)) >+ out.eerror("Manifest not found for '%s'" % (myebuild, )) > _doebuild_broken_ebuilds.add(myebuild) > return 1 > mf = Manifest(pkgdir, mysettings["DISTDIR"]) >@@ -5952,13 +5952,13 @@ > mf.checkFileHashes("EBUILD", os.path.basename(myebuild)) > except KeyError: > out = portage.output.EOutput() >- out.eerror("Missing digest for '%s'" % (myebuild,)) >+ out.eerror("Missing digest for '%s'" % (myebuild, )) > _doebuild_broken_ebuilds.add(myebuild) > return 1 > except portage.exception.FileNotFound: > out = portage.output.EOutput() > out.eerror("A file listed in the Manifest " + \ >- "could not be found: '%s'" % (myebuild,)) >+ "could not be found: '%s'" % (myebuild, )) > _doebuild_broken_ebuilds.add(myebuild) > return 1 > except portage.exception.DigestException, e: >@@ -5990,7 +5990,7 @@ > if f not in _doebuild_broken_ebuilds: > out = portage.output.EOutput() > out.eerror("A file is not listed in the " + \ >- "Manifest: '%s'" % (f,)) >+ "Manifest: '%s'" % (f, )) > _doebuild_broken_manifests.add(manifest_path) > return 1 > >@@ -6031,7 +6031,7 @@ > # we can temporarily override PORTAGE_TMPDIR with a random temp dir > # so that there's no need for locking and it can be used even if 
the
> # user isn't in the portage group.
>- if mydo in ("info",):
>+ if mydo in ("info", ):
> from tempfile import mkdtemp
> tmpdir = mkdtemp()
> tmpdir_orig = mysettings["PORTAGE_TMPDIR"]
>@@ -6117,7 +6117,7 @@
> if "PORTAGE_TMPDIR" not in mysettings or \
> not os.path.isdir(mysettings["PORTAGE_TMPDIR"]):
> writemsg("The directory specified in your " + \
>- "PORTAGE_TMPDIR variable, '%s',\n" % \
>+ "PORTAGE_TMPDIR variable, '%s',\n" % \
> mysettings.get("PORTAGE_TMPDIR", ""), noiselevel=-1)
> writemsg("does not exist. Please create this directory or " + \
> "correct your PORTAGE_TMPDIR setting.\n", noiselevel=-1)
>@@ -6204,7 +6204,7 @@
> os.WEXITSTATUS(retval) == os.EX_OK and \
> env_stat and env_stat.st_size > 0:
> # This is a signal to ebuild.sh, so that it knows to filter
>- # out things like SANDBOX_{DENY,PREDICT,READ,WRITE} that
>+ # out things like SANDBOX_{DENY,PREDICT,READ,WRITE} that
> # would be preserved between normal phases.
> open(env_file + ".raw", "w")
> else:
>@@ -6385,7 +6385,7 @@
> # and it can interfere with parallel tasks.
> digestgen(aalist, mysettings, overwrite=0, myportdb=mydbapi)
> except portage.exception.PermissionDenied, e:
>- writemsg("!!! Permission Denied: %s\n" % (e,), noiselevel=-1)
>+ writemsg("!!! Permission Denied: %s\n" % (e, ), noiselevel=-1)
> if mydo in ("digest", "manifest"):
> return 1
> 
>@@ -6523,7 +6523,7 @@
> myebuild=mysettings["EBUILD"], mytree=tree, mydbapi=mydbapi,
> vartree=vartree, prev_mtimes=prev_mtimes)
> else:
>- print "!!! Unknown mydo:",mydo
>+ print "!!! 
Unknown mydo:", mydo > return 1 > > return retval >@@ -6592,7 +6592,7 @@ > > if msgs: > portage.util.writemsg_level("Error(s) in metadata for '%s':\n" % \ >- (mysettings.mycpv,), level=logging.ERROR, noiselevel=-1) >+ (mysettings.mycpv, ), level=logging.ERROR, noiselevel=-1) > for x in msgs: > portage.util.writemsg_level(x, > level=logging.ERROR, noiselevel=-1) >@@ -6614,7 +6614,7 @@ > """moves a file from src to dest, preserving all permissions and attributes; mtime will > be preserved even when moving across filesystems. Returns true on success and false on > failure. Move is atomic.""" >- #print "movefile("+str(src)+","+str(dest)+","+str(newmtime)+","+str(sstat)+")" >+ #print "movefile("+str(src)+", "+str(dest)+", "+str(newmtime)+", "+str(sstat)+")" > global lchown > if mysettings is None: > global settings >@@ -6628,7 +6628,7 @@ > raise > except Exception, e: > print "!!! Stating source file failed... movefile()" >- print "!!!",e >+ print "!!!", e > return None > > destexists=1 >@@ -6667,10 +6667,10 @@ > os.unlink(dest) > if selinux_enabled: > sid = selinux.get_lsid(src) >- selinux.secure_symlink(target,dest,sid) >+ selinux.secure_symlink(target, dest, sid) > else: >- os.symlink(target,dest) >- lchown(dest,sstat[stat.ST_UID],sstat[stat.ST_GID]) >+ os.symlink(target, dest) >+ lchown(dest, sstat[stat.ST_UID], sstat[stat.ST_GID]) > # utime() only works on the target of a symlink, so it's not > # possible to perserve mtime on symlinks. > return os.lstat(dest)[stat.ST_MTIME] >@@ -6678,8 +6678,8 @@ > raise > except Exception, e: > print "!!! failed to properly create symlink:" >- print "!!!",dest,"->",target >- print "!!!",e >+ print "!!!", dest, "->", target >+ print "!!!", e > return None > > hardlinked = False >@@ -6696,8 +6696,8 @@ > except OSError, e: > if e.errno != errno.ENOENT: > writemsg("!!! Failed to remove hardlink temp file: %s\n" % \ >- (hardlink_tmp,), noiselevel=-1) >- writemsg("!!! 
%s\n" % (e,), noiselevel=-1) >+ (hardlink_tmp, ), noiselevel=-1) >+ writemsg("!!! %s\n" % (e, ), noiselevel=-1) > return None > del e > for hardlink_src in hardlink_candidates: >@@ -6711,7 +6711,7 @@ > except OSError, e: > writemsg("!!! Failed to rename %s to %s\n" % \ > (hardlink_tmp, dest), noiselevel=-1) >- writemsg("!!! %s\n" % (e,), noiselevel=-1) >+ writemsg("!!! %s\n" % (e, ), noiselevel=-1) > return None > hardlinked = True > break >@@ -6722,17 +6722,17 @@ > if not hardlinked and (selinux_enabled or sstat.st_dev == dstat.st_dev): > try: > if selinux_enabled: >- ret=selinux.secure_rename(src,dest) >+ ret=selinux.secure_rename(src, dest) > else: >- ret=os.rename(src,dest) >+ ret=os.rename(src, dest) > renamefailed=0 > except SystemExit, e: > raise > except Exception, e: > if e[0]!=errno.EXDEV: > # Some random error. >- print "!!! Failed to move",src,"to",dest >- print "!!!",e >+ print "!!! Failed to move", src, "to", dest >+ print "!!!", e > return None > # Invalid cross-device-link 'bind' mounted or actually Cross-Device > if renamefailed: >@@ -6740,17 +6740,17 @@ > if stat.S_ISREG(sstat[stat.ST_MODE]): > try: # For safety copy then move it over. > if selinux_enabled: >- selinux.secure_copy(src,dest+"#new") >- selinux.secure_rename(dest+"#new",dest) >+ selinux.secure_copy(src, dest+"#new") >+ selinux.secure_rename(dest+"#new", dest) > else: >- shutil.copyfile(src,dest+"#new") >- os.rename(dest+"#new",dest) >+ shutil.copyfile(src, dest+"#new") >+ os.rename(dest+"#new", dest) > didcopy=1 > except SystemExit, e: > raise > except Exception, e: >- print '!!! copy',src,'->',dest,'failed.' >- print "!!!",e >+ print '!!! copy', src, '->', dest, 'failed.' >+ print "!!!", e > return None > else: > #we don't yet handle special, so we need to fall back to /bin/mv >@@ -6761,22 +6761,22 @@ > if a[0]!=0: > print "!!! Failed to move special file:" > print "!!! 
'"+src+"' to '"+dest+"'" >- print "!!!",a >+ print "!!!", a > return None # failure > try: > if didcopy: > if stat.S_ISLNK(sstat[stat.ST_MODE]): >- lchown(dest,sstat[stat.ST_UID],sstat[stat.ST_GID]) >+ lchown(dest, sstat[stat.ST_UID], sstat[stat.ST_GID]) > else: >- os.chown(dest,sstat[stat.ST_UID],sstat[stat.ST_GID]) >+ os.chown(dest, sstat[stat.ST_UID], sstat[stat.ST_GID]) > os.chmod(dest, stat.S_IMODE(sstat[stat.ST_MODE])) # Sticky is reset on chown > os.unlink(src) > except SystemExit, e: > raise > except Exception, e: > print "!!! Failed to chown/chmod/unlink in movefile()" >- print "!!!",dest >- print "!!!",e >+ print "!!!", dest >+ print "!!!", e > return None > > try: >@@ -6915,11 +6915,11 @@ > if x.blocker and x.blocker.overlap.forbid and \ > eapi in ("0", "1") and portage.dep._dep_check_strict: > raise portage.exception.ParseError( >- "invalid atom: '%s'" % (x,)) >+ "invalid atom: '%s'" % (x, )) > if x.use and eapi in ("0", "1") and \ > portage.dep._dep_check_strict: > raise portage.exception.ParseError( >- "invalid atom: '%s'" % (x,)) >+ "invalid atom: '%s'" % (x, )) > > if repoman and x.use and x.use.conditional: > evaluated_atom = portage.dep.remove_slot(x) >@@ -7302,7 +7302,7 @@ > > if mysplit == []: > #dependencies were reduced to nothing >- return [1,[]] >+ return [1, []] > > # Recursively expand new-style virtuals so as to > # collapse one or more levels of indirection. >@@ -7315,9 +7315,9 @@ > return [0, str(e)] > > mysplit2=mysplit[:] >- mysplit2=dep_wordreduce(mysplit2,mysettings,mydbapi,mode,use_cache=use_cache) >+ mysplit2=dep_wordreduce(mysplit2, mysettings, mydbapi, mode, use_cache=use_cache) > if mysplit2 is None: >- return [0,"Invalid token"] >+ return [0, "Invalid token"] > > writemsg("\n\n\n", 1) > writemsg("mysplit: %s\n" % (mysplit), 1) >@@ -7331,7 +7331,7 @@ > raise # This shouldn't happen. > # dbapi.match() failed due to an invalid atom in > # the dependencies of an installed package. 
>- return [0, "Invalid atom: '%s'" % (e,)] >+ return [0, "Invalid atom: '%s'" % (e, )] > > mylist = flatten(myzaps) > writemsg("myzaps: %s\n" % (myzaps), 1) >@@ -7341,15 +7341,15 @@ > for x in mylist: > mydict[x]=1 > writemsg("mydict: %s\n" % (mydict), 1) >- return [1,mydict.keys()] >+ return [1, mydict.keys()] > >-def dep_wordreduce(mydeplist,mysettings,mydbapi,mode,use_cache=1): >+def dep_wordreduce(mydeplist, mysettings, mydbapi, mode, use_cache=1): > "Reduces the deplist to ones and zeros" > deplist=mydeplist[:] > for mypos, token in enumerate(deplist): > if isinstance(deplist[mypos], list): > #recurse >- deplist[mypos]=dep_wordreduce(deplist[mypos],mysettings,mydbapi,mode,use_cache=use_cache) >+ deplist[mypos]=dep_wordreduce(deplist[mypos], mysettings, mydbapi, mode, use_cache=use_cache) > elif deplist[mypos]=="||": > pass > elif token[:1] == "!": >@@ -7374,7 +7374,7 @@ > else: > mydep = x > else: >- mydep=mydbapi.match(deplist[mypos],use_cache=use_cache) >+ mydep=mydbapi.match(deplist[mypos], use_cache=use_cache) > if mydep!=None: > tmp=(len(mydep)>=1) > if deplist[mypos][0]=="!": >@@ -7407,7 +7407,7 @@ > if len(mysplit)==1: > if hasattr(mydb, "cp_list"): > for x in mydb.categories: >- if mydb.cp_list(x+"/"+mykey,use_cache=use_cache): >+ if mydb.cp_list(x+"/"+mykey, use_cache=use_cache): > return x+"/"+mykey > if mykey in virts_p: > return(virts_p[mykey][0]) >@@ -7443,7 +7443,7 @@ > writemsg("mydb.__class__: %s\n" % (mydb.__class__), 1) > if hasattr(mydb, "cp_list"): > if not mydb.cp_list(mykey, use_cache=use_cache): >- writemsg("virts[%s]: %s\n" % (str(mykey),virts[mykey]), 1) >+ writemsg("virts[%s]: %s\n" % (str(mykey), virts[mykey]), 1) > mykey_orig = mykey[:] > for vkey in virts[mykey]: > # The virtuals file can contain a versioned atom, so >@@ -7469,7 +7469,7 @@ > matches=[] > if mydb and hasattr(mydb, "categories"): > for x in mydb.categories: >- if mydb.cp_list(x+"/"+myp,use_cache=use_cache): >+ if mydb.cp_list(x+"/"+myp, use_cache=use_cache): > 
matches.append(x+"/"+myp) > if len(matches) > 1: > virtual_name_collision = False >@@ -7876,7 +7876,7 @@ > writemsg(colorize("BAD", "\n!!! Your current profile is " + \ > "deprecated and not supported anymore.") + "\n", noiselevel=-1) > if not dcontent: >- writemsg(colorize("BAD","!!! Please refer to the " + \ >+ writemsg(colorize("BAD", "!!! Please refer to the " + \ > "Gentoo Upgrading Guide.") + "\n", noiselevel=-1) > return True > newprofile = dcontent[0] >@@ -7919,7 +7919,7 @@ > pass > > def portageexit(): >- global uid,portage_gid,portdb,db >+ global uid, portage_gid, portdb, db > if secpass and os.environ.get("SANDBOX_ON") != "1": > close_portdbapi_caches() > commit_mtimedb() >@@ -8125,11 +8125,11 @@ > if world_modified: > world_list.sort() > write_atomic(world_file, >- "".join("%s\n" % (x,) for x in world_list)) >+ "".join("%s\n" % (x, ) for x in world_list)) > > update_config_files("/", >- mysettings.get("CONFIG_PROTECT","").split(), >- mysettings.get("CONFIG_PROTECT_MASK","").split(), >+ mysettings.get("CONFIG_PROTECT", "").split(), >+ mysettings.get("CONFIG_PROTECT_MASK", "").split(), > myupd) > > trees["/"]["bintree"] = binarytree("/", mysettings["PKGDIR"], >@@ -8296,7 +8296,7 @@ > that are initialized on demand. > """ > >- __slots__ = ('_name',) >+ __slots__ = ('_name', ) > > def __init__(self, name): > proxy.objectproxy.ObjectProxy.__init__(self) >@@ -8329,7 +8329,7 @@ > The mtimedb is independent from the portdb and other globals. > """ > >- __slots__ = ('_name',) >+ __slots__ = ('_name', ) > > def __init__(self, name): > proxy.objectproxy.ObjectProxy.__init__(self) >Index: pym/portage/env/loaders.py >=================================================================== >--- pym/portage/env/loaders.py (revision 13832) >+++ pym/portage/env/loaders.py (working copy) >@@ -130,7 +130,7 @@ > @type: Boolean > @rtype: tuple > @returns: >- Returns (data,errors), both may be empty dicts or populated. 
>+ Returns (data, errors), both may be empty dicts or populated. > """ > data = {} > errors = {} >@@ -199,7 +199,7 @@ > > >>>>key foo1 foo2 foo3 > becomes >- {'key':['foo1','foo2','foo3']} >+ {'key':['foo1', 'foo2', 'foo3']} > """ > > def __init__(self, filename, validator=None, valuevalidator=None): >Index: pym/portage/checksum.py >=================================================================== >--- pym/portage/checksum.py (revision 13832) >+++ pym/portage/checksum.py (working copy) >@@ -3,7 +3,7 @@ > # Distributed under the terms of the GNU General Public License v2 > # $Id$ > >-from portage.const import PRIVATE_PATH,PRELINK_BINARY,HASHING_BLOCKSIZE >+from portage.const import PRIVATE_PATH, PRELINK_BINARY, HASHING_BLOCKSIZE > import os > import errno > import stat >@@ -149,13 +149,13 @@ > 4) If all checks succeed, return True and a fake reason > """ > # Dict relates to single file only. >- # returns: (passed,reason) >+ # returns: (passed, reason) > file_is_ok = True > reason = "Reason unknown" > try: > mysize = os.stat(filename)[stat.ST_SIZE] > if mydict["size"] != mysize: >- return False,("Filesize does not match recorded size", mysize, mydict["size"]) >+ return False, ("Filesize does not match recorded size", mysize, mydict["size"]) > except OSError, e: > if e.errno == errno.ENOENT: > raise portage.exception.FileNotFound(filename) >@@ -189,9 +189,9 @@ > {"file" : filename, "type" : x}) > else: > file_is_ok = False >- reason = (("Failed on %s verification" % x), myhash,mydict[x]) >+ reason = (("Failed on %s verification" % x), myhash, mydict[x]) > break >- return file_is_ok,reason >+ return file_is_ok, reason > > def perform_checksum(filename, hashname="MD5", calc_prelink=0): > """ >@@ -256,7 +256,7 @@ > @type calc_prelink: Integer > @rtype: Tuple > @return: A dictionary in the form: >- return_value[hash_name] = (hash_result,size) >+ return_value[hash_name] = (hash_result, size) > for each given checksum > """ > rVal = {} >Index: pym/portage/dispatch_conf.py 
>===================================================================
>--- pym/portage/dispatch_conf.py (revision 13832)
>+++ pym/portage/dispatch_conf.py (working copy)
>@@ -35,12 +35,12 @@
> if key == "merge":
> opts["merge"] = "sdiff --suppress-common-lines --output='%s' '%s' '%s'"
> else:
>- print >> sys.stderr, 'dispatch-conf: Missing option "%s" in /etc/dispatch-conf.conf; fatal' % (key,)
>+ print >> sys.stderr, 'dispatch-conf: Missing option "%s" in /etc/dispatch-conf.conf; fatal' % (key, )
> 
> if not os.path.exists(opts['archive-dir']):
> os.mkdir(opts['archive-dir'])
> elif not os.path.isdir(opts['archive-dir']):
>- print >> sys.stderr, 'dispatch-conf: Config archive dir [%s] must exist; fatal' % (opts['archive-dir'],)
>+ print >> sys.stderr, 'dispatch-conf: Config archive dir [%s] must exist; fatal' % (opts['archive-dir'], )
> sys.exit(1)
> 
> return opts
>@@ -63,7 +63,7 @@
> except(IOError, os.error), why:
> print >> sys.stderr, 'dispatch-conf: Error copying %s to %s: %s; fatal' % \
> (curconf, archive, str(why))
>- if os.path.exists(archive + ',v'):
> os.system(RCS_LOCK + ' ' + archive)
> os.system(RCS_PUT + ' ' + archive)
> 
>@@ -106,7 +106,7 @@
> 
> # Archive the current config file if it isn't already saved
> if os.path.exists(archive) \
>- and len(commands.getoutput("diff -aq '%s' '%s'" % (curconf, archive))) != 0:
> suf = 1
> while suf < 9 and os.path.exists(archive + '.' 
+ str(suf)):
> suf += 1
>Index: pym/portage/manifest.py
>===================================================================
>--- pym/portage/manifest.py (revision 13832)
>+++ pym/portage/manifest.py (working copy)
>@@ -6,7 +6,7 @@
> 
> import portage
> portage.proxy.lazyimport.lazyimport(globals(),
>- 'portage.checksum:hashfunc_map,perform_multiple_checksums,verify_all',
> 'portage.util:write_atomic',
> )
> 
>@@ -85,7 +85,7 @@
> return not self.__eq__(other)
> 
> class Manifest(object):
>- parsers = (parseManifest2,)
>+ parsers = (parseManifest2, )
> def __init__(self, pkgdir, distdir, fetchlist_dict=None,
> manifest1_compat=False, from_scratch=False):
> """ create new Manifest instance for package in pkgdir
>@@ -274,7 +274,7 @@
> del self.fhashdict[ftype][fname]
> 
> def hasFile(self, ftype, fname):
>- """ Return whether the Manifest contains an entry for the given type,filename pair """
>+ """ Return whether the Manifest contains an entry for the given type, filename pair """
> return (fname in self.fhashdict[ftype])
> 
> def findFile(self, fname):
>@@ -403,7 +403,7 @@
> def checkFileHashes(self, ftype, fname, ignoreMissing=False):
> myhashes = self.fhashdict[ftype][fname]
> try:
>- ok,reason = verify_all(self._getAbsname(ftype, fname), self.fhashdict[ftype][fname])
>+ ok, reason = verify_all(self._getAbsname(ftype, fname), self.fhashdict[ftype][fname])
> if not ok:
> raise DigestException(tuple([self._getAbsname(ftype, fname)]+list(reason)))
> return ok, reason
>@@ -484,7 +484,7 @@
> return self.updateFileHashes(mytype, fname, *args, **kwargs)
> 
> def getFileData(self, ftype, fname, key):
>- """ Return the value of a specific (type,filename,key) triple, mainly useful
>+ """ Return the value of a specific (type, filename, key) triple, mainly useful
> to get the size for distfiles.""" 
> return self.fhashdict[ftype][fname][key]
> 
>Index: pym/portage/dep.py 
>=================================================================== >--- pym/portage/dep.py (revision 13832) >+++ pym/portage/dep.py (working copy) >@@ -41,7 +41,7 @@ > > Example Usage: > >>> from portage.dep import cpvequal >- >>> cpvequal("sys-apps/portage-2.1","sys-apps/portage-2.1") >+ >>> cpvequal("sys-apps/portage-2.1", "sys-apps/portage-2.1") > >>> True > > """ >@@ -70,15 +70,15 @@ > > _paren_whitespace_re = re.compile(r'\S(\(|\))|(\(|\))\S') > >-def paren_reduce(mystr,tokenize=1): >+def paren_reduce(mystr, tokenize=1): > """ > Take a string and convert all paren enclosed entities into sublists, optionally > futher splitting the list elements by spaces. > > Example usage: >- >>> paren_reduce('foobar foo ( bar baz )',1) >+ >>> paren_reduce('foobar foo ( bar baz )', 1) > ['foobar', 'foo', ['bar', 'baz']] >- >>> paren_reduce('foobar foo ( bar baz )',0) >+ >>> paren_reduce('foobar foo ( bar baz )', 0) > ['foobar foo ', [' bar baz ']] > > @param mystr: The string to reduce >@@ -105,23 +105,23 @@ > subsec = None > tail = "" > elif mystr[0] == ")": >- return [mylist,mystr[1:]] >+ return [mylist, mystr[1:]] > elif has_left_paren and not has_right_paren: > raise portage.exception.InvalidDependString( > "missing right parenthesis: '%s'" % mystr) > elif has_left_paren and left_paren < right_paren: >- freesec,subsec = mystr.split("(",1) >+ freesec, subsec = mystr.split("(", 1) > sublist = paren_reduce(subsec, tokenize=tokenize) > if len(sublist) != 2: > raise portage.exception.InvalidDependString( > "malformed syntax: '%s'" % mystr) > subsec, tail = sublist > else: >- subsec,tail = mystr.split(")",1) >+ subsec, tail = mystr.split(")", 1) > if tokenize: > subsec = strip_empty(subsec.split(" ")) >- return [mylist+subsec,tail] >- return mylist+[subsec],tail >+ return [mylist+subsec, tail] >+ return mylist+[subsec], tail > if not isinstance(tail, basestring): > raise portage.exception.InvalidDependString( > "malformed syntax: '%s'" % mystr) >@@ -176,7 +176,7 @@ > Convert a 
list to a string with sublists enclosed with parens. > > Example usage: >- >>> test = ['foobar','foo',['bar','baz']] >+ >>> test = ['foobar', 'foo', ['bar', 'baz']] > >>> paren_enclose(test) > 'foobar foo ( bar baz )' > >@@ -259,7 +259,7 @@ > sys.stderr.write("Note: Nested use flags without parenthesis (Deprecated)\n") > warned = 1 > if warned: >- sys.stderr.write(" --> "+" ".join(map(str,[head]+newdeparray))+"\n") >+ sys.stderr.write(" --> "+" ".join(map(str, [head]+newdeparray))+"\n") > > # Check that each flag matches > ismatch = True >@@ -406,7 +406,7 @@ > > def _validate_flag(self, token, flag): > if self._valid_use_re.match(flag) is None: >- raise InvalidAtom("Invalid use dep: '%s'" % (token,)) >+ raise InvalidAtom("Invalid use dep: '%s'" % (token, )) > return flag > > def __nonzero__(self): >@@ -415,7 +415,7 @@ > def __str__(self): > if not self.tokens: > return "" >- return "[%s]" % (",".join(self.tokens),) >+ return "[%s]" % (",".join(self.tokens), ) > > def __repr__(self): > return "portage.dep._use_dep(%s)" % repr(self.tokens) >@@ -514,10 +514,10 @@ > "slot", "use", "_str") > > class _blocker(object): >- __slots__ = ("overlap",) >+ __slots__ = ("overlap", ) > > class _overlap(object): >- __slots__ = ("forbid",) >+ __slots__ = ("forbid", ) > > def __init__(self, forbid=False): > self.forbid = forbid >@@ -774,7 +774,7 @@ > Pull a listing of USE Dependencies out of a dep atom. 
> > Example usage: >- >>> dep_getusedeps('app-misc/test:3[foo,-bar]') >+ >>> dep_getusedeps('app-misc/test:3[foo,-bar]') > ('foo', '-bar') > > @param depend: The depstring to process >@@ -794,7 +794,7 @@ > bracket_count += 1 > if bracket_count > 1: > raise InvalidAtom("USE Dependency with more " + \ >- "than one set of brackets: %s" % (depend,)) >+ "than one set of brackets: %s" % (depend, )) > close_bracket = depend.find(']', open_bracket ) > if close_bracket == -1: > raise InvalidAtom("USE Dependency with no closing bracket: %s" % depend ) >@@ -804,14 +804,14 @@ > raise InvalidAtom("USE Dependency with " + \ > "no use flag ([]): %s" % depend ) > if not comma_separated: >- comma_separated = "," in use >+ comma_separated = "," in use > > if comma_separated and bracket_count > 1: > raise InvalidAtom("USE Dependency contains a mixture of " + \ > "comma and bracket separators: %s" % depend ) > > if comma_separated: >- for x in use.split(","): >+ for x in use.split(","): > if x: > use_list.append(x) > else: >Index: pym/portage/versions.py >=================================================================== >--- pym/portage/versions.py (revision 13832) >+++ pym/portage/versions.py (working copy) >@@ -26,11 +26,11 @@ > Compare two versions > Example usage: > >>> from portage.versions import vercmp >- >>> vercmp('1.0-r1','1.2-r3') >+ >>> vercmp('1.0-r1', '1.2-r3') > negative number >- >>> vercmp('1.3','1.2-r3') >+ >>> vercmp('1.3', '1.2-r3') > positive number >- >>> vercmp('1.0_p3','1.0_p3') >+ >>> vercmp('1.0_p3', '1.0_p3') > 0 > > @param pkg1: version to compare with (see ver_regexp in portage.versions.py) >@@ -137,11 +137,11 @@ > for i in range(0, max(len(list1), len(list2))): > # Implicit _p0 is given a value of -1, so that 1 < 1_p0 > if len(list1) <= i: >- s1 = ("p","-1") >+ s1 = ("p", "-1") > else: > s1 = suffix_regexp.match(list1[i]).groups() > if len(list2) <= i: >- s2 = ("p","-1") >+ s2 = ("p", "-1") > else: > s2 = suffix_regexp.match(list2[i]).groups() > if 
s1[0] != s2[0]: >@@ -185,9 +185,9 @@ > > Example usage: > >>> from portage.versions import * >- >>> pkgcmp(pkgsplit('test-1.0-r1'),pkgsplit('test-1.2-r3')) >+ >>> pkgcmp(pkgsplit('test-1.0-r1'), pkgsplit('test-1.2-r3')) > -1 >- >>> pkgcmp(pkgsplit('test-1.3'),pkgsplit('test-1.2-r3')) >+ >>> pkgcmp(pkgsplit('test-1.3'), pkgsplit('test-1.2-r3')) > 1 > > @param pkg1: package to compare with >@@ -207,7 +207,7 @@ > > pkgcache={} > >-def pkgsplit(mypkg,silent=1): >+def pkgsplit(mypkg, silent=1): > try: > if not pkgcache[mypkg]: > return None >@@ -218,7 +218,7 @@ > > if len(myparts)<2: > if not silent: >- print "!!! Name error in",mypkg+": missing a version or name part." >+ print "!!! Name error in", mypkg+": missing a version or name part." > pkgcache[mypkg]=None > return None > >@@ -243,7 +243,7 @@ > pkgcache[mypkg]=None > return None > else: >- myval=("-".join(myparts[:verPos]),myparts[verPos],revision) >+ myval=("-".join(myparts[:verPos]), myparts[verPos], revision) > pkgcache[mypkg]=myval > return myval > else: >@@ -251,7 +251,7 @@ > return None > > catcache={} >-def catpkgsplit(mydata,silent=1): >+def catpkgsplit(mydata, silent=1): > """ > Takes a Category/Package-Version-Rev and returns a list of each. 
> >@@ -276,10 +276,10 @@ > p_split=None > if len(mysplit)==1: > retval=["null"] >- p_split=pkgsplit(mydata,silent=silent) >+ p_split=pkgsplit(mydata, silent=silent) > elif len(mysplit)==2: > retval=[mysplit[0]] >- p_split=pkgsplit(mysplit[1],silent=silent) >+ p_split=pkgsplit(mysplit[1], silent=silent) > if not p_split: > catcache[mydata]=None > return None >Index: pym/portage/tests/__init__.py >=================================================================== >--- pym/portage/tests/__init__.py (revision 13832) >+++ pym/portage/tests/__init__.py (working copy) >@@ -70,7 +70,7 @@ > self.todoed = [] > > def addTodo(self, test, info): >- self.todoed.append((test,info)) >+ self.todoed.append((test, info)) > if self.showAll: > self.stream.writeln("TODO") > elif self.dots: >@@ -120,7 +120,7 @@ > ok = True > except self.failureException: > if self.todo: >- result.addTodo(self,"%s: TODO" % testMethod) >+ result.addTodo(self, "%s: TODO" % testMethod) > else: > result.addFailure(self, sys.exc_info()) > except (KeyboardInterrupt, SystemExit): >@@ -174,8 +174,8 @@ > self.stream.writeln("OK") > return result > >-test_cps = ['sys-apps/portage','virtual/portage'] >-test_versions = ['1.0', '1.0-r1','2.3_p4','1.0_alpha57'] >-test_slots = [ None, '1','gentoo-sources-2.6.17','spankywashere'] >-test_usedeps = ['foo','-bar', ('foo','bar'), >- ('foo','-bar'), ('foo?', '!bar?') ] >+test_cps = ['sys-apps/portage', 'virtual/portage'] >+test_versions = ['1.0', '1.0-r1', '2.3_p4', '1.0_alpha57'] >+test_slots = [ None, '1', 'gentoo-sources-2.6.17', 'spankywashere'] >+test_usedeps = ['foo', '-bar', ('foo', 'bar'), >+ ('foo', '-bar'), ('foo?', '!bar?') ] >Index: pym/portage/tests/bin/setup_env.py >=================================================================== >--- pym/portage/tests/bin/setup_env.py (revision 13832) >+++ pym/portage/tests/bin/setup_env.py (working copy) >@@ -53,7 +53,7 @@ > # we don't care about the output of the programs, > # just their exit value and the state of $D > f 
= open('/dev/null', 'w') >- fd_pipes = {0:0,1:f.fileno(),2:f.fileno()} >+ fd_pipes = {0:0, 1:f.fileno(), 2:f.fileno()} > spawn(func+" "+args, env=os.environ, fd_pipes=fd_pipes) > f.close() > >@@ -68,5 +68,5 @@ > if bin.startswith("do") or \ > bin.startswith("new") or \ > bin.startswith("prep") or \ >- bin in ["ecompress","ecompressdir","fowners","fperms"]: >+ bin in ["ecompress", "ecompressdir", "fowners", "fperms"]: > globals()[bin] = create_portage_wrapper(bin) >Index: pym/portage/tests/dep/test_dep_getslot.py >=================================================================== >--- pym/portage/tests/dep/test_dep_getslot.py (revision 13832) >+++ pym/portage/tests/dep/test_dep_getslot.py (working copy) >@@ -15,7 +15,7 @@ > slot_char = ":" > slots = ( "a", "1.2", "1", "IloveVapier", None ) > cpvs = ["sys-apps/portage"] >- versions = ["2.1.1","2.1-r1"] >+ versions = ["2.1.1", "2.1-r1"] > for cpv in cpvs: > for version in versions: > for slot in slots: >Index: pym/portage/tests/dep/test_get_operator.py >=================================================================== >--- pym/portage/tests/dep/test_get_operator.py (revision 13832) >+++ pym/portage/tests/dep/test_get_operator.py (working copy) >@@ -16,8 +16,8 @@ > ( ">~", ">" ), ("~<", "~"), ( "=~", "=" ), > ( "=>", "=" ), ("=<", "=") ] > >- test_cpvs = ["sys-apps/portage","sys-apps/portage-2.1"] >- slots = [ None,"1","linux-2.5.6" ] >+ test_cpvs = ["sys-apps/portage", "sys-apps/portage-2.1"] >+ slots = [ None, "1", "linux-2.5.6" ] > for cpv in test_cpvs: > for test in tests: > for slot in slots: >Index: pym/portage/tests/dep/test_match_from_list.py >=================================================================== >--- pym/portage/tests/dep/test_match_from_list.py (revision 13832) >+++ pym/portage/tests/dep/test_match_from_list.py (working copy) >@@ -16,7 +16,7 @@ > ("=sys-fs/udev-4*", "sys-fs/udev-456" ) ] > > # I need to look up the cvs syntax >-# ("=sys-fs/udev_cvs*","sys-fs/udev_cvs_pre4" ) ] >+# 
("=sys-fs/udev_cvs*", "sys-fs/udev_cvs_pre4" ) ] > > for test in tests: > self.assertEqual( len(match_from_list( test[0], [test[1]] )), 1 ) >Index: pym/portage/tests/dep/test_isvalidatom.py >=================================================================== >--- pym/portage/tests/dep/test_isvalidatom.py (revision 13832) >+++ pym/portage/tests/dep/test_isvalidatom.py (working copy) >@@ -26,7 +26,7 @@ > ( "sys-apps/portage-2.1:foo", False ), > ( "sys-apps/portage-2.1:", False ), > ( "sys-apps/portage-2.1:[foo]", False ), >- ( "=sys-apps/portage-2.2*:foo[bar?,!baz?,!doc=,build=]", True ), >+ ( "=sys-apps/portage-2.2*:foo[bar?,!baz?,!doc=,build=]", True ), > ( "=sys-apps/portage-2.2*:foo[doc?]", True ), > ( "=sys-apps/portage-2.2*:foo[!doc?]", True ), > ( "=sys-apps/portage-2.2*:foo[doc=]", True ), >@@ -40,11 +40,11 @@ > ( "=sys-apps/portage-2.2*:foo[-doc!=]", False ), > ( "=sys-apps/portage-2.2*:foo[-doc=]", False ), > ( "=sys-apps/portage-2.2*:foo[bar][-baz][doc?][!build?]", False ), >- ( "=sys-apps/portage-2.2*:foo[bar,-baz,doc?,!build?]", True ), >- ( "=sys-apps/portage-2.2*:foo[bar,-baz,doc?,!build?,]", False ), >- ( "=sys-apps/portage-2.2*:foo[,bar,-baz,doc?,!build?]", False ), >- ( "=sys-apps/portage-2.2*:foo[bar,-baz][doc?,!build?]", False ), >- ( "=sys-apps/portage-2.2*:foo[bar][doc,build]", False ), >+ ( "=sys-apps/portage-2.2*:foo[bar,-baz,doc?,!build?]", True ), >+ ( "=sys-apps/portage-2.2*:foo[bar,-baz,doc?,!build?,]", False ), >+ ( "=sys-apps/portage-2.2*:foo[,bar,-baz,doc?,!build?]", False ), >+ ( "=sys-apps/portage-2.2*:foo[bar,-baz][doc?,!build?]", False ), >+ ( "=sys-apps/portage-2.2*:foo[bar][doc,build]", False ), > ( ">~cate-gory/foo-1.0", False ), > ( ">~category/foo-1.0", False ), > ( "<~category/foo-1.0", False ), >Index: pym/portage/tests/dep/test_dep_getcpv.py >=================================================================== >--- pym/portage/tests/dep/test_dep_getcpv.py (revision 13832) >+++ 
pym/portage/tests/dep/test_dep_getcpv.py (working copy) >@@ -13,14 +13,14 @@ > def testDepGetCPV(self): > > prefix_ops = ["<", ">", "=", "~", "!", "<=", >- ">=", "!=", "!<", "!>", "!~",""] >+ ">=", "!=", "!<", "!>", "!~", ""] > > bad_prefix_ops = [ ">~", "<~", "~>", "~<" ] > postfix_ops = [ "*", "" ] > > cpvs = ["sys-apps/portage", "sys-apps/portage-2.1", "sys-apps/portage-2.1", > "sys-apps/portage-2.1"] >- slots = [None,":",":2"] >+ slots = [None, ":", ":2"] > for cpv in cpvs: > for slot in slots: > for prefix in prefix_ops: >Index: pym/portage/tests/dep/test_dep_getusedeps.py >=================================================================== >--- pym/portage/tests/dep/test_dep_getusedeps.py (revision 13832) >+++ pym/portage/tests/dep/test_dep_getusedeps.py (working copy) >@@ -25,13 +25,13 @@ > if slot: > cpv += ":" + slot > if isinstance(use, tuple): >- cpv += "[%s]" % (",".join(use),) >+ cpv += "[%s]" % (",".join(use), ) > self.assertEqual( dep_getusedeps( > cpv ), use ) > else: > if len(use): > self.assertEqual( dep_getusedeps( >- cpv + "[" + use + "]" ), (use,) ) >+ cpv + "[" + use + "]" ), (use, ) ) > else: > self.assertEqual( dep_getusedeps( > cpv + "[" + use + "]" ), () ) >Index: pym/portage/tests/env/config/test_PortageModulesFile.py >=================================================================== >--- pym/portage/tests/env/config/test_PortageModulesFile.py (revision 13832) >+++ pym/portage/tests/env/config/test_PortageModulesFile.py (working copy) >@@ -7,9 +7,9 @@ > > class PortageModulesFileTestCase(TestCase): > >- keys = ['foo.bar','baz','bob','extra_key'] >- invalid_keys = ['',""] >- modules = ['spanky','zmedico','antarus','ricer','5','6'] >+ keys = ['foo.bar', 'baz', 'bob', 'extra_key'] >+ invalid_keys = ['', ""] >+ modules = ['spanky', 'zmedico', 'antarus', 'ricer', '5', '6'] > > def setUp(self): > self.items = {} >@@ -31,7 +31,7 @@ > fd, self.fname = mkstemp() > f = os.fdopen(fd, 'w') > for k, v in self.items.iteritems(): >- f.write('%s=%s\n' 
% (k,v)) >+ f.write('%s=%s\n' % (k, v)) > f.close() > > def NukeFile(self): >Index: pym/portage/tests/env/config/test_PackageKeywordsFile.py >=================================================================== >--- pym/portage/tests/env/config/test_PackageKeywordsFile.py (revision 13832) >+++ pym/portage/tests/env/config/test_PackageKeywordsFile.py (working copy) >@@ -34,7 +34,7 @@ > fd, self.fname = mkstemp() > f = os.fdopen(fd, 'w') > for c in self.cpv: >- f.write("%s %s\n" % (c,' '.join(self.keywords))) >+ f.write("%s %s\n" % (c, ' '.join(self.keywords))) > f.close() > > def NukeFile(self): >Index: pym/portage/tests/versions/test_vercmp.py >=================================================================== >--- pym/portage/tests/versions/test_vercmp.py (revision 13832) >+++ pym/portage/tests/versions/test_vercmp.py (working copy) >@@ -12,19 +12,19 @@ > > def testVerCmpGreater(self): > >- tests = [ ( "6.0", "5.0"), ("5.0","5"), >+ tests = [ ( "6.0", "5.0"), ("5.0", "5"), > ("1.0-r1", "1.0-r0"), > ("1.0-r1", "1.0"), >- ("999999999999999999999999999999", "999999999999999999999999999998"),] >+ ("999999999999999999999999999999", "999999999999999999999999999998"), ] > for test in tests: >- self.failIf( vercmp( test[0], test[1] ) <= 0, msg="%s < %s? Wrong!" % (test[0],test[1]) ) >+ self.failIf( vercmp( test[0], test[1] ) <= 0, msg="%s < %s? Wrong!" % (test[0], test[1]) ) > > def testVerCmpLess(self): > """ > pre < alpha < beta < rc < p -> test each of these, they are inductive (or should be..) 
> """ >- tests = [ ( "4.0", "5.0"), ("5", "5.0"), ("1.0_pre2","1.0_p2"), >- ("1.0_alpha2", "1.0_p2"),("1.0_alpha1", "1.0_beta1"),("1.0_beta3","1.0_rc3"), >+ tests = [ ( "4.0", "5.0"), ("5", "5.0"), ("1.0_pre2", "1.0_p2"), >+ ("1.0_alpha2", "1.0_p2"), ("1.0_alpha1", "1.0_beta1"), ("1.0_beta3", "1.0_rc3"), > ("1.001000000000000000001", "1.001000000000000000002"), > ("1.00100000000", "1.0010000000000000001"), > ("999999999999999999999999999998", "999999999999999999999999999999"), >@@ -32,7 +32,7 @@ > ("1.0-r0", "1.0-r1"), > ("1.0", "1.0-r1")] > for test in tests: >- self.failIf( vercmp( test[0], test[1]) >= 0, msg="%s > %s? Wrong!" % (test[0],test[1])) >+ self.failIf( vercmp( test[0], test[1]) >= 0, msg="%s > %s? Wrong!" % (test[0], test[1])) > > > def testVerCmpEqual(self): >@@ -44,15 +44,15 @@ > ("1.0-r0", "1.0-r0"), > ("1.0-r1", "1.0-r1")] > for test in tests: >- self.failIf( vercmp( test[0], test[1]) != 0, msg="%s != %s? Wrong!" % (test[0],test[1])) >+ self.failIf( vercmp( test[0], test[1]) != 0, msg="%s != %s? Wrong!" % (test[0], test[1])) > > def testVerNotEqual(self): > >- tests = [ ("1","2"),("1.0_alpha","1.0_pre"),("1.0_beta","1.0_alpha"), >+ tests = [ ("1", "2"), ("1.0_alpha", "1.0_pre"), ("1.0_beta", "1.0_alpha"), > ("0", "0.0"), > ("1.0-r0", "1.0-r1"), > ("1.0-r1", "1.0-r0"), > ("1.0", "1.0-r1"), > ("1.0-r1", "1.0")] > for test in tests: >- self.failIf( vercmp( test[0], test[1]) == 0, msg="%s == %s? Wrong!" % (test[0],test[1])) >+ self.failIf( vercmp( test[0], test[1]) == 0, msg="%s == %s? Wrong!" 
% (test[0], test[1])) >Index: pym/portage/tests/util/test_stackLists.py >=================================================================== >--- pym/portage/tests/util/test_stackLists.py (revision 13832) >+++ pym/portage/tests/util/test_stackLists.py (working copy) >@@ -10,9 +10,9 @@ > > def testStackLists(self): > >- tests = [ ( [ ['a','b','c'], ['d','e','f'] ], ['a','c','b','e','d','f'], False ), >- ( [ ['a','x'], ['b','x'] ], ['a','x','b'], False ), >- ( [ ['a','b','c'], ['-*'] ], [], True ), >+ tests = [ ( [ ['a', 'b', 'c'], ['d', 'e', 'f'] ], ['a', 'c', 'b', 'e', 'd', 'f'], False ), >+ ( [ ['a', 'x'], ['b', 'x'] ], ['a', 'x', 'b'], False ), >+ ( [ ['a', 'b', 'c'], ['-*'] ], [], True ), > ( [ ['a'], ['-a'] ], [], True ) ] > > for test in tests: >Index: pym/portage/tests/util/test_stackDictList.py >=================================================================== >--- pym/portage/tests/util/test_stackDictList.py (revision 13832) >+++ pym/portage/tests/util/test_stackDictList.py (working copy) >@@ -10,9 +10,9 @@ > def testStackDictList(self): > from portage.util import stack_dictlist > >- tests = [ ({'a':'b'},{'x':'y'},False,{'a':['b'],'x':['y']}) ] >- tests.append(( {'KEYWORDS':['alpha','x86']},{'KEYWORDS':['-*']},True,{} )) >- tests.append(( {'KEYWORDS':['alpha','x86']},{'KEYWORDS':['-x86']},True,{'KEYWORDS':['alpha']} )) >+ tests = [ ({'a':'b'}, {'x':'y'}, False, {'a':['b'], 'x':['y']}) ] >+ tests.append(( {'KEYWORDS':['alpha', 'x86']}, {'KEYWORDS':['-*']}, True, {} )) >+ tests.append(( {'KEYWORDS':['alpha', 'x86']}, {'KEYWORDS':['-x86']}, True, {'KEYWORDS':['alpha']} )) > for test in tests: > self.assertEqual( >- stack_dictlist([test[0],test[1]],incremental=test[2]), test[3] ) >+ stack_dictlist([test[0], test[1]], incremental=test[2]), test[3] ) >Index: pym/portage/tests/util/test_uniqueArray.py >=================================================================== >--- pym/portage/tests/util/test_uniqueArray.py (revision 13832) >+++ 
pym/portage/tests/util/test_uniqueArray.py (working copy) >@@ -15,8 +15,8 @@ > > import os > >- tests = [ ( ["a","a","a",os,os,[],[],[]], ['a',os,[]] ), >- ( [1,1,1,2,3,4,4] , [1,2,3,4]) ] >+ tests = [ ( ["a", "a", "a", os, os, [], [], []], ['a', os, []] ), >+ ( [1, 1, 1, 2, 3, 4, 4] , [1, 2, 3, 4]) ] > > for test in tests: > result = unique_array( test[0] ) >Index: pym/portage/news.py >=================================================================== >--- pym/portage/news.py (revision 13832) >+++ pym/portage/news.py (working copy) >@@ -245,7 +245,7 @@ > if invalids: > self._valid = False > msg = [] >- msg.append("Invalid news item: %s" % (self.path,)) >+ msg.append("Invalid news item: %s" % (self.path, )) > for lineno, line in invalids: > msg.append(" line %d: %s" % (lineno, line)) > writemsg_level("".join("!!! %s\n" % x for x in msg), >Index: pym/portage/cache/flat_hash.py >=================================================================== >--- pym/portage/cache/flat_hash.py (revision 13832) >+++ pym/portage/cache/flat_hash.py (working copy) >@@ -14,7 +14,7 @@ > autocommits = True > > def __init__(self, *args, **config): >- super(database,self).__init__(*args, **config) >+ super(database, self).__init__(*args, **config) > self.location = os.path.join(self.location, > self.label.lstrip(os.path.sep).rstrip(os.path.sep)) > write_keys = set(self._known_keys) >@@ -53,7 +53,7 @@ > def _setitem(self, cpv, values): > # import pdb;pdb.set_trace() > s = cpv.rfind("/") >- fp = os.path.join(self.location,cpv[:s],".update.%i.%s" % (os.getpid(), cpv[s+1:])) >+ fp = os.path.join(self.location, cpv[:s], ".update.%i.%s" % (os.getpid(), cpv[s+1:])) > try: > myf = codecs.open(fp, mode='w', > encoding='utf_8', errors='replace') >@@ -63,7 +63,7 @@ > self._ensure_dirs(cpv) > myf = codecs.open(fp, mode='w', > encoding='utf_8', errors='replace') >- except (OSError, IOError),e: >+ except (OSError, IOError), e: > raise cache_errors.CacheCorruption(cpv, e) > else: > raise 
cache_errors.CacheCorruption(cpv, e) >@@ -80,7 +80,7 @@ > > #update written. now we move it. > >- new_fp = os.path.join(self.location,cpv) >+ new_fp = os.path.join(self.location, cpv) > try: > os.rename(fp, new_fp) > except (OSError, IOError), e: >@@ -91,7 +91,7 @@ > def _delitem(self, cpv): > # import pdb;pdb.set_trace() > try: >- os.remove(os.path.join(self.location,cpv)) >+ os.remove(os.path.join(self.location, cpv)) > except OSError, e: > if errno.ENOENT == e.errno: > raise KeyError(cpv) >@@ -119,7 +119,7 @@ > for l in dir_list: > if l.endswith(".cpickle"): > continue >- p = os.path.join(dirs[0],l) >+ p = os.path.join(dirs[0], l) > st = os.lstat(p) > if stat.S_ISDIR(st.st_mode): > dirs.append(p) >Index: pym/portage/cache/sql_template.py >=================================================================== >--- pym/portage/cache/sql_template.py (revision 13832) >+++ pym/portage/cache/sql_template.py (working copy) >@@ -48,7 +48,7 @@ > > super(SQLDatabase, self).__init__(location, label, auxdbkeys, *args, **config) > >- config.setdefault("host","127.0.0.1") >+ config.setdefault("host", "127.0.0.1") > config.setdefault("autocommit", self.autocommits) > self._initdb_con(config) > >@@ -62,7 +62,7 @@ > self.con = self.db.cursor() > > >- def _initdb_con(self,config): >+ def _initdb_con(self, config): > """ensure needed tables are in place. > If the derived class needs a different set of table creation commands, overload the approriate > SCHEMA_ attributes. 
If it needs additional execution beyond, override""" >@@ -95,7 +95,7 @@ > > def _sfilter(self, s): > """meta escaping, returns quoted string for use in sql statements""" >- return "\"%s\"" % s.replace("\\","\\\\").replace("\"","\\\"") >+ return "\"%s\"" % s.replace("\\", "\\\\").replace("\"", "\\\"") > > > def _getitem(self, cpv): >@@ -111,7 +111,7 @@ > if len(rows) == 0: > raise KeyError(cpv) > >- vals = dict([(k,"") for k in self._known_keys]) >+ vals = dict([(k, "") for k in self._known_keys]) > vals.update(dict(rows)) > return vals > >@@ -184,7 +184,7 @@ > > cpv = self._sfilter(cpv) > if self._supports_replace: >- query_str = self.SCHEMA_INSERT_CPV_INTO_PACKAGE.replace("INSERT","REPLACE",1) >+ query_str = self.SCHEMA_INSERT_CPV_INTO_PACKAGE.replace("INSERT", "REPLACE", 1) > else: > # just delete it. > try: >@@ -258,7 +258,7 @@ > yield cpv, d > l.clear() > oldcpv = x >- l.append((y,v)) >+ l.append((y, v)) > if oldcpv != None: > d = dict(l) > if "_eclasses_" in d: >@@ -270,13 +270,13 @@ > def commit(self): > self.db.commit() > >- def get_matches(self,match_dict): >+ def get_matches(self, match_dict): > query_list = [] >- for k,v in match_dict.items(): >+ for k, v in match_dict.items(): > if k not in self._known_keys: > raise cache_errors.InvalidRestriction(k, v, "key isn't known to this cache instance") >- v = v.replace("%","\\%") >- v = v.replace(".*","%") >+ v = v.replace("%", "\\%") >+ v = v.replace(".*", "%") > query_list.append("(key=%s AND value LIKE %s)" % (self._sfilter(k), self._sfilter(v))) > > if len(query_list): >Index: pym/portage/cache/cache_errors.py >=================================================================== >--- pym/portage/cache/cache_errors.py (revision 13832) >+++ pym/portage/cache/cache_errors.py (working copy) >@@ -21,7 +21,7 @@ > > > class GeneralCacheCorruption(CacheError): >- def __init__(self,ex): self.ex = ex >+ def __init__(self, ex): self.ex = ex > def __str__(self): return "corruption detected: %s" % str(self.ex) > > >@@ 
-60,4 +60,4 @@ > def __repr__(self): > return "portage.cache.cache_errors.StatCollision(%s)" % \ > (', '.join((repr(self.key), repr(self.filename), >- repr(self.mtime), repr(self.size))),) >+ repr(self.mtime), repr(self.size))), ) >Index: pym/portage/cache/sqlite.py >=================================================================== >--- pym/portage/cache/sqlite.py (revision 13832) >+++ pym/portage/cache/sqlite.py (working copy) >@@ -48,7 +48,7 @@ > def _db_escape_string(self, s): > """meta escaping, returns quoted string for use in sql statements""" > # This is equivalent to the _quote function from pysqlite 1.1. >- return "'%s'" % str(s).replace("'","''") >+ return "'%s'" % str(s).replace("'", "''") > > def _db_init_connection(self, config): > self._dbpath = self.location + ".sqlite" >@@ -89,7 +89,7 @@ > for k in self._allowed_keys: > table_parameters.append("%s TEXT" % k) > table_parameters.append("UNIQUE(%s)" % self._db_table["packages"]["package_key"]) >- create_statement.append(",".join(table_parameters)) >+ create_statement.append(", ".join(table_parameters)) > create_statement.append(")") > > self._db_table["packages"]["create"] = " ".join(create_statement) >@@ -133,7 +133,7 @@ > actual_cache_size = int(cursor.fetchone()[0]) > del cursor > if actual_cache_size != cache_size: >- raise cache_errors.InitializationError(self.__class__,"actual cache_size = "+actual_cache_size+" does does not match requested size of "+cache_size) >+ raise cache_errors.InitializationError(self.__class__, "actual cache_size = "+actual_cache_size+" does does not match requested size of "+cache_size) > > def _db_init_synchronous(self, synchronous): > cursor = self._db_cursor >@@ -142,7 +142,7 @@ > actual_synchronous=int(cursor.fetchone()[0]) > del cursor > if actual_synchronous!=synchronous: >- raise cache_errors.InitializationError(self.__class__,"actual synchronous = "+actual_synchronous+" does does not match requested value of "+synchronous) >+ raise 
cache_errors.InitializationError(self.__class__, "actual synchronous = "+actual_synchronous+" does does not match requested value of "+synchronous) > > def _getitem(self, cpv): > cursor = self._db_cursor >@@ -171,7 +171,7 @@ > update_statement = [] > update_statement.append("REPLACE INTO %s" % self._db_table["packages"]["table_name"]) > update_statement.append("(") >- update_statement.append(','.join([self._db_table["packages"]["package_key"]] + self._allowed_keys)) >+ update_statement.append(', '.join([self._db_table["packages"]["package_key"]] + self._allowed_keys)) > update_statement.append(")") > update_statement.append("VALUES") > update_statement.append("(") >@@ -179,7 +179,7 @@ > values_parameters.append(self._db_escape_string(cpv)) > for k in self._allowed_keys: > values_parameters.append(self._db_escape_string(values.get(k, ''))) >- update_statement.append(",".join(values_parameters)) >+ update_statement.append(", ".join(values_parameters)) > update_statement.append(")") > cursor = self._db_cursor > try: >Index: pym/portage/cache/fs_template.py >=================================================================== >--- pym/portage/cache/fs_template.py (revision 13832) >+++ pym/portage/cache/fs_template.py (working copy) >@@ -59,7 +59,7 @@ > base='/' > > for dir in path.lstrip(os.path.sep).rstrip(os.path.sep).split(os.path.sep): >- base = os.path.join(base,dir) >+ base = os.path.join(base, dir) > if not os.path.exists(base): > if self._perms != -1: > um = os.umask(0) >Index: pym/portage/cache/anydbm.py >=================================================================== >--- pym/portage/cache/anydbm.py (revision 13832) >+++ pym/portage/cache/anydbm.py (working copy) >@@ -21,9 +21,9 @@ > serialize_eclasses = False > > def __init__(self, *args, **config): >- super(database,self).__init__(*args, **config) >+ super(database, self).__init__(*args, **config) > >- default_db = config.get("dbtype","anydbm") >+ default_db = config.get("dbtype", "anydbm") > if not 
default_db.startswith("."): > default_db = '.' + default_db > >@@ -56,7 +56,7 @@ > return pickle.loads(self.__db[cpv]) > > def _setitem(self, cpv, values): >- self.__db[cpv] = pickle.dumps(values,pickle.HIGHEST_PROTOCOL) >+ self.__db[cpv] = pickle.dumps(values, pickle.HIGHEST_PROTOCOL) > > def _delitem(self, cpv): > del self.__db[cpv] >Index: pym/portage/cache/template.py >=================================================================== >--- pym/portage/cache/template.py (revision 13832) >+++ pym/portage/cache/template.py (working copy) >@@ -48,7 +48,7 @@ > mtime = long(mtime) > except ValueError: > raise cache_errors.CacheCorruption(cpv, >- '_mtime_ conversion to long failed: %s' % (mtime,)) >+ '_mtime_ conversion to long failed: %s' % (mtime, )) > d['_mtime_'] = mtime > return d > >@@ -98,7 +98,7 @@ > self.commit() > self.updates = 0 > >- def _delitem(self,cpv): >+ def _delitem(self, cpv): > """__delitem__ calls this after readonly checks. override it in derived classes""" > raise NotImplementedError > >@@ -166,13 +166,13 @@ > > import re > restricts = {} >- for key,match in match_dict.iteritems(): >+ for key, match in match_dict.iteritems(): > # XXX this sucks. > try: > if isinstance(match, basestring): > restricts[key] = re.compile(match).match > else: >- restricts[key] = re.compile(match[0],match[1]).match >+ restricts[key] = re.compile(match[0], match[1]).match > except re.error, e: > raise InvalidRestriction(key, match, e) > if key not in self.__known_keys: >Index: pym/portage/cache/util.py >=================================================================== >--- pym/portage/cache/util.py (revision 13832) >+++ pym/portage/cache/util.py (working copy) >@@ -28,7 +28,7 @@ > trg_cache.sync(100) > > for x in valid_nodes_iterable: >-# print "processing x=",x >+# print "processing x=", x > count+=1 > dead_nodes.discard(x) > try: >@@ -151,19 +151,19 @@ > # call_update_every is used by mirror_cache to determine how often to call in. > # quiet defaults to 2^24 -1. 
Don't call update, 'cept once every 16 million or so :) > call_update_min = 0xffffff >- def update(self,key,*arg): pass >- def exception(self,key,*arg): pass >- def eclass_stale(self,*arg): pass >+ def update(self, key, *arg): pass >+ def exception(self, key, *arg): pass >+ def eclass_stale(self, *arg): pass > def missing_entry(self, key): pass >- def misc(self,key,*arg): pass >+ def misc(self, key, *arg): pass > def corruption(self, key, s): pass > def finish(self, *arg): pass > > class non_quiet_mirroring(quiet_mirroring): > call_update_min=1 >- def update(self,key,*arg): print "processed",key >- def exception(self, key, *arg): print "exec",key,arg >- def missing(self,key): print "key %s is missing", key >- def corruption(self,key,*arg): print "corrupt %s:" % key,arg >- def eclass_stale(self,key,*arg):print "stale %s:"%key,arg >+ def update(self, key, *arg): print "processed", key >+ def exception(self, key, *arg): print "exec", key, arg >+ def missing(self, key): print "key %s is missing", key >+ def corruption(self, key, *arg): print "corrupt %s:" % key, arg >+ def eclass_stale(self, key, *arg):print "stale %s:"%key, arg > >Index: pym/portage/cache/mappings.py >=================================================================== >--- pym/portage/cache/mappings.py (revision 13832) >+++ pym/portage/cache/mappings.py (working copy) >@@ -193,7 +193,7 @@ > given an initial dict, this wraps that dict storing changes in a secondary dict, protecting > the underlying dict from changes > """ >- __slots__=("orig","new","blacklist") >+ __slots__=("orig", "new", "blacklist") > > def __init__(self, orig): > self.orig = orig >@@ -316,7 +316,7 @@ > > allowed_keys = keys_set > _prefix = prefix >- __slots__ = ("__weakref__",) + \ >+ __slots__ = ("__weakref__", ) + \ > tuple(prefix + k for k in allowed_keys) > > def __init__(self, *args, **kwargs): >Index: pym/portage/cache/metadata.py >=================================================================== >--- 
pym/portage/cache/metadata.py (revision 13832) >+++ pym/portage/cache/metadata.py (working copy) >@@ -28,7 +28,7 @@ > def __init__(self, location, *args, **config): > loc = location > super(database, self).__init__(location, *args, **config) >- self.location = os.path.join(loc, "metadata","cache") >+ self.location = os.path.join(loc, "metadata", "cache") > self.ec = None > self.raise_stat_collision = False > >@@ -112,7 +112,7 @@ > existing_mtime, existing_st.st_size) > > s = cpv.rfind("/") >- fp = os.path.join(self.location,cpv[:s], >+ fp = os.path.join(self.location, cpv[:s], > ".update.%i.%s" % (os.getpid(), cpv[s+1:])) > try: > myf = open(fp, 'wb') >Index: pym/portage/cache/flat_list.py >=================================================================== >--- pym/portage/cache/flat_list.py (revision 13832) >+++ pym/portage/cache/flat_list.py (working copy) >@@ -14,7 +14,7 @@ > 'PDEPEND', 'PROVIDE', 'EAPI', 'PROPERTIES', 'DEFINED_PHASES') > > def __init__(self, label, auxdbkeys, **config): >- super(database,self).__init__(label, auxdbkeys, **config) >+ super(database, self).__init__(label, auxdbkeys, **config) > self._base = os.path.join(self._base, > self.label.lstrip(os.path.sep).rstrip(os.path.sep)) > >@@ -27,10 +27,10 @@ > def _getitem(self, cpv): > d = {} > try: >- myf = open(os.path.join(self._base, cpv),"r") >- for k,v in zip(self.auxdbkey_order, myf): >+ myf = open(os.path.join(self._base, cpv), "r") >+ for k, v in zip(self.auxdbkey_order, myf): > d[k] = v.rstrip("\n") >- except (OSError, IOError),e: >+ except (OSError, IOError), e: > if errno.ENOENT == e.errno: > raise KeyError(cpv) > raise cache_errors.CacheCorruption(cpv, e) >@@ -46,27 +46,27 @@ > > def _setitem(self, cpv, values): > s = cpv.rfind("/") >- fp=os.path.join(self._base,cpv[:s],".update.%i.%s" % (os.getpid(), cpv[s+1:])) >+ fp=os.path.join(self._base, cpv[:s], ".update.%i.%s" % (os.getpid(), cpv[s+1:])) > try: > myf = open(fp, "w") > except (OSError, IOError), e: > if errno.ENOENT == 
e.errno: > try: > self._ensure_dirs(cpv) >- myf=open(fp,"w") >- except (OSError, IOError),e: >+ myf=open(fp, "w") >+ except (OSError, IOError), e: > raise cache_errors.CacheCorruption(cpv, e) > else: > raise cache_errors.CacheCorruption(cpv, e) > > > for x in self.auxdbkey_order: >- myf.write(values.get(x,"")+"\n") >+ myf.write(values.get(x, "")+"\n") > > myf.close() > self._ensure_access(fp, mtime=values["_mtime_"]) > #update written. now we move it. >- new_fp = os.path.join(self._base,cpv) >+ new_fp = os.path.join(self._base, cpv) > try: > os.rename(fp, new_fp) > except (OSError, IOError), e: >@@ -76,7 +76,7 @@ > > def _delitem(self, cpv): > try: >- os.remove(os.path.join(self._base,cpv)) >+ os.remove(os.path.join(self._base, cpv)) > except OSError, e: > if errno.ENOENT == e.errno: > raise KeyError(cpv) >@@ -96,7 +96,7 @@ > for l in os.listdir(dirs[0]): > if l.endswith(".cpickle"): > continue >- p = os.path.join(dirs[0],l) >+ p = os.path.join(dirs[0], l) > st = os.lstat(p) > if stat.S_ISDIR(st.st_mode): > dirs.append(p) >Index: pym/portage/cache/metadata_overlay.py >=================================================================== >--- pym/portage/cache/metadata_overlay.py (revision 13832) >+++ pym/portage/cache/metadata_overlay.py (working copy) >@@ -89,12 +89,12 @@ > return value["EAPI"] == "whiteout" > > def _create_whiteout(self, value): >- return {"EAPI":"whiteout","_eclasses_":value["_eclasses_"],"_mtime_":value["_mtime_"]} >+ return {"EAPI":"whiteout", "_eclasses_":value["_eclasses_"], "_mtime_":value["_mtime_"]} > > def _is_whiteout_valid(self, name, value_rw): > try: > value_ro = self.db_ro[name] >- return self._are_values_identical(value_rw,value_ro) >+ return self._are_values_identical(value_rw, value_ro) > except KeyError: > return False > >Index: pym/portage/cache/ebuild_xattr.py >=================================================================== >--- pym/portage/cache/ebuild_xattr.py (revision 13832) >+++ pym/portage/cache/ebuild_xattr.py 
(working copy) >@@ -12,7 +12,7 @@ > from portage.util import writemsg > import os > import xattr >-from errno import ENODATA,ENOSPC,E2BIG >+from errno import ENODATA, ENOSPC, E2BIG > > class NoValueException(Exception): > pass >@@ -22,7 +22,7 @@ > autocommits = True > > def __init__(self, *args, **config): >- super(database,self).__init__(*args, **config) >+ super(database, self).__init__(*args, **config) > self.portdir = self.label > self.ns = xattr.NS_USER + '.gentoo.cache' > self.keys = set(self._known_keys) >@@ -32,15 +32,15 @@ > self.max_len = self.__get_max() > > def __get_max(self): >- path = os.path.join(self.portdir,'profiles/repo_name') >+ path = os.path.join(self.portdir, 'profiles/repo_name') > try: >- return int(self.__get(path,'value_max_len')) >- except NoValueException,e: >+ return int(self.__get(path, 'value_max_len')) >+ except NoValueException, e: > max = self.__calc_max(path) >- self.__set(path,'value_max_len',str(max)) >+ self.__set(path, 'value_max_len', str(max)) > return max > >- def __calc_max(self,path): >+ def __calc_max(self, path): > """ Find out max attribute length supported by the file system """ > > hundred = '' >@@ -52,57 +52,57 @@ > # Could use finally but needs python 2.5 then > try: > while True: >- self.__set(path,'test_max',s) >+ self.__set(path, 'test_max', s) > s+=hundred >- except IOError,e: >+ except IOError, e: > # ext based give wrong errno > # http://bugzilla.kernel.org/show_bug.cgi?id=12793 >- if e.errno in (E2BIG,ENOSPC): >+ if e.errno in (E2BIG, ENOSPC): > result = len(s)-100 > else: > raise e > > try: >- self.__remove(path,'test_max') >- except IOError,e: >+ self.__remove(path, 'test_max') >+ except IOError, e: > if e.errno is not ENODATA: > raise e > > return result > >- def __get_path(self,cpv): >- cat,pn = catsplit(cpv_getkey(cpv)) >- return os.path.join(self.portdir,cat,pn,os.path.basename(cpv) + ".ebuild") >+ def __get_path(self, cpv): >+ cat, pn = catsplit(cpv_getkey(cpv)) >+ return os.path.join(self.portdir, 
cat, pn, os.path.basename(cpv) + ".ebuild") > >- def __has_cache(self,path): >+ def __has_cache(self, path): > try: >- self.__get(path,'_mtime_') >- except NoValueException,e: >+ self.__get(path, '_mtime_') >+ except NoValueException, e: > return False > > return True > >- def __get(self,path,key,default=None): >+ def __get(self, path, key, default=None): > try: >- return xattr.get(path,key,namespace=self.ns) >- except IOError,e: >+ return xattr.get(path, key, namespace=self.ns) >+ except IOError, e: > if not default is None and ENODATA == e.errno: > return default > else: > raise NoValueException() > >- def __remove(self,path,key): >- xattr.remove(path,key,namespace=self.ns) >+ def __remove(self, path, key): >+ xattr.remove(path, key, namespace=self.ns) > >- def __set(self,path,key,value): >- xattr.set(path,key,value,namespace=self.ns) >+ def __set(self, path, key, value): >+ xattr.set(path, key, value, namespace=self.ns) > > def _getitem(self, cpv): > values = {} > path = self.__get_path(cpv) > all = {} >- for tuple in xattr.get_all(path,namespace=self.ns): >- key,value = tuple >+ for tuple in xattr.get_all(path, namespace=self.ns): >+ key, value = tuple > all[key] = value > > if not '_mtime_' in all: >@@ -110,11 +110,11 @@ > > # We default to '' like other caches > for key in self.keys: >- attr_value = all.get(key,'1:') >- parts,sep,value = attr_value.partition(':') >+ attr_value = all.get(key, '1:') >+ parts, sep, value = attr_value.partition(':') > parts = int(parts) > if parts > 1: >- for i in range(1,parts): >+ for i in range(1, parts): > value += all.get(key+str(i)) > values[key] = value > >@@ -123,7 +123,7 @@ > def _setitem(self, cpv, values): > path = self.__get_path(cpv) > max = self.max_len >- for key,value in values.iteritems(): >+ for key, value in values.iteritems(): > # mtime comes in as long so need to convert to strings > s = str(value) > # We need to split long values >@@ -136,15 +136,15 @@ > parts += 1 > > # Only the first entry carries the 
number of parts >- self.__set(path,key,'%s:%s'%(parts,s[0:max])) >+ self.__set(path, key, '%s:%s'%(parts, s[0:max])) > > # Write out the rest >- for i in range(1,parts): >+ for i in range(1, parts): > start = i * max > val = s[start:start+max] >- self.__set(path,key+str(i),val) >+ self.__set(path, key+str(i), val) > else: >- self.__set(path,key,"%s:%s"%(1,s)) >+ self.__set(path, key, "%s:%s"%(1, s)) > > def _delitem(self, cpv): > pass # Will be gone with the ebuild >@@ -153,11 +153,11 @@ > return os.path.exists(self.__get_path(cpv)) > > def __iter__(self): >- for root,dirs,files in os.walk(self.portdir): >+ for root, dirs, files in os.walk(self.portdir): > for file in files: > if file[-7:] == '.ebuild': > cat = os.path.basename(os.path.dirname(root)) > pn_pv = file[:-7] >- path = os.path.join(root,file) >+ path = os.path.join(root, file) > if self.__has_cache(path): >- yield "%s/%s/%s" % (cat,os.path.basename(root),file[:-7]) >+ yield "%s/%s/%s" % (cat, os.path.basename(root), file[:-7]) >Index: pym/portage/util.py >=================================================================== >--- pym/portage/util.py (revision 13832) >+++ pym/portage/util.py (working copy) >@@ -49,7 +49,7 @@ > """ > logging.basicConfig(level=logging.WARN, format='[%(levelname)-4s] %(message)s') > >-def writemsg(mystr,noiselevel=0,fd=None): >+def writemsg(mystr, noiselevel=0, fd=None): > """Prints out warning and debug messages based on the noiselimit setting""" > global noiselimit > if fd is None: >@@ -58,7 +58,7 @@ > fd.write(mystr) > fd.flush() > >-def writemsg_stdout(mystr,noiselevel=0): >+def writemsg_stdout(mystr, noiselevel=0): > """Prints messages stdout based on the noiselimit setting""" > writemsg(mystr, noiselevel=noiselevel, fd=sys.stdout) > >@@ -109,7 +109,7 @@ > if myline[0]=="#": > # Check if we have a compat-level string. BC-integration data. 
> # '##COMPAT==>N<==' 'some string attached to it' >- mylinetest = myline.split("<==",1) >+ mylinetest = myline.split("<==", 1) > if len(mylinetest) == 2: > myline_potential = mylinetest[1] > mylinetest = mylinetest[0].split("##COMPAT==>") >@@ -123,13 +123,13 @@ > newlines.append(myline) > return newlines > >-def map_dictlist_vals(func,myDict): >+def map_dictlist_vals(func, myDict): > """Performs a function on each value of each key in a dictlist. > Returns a new dictlist.""" > new_dl = {} > for key in myDict: > new_dl[key] = [] >- new_dl[key] = map(func,myDict[key]) >+ new_dl[key] = map(func, myDict[key]) > return new_dl > > def stack_dictlist(original_dicts, incremental=0, incrementals=[], ignore_none=0): >@@ -140,17 +140,17 @@ > > Example usage: > >>> from portage.util import stack_dictlist >- >>> print stack_dictlist( [{'a':'b'},{'x':'y'}]) >- >>> {'a':'b','x':'y'} >- >>> print stack_dictlist( [{'a':'b'},{'a':'c'}], incremental = True ) >- >>> {'a':['b','c'] } >- >>> a = {'KEYWORDS':['x86','alpha']} >+ >>> print stack_dictlist( [{'a':'b'}, {'x':'y'}]) >+ >>> {'a':'b', 'x':'y'} >+ >>> print stack_dictlist( [{'a':'b'}, {'a':'c'}], incremental = True ) >+ >>> {'a':['b', 'c'] } >+ >>> a = {'KEYWORDS':['x86', 'alpha']} > >>> b = {'KEYWORDS':['-x86']} >- >>> print stack_dictlist( [a,b] ) >- >>> { 'KEYWORDS':['x86','alpha','-x86']} >- >>> print stack_dictlist( [a,b], incremental=True) >+ >>> print stack_dictlist( [a, b] ) >+ >>> { 'KEYWORDS':['x86', 'alpha', '-x86']} >+ >>> print stack_dictlist( [a, b], incremental=True) > >>> { 'KEYWORDS':['alpha'] } >- >>> print stack_dictlist( [a,b], incrementals=['KEYWORDS']) >+ >>> print stack_dictlist( [a, b], incrementals=['KEYWORDS']) > >>> { 'KEYWORDS':['alpha'] } > > @param original_dicts a list of (dictionary objects or None) >@@ -304,7 +304,7 @@ > del(pkgs[x]) > return pkgs > >-def grablines(myfilename,recursive=0): >+def grablines(myfilename, recursive=0): > mylines=[] > if recursive and os.path.isdir(myfilename): > if 
myfilename in ["RCS", "CVS", "SCCS"]: >@@ -326,7 +326,7 @@ > pass > return mylines > >-def writedict(mydict,myfilename,writekey=True): >+def writedict(mydict, myfilename, writekey=True): > """Writes out a dict to a file; writekey=0 mode doesn't write out > the key and assumes all values are strings, not lists.""" > myfile = None >@@ -388,7 +388,7 @@ > # attribute is properly set. > lex = shlex_class(f, infile=mycfg, posix=True) > lex.wordchars = string.digits + string.ascii_letters + \ >- "~!@#$%*_\:;?,./-+{}" >+ "~!@#$%*_\:;?, ./-+{}" > lex.quotes="\"'" > if allow_sourcing: > lex.source="source" >@@ -402,7 +402,7 @@ > equ=lex.get_token() > if (equ==''): > #unexpected end of file >- #lex.error_leader(self.filename,lex.lineno) >+ #lex.error_leader(self.filename, lex.lineno) > if not tolerant: > writemsg("!!! Unexpected end of config file: variable "+str(key)+"\n", > noiselevel=-1) >@@ -411,7 +411,7 @@ > return mykeys > elif (equ!='='): > #invalid token >- #lex.error_leader(self.filename,lex.lineno) >+ #lex.error_leader(self.filename, lex.lineno) > if not tolerant: > raise Exception("ParseError: Invalid token " + \ > "'%s' (not '='): %s: line %s" % \ >@@ -421,7 +421,7 @@ > val=lex.get_token() > if val is None: > #unexpected end of file >- #lex.error_leader(self.filename,lex.lineno) >+ #lex.error_leader(self.filename, lex.lineno) > if not tolerant: > writemsg("!!! Unexpected end of config file: variable "+str(key)+"\n", > noiselevel=-1) >@@ -551,10 +551,10 @@ > # broken and removed, but can still be imported > pickle_write = None > >-def pickle_read(filename,default=None,debug=0): >+def pickle_read(filename, default=None, debug=0): > import os > if not os.access(filename, os.R_OK): >- writemsg("pickle_read(): File not readable. '"+filename+"'\n",1) >+ writemsg("pickle_read(): File not readable. 
'"+filename+"'\n", 1) > return default > data = None > try: >@@ -562,12 +562,12 @@ > mypickle = pickle.Unpickler(myf) > data = mypickle.load() > myf.close() >- del mypickle,myf >- writemsg("pickle_read(): Loaded pickle. '"+filename+"'\n",1) >+ del mypickle, myf >+ writemsg("pickle_read(): Loaded pickle. '"+filename+"'\n", 1) > except SystemExit, e: > raise > except Exception, e: >- writemsg("!!! Failed to load pickle: "+str(e)+"\n",1) >+ writemsg("!!! Failed to load pickle: "+str(e)+"\n", 1) > data = default > return data > >@@ -597,7 +597,7 @@ > It works by generating key objects which use the given cmp function to > implement their __lt__ method. > """ >- __slots__ = ("_cmp_func",) >+ __slots__ = ("_cmp_func", ) > > def __init__(self, cmp_func): > """ >@@ -1003,7 +1003,7 @@ > for lazy initialization of values via callable objects. Lazy items can be > overwritten and deleted just as normal items.""" > >- __slots__ = ('lazy_items',) >+ __slots__ = ('lazy_items', ) > > def __init__(self, *args, **kwargs): > >@@ -1243,7 +1243,7 @@ > def new_protect_filename(mydest, newmd5=None): > """Resolves a config-protect filename for merging, optionally > using the last filename if the md5 matches. 
>- (dest,md5) ==> 'string' --- path_to_target_filename >+ (dest, md5) ==> 'string' --- path_to_target_filename > (dest) ==> ('next', 'highest') --- next_target and most-recent_target > """ > >Index: pym/portage/update.py >=================================================================== >--- pym/portage/update.py (revision 13832) >+++ pym/portage/update.py (working copy) >@@ -6,9 +6,9 @@ > > import portage > portage.proxy.lazyimport.lazyimport(globals(), >- 'portage.dep:dep_getkey,get_operator,isvalidatom,isjustname,remove_slot', >- 'portage.util:ConfigProtect,grabfile,new_protect_filename,' + \ >- 'normalize_path,write_atomic,writemsg', >+ 'portage.dep:dep_getkey, get_operator, isvalidatom, isjustname, remove_slot', >+ 'portage.util:ConfigProtect, grabfile, new_protect_filename, ' + \ >+ 'normalize_path, write_atomic, writemsg', > 'portage.versions:ververify' > ) > >@@ -171,7 +171,7 @@ > myxfiles = recursivefiles > for x in myxfiles: > try: >- myfile = open(os.path.join(abs_user_config, x),"r") >+ myfile = open(os.path.join(abs_user_config, x), "r") > file_contents[x] = myfile.readlines() > myfile.close() > except IOError: >Index: pym/portage/mail.py >=================================================================== >--- pym/portage/mail.py (revision 13832) >+++ pym/portage/mail.py (working copy) >@@ -53,13 +53,13 @@ > if "@" in mymailuri: > myauthdata, myconndata = mymailuri.rsplit("@", 1) > try: >- mymailuser,mymailpasswd = myauthdata.split(":") >+ mymailuser, mymailpasswd = myauthdata.split(":") > except ValueError: > print "!!! invalid SMTP AUTH configuration, trying unauthenticated ..." 
> else: > myconndata = mymailuri > if ":" in myconndata: >- mymailhost,mymailport = myconndata.split(":") >+ mymailhost, mymailport = myconndata.split(":") > else: > mymailhost = myconndata > else: >Index: pym/portage/glsa.py >=================================================================== >--- pym/portage/glsa.py (revision 13832) >+++ pym/portage/glsa.py (working copy) >@@ -188,7 +188,7 @@ > raise GlsaFormatException("Invalid Tag found: ", subnode.nodeName) > if format == "strip": > rValue = rValue.strip(" \n\t") >- rValue = re.sub("[\s]{2,}", " ", rValue) >+ rValue = re.sub("[\s]{2, }", " ", rValue) > return rValue.encode("utf_8") > > def getMultiTagsText(rootnode, tagname, format): >Index: pym/portage/const.py >=================================================================== >--- pym/portage/const.py (revision 13832) >+++ pym/portage/const.py (working copy) >@@ -57,19 +57,19 @@ > "PROFILE_ONLY_VARIABLES"] > EBUILD_PHASES = ["setup", "unpack", "prepare", "configure", > "compile", "test", "install", >- "package", "preinst", "postinst","prerm", "postrm", >+ "package", "preinst", "postinst", "prerm", "postrm", > "nofetch", "config", "info", "other"] > > EAPI = 2 > > HASHING_BLOCKSIZE = 32768 >-MANIFEST1_HASH_FUNCTIONS = ["MD5","SHA256","RMD160"] >-MANIFEST2_HASH_FUNCTIONS = ["SHA1","SHA256","RMD160"] >+MANIFEST1_HASH_FUNCTIONS = ["MD5", "SHA256", "RMD160"] >+MANIFEST2_HASH_FUNCTIONS = ["SHA1", "SHA256", "RMD160"] > > MANIFEST1_REQUIRED_HASH = "MD5" > MANIFEST2_REQUIRED_HASH = "SHA1" > >-MANIFEST2_IDENTIFIERS = ["AUX","MISC","DIST","EBUILD"] >+MANIFEST2_IDENTIFIERS = ["AUX", "MISC", "DIST", "EBUILD"] > # =========================================================================== > # END OF CONSTANTS -- END OF CONSTANTS -- END OF CONSTANTS -- END OF CONSTANT > # =========================================================================== >Index: pym/portage/sets/dbapi.py >=================================================================== >--- 
pym/portage/sets/dbapi.py (revision 13832) >+++ pym/portage/sets/dbapi.py (working copy) >@@ -200,7 +200,7 @@ > metadatadb = options.get("metadata-source", "porttree") > if not metadatadb in trees: > raise SetConfigError(_("invalid value '%s' for option " >- "metadata-source") % (metadatadb,)) >+ "metadata-source") % (metadatadb, )) > > return cls(trees["vartree"].dbapi, > metadatadb=trees[metadatadb].dbapi) >Index: pym/portage/sets/files.py >=================================================================== >--- pym/portage/sets/files.py (revision 13832) >+++ pym/portage/sets/files.py (working copy) >@@ -60,7 +60,7 @@ > return bool(atom[:1] == SETPREFIX or ValidAtomValidator(atom)) > > def write(self): >- write_atomic(self._filename, "".join("%s\n" % (atom,) \ >+ write_atomic(self._filename, "".join("%s\n" % (atom, ) \ > for atom in sorted(chain(self._atoms, self._nonatoms)))) > > def load(self): >Index: pym/portage/sets/profiles.py >=================================================================== >--- pym/portage/sets/profiles.py (revision 13832) >+++ pym/portage/sets/profiles.py (working copy) >@@ -31,19 +31,19 @@ > debug = self._debug > if debug: > writemsg_level("\nPackagesSystemSet: profile paths: %s\n" % \ >- (self._profile_paths,), level=logging.DEBUG, noiselevel=-1) >+ (self._profile_paths, ), level=logging.DEBUG, noiselevel=-1) > > mylist = [grabfile_package(os.path.join(x, "packages")) for x in self._profile_paths] > > if debug: > writemsg_level("\nPackagesSystemSet: raw packages: %s\n" % \ >- (mylist,), level=logging.DEBUG, noiselevel=-1) >+ (mylist, ), level=logging.DEBUG, noiselevel=-1) > > mylist = stack_lists(mylist, incremental=1) > > if debug: > writemsg_level("\nPackagesSystemSet: stacked packages: %s\n" % \ >- (mylist,), level=logging.DEBUG, noiselevel=-1) >+ (mylist, ), level=logging.DEBUG, noiselevel=-1) > > self._setAtoms([x[1:] for x in mylist if x[0] == "*"]) > >Index: pym/portage/eclass_cache.py 
>=================================================================== >--- pym/portage/eclass_cache.py (revision 13832) >+++ pym/portage/eclass_cache.py (working copy) >@@ -17,7 +17,7 @@ > """ > def __init__(self, porttree_root, overlays=[]): > >- self.eclasses = {} # {"Name": ("location","_mtime_")} >+ self.eclasses = {} # {"Name": ("location", "_mtime_")} > self._eclass_locations = {} > > # screw with the porttree ordering, w/out having bash inherit match it, and I'll hurt you. >@@ -77,7 +77,7 @@ > master_eclasses = {} > eclass_len = len(".eclass") > ignored_listdir_errnos = (errno.ENOENT, errno.ENOTDIR) >- for x in [normalize_path(os.path.join(y,"eclass")) for y in self.porttrees]: >+ for x in [normalize_path(os.path.join(y, "eclass")) for y in self.porttrees]: > try: > eclass_filenames = os.listdir(x) > except OSError, e: >Index: pym/portage/elog/__init__.py >=================================================================== >--- pym/portage/elog/__init__.py (revision 13832) >+++ pym/portage/elog/__init__.py (working copy) >@@ -63,12 +63,12 @@ > def elog_process(cpv, mysettings, phasefilter=None): > global _elog_atexit_handlers, _emerge_elog_listener, _preserve_logentries > >- logsystems = mysettings.get("PORTAGE_ELOG_SYSTEM","").split() >+ logsystems = mysettings.get("PORTAGE_ELOG_SYSTEM", "").split() > for s in logsystems: > # allow per module overrides of PORTAGE_ELOG_CLASSES > if ":" in s: > s, levels = s.split(":", 1) >- levels = levels.split(",") >+ levels = levels.split(", ") > # - is nicer than _ for module names, so allow people to use it. 
> s = s.replace("-", "_") > try: >@@ -105,7 +105,7 @@ > for token in mysettings.get("PORTAGE_ELOG_SYSTEM", "").split(): > if ":" in token: > s, levels = token.split(":", 1) >- levels = levels.split(",") >+ levels = levels.split(", ") > else: > s = token > levels = () >Index: pym/portage/elog/mod_syslog.py >=================================================================== >--- pym/portage/elog/mod_syslog.py (revision 13832) >+++ pym/portage/elog/mod_syslog.py (working copy) >@@ -11,7 +11,7 @@ > for phase in EBUILD_PHASES: > if not phase in logentries: > continue >- for msgtype,msgcontent in logentries[phase]: >+ for msgtype, msgcontent in logentries[phase]: > pri = {"INFO": syslog.LOG_INFO, > "WARN": syslog.LOG_WARNING, > "ERROR": syslog.LOG_ERR, >Index: pym/portage/output.py >=================================================================== >--- pym/portage/output.py (revision 13832) >+++ pym/portage/output.py (working copy) >@@ -270,13 +270,13 @@ > shell = find_binary("sh") > if shell: > spawn([shell, "-c", prompt_command], env=os.environ, >- fd_pipes={0:sys.stdin.fileno(),1:sys.stderr.fileno(), >+ fd_pipes={0:sys.stdin.fileno(), 1:sys.stderr.fileno(), > 2:sys.stderr.fileno()}) > else: > os.system(prompt_command) > return > else: >- pwd = os.getenv('PWD','') >+ pwd = os.getenv('PWD', '') > home = os.getenv('HOME', '') > if home != '' and pwd.startswith(home): > pwd = '~' + pwd[len(home):] >@@ -322,9 +322,9 @@ > else: > return text > >-compat_functions_colors = ["bold","white","teal","turquoise","darkteal", >- "fuchsia","purple","blue","darkblue","green","darkgreen","yellow", >- "brown","darkyellow","red","darkred"] >+compat_functions_colors = ["bold", "white", "teal", "turquoise", "darkteal", >+ "fuchsia", "purple", "blue", "darkblue", "green", "darkgreen", "yellow", >+ "brown", "darkyellow", "red", "darkred"] > > def create_color_func(color_key): > def derived_func(*args): >Index: pym/portage/xpak.py 
>=================================================================== >--- pym/portage/xpak.py (revision 13832) >+++ pym/portage/xpak.py (working copy) >@@ -16,16 +16,16 @@ > # (integer) == encodeint(integer) ===> 4 characters (big-endian copy) > # '+' means concatenate the fields ===> All chunks are strings > >-import sys,os,shutil,errno >+import sys, os, shutil, errno > from stat import * > >-def addtolist(mylist,curdir): >+def addtolist(mylist, curdir): > """(list, dir) --- Takes an array(list) and appends all files from dir down > the directory tree. Returns nothing. list is modified.""" > for x in os.listdir("."): > if os.path.isdir(x): > os.chdir(x) >- addtolist(mylist,curdir+x+"/") >+ addtolist(mylist, curdir+x+"/") > os.chdir("..") > else: > if curdir+x not in mylist: >@@ -50,8 +50,8 @@ > myint=myint+(ord(mystring[0]) << 24) > return myint > >-def xpak(rootdir,outfile=None): >- """(rootdir,outfile) -- creates an xpak segment of the directory 'rootdir' >+def xpak(rootdir, outfile=None): >+ """(rootdir, outfile) -- creates an xpak segment of the directory 'rootdir' > and under the name 'outfile' if it is specified. Otherwise it returns the > xpak segment.""" > try: >@@ -64,7 +64,7 @@ > os.chdir(rootdir) > mylist=[] > >- addtolist(mylist,"") >+ addtolist(mylist, "") > mylist.sort() > mydata = {} > for x in mylist: >@@ -104,7 +104,7 @@ > """(infile) -- Splits the infile into two files. > 'infile.index' contains the index segment. 
> 'infile.dat' contails the data segment.""" >- myfile=open(infile,"r") >+ myfile=open(infile, "r") > mydat=myfile.read() > myfile.close() > >@@ -112,10 +112,10 @@ > if not splits: > return False > >- myfile=open(infile+".index","w") >+ myfile=open(infile+".index", "w") > myfile.write(splits[0]) > myfile.close() >- myfile=open(infile+".dat","w") >+ myfile=open(infile+".dat", "w") > myfile.write(splits[1]) > myfile.close() > return True >@@ -130,7 +130,7 @@ > > def getindex(infile): > """(infile) -- grabs the index segment from the infile and returns it.""" >- myfile=open(infile,"r") >+ myfile=open(infile, "r") > myheader=myfile.read(16) > if myheader[0:8]!="XPAKPACK": > myfile.close() >@@ -142,8 +142,8 @@ > > def getboth(infile): > """(infile) -- grabs the index and data segments from the infile. >- Returns an array [indexSegment,dataSegment]""" >- myfile=open(infile,"r") >+ Returns an array [indexSegment, dataSegment]""" >+ myfile=open(infile, "r") > myheader=myfile.read(16) > if myheader[0:8]!="XPAKPACK": > myfile.close() >@@ -171,8 +171,8 @@ > startpos=startpos+mytestlen+12 > return myret > >-def searchindex(myindex,myitem): >- """(index,item) -- Finds the offset and length of the file 'item' in the >+def searchindex(myindex, myitem): >+ """(index, item) -- Finds the offset and length of the file 'item' in the > datasegment via the index 'index' provided.""" > mylen=len(myitem) > myindexlen=len(myindex) >@@ -187,15 +187,15 @@ > return datapos, datalen > startpos=startpos+mytestlen+12 > >-def getitem(myid,myitem): >+def getitem(myid, myitem): > myindex=myid[0] > mydata=myid[1] >- myloc=searchindex(myindex,myitem) >+ myloc=searchindex(myindex, myitem) > if not myloc: > return None > return mydata[myloc[0]:myloc[0]+myloc[1]] > >-def xpand(myid,mydest): >+def xpand(myid, mydest): > myindex=myid[0] > mydata=myid[1] > try: >@@ -217,14 +217,14 @@ > if dirname: > if not os.path.exists(dirname): > os.makedirs(dirname) >- mydat=open(myname,"w") >+ mydat=open(myname, "w") 
> mydat.write(mydata[datapos:datapos+datalen]) > mydat.close() > startpos=startpos+namelen+12 > os.chdir(origdir) > > class tbz2(object): >- def __init__(self,myfile): >+ def __init__(self, myfile): > self.file=myfile > self.filestat=None > self.index="" >@@ -236,7 +236,7 @@ > self.datapos=None > self.scan() > >- def decompose(self,datadir,cleanup=1): >+ def decompose(self, datadir, cleanup=1): > """Alias for unpackinfo() --- Complement to recompose() but optionally > deletes the destination directory. Extracts the xpak from the tbz2 into > the directory provided. Raises IOError if scan() fails. >@@ -248,10 +248,10 @@ > if not os.path.exists(datadir): > os.makedirs(datadir) > return self.unpackinfo(datadir) >- def compose(self,datadir,cleanup=0): >+ def compose(self, datadir, cleanup=0): > """Alias for recompose().""" >- return self.recompose(datadir,cleanup) >- def recompose(self,datadir,cleanup=0): >+ return self.recompose(datadir, cleanup) >+ def recompose(self, datadir, cleanup=0): > """Creates an xpak segment from the datadir provided, truncates the tbz2 > to the end of regular data if an xpak segment already exists, and adds > the new segment to the file with terminating info.""" >@@ -262,10 +262,10 @@ > > def recompose_mem(self, xpdata): > self.scan() # Don't care about condition... We'll rewrite the data anyway. >- myfile=open(self.file,"a+") >+ myfile=open(self.file, "a+") > if not myfile: > raise IOError >- myfile.seek(-self.xpaksize,2) # 0,2 or -0,2 just mean EOF. >+ myfile.seek(-self.xpaksize, 2) # 0, 2 or -0, 2 just mean EOF. 
> myfile.truncate() > myfile.write(xpdata+encodeint(len(xpdata))+"STOP") > myfile.flush() >@@ -298,8 +298,8 @@ > if not changed: > return 1 > self.filestat=mystat >- a=open(self.file,"r") >- a.seek(-16,2) >+ a=open(self.file, "r") >+ a.seek(-16, 2) > trailer=a.read() > self.infosize=0 > self.xpaksize=0 >@@ -311,7 +311,7 @@ > return 0 > self.infosize=decodeint(trailer[8:12]) > self.xpaksize=self.infosize+8 >- a.seek(-(self.xpaksize),2) >+ a.seek(-(self.xpaksize), 2) > header=a.read(16) > if header[0:8]!="XPAKPACK": > a.close() >@@ -334,27 +334,27 @@ > return None > return getindex_mem(self.index) > >- def getfile(self,myfile,mydefault=None): >+ def getfile(self, myfile, mydefault=None): > """Finds 'myfile' in the data segment and returns it.""" > if not self.scan(): > return None >- myresult=searchindex(self.index,myfile) >+ myresult=searchindex(self.index, myfile) > if not myresult: > return mydefault >- a=open(self.file,"r") >- a.seek(self.datapos+myresult[0],0) >+ a=open(self.file, "r") >+ a.seek(self.datapos+myresult[0], 0) > myreturn=a.read(myresult[1]) > a.close() > return myreturn > >- def getelements(self,myfile): >+ def getelements(self, myfile): > """A split/array representation of tbz2.getfile()""" > mydat=self.getfile(myfile) > if not mydat: > return [] > return mydat.split() > >- def unpackinfo(self,mydest): >+ def unpackinfo(self, mydest): > """Unpacks all the files from the dataSegment into 'mydest'.""" > if not self.scan(): > return 0 >@@ -365,7 +365,7 @@ > except: > os.chdir("/") > origdir="/" >- a=open(self.file,"r") >+ a=open(self.file, "r") > if not os.path.exists(mydest): > os.makedirs(mydest) > os.chdir(mydest) >@@ -379,7 +379,7 @@ > if dirname: > if not os.path.exists(dirname): > os.makedirs(dirname) >- mydat=open(myname,"w") >+ mydat=open(myname, "w") > a.seek(self.datapos+datapos) > mydat.write(a.read(datalen)) > mydat.close() >@@ -407,11 +407,11 @@ > return mydata > > def getboth(self): >- """Returns an array [indexSegment,dataSegment]""" 
>+ """Returns an array [indexSegment, dataSegment]""" > if not self.scan(): > return None > >- a = open(self.file,"r") >+ a = open(self.file, "r") > a.seek(self.datapos) > mydata =a.read(self.datasize) > a.close() >Index: pym/portage/getbinpkg.py >=================================================================== >--- pym/portage/getbinpkg.py (revision 13832) >+++ pym/portage/getbinpkg.py (working copy) >@@ -23,19 +23,19 @@ > try: > import ftplib > except ImportError, e: >- sys.stderr.write(colorize("BAD","!!! CANNOT IMPORT FTPLIB: ")+str(e)+"\n") >+ sys.stderr.write(colorize("BAD", "!!! CANNOT IMPORT FTPLIB: ")+str(e)+"\n") > > try: > import httplib > except ImportError, e: >- sys.stderr.write(colorize("BAD","!!! CANNOT IMPORT HTTPLIB: ")+str(e)+"\n") >+ sys.stderr.write(colorize("BAD", "!!! CANNOT IMPORT HTTPLIB: ")+str(e)+"\n") > > def make_metadata_dict(data): >- myid,myglob = data >+ myid, myglob = data > > mydict = {} > for x in portage.xpak.getindex_mem(myid): >- mydict[x] = portage.xpak.getitem(data,x) >+ mydict[x] = portage.xpak.getitem(data, x) > > return mydict > >@@ -49,7 +49,7 @@ > def get_anchors(self): > return self.PL_anchors > >- def get_anchors_by_prefix(self,prefix): >+ def get_anchors_by_prefix(self, prefix): > newlist = [] > for x in self.PL_anchors: > if x.startswith(prefix): >@@ -57,7 +57,7 @@ > newlist.append(x[:]) > return newlist > >- def get_anchors_by_suffix(self,suffix): >+ def get_anchors_by_suffix(self, suffix): > newlist = [] > for x in self.PL_anchors: > if x.endswith(suffix): >@@ -65,10 +65,10 @@ > newlist.append(x[:]) > return newlist > >- def handle_endtag(self,tag): >+ def handle_endtag(self, tag): > pass > >- def handle_starttag(self,tag,attrs): >+ def handle_starttag(self, tag, attrs): > if tag == "a": > for x in attrs: > if x[0] == 'href': >@@ -76,16 +76,16 @@ > self.PL_anchors.append(urllib2.unquote(x[1])) > > >-def create_conn(baseurl,conn=None): >- """(baseurl,conn) --- Takes a protocol://site:port/address url, and an 
>+def create_conn(baseurl, conn=None): >+ """(baseurl, conn) --- Takes a protocol://site:port/address url, and an > optional connection. If connection is already active, it is passed on. >- baseurl is reduced to address and is returned in tuple (conn,address)""" >+ baseurl is reduced to address and is returned in tuple (conn, address)""" > >- parts = baseurl.split("://",1) >+ parts = baseurl.split("://", 1) > if len(parts) != 2: > raise ValueError("Provided URL does not " + \ > "contain protocol identifier. '%s'" % baseurl) >- protocol,url_parts = parts >+ protocol, url_parts = parts > del parts > > url_parts = url_parts.split("/") >@@ -96,7 +96,7 @@ > address = "/"+"/".join(url_parts[1:]) > del url_parts > >- userpass_host = host.split("@",1) >+ userpass_host = host.split("@", 1) > if len(userpass_host) == 1: > host = userpass_host[0] > userpass = ["anonymous"] >@@ -138,11 +138,11 @@ > host = host[:-1] > conn = ftplib.FTP(host) > if password: >- conn.login(username,password) >+ conn.login(username, password) > else: > sys.stderr.write(colorize("WARN", > " * No password provided for username")+" '%s'" % \ >- (username,) + "\n\n") >+ (username, ) + "\n\n") > conn.login(username) > conn.set_pasv(passive) > conn.set_debuglevel(0) >@@ -158,10 +158,10 @@ > else: > raise NotImplementedError, "%s is not a supported protocol." 
% protocol > >- return (conn,protocol,address, http_params, http_headers) >+ return (conn, protocol, address, http_params, http_headers) > > def make_ftp_request(conn, address, rest=None, dest=None): >- """(conn,address,rest) --- uses the conn object to request the data >+ """(conn, address, rest) --- uses the conn object to request the data > from address and issuing a rest if it is passed.""" > try: > >@@ -201,14 +201,14 @@ > conn.voidresp() > conn.voidcmd("TYPE A") > >- return mydata,not (fsize==data_size),"" >+ return mydata, not (fsize==data_size), "" > > except ValueError, e: >- return None,int(str(e)[:4]),str(e) >+ return None, int(str(e)[:4]), str(e) > > > def make_http_request(conn, address, params={}, headers={}, dest=None): >- """(conn,address,params,headers) --- uses the conn object to request >+ """(conn, address, params, headers) --- uses the conn object to request > the data from address, performing Location forwarding and using the > optional params and headers.""" > >@@ -217,12 +217,12 @@ > while (rc == 0) or (rc == 301) or (rc == 302): > try: > if (rc != 0): >- conn,ignore,ignore,ignore,ignore = create_conn(address) >+ conn, ignore, ignore, ignore, ignore = create_conn(address) > conn.request("GET", address, params, headers) > except SystemExit, e: > raise > except Exception, e: >- return None,None,"Server request failed: "+str(e) >+ return None, None, "Server request failed: "+str(e) > response = conn.getresponse() > rc = response.status > >@@ -231,7 +231,7 @@ > ignored_data = response.read() > del ignored_data > for x in str(response.msg).split("\n"): >- parts = x.split(": ",1) >+ parts = x.split(": ", 1) > if parts[0] == "Location": > if (rc == 301): > sys.stderr.write(colorize("BAD", >@@ -244,13 +244,13 @@ > break > > if (rc != 200) and (rc != 206): >- return None,rc,"Server did not respond successfully ("+str(response.status)+": "+str(response.reason)+")" >+ return None, rc, "Server did not respond successfully ("+str(response.status)+": 
"+str(response.reason)+")" > > if dest: > dest.write(response.read()) >- return "",0,"" >+ return "", 0, "" > >- return response.read(),0,"" >+ return response.read(), 0, "" > > > def match_in_array(array, prefix="", suffix="", match_both=1, allow_overlap=0): >@@ -289,8 +289,8 @@ > > > >-def dir_get_list(baseurl,conn=None): >- """(baseurl[,connection]) -- Takes a base url to connect to and read from. >+def dir_get_list(baseurl, conn=None): >+ """(baseurl[, connection]) -- Takes a base url to connect to and read from. > URL should be in the for <proto>://<site>[:port]<path> > Connection is used for persistent connection instances.""" > >@@ -299,15 +299,15 @@ > else: > keepconnection = 1 > >- conn,protocol,address,params,headers = create_conn(baseurl, conn) >+ conn, protocol, address, params, headers = create_conn(baseurl, conn) > > listing = None >- if protocol in ["http","https"]: >+ if protocol in ["http", "https"]: > if not address.endswith("/"): > # http servers can return a 400 error here > # if the address doesn't end with a slash. > address += "/" >- page,rc,msg = make_http_request(conn,address,params,headers) >+ page, rc, msg = make_http_request(conn, address, params, headers) > > if page: > parser = ParseLinks() >@@ -317,7 +317,7 @@ > else: > import portage.exception > raise portage.exception.PortageException( >- "Unable to get listing: %s %s" % (rc,msg)) >+ "Unable to get listing: %s %s" % (rc, msg)) > elif protocol in ["ftp"]: > if address[-1] == '/': > olddir = conn.pwd() >@@ -337,8 +337,8 @@ > > return listing > >-def file_get_metadata(baseurl,conn=None, chunk_size=3000): >- """(baseurl[,connection]) -- Takes a base url to connect to and read from. >+def file_get_metadata(baseurl, conn=None, chunk_size=3000): >+ """(baseurl[, connection]) -- Takes a base url to connect to and read from. 
> URL should be in the for <proto>://<site>[:port]<path> > Connection is used for persistent connection instances.""" > >@@ -347,13 +347,13 @@ > else: > keepconnection = 1 > >- conn,protocol,address,params,headers = create_conn(baseurl, conn) >+ conn, protocol, address, params, headers = create_conn(baseurl, conn) > >- if protocol in ["http","https"]: >+ if protocol in ["http", "https"]: > headers["Range"] = "bytes=-"+str(chunk_size) >- data,rc,msg = make_http_request(conn, address, params, headers) >+ data, rc, msg = make_http_request(conn, address, params, headers) > elif protocol in ["ftp"]: >- data,rc,msg = make_ftp_request(conn, address, -chunk_size) >+ data, rc, msg = make_ftp_request(conn, address, -chunk_size) > elif protocol == "sftp": > f = conn.open(address) > try: >@@ -377,10 +377,10 @@ > > myid = portage.xpak.xsplit_mem(xpak_data) > if not myid: >- myid = None,None >+ myid = None, None > del xpak_data > else: >- myid = None,None >+ myid = None, None > > if not keepconnection: > conn.close() >@@ -388,12 +388,12 @@ > return myid > > >-def file_get(baseurl,dest,conn=None,fcmd=None): >- """(baseurl,dest,fcmd=) -- Takes a base url to connect to and read from. >+def file_get(baseurl, dest, conn=None, fcmd=None): >+ """(baseurl, dest, fcmd=) -- Takes a base url to connect to and read from. > URL should be in the for <proto>://[user[:pass]@]<site>[:port]<path>""" > > if not fcmd: >- return file_get_lib(baseurl,dest,conn) >+ return file_get_lib(baseurl, dest, conn) > > variables = { > "DISTDIR": dest, >@@ -416,8 +416,8 @@ > return 0 > return 1 > >-def file_get_lib(baseurl,dest,conn=None): >- """(baseurl[,connection]) -- Takes a base url to connect to and read from. >+def file_get_lib(baseurl, dest, conn=None): >+ """(baseurl[, connection]) -- Takes a base url to connect to and read from. 
> URL should be in the for <proto>://<site>[:port]<path> > Connection is used for persistent connection instances.""" > >@@ -426,13 +426,13 @@ > else: > keepconnection = 1 > >- conn,protocol,address,params,headers = create_conn(baseurl, conn) >+ conn, protocol, address, params, headers = create_conn(baseurl, conn) > > sys.stderr.write("Fetching '"+str(os.path.basename(address)+"'\n")) >- if protocol in ["http","https"]: >- data,rc,msg = make_http_request(conn, address, params, headers, dest=dest) >+ if protocol in ["http", "https"]: >+ data, rc, msg = make_http_request(conn, address, params, headers, dest=dest) > elif protocol in ["ftp"]: >- data,rc,msg = make_ftp_request(conn, address, dest=dest) >+ data, rc, msg = make_ftp_request(conn, address, dest=dest) > elif protocol == "sftp": > rc = 0 > try: >@@ -462,7 +462,7 @@ > > > def dir_get_metadata(baseurl, conn=None, chunk_size=3000, verbose=1, usingcache=1, makepickle=None): >- """(baseurl,conn,chunk_size,verbose) -- >+ """(baseurl, conn, chunk_size, verbose) -- > """ > if not conn: > keepconnection = 0 >@@ -480,7 +480,7 @@ > except socket.error, e: > # ftplib.FTP(host) can raise errors like this: > # socket.error: (111, 'Connection refused') >- sys.stderr.write("!!! %s\n" % (e,)) >+ sys.stderr.write("!!! %s\n" % (e, )) > return {} > > out = sys.stdout >@@ -535,7 +535,7 @@ > (metadata[baseurl]["timestamp"] < int(time.time()-(60*60*24)))): > # Try to download new cache until we succeed on one. 
> data="" >- for trynum in [1,2,3]: >+ for trynum in [1, 2, 3]: > mytempfile = tempfile.TemporaryFile() > try: > file_get(baseurl+"/"+mfile, mytempfile, conn) >@@ -549,13 +549,13 @@ > sys.stderr.flush() > mytempfile.close() > continue >- if match_in_array([mfile],suffix=".gz"): >+ if match_in_array([mfile], suffix=".gz"): > out.write("gzip'd\n") > out.flush() > try: > import gzip > mytempfile.seek(0) >- gzindex = gzip.GzipFile(mfile[:-3],'rb',9,mytempfile) >+ gzindex = gzip.GzipFile(mfile[:-3], 'rb', 9, mytempfile) > data = gzindex.read() > except SystemExit, e: > raise >@@ -608,7 +608,7 @@ > def display(self): > self.out.write("\r"+colorize("WARN", > "cache miss: '"+str(self.misses)+"'") + \ >- " --- "+colorize("GOOD","cache hit: '"+str(self.hits)+"'")) >+ " --- "+colorize("GOOD", "cache hit: '"+str(self.hits)+"'")) > self.out.flush() > > cache_stats = CacheStats(out) >Index: pym/portage/locks.py >=================================================================== >--- pym/portage/locks.py (revision 13832) >+++ pym/portage/locks.py (working copy) >@@ -22,7 +22,7 @@ > _quiet = False > > def lockdir(mydir): >- return lockfile(mydir,wantnewlockfile=1) >+ return lockfile(mydir, wantnewlockfile=1) > def unlockdir(mylock): > return unlockfile(mylock) > >@@ -99,7 +99,7 @@ > # we're waiting on lockfile and use a blocking attempt. > locking_method = fcntl.lockf > try: >- fcntl.lockf(myfd,fcntl.LOCK_EX|fcntl.LOCK_NB) >+ fcntl.lockf(myfd, fcntl.LOCK_EX|fcntl.LOCK_NB) > except IOError, e: > if "errno" not in dir(e): > raise >@@ -148,13 +148,13 @@ > myfd != HARDLINK_FD and _fstat_nlink(myfd) == 0: > # The file was deleted on us... Keep trying to make one... 
> os.close(myfd) >- writemsg("lockfile recurse\n",1) >+ writemsg("lockfile recurse\n", 1) > lockfilename, myfd, unlinkfile, locking_method = lockfile( > mypath, wantnewlockfile=wantnewlockfile, unlinkfile=unlinkfile, > waiting_msg=waiting_msg, flags=flags) > >- writemsg(str((lockfilename,myfd,unlinkfile))+"\n",1) >- return (lockfilename,myfd,unlinkfile,locking_method) >+ writemsg(str((lockfilename, myfd, unlinkfile))+"\n", 1) >+ return (lockfilename, myfd, unlinkfile, locking_method) > > def _fstat_nlink(fd): > """ >@@ -177,10 +177,10 @@ > > #XXX: Compatability hack. > if len(mytuple) == 3: >- lockfilename,myfd,unlinkfile = mytuple >+ lockfilename, myfd, unlinkfile = mytuple > locking_method = fcntl.flock > elif len(mytuple) == 4: >- lockfilename,myfd,unlinkfile,locking_method = mytuple >+ lockfilename, myfd, unlinkfile, locking_method = mytuple > else: > raise InvalidData > >@@ -191,16 +191,16 @@ > # myfd may be None here due to myfd = mypath in lockfile() > if isinstance(lockfilename, basestring) and \ > not os.path.exists(lockfilename): >- writemsg("lockfile does not exist '%s'\n" % lockfilename,1) >+ writemsg("lockfile does not exist '%s'\n" % lockfilename, 1) > if myfd is not None: > os.close(myfd) > return False > > try: > if myfd is None: >- myfd = os.open(lockfilename, os.O_WRONLY,0660) >+ myfd = os.open(lockfilename, os.O_WRONLY, 0660) > unlinkfile = 1 >- locking_method(myfd,fcntl.LOCK_UN) >+ locking_method(myfd, fcntl.LOCK_UN) > except OSError: > if isinstance(lockfilename, basestring): > os.close(myfd) >@@ -214,21 +214,21 @@ > # commenting until it is proved necessary. > #time.sleep(0.0001) > if unlinkfile: >- locking_method(myfd,fcntl.LOCK_EX|fcntl.LOCK_NB) >+ locking_method(myfd, fcntl.LOCK_EX|fcntl.LOCK_NB) > # We won the lock, so there isn't competition for it. > # We can safely delete the file. 
>- writemsg("Got the lockfile...\n",1) >+ writemsg("Got the lockfile...\n", 1) > if _fstat_nlink(myfd) == 1: > os.unlink(lockfilename) >- writemsg("Unlinked lockfile...\n",1) >- locking_method(myfd,fcntl.LOCK_UN) >+ writemsg("Unlinked lockfile...\n", 1) >+ locking_method(myfd, fcntl.LOCK_UN) > else: >- writemsg("lockfile does not exist '%s'\n" % lockfilename,1) >+ writemsg("lockfile does not exist '%s'\n" % lockfilename, 1) > os.close(myfd) > return False > except Exception, e: >- writemsg("Failed to get lock... someone took it.\n",1) >- writemsg(str(e)+"\n",1) >+ writemsg("Failed to get lock... someone took it.\n", 1) >+ writemsg(str(e)+"\n", 1) > > # why test lockfilename? because we may have been handed an > # fd originally, and the caller might not like having their >@@ -244,7 +244,7 @@ > def hardlock_name(path): > return path+".hardlock-"+os.uname()[1]+"-"+str(os.getpid()) > >-def hardlink_is_mine(link,lock): >+def hardlink_is_mine(link, lock): > try: > return os.stat(link).st_nlink == 2 > except OSError: >@@ -265,7 +265,7 @@ > > while(time.time() < (start_time + max_wait)): > # We only need it to exist. 
>- myfd = os.open(myhardlock, os.O_CREAT|os.O_RDWR,0660) >+ myfd = os.open(myhardlock, os.O_CREAT|os.O_RDWR, 0660) > os.close(myfd) > > if not os.path.exists(myhardlock): >Index: pym/portage/cvstree.py >=================================================================== >--- pym/portage/cvstree.py (revision 13832) >+++ pym/portage/cvstree.py (working copy) >@@ -4,13 +4,13 @@ > # $Id$ > > >-import os,time,sys,re >+import os, time, sys, re > from stat import * > > # [D]/Name/Version/Date/Flags/Tags > > def pathdata(entries, path): >- """(entries,path) >+ """(entries, path) > Returns the data(dict) for a specific file/dir at the path specified.""" > mysplit=path.split("/") > myentries=entries >@@ -29,10 +29,10 @@ > return None > > def fileat(entries, path): >- return pathdata(entries,path) >+ return pathdata(entries, path) > > def isadded(entries, path): >- """(entries,path) >+ """(entries, path) > Returns true if the path exists and is added to the cvs tree.""" > mytarget=pathdata(entries, path) > if mytarget: >@@ -43,7 +43,7 @@ > filename=os.path.basename(path) > > try: >- myfile=open(basedir+"/CVS/Entries","r") >+ myfile=open(basedir+"/CVS/Entries", "r") > except IOError: > return 0 > mylines=myfile.readlines() >@@ -56,8 +56,8 @@ > > return 0 > >-def findnew(entries,recursive=0,basedir=""): >- """(entries,recursive=0,basedir="") >+def findnew(entries, recursive=0, basedir=""): >+ """(entries, recursive=0, basedir="") > Recurses the entries tree to find all elements that have been added but > have not yet been committed. 
Returns a list of paths, optionally prepended > with a basedir.""" >@@ -70,7 +70,7 @@ > mylist.append(basedir+myfile) > if recursive: > for mydir in entries["dirs"]: >- mylist+=findnew(entries["dirs"][mydir],recursive,basedir+mydir) >+ mylist+=findnew(entries["dirs"][mydir], recursive, basedir+mydir) > return mylist > > def findoption(entries, pattern, recursive=0, basedir=""): >@@ -90,8 +90,8 @@ > recursive, basedir+mydir): > yield x > >-def findchanged(entries,recursive=0,basedir=""): >- """(entries,recursive=0,basedir="") >+def findchanged(entries, recursive=0, basedir=""): >+ """(entries, recursive=0, basedir="") > Recurses the entries tree to find all elements that exist in the cvs tree > and differ from the committed version. Returns a list of paths, optionally > prepended with a basedir.""" >@@ -106,11 +106,11 @@ > mylist.append(basedir+myfile) > if recursive: > for mydir in entries["dirs"]: >- mylist+=findchanged(entries["dirs"][mydir],recursive,basedir+mydir) >+ mylist+=findchanged(entries["dirs"][mydir], recursive, basedir+mydir) > return mylist > >-def findmissing(entries,recursive=0,basedir=""): >- """(entries,recursive=0,basedir="") >+def findmissing(entries, recursive=0, basedir=""): >+ """(entries, recursive=0, basedir="") > Recurses the entries tree to find all elements that are listed in the cvs > tree but do not exist on the filesystem. 
Returns a list of paths, > optionally prepended with a basedir.""" >@@ -124,11 +124,11 @@ > mylist.append(basedir+myfile) > if recursive: > for mydir in entries["dirs"]: >- mylist+=findmissing(entries["dirs"][mydir],recursive,basedir+mydir) >+ mylist+=findmissing(entries["dirs"][mydir], recursive, basedir+mydir) > return mylist > >-def findunadded(entries,recursive=0,basedir=""): >- """(entries,recursive=0,basedir="") >+def findunadded(entries, recursive=0, basedir=""): >+ """(entries, recursive=0, basedir="") > Recurses the entries tree to find all elements that are in valid cvs > directories but are not part of the cvs tree. Returns a list of paths, > optionally prepended with a basedir.""" >@@ -142,13 +142,13 @@ > mylist.append(basedir+myfile) > if recursive: > for mydir in entries["dirs"]: >- mylist+=findunadded(entries["dirs"][mydir],recursive,basedir+mydir) >+ mylist+=findunadded(entries["dirs"][mydir], recursive, basedir+mydir) > return mylist > >-def findremoved(entries,recursive=0,basedir=""): >- """(entries,recursive=0,basedir="") >+def findremoved(entries, recursive=0, basedir=""): >+ """(entries, recursive=0, basedir="") > Recurses the entries tree to find all elements that are in flagged for cvs >- deletions. Returns a list of paths, optionally prepended with a basedir.""" >+ deletions. Returns a list of paths, optionally prepended with a basedir.""" > if basedir and basedir[-1]!="/": > basedir=basedir+"/" > mylist=[] >@@ -157,24 +157,24 @@ > mylist.append(basedir+myfile) > if recursive: > for mydir in entries["dirs"]: >- mylist+=findremoved(entries["dirs"][mydir],recursive,basedir+mydir) >+ mylist+=findremoved(entries["dirs"][mydir], recursive, basedir+mydir) > return mylist > > def findall(entries, recursive=0, basedir=""): >- """(entries,recursive=0,basedir="") >+ """(entries, recursive=0, basedir="") > Recurses the entries tree to find all new, changed, missing, and unadded > entities. 
Returns a 4 element list of lists as returned from each find*().""" > > if basedir and basedir[-1]!="/": > basedir=basedir+"/" >- mynew = findnew(entries,recursive,basedir) >- mychanged = findchanged(entries,recursive,basedir) >- mymissing = findmissing(entries,recursive,basedir) >- myunadded = findunadded(entries,recursive,basedir) >- myremoved = findremoved(entries,recursive,basedir) >+ mynew = findnew(entries, recursive, basedir) >+ mychanged = findchanged(entries, recursive, basedir) >+ mymissing = findmissing(entries, recursive, basedir) >+ myunadded = findunadded(entries, recursive, basedir) >+ myremoved = findremoved(entries, recursive, basedir) > return [mynew, mychanged, mymissing, myunadded, myremoved] > >-ignore_list = re.compile("(^|/)(RCS(|LOG)|SCCS|CVS(|\.adm)|cvslog\..*|tags|TAGS|\.(make\.state|nse_depinfo)|.*~|(\.|)#.*|,.*|_$.*|.*\$|\.del-.*|.*\.(old|BAK|bak|orig|rej|a|olb|o|obj|so|exe|Z|elc|ln)|core)$") >+ignore_list = re.compile("(^|/)(RCS(|LOG)|SCCS|CVS(|\.adm)|cvslog\..*|tags|TAGS|\.(make\.state|nse_depinfo)|.*~|(\.|)#.*|,.*|_$.*|.*\$|\.del-.*|.*\.(old|BAK|bak|orig|rej|a|olb|o|obj|so|exe|Z|elc|ln)|core)$") > def apply_cvsignore_filter(list): > x=0 > while x < len(list): >@@ -184,13 +184,13 @@ > x+=1 > return list > >-def getentries(mydir,recursive=0): >- """(basedir,recursive=0) >+def getentries(mydir, recursive=0): >+ """(basedir, recursive=0) > Scans the given directory and returns an datadict of all the entries in > the directory seperated as a dirs dict and a files dict.""" > myfn=mydir+"/CVS/Entries" > # entries=[dirs, files] >- entries={"dirs":{},"files":{}} >+ entries={"dirs":{}, "files":{}} > if not os.path.exists(mydir): > return entries > try: >@@ -210,16 +210,16 @@ > break > mysplit=line.split("/") > if len(mysplit)!=6: >- print "Confused:",mysplit >+ print "Confused:", mysplit > continue > if mysplit[0]=="D": >- entries["dirs"][mysplit[1]]={"dirs":{},"files":{},"status":[]} >+ entries["dirs"][mysplit[1]]={"dirs":{}, "files":{}, 
"status":[]} > entries["dirs"][mysplit[1]]["status"]=["cvs"] > if os.path.isdir(mydir+"/"+mysplit[1]): > entries["dirs"][mysplit[1]]["status"]+=["exists"] > entries["dirs"][mysplit[1]]["flags"]=mysplit[2:] > if recursive: >- rentries=getentries(mydir+"/"+mysplit[1],recursive) >+ rentries=getentries(mydir+"/"+mysplit[1], recursive) > #print rentries.keys() > #print entries["files"].keys() > #print entries["files"][mysplit[1]] >@@ -240,10 +240,10 @@ > if file=="CVS": > continue > if file=="digest-framerd-2.4.3": >- print mydir,file >+ print mydir, file > if os.path.isdir(mydir+"/"+file): > if file not in entries["dirs"]: >- entries["dirs"][file]={"dirs":{},"files":{}} >+ entries["dirs"][file]={"dirs":{}, "files":{}} > if "status" in entries["dirs"][file]: > if "exists" not in entries["dirs"][file]["status"]: > entries["dirs"][file]["status"]+=["exists"] >@@ -253,7 +253,7 @@ > if file=="digest-framerd-2.4.3": > print "isfile" > if file not in entries["files"]: >- entries["files"][file]={"revision":"","date":"","flags":"","tags":""} >+ entries["files"][file]={"revision":"", "date":"", "flags":"", "tags":""} > if "status" in entries["files"][file]: > if file=="digest-framerd-2.4.3": > print "has status" >@@ -275,8 +275,8 @@ > print "status not set" > entries["files"][file]["status"]=[] > if file=="digest-framerd-2.4.3": >- print "date:",entries["files"][file]["date"] >- print "sdate:",mytime >+ print "date:", entries["files"][file]["date"] >+ print "sdate:", mytime > if mytime==entries["files"][file]["date"]: > entries["files"][file]["status"]+=["current"] > if file=="digest-framerd-2.4.3": >@@ -286,18 +286,18 @@ > except SystemExit, e: > raise > except Exception, e: >- print "failed to stat",file >+ print "failed to stat", file > print e > return > > else: > print >- print "File of unknown type:",mydir+"/"+file >+ print "File of unknown type:", mydir+"/"+file > print > return entries > > #class cvstree: >-# def __init__(self,basedir): >+# def __init__(self, basedir): > 
# self.refdir=os.cwd() > # self.basedir=basedir > # self.entries={} >@@ -305,7 +305,7 @@ > # self.entries["files"]={} > # self.entries["dirs"][self.basedir]=getentries(self.basedir) > # self.getrealdirs(self.dirs, self.files) >-# def getrealdirs(self,dirs,files): >+# def getrealdirs(self, dirs, files): > # for mydir in dirs.keys(): > # list = os.listdir( > >Index: pym/portage/process.py >=================================================================== >--- pym/portage/process.py (revision 13832) >+++ pym/portage/process.py (working copy) >@@ -269,7 +269,7 @@ > # If it failed, kill off anything else that > # isn't dead yet. > for pid in mypids: >- if os.waitpid(pid, os.WNOHANG) == (0,0): >+ if os.waitpid(pid, os.WNOHANG) == (0, 0): > os.kill(pid, signal.SIGTERM) > os.waitpid(pid, 0) > spawned_pids.remove(pid) >@@ -298,7 +298,7 @@ > @type opt_name: String > @param fd_pipes: Mapping pipes to destination; { 0:0, 1:1, 2:2 } > @type fd_pipes: Dictionary >- @param env: Key,Value mapping for Environmental Variables >+ @param env: Key, Value mapping for Environmental Variables > @type env: Dictionary > @param gid: Group ID to run the process under > @type gid: Integer >Index: pym/getbinpkg.py >=================================================================== >--- pym/getbinpkg.py (revision 13832) >+++ pym/getbinpkg.py (working copy) >@@ -1 +1,46 @@ >-link portage_compat_namespace.py >\ No newline at end of file >+# portage_compat_namespace.py -- provide compability layer with new namespace >+# Copyright 2007 Gentoo Foundation >+# Distributed under the terms of the GNU General Public License v2 >+# $Id: portage_compat_namespace.py 12364 2008-12-29 03:05:07Z zmedico $ >+ >+""" >+This module checks the name under which it is imported and attempts to load >+the corresponding module of the new portage namespace, inserting it into the >+loaded modules list. >+It also issues a warning to the caller to migrate to the new namespace. 
>+Note that this module should never be used with it's true name, but only by >+links pointing to it. Also it is limited to portage_foo -> portage.foo >+translations, however existing subpackages shouldn't use it anyway to maintain >+compability with 3rd party modules (like elog or cache plugins), and they >+shouldn't be directly imported by external consumers. >+ >+This module is based on an idea by Brian Harring. >+""" >+ >+import sys, warnings >+ >+__oldname = __name__ >+if __name__.startswith("portage_"): >+ __newname = __name__.replace("_", ".") >+else: >+ __newname = "portage."+__name__ >+ >+try: >+ __package = __import__(__newname, globals(), locals()) >+ __realmodule = getattr(__package, __newname[8:]) >+except (ImportError, AttributeError): >+ raise ImportError("No module named %s" % __oldname) >+ >+def _showwarning(message, category, filename, lineno, file=None, line=None): >+ if file is None: >+ import sys >+ file = sys.stderr >+ try: >+ file.write("%s:%s: %s: %s\n" % (filename, lineno, category.__name__, message)) >+ except IOError: >+ pass >+ >+warnings.showwarning = _showwarning >+ >+warnings.warn("DEPRECATION NOTICE: The %s module was replaced by %s" % (__oldname, __newname), DeprecationWarning) >+sys.modules[__oldname] = __realmodule >Index: pym/cvstree.py >=================================================================== >--- pym/cvstree.py (revision 13832) >+++ pym/cvstree.py (working copy) >@@ -1 +1,46 @@ >-link portage_compat_namespace.py >\ No newline at end of file >+# portage_compat_namespace.py -- provide compability layer with new namespace >+# Copyright 2007 Gentoo Foundation >+# Distributed under the terms of the GNU General Public License v2 >+# $Id: portage_compat_namespace.py 12364 2008-12-29 03:05:07Z zmedico $ >+ >+""" >+This module checks the name under which it is imported and attempts to load >+the corresponding module of the new portage namespace, inserting it into the >+loaded modules list. 
>+It also issues a warning to the caller to migrate to the new namespace. >+Note that this module should never be used with it's true name, but only by >+links pointing to it. Also it is limited to portage_foo -> portage.foo >+translations, however existing subpackages shouldn't use it anyway to maintain >+compability with 3rd party modules (like elog or cache plugins), and they >+shouldn't be directly imported by external consumers. >+ >+This module is based on an idea by Brian Harring. >+""" >+ >+import sys, warnings >+ >+__oldname = __name__ >+if __name__.startswith("portage_"): >+ __newname = __name__.replace("_", ".") >+else: >+ __newname = "portage."+__name__ >+ >+try: >+ __package = __import__(__newname, globals(), locals()) >+ __realmodule = getattr(__package, __newname[8:]) >+except (ImportError, AttributeError): >+ raise ImportError("No module named %s" % __oldname) >+ >+def _showwarning(message, category, filename, lineno, file=None, line=None): >+ if file is None: >+ import sys >+ file = sys.stderr >+ try: >+ file.write("%s:%s: %s: %s\n" % (filename, lineno, category.__name__, message)) >+ except IOError: >+ pass >+ >+warnings.showwarning = _showwarning >+ >+warnings.warn("DEPRECATION NOTICE: The %s module was replaced by %s" % (__oldname, __newname), DeprecationWarning) >+sys.modules[__oldname] = __realmodule >Index: pym/_emerge/emergelog.py >=================================================================== >--- pym/_emerge/emergelog.py (revision 13832) >+++ pym/_emerge/emergelog.py (working copy) >@@ -40,6 +40,6 @@ > if mylock: > portage.locks.unlockfile(mylock) > mylogfile.close() >- except (IOError,OSError,portage.exception.PortageException), e: >+ except (IOError, OSError, portage.exception.PortageException), e: > if secpass >= 1: >- print >> sys.stderr, "emergelog():",e >+ print >> sys.stderr, "emergelog():", e >Index: pym/_emerge/BinpkgVerifier.py >=================================================================== >--- 
pym/_emerge/BinpkgVerifier.py (revision 13832) >+++ pym/_emerge/BinpkgVerifier.py (working copy) >@@ -14,7 +14,7 @@ > import portage > import os > class BinpkgVerifier(AsynchronousTask): >- __slots__ = ("logfile", "pkg",) >+ __slots__ = ("logfile", "pkg", ) > > def _start(self): > """ >@@ -58,7 +58,7 @@ > pkg_path = bintree.getname(pkg.cpv) > head, tail = os.path.split(pkg_path) > temp_filename = portage._checksum_failure_temp_file(head, tail) >- writemsg("File renamed to '%s'\n" % (temp_filename,), >+ writemsg("File renamed to '%s'\n" % (temp_filename, ), > noiselevel=-1) > finally: > sys.stdout = stdout_orig >Index: pym/_emerge/BinpkgFetcher.py >=================================================================== >--- pym/_emerge/BinpkgFetcher.py (revision 13832) >+++ pym/_emerge/BinpkgFetcher.py (working copy) >@@ -137,7 +137,7 @@ > or False before calling lock(). > """ > if self._lock_obj is not None: >- raise self.AlreadyLocked((self._lock_obj,)) >+ raise self.AlreadyLocked((self._lock_obj, )) > > self._lock_obj = portage.locks.lockfile( > self.pkg_path, wantnewlockfile=1) >Index: pym/_emerge/Scheduler.py >=================================================================== >--- pym/_emerge/Scheduler.py (revision 13832) >+++ pym/_emerge/Scheduler.py (working copy) >@@ -88,7 +88,7 @@ > __slots__ = ("curval", "maxval") > > class _emerge_log_class(SlotObject): >- __slots__ = ("xterm_titles",) >+ __slots__ = ("xterm_titles", ) > > def log(self, *pargs, **kwargs): > if not self.xterm_titles: >@@ -300,7 +300,7 @@ > pkg_str += " for " + pkg.root > msg.append(pkg_str) > msg.append("") >- writemsg_level("".join("%s\n" % (l,) for l in msg), >+ writemsg_level("".join("%s\n" % (l, ) for l in msg), > level=logging.INFO, noiselevel=-1) > if self._max_jobs is True or self._max_jobs > 1: > self._set_max_jobs(1) >@@ -1395,7 +1395,7 @@ > self._status_msg(msg) > > if log_path is not None: >- self._status_msg(" '%s'" % (colorize("INFORM", log_path),)) >+ self._status_msg(" '%s'" % 
(colorize("INFORM", log_path), )) > > def _status_msg(self, msg): > """ >@@ -1527,9 +1527,9 @@ > continue > pkg = task > msg = "emerge --keep-going:" + \ >- " %s" % (pkg.cpv,) >+ " %s" % (pkg.cpv, ) > if pkg.root != "/": >- msg += " for %s" % (pkg.root,) >+ msg += " for %s" % (pkg.root, ) > msg += " dropped due to unsatisfied dependency." > for line in textwrap.wrap(msg, msg_width): > eerror(line, phase="other", key=pkg.cpv) >@@ -1593,7 +1593,7 @@ > world_set.add(atom) > else: > writemsg_level('\n!!! Unable to record %s in "world"\n' % \ >- (atom,), level=logging.WARN, noiselevel=-1) >+ (atom, ), level=logging.WARN, noiselevel=-1) > finally: > if world_locked: > world_set.unlock() >Index: pym/_emerge/countdown.py >=================================================================== >--- pym/_emerge/countdown.py (revision 13832) >+++ pym/_emerge/countdown.py (working copy) >@@ -9,7 +9,7 @@ > > def countdown(secs=5, doing="Starting"): > if secs: >- print ">>> Waiting",secs,"seconds before starting..." >+ print ">>> Waiting", secs, "seconds before starting..." 
> print ">>> (Control-C to abort)...\n"+doing+" in: ", > ticks=range(secs) > ticks.reverse() >Index: pym/_emerge/EbuildMetadataPhase.py >=================================================================== >--- pym/_emerge/EbuildMetadataPhase.py (revision 13832) >+++ pym/_emerge/EbuildMetadataPhase.py (working copy) >@@ -26,9 +26,9 @@ > > __slots__ = ("cpv", "ebuild_path", "fd_pipes", "metadata_callback", > "ebuild_mtime", "metadata", "portdb", "repo_path", "settings") + \ >- ("_raw_metadata",) >+ ("_raw_metadata", ) > >- _file_names = ("ebuild",) >+ _file_names = ("ebuild", ) > _files_dict = slot_dict_class(_file_names, prefix="") > _metadata_fd = 9 > >Index: pym/_emerge/SubProcess.py >=================================================================== >--- pym/_emerge/SubProcess.py (revision 13832) >+++ pym/_emerge/SubProcess.py (working copy) >@@ -8,7 +8,7 @@ > import errno > class SubProcess(AbstractPollTask): > >- __slots__ = ("pid",) + \ >+ __slots__ = ("pid", ) + \ > ("_files", "_reg_id") > > # A file descriptor is required for the scheduler to monitor changes from >Index: pym/_emerge/Package.py >=================================================================== >--- pym/_emerge/Package.py (revision 13832) >+++ pym/_emerge/Package.py (working copy) >@@ -26,8 +26,8 @@ > "root_config", "type_name", > "category", "counter", "cp", "cpv_split", > "inherited", "invalid", "iuse", "mtime", >- "pf", "pv_split", "root", "slot", "slot_atom",) + \ >- ("_use",) >+ "pf", "pv_split", "root", "slot", "slot_atom", ) + \ >+ ("_use", ) > > metadata_keys = [ > "CHOST", "COUNTER", "DEPEND", "EAPI", >@@ -78,7 +78,7 @@ > > __slots__ = ("__weakref__", "all", "enabled", "disabled", > "iuse_implicit", "tokens") + \ >- ('_regex',) >+ ('_regex', ) > > def __init__(self, tokens, iuse_implicit): > self.tokens = tuple(tokens) >@@ -167,11 +167,11 @@ > Detect metadata updates and synchronize Package attributes. 
> """ > >- __slots__ = ("_pkg",) >+ __slots__ = ("_pkg", ) > _wrapped_keys = frozenset( > ["COUNTER", "INHERITED", "IUSE", "SLOT", "_mtime_"]) > _use_conditional_keys = frozenset( >- ['LICENSE', 'PROPERTIES', 'PROVIDE', 'RESTRICT',]) >+ ['LICENSE', 'PROPERTIES', 'PROVIDE', 'RESTRICT', ]) > > def __init__(self, pkg, metadata): > _PackageMetadataWrapperBase.__init__(self) >Index: pym/_emerge/EbuildBinpkg.py >=================================================================== >--- pym/_emerge/EbuildBinpkg.py (revision 13832) >+++ pym/_emerge/EbuildBinpkg.py (working copy) >@@ -8,7 +8,7 @@ > """ > This assumes that src_install() has successfully completed. > """ >- __slots__ = ("_binpkg_tmpfile",) >+ __slots__ = ("_binpkg_tmpfile", ) > > def _start(self): > self.phase = "package" >Index: pym/_emerge/MetadataRegen.py >=================================================================== >--- pym/_emerge/MetadataRegen.py (revision 13832) >+++ pym/_emerge/MetadataRegen.py (working copy) >@@ -160,7 +160,7 @@ > self._error_count += 1 > self._valid_pkgs.discard(metadata_process.cpv) > portage.writemsg("Error processing %s, continuing...\n" % \ >- (metadata_process.cpv,), noiselevel=-1) >+ (metadata_process.cpv, ), noiselevel=-1) > > if self._consumer is not None: > # On failure, still notify the consumer (in this case the metadata >Index: pym/_emerge/SlotObject.py >=================================================================== >--- pym/_emerge/SlotObject.py (revision 13832) >+++ pym/_emerge/SlotObject.py (working copy) >@@ -3,7 +3,7 @@ > # $Id$ > > class SlotObject(object): >- __slots__ = ("__weakref__",) >+ __slots__ = ("__weakref__", ) > > def __init__(self, **kwargs): > classes = [self.__class__] >Index: pym/_emerge/EbuildExecuter.py >=================================================================== >--- pym/_emerge/EbuildExecuter.py (revision 13832) >+++ pym/_emerge/EbuildExecuter.py (working copy) >@@ -15,7 +15,7 @@ > import os > class 
EbuildExecuter(CompositeTask): > >- __slots__ = ("pkg", "scheduler", "settings") + ("_tree",) >+ __slots__ = ("pkg", "scheduler", "settings") + ("_tree", ) > > _phases = ("prepare", "configure", "compile", "test", "install") > >Index: pym/_emerge/MergeListItem.py >=================================================================== >--- pym/_emerge/MergeListItem.py (revision 13832) >+++ pym/_emerge/MergeListItem.py (working copy) >@@ -23,7 +23,7 @@ > "find_blockers", "logger", "mtimedb", "pkg", > "pkg_count", "pkg_to_replace", "prefetcher", > "settings", "statusMessage", "world_atom") + \ >- ("_install_task",) >+ ("_install_task", ) > > def _start(self): > >Index: pym/_emerge/AbstractPollTask.py >=================================================================== >--- pym/_emerge/AbstractPollTask.py (revision 13832) >+++ pym/_emerge/AbstractPollTask.py (working copy) >@@ -6,8 +6,8 @@ > from _emerge.PollConstants import PollConstants > class AbstractPollTask(AsynchronousTask): > >- __slots__ = ("scheduler",) + \ >- ("_registered",) >+ __slots__ = ("scheduler", ) + \ >+ ("_registered", ) > > _bufsize = 4096 > _exceptional_events = PollConstants.POLLERR | PollConstants.POLLNVAL >Index: pym/_emerge/EbuildFetchonly.py >=================================================================== >--- pym/_emerge/EbuildFetchonly.py (revision 13832) >+++ pym/_emerge/EbuildFetchonly.py (working copy) >@@ -34,7 +34,7 @@ > mydbapi=portdb, tree="porttree") > > if rval != os.EX_OK: >- msg = "Fetch failed for '%s'" % (pkg.cpv,) >+ msg = "Fetch failed for '%s'" % (pkg.cpv, ) > eerror(msg, phase="unpack", key=pkg.cpv) > > return rval >@@ -76,7 +76,7 @@ > mydbapi=portdb, tree="porttree") > > if retval != os.EX_OK: >- msg = "Fetch failed for '%s'" % (pkg.cpv,) >+ msg = "Fetch failed for '%s'" % (pkg.cpv, ) > eerror(msg, phase="unpack", key=pkg.cpv) > > portage.elog.elog_process(self.pkg.cpv, self.settings) >Index: pym/_emerge/depgraph.py 
>=================================================================== >--- pym/_emerge/depgraph.py (revision 13832) >+++ pym/_emerge/depgraph.py (working copy) >@@ -536,10 +536,10 @@ > (matched_node.slot_atom, atoms[0]) > if len(atoms) > 1: > for atom in atoms[1:-1]: >- explanation += ", '%s'" % (atom,) >+ explanation += ", '%s'" % (atom, ) > if len(atoms) > 2: >- explanation += "," >- explanation += " and '%s'" % (atoms[-1],) >+ explanation += ", " >+ explanation += " and '%s'" % (atoms[-1], ) > explanation += "." > return explanation > >@@ -573,7 +573,7 @@ > # PROVIDE when necessary, while match_from_list does not. > parent, atom = parent_atom > atom_set = InternalPackageSet( >- initial_atoms=(atom,)) >+ initial_atoms=(atom, )) > if atom_set.findAtomForPackage(pkg): > parent_atoms.add(parent_atom) > else: >@@ -945,7 +945,7 @@ > removal_action = "remove" in self._dynamic_config.myparams > > edepend={} >- depkeys = ["DEPEND","RDEPEND","PDEPEND"] >+ depkeys = ["DEPEND", "RDEPEND", "PDEPEND"] > for k in depkeys: > edepend[k] = metadata[k] > >@@ -1346,7 +1346,7 @@ > noiselevel=-1) > portage.writemsg("!!! Please check ebuild(5) for full details.\n") > portage.writemsg("!!! (Did you specify a version but forget to prefix with '='?)\n") >- return (0,[]) >+ return (0, []) > # Don't expand categories or old-style virtuals here unless > # necessary. Expansion of old-style virtuals here causes at > # least the following problems: >@@ -1630,7 +1630,7 @@ > if missing == 0: > print > missing += 1 >- print "Missing binary for:",xs[2] >+ print "Missing binary for:", xs[2] > > try: > self.altlist() >@@ -1638,7 +1638,7 @@ > return False, myfavorites > > # We're true here unless we are missing binaries. 
>- return (not missing,myfavorites) >+ return (not missing, myfavorites) > > def _set_args(self, args): > """ >@@ -1798,7 +1798,7 @@ > > def _show_unsatisfied_dep(self, root, atom, myparent=None, arg=None): > atom = portage.dep.Atom(atom) >- atom_set = InternalPackageSet(initial_atoms=(atom,)) >+ atom_set = InternalPackageSet(initial_atoms=(atom, )) > atom_without_use = atom > if atom.use: > atom_without_use = portage.dep.remove_slot(atom) >@@ -2016,7 +2016,7 @@ > # old-style virtual match even in cases when the > # package does not actually PROVIDE the virtual. > # Filter out any such false matches here. >- if not InternalPackageSet(initial_atoms=(atom,) >+ if not InternalPackageSet(initial_atoms=(atom, ) > ).findAtomForPackage(pkg): > continue > yield pkg >@@ -2056,7 +2056,7 @@ > if not isinstance(atom, portage.dep.Atom): > atom = portage.dep.Atom(atom) > atom_cp = atom.cp >- atom_set = InternalPackageSet(initial_atoms=(atom,)) >+ atom_set = InternalPackageSet(initial_atoms=(atom, )) > existing_node = None > myeb = None > usepkgonly = "--usepkgonly" in self._frozen_config.myopts >@@ -2450,7 +2450,7 @@ > # due to the performance penalty that is incurred by all the > # additional dep_check calls that are required. 
> >- dep_keys = ["DEPEND","RDEPEND","PDEPEND"] >+ dep_keys = ["DEPEND", "RDEPEND", "PDEPEND"] > for myroot in self._frozen_config.trees: > vardb = self._frozen_config.trees[myroot]["vartree"].dbapi > portdb = self._frozen_config.trees[myroot]["porttree"].dbapi >@@ -2572,7 +2572,7 @@ > except portage.exception.InvalidAtom, e: > depstr = " ".join(vardb.aux_get(pkg.cpv, dep_keys)) > show_invalid_depstring_notice( >- pkg, depstr, "Invalid Atom: %s" % (e,)) >+ pkg, depstr, "Invalid Atom: %s" % (e, )) > return False > for cpv in stale_cache: > del blocker_cache[cpv] >@@ -3813,7 +3813,7 @@ > mylist.append((x, 0, True)) > > last_merge_depth = 0 >- for i in xrange(len(mylist)-1,-1,-1): >+ for i in xrange(len(mylist)-1, -1, -1): > graph_key, depth, ordered = mylist[i] > if not ordered and depth == 0 and i > 0 \ > and graph_key == mylist[i-1][0] and \ >@@ -3980,7 +3980,7 @@ > if True: > # USE flag display > forced_flags = set() >- pkgsettings.setcpv(pkg) # for package.use.{mask,force} >+ pkgsettings.setcpv(pkg) # for package.use.{mask, force} > forced_flags.update(pkgsettings.useforce) > forced_flags.update(pkgsettings.usemask) > >@@ -4282,7 +4282,7 @@ > not vardb.cpv_exists(pkg.cpv) and \ > "--quiet" not in self._frozen_config.myopts: > if mylist_index < len(mylist) - 1: >- p.append(colorize("WARN", "*** Portage will stop merging at this point and reload itself,")) >+ p.append(colorize("WARN", "*** Portage will stop merging at this point and reload itself, ")) > p.append(colorize("WARN", " then resume the merge.")) > > out = sys.stdout >@@ -4290,7 +4290,7 @@ > > for x in p: > if isinstance(x, basestring): >- out.write("%s\n" % (x,)) >+ out.write("%s\n" % (x, )) > continue > > myprint, verboseadd, repoadd = x >@@ -4301,7 +4301,7 @@ > if show_repos and repoadd: > myprint += " " + teal("[%s]" % repoadd) > >- out.write("%s\n" % (myprint,)) >+ out.write("%s\n" % (myprint, )) > > for x in blockers: > print x >@@ -4314,7 +4314,7 @@ > > if "--changelog" in 
self._frozen_config.myopts: > print >- for revision,text in changelogs: >+ for revision, text in changelogs: > print bold('*'+revision) > sys.stdout.write(text) > >@@ -5120,7 +5120,7 @@ > if pkg.invalid: > for msg_type, msgs in pkg.invalid.iteritems(): > for msg in msgs: >- mreasons.append("invalid: %s" % (msg,)) >+ mreasons.append("invalid: %s" % (msg, )) > > if not pkg.metadata["SLOT"]: > mreasons.append("invalid: SLOT is undefined") >Index: pym/_emerge/TaskSequence.py >=================================================================== >--- pym/_emerge/TaskSequence.py (revision 13832) >+++ pym/_emerge/TaskSequence.py (working copy) >@@ -13,7 +13,7 @@ > a means to trigger movement from one task to the next. > """ > >- __slots__ = ("_task_queue",) >+ __slots__ = ("_task_queue", ) > > def __init__(self, **kwargs): > AsynchronousTask.__init__(self, **kwargs) >Index: pym/_emerge/UnmergeDepPriority.py >=================================================================== >--- pym/_emerge/UnmergeDepPriority.py (revision 13832) >+++ pym/_emerge/UnmergeDepPriority.py (working copy) >@@ -4,7 +4,7 @@ > > from _emerge.AbstractDepPriority import AbstractDepPriority > class UnmergeDepPriority(AbstractDepPriority): >- __slots__ = ("optional", "satisfied",) >+ __slots__ = ("optional", "satisfied", ) > """ > Combination of properties Priority Category > >Index: pym/_emerge/BlockerCache.py >=================================================================== >--- pym/_emerge/BlockerCache.py (revision 13832) >+++ pym/_emerge/BlockerCache.py (working copy) >@@ -132,7 +132,7 @@ > > { > version : "1", >- "blockers" : {cpv1:(counter,(atom1, atom2...)), cpv2...}, >+ "blockers" : {cpv1:(counter, (atom1, atom2...)), cpv2...}, > "virtuals" : vardb.settings.getvirtuals() > } > """ >Index: pym/_emerge/PackageMerge.py >=================================================================== >--- pym/_emerge/PackageMerge.py (revision 13832) >+++ pym/_emerge/PackageMerge.py (working copy) >@@ -10,7 
+10,7 @@ > run while a merge is executing. > """ > >- __slots__ = ("merge",) >+ __slots__ = ("merge", ) > > def _start(self): > >Index: pym/_emerge/PipeReader.py >=================================================================== >--- pym/_emerge/PipeReader.py (revision 13832) >+++ pym/_emerge/PipeReader.py (working copy) >@@ -17,7 +17,7 @@ > current process. > """ > >- __slots__ = ("input_files",) + \ >+ __slots__ = ("input_files", ) + \ > ("_read_data", "_reg_ids") > > def _start(self): >Index: pym/_emerge/main.py >=================================================================== >--- pym/_emerge/main.py (revision 13832) >+++ pym/_emerge/main.py (working copy) >@@ -157,7 +157,7 @@ > myso=commands.getstatusoutput("LANG=C LANGUAGE=C /usr/bin/install-info --dir-file="+inforoot+"/dir "+inforoot+"/"+x)[1] > existsstr="already exists, for file `" > if myso!="": >- if re.search(existsstr,myso): >+ if re.search(existsstr, myso): > # Already exists... Don't increment the count for this. > pass > elif myso[:44]=="install-info: warning: no info dir entry in ": >@@ -200,7 +200,7 @@ > writemsg_level(errmsg, level=logging.ERROR, noiselevel=-1) > else: > if icount > 0: >- out.einfo("Processed %d info files." % (icount,)) >+ out.einfo("Processed %d info files." % (icount, )) > > def display_preserved_libs(vardbapi, myopts): > MAX_DISPLAY = 3 >@@ -226,7 +226,7 @@ > try: > linkmap.rebuild() > except portage.exception.CommandNotFound, e: >- writemsg_level("!!! Command Not Found: %s\n" % (e,), >+ writemsg_level("!!! 
Command Not Found: %s\n" % (e, ), > level=logging.ERROR, noiselevel=-1) > del e > linkmap_broken = True >@@ -264,7 +264,7 @@ > for alt_paths in samefile_map.itervalues(): > alt_paths = sorted(alt_paths) > for p in alt_paths: >- print colorize("WARN", " * ") + " - %s" % (p,) >+ print colorize("WARN", " * ") + " - %s" % (p, ) > f = alt_paths[0] > consumers = consumer_map.get(f, []) > for c in consumers[:MAX_DISPLAY]: >@@ -312,9 +312,9 @@ > settings.regenerate() > settings.lock() > >- config_protect = settings.get("CONFIG_PROTECT","").split() >- infodirs = settings.get("INFOPATH","").split(":") + \ >- settings.get("INFODIR","").split(":") >+ config_protect = settings.get("CONFIG_PROTECT", "").split() >+ infodirs = settings.get("INFOPATH", "").split(":") + \ >+ settings.get("INFODIR", "").split(":") > > os.chdir("/") > >@@ -384,10 +384,10 @@ > > default_arg_opts = { > '--deep' : valid_integers, >- '--deselect' : ('n',), >- '--binpkg-respect-use' : ('n', 'y',), >+ '--deselect' : ('n', ), >+ '--binpkg-respect-use' : ('n', 'y', ), > '--jobs' : valid_integers, >- '--root-deps' : ('rdeps',), >+ '--root-deps' : ('rdeps', ), > } > > short_arg_opts = { >@@ -590,7 +590,7 @@ > if myoptions.deselect == "True": > myoptions.deselect = True > >- if myoptions.binpkg_respect_use in ("y", "True",): >+ if myoptions.binpkg_respect_use in ("y", "True", ): > myoptions.binpkg_respect_use = True > else: > myoptions.binpkg_respect_use = None >@@ -612,7 +612,7 @@ > deep = None > if not silent: > writemsg("!!! Invalid --deep parameter: '%s'\n" % \ >- (myoptions.deep,), noiselevel=-1) >+ (myoptions.deep, ), noiselevel=-1) > > myoptions.deep = deep > >@@ -631,7 +631,7 @@ > jobs = None > if not silent: > writemsg("!!! Invalid --jobs parameter: '%s'\n" % \ >- (myoptions.jobs,), noiselevel=-1) >+ (myoptions.jobs, ), noiselevel=-1) > > myoptions.jobs = jobs > >@@ -645,7 +645,7 @@ > load_average = None > if not silent: > writemsg("!!! 
Invalid --load-average parameter: '%s'\n" % \ >- (myoptions.load_average,), noiselevel=-1) >+ (myoptions.load_average, ), noiselevel=-1) > > myoptions.load_average = load_average > >@@ -716,7 +716,7 @@ > > if rval != os.EX_OK: > out = portage.output.EOutput() >- out.eerror("PORTAGE_IONICE_COMMAND returned %d" % (rval,)) >+ out.eerror("PORTAGE_IONICE_COMMAND returned %d" % (rval, )) > out.eerror("See the make.conf(5) man page for PORTAGE_IONICE_COMMAND usage instructions.") > > def expand_set_arguments(myfiles, myaction, root_config): >@@ -758,7 +758,7 @@ > argpart = x[start+1:end] > > # TODO: implement proper quoting >- args = argpart.split(",") >+ args = argpart.split(",") > options = {} > for a in args: > if "=" in a: >@@ -800,7 +800,7 @@ > if sets: > msg.append(" sets defined: %s" % ", ".join(sets)) > msg.append(" This usually means that '%s'" % \ >- (os.path.join(portage.const.GLOBAL_CONFIG_PATH, "sets.conf"),)) >+ (os.path.join(portage.const.GLOBAL_CONFIG_PATH, "sets.conf"), )) > msg.append(" is missing or corrupt.") > for line in msg: > writemsg_level(line + "\n", level=logging.ERROR, noiselevel=-1) >@@ -860,7 +860,7 @@ > "have missing repo_name entries:") > msg.append("") > for p in missing_repo_names: >- msg.append("\t%s/profiles/repo_name" % (p,)) >+ msg.append("\t%s/profiles/repo_name" % (p, )) > msg.append("") > msg.extend(textwrap.wrap("NOTE: Each repo_name entry " + \ > "should be a plain text file containing a unique " + \ >@@ -887,9 +887,9 @@ > msg.append(' profiles/repo_name entries:') > msg.append('') > for k in sorted(ignored_repos): >- msg.append(' %s overrides' % (k,)) >+ msg.append(' %s overrides' % (k, )) > for path in ignored_repos[k]: >- msg.append(' %s' % (path,)) >+ msg.append(' %s' % (path, )) > msg.append('') > msg.extend(' ' + x for x in textwrap.wrap( > "All profiles/repo_name entries must be unique in order " + \ >@@ -967,7 +967,7 @@ > myaction, myopts, myfiles = parse_opts(tmpcmdline) > > if "--digest" in myopts: >- 
os.environ["FEATURES"] = os.environ.get("FEATURES","") + " digest" >+ os.environ["FEATURES"] = os.environ.get("FEATURES", "") + " digest" > # Reload the whole config from scratch so that the portdbapi internal > # config is updated with new FEATURES. > settings, trees, mtimedb = load_emerge_config(trees=trees) >@@ -1134,7 +1134,7 @@ > # check if root user is the current user for the actions where emerge needs this > if portage.secpass < 2: > # We've already allowed "--version" and "--help" above. >- if "--pretend" not in myopts and myaction not in ("search","info"): >+ if "--pretend" not in myopts and myaction not in ("search", "info"): > need_superuser = myaction in ('clean', 'depclean', 'deselect', > 'prune', 'unmerge') or not \ > (fetchonly or \ >@@ -1272,7 +1272,7 @@ > "one of the following " + \ > "fully-qualified ebuild names instead:" > for line in textwrap.wrap(msg, 70): >- writemsg_level("!!! %s\n" % (line,), >+ writemsg_level("!!! %s\n" % (line, ), > level=logging.ERROR, noiselevel=-1) > for i in e[0]: > writemsg_level(" %s\n" % colorize("INFORM", i), >@@ -1281,7 +1281,7 @@ > return 1 > continue > msg = [] >- msg.append("'%s' is not a valid package atom." % (x,)) >+ msg.append("'%s' is not a valid package atom." % (x, )) > msg.append("Please check ebuild(5) for full details.") > writemsg_level("".join("!!! %s\n" % line for line in msg), > level=logging.ERROR, noiselevel=-1) >@@ -1305,7 +1305,7 @@ > except OSError: > pass > msg = [] >- msg.append("'%s' is not a valid package atom." % (x,)) >+ msg.append("'%s' is not a valid package atom." % (x, )) > msg.append("Please check ebuild(5) for full details.") > writemsg_level("".join("!!! 
%s\n" % line for line in msg), > level=logging.ERROR, noiselevel=-1) >Index: pym/_emerge/help.py >=================================================================== >--- pym/_emerge/help.py (revision 13832) >+++ pym/_emerge/help.py (working copy) >@@ -120,7 +120,7 @@ > print " Displays important portage variables that will be exported to" > print " ebuild.sh when performing merges. This information is useful" > print " for bug reports and verification of settings. All settings in" >- print " make.{conf,globals,defaults} and the environment show up if" >+ print " make.{conf,globals,defaults} and the environment show up if" > print " run with the '--verbose' flag." > print > print " " + green("--list-sets") >@@ -258,7 +258,7 @@ > print " "+green("--buildpkg")+" ("+green("-b")+" short option)" > desc = "Tells emerge to build binary packages for all ebuilds processed in" + \ > " addition to actually merging the packages. Useful for maintainers" + \ >- " or if you administrate multiple Gentoo Linux systems (build once," + \ >+ " or if you administrate multiple Gentoo Linux systems (build once," + \ > " emerge tbz2s everywhere) as well as disaster recovery. The package" + \ > " will be created in the" + \ > " ${PKGDIR}/All directory. An alternative for already-merged" + \ >@@ -289,7 +289,7 @@ > print > print " "+green("--complete-graph") > desc = "This causes emerge to consider the deep dependencies of all" + \ >- " packages from the system and world sets. With this option enabled," + \ >+ " packages from the system and world sets. With this option enabled," + \ > " emerge will bail out if it determines that the given operation will" + \ > " break any dependencies of the packages that have been added to the" + \ > " graph. Like the --deep option, the --complete-graph" + \ >@@ -308,7 +308,7 @@ > print > print " "+green("--debug")+" ("+green("-d")+" short option)" > print " Tell emerge to run the ebuild command in --debug mode. 
In this" >- print " mode, the bash build environment will run with the -x option," >+ print " mode, the bash build environment will run with the -x option, " > print " causing it to output verbose debug information print to stdout." > print " --debug is great for finding bash syntax errors as providing" > print " very verbose information about the dependency and build process." >@@ -404,7 +404,7 @@ > print > print " "+green("--noreplace")+" ("+green("-n")+" short option)" > print " Skip the packages specified on the command-line that have" >- print " already been installed. Without this option, any packages," >+ print " already been installed. Without this option, any packages, " > print " ebuilds, or deps you specify on the command-line *will* cause" > print " Portage to remerge the package, even if it is already installed." > print " Note that Portage won't remerge dependencies by default." >@@ -431,9 +431,9 @@ > print " Instead of actually performing the merge, simply display what" > print " ebuilds and tbz2s *would* have been installed if --pretend" > print " weren't used. Using --pretend is strongly recommended before" >- print " installing an unfamiliar package. In the printout, N = new," >+ print " installing an unfamiliar package. In the printout, N = new, " > print " U = updating, R = replacing, F = fetch restricted, B = blocked" >- print " by an already installed package, D = possible downgrading," >+ print " by an already installed package, D = possible downgrading, " > print " S = slotted install. --verbose causes affecting use flags to be" > print " printed out accompanied by a '+' for enabled and a '-' for" > print " disabled USE flags." >Index: pym/_emerge/actions.py >=================================================================== >--- pym/_emerge/actions.py (revision 13832) >+++ pym/_emerge/actions.py (working copy) >@@ -106,7 +106,7 @@ > # "myopts" is a list for backward compatibility. 
> resume_opts = mtimedb["resume"].get("myopts", []) > if isinstance(resume_opts, list): >- resume_opts = dict((k,True) for k in resume_opts) >+ resume_opts = dict((k, True) for k in resume_opts) > for opt in ("--ask", "--color", "--skipfirst", "--tree"): > resume_opts.pop(opt, None) > >@@ -116,7 +116,7 @@ > myopts.update(resume_opts) > > if "--debug" in myopts: >- writemsg_level("myopts %s\n" % (myopts,)) >+ writemsg_level("myopts %s\n" % (myopts, )) > > # Adjust config according to options of the command being resumed. > for myroot in trees: >@@ -790,8 +790,8 @@ > msg.append("the following required packages not being installed:") > msg.append("") > for atom, parent in unresolvable: >- msg.append(" %s pulled in by:" % (atom,)) >- msg.append(" %s" % (parent,)) >+ msg.append(" %s pulled in by:" % (atom, )) >+ msg.append(" %s" % (parent, )) > msg.append("") > msg.append("Have you forgotten to run " + \ > good("`emerge --update --newuse --deep @system @world`") + " prior") >@@ -832,9 +832,9 @@ > parent_strs.append(str(getattr(node, "cpv", node))) > parent_strs.sort() > msg = [] >- msg.append(" %s pulled in by:\n" % (child_node.cpv,)) >+ msg.append(" %s pulled in by:\n" % (child_node.cpv, )) > for parent_str in parent_strs: >- msg.append(" %s\n" % (parent_str,)) >+ msg.append(" %s\n" % (parent_str, )) > msg.append("\n") > portage.writemsg_stdout("".join(msg), noiselevel=-1) > >@@ -1046,9 +1046,9 @@ > unique_consumers = sorted(consumer.mycpv \ > for consumer in unique_consumers) > msg.append("") >- msg.append(" %s pulled in by:" % (pkg.cpv,)) >+ msg.append(" %s pulled in by:" % (pkg.cpv, )) > for consumer in unique_consumers: >- msg.append(" %s" % (consumer,)) >+ msg.append(" %s" % (consumer, )) > msg.append("") > writemsg_level("".join(prefix + "%s\n" % line for line in msg), > level=logging.WARNING, noiselevel=-1) >@@ -1259,7 +1259,7 @@ > > output=commands.getstatusoutput("distcc --version") > if not output[0]: >- print str(output[1].split("\n",1)[0]), >+ print 
str(output[1].split("\n", 1)[0]), > if "distcc" in settings.features: > print "[enabled]" > else: >@@ -1267,7 +1267,7 @@ > output=commands.getstatusoutput("ccache -V") > if not output[0]: >- print str(output[1].split("\n",1)[0]), >+ print str(output[1].split("\n", 1)[0]), > if "ccache" in settings.features: > print "[enabled]" > else: >@@ -1296,7 +1296,7 @@ > else: > print "%-20s %s" % (x+":", "[NOT VALID]") > >- libtool_vers = ",".join(trees["/"]["vartree"].dbapi.match("sys-devel/libtool")) >+ libtool_vers = ",".join(trees["/"]["vartree"].dbapi.match("sys-devel/libtool")) > > if "--verbose" in myopts: > myvars=settings.keys() >@@ -1734,8 +1734,8 @@ > except OSError: > st = None > if st is None: >- print ">>>",myportdir,"not found, creating it." >- os.makedirs(myportdir,0755) >+ print ">>>", myportdir, "not found, creating it." >+ os.makedirs(myportdir, 0755) > st = os.stat(myportdir) > > spawn_kwargs = {} >@@ -1791,7 +1791,7 @@ > emergelog(xterm_titles, msg ) > writemsg_level(msg + "\n") > exitcode = portage.process.spawn_bash("cd %s ; git pull" % \ >- (portage._shell_quote(myportdir),), **spawn_kwargs) >+ (portage._shell_quote(myportdir), ), **spawn_kwargs) > if exitcode != os.EX_OK: > msg = "!!! git pull error in %s." 
% myportdir > emergelog(xterm_titles, msg) >@@ -1841,7 +1841,7 @@ > > portage.writemsg("Using PORTAGE_RSYNC_OPTS instead of hardcoded defaults\n", 1) > rsync_opts.extend( >- shlex.split(settings.get("PORTAGE_RSYNC_OPTS",""))) >+ shlex.split(settings.get("PORTAGE_RSYNC_OPTS", ""))) > for opt in ("--recursive", "--times"): > if opt not in rsync_opts: > portage.writemsg(yellow("WARNING:") + " adding required option " + \ >@@ -1920,7 +1920,7 @@ > updatecache_flg=True > all_rsync_opts = set(rsync_opts) > extra_rsync_opts = shlex.split( >- settings.get("PORTAGE_RSYNC_EXTRA_OPTS","")) >+ settings.get("PORTAGE_RSYNC_EXTRA_OPTS", "")) > all_rsync_opts.update(extra_rsync_opts) > family = socket.AF_INET > if "-4" in all_rsync_opts or "--ipv4" in all_rsync_opts: >@@ -1948,7 +1948,7 @@ > except SystemExit, e: > raise # Needed else can't exit > except Exception, e: >- print "Notice:",str(e) >+ print "Notice:", str(e) > dosyncuri=syncuri > > if ips: >@@ -1959,7 +1959,7 @@ > except SystemExit, e: > raise # Needed else can't exit > except Exception, e: >- print "Notice:",str(e) >+ print "Notice:", str(e) > dosyncuri=syncuri > > if (retries==0): >@@ -1975,8 +1975,8 @@ > else: > emergelog(xterm_titles, > ">>> Starting retry %d of %d with %s" % \ >- (retries,maxretries,dosyncuri)) >- print "\n\n>>> Starting retry %d of %d with %s" % (retries,maxretries,dosyncuri) >+ (retries, maxretries, dosyncuri)) >+ print "\n\n>>> Starting retry %d of %d with %s" % (retries, maxretries, dosyncuri) > > if mytimestamp != 0 and "--quiet" not in myopts: > print ">>> Checking server timestamp ..." >@@ -2029,7 +2029,7 @@ > # timed out > print e > del e >- if mypids and os.waitpid(mypids[0], os.WNOHANG) == (0,0): >+ if mypids and os.waitpid(mypids[0], os.WNOHANG) == (0, 0): > os.kill(mypids[0], signal.SIGTERM) > os.waitpid(mypids[0], 0) > # This is the same code rsync uses for timeout. 
>@@ -2077,9 +2077,9 @@ > # actual sync > mycommand = rsynccommand + [dosyncuri+"/", myportdir] > exitcode = portage.process.spawn(mycommand, **spawn_kwargs) >- if exitcode in [0,1,3,4,11,14,20,21]: >+ if exitcode in [0, 1, 3, 4, 11, 14, 20, 21]: > break >- elif exitcode in [1,3,4,11,14,20,21]: >+ elif exitcode in [1, 3, 4, 11, 14, 20, 21]: > break > else: > # Code 2 indicates protocol incompatibility, which is expected >@@ -2142,7 +2142,7 @@ > #initial checkout > print ">>> Starting initial cvs checkout with "+syncuri+"..." > if os.path.exists(cvsdir+"/gentoo-x86"): >- print "!!! existing",cvsdir+"/gentoo-x86 directory; exiting." >+ print "!!! existing", cvsdir+"/gentoo-x86 directory; exiting." > sys.exit(1) > try: > os.rmdir(myportdir) >@@ -2152,7 +2152,7 @@ > "!!! existing '%s' directory; exiting.\n" % myportdir) > sys.exit(1) > del e >- if portage.spawn("cd "+cvsdir+"; cvs -z0 -d "+cvsroot+" co -P gentoo-x86",settings,free=1): >+ if portage.spawn("cd "+cvsdir+"; cvs -z0 -d "+cvsroot+" co -P gentoo-x86", settings, free=1): > print "!!! cvs checkout error; exiting." > sys.exit(1) > os.rename(os.path.join(cvsdir, "gentoo-x86"), myportdir) >@@ -2161,12 +2161,12 @@ > print ">>> Starting cvs update with "+syncuri+"..." > retval = portage.process.spawn_bash( > "cd %s; cvs -z0 -q update -dP" % \ >- (portage._shell_quote(myportdir),), **spawn_kwargs) >+ (portage._shell_quote(myportdir), ), **spawn_kwargs) > if retval != os.EX_OK: > sys.exit(retval) > dosyncuri = syncuri > else: >- writemsg_level("!!! Unrecognized protocol: SYNC='%s'\n" % (syncuri,), >+ writemsg_level("!!! 
Unrecognized protocol: SYNC='%s'\n" % (syncuri, ), > noiselevel=-1, level=logging.ERROR) > return 1 > >@@ -2200,7 +2200,7 @@ > trees[settings["ROOT"]]["vartree"].dbapi.match( > portage.const.PORTAGE_PACKAGE_ATOM)) > >- chk_updated_cfg_files("/", settings.get("CONFIG_PROTECT","").split()) >+ chk_updated_cfg_files("/", settings.get("CONFIG_PROTECT", "").split()) > > if myaction != "metadata": > postsync = os.path.join(settings["PORTAGE_CONFIGROOT"], >@@ -2247,7 +2247,7 @@ > "one of the following " + \ > "fully-qualified ebuild names instead:" > for line in textwrap.wrap(msg, 70): >- writemsg_level("!!! %s\n" % (line,), >+ writemsg_level("!!! %s\n" % (line, ), > level=logging.ERROR, noiselevel=-1) > for i in e[0]: > writemsg_level(" %s\n" % colorize("INFORM", i), >@@ -2266,7 +2266,7 @@ > > else: > msg = [] >- msg.append("'%s' is not a valid package atom." % (x,)) >+ msg.append("'%s' is not a valid package atom." % (x, )) > msg.append("Please check ebuild(5) for full details.") > writemsg_level("".join("!!! %s\n" % line for line in msg), > level=logging.ERROR, noiselevel=-1) >@@ -2394,7 +2394,7 @@ > settings["PORTAGE_DEBUG"] = str(PORTAGE_DEBUG) > settings.backup_changes("PORTAGE_DEBUG") > >- if settings.get("NOCOLOR") not in ("yes","true"): >+ if settings.get("NOCOLOR") not in ("yes", "true"): > portage.output.havecolor = 1 > > """The explicit --color < y | n > option overrides the NOCOLOR environment >@@ -2448,7 +2448,7 @@ > for x in libclist: > xs=portage.catpkgsplit(x) > if libcver: >- libcver+=","+"-".join(xs[1:]) >+ libcver+=", "+"-".join(xs[1:]) > else: > libcver="-".join(xs[1:]) > if libcver==[]: >@@ -2476,7 +2476,7 @@ > cache_db = settings.load_best_module("portdbapi.metadbmodule")( > portdir, "metadata/cache", portage.auxdbkeys[:], readonly=True) > except CacheError, e: >- writemsg_level("!!! Unable to instantiate cache: %s\n" % (e,), >+ writemsg_level("!!! 
Unable to instantiate cache: %s\n" % (e, ), > level=logging.ERROR, noiselevel=-1) > return 1 > >@@ -2485,7 +2485,7 @@ > ec_names = set(f[:-7] for f in os.listdir(ec_dir) \ > if f.endswith(".eclass")) > except OSError, e: >- writemsg_level("!!! Unable to list eclasses: %s\n" % (e,), >+ writemsg_level("!!! Unable to list eclasses: %s\n" % (e, ), > level=logging.ERROR, noiselevel=-1) > return 1 > >@@ -2507,7 +2507,7 @@ > for cpv in cache_db: > cpv_split = portage.catpkgsplit(cpv) > if cpv_split is None: >- writemsg_level("!!! Invalid cache entry: %s\n" % (cpv,), >+ writemsg_level("!!! Invalid cache entry: %s\n" % (cpv, ), > level=logging.ERROR, noiselevel=-1) > continue > >@@ -2522,7 +2522,7 @@ > eb_mtime = cache_entry.get("_mtime_") > ec_mtimes = cache_entry.get("_eclasses_") > except KeyError: >- writemsg_level("!!! Missing cache entry: %s\n" % (cpv,), >+ writemsg_level("!!! Missing cache entry: %s\n" % (cpv, ), > level=logging.ERROR, noiselevel=-1) > continue > except CacheError, e: >@@ -2531,7 +2531,7 @@ > continue > > if eb_mtime is None: >- writemsg_level("!!! Missing ebuild mtime: %s\n" % (cpv,), >+ writemsg_level("!!! Missing ebuild mtime: %s\n" % (cpv, ), > level=logging.ERROR, noiselevel=-1) > continue > >@@ -2543,7 +2543,7 @@ > continue > > if ec_mtimes is None: >- writemsg_level("!!! Missing eclass mtimes: %s\n" % (cpv,), >+ writemsg_level("!!! Missing eclass mtimes: %s\n" % (cpv, ), > level=logging.ERROR, noiselevel=-1) > continue > >@@ -2562,7 +2562,7 @@ > current_eb_mtime = os.stat(eb_path) > except OSError: > writemsg_level("!!! Missing ebuild: %s\n" % \ >- (cpv,), level=logging.ERROR, noiselevel=-1) >+ (cpv, ), level=logging.ERROR, noiselevel=-1) > continue > > inconsistent = False >@@ -2666,7 +2666,7 @@ > print "config file '%s' needs updating." 
% x > > if procount: >- print " "+yellow("*")+" See the "+colorize("INFORM","CONFIGURATION FILES")+ \ >+ print " "+yellow("*")+" See the "+colorize("INFORM", "CONFIGURATION FILES")+ \ > " section of the " + bold("emerge") > print " "+yellow("*")+" man page to learn how to update config files." > >Index: pym/_emerge/unmerge.py >=================================================================== >--- pym/_emerge/unmerge.py (revision 13832) >+++ pym/_emerge/unmerge.py (working copy) >@@ -101,11 +101,11 @@ > return 0 > for x in unmerge_files: > arg_parts = x.split('/') >- if x[0] not in [".","/"] and \ >+ if x[0] not in [".", "/"] and \ > arg_parts[-1][-7:] != ".ebuild": > #possible cat/pkg or dep; treat as such > candidate_catpkgs.append(x) >- elif unmerge_action in ["prune","clean"]: >+ elif unmerge_action in ["prune", "clean"]: > print "\n!!! Prune and clean do not accept individual" + \ > " ebuilds as arguments;\n skipping.\n" > continue >@@ -138,11 +138,11 @@ > # The Path is shorter... so it can't be inside the vdb. 
> print sp_absx > print absx >- print "\n!!!",x,"cannot be inside "+ \ >+ print "\n!!!", x, "cannot be inside "+ \ > vdb_path+"; aborting.\n" > return 0 > >- for idx in range(0,sp_vdb_len): >+ for idx in range(0, sp_vdb_len): > if idx >= sp_absx_len or sp_vdb[idx] != sp_absx[idx]: > print sp_absx > print absx >@@ -325,7 +325,7 @@ > portage.match_from_list( > portage.const.PORTAGE_PACKAGE_ATOM, [pkg]): > msg = ("Not unmerging package %s since there is no valid " + \ >- "reason for portage to unmerge itself.") % (pkg.cpv,) >+ "reason for portage to unmerge itself.") % (pkg.cpv, ) > for line in textwrap.wrap(msg, 75): > out.eerror(line) > # adjust pkgmap so the display output is correct >@@ -384,7 +384,7 @@ > parents.append(s) > break > if parents: >- #print colorize("WARN", "Package %s is going to be unmerged," % cpv) >+ #print colorize("WARN", "Package %s is going to be unmerged, " % cpv) > #print colorize("WARN", "but still listed in the following package sets:") > #print " %s\n" % ", ".join(parents) > print colorize("WARN", "Not unmerging package %s as it is" % cpv) >@@ -441,20 +441,20 @@ > #avoid cluttering the preview printout with stuff that isn't getting unmerged > continue > if not (pkgmap[x]["protected"] or pkgmap[x]["omitted"]) and cp in syslist: >- writemsg_level(colorize("BAD","\a\n\n!!! " + \ >+ writemsg_level(colorize("BAD", "\a\n\n!!! " + \ > "'%s' is part of your system profile.\n" % cp), > level=logging.WARNING, noiselevel=-1) >- writemsg_level(colorize("WARN","\a!!! Unmerging it may " + \ >+ writemsg_level(colorize("WARN", "\a!!! 
Unmerging it may " + \ > "be damaging to your system.\n\n"), > level=logging.WARNING, noiselevel=-1) > if clean_delay and "--pretend" not in myopts and "--ask" not in myopts: > countdown(int(settings["EMERGE_WARNING_DELAY"]), > colorize("UNMERGE_WARN", "Press Ctrl-C to Stop")) > if not quiet: >- writemsg_level("\n %s\n" % (bold(cp),), noiselevel=-1) >+ writemsg_level("\n %s\n" % (bold(cp), ), noiselevel=-1) > else: > writemsg_level(bold(cp) + ": ", noiselevel=-1) >- for mytype in ["selected","protected","omitted"]: >+ for mytype in ["selected", "protected", "omitted"]: > if not quiet: > writemsg_level((mytype + ": ").rjust(14), noiselevel=-1) > if pkgmap[x][mytype]: >@@ -507,7 +507,7 @@ > mysplit = y.split("/") > #unmerge... > retval = portage.unmerge(mysplit[0], mysplit[1], settings["ROOT"], >- mysettings, unmerge_action not in ["clean","prune"], >+ mysettings, unmerge_action not in ["clean", "prune"], > vartree=vartree, ldpath_mtimes=ldpath_mtimes, > scheduler=scheduler) > >Index: pym/_emerge/PollScheduler.py >=================================================================== >--- pym/_emerge/PollScheduler.py (revision 13832) >+++ pym/_emerge/PollScheduler.py (working copy) >@@ -96,7 +96,7 @@ > self._poll_event_queue.extend(self._poll_obj.poll(timeout)) > break > except select.error, e: >- writemsg_level("\n!!! select error: %s\n" % (e,), >+ writemsg_level("\n!!! 
select error: %s\n" % (e, ), > level=logging.ERROR, noiselevel=-1) > del e > if timeout is not None: >Index: pym/_emerge/EbuildFetcher.py >=================================================================== >--- pym/_emerge/EbuildFetcher.py (revision 13832) >+++ pym/_emerge/EbuildFetcher.py (working copy) >@@ -17,7 +17,7 @@ > class EbuildFetcher(SpawnProcess): > > __slots__ = ("config_pool", "fetchonly", "fetchall", "pkg", "prefetch") + \ >- ("_build_dir",) >+ ("_build_dir", ) > > def _start(self): > >@@ -93,12 +93,12 @@ > if self.logfile is not None: > if self.background: > elog_out = open(self.logfile, 'a') >- msg = "Fetch failed for '%s'" % (self.pkg.cpv,) >+ msg = "Fetch failed for '%s'" % (self.pkg.cpv, ) > if self.logfile is not None: > msg += ", Log file:" > eerror(msg, phase="unpack", key=self.pkg.cpv, out=elog_out) > if self.logfile is not None: >- eerror(" '%s'" % (self.logfile,), >+ eerror(" '%s'" % (self.logfile, ), > phase="unpack", key=self.pkg.cpv, out=elog_out) > if elog_out is not None: > elog_out.close() >Index: pym/_emerge/search.py >=================================================================== >--- pym/_emerge/search.py (revision 13832) >+++ pym/_emerge/search.py (working copy) >@@ -181,7 +181,7 @@ > raise NotImplementedError(level) > return result > >- def execute(self,searchkey): >+ def execute(self, searchkey): > """Performs the search for the supplied search key""" > match_category = 0 > self.searchkey=searchkey >@@ -202,7 +202,7 @@ > match_category = 1 > self.searchkey = self.searchkey[1:] > if regexsearch: >- self.searchre=re.compile(self.searchkey,re.I) >+ self.searchre=re.compile(self.searchkey, re.I) > else: > self.searchre=re.compile(re.escape(self.searchkey), re.I) > for package in self.portdb.cp_all(): >@@ -217,7 +217,7 @@ > if self.searchre.search(match_string): > if not self.portdb.xmatch("match-visible", package): > masked=1 >- self.matches["pkg"].append([package,masked]) >+ self.matches["pkg"].append([package, masked]) > 
elif self.searchdesc: # DESCRIPTION searching > full_package = self.portdb.xmatch("bestmatch-visible", package) > if not full_package: >@@ -235,7 +235,7 @@ > print "emerge: search: aux_get() failed, skipping" > continue > if self.searchre.search(full_desc): >- self.matches["desc"].append([full_package,masked]) >+ self.matches["desc"].append([full_package, masked]) > > self.sdict = self.setconfig.getSets() > for setname in self.sdict: >@@ -273,7 +273,7 @@ > print " " > vardb = self.vartree.dbapi > for mtype in self.matches: >- for match,masked in self.matches[mtype]: >+ for match, masked in self.matches[mtype]: > full_package = None > if mtype == "pkg": > catpack = match >@@ -283,7 +283,7 @@ > #no match found; we don't want to query description > masked=1 > full_package = portage.best( >- self.portdb.xmatch("match-all",match)) >+ self.portdb.xmatch("match-all", match)) > elif mtype == "desc": > full_package = match > match = portage.cpv_getkey(match) >@@ -294,7 +294,7 @@ > if full_package: > try: > desc, homepage, license = self.portdb.aux_get( >- full_package, ["DESCRIPTION","HOMEPAGE","LICENSE"]) >+ full_package, ["DESCRIPTION", "HOMEPAGE", "LICENSE"]) > except KeyError: > print "emerge: search: aux_get() failed, skipping" > continue >@@ -304,7 +304,7 @@ > print green("*")+" "+white(match) > myversion = self.getVersion(full_package, search.VERSION_RELEASE) > >- mysum = [0,0] >+ mysum = [0, 0] > file_size_str = None > mycat = match.split("/")[0] > mypkg = match.split("/")[1] >@@ -318,14 +318,14 @@ > try: > uri_map = self.portdb.getFetchMap(mycpv) > except portage.exception.InvalidDependString, e: >- file_size_str = "Unknown (%s)" % (e,) >+ file_size_str = "Unknown (%s)" % (e, ) > del e > else: > try: > mysum[0] = mf.getDistfilesSize(uri_map) > except KeyError, e: > file_size_str = "Unknown (missing " + \ >- "digest for %s)" % (e,) >+ "digest for %s)" % (e, ) > del e > > available = False >@@ -346,34 +346,34 @@ > mycount = len(mystr) > while (mycount > 3): > mycount 
-= 3
>-			mystr = mystr[:mycount] + "," + mystr[mycount:]
>+			mystr = mystr[:mycount] + "," + mystr[mycount:]
> 			file_size_str = mystr + " kB"
> 
> 		if self.verbose:
> 			if available:
>-				print " ", darkgreen("Latest version available:"),myversion
>+				print " ", darkgreen("Latest version available:"), myversion
> 			print " ", self.getInstallationStatus(mycat+'/'+mypkg)
> 			if myebuild:
> 				print " %s %s" % \
> 					(darkgreen("Size of files:"), file_size_str)
>-			print " ", darkgreen("Homepage:")+" ",homepage
>-			print " ", darkgreen("Description:")+" ",desc
>-			print " ", darkgreen("License:")+" ",license
>+			print " ", darkgreen("Homepage:")+" ", homepage
>+			print " ", darkgreen("Description:")+" ", desc
>+			print " ", darkgreen("License:")+" ", license
> 		print
> 	#
> 	# private interface
> 	#
>-	def getInstallationStatus(self,package):
>+	def getInstallationStatus(self, package):
> 		installed_package = self.vartree.dep_bestmatch(package)
> 		result = ""
>-		version = self.getVersion(installed_package,search.VERSION_RELEASE)
>+		version = self.getVersion(installed_package, search.VERSION_RELEASE)
> 		if len(version) > 0:
> 			result = darkgreen("Latest version installed:")+" "+version
> 		else:
> 			result = darkgreen("Latest version installed:")+" [ Not Installed ]"
> 		return result
> 
>-	def getVersion(self,full_package,detail):
>+	def getVersion(self, full_package, detail):
> 		if len(full_package) > 1:
> 			package_parts = portage.catpkgsplit(full_package)
> 			if detail == search.VERSION_RELEASE and package_parts[3] != 'r0':
>Index: pym/_emerge/BinpkgPrefetcher.py
>===================================================================
>--- pym/_emerge/BinpkgPrefetcher.py	(revision 13832)
>+++ pym/_emerge/BinpkgPrefetcher.py	(working copy)
>@@ -8,8 +8,8 @@
> import os
> class BinpkgPrefetcher(CompositeTask):
> 
>-	__slots__ = ("pkg",) + \
>-		("pkg_path", "_bintree",)
>+	__slots__ = ("pkg", ) + \
>+		("pkg_path", "_bintree", )
> 
> 	def _start(self):
> 		self._bintree = self.pkg.root_config.trees["bintree"]
>Index: 
pym/_emerge/format_size.py
>===================================================================
>--- pym/_emerge/format_size.py	(revision 13832)
>+++ pym/_emerge/format_size.py	(working copy)
>@@ -14,6 +14,6 @@
> 	mycount=len(mystr)
> 	while (mycount > 3):
> 		mycount-=3
>-		mystr=mystr[:mycount]+","+mystr[mycount:]
>+		mystr=mystr[:mycount]+","+mystr[mycount:]
> 	return mystr+" kB"
> 
>Index: pym/_emerge/EbuildBuildDir.py
>===================================================================
>--- pym/_emerge/EbuildBuildDir.py	(revision 13832)
>+++ pym/_emerge/EbuildBuildDir.py	(working copy)
>@@ -29,7 +29,7 @@
> 		   or False before calling lock().
> 		"""
> 		if self._lock_obj is not None:
>-			raise self.AlreadyLocked((self._lock_obj,))
>+			raise self.AlreadyLocked((self._lock_obj, ))
> 
> 		dir_path = self.dir_path
> 		if dir_path is None:
>Index: pym/_emerge/CompositeTask.py
>===================================================================
>--- pym/_emerge/CompositeTask.py	(revision 13832)
>+++ pym/_emerge/CompositeTask.py	(working copy)
>@@ -6,7 +6,7 @@
> import os
> class CompositeTask(AsynchronousTask):
> 
>-	__slots__ = ("scheduler",) + ("_current_task",)
>+	__slots__ = ("scheduler", ) + ("_current_task", )
> 
> 	def isAlive(self):
> 		return self._current_task is not None
>@@ -65,7 +65,7 @@
> 		for detecting bugs. 
> """ > if task is not self._current_task: >- raise AssertionError("Unrecognized task: %s" % (task,)) >+ raise AssertionError("Unrecognized task: %s" % (task, )) > > def _default_exit(self, task): > """ >Index: pym/_emerge/SpawnProcess.py >=================================================================== >--- pym/_emerge/SpawnProcess.py (revision 13832) >+++ pym/_emerge/SpawnProcess.py (working copy) >@@ -29,7 +29,7 @@ > "uid", "gid", "groups", "umask", "logfile", > "path_lookup", "pre_exec") > >- __slots__ = ("args",) + \ >+ __slots__ = ("args", ) + \ > _spawn_kwarg_names > > _file_names = ("log", "process", "stdout") >Index: pym/_emerge/changelog.py >=================================================================== >--- pym/_emerge/changelog.py (revision 13832) >+++ pym/_emerge/changelog.py (working copy) >@@ -13,7 +13,7 @@ > sys.path.insert(0, osp.join(osp.dirname(osp.dirname(osp.realpath(__file__))), "pym")) > import portage > >-def calc_changelog(ebuildpath,current,next): >+def calc_changelog(ebuildpath, current, next): > if ebuildpath == None or not os.path.exists(ebuildpath): > return [] > current = '-'.join(portage.catpkgsplit(current)[1:]) >@@ -22,7 +22,7 @@ > next = '-'.join(portage.catpkgsplit(next)[1:]) > if next.endswith('-r0'): > next = next[:-3] >- changelogpath = os.path.join(os.path.split(ebuildpath)[0],'ChangeLog') >+ changelogpath = os.path.join(os.path.split(ebuildpath)[0], 'ChangeLog') > try: > changelog = open(changelogpath).read() > except SystemExit, e: >@@ -30,8 +30,8 @@ > except: > return [] > divisions = _find_changelog_tags(changelog) >- #print 'XX from',current,'to',next >- #for div,text in divisions: print 'XX',div >+ #print 'XX from', current, 'to', next >+ #for div, text in divisions: print 'XX', div > # skip entries for all revisions above the one we are about to emerge > for i in range(len(divisions)): > if divisions[i][0]==next: >@@ -51,13 +51,13 @@ > divs = [] > release = None > while 1: >- match = re.search(r'^\*\ 
?([-a-zA-Z0-9_.+]*)(?:\ .*)?\n',changelog,re.M) >+ match = re.search(r'^\*\ ?([-a-zA-Z0-9_.+]*)(?:\ .*)?\n', changelog, re.M) > if match is None: > if release is not None: >- divs.append((release,changelog)) >+ divs.append((release, changelog)) > return divs > if release is not None: >- divs.append((release,changelog[:match.start()])) >+ divs.append((release, changelog[:match.start()])) > changelog = changelog[match.end():] > release = match.group(1) > if release.endswith('.ebuild'): >Index: pym/emergehelp.py >=================================================================== >--- pym/emergehelp.py (revision 13832) >+++ pym/emergehelp.py (working copy) >@@ -1 +1,46 @@ >-link portage_compat_namespace.py >\ No newline at end of file >+# portage_compat_namespace.py -- provide compability layer with new namespace >+# Copyright 2007 Gentoo Foundation >+# Distributed under the terms of the GNU General Public License v2 >+# $Id: portage_compat_namespace.py 12364 2008-12-29 03:05:07Z zmedico $ >+ >+""" >+This module checks the name under which it is imported and attempts to load >+the corresponding module of the new portage namespace, inserting it into the >+loaded modules list. >+It also issues a warning to the caller to migrate to the new namespace. >+Note that this module should never be used with it's true name, but only by >+links pointing to it. Also it is limited to portage_foo -> portage.foo >+translations, however existing subpackages shouldn't use it anyway to maintain >+compability with 3rd party modules (like elog or cache plugins), and they >+shouldn't be directly imported by external consumers. >+ >+This module is based on an idea by Brian Harring. 
>+""" >+ >+import sys, warnings >+ >+__oldname = __name__ >+if __name__.startswith("portage_"): >+ __newname = __name__.replace("_", ".") >+else: >+ __newname = "portage."+__name__ >+ >+try: >+ __package = __import__(__newname, globals(), locals()) >+ __realmodule = getattr(__package, __newname[8:]) >+except (ImportError, AttributeError): >+ raise ImportError("No module named %s" % __oldname) >+ >+def _showwarning(message, category, filename, lineno, file=None, line=None): >+ if file is None: >+ import sys >+ file = sys.stderr >+ try: >+ file.write("%s:%s: %s: %s\n" % (filename, lineno, category.__name__, message)) >+ except IOError: >+ pass >+ >+warnings.showwarning = _showwarning >+ >+warnings.warn("DEPRECATION NOTICE: The %s module was replaced by %s" % (__oldname, __newname), DeprecationWarning) >+sys.modules[__oldname] = __realmodule >Index: pym/portage_exception.py >=================================================================== >--- pym/portage_exception.py (revision 13832) >+++ pym/portage_exception.py (working copy) >@@ -1 +1,46 @@ >-link portage_compat_namespace.py >\ No newline at end of file >+# portage_compat_namespace.py -- provide compability layer with new namespace >+# Copyright 2007 Gentoo Foundation >+# Distributed under the terms of the GNU General Public License v2 >+# $Id: portage_compat_namespace.py 12364 2008-12-29 03:05:07Z zmedico $ >+ >+""" >+This module checks the name under which it is imported and attempts to load >+the corresponding module of the new portage namespace, inserting it into the >+loaded modules list. >+It also issues a warning to the caller to migrate to the new namespace. >+Note that this module should never be used with it's true name, but only by >+links pointing to it. 
Also it is limited to portage_foo -> portage.foo >+translations, however existing subpackages shouldn't use it anyway to maintain >+compability with 3rd party modules (like elog or cache plugins), and they >+shouldn't be directly imported by external consumers. >+ >+This module is based on an idea by Brian Harring. >+""" >+ >+import sys, warnings >+ >+__oldname = __name__ >+if __name__.startswith("portage_"): >+ __newname = __name__.replace("_", ".") >+else: >+ __newname = "portage."+__name__ >+ >+try: >+ __package = __import__(__newname, globals(), locals()) >+ __realmodule = getattr(__package, __newname[8:]) >+except (ImportError, AttributeError): >+ raise ImportError("No module named %s" % __oldname) >+ >+def _showwarning(message, category, filename, lineno, file=None, line=None): >+ if file is None: >+ import sys >+ file = sys.stderr >+ try: >+ file.write("%s:%s: %s: %s\n" % (filename, lineno, category.__name__, message)) >+ except IOError: >+ pass >+ >+warnings.showwarning = _showwarning >+ >+warnings.warn("DEPRECATION NOTICE: The %s module was replaced by %s" % (__oldname, __newname), DeprecationWarning) >+sys.modules[__oldname] = __realmodule >Index: pym/portage_checksum.py >=================================================================== >--- pym/portage_checksum.py (revision 13832) >+++ pym/portage_checksum.py (working copy) >@@ -1 +1,46 @@ >-link portage_compat_namespace.py >\ No newline at end of file >+# portage_compat_namespace.py -- provide compability layer with new namespace >+# Copyright 2007 Gentoo Foundation >+# Distributed under the terms of the GNU General Public License v2 >+# $Id: portage_compat_namespace.py 12364 2008-12-29 03:05:07Z zmedico $ >+ >+""" >+This module checks the name under which it is imported and attempts to load >+the corresponding module of the new portage namespace, inserting it into the >+loaded modules list. >+It also issues a warning to the caller to migrate to the new namespace. 
>+Note that this module should never be used with it's true name, but only by >+links pointing to it. Also it is limited to portage_foo -> portage.foo >+translations, however existing subpackages shouldn't use it anyway to maintain >+compability with 3rd party modules (like elog or cache plugins), and they >+shouldn't be directly imported by external consumers. >+ >+This module is based on an idea by Brian Harring. >+""" >+ >+import sys, warnings >+ >+__oldname = __name__ >+if __name__.startswith("portage_"): >+ __newname = __name__.replace("_", ".") >+else: >+ __newname = "portage."+__name__ >+ >+try: >+ __package = __import__(__newname, globals(), locals()) >+ __realmodule = getattr(__package, __newname[8:]) >+except (ImportError, AttributeError): >+ raise ImportError("No module named %s" % __oldname) >+ >+def _showwarning(message, category, filename, lineno, file=None, line=None): >+ if file is None: >+ import sys >+ file = sys.stderr >+ try: >+ file.write("%s:%s: %s: %s\n" % (filename, lineno, category.__name__, message)) >+ except IOError: >+ pass >+ >+warnings.showwarning = _showwarning >+ >+warnings.warn("DEPRECATION NOTICE: The %s module was replaced by %s" % (__oldname, __newname), DeprecationWarning) >+sys.modules[__oldname] = __realmodule >Index: pym/portage_manifest.py >=================================================================== >--- pym/portage_manifest.py (revision 13832) >+++ pym/portage_manifest.py (working copy) >@@ -1 +1,46 @@ >-link portage_compat_namespace.py >\ No newline at end of file >+# portage_compat_namespace.py -- provide compability layer with new namespace >+# Copyright 2007 Gentoo Foundation >+# Distributed under the terms of the GNU General Public License v2 >+# $Id: portage_compat_namespace.py 12364 2008-12-29 03:05:07Z zmedico $ >+ >+""" >+This module checks the name under which it is imported and attempts to load >+the corresponding module of the new portage namespace, inserting it into the >+loaded modules list. 
>+It also issues a warning to the caller to migrate to the new namespace. >+Note that this module should never be used with it's true name, but only by >+links pointing to it. Also it is limited to portage_foo -> portage.foo >+translations, however existing subpackages shouldn't use it anyway to maintain >+compability with 3rd party modules (like elog or cache plugins), and they >+shouldn't be directly imported by external consumers. >+ >+This module is based on an idea by Brian Harring. >+""" >+ >+import sys, warnings >+ >+__oldname = __name__ >+if __name__.startswith("portage_"): >+ __newname = __name__.replace("_", ".") >+else: >+ __newname = "portage."+__name__ >+ >+try: >+ __package = __import__(__newname, globals(), locals()) >+ __realmodule = getattr(__package, __newname[8:]) >+except (ImportError, AttributeError): >+ raise ImportError("No module named %s" % __oldname) >+ >+def _showwarning(message, category, filename, lineno, file=None, line=None): >+ if file is None: >+ import sys >+ file = sys.stderr >+ try: >+ file.write("%s:%s: %s: %s\n" % (filename, lineno, category.__name__, message)) >+ except IOError: >+ pass >+ >+warnings.showwarning = _showwarning >+ >+warnings.warn("DEPRECATION NOTICE: The %s module was replaced by %s" % (__oldname, __newname), DeprecationWarning) >+sys.modules[__oldname] = __realmodule >Index: bin/md5check.py >=================================================================== >--- bin/md5check.py (revision 13832) >+++ bin/md5check.py (working copy) >@@ -3,7 +3,7 @@ > # Distributed under the terms of the GNU General Public License v2 > # $Id$ > >-import os,sys >+import os, sys > os.environ["FEATURES"]="mirror cvs" > try: > import portage >@@ -13,7 +13,7 @@ > import portage > import portage.util > >-def cstrip(mystr,mychars): >+def cstrip(mystr, mychars): > newstr = "" > for x in mystr: > if x not in mychars: >@@ -32,7 +32,7 @@ > for mycpv in hugelist: > pv = mycpv.split("/")[-1] > >- newuri = 
portage.db["/"]["porttree"].dbapi.aux_get(mycpv,["SRC_URI"])[0] >+ newuri = portage.db["/"]["porttree"].dbapi.aux_get(mycpv, ["SRC_URI"])[0] > newuri = newuri.split() > > digestpath = portage.db["/"]["porttree"].dbapi.findname(mycpv) >@@ -55,10 +55,10 @@ > for x in newuri: > if not x: > continue >- if (x in [")","(",":","||"]) or (x[-1] == "?"): >+ if (x in [")", "(", ":", "||"]) or (x[-1] == "?"): > # ignore it. :) > continue >- x = cstrip(x,"()|?") >+ x = cstrip(x, "()|?") > if not x: > continue > >@@ -76,14 +76,14 @@ > (md5_list[mybn]["size"] != md5sums[mybn]["size"]): > > # This associates the md5 with each file. [md5/size] >- md5joins = md5_list[mybn][2].split(",") >- md5joins = (" ["+md5_list[mybn][0]+"/"+md5_list[mybn][1]+"],").join(md5joins) >+ md5joins = md5_list[mybn][2].split(", ") >+ md5joins = (" ["+md5_list[mybn][0]+"/"+md5_list[mybn][1]+"], ").join(md5joins) > md5joins += " ["+md5_list[mybn][0]+"/"+md5_list[mybn][1]+"]" > >- portage.writemsg("Colliding md5: %s of %s [%s/%s] and %s\n" % (mybn,mycpv,md5sums[mybn][0],md5sums[mybn][1],md5joins)) >+ portage.writemsg("Colliding md5: %s of %s [%s/%s] and %s\n" % (mybn, mycpv, md5sums[mybn][0], md5sums[mybn][1], md5joins)) > col_list += [mybn] > else: >- md5_list[mybn][2] += ","+mycpv >+ md5_list[mybn][2] += ", "+mycpv > else: > md5_list[mybn] = md5sums[mybn]+[mycpv] > del md5sums[mybn] >Index: bin/pemerge.py >=================================================================== >--- bin/pemerge.py (revision 13832) >+++ bin/pemerge.py (working copy) >@@ -1,7 +1,7 @@ > #!/usr/bin/python -O > >-import profile,time,sys,os >-sys.path = ["/usr/lib/portage/bin","/usr/lib/portage/pym"]+sys.path >+import profile, time, sys, os >+sys.path = ["/usr/lib/portage/bin", "/usr/lib/portage/pym"]+sys.path > > def clock(): > return time.time() >Index: tabcheck.py >=================================================================== >--- tabcheck.py (revision 13832) >+++ tabcheck.py (working copy) >@@ -1,6 +1,6 @@ > 
#!/usr/bin/python -O > >-import tabnanny,sys >+import tabnanny, sys > > for x in sys.argv: > tabnanny.check(x)
You cannot view the attachment while viewing its details because your browser does not support IFRAMEs.
View the attachment on a separate page
.
View Attachment As Diff
View Attachment As Raw
Actions:
View
|
Diff
Attachments on
bug 278127
: 198262 |
198279