diff -ru portage-2.1.1.orig/bin/emerge portage-2.1.1/bin/emerge
--- portage-2.1.1.orig/bin/emerge	2006-09-05 00:20:04.000000000 +0000
+++ portage-2.1.1/bin/emerge	2006-09-15 00:42:45.000000000 +0000
@@ -642,6 +642,7 @@
 		self.myopts = myopts
 		self.myparams = myparams
 		self.edebug = 0
+		self.merge_slot = {}
 		if settings.get("PORTAGE_DEBUG", "") == "1":
 			self.edebug = 1
 		self.spinner = spinner
@@ -693,11 +694,28 @@
 		"""
 		jbigkey = " ".join(mybigkey) + " merge"
+		if "--debug" in self.myopts:
+			print "\ncreate:mybigkey is ",mybigkey,", myparent is ", myparent, ", jbigkey is ",jbigkey
+
+		# If this node has no parent, initialize its merge slot to zero and let its deps raise it.
+		if not myparent and not mybigkey[2] in self.merge_slot.keys():
+			self.merge_slot[mybigkey[2]] = 0
+
 		if self.digraph.hasnode(jbigkey):
 			if addme and jbigkey != myparent:
 				# Refuse to make a node depend on itself so that the we don't
 				# don't create a bogus circular dependency in self.altlist().
 				self.digraph.addnode(jbigkey, myparent)
+				try:
+					if myparent and self.merge_slot[myparent.split()[2]] < self.merge_slot[jbigkey.split()[2]] + 1:
+						self.merge_slot[myparent.split()[2]] = self.merge_slot[jbigkey.split()[2]] + 1
+				except KeyError,e:
+					if "--debug" in self.myopts:
+						print "create:Error on key ",str(e)
+					# The node is already present in the digraph, so it must be the myparent
+					# key that is missing and needs to be initialized.
+					self.merge_slot[myparent.split()[2]] = self.merge_slot[jbigkey.split()[2]] + 1
+
 			return 1
 		jbigkey = " ".join(mybigkey) + " nomerge"
 		if self.digraph.hasnode(jbigkey):
@@ -854,6 +872,7 @@
 				portage.writemsg("!!! Please notify the package maintainer " + \
 					"that atoms must be fully-qualified.\n", noiselevel=-1)
 				return 0
+		return 1
 
 	def select_files(self,myfiles):
@@ -1022,6 +1041,9 @@
 				use_binaries=("--usepkgonly" in self.myopts),
 				myroot=myroot, trees=self.trees)
+			if "--debug" in self.myopts:
+				print "\nselect_dep:mycheck is ",mycheck
+
 			if not mycheck[0]:
 				mymerge=[]
 			else:
@@ -1049,7 +1071,12 @@
 			# determine satisfied deps via dep_wordreduce but it does not
 			# account for merge order (merge order is later calculated
 			# in self.altlist() using data from the digraph).
-			self.mydbapi[p_root].cpv_inject(p_key)
+			#
+			# The cpv_inject call below was removed because it interferes with
+			# the slot calculations.  The same effect can be achieved by just
+			# checking whether the package is already in the digraph.
+
+			# self.mydbapi[p_root].cpv_inject(p_key)
 
 			# Update old-style virtuals if this package provides any.
 			# These are needed for dep_virtual calls inside dep_check.
@@ -1057,6 +1084,9 @@
 				self.trees[p_root][self.pkg_tree_map[p_type]].dbapi)
 
 		if not mymerge:
+			# If portage_dep reports that all dependencies are already satisfied, we are done here.
+ if myparent: + self.merge_slot[myparent.split()[2]] = 0 return 1 if "--debug" in self.myopts: @@ -1176,11 +1206,38 @@ # ordered by type preference ("ebuild" type is the last resort) selected_pkg = matched_packages[0] - if myparent: + # initialize slot for selected_pkg[2] if not already initialized + if not selected_pkg[2] in self.merge_slot.keys(): + self.merge_slot[selected_pkg[2]] = 0 + + # if the package exists in the digraph, go on to the next one + selected_pkg_key = " ".join(selected_pkg[0:3]) + " merge" + if self.digraph.hasnode(selected_pkg_key): + if "--debug" in self.myopts: + print "select_dep: digraph has the pkg ",selected_pkg + if myparent: + try: + if self.merge_slot[myparent.split()[2]] < self.merge_slot[selected_pkg[2]] + 1: + self.merge_slot[myparent.split()[2]] = self.merge_slot[selected_pkg[2]] + 1 + except KeyError: + # Because selected_pkg[2] already has a slot, + # it must be myparent.split()[2] that's not there + self.merge_slot[myparent.split()[2]] = self.merge_slot[selected_pkg[2]] + 1 + elif myparent: #we are a dependency, so we want to be unconditionally added if not self.create(selected_pkg[0:3], myparent, myuse=selected_pkg[-1]): return 0 + try: + # merge slot for a package is 1 more than the max of the slot of each of its deps + if x[0]!="!" and self.merge_slot[myparent.split()[2]] < self.merge_slot[selected_pkg[2]] + 1: + self.merge_slot[myparent.split()[2]] = self.merge_slot[selected_pkg[2]] + 1 + except KeyError,e: + if "--debug" in self.myopts: + print "select_dep:Key error on ",str(e)+" selected_pkg ",selected_pkg + # Because selected_pkg[2] already has a slot, + # it must be myparent.split()[2] that's not there + self.merge_slot[myparent.split()[2]] = self.merge_slot[selected_pkg[2]] + 1 else: #if mysource is not set, then we are a command-line dependency and should not be added #if --onlydeps is specified. 
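The bookkeeping above amounts to giving every package a merge slot equal to its height in the dependency graph: leaf packages end up in slot 0, and each parent sits one slot above the highest-slotted package it depends on, so everything in slot N may be built once slots 0 through N-1 have finished. A minimal sketch of that rule, outside the depgraph class and with hypothetical names (compute_slots, deps; an acyclic graph is assumed):

# Illustrative sketch only; "deps" maps each package to the packages it
# depends on, mirroring what the digraph holds in the hunks above.
def compute_slots(deps):
    slots = {}

    def height(pkg):
        if pkg in slots:
            return slots[pkg]
        children = deps.get(pkg, [])
        if not children:
            slots[pkg] = 0          # leaf: nothing has to be merged first
        else:
            slots[pkg] = 1 + max(height(c) for c in children)
        return slots[pkg]

    for pkg in deps:
        height(pkg)
    return slots

# Example: baz has no deps, bar needs baz, foo needs bar and baz.
example = compute_slots({
    "sys-libs/baz-1.0": [],
    "dev-libs/bar-2.1": ["sys-libs/baz-1.0"],
    "app-misc/foo-1.0": ["dev-libs/bar-2.1", "sys-libs/baz-1.0"],
})
# example == {"sys-libs/baz-1.0": 0, "dev-libs/bar-2.1": 1, "app-misc/foo-1.0": 2}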
@@ -1219,8 +1276,13 @@ # if not portage.db["/"]["vartree"].exists_specific(splitski[2]): # portage.db["/"]["merge"].append(splitski) #else: + + # append the merge slot + if len(splitski) == 4: + splitski.append(str(self.merge_slot[splitski[2]])) self.trees[splitski[1]]["merge"].append(splitski) mygraph.delnode(mycurkey) + for x in dolist: for y in self.trees[x]["merge"]: retlist.append(y) @@ -1394,14 +1456,14 @@ i = 0 while i < len(mylist): - if mylist[i][-1]=="nomerge": + if mylist[i][-2]=="nomerge": if "--tree" not in self.myopts: # we don't care about this elements mylist.pop(i) continue if (i == (len(mylist) - 1)) \ - or (mygraph.depth(string.join(mylist[i])) \ - >= mygraph.depth(string.join(mylist[i+1]))): + or (mygraph.depth(string.join(mylist[i][:4])) \ + >= mygraph.depth(string.join(mylist[i+1][:4]))): # end of a useless branch (may be the last one) # -> delete the element and test the previous one mylist.pop(i) @@ -1585,7 +1647,7 @@ if verbosity == 3: # size verbose mysize=0 - if x[0] == "ebuild" and x[-1]!="nomerge": + if x[0] == "ebuild" and x[-2]!="nomerge": myfilesdict = portdb.getfetchsizes( pkg_key, useflags=self.useFlags[myroot][pkg_key], debug=self.edebug) @@ -1633,7 +1695,7 @@ indent="" if "--tree" in self.myopts: - indent=" "*mygraph.depth(string.join(x)) + indent=" "*mygraph.depth(string.join(x[:4])) if myoldbest: myoldbest=portage.pkgsplit(myoldbest)[1]+"-"+portage.pkgsplit(myoldbest)[2] @@ -1791,132 +1853,46 @@ self.pkgsettings["/"] = \ portage.config(clone=trees["/"]["vartree"].settings) - def merge(self, mylist, favorites, mtimedb): - returnme=0 - mymergelist=[] - ldpath_mtimes = mtimedb["ldpath"] + def restart_portage(self, x, mergecount, totalcount, mtimedb): xterm_titles = "notitles" not in self.settings.features - - #check for blocking dependencies - if "--fetchonly" not in self.myopts and \ - "--buildpkgonly" not in self.myopts: - for x in mylist: - if x[0]=="blocks": - print "\n!!! Error: the "+x[2]+" package conflicts with another package;" - print "!!! the two packages cannot be installed on the same system together." - print "!!! Please use 'emerge --pretend' to determine blockers." - if "--quiet" not in self.myopts: - show_blocker_docs_link() - if "--pretend" not in self.myopts: - try: - del mtimedb["resume"] - except KeyError: - pass - sys.exit(1) - - #buildsyspkg: I need mysysdict also on resume (moved from the else block) - mysysdict = genericdict(getlist(self.settings, "system")) - if "--resume" in self.myopts: - # We're resuming. - print colorize("GOOD", "*** Resuming merge...") - emergelog(xterm_titles, " *** Resuming merge...") - mymergelist=mtimedb["resume"]["mergelist"][:] - if "--skipfirst" in self.myopts and mymergelist: - del mtimedb["resume"]["mergelist"][0] - del mymergelist[0] - mtimedb.commit() - validate_merge_list(self.trees, mymergelist) - else: - myfavs = portage.grabfile( - os.path.join(self.target_root, portage.WORLD_FILE)) - myfavdict=genericdict(myfavs) - for x in range(len(mylist)): - if mylist[x][3]!="nomerge": - # Add to the mergelist - mymergelist.append(mylist[x]) - else: - myfavkey=portage.cpv_getkey(mylist[x][2]) - if "--onlydeps" in self.myopts: + # don't really restart if any of these is true + # XXXXX - seems like redundant check, but what the hell! sky is not falling as yet. 
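The reason the display and --tree code above now tests x[-2] for "nomerge" and joins only x[:4] is that every merge-list entry grows a fifth field, the merge slot, while the digraph still indexes nodes by the original four-field key. For reference, with a made-up package:

# Old entry:  [pkg_type, root, cpv, action]
# New entry:  [pkg_type, root, cpv, action, merge_slot]
entry = ["ebuild", "/", "app-misc/foo-1.0", "merge", "2"]

action = entry[-2]                  # used to be entry[-1]
digraph_key = " ".join(entry[:4])   # the digraph key keeps its old 4-field form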
+ if "--pretend" in self.myopts or "--fetchonly" in self.myopts or \ + "--fetch-all-uri" in self.myopts or "--buildpkgonly" in self.myopts: + return + + mysplit=portage.pkgsplit(x[2]) + myver=mysplit[1]+"-"+mysplit[2] + if myver[-3:]=='-r0': + myver=myver[:-3] + if (myver != portage.VERSION) and \ + "livecvsportage" not in self.settings.features: + if totalcount > mergecount: + emergelog(xterm_titles, + " ::: completed emerge ("+ \ + str(mergecount)+" of "+ \ + str(totalcount)+") "+ \ + x[2]+" to "+x[1]) + emergelog(xterm_titles, " *** RESTARTING " + \ + "emerge via exec() after change of " + \ + "portage version.") + portage.run_exitfuncs() + mynewargv=[sys.argv[0],"--resume"] + badlongopts = ("--ask","--tree","--changelog","--skipfirst","--resume") + for arg in self.myopts: + if arg in badlongopts: continue - # Add to the world file. Since we won't be able to later. - if "--fetchonly" not in self.myopts and \ - myfavkey in favorites: - #don't record if already in system profile or already recorded - if (not mysysdict.has_key(myfavkey)) and (not myfavdict.has_key(myfavkey)): - #we don't have a favorites entry for this package yet; add one - myfavdict[myfavkey]=myfavkey - print ">>> Recording",myfavkey,"in \"world\" favorites file..." - if not ("--fetchonly" in self.myopts or \ - "--fetch-all-uri" in self.myopts or \ - "--pretend" in self.myopts): - portage.write_atomic( - os.path.join(self.target_root, portage.WORLD_FILE), - "\n".join(myfavdict.values())) - - mtimedb["resume"]["mergelist"]=mymergelist[:] - mtimedb.commit() + mynewargv.append(arg) + # priority only needs to be adjusted on the first run + os.environ["PORTAGE_NICENESS"] = "0" + os.execv(mynewargv[0], mynewargv) + def fork_one_emerge(self, x, mergecount, totalcount, mtimedb, favorites, mysysdict): + xterm_titles = "notitles" not in self.settings.features myfeat = self.settings.features[:] - - if "parallel-fetch" in myfeat and \ - not ("--ask" in self.myopts or \ - "--pretend" in self.myopts or \ - "--fetch-all-uri" in self.myopts or \ - "--fetchonly" in self.myopts): - if "distlocks" not in myfeat: - print red("!!!") - print red("!!!")+" parallel-fetching requires the distlocks feature enabled" - print red("!!!")+" you have it disabled, thus parallel-fetching is being disabled" - print red("!!!") - elif len(mymergelist) > 1: - print ">>> starting parallel fetching" - pid = os.fork() - if not pid: - sys.stdin.close() - sys.stdout.close() - sys.stderr.close() - time.sleep(3) # allow the parent to have first fetch - fetchlog = "/var/log/emerge-fetch.log" - sys.stdout = open(fetchlog, "w") - sys.stderr = sys.stdout - os.dup2(sys.stdout.fileno(), 1) - os.dup2(sys.stderr.fileno(), 2) - portage_util.apply_secpass_permissions(fetchlog, - uid=portage.portage_uid, gid=portage.portage_gid, - mode=0660) - - for myroot, pkgsettings in self.pkgsettings.iteritems(): - for x in ("autoaddcvs", "cvs"): - while x in pkgsettings.features: - pkgsettings.features.remove(x) - pkgsettings["FEATURES"] = " ".join(pkgsettings.features) - pkgsettings.backup_changes("FEATURES") - - ret = 0 - for x in mymergelist: - if x[0] != "ebuild": - continue - myroot = x[1] - portdb = self.trees[myroot]["porttree"].dbapi - pkgsettings = self.pkgsettings[myroot] - pkgsettings.reset() - pkgsettings.setcpv(x[2]) - try: - ret = portage.doebuild(portdb.findname(x[2]), - "fetch", myroot, pkgsettings, - cleanup=0, fetchonly=True, - mydbapi=portdb, - tree="porttree") - except SystemExit: - raise - except Exception: - ret = 1 - sys.exit(0) - 
portage.portage_exec.spawned_pids.append(pid) - - mergecount=0 - for x in mymergelist: - mergecount+=1 + ldpath_mtimes = mtimedb["ldpath"] + pid = os.fork() + if not pid: myroot=x[1] pkg_key = x[2] pkgindex=2 @@ -1930,10 +1906,10 @@ if "--pretend" not in self.myopts: print "\n>>> Emerging (" + \ colorize("MERGE_LIST_PROGRESS", str(mergecount)) + " of " + \ - colorize("MERGE_LIST_PROGRESS", str(len(mymergelist))) + ") " + \ + colorize("MERGE_LIST_PROGRESS", str(totalcount)) + ") " + \ colorize("GOOD", x[pkgindex]) + " to " + x[1] emergelog(xterm_titles, " >>> emerge ("+\ - str(mergecount)+" of "+str(len(mymergelist))+\ + str(mergecount)+" of "+str(totalcount)+\ ") "+x[pkgindex]+" to "+x[1]) pkgsettings["EMERGE_FROM"] = x[0] @@ -1964,17 +1940,17 @@ print print "!!! Fetch for",y,"failed, continuing..." print - returnme=1 - continue + sys.exit(1) + sys.exit(0) elif "--buildpkg" in self.myopts or issyspkg: #buildsyspkg: Sounds useful to display something, but I don't know if we should also log it if issyspkg: print ">>> This is a system package, let's pack a rescue tarball." #emergelog(">>> This is a system package, let's pack a rescue tarball.") #create pkg, then merge pkg - short_msg = "emerge: ("+str(mergecount)+" of "+str(len(mymergelist))+") "+x[pkgindex]+" Clean" + short_msg = "emerge: ("+str(mergecount)+" of "+str(totalcount)+") "+x[pkgindex]+" Clean" emergelog(xterm_titles, " === ("+str(mergecount)+\ - " of "+str(len(mymergelist))+") Cleaning ("+\ + " of "+str(totalcount)+") Cleaning ("+\ x[pkgindex]+"::"+y+")", short_msg=short_msg) retval = portage.doebuild(y, "clean", myroot, pkgsettings, self.edebug, cleanup=1, @@ -1985,9 +1961,9 @@ sys.exit(127) if retval: sys.exit(retval) - short_msg = "emerge: ("+str(mergecount)+" of "+str(len(mymergelist))+") "+x[pkgindex]+" Compile" + short_msg = "emerge: ("+str(mergecount)+" of "+str(totalcount)+") "+x[pkgindex]+" Compile" emergelog(xterm_titles, " === ("+str(mergecount)+\ - " of "+str(len(mymergelist))+\ + " of "+str(totalcount)+\ ") Compiling/Packaging ("+x[pkgindex]+"::"+y+\ ")", short_msg=short_msg) retval = portage.doebuild(y, "package", myroot, @@ -2003,10 +1979,10 @@ if "--buildpkgonly" not in self.myopts: self.trees[myroot]["bintree"].inject(pkg_key) mytbz2 = self.trees[myroot]["bintree"].getname(pkg_key) - short_msg = "emerge: ("+str(mergecount)+" of "+str(len(mymergelist))+") "+x[pkgindex]+" Merge" + short_msg = "emerge: ("+str(mergecount)+" of "+str(totalcount)+") "+x[pkgindex]+" Merge" emergelog(xterm_titles, " === ("+\ str(mergecount)+" of "+\ - str(len(mymergelist))+") Merging ("+\ + str(totalcount)+") Merging ("+\ x[pkgindex]+"::"+y+")", short_msg=short_msg) retval = portage.merge(pkgsettings["CATEGORY"], @@ -2023,9 +1999,9 @@ portage.doebuild(y, "clean", myroot, pkgsettings, self.edebug, mydbapi=portdb, tree="porttree") else: - short_msg = "emerge: ("+str(mergecount)+" of "+str(len(mymergelist))+") "+x[pkgindex]+" Clean" + short_msg = "emerge: ("+str(mergecount)+" of "+str(totalcount)+") "+x[pkgindex]+" Clean" emergelog(xterm_titles, " === ("+str(mergecount)+\ - " of "+str(len(mymergelist))+") Cleaning ("+\ + " of "+str(totalcount)+") Cleaning ("+\ x[pkgindex]+"::"+y+")", short_msg=short_msg) retval = portage.doebuild(y, "clean", myroot, pkgsettings, self.edebug, cleanup=1, @@ -2036,9 +2012,9 @@ sys.exit(127) if retval: sys.exit(retval) - short_msg = "emerge: ("+str(mergecount)+" of "+str(len(mymergelist))+") "+x[pkgindex]+" Compile" + short_msg = "emerge: ("+str(mergecount)+" of "+str(totalcount)+") "+x[pkgindex]+" Compile" 
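fork_one_emerge() above wraps one package's build/merge sequence in a forked child: the child runs the same per-package code the old merge() loop ran inline and reports the outcome purely through its exit status, while the parent only keeps the pid. A condensed sketch of that pattern, with hypothetical helper names (run_in_child, wait_for):

import os
import sys

def run_in_child(job):
    """Fork; run job() in the child and turn its result into an exit status.
    The parent gets the child's pid back immediately."""
    pid = os.fork()
    if pid == 0:
        # Child: never return into the caller's loop.
        try:
            rc = job()
        except Exception:
            rc = 1
        sys.exit(rc)
    return pid

def wait_for(pid):
    """Block until pid exits; 0 means the merge succeeded."""
    status = os.waitpid(pid, 0)[1]
    return os.WEXITSTATUS(status)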
emergelog(xterm_titles, " === ("+str(mergecount)+\ - " of "+str(len(mymergelist))+\ + " of "+str(totalcount)+\ ") Compiling/Merging ("+x[pkgindex]+\ "::"+y+")", short_msg=short_msg) retval = portage.doebuild(y, "merge", myroot, @@ -2057,9 +2033,9 @@ #merge the tbz2 mytbz2 = self.trees[myroot]["bintree"].getname(pkg_key) if self.trees[myroot]["bintree"].isremote(pkg_key): - short_msg = "emerge: ("+str(mergecount)+" of "+str(len(mymergelist))+") "+x[pkgindex]+" Fetch" + short_msg = "emerge: ("+str(mergecount)+" of "+str(totalcount)+") "+x[pkgindex]+" Fetch" emergelog(xterm_titles, " --- ("+str(mergecount)+\ - " of "+str(len(mymergelist))+\ + " of "+str(totalcount)+\ ") Fetching Binary ("+x[pkgindex]+\ "::"+mytbz2+")", short_msg=short_msg) if not self.trees[myroot]["bintree"].gettbz2(pkg_key): @@ -2067,16 +2043,26 @@ if "--fetchonly" in self.myopts or \ "--fetch-all-uri" in self.myopts: - continue + sys.exit(0) - short_msg = "emerge: ("+str(mergecount)+" of "+str(len(mymergelist))+") "+x[pkgindex]+" Merge Binary" + short_msg = "emerge: ("+str(mergecount)+" of "+str(totalcount)+") "+x[pkgindex]+" Merge Binary" emergelog(xterm_titles, " === ("+str(mergecount)+\ - " of "+str(len(mymergelist))+") Merging Binary ("+\ + " of "+str(totalcount)+") Merging Binary ("+\ x[pkgindex]+"::"+mytbz2+")", short_msg=short_msg) + logfile = None + if "PORT_LOGDIR" in self.settings: + pkg_cat = y.split("/")[0] + pkg_pf = y.split("/")[1] + logid_time = time.strftime("%Y%m%d-%H%M%S", + time.gmtime(os.stat(self.settings["PORTAGE_TMPDIR"]).st_mtime)) + logfile = os.path.join( self.settings["PORT_LOGDIR"], "%s:%s:%s.log" % \ + (pkg_cat, pkg_pf, logid_time)) + del logid_time + retval = portage.pkgmerge(mytbz2, x[1], pkgsettings, mydbapi=bindb, vartree=self.trees[myroot]["vartree"], - prev_mtimes=ldpath_mtimes) + prev_mtimes=ldpath_mtimes, logfile=logfile) if retval is None: sys.exit(1) #need to check for errors @@ -2095,7 +2081,7 @@ print ">>> Recording",myfavkey,"in \"world\" favorites file..." emergelog(xterm_titles, " === ("+\ str(mergecount)+" of "+\ - str(len(mymergelist))+\ + str(totalcount)+\ ") Updating world file ("+x[pkgindex]+")") portage.write_atomic( os.path.join(myroot, portage.WORLD_FILE), @@ -2118,57 +2104,282 @@ + " AUTOCLEAN is disabled. This can cause serious" + " problems due to overlapping packages.\n") - # Figure out if we need a restart. 
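The binary-merge branch above gains a PORT_LOGDIR log file named after the package, similar to what doebuild already does for source builds; in the patch the timestamp comes from the mtime of PORTAGE_TMPDIR (or of the package's .logid file elsewhere). A simplified sketch of the naming scheme, using a hypothetical helper:

import os
import time

def build_log_path(port_logdir, cpv, when=None):
    """Sketch only: build a '<category>:<pf>:<timestamp>.log' path under
    PORT_LOGDIR, mirroring the naming used in the hunks above."""
    category, pf = cpv.split("/", 1)
    if when is None:
        when = time.time()
    stamp = time.strftime("%Y%m%d-%H%M%S", time.gmtime(when))
    return os.path.join(port_logdir, "%s:%s:%s.log" % (category, pf, stamp))

# build_log_path("/var/log/portage", "app-misc/foo-1.0")
#   -> "/var/log/portage/app-misc:foo-1.0:20060915-004245.log" (timestamp varies)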
- mysplit=portage.pkgsplit(x[2]) - if mysplit[0] == "sys-apps/portage" and x[1] == "/": - myver=mysplit[1]+"-"+mysplit[2] - if myver[-3:]=='-r0': - myver=myver[:-3] - if (myver != portage.VERSION) and \ - "livecvsportage" not in self.settings.features: - if len(mymergelist) > mergecount: - emergelog(xterm_titles, - " ::: completed emerge ("+ \ - str(mergecount)+" of "+ \ - str(len(mymergelist))+") "+ \ - x[2]+" to "+x[1]) - emergelog(xterm_titles, " *** RESTARTING " + \ - "emerge via exec() after change of " + \ - "portage version.") - del mtimedb["resume"]["mergelist"][0] - mtimedb.commit() - portage.run_exitfuncs() - mynewargv=[sys.argv[0],"--resume"] - badlongopts = ("--ask","--tree","--changelog","--skipfirst","--resume") - for arg in self.myopts: - if arg in badlongopts: - continue - mynewargv.append(arg) - # priority only needs to be adjusted on the first run - os.environ["PORTAGE_NICENESS"] = "0" - os.execv(mynewargv[0], mynewargv) - if "--pretend" not in self.myopts and \ "--fetchonly" not in self.myopts and \ "--fetch-all-uri" not in self.myopts: if "noclean" not in self.settings.features: short_msg = "emerge: (%s of %s) %s Clean Post" % \ - (mergecount, len(mymergelist), x[pkgindex]) + (mergecount, totalcount, x[pkgindex]) emergelog(xterm_titles, (" === (%s of %s) " + \ "Post-Build Cleaning (%s::%s)") % \ - (mergecount, len(mymergelist), x[pkgindex], y), + (mergecount, totalcount, x[pkgindex], y), short_msg=short_msg) emergelog(xterm_titles, " ::: completed emerge ("+\ - str(mergecount)+" of "+str(len(mymergelist))+") "+\ + str(mergecount)+" of "+str(totalcount)+") "+\ x[2]+" to "+x[1]) - # Unsafe for parallel merges - del mtimedb["resume"]["mergelist"][0] - # Commit after each merge so that --resume may still work in - # in the event that portage is not allowed to exit normally - # due to power failure, SIGKILL, etc... + sys.exit(0) + + return pid + + + def merge(self, mylist, favorites, mtimedb): + returnme=0 + mymergelist=[] + ldpath_mtimes = mtimedb["ldpath"] + xterm_titles = "notitles" not in self.settings.features + parallel = "parallel" in self.settings.features + + # parallel merge will be painful to watch with debug or fetchonly. So, you get only one of these...:-) + if self.edebug or "--fetchonly" in self.myopts: + parallel = False + + #check for blocking dependencies + if "--fetchonly" not in self.myopts and \ + "--buildpkgonly" not in self.myopts: + for x in mylist: + if x[0]=="blocks": + print "\n!!! Error: the "+x[2]+" package conflicts with another package;" + print "!!! the two packages cannot be installed on the same system together." + print "!!! Please use 'emerge --pretend' to determine blockers." + if "--quiet" not in self.myopts: + show_blocker_docs_link() + if "--pretend" not in self.myopts: + try: + del mtimedb["resume"] + except KeyError: + pass + sys.exit(1) + + #buildsyspkg: I need mysysdict also on resume (moved from the else block) + mysysdict = genericdict(getlist(self.settings, "system")) + if "--resume" in self.myopts: + # We're resuming. 
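The rewritten merge() (continued below) first decides whether parallel operation is allowed at all: it requires FEATURES="parallel" and drops back to serial behaviour for debug and fetch-only runs, whose interleaved output would be unreadable. A sketch of that gating check, with illustrative names:

def want_parallel(features, myopts, edebug):
    """Sketch of the gating done at the top of merge() above: parallel
    merging is opt-in via FEATURES and disabled when it would garble output."""
    if "parallel" not in features:
        return False
    if edebug or "--fetchonly" in myopts:
        return False
    return True

# want_parallel(["parallel", "distlocks"], ["--deep"], 0)   -> True
# want_parallel(["parallel"], ["--fetchonly"], 0)           -> False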
+ print colorize("GOOD", "*** Resuming merge...") + emergelog(xterm_titles, " *** Resuming merge...") + mymergelist=mtimedb["resume"]["mergelist"][:] + if "--skipfirst" in self.myopts and mymergelist: + del mtimedb["resume"]["mergelist"][0] + del mymergelist[0] + mtimedb.commit() + validate_merge_list(self.trees, mymergelist) + else: + myfavs = portage.grabfile( + os.path.join(self.target_root, portage.WORLD_FILE)) + myfavdict=genericdict(myfavs) + for x in range(len(mylist)): + if mylist[x][3]!="nomerge": + # Add to the mergelist + mymergelist.append(mylist[x]) + else: + myfavkey=portage.cpv_getkey(mylist[x][2]) + if "--onlydeps" in self.myopts: + continue + # Add to the world file. Since we won't be able to later. + if "--fetchonly" not in self.myopts and \ + myfavkey in favorites: + #don't record if already in system profile or already recorded + if (not mysysdict.has_key(myfavkey)) and (not myfavdict.has_key(myfavkey)): + #we don't have a favorites entry for this package yet; add one + myfavdict[myfavkey]=myfavkey + print ">>> Recording",myfavkey,"in \"world\" favorites file..." + if not ("--fetchonly" in self.myopts or \ + "--fetch-all-uri" in self.myopts or \ + "--pretend" in self.myopts): + portage.write_atomic( + os.path.join(self.target_root, portage.WORLD_FILE), + "\n".join(myfavdict.values())) + + mtimedb["resume"]["mergelist"]=mymergelist[:] mtimedb.commit() + myfeat = self.settings.features[:] + m_slots = mergelist_to_merge_slot(mymergelist) + + if "parallel-fetch" in myfeat and \ + not ("--ask" in self.myopts or \ + "--pretend" in self.myopts or \ + "--fetch-all-uri" in self.myopts or \ + "--fetchonly" in self.myopts): + if "distlocks" not in myfeat: + print red("!!!") + print red("!!!")+" parallel-fetching requires the distlocks feature enabled" + print red("!!!")+" you have it disabled, thus parallel-fetching is being disabled" + print red("!!!") + elif len(mymergelist) > 1: + print ">>> starting parallel fetching" + pid = os.fork() + if not pid: + sys.stdin.close() + sys.stdout.close() + sys.stderr.close() + time.sleep(3) # allow the parent to have first fetch + fetchlog = "/var/log/emerge-fetch.log" + sys.stdout = open(fetchlog, "w") + sys.stderr = sys.stdout + os.dup2(sys.stdout.fileno(), 1) + os.dup2(sys.stderr.fileno(), 2) + portage_util.apply_secpass_permissions(fetchlog, + uid=portage.portage_uid, gid=portage.portage_gid, + mode=0660) + + for myroot, pkgsettings in self.pkgsettings.iteritems(): + for x in ("autoaddcvs", "cvs"): + while x in pkgsettings.features: + pkgsettings.features.remove(x) + pkgsettings["FEATURES"] = " ".join(pkgsettings.features) + pkgsettings.backup_changes("FEATURES") + + ret = 0 + for x in mymergelist: + if x[0] != "ebuild": + continue + myroot = x[1] + portdb = self.trees[myroot]["porttree"].dbapi + pkgsettings = self.pkgsettings[myroot] + pkgsettings.reset() + pkgsettings.setcpv(x[2]) + try: + ret = portage.doebuild(portdb.findname(x[2]), + "fetch", myroot, pkgsettings, + cleanup=0, fetchonly=True, + mydbapi=portdb, + tree="porttree") + except SystemExit: + raise + except Exception: + ret = 1 + sys.exit(0) + portage.portage_exec.spawned_pids.append(pid) + + totalcount = len(mymergelist) + mergecount=1 + one_in_slot_failed=0 + spawnd_pids=[] + + # dirty little trick to get number of cpus from the system + fd_cpuinfo = os.popen("cat /proc/cpuinfo","r") + cpu_count = 0 + for data_cpuinfo in fd_cpuinfo.readlines(): + if string.find(data_cpuinfo,'cpu MHz') > -1 : + cpu_count += 1 + fd_cpuinfo.close() + + # if someone really screwed with 
/proc/cpuinfo output, we should not suffer + if cpu_count == 0: + cpu_count = 1 + + spawnd_pkg = {} + failed_pid = 0 + mylist = m_slots.keys() + mylist.sort() + for x in mylist: + # if slot is empty, go on + if not m_slots[x]: + continue + + # if previous slot failed, discontinue the emerge + if one_in_slot_failed and not ("--fetchonly" in self.myopts or "--fetch-all-uri" in self.myopts): + break + + # start multiple merges in parallel mode + if parallel: + num_at_atime = cpu_count + 1 + else: + num_at_atime = 1 + + for y in m_slots[x]: + # these all can go in parallel, so fork one after the other + # but num_at_atime at most + if num_at_atime: + onepid = self.fork_one_emerge(y, mergecount, totalcount, mtimedb, favorites, mysysdict) + spawnd_pids.append(onepid) + spawnd_pkg[onepid] = (y[2], x) + num_at_atime -= 1 + mergecount += 1 + else: + # let's wait for one of the jobs to finish + onepid = -1 + while onepid not in spawnd_pids: + onepid , retval = os.waitpid(-1, 0) + spawnd_pids.remove(onepid) + + # if it failed, I need to fail next slot but continue to merge all in this slot + if retval: + one_in_slot_failed = retval + failed_pid = onepid + else: + # we need to remove this pkg from resume DB + # this is the dirtiest shit I have ever written + index = 0 + pkg_compl = spawnd_pkg[onepid][0] + pkg_slot = spawnd_pkg[onepid][1] + for pkgs in mymergelist: + if pkgs[2] == pkg_compl: + del mymergelist[index] + del mtimedb["resume"]["mergelist"][index] + mtimedb.commit() + # check if we need to restart portage + mysplit=portage.pkgsplit(pkg_compl) + if mysplit[0] == "sys-apps/portage" and pkgs[1] == "/": + self.restart_portage(pkgs, mergecount, totalcount, mtimedb) + break + index += 1 + onepid = self.fork_one_emerge(y, mergecount, totalcount, mtimedb, favorites, mysysdict) + spawnd_pids.append(onepid) + spawnd_pkg[onepid] = (y[2], x) + mergecount += 1 + + # this slot is exhausted, so wait for all of the forks to finish + while spawnd_pids: + onepid = spawnd_pids.pop() + retval = os.waitpid(onepid, 0)[1] + if retval: + one_in_slot_failed = retval + failed_pid = onepid + else: + # we need to remove this pkg from resume DB + # this is the dirtiest shit I have ever written + index = 0 + pkg_compl = spawnd_pkg[onepid][0] + pkg_slot = spawnd_pkg[onepid][1] + for pkgs in mymergelist: + if pkgs[2] == pkg_compl: + del mymergelist[index] + del mtimedb["resume"]["mergelist"][index] + mtimedb.commit() + # check if we need to restart portage + mysplit=portage.pkgsplit(pkg_compl) + if mysplit[0] == "sys-apps/portage" and pkgs[1] == "/": + self.restart_portage(pkgs, mergecount, totalcount, mtimedb) + break + index += 1 + + if one_in_slot_failed: + if "--fetchonly" in self.myopts or "--fetch-all-uri" in self.myopts: + print "\n\n!!! Some fetch errors were encountered. 
Please see above for details.\n\n" + sys.exit(1) + + logfile = None + if "PORT_LOGDIR" in self.settings: + pkg_cat = spawnd_pkg[failed_pid][0].split("/")[0] + pkg_pf = spawnd_pkg[failed_pid][0].split("/")[1] + logid_path = os.path.join(self.settings["PORTAGE_TMPDIR"], "portage", pkg_pf, ".logid") + if os.path.exists(logid_path): + logid_time = time.strftime("%Y%m%d-%H%M%S", time.gmtime(os.stat(logid_path).st_mtime)) + logfile = os.path.join( self.settings["PORT_LOGDIR"], "%s:%s:%s.log" % \ + (pkg_cat, pkg_pf, logid_time)) + del logid_time + del logid_path + + if logfile: + portage.portage_exec.spawn(('tail', '-n', '20', logfile), returnpid=False) + + portage.writemsg_stdout(red("Package "+spawnd_pkg[failed_pid][0]+" failed to emerge\n")) + if logfile: + portage.writemsg_stdout(red("Please take a look at the file "+logfile+"\n")) + sys.exit(one_in_slot_failed) + if "--pretend" not in self.myopts: emergelog(xterm_titles, " *** Finished. Cleaning up...") @@ -2177,14 +2388,10 @@ del mtimedb["resume"] mtimedb.commit() - #by doing an exit this way, --fetchonly can continue to try to - #fetch everything even if a particular download fails. if "--fetchonly" in self.myopts or "--fetch-all-uri" in self.myopts: - if returnme: - print "\n\n!!! Some fetch errors were encountered. Please see above for details.\n\n" - sys.exit(returnme) - else: - sys.exit(0) + sys.exit(0) + + def unmerge(settings, myopts, vartree, unmerge_action, unmerge_files, ldpath_mtimes, raise_on_missing=True): @@ -2419,9 +2626,19 @@ emergelog(xterm_titles, "=== Unmerging... ("+y+")") mysplit=string.split(y,"/") #unmerge... + logfile = None + if "PORT_LOGDIR" in settings: + pkg_cat = y.split("/")[0] + pkg_pf = y.split("/")[1] + logid_time = time.strftime("%Y%m%d-%H%M%S", + time.gmtime(os.stat(settings["PORTAGE_TMPDIR"]).st_mtime)) + logfile = os.path.join( settings["PORT_LOGDIR"], "%s:%s:%s.log" % \ + (pkg_cat, pkg_pf, logid_time)) + del logid_time + retval = portage.unmerge(mysplit[0], mysplit[1], settings["ROOT"], mysettings, unmerge_action not in ["clean","prune"], - vartree=vartree, ldpath_mtimes=ldpath_mtimes) + vartree=vartree, ldpath_mtimes=ldpath_mtimes, logfile=logfile) if retval != os.EX_OK: emergelog(xterm_titles, " !!! unmerge FAILURE: "+y) else: @@ -2548,7 +2765,7 @@ def validate_merge_list(trees, mergelist): """Validate the list to make sure all the packages are still available. 
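The scheduling loop above reduces to: walk the slots in ascending order, keep at most cpu_count + 1 children alive inside a slot, drain the slot completely before the next one may start, and once any child fails, finish the current slot but refuse to start the next one (fetch-only runs keep going regardless). A condensed sketch with hypothetical names (run_slots, start_job):

import os

def run_slots(slots, start_job, max_jobs):
    """slots    : dict mapping slot number -> list of jobs (may be None)
    start_job: callable(job) -> pid of a forked child (cf. fork_one_emerge)
    max_jobs : children allowed at once (cpu_count + 1 in the patch)
    Returns 0 on success, or the first non-zero wait status seen."""
    failed = 0
    for level in sorted(slots):
        if failed:
            # An earlier slot failed: do not start the next level.
            break
        running = []
        for job in slots.get(level) or []:
            if len(running) >= max_jobs:
                # Wait for any child before starting another one.
                pid, status = os.waitpid(-1, 0)
                running.remove(pid)
                if status:
                    failed = status
            running.append(start_job(job))
        # Drain this slot before the next level is allowed to begin.
        while running:
            status = os.waitpid(running.pop(), 0)[1]
            if status:
                failed = status
    return failed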
This is needed for --resume.""" - for (pkg_type, myroot, pkg_key, action) in mergelist: + for (pkg_type, myroot, pkg_key, action, merge_slot) in mergelist: if pkg_type == "binary" and \ not trees[myroot]["bintree"].dbapi.match("="+pkg_key) or \ pkg_type == "ebuild" and \ @@ -3390,6 +3607,37 @@ else: print "Number removed: "+str(len(cleanlist)) +def mergelist_to_merge_slot(mergelist, printonly=False): + merge_slots = {} + for pkg in mergelist: + if pkg[3] == 'merge': + slot = int(pkg[4]) + try: + if pkg not in merge_slots[slot]: + merge_slots[slot].append(pkg) + except KeyError: + merge_slots[slot] = [pkg] + # print the merge slots + max_slot = 0 + mylist = merge_slots.keys() + mylist.sort() + for x in mylist: + if x > max_slot: + max_slot = x + print "Package list for slot = "+str(x) + for y in merge_slots[x]: + print " ",y + if printonly: + return + + # make one last pass at the merge_slots and initialize the missing slots to None + x = 0 + while x < max_slot: + if x not in merge_slots.keys(): + merge_slots[x] = None + x += 1 + return merge_slots + def action_build(settings, trees, mtimedb, myopts, myaction, myfiles, spinner): ldpath_mtimes = mtimedb["ldpath"] @@ -3491,12 +3739,15 @@ if len(mymergelist) == 0: print colorize("INFORM", "emerge: It seems we have nothing to resume...") sys.exit(0) + mergelist_to_merge_slot(mymergelist, True) mydepgraph.display(mymergelist) prompt="Would you like to resume merging these packages?" else: - mydepgraph.display(mydepgraph.altlist()) + mymergelist = mydepgraph.altlist() + mergelist_to_merge_slot(mymergelist, True) + mydepgraph.display(mymergelist) mergecount=0 - for x in mydepgraph.altlist(): + for x in mymergelist: if x[3]!="nomerge": mergecount+=1 #check for blocking dependencies @@ -3536,9 +3787,12 @@ if len(mymergelist) == 0: print colorize("INFORM", "emerge: It seems we have nothing to resume...") sys.exit(0) + mergelist_to_merge_slot(mymergelist, True) mydepgraph.display(mymergelist) else: - mydepgraph.display(mydepgraph.altlist()) + mymergelist = mydepgraph.altlist() + mergelist_to_merge_slot(mymergelist, True) + mydepgraph.display(mymergelist) else: if ("--buildpkgonly" in myopts): if not mydepgraph.digraph.hasallzeros(): diff -ru portage-2.1.1.orig/pym/portage.py portage-2.1.1/pym/portage.py --- portage-2.1.1.orig/pym/portage.py 2006-09-08 17:28:36.000000000 +0000 +++ portage-2.1.1/pym/portage.py 2006-09-14 00:17:19.000000000 +0000 @@ -2114,13 +2114,6 @@ noiselevel=-1) os.unlink(myfile_path) else: - eout = output.EOutput() - eout.quiet = \ - mysettings.get("PORTAGE_QUIET", None) == "1" - for digest_name in mydigests[myfile]: - eout.ebegin( - "%s %s ;-)" % (myfile, digest_name)) - eout.eend(0) continue # fetch any remaining files for loc in filedict[myfile]: @@ -2172,6 +2165,11 @@ else: #normal mode: locfetch=fetchcommand + # assuming -q will work with customized FETCH and RESUME commands. 
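mergelist_to_merge_slot() above simply buckets the five-field merge-list entries by their slot number (the real function also prints the grouping and pads missing slot numbers with None). The core of what it produces, shown with made-up packages:

mergelist = [
    ["ebuild", "/", "sys-libs/baz-1.0", "merge", "0"],
    ["ebuild", "/", "dev-libs/bar-2.1", "merge", "0"],
    ["ebuild", "/", "app-misc/foo-1.0", "merge", "1"],
    ["ebuild", "/", "app-misc/qux-3.2", "nomerge", "0"],  # ignored
]

slots = {}
for entry in mergelist:
    if entry[3] == "merge":
        slots.setdefault(int(entry[4]), []).append(entry)

# slots == {0: [<baz>, <bar>], 1: [<foo>]}: slot 0 may be merged in
# parallel, slot 1 only after every job in slot 0 has finished.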
+ # It sure works with the default, which most people will leave alone + # I will check for wget anyway, to be on less destructive side...;) + if "parallel" in mysettings.features and locfetch.find("wget ") >= 0: + locfetch+=" -q" writemsg_stdout(">>> Downloading '%s'\n" % \ re.sub(r'//(.+):.+@(.+)/',r'//\1:*password*@\2/', loc)) myfetch=string.replace(locfetch,"${URI}",loc) @@ -2260,11 +2258,6 @@ os.unlink(mysettings["DISTDIR"]+"/"+myfile) fetched=0 else: - eout = output.EOutput() - eout.quiet = mysettings.get("PORTAGE_QUIET", None) == "1" - for x_key in mydigests[myfile].keys(): - eout.ebegin("%s %s ;-)" % (myfile, x_key)) - eout.eend(0) fetched=2 break else: @@ -2383,19 +2376,14 @@ eout = output.EOutput() eout.quiet = mysettings.get("PORTAGE_QUIET", None) == "1" try: - eout.ebegin("checking ebuild checksums ;-)") + myfullname = "/".join((mysettings["CATEGORY"], mysettings["PF"])) + eout.ebegin("Checking various checksums for "+myfullname+" ") mf.checkTypeHashes("EBUILD") - eout.eend(0) - eout.ebegin("checking auxfile checksums ;-)") mf.checkTypeHashes("AUX") - eout.eend(0) - eout.ebegin("checking miscfile checksums ;-)") mf.checkTypeHashes("MISC", ignoreMissingFiles=True) - eout.eend(0) for f in myfiles: - eout.ebegin("checking %s ;-)" % f) mf.checkFileHashes(mf.findFile(f), f) - eout.eend(0) + eout.eend(0) except KeyError, e: eout.eend(1) writemsg("\n!!! Missing digest for %s\n" % str(e), noiselevel=-1) @@ -2425,12 +2413,12 @@ return retval kwargs = actionmap[mydo]["args"] mysettings["EBUILD_PHASE"] = mydo - phase_retval = spawn(actionmap[mydo]["cmd"] % mydo, mysettings, debug=debug, logfile=logfile, **kwargs) + phase_retval = spawn(actionmap[mydo]["cmd"] % mydo, mysettings, debug=debug, quiet=actionmap[mydo]["quiet"], logfile=logfile, **kwargs) del mysettings["EBUILD_PHASE"] if phase_retval == os.EX_OK: if mydo == "install": mycommand = " ".join([MISC_SH_BINARY, "install_qa_check"]) - qa_retval = spawn(mycommand, mysettings, debug=debug, logfile=logfile, **kwargs) + qa_retval = spawn(mycommand, mysettings, debug=debug, quiet=actionmap[mydo]["quiet"], logfile=logfile, **kwargs) if qa_retval: writemsg("!!! install_qa_check failed; exiting.\n", noiselevel=-1) @@ -2805,10 +2793,6 @@ mystatus = prepare_build_dirs(myroot, mysettings, cleanup) if mystatus: return mystatus - if mydo == "unmerge": - return unmerge(mysettings["CATEGORY"], - mysettings["PF"], myroot, mysettings, vartree=vartree) - if "PORT_LOGDIR" in mysettings and builddir_lock: logid_path = os.path.join(mysettings["PORTAGE_BUILDDIR"], ".logid") if not os.path.exists(logid_path): @@ -2823,6 +2807,11 @@ mysettings["PORTAGE_LOG_FILE"] = logfile del logid_path, logid_time + if mydo == "unmerge": + return unmerge(mysettings["CATEGORY"], + mysettings["PF"], myroot, mysettings, vartree=vartree, logfile=logfile) + + # if any of these are being called, handle them -- running them out of # the sandbox -- and stop now. 
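The quiet= argument threaded through spawnebuild() above comes from a per-phase flag on each actionmap entry (next hunk): noisy phases such as unpack, compile and install are logged quietly, while setup, rpm, package and postinst stay on the terminal, and --debug forces everything back to verbose. A stripped-down illustration, not the real actionmap:

# Hypothetical, trimmed-down version of the per-phase quiet flags.
actionmap = {
    "setup":   {"quiet": False},
    "unpack":  {"quiet": True},
    "compile": {"quiet": True},
    "install": {"quiet": True},
}

def phase_quiet(phase, debug):
    # A debug session should see all of the output on the terminal.
    if debug:
        return False
    return actionmap[phase]["quiet"]

# phase_quiet("compile", 0) -> True   (output goes only to the log file)
# phase_quiet("compile", 1) -> False  (output is tee'd to the terminal too)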
if mydo in ["clean","cleanrm"]: @@ -2830,7 +2819,7 @@ debug=debug, free=1, logfile=None) elif mydo in ["help","setup"]: return spawn(EBUILD_SH_BINARY + " " + mydo, mysettings, - debug=debug, free=1, logfile=logfile) + debug=debug, free=1, quiet=False, logfile=logfile) elif mydo == "preinst": if mysettings.get("EMERGE_FROM", None) == "binary": mysettings.load_infodir(mysettings["O"]) @@ -2855,7 +2844,7 @@ elif mydo in ["prerm","postrm","postinst","config"]: mysettings.load_infodir(mysettings["O"]) return spawn(EBUILD_SH_BINARY + " " + mydo, - mysettings, debug=debug, free=1, logfile=logfile) + mysettings, debug=debug, free=1, quiet=(mydo != "postinst"), logfile=logfile) mycpv = "/".join((mysettings["CATEGORY"], mysettings["PF"])) @@ -2971,14 +2960,14 @@ # args are for the to spawn function actionmap = { -"depend": {"cmd":ebuild_sh, "args":{"droppriv":1, "free":0, "sesandbox":0}}, -"setup": {"cmd":ebuild_sh, "args":{"droppriv":0, "free":1, "sesandbox":0}}, -"unpack": {"cmd":ebuild_sh, "args":{"droppriv":1, "free":0, "sesandbox":sesandbox}}, -"compile":{"cmd":ebuild_sh, "args":{"droppriv":1, "free":nosandbox, "sesandbox":sesandbox}}, -"test": {"cmd":ebuild_sh, "args":{"droppriv":1, "free":nosandbox, "sesandbox":sesandbox}}, -"install":{"cmd":ebuild_sh, "args":{"droppriv":0, "free":0, "sesandbox":sesandbox}}, -"rpm": {"cmd":misc_sh, "args":{"droppriv":0, "free":0, "sesandbox":0}}, -"package":{"cmd":misc_sh, "args":{"droppriv":0, "free":0, "sesandbox":0}}, +"depend": {"cmd":ebuild_sh, "quiet":True, "args":{"droppriv":1, "free":0, "sesandbox":0}}, +"setup": {"cmd":ebuild_sh, "quiet":False, "args":{"droppriv":0, "free":1, "sesandbox":0}}, +"unpack": {"cmd":ebuild_sh, "quiet":True, "args":{"droppriv":1, "free":0, "sesandbox":sesandbox}}, +"compile":{"cmd":ebuild_sh, "quiet":True, "args":{"droppriv":1, "free":nosandbox, "sesandbox":sesandbox}}, +"test": {"cmd":ebuild_sh, "quiet":True, "args":{"droppriv":1, "free":nosandbox, "sesandbox":sesandbox}}, +"install":{"cmd":ebuild_sh, "quiet":True, "args":{"droppriv":0, "free":0, "sesandbox":sesandbox}}, +"rpm": {"cmd":misc_sh, "quiet":False, "args":{"droppriv":0, "free":0, "sesandbox":0}}, +"package":{"cmd":misc_sh, "quiet":False, "args":{"droppriv":0, "free":0, "sesandbox":0}}, } # merge the deps in so we have again a 'full' actionmap @@ -2986,6 +2975,9 @@ for x in actionmap.keys(): if len(actionmap_deps.get(x, [])): actionmap[x]["dep"] = ' '.join(actionmap_deps[x]) + # debug session should get all the junk in the screen + if debug and actionmap[x]["quiet"]: + actionmap[x]["quiet"] = False if mydo in actionmap.keys(): if mydo=="package": @@ -3011,7 +3003,7 @@ mysettings["CATEGORY"], mysettings["PF"], mysettings["D"], os.path.join(mysettings["PORTAGE_BUILDDIR"], "build-info"), myroot, mysettings, myebuild=mysettings["EBUILD"], mytree=tree, - mydbapi=mydbapi, vartree=vartree, prev_mtimes=prev_mtimes) + mydbapi=mydbapi, vartree=vartree, prev_mtimes=prev_mtimes, logfile=logfile) elif mydo=="merge": retval = spawnebuild("install", actionmap, mysettings, debug, alwaysdep=1, logfile=logfile) @@ -3020,7 +3012,7 @@ mysettings["D"], os.path.join(mysettings["PORTAGE_BUILDDIR"], "build-info"), myroot, mysettings, myebuild=mysettings["EBUILD"], mytree=tree, mydbapi=mydbapi, - vartree=vartree, prev_mtimes=prev_mtimes) + vartree=vartree, prev_mtimes=prev_mtimes, logfile=logfile) else: print "!!! 
Unknown mydo:",mydo return 1 @@ -3235,15 +3227,15 @@ return newmtime def merge(mycat, mypkg, pkgloc, infloc, myroot, mysettings, myebuild=None, - mytree=None, mydbapi=None, vartree=None, prev_mtimes=None): + mytree=None, mydbapi=None, vartree=None, prev_mtimes=None, logfile=None): mylink = dblink(mycat, mypkg, myroot, mysettings, treetype=mytree, - vartree=vartree) + vartree=vartree, logfile=logfile) return mylink.merge(pkgloc, infloc, myroot, myebuild, mydbapi=mydbapi, prev_mtimes=prev_mtimes) -def unmerge(cat, pkg, myroot, mysettings, mytrimworld=1, vartree=None, ldpath_mtimes=None): +def unmerge(cat, pkg, myroot, mysettings, mytrimworld=1, vartree=None, ldpath_mtimes=None, logfile=None): mylink = dblink( - cat, pkg, myroot, mysettings, treetype="vartree", vartree=vartree) + cat, pkg, myroot, mysettings, treetype="vartree", vartree=vartree, logfile=logfile) try: mylink.lockdb() if mylink.exists(): @@ -5541,7 +5533,7 @@ class dblink: "this class provides an interface to the standard text package database" def __init__(self, cat, pkg, myroot, mysettings, treetype=None, - vartree=None): + vartree=None, logfile=None): "create a dblink object for cat/pkg. This dblink entry may or may not exist" self.cat = cat self.pkg = pkg @@ -5574,6 +5566,8 @@ self.updateprotect = protect_obj.updateprotect self.isprotected = protect_obj.isprotected self.contentscache=[] + self.logfile = logfile + self.logfd = None def lockdb(self): if self.lock_num == 0: @@ -5717,6 +5711,11 @@ writemsg("!!! FAILED prerm: "+str(a)+"\n", noiselevel=-1) sys.exit(123) + if self.logfile: + self.logfd = open(self.logfile, "a") + else: + self.logfd = sys.stdout + if pkgfiles: mykeys=pkgfiles.keys() mykeys.sort() @@ -5747,7 +5746,7 @@ #we skip this if we're dealing with a symlink #because os.stat() will operate on the #link target rather than the link itself. - writemsg_stdout("--- !found "+str(pkgfiles[objkey][0])+ " %s\n" % obj) + writemsg("--- !found "+str(pkgfiles[objkey][0])+ " %s\n" % obj, fd=self.logfd) continue # next line includes a tweak to protect modules from being unmerged, # but we don't protect modules from being overwritten if they are @@ -5755,57 +5754,57 @@ # functionality for /lib/modules. For portage-ng both capabilities # should be able to be independently specified. if self.isprotected(obj) or ((len(obj) > len(modprotect)) and (obj[0:len(modprotect)]==modprotect)): - writemsg_stdout("--- cfgpro %s %s\n" % (pkgfiles[objkey][0], obj)) + writemsg("--- cfgpro %s %s\n" % (pkgfiles[objkey][0], obj), fd=self.logfd) continue lmtime=str(lstatobj[stat.ST_MTIME]) if (pkgfiles[objkey][0] not in ("dir","fif","dev")) and (lmtime != pkgfiles[objkey][1]): - writemsg_stdout("--- !mtime %s %s\n" % (pkgfiles[objkey][0], obj)) + writemsg("--- !mtime %s %s\n" % (pkgfiles[objkey][0], obj), fd=self.logfd) continue if pkgfiles[objkey][0]=="dir": if statobj is None or not stat.S_ISDIR(statobj.st_mode): - writemsg_stdout("--- !dir %s %s\n" % ("dir", obj)) + writemsg("--- !dir %s %s\n" % ("dir", obj), fd=self.logfd) continue mydirs.append(obj) elif pkgfiles[objkey][0]=="sym": if not islink: - writemsg_stdout("--- !sym %s %s\n" % ("sym", obj)) + writemsg("--- !sym %s %s\n" % ("sym", obj), fd=self.logfd) continue try: os.unlink(obj) - writemsg_stdout("<<< %s %s\n" % ("sym",obj)) + writemsg("<<< %s %s\n" % ("sym",obj), fd=self.logfd) except (OSError,IOError),e: - writemsg_stdout("!!! %s %s\n" % ("sym",obj)) + writemsg("!!! 
%s %s\n" % ("sym",obj), fd=self.logfd) elif pkgfiles[objkey][0]=="obj": if statobj is None or not stat.S_ISREG(statobj.st_mode): - writemsg_stdout("--- !obj %s %s\n" % ("obj", obj)) + writemsg("--- !obj %s %s\n" % ("obj", obj), fd=self.logfd) continue mymd5 = None try: mymd5 = portage_checksum.perform_md5(obj, calc_prelink=1) except portage_exception.FileNotFound, e: # the file has disappeared between now and our stat call - writemsg_stdout("--- !obj %s %s\n" % ("obj", obj)) + writemsg("--- !obj %s %s\n" % ("obj", obj), fd=self.logfd) continue # string.lower is needed because db entries used to be in upper-case. The # string.lower allows for backwards compatibility. if mymd5 != string.lower(pkgfiles[objkey][2]): - writemsg_stdout("--- !md5 %s %s\n" % ("obj", obj)) + writemsg("--- !md5 %s %s\n" % ("obj", obj), fd=self.logfd) continue try: os.unlink(obj) except (OSError,IOError),e: pass - writemsg_stdout("<<< %s %s\n" % ("obj",obj)) + writemsg("<<< %s %s\n" % ("obj",obj), fd=self.logfd) elif pkgfiles[objkey][0]=="fif": if not stat.S_ISFIFO(lstatobj[stat.ST_MODE]): - writemsg_stdout("--- !fif %s %s\n" % ("fif", obj)) + writemsg("--- !fif %s %s\n" % ("fif", obj), fd=self.logfd) continue - writemsg_stdout("--- %s %s\n" % ("fif",obj)) + writemsg("--- %s %s\n" % ("fif",obj), fd=self.logfd) elif pkgfiles[objkey][0]=="dev": - writemsg_stdout("--- %s %s\n" % ("dev",obj)) + writemsg("--- %s %s\n" % ("dev",obj), fd=self.logfd) mydirs.sort() mydirs.reverse() @@ -5815,20 +5814,25 @@ if not last_non_empty.startswith(obj) and not listdir(obj): try: os.rmdir(obj) - writemsg_stdout("<<< %s %s\n" % ("dir",obj)) + writemsg("<<< %s %s\n" % ("dir",obj), fd=self.logfd) last_non_empty = "" continue except (OSError,IOError),e: #immutable? pass - writemsg_stdout("--- !empty dir %s\n" % obj) + writemsg("--- !empty dir %s\n" % obj, fd=self.logfd) last_non_empty = obj continue #remove self from vartree database so that our own virtual gets zapped if we're the last node self.vartree.zap(self.mycpv) + # close out the logfile now + self.logfd.flush() + if self.logfile: + self.logfd.close() + #do original postrm if myebuildpath and os.path.exists(myebuildpath): # XXX: This should be the old config, not the current one. @@ -6015,6 +6019,11 @@ prevmask = os.umask(0) secondhand = [] + if self.logfile: + self.logfd = open(self.logfile, "a") + else: + self.logfd = sys.stdout + # we do a first merge; this will recurse through all files in our srcroot but also build up a # "second hand" of symlinks to merge later if self.mergeme(srcroot,destroot,outfile,secondhand,"",cfgfiledict,mymtime): @@ -6049,12 +6058,18 @@ outfile.flush() outfile.close() + # close the log file. unmerge will open it again. Can't leave it open because unmerge does + # doebuild which will screw the logfile + self.logfd.flush() + if self.logfile: + self.logfd.close() + if os.path.exists(self.dbpkgdir): - writemsg_stdout(">>> Safely unmerging already-installed instance...\n") + writemsg_stdout(">>> Safely unmerging already-installed instance of "+self.mycpv+" ...\n") self.dbdir = self.dbpkgdir self.unmerge(oldcontents, trimworld=0, ldpath_mtimes=prev_mtimes) self.dbdir = self.dbtmpdir - writemsg_stdout(">>> Original instance of package unmerged safely.\n") + writemsg_stdout(">>> Original instance of "+self.mycpv+" unmerged safely.\n") # We hold both directory locks. 
self.dbdir = self.dbpkgdir @@ -6213,7 +6228,7 @@ # unlinking no longer necessary; "movefile" will overwrite symlinks atomically and correctly mymtime=movefile(mysrc,mydest,newmtime=thismtime,sstat=mystat, mysettings=self.settings) if mymtime!=None: - writemsg_stdout(">>> %s -> %s\n" % (mydest, myto)) + writemsg(">>> %s -> %s\n" % (mydest, myto), fd=self.logfd) outfile.write("sym "+myrealdest+" -> "+myto+" "+str(mymtime)+"\n") else: print "!!! Failed to move file." @@ -6242,7 +6257,7 @@ if stat.S_ISLNK(mydmode) or stat.S_ISDIR(mydmode): # a symlink to an existing directory will work for us; keep it: - writemsg_stdout("--- %s/\n" % mydest) + writemsg("--- %s/\n" % mydest, fd=self.logfd) if bsd_chflags: bsd_chflags.lchflags(mydest, dflags) else: @@ -6260,7 +6275,7 @@ bsd_chflags.lchflags(mydest, dflags) os.chmod(mydest,mystat[0]) os.chown(mydest,mystat[4],mystat[5]) - writemsg_stdout(">>> %s/\n" % mydest) + writemsg(">>> %s/\n" % mydest, fd=self.logfd) else: #destination doesn't exist if self.settings.selinux_enabled(): @@ -6272,7 +6287,7 @@ if bsd_chflags: bsd_chflags.lchflags(mydest, bsd_chflags.lgetflags(mysrc)) os.chown(mydest,mystat[4],mystat[5]) - writemsg_stdout(">>> %s/\n" % mydest) + writemsg(">>> %s/\n" % mydest, fd=self.logfd) outfile.write("dir "+myrealdest+"\n") # recurse and merge this directory if self.mergeme(srcroot, destroot, outfile, secondhand, @@ -6290,7 +6305,7 @@ if stat.S_ISDIR(mydmode): # install of destination is blocked by an existing directory with the same name moveme=0 - writemsg_stdout("!!! %s\n" % mydest) + writemsg("!!! %s\n" % mydest, fd=self.logfd) elif stat.S_ISREG(mydmode) or (stat.S_ISLNK(mydmode) and os.path.exists(mydest) and stat.S_ISREG(os.stat(mydest)[stat.ST_MODE])): cfgprot=0 # install of destination is blocked by an existing regular file, @@ -6393,7 +6408,7 @@ if mymtime!=None: zing=">>>" outfile.write("obj "+myrealdest+" "+mymd5+" "+str(mymtime)+"\n") - writemsg_stdout("%s %s\n" % (zing,mydest)) + writemsg("%s %s\n" % (zing,mydest), fd=self.logfd) else: # we are merging a fifo or device node zing="!!!" @@ -6407,7 +6422,7 @@ outfile.write("fif "+myrealdest+"\n") else: sys.exit(1) - writemsg_stdout(zing+" "+mydest+"\n") + writemsg(zing+" "+mydest+"\n", fd=self.logfd) def merge(self, mergeroot, inforoot, myroot, myebuild=None, cleanup=0, mydbapi=None, prev_mtimes=None): @@ -6490,7 +6505,7 @@ os.unlink(settings["PORTAGE_TMPDIR"]+"/portage/"+mypkg+"/temp/environment") os.chdir(origdir) -def pkgmerge(mytbz2, myroot, mysettings, mydbapi=None, vartree=None, prev_mtimes=None): +def pkgmerge(mytbz2, myroot, mysettings, mydbapi=None, vartree=None, prev_mtimes=None, logfile=None): """will merge a .tbz2 file, returning a list of runtime dependencies that must be satisfied, or None if there was a merge error. This code assumes the package exists.""" @@ -6547,7 +6562,7 @@ # auto-unmerge, virtual/provides updates, etc. 
 	mysettings.load_infodir(infloc)
 	mylink = dblink(mycat, mypkg, myroot, mysettings, vartree=vartree,
-		treetype="bintree")
+		treetype="bintree", logfile=logfile)
 	mylink.merge(pkgloc, infloc, myroot, myebuild, cleanup=1,
 		mydbapi=mydbapi, prev_mtimes=prev_mtimes)
diff -ru portage-2.1.1.orig/pym/portage_exec.py portage-2.1.1/pym/portage_exec.py
--- portage-2.1.1.orig/pym/portage_exec.py	2006-08-01 17:43:12.000000000 +0000
+++ portage-2.1.1/pym/portage_exec.py	2006-09-14 00:17:13.000000000 +0000
@@ -100,7 +100,7 @@
 def spawn(mycommand, env={}, opt_name=None, fd_pipes=None, returnpid=False,
 	uid=None, gid=None, groups=None, umask=None, logfile=None,
-	path_lookup=True):
+	path_lookup=True, quiet=True):
 
 	# mycommand is either a str or a list
 	if isinstance(mycommand, str):
@@ -123,20 +123,21 @@
 	# mypids will hold the pids of all processes created.
 	mypids = []
 
-	if logfile:
+	if logfile and not quiet:
 		# Using a log file requires that stdout and stderr
 		# are assigned to the process we're running.
 		if 1 not in fd_pipes or 2 not in fd_pipes:
 			raise ValueError(fd_pipes)
+
 		# Create a pipe
 		(pr, pw) = os.pipe()
 
 		# Create a tee process, giving it our stdout and stderr
 		# as well as the read end of the pipe.
 		mypids.extend(spawn(('tee', '-i', '-a', logfile),
-			returnpid=True, fd_pipes={0:pr,
-			1:fd_pipes[1], 2:fd_pipes[2]}))
+			returnpid=True, fd_pipes={0:pr,
+				1:fd_pipes[1], 2:fd_pipes[2]}))
 
 		# We don't need the read end of the pipe, so close it.
 		os.close(pr)
@@ -148,6 +149,13 @@
 
 	pid = os.fork()
 	if not pid:
+		if quiet and logfile:
+			sys.stdout.close()
+			sys.stderr.close()
+			sys.stdout = open(logfile, "a")
+			sys.stderr = sys.stdout
+			os.dup2(sys.stdout.fileno(), fd_pipes[1])
+			os.dup2(sys.stderr.fileno(), fd_pipes[2])
 		try:
 			_exec(binary, mycommand, opt_name, fd_pipes, env, gid, groups,
 				uid, umask)
@@ -164,7 +172,7 @@
 
 	# If we started a tee process the write side of the pipe is no
 	# longer needed, so close it.
-	if logfile:
+	if logfile and not quiet:
 		os.close(pw)
 
 	# If the caller wants to handle cleaning up the processes, we tell
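With the quiet flag, spawn() now has two output modes: quiet=False keeps the existing behaviour (a tee child, so output reaches both the terminal and the log file), while quiet=True sends the child's stdout/stderr straight into the log file and nothing to the terminal. The same two modes expressed with the subprocess module, purely as an illustration of the behaviour (portage_exec itself keeps using os.fork/_exec):

import subprocess

def run_logged(cmd, logfile, quiet):
    """Run cmd, logging to logfile; mirror spawn()'s quiet/verbose modes."""
    if quiet:
        # Quiet: everything goes to the log file only.
        log = open(logfile, "a")
        try:
            return subprocess.call(cmd, stdout=log, stderr=subprocess.STDOUT)
        finally:
            log.close()
    # Verbose: pipe through tee so output hits the terminal and the log file.
    child = subprocess.Popen(cmd, stdout=subprocess.PIPE,
                             stderr=subprocess.STDOUT)
    tee = subprocess.Popen(["tee", "-i", "-a", logfile], stdin=child.stdout)
    child.stdout.close()
    tee.wait()
    return child.wait()

# run_logged(["true"], "/tmp/example.log", quiet=True) -> 0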