diff -ur portage-2.1.2.orig/bin/emerge portage-2.1.2/bin/emerge --- portage-2.1.2.orig/bin/emerge 2007-03-25 18:05:47.000000000 -0700 +++ portage-2.1.2/bin/emerge 2007-04-01 19:26:10.000000000 -0700 @@ -2006,6 +2006,7 @@ ignore_priority_range.extend( xrange(DepPriority.MIN, DepPriority.MEDIUM + 1)) tree_mode = "--tree" in self.myopts + slotcount = 0 while not mygraph.empty(): ignore_priority = None nodes = None @@ -2020,6 +2021,7 @@ nodes = [node] asap_nodes.remove(node) break + # if no asap node, then get the first non-null set of leaf (root if --tree) nodes in the priority range if not nodes: for ignore_priority in ignore_priority_range: nodes = get_nodes(ignore_priority=ignore_priority) @@ -2123,7 +2125,10 @@ sys.exit(1) for node in selected_nodes: - retlist.append(list(node)) + node2 = list(node) + if len(node2) == 4: + node2.append(str(slotcount)) + retlist.append(node2) mygraph.remove(node) if not reversed and not circular_blocks and myblockers.contains(node): """This node may have invalidated one or more blockers.""" @@ -2132,6 +2137,7 @@ if not myblockers.child_nodes(blocker): myblockers.remove(blocker) del self.blocker_parents[blocker] + slotcount += 1 if not reversed: """Blocker validation does not work with reverse mode, @@ -2337,7 +2343,7 @@ if "blocks" == x[0]: display_list.append((x, 0, True)) continue - graph_key = tuple(x) + graph_key = tuple(x[:-1]) if "--tree" in self.myopts: depth = len(tree_nodes) while depth and graph_key not in \ @@ -2391,7 +2397,7 @@ continue if "blocks" == graph_key[0]: continue - if ordered and graph_key[-1] != "nomerge": + if ordered and graph_key[-2] != "nomerge": last_merge_depth = depth continue if depth >= last_merge_depth or \ @@ -2426,7 +2432,7 @@ addl = addl + " " + red(resolved) else: addl = "[blocks " + addl + "] " + red(resolved) - block_parents = self.blocker_parents[tuple(x)] + block_parents = self.blocker_parents[tuple(x[:-1])] block_parents = set([pnode[2] for pnode in block_parents]) block_parents = ", ".join(block_parents) if resolved!=x[2]: @@ -2600,7 +2606,7 @@ if verbosity == 3: # size verbose mysize=0 - if x[0] == "ebuild" and ordered and x[-1] != "nomerge": + if x[0] == "ebuild" and ordered and x[-2] != "nomerge": try: myfilesdict = portdb.getfetchsizes(pkg_key, useflags=self.useFlags[myroot][pkg_key], @@ -2679,7 +2685,7 @@ myprint=myprint+myoldbest myprint=myprint+darkgreen("to "+x[1])+" "+verboseadd else: - if x[-1] == "nomerge" or not ordered: + if x[-2] == "nomerge" or not ordered: myprint = darkblue("[nomerge ] ") else: myprint = "[" + pkg_type + " " + addl + "] " @@ -2886,11 +2892,457 @@ self.pkgsettings["/"] = \ portage.config(clone=trees["/"]["vartree"].settings) - def merge(self, mylist, favorites, mtimedb): + def restart_portage(self, x, mergecount, totalcount, mtimedb): + xterm_titles = "notitles" not in self.settings.features + # don't really restart if any of these is true + # XXXXX - seems like redundant check, but what the hell! sky is not falling as yet. 
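A note on the data-structure change in the hunks above: altlist() now appends the scheduler pass number (slotcount) to every entry it emits, widening each merge-list record from four fields to five. Entries appended in the same pass share a slot number, and the parallel code later treats packages with equal slots as safe to build concurrently. Because of the extra field, the display and blocker code keys on tuple(x[:-1]) and tests x[-2] rather than x[-1] for "nomerge". A minimal sketch of the new entry shape follows; the package atom and slot value are made up for illustration, and the new restart_portage helper, whose body continues below, consumes entries of this shape when a freshly merged portage must re-exec itself.

    # Hypothetical five-field merge-list entry after this patch:
    #   [pkg_type, root, cpv, action, slot]
    entry = ["ebuild", "/", "dev-libs/libfoo-1.0", "merge", "3"]

    graph_key = tuple(entry[:-1])   # the original four-field key the digraph/blocker code still uses
    action    = entry[-2]           # "merge" or "nomerge" now sits one position from the end
    slot      = int(entry[-1])      # entries with the same slot were selected in the same scheduler pass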
+ if "--pretend" in self.myopts or "--fetchonly" in self.myopts or \ + "--fetch-all-uri" in self.myopts or "--buildpkgonly" in self.myopts: + return + + bad_resume_opts = set(["--ask", "--tree", "--changelog", "--skipfirst", + "--resume"]) + mysplit=portage.pkgsplit(x[2]) + myver=mysplit[1]+"-"+mysplit[2] + if myver[-3:]=='-r0': + myver=myver[:-3] + if (myver != portage.VERSION) and \ + "livecvsportage" not in self.settings.features: + if totalcount > mergecount: + emergelog(xterm_titles, + " ::: completed emerge ("+ \ + str(mergecount)+" of "+ \ + str(totalcount)+") "+ \ + x[2]+" to "+x[1]) + emergelog(xterm_titles, " *** RESTARTING " + \ + "emerge via exec() after change of " + \ + "portage version.") + portage.run_exitfuncs() + mynewargv=[sys.argv[0],"--resume"] + resume_opts = self.myopts.copy() + # For automatic resume, we need to prevent + # any of bad_resume_opts from leaking in + # via EMERGE_DEFAULT_OPTS. + resume_opts["--ignore-default-opts"] = True + for myopt, myarg in resume_opts.iteritems(): + if myopt not in bad_resume_opts: + if myarg is True: + mynewargv.append(myopt) + else: + mynewargv.append(myopt +"="+ myarg) + # priority only needs to be adjusted on the first run + os.environ["PORTAGE_NICENESS"] = "0" + os.execv(mynewargv[0], mynewargv) + + def fork_one_emerge(self, x, mergecount, totalcount, mtimedb, favorites, mysysdict): + xterm_titles = "notitles" not in self.settings.features + myfeat = self.settings.features[:] + ldpath_mtimes = mtimedb["ldpath"] + myroot=x[1] + pkg_key = x[2] + pkg_cat = x[2].split("/")[0] + pkg_pf = x[2].split("/")[1] + pkgindex=2 + if x[0]=="blocks": + pkgindex=3 + + build_prefix=self.settings["PORTAGE_TMPDIR"]+"/portage" + logid_path = None + null_log = 0 + + if self.settings.get("PORT_LOGDIR", "") == "": + while "PORT_LOGDIR" in self.settings: + del self.settings["PORT_LOGDIR"] + if "PORT_LOGDIR" in self.settings: + port_logdir = self.settings["PORT_LOGDIR"] + else: + port_logdir = self.settings["ROOT"] + portage.DEF_LOGDIR + + try: + portage_util.ensure_dirs(port_logdir, uid=portage.portage_uid, + gid=portage.portage_gid, mode=02770) + except portage_exception.PortageException, e: + writemsg("!!! %s\n" % str(e), noiselevel=-1) + writemsg("!!! Permission issues with PORT_LOGDIR='%s'\n" % \ + self.settings["PORT_LOGDIR"], noiselevel=-1) + writemsg("!!! Because 'parallel' feature is enabled, you won't get any logs.\n", noiselevel=-1) + null_log = 1 + + if not null_log: + logid_path = os.path.join(build_prefix, ".logid.")+pkg_cat+"."+pkg_pf + if not os.path.exists(logid_path): + f = open(logid_path, "w") + f.close() + del f + logid_time = time.strftime("%Y%m%d-%H%M%S", + time.gmtime(os.stat(logid_path).st_mtime)) + logfile = os.path.join(port_logdir, "%s:%s:%s.log" % \ + (pkg_cat, pkg_pf, logid_time)) + del logid_time + else: + logfile = "/dev/null" + + if "--pretend" not in self.myopts and "--fetchonly" not in self.myopts: + print ">>> Emerging (" + \ + colorize("MERGE_LIST_PROGRESS", str(mergecount)) + " of " + \ + colorize("MERGE_LIST_PROGRESS", str(totalcount)) + ") " + \ + colorize("GOOD", x[pkgindex]) + " to " + x[1] + print ">>> Logfile in " + logfile + emergelog(xterm_titles, " >>> emerge ("+\ + str(mergecount)+" of "+str(totalcount)+\ + ") "+x[pkgindex]+" to "+x[1]) + + # need to spawn a --nodeps emerge in a separate process. 
+ pkg="="+x[2] + merge_env = os.environ.copy() + merge_env["PORTAGE_INTERNAL_CALL"] = "1" + merge_env["FEATURES"] = merge_env.get("FEATURES", "") + " notitles -parallel" + merge_args = [sys.argv[0], "--nodeps", "--oneshot", "--nospinner", pkg] + good_nodeps_opts = set(["--buildpkg", "--buildpkgonly", "--fetchonly", "--fetch-all-uri", "--getbinpkg",\ + "--usepkg", "--usepkgonly"]) + fd_pipes = None + merge_logfd = None + for myopt, myarg in self.myopts.iteritems(): + # don't clobber the logfile at the same time as parallel fetch is + # all log of parallel fetch will go /var/log/emerge-fetch.log + # so, just leave 0,1,2 alone. + if "parallel-fetch" in myfeat and myopt == "--fetchonly": + fd_pipes = {0:0, 1:1, 2:2} + if myopt in good_nodeps_opts: + if myarg is True: + merge_args.append(myopt) + else: + merge_args.append(myopt +"="+ myarg) + if not fd_pipes: + merge_logfd = open(logfile, "w") + # put in a start message. This also makes sure that this fd is pointing to a good file on disk + # and hence will be used throughout the other spawns that will happen in the children. + merge_logfd.write("Package "+x[pkgindex]+" started at "+time.ctime()+"\n\n") + merge_logfd.flush() + fd_pipes = {0:0, 1:merge_logfd.fileno(), 2:merge_logfd.fileno()} + portage_util.apply_secpass_permissions(logfile, uid=portage.portage_uid, gid=portage.portage_gid, mode=0660) + + mypids = portage.portage_exec.spawn(merge_args, env=merge_env, fd_pipes=fd_pipes, returnpid=True) + if merge_logfd: + merge_logfd.close() # child has exclusive rights to it now. + return mypids[0] + + def do_one_emerge(self, x, mergecount, totalcount, mtimedb, favorites, mysysdict): + xterm_titles = "notitles" not in self.settings.features + myfeat = self.settings.features[:] + ldpath_mtimes = mtimedb["ldpath"] + myroot=x[1] + pkg_key = x[2] + pkg_cat = x[2].split("/")[0] + pkg_pf = x[2].split("/")[1] + pkgindex=2 + if x[0]=="blocks": + pkgindex=3 + + if "--pretend" not in self.myopts and "--fetchonly" not in self.myopts: + print "\n>>> Emerging (" + \ + colorize("MERGE_LIST_PROGRESS", str(mergecount)) + " of " + \ + colorize("MERGE_LIST_PROGRESS", str(totalcount)) + ") " + \ + colorize("GOOD", x[pkgindex]) + " to " + x[1] + emergelog(xterm_titles, " >>> emerge ("+\ + str(mergecount)+" of "+str(totalcount)+\ + ") "+x[pkgindex]+" to "+x[1]) + + portdb = self.trees[myroot]["porttree"].dbapi + bindb = self.trees[myroot]["bintree"].dbapi + vartree = self.trees[myroot]["vartree"] + pkgsettings = self.pkgsettings[myroot] + y = portdb.findname(pkg_key) + pkgsettings["EMERGE_FROM"] = x[0] + pkgsettings.backup_changes("EMERGE_FROM") + pkgsettings.reset() + + #buildsyspkg: Check if we need to _force_ binary package creation + issyspkg = ("buildsyspkg" in myfeat) \ + and x[0] != "blocks" \ + and mysysdict.has_key(portage.cpv_getkey(x[2])) \ + and "--buildpkg" not in self.myopts + if x[0] in ["ebuild","blocks"]: + if x[0] == "blocks" and "--fetchonly" not in self.myopts: + raise Exception, "Merging a blocker" + elif "--fetchonly" in self.myopts or \ + "--fetch-all-uri" in self.myopts: + if "--fetch-all-uri" in self.myopts: + retval = portage.doebuild(y, "fetch", myroot, + pkgsettings, self.edebug, + "--pretend" in self.myopts, fetchonly=1, + fetchall=1, mydbapi=portdb, tree="porttree") + else: + retval = portage.doebuild(y, "fetch", myroot, + pkgsettings, self.edebug, + "--pretend" in self.myopts, fetchonly=1, + mydbapi=portdb, tree="porttree") + if (retval is None) or retval: + print + print "!!! Fetch for",y,"failed, continuing..." 
+ print + + return retval + + portage.doebuild_environment(y, "setup", myroot, + pkgsettings, self.edebug, 1, portdb) + catdir = os.path.dirname(pkgsettings["PORTAGE_BUILDDIR"]) + portage_util.ensure_dirs(os.path.dirname(catdir), + uid=portage.portage_uid, gid=portage.portage_gid, + mode=070, mask=0) + builddir_lock = None + catdir_lock = None + try: + catdir_lock = portage_locks.lockdir(catdir) + portage_util.ensure_dirs(catdir, + uid=portage.portage_uid, gid=portage.portage_gid, + mode=070, mask=0) + builddir_lock = portage_locks.lockdir( + pkgsettings["PORTAGE_BUILDDIR"]) + try: + portage_locks.unlockdir(catdir_lock) + finally: + catdir_lock = None + msg = " === (%s of %s) Cleaning (%s::%s)" % \ + (mergecount, totalcount, pkg_key, y) + short_msg = "emerge: (%s of %s) %s Clean" % \ + (mergecount, totalcount, pkg_key) + emergelog(xterm_titles, msg, short_msg=short_msg) + retval = portage.doebuild(y, "clean", myroot, + pkgsettings, self.edebug, cleanup=1, + mydbapi=portdb, tree="porttree") + + if retval != os.EX_OK: + return retval + if "--buildpkg" in self.myopts or issyspkg: + if issyspkg: + print ">>> This is a system package, " + \ + "let's pack a rescue tarball." + msg = " === (%s of %s) Compiling/Packaging (%s::%s)" % \ + (mergecount, totalcount, pkg_key, y) + short_msg = "emerge: (%s of %s) %s Compile" % \ + (mergecount, totalcount, pkg_key) + emergelog(xterm_titles, msg, short_msg=short_msg) + self.trees[myroot]["bintree"].prevent_collision(pkg_key) + retval = portage.doebuild(y, "package", myroot, + pkgsettings, self.edebug, mydbapi=portdb, + tree="porttree") + if retval != os.EX_OK: + return retval + bintree = self.trees[myroot]["bintree"] + if bintree.populated: + bintree.inject(pkg_key) + if "--buildpkgonly" not in self.myopts: + msg = " === (%s of %s) Merging (%s::%s)" % \ + (mergecount, totalcount, pkg_key, y) + short_msg = "emerge: (%s of %s) %s Merge" % \ + (mergecount, totalcount, pkg_key) + emergelog(xterm_titles, msg, short_msg=short_msg) + retval = portage.merge(pkgsettings["CATEGORY"], + pkgsettings["PF"], pkgsettings["D"], + os.path.join(pkgsettings["PORTAGE_BUILDDIR"], + "build-info"), myroot, pkgsettings, + myebuild=pkgsettings["EBUILD"], + mytree="porttree", mydbapi=portdb, + vartree=vartree, prev_mtimes=ldpath_mtimes) + if retval != os.EX_OK: + return retval + elif "noclean" not in pkgsettings.features: + portage.doebuild(y, "clean", myroot, + pkgsettings, self.edebug, mydbapi=portdb, + tree="porttree") + else: + msg = " === (%s of %s) Compiling/Merging (%s::%s)" % \ + (mergecount, totalcount, pkg_key, y) + short_msg = "emerge: (%s of %s) %s Compile" % \ + (mergecount, totalcount, pkg_key) + emergelog(xterm_titles, msg, short_msg=short_msg) + retval = portage.doebuild(y, "merge", myroot, + pkgsettings, self.edebug, vartree=vartree, + mydbapi=portdb, tree="porttree", + prev_mtimes=ldpath_mtimes) + if retval != os.EX_OK: + return retval + finally: + if builddir_lock: + portage_locks.unlockdir(builddir_lock) + try: + if not catdir_lock: + # Lock catdir for removal if empty. 
+ catdir_lock = portage_locks.lockdir(catdir) + finally: + if catdir_lock: + try: + os.rmdir(catdir) + except OSError, e: + if e.errno not in (errno.ENOENT, + errno.ENOTEMPTY, errno.EEXIST): + raise + del e + portage_locks.unlockdir(catdir_lock) + + elif x[0]=="binary": + #merge the tbz2 + mytbz2 = self.trees[myroot]["bintree"].getname(pkg_key) + if "--getbinpkg" in self.myopts: + tbz2_lock = None + try: + if "distlocks" in pkgsettings.features and \ + os.access(pkgsettings["PKGDIR"], os.W_OK): + portage_util.ensure_dirs(os.path.dirname(mytbz2)) + tbz2_lock = portage_locks.lockfile(mytbz2, + wantnewlockfile=1) + if self.trees[myroot]["bintree"].isremote(pkg_key): + msg = " --- (%s of %s) Fetching Binary (%s::%s)" %\ + (mergecount, totalcount, pkg_key, mytbz2) + short_msg = "emerge: (%s of %s) %s Fetch" % \ + (mergecount, totalcount, pkg_key) + emergelog(xterm_titles, msg, short_msg=short_msg) + if not self.trees[myroot]["bintree"].gettbz2( + pkg_key): + return 1 + finally: + if tbz2_lock: + portage_locks.unlockfile(tbz2_lock) + + if "--fetchonly" in self.myopts or \ + "--fetch-all-uri" in self.myopts: + return os.EX_OK + + short_msg = "emerge: ("+str(mergecount)+" of "+str(totalcount)+") "+x[pkgindex]+" Merge Binary" + emergelog(xterm_titles, " === ("+str(mergecount)+\ + " of "+str(totalcount)+") Merging Binary ("+\ + x[pkgindex]+"::"+mytbz2+")", short_msg=short_msg) + + retval = portage.pkgmerge(mytbz2, x[1], pkgsettings, + mydbapi=bindb, + vartree=self.trees[myroot]["vartree"], + prev_mtimes=ldpath_mtimes) + if retval != os.EX_OK: + return retval + #need to check for errors + + # clean up the older version which emerged on top of + if "--buildpkgonly" not in self.myopts and \ + "--pretend" not in self.myopts and \ + "--fetchonly" not in self.myopts and \ + "--fetch-all-uri" not in self.myopts: + # Clean the old package that we have merged over top of it. + if pkgsettings.get("AUTOCLEAN", "yes") == "yes": + xsplit=portage.pkgsplit(x[2]) + emergelog(xterm_titles, " >>> AUTOCLEAN: " + xsplit[0]) + retval = unmerge(pkgsettings, self.myopts, vartree, + "clean", [xsplit[0]], ldpath_mtimes, autoclean=1) + if not retval: + emergelog(xterm_titles, + " --- AUTOCLEAN: Nothing unmerged.") + else: + portage.writemsg_stdout(colorize("WARN", "WARNING:") + + " AUTOCLEAN is disabled. 
This can cause serious" + + " problems due to overlapping packages.\n") + + if "--pretend" not in self.myopts and \ + "--fetchonly" not in self.myopts and \ + "--fetch-all-uri" not in self.myopts: + if "noclean" not in self.settings.features: + short_msg = "emerge: (%s of %s) %s Clean Post" % \ + (mergecount, totalcount, x[pkgindex]) + emergelog(xterm_titles, (" === (%s of %s) " + \ + "Post-Build Cleaning (%s::%s)") % \ + (mergecount, totalcount, x[pkgindex], y), + short_msg=short_msg) + emergelog(xterm_titles, " ::: completed emerge ("+\ + str(mergecount)+" of "+str(totalcount)+") "+\ + x[2]+" to "+x[1]) + + return os.EX_OK + + def add_one_emerge_to_world(self, x, mergecount, totalcount, favorites, mysysdict): + xterm_titles = "notitles" not in self.settings.features + pkgindex=2 + if x[0]=="blocks": + pkgindex=3 + + if "--buildpkgonly" not in self.myopts: + self.trees[x[1]]["vartree"].inject(x[2]) + myfavkey=portage.cpv_getkey(x[2]) + if "--fetchonly" not in self.myopts and \ + "--fetch-all-uri" not in self.myopts and \ + myfavkey in favorites: + myfavs = portage.grabfile(os.path.join(x[1], portage.WORLD_FILE)) + myfavdict=genericdict(myfavs) + #don't record if already in system profile or already recorded + if (not mysysdict.has_key(myfavkey)) and (not myfavdict.has_key(myfavkey)): + #we don't have a favorites entry for this package yet; add one + myfavdict[myfavkey]=myfavkey + print ">>> Recording",myfavkey,"in \"world\" favorites file..." + emergelog(xterm_titles, " === ("+\ + str(mergecount)+" of "+\ + str(totalcount)+\ + ") Updating world file ("+x[pkgindex]+")") + portage.write_atomic( + os.path.join(x[1], portage.WORLD_FILE), + "\n".join(myfavdict.values())) + + def print_status(self, totalcount, donec, qsize, failedc, spawnd_pkg, failed): + smsg = "" + fmsg = "" + if spawnd_pkg: + for pkgs in spawnd_pkg.values(): + smsg = smsg+" "+pkgs[0][2] + if failed: + for pkgs in failed: + fmsg = fmsg+" "+pkgs + print ">>> Jobs [Total = "+colorize("blue", str(totalcount))+"] [Done = "+\ + colorize("GOOD", str(donec))+"] [Running = "+colorize("WARN", str(qsize)+smsg)+\ + "] [Failed = "+colorize("BAD", str(failedc)+fmsg)+"]" + xtermTitle("Jobs [Total="+str(totalcount)+"] [Done="+str(donec)+"] [Running="+str(qsize)+"] [Failed="+str(failedc)+"]") + + def wait_one_emerge(self, spawnd_pids, spawnd_pkg, mergecount, totalcount, mymergelist, mtimedb): + build_prefix=self.settings["PORTAGE_TMPDIR"]+"/portage" + # let's wait for one of the jobs to finish + onepid = -1 + while onepid not in spawnd_pids: + onepid , retval = os.waitpid(-1, 0) + spawnd_pids.remove(onepid) + + pkg_compl = spawnd_pkg[onepid][0] + pkg_slot = spawnd_pkg[onepid][1] + del spawnd_pkg[onepid] + + if not retval: + # unlink the logid_path + logid_path = os.path.join(build_prefix, ".logid.")+pkg_compl[2].split("/")[0]+"."+pkg_compl[2].split("/")[1] + if os.path.exists(logid_path): + os.unlink(logid_path) + index = 0 + print ">>> Package "+colorize("GOOD", pkg_compl[2])+" finished emerging." 
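wait_one_emerge, whose body continues below, blocks in os.waitpid(-1, 0) until any child exits and then maps the returned pid back to a package through the spawnd_pkg dictionary. A minimal sketch of that reaping pattern follows; the name reap_one and the plain dict are stand-ins for the patch's bookkeeping, and the returned status is the raw waitpid status (zero means success), exactly as the patch treats retval.

    import os

    def reap_one(running):
        """Sketch: wait for any child in 'running' (a dict of pid -> package atom)
        to exit, and return (atom, status) for the one that finished."""
        while True:
            pid, status = os.waitpid(-1, 0)   # blocks until some child exits
            if pid in running:                # skip children we did not start here
                return running.pop(pid), status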
+ # we need to remove this pkg from resume DB + # this is the dirtiest shit I have ever written + for pkgs in mymergelist: + if pkgs[2] == pkg_compl[2]: + del mymergelist[index] + del mtimedb["resume"]["mergelist"][index] + mtimedb.commit() + # check if we need to restart portage + mysplit=portage.pkgsplit(pkg_compl[2]) + if mysplit[0] == "sys-apps/portage" and pkgs[1] == "/": + self.restart_portage(pkgs, mergecount, totalcount, mtimedb) + break + index += 1 + return (retval, pkg_compl) + + def merge(self, mylist, favorites, mtimedb, m_slots): failed_fetches = [] mymergelist=[] ldpath_mtimes = mtimedb["ldpath"] xterm_titles = "notitles" not in self.settings.features + parallel = "parallel" in self.settings.features + build_prefix=self.settings["PORTAGE_TMPDIR"]+"/portage" + + # parallel merge will be painful to watch with debug or fetchonly. So, you get only one of these...:-) + if self.edebug or "--fetchonly" in self.myopts: + parallel = False #check for blocking dependencies if "--fetchonly" not in self.myopts and \ @@ -2913,8 +3365,6 @@ mysysdict = genericdict(getlist(self.settings, "system")) if "--resume" in self.myopts: # We're resuming. - print colorize("GOOD", "*** Resuming merge...") - emergelog(xterm_titles, " *** Resuming merge...") mymergelist=mtimedb["resume"]["mergelist"][:] if "--skipfirst" in self.myopts and mymergelist: del mtimedb["resume"]["mergelist"][0] @@ -2948,8 +3398,17 @@ os.path.join(self.target_root, portage.WORLD_FILE), "\n".join(myfavdict.values())) - mtimedb["resume"]["mergelist"]=mymergelist[:] - mtimedb.commit() + if "--nodeps" not in self.myopts or len(mymergelist) > 1: + mtimedb["resume"]["mergelist"]=mymergelist[:] + mtimedb.commit() + + totalcount = len(mymergelist) + mergecount=1 + + if "--resume" in self.myopts and "--fetchonly" not in self.myopts: + # We're resuming. 
+ print colorize("GOOD", "*** Resuming merge...") + emergelog(xterm_titles, " *** Resuming merge...") myfeat = self.settings.features[:] bad_resume_opts = set(["--ask", "--tree", "--changelog", "--skipfirst", @@ -2967,12 +3426,12 @@ print ">>> starting parallel fetching" fetch_log = "/var/log/emerge-fetch.log" logfile = open(fetch_log, "w") - fd_pipes = {1:logfile.fileno(), 2:logfile.fileno()} + fd_pipes = {0:0, 1:logfile.fileno(), 2:logfile.fileno()} portage_util.apply_secpass_permissions(fetch_log, uid=portage.portage_uid, gid=portage.portage_gid, mode=0660) fetch_env = os.environ.copy() - fetch_env["FEATURES"] = fetch_env.get("FEATURES", "") + " -cvs" + fetch_env["FEATURES"] = fetch_env.get("FEATURES", "") + " -cvs -parallel" fetch_env["PORTAGE_NICENESS"] = "0" fetch_args = [sys.argv[0], "--resume", "--fetchonly"] resume_opts = self.myopts.copy() @@ -2992,311 +3451,199 @@ del fetch_log, logfile, fd_pipes, fetch_env, fetch_args, \ resume_opts - mergecount=0 - for x in mymergelist: - mergecount+=1 - myroot=x[1] - pkg_key = x[2] - pkgindex=2 - portdb = self.trees[myroot]["porttree"].dbapi - bindb = self.trees[myroot]["bintree"].dbapi - vartree = self.trees[myroot]["vartree"] - pkgsettings = self.pkgsettings[myroot] - if x[0]=="blocks": - pkgindex=3 - y = portdb.findname(pkg_key) + if not parallel: + failed_fetches = [] + for x in mymergelist: + retcode = self.do_one_emerge(x, mergecount, totalcount, mtimedb, favorites, mysysdict) + mergecount += 1 + + # need to short circuit the spawn with --nodeps + if os.environ.get("PORTAGE_INTERNAL_CALL", "0") != "1": + if "--fetchonly" in self.myopts or "--fetch-all-uri" in self.myopts: + continue + if retcode != os.EX_OK: + if "--fetchonly" in self.myopts or "--fetch-all-uri" in self.myopts: + failed_fetches.append(x[2]) + continue + else: + return retcode + # Unsafe for parallel merges + del mtimedb["resume"]["mergelist"][0] + # Commit after each merge so that --resume may still work in + # in the event that portage is not allowed to exit normally + # due to power failure, SIGKILL, etc... 
+ mtimedb.commit() + + # unlink the logid_path if any exists + logid_path = os.path.join(build_prefix, ".logid.")+x[2].split("/")[0]+"."+x[2].split("/")[1] + if os.path.exists(logid_path): + os.unlink(logid_path) + del logid_path + + # check if we need to restart portage + mysplit=portage.pkgsplit(x[2]) + if mysplit[0] == "sys-apps/portage" and x[1] == "/": + self.restart_portage(x, mergecount, totalcount, mtimedb) + + else: + if retcode != os.EX_OK: + sys.exit(1) + else: + sys.exit(0) + if "--pretend" not in self.myopts: - print "\n>>> Emerging (" + \ - colorize("MERGE_LIST_PROGRESS", str(mergecount)) + " of " + \ - colorize("MERGE_LIST_PROGRESS", str(len(mymergelist))) + ") " + \ - colorize("GOOD", x[pkgindex]) + " to " + x[1] - emergelog(xterm_titles, " >>> emerge ("+\ - str(mergecount)+" of "+str(len(mymergelist))+\ - ") "+x[pkgindex]+" to "+x[1]) - - pkgsettings["EMERGE_FROM"] = x[0] - pkgsettings.backup_changes("EMERGE_FROM") - pkgsettings.reset() - - #buildsyspkg: Check if we need to _force_ binary package creation - issyspkg = ("buildsyspkg" in myfeat) \ - and x[0] != "blocks" \ - and mysysdict.has_key(portage.cpv_getkey(x[2])) \ - and "--buildpkg" not in self.myopts - if x[0] in ["ebuild","blocks"]: - if x[0] == "blocks" and "--fetchonly" not in self.myopts: - raise Exception, "Merging a blocker" - elif "--fetchonly" in self.myopts or \ - "--fetch-all-uri" in self.myopts: - if "--fetch-all-uri" in self.myopts: - retval = portage.doebuild(y, "fetch", myroot, - pkgsettings, self.edebug, - "--pretend" in self.myopts, fetchonly=1, - fetchall=1, mydbapi=portdb, tree="porttree") - else: - retval = portage.doebuild(y, "fetch", myroot, - pkgsettings, self.edebug, - "--pretend" in self.myopts, fetchonly=1, - mydbapi=portdb, tree="porttree") - if (retval is None) or retval: - print - print "!!! Fetch for",y,"failed, continuing..." - print - failed_fetches.append(pkg_key) - continue - - portage.doebuild_environment(y, "setup", myroot, - pkgsettings, self.edebug, 1, portdb) - catdir = os.path.dirname(pkgsettings["PORTAGE_BUILDDIR"]) - portage_util.ensure_dirs(os.path.dirname(catdir), - uid=portage.portage_uid, gid=portage.portage_gid, - mode=070, mask=0) - builddir_lock = None - catdir_lock = None - try: - catdir_lock = portage_locks.lockdir(catdir) - portage_util.ensure_dirs(catdir, - uid=portage.portage_uid, gid=portage.portage_gid, - mode=070, mask=0) - builddir_lock = portage_locks.lockdir( - pkgsettings["PORTAGE_BUILDDIR"]) - try: - portage_locks.unlockdir(catdir_lock) - finally: - catdir_lock = None - msg = " === (%s of %s) Cleaning (%s::%s)" % \ - (mergecount, len(mymergelist), pkg_key, y) - short_msg = "emerge: (%s of %s) %s Clean" % \ - (mergecount, len(mymergelist), pkg_key) - emergelog(xterm_titles, msg, short_msg=short_msg) - retval = portage.doebuild(y, "clean", myroot, - pkgsettings, self.edebug, cleanup=1, - mydbapi=portdb, tree="porttree") - if retval != os.EX_OK: - return retval - if "--buildpkg" in self.myopts or issyspkg: - if issyspkg: - print ">>> This is a system package, " + \ - "let's pack a rescue tarball." 
- msg = " === (%s of %s) Compiling/Packaging (%s::%s)" % \ - (mergecount, len(mymergelist), pkg_key, y) - short_msg = "emerge: (%s of %s) %s Compile" % \ - (mergecount, len(mymergelist), pkg_key) - emergelog(xterm_titles, msg, short_msg=short_msg) - self.trees[myroot]["bintree"].prevent_collision(pkg_key) - retval = portage.doebuild(y, "package", myroot, - pkgsettings, self.edebug, mydbapi=portdb, - tree="porttree") - if retval != os.EX_OK: - return retval - bintree = self.trees[myroot]["bintree"] - if bintree.populated: - bintree.inject(pkg_key) - if "--buildpkgonly" not in self.myopts: - msg = " === (%s of %s) Merging (%s::%s)" % \ - (mergecount, len(mymergelist), pkg_key, y) - short_msg = "emerge: (%s of %s) %s Merge" % \ - (mergecount, len(mymergelist), pkg_key) - emergelog(xterm_titles, msg, short_msg=short_msg) - retval = portage.merge(pkgsettings["CATEGORY"], - pkgsettings["PF"], pkgsettings["D"], - os.path.join(pkgsettings["PORTAGE_BUILDDIR"], - "build-info"), myroot, pkgsettings, - myebuild=pkgsettings["EBUILD"], - mytree="porttree", mydbapi=portdb, - vartree=vartree, prev_mtimes=ldpath_mtimes) - if retval != os.EX_OK: - return retval - elif "noclean" not in pkgsettings.features: - portage.doebuild(y, "clean", myroot, - pkgsettings, self.edebug, mydbapi=portdb, - tree="porttree") - else: - msg = " === (%s of %s) Compiling/Merging (%s::%s)" % \ - (mergecount, len(mymergelist), pkg_key, y) - short_msg = "emerge: (%s of %s) %s Compile" % \ - (mergecount, len(mymergelist), pkg_key) - emergelog(xterm_titles, msg, short_msg=short_msg) - retval = portage.doebuild(y, "merge", myroot, - pkgsettings, self.edebug, vartree=vartree, - mydbapi=portdb, tree="porttree", - prev_mtimes=ldpath_mtimes) - if retval != os.EX_OK: - return retval - finally: - if builddir_lock: - portage_locks.unlockdir(builddir_lock) - try: - if not catdir_lock: - # Lock catdir for removal if empty. - catdir_lock = portage_locks.lockdir(catdir) - finally: - if catdir_lock: - try: - os.rmdir(catdir) - except OSError, e: - if e.errno not in (errno.ENOENT, - errno.ENOTEMPTY, errno.EEXIST): - raise - del e - portage_locks.unlockdir(catdir_lock) - - elif x[0]=="binary": - #merge the tbz2 - mytbz2 = self.trees[myroot]["bintree"].getname(pkg_key) - if "--getbinpkg" in self.myopts: - tbz2_lock = None - try: - if "distlocks" in pkgsettings.features and \ - os.access(pkgsettings["PKGDIR"], os.W_OK): - portage_util.ensure_dirs(os.path.dirname(mytbz2)) - tbz2_lock = portage_locks.lockfile(mytbz2, - wantnewlockfile=1) - if self.trees[myroot]["bintree"].isremote(pkg_key): - msg = " --- (%s of %s) Fetching Binary (%s::%s)" %\ - (mergecount, len(mymergelist), pkg_key, mytbz2) - short_msg = "emerge: (%s of %s) %s Fetch" % \ - (mergecount, len(mymergelist), pkg_key) - emergelog(xterm_titles, msg, short_msg=short_msg) - if not self.trees[myroot]["bintree"].gettbz2( - pkg_key): - return 1 - finally: - if tbz2_lock: - portage_locks.unlockfile(tbz2_lock) - - if "--fetchonly" in self.myopts or \ - "--fetch-all-uri" in self.myopts: - continue + emergelog(xterm_titles, " *** Finished. Cleaning up...") + + # We're out of the loop... We're done. Delete the resume data. + if mtimedb.has_key("resume"): + del mtimedb["resume"] + mtimedb.commit() + + #by doing an exit this way, --fetchonly can continue to try to + #fetch everything even if a particular download fails. + if "--fetchonly" in self.myopts or "--fetch-all-uri" in self.myopts: + if failed_fetches: + sys.stderr.write("\n\n!!! Some fetch errors were " + \ + "encountered. 
Please see above for details.\n\n") + for cpv in failed_fetches: + sys.stderr.write(" ") + sys.stderr.write(cpv) + sys.stderr.write("\n") + sys.stderr.write("\n") + sys.exit(1) + else: + sys.exit(0) + return os.EX_OK + + # parallel code - dirty starts here...;-) + one_in_slot_failed=0 + spawnd_pids=[] + + # dirty little trick to get number of cpus from the system + fd_cpuinfo = os.popen("cat /proc/cpuinfo","r") + cpu_count = 0 + for data_cpuinfo in fd_cpuinfo.readlines(): + if data_cpuinfo.find("cpu MHz") > -1 : + cpu_count += 1 + fd_cpuinfo.close() + + # if someone really screwed with /proc/cpuinfo output, we should not suffer + if cpu_count == 0: + cpu_count = 1 + + spawnd_pkg = {} + donec = 0 + failedc = 0 + failedPkgs = [] + logid_path = None + mylist = m_slots.keys() + mylist.sort() + for x in mylist: + # if slot is empty, go on + if not m_slots[x]: + continue - short_msg = "emerge: ("+str(mergecount)+" of "+str(len(mymergelist))+") "+x[pkgindex]+" Merge Binary" - emergelog(xterm_titles, " === ("+str(mergecount)+\ - " of "+str(len(mymergelist))+") Merging Binary ("+\ - x[pkgindex]+"::"+mytbz2+")", short_msg=short_msg) - retval = portage.pkgmerge(mytbz2, x[1], pkgsettings, - mydbapi=bindb, - vartree=self.trees[myroot]["vartree"], - prev_mtimes=ldpath_mtimes) - if retval != os.EX_OK: - return retval - #need to check for errors - if "--buildpkgonly" not in self.myopts: - self.trees[x[1]]["vartree"].inject(x[2]) - myfavkey=portage.cpv_getkey(x[2]) - if "--fetchonly" not in self.myopts and \ - "--fetch-all-uri" not in self.myopts and \ - myfavkey in favorites: - myfavs = portage.grabfile(os.path.join(myroot, portage.WORLD_FILE)) - myfavdict=genericdict(myfavs) - #don't record if already in system profile or already recorded - if (not mysysdict.has_key(myfavkey)) and (not myfavdict.has_key(myfavkey)): - #we don't have a favorites entry for this package yet; add one - myfavdict[myfavkey]=myfavkey - print ">>> Recording",myfavkey,"in \"world\" favorites file..." - emergelog(xterm_titles, " === ("+\ - str(mergecount)+" of "+\ - str(len(mymergelist))+\ - ") Updating world file ("+x[pkgindex]+")") - portage.write_atomic( - os.path.join(myroot, portage.WORLD_FILE), - "\n".join(myfavdict.values())) - - if "--pretend" not in self.myopts and \ - "--fetchonly" not in self.myopts and \ - "--fetch-all-uri" not in self.myopts: - # Clean the old package that we have merged over top of it. - if pkgsettings.get("AUTOCLEAN", "yes") == "yes": - xsplit=portage.pkgsplit(x[2]) - emergelog(xterm_titles, " >>> AUTOCLEAN: " + xsplit[0]) - retval = unmerge(pkgsettings, self.myopts, vartree, - "clean", [xsplit[0]], ldpath_mtimes, autoclean=1) - if not retval: - emergelog(xterm_titles, - " --- AUTOCLEAN: Nothing unmerged.") - else: - portage.writemsg_stdout(colorize("WARN", "WARNING:") - + " AUTOCLEAN is disabled. This can cause serious" - + " problems due to overlapping packages.\n") + # if previous slot failed, discontinue the emerge + if one_in_slot_failed and not ("--fetchonly" in self.myopts or "--fetch-all-uri" in self.myopts): + break - # Figure out if we need a restart. 
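The CPU-count detection above counts "cpu MHz" lines in /proc/cpuinfo, which works on common x86 systems but yields zero on kernels whose cpuinfo lacks that field; the code then falls back to a count of 1. A slightly more portable sketch is shown here as an alternative, not what the patch ships: the name guess_cpu_count is hypothetical, and it prefers os.sysconf before counting "processor" entries.

    import os

    def guess_cpu_count():
        """Sketch: best-effort CPU count for sizing the parallel-merge queue."""
        try:
            ncpu = os.sysconf("SC_NPROCESSORS_ONLN")
            if ncpu > 0:
                return ncpu
        except (AttributeError, ValueError, OSError):
            pass
        count = 0
        try:
            for line in open("/proc/cpuinfo"):
                if line.startswith("processor"):
                    count += 1
        except IOError:
            pass
        return count or 1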
- mysplit=portage.pkgsplit(x[2]) - if mysplit[0] == "sys-apps/portage" and x[1] == "/": - myver=mysplit[1]+"-"+mysplit[2] - if myver[-3:]=='-r0': - myver=myver[:-3] - if (myver != portage.VERSION) and \ - "livecvsportage" not in self.settings.features: - if len(mymergelist) > mergecount: - emergelog(xterm_titles, - " ::: completed emerge ("+ \ - str(mergecount)+" of "+ \ - str(len(mymergelist))+") "+ \ - x[2]+" to "+x[1]) - emergelog(xterm_titles, " *** RESTARTING " + \ - "emerge via exec() after change of " + \ - "portage version.") - del mtimedb["resume"]["mergelist"][0] - mtimedb.commit() - portage.run_exitfuncs() - mynewargv=[sys.argv[0],"--resume"] - resume_opts = self.myopts.copy() - # For automatic resume, we need to prevent - # any of bad_resume_opts from leaking in - # via EMERGE_DEFAULT_OPTS. - resume_opts["--ignore-default-opts"] = True - for myopt, myarg in resume_opts.iteritems(): - if myopt not in bad_resume_opts: - if myarg is True: - mynewargv.append(myopt) - else: - mynewargv.append(myopt +"="+ myarg) - # priority only needs to be adjusted on the first run - os.environ["PORTAGE_NICENESS"] = "0" - os.execv(mynewargv[0], mynewargv) - - if "--pretend" not in self.myopts and \ - "--fetchonly" not in self.myopts and \ - "--fetch-all-uri" not in self.myopts: - if "noclean" not in self.settings.features: - short_msg = "emerge: (%s of %s) %s Clean Post" % \ - (mergecount, len(mymergelist), x[pkgindex]) - emergelog(xterm_titles, (" === (%s of %s) " + \ - "Post-Build Cleaning (%s::%s)") % \ - (mergecount, len(mymergelist), x[pkgindex], y), - short_msg=short_msg) - emergelog(xterm_titles, " ::: completed emerge ("+\ - str(mergecount)+" of "+str(len(mymergelist))+") "+\ - x[2]+" to "+x[1]) + # start multiple merges in parallel mode + num_at_atime = cpu_count + 1 - # Unsafe for parallel merges - del mtimedb["resume"]["mergelist"][0] - # Commit after each merge so that --resume may still work in - # in the event that portage is not allowed to exit normally - # due to power failure, SIGKILL, etc... 
- mtimedb.commit() + qsize = 0 + for y in m_slots[x]: + # these all can go in parallel, so fork one after the other + # but num_at_atime at most + if num_at_atime: + onepid = self.fork_one_emerge(y, mergecount, totalcount, mtimedb, favorites, mysysdict) + spawnd_pids.append(onepid) + spawnd_pkg[onepid] = (y, x) + num_at_atime -= 1 + mergecount += 1 + qsize += 1 + else: + self.print_status(totalcount, donec, qsize, failedc, spawnd_pkg, failedPkgs) + # let's wait for one of the jobs to finish + (retval, pkg_compl) = self.wait_one_emerge(spawnd_pids, spawnd_pkg, mergecount, totalcount, mymergelist, mtimedb) + + # if it failed, I need to fail next slot but continue to merge all in this slot + if retval: + one_in_slot_failed = retval + failedc += 1 + failedPkgs.append(pkg_compl[2]) + else: + donec += 1 + self.add_one_emerge_to_world(pkg_compl, mergecount, totalcount, favorites, mysysdict) + onepid = self.fork_one_emerge(y, mergecount, totalcount, mtimedb, favorites, mysysdict) + spawnd_pids.append(onepid) + spawnd_pkg[onepid] = (y[2], x) + mergecount += 1 + + # this slot is exhausted, so wait for all of the forks to finish + while spawnd_pids: + self.print_status(totalcount, donec, qsize, failedc, spawnd_pkg, failedPkgs) + # let's wait for one of the jobs to finish + (retval, pkg_compl) = self.wait_one_emerge(spawnd_pids, spawnd_pkg, mergecount, totalcount, mymergelist, mtimedb) + + qsize -= 1 + if retval: + one_in_slot_failed = retval + failedc += 1 + failedPkgs.append(pkg_compl[2]) + else: + donec += 1 + self.add_one_emerge_to_world(pkg_compl, mergecount, totalcount, favorites, mysysdict) + if totalcount: + self.print_status(totalcount, donec, qsize, failedc, None, failedPkgs) + + if one_in_slot_failed: + portage.writemsg_stdout(red("\nSome packages failed to emerge, summary follows:\n")) + + for pkgs in failedPkgs: + if "--fetchonly" in self.myopts or "--fetch-all-uri" in self.myopts: + print "\n\n!!! Some fetch errors were encountered. Please see above for details.\n\n" + sys.exit(1) + portage.writemsg_stdout(red("\nPackage "+pkgs+" failed to emerge\n")) + logfile = None + if "PORT_LOGDIR" in self.settings: + port_logdir = self.settings["PORT_LOGDIR"] + else: + port_logdir = self.settings["ROOT"] + portage.DEF_LOGDIR + + pkg_cat = pkgs.split("/")[0] + pkg_pf = pkgs.split("/")[1] + logid_path = os.path.join(build_prefix, ".logid.")+pkg_cat+"."+pkg_pf + if os.path.exists(logid_path): + logid_time = time.strftime("%Y%m%d-%H%M%S", time.gmtime(os.stat(logid_path).st_mtime)) + logfile = os.path.join(port_logdir, "%s:%s:%s.log" % \ + (pkg_cat, pkg_pf, logid_time)) + del logid_time + + if logfile and os.path.exists(logfile): + portage.portage_exec.spawn(('tail', '-n', '20', logfile), returnpid=False) + + if logfile and os.path.exists(logfile): + portage.writemsg_stdout(red("Please take a look at the file "+logfile+"\n")) + os.unlink(logid_path) + if one_in_slot_failed: + sys.exit(1) if "--pretend" not in self.myopts: emergelog(xterm_titles, " *** Finished. Cleaning up...") + # see if there are any extraneous files in build_prefix, which we might have leftover + import glob + for fnames in glob.glob(os.path.join(build_prefix, ".logid.")+"*"): + os.unlink(fnames) + # We're out of the loop... We're done. Delete the resume data. if mtimedb.has_key("resume"): del mtimedb["resume"] mtimedb.commit() - #by doing an exit this way, --fetchonly can continue to try to - #fetch everything even if a particular download fails. 
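The loop above is the heart of the scheduler: within one slot it keeps at most cpu_count + 1 children running, reaps a child whenever the queue is full, and drains the whole slot before moving on, since only packages in the same slot are known to be independent; a failure stops later slots (except when only fetching). A compressed sketch of that queue discipline follows, with the hypothetical names run_slots and run_one standing in for the patch's fork_one_emerge, and reap_one as sketched earlier standing in for wait_one_emerge.

    def run_slots(slots, max_jobs, run_one, reap_one):
        """Sketch of the slot scheduler. 'slots' maps slot number -> package list
        (or None for an empty slot); run_one(pkg) starts a child and returns its pid;
        reap_one(running) waits for any child and returns (pkg, status)."""
        failed = []
        for slot in sorted(slots):
            if failed:
                break                          # a failure in an earlier slot stops the schedule
            running = {}
            for pkg in slots[slot] or []:
                if len(running) >= max_jobs:   # queue full: wait for one child first
                    done, status = reap_one(running)
                    if status:
                        failed.append(done)
                running[run_one(pkg)] = pkg
            while running:                     # drain this slot before starting the next one
                done, status = reap_one(running)
                if status:
                    failed.append(done)
        return failed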
if "--fetchonly" in self.myopts or "--fetch-all-uri" in self.myopts: - if failed_fetches: - sys.stderr.write("\n\n!!! Some fetch errors were " + \ - "encountered. Please see above for details.\n\n") - for cpv in failed_fetches: - sys.stderr.write(" ") - sys.stderr.write(cpv) - sys.stderr.write("\n") - sys.stderr.write("\n") - sys.exit(1) - else: - sys.exit(0) + sys.exit(0) + return os.EX_OK def unmerge(settings, myopts, vartree, unmerge_action, unmerge_files, @@ -3736,7 +4083,7 @@ def validate_merge_list(trees, mergelist): """Validate the list to make sure all the packages are still available. This is needed for --resume.""" - for (pkg_type, myroot, pkg_key, action) in mergelist: + for (pkg_type, myroot, pkg_key, action, merge_slot) in mergelist: if pkg_type == "binary" and \ not trees[myroot]["bintree"].dbapi.match("="+pkg_key) or \ pkg_type == "ebuild" and \ @@ -4622,7 +4969,7 @@ if not "--pretend" in myopts: #just check pretend, since --ask implies pretend emergelog(xterm_titles, " >>> depclean") - if "--quiet" not in myopts: + if "--quiet" not in myopts and "--nodeps" not in myopts: print "\nCalculating dependencies ", soft = 0 @@ -4724,6 +5071,38 @@ else: print "Number removed: "+str(len(cleanlist)) +def mergelist_to_merge_slot(mergelist, myopts, printonly=False, parallel=False): + merge_slots = {} + for pkg in mergelist: + if pkg[0] != 'blocks' and pkg[3] == 'merge': + slot = int(pkg[4]) + try: + if pkg not in merge_slots[slot]: + merge_slots[slot].append(pkg) + except KeyError: + merge_slots[slot] = [pkg] + # print the merge slots + max_slot = 0 + mylist = merge_slots.keys() + mylist.sort() + for x in mylist: + if x > max_slot: + max_slot = x + if parallel: + print "Package list for slot = "+str(x) + for y in merge_slots[x]: + print " ",y + if printonly: + return + + # make one last pass at the merge_slots and initialize the missing slots to None + x = 0 + while x < max_slot: + if x not in merge_slots.keys(): + merge_slots[x] = None + x += 1 + return merge_slots + def action_build(settings, trees, mtimedb, myopts, myaction, myfiles, spinner): ldpath_mtimes = mtimedb["ldpath"] @@ -4831,13 +5210,15 @@ if len(mymergelist) == 0: print colorize("INFORM", "emerge: It seems we have nothing to resume...") sys.exit(0) + mergelist_to_merge_slot(mymergelist, myopts, True, "parallel" in settings.features) mydepgraph.display(mymergelist) prompt="Would you like to resume merging these packages?" 
else: - mydepgraph.display( - mydepgraph.altlist(reversed=("--tree" in myopts))) + mymergelist = mydepgraph.altlist(reversed=("--tree" in myopts)) + mergelist_to_merge_slot(mymergelist, myopts, True, "parallel" in settings.features) + mydepgraph.display(mymergelist) mergecount=0 - for x in mydepgraph.altlist(): + for x in mymergelist: if x[0] != "blocks" and x[3] != "nomerge": mergecount+=1 #check for blocking dependencies @@ -4882,10 +5263,13 @@ if len(mymergelist) == 0: print colorize("INFORM", "emerge: It seems we have nothing to resume...") sys.exit(0) + mergelist_to_merge_slot(mymergelist, myopts, True, "parallel" in settings.features) mydepgraph.display(mymergelist) else: - mydepgraph.display( - mydepgraph.altlist(reversed=("--tree" in myopts))) + # mydepgraph.digraph.debug_print() + mymergelist = mydepgraph.altlist(reversed=("--tree" in myopts)) + mergelist_to_merge_slot(mymergelist, myopts, True, "parallel" in settings.features) + mydepgraph.display(mymergelist) else: if ("--buildpkgonly" in myopts): if not mydepgraph.digraph.hasallzeros(ignore_priority=DepPriority.MEDIUM): @@ -4901,23 +5285,32 @@ it to write the mtimedb""" mtimedb.filename = None time.sleep(3) # allow the parent to have first fetch + mymergelist = mtimedb["resume"]["mergelist"] + if "--skipfirst" in myopts: + mymergelist = mymergelist[1:] + if len(mymergelist) == 0: + print colorize("INFORM", "emerge: It seems we have nothing to resume...") + sys.exit(0) + merge_slots = mergelist_to_merge_slot(mymergelist, myopts, False, False) del mydepgraph - retval = mergetask.merge( - mtimedb["resume"]["mergelist"], favorites, mtimedb) + retval = mergetask.merge(mymergelist, favorites, mtimedb, merge_slots) if retval != os.EX_OK: sys.exit(retval) else: - if "resume" in mtimedb and \ - "mergelist" in mtimedb["resume"] and \ - len(mtimedb["resume"]["mergelist"]) > 1: - mtimedb["resume_backup"] = mtimedb["resume"] - del mtimedb["resume"] - mtimedb.commit() - mtimedb["resume"]={} - # XXX: Stored as a list for backward compatibility. - mtimedb["resume"]["myopts"] = \ - [k for k in myopts if myopts[k] is True] - mtimedb["resume"]["favorites"]=favorites + mymergelist = mydepgraph.altlist() + merge_slots = mergelist_to_merge_slot(mymergelist, myopts, False, False) + if "--nodeps" not in myopts or len(mymergelist) > 1: + if "resume" in mtimedb and \ + "mergelist" in mtimedb["resume"] and \ + len(mtimedb["resume"]["mergelist"]) > 1: + mtimedb["resume_backup"] = mtimedb["resume"] + del mtimedb["resume"] + mtimedb.commit() + mtimedb["resume"]={} + # XXX: Stored as a list for backward compatibility. 
+ mtimedb["resume"]["myopts"] = \ + [k for k in myopts if myopts[k] is True] + mtimedb["resume"]["favorites"]=favorites if ("--digest" in myopts) and not ("--fetchonly" in myopts or "--fetch-all-uri" in myopts): for pkgline in mydepgraph.altlist(): if pkgline[0]=="ebuild" and pkgline[3]=="merge": @@ -4933,18 +5326,18 @@ tree="porttree") if "--fetchonly" in myopts or "--fetch-all-uri" in myopts: pkglist = [] - for pkg in mydepgraph.altlist(): + for pkg in mymergelist: if pkg[0] != "blocks": pkglist.append(pkg) else: - pkglist = mydepgraph.altlist() + pkglist = mymergelist del mydepgraph mergetask = MergeTask(settings, trees, myopts) - retval = mergetask.merge(pkglist, favorites, mtimedb) + retval = mergetask.merge(pkglist, favorites, mtimedb, merge_slots) if retval != os.EX_OK: sys.exit(retval) - if mtimedb.has_key("resume"): + if mtimedb.has_key("resume") and ("--nodeps" not in myopts or len(mymergelist) > 1): del mtimedb["resume"] if settings["AUTOCLEAN"] and "yes"==settings["AUTOCLEAN"]: portage.writemsg_stdout(">>> Auto-cleaning packages...\n") diff -ur portage-2.1.2.orig/pym/portage.py portage-2.1.2/pym/portage.py --- portage-2.1.2.orig/pym/portage.py 2007-03-25 18:05:47.000000000 -0700 +++ portage-2.1.2/pym/portage.py 2007-03-25 18:06:34.000000000 -0700 @@ -64,7 +64,7 @@ from output import bold, colorize, green, red, yellow import portage_const - from portage_const import VDB_PATH, PRIVATE_PATH, CACHE_PATH, DEPCACHE_PATH, \ + from portage_const import VDB_PATH, PRIVATE_PATH, DEF_LOGDIR, CACHE_PATH, DEPCACHE_PATH, \ USER_CONFIG_PATH, MODULES_FILE_PATH, CUSTOM_PROFILE_PATH, PORTAGE_BASE_PATH, \ PORTAGE_BIN_PATH, PORTAGE_PYM_PATH, PROFILE_PATH, LOCALE_DATA_PATH, \ EBUILD_SH_BINARY, SANDBOX_BINARY, BASH_BINARY, \ @@ -2192,19 +2192,32 @@ mypids = [] pw = None if logfile: - del keywords["logfile"] + tee_good = 1 fd_pipes = keywords.get("fd_pipes") if fd_pipes is None: fd_pipes = {0:0, 1:1, 2:2} elif 1 not in fd_pipes or 2 not in fd_pipes: raise ValueError(fd_pipes) - pr, pw = os.pipe() - mypids.extend(portage_exec.spawn(('tee', '-i', '-a', logfile), - returnpid=True, fd_pipes={0:pr, 1:fd_pipes[1], 2:fd_pipes[2]})) - os.close(pr) - fd_pipes[1] = pw - fd_pipes[2] = pw - keywords["fd_pipes"] = fd_pipes + try: + statinfo1 = os.stat(logfile) + statinfo2 = os.fstat(fd_pipes[1]) + statinfo3 = os.fstat(fd_pipes[2]) + # if they are pointing to same file as logfile, no 'tee' is required. 
+ if statinfo1 == statinfo2 and statinfo2 == statinfo3: + tee_good = 0 + except: + tee_good = 1 + + if tee_good: + + del keywords["logfile"] + pr, pw = os.pipe() + mypids.extend(portage_exec.spawn(('tee', '-i', '-a', logfile), + returnpid=True, fd_pipes={0:pr, 1:fd_pipes[1], 2:fd_pipes[2]})) + os.close(pr) + fd_pipes[1] = pw + fd_pipes[2] = pw + keywords["fd_pipes"] = fd_pipes features = mysettings.features # XXX: Negative RESTRICT word @@ -3230,6 +3243,10 @@ if mysettings.get("PORT_LOGDIR", "") == "": while "PORT_LOGDIR" in mysettings: del mysettings["PORT_LOGDIR"] + + if not "PORT_LOGDIR" in mysettings and "parallel" in mysettings.features: + mysettings["PORT_LOGDIR"] = mysettings["ROOT"] + DEF_LOGDIR + if "PORT_LOGDIR" in mysettings: try: portage_util.ensure_dirs(mysettings["PORT_LOGDIR"], @@ -3242,7 +3259,7 @@ while "PORT_LOGDIR" in mysettings: del mysettings["PORT_LOGDIR"] if "PORT_LOGDIR" in mysettings: - logid_path = os.path.join(mysettings["PORTAGE_BUILDDIR"], ".logid") + logid_path = os.path.join(mysettings["BUILD_PREFIX"], ".logid.")+mysettings["CATEGORY"]+"."+ mysettings["PF"] if not os.path.exists(logid_path): f = open(logid_path, "w") f.close() diff -ur portage-2.1.2.orig/pym/portage_const.py portage-2.1.2/pym/portage_const.py --- portage-2.1.2.orig/pym/portage_const.py 2007-03-25 18:05:47.000000000 -0700 +++ portage-2.1.2/pym/portage_const.py 2007-03-25 18:06:34.000000000 -0700 @@ -12,6 +12,7 @@ VDB_PATH = "var/db/pkg" PRIVATE_PATH = "var/lib/portage" +DEF_LOGDIR = "var/log/portage" CACHE_PATH = "/var/cache/edb" DEPCACHE_PATH = CACHE_PATH+"/dep" diff -ur portage-2.1.2.orig/pym/portage_exec.py portage-2.1.2/pym/portage_exec.py --- portage-2.1.2.orig/pym/portage_exec.py 2007-03-25 18:05:47.000000000 -0700 +++ portage-2.1.2/pym/portage_exec.py 2007-03-25 18:06:34.000000000 -0700 @@ -19,6 +19,8 @@ if os.path.isdir("/proc/%i/fd" % os.getpid()): def get_open_fds(): + # there is a race here - fd used by listdir may be in the list but closed + # before this method returns. return map(int, [fd for fd in os.listdir("/proc/%i/fd" % os.getpid()) if fd.isdigit()]) else: def get_open_fds(): @@ -167,27 +169,41 @@ # mypids will hold the pids of all processes created. mypids = [] + pw = None if logfile: # Using a log file requires that stdout and stderr # are assigned to the process we're running. if 1 not in fd_pipes or 2 not in fd_pipes: raise ValueError(fd_pipes) - # Create a pipe - (pr, pw) = os.pipe() - - # Create a tee process, giving it our stdout and stderr - # as well as the read end of the pipe. - mypids.extend(spawn(('tee', '-i', '-a', logfile), - returnpid=True, fd_pipes={0:pr, - 1:fd_pipes[1], 2:fd_pipes[2]})) - - # We don't need the read end of the pipe, so close it. - os.close(pr) - - # Assign the write end of the pipe to our stdout and stderr. - fd_pipes[1] = pw - fd_pipes[2] = pw + tee_good = 1 + try: + statinfo1 = os.stat(logfile) + statinfo2 = os.fstat(fd_pipes[1]) + statinfo3 = os.fstat(fd_pipes[2]) + # if they are pointing to same file as logfile, no 'tee' is required. + if statinfo1 == statinfo2 and statinfo2 == statinfo3: + tee_good = 0 + except: + tee_good = 1 + + if tee_good: + + # Create a pipe + (pr, pw) = os.pipe() + + # Create a tee process, giving it our stdout and stderr + # as well as the read end of the pipe. + mypids.extend(spawn(('tee', '-i', '-a', logfile), + returnpid=True, fd_pipes={0:pr, + 1:fd_pipes[1], 2:fd_pipes[2]})) + + # We don't need the read end of the pipe, so close it. 
+ os.close(pr) + + # Assign the write end of the pipe to our stdout and stderr. + fd_pipes[1] = pw + fd_pipes[2] = pw pid = os.fork() @@ -209,7 +225,7 @@ # If we started a tee process the write side of the pipe is no # longer needed, so close it. - if logfile: + if logfile and pw: os.close(pw) # If the caller wants to handle cleaning up the processes, we tell
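The changes to pym/portage.py and pym/portage_exec.py above avoid stacking a 'tee' process on top of every build when stdout and stderr are already redirected into the very file named by logfile, as they are for the children forked by the parallel scheduler. The test compares os.stat(logfile) against os.fstat() of the two descriptors. A minimal sketch of that check follows; the name already_logging_to is hypothetical, and it compares only device and inode numbers, whereas the patch compares the whole stat results.

    import os

    def already_logging_to(logfile, out_fd=1, err_fd=2):
        """Sketch: True if both descriptors already point at 'logfile',
        in which case spawning 'tee -i -a logfile' would be redundant."""
        try:
            want = os.stat(logfile)
            out = os.fstat(out_fd)
            err = os.fstat(err_fd)
        except OSError:
            return False        # cannot tell; keep the tee to be safe
        same = lambda a, b: (a.st_dev, a.st_ino) == (b.st_dev, b.st_ino)
        return same(want, out) and same(want, err)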