Go to:
Gentoo Home
Documentation
Forums
Lists
Bugs
Planet
Store
Wiki
Get Gentoo!
Gentoo's Bugzilla – Attachment 143576 Details for
Bug 147516
[PATCH] Parallel portage can reduce build times
Home
|
New
–
[Ex]
|
Browse
|
Search
|
Privacy Policy
|
[?]
|
Reports
|
Requests
|
Help
|
New Account
|
Log In
[x]
|
Forgot Password
Login:
[x]
[patch]
patch for latest portage version
portage-2.1.4.4-parallel.patch (text/plain), 65.10 KB, created by
Anielkis Herrera
on 2008-02-15 16:32:28 UTC
(
hide
)
Description:
patch for latest portage version
Filename:
MIME Type:
Creator:
Anielkis Herrera
Created:
2008-02-15 16:32:28 UTC
Size:
65.10 KB
patch
obsolete
>diff -Nru portage_orig/bin/emerge portage/bin/emerge >--- portage_orig/bin/emerge 2008-02-15 08:55:40.000000000 -0500 >+++ portage/bin/emerge 2008-02-15 20:24:57.000000000 -0500 >@@ -2740,6 +2740,8 @@ > ignore_priority_soft_range.extend( > xrange(DepPriority.MIN, DepPriority.MEDIUM_SOFT + 1)) > tree_mode = "--tree" in self.myopts >+ slotcount = 0 >+ > # Tracks whether or not the current iteration should prefer asap_nodes > # if available. This is set to False when the previous iteration > # failed to select any nodes. It is reset whenever nodes are >@@ -2958,7 +2960,10 @@ > > for node in selected_nodes: > if node[-1] != "nomerge": >- retlist.append(list(node)) >+ node2 = list(node) >+ if len(node2) == 4: >+ node2.append(str(slotcount)) >+ retlist.append(node2) > mygraph.remove(node) > if not reversed and not circular_blocks and myblockers.contains(node): > """This node may have invalidated one or more blockers.""" >@@ -2972,6 +2977,7 @@ > self.blocker_parents[blocker] = unresolved > else: > del self.blocker_parents[blocker] >+ slotcount += 1 > > if not reversed: > """Blocker validation does not work with reverse mode, >@@ -3157,6 +3163,10 @@ > display_list.append((x, 0, True)) > continue > graph_key = tuple(x) >+ if len(x) > 4: >+ graph_key = tuple(x[:-1]) >+ else: >+ graph_key = tuple(x) > if "--tree" in self.myopts: > depth = len(tree_nodes) > while depth and graph_key not in \ >@@ -3223,7 +3233,11 @@ > continue > if "blocks" == graph_key[0]: > continue >- if ordered and graph_key[-1] != "nomerge": >+ if len(graph_key) > 4: >+ merge_op = graph_key[-2] >+ else: >+ merge_op = graph_key[-1] >+ if ordered and merge_op != "nomerge": > last_merge_depth = depth > continue > if depth >= last_merge_depth or \ >@@ -3239,7 +3253,6 @@ > > for mylist_index in xrange(len(mylist)): > x, depth, ordered = mylist[mylist_index] >- pkg_node = tuple(x) > pkg_type = x[0] > myroot = x[1] > pkg_key = x[2] >@@ -3248,6 +3261,10 @@ > vardb = self.trees[myroot]["vartree"].dbapi > vartree = 
self.trees[myroot]["vartree"] > pkgsettings = self.pkgsettings[myroot] >+ if len(x) > 4: >+ pkg_node = tuple(x[:-1]) >+ else: >+ pkg_node = tuple(x) > > fetch=" " > >@@ -3261,7 +3278,7 @@ > addl = addl + " " + red(resolved) > else: > addl = "[blocks " + addl + "] " + red(resolved) >- block_parents = self.blocker_parents[tuple(x)] >+ block_parents = self.blocker_parents[pkg_node] > block_parents = set([pnode[2] for pnode in block_parents]) > block_parents = ", ".join(block_parents) > if resolved!=x[2]: >@@ -3820,9 +3837,9 @@ > fakedb = self.mydbapi > trees = self.trees > for x in mergelist: >- if len(x) != 4: >+ if len(x) != 5: > continue >- pkg_type, myroot, pkg_key, action = x >+ pkg_type, myroot, pkg_key, action, slott = x > if pkg_type not in self.pkg_tree_map: > continue > if action != "merge": >@@ -3976,9 +3993,9 @@ > self.curval = 0 > self._spawned_pids = [] > >- def merge(self, mylist, favorites, mtimedb): >+ def merge(self, mylist, favorites, mtimedb, m_slots): > try: >- return self._merge(mylist, favorites, mtimedb) >+ return self._merge(mylist, favorites, mtimedb, m_slots) > finally: > if self._spawned_pids: > portage.portage_exec.spawned_pids.extend(self._spawned_pids) >@@ -4003,7 +4020,7 @@ > pass > spawned_pids.remove(pid) > >- def _merge(self, mylist, favorites, mtimedb): >+ def _merge(self, mylist, favorites, mtimedb, m_slots): > failed_fetches = [] > fetchonly = "--fetchonly" in self.myopts or \ > "--fetch-all-uri" in self.myopts >@@ -4011,6 +4028,12 @@ > mymergelist=[] > ldpath_mtimes = mtimedb["ldpath"] > xterm_titles = "notitles" not in self.settings.features >+ parallel = "parallel" in self.settings.features >+ build_prefix=self.settings["PORTAGE_TMPDIR"]+"/portage" >+ >+ # parallel merge will be painful to watch with debug or fetchonly. So, you get only one of these...:-) >+ if self.edebug or "--fetchonly" in self.myopts: >+ parallel = False > > if "--resume" in self.myopts: > # We're resuming. 
>@@ -4042,7 +4065,7 @@ > if not shown_verifying_msg: > shown_verifying_msg = True > print ">>> Verifying ebuild Manifests..." >- mytype, myroot, mycpv, mystatus = x >+ mytype, myroot, mycpv, mystatus, slott = x > portdb = self.trees[myroot]["porttree"].dbapi > quiet_config = quiet_settings[myroot] > quiet_config["O"] = os.path.dirname(portdb.findname(mycpv)) >@@ -4051,14 +4074,14 @@ > del x, mytype, myroot, mycpv, mystatus, quiet_config > del shown_verifying_msg, quiet_settings > >- root_config = RootConfig(self.trees[self.target_root]) >- system_set = root_config.sets["system"] >- args_set = AtomSet(favorites) >- world_set = root_config.sets["world"] > if "--resume" not in self.myopts: > mymergelist = mylist >- mtimedb["resume"]["mergelist"]=mymergelist[:] >- mtimedb.commit() >+ if "--nodeps" not in self.myopts or len(mymergelist) > 1: >+ mtimedb["resume"]["mergelist"]=mymergelist[:] >+ mtimedb.commit() >+ >+ totalcount = len(mymergelist) >+ mergecount=1 > > myfeat = self.settings.features[:] > bad_resume_opts = set(["--ask", "--tree", "--changelog", "--skipfirst", >@@ -4075,12 +4098,12 @@ > elif len(mymergelist) > 1: > fetch_log = "/var/log/emerge-fetch.log" > logfile = open(fetch_log, "w") >- fd_pipes = {1:logfile.fileno(), 2:logfile.fileno()} >+ fd_pipes = {0:0, 1:logfile.fileno(), 2:logfile.fileno()} > portage_util.apply_secpass_permissions(fetch_log, > uid=portage.portage_uid, gid=portage.portage_gid, > mode=0660) > fetch_env = os.environ.copy() >- fetch_env["FEATURES"] = fetch_env.get("FEATURES", "") + " -cvs" >+ fetch_env["FEATURES"] = fetch_env.get("FEATURES", "") + " -cvs -parallel" > fetch_env["PORTAGE_NICENESS"] = "0" > fetch_args = [sys.argv[0], "--resume", "--fetchonly"] > resume_opts = self.myopts.copy() >@@ -4107,347 +4130,715 @@ > metadata_keys = [k for k in portage.auxdbkeys \ > if not k.startswith("UNUSED_")] + ["USE"] > >- mergecount=0 >- for x in mymergelist: >- mergecount+=1 >- pkg_type = x[0] >- myroot=x[1] >- pkg_key = x[2] >- pkgindex=2 >- 
portdb = self.trees[myroot]["porttree"].dbapi >- bindb = self.trees[myroot]["bintree"].dbapi >- vartree = self.trees[myroot]["vartree"] >- pkgsettings = self.pkgsettings[myroot] >- metadata = {} >- if pkg_type == "blocks": >- pass >- elif pkg_type == "ebuild": >- mydbapi = portdb >- metadata.update(izip(metadata_keys, >- mydbapi.aux_get(pkg_key, metadata_keys))) >- pkgsettings.setcpv(pkg_key, mydb=mydbapi) >- metadata["USE"] = pkgsettings["USE"] >- else: >- if pkg_type == "binary": >- mydbapi = bindb >- else: >- raise AssertionError("Package type: '%s'" % pkg_type) >- metadata.update(izip(metadata_keys, >- mydbapi.aux_get(pkg_key, metadata_keys))) >- if x[0]=="blocks": >- pkgindex=3 >- y = portdb.findname(pkg_key) >- if "--pretend" not in self.myopts: >- print "\n>>> Emerging (" + \ >- colorize("MERGE_LIST_PROGRESS", str(mergecount)) + " of " + \ >- colorize("MERGE_LIST_PROGRESS", str(len(mymergelist))) + ") " + \ >- colorize("GOOD", x[pkgindex]) + " to " + x[1] >- emergelog(xterm_titles, " >>> emerge ("+\ >- str(mergecount)+" of "+str(len(mymergelist))+\ >- ") "+x[pkgindex]+" to "+x[1]) > >- pkgsettings["EMERGE_FROM"] = x[0] >- pkgsettings.backup_changes("EMERGE_FROM") >- pkgsettings.reset() >+ if not parallel: >+ failed_fetches = [] >+ for x in mymergelist: >+ retcode = self.do_one_emerge(x, mergecount, totalcount, mtimedb, favorites, metadata_keys) >+ mergecount += 1 >+ # need to short circuit the spawn with --nodeps >+ if os.environ.get("PORTAGE_INTERNAL_CALL", "0") != "1": >+ if "--fetchonly" in self.myopts or "--fetch-all-uri" in self.myopts: >+ continue >+ if retcode != os.EX_OK: >+ if "--fetchonly" in self.myopts or "--fetch-all-uri" in self.myopts: >+ failed_fetches.append(x[2]) >+ continue >+ else: >+ return retcode >+ # Unsafe for parallel merges >+ del mtimedb["resume"]["mergelist"][0] >+ # Commit after each merge so that --resume may still work in >+ # in the event that portage is not allowed to exit normally >+ # due to power failure, SIGKILL, etc... 
>+ mtimedb.commit() >+ self.curval += 1 >+ self._poll_child_processes() >+ >+ # unlink the logid_path if any exists >+ logid_path = os.path.join(build_prefix, ".logid.")+x[2].split("/")[0]+"."+x[2].split("/")[1] >+ if os.path.exists(logid_path): >+ os.unlink(logid_path) >+ del logid_path >+ >+ # check if we need to restart portage >+ mysplit=portage.pkgsplit(x[2]) >+ if mysplit[0] == "sys-apps/portage" and x[1] == "/": >+ self.restart_portage(x, mergecount, totalcount, mtimedb) >+ else: >+ if retcode != os.EX_OK: >+ sys.exit(1) >+ else: >+ sys.exit(0) > >- #buildsyspkg: Check if we need to _force_ binary package creation >- issyspkg = ("buildsyspkg" in myfeat) \ >- and x[0] != "blocks" \ >- and system_set.findAtomForPackage(pkg_key, metadata) \ >- and "--buildpkg" not in self.myopts >- if x[0] in ["ebuild","blocks"]: >- if x[0] == "blocks" and "--fetchonly" not in self.myopts: >- raise Exception, "Merging a blocker" >- elif "--fetchonly" in self.myopts or \ >- "--fetch-all-uri" in self.myopts: >- if "--fetch-all-uri" in self.myopts: >- retval = portage.doebuild(y, "fetch", myroot, >- pkgsettings, self.edebug, >- "--pretend" in self.myopts, fetchonly=1, >- fetchall=1, mydbapi=portdb, tree="porttree") >- else: >- retval = portage.doebuild(y, "fetch", myroot, >- pkgsettings, self.edebug, >- "--pretend" in self.myopts, fetchonly=1, >- mydbapi=portdb, tree="porttree") >- if (retval is None) or retval: >- print >- print "!!! Fetch for",y,"failed, continuing..." >- print >- failed_fetches.append(pkg_key) >- self.curval += 1 >- continue >+ if "--pretend" not in self.myopts: >+ emergelog(xterm_titles, " *** Finished. Cleaning up...") > >- portage.doebuild_environment(y, "setup", myroot, >- pkgsettings, self.edebug, 1, portdb) >- catdir = os.path.dirname(pkgsettings["PORTAGE_BUILDDIR"]) >- portage_util.ensure_dirs(os.path.dirname(catdir), >+ # We're out of the loop... We're done. Delete the resume data. 
>+ if mtimedb.has_key("resume"): >+ del mtimedb["resume"] >+ mtimedb.commit() >+ >+ #by doing an exit this way, --fetchonly can continue to try to >+ #fetch everything even if a particular download fails. >+ if "--fetchonly" in self.myopts or "--fetch-all-uri" in self.myopts: >+ if failed_fetches: >+ sys.stderr.write("\n\n!!! Some fetch errors were " + \ >+ "encountered. Please see above for details.\n\n") >+ for cpv in failed_fetches: >+ sys.stderr.write(" ") >+ sys.stderr.write(cpv) >+ sys.stderr.write("\n") >+ sys.stderr.write("\n") >+ sys.exit(1) >+ else: >+ sys.exit(0) >+ return os.EX_OK >+ >+ # parallel code - dirty starts here...;-) >+ one_in_slot_failed=0 >+ spawnd_pids=[] >+ >+ # dirty little trick to get number of cpus from the system >+ fd_cpuinfo = os.popen("cat /proc/cpuinfo","r") >+ cpu_count = 0 >+ for data_cpuinfo in fd_cpuinfo.readlines(): >+ if data_cpuinfo.find("cpu MHz") > -1 : >+ cpu_count += 1 >+ fd_cpuinfo.close() >+ >+ # if someone really screwed with /proc/cpuinfo output, we should not suffer >+ if cpu_count == 0: >+ cpu_count = 1 >+ >+ spawnd_pkg = {} >+ donec = 0 >+ failedc = 0 >+ failedPkgs = [] >+ logid_path = None >+ mylist = m_slots.keys() >+ mylist.sort() >+ for x in mylist: >+ # if slot is empty, go on >+ if not m_slots[x]: >+ continue >+ >+ # if previous slot failed, discontinue the emerge >+ if one_in_slot_failed and not ("--fetchonly" in self.myopts or "--fetch-all-uri" in self.myopts): >+ break >+ >+ # start multiple merges in parallel mode >+ num_at_atime = cpu_count + 1 >+ >+ qsize = 0 >+ for y in m_slots[x]: >+ # these all can go in parallel, so fork one after the other >+ # but num_at_atime at most >+ if num_at_atime: >+ onepid = self.fork_one_emerge(y, mergecount, totalcount, mtimedb, favorites, metadata_keys) >+ spawnd_pids.append(onepid) >+ spawnd_pkg[onepid] = (y, x) >+ num_at_atime -= 1 >+ mergecount += 1 >+ qsize += 1 >+ else: >+ self.print_status(totalcount, donec, qsize, failedc, spawnd_pkg, failedPkgs) >+ # let's 
wait for one of the jobs to finish >+ (retval, pkg_compl) = self.wait_one_emerge(spawnd_pids, spawnd_pkg, mergecount, totalcount, mymergelist, mtimedb) >+ >+ # if it failed, I need to fail next slot but continue to merge all in this slot >+ if retval: >+ one_in_slot_failed = retval >+ failedc += 1 >+ failedPkgs.append(pkg_compl[2]) >+ else: >+ donec += 1 >+ self.add_one_emerge_to_world(pkg_compl, mergecount, totalcount, favorites, metadata_keys) >+ onepid = self.fork_one_emerge(y, mergecount, totalcount, mtimedb, favorites, metadata_keys) >+ spawnd_pids.append(onepid) >+ spawnd_pkg[onepid] = (y, x) >+ mergecount += 1 >+ >+ # this slot is exhausted, so wait for all of the forks to finish >+ while spawnd_pids: >+ self.print_status(totalcount, donec, qsize, failedc, spawnd_pkg, failedPkgs) >+ # let's wait for one of the jobs to finish >+ (retval, pkg_compl) = self.wait_one_emerge(spawnd_pids, spawnd_pkg, mergecount, totalcount, mymergelist, mtimedb) >+ >+ qsize -= 1 >+ if retval: >+ one_in_slot_failed = retval >+ failedc += 1 >+ failedPkgs.append(pkg_compl[2]) >+ else: >+ donec += 1 >+ self.add_one_emerge_to_world(pkg_compl, mergecount, totalcount, favorites, metadata_keys) >+ if totalcount: >+ self.print_status(totalcount, donec, qsize, failedc, None, failedPkgs) >+ >+ if one_in_slot_failed: >+ portage.writemsg_stdout(red("\nSome packages failed to emerge, summary follows:\n")) >+ >+ for pkgs in failedPkgs: >+ if "--fetchonly" in self.myopts or "--fetch-all-uri" in self.myopts: >+ print "\n\n!!! Some fetch errors were encountered. 
Please see above for details.\n\n" >+ sys.exit(1) >+ >+ portage.writemsg_stdout(red("\nPackage "+pkgs+" failed to emerge\n")) >+ logfile = None >+ if "PORT_LOGDIR" in self.settings: >+ port_logdir = self.settings["PORT_LOGDIR"] >+ else: >+ port_logdir = self.settings["ROOT"] + portage.DEF_LOGDIR >+ >+ pkg_cat = pkgs.split("/")[0] >+ pkg_pf = pkgs.split("/")[1] >+ logid_path = os.path.join(build_prefix, ".logid.")+pkg_cat+"."+pkg_pf >+ if os.path.exists(logid_path): >+ logid_time = time.strftime("%Y%m%d-%H%M%S", time.gmtime(os.stat(logid_path).st_mtime)) >+ logfile = os.path.join(port_logdir, "%s:%s:%s.log" % \ >+ (pkg_cat, pkg_pf, logid_time)) >+ del logid_time >+ >+ if logfile and os.path.exists(logfile): >+ portage.portage_exec.spawn(('tail', '-n', '20', logfile), returnpid=False) >+ >+ if logfile and os.path.exists(logfile): >+ portage.writemsg_stdout(red("Please take a look at the file "+logfile+"\n")) >+ os.unlink(logid_path) >+ if one_in_slot_failed: >+ sys.exit(1) >+ if "--pretend" not in self.myopts: >+ emergelog(xterm_titles, " *** Finished. Cleaning up...") >+ >+ # see if there are any extraneous files in build_prefix, which we might have leftover >+ import glob >+ for fnames in glob.glob(os.path.join(build_prefix, ".logid.")+"*"): >+ os.unlink(fnames) >+ >+ # We're out of the loop... We're done. Delete the resume data. 
>+ if mtimedb.has_key("resume"): >+ del mtimedb["resume"] >+ mtimedb.commit() >+ >+ if "--fetchonly" in self.myopts or "--fetch-all-uri" in self.myopts: >+ sys.exit(0) >+ >+ return os.EX_OK >+ >+ def print_status(self, totalcount, donec, qsize, failedc, spawnd_pkg, failed): >+ smsg = "" >+ fmsg = "" >+ if spawnd_pkg: >+ for pkgs in spawnd_pkg.values(): >+ smsg = smsg+" "+pkgs[0][2] >+ if failed: >+ for pkgs in failed: >+ fmsg = fmsg+" "+pkgs >+ print ">>> Jobs [Total = "+colorize("blue", str(totalcount))+"] [Done = "+\ >+ colorize("GOOD", str(donec))+"] [Running = "+colorize("WARN", str(qsize)+smsg)+\ >+ "] [Failed = "+colorize("BAD", str(failedc)+fmsg)+"]" >+ xtermTitle("Jobs [Total="+str(totalcount)+"] [Done="+str(donec)+"] [Running="+str(qsize)+"] [Failed="+str(failedc)+"]") >+ >+ def wait_one_emerge(self, spawnd_pids, spawnd_pkg, mergecount, totalcount, mymergelist, mtimedb): >+ build_prefix=self.settings["PORTAGE_TMPDIR"]+"/portage" >+ # let's wait for one of the jobs to finish >+ onepid = -1 >+ while onepid not in spawnd_pids: >+ onepid , retval = os.waitpid(-1, 0) >+ spawnd_pids.remove(onepid) >+ >+ pkg_compl = spawnd_pkg[onepid][0] >+ pkg_slot = spawnd_pkg[onepid][1] >+ del spawnd_pkg[onepid] >+ >+ if not retval: >+ # unlink the logid_path >+ logid_path = os.path.join(build_prefix, ".logid.")+pkg_compl[2].split("/")[0]+"."+pkg_compl[2].split("/")[1] >+ if os.path.exists(logid_path): >+ os.unlink(logid_path) >+ index = 0 >+ print ">>> Package "+colorize("GOOD", pkg_compl[2])+" finished emerging." 
>+ # we need to remove this pkg from resume DB >+ # this is the dirtiest shit I have ever written >+ for pkgs in mymergelist: >+ if pkgs[2] == pkg_compl[2]: >+ if len(mymergelist) > 1: >+ del mtimedb["resume"]["mergelist"][index] >+ mtimedb.commit() >+ self.curval += 1 >+ self._poll_child_processes() >+ del mymergelist[index] >+ # check if we need to restart portage >+ mysplit=portage.pkgsplit(pkg_compl[2]) >+ if mysplit[0] == "sys-apps/portage" and pkgs[1] == "/": >+ self.restart_portage(pkgs, mergecount, totalcount, mtimedb) >+ break >+ index += 1 >+ return (retval, pkg_compl) >+ >+ def fork_one_emerge(self, x, mergecount, totalcount, mtimedb, favorites, metadata_keys): >+ xterm_titles = "notitles" not in self.settings.features >+ myfeat = self.settings.features[:] >+ ldpath_mtimes = mtimedb["ldpath"] >+ myroot=x[1] >+ pkg_key = x[2] >+ pkg_cat = x[2].split("/")[0] >+ pkg_pf = x[2].split("/")[1] >+ pkgindex=2 >+ if x[0]=="blocks": >+ pkgindex=3 >+ >+ build_prefix=self.settings["PORTAGE_TMPDIR"]+"/portage" >+ portage_util.ensure_dirs(build_prefix, uid=portage.portage_uid, gid=portage.portage_gid, mode=0775) >+ >+ logid_path = None >+ null_log = 0 >+ >+ if self.settings.get("PORT_LOGDIR", "") == "": >+ while "PORT_LOGDIR" in self.settings: >+ del self.settings["PORT_LOGDIR"] >+ if "PORT_LOGDIR" in self.settings: >+ port_logdir = self.settings["PORT_LOGDIR"] >+ else: >+ port_logdir = self.settings["ROOT"] + portage.DEF_LOGDIR >+ >+ try: >+ portage_util.ensure_dirs(port_logdir, uid=portage.portage_uid, >+ gid=portage.portage_gid, mode=02770) >+ except portage_exception.PortageException, e: >+ writemsg("!!! %s\n" % str(e), noiselevel=-1) >+ writemsg("!!! Permission issues with PORT_LOGDIR='%s'\n" % \ >+ self.settings["PORT_LOGDIR"], noiselevel=-1) >+ writemsg("!!! 
Because 'parallel' feature is enabled, you won't get any logs.\n", noiselevel=-1) >+ null_log = 1 >+ >+ if not null_log: >+ logid_path = os.path.join(build_prefix, ".logid.")+pkg_cat+"."+pkg_pf >+ if not os.path.exists(logid_path): >+ f = open(logid_path, "w") >+ f.close() >+ del f >+ logid_time = time.strftime("%Y%m%d-%H%M%S", >+ time.gmtime(os.stat(logid_path).st_mtime)) >+ logfile = os.path.join(port_logdir, "%s:%s:%s.log" % \ >+ (pkg_cat, pkg_pf, logid_time)) >+ del logid_time >+ else: >+ logfile = "/dev/null" >+ >+ if "--pretend" not in self.myopts and "--fetchonly" not in self.myopts: >+ print ">>> Emerging (" + \ >+ colorize("MERGE_LIST_PROGRESS", str(mergecount)) + " of " + \ >+ colorize("MERGE_LIST_PROGRESS", str(totalcount)) + ") " + \ >+ colorize("GOOD", x[pkgindex]) + " to " + x[1] >+ print ">>> Logfile in " + logfile >+ emergelog(xterm_titles, " >>> emerge ("+\ >+ str(mergecount)+" of "+str(totalcount)+\ >+ ") "+x[pkgindex]+" to "+x[1]) >+ >+ # need to spawn a --nodeps emerge in a separate process. >+ pkg="="+x[2] >+ merge_env = os.environ.copy() >+ merge_env["PORTAGE_INTERNAL_CALL"] = "1" >+ merge_env["FEATURES"] = merge_env.get("FEATURES", "") + " notitles -parallel" >+ merge_args = [sys.argv[0], "--nodeps", "--oneshot", "--nospinner", pkg] >+ good_nodeps_opts = set(["--buildpkg", "--buildpkgonly", "--fetchonly", "--fetch-all-uri", "--getbinpkg",\ >+ "--usepkg", "--usepkgonly"]) >+ fd_pipes = None >+ merge_logfd = None >+ for myopt, myarg in self.myopts.iteritems(): >+ # don't clobber the logfile at the same time as parallel fetch is >+ # all log of parallel fetch will go /var/log/emerge-fetch.log >+ # so, just leave 0,1,2 alone. >+ if "parallel-fetch" in myfeat and myopt == "--fetchonly": >+ fd_pipes = {0:0, 1:1, 2:2} >+ if myopt in good_nodeps_opts: >+ if myarg is True: >+ merge_args.append(myopt) >+ else: >+ merge_args.append(myopt +"="+ myarg) >+ if not fd_pipes: >+ merge_logfd = open(logfile, "w") >+ # put in a start message. 
This also makes sure that this fd is pointing to a good file on disk >+ # and hence will be used throughout the other spawns that will happen in the children. >+ merge_logfd.write("Package "+x[pkgindex]+" started at "+time.ctime()+"\n\n") >+ merge_logfd.flush() >+ fd_pipes = {0:0, 1:merge_logfd.fileno(), 2:merge_logfd.fileno()} >+ portage_util.apply_secpass_permissions(logfile, uid=portage.portage_uid, gid=portage.portage_gid, mode=0660) >+ >+ mypids = portage.portage_exec.spawn(merge_args, env=merge_env, fd_pipes=fd_pipes, returnpid=True) >+ if merge_logfd: >+ merge_logfd.close() # child has exclusive rights to it now. >+ return mypids[0] >+ >+ def restart_portage(self, x, mergecount, totalcount, mtimedb): >+ xterm_titles = "notitles" not in self.settings.features >+ # don't really restart if any of these is true >+ # XXXXX - seems like redundant check, but what the hell! sky is not falling as yet. >+ if "--pretend" in self.myopts or "--fetchonly" in self.myopts or \ >+ "--fetch-all-uri" in self.myopts or "--buildpkgonly" in self.myopts: >+ return >+ >+ bad_resume_opts = set(["--ask", "--tree", "--changelog", "--skipfirst", >+ "--resume"]) >+ mysplit=portage.pkgsplit(x[2]) >+ if "livecvsportage" not in self.settings.features: >+ if totalcount > mergecount: >+ emergelog(xterm_titles, >+ " ::: completed emerge ("+ \ >+ str(mergecount)+" of "+ \ >+ str(totalcount)+") "+ \ >+ x[2]+" to "+x[1]) >+ emergelog(xterm_titles, " *** RESTARTING " + \ >+ "emerge via exec() after change of " + \ >+ "portage version.") >+ portage.run_exitfuncs() >+ mynewargv=[sys.argv[0],"--resume"] >+ resume_opts = self.myopts.copy() >+ # For automatic resume, we need to prevent >+ # any of bad_resume_opts from leaking in >+ # via EMERGE_DEFAULT_OPTS. 
>+ resume_opts["--ignore-default-opts"] = True >+ for myopt, myarg in resume_opts.iteritems(): >+ if myopt not in bad_resume_opts: >+ if myarg is True: >+ mynewargv.append(myopt) >+ else: >+ mynewargv.append(myopt +"="+ myarg) >+ # priority only needs to be adjusted on the first run >+ os.environ["PORTAGE_NICENESS"] = "0" >+ os.execv(mynewargv[0], mynewargv) >+ >+ >+ def do_one_emerge(self, x, mergecount, totalcount, mtimedb, favorites, metadata_keys): >+ xterm_titles = "notitles" not in self.settings.features >+ myfeat = self.settings.features[:] >+ ldpath_mtimes = mtimedb["ldpath"] >+ pkg_type = x[0] >+ myroot=x[1] >+ pkg_key = x[2] >+ pkg_cat = x[2].split("/")[0] >+ pkg_pf = x[2].split("/")[1] >+ pkgindex=2 >+ if x[0]=="blocks": >+ pkgindex=3 >+ >+ if "--pretend" not in self.myopts and "--fetchonly" not in self.myopts: >+ print "\n>>> Emerging (" + \ >+ colorize("MERGE_LIST_PROGRESS", str(mergecount)) + " of " + \ >+ colorize("MERGE_LIST_PROGRESS", str(totalcount)) + ") " + \ >+ colorize("GOOD", x[pkgindex]) + " to " + x[1] >+ emergelog(xterm_titles, " >>> emerge ("+\ >+ str(mergecount)+" of "+str(totalcount)+\ >+ ") "+x[pkgindex]+" to "+x[1]) >+ >+ portdb = self.trees[myroot]["porttree"].dbapi >+ bindb = self.trees[myroot]["bintree"].dbapi >+ vartree = self.trees[myroot]["vartree"] >+ pkgsettings = self.pkgsettings[myroot] >+ metadata = {} >+ if pkg_type == "blocks": >+ pass >+ elif pkg_type == "ebuild": >+ mydbapi = portdb >+ metadata.update(izip(metadata_keys, >+ mydbapi.aux_get(pkg_key, metadata_keys))) >+ pkgsettings.setcpv(pkg_key, mydb=mydbapi) >+ metadata["USE"] = pkgsettings["USE"] >+ else: >+ if pkg_type == "binary": >+ mydbapi = bindb >+ else: >+ raise AssertionError("Package type: '%s'" % pkg_type) >+ metadata.update(izip(metadata_keys, >+ mydbapi.aux_get(pkg_key, metadata_keys))) >+ y = portdb.findname(pkg_key) >+ pkgsettings["EMERGE_FROM"] = x[0] >+ pkgsettings.backup_changes("EMERGE_FROM") >+ pkgsettings.reset() >+ >+ #buildsyspkg: Check if we 
need to _force_ binary package creation >+ issyspkg = ("buildsyspkg" in myfeat) \ >+ and x[0] != "blocks" \ >+ and system_set.findAtomForPackage(pkg_key, metadata) \ >+ and "--buildpkg" not in self.myopts >+ if x[0] in ["ebuild","blocks"]: >+ if x[0] == "blocks" and "--fetchonly" not in self.myopts: >+ raise Exception, "Merging a blocker" >+ elif "--fetchonly" in self.myopts or \ >+ "--fetch-all-uri" in self.myopts: >+ if "--fetch-all-uri" in self.myopts: >+ retval = portage.doebuild(y, "fetch", myroot, >+ pkgsettings, self.edebug, >+ "--pretend" in self.myopts, fetchonly=1, >+ fetchall=1, mydbapi=portdb, tree="porttree") >+ else: >+ retval = portage.doebuild(y, "fetch", myroot, >+ pkgsettings, self.edebug, >+ "--pretend" in self.myopts, fetchonly=1, >+ mydbapi=portdb, tree="porttree") >+ if (retval is None) or retval: >+ print >+ print "!!! Fetch for",y,"failed, continuing..." >+ print >+ >+ self.curval += 1 >+ return retval >+ >+ portage.doebuild_environment(y, "setup", myroot, >+ pkgsettings, self.edebug, 1, portdb) >+ catdir = os.path.dirname(pkgsettings["PORTAGE_BUILDDIR"]) >+ portage_util.ensure_dirs(os.path.dirname(catdir), >+ uid=portage.portage_uid, gid=portage.portage_gid, >+ mode=070, mask=0) >+ builddir_lock = None >+ catdir_lock = None >+ try: >+ catdir_lock = portage_locks.lockdir(catdir) >+ portage_util.ensure_dirs(catdir, > uid=portage.portage_uid, gid=portage.portage_gid, > mode=070, mask=0) >- builddir_lock = None >- catdir_lock = None >+ builddir_lock = portage_locks.lockdir( >+ pkgsettings["PORTAGE_BUILDDIR"]) > try: >- catdir_lock = portage_locks.lockdir(catdir) >- portage_util.ensure_dirs(catdir, >- uid=portage.portage_uid, gid=portage.portage_gid, >- mode=070, mask=0) >- builddir_lock = portage_locks.lockdir( >- pkgsettings["PORTAGE_BUILDDIR"]) >- try: >- portage_locks.unlockdir(catdir_lock) >- finally: >- catdir_lock = None >- msg = " === (%s of %s) Cleaning (%s::%s)" % \ >- (mergecount, len(mymergelist), pkg_key, y) >- short_msg = "emerge: 
(%s of %s) %s Clean" % \ >- (mergecount, len(mymergelist), pkg_key) >+ portage_locks.unlockdir(catdir_lock) >+ finally: >+ catdir_lock = None >+ msg = " === (%s of %s) Cleaning (%s::%s)" % \ >+ (mergecount, totalcount, pkg_key, y) >+ short_msg = "emerge: (%s of %s) %s Clean" % \ >+ (mergecount, totalcount, pkg_key) >+ emergelog(xterm_titles, msg, short_msg=short_msg) >+ retval = portage.doebuild(y, "clean", myroot, >+ pkgsettings, self.edebug, cleanup=1, >+ mydbapi=portdb, tree="porttree") >+ >+ if retval != os.EX_OK: >+ return retval >+ if "--buildpkg" in self.myopts or issyspkg: >+ if issyspkg: >+ print ">>> This is a system package, " + \ >+ "let's pack a rescue tarball." >+ msg = " === (%s of %s) Compiling/Packaging (%s::%s)" % \ >+ (mergecount, totalcount, pkg_key, y) >+ short_msg = "emerge: (%s of %s) %s Compile" % \ >+ (mergecount, totalcount, pkg_key) > emergelog(xterm_titles, msg, short_msg=short_msg) >- retval = portage.doebuild(y, "clean", myroot, >- pkgsettings, self.edebug, cleanup=1, >- mydbapi=portdb, tree="porttree") >+ self.trees[myroot]["bintree"].prevent_collision(pkg_key) >+ retval = portage.doebuild(y, "package", myroot, >+ pkgsettings, self.edebug, mydbapi=portdb, >+ tree="porttree") >+ if retval != os.EX_OK or \ >+ "--buildpkgonly" in self.myopts: >+ portage.elog_process(pkg_key, pkgsettings) > if retval != os.EX_OK: > return retval >- if "--buildpkg" in self.myopts or issyspkg: >- if issyspkg: >- print ">>> This is a system package, " + \ >- "let's pack a rescue tarball." 
>- msg = " === (%s of %s) Compiling/Packaging (%s::%s)" % \ >- (mergecount, len(mymergelist), pkg_key, y) >- short_msg = "emerge: (%s of %s) %s Compile" % \ >- (mergecount, len(mymergelist), pkg_key) >+ bintree = self.trees[myroot]["bintree"] >+ if bintree.populated: >+ bintree.inject(pkg_key) >+ if "--buildpkgonly" not in self.myopts: >+ msg = " === (%s of %s) Merging (%s::%s)" % \ >+ (mergecount, totalcount, pkg_key, y) >+ short_msg = "emerge: (%s of %s) %s Merge" % \ >+ (mergecount, totalcount, pkg_key) > emergelog(xterm_titles, msg, short_msg=short_msg) >- self.trees[myroot]["bintree"].prevent_collision(pkg_key) >- retval = portage.doebuild(y, "package", myroot, >- pkgsettings, self.edebug, mydbapi=portdb, >- tree="porttree") >- if retval != os.EX_OK or \ >- "--buildpkgonly" in self.myopts: >- portage.elog_process(pkg_key, pkgsettings) >- if retval != os.EX_OK: >- return retval >- bintree = self.trees[myroot]["bintree"] >- if bintree.populated: >- bintree.inject(pkg_key) >- if "--buildpkgonly" not in self.myopts: >- msg = " === (%s of %s) Merging (%s::%s)" % \ >- (mergecount, len(mymergelist), pkg_key, y) >- short_msg = "emerge: (%s of %s) %s Merge" % \ >- (mergecount, len(mymergelist), pkg_key) >- emergelog(xterm_titles, msg, short_msg=short_msg) >- retval = portage.merge(pkgsettings["CATEGORY"], >- pkgsettings["PF"], pkgsettings["D"], >- os.path.join(pkgsettings["PORTAGE_BUILDDIR"], >- "build-info"), myroot, pkgsettings, >- myebuild=pkgsettings["EBUILD"], >- mytree="porttree", mydbapi=portdb, >- vartree=vartree, prev_mtimes=ldpath_mtimes) >- if retval != os.EX_OK: >- return retval >- elif "noclean" not in pkgsettings.features: >- portage.doebuild(y, "clean", myroot, >- pkgsettings, self.edebug, mydbapi=portdb, >- tree="porttree") >- else: >- msg = " === (%s of %s) Compiling/Merging (%s::%s)" % \ >- (mergecount, len(mymergelist), pkg_key, y) >- short_msg = "emerge: (%s of %s) %s Compile" % \ >- (mergecount, len(mymergelist), pkg_key) >- emergelog(xterm_titles, 
msg, short_msg=short_msg) >- retval = portage.doebuild(y, "merge", myroot, >- pkgsettings, self.edebug, vartree=vartree, >- mydbapi=portdb, tree="porttree", >- prev_mtimes=ldpath_mtimes) >+ retval = portage.merge(pkgsettings["CATEGORY"], >+ pkgsettings["PF"], pkgsettings["D"], >+ os.path.join(pkgsettings["PORTAGE_BUILDDIR"], >+ "build-info"), myroot, pkgsettings, >+ myebuild=pkgsettings["EBUILD"], >+ mytree="porttree", mydbapi=portdb, >+ vartree=vartree, prev_mtimes=ldpath_mtimes) > if retval != os.EX_OK: > return retval >+ elif "noclean" not in pkgsettings.features: >+ portage.doebuild(y, "clean", myroot, >+ pkgsettings, self.edebug, mydbapi=portdb, >+ tree="porttree") >+ else: >+ msg = " === (%s of %s) Compiling/Merging (%s::%s)" % \ >+ (mergecount, totalcount, pkg_key, y) >+ short_msg = "emerge: (%s of %s) %s Compile" % \ >+ (mergecount, totalcount, pkg_key) >+ emergelog(xterm_titles, msg, short_msg=short_msg) >+ retval = portage.doebuild(y, "merge", myroot, >+ pkgsettings, self.edebug, vartree=vartree, >+ mydbapi=portdb, tree="porttree", >+ prev_mtimes=ldpath_mtimes) >+ if retval != os.EX_OK: >+ return retval >+ finally: >+ if builddir_lock: >+ portage_locks.unlockdir(builddir_lock) >+ try: >+ if not catdir_lock: >+ # Lock catdir for removal if empty. >+ catdir_lock = portage_locks.lockdir(catdir) > finally: >- if builddir_lock: >- portage_locks.unlockdir(builddir_lock) >- try: >- if not catdir_lock: >- # Lock catdir for removal if empty. 
>- catdir_lock = portage_locks.lockdir(catdir) >- finally: >- if catdir_lock: >- try: >- os.rmdir(catdir) >- except OSError, e: >- if e.errno not in (errno.ENOENT, >- errno.ENOTEMPTY, errno.EEXIST): >- raise >- del e >- portage_locks.unlockdir(catdir_lock) >- >- elif x[0]=="binary": >- #merge the tbz2 >- mytbz2 = self.trees[myroot]["bintree"].getname(pkg_key) >- if "--getbinpkg" in self.myopts: >- tbz2_lock = None >- try: >- if "distlocks" in pkgsettings.features and \ >- os.access(pkgsettings["PKGDIR"], os.W_OK): >- portage_util.ensure_dirs(os.path.dirname(mytbz2)) >- tbz2_lock = portage_locks.lockfile(mytbz2, >- wantnewlockfile=1) >- if self.trees[myroot]["bintree"].isremote(pkg_key): >- msg = " --- (%s of %s) Fetching Binary (%s::%s)" %\ >- (mergecount, len(mymergelist), pkg_key, mytbz2) >- short_msg = "emerge: (%s of %s) %s Fetch" % \ >- (mergecount, len(mymergelist), pkg_key) >- emergelog(xterm_titles, msg, short_msg=short_msg) >- try: >- self.trees[myroot]["bintree"].gettbz2(pkg_key) >- except portage_exception.FileNotFound: >- writemsg("!!! Fetching Binary failed " + \ >- "for '%s'\n" % pkg_key, noiselevel=-1) >- if not fetchonly: >- return 1 >- failed_fetches.append(pkg_key) >- except portage_exception.DigestException, e: >- writemsg("\n!!! Digest verification failed:\n", >- noiselevel=-1) >- writemsg("!!! %s\n" % e.value[0], >- noiselevel=-1) >- writemsg("!!! Reason: %s\n" % e.value[1], >- noiselevel=-1) >- writemsg("!!! Got: %s\n" % e.value[2], >- noiselevel=-1) >- writemsg("!!! 
Expected: %s\n" % e.value[3], >- noiselevel=-1) >- os.unlink(mytbz2) >- if not fetchonly: >- return 1 >- failed_fetches.append(pkg_key) >- finally: >- if tbz2_lock: >- portage_locks.unlockfile(tbz2_lock) >- >- if "--fetchonly" in self.myopts or \ >- "--fetch-all-uri" in self.myopts: >- self.curval += 1 >- continue >- >- short_msg = "emerge: ("+str(mergecount)+" of "+str(len(mymergelist))+") "+x[pkgindex]+" Merge Binary" >- emergelog(xterm_titles, " === ("+str(mergecount)+\ >- " of "+str(len(mymergelist))+") Merging Binary ("+\ >- x[pkgindex]+"::"+mytbz2+")", short_msg=short_msg) >- retval = portage.pkgmerge(mytbz2, x[1], pkgsettings, >- mydbapi=bindb, >- vartree=self.trees[myroot]["vartree"], >- prev_mtimes=ldpath_mtimes) >- if retval != os.EX_OK: >- return retval >- #need to check for errors >- if "--buildpkgonly" not in self.myopts: >- self.trees[x[1]]["vartree"].inject(x[2]) >- myfavkey = portage.cpv_getkey(x[2]) >- if not fetchonly and not pretend and \ >- args_set.findAtomForPackage(pkg_key, metadata): >- world_set.lock() >- world_set.load() >- myfavkey = create_world_atom(pkg_key, metadata, >- args_set, root_config) >- if myfavkey: >- world_set.add(myfavkey) >- print ">>> Recording",myfavkey,"in \"world\" favorites file..." >- emergelog(xterm_titles, " === ("+\ >- str(mergecount)+" of "+\ >- str(len(mymergelist))+\ >- ") Updating world file ("+x[pkgindex]+")") >- world_set.save() >- world_set.unlock() >- >- if "--pretend" not in self.myopts and \ >- "--fetchonly" not in self.myopts and \ >- "--fetch-all-uri" not in self.myopts: >- # Clean the old package that we have merged over top of it. 
>- if pkgsettings.get("AUTOCLEAN", "yes") == "yes": >- xsplit=portage.pkgsplit(x[2]) >- emergelog(xterm_titles, " >>> AUTOCLEAN: " + xsplit[0]) >- retval = unmerge(pkgsettings, self.myopts, vartree, >- "clean", [xsplit[0]], ldpath_mtimes, autoclean=1) >- if not retval: >- emergelog(xterm_titles, >- " --- AUTOCLEAN: Nothing unmerged.") >- else: >- portage.writemsg_stdout(colorize("WARN", "WARNING:") >- + " AUTOCLEAN is disabled. This can cause serious" >- + " problems due to overlapping packages.\n") >- >- # Figure out if we need a restart. >- mysplit=portage.pkgsplit(x[2]) >- if mysplit[0] == "sys-apps/portage" and x[1] == "/": >- if "livecvsportage" not in self.settings.features: >- if len(mymergelist) > mergecount: >- emergelog(xterm_titles, >- " ::: completed emerge ("+ \ >- str(mergecount)+" of "+ \ >- str(len(mymergelist))+") "+ \ >- x[2]+" to "+x[1]) >- emergelog(xterm_titles, " *** RESTARTING " + \ >- "emerge via exec() after change of " + \ >- "portage version.") >- del mtimedb["resume"]["mergelist"][0] >- mtimedb.commit() >- portage.run_exitfuncs() >- mynewargv=[sys.argv[0],"--resume"] >- resume_opts = self.myopts.copy() >- # For automatic resume, we need to prevent >- # any of bad_resume_opts from leaking in >- # via EMERGE_DEFAULT_OPTS. 
>- resume_opts["--ignore-default-opts"] = True >- for myopt, myarg in resume_opts.iteritems(): >- if myopt not in bad_resume_opts: >- if myarg is True: >- mynewargv.append(myopt) >- else: >- mynewargv.append(myopt +"="+ myarg) >- # priority only needs to be adjusted on the first run >- os.environ["PORTAGE_NICENESS"] = "0" >- os.execv(mynewargv[0], mynewargv) >- >- if "--pretend" not in self.myopts and \ >- "--fetchonly" not in self.myopts and \ >- "--fetch-all-uri" not in self.myopts: >- if "noclean" not in self.settings.features: >- short_msg = "emerge: (%s of %s) %s Clean Post" % \ >- (mergecount, len(mymergelist), x[pkgindex]) >- emergelog(xterm_titles, (" === (%s of %s) " + \ >- "Post-Build Cleaning (%s::%s)") % \ >- (mergecount, len(mymergelist), x[pkgindex], y), >- short_msg=short_msg) >- emergelog(xterm_titles, " ::: completed emerge ("+\ >- str(mergecount)+" of "+str(len(mymergelist))+") "+\ >- x[2]+" to "+x[1]) >- >- # Unsafe for parallel merges >- del mtimedb["resume"]["mergelist"][0] >- # Commit after each merge so that --resume may still work in >- # in the event that portage is not allowed to exit normally >- # due to power failure, SIGKILL, etc... >- mtimedb.commit() >- self.curval += 1 >- self._poll_child_processes() >- >- if "--pretend" not in self.myopts: >- emergelog(xterm_titles, " *** Finished. Cleaning up...") >- >- # We're out of the loop... We're done. Delete the resume data. >- if mtimedb.has_key("resume"): >- del mtimedb["resume"] >- mtimedb.commit() >- >- #by doing an exit this way, --fetchonly can continue to try to >- #fetch everything even if a particular download fails. >- if "--fetchonly" in self.myopts or "--fetch-all-uri" in self.myopts: >- if failed_fetches: >- sys.stderr.write("\n\n!!! Some fetch errors were " + \ >- "encountered. 
Please see above for details.\n\n") >- for cpv in failed_fetches: >- sys.stderr.write(" ") >- sys.stderr.write(cpv) >- sys.stderr.write("\n") >- sys.stderr.write("\n") >- sys.exit(1) >+ if catdir_lock: >+ try: >+ os.rmdir(catdir) >+ except OSError, e: >+ if e.errno not in (errno.ENOENT, >+ errno.ENOTEMPTY, errno.EEXIST): >+ raise >+ del e >+ portage_locks.unlockdir(catdir_lock) >+ >+ elif x[0]=="binary": >+ #merge the tbz2 >+ mytbz2 = self.trees[myroot]["bintree"].getname(pkg_key) >+ if "--getbinpkg" in self.myopts: >+ tbz2_lock = None >+ try: >+ if "distlocks" in pkgsettings.features and \ >+ os.access(pkgsettings["PKGDIR"], os.W_OK): >+ portage_util.ensure_dirs(os.path.dirname(mytbz2)) >+ tbz2_lock = portage_locks.lockfile(mytbz2, >+ wantnewlockfile=1) >+ if self.trees[myroot]["bintree"].isremote(pkg_key): >+ msg = " --- (%s of %s) Fetching Binary (%s::%s)" %\ >+ (mergecount, totalcount, pkg_key, mytbz2) >+ short_msg = "emerge: (%s of %s) %s Fetch" % \ >+ (mergecount, totalcount, pkg_key) >+ emergelog(xterm_titles, msg, short_msg=short_msg) >+ try: >+ self.trees[myroot]["bintree"].gettbz2(pkg_key) >+ except portage_exception.FileNotFound: >+ writemsg("!!! Fetching Binary failed " + \ >+ "for '%s'\n" % pkg_key, noiselevel=-1) >+ if not fetchonly: >+ return 1 >+ except portage_exception.DigestException, e: >+ writemsg("\n!!! Digest verification failed:\n", >+ noiselevel=-1) >+ writemsg("!!! %s\n" % e.value[0], >+ noiselevel=-1) >+ writemsg("!!! Reason: %s\n" % e.value[1], >+ noiselevel=-1) >+ writemsg("!!! Got: %s\n" % e.value[2], >+ noiselevel=-1) >+ writemsg("!!! 
Expected: %s\n" % e.value[3], >+ noiselevel=-1) >+ os.unlink(mytbz2) >+ if not fetchonly: >+ return 1 >+ finally: >+ if tbz2_lock: >+ portage_locks.unlockfile(tbz2_lock) >+ >+ if "--fetchonly" in self.myopts or \ >+ "--fetch-all-uri" in self.myopts: >+ self.curval += 1 >+ return os.EX_OK >+ >+ short_msg = "emerge: ("+str(mergecount)+" of "+str(totalcount)+") "+x[pkgindex]+" Merge Binary" >+ emergelog(xterm_titles, " === ("+str(mergecount)+\ >+ " of "+str(totalcount)+") Merging Binary ("+\ >+ x[pkgindex]+"::"+mytbz2+")", short_msg=short_msg) >+ >+ retval = portage.pkgmerge(mytbz2, x[1], pkgsettings, >+ mydbapi=bindb, >+ vartree=self.trees[myroot]["vartree"], >+ prev_mtimes=ldpath_mtimes) >+ if retval != os.EX_OK: >+ return retval >+ #need to check for errors >+ >+ # clean up the older version which emerged on top of >+ if "--buildpkgonly" not in self.myopts and \ >+ "--pretend" not in self.myopts and \ >+ "--fetchonly" not in self.myopts and \ >+ "--fetch-all-uri" not in self.myopts: >+ # Clean the old package that we have merged over top of it. >+ if pkgsettings.get("AUTOCLEAN", "yes") == "yes": >+ xsplit=portage.pkgsplit(x[2]) >+ emergelog(xterm_titles, " >>> AUTOCLEAN: " + xsplit[0]) >+ retval = unmerge(pkgsettings, self.myopts, vartree, >+ "clean", [xsplit[0]], ldpath_mtimes, autoclean=1) >+ if not retval: >+ emergelog(xterm_titles, >+ " --- AUTOCLEAN: Nothing unmerged.") >+ else: >+ portage.writemsg_stdout(colorize("WARN", "WARNING:") >+ + " AUTOCLEAN is disabled. This can cause serious" >+ + " problems due to overlapping packages.\n") >+ >+- if "--pretend" not in self.myopts: >+- emergelog(xterm_titles, " *** Finished. 
Cleaning up...") >+ if "--pretend" not in self.myopts and \ >+ "--fetchonly" not in self.myopts and \ >+ "--fetch-all-uri" not in self.myopts: >+ if "noclean" not in self.settings.features: >+ short_msg = "emerge: (%s of %s) %s Clean Post" % \ >+ (mergecount, totalcount, x[pkgindex]) >+ emergelog(xterm_titles, (" === (%s of %s) " + \ >+ "Post-Build Cleaning (%s::%s)") % \ >+ (mergecount, totalcount, x[pkgindex], y), >+ short_msg=short_msg) >+ emergelog(xterm_titles, " ::: completed emerge ("+\ >+ str(mergecount)+" of "+str(totalcount)+") "+\ >+ x[2]+" to "+x[1]) >+ >+ return os.EX_OK >+ >+ def add_one_emerge_to_world(self, x, mergecount, totalcount, favorites, metadata_keys): >+ fetchonly = "--fetchonly" in self.myopts or \ >+ "--fetch-all-uri" in self.myopts >+ pretend = "--pretend" in self.myopts >+ xterm_titles = "notitles" not in self.settings.features >+ pkg_type = x[0] >+ myroot=x[1] >+ pkg_key = x[2] >+ pkgindex=2 >+ if x[0]=="blocks": >+ pkgindex=3 >+ >+ portdb = self.trees[myroot]["porttree"].dbapi >+ bindb = self.trees[myroot]["bintree"].dbapi >+ pkgsettings = self.pkgsettings[myroot] >+ root_config = RootConfig(self.trees[self.target_root]) >+ system_set = root_config.sets["system"] >+ args_set = AtomSet(favorites) >+ world_set = root_config.sets["world"] >+ metadata = {} >+ if pkg_type == "blocks": >+ pass >+ elif pkg_type == "ebuild": >+ mydbapi = portdb >+ metadata.update(izip(metadata_keys, >+ mydbapi.aux_get(pkg_key, metadata_keys))) >+ pkgsettings.setcpv(pkg_key, mydb=mydbapi) >+ metadata["USE"] = pkgsettings["USE"] >+ else: >+ if pkg_type == "binary": >+ mydbapi = bindb >+ else: >+- sys.exit(0) >+- return os.EX_OK >+ raise AssertionError("Package type: '%s'" % pkg_type) >+ metadata.update(izip(metadata_keys, >+ mydbapi.aux_get(pkg_key, metadata_keys))) >+ >+ if "--buildpkgonly" not in self.myopts: >+ self.trees[x[1]]["vartree"].inject(x[2]) >+ myfavkey = portage.cpv_getkey(x[2]) >+ if not fetchonly and not pretend and \ >+ 
args_set.findAtomForPackage(pkg_key, metadata): >+ world_set.lock() >+ world_set.load() >+ myfavkey = create_world_atom(pkg_key, metadata, >+ args_set, root_config) >+ if myfavkey: >+ world_set.add(myfavkey) >+ print ">>> Recording",myfavkey,"in \"world\" favorites file..." >+ emergelog(xterm_titles, " === ("+\ >+ str(mergecount)+" of "+\ >+ str(totalcount)+\ >+ ") Updating world file ("+x[pkgindex]+")") >+ world_set.save() >+ world_set.unlock() > else: > sys.exit(0) > return os.EX_OK >@@ -5904,7 +6295,7 @@ > ">>> No packages selected for removal by %s\n" % action) > return > >- if "--quiet" not in myopts: >+ if "--quiet" not in myopts and "--nodeps" not in myopts: > print "\nCalculating dependencies ", > > soft = 0 >@@ -6124,6 +6515,38 @@ > else: > print "Number removed: "+str(len(cleanlist)) > >+ def mergelist_to_merge_slot(mergelist, myopts, printonly=False, parallel=False): >+ merge_slots = {} >+ for pkg in mergelist: >+ if pkg[0] != 'blocks' and pkg[3] == 'merge': >+ slot = int(pkg[4]) >+ try: >+ if pkg not in merge_slots[slot]: >+ merge_slots[slot].append(pkg) >+ except KeyError: >+ merge_slots[slot] = [pkg] >+ # print the merge slots >+ max_slot = 0 >+ mylist = merge_slots.keys() >+ mylist.sort() >+ for x in mylist: >+ if x > max_slot: >+ max_slot = x >+ if parallel: >+ print "Package list for slot = "+str(x) >+ for y in merge_slots[x]: >+ print " ",y >+ if printonly: >+ return >+ >+ # make one last pass at the merge_slots and initialize the missing slots to None >+ x = 0 >+ while x < max_slot: >+ if x not in merge_slots.keys(): >+ merge_slots[x] = None >+ x += 1 >+ return merge_slots >+ > def action_build(settings, trees, mtimedb, > myopts, myaction, myfiles, spinner): > ldpath_mtimes = mtimedb["ldpath"] >@@ -6250,18 +6673,19 @@ > print colorize("INFORM", "emerge: It seems we have nothing to resume...") > return os.EX_OK > favorites = mtimedb["resume"]["favorites"] >+ mergelist_to_merge_slot(mymergelist, myopts, True, "parallel" in settings.features) > 
retval = mydepgraph.display(mymergelist, favorites=favorites) > if retval != os.EX_OK: > return retval > prompt="Would you like to resume merging these packages?" > else: >- retval = mydepgraph.display( >- mydepgraph.altlist(reversed=("--tree" in myopts)), >- favorites=favorites) >+ mymergelist = mydepgraph.altlist(reversed=("--tree" in myopts)) >+ mergelist_to_merge_slot(mymergelist, myopts, True, "parallel" in settings.features) >+ retval = mydepgraph.display(mymergelist, favorites=favorites) > if retval != os.EX_OK: > return retval > mergecount=0 >- for x in mydepgraph.altlist(): >+ for x in mymergelist: > if x[0] != "blocks" and x[3] != "nomerge": > mergecount+=1 > #check for blocking dependencies >@@ -6306,13 +6730,14 @@ > print colorize("INFORM", "emerge: It seems we have nothing to resume...") > return os.EX_OK > favorites = mtimedb["resume"]["favorites"] >+ mergelist_to_merge_slot(mymergelist, myopts, True, "parallel" in settings.features) > retval = mydepgraph.display(mymergelist, favorites=favorites) > if retval != os.EX_OK: > return retval > else: >- retval = mydepgraph.display( >- mydepgraph.altlist(reversed=("--tree" in myopts)), >- favorites=favorites) >+ mymergelist = mydepgraph.altlist(reversed=("--tree" in myopts)) >+ mergelist_to_merge_slot(mymergelist, myopts, True, "parallel" in settings.features) >+ retval = mydepgraph.display(mymergelist, favorites=favorites) > if retval != os.EX_OK: > return retval > if "--buildpkgonly" in myopts and \ >@@ -6335,22 +6760,31 @@ > it to write the mtimedb""" > mtimedb.filename = None > time.sleep(3) # allow the parent to have first fetch >+ mymergelist = mtimedb["resume"]["mergelist"] >+ if "--skipfirst" in myopts: >+ mymergelist = mymergelist[1:] >+ if len(mymergelist) == 0: >+ print colorize("INFORM", "emerge: It seems we have nothing to resume...") >+ sys.exit(0) >+ merge_slots = mergelist_to_merge_slot(mymergelist, myopts, False, False) > del mydepgraph >- retval = mergetask.merge( >- 
mtimedb["resume"]["mergelist"], favorites, mtimedb) >+ retval = mergetask.merge(mymergelist, favorites, mtimedb, merge_slots) > merge_count = mergetask.curval > else: >- if "resume" in mtimedb and \ >- "mergelist" in mtimedb["resume"] and \ >- len(mtimedb["resume"]["mergelist"]) > 1: >- mtimedb["resume_backup"] = mtimedb["resume"] >- del mtimedb["resume"] >- mtimedb.commit() >- mtimedb["resume"]={} >- # XXX: Stored as a list for backward compatibility. >- mtimedb["resume"]["myopts"] = \ >- [k for k in myopts if myopts[k] is True] >- mtimedb["resume"]["favorites"]=favorites >+ mymergelist = mydepgraph.altlist(reversed=("--tree" in myopts)) >+ merge_slots = mergelist_to_merge_slot(mymergelist, myopts, False, False) >+ if "--nodeps" not in myopts or len(mymergelist) > 1: >+ if "resume" in mtimedb and \ >+ "mergelist" in mtimedb["resume"] and \ >+ len(mtimedb["resume"]["mergelist"]) > 1: >+ mtimedb["resume_backup"] = mtimedb["resume"] >+ del mtimedb["resume"] >+ mtimedb.commit() >+ mtimedb["resume"]={} >+ # XXX: Stored as a list for backward compatibility. 
>+ mtimedb["resume"]["myopts"] = \ >+ [k for k in myopts if myopts[k] is True] >+ mtimedb["resume"]["favorites"]=favorites > if ("--digest" in myopts) and not ("--fetchonly" in myopts or "--fetch-all-uri" in myopts): > for pkgline in mydepgraph.altlist(): > if pkgline[0]=="ebuild" and pkgline[3]=="merge": >@@ -6365,7 +6799,7 @@ > mydbapi=trees[pkgline[1]]["porttree"].dbapi, > tree="porttree") > >- pkglist = mydepgraph.altlist() >+ pkglist = mymergelist > > if fetchonly or "--buildpkgonly" in myopts: > pkglist = [pkg for pkg in pkglist if pkg[0] != "blocks"] >@@ -6373,9 +6807,7 @@ > for x in pkglist: > if x[0] != "blocks": > continue >- retval = mydepgraph.display(mydepgraph.altlist( >- reversed=("--tree" in myopts)), >- favorites=favorites) >+ retval = mydepgraph.display(mymergelist, favorites=favorites) > msg = "Error: The above package list contains " + \ > "packages which cannot be installed " + \ > "at the same time on the same system." >@@ -6392,11 +6824,12 @@ > mydepgraph.saveNomergeFavorites() > del mydepgraph > mergetask = MergeTask(settings, trees, myopts) >- retval = mergetask.merge(pkglist, favorites, mtimedb) >+ retval = mergetask.merge(pkglist, favorites, mtimedb, merge_slots) > merge_count = mergetask.curval > > if retval == os.EX_OK and not (pretend or fetchonly): >- mtimedb.pop("resume", None) >+ if ("--nodeps" not in myopts or len(mymergelist) > 1): >+ mtimedb.pop("resume", None) > if "yes" == settings.get("AUTOCLEAN"): > portage.writemsg_stdout(">>> Auto-cleaning packages...\n") > vartree = trees[settings["ROOT"]]["vartree"] >diff -Nru portage_orig/pym/portage.py portage/pym/portage.py >--- portage_orig/pym/portage.py 2008-02-15 19:04:02.000000000 -0500 >+++ portage/pym/portage.py 2008-02-15 20:27:18.000000000 -0500 >@@ -86,7 +86,7 @@ > from output import bold, colorize, green, red, yellow > > import portage_const >- from portage_const import VDB_PATH, PRIVATE_PATH, CACHE_PATH, DEPCACHE_PATH, \ >+ from portage_const import VDB_PATH, PRIVATE_PATH, 
DEF_LOGDIR, CACHE_PATH, DEPCACHE_PATH, \ > USER_CONFIG_PATH, MODULES_FILE_PATH, CUSTOM_PROFILE_PATH, PORTAGE_BASE_PATH, \ > PORTAGE_BIN_PATH, PORTAGE_PYM_PATH, PROFILE_PATH, LOCALE_DATA_PATH, \ > EBUILD_SH_BINARY, SANDBOX_BINARY, BASH_BINARY, \ >@@ -481,16 +481,27 @@ > return len(self.leaf_nodes(ignore_priority=ignore_priority)) == \ > len(self.order) > >- def debug_print(self): >- for node in self.nodes: >- print node, >- if self.nodes[node][0]: >- print "depends on" >- else: >- print "(no children)" >- for child in self.nodes[node][0]: >- print " ",child, >- print "(%s)" % self.nodes[node][0][child] >+ def debug_print(self): >+ lista = dict() >+ for node in self.nodes: >+ #print node, >+ #if self.nodes[node][0]: >+ #print "depends on" >+ #else: >+ # print "(no children)" >+ for child in self.nodes[node][0]: >+ #print " ",child, >+ #print "(%s)" % self.nodes[node][0][child] >+ for node2 in self.nodes: >+ if child[2] == node2[2]: >+ for child2 in self.nodes[node2][0]: >+ if child2[2] == node[2]: >+ if not node[2] in lista.keys() and not node2[2] in lista.keys(): >+ lista[node[2]] = node2[2] >+ >+ for i in lista.keys(): >+ print "%s depends on %s" % (i, lista[i]) >+ > > _elog_atexit_handlers = [] > def elog_process(cpv, mysettings): >@@ -3810,6 +3821,10 @@ > if mysettings.get("PORT_LOGDIR", "") == "": > while "PORT_LOGDIR" in mysettings: > del mysettings["PORT_LOGDIR"] >+ >+ if not "PORT_LOGDIR" in mysettings and "parallel" in mysettings.features: >+ mysettings["PORT_LOGDIR"] = mysettings["ROOT"] + DEF_LOGDIR >+ > if "PORT_LOGDIR" in mysettings: > try: > modified = portage_util.ensure_dirs(mysettings["PORT_LOGDIR"]) >@@ -3824,7 +3839,7 @@ > while "PORT_LOGDIR" in mysettings: > del mysettings["PORT_LOGDIR"] > if "PORT_LOGDIR" in mysettings: >- logid_path = os.path.join(mysettings["PORTAGE_BUILDDIR"], ".logid") >+ logid_path = os.path.join(mysettings["BUILD_PREFIX"], ".logid.")+mysettings["CATEGORY"]+"."+ mysettings["PF"] > if not os.path.exists(logid_path): > f = 
open(logid_path, "w") > f.close() >diff -Nru portage_orig/pym/portage_const.py portage/pym/portage_const.py >--- portage_orig/pym/portage_const.py 2008-02-15 08:55:40.000000000 -0500 >+++ portage/pym/portage_const.py 2008-02-15 20:27:46.000000000 -0500 >@@ -12,6 +12,7 @@ > > VDB_PATH = "var/db/pkg" > PRIVATE_PATH = "var/lib/portage" >+DEF_LOGDIR = "var/log/portage" > CACHE_PATH = "/var/cache/edb" > DEPCACHE_PATH = CACHE_PATH+"/dep" > >diff -Nru portage_orig/pym/portage_exec.py portage/pym/portage_exec.py >--- portage_orig/pym/portage_exec.py 2008-02-15 08:55:40.000000000 -0500 >+++ portage/pym/portage_exec.py 2008-02-15 20:29:48.000000000 -0500 >@@ -19,6 +19,8 @@ > > if os.path.isdir("/proc/%i/fd" % os.getpid()): > def get_open_fds(): >+ # there is a race here - fd used by listdir may be in the list but closed >+ # before this method returns. > return map(int, [fd for fd in os.listdir("/proc/%i/fd" % os.getpid()) if fd.isdigit()]) > else: > def get_open_fds(): >@@ -190,27 +192,41 @@ > # mypids will hold the pids of all processes created. > mypids = [] > >+ pw = None > if logfile: > # Using a log file requires that stdout and stderr > # are assigned to the process we're running. > if 1 not in fd_pipes or 2 not in fd_pipes: > raise ValueError(fd_pipes) > >- # Create a pipe >- (pr, pw) = os.pipe() >- >- # Create a tee process, giving it our stdout and stderr >- # as well as the read end of the pipe. >- mypids.extend(spawn(('tee', '-i', '-a', logfile), >- returnpid=True, fd_pipes={0:pr, >- 1:fd_pipes[1], 2:fd_pipes[2]})) >- >- # We don't need the read end of the pipe, so close it. >- os.close(pr) >- >- # Assign the write end of the pipe to our stdout and stderr. >- fd_pipes[1] = pw >- fd_pipes[2] = pw >+ tee_good = 1 >+ try: >+ statinfo1 = os.stat(logfile) >+ statinfo2 = os.fstat(fd_pipes[1]) >+ statinfo3 = os.fstat(fd_pipes[2]) >+ # if they are pointing to same file as logfile, no 'tee' is required. 
>+ if statinfo1 == statinfo2 and statinfo2 == statinfo3: >+ tee_good = 0 >+ except: >+ tee_good = 1 >+ >+ if tee_good: >+ >+ # Create a pipe >+ (pr, pw) = os.pipe() >+ >+ # Create a tee process, giving it our stdout and stderr >+ # as well as the read end of the pipe. >+ mypids.extend(spawn(('tee', '-i', '-a', logfile), >+ returnpid=True, fd_pipes={0:pr, >+ 1:fd_pipes[1], 2:fd_pipes[2]})) >+ >+ # We don't need the read end of the pipe, so close it. >+ os.close(pr) >+ >+ # Assign the write end of the pipe to our stdout and stderr. >+ fd_pipes[1] = pw >+ fd_pipes[2] = pw > > pid = os.fork() > >@@ -232,7 +248,7 @@ > > # If we started a tee process the write side of the pipe is no > # longer needed, so close it. >- if logfile: >+ if logfile and pw: > os.close(pw) > > # If the caller wants to handle cleaning up the processes, we tell >diff -Nru portage_orig/pym/portage_locks.py portage/pym/portage_locks.py >--- portage_orig/pym/portage_locks.py 2008-02-15 08:55:40.000000000 -0500 >+++ portage/pym/portage_locks.py 2008-02-15 20:31:18.000000000 -0500 >@@ -79,7 +79,7 @@ > except IOError, e: > if "errno" not in dir(e): > raise >- if e.errno == errno.EAGAIN: >+ if (e.errno == errno.EAGAIN or e.errno == errno.EPERM): > # resource temp unavailable; eg, someone beat us to the lock. > if waiting_msg is None: > if isinstance(mypath, int): >@@ -89,6 +89,7 @@ > elif waiting_msg: > print waiting_msg > # try for the exclusive lock now. >+ time.sleep(5) > fcntl.lockf(myfd,fcntl.LOCK_EX) > elif e.errno == errno.ENOLCK: > # We're not allowed to lock on this FS. >@@ -107,6 +108,7 @@ > locking_method = None > myfd = HARDLINK_FD > else: >+ print "waiting for lock on "+str(lockfilename) > raise > >
You cannot view the attachment while viewing its details because your browser does not support IFRAMEs.
View the attachment on a separate page
.
View Attachment As Diff
View Attachment As Raw
Actions:
View
|
Diff
Attachments on
bug 147516
:
96922
|
96923
|
97020
|
97056
|
111065
|
111081
|
111086
|
111095
|
111106
|
111136
|
111434
|
111724
|
114444
|
114501
|
115212
|
115395
|
115986
|
121259
|
121347
|
121467
|
126839
|
140211
|
142083
|
142841
| 143576 |
144689
|
160298
|
160299
|
160301