Gentoo Websites Logo
Go to: Gentoo Home Documentation Forums Lists Bugs Planet Store Wiki Get Gentoo!
View | Details | Raw Unified | Return to bug 275047 | Differences between
and this patch

Collapse All | Expand All

(-)__init__.py (-6734 / +20 lines)
Lines 9-16 Link Here
9
import signal
9
import signal
10
import sys
10
import sys
11
import textwrap
11
import textwrap
12
import weakref
13
import gc
14
import os, stat
12
import os, stat
15
import platform
13
import platform
16
14
Lines 23-35 Link Here
23
21
24
from portage import digraph
22
from portage import digraph
25
from portage.const import NEWS_LIB_PATH
23
from portage.const import NEWS_LIB_PATH
26
from portage.cache.mappings import slot_dict_class
27
24
28
import _emerge.help
25
import _emerge.help
29
import portage.xpak, commands, errno, re, socket, time
26
import portage.xpak, commands, errno, re, socket, time
30
from portage.output import blue, bold, colorize, darkblue, darkgreen, green, \
27
from portage.output import blue, bold, colorize, darkgreen, \
31
	nc_len, red, teal, turquoise, \
28
	red, xtermTitleReset, yellow
32
	xtermTitleReset, yellow
33
from portage.output import create_color_func
29
from portage.output import create_color_func
34
good = create_color_func("GOOD")
30
good = create_color_func("GOOD")
35
bad = create_color_func("BAD")
31
bad = create_color_func("BAD")
Lines 42-48 Link Here
42
import portage.exception
38
import portage.exception
43
from portage.cache.cache_errors import CacheError
39
from portage.cache.cache_errors import CacheError
44
from portage.data import secpass
40
from portage.data import secpass
45
from portage.elog.messages import eerror
46
from portage.util import normalize_path as normpath
41
from portage.util import normalize_path as normpath
47
from portage.util import cmp_sort_key, writemsg, writemsg_level
42
from portage.util import cmp_sort_key, writemsg, writemsg_level
48
from portage.sets import load_default_config, SETPREFIX
43
from portage.sets import load_default_config, SETPREFIX
Lines 50-96 Link Here
50
45
51
from itertools import chain, izip
46
from itertools import chain, izip
52
47
53
from _emerge.SlotObject import SlotObject
48
from _emerge.clear_caches import clear_caches
54
from _emerge.DepPriority import DepPriority
49
from _emerge.countdown import countdown
55
from _emerge.BlockerDepPriority import BlockerDepPriority
50
from _emerge.create_depgraph_params import create_depgraph_params
56
from _emerge.UnmergeDepPriority import UnmergeDepPriority
51
from _emerge.Dependency import Dependency
57
from _emerge.DepPriorityNormalRange import DepPriorityNormalRange
52
from _emerge.depgraph import depgraph, resume_depgraph
58
from _emerge.DepPrioritySatisfiedRange import DepPrioritySatisfiedRange
53
from _emerge.DepPrioritySatisfiedRange import DepPrioritySatisfiedRange
54
from _emerge.emergelog import emergelog
55
from _emerge._flush_elog_mod_echo import _flush_elog_mod_echo
56
from _emerge.is_valid_package_atom import is_valid_package_atom
57
from _emerge.MetadataRegen import MetadataRegen
59
from _emerge.Package import Package
58
from _emerge.Package import Package
60
from _emerge.Blocker import Blocker
61
from _emerge.BlockerDB import BlockerDB
62
from _emerge.EbuildFetcher import EbuildFetcher
63
from _emerge.EbuildPhase import EbuildPhase
64
from _emerge.BinpkgPrefetcher import BinpkgPrefetcher
65
from _emerge.PackageMerge import PackageMerge
66
from _emerge.DependencyArg import DependencyArg
67
from _emerge.AtomArg import AtomArg
68
from _emerge.PackageArg import PackageArg
69
from _emerge.SetArg import SetArg
70
from _emerge.Dependency import Dependency
71
from _emerge.BlockerCache import BlockerCache
72
from _emerge.PackageVirtualDbapi import PackageVirtualDbapi
73
from _emerge.RepoDisplay import RepoDisplay
74
from _emerge.UseFlagDisplay import UseFlagDisplay
75
from _emerge.SequentialTaskQueue import SequentialTaskQueue
76
from _emerge.ProgressHandler import ProgressHandler
59
from _emerge.ProgressHandler import ProgressHandler
77
from _emerge.stdout_spinner import stdout_spinner
78
from _emerge.JobStatusDisplay import JobStatusDisplay
79
from _emerge.PollScheduler import PollScheduler
80
from _emerge.search import search
81
from _emerge.visible import visible
82
from _emerge.emergelog import emergelog, _emerge_log_dir
83
from _emerge.userquery import userquery
84
from _emerge.countdown import countdown
85
from _emerge.unmerge import unmerge
86
from _emerge.MergeListItem import MergeListItem
87
from _emerge.MetadataRegen import MetadataRegen
88
from _emerge.RootConfig import RootConfig
60
from _emerge.RootConfig import RootConfig
89
from _emerge.format_size import format_size
61
from _emerge.Scheduler import Scheduler
90
from _emerge.PackageCounters import PackageCounters
62
from _emerge.search import search
91
from _emerge.FakeVartree import FakeVartree
63
from _emerge.SetArg import SetArg
92
from _emerge.show_invalid_depstring_notice import show_invalid_depstring_notice
64
from _emerge.show_invalid_depstring_notice import show_invalid_depstring_notice
65
from _emerge.stdout_spinner import stdout_spinner
66
from _emerge.unmerge import unmerge
67
from _emerge.UnmergeDepPriority import UnmergeDepPriority
68
from _emerge.UseFlagDisplay import UseFlagDisplay
69
from _emerge.userquery import userquery
93
70
71
94
actions = frozenset([
72
actions = frozenset([
95
"clean", "config", "depclean",
73
"clean", "config", "depclean",
96
"info", "list-sets", "metadata",
74
"info", "list-sets", "metadata",
Lines 208-6754 Link Here
208
186
209
	return "Portage " + portage.VERSION +" ("+profilever+", "+gccver+", "+libcver+", "+unameout+")"
187
	return "Portage " + portage.VERSION +" ("+profilever+", "+gccver+", "+libcver+", "+unameout+")"
210
188
def create_depgraph_params(myopts, myaction):
	"""Translate emerge command-line options into depgraph parameter flags.

	Flags produced (consumed by the depgraph class):
	  self:      include _this_ package regardless of if it is merged.
	  selective: exclude the package if it is merged
	  recurse:   go into the dependencies
	  deep:      go into the dependencies of already merged packages
	  empty:     pretend nothing is merged
	  complete:  completely account for all known dependencies
	  remove:    build graph for use in removing packages
	"""
	params = set(["recurse"])

	# Removal graphs are always complete and ignore the other options.
	if myaction == "remove":
		params.update(("remove", "complete"))
		return params

	selective_opts = ("--update", "--newuse", "--reinstall", "--noreplace")
	if any(opt in myopts for opt in selective_opts):
		params.add("selective")
	if "--emptytree" in myopts:
		# --emptytree overrides any selectivity implied above.
		params.add("empty")
		params.discard("selective")
	if "--nodeps" in myopts:
		params.discard("recurse")
	if "--deep" in myopts:
		params.add("deep")
	if "--complete-graph" in myopts:
		params.add("complete")
	return params
def create_world_atom(pkg, args_set, root_config):
	"""Create a new atom for the world file if one does not exist.  If the
	argument atom is precise enough to identify a specific slot then a slot
	atom will be returned. Atoms that are in the system set may also be stored
	in world since system atoms can only match one slot while world atoms can
	be greedy with respect to slots.  Unslotted system packages will not be
	stored in world.

	@param pkg: the package being recorded (provides slot_atom; matched
		against args_set and the world/system sets)
	@param args_set: package set of argument atoms (findAtomForPackage)
	@param root_config: RootConfig-like object providing .sets and .trees
	@return: the atom to add to world (either a plain cp or a slot atom),
		or None if nothing should be added
	"""

	arg_atom = args_set.findAtomForPackage(pkg)
	if not arg_atom:
		# The package was not requested by an argument atom; nothing to do.
		return None
	cp = portage.dep_getkey(arg_atom)
	new_world_atom = cp
	sets = root_config.sets
	portdb = root_config.trees["porttree"].dbapi
	vardb = root_config.trees["vartree"].dbapi
	available_slots = set(portdb.aux_get(cpv, ["SLOT"])[0] \
		for cpv in portdb.match(cp))
	slotted = len(available_slots) > 1 or \
		(len(available_slots) == 1 and "0" not in available_slots)
	if not slotted:
		# check the vdb in case this is multislot
		available_slots = set(vardb.aux_get(cpv, ["SLOT"])[0] \
			for cpv in vardb.match(cp))
		slotted = len(available_slots) > 1 or \
			(len(available_slots) == 1 and "0" not in available_slots)
	if slotted and arg_atom != cp:
		# If the user gave a specific atom, store it as a
		# slot atom in the world file.
		slot_atom = pkg.slot_atom

		# For USE=multislot, there are a couple of cases to
		# handle here:
		#
		# 1) SLOT="0", but the real SLOT spontaneously changed to some
		#    unknown value, so just record an unslotted atom.
		#
		# 2) SLOT comes from an installed package and there is no
		#    matching SLOT in the portage tree.
		#
		# Make sure that the slot atom is available in either the
		# portdb or the vardb, since otherwise the user certainly
		# doesn't want the SLOT atom recorded in the world file
		# (case 1 above).  If it's only available in the vardb,
		# the user may be trying to prevent a USE=multislot
		# package from being removed by --depclean (case 2 above).

		mydb = portdb
		if not portdb.match(slot_atom):
			# SLOT seems to come from an installed multislot package
			mydb = vardb
		# If there is no installed package matching the SLOT atom,
		# it probably changed SLOT spontaneously due to USE=multislot,
		# so just record an unslotted atom.
		if vardb.match(slot_atom):
			# Now verify that the argument is precise
			# enough to identify a specific slot.
			matches = mydb.match(arg_atom)
			matched_slots = set()
			for cpv in matches:
				matched_slots.add(mydb.aux_get(cpv, ["SLOT"])[0])
			if len(matched_slots) == 1:
				new_world_atom = slot_atom

	if new_world_atom == sets["world"].findAtomForPackage(pkg):
		# Both atoms would be identical, so there's nothing to add.
		return None
	if not slotted:
		# Unlike world atoms, system atoms are not greedy for slots, so they
		# can't be safely excluded from world if they are slotted.
		system_atom = sets["system"].findAtomForPackage(pkg)
		if system_atom:
			if not portage.dep_getkey(system_atom).startswith("virtual/"):
				return None
			# System virtuals aren't safe to exclude from world since they can
			# match multiple old-style virtuals but only one of them will be
			# pulled in by update or depclean.
			providers = portdb.mysettings.getvirtuals().get(
				portage.dep_getkey(system_atom))
			if providers and len(providers) == 1 and providers[0] == cp:
				return None
	return new_world_atom
def filter_iuse_defaults(iuse):
	"""Yield each IUSE token with a leading "+" or "-" default marker removed."""
	for token in iuse:
		if token[:1] in ("+", "-"):
			yield token[1:]
		else:
			yield token
def _find_deep_system_runtime_deps(graph):
	"""Collect system packages and their transitive runtime dependencies.

	Seeds a depth-first traversal with every non-uninstall Package in the
	graph that belongs to its root's "system" set, then follows only child
	edges whose priority is a DepPriority with runtime or runtime_post set.

	@param graph: digraph-like object (iterable of nodes, child_nodes())
	@return: set of Package nodes reachable via runtime edges
	"""
	deep_system_deps = set()
	node_stack = []
	for node in graph:
		if not isinstance(node, Package) or \
			node.operation == 'uninstall':
			continue
		if node.root_config.sets['system'].findAtomForPackage(node):
			node_stack.append(node)

	def ignore_priority(priority):
		"""
		Ignore non-runtime priorities.
		"""
		if isinstance(priority, DepPriority) and \
			(priority.runtime or priority.runtime_post):
			return False
		return True

	# Iterative DFS; deep_system_deps doubles as the visited set.
	while node_stack:
		node = node_stack.pop()
		if node in deep_system_deps:
			continue
		deep_system_deps.add(node)
		for child in graph.child_nodes(node, ignore_priority=ignore_priority):
			if not isinstance(child, Package) or \
				child.operation == 'uninstall':
				continue
			node_stack.append(child)

	return deep_system_deps
def get_masking_status(pkg, pkgsettings, root_config):
	"""Return a list of human-readable mask reasons for pkg.

	Starts from portage.getmaskingstatus() and appends a CHOST reason for
	uninstalled packages whose CHOST is not accepted, plus an "invalid"
	reason when SLOT is empty/undefined in the package metadata.
	"""

	mreasons = portage.getmaskingstatus(
		pkg, settings=pkgsettings,
		portdb=root_config.trees["porttree"].dbapi)

	if not pkg.installed:
		# Installed packages are exempt from the CHOST check.
		if not pkgsettings._accept_chost(pkg.cpv, pkg.metadata):
			mreasons.append("CHOST: %s" % \
				pkg.metadata["CHOST"])

	if not pkg.metadata["SLOT"]:
		mreasons.append("invalid: SLOT is undefined")

	return mreasons
def get_mask_info(root_config, cpv, pkgsettings,
	db, pkg_type, built, installed, db_keys):
	"""Fetch metadata for cpv and compute its mask reasons.

	@param db: dbapi to read metadata from (port/bin/var db)
	@param db_keys: metadata keys to request via aux_get
	@return: (metadata, mreasons) where metadata is a dict (or None when
		aux_get failed, in which case mreasons is ["corruption"]) and
		mreasons is a list of human-readable mask reasons
	"""
	# Note: the original code initialized an unused local (eapi_masked);
	# it has been removed.
	try:
		metadata = dict(izip(db_keys,
			db.aux_get(cpv, db_keys)))
	except KeyError:
		# aux_get could not provide metadata; treat the entry as corrupt.
		metadata = None
	if metadata and not built:
		# For ebuilds, compute effective USE/CHOST from current settings.
		pkgsettings.setcpv(cpv, mydb=metadata)
		metadata["USE"] = pkgsettings["PORTAGE_USE"]
		metadata['CHOST'] = pkgsettings.get('CHOST', '')
	if metadata is None:
		mreasons = ["corruption"]
	else:
		eapi = metadata['EAPI']
		if eapi[:1] == '-':
			# A leading "-" marks the EAPI as explicitly unsupported.
			eapi = eapi[1:]
		if not portage.eapi_is_supported(eapi):
			mreasons = ['EAPI %s' % eapi]
		else:
			pkg = Package(type_name=pkg_type, root_config=root_config,
				cpv=cpv, built=built, installed=installed, metadata=metadata)
			mreasons = get_masking_status(pkg, pkgsettings, root_config)
	return metadata, mreasons
def show_masked_packages(masked_packages):
	"""Print mask information for a list of masked packages.

	@param masked_packages: iterable of (root_config, pkgsettings, cpv,
		metadata, mreasons) tuples
	@return: True if any package was masked due to an unsupported EAPI
	"""
	shown_licenses = set()
	shown_comments = set()
	# Maybe there is both an ebuild and a binary. Only
	# show one of them to avoid redundant appearance.
	shown_cpvs = set()
	have_eapi_mask = False
	for (root_config, pkgsettings, cpv,
		metadata, mreasons) in masked_packages:
		if cpv in shown_cpvs:
			continue
		shown_cpvs.add(cpv)
		comment, filename = None, None
		if "package.mask" in mreasons:
			comment, filename = \
				portage.getmaskingreason(
				cpv, metadata=metadata,
				settings=pkgsettings,
				portdb=root_config.trees["porttree"].dbapi,
				return_location=True)
		missing_licenses = []
		if metadata:
			if not portage.eapi_is_supported(metadata["EAPI"]):
				have_eapi_mask = True
			try:
				missing_licenses = \
					pkgsettings._getMissingLicenses(
						cpv, metadata)
			except portage.exception.InvalidDependString:
				# This will have already been reported
				# above via mreasons.
				pass

		print "- "+cpv+" (masked by: "+", ".join(mreasons)+")"
		if comment and comment not in shown_comments:
			print filename+":"
			print comment
			shown_comments.add(comment)
		portdb = root_config.trees["porttree"].dbapi
		for l in missing_licenses:
			# NOTE(review): l_path is looked up before the shown_licenses
			# dedup check, so the lookup runs even for already-shown
			# licenses — harmless but slightly wasteful.
			l_path = portdb.findLicensePath(l)
			if l in shown_licenses:
				continue
			msg = ("A copy of the '%s' license" + \
			" is located at '%s'.") % (l, l_path)
			print msg
			print
			shown_licenses.add(l)
	return have_eapi_mask
class depgraph(object):
459
460
	pkg_tree_map = RootConfig.pkg_tree_map
461
462
	_dep_keys = ["DEPEND", "RDEPEND", "PDEPEND"]
463
464
	def __init__(self, settings, trees, myopts, myparams, spinner):
		"""Build per-root fake dbapi/vartree state and initialize all of the
		bookkeeping containers used while constructing the dependency graph.

		@param settings: portage config for the target root
		@param trees: mapping of root -> {porttree, bintree, vartree,
			root_config}
		@param myopts: parsed emerge options (mapping)
		@param myparams: parameter flags from create_depgraph_params()
		@param spinner: progress spinner (update() is called during preload)
		"""
		self.settings = settings
		self.target_root = settings["ROOT"]
		self.myopts = myopts
		self.myparams = myparams
		self.edebug = 0
		if settings.get("PORTAGE_DEBUG", "") == "1":
			self.edebug = 1
		self.spinner = spinner
		self._running_root = trees["/"]["root_config"]
		self._opts_no_restart = Scheduler._opts_no_restart
		self.pkgsettings = {}
		# Maps slot atom to package for each Package added to the graph.
		self._slot_pkg_map = {}
		# Maps nodes to the reasons they were selected for reinstallation.
		self._reinstall_nodes = {}
		self.mydbapi = {}
		self.trees = {}
		self._trees_orig = trees
		self.roots = {}
		# Contains a filtered view of preferred packages that are selected
		# from available repositories.
		self._filtered_trees = {}
		# Contains installed packages and new packages that have been added
		# to the graph.
		self._graph_trees = {}
		# All Package instances
		self._pkg_cache = {}
		for myroot in trees:
			self.trees[myroot] = {}
			# Create a RootConfig instance that references
			# the FakeVartree instead of the real one.
			self.roots[myroot] = RootConfig(
				trees[myroot]["vartree"].settings,
				self.trees[myroot],
				trees[myroot]["root_config"].setconfig)
			for tree in ("porttree", "bintree"):
				self.trees[myroot][tree] = trees[myroot][tree]
			self.trees[myroot]["vartree"] = \
				FakeVartree(trees[myroot]["root_config"],
					pkg_cache=self._pkg_cache)
			self.pkgsettings[myroot] = portage.config(
				clone=self.trees[myroot]["vartree"].settings)
			self._slot_pkg_map[myroot] = {}
			vardb = self.trees[myroot]["vartree"].dbapi
			preload_installed_pkgs = "--nodeps" not in self.myopts and \
				"--buildpkgonly" not in self.myopts
			# This fakedbapi instance will model the state that the vdb will
			# have after new packages have been installed.
			fakedb = PackageVirtualDbapi(vardb.settings)
			if preload_installed_pkgs:
				for pkg in vardb:
					self.spinner.update()
					# This triggers metadata updates via FakeVartree.
					vardb.aux_get(pkg.cpv, [])
					fakedb.cpv_inject(pkg)

			# Now that the vardb state is cached in our FakeVartree,
			# we won't be needing the real vartree cache for awhile.
			# To make some room on the heap, clear the vardbapi
			# caches.
			trees[myroot]["vartree"].dbapi._clear_cache()
			gc.collect()

			self.mydbapi[myroot] = fakedb
			# graph_tree is a function used as a cheap attribute holder so
			# it can stand in for a tree object exposing only .dbapi.
			def graph_tree():
				pass
			graph_tree.dbapi = fakedb
			self._graph_trees[myroot] = {}
			self._filtered_trees[myroot] = {}
			# Substitute the graph tree for the vartree in dep_check() since we
			# want atom selections to be consistent with package selections
			# have already been made.
			self._graph_trees[myroot]["porttree"]   = graph_tree
			self._graph_trees[myroot]["vartree"]    = graph_tree
			def filtered_tree():
				pass
			filtered_tree.dbapi = self._dep_check_composite_db(self, myroot)
			self._filtered_trees[myroot]["porttree"] = filtered_tree

			# Passing in graph_tree as the vartree here could lead to better
			# atom selections in some cases by causing atoms for packages that
			# have been added to the graph to be preferred over other choices.
			# However, it can trigger atom selections that result in
			# unresolvable direct circular dependencies. For example, this
			# happens with gwydion-dylan which depends on either itself or
			# gwydion-dylan-bin. In case gwydion-dylan is not yet installed,
			# gwydion-dylan-bin needs to be selected in order to avoid a
			# an unresolvable direct circular dependency.
			#
			# To solve the problem described above, pass in "graph_db" so that
			# packages that have been added to the graph are distinguishable
			# from other available packages and installed packages. Also, pass
			# the parent package into self._select_atoms() calls so that
			# unresolvable direct circular dependencies can be detected and
			# avoided when possible.
			self._filtered_trees[myroot]["graph_db"] = graph_tree.dbapi
			self._filtered_trees[myroot]["vartree"] = self.trees[myroot]["vartree"]

			dbs = []
			portdb = self.trees[myroot]["porttree"].dbapi
			bindb  = self.trees[myroot]["bintree"].dbapi
			vardb  = self.trees[myroot]["vartree"].dbapi
			#               (db, pkg_type, built, installed, db_keys)
			if "--usepkgonly" not in self.myopts:
				db_keys = list(portdb._aux_cache_keys)
				dbs.append((portdb, "ebuild", False, False, db_keys))
			if "--usepkg" in self.myopts:
				db_keys = list(bindb._aux_cache_keys)
				dbs.append((bindb,  "binary", True, False, db_keys))
			db_keys = list(trees[myroot]["vartree"].dbapi._aux_cache_keys)
			dbs.append((vardb, "installed", True, True, db_keys))
			self._filtered_trees[myroot]["dbs"] = dbs
			if "--usepkg" in self.myopts:
				self.trees[myroot]["bintree"].populate(
					"--getbinpkg" in self.myopts,
					"--getbinpkgonly" in self.myopts)
		del trees

		self.digraph=portage.digraph()
		# contains all sets added to the graph
		self._sets = {}
		# contains atoms given as arguments
		self._sets["args"] = InternalPackageSet()
		# contains all atoms from all sets added to the graph, including
		# atoms given as arguments
		self._set_atoms = InternalPackageSet()
		self._atom_arg_map = {}
		# contains all nodes pulled in by self._set_atoms
		self._set_nodes = set()
		# Contains only Blocker -> Uninstall edges
		self._blocker_uninstalls = digraph()
		# Contains only Package -> Blocker edges
		self._blocker_parents = digraph()
		# Contains only irrelevant Package -> Blocker edges
		self._irrelevant_blockers = digraph()
		# Contains only unsolvable Package -> Blocker edges
		self._unsolvable_blockers = digraph()
		# Contains all Blocker -> Blocked Package edges
		self._blocked_pkgs = digraph()
		# Contains world packages that have been protected from
		# uninstallation but may not have been added to the graph
		# if the graph is not complete yet.
		self._blocked_world_pkgs = {}
		self._slot_collision_info = {}
		# Slot collision nodes are not allowed to block other packages since
		# blocker validation is only able to account for one package per slot.
		self._slot_collision_nodes = set()
		self._parent_atoms = {}
		self._slot_conflict_parent_atoms = set()
		self._serialized_tasks_cache = None
		self._scheduler_graph = None
		self._displayed_list = None
		self._pprovided_args = []
		self._missing_args = []
		self._masked_installed = set()
		self._unsatisfied_deps_for_display = []
		self._unsatisfied_blockers_for_display = None
		self._circular_deps_for_display = None
		self._dep_stack = []
		self._dep_disjunctive_stack = []
		self._unsatisfied_deps = []
		self._initially_unsatisfied_deps = []
		self._ignored_deps = []
		self._required_set_names = set(["system", "world"])
		self._select_atoms = self._select_atoms_highest_available
		self._select_package = self._select_pkg_highest_available
		self._highest_pkg_cache = {}
	def _show_slot_collision_notice(self):
		"""Show an informational message advising the user to mask one of the
		the packages. In some cases it may be possible to resolve this
		automatically, but support for backtracking (removal nodes that have
		already been selected) will be required in order to handle all possible
		cases.

		Writes the conflict report to stderr; returns nothing.
		"""

		if not self._slot_collision_info:
			return

		self._show_merge_list()

		msg = []
		msg.append("\n!!! Multiple package instances within a single " + \
			"package slot have been pulled\n")
		msg.append("!!! into the dependency graph, resulting" + \
			" in a slot conflict:\n\n")
		indent = "  "
		# Max number of parents shown, to avoid flooding the display.
		max_parents = 3
		explanation_columns = 70
		explanations = 0
		for (slot_atom, root), slot_nodes \
			in self._slot_collision_info.iteritems():
			msg.append(str(slot_atom))
			msg.append("\n\n")

			for node in slot_nodes:
				msg.append(indent)
				msg.append(str(node))
				parent_atoms = self._parent_atoms.get(node)
				if parent_atoms:
					pruned_list = set()
					# Prefer conflict atoms over others.
					for parent_atom in parent_atoms:
						if len(pruned_list) >= max_parents:
							break
						if parent_atom in self._slot_conflict_parent_atoms:
							pruned_list.add(parent_atom)

					# If this package was pulled in by conflict atoms then
					# show those alone since those are the most interesting.
					if not pruned_list:
						# When generating the pruned list, prefer instances
						# of DependencyArg over instances of Package.
						for parent_atom in parent_atoms:
							if len(pruned_list) >= max_parents:
								break
							parent, atom = parent_atom
							if isinstance(parent, DependencyArg):
								pruned_list.add(parent_atom)
						# Prefer Packages instances that themselves have been
						# pulled into collision slots.
						for parent_atom in parent_atoms:
							if len(pruned_list) >= max_parents:
								break
							parent, atom = parent_atom
							if isinstance(parent, Package) and \
								(parent.slot_atom, parent.root) \
								in self._slot_collision_info:
								pruned_list.add(parent_atom)
						# Fill any remaining display slots arbitrarily.
						for parent_atom in parent_atoms:
							if len(pruned_list) >= max_parents:
								break
							pruned_list.add(parent_atom)
					omitted_parents = len(parent_atoms) - len(pruned_list)
					parent_atoms = pruned_list
					msg.append(" pulled in by\n")
					for parent_atom in parent_atoms:
						parent, atom = parent_atom
						msg.append(2*indent)
						if isinstance(parent,
							(PackageArg, AtomArg)):
							# For PackageArg and AtomArg types, it's
							# redundant to display the atom attribute.
							msg.append(str(parent))
						else:
							# Display the specific atom from SetArg or
							# Package types.
							msg.append("%s required by %s" % (atom, parent))
						msg.append("\n")
					if omitted_parents:
						msg.append(2*indent)
						msg.append("(and %d more)\n" % omitted_parents)
				else:
					msg.append(" (no parents)\n")
				msg.append("\n")
			explanation = self._slot_conflict_explanation(slot_nodes)
			if explanation:
				explanations += 1
				msg.append(indent + "Explanation:\n\n")
				for line in textwrap.wrap(explanation, explanation_columns):
					msg.append(2*indent + line + "\n")
				msg.append("\n")
		msg.append("\n")
		sys.stderr.write("".join(msg))
		sys.stderr.flush()

		explanations_for_all = explanations == len(self._slot_collision_info)

		# Skip the generic advice when every conflict already got a
		# specific explanation, or when --quiet was requested.
		if explanations_for_all or "--quiet" in self.myopts:
			return

		msg = []
		msg.append("It may be possible to solve this problem ")
		msg.append("by using package.mask to prevent one of ")
		msg.append("those packages from being selected. ")
		msg.append("However, it is also possible that conflicting ")
		msg.append("dependencies exist such that they are impossible to ")
		msg.append("satisfy simultaneously.  If such a conflict exists in ")
		msg.append("the dependencies of two different packages, then those ")
		msg.append("packages can not be installed simultaneously.")

		from formatter import AbstractFormatter, DumbWriter
		f = AbstractFormatter(DumbWriter(sys.stderr, maxcol=72))
		for x in msg:
			f.add_flowing_data(x)
		f.end_paragraph(1)

		msg = []
		msg.append("For more information, see MASKED PACKAGES ")
		msg.append("section in the emerge man page or refer ")
		msg.append("to the Gentoo Handbook.")
		for x in msg:
			f.add_flowing_data(x)
		f.end_paragraph(1)
		f.writer.flush()
	def _slot_conflict_explanation(self, slot_nodes):
		"""
		When a slot conflict occurs due to USE deps, there are a few
		different cases to consider:

		1) New USE are correctly set but --newuse wasn't requested so an
		   installed package with incorrect USE happened to get pulled
		   into graph before the new one.

		2) New USE are incorrectly set but an installed package has correct
		   USE so it got pulled into the graph, and a new instance also got
		   pulled in due to --newuse or an upgrade.

		3) Multiple USE deps exist that can't be satisfied simultaneously,
		   and multiple package instances got pulled into the same slot to
		   satisfy the conflicting deps.

		Currently, explanations and suggested courses of action are generated
		for cases 1 and 2. Case 3 is too complex to give a useful suggestion.

		Returns an explanation string, or None when no suggestion applies.
		"""

		if len(slot_nodes) != 2:
			# Suggestions are only implemented for
			# conflicts between two packages.
			return None

		all_conflict_atoms = self._slot_conflict_parent_atoms
		matched_node = None
		matched_atoms = None
		unmatched_node = None
		for node in slot_nodes:
			parent_atoms = self._parent_atoms.get(node)
			if not parent_atoms:
				# Normally, there are always parent atoms. If there are
				# none then something unexpected is happening and there's
				# currently no suggestion for this case.
				return None
			conflict_atoms = all_conflict_atoms.intersection(parent_atoms)
			for parent_atom in conflict_atoms:
				parent, atom = parent_atom
				if not atom.use:
					# Suggestions are currently only implemented for cases
					# in which all conflict atoms have USE deps.
					return None
			if conflict_atoms:
				if matched_node is not None:
					# If conflict atoms match multiple nodes
					# then there's no suggestion.
					return None
				matched_node = node
				matched_atoms = conflict_atoms
			else:
				if unmatched_node is not None:
					# Neither node is matched by conflict atoms, and
					# there is no suggestion for this case.
					return None
				unmatched_node = node

		if matched_node is None or unmatched_node is None:
			# This shouldn't happen.
			return None

		if unmatched_node.installed and not matched_node.installed and \
			unmatched_node.cpv == matched_node.cpv:
			# If the conflicting packages are the same version then
			# --newuse should be all that's needed. If they are different
			# versions then there's some other problem.
			return "New USE are correctly set, but --newuse wasn't" + \
				" requested, so an installed package with incorrect USE " + \
				"happened to get pulled into the dependency graph. " + \
				"In order to solve " + \
				"this, either specify the --newuse option or explicitly " + \
				" reinstall '%s'." % matched_node.slot_atom

		if matched_node.installed and not unmatched_node.installed:
			# Case 2: list every unsatisfied USE-dep atom in the message.
			atoms = sorted(set(atom for parent, atom in matched_atoms))
			explanation = ("New USE for '%s' are incorrectly set. " + \
				"In order to solve this, adjust USE to satisfy '%s'") % \
				(matched_node.slot_atom, atoms[0])
			if len(atoms) > 1:
				for atom in atoms[1:-1]:
					explanation += ", '%s'" % (atom,)
				if len(atoms) > 2:
					explanation += ","
				explanation += " and '%s'" % (atoms[-1],)
			explanation += "."
			return explanation

		return None
	def _process_slot_conflicts(self):
		"""
		Process slot conflict data to identify specific atoms which
		lead to conflict. These atoms only match a subset of the
		packages that have been pulled into a given slot.

		Side effects: adds matching parent atoms to self._parent_atoms[pkg]
		and records non-matching ones in self._slot_conflict_parent_atoms.
		"""
		for (slot_atom, root), slot_nodes \
			in self._slot_collision_info.iteritems():

			# Union of every parent atom seen on any package in this slot.
			all_parent_atoms = set()
			for pkg in slot_nodes:
				parent_atoms = self._parent_atoms.get(pkg)
				if not parent_atoms:
					continue
				all_parent_atoms.update(parent_atoms)

			for pkg in slot_nodes:
				parent_atoms = self._parent_atoms.get(pkg)
				if parent_atoms is None:
					parent_atoms = set()
					self._parent_atoms[pkg] = parent_atoms
				for parent_atom in all_parent_atoms:
					if parent_atom in parent_atoms:
						continue
					# Use package set for matching since it will match via
					# PROVIDE when necessary, while match_from_list does not.
					parent, atom = parent_atom
					atom_set = InternalPackageSet(
						initial_atoms=(atom,))
					if atom_set.findAtomForPackage(pkg):
						parent_atoms.add(parent_atom)
					else:
						self._slot_conflict_parent_atoms.add(parent_atom)
	def _reinstall_for_flags(self, forced_flags,
887
		orig_use, orig_iuse, cur_use, cur_iuse):
888
		"""Return a set of flags that trigger reinstallation, or None if there
889
		are no such flags."""
890
		if "--newuse" in self.myopts:
891
			flags = set(orig_iuse.symmetric_difference(
892
				cur_iuse).difference(forced_flags))
893
			flags.update(orig_iuse.intersection(orig_use).symmetric_difference(
894
				cur_iuse.intersection(cur_use)))
895
			if flags:
896
				return flags
897
		elif "changed-use" == self.myopts.get("--reinstall"):
898
			flags = orig_iuse.intersection(orig_use).symmetric_difference(
899
				cur_iuse.intersection(cur_use))
900
			if flags:
901
				return flags
902
		return None
903
904
	def _create_graph(self, allow_unsatisfied=False):
		"""
		Drain the dependency stacks, expanding package deps and plain
		deps into graph nodes. Returns 1 on success, 0 on failure.
		"""
		regular_stack = self._dep_stack
		disjunctive_stack = self._dep_disjunctive_stack
		while regular_stack or disjunctive_stack:
			self.spinner.update()
			# Exhaust ordinary deps before touching disjunctive ones.
			while regular_stack:
				task = regular_stack.pop()
				if isinstance(task, Package):
					if not self._add_pkg_deps(task,
						allow_unsatisfied=allow_unsatisfied):
						return 0
				elif not self._add_dep(task,
					allow_unsatisfied=allow_unsatisfied):
					return 0
			if disjunctive_stack:
				if not self._pop_disjunction(allow_unsatisfied):
					return 0
		return 1
	def _add_dep(self, dep, allow_unsatisfied=False):
		"""
		Resolve a single Dependency and add the selected package to the
		graph. Returns 1 on success, 0 on unresolvable failure.
		"""
		buildpkgonly = "--buildpkgonly" in self.myopts
		nodeps = "--nodeps" in self.myopts
		empty_mode = "empty" in self.myparams
		deep_mode = "deep" in self.myparams
		update_mode = "--update" in self.myopts and dep.depth <= 1

		if dep.blocker:
			if not buildpkgonly and \
				not nodeps and \
				dep.parent not in self._slot_collision_nodes:
				if dep.parent.onlydeps:
					# It's safe to ignore blockers if the
					# parent is an --onlydeps node.
					return 1
				# The blocker applies to the root where
				# the parent is or will be installed.
				blocker = Blocker(atom=dep.atom,
					eapi=dep.parent.metadata["EAPI"],
					root=dep.parent.root)
				self._blocker_parents.add(blocker, dep.parent)
			return 1

		dep_pkg, existing_node = self._select_package(dep.root, dep.atom,
			onlydeps=dep.onlydeps)
		if not dep_pkg:
			if dep.priority.optional:
				# This could be an unnecessary build-time dep
				# pulled in by --with-bdeps=y.
				return 1
			if allow_unsatisfied:
				self._unsatisfied_deps.append(dep)
				return 1
			self._unsatisfied_deps_for_display.append(
				((dep.root, dep.atom), {"myparent":dep.parent}))
			return 0

		# In some cases, dep_check will return deps that shouldn't
		# be processed any further, so they are identified and
		# discarded here. Try to discard as few as possible since
		# discarded dependencies reduce the amount of information
		# available for optimization of merge order.
		if dep.priority.satisfied and \
			not dep_pkg.installed and \
			not (existing_node or empty_mode or deep_mode or update_mode):
			matching_arg = None
			if dep.root == self.target_root:
				try:
					matching_arg = self._iter_atoms_for_pkg(dep_pkg).next()
				except StopIteration:
					pass
				except portage.exception.InvalidDependString:
					if not dep_pkg.installed:
						# This shouldn't happen since the package
						# should have been masked.
						raise
			if not matching_arg:
				# Satisfied and not requested by an argument: ignore.
				self._ignored_deps.append(dep)
				return 1

		if not self._add_pkg(dep_pkg, dep):
			return 0
		return 1
	def _add_pkg(self, pkg, dep):
986
		myparent = None
987
		priority = None
988
		depth = 0
989
		if dep is None:
990
			dep = Dependency()
991
		else:
992
			myparent = dep.parent
993
			priority = dep.priority
994
			depth = dep.depth
995
		if priority is None:
996
			priority = DepPriority()
997
		"""
998
		Fills the digraph with nodes comprised of packages to merge.
999
		mybigkey is the package spec of the package to merge.
1000
		myparent is the package depending on mybigkey ( or None )
1001
		addme = Should we add this package to the digraph or are we just looking at it's deps?
1002
			Think --onlydeps, we need to ignore packages in that case.
1003
		#stuff to add:
1004
		#SLOT-aware emerge
1005
		#IUSE-aware emerge -> USE DEP aware depgraph
1006
		#"no downgrade" emerge
1007
		"""
1008
		# Ensure that the dependencies of the same package
1009
		# are never processed more than once.
1010
		previously_added = pkg in self.digraph
1011
1012
		# select the correct /var database that we'll be checking against
1013
		vardbapi = self.trees[pkg.root]["vartree"].dbapi
1014
		pkgsettings = self.pkgsettings[pkg.root]
1015
1016
		arg_atoms = None
1017
		if True:
1018
			try:
1019
				arg_atoms = list(self._iter_atoms_for_pkg(pkg))
1020
			except portage.exception.InvalidDependString, e:
1021
				if not pkg.installed:
1022
					show_invalid_depstring_notice(
1023
						pkg, pkg.metadata["PROVIDE"], str(e))
1024
					return 0
1025
				del e
1026
1027
		if not pkg.onlydeps:
1028
			if not pkg.installed and \
1029
				"empty" not in self.myparams and \
1030
				vardbapi.match(pkg.slot_atom):
1031
				# Increase the priority of dependencies on packages that
1032
				# are being rebuilt. This optimizes merge order so that
1033
				# dependencies are rebuilt/updated as soon as possible,
1034
				# which is needed especially when emerge is called by
1035
				# revdep-rebuild since dependencies may be affected by ABI
1036
				# breakage that has rendered them useless. Don't adjust
1037
				# priority here when in "empty" mode since all packages
1038
				# are being merged in that case.
1039
				priority.rebuild = True
1040
1041
			existing_node = self._slot_pkg_map[pkg.root].get(pkg.slot_atom)
1042
			slot_collision = False
1043
			if existing_node:
1044
				existing_node_matches = pkg.cpv == existing_node.cpv
1045
				if existing_node_matches and \
1046
					pkg != existing_node and \
1047
					dep.atom is not None:
1048
					# Use package set for matching since it will match via
1049
					# PROVIDE when necessary, while match_from_list does not.
1050
					atom_set = InternalPackageSet(initial_atoms=[dep.atom])
1051
					if not atom_set.findAtomForPackage(existing_node):
1052
						existing_node_matches = False
1053
				if existing_node_matches:
1054
					# The existing node can be reused.
1055
					if arg_atoms:
1056
						for parent_atom in arg_atoms:
1057
							parent, atom = parent_atom
1058
							self.digraph.add(existing_node, parent,
1059
								priority=priority)
1060
							self._add_parent_atom(existing_node, parent_atom)
1061
					# If a direct circular dependency is not an unsatisfied
1062
					# buildtime dependency then drop it here since otherwise
1063
					# it can skew the merge order calculation in an unwanted
1064
					# way.
1065
					if existing_node != myparent or \
1066
						(priority.buildtime and not priority.satisfied):
1067
						self.digraph.addnode(existing_node, myparent,
1068
							priority=priority)
1069
						if dep.atom is not None and dep.parent is not None:
1070
							self._add_parent_atom(existing_node,
1071
								(dep.parent, dep.atom))
1072
					return 1
1073
				else:
1074
1075
					# A slot collision has occurred.  Sometimes this coincides
1076
					# with unresolvable blockers, so the slot collision will be
1077
					# shown later if there are no unresolvable blockers.
1078
					self._add_slot_conflict(pkg)
1079
					slot_collision = True
1080
1081
			if slot_collision:
1082
				# Now add this node to the graph so that self.display()
1083
				# can show use flags and --tree portage.output.  This node is
1084
				# only being partially added to the graph.  It must not be
1085
				# allowed to interfere with the other nodes that have been
1086
				# added.  Do not overwrite data for existing nodes in
1087
				# self.mydbapi since that data will be used for blocker
1088
				# validation.
1089
				# Even though the graph is now invalid, continue to process
1090
				# dependencies so that things like --fetchonly can still
1091
				# function despite collisions.
1092
				pass
1093
			elif not previously_added:
1094
				self._slot_pkg_map[pkg.root][pkg.slot_atom] = pkg
1095
				self.mydbapi[pkg.root].cpv_inject(pkg)
1096
				self._filtered_trees[pkg.root]["porttree"].dbapi._clear_cache()
1097
1098
			if not pkg.installed:
1099
				# Allow this package to satisfy old-style virtuals in case it
1100
				# doesn't already. Any pre-existing providers will be preferred
1101
				# over this one.
1102
				try:
1103
					pkgsettings.setinst(pkg.cpv, pkg.metadata)
1104
					# For consistency, also update the global virtuals.
1105
					settings = self.roots[pkg.root].settings
1106
					settings.unlock()
1107
					settings.setinst(pkg.cpv, pkg.metadata)
1108
					settings.lock()
1109
				except portage.exception.InvalidDependString, e:
1110
					show_invalid_depstring_notice(
1111
						pkg, pkg.metadata["PROVIDE"], str(e))
1112
					del e
1113
					return 0
1114
1115
		if arg_atoms:
1116
			self._set_nodes.add(pkg)
1117
1118
		# Do this even when addme is False (--onlydeps) so that the
1119
		# parent/child relationship is always known in case
1120
		# self._show_slot_collision_notice() needs to be called later.
1121
		self.digraph.add(pkg, myparent, priority=priority)
1122
		if dep.atom is not None and dep.parent is not None:
1123
			self._add_parent_atom(pkg, (dep.parent, dep.atom))
1124
1125
		if arg_atoms:
1126
			for parent_atom in arg_atoms:
1127
				parent, atom = parent_atom
1128
				self.digraph.add(pkg, parent, priority=priority)
1129
				self._add_parent_atom(pkg, parent_atom)
1130
1131
		""" This section determines whether we go deeper into dependencies or not.
1132
		    We want to go deeper on a few occasions:
1133
		    Installing package A, we need to make sure package A's deps are met.
1134
		    emerge --deep <pkgspec>; we need to recursively check dependencies of pkgspec
1135
		    If we are in --nodeps (no recursion) mode, we obviously only check 1 level of dependencies.
1136
		"""
1137
		dep_stack = self._dep_stack
1138
		if "recurse" not in self.myparams:
1139
			return 1
1140
		elif pkg.installed and \
1141
			"deep" not in self.myparams:
1142
			dep_stack = self._ignored_deps
1143
1144
		self.spinner.update()
1145
1146
		if arg_atoms:
1147
			depth = 0
1148
		pkg.depth = depth
1149
		if not previously_added:
1150
			dep_stack.append(pkg)
1151
		return 1
1152
1153
	def _add_parent_atom(self, pkg, parent_atom):
1154
		parent_atoms = self._parent_atoms.get(pkg)
1155
		if parent_atoms is None:
1156
			parent_atoms = set()
1157
			self._parent_atoms[pkg] = parent_atoms
1158
		parent_atoms.add(parent_atom)
1159
1160
	def _add_slot_conflict(self, pkg):
1161
		self._slot_collision_nodes.add(pkg)
1162
		slot_key = (pkg.slot_atom, pkg.root)
1163
		slot_nodes = self._slot_collision_info.get(slot_key)
1164
		if slot_nodes is None:
1165
			slot_nodes = set()
1166
			slot_nodes.add(self._slot_pkg_map[pkg.root][pkg.slot_atom])
1167
			self._slot_collision_info[slot_key] = slot_nodes
1168
		slot_nodes.add(pkg)
1169
1170
	def _add_pkg_deps(self, pkg, allow_unsatisfied=False):
1171
1172
		mytype = pkg.type_name
1173
		myroot = pkg.root
1174
		mykey = pkg.cpv
1175
		metadata = pkg.metadata
1176
		myuse = pkg.use.enabled
1177
		jbigkey = pkg
1178
		depth = pkg.depth + 1
1179
		removal_action = "remove" in self.myparams
1180
1181
		edepend={}
1182
		depkeys = ["DEPEND","RDEPEND","PDEPEND"]
1183
		for k in depkeys:
1184
			edepend[k] = metadata[k]
1185
1186
		if not pkg.built and \
1187
			"--buildpkgonly" in self.myopts and \
1188
			"deep" not in self.myparams and \
1189
			"empty" not in self.myparams:
1190
			edepend["RDEPEND"] = ""
1191
			edepend["PDEPEND"] = ""
1192
		bdeps_optional = False
1193
1194
		if pkg.built and not removal_action:
1195
			if self.myopts.get("--with-bdeps", "n") == "y":
1196
				# Pull in build time deps as requested, but marked them as
1197
				# "optional" since they are not strictly required. This allows
1198
				# more freedom in the merge order calculation for solving
1199
				# circular dependencies. Don't convert to PDEPEND since that
1200
				# could make --with-bdeps=y less effective if it is used to
1201
				# adjust merge order to prevent built_with_use() calls from
1202
				# failing.
1203
				bdeps_optional = True
1204
			else:
1205
				# built packages do not have build time dependencies.
1206
				edepend["DEPEND"] = ""
1207
1208
		if removal_action and self.myopts.get("--with-bdeps", "y") == "n":
1209
			edepend["DEPEND"] = ""
1210
1211
		bdeps_root = "/"
1212
		root_deps = self.myopts.get("--root-deps")
1213
		if root_deps is not None:
1214
			if root_deps is True:
1215
				bdeps_root = myroot
1216
			elif root_deps == "rdeps":
1217
				edepend["DEPEND"] = ""
1218
1219
		deps = (
1220
			(bdeps_root, edepend["DEPEND"],
1221
				self._priority(buildtime=(not bdeps_optional),
1222
				optional=bdeps_optional)),
1223
			(myroot, edepend["RDEPEND"], self._priority(runtime=True)),
1224
			(myroot, edepend["PDEPEND"], self._priority(runtime_post=True))
1225
		)
1226
1227
		debug = "--debug" in self.myopts
1228
		strict = mytype != "installed"
1229
		try:
1230
			if not strict:
1231
				portage.dep._dep_check_strict = False
1232
1233
			for dep_root, dep_string, dep_priority in deps:
1234
				if not dep_string:
1235
					continue
1236
				if debug:
1237
					print
1238
					print "Parent:   ", jbigkey
1239
					print "Depstring:", dep_string
1240
					print "Priority:", dep_priority
1241
1242
				try:
1243
1244
					dep_string = portage.dep.paren_normalize(
1245
						portage.dep.use_reduce(
1246
						portage.dep.paren_reduce(dep_string),
1247
						uselist=pkg.use.enabled))
1248
1249
					dep_string = list(self._queue_disjunctive_deps(
1250
						pkg, dep_root, dep_priority, dep_string))
1251
1252
				except portage.exception.InvalidDependString, e:
1253
					if pkg.installed:
1254
						del e
1255
						continue
1256
					show_invalid_depstring_notice(pkg, dep_string, str(e))
1257
					return 0
1258
1259
				if not dep_string:
1260
					continue
1261
1262
				dep_string = portage.dep.paren_enclose(dep_string)
1263
1264
				if not self._add_pkg_dep_string(
1265
					pkg, dep_root, dep_priority, dep_string,
1266
					allow_unsatisfied):
1267
					return 0
1268
1269
		except portage.exception.AmbiguousPackageName, e:
1270
			pkgs = e.args[0]
1271
			portage.writemsg("\n\n!!! An atom in the dependencies " + \
1272
				"is not fully-qualified. Multiple matches:\n\n", noiselevel=-1)
1273
			for cpv in pkgs:
1274
				portage.writemsg("    %s\n" % cpv, noiselevel=-1)
1275
			portage.writemsg("\n", noiselevel=-1)
1276
			if mytype == "binary":
1277
				portage.writemsg(
1278
					"!!! This binary package cannot be installed: '%s'\n" % \
1279
					mykey, noiselevel=-1)
1280
			elif mytype == "ebuild":
1281
				portdb = self.roots[myroot].trees["porttree"].dbapi
1282
				myebuild, mylocation = portdb.findname2(mykey)
1283
				portage.writemsg("!!! This ebuild cannot be installed: " + \
1284
					"'%s'\n" % myebuild, noiselevel=-1)
1285
			portage.writemsg("!!! Please notify the package maintainer " + \
1286
				"that atoms must be fully-qualified.\n", noiselevel=-1)
1287
			return 0
1288
		finally:
1289
			portage.dep._dep_check_strict = True
1290
		return 1
1291
1292
	def _add_pkg_dep_string(self, pkg, dep_root, dep_priority, dep_string,
1293
		allow_unsatisfied):
1294
		depth = pkg.depth + 1
1295
		debug = "--debug" in self.myopts
1296
		strict = pkg.type_name != "installed"
1297
1298
		if debug:
1299
			print
1300
			print "Parent:   ", pkg
1301
			print "Depstring:", dep_string
1302
			print "Priority:", dep_priority
1303
1304
		try:
1305
			selected_atoms = self._select_atoms(dep_root,
1306
				dep_string, myuse=pkg.use.enabled, parent=pkg,
1307
				strict=strict, priority=dep_priority)
1308
		except portage.exception.InvalidDependString, e:
1309
			show_invalid_depstring_notice(pkg, dep_string, str(e))
1310
			del e
1311
			if pkg.installed:
1312
				return 1
1313
			return 0
1314
1315
		if debug:
1316
			print "Candidates:", selected_atoms
1317
1318
		vardb = self.roots[dep_root].trees["vartree"].dbapi
1319
1320
		for atom in selected_atoms:
1321
			try:
1322
1323
				atom = portage.dep.Atom(atom)
1324
1325
				mypriority = dep_priority.copy()
1326
				if not atom.blocker and vardb.match(atom):
1327
					mypriority.satisfied = True
1328
1329
				if not self._add_dep(Dependency(atom=atom,
1330
					blocker=atom.blocker, depth=depth, parent=pkg,
1331
					priority=mypriority, root=dep_root),
1332
					allow_unsatisfied=allow_unsatisfied):
1333
					return 0
1334
1335
			except portage.exception.InvalidAtom, e:
1336
				show_invalid_depstring_notice(
1337
					pkg, dep_string, str(e))
1338
				del e
1339
				if not pkg.installed:
1340
					return 0
1341
1342
		if debug:
1343
			print "Exiting...", pkg
1344
1345
		return 1
1346
1347
	def _queue_disjunctive_deps(self, pkg, dep_root, dep_priority, dep_struct):
		"""
		Queue disjunctive (virtual and ||) deps in self._dep_disjunctive_stack.
		Yields non-disjunctive deps. Raises InvalidDependString when
		necessary.
		"""
		idx = 0
		while idx < len(dep_struct):
			element = dep_struct[idx]
			if isinstance(element, list):
				# Recurse into nested groups.
				for sub in self._queue_disjunctive_deps(
					pkg, dep_root, dep_priority, element):
					yield sub
			elif element == "||":
				# Queue the operator together with its choice list and
				# skip past that list.
				self._queue_disjunction(pkg, dep_root, dep_priority,
					[element, dep_struct[idx + 1]])
				idx += 1
			else:
				try:
					parsed = portage.dep.Atom(element)
				except portage.exception.InvalidAtom:
					# Invalid atoms from installed packages are ignored.
					if not pkg.installed:
						raise portage.exception.InvalidDependString(
							"invalid atom: '%s'" % element)
				else:
					# Note: Eventually this will check for PROPERTIES=virtual
					# or whatever other metadata gets implemented for this
					# purpose.
					if parsed.cp.startswith('virtual/'):
						self._queue_disjunction(pkg, dep_root,
							dep_priority, [str(parsed)])
					else:
						yield str(parsed)
			idx += 1
	def _queue_disjunction(self, pkg, dep_root, dep_priority, dep_struct):
1383
		self._dep_disjunctive_stack.append(
1384
			(pkg, dep_root, dep_priority, dep_struct))
1385
1386
	def _pop_disjunction(self, allow_unsatisfied):
		"""
		Pop one disjunctive dep from self._dep_disjunctive_stack, and use it to
		populate self._dep_stack.
		"""
		pkg, dep_root, dep_priority, dep_struct = \
			self._dep_disjunctive_stack.pop()
		if not self._add_pkg_dep_string(pkg, dep_root, dep_priority,
			portage.dep.paren_enclose(dep_struct), allow_unsatisfied):
			return 0
		return 1
	def _priority(self, **kwargs):
		"""Construct a dep priority of the type appropriate for the
		current operation: unmerge ("remove" in params) or merge."""
		if "remove" in self.myparams:
			return UnmergeDepPriority(**kwargs)
		return DepPriority(**kwargs)
	def _dep_expand(self, root_config, atom_without_category):
		"""
		@param root_config: a root config instance
		@type root_config: RootConfig
		@param atom_without_category: an atom without a category component
		@type atom_without_category: String
		@rtype: list
		@returns: a list of atoms containing categories (possibly empty)
		"""
		null_cp = portage.dep_getkey(insert_category_into_atom(
			atom_without_category, "null"))
		# Only the package-name half is needed; the "null" category is
		# just a placeholder. (The original bound the category to a
		# local that was immediately shadowed by the loop variable.)
		atom_pn = portage.catsplit(null_cp)[1]

		# Find every category, in any configured db, that actually
		# contains a package with this name.
		dbs = self._filtered_trees[root_config.root]["dbs"]
		categories = set()
		for db, pkg_type, built, installed, db_keys in dbs:
			for cat in db.categories:
				if db.cp_list("%s/%s" % (cat, atom_pn)):
					categories.add(cat)

		# Preserve set iteration order to keep behavior identical for
		# callers that pick the first expansion.
		return [insert_category_into_atom(atom_without_category, cat)
			for cat in categories]
	def _have_new_virt(self, root, atom_cp):
1433
		ret = False
1434
		for db, pkg_type, built, installed, db_keys in \
1435
			self._filtered_trees[root]["dbs"]:
1436
			if db.cp_list(atom_cp):
1437
				ret = True
1438
				break
1439
		return ret
1440
1441
	def _iter_atoms_for_pkg(self, pkg):
		"""
		Yield (arg, atom) pairs for each command-line argument atom that
		matches pkg, skipping atoms for which a higher version in a
		different slot is visible.
		"""
		# TODO: add multiple $ROOT support
		if pkg.root != self.target_root:
			return
		atom_arg_map = self._atom_arg_map
		root_config = self.roots[pkg.root]
		for atom in self._set_atoms.iterAtomsForPackage(pkg):
			atom_cp = portage.dep_getkey(atom)
			if atom_cp != pkg.cp and \
				self._have_new_virt(pkg.root, atom_cp):
				# The atom matched via a different cp and real packages
				# exist for that cp; let those handle it instead.
				continue
			visible_pkgs = root_config.visible_pkgs.match_pkgs(atom)
			visible_pkgs.reverse() # descending order
			higher_slot = None
			for candidate in visible_pkgs:
				if candidate.cp != atom_cp:
					continue
				if pkg >= candidate:
					# This is descending order, and we're not
					# interested in any versions <= pkg given.
					break
				if pkg.slot_atom != candidate.slot_atom:
					# A higher version exists in a different slot.
					higher_slot = candidate
					break
			if higher_slot is not None:
				continue
			for arg in atom_arg_map[(atom, pkg.root)]:
				if isinstance(arg, PackageArg) and \
					arg.package != pkg:
					continue
				yield arg, atom
	def select_files(self, myfiles):
1474
		"""Given a list of .tbz2s, .ebuilds sets, and deps, create the
1475
		appropriate depgraph and return a favorite list."""
1476
		debug = "--debug" in self.myopts
1477
		root_config = self.roots[self.target_root]
1478
		sets = root_config.sets
1479
		getSetAtoms = root_config.setconfig.getSetAtoms
1480
		myfavorites=[]
1481
		myroot = self.target_root
1482
		dbs = self._filtered_trees[myroot]["dbs"]
1483
		vardb = self.trees[myroot]["vartree"].dbapi
1484
		real_vardb = self._trees_orig[myroot]["vartree"].dbapi
1485
		portdb = self.trees[myroot]["porttree"].dbapi
1486
		bindb = self.trees[myroot]["bintree"].dbapi
1487
		pkgsettings = self.pkgsettings[myroot]
1488
		args = []
1489
		onlydeps = "--onlydeps" in self.myopts
1490
		lookup_owners = []
1491
		for x in myfiles:
1492
			ext = os.path.splitext(x)[1]
1493
			if ext==".tbz2":
1494
				if not os.path.exists(x):
1495
					if os.path.exists(
1496
						os.path.join(pkgsettings["PKGDIR"], "All", x)):
1497
						x = os.path.join(pkgsettings["PKGDIR"], "All", x)
1498
					elif os.path.exists(
1499
						os.path.join(pkgsettings["PKGDIR"], x)):
1500
						x = os.path.join(pkgsettings["PKGDIR"], x)
1501
					else:
1502
						print "\n\n!!! Binary package '"+str(x)+"' does not exist."
1503
						print "!!! Please ensure the tbz2 exists as specified.\n"
1504
						return 0, myfavorites
1505
				mytbz2=portage.xpak.tbz2(x)
1506
				mykey=mytbz2.getelements("CATEGORY")[0]+"/"+os.path.splitext(os.path.basename(x))[0]
1507
				if os.path.realpath(x) != \
1508
					os.path.realpath(self.trees[myroot]["bintree"].getname(mykey)):
1509
					print colorize("BAD", "\n*** You need to adjust PKGDIR to emerge this package.\n")
1510
					return 0, myfavorites
1511
				db_keys = list(bindb._aux_cache_keys)
1512
				metadata = izip(db_keys, bindb.aux_get(mykey, db_keys))
1513
				pkg = Package(type_name="binary", root_config=root_config,
1514
					cpv=mykey, built=True, metadata=metadata,
1515
					onlydeps=onlydeps)
1516
				self._pkg_cache[pkg] = pkg
1517
				args.append(PackageArg(arg=x, package=pkg,
1518
					root_config=root_config))
1519
			elif ext==".ebuild":
1520
				ebuild_path = portage.util.normalize_path(os.path.abspath(x))
1521
				pkgdir = os.path.dirname(ebuild_path)
1522
				tree_root = os.path.dirname(os.path.dirname(pkgdir))
1523
				cp = pkgdir[len(tree_root)+1:]
1524
				e = portage.exception.PackageNotFound(
1525
					("%s is not in a valid portage tree " + \
1526
					"hierarchy or does not exist") % x)
1527
				if not portage.isvalidatom(cp):
1528
					raise e
1529
				cat = portage.catsplit(cp)[0]
1530
				mykey = cat + "/" + os.path.basename(ebuild_path[:-7])
1531
				if not portage.isvalidatom("="+mykey):
1532
					raise e
1533
				ebuild_path = portdb.findname(mykey)
1534
				if ebuild_path:
1535
					if ebuild_path != os.path.join(os.path.realpath(tree_root),
1536
						cp, os.path.basename(ebuild_path)):
1537
						print colorize("BAD", "\n*** You need to adjust PORTDIR or PORTDIR_OVERLAY to emerge this package.\n")
1538
						return 0, myfavorites
1539
					if mykey not in portdb.xmatch(
1540
						"match-visible", portage.dep_getkey(mykey)):
1541
						print colorize("BAD", "\n*** You are emerging a masked package. It is MUCH better to use")
1542
						print colorize("BAD", "*** /etc/portage/package.* to accomplish this. See portage(5) man")
1543
						print colorize("BAD", "*** page for details.")
1544
						countdown(int(self.settings["EMERGE_WARNING_DELAY"]),
1545
							"Continuing...")
1546
				else:
1547
					raise portage.exception.PackageNotFound(
1548
						"%s is not in a valid portage tree hierarchy or does not exist" % x)
1549
				db_keys = list(portdb._aux_cache_keys)
1550
				metadata = izip(db_keys, portdb.aux_get(mykey, db_keys))
1551
				pkg = Package(type_name="ebuild", root_config=root_config,
1552
					cpv=mykey, metadata=metadata, onlydeps=onlydeps)
1553
				pkgsettings.setcpv(pkg)
1554
				pkg.metadata["USE"] = pkgsettings["PORTAGE_USE"]
1555
				pkg.metadata['CHOST'] = pkgsettings.get('CHOST', '')
1556
				self._pkg_cache[pkg] = pkg
1557
				args.append(PackageArg(arg=x, package=pkg,
1558
					root_config=root_config))
1559
			elif x.startswith(os.path.sep):
1560
				if not x.startswith(myroot):
1561
					portage.writemsg(("\n\n!!! '%s' does not start with" + \
1562
						" $ROOT.\n") % x, noiselevel=-1)
1563
					return 0, []
1564
				# Queue these up since it's most efficient to handle
1565
				# multiple files in a single iter_owners() call.
1566
				lookup_owners.append(x)
1567
			else:
1568
				if x in ("system", "world"):
1569
					x = SETPREFIX + x
1570
				if x.startswith(SETPREFIX):
1571
					s = x[len(SETPREFIX):]
1572
					if s not in sets:
1573
						raise portage.exception.PackageSetNotFound(s)
1574
					if s in self._sets:
1575
						continue
1576
					# Recursively expand sets so that containment tests in
1577
					# self._get_parent_sets() properly match atoms in nested
1578
					# sets (like if world contains system).
1579
					expanded_set = InternalPackageSet(
1580
						initial_atoms=getSetAtoms(s))
1581
					self._sets[s] = expanded_set
1582
					args.append(SetArg(arg=x, set=expanded_set,
1583
						root_config=root_config))
1584
					continue
1585
				if not is_valid_package_atom(x):
1586
					portage.writemsg("\n\n!!! '%s' is not a valid package atom.\n" % x,
1587
						noiselevel=-1)
1588
					portage.writemsg("!!! Please check ebuild(5) for full details.\n")
1589
					portage.writemsg("!!! (Did you specify a version but forget to prefix with '='?)\n")
1590
					return (0,[])
1591
				# Don't expand categories or old-style virtuals here unless
1592
				# necessary. Expansion of old-style virtuals here causes at
1593
				# least the following problems:
1594
				#   1) It's more difficult to determine which set(s) an atom
1595
				#      came from, if any.
1596
				#   2) It takes away freedom from the resolver to choose other
1597
				#      possible expansions when necessary.
1598
				if "/" in x:
1599
					args.append(AtomArg(arg=x, atom=x,
1600
						root_config=root_config))
1601
					continue
1602
				expanded_atoms = self._dep_expand(root_config, x)
1603
				installed_cp_set = set()
1604
				for atom in expanded_atoms:
1605
					atom_cp = portage.dep_getkey(atom)
1606
					if vardb.cp_list(atom_cp):
1607
						installed_cp_set.add(atom_cp)
1608
1609
				if len(installed_cp_set) > 1:
1610
					non_virtual_cps = set()
1611
					for atom_cp in installed_cp_set:
1612
						if not atom_cp.startswith("virtual/"):
1613
							non_virtual_cps.add(atom_cp)
1614
					if len(non_virtual_cps) == 1:
1615
						installed_cp_set = non_virtual_cps
1616
1617
				if len(expanded_atoms) > 1 and len(installed_cp_set) == 1:
1618
					installed_cp = iter(installed_cp_set).next()
1619
					expanded_atoms = [atom for atom in expanded_atoms \
1620
						if portage.dep_getkey(atom) == installed_cp]
1621
1622
				if len(expanded_atoms) > 1:
1623
					print
1624
					print
1625
					ambiguous_package_name(x, expanded_atoms, root_config,
1626
						self.spinner, self.myopts)
1627
					return False, myfavorites
1628
				if expanded_atoms:
1629
					atom = expanded_atoms[0]
1630
				else:
1631
					null_atom = insert_category_into_atom(x, "null")
1632
					null_cp = portage.dep_getkey(null_atom)
1633
					cat, atom_pn = portage.catsplit(null_cp)
1634
					virts_p = root_config.settings.get_virts_p().get(atom_pn)
1635
					if virts_p:
1636
						# Allow the depgraph to choose which virtual.
1637
						atom = insert_category_into_atom(x, "virtual")
1638
					else:
1639
						atom = insert_category_into_atom(x, "null")
1640
1641
				args.append(AtomArg(arg=x, atom=atom,
1642
					root_config=root_config))
1643
1644
		if lookup_owners:
1645
			relative_paths = []
1646
			search_for_multiple = False
1647
			if len(lookup_owners) > 1:
1648
				search_for_multiple = True
1649
1650
			for x in lookup_owners:
1651
				if not search_for_multiple and os.path.isdir(x):
1652
					search_for_multiple = True
1653
				relative_paths.append(x[len(myroot):])
1654
1655
			owners = set()
1656
			for pkg, relative_path in \
1657
				real_vardb._owners.iter_owners(relative_paths):
1658
				owners.add(pkg.mycpv)
1659
				if not search_for_multiple:
1660
					break
1661
1662
			if not owners:
1663
				portage.writemsg(("\n\n!!! '%s' is not claimed " + \
1664
					"by any package.\n") % lookup_owners[0], noiselevel=-1)
1665
				return 0, []
1666
1667
			for cpv in owners:
1668
				slot = vardb.aux_get(cpv, ["SLOT"])[0]
1669
				if not slot:
1670
					# portage now masks packages with missing slot, but it's
1671
					# possible that one was installed by an older version
1672
					atom = portage.cpv_getkey(cpv)
1673
				else:
1674
					atom = "%s:%s" % (portage.cpv_getkey(cpv), slot)
1675
				args.append(AtomArg(arg=atom, atom=atom,
1676
					root_config=root_config))
1677
1678
		if "--update" in self.myopts:
1679
			# In some cases, the greedy slots behavior can pull in a slot that
1680
			# the user would want to uninstall due to it being blocked by a
1681
			# newer version in a different slot. Therefore, it's necessary to
1682
			# detect and discard any that should be uninstalled. Each time
1683
			# that arguments are updated, package selections are repeated in
1684
			# order to ensure consistency with the current arguments:
1685
			#
1686
			#  1) Initialize args
1687
			#  2) Select packages and generate initial greedy atoms
1688
			#  3) Update args with greedy atoms
1689
			#  4) Select packages and generate greedy atoms again, while
1690
			#     accounting for any blockers between selected packages
1691
			#  5) Update args with revised greedy atoms
1692
1693
			self._set_args(args)
1694
			greedy_args = []
1695
			for arg in args:
1696
				greedy_args.append(arg)
1697
				if not isinstance(arg, AtomArg):
1698
					continue
1699
				for atom in self._greedy_slots(arg.root_config, arg.atom):
1700
					greedy_args.append(
1701
						AtomArg(arg=arg.arg, atom=atom,
1702
							root_config=arg.root_config))
1703
1704
			self._set_args(greedy_args)
1705
			del greedy_args
1706
1707
			# Revise greedy atoms, accounting for any blockers
1708
			# between selected packages.
1709
			revised_greedy_args = []
1710
			for arg in args:
1711
				revised_greedy_args.append(arg)
1712
				if not isinstance(arg, AtomArg):
1713
					continue
1714
				for atom in self._greedy_slots(arg.root_config, arg.atom,
1715
					blocker_lookahead=True):
1716
					revised_greedy_args.append(
1717
						AtomArg(arg=arg.arg, atom=atom,
1718
							root_config=arg.root_config))
1719
			args = revised_greedy_args
1720
			del revised_greedy_args
1721
1722
		self._set_args(args)
1723
1724
		myfavorites = set(myfavorites)
1725
		for arg in args:
1726
			if isinstance(arg, (AtomArg, PackageArg)):
1727
				myfavorites.add(arg.atom)
1728
			elif isinstance(arg, SetArg):
1729
				myfavorites.add(arg.arg)
1730
		myfavorites = list(myfavorites)
1731
1732
		pprovideddict = pkgsettings.pprovideddict
1733
		if debug:
1734
			portage.writemsg("\n", noiselevel=-1)
1735
		# Order needs to be preserved since a feature of --nodeps
1736
		# is to allow the user to force a specific merge order.
1737
		args.reverse()
1738
		while args:
1739
			arg = args.pop()
1740
			for atom in arg.set:
1741
				self.spinner.update()
1742
				dep = Dependency(atom=atom, onlydeps=onlydeps,
1743
					root=myroot, parent=arg)
1744
				atom_cp = portage.dep_getkey(atom)
1745
				try:
1746
					pprovided = pprovideddict.get(portage.dep_getkey(atom))
1747
					if pprovided and portage.match_from_list(atom, pprovided):
1748
						# A provided package has been specified on the command line.
1749
						self._pprovided_args.append((arg, atom))
1750
						continue
1751
					if isinstance(arg, PackageArg):
1752
						if not self._add_pkg(arg.package, dep) or \
1753
							not self._create_graph():
1754
							sys.stderr.write(("\n\n!!! Problem resolving " + \
1755
								"dependencies for %s\n") % arg.arg)
1756
							return 0, myfavorites
1757
						continue
1758
					if debug:
1759
						portage.writemsg("      Arg: %s\n     Atom: %s\n" % \
1760
							(arg, atom), noiselevel=-1)
1761
					pkg, existing_node = self._select_package(
1762
						myroot, atom, onlydeps=onlydeps)
1763
					if not pkg:
1764
						if not (isinstance(arg, SetArg) and \
1765
							arg.name in ("system", "world")):
1766
							self._unsatisfied_deps_for_display.append(
1767
								((myroot, atom), {}))
1768
							return 0, myfavorites
1769
						self._missing_args.append((arg, atom))
1770
						continue
1771
					if atom_cp != pkg.cp:
1772
						# For old-style virtuals, we need to repeat the
1773
						# package.provided check against the selected package.
1774
						expanded_atom = atom.replace(atom_cp, pkg.cp)
1775
						pprovided = pprovideddict.get(pkg.cp)
1776
						if pprovided and \
1777
							portage.match_from_list(expanded_atom, pprovided):
1778
							# A provided package has been
1779
							# specified on the command line.
1780
							self._pprovided_args.append((arg, atom))
1781
							continue
1782
					if pkg.installed and "selective" not in self.myparams:
1783
						self._unsatisfied_deps_for_display.append(
1784
							((myroot, atom), {}))
1785
						# Previous behavior was to bail out in this case, but
1786
						# since the dep is satisfied by the installed package,
1787
						# it's more friendly to continue building the graph
1788
						# and just show a warning message. Therefore, only bail
1789
						# out here if the atom is not from either the system or
1790
						# world set.
1791
						if not (isinstance(arg, SetArg) and \
1792
							arg.name in ("system", "world")):
1793
							return 0, myfavorites
1794
1795
					# Add the selected package to the graph as soon as possible
1796
					# so that later dep_check() calls can use it as feedback
1797
					# for making more consistent atom selections.
1798
					if not self._add_pkg(pkg, dep):
1799
						if isinstance(arg, SetArg):
1800
							sys.stderr.write(("\n\n!!! Problem resolving " + \
1801
								"dependencies for %s from %s\n") % \
1802
								(atom, arg.arg))
1803
						else:
1804
							sys.stderr.write(("\n\n!!! Problem resolving " + \
1805
								"dependencies for %s\n") % atom)
1806
						return 0, myfavorites
1807
1808
				except portage.exception.MissingSignature, e:
1809
					portage.writemsg("\n\n!!! A missing gpg signature is preventing portage from calculating the\n")
1810
					portage.writemsg("!!! required dependencies. This is a security feature enabled by the admin\n")
1811
					portage.writemsg("!!! to aid in the detection of malicious intent.\n\n")
1812
					portage.writemsg("!!! THIS IS A POSSIBLE INDICATION OF TAMPERED FILES -- CHECK CAREFULLY.\n")
1813
					portage.writemsg("!!! Affected file: %s\n" % (e), noiselevel=-1)
1814
					return 0, myfavorites
1815
				except portage.exception.InvalidSignature, e:
1816
					portage.writemsg("\n\n!!! An invalid gpg signature is preventing portage from calculating the\n")
1817
					portage.writemsg("!!! required dependencies. This is a security feature enabled by the admin\n")
1818
					portage.writemsg("!!! to aid in the detection of malicious intent.\n\n")
1819
					portage.writemsg("!!! THIS IS A POSSIBLE INDICATION OF TAMPERED FILES -- CHECK CAREFULLY.\n")
1820
					portage.writemsg("!!! Affected file: %s\n" % (e), noiselevel=-1)
1821
					return 0, myfavorites
1822
				except SystemExit, e:
1823
					raise # Needed else can't exit
1824
				except Exception, e:
1825
					print >> sys.stderr, "\n\n!!! Problem in '%s' dependencies." % atom
1826
					print >> sys.stderr, "!!!", str(e), getattr(e, "__module__", None)
1827
					raise
1828
1829
		# Now that the root packages have been added to the graph,
1830
		# process the dependencies.
1831
		if not self._create_graph():
1832
			return 0, myfavorites
1833
1834
		missing=0
1835
		if "--usepkgonly" in self.myopts:
1836
			for xs in self.digraph.all_nodes():
1837
				if not isinstance(xs, Package):
1838
					continue
1839
				if len(xs) >= 4 and xs[0] != "binary" and xs[3] == "merge":
1840
					if missing == 0:
1841
						print
1842
					missing += 1
1843
					print "Missing binary for:",xs[2]
1844
1845
		try:
1846
			self.altlist()
1847
		except self._unknown_internal_error:
1848
			return False, myfavorites
1849
1850
		# We're true here unless we are missing binaries.
1851
		return (not missing,myfavorites)
1852
1853
	def _set_args(self, args):
		"""
		Create the "args" package set from atoms and packages given as
		arguments. This method can be called multiple times if necessary.
		The package selection cache is automatically invalidated, since
		arguments influence package selections.
		"""
		args_set = self._sets["args"]
		args_set.clear()
		for arg in args:
			if not isinstance(arg, (AtomArg, PackageArg)):
				continue
			atom = arg.atom
			if atom in args_set:
				continue
			args_set.add(atom)

		self._set_atoms.clear()
		self._set_atoms.update(chain(*self._sets.itervalues()))
		atom_arg_map = self._atom_arg_map
		atom_arg_map.clear()
		for arg in args:
			for atom in arg.set:
				atom_key = (atom, arg.root_config.root)
				refs = atom_arg_map.get(atom_key)
				if refs is None:
					refs = []
					atom_arg_map[atom_key] = refs
				# This membership test must run for every arg, not only
				# when the refs list is newly created; otherwise a second
				# argument sharing the same (atom, root) key would never
				# be recorded in the map.
				if arg not in refs:
					refs.append(arg)

		# Invalidate the package selection cache, since
		# arguments influence package selections.
		self._highest_pkg_cache.clear()
		for trees in self._filtered_trees.itervalues():
			trees["porttree"].dbapi._clear_cache()
1889
1890
	def _greedy_slots(self, root_config, atom, blocker_lookahead=False):
		"""
		Return a list of slot atoms corresponding to installed slots that
		differ from the slot of the highest visible match. When
		blocker_lookahead is True, slot atoms that would trigger a blocker
		conflict are automatically discarded, potentially allowing automatic
		uninstallation of older slots when appropriate.

		@param root_config: RootConfig whose vartree is scanned for
			installed slots of the atom's package
		@param atom: the dependency atom being expanded greedily
		@param blocker_lookahead: when True, evaluate DEPEND/PDEPEND/RDEPEND
			blockers of the candidates and drop any slot whose package
			conflicts with the highest match or with a higher candidate
		@return: list of slot atoms (possibly empty)
		"""
		highest_pkg, in_graph = self._select_package(root_config.root, atom)
		if highest_pkg is None:
			# Nothing visible matches the atom, so there is nothing
			# to expand.
			return []
		vardb = root_config.trees["vartree"].dbapi
		slots = set()
		for cpv in vardb.match(atom):
			# don't mix new virtuals with old virtuals
			if portage.cpv_getkey(cpv) == highest_pkg.cp:
				slots.add(vardb.aux_get(cpv, ["SLOT"])[0])

		slots.add(highest_pkg.metadata["SLOT"])
		if len(slots) == 1:
			# Only the slot of the highest match is involved, so there
			# are no additional slots to pull in.
			return []
		greedy_pkgs = []
		slots.remove(highest_pkg.metadata["SLOT"])
		while slots:
			slot = slots.pop()
			slot_atom = portage.dep.Atom("%s:%s" % (highest_pkg.cp, slot))
			pkg, in_graph = self._select_package(root_config.root, slot_atom)
			if pkg is not None and \
				pkg.cp == highest_pkg.cp and pkg < highest_pkg:
				greedy_pkgs.append(pkg)
		if not greedy_pkgs:
			return []
		if not blocker_lookahead:
			return [pkg.slot_atom for pkg in greedy_pkgs]

		# Collect the blocker atoms declared by each candidate (and by
		# the highest match itself) so conflicts can be detected below.
		blockers = {}
		blocker_dep_keys = ["DEPEND", "PDEPEND", "RDEPEND"]
		for pkg in greedy_pkgs + [highest_pkg]:
			dep_str = " ".join(pkg.metadata[k] for k in blocker_dep_keys)
			try:
				atoms = self._select_atoms(
					pkg.root, dep_str, pkg.use.enabled,
					parent=pkg, strict=True)
			except portage.exception.InvalidDependString:
				# Packages with invalid deps are simply left out of the
				# blockers map and filtered out below.
				continue
			blocker_atoms = (x for x in atoms if x.blocker)
			blockers[pkg] = InternalPackageSet(initial_atoms=blocker_atoms)

		if highest_pkg not in blockers:
			return []

		# filter packages with invalid deps
		greedy_pkgs = [pkg for pkg in greedy_pkgs if pkg in blockers]

		# filter packages that conflict with highest_pkg
		greedy_pkgs = [pkg for pkg in greedy_pkgs if not \
			(blockers[highest_pkg].findAtomForPackage(pkg) or \
			blockers[pkg].findAtomForPackage(highest_pkg))]

		if not greedy_pkgs:
			return []

		# If two packages conflict, discard the lower version.
		discard_pkgs = set()
		greedy_pkgs.sort(reverse=True)
		for i in xrange(len(greedy_pkgs) - 1):
			pkg1 = greedy_pkgs[i]
			if pkg1 in discard_pkgs:
				continue
			for j in xrange(i + 1, len(greedy_pkgs)):
				pkg2 = greedy_pkgs[j]
				if pkg2 in discard_pkgs:
					continue
				if blockers[pkg1].findAtomForPackage(pkg2) or \
					blockers[pkg2].findAtomForPackage(pkg1):
					# pkg1 > pkg2
					discard_pkgs.add(pkg2)

		return [pkg.slot_atom for pkg in greedy_pkgs \
			if pkg not in discard_pkgs]
1970
1971
	def _select_atoms_from_graph(self, *pargs, **kwargs):
		"""
		Variant of _select_atoms_highest_available that resolves against
		the graph trees, so atoms matching packages that have already
		been added to the graph, or that are installed and not scheduled
		for replacement, are preferred.
		"""
		graph_kwargs = dict(kwargs)
		graph_kwargs["trees"] = self._graph_trees
		return self._select_atoms_highest_available(*pargs, **graph_kwargs)
1979
1980
	def _select_atoms_highest_available(self, root, depstring,
		myuse=None, parent=None, strict=True, trees=None, priority=None):
		"""
		Evaluate a dependency string with dep_check() and return the
		selected atoms. This will raise InvalidDependString if necessary.
		If trees is None then self._filtered_trees is used.

		@param root: root whose pkgsettings are used for evaluation
		@param depstring: raw dependency string to evaluate
		@param myuse: USE flags forwarded to dep_check()
		@param parent: requesting package; only forwarded for buildtime
			dependencies (see comment below)
		@param strict: when False, dep_check() runs in non-strict mode
		@param trees: alternate trees mapping (defaults to
			self._filtered_trees)
		@param priority: dependency priority; only its "buildtime"
			attribute is consulted
		@raise portage.exception.InvalidDependString: when dep_check()
			reports failure
		"""
		pkgsettings = self.pkgsettings[root]
		if trees is None:
			trees = self._filtered_trees
		if not getattr(priority, "buildtime", False):
			# The parent should only be passed to dep_check() for buildtime
			# dependencies since that's the only case when it's appropriate
			# to trigger the circular dependency avoidance code which uses it.
			# It's important not to trigger the same circular dependency
			# avoidance code for runtime dependencies since it's not needed
			# and it can promote an incorrect package choice.
			parent = None
		# NOTE: a vestigial "if True:" wrapper was removed here; the
		# try/finally below is the entire former body.
		try:
			if parent is not None:
				# dep_check() reads the parent out of the trees mapping.
				trees[root]["parent"] = parent
			if not strict:
				portage.dep._dep_check_strict = False
			mycheck = portage.dep_check(depstring, None,
				pkgsettings, myuse=myuse,
				myroot=root, trees=trees)
		finally:
			if parent is not None:
				trees[root].pop("parent")
			# Always restore the module-global strict flag.
			portage.dep._dep_check_strict = True
		if not mycheck[0]:
			raise portage.exception.InvalidDependString(mycheck[1])
		selected_atoms = mycheck[1]
		return selected_atoms
2012
2013
	def _show_unsatisfied_dep(self, root, atom, myparent=None, arg=None):
2014
		atom = portage.dep.Atom(atom)
2015
		atom_set = InternalPackageSet(initial_atoms=(atom,))
2016
		atom_without_use = atom
2017
		if atom.use:
2018
			atom_without_use = portage.dep.remove_slot(atom)
2019
			if atom.slot:
2020
				atom_without_use += ":" + atom.slot
2021
			atom_without_use = portage.dep.Atom(atom_without_use)
2022
		xinfo = '"%s"' % atom
2023
		if arg:
2024
			xinfo='"%s"' % arg
2025
		# Discard null/ from failed cpv_expand category expansion.
2026
		xinfo = xinfo.replace("null/", "")
2027
		masked_packages = []
2028
		missing_use = []
2029
		masked_pkg_instances = set()
2030
		missing_licenses = []
2031
		have_eapi_mask = False
2032
		pkgsettings = self.pkgsettings[root]
2033
		implicit_iuse = pkgsettings._get_implicit_iuse()
2034
		root_config = self.roots[root]
2035
		portdb = self.roots[root].trees["porttree"].dbapi
2036
		dbs = self._filtered_trees[root]["dbs"]
2037
		for db, pkg_type, built, installed, db_keys in dbs:
2038
			if installed:
2039
				continue
2040
			match = db.match
2041
			if hasattr(db, "xmatch"):
2042
				cpv_list = db.xmatch("match-all", atom_without_use)
2043
			else:
2044
				cpv_list = db.match(atom_without_use)
2045
			# descending order
2046
			cpv_list.reverse()
2047
			for cpv in cpv_list:
2048
				metadata, mreasons  = get_mask_info(root_config, cpv,
2049
					pkgsettings, db, pkg_type, built, installed, db_keys)
2050
				if metadata is not None:
2051
					pkg = Package(built=built, cpv=cpv,
2052
						installed=installed, metadata=metadata,
2053
						root_config=root_config)
2054
					if pkg.cp != atom.cp:
2055
						# A cpv can be returned from dbapi.match() as an
2056
						# old-style virtual match even in cases when the
2057
						# package does not actually PROVIDE the virtual.
2058
						# Filter out any such false matches here.
2059
						if not atom_set.findAtomForPackage(pkg):
2060
							continue
2061
					if mreasons:
2062
						masked_pkg_instances.add(pkg)
2063
					if atom.use:
2064
						missing_use.append(pkg)
2065
						if not mreasons:
2066
							continue
2067
				masked_packages.append(
2068
					(root_config, pkgsettings, cpv, metadata, mreasons))
2069
2070
		missing_use_reasons = []
2071
		missing_iuse_reasons = []
2072
		for pkg in missing_use:
2073
			use = pkg.use.enabled
2074
			iuse = implicit_iuse.union(re.escape(x) for x in pkg.iuse.all)
2075
			iuse_re = re.compile("^(%s)$" % "|".join(iuse))
2076
			missing_iuse = []
2077
			for x in atom.use.required:
2078
				if iuse_re.match(x) is None:
2079
					missing_iuse.append(x)
2080
			mreasons = []
2081
			if missing_iuse:
2082
				mreasons.append("Missing IUSE: %s" % " ".join(missing_iuse))
2083
				missing_iuse_reasons.append((pkg, mreasons))
2084
			else:
2085
				need_enable = sorted(atom.use.enabled.difference(use))
2086
				need_disable = sorted(atom.use.disabled.intersection(use))
2087
				if need_enable or need_disable:
2088
					changes = []
2089
					changes.extend(colorize("red", "+" + x) \
2090
						for x in need_enable)
2091
					changes.extend(colorize("blue", "-" + x) \
2092
						for x in need_disable)
2093
					mreasons.append("Change USE: %s" % " ".join(changes))
2094
					missing_use_reasons.append((pkg, mreasons))
2095
2096
		unmasked_use_reasons = [(pkg, mreasons) for (pkg, mreasons) \
2097
			in missing_use_reasons if pkg not in masked_pkg_instances]
2098
2099
		unmasked_iuse_reasons = [(pkg, mreasons) for (pkg, mreasons) \
2100
			in missing_iuse_reasons if pkg not in masked_pkg_instances]
2101
2102
		show_missing_use = False
2103
		if unmasked_use_reasons:
2104
			# Only show the latest version.
2105
			show_missing_use = unmasked_use_reasons[:1]
2106
		elif unmasked_iuse_reasons:
2107
			if missing_use_reasons:
2108
				# All packages with required IUSE are masked,
2109
				# so display a normal masking message.
2110
				pass
2111
			else:
2112
				show_missing_use = unmasked_iuse_reasons
2113
2114
		if show_missing_use:
2115
			print "\nemerge: there are no ebuilds built with USE flags to satisfy "+green(xinfo)+"."
2116
			print "!!! One of the following packages is required to complete your request:"
2117
			for pkg, mreasons in show_missing_use:
2118
				print "- "+pkg.cpv+" ("+", ".join(mreasons)+")"
2119
2120
		elif masked_packages:
2121
			print "\n!!! " + \
2122
				colorize("BAD", "All ebuilds that could satisfy ") + \
2123
				colorize("INFORM", xinfo) + \
2124
				colorize("BAD", " have been masked.")
2125
			print "!!! One of the following masked packages is required to complete your request:"
2126
			have_eapi_mask = show_masked_packages(masked_packages)
2127
			if have_eapi_mask:
2128
				print
2129
				msg = ("The current version of portage supports " + \
2130
					"EAPI '%s'. You must upgrade to a newer version" + \
2131
					" of portage before EAPI masked packages can" + \
2132
					" be installed.") % portage.const.EAPI
2133
				from textwrap import wrap
2134
				for line in wrap(msg, 75):
2135
					print line
2136
			print
2137
			show_mask_docs()
2138
		else:
2139
			print "\nemerge: there are no ebuilds to satisfy "+green(xinfo)+"."
2140
2141
		# Show parent nodes and the argument that pulled them in.
2142
		traversed_nodes = set()
2143
		node = myparent
2144
		msg = []
2145
		while node is not None:
2146
			traversed_nodes.add(node)
2147
			msg.append('(dependency required by "%s" [%s])' % \
2148
				(colorize('INFORM', str(node.cpv)), node.type_name))
2149
			# When traversing to parents, prefer arguments over packages
2150
			# since arguments are root nodes. Never traverse the same
2151
			# package twice, in order to prevent an infinite loop.
2152
			selected_parent = None
2153
			for parent in self.digraph.parent_nodes(node):
2154
				if isinstance(parent, DependencyArg):
2155
					msg.append('(dependency required by "%s" [argument])' % \
2156
						(colorize('INFORM', str(parent))))
2157
					selected_parent = None
2158
					break
2159
				if parent not in traversed_nodes:
2160
					selected_parent = parent
2161
			node = selected_parent
2162
		for line in msg:
2163
			print line
2164
2165
		print
2166
2167
	def _select_pkg_highest_available(self, root, atom, onlydeps=False):
		"""
		Memoizing wrapper around _select_pkg_highest_available_imp.
		Results are cached per (root, atom, onlydeps). A cached entry
		whose package has since been added to the graph is refreshed to
		record it as the existing node, and freshly selected visible
		packages are injected into root_config.visible_pkgs.
		"""
		key = (root, atom, onlydeps)
		cached = self._highest_pkg_cache.get(key)
		if cached is not None:
			cached_pkg, cached_existing = cached
			if cached_pkg and not cached_existing:
				graphed = self._slot_pkg_map[root].get(cached_pkg.slot_atom)
				if graphed and graphed == cached_pkg:
					# Update the cache to reflect that the
					# package has been added to the graph.
					cached = cached_pkg, cached_pkg
					self._highest_pkg_cache[key] = cached
			return cached
		selection = self._select_pkg_highest_available_imp(
			root, atom, onlydeps=onlydeps)
		self._highest_pkg_cache[key] = selection
		selected_pkg = selection[0]
		if selected_pkg is not None:
			settings = selected_pkg.root_config.settings
			if visible(settings, selected_pkg) and not \
				(selected_pkg.installed and settings._getMissingKeywords(
				selected_pkg.cpv, selected_pkg.metadata)):
				selected_pkg.root_config.visible_pkgs.cpv_inject(selected_pkg)
		return selection
2189
2190
	def _select_pkg_highest_available_imp(self, root, atom, onlydeps=False):
		"""
		Scan the filtered trees for the best package satisfying the atom
		and return a (pkg, existing_node) tuple, or (None, None) when no
		acceptable match exists. The first pass (find_existing_node=True)
		prefers packages already present in the graph; the second pass
		considers all databases, applying visibility, USE-dep, --noreplace,
		--newuse/--reinstall and old-style-virtual filtering along the way.
		Callers normally use the caching wrapper
		_select_pkg_highest_available instead of this method.
		"""
		root_config = self.roots[root]
		pkgsettings = self.pkgsettings[root]
		dbs = self._filtered_trees[root]["dbs"]
		vardb = self.roots[root].trees["vartree"].dbapi
		# List of acceptable packages, ordered by type preference.
		matched_packages = []
		highest_version = None
		if not isinstance(atom, portage.dep.Atom):
			atom = portage.dep.Atom(atom)
		atom_cp = atom.cp
		atom_set = InternalPackageSet(initial_atoms=(atom,))
		existing_node = None
		myeb = None
		usepkgonly = "--usepkgonly" in self.myopts
		empty = "empty" in self.myparams
		selective = "selective" in self.myparams
		reinstall = False
		noreplace = "--noreplace" in self.myopts
		# Behavior of the "selective" parameter depends on
		# whether or not a package matches an argument atom.
		# If an installed package provides an old-style
		# virtual that is no longer provided by an available
		# package, the installed package may match an argument
		# atom even though none of the available packages do.
		# Therefore, "selective" logic does not consider
		# whether or not an installed package matches an
		# argument atom. It only considers whether or not
		# available packages match argument atoms, which is
		# represented by the found_available_arg flag.
		found_available_arg = False
		for find_existing_node in True, False:
			if existing_node:
				break
			for db, pkg_type, built, installed, db_keys in dbs:
				if existing_node:
					break
				if installed and not find_existing_node:
					want_reinstall = reinstall or empty or \
						(found_available_arg and not selective)
					if want_reinstall and matched_packages:
						continue
				if hasattr(db, "xmatch"):
					cpv_list = db.xmatch("match-all", atom)
				else:
					cpv_list = db.match(atom)

				# USE=multislot can make an installed package appear as if
				# it doesn't satisfy a slot dependency. Rebuilding the ebuild
				# won't do any good as long as USE=multislot is enabled since
				# the newly built package still won't have the expected slot.
				# Therefore, assume that such SLOT dependencies are already
				# satisfied rather than forcing a rebuild.
				if installed and not cpv_list and atom.slot:
					for cpv in db.match(atom.cp):
						slot_available = False
						for other_db, other_type, other_built, \
							other_installed, other_keys in dbs:
							try:
								if atom.slot == \
									other_db.aux_get(cpv, ["SLOT"])[0]:
									slot_available = True
									break
							except KeyError:
								pass
						if not slot_available:
							continue
						inst_pkg = self._pkg(cpv, "installed",
							root_config, installed=installed)
						# Remove the slot from the atom and verify that
						# the package matches the resulting atom.
						atom_without_slot = portage.dep.remove_slot(atom)
						if atom.use:
							atom_without_slot += str(atom.use)
						atom_without_slot = portage.dep.Atom(atom_without_slot)
						if portage.match_from_list(
							atom_without_slot, [inst_pkg]):
							cpv_list = [inst_pkg.cpv]
						break

				if not cpv_list:
					continue
				pkg_status = "merge"
				if installed or onlydeps:
					pkg_status = "nomerge"
				# descending order
				cpv_list.reverse()
				for cpv in cpv_list:
					# Make --noreplace take precedence over --newuse.
					if not installed and noreplace and \
						cpv in vardb.match(atom):
						# If the installed version is masked, it may
						# be necessary to look at lower versions,
						# in case there is a visible downgrade.
						continue
					reinstall_for_flags = None
					cache_key = (pkg_type, root, cpv, pkg_status)
					calculated_use = True
					pkg = self._pkg_cache.get(cache_key)
					if pkg is None:
						calculated_use = False
						try:
							metadata = izip(db_keys, db.aux_get(cpv, db_keys))
						except KeyError:
							continue
						pkg = Package(built=built, cpv=cpv,
							installed=installed, metadata=metadata,
							onlydeps=onlydeps, root_config=root_config,
							type_name=pkg_type)
						metadata = pkg.metadata
						if not built:
							metadata['CHOST'] = pkgsettings.get('CHOST', '')
						if not built and ("?" in metadata["LICENSE"] or \
							"?" in metadata["PROVIDE"]):
							# This is avoided whenever possible because
							# it's expensive. It only needs to be done here
							# if it has an effect on visibility.
							pkgsettings.setcpv(pkg)
							metadata["USE"] = pkgsettings["PORTAGE_USE"]
							calculated_use = True
						self._pkg_cache[pkg] = pkg

					if not installed or (built and matched_packages):
						# Only enforce visibility on installed packages
						# if there is at least one other visible package
						# available. By filtering installed masked packages
						# here, packages that have been masked since they
						# were installed can be automatically downgraded
						# to an unmasked version.
						try:
							if not visible(pkgsettings, pkg):
								continue
						except portage.exception.InvalidDependString:
							if not installed:
								continue

						# Enable upgrade or downgrade to a version
						# with visible KEYWORDS when the installed
						# version is masked by KEYWORDS, but never
						# reinstall the same exact version only due
						# to a KEYWORDS mask.
						if built and matched_packages:

							different_version = None
							for avail_pkg in matched_packages:
								if not portage.dep.cpvequal(
									pkg.cpv, avail_pkg.cpv):
									different_version = avail_pkg
									break
							if different_version is not None:

								if installed and \
									pkgsettings._getMissingKeywords(
									pkg.cpv, pkg.metadata):
									continue

								# If the ebuild no longer exists or it's
								# keywords have been dropped, reject built
								# instances (installed or binary).
								# If --usepkgonly is enabled, assume that
								# the ebuild status should be ignored.
								if not usepkgonly:
									try:
										pkg_eb = self._pkg(
											pkg.cpv, "ebuild", root_config)
									except portage.exception.PackageNotFound:
										continue
									else:
										if not visible(pkgsettings, pkg_eb):
											continue

					if not pkg.built and not calculated_use:
						# This is avoided whenever possible because
						# it's expensive.
						pkgsettings.setcpv(pkg)
						pkg.metadata["USE"] = pkgsettings["PORTAGE_USE"]

					if pkg.cp != atom.cp:
						# A cpv can be returned from dbapi.match() as an
						# old-style virtual match even in cases when the
						# package does not actually PROVIDE the virtual.
						# Filter out any such false matches here.
						if not atom_set.findAtomForPackage(pkg):
							continue

					myarg = None
					if root == self.target_root:
						try:
							# Ebuild USE must have been calculated prior
							# to this point, in case atoms have USE deps.
							myarg = self._iter_atoms_for_pkg(pkg).next()
						except StopIteration:
							pass
						except portage.exception.InvalidDependString:
							if not installed:
								# masked by corruption
								continue
					if not installed and myarg:
						found_available_arg = True

					if atom.use and not pkg.built:
						use = pkg.use.enabled
						if atom.use.enabled.difference(use):
							continue
						if atom.use.disabled.intersection(use):
							continue
					if pkg.cp == atom_cp:
						if highest_version is None:
							highest_version = pkg
						elif pkg > highest_version:
							highest_version = pkg
					# At this point, we've found the highest visible
					# match from the current repo. Any lower versions
					# from this repo are ignored, so the loop
					# will always end with a break statement below
					# this point.
					if find_existing_node:
						e_pkg = self._slot_pkg_map[root].get(pkg.slot_atom)
						if not e_pkg:
							break
						if portage.dep.match_from_list(atom, [e_pkg]):
							if highest_version and \
								e_pkg.cp == atom_cp and \
								e_pkg < highest_version and \
								e_pkg.slot_atom != highest_version.slot_atom:
								# There is a higher version available in a
								# different slot, so this existing node is
								# irrelevant.
								pass
							else:
								matched_packages.append(e_pkg)
								existing_node = e_pkg
						break
					# Compare built package to current config and
					# reject the built package if necessary.
					if built and not installed and \
						("--newuse" in self.myopts or \
						"--reinstall" in self.myopts):
						iuses = pkg.iuse.all
						old_use = pkg.use.enabled
						if myeb:
							pkgsettings.setcpv(myeb)
						else:
							pkgsettings.setcpv(pkg)
						now_use = pkgsettings["PORTAGE_USE"].split()
						forced_flags = set()
						forced_flags.update(pkgsettings.useforce)
						forced_flags.update(pkgsettings.usemask)
						cur_iuse = iuses
						if myeb and not usepkgonly:
							cur_iuse = myeb.iuse.all
						if self._reinstall_for_flags(forced_flags,
							old_use, iuses,
							now_use, cur_iuse):
							break
					# Compare current config to installed package
					# and do not reinstall if possible.
					if not installed and \
						("--newuse" in self.myopts or \
						"--reinstall" in self.myopts) and \
						cpv in vardb.match(atom):
						pkgsettings.setcpv(pkg)
						forced_flags = set()
						forced_flags.update(pkgsettings.useforce)
						forced_flags.update(pkgsettings.usemask)
						old_use = vardb.aux_get(cpv, ["USE"])[0].split()
						old_iuse = set(filter_iuse_defaults(
							vardb.aux_get(cpv, ["IUSE"])[0].split()))
						cur_use = pkg.use.enabled
						cur_iuse = pkg.iuse.all
						reinstall_for_flags = \
							self._reinstall_for_flags(
							forced_flags, old_use, old_iuse,
							cur_use, cur_iuse)
						if reinstall_for_flags:
							reinstall = True
					if not built:
						myeb = pkg
					matched_packages.append(pkg)
					if reinstall_for_flags:
						self._reinstall_nodes[pkg] = \
							reinstall_for_flags
					break

		if not matched_packages:
			return None, None

		if "--debug" in self.myopts:
			for pkg in matched_packages:
				portage.writemsg("%s %s\n" % \
					((pkg.type_name + ":").rjust(10), pkg.cpv), noiselevel=-1)

		# Filter out any old-style virtual matches if they are
		# mixed with new-style virtual matches.
		cp = portage.dep_getkey(atom)
		if len(matched_packages) > 1 and \
			"virtual" == portage.catsplit(cp)[0]:
			for pkg in matched_packages:
				if pkg.cp != cp:
					continue
				# Got a new-style virtual, so filter
				# out any old-style virtuals.
				matched_packages = [pkg for pkg in matched_packages \
					if pkg.cp == cp]
				break

		if len(matched_packages) > 1:
			bestmatch = portage.best(
				[pkg.cpv for pkg in matched_packages])
			matched_packages = [pkg for pkg in matched_packages \
				if portage.dep.cpvequal(pkg.cpv, bestmatch)]

		# ordered by type preference ("ebuild" type is the last resort)
		return  matched_packages[-1], existing_node
2505
2506
	def _select_pkg_from_graph(self, root, atom, onlydeps=False):
		"""
		Choose a package for the given atom, restricted to packages that
		have already been added to the dependency graph or that are
		installed and not scheduled for replacement.
		"""
		graph_dbapi = self._graph_trees[root]["porttree"].dbapi
		candidates = graph_dbapi.match_pkgs(atom)
		if not candidates:
			return None, None
		# match_pkgs() returns matches in ascending order, so the
		# last entry is the highest version.
		best = candidates[-1]
		existing = self._slot_pkg_map[root].get(best.slot_atom)
		return best, existing
2519
2520
	def _complete_graph(self):
		"""
		Add any deep dependencies of required sets (args, system, world) that
		have not been pulled into the graph yet. This ensures that the graph
		is consistent such that initially satisfied deep dependencies are not
		broken in the new graph. Initially unsatisfied dependencies are
		irrelevant since we only want to avoid breaking dependencies that are
		initially satisfied.

		Since this method can consume enough time to disturb users, it is
		currently only enabled by the --complete-graph option.

		Returns 1 on success and 0 if graph creation failed.
		"""
		# Nothing to do when deps aren't being traversed at all, or when
		# --buildpkgonly was requested (no live system changes).
		if "--buildpkgonly" in self.myopts or \
			"recurse" not in self.myparams:
			return 1

		if "complete" not in self.myparams:
			# Skip this to avoid consuming enough time to disturb users.
			return 1

		# Put the depgraph into a mode that causes it to only
		# select packages that have already been added to the
		# graph or those that are installed and have not been
		# scheduled for replacement. Also, toggle the "deep"
		# parameter so that all dependencies are traversed and
		# accounted for.
		self._select_atoms = self._select_atoms_from_graph
		self._select_package = self._select_pkg_from_graph
		already_deep = "deep" in self.myparams
		if not already_deep:
			self.myparams.add("deep")

		for root in self.roots:
			required_set_names = self._required_set_names.copy()
			# Sets already handled for the target root can be skipped
			# when their deps were fully traversed in the main pass.
			if root == self.target_root and \
				(already_deep or "empty" in self.myparams):
				required_set_names.difference_update(self._sets)
			if not required_set_names and not self._ignored_deps:
				continue
			root_config = self.roots[root]
			setconfig = root_config.setconfig
			args = []
			# Reuse existing SetArg instances when available.
			for arg in self.digraph.root_nodes():
				if not isinstance(arg, SetArg):
					continue
				if arg.root_config != root_config:
					continue
				if arg.name in required_set_names:
					args.append(arg)
					required_set_names.remove(arg.name)
			# Create new SetArg instances only when necessary.
			for s in required_set_names:
				expanded_set = InternalPackageSet(
					initial_atoms=setconfig.getSetAtoms(s))
				atom = SETPREFIX + s
				args.append(SetArg(arg=atom, set=expanded_set,
					root_config=root_config))
			vardb = root_config.trees["vartree"].dbapi
			# Queue every atom of every required set for (re)traversal.
			for arg in args:
				for atom in arg.set:
					self._dep_stack.append(
						Dependency(atom=atom, root=root, parent=arg))
			if self._ignored_deps:
				self._dep_stack.extend(self._ignored_deps)
				self._ignored_deps = []
			if not self._create_graph(allow_unsatisfied=True):
				return 0
			# Check the unsatisfied deps to see if any initially satisfied deps
			# will become unsatisfied due to an upgrade. Initially unsatisfied
			# deps are irrelevant since we only want to avoid breaking deps
			# that are initially satisfied.
			while self._unsatisfied_deps:
				dep = self._unsatisfied_deps.pop()
				matches = vardb.match_pkgs(dep.atom)
				if not matches:
					self._initially_unsatisfied_deps.append(dep)
					continue
				# A scheduled installation broke a deep dependency.
				# Add the installed package to the graph so that it
				# will be appropriately reported as a slot collision
				# (possibly solvable via backtracking).
				pkg = matches[-1] # highest match
				if not self._add_pkg(pkg, dep):
					return 0
				if not self._create_graph(allow_unsatisfied=True):
					return 0
		return 1
2608
2609
	def _pkg(self, cpv, type_name, root_config, installed=False):
		"""
		Get a package instance from the cache, or create a new
		one if necessary. Raises portage.exception.PackageNotFound
		when aux_get raises KeyError (package does not exist or is
		corrupt).
		"""
		# Cache keys distinguish install ("merge") from already-installed
		# ("nomerge") instances of the same cpv.
		operation = "merge"
		if installed:
			operation = "nomerge"
		pkg = self._pkg_cache.get(
			(type_name, root_config.root, cpv, operation))
		if pkg is None:
			tree_type = self.pkg_tree_map[type_name]
			db = root_config.trees[tree_type].dbapi
			# Use the original (non-fake) trees for the aux cache key list.
			db_keys = list(self._trees_orig[root_config.root][
				tree_type].dbapi._aux_cache_keys)
			try:
				metadata = izip(db_keys, db.aux_get(cpv, db_keys))
			except KeyError:
				raise portage.exception.PackageNotFound(cpv)
			pkg = Package(cpv=cpv, metadata=metadata,
				root_config=root_config, installed=installed)
			if type_name == "ebuild":
				# For ebuilds, USE/CHOST come from the current config
				# rather than stored metadata.
				settings = self.pkgsettings[root_config.root]
				settings.setcpv(pkg)
				pkg.metadata["USE"] = settings["PORTAGE_USE"]
				pkg.metadata['CHOST'] = settings.get('CHOST', '')
			self._pkg_cache[pkg] = pkg
		return pkg
2639
2640
	def validate_blockers(self):
		"""Remove any blockers from the digraph that do not match any of the
		packages within the graph.  If necessary, create hard deps to ensure
		correct merge order such that mutually blocking packages are never
		installed simultaneously.

		Returns True on success and False when an invalid dependency
		string or invalid atom prevents validation."""

		if "--buildpkgonly" in self.myopts or \
			"--nodeps" in self.myopts:
			return True

		# NOTE: this branch is currently hardwired on; the commented
		# condition shows the originally intended guard.
		#if "deep" in self.myparams:
		if True:
			# Pull in blockers from all installed packages that haven't already
			# been pulled into the depgraph.  This is not enabled by default
			# due to the performance penalty that is incurred by all the
			# additional dep_check calls that are required.

			dep_keys = ["DEPEND","RDEPEND","PDEPEND"]
			for myroot in self.trees:
				vardb = self.trees[myroot]["vartree"].dbapi
				portdb = self.trees[myroot]["porttree"].dbapi
				pkgsettings = self.pkgsettings[myroot]
				final_db = self.mydbapi[myroot]

				# Persistent per-root cache of blocker atoms, invalidated
				# via the package COUNTER below.
				blocker_cache = BlockerCache(myroot, vardb)
				stale_cache = set(blocker_cache)
				for pkg in vardb:
					cpv = pkg.cpv
					stale_cache.discard(cpv)
					pkg_in_graph = self.digraph.contains(pkg)

					# Check for masked installed packages. Only warn about
					# packages that are in the graph in order to avoid warning
					# about those that will be automatically uninstalled during
					# the merge process or by --depclean.
					if pkg in final_db:
						if pkg_in_graph and not visible(pkgsettings, pkg):
							self._masked_installed.add(pkg)

					blocker_atoms = None
					blockers = None
					if pkg_in_graph:
						blockers = []
						try:
							blockers.extend(
								self._blocker_parents.child_nodes(pkg))
						except KeyError:
							pass
						try:
							blockers.extend(
								self._irrelevant_blockers.child_nodes(pkg))
						except KeyError:
							pass
					if blockers is not None:
						blockers = set(str(blocker.atom) \
							for blocker in blockers)

					# If this node has any blockers, create a "nomerge"
					# node for it so that they can be enforced.
					self.spinner.update()
					blocker_data = blocker_cache.get(cpv)
					# A COUNTER mismatch means the installed instance has
					# changed since the cache entry was written.
					if blocker_data is not None and \
						blocker_data.counter != long(pkg.metadata["COUNTER"]):
						blocker_data = None

					# If blocker data from the graph is available, use
					# it to validate the cache and update the cache if
					# it seems invalid.
					if blocker_data is not None and \
						blockers is not None:
						if not blockers.symmetric_difference(
							blocker_data.atoms):
							continue
						blocker_data = None

					if blocker_data is None and \
						blockers is not None:
						# Re-use the blockers from the graph.
						blocker_atoms = sorted(blockers)
						counter = long(pkg.metadata["COUNTER"])
						blocker_data = \
							blocker_cache.BlockerData(counter, blocker_atoms)
						blocker_cache[pkg.cpv] = blocker_data
						continue

					if blocker_data:
						blocker_atoms = blocker_data.atoms
					else:
						# Use aux_get() to trigger FakeVartree global
						# updates on *DEPEND when appropriate.
						depstr = " ".join(vardb.aux_get(pkg.cpv, dep_keys))
						# It is crucial to pass in final_db here in order to
						# optimize dep_check calls by eliminating atoms via
						# dep_wordreduce and dep_eval calls.
						try:
							portage.dep._dep_check_strict = False
							try:
								success, atoms = portage.dep_check(depstr,
									final_db, pkgsettings, myuse=pkg.use.enabled,
									trees=self._graph_trees, myroot=myroot)
							except Exception, e:
								if isinstance(e, SystemExit):
									raise
								# This is helpful, for example, if a ValueError
								# is thrown from cpv_expand due to multiple
								# matches (this can happen if an atom lacks a
								# category).
								show_invalid_depstring_notice(
									pkg, depstr, str(e))
								del e
								raise
						finally:
							# Always restore strict mode, even on error.
							portage.dep._dep_check_strict = True
						if not success:
							replacement_pkg = final_db.match_pkgs(pkg.slot_atom)
							if replacement_pkg and \
								replacement_pkg[0].operation == "merge":
								# This package is being replaced anyway, so
								# ignore invalid dependencies so as not to
								# annoy the user too much (otherwise they'd be
								# forced to manually unmerge it first).
								continue
							show_invalid_depstring_notice(pkg, depstr, atoms)
							return False
						blocker_atoms = [myatom for myatom in atoms \
							if myatom.startswith("!")]
						blocker_atoms.sort()
						counter = long(pkg.metadata["COUNTER"])
						blocker_cache[cpv] = \
							blocker_cache.BlockerData(counter, blocker_atoms)
					if blocker_atoms:
						try:
							for atom in blocker_atoms:
								blocker = Blocker(atom=portage.dep.Atom(atom),
									eapi=pkg.metadata["EAPI"], root=myroot)
								self._blocker_parents.add(blocker, pkg)
						except portage.exception.InvalidAtom, e:
							depstr = " ".join(vardb.aux_get(pkg.cpv, dep_keys))
							show_invalid_depstring_notice(
								pkg, depstr, "Invalid Atom: %s" % (e,))
							return False
				# Drop cache entries for packages no longer installed.
				for cpv in stale_cache:
					del blocker_cache[cpv]
				blocker_cache.flush()
				del blocker_cache

		# Discard any "uninstall" tasks scheduled by previous calls
		# to this method, since those tasks may not make sense given
		# the current graph state.
		previous_uninstall_tasks = self._blocker_uninstalls.leaf_nodes()
		if previous_uninstall_tasks:
			self._blocker_uninstalls = digraph()
			self.digraph.difference_update(previous_uninstall_tasks)

		# Second phase: resolve each collected blocker against the
		# initial (installed) and final (post-merge) package sets.
		for blocker in self._blocker_parents.leaf_nodes():
			self.spinner.update()
			root_config = self.roots[blocker.root]
			virtuals = root_config.settings.getvirtuals()
			myroot = blocker.root
			initial_db = self.trees[myroot]["vartree"].dbapi
			final_db = self.mydbapi[myroot]
			
			provider_virtual = False
			if blocker.cp in virtuals and \
				not self._have_new_virt(blocker.root, blocker.cp):
				provider_virtual = True

			# Use this to check PROVIDE for each matched package
			# when necessary.
			atom_set = InternalPackageSet(
				initial_atoms=[blocker.atom])

			if provider_virtual:
				# Expand an old-style virtual blocker into one atom
				# per configured provider.
				atoms = []
				for provider_entry in virtuals[blocker.cp]:
					provider_cp = \
						portage.dep_getkey(provider_entry)
					atoms.append(blocker.atom.replace(
						blocker.cp, provider_cp))
			else:
				atoms = [blocker.atom]

			blocked_initial = set()
			for atom in atoms:
				for pkg in initial_db.match_pkgs(atom):
					if atom_set.findAtomForPackage(pkg):
						blocked_initial.add(pkg)

			blocked_final = set()
			for atom in atoms:
				for pkg in final_db.match_pkgs(atom):
					if atom_set.findAtomForPackage(pkg):
						blocked_final.add(pkg)

			if not blocked_initial and not blocked_final:
				parent_pkgs = self._blocker_parents.parent_nodes(blocker)
				self._blocker_parents.remove(blocker)
				# Discard any parents that don't have any more blockers.
				for pkg in parent_pkgs:
					self._irrelevant_blockers.add(blocker, pkg)
					if not self._blocker_parents.child_nodes(pkg):
						self._blocker_parents.remove(pkg)
				continue
			for parent in self._blocker_parents.parent_nodes(blocker):
				unresolved_blocks = False
				depends_on_order = set()
				for pkg in blocked_initial:
					if pkg.slot_atom == parent.slot_atom:
						# TODO: Support blocks within slots in cases where it
						# might make sense.  For example, a new version might
						# require that the old version be uninstalled at build
						# time.
						continue
					if parent.installed:
						# Two currently installed packages conflict with
						# each other. Ignore this case since the damage
						# is already done and this would be likely to
						# confuse users if displayed like a normal blocker.
						continue

					self._blocked_pkgs.add(pkg, blocker)

					if parent.operation == "merge":
						# Maybe the blocked package can be replaced or simply
						# unmerged to resolve this block.
						depends_on_order.add((pkg, parent))
						continue
					# None of the above blocker resolutions techniques apply,
					# so apparently this one is unresolvable.
					unresolved_blocks = True
				for pkg in blocked_final:
					if pkg.slot_atom == parent.slot_atom:
						# TODO: Support blocks within slots.
						continue
					if parent.operation == "nomerge" and \
						pkg.operation == "nomerge":
						# This blocker will be handled the next time that a
						# merge of either package is triggered.
						continue

					self._blocked_pkgs.add(pkg, blocker)

					# Maybe the blocking package can be
					# unmerged to resolve this block.
					if parent.operation == "merge" and pkg.installed:
						depends_on_order.add((pkg, parent))
						continue
					elif parent.operation == "nomerge":
						depends_on_order.add((parent, pkg))
						continue
					# None of the above blocker resolutions techniques apply,
					# so apparently this one is unresolvable.
					unresolved_blocks = True

				# Make sure we don't unmerge any package that have been pulled
				# into the graph.
				if not unresolved_blocks and depends_on_order:
					for inst_pkg, inst_task in depends_on_order:
						if self.digraph.contains(inst_pkg) and \
							self.digraph.parent_nodes(inst_pkg):
							unresolved_blocks = True
							break

				if not unresolved_blocks and depends_on_order:
					for inst_pkg, inst_task in depends_on_order:
						# Create an explicit uninstall task for the
						# installed instance.
						uninst_task = Package(built=inst_pkg.built,
							cpv=inst_pkg.cpv, installed=inst_pkg.installed,
							metadata=inst_pkg.metadata,
							operation="uninstall",
							root_config=inst_pkg.root_config,
							type_name=inst_pkg.type_name)
						self._pkg_cache[uninst_task] = uninst_task
						# Enforce correct merge order with a hard dep.
						self.digraph.addnode(uninst_task, inst_task,
							priority=BlockerDepPriority.instance)
						# Count references to this blocker so that it can be
						# invalidated after nodes referencing it have been
						# merged.
						self._blocker_uninstalls.addnode(uninst_task, blocker)
				if not unresolved_blocks and not depends_on_order:
					self._irrelevant_blockers.add(blocker, parent)
					self._blocker_parents.remove_edge(blocker, parent)
					if not self._blocker_parents.parent_nodes(blocker):
						self._blocker_parents.remove(blocker)
					if not self._blocker_parents.child_nodes(parent):
						self._blocker_parents.remove(parent)
				if unresolved_blocks:
					self._unsolvable_blockers.add(blocker, parent)

		return True
2930
2931
	def _accept_blocker_conflicts(self):
2932
		acceptable = False
2933
		for x in ("--buildpkgonly", "--fetchonly",
2934
			"--fetch-all-uri", "--nodeps"):
2935
			if x in self.myopts:
2936
				acceptable = True
2937
				break
2938
		return acceptable
2939
2940
	def _merge_order_bias(self, mygraph):
		"""
		Sort mygraph.order in place for optimal leaf node selection:
		uninstall operations sink to the end, deep system runtime deps
		are promoted, and the remaining nodes are ordered from highest
		to lowest overall reference count.
		"""

		# Precompute each node's reference count (number of parents)
		# once, so the comparator below stays cheap.
		node_info = {}
		for node in mygraph.order:
			node_info[node] = len(mygraph.parent_nodes(node))
		deep_system_deps = _find_deep_system_runtime_deps(mygraph)

		def cmp_merge_preference(node1, node2):

			# Uninstall tasks always sort after everything else.
			# (The original code re-checked node1 inside the node2
			# branch, which was unreachable; this form is equivalent.)
			node1_uninst = node1.operation == 'uninstall'
			node2_uninst = node2.operation == 'uninstall'
			if node1_uninst or node2_uninst:
				if node1_uninst and node2_uninst:
					return 0
				if node1_uninst:
					return 1
				return -1

			# Deep system runtime deps are merged as early as possible.
			node1_sys = node1 in deep_system_deps
			node2_sys = node2 in deep_system_deps
			if node1_sys != node2_sys:
				if node1_sys:
					return -1
				return 1

			# Otherwise prefer the node with the higher reference count.
			return node_info[node2] - node_info[node1]

		mygraph.order.sort(key=cmp_sort_key(cmp_merge_preference))
2973
2974
	def altlist(self, reversed=False):
		"""
		Return a copy of the serialized merge list, computing and
		caching it on first use. The ``reversed`` parameter (kept for
		API compatibility even though it shadows the builtin) reverses
		the returned copy without affecting the cache.
		"""
		# Serialization may request a retry after conflict resolution
		# adjusts the graph, so loop until a result has been cached.
		while self._serialized_tasks_cache is None:
			self._resolve_conflicts()
			try:
				tasks, graph = self._serialize_tasks()
			except self._serialize_tasks_retry:
				continue
			self._serialized_tasks_cache = tasks
			self._scheduler_graph = graph

		result = list(self._serialized_tasks_cache)
		if reversed:
			result.reverse()
		return result
2988
2989
	def schedulerGraph(self):
		"""
		The scheduler graph is identical to the normal one except that
		uninstall edges are reversed in specific cases that require
		conflicting packages to be temporarily installed simultaneously.
		This is intended for use by the Scheduler in its parallelization
		logic. It ensures that temporary simultaneous installation of
		conflicting packages is avoided when appropriate (especially for
		!!atom blockers), but allowed in specific cases that require it.

		Note that this method calls break_refs() which alters the state of
		internal Package instances such that this depgraph instance should
		not be used to perform any more calculations.
		"""
		# altlist() populates _scheduler_graph as a side effect.
		if self._scheduler_graph is None:
			self.altlist()
		self.break_refs(self._scheduler_graph.order)
		return self._scheduler_graph
3007
3008
	def break_refs(self, nodes):
		"""
		Break references from the given nodes (e.g. a mergelist from
		self.altlist()) back to this depgraph, so callers can keep
		package references without also pinning the depgraph in memory.
		"""
		for node in nodes:
			if not hasattr(node, "root_config"):
				continue
			# The FakeVartree references the _package_cache which
			# references the depgraph. Swap in the original RootConfig
			# (which references the actual vartree) so that Package
			# instances no longer keep the depgraph and FakeVartree
			# alive on the heap.
			original_trees = self._trees_orig[node.root_config.root]
			node.root_config = original_trees["root_config"]
3025
3026
	def _resolve_conflicts(self):
3027
		if not self._complete_graph():
3028
			raise self._unknown_internal_error()
3029
3030
		if not self.validate_blockers():
3031
			raise self._unknown_internal_error()
3032
3033
		if self._slot_collision_info:
3034
			self._process_slot_conflicts()
3035
3036
	def _serialize_tasks(self):
3037
3038
		if "--debug" in self.myopts:
3039
			writemsg("\ndigraph:\n\n", noiselevel=-1)
3040
			self.digraph.debug_print()
3041
			writemsg("\n", noiselevel=-1)
3042
3043
		scheduler_graph = self.digraph.copy()
3044
3045
		if '--nodeps' in self.myopts:
3046
			# Preserve the package order given on the command line.
3047
			return ([node for node in scheduler_graph \
3048
				if isinstance(node, Package) \
3049
				and node.operation == 'merge'], scheduler_graph)
3050
3051
		mygraph=self.digraph.copy()
3052
		# Prune "nomerge" root nodes if nothing depends on them, since
3053
		# otherwise they slow down merge order calculation. Don't remove
3054
		# non-root nodes since they help optimize merge order in some cases
3055
		# such as revdep-rebuild.
3056
		removed_nodes = set()
3057
		while True:
3058
			for node in mygraph.root_nodes():
3059
				if not isinstance(node, Package) or \
3060
					node.installed or node.onlydeps:
3061
					removed_nodes.add(node)
3062
			if removed_nodes:
3063
				self.spinner.update()
3064
				mygraph.difference_update(removed_nodes)
3065
			if not removed_nodes:
3066
				break
3067
			removed_nodes.clear()
3068
		self._merge_order_bias(mygraph)
3069
		def cmp_circular_bias(n1, n2):
3070
			"""
3071
			RDEPEND is stronger than PDEPEND and this function
3072
			measures such a strength bias within a circular
3073
			dependency relationship.
3074
			"""
3075
			n1_n2_medium = n2 in mygraph.child_nodes(n1,
3076
				ignore_priority=priority_range.ignore_medium_soft)
3077
			n2_n1_medium = n1 in mygraph.child_nodes(n2,
3078
				ignore_priority=priority_range.ignore_medium_soft)
3079
			if n1_n2_medium == n2_n1_medium:
3080
				return 0
3081
			elif n1_n2_medium:
3082
				return 1
3083
			return -1
3084
		myblocker_uninstalls = self._blocker_uninstalls.copy()
3085
		retlist=[]
3086
		# Contains uninstall tasks that have been scheduled to
3087
		# occur after overlapping blockers have been installed.
3088
		scheduled_uninstalls = set()
3089
		# Contains any Uninstall tasks that have been ignored
3090
		# in order to avoid the circular deps code path. These
3091
		# correspond to blocker conflicts that could not be
3092
		# resolved.
3093
		ignored_uninstall_tasks = set()
3094
		have_uninstall_task = False
3095
		complete = "complete" in self.myparams
3096
		asap_nodes = []
3097
3098
		def get_nodes(**kwargs):
3099
			"""
3100
			Returns leaf nodes excluding Uninstall instances
3101
			since those should be executed as late as possible.
3102
			"""
3103
			return [node for node in mygraph.leaf_nodes(**kwargs) \
3104
				if isinstance(node, Package) and \
3105
					(node.operation != "uninstall" or \
3106
					node in scheduled_uninstalls)]
3107
3108
		# sys-apps/portage needs special treatment if ROOT="/"
3109
		running_root = self._running_root.root
3110
		from portage.const import PORTAGE_PACKAGE_ATOM
3111
		runtime_deps = InternalPackageSet(
3112
			initial_atoms=[PORTAGE_PACKAGE_ATOM])
3113
		running_portage = self.trees[running_root]["vartree"].dbapi.match_pkgs(
3114
			PORTAGE_PACKAGE_ATOM)
3115
		replacement_portage = self.mydbapi[running_root].match_pkgs(
3116
			PORTAGE_PACKAGE_ATOM)
3117
3118
		if running_portage:
3119
			running_portage = running_portage[0]
3120
		else:
3121
			running_portage = None
3122
3123
		if replacement_portage:
3124
			replacement_portage = replacement_portage[0]
3125
		else:
3126
			replacement_portage = None
3127
3128
		if replacement_portage == running_portage:
3129
			replacement_portage = None
3130
3131
		if replacement_portage is not None:
3132
			# update from running_portage to replacement_portage asap
3133
			asap_nodes.append(replacement_portage)
3134
3135
		if running_portage is not None:
3136
			try:
3137
				portage_rdepend = self._select_atoms_highest_available(
3138
					running_root, running_portage.metadata["RDEPEND"],
3139
					myuse=running_portage.use.enabled,
3140
					parent=running_portage, strict=False)
3141
			except portage.exception.InvalidDependString, e:
3142
				portage.writemsg("!!! Invalid RDEPEND in " + \
3143
					"'%svar/db/pkg/%s/RDEPEND': %s\n" % \
3144
					(running_root, running_portage.cpv, e), noiselevel=-1)
3145
				del e
3146
				portage_rdepend = []
3147
			runtime_deps.update(atom for atom in portage_rdepend \
3148
				if not atom.startswith("!"))
3149
3150
		def gather_deps(ignore_priority, mergeable_nodes,
3151
			selected_nodes, node):
3152
			"""
3153
			Recursively gather a group of nodes that RDEPEND on
3154
			eachother. This ensures that they are merged as a group
3155
			and get their RDEPENDs satisfied as soon as possible.
3156
			"""
3157
			if node in selected_nodes:
3158
				return True
3159
			if node not in mergeable_nodes:
3160
				return False
3161
			if node == replacement_portage and \
3162
				mygraph.child_nodes(node,
3163
				ignore_priority=priority_range.ignore_medium_soft):
3164
				# Make sure that portage always has all of it's
3165
				# RDEPENDs installed first.
3166
				return False
3167
			selected_nodes.add(node)
3168
			for child in mygraph.child_nodes(node,
3169
				ignore_priority=ignore_priority):
3170
				if not gather_deps(ignore_priority,
3171
					mergeable_nodes, selected_nodes, child):
3172
					return False
3173
			return True
3174
3175
		def ignore_uninst_or_med(priority):
3176
			if priority is BlockerDepPriority.instance:
3177
				return True
3178
			return priority_range.ignore_medium(priority)
3179
3180
		def ignore_uninst_or_med_soft(priority):
3181
			if priority is BlockerDepPriority.instance:
3182
				return True
3183
			return priority_range.ignore_medium_soft(priority)
3184
3185
		tree_mode = "--tree" in self.myopts
3186
		# Tracks whether or not the current iteration should prefer asap_nodes
3187
		# if available.  This is set to False when the previous iteration
3188
		# failed to select any nodes.  It is reset whenever nodes are
3189
		# successfully selected.
3190
		prefer_asap = True
3191
3192
		# Controls whether or not the current iteration should drop edges that
3193
		# are "satisfied" by installed packages, in order to solve circular
3194
		# dependencies. The deep runtime dependencies of installed packages are
3195
		# not checked in this case (bug #199856), so it must be avoided
3196
		# whenever possible.
3197
		drop_satisfied = False
3198
3199
		# State of variables for successive iterations that loosen the
3200
		# criteria for node selection.
3201
		#
3202
		# iteration   prefer_asap   drop_satisfied
3203
		# 1           True          False
3204
		# 2           False         False
3205
		# 3           False         True
3206
		#
3207
		# If no nodes are selected on the last iteration, it is due to
3208
		# unresolved blockers or circular dependencies.
3209
3210
		while not mygraph.empty():
3211
			self.spinner.update()
3212
			selected_nodes = None
3213
			ignore_priority = None
3214
			if drop_satisfied or (prefer_asap and asap_nodes):
3215
				priority_range = DepPrioritySatisfiedRange
3216
			else:
3217
				priority_range = DepPriorityNormalRange
3218
			if prefer_asap and asap_nodes:
3219
				# ASAP nodes are merged before their soft deps. Go ahead and
3220
				# select root nodes here if necessary, since it's typical for
3221
				# the parent to have been removed from the graph already.
3222
				asap_nodes = [node for node in asap_nodes \
3223
					if mygraph.contains(node)]
3224
				for node in asap_nodes:
3225
					if not mygraph.child_nodes(node,
3226
						ignore_priority=priority_range.ignore_soft):
3227
						selected_nodes = [node]
3228
						asap_nodes.remove(node)
3229
						break
3230
			if not selected_nodes and \
3231
				not (prefer_asap and asap_nodes):
3232
				for i in xrange(priority_range.NONE,
3233
					priority_range.MEDIUM_SOFT + 1):
3234
					ignore_priority = priority_range.ignore_priority[i]
3235
					nodes = get_nodes(ignore_priority=ignore_priority)
3236
					if nodes:
3237
						# If there is a mix of uninstall nodes with other
3238
						# types, save the uninstall nodes for later since
3239
						# sometimes a merge node will render an uninstall
3240
						# node unnecessary (due to occupying the same slot),
3241
						# and we want to avoid executing a separate uninstall
3242
						# task in that case.
3243
						if len(nodes) > 1:
3244
							good_uninstalls = []
3245
							with_some_uninstalls_excluded = []
3246
							for node in nodes:
3247
								if node.operation == "uninstall":
3248
									slot_node = self.mydbapi[node.root
3249
										].match_pkgs(node.slot_atom)
3250
									if slot_node and \
3251
										slot_node[0].operation == "merge":
3252
										continue
3253
									good_uninstalls.append(node)
3254
								with_some_uninstalls_excluded.append(node)
3255
							if good_uninstalls:
3256
								nodes = good_uninstalls
3257
							elif with_some_uninstalls_excluded:
3258
								nodes = with_some_uninstalls_excluded
3259
							else:
3260
								nodes = nodes
3261
3262
						if ignore_priority is None and not tree_mode:
3263
							# Greedily pop all of these nodes since no
3264
							# relationship has been ignored. This optimization
3265
							# destroys --tree output, so it's disabled in tree
3266
							# mode.
3267
							selected_nodes = nodes
3268
						else:
3269
							# For optimal merge order:
3270
							#  * Only pop one node.
3271
							#  * Removing a root node (node without a parent)
3272
							#    will not produce a leaf node, so avoid it.
3273
							#  * It's normal for a selected uninstall to be a
3274
							#    root node, so don't check them for parents.
3275
							for node in nodes:
3276
								if node.operation == "uninstall" or \
3277
									mygraph.parent_nodes(node):
3278
									selected_nodes = [node]
3279
									break
3280
3281
						if selected_nodes:
3282
							break
3283
3284
			if not selected_nodes:
3285
				nodes = get_nodes(ignore_priority=priority_range.ignore_medium)
3286
				if nodes:
3287
					mergeable_nodes = set(nodes)
3288
					if prefer_asap and asap_nodes:
3289
						nodes = asap_nodes
3290
					for i in xrange(priority_range.SOFT,
3291
						priority_range.MEDIUM_SOFT + 1):
3292
						ignore_priority = priority_range.ignore_priority[i]
3293
						for node in nodes:
3294
							if not mygraph.parent_nodes(node):
3295
								continue
3296
							selected_nodes = set()
3297
							if gather_deps(ignore_priority,
3298
								mergeable_nodes, selected_nodes, node):
3299
								break
3300
							else:
3301
								selected_nodes = None
3302
						if selected_nodes:
3303
							break
3304
3305
					if prefer_asap and asap_nodes and not selected_nodes:
3306
						# We failed to find any asap nodes to merge, so ignore
3307
						# them for the next iteration.
3308
						prefer_asap = False
3309
						continue
3310
3311
			if selected_nodes and ignore_priority is not None:
3312
				# Try to merge ignored medium_soft deps as soon as possible
3313
				# if they're not satisfied by installed packages.
3314
				for node in selected_nodes:
3315
					children = set(mygraph.child_nodes(node))
3316
					soft = children.difference(
3317
						mygraph.child_nodes(node,
3318
						ignore_priority=DepPrioritySatisfiedRange.ignore_soft))
3319
					medium_soft = children.difference(
3320
						mygraph.child_nodes(node,
3321
							ignore_priority = \
3322
							DepPrioritySatisfiedRange.ignore_medium_soft))
3323
					medium_soft.difference_update(soft)
3324
					for child in medium_soft:
3325
						if child in selected_nodes:
3326
							continue
3327
						if child in asap_nodes:
3328
							continue
3329
						asap_nodes.append(child)
3330
3331
			if selected_nodes and len(selected_nodes) > 1:
3332
				if not isinstance(selected_nodes, list):
3333
					selected_nodes = list(selected_nodes)
3334
				selected_nodes.sort(key=cmp_sort_key(cmp_circular_bias))
3335
3336
			if not selected_nodes and not myblocker_uninstalls.is_empty():
3337
				# An Uninstall task needs to be executed in order to
3338
				# avoid conflict if possible.
3339
3340
				if drop_satisfied:
3341
					priority_range = DepPrioritySatisfiedRange
3342
				else:
3343
					priority_range = DepPriorityNormalRange
3344
3345
				mergeable_nodes = get_nodes(
3346
					ignore_priority=ignore_uninst_or_med)
3347
3348
				min_parent_deps = None
3349
				uninst_task = None
3350
				for task in myblocker_uninstalls.leaf_nodes():
3351
					# Do some sanity checks so that system or world packages
3352
					# don't get uninstalled inappropriately here (only really
3353
					# necessary when --complete-graph has not been enabled).
3354
3355
					if task in ignored_uninstall_tasks:
3356
						continue
3357
3358
					if task in scheduled_uninstalls:
3359
						# It's been scheduled but it hasn't
3360
						# been executed yet due to dependence
3361
						# on installation of blocking packages.
3362
						continue
3363
3364
					root_config = self.roots[task.root]
3365
					inst_pkg = self._pkg_cache[
3366
						("installed", task.root, task.cpv, "nomerge")]
3367
3368
					if self.digraph.contains(inst_pkg):
3369
						continue
3370
3371
					forbid_overlap = False
3372
					heuristic_overlap = False
3373
					for blocker in myblocker_uninstalls.parent_nodes(task):
3374
						if blocker.eapi in ("0", "1"):
3375
							heuristic_overlap = True
3376
						elif blocker.atom.blocker.overlap.forbid:
3377
							forbid_overlap = True
3378
							break
3379
					if forbid_overlap and running_root == task.root:
3380
						continue
3381
3382
					if heuristic_overlap and running_root == task.root:
3383
						# Never uninstall sys-apps/portage or it's essential
3384
						# dependencies, except through replacement.
3385
						try:
3386
							runtime_dep_atoms = \
3387
								list(runtime_deps.iterAtomsForPackage(task))
3388
						except portage.exception.InvalidDependString, e:
3389
							portage.writemsg("!!! Invalid PROVIDE in " + \
3390
								"'%svar/db/pkg/%s/PROVIDE': %s\n" % \
3391
								(task.root, task.cpv, e), noiselevel=-1)
3392
							del e
3393
							continue
3394
3395
						# Don't uninstall a runtime dep if it appears
3396
						# to be the only suitable one installed.
3397
						skip = False
3398
						vardb = root_config.trees["vartree"].dbapi
3399
						for atom in runtime_dep_atoms:
3400
							other_version = None
3401
							for pkg in vardb.match_pkgs(atom):
3402
								if pkg.cpv == task.cpv and \
3403
									pkg.metadata["COUNTER"] == \
3404
									task.metadata["COUNTER"]:
3405
									continue
3406
								other_version = pkg
3407
								break
3408
							if other_version is None:
3409
								skip = True
3410
								break
3411
						if skip:
3412
							continue
3413
3414
						# For packages in the system set, don't take
3415
						# any chances. If the conflict can't be resolved
3416
						# by a normal replacement operation then abort.
3417
						skip = False
3418
						try:
3419
							for atom in root_config.sets[
3420
								"system"].iterAtomsForPackage(task):
3421
								skip = True
3422
								break
3423
						except portage.exception.InvalidDependString, e:
3424
							portage.writemsg("!!! Invalid PROVIDE in " + \
3425
								"'%svar/db/pkg/%s/PROVIDE': %s\n" % \
3426
								(task.root, task.cpv, e), noiselevel=-1)
3427
							del e
3428
							skip = True
3429
						if skip:
3430
							continue
3431
3432
					# Note that the world check isn't always
3433
					# necessary since self._complete_graph() will
3434
					# add all packages from the system and world sets to the
3435
					# graph. This just allows unresolved conflicts to be
3436
					# detected as early as possible, which makes it possible
3437
					# to avoid calling self._complete_graph() when it is
3438
					# unnecessary due to blockers triggering an abortion.
3439
					if not complete:
3440
						# For packages in the world set, go ahead an uninstall
3441
						# when necessary, as long as the atom will be satisfied
3442
						# in the final state.
3443
						graph_db = self.mydbapi[task.root]
3444
						skip = False
3445
						try:
3446
							for atom in root_config.sets[
3447
								"world"].iterAtomsForPackage(task):
3448
								satisfied = False
3449
								for pkg in graph_db.match_pkgs(atom):
3450
									if pkg == inst_pkg:
3451
										continue
3452
									satisfied = True
3453
									break
3454
								if not satisfied:
3455
									skip = True
3456
									self._blocked_world_pkgs[inst_pkg] = atom
3457
									break
3458
						except portage.exception.InvalidDependString, e:
3459
							portage.writemsg("!!! Invalid PROVIDE in " + \
3460
								"'%svar/db/pkg/%s/PROVIDE': %s\n" % \
3461
								(task.root, task.cpv, e), noiselevel=-1)
3462
							del e
3463
							skip = True
3464
						if skip:
3465
							continue
3466
3467
					# Check the deps of parent nodes to ensure that
3468
					# the chosen task produces a leaf node. Maybe
3469
					# this can be optimized some more to make the
3470
					# best possible choice, but the current algorithm
3471
					# is simple and should be near optimal for most
3472
					# common cases.
3473
					mergeable_parent = False
3474
					parent_deps = set()
3475
					for parent in mygraph.parent_nodes(task):
3476
						parent_deps.update(mygraph.child_nodes(parent,
3477
							ignore_priority=priority_range.ignore_medium_soft))
3478
						if parent in mergeable_nodes and \
3479
							gather_deps(ignore_uninst_or_med_soft,
3480
							mergeable_nodes, set(), parent):
3481
							mergeable_parent = True
3482
3483
					if not mergeable_parent:
3484
						continue
3485
3486
					parent_deps.remove(task)
3487
					if min_parent_deps is None or \
3488
						len(parent_deps) < min_parent_deps:
3489
						min_parent_deps = len(parent_deps)
3490
						uninst_task = task
3491
3492
				if uninst_task is not None:
3493
					# The uninstall is performed only after blocking
3494
					# packages have been merged on top of it. File
3495
					# collisions between blocking packages are detected
3496
					# and removed from the list of files to be uninstalled.
3497
					scheduled_uninstalls.add(uninst_task)
3498
					parent_nodes = mygraph.parent_nodes(uninst_task)
3499
3500
					# Reverse the parent -> uninstall edges since we want
3501
					# to do the uninstall after blocking packages have
3502
					# been merged on top of it.
3503
					mygraph.remove(uninst_task)
3504
					for blocked_pkg in parent_nodes:
3505
						mygraph.add(blocked_pkg, uninst_task,
3506
							priority=BlockerDepPriority.instance)
3507
						scheduler_graph.remove_edge(uninst_task, blocked_pkg)
3508
						scheduler_graph.add(blocked_pkg, uninst_task,
3509
							priority=BlockerDepPriority.instance)
3510
3511
					# Reset the state variables for leaf node selection and
3512
					# continue trying to select leaf nodes.
3513
					prefer_asap = True
3514
					drop_satisfied = False
3515
					continue
3516
3517
			if not selected_nodes:
3518
				# Only select root nodes as a last resort. This case should
3519
				# only trigger when the graph is nearly empty and the only
3520
				# remaining nodes are isolated (no parents or children). Since
3521
				# the nodes must be isolated, ignore_priority is not needed.
3522
				selected_nodes = get_nodes()
3523
3524
			if not selected_nodes and not drop_satisfied:
3525
				drop_satisfied = True
3526
				continue
3527
3528
			if not selected_nodes and not myblocker_uninstalls.is_empty():
3529
				# If possible, drop an uninstall task here in order to avoid
3530
				# the circular deps code path. The corresponding blocker will
3531
				# still be counted as an unresolved conflict.
3532
				uninst_task = None
3533
				for node in myblocker_uninstalls.leaf_nodes():
3534
					try:
3535
						mygraph.remove(node)
3536
					except KeyError:
3537
						pass
3538
					else:
3539
						uninst_task = node
3540
						ignored_uninstall_tasks.add(node)
3541
						break
3542
3543
				if uninst_task is not None:
3544
					# Reset the state variables for leaf node selection and
3545
					# continue trying to select leaf nodes.
3546
					prefer_asap = True
3547
					drop_satisfied = False
3548
					continue
3549
3550
			if not selected_nodes:
3551
				self._circular_deps_for_display = mygraph
3552
				raise self._unknown_internal_error()
3553
3554
			# At this point, we've succeeded in selecting one or more nodes, so
3555
			# reset state variables for leaf node selection.
3556
			prefer_asap = True
3557
			drop_satisfied = False
3558
3559
			mygraph.difference_update(selected_nodes)
3560
3561
			for node in selected_nodes:
3562
				if isinstance(node, Package) and \
3563
					node.operation == "nomerge":
3564
					continue
3565
3566
				# Handle interactions between blockers
3567
				# and uninstallation tasks.
3568
				solved_blockers = set()
3569
				uninst_task = None
3570
				if isinstance(node, Package) and \
3571
					"uninstall" == node.operation:
3572
					have_uninstall_task = True
3573
					uninst_task = node
3574
				else:
3575
					vardb = self.trees[node.root]["vartree"].dbapi
3576
					previous_cpv = vardb.match(node.slot_atom)
3577
					if previous_cpv:
3578
						# The package will be replaced by this one, so remove
3579
						# the corresponding Uninstall task if necessary.
3580
						previous_cpv = previous_cpv[0]
3581
						uninst_task = \
3582
							("installed", node.root, previous_cpv, "uninstall")
3583
						try:
3584
							mygraph.remove(uninst_task)
3585
						except KeyError:
3586
							pass
3587
3588
				if uninst_task is not None and \
3589
					uninst_task not in ignored_uninstall_tasks and \
3590
					myblocker_uninstalls.contains(uninst_task):
3591
					blocker_nodes = myblocker_uninstalls.parent_nodes(uninst_task)
3592
					myblocker_uninstalls.remove(uninst_task)
3593
					# Discard any blockers that this Uninstall solves.
3594
					for blocker in blocker_nodes:
3595
						if not myblocker_uninstalls.child_nodes(blocker):
3596
							myblocker_uninstalls.remove(blocker)
3597
							solved_blockers.add(blocker)
3598
3599
				retlist.append(node)
3600
3601
				if (isinstance(node, Package) and \
3602
					"uninstall" == node.operation) or \
3603
					(uninst_task is not None and \
3604
					uninst_task in scheduled_uninstalls):
3605
					# Include satisfied blockers in the merge list
3606
					# since the user might be interested and also
3607
					# it serves as an indicator that blocking packages
3608
					# will be temporarily installed simultaneously.
3609
					for blocker in solved_blockers:
3610
						retlist.append(Blocker(atom=blocker.atom,
3611
							root=blocker.root, eapi=blocker.eapi,
3612
							satisfied=True))
3613
3614
		unsolvable_blockers = set(self._unsolvable_blockers.leaf_nodes())
3615
		for node in myblocker_uninstalls.root_nodes():
3616
			unsolvable_blockers.add(node)
3617
3618
		for blocker in unsolvable_blockers:
3619
			retlist.append(blocker)
3620
3621
		# If any Uninstall tasks need to be executed in order
3622
		# to avoid a conflict, complete the graph with any
3623
		# dependencies that may have been initially
3624
		# neglected (to ensure that unsafe Uninstall tasks
3625
		# are properly identified and blocked from execution).
3626
		if have_uninstall_task and \
3627
			not complete and \
3628
			not unsolvable_blockers:
3629
			self.myparams.add("complete")
3630
			raise self._serialize_tasks_retry("")
3631
3632
		if unsolvable_blockers and \
3633
			not self._accept_blocker_conflicts():
3634
			self._unsatisfied_blockers_for_display = unsolvable_blockers
3635
			self._serialized_tasks_cache = retlist[:]
3636
			self._scheduler_graph = scheduler_graph
3637
			raise self._unknown_internal_error()
3638
3639
		if self._slot_collision_info and \
3640
			not self._accept_blocker_conflicts():
3641
			self._serialized_tasks_cache = retlist[:]
3642
			self._scheduler_graph = scheduler_graph
3643
			raise self._unknown_internal_error()
3644
3645
		return retlist, scheduler_graph
3646
3647
	def _show_circular_deps(self, mygraph):
		"""
		Emit a diagnostic for a circular-dependency panic: show the
		remaining cycle members in --tree mode (so their enabled USE
		flags are visible), dump the graph debug output, and print a
		hint about disabling USE flags that pull in optional deps.
		"""
		# A root node has no parents and therefore can never belong to
		# a cycle.  Repeatedly strip roots so that only cycle members
		# (and nodes trapped between cycles) remain, minimizing noise.
		while True:
			roots = mygraph.root_nodes(
				ignore_priority=DepPrioritySatisfiedRange.ignore_medium_soft)
			if not roots:
				break
			mygraph.difference_update(roots)

		# Build a display order by peeling off a leaf each iteration,
		# falling back to an arbitrary node when the cycle leaves none.
		ordered = []
		work_graph = mygraph.copy()
		while not work_graph.empty():
			leaves = work_graph.leaf_nodes()
			if leaves:
				chosen = leaves[0]
			else:
				chosen = work_graph.order[0]
			ordered.append(chosen)
			work_graph.remove(chosen)

		# Force a full tree display regardless of the user's options,
		# then show the nodes in reverse peel order.
		self.myopts.pop("--quiet", None)
		self.myopts.pop("--verbose", None)
		self.myopts["--tree"] = True
		portage.writemsg("\n\n", noiselevel=-1)
		self.display(ordered[::-1])
		prefix = colorize("BAD", " * ")
		portage.writemsg("\n", noiselevel=-1)
		portage.writemsg(prefix + "Error: circular dependencies:\n",
			noiselevel=-1)
		portage.writemsg("\n", noiselevel=-1)
		mygraph.debug_print()
		portage.writemsg("\n", noiselevel=-1)
		portage.writemsg(prefix + "Note that circular dependencies " + \
			"can often be avoided by temporarily\n", noiselevel=-1)
		portage.writemsg(prefix + "disabling USE flags that trigger " + \
			"optional dependencies.\n", noiselevel=-1)
3688
3689
	def _show_merge_list(self):
		"""
		Display the cached serialized task list, unless that exact
		list (in forward or reversed order) has already been shown.
		"""
		tasks = self._serialized_tasks_cache
		if tasks is None:
			return
		shown = self._displayed_list
		if shown and \
			(shown == tasks or shown == list(reversed(tasks))):
			# Identical list already displayed; avoid a redundant dump.
			return
		display_list = tasks[:]
		if "--tree" in self.myopts:
			# Tree mode renders from the deepest dependency upward.
			display_list.reverse()
		self.display(display_list)
3699
3700
	def _show_unsatisfied_blockers(self, blockers):
		"""
		Display the merge list followed by an explanation of the given
		unsatisfied blockers, including the parents that pulled each
		conflicting package into the graph.  This helps troubleshoot
		cases in which blockers don't solve automatically and the
		reasons are not apparent from the normal merge list display.
		"""
		self._show_merge_list()
		msg = "Error: The above package list contains " + \
			"packages which cannot be installed " + \
			"at the same time on the same system."
		prefix = colorize("BAD", " * ")
		portage.writemsg("\n", noiselevel=-1)
		# The textwrap module is already imported at the top of this
		# file, so a function-local "from textwrap import wrap" here
		# was redundant.
		for line in textwrap.wrap(msg, 70):
			portage.writemsg(prefix + line + "\n", noiselevel=-1)

		# Map each conflicting package to the set of (parent, atom)
		# pairs that pulled it in.
		conflict_pkgs = {}
		for blocker in blockers:
			for pkg in chain(self._blocked_pkgs.child_nodes(blocker), \
				self._blocker_parents.parent_nodes(blocker)):
				parent_atoms = self._parent_atoms.get(pkg)
				if not parent_atoms:
					# Fall back to the world-set atom recorded when the
					# uninstall of this package was blocked.
					atom = self._blocked_world_pkgs.get(pkg)
					if atom is not None:
						parent_atoms = set([("@world", atom)])
				if parent_atoms:
					conflict_pkgs[pkg] = parent_atoms

		if conflict_pkgs:
			# Reduce noise by pruning packages that are only
			# pulled in by other conflict packages.
			pruned_pkgs = set()
			for pkg, parent_atoms in conflict_pkgs.iteritems():
				relevant_parent = False
				for parent, atom in parent_atoms:
					if parent not in conflict_pkgs:
						relevant_parent = True
						break
				if not relevant_parent:
					pruned_pkgs.add(pkg)
			for pkg in pruned_pkgs:
				del conflict_pkgs[pkg]

		if conflict_pkgs:
			msg = []
			msg.append("\n")
			indent = "  "
			# Max number of parents shown, to avoid flooding the display.
			max_parents = 3
			for pkg, parent_atoms in conflict_pkgs.iteritems():

				pruned_list = set()

				# Prefer packages that are not directly involved in a conflict.
				for parent_atom in parent_atoms:
					if len(pruned_list) >= max_parents:
						break
					parent, atom = parent_atom
					if parent not in conflict_pkgs:
						pruned_list.add(parent_atom)

				# Fill any remaining display slots with conflict parents.
				for parent_atom in parent_atoms:
					if len(pruned_list) >= max_parents:
						break
					pruned_list.add(parent_atom)

				omitted_parents = len(parent_atoms) - len(pruned_list)
				msg.append(indent + "%s pulled in by\n" % pkg)

				for parent_atom in pruned_list:
					parent, atom = parent_atom
					msg.append(2*indent)
					if isinstance(parent,
						(PackageArg, AtomArg)):
						# For PackageArg and AtomArg types, it's
						# redundant to display the atom attribute.
						msg.append(str(parent))
					else:
						# Display the specific atom from SetArg or
						# Package types.
						msg.append("%s required by %s" % (atom, parent))
					msg.append("\n")

				if omitted_parents:
					msg.append(2*indent)
					msg.append("(and %d more)\n" % omitted_parents)

				msg.append("\n")

			sys.stderr.write("".join(msg))
			sys.stderr.flush()

		if "--quiet" not in self.myopts:
			show_blocker_docs_link()
3795
3796
	def display(self, mylist, favorites=[], verbosity=None):
3797
3798
		# This is used to prevent display_problems() from
3799
		# redundantly displaying this exact same merge list
3800
		# again via _show_merge_list().
3801
		self._displayed_list = mylist
3802
3803
		if verbosity is None:
3804
			verbosity = ("--quiet" in self.myopts and 1 or \
3805
				"--verbose" in self.myopts and 3 or 2)
3806
		favorites_set = InternalPackageSet(favorites)
3807
		oneshot = "--oneshot" in self.myopts or \
3808
			"--onlydeps" in self.myopts
3809
		columns = "--columns" in self.myopts
3810
		changelogs=[]
3811
		p=[]
3812
		blockers = []
3813
3814
		counters = PackageCounters()
3815
3816
		if verbosity == 1 and "--verbose" not in self.myopts:
3817
			def create_use_string(*args):
3818
				return ""
3819
		else:
3820
			def create_use_string(name, cur_iuse, iuse_forced, cur_use,
3821
				old_iuse, old_use,
3822
				is_new, reinst_flags,
3823
				all_flags=(verbosity == 3 or "--quiet" in self.myopts),
3824
				alphabetical=("--alphabetical" in self.myopts)):
3825
				enabled = []
3826
				if alphabetical:
3827
					disabled = enabled
3828
					removed = enabled
3829
				else:
3830
					disabled = []
3831
					removed = []
3832
				cur_iuse = set(cur_iuse)
3833
				enabled_flags = cur_iuse.intersection(cur_use)
3834
				removed_iuse = set(old_iuse).difference(cur_iuse)
3835
				any_iuse = cur_iuse.union(old_iuse)
3836
				any_iuse = list(any_iuse)
3837
				any_iuse.sort()
3838
				for flag in any_iuse:
3839
					flag_str = None
3840
					isEnabled = False
3841
					reinst_flag = reinst_flags and flag in reinst_flags
3842
					if flag in enabled_flags:
3843
						isEnabled = True
3844
						if is_new or flag in old_use and \
3845
							(all_flags or reinst_flag):
3846
							flag_str = red(flag)
3847
						elif flag not in old_iuse:
3848
							flag_str = yellow(flag) + "%*"
3849
						elif flag not in old_use:
3850
							flag_str = green(flag) + "*"
3851
					elif flag in removed_iuse:
3852
						if all_flags or reinst_flag:
3853
							flag_str = yellow("-" + flag) + "%"
3854
							if flag in old_use:
3855
								flag_str += "*"
3856
							flag_str = "(" + flag_str + ")"
3857
							removed.append(flag_str)
3858
						continue
3859
					else:
3860
						if is_new or flag in old_iuse and \
3861
							flag not in old_use and \
3862
							(all_flags or reinst_flag):
3863
							flag_str = blue("-" + flag)
3864
						elif flag not in old_iuse:
3865
							flag_str = yellow("-" + flag)
3866
							if flag not in iuse_forced:
3867
								flag_str += "%"
3868
						elif flag in old_use:
3869
							flag_str = green("-" + flag) + "*"
3870
					if flag_str:
3871
						if flag in iuse_forced:
3872
							flag_str = "(" + flag_str + ")"
3873
						if isEnabled:
3874
							enabled.append(flag_str)
3875
						else:
3876
							disabled.append(flag_str)
3877
3878
				if alphabetical:
3879
					ret = " ".join(enabled)
3880
				else:
3881
					ret = " ".join(enabled + disabled + removed)
3882
				if ret:
3883
					ret = '%s="%s" ' % (name, ret)
3884
				return ret
3885
3886
		repo_display = RepoDisplay(self.roots)
3887
3888
		tree_nodes = []
3889
		display_list = []
3890
		mygraph = self.digraph.copy()
3891
3892
		# If there are any Uninstall instances, add the corresponding
3893
		# blockers to the digraph (useful for --tree display).
3894
3895
		executed_uninstalls = set(node for node in mylist \
3896
			if isinstance(node, Package) and node.operation == "unmerge")
3897
3898
		for uninstall in self._blocker_uninstalls.leaf_nodes():
3899
			uninstall_parents = \
3900
				self._blocker_uninstalls.parent_nodes(uninstall)
3901
			if not uninstall_parents:
3902
				continue
3903
3904
			# Remove the corresponding "nomerge" node and substitute
3905
			# the Uninstall node.
3906
			inst_pkg = self._pkg_cache[
3907
				("installed", uninstall.root, uninstall.cpv, "nomerge")]
3908
			try:
3909
				mygraph.remove(inst_pkg)
3910
			except KeyError:
3911
				pass
3912
3913
			try:
3914
				inst_pkg_blockers = self._blocker_parents.child_nodes(inst_pkg)
3915
			except KeyError:
3916
				inst_pkg_blockers = []
3917
3918
			# Break the Package -> Uninstall edges.
3919
			mygraph.remove(uninstall)
3920
3921
			# Resolution of a package's blockers
3922
			# depend on it's own uninstallation.
3923
			for blocker in inst_pkg_blockers:
3924
				mygraph.add(uninstall, blocker)
3925
3926
			# Expand Package -> Uninstall edges into
3927
			# Package -> Blocker -> Uninstall edges.
3928
			for blocker in uninstall_parents:
3929
				mygraph.add(uninstall, blocker)
3930
				for parent in self._blocker_parents.parent_nodes(blocker):
3931
					if parent != inst_pkg:
3932
						mygraph.add(blocker, parent)
3933
3934
			# If the uninstall task did not need to be executed because
3935
			# of an upgrade, display Blocker -> Upgrade edges since the
3936
			# corresponding Blocker -> Uninstall edges will not be shown.
3937
			upgrade_node = \
3938
				self._slot_pkg_map[uninstall.root].get(uninstall.slot_atom)
3939
			if upgrade_node is not None and \
3940
				uninstall not in executed_uninstalls:
3941
				for blocker in uninstall_parents:
3942
					mygraph.add(upgrade_node, blocker)
3943
3944
		unsatisfied_blockers = []
3945
		i = 0
3946
		depth = 0
3947
		shown_edges = set()
3948
		for x in mylist:
3949
			if isinstance(x, Blocker) and not x.satisfied:
3950
				unsatisfied_blockers.append(x)
3951
				continue
3952
			graph_key = x
3953
			if "--tree" in self.myopts:
3954
				depth = len(tree_nodes)
3955
				while depth and graph_key not in \
3956
					mygraph.child_nodes(tree_nodes[depth-1]):
3957
						depth -= 1
3958
				if depth:
3959
					tree_nodes = tree_nodes[:depth]
3960
					tree_nodes.append(graph_key)
3961
					display_list.append((x, depth, True))
3962
					shown_edges.add((graph_key, tree_nodes[depth-1]))
3963
				else:
3964
					traversed_nodes = set() # prevent endless circles
3965
					traversed_nodes.add(graph_key)
3966
					def add_parents(current_node, ordered):
3967
						parent_nodes = None
3968
						# Do not traverse to parents if this node is an
3969
						# an argument or a direct member of a set that has
3970
						# been specified as an argument (system or world).
3971
						if current_node not in self._set_nodes:
3972
							parent_nodes = mygraph.parent_nodes(current_node)
3973
						if parent_nodes:
3974
							child_nodes = set(mygraph.child_nodes(current_node))
3975
							selected_parent = None
3976
							# First, try to avoid a direct cycle.
3977
							for node in parent_nodes:
3978
								if not isinstance(node, (Blocker, Package)):
3979
									continue
3980
								if node not in traversed_nodes and \
3981
									node not in child_nodes:
3982
									edge = (current_node, node)
3983
									if edge in shown_edges:
3984
										continue
3985
									selected_parent = node
3986
									break
3987
							if not selected_parent:
3988
								# A direct cycle is unavoidable.
3989
								for node in parent_nodes:
3990
									if not isinstance(node, (Blocker, Package)):
3991
										continue
3992
									if node not in traversed_nodes:
3993
										edge = (current_node, node)
3994
										if edge in shown_edges:
3995
											continue
3996
										selected_parent = node
3997
										break
3998
							if selected_parent:
3999
								shown_edges.add((current_node, selected_parent))
4000
								traversed_nodes.add(selected_parent)
4001
								add_parents(selected_parent, False)
4002
						display_list.append((current_node,
4003
							len(tree_nodes), ordered))
4004
						tree_nodes.append(current_node)
4005
					tree_nodes = []
4006
					add_parents(graph_key, True)
4007
			else:
4008
				display_list.append((x, depth, True))
4009
		mylist = display_list
4010
		for x in unsatisfied_blockers:
4011
			mylist.append((x, 0, True))
4012
4013
		last_merge_depth = 0
4014
		for i in xrange(len(mylist)-1,-1,-1):
4015
			graph_key, depth, ordered = mylist[i]
4016
			if not ordered and depth == 0 and i > 0 \
4017
				and graph_key == mylist[i-1][0] and \
4018
				mylist[i-1][1] == 0:
4019
				# An ordered node got a consecutive duplicate when the tree was
4020
				# being filled in.
4021
				del mylist[i]
4022
				continue
4023
			if ordered and graph_key[-1] != "nomerge":
4024
				last_merge_depth = depth
4025
				continue
4026
			if depth >= last_merge_depth or \
4027
				i < len(mylist) - 1 and \
4028
				depth >= mylist[i+1][1]:
4029
					del mylist[i]
4030
4031
		from portage import flatten
4032
		from portage.dep import use_reduce, paren_reduce
4033
		# files to fetch list - avoids counting a same file twice
4034
		# in size display (verbose mode)
4035
		myfetchlist=[]
4036
4037
		# Use this set to detect when all the "repoadd" strings are "[0]"
4038
		# and disable the entire repo display in this case.
4039
		repoadd_set = set()
4040
4041
		for mylist_index in xrange(len(mylist)):
4042
			x, depth, ordered = mylist[mylist_index]
4043
			pkg_type = x[0]
4044
			myroot = x[1]
4045
			pkg_key = x[2]
4046
			portdb = self.trees[myroot]["porttree"].dbapi
4047
			bindb  = self.trees[myroot]["bintree"].dbapi
4048
			vardb = self.trees[myroot]["vartree"].dbapi
4049
			vartree = self.trees[myroot]["vartree"]
4050
			pkgsettings = self.pkgsettings[myroot]
4051
4052
			fetch=" "
4053
			indent = " " * depth
4054
4055
			if isinstance(x, Blocker):
4056
				if x.satisfied:
4057
					blocker_style = "PKG_BLOCKER_SATISFIED"
4058
					addl = "%s  %s  " % (colorize(blocker_style, "b"), fetch)
4059
				else:
4060
					blocker_style = "PKG_BLOCKER"
4061
					addl = "%s  %s  " % (colorize(blocker_style, "B"), fetch)
4062
				if ordered:
4063
					counters.blocks += 1
4064
					if x.satisfied:
4065
						counters.blocks_satisfied += 1
4066
				resolved = portage.key_expand(
4067
					str(x.atom).lstrip("!"), mydb=vardb, settings=pkgsettings)
4068
				if "--columns" in self.myopts and "--quiet" in self.myopts:
4069
					addl += " " + colorize(blocker_style, resolved)
4070
				else:
4071
					addl = "[%s %s] %s%s" % \
4072
						(colorize(blocker_style, "blocks"),
4073
						addl, indent, colorize(blocker_style, resolved))
4074
				block_parents = self._blocker_parents.parent_nodes(x)
4075
				block_parents = set([pnode[2] for pnode in block_parents])
4076
				block_parents = ", ".join(block_parents)
4077
				if resolved!=x[2]:
4078
					addl += colorize(blocker_style,
4079
						" (\"%s\" is blocking %s)") % \
4080
						(str(x.atom).lstrip("!"), block_parents)
4081
				else:
4082
					addl += colorize(blocker_style,
4083
						" (is blocking %s)") % block_parents
4084
				if isinstance(x, Blocker) and x.satisfied:
4085
					if columns:
4086
						continue
4087
					p.append(addl)
4088
				else:
4089
					blockers.append(addl)
4090
			else:
4091
				pkg_status = x[3]
4092
				pkg_merge = ordered and pkg_status == "merge"
4093
				if not pkg_merge and pkg_status == "merge":
4094
					pkg_status = "nomerge"
4095
				built = pkg_type != "ebuild"
4096
				installed = pkg_type == "installed"
4097
				pkg = x
4098
				metadata = pkg.metadata
4099
				ebuild_path = None
4100
				repo_name = metadata["repository"]
4101
				if pkg_type == "ebuild":
4102
					ebuild_path = portdb.findname(pkg_key)
4103
					if not ebuild_path: # shouldn't happen
4104
						raise portage.exception.PackageNotFound(pkg_key)
4105
					repo_path_real = os.path.dirname(os.path.dirname(
4106
						os.path.dirname(ebuild_path)))
4107
				else:
4108
					repo_path_real = portdb.getRepositoryPath(repo_name)
4109
				pkg_use = list(pkg.use.enabled)
4110
				try:
4111
					restrict = flatten(use_reduce(paren_reduce(
4112
						pkg.metadata["RESTRICT"]), uselist=pkg_use))
4113
				except portage.exception.InvalidDependString, e:
4114
					if not pkg.installed:
4115
						show_invalid_depstring_notice(x,
4116
							pkg.metadata["RESTRICT"], str(e))
4117
						del e
4118
						return 1
4119
					restrict = []
4120
				if "ebuild" == pkg_type and x[3] != "nomerge" and \
4121
					"fetch" in restrict:
4122
					fetch = red("F")
4123
					if ordered:
4124
						counters.restrict_fetch += 1
4125
					if portdb.fetch_check(pkg_key, pkg_use):
4126
						fetch = green("f")
4127
						if ordered:
4128
							counters.restrict_fetch_satisfied += 1
4129
4130
				#we need to use "--emptrytree" testing here rather than "empty" param testing because "empty"
4131
				#param is used for -u, where you still *do* want to see when something is being upgraded.
4132
				myoldbest = []
4133
				myinslotlist = None
4134
				installed_versions = vardb.match(portage.cpv_getkey(pkg_key))
4135
				if vardb.cpv_exists(pkg_key):
4136
					addl="  "+yellow("R")+fetch+"  "
4137
					if ordered:
4138
						if pkg_merge:
4139
							counters.reinst += 1
4140
						elif pkg_status == "uninstall":
4141
							counters.uninst += 1
4142
				# filter out old-style virtual matches
4143
				elif installed_versions and \
4144
					portage.cpv_getkey(installed_versions[0]) == \
4145
					portage.cpv_getkey(pkg_key):
4146
					myinslotlist = vardb.match(pkg.slot_atom)
4147
					# If this is the first install of a new-style virtual, we
4148
					# need to filter out old-style virtual matches.
4149
					if myinslotlist and \
4150
						portage.cpv_getkey(myinslotlist[0]) != \
4151
						portage.cpv_getkey(pkg_key):
4152
						myinslotlist = None
4153
					if myinslotlist:
4154
						myoldbest = myinslotlist[:]
4155
						addl = "   " + fetch
4156
						if not portage.dep.cpvequal(pkg_key,
4157
							portage.best([pkg_key] + myoldbest)):
4158
							# Downgrade in slot
4159
							addl += turquoise("U")+blue("D")
4160
							if ordered:
4161
								counters.downgrades += 1
4162
						else:
4163
							# Update in slot
4164
							addl += turquoise("U") + " "
4165
							if ordered:
4166
								counters.upgrades += 1
4167
					else:
4168
						# New slot, mark it new.
4169
						addl = " " + green("NS") + fetch + "  "
4170
						myoldbest = vardb.match(portage.cpv_getkey(pkg_key))
4171
						if ordered:
4172
							counters.newslot += 1
4173
4174
					if "--changelog" in self.myopts:
4175
						inst_matches = vardb.match(pkg.slot_atom)
4176
						if inst_matches:
4177
							changelogs.extend(self.calc_changelog(
4178
								portdb.findname(pkg_key),
4179
								inst_matches[0], pkg_key))
4180
				else:
4181
					addl = " " + green("N") + " " + fetch + "  "
4182
					if ordered:
4183
						counters.new += 1
4184
4185
				verboseadd = ""
4186
				repoadd = None
4187
4188
				if True:
4189
					# USE flag display
4190
					forced_flags = set()
4191
					pkgsettings.setcpv(pkg) # for package.use.{mask,force}
4192
					forced_flags.update(pkgsettings.useforce)
4193
					forced_flags.update(pkgsettings.usemask)
4194
4195
					cur_use = [flag for flag in pkg.use.enabled \
4196
						if flag in pkg.iuse.all]
4197
					cur_iuse = sorted(pkg.iuse.all)
4198
4199
					if myoldbest and myinslotlist:
4200
						previous_cpv = myoldbest[0]
4201
					else:
4202
						previous_cpv = pkg.cpv
4203
					if vardb.cpv_exists(previous_cpv):
4204
						old_iuse, old_use = vardb.aux_get(
4205
								previous_cpv, ["IUSE", "USE"])
4206
						old_iuse = list(set(
4207
							filter_iuse_defaults(old_iuse.split())))
4208
						old_iuse.sort()
4209
						old_use = old_use.split()
4210
						is_new = False
4211
					else:
4212
						old_iuse = []
4213
						old_use = []
4214
						is_new = True
4215
4216
					old_use = [flag for flag in old_use if flag in old_iuse]
4217
4218
					use_expand = pkgsettings["USE_EXPAND"].lower().split()
4219
					use_expand.sort()
4220
					use_expand.reverse()
4221
					use_expand_hidden = \
4222
						pkgsettings["USE_EXPAND_HIDDEN"].lower().split()
4223
4224
					def map_to_use_expand(myvals, forcedFlags=False,
4225
						removeHidden=True):
4226
						ret = {}
4227
						forced = {}
4228
						for exp in use_expand:
4229
							ret[exp] = []
4230
							forced[exp] = set()
4231
							for val in myvals[:]:
4232
								if val.startswith(exp.lower()+"_"):
4233
									if val in forced_flags:
4234
										forced[exp].add(val[len(exp)+1:])
4235
									ret[exp].append(val[len(exp)+1:])
4236
									myvals.remove(val)
4237
						ret["USE"] = myvals
4238
						forced["USE"] = [val for val in myvals \
4239
							if val in forced_flags]
4240
						if removeHidden:
4241
							for exp in use_expand_hidden:
4242
								ret.pop(exp, None)
4243
						if forcedFlags:
4244
							return ret, forced
4245
						return ret
4246
4247
					# Prevent USE_EXPAND_HIDDEN flags from being hidden if they
4248
					# are the only thing that triggered reinstallation.
4249
					reinst_flags_map = {}
4250
					reinstall_for_flags = self._reinstall_nodes.get(pkg)
4251
					reinst_expand_map = None
4252
					if reinstall_for_flags:
4253
						reinst_flags_map = map_to_use_expand(
4254
							list(reinstall_for_flags), removeHidden=False)
4255
						for k in list(reinst_flags_map):
4256
							if not reinst_flags_map[k]:
4257
								del reinst_flags_map[k]
4258
						if not reinst_flags_map.get("USE"):
4259
							reinst_expand_map = reinst_flags_map.copy()
4260
							reinst_expand_map.pop("USE", None)
4261
					if reinst_expand_map and \
4262
						not set(reinst_expand_map).difference(
4263
						use_expand_hidden):
4264
						use_expand_hidden = \
4265
							set(use_expand_hidden).difference(
4266
							reinst_expand_map)
4267
4268
					cur_iuse_map, iuse_forced = \
4269
						map_to_use_expand(cur_iuse, forcedFlags=True)
4270
					cur_use_map = map_to_use_expand(cur_use)
4271
					old_iuse_map = map_to_use_expand(old_iuse)
4272
					old_use_map = map_to_use_expand(old_use)
4273
4274
					use_expand.sort()
4275
					use_expand.insert(0, "USE")
4276
					
4277
					for key in use_expand:
4278
						if key in use_expand_hidden:
4279
							continue
4280
						verboseadd += create_use_string(key.upper(),
4281
							cur_iuse_map[key], iuse_forced[key],
4282
							cur_use_map[key], old_iuse_map[key],
4283
							old_use_map[key], is_new,
4284
							reinst_flags_map.get(key))
4285
4286
				if verbosity == 3:
4287
					# size verbose
4288
					mysize=0
4289
					if pkg_type == "ebuild" and pkg_merge:
4290
						try:
4291
							myfilesdict = portdb.getfetchsizes(pkg_key,
4292
								useflags=pkg_use, debug=self.edebug)
4293
						except portage.exception.InvalidDependString, e:
4294
							src_uri = portdb.aux_get(pkg_key, ["SRC_URI"])[0]
4295
							show_invalid_depstring_notice(x, src_uri, str(e))
4296
							del e
4297
							return 1
4298
						if myfilesdict is None:
4299
							myfilesdict="[empty/missing/bad digest]"
4300
						else:
4301
							for myfetchfile in myfilesdict:
4302
								if myfetchfile not in myfetchlist:
4303
									mysize+=myfilesdict[myfetchfile]
4304
									myfetchlist.append(myfetchfile)
4305
							if ordered:
4306
								counters.totalsize += mysize
4307
						verboseadd += format_size(mysize)
4308
4309
					# overlay verbose
4310
					# assign index for a previous version in the same slot
4311
					has_previous = False
4312
					repo_name_prev = None
4313
					slot_atom = "%s:%s" % (portage.dep_getkey(pkg_key),
4314
						metadata["SLOT"])
4315
					slot_matches = vardb.match(slot_atom)
4316
					if slot_matches:
4317
						has_previous = True
4318
						repo_name_prev = vardb.aux_get(slot_matches[0],
4319
							["repository"])[0]
4320
4321
					# now use the data to generate output
4322
					if pkg.installed or not has_previous:
4323
						repoadd = repo_display.repoStr(repo_path_real)
4324
					else:
4325
						repo_path_prev = None
4326
						if repo_name_prev:
4327
							repo_path_prev = portdb.getRepositoryPath(
4328
								repo_name_prev)
4329
						if repo_path_prev == repo_path_real:
4330
							repoadd = repo_display.repoStr(repo_path_real)
4331
						else:
4332
							repoadd = "%s=>%s" % (
4333
								repo_display.repoStr(repo_path_prev),
4334
								repo_display.repoStr(repo_path_real))
4335
					if repoadd:
4336
						repoadd_set.add(repoadd)
4337
4338
				xs = [portage.cpv_getkey(pkg_key)] + \
4339
					list(portage.catpkgsplit(pkg_key)[2:])
4340
				if xs[2] == "r0":
4341
					xs[2] = ""
4342
				else:
4343
					xs[2] = "-" + xs[2]
4344
4345
				mywidth = 130
4346
				if "COLUMNWIDTH" in self.settings:
4347
					try:
4348
						mywidth = int(self.settings["COLUMNWIDTH"])
4349
					except ValueError, e:
4350
						portage.writemsg("!!! %s\n" % str(e), noiselevel=-1)
4351
						portage.writemsg(
4352
							"!!! Unable to parse COLUMNWIDTH='%s'\n" % \
4353
							self.settings["COLUMNWIDTH"], noiselevel=-1)
4354
						del e
4355
				oldlp = mywidth - 30
4356
				newlp = oldlp - 30
4357
4358
				# Convert myoldbest from a list to a string.
4359
				if not myoldbest:
4360
					myoldbest = ""
4361
				else:
4362
					for pos, key in enumerate(myoldbest):
4363
						key = portage.catpkgsplit(key)[2] + \
4364
							"-" + portage.catpkgsplit(key)[3]
4365
						if key[-3:] == "-r0":
4366
							key = key[:-3]
4367
						myoldbest[pos] = key
4368
					myoldbest = blue("["+", ".join(myoldbest)+"]")
4369
4370
				pkg_cp = xs[0]
4371
				root_config = self.roots[myroot]
4372
				system_set = root_config.sets["system"]
4373
				world_set  = root_config.sets["world"]
4374
4375
				pkg_system = False
4376
				pkg_world = False
4377
				try:
4378
					pkg_system = system_set.findAtomForPackage(pkg)
4379
					pkg_world  = world_set.findAtomForPackage(pkg)
4380
					if not (oneshot or pkg_world) and \
4381
						myroot == self.target_root and \
4382
						favorites_set.findAtomForPackage(pkg):
4383
						# Maybe it will be added to world now.
4384
						if create_world_atom(pkg, favorites_set, root_config):
4385
							pkg_world = True
4386
				except portage.exception.InvalidDependString:
4387
					# This is reported elsewhere if relevant.
4388
					pass
4389
4390
				def pkgprint(pkg_str):
4391
					if pkg_merge:
4392
						if pkg_system:
4393
							return colorize("PKG_MERGE_SYSTEM", pkg_str)
4394
						elif pkg_world:
4395
							return colorize("PKG_MERGE_WORLD", pkg_str)
4396
						else:
4397
							return colorize("PKG_MERGE", pkg_str)
4398
					elif pkg_status == "uninstall":
4399
						return colorize("PKG_UNINSTALL", pkg_str)
4400
					else:
4401
						if pkg_system:
4402
							return colorize("PKG_NOMERGE_SYSTEM", pkg_str)
4403
						elif pkg_world:
4404
							return colorize("PKG_NOMERGE_WORLD", pkg_str)
4405
						else:
4406
							return colorize("PKG_NOMERGE", pkg_str)
4407
4408
				try:
4409
					properties = flatten(use_reduce(paren_reduce(
4410
						pkg.metadata["PROPERTIES"]), uselist=pkg.use.enabled))
4411
				except portage.exception.InvalidDependString, e:
4412
					if not pkg.installed:
4413
						show_invalid_depstring_notice(pkg,
4414
							pkg.metadata["PROPERTIES"], str(e))
4415
						del e
4416
						return 1
4417
					properties = []
4418
				interactive = "interactive" in properties
4419
				if interactive and pkg.operation == "merge":
4420
					addl = colorize("WARN", "I") + addl[1:]
4421
					if ordered:
4422
						counters.interactive += 1
4423
4424
				if x[1]!="/":
4425
					if myoldbest:
4426
						myoldbest +=" "
4427
					if "--columns" in self.myopts:
4428
						if "--quiet" in self.myopts:
4429
							myprint=addl+" "+indent+pkgprint(pkg_cp)
4430
							myprint=myprint+darkblue(" "+xs[1]+xs[2])+" "
4431
							myprint=myprint+myoldbest
4432
							myprint=myprint+darkgreen("to "+x[1])
4433
							verboseadd = None
4434
						else:
4435
							if not pkg_merge:
4436
								myprint = "[%s] %s%s" % \
4437
									(pkgprint(pkg_status.ljust(13)),
4438
									indent, pkgprint(pkg.cp))
4439
							else:
4440
								myprint = "[%s %s] %s%s" % \
4441
									(pkgprint(pkg.type_name), addl,
4442
									indent, pkgprint(pkg.cp))
4443
							if (newlp-nc_len(myprint)) > 0:
4444
								myprint=myprint+(" "*(newlp-nc_len(myprint)))
4445
							myprint=myprint+"["+darkblue(xs[1]+xs[2])+"] "
4446
							if (oldlp-nc_len(myprint)) > 0:
4447
								myprint=myprint+" "*(oldlp-nc_len(myprint))
4448
							myprint=myprint+myoldbest
4449
							myprint += darkgreen("to " + pkg.root)
4450
					else:
4451
						if not pkg_merge:
4452
							myprint = "[%s] " % pkgprint(pkg_status.ljust(13))
4453
						else:
4454
							myprint = "[%s %s] " % (pkgprint(pkg_type), addl)
4455
						myprint += indent + pkgprint(pkg_key) + " " + \
4456
							myoldbest + darkgreen("to " + myroot)
4457
				else:
4458
					if "--columns" in self.myopts:
4459
						if "--quiet" in self.myopts:
4460
							myprint=addl+" "+indent+pkgprint(pkg_cp)
4461
							myprint=myprint+" "+green(xs[1]+xs[2])+" "
4462
							myprint=myprint+myoldbest
4463
							verboseadd = None
4464
						else:
4465
							if not pkg_merge:
4466
								myprint = "[%s] %s%s" % \
4467
									(pkgprint(pkg_status.ljust(13)),
4468
									indent, pkgprint(pkg.cp))
4469
							else:
4470
								myprint = "[%s %s] %s%s" % \
4471
									(pkgprint(pkg.type_name), addl,
4472
									indent, pkgprint(pkg.cp))
4473
							if (newlp-nc_len(myprint)) > 0:
4474
								myprint=myprint+(" "*(newlp-nc_len(myprint)))
4475
							myprint=myprint+green(" ["+xs[1]+xs[2]+"] ")
4476
							if (oldlp-nc_len(myprint)) > 0:
4477
								myprint=myprint+(" "*(oldlp-nc_len(myprint)))
4478
							myprint += myoldbest
4479
					else:
4480
						if not pkg_merge:
4481
							myprint = "[%s] %s%s %s" % \
4482
								(pkgprint(pkg_status.ljust(13)),
4483
								indent, pkgprint(pkg.cpv),
4484
								myoldbest)
4485
						else:
4486
							myprint = "[%s %s] %s%s %s" % \
4487
								(pkgprint(pkg_type), addl, indent,
4488
								pkgprint(pkg.cpv), myoldbest)
4489
4490
				if columns and pkg.operation == "uninstall":
4491
					continue
4492
				p.append((myprint, verboseadd, repoadd))
4493
4494
				if "--tree" not in self.myopts and \
4495
					"--quiet" not in self.myopts and \
4496
					not self._opts_no_restart.intersection(self.myopts) and \
4497
					pkg.root == self._running_root.root and \
4498
					portage.match_from_list(
4499
					portage.const.PORTAGE_PACKAGE_ATOM, [pkg]) and \
4500
					not vardb.cpv_exists(pkg.cpv) and \
4501
					"--quiet" not in self.myopts:
4502
						if mylist_index < len(mylist) - 1:
4503
							p.append(colorize("WARN", "*** Portage will stop merging at this point and reload itself,"))
4504
							p.append(colorize("WARN", "    then resume the merge."))
4505
4506
		out = sys.stdout
4507
		show_repos = repoadd_set and repoadd_set != set(["0"])
4508
4509
		for x in p:
4510
			if isinstance(x, basestring):
4511
				out.write("%s\n" % (x,))
4512
				continue
4513
4514
			myprint, verboseadd, repoadd = x
4515
4516
			if verboseadd:
4517
				myprint += " " + verboseadd
4518
4519
			if show_repos and repoadd:
4520
				myprint += " " + teal("[%s]" % repoadd)
4521
4522
			out.write("%s\n" % (myprint,))
4523
4524
		for x in blockers:
4525
			print x
4526
4527
		if verbosity == 3:
4528
			print
4529
			print counters
4530
			if show_repos:
4531
				sys.stdout.write(str(repo_display))
4532
4533
		if "--changelog" in self.myopts:
4534
			print
4535
			for revision,text in changelogs:
4536
				print bold('*'+revision)
4537
				sys.stdout.write(text)
4538
4539
		sys.stdout.flush()
4540
		return os.EX_OK
4541
4542
	def display_problems(self):
		"""
		Display problems with the dependency graph such as slot collisions.
		This is called internally by display() to show the problems _after_
		the merge list where it is most likely to be seen, but if display()
		is not going to be called then this method should be called explicitly
		to ensure that the user is notified of problems with the graph.

		All output goes to stderr, except for unsatisfied dependencies which
		go to stdout for parsing by programs such as autounmask.
		"""

		# Note that show_masked_packages() sends it's output to
		# stdout, and some programs such as autounmask parse the
		# output in cases when emerge bails out. However, when
		# show_masked_packages() is called for installed packages
		# here, the message is a warning that is more appropriate
		# to send to stderr, so temporarily redirect stdout to
		# stderr. TODO: Fix output code so there's a cleaner way
		# to redirect everything to stderr.
		sys.stdout.flush()
		sys.stderr.flush()
		stdout = sys.stdout
		try:
			sys.stdout = sys.stderr
			self._display_problems()
		finally:
			# Always restore the real stdout, even if
			# _display_problems() raised.
			sys.stdout = stdout
			sys.stdout.flush()
			sys.stderr.flush()

		# This goes to stdout for parsing by programs like autounmask.
		for pargs, kwargs in self._unsatisfied_deps_for_display:
			self._show_unsatisfied_dep(*pargs, **kwargs)
4576
4577
	def _display_problems(self):
		"""
		Emit each category of dependency-graph problem in turn:
		circular deps, unsatisfied blockers (or, failing that, slot
		collisions), missing arguments, package.provided conflicts,
		and masked installed packages. Output is written via
		sys.stderr / print; display_problems() redirects stdout
		to stderr around this call.
		"""
		if self._circular_deps_for_display is not None:
			self._show_circular_deps(
				self._circular_deps_for_display)

		# The user is only notified of a slot conflict if
		# there are no unresolvable blocker conflicts.
		if self._unsatisfied_blockers_for_display is not None:
			self._show_unsatisfied_blockers(
				self._unsatisfied_blockers_for_display)
		else:
			self._show_slot_collision_notice()

		# TODO: Add generic support for "set problem" handlers so that
		# the below warnings aren't special cases for world only.

		if self._missing_args:
			world_problems = False
			if "world" in self._sets:
				# Filter out indirect members of world (from nested sets)
				# since only direct members of world are desired here.
				world_set = self.roots[self.target_root].sets["world"]
				for arg, atom in self._missing_args:
					if arg.name == "world" and atom in world_set:
						world_problems = True
						break

			if world_problems:
				sys.stderr.write("\n!!! Problems have been " + \
					"detected with your world file\n")
				sys.stderr.write("!!! Please run " + \
					green("emaint --check world")+"\n\n")

		if self._missing_args:
			sys.stderr.write("\n" + colorize("BAD", "!!!") + \
				" Ebuilds for the following packages are either all\n")
			sys.stderr.write(colorize("BAD", "!!!") + \
				" masked or don't exist:\n")
			sys.stderr.write(" ".join(str(atom) for arg, atom in \
				self._missing_args) + "\n")

		if self._pprovided_args:
			# Group the (argument, atom) pairs by the set (or plain
			# command-line "args") that pulled them in, so each
			# package.provided entry is reported once with all of
			# its referrers.
			arg_refs = {}
			for arg, atom in self._pprovided_args:
				if isinstance(arg, SetArg):
					parent = arg.name
					arg_atom = (atom, atom)
				else:
					parent = "args"
					arg_atom = (arg.arg, atom)
				refs = arg_refs.setdefault(arg_atom, [])
				if parent not in refs:
					refs.append(parent)
			msg = []
			msg.append(bad("\nWARNING: "))
			if len(self._pprovided_args) > 1:
				msg.append("Requested packages will not be " + \
					"merged because they are listed in\n")
			else:
				msg.append("A requested package will not be " + \
					"merged because it is listed in\n")
			msg.append("package.provided:\n\n")
			problems_sets = set()
			for (arg, atom), refs in arg_refs.iteritems():
				ref_string = ""
				if refs:
					problems_sets.update(refs)
					refs.sort()
					ref_string = ", ".join(["'%s'" % name for name in refs])
					ref_string = " pulled in by " + ref_string
				msg.append("  %s%s\n" % (colorize("INFORM", str(arg)), ref_string))
			msg.append("\n")
			if "world" in problems_sets:
				msg.append("This problem can be solved in one of the following ways:\n\n")
				msg.append("  A) Use emaint to clean offending packages from world (if not installed).\n")
				msg.append("  B) Uninstall offending packages (cleans them from world).\n")
				msg.append("  C) Remove offending entries from package.provided.\n\n")
				msg.append("The best course of action depends on the reason that an offending\n")
				msg.append("package.provided entry exists.\n\n")
			sys.stderr.write("".join(msg))

		# Warn about installed packages that are now masked.
		masked_packages = []
		for pkg in self._masked_installed:
			root_config = pkg.root_config
			pkgsettings = self.pkgsettings[pkg.root]
			mreasons = get_masking_status(pkg, pkgsettings, root_config)
			masked_packages.append((root_config, pkgsettings,
				pkg.cpv, pkg.metadata, mreasons))
		if masked_packages:
			sys.stderr.write("\n" + colorize("BAD", "!!!") + \
				" The following installed packages are masked:\n")
			show_masked_packages(masked_packages)
			show_mask_docs()
			print
4671
4672
	def calc_changelog(self, ebuildpath, current, next):
		"""
		Return the ChangeLog entries between two versions of a package.

		@param ebuildpath: path to the ebuild about to be merged; the
			ChangeLog is looked up in the same directory
		@param current: installed cpv (entries at or below it are excluded)
		@param next: cpv about to be merged (entries above it are skipped)
		@rtype: list
		@return: list of (version, text) tuples for the versions between
			current and next, or an empty list if the ChangeLog is
			missing/unreadable or current cannot be found in it
		"""
		if ebuildpath is None or not os.path.exists(ebuildpath):
			return []
		current = '-'.join(portage.catpkgsplit(current)[1:])
		if current.endswith('-r0'):
			current = current[:-3]
		next = '-'.join(portage.catpkgsplit(next)[1:])
		if next.endswith('-r0'):
			next = next[:-3]
		changelogpath = os.path.join(os.path.split(ebuildpath)[0], 'ChangeLog')
		try:
			# Best effort: a missing or unreadable ChangeLog just means
			# there is nothing to display. Only file errors are caught
			# here, so unexpected exceptions (and SystemExit) propagate.
			f = open(changelogpath)
			try:
				changelog = f.read()
			finally:
				f.close()
		except EnvironmentError:
			return []
		divisions = self.find_changelog_tags(changelog)
		# Skip entries for all revisions above the one we are about to emerge.
		for i in range(len(divisions)):
			if divisions[i][0] == next:
				divisions = divisions[i:]
				break
		# Find out how many entries we are going to display.
		for i in range(len(divisions)):
			if divisions[i][0] == current:
				divisions = divisions[:i]
				break
		else:
			# Couldn't find the current revision in the list; display nothing.
			return []
		return divisions
4705
4706
	def find_changelog_tags(self, changelog):
		"""
		Split a ChangeLog text into (version, text) sections.

		Each section starts at a "* <version>" header line; the version
		has any trailing ".ebuild" or "-r0" suffix stripped. Text before
		the first header is discarded.
		"""
		header_re = re.compile(
			r'^\*\ ?([-a-zA-Z0-9_.+]*)(?:\ .*)?\n', re.M)
		sections = []
		current_release = None
		remaining = changelog
		while True:
			m = header_re.search(remaining)
			if m is None:
				break
			if current_release is not None:
				# Close the previous section at the start of this header.
				sections.append((current_release, remaining[:m.start()]))
			remaining = remaining[m.end():]
			version = m.group(1)
			if version.endswith('.ebuild'):
				version = version[:-7]
			if version.endswith('-r0'):
				version = version[:-3]
			current_release = version
		if current_release is not None:
			# Everything after the last header belongs to that release.
			sections.append((current_release, remaining))
		return sections
4723
4724
	def saveNomergeFavorites(self):
		"""Find atoms in favorites that are not in the mergelist and add them
		to the world file if necessary."""
		for x in ("--buildpkgonly", "--fetchonly", "--fetch-all-uri",
			"--oneshot", "--onlydeps", "--pretend"):
			if x in self.myopts:
				# None of these modes should modify the world file.
				return
		root_config = self.roots[self.target_root]
		world_set = root_config.sets["world"]

		# Take the world-set lock (if the set supports locking) for
		# the whole read-modify-write cycle below.
		world_locked = False
		if hasattr(world_set, "lock"):
			world_set.lock()
			world_locked = True

		if hasattr(world_set, "load"):
			world_set.load() # maybe it's changed on disk

		args_set = self._sets["args"]
		portdb = self.trees[self.target_root]["porttree"].dbapi
		added_favorites = set()
		# Collect world atoms for requested packages that are already
		# installed (status "nomerge"), deduplicating as we go.
		for x in self._set_nodes:
			pkg_type, root, pkg_key, pkg_status = x
			if pkg_status != "nomerge":
				continue

			try:
				myfavkey = create_world_atom(x, args_set, root_config)
				if myfavkey:
					if myfavkey in added_favorites:
						continue
					added_favorites.add(myfavkey)
			except portage.exception.InvalidDependString, e:
				writemsg("\n\n!!! '%s' has invalid PROVIDE: %s\n" % \
					(pkg_key, str(e)), noiselevel=-1)
				writemsg("!!! see '%s'\n\n" % os.path.join(
					root, portage.VDB_PATH, pkg_key, "PROVIDE"), noiselevel=-1)
				del e
		# Also record requested sets (other than "args"/"world") that
		# are world candidates and not already in the world file.
		all_added = []
		for k in self._sets:
			if k in ("args", "world") or not root_config.sets[k].world_candidate:
				continue
			s = SETPREFIX + k
			if s in world_set:
				continue
			all_added.append(SETPREFIX + k)
		all_added.extend(added_favorites)
		all_added.sort()
		for a in all_added:
			print ">>> Recording %s in \"world\" favorites file..." % \
				colorize("INFORM", str(a))
		if all_added:
			world_set.update(all_added)

		if world_locked:
			world_set.unlock()
4780
4781
	def loadResumeCommand(self, resume_data, skip_masked=True,
		skip_missing=True):
		"""
		Add a resume command to the graph and validate it in the process.  This
		will raise a PackageNotFound exception if a package is not available.

		@param resume_data: the "resume" dict saved by a previous emerge
			run, expected to contain a "mergelist" and possibly "favorites"
		@param skip_masked: if True, merge tasks that are no longer
			visible are collected as masked tasks instead of being queued
			for an unsatisfied-dep display
		@param skip_missing: if True, silently skip mergelist entries
			whose packages no longer exist in the relevant tree
		@rtype: bool
		@return: True if the resume list was added to the graph, False
			on invalid input or graph-creation failure
		@raise portage.exception.PackageNotFound: a mergelist entry is
			missing and skip_missing is False
		@raise UnsatisfiedResumeDep: the resume list has masked tasks or
			unsatisfied dependencies (e.g. after --skipfirst)
		"""

		if not isinstance(resume_data, dict):
			return False

		mergelist = resume_data.get("mergelist")
		if not isinstance(mergelist, list):
			mergelist = []

		fakedb = self.mydbapi
		trees = self.trees
		serialized_tasks = []
		masked_tasks = []
		# Rebuild Package instances from the serialized mergelist entries,
		# which have the form [pkg_type, root, cpv, action].
		for x in mergelist:
			if not (isinstance(x, list) and len(x) == 4):
				continue
			pkg_type, myroot, pkg_key, action = x
			if pkg_type not in self.pkg_tree_map:
				continue
			if action != "merge":
				continue
			tree_type = self.pkg_tree_map[pkg_type]
			mydb = trees[myroot][tree_type].dbapi
			db_keys = list(self._trees_orig[myroot][
				tree_type].dbapi._aux_cache_keys)
			try:
				metadata = izip(db_keys, mydb.aux_get(pkg_key, db_keys))
			except KeyError:
				# It does not exist or it is corrupt.
				if action == "uninstall":
					continue
				if skip_missing:
					# TODO: log these somewhere
					continue
				raise portage.exception.PackageNotFound(pkg_key)
			installed = action == "uninstall"
			built = pkg_type != "ebuild"
			root_config = self.roots[myroot]
			pkg = Package(built=built, cpv=pkg_key,
				installed=installed, metadata=metadata,
				operation=action, root_config=root_config,
				type_name=pkg_type)
			if pkg_type == "ebuild":
				# Recompute USE for the current configuration rather
				# than trusting whatever was serialized.
				pkgsettings = self.pkgsettings[myroot]
				pkgsettings.setcpv(pkg)
				pkg.metadata["USE"] = pkgsettings["PORTAGE_USE"]
				pkg.metadata['CHOST'] = pkgsettings.get('CHOST', '')
			self._pkg_cache[pkg] = pkg

			root_config = self.roots[pkg.root]
			if "merge" == pkg.operation and \
				not visible(root_config.settings, pkg):
				if skip_masked:
					masked_tasks.append(Dependency(root=pkg.root, parent=pkg))
				else:
					self._unsatisfied_deps_for_display.append(
						((pkg.root, "="+pkg.cpv), {"myparent":None}))

			fakedb[myroot].cpv_inject(pkg)
			serialized_tasks.append(pkg)
			self.spinner.update()

		if self._unsatisfied_deps_for_display:
			return False

		if not serialized_tasks or "--nodeps" in self.myopts:
			self._serialized_tasks_cache = serialized_tasks
			self._scheduler_graph = self.digraph
		else:
			self._select_package = self._select_pkg_from_graph
			self.myparams.add("selective")
			# Always traverse deep dependencies in order to account for
			# potentially unsatisfied dependencies of installed packages.
			# This is necessary for correct --keep-going or --resume operation
			# in case a package from a group of circularly dependent packages
			# fails. In this case, a package which has recently been installed
			# may have an unsatisfied circular dependency (pulled in by
			# PDEPEND, for example). So, even though a package is already
			# installed, it may not have all of it's dependencies satisfied, so
			# it may not be usable. If such a package is in the subgraph of
			# deep depenedencies of a scheduled build, that build needs to
			# be cancelled. In order for this type of situation to be
			# recognized, deep traversal of dependencies is required.
			self.myparams.add("deep")

			favorites = resume_data.get("favorites")
			args_set = self._sets["args"]
			if isinstance(favorites, list):
				args = self._load_favorites(favorites)
			else:
				args = []

			for task in serialized_tasks:
				if isinstance(task, Package) and \
					task.operation == "merge":
					if not self._add_pkg(task, None):
						return False

			# Packages for argument atoms need to be explicitly
			# added via _add_pkg() so that they are included in the
			# digraph (needed at least for --tree display).
			for arg in args:
				for atom in arg.set:
					pkg, existing_node = self._select_package(
						arg.root_config.root, atom)
					if existing_node is None and \
						pkg is not None:
						if not self._add_pkg(pkg, Dependency(atom=atom,
							root=pkg.root, parent=arg)):
							return False

			# Allow unsatisfied deps here to avoid showing a masking
			# message for an unsatisfied dep that isn't necessarily
			# masked.
			if not self._create_graph(allow_unsatisfied=True):
				return False

			unsatisfied_deps = []
			for dep in self._unsatisfied_deps:
				if not isinstance(dep.parent, Package):
					continue
				if dep.parent.operation == "merge":
					unsatisfied_deps.append(dep)
					continue

				# For unsatisfied deps of installed packages, only account for
				# them if they are in the subgraph of dependencies of a package
				# which is scheduled to be installed.
				unsatisfied_install = False
				traversed = set()
				# Walk ancestors of the installed package, looking for
				# any node that is scheduled for merge.
				dep_stack = self.digraph.parent_nodes(dep.parent)
				while dep_stack:
					node = dep_stack.pop()
					if not isinstance(node, Package):
						continue
					if node.operation == "merge":
						unsatisfied_install = True
						break
					if node in traversed:
						continue
					traversed.add(node)
					dep_stack.extend(self.digraph.parent_nodes(node))

				if unsatisfied_install:
					unsatisfied_deps.append(dep)

			if masked_tasks or unsatisfied_deps:
				# This probably means that a required package
				# was dropped via --skipfirst. It makes the
				# resume list invalid, so convert it to a
				# UnsatisfiedResumeDep exception.
				raise self.UnsatisfiedResumeDep(self,
					masked_tasks + unsatisfied_deps)
			self._serialized_tasks_cache = None
			try:
				self.altlist()
			except self._unknown_internal_error:
				return False

		return True
4946
4947
	def _load_favorites(self, favorites):
		"""
		Use a list of favorites to resume state from a
		previous select_files() call. This creates similar
		DependencyArg instances to those that would have
		been created by the original select_files() call.
		This allows Package instances to be matched with
		DependencyArg instances during graph creation.
		"""
		root_config = self.roots[self.target_root]
		get_set_atoms = root_config.setconfig.getSetAtoms
		available_sets = root_config.sets
		dep_args = []
		for favorite in favorites:
			if not isinstance(favorite, basestring):
				continue
			if favorite in ("system", "world"):
				favorite = SETPREFIX + favorite
			if not favorite.startswith(SETPREFIX):
				# A plain atom argument.
				if portage.isvalidatom(favorite):
					dep_args.append(AtomArg(arg=favorite, atom=favorite,
						root_config=root_config))
				continue
			set_name = favorite[len(SETPREFIX):]
			if set_name not in available_sets:
				continue
			if set_name in self._sets:
				continue
			# Recursively expand sets so that containment tests in
			# self._get_parent_sets() properly match atoms in nested
			# sets (like if world contains system).
			expanded_set = InternalPackageSet(
				initial_atoms=get_set_atoms(set_name))
			self._sets[set_name] = expanded_set
			dep_args.append(SetArg(arg=favorite, set=expanded_set,
				root_config=root_config))

		self._set_args(dep_args)
		return dep_args
4987
4988
	class UnsatisfiedResumeDep(portage.exception.PortageException):
		"""
		A dependency of a resume list is not installed. This
		can occur when a required package is dropped from the
		merge list via --skipfirst.
		"""
		def __init__(self, depgraph, value):
			portage.exception.PortageException.__init__(self, value)
			# Keep a reference to the depgraph so the handler can
			# inspect the failed dependencies.
			self.depgraph = depgraph
4997
4998
	class _internal_exception(portage.exception.PortageException):
		"""
		Base class for exceptions used internally by the depgraph to
		control graph-creation flow; never intended to escape to callers.
		"""
		def __init__(self, value=""):
			portage.exception.PortageException.__init__(self, value)
5001
5002
	class _unknown_internal_error(_internal_exception):
		"""
		Used by the depgraph internally to terminate graph creation.
		The specific reason for the failure should have been dumped
		to stderr, unfortunately, the exact reason for the failure
		may not be known.
		"""
5009
5010
	class _serialize_tasks_retry(_internal_exception):
		"""
		This is raised by the _serialize_tasks() method when it needs to
		be called again for some reason. The only case that it's currently
		used for is when neglected dependencies need to be added to the
		graph in order to avoid making a potentially unsafe decision.
		"""
5017
5018
	class _dep_check_composite_db(portage.dbapi):
		"""
		A dbapi-like interface that is optimized for use in dep_check() calls.
		This is built on top of the existing depgraph package selection logic.
		Some packages that have been added to the graph may be masked from this
		view in order to influence the atom preference selection that occurs
		via dep_check().
		"""
		def __init__(self, depgraph, root):
			portage.dbapi.__init__(self)
			self._depgraph = depgraph
			self._root = root
			# atom -> list of matching cpvs (copied on return)
			self._match_cache = {}
			# cpv -> Package instance, populated as matches are found
			self._cpv_pkg_map = {}

		def _clear_cache(self):
			"""Discard memoized match results and cpv->Package mappings."""
			self._match_cache.clear()
			self._cpv_pkg_map.clear()

		def match(self, atom):
			"""
			Return the cpvs matching atom, filtered through the depgraph's
			package selection and visibility rules. Results are memoized
			per atom; a defensive copy is always returned.
			"""
			ret = self._match_cache.get(atom)
			if ret is not None:
				return ret[:]
			orig_atom = atom
			if "/" not in atom:
				atom = self._dep_expand(atom)
			pkg, existing = self._depgraph._select_package(self._root, atom)
			if not pkg:
				ret = []
			else:
				# Return the highest available from select_package() as well as
				# any matching slots in the graph db.
				slots = set()
				slots.add(pkg.metadata["SLOT"])
				atom_cp = portage.dep_getkey(atom)
				if pkg.cp.startswith("virtual/"):
					# For new-style virtual lookahead that occurs inside
					# dep_check(), examine all slots. This is needed
					# so that newer slots will not unnecessarily be pulled in
					# when a satisfying lower slot is already installed. For
					# example, if virtual/jdk-1.4 is satisfied via kaffe then
					# there's no need to pull in a newer slot to satisfy a
					# virtual/jdk dependency.
					for db, pkg_type, built, installed, db_keys in \
						self._depgraph._filtered_trees[self._root]["dbs"]:
						for cpv in db.match(atom):
							if portage.cpv_getkey(cpv) != pkg.cp:
								continue
							slots.add(db.aux_get(cpv, ["SLOT"])[0])
				ret = []
				if self._visible(pkg):
					self._cpv_pkg_map[pkg.cpv] = pkg
					ret.append(pkg.cpv)
				# The selected pkg's slot was handled above; now collect the
				# best visible candidate from each remaining slot.
				slots.remove(pkg.metadata["SLOT"])
				while slots:
					slot_atom = "%s:%s" % (atom_cp, slots.pop())
					pkg, existing = self._depgraph._select_package(
						self._root, slot_atom)
					if not pkg:
						continue
					if not self._visible(pkg):
						continue
					self._cpv_pkg_map[pkg.cpv] = pkg
					ret.append(pkg.cpv)
				if ret:
					self._cpv_sort_ascending(ret)
			self._match_cache[orig_atom] = ret
			return ret[:]

		def _visible(self, pkg):
			"""
			Decide whether pkg may be offered to dep_check(). Masks
			installed packages that match a dependency argument when not
			in selective mode, installed packages that fail visibility
			checks, and packages that would cause slot conflicts.
			"""
			if pkg.installed and "selective" not in self._depgraph.myparams:
				try:
					arg = self._depgraph._iter_atoms_for_pkg(pkg).next()
				except (StopIteration, portage.exception.InvalidDependString):
					arg = None
				if arg:
					return False
			if pkg.installed:
				try:
					if not visible(
						self._depgraph.pkgsettings[pkg.root], pkg):
						return False
				except portage.exception.InvalidDependString:
					# Best effort: an unparseable depstring doesn't make the
					# installed package invisible here.
					pass
			in_graph = self._depgraph._slot_pkg_map[
				self._root].get(pkg.slot_atom)
			if in_graph is None:
				# Mask choices for packages which are not the highest visible
				# version within their slot (since they usually trigger slot
				# conflicts).
				highest_visible, in_graph = self._depgraph._select_package(
					self._root, pkg.slot_atom)
				if pkg != highest_visible:
					return False
			elif in_graph != pkg:
				# Mask choices for packages that would trigger a slot
				# conflict with a previously selected package.
				return False
			return True

		def _dep_expand(self, atom):
			"""
			This is only needed for old installed packages that may
			contain atoms that are not fully qualified with a specific
			category. Emulate the cpv_expand() function that's used by
			dbapi.match() in cases like this. If there are multiple
			matches, it's often due to a new-style virtual that has
			been added, so try to filter those out to avoid raising
			a ValueError.
			"""
			root_config = self._depgraph.roots[self._root]
			orig_atom = atom
			expanded_atoms = self._depgraph._dep_expand(root_config, atom)
			if len(expanded_atoms) > 1:
				non_virtual_atoms = []
				for x in expanded_atoms:
					if not portage.dep_getkey(x).startswith("virtual/"):
						non_virtual_atoms.append(x)
				if len(non_virtual_atoms) == 1:
					expanded_atoms = non_virtual_atoms
			if len(expanded_atoms) > 1:
				# compatible with portage.cpv_expand()
				raise portage.exception.AmbiguousPackageName(
					[portage.dep_getkey(x) for x in expanded_atoms])
			if expanded_atoms:
				atom = expanded_atoms[0]
			else:
				null_atom = insert_category_into_atom(atom, "null")
				null_cp = portage.dep_getkey(null_atom)
				cat, atom_pn = portage.catsplit(null_cp)
				virts_p = root_config.settings.get_virts_p().get(atom_pn)
				if virts_p:
					# Allow the resolver to choose which virtual.
					atom = insert_category_into_atom(atom, "virtual")
				else:
					atom = insert_category_into_atom(atom, "null")
			return atom

		def aux_get(self, cpv, wants):
			"""Return metadata values for cpv, '' for any missing key."""
			metadata = self._cpv_pkg_map[cpv].metadata
			return [metadata.get(x, "") for x in wants]
5159
5160
class Scheduler(PollScheduler):

	# Options under which blocker detection is pointless (nothing is
	# actually merged into the live filesystem).
	_opts_ignore_blockers = \
		frozenset(["--buildpkgonly",
		"--fetchonly", "--fetch-all-uri",
		"--nodeps", "--pretend"])

	# Options that force foreground (non-background) output handling.
	_opts_no_background = \
		frozenset(["--pretend",
		"--fetchonly", "--fetch-all-uri"])

	# Options under which emerge never re-execs itself after a portage
	# upgrade.
	_opts_no_restart = frozenset(["--buildpkgonly",
		"--fetchonly", "--fetch-all-uri", "--pretend"])

	# Options that must not leak into an automatic --resume invocation.
	_bad_resume_opts = set(["--ask", "--changelog",
		"--resume", "--skipfirst"])

	# Shared log file for all parallel-fetch output.
	_fetch_log = os.path.join(_emerge_log_dir, 'emerge-fetch.log')
5178
5179
	class _iface_class(SlotObject):
		# Callback bundle handed to tasks so they can interact with the
		# scheduler (logging, fetch scheduling, event registration) without
		# holding a direct reference to it.
		__slots__ = ("dblinkEbuildPhase", "dblinkDisplayMerge",
			"dblinkElog", "dblinkEmergeLog", "fetch", "register", "schedule",
			"scheduleSetup", "scheduleUnpack", "scheduleYield",
			"unregister")
5184
5185
	class _fetch_iface_class(SlotObject):
		# Minimal interface given to fetch tasks: the shared log file and
		# a callable to enqueue further fetchers.
		__slots__ = ("log_file", "schedule")
5187
5188
	# Container type holding one SequentialTaskQueue per task category.
	_task_queues_class = slot_dict_class(
		("merge", "jobs", "fetch", "unpack"), prefix="")
5190
5191
	class _build_opts_class(SlotObject):
		# Boolean snapshot of build-related command line options; each slot
		# name maps to the "--name" option (underscores become dashes).
		__slots__ = ("buildpkg", "buildpkgonly",
			"fetch_all_uri", "fetchonly", "pretend")
5194
5195
	class _binpkg_opts_class(SlotObject):
		# Boolean snapshot of binary-package-related command line options.
		__slots__ = ("fetchonly", "getbinpkg", "pretend")
5197
5198
	class _pkg_count_class(SlotObject):
		# Progress counter: curval packages merged out of maxval total.
		__slots__ = ("curval", "maxval")
5200
5201
	class _emerge_log_class(SlotObject):
		# xterm_titles: whether short messages may also update the
		# terminal title.
		__slots__ = ("xterm_titles",)

		def log(self, *pargs, **kwargs):
			"""Forward to emergelog(), dropping short_msg when titles
			are disabled."""
			if not self.xterm_titles:
				# Avoid interference with the scheduler's status display.
				kwargs.pop("short_msg", None)
			emergelog(self.xterm_titles, *pargs, **kwargs)
5209
5210
	class _failed_pkg(SlotObject):
		# Record of a failed merge: where it built, its log, the Package,
		# and the exit status.
		__slots__ = ("build_dir", "build_log", "pkg", "returncode")
5212
5213
	class _ConfigPool(object):
		"""Interface for a task to temporarily allocate a config
		instance from a pool. This allows a task to be constructed
		long before the config instance actually becomes needed, like
		when prefetchers are constructed for the whole merge list."""
		__slots__ = ("_root", "_allocate", "_deallocate")
		def __init__(self, root, allocate, deallocate):
			self._root = root
			self._allocate = allocate
			self._deallocate = deallocate
		def allocate(self):
			"""Borrow a config instance for self._root from the pool."""
			return self._allocate(self._root)
		def deallocate(self, settings):
			"""Return a previously allocated config instance to the pool."""
			self._deallocate(settings)
5227
5228
	class _unknown_internal_error(portage.exception.PortageException):
		"""
		Used internally to terminate scheduling. The specific reason for
		the failure should have been dumped to stderr.
		"""
		def __init__(self, value=""):
			portage.exception.PortageException.__init__(self, value)
5235
5236
	def __init__(self, settings, trees, mtimedb, myopts,
		spinner, mergelist, favorites, digraph):
		"""
		@param settings: global portage config
		@param trees: per-root tree/dbapi mapping
		@param mtimedb: persistent state db (holds the resume list)
		@param myopts: parsed emerge command line options
		@param spinner: progress spinner
		@param mergelist: ordered list of tasks to merge
		@param favorites: atoms/sets requested on the command line
		@param digraph: dependency graph for the merge list
		"""
		PollScheduler.__init__(self)
		self.settings = settings
		self.target_root = settings["ROOT"]
		self.trees = trees
		self.myopts = myopts
		self._spinner = spinner
		self._mtimedb = mtimedb
		self._mergelist = mergelist
		self._favorites = favorites
		self._args_set = InternalPackageSet(favorites)
		# Populate option snapshots: slot "foo_bar" <-> option "--foo-bar".
		self._build_opts = self._build_opts_class()
		for k in self._build_opts.__slots__:
			setattr(self._build_opts, k, "--" + k.replace("_", "-") in myopts)
		self._binpkg_opts = self._binpkg_opts_class()
		for k in self._binpkg_opts.__slots__:
			setattr(self._binpkg_opts, k, "--" + k.replace("_", "-") in myopts)

		self.curval = 0
		self._logger = self._emerge_log_class()
		self._task_queues = self._task_queues_class()
		for k in self._task_queues.allowed_keys:
			setattr(self._task_queues, k,
				SequentialTaskQueue())

		# Holds merges that will wait to be executed when no builds are
		# executing. This is useful for system packages since dependencies
		# on system packages are frequently unspecified.
		self._merge_wait_queue = []
		# Holds merges that have been transfered from the merge_wait_queue to
		# the actual merge queue. They are removed from this list upon
		# completion. Other packages can start building only when this list is
		# empty.
		self._merge_wait_scheduled = []

		# Holds system packages and their deep runtime dependencies. Before
		# being merged, these packages go to merge_wait_queue, to be merged
		# when no other packages are building.
		self._deep_system_deps = set()

		# Holds packages to merge which will satisfy currently unsatisfied
		# deep runtime dependencies of system packages. If this is not empty
		# then no parallel builds will be spawned until it is empty. This
		# minimizes the possibility that a build will fail due to the system
		# being in a fragile state. For example, see bug #259954.
		self._unsatisfied_system_deps = set()

		self._status_display = JobStatusDisplay(
			xterm_titles=('notitles' not in settings.features))
		self._max_load = myopts.get("--load-average")
		max_jobs = myopts.get("--jobs")
		if max_jobs is None:
			max_jobs = 1
		self._set_max_jobs(max_jobs)

		# The root where the currently running
		# portage instance is installed.
		self._running_root = trees["/"]["root_config"]
		self.edebug = 0
		if settings.get("PORTAGE_DEBUG", "") == "1":
			self.edebug = 1
		self.pkgsettings = {}
		self._config_pool = {}
		self._blocker_db = {}
		for root in trees:
			self._config_pool[root] = []
			self._blocker_db[root] = BlockerDB(trees[root]["root_config"])

		fetch_iface = self._fetch_iface_class(log_file=self._fetch_log,
			schedule=self._schedule_fetch)
		self._sched_iface = self._iface_class(
			dblinkEbuildPhase=self._dblink_ebuild_phase,
			dblinkDisplayMerge=self._dblink_display_merge,
			dblinkElog=self._dblink_elog,
			dblinkEmergeLog=self._dblink_emerge_log,
			fetch=fetch_iface, register=self._register,
			schedule=self._schedule_wait,
			scheduleSetup=self._schedule_setup,
			scheduleUnpack=self._schedule_unpack,
			scheduleYield=self._schedule_yield,
			unregister=self._unregister)

		# Weak values so completed prefetchers don't accumulate.
		self._prefetchers = weakref.WeakValueDictionary()
		self._pkg_queue = []
		self._completed_tasks = set()

		self._failed_pkgs = []
		self._failed_pkgs_all = []
		self._failed_pkgs_die_msgs = []
		self._post_mod_echo_msgs = []
		self._parallel_fetch = False
		merge_count = len([x for x in mergelist \
			if isinstance(x, Package) and x.operation == "merge"])
		self._pkg_count = self._pkg_count_class(
			curval=0, maxval=merge_count)
		self._status_display.maxval = self._pkg_count.maxval

		# The load average takes some time to respond when new
		# jobs are added, so we need to limit the rate of adding
		# new jobs.
		self._job_delay_max = 10
		self._job_delay_factor = 1.0
		self._job_delay_exp = 1.5
		self._previous_job_start_time = None

		self._set_digraph(digraph)

		# This is used to memoize the _choose_pkg() result when
		# no packages can be chosen until one of the existing
		# jobs completes.
		self._choose_pkg_return_early = False

		features = self.settings.features
		if "parallel-fetch" in features and \
			not ("--pretend" in self.myopts or \
			"--fetch-all-uri" in self.myopts or \
			"--fetchonly" in self.myopts):
			if "distlocks" not in features:
				portage.writemsg(red("!!!")+"\n", noiselevel=-1)
				portage.writemsg(red("!!!")+" parallel-fetching " + \
					"requires the distlocks feature enabled"+"\n",
					noiselevel=-1)
				portage.writemsg(red("!!!")+" you have it disabled, " + \
					"thus parallel-fetching is being disabled"+"\n",
					noiselevel=-1)
				portage.writemsg(red("!!!")+"\n", noiselevel=-1)
			elif len(mergelist) > 1:
				self._parallel_fetch = True

		if self._parallel_fetch:
			# clear out existing fetch log if it exists
			try:
				open(self._fetch_log, 'w').close()
			except EnvironmentError:
				# Best effort: a missing/unwritable log dir is not fatal.
				pass

		self._running_portage = None
		portage_match = self._running_root.trees["vartree"].dbapi.match(
			portage.const.PORTAGE_PACKAGE_ATOM)
		if portage_match:
			cpv = portage_match.pop()
			self._running_portage = self._pkg(cpv, "installed",
				self._running_root, installed=True)
5380
5381
	def _poll(self, timeout=None):
		"""Run one scheduling pass before delegating to the base poll."""
		self._schedule()
		PollScheduler._poll(self, timeout=timeout)
5384
5385
	def _set_max_jobs(self, max_jobs):
		"""Set the job limit and propagate it to the jobs queue.
		max_jobs may be True, meaning unlimited."""
		self._max_jobs = max_jobs
		self._task_queues.jobs.max_jobs = max_jobs
5388
5389
	def _background_mode(self):
		"""
		Check if background mode is enabled and adjust states as necessary.

		@rtype: bool
		@returns: True if background mode is enabled, False otherwise.
		"""
		# Background output is only used for parallel/quiet runs that
		# don't explicitly demand foreground behavior.
		background = (self._max_jobs is True or \
			self._max_jobs > 1 or "--quiet" in self.myopts) and \
			not bool(self._opts_no_background.intersection(self.myopts))

		if background:
			interactive_tasks = self._get_interactive_tasks()
			if interactive_tasks:
				# Interactive packages need the terminal, so fall back to
				# foreground output and serial jobs.
				background = False
				writemsg_level(">>> Sending package output to stdio due " + \
					"to interactive package(s):\n",
					level=logging.INFO, noiselevel=-1)
				msg = [""]
				for pkg in interactive_tasks:
					pkg_str = "  " + colorize("INFORM", str(pkg.cpv))
					if pkg.root != "/":
						pkg_str += " for " + pkg.root
					msg.append(pkg_str)
				msg.append("")
				writemsg_level("".join("%s\n" % (l,) for l in msg),
					level=logging.INFO, noiselevel=-1)
				if self._max_jobs is True or self._max_jobs > 1:
					self._set_max_jobs(1)
					writemsg_level(">>> Setting --jobs=1 due " + \
						"to the above interactive package(s)\n",
						level=logging.INFO, noiselevel=-1)

		self._status_display.quiet = \
			not background or \
			("--quiet" in self.myopts and \
			"--verbose" not in self.myopts)

		self._logger.xterm_titles = \
			"notitles" not in self.settings.features and \
			self._status_display.quiet

		return background
5432
5433
	def _get_interactive_tasks(self):
5434
		from portage import flatten
5435
		from portage.dep import use_reduce, paren_reduce
5436
		interactive_tasks = []
5437
		for task in self._mergelist:
5438
			if not (isinstance(task, Package) and \
5439
				task.operation == "merge"):
5440
				continue
5441
			try:
5442
				properties = flatten(use_reduce(paren_reduce(
5443
					task.metadata["PROPERTIES"]), uselist=task.use.enabled))
5444
			except portage.exception.InvalidDependString, e:
5445
				show_invalid_depstring_notice(task,
5446
					task.metadata["PROPERTIES"], str(e))
5447
				raise self._unknown_internal_error()
5448
			if "interactive" in properties:
5449
				interactive_tasks.append(task)
5450
		return interactive_tasks
5451
5452
	def _set_digraph(self, digraph):
		"""
		Store the dependency graph and derive scheduling metadata from it.
		The graph is discarded entirely when it cannot influence scheduling
		(--nodeps, or at most one job at a time).
		"""
		if "--nodeps" in self.myopts or \
			(self._max_jobs is not True and self._max_jobs < 2):
			# save some memory
			self._digraph = None
			return

		self._digraph = digraph
		self._find_system_deps()
		self._prune_digraph()
		self._prevent_builddir_collisions()
5463
5464
	def _find_system_deps(self):
		"""
		Find system packages and their deep runtime dependencies. Before being
		merged, these packages go to merge_wait_queue, to be merged when no
		other packages are building.
		"""
		deep_system_deps = self._deep_system_deps
		deep_system_deps.clear()
		deep_system_deps.update(
			_find_deep_system_runtime_deps(self._digraph))
		# Only packages actually being merged are relevant here.
		deep_system_deps.difference_update([pkg for pkg in \
			deep_system_deps if pkg.operation != "merge"])
5476
5477
	def _prune_digraph(self):
		"""
		Prune any root nodes that are irrelevant.

		Repeats until a fixed point, since removing a root node may expose
		new prunable roots beneath it.
		"""

		graph = self._digraph
		completed_tasks = self._completed_tasks
		removed_nodes = set()
		while True:
			for node in graph.root_nodes():
				# Non-package nodes, installed no-ops, onlydeps nodes and
				# already-completed tasks cannot affect scheduling.
				if not isinstance(node, Package) or \
					(node.installed and node.operation == "nomerge") or \
					node.onlydeps or \
					node in completed_tasks:
					removed_nodes.add(node)
			if removed_nodes:
				graph.difference_update(removed_nodes)
			if not removed_nodes:
				break
			removed_nodes.clear()
5497
5498
	def _prevent_builddir_collisions(self):
		"""
		When building stages, sometimes the same exact cpv needs to be merged
		to both $ROOTs. Add edges to the digraph in order to avoid collisions
		in the builddir. Currently, normal file locks would be inappropriate
		for this purpose since emerge holds all of it's build dir locks from
		the main process.
		"""
		cpv_map = {}
		for pkg in self._mergelist:
			if not isinstance(pkg, Package):
				# a satisfied blocker
				continue
			if pkg.installed:
				continue
			if pkg.cpv not in cpv_map:
				cpv_map[pkg.cpv] = [pkg]
				continue
			# Force later duplicates of this cpv to build after all
			# earlier ones by adding buildtime edges.
			for earlier_pkg in cpv_map[pkg.cpv]:
				self._digraph.add(earlier_pkg, pkg,
					priority=DepPriority(buildtime=True))
			cpv_map[pkg.cpv].append(pkg)
5520
5521
	class _pkg_failure(portage.exception.PortageException):
		"""
		An instance of this class is raised by unmerge() when
		an uninstallation fails.
		"""
		# Default exit status; overridden by the first positional arg.
		status = 1
		def __init__(self, *pargs):
			portage.exception.PortageException.__init__(self, pargs)
			if pargs:
				self.status = pargs[0]
5531
5532
	def _schedule_fetch(self, fetcher):
		"""
		Schedule a fetcher on the fetch queue, in order to
		serialize access to the fetch log.
		"""
		self._task_queues.fetch.addFront(fetcher)
5538
5539
	def _schedule_setup(self, setup_phase):
		"""
		Schedule a setup phase on the merge queue, in order to
		serialize unsandboxed access to the live filesystem.
		"""
		self._task_queues.merge.addFront(setup_phase)
		self._schedule()
5546
5547
	def _schedule_unpack(self, unpack_phase):
		"""
		Schedule an unpack phase on the unpack queue, in order
		to serialize $DISTDIR access for live ebuilds.
		"""
		self._task_queues.unpack.add(unpack_phase)
5553
5554
	def _find_blockers(self, new_pkg):
		"""
		Returns a callable which should be called only when
		the vdb lock has been acquired.
		"""
		def get_blockers():
			# acquire_lock=0: the caller already holds the vdb lock.
			return self._find_blockers_with_lock(new_pkg, acquire_lock=0)
		return get_blockers
5562
5563
	def _find_blockers_with_lock(self, new_pkg, acquire_lock=0):
		"""
		Find installed packages that block new_pkg and return them as
		dblink instances ready for unmerge. Returns None when blockers
		are ignored for the current option set.
		"""
		if self._opts_ignore_blockers.intersection(self.myopts):
			return None

		# Call gc.collect() here to avoid heap overflow that
		# triggers 'Cannot allocate memory' errors (reported
		# with python-2.5).
		import gc
		gc.collect()

		blocker_db = self._blocker_db[new_pkg.root]

		blocker_dblinks = []
		for blocking_pkg in blocker_db.findInstalledBlockers(
			new_pkg, acquire_lock=acquire_lock):
			# A package never blocks itself or its own slot replacement.
			if new_pkg.slot_atom == blocking_pkg.slot_atom:
				continue
			if new_pkg.cpv == blocking_pkg.cpv:
				continue
			blocker_dblinks.append(portage.dblink(
				blocking_pkg.category, blocking_pkg.pf, blocking_pkg.root,
				self.pkgsettings[blocking_pkg.root], treetype="vartree",
				vartree=self.trees[blocking_pkg.root]["vartree"]))

		gc.collect()

		return blocker_dblinks
5590
5591
	def _dblink_pkg(self, pkg_dblink):
		"""Build a Package instance corresponding to a dblink object."""
		cpv = pkg_dblink.mycpv
		type_name = RootConfig.tree_pkg_map[pkg_dblink.treetype]
		root_config = self.trees[pkg_dblink.myroot]["root_config"]
		installed = type_name == "installed"
		return self._pkg(cpv, type_name, root_config, installed=installed)
5597
5598
	def _append_to_log_path(self, log_path, msg):
		"""Append msg to the file at log_path, always closing the file."""
		f = open(log_path, 'a')
		try:
			f.write(msg)
		finally:
			f.close()
5604
5605
	def _dblink_elog(self, pkg_dblink, phase, func, msgs):
		"""
		Route elog messages from a dblink: to stdout in the foreground,
		or appended to the package's log file in background mode.
		"""
		log_path = pkg_dblink.settings.get("PORTAGE_LOG_FILE")
		log_file = None
		out = sys.stdout
		background = self._background

		if background and log_path is not None:
			log_file = open(log_path, 'a')
			out = log_file

		try:
			for msg in msgs:
				func(msg, phase=phase, key=pkg_dblink.mycpv, out=out)
		finally:
			if log_file is not None:
				log_file.close()
5622
5623
	def _dblink_emerge_log(self, msg):
		"""Forward a dblink emerge-log message to the scheduler's logger."""
		self._logger.log(msg)
5625
5626
	def _dblink_display_merge(self, pkg_dblink, msg, level=0, noiselevel=0):
		"""
		Display a merge message for a dblink, suppressing low-severity
		output in background mode and mirroring it to the package log
		file when one exists.
		"""
		log_path = pkg_dblink.settings.get("PORTAGE_LOG_FILE")
		background = self._background

		if log_path is None:
			# No log file: only suppress sub-warning noise in background.
			if not (background and level < logging.WARN):
				portage.util.writemsg_level(msg,
					level=level, noiselevel=noiselevel)
		else:
			if not background:
				portage.util.writemsg_level(msg,
					level=level, noiselevel=noiselevel)
			self._append_to_log_path(log_path, msg)
5639
5640
	def _dblink_ebuild_phase(self,
		pkg_dblink, pkg_dbapi, ebuild_path, phase):
		"""
		Using this callback for merge phases allows the scheduler
		to run while these phases execute asynchronously, and allows
		the scheduler control output handling.

		@return: the phase's exit code
		"""

		scheduler = self._sched_iface
		settings = pkg_dblink.settings
		pkg = self._dblink_pkg(pkg_dblink)
		background = self._background
		log_path = settings.get("PORTAGE_LOG_FILE")

		ebuild_phase = EbuildPhase(background=background,
			pkg=pkg, phase=phase, scheduler=scheduler,
			settings=settings, tree=pkg_dblink.treetype)
		ebuild_phase.start()
		ebuild_phase.wait()

		return ebuild_phase.returncode
5661
5662
	def _generate_digests(self):
		"""
		Generate digests if necessary for --digests or FEATURES=digest.
		In order to avoid interference, this must done before parallel
		tasks are started.

		@return: os.EX_OK on success, 1 on failure
		"""

		if '--fetchonly' in self.myopts:
			return os.EX_OK

		digest = '--digest' in self.myopts
		if not digest:
			for pkgsettings in self.pkgsettings.itervalues():
				if 'digest' in pkgsettings.features:
					digest = True
					break

		if not digest:
			return os.EX_OK

		for x in self._mergelist:
			if not isinstance(x, Package) or \
				x.type_name != 'ebuild' or \
				x.operation != 'merge':
				continue
			pkgsettings = self.pkgsettings[x.root]
			# Re-check per root: digest may be enabled only for some roots.
			if '--digest' not in self.myopts and \
				'digest' not in pkgsettings.features:
				continue
			portdb = x.root_config.trees['porttree'].dbapi
			ebuild_path = portdb.findname(x.cpv)
			if not ebuild_path:
				writemsg_level(
					"!!! Could not locate ebuild for '%s'.\n" \
					% x.cpv, level=logging.ERROR, noiselevel=-1)
				return 1
			pkgsettings['O'] = os.path.dirname(ebuild_path)
			if not portage.digestgen([], pkgsettings, myportdb=portdb):
				writemsg_level(
					"!!! Unable to generate manifest for '%s'.\n" \
					% x.cpv, level=logging.ERROR, noiselevel=-1)
				return 1

		return os.EX_OK
5706
5707
	def _check_manifests(self):
		"""
		Verify all the manifests now so that the user is notified of
		failure as soon as possible.

		@return: os.EX_OK on success (or when checking is skipped),
			1 on the first verification failure
		"""
		if "strict" not in self.settings.features or \
			"--fetchonly" in self.myopts or \
			"--fetch-all-uri" in self.myopts:
			return os.EX_OK

		shown_verifying_msg = False
		# Clone quiet configs per root so digestcheck doesn't spam output.
		quiet_settings = {}
		for myroot, pkgsettings in self.pkgsettings.iteritems():
			quiet_config = portage.config(clone=pkgsettings)
			quiet_config["PORTAGE_QUIET"] = "1"
			quiet_config.backup_changes("PORTAGE_QUIET")
			quiet_settings[myroot] = quiet_config
			del quiet_config

		for x in self._mergelist:
			if not isinstance(x, Package) or \
				x.type_name != "ebuild":
				continue

			if not shown_verifying_msg:
				shown_verifying_msg = True
				self._status_msg("Verifying ebuild manifests")

			root_config = x.root_config
			portdb = root_config.trees["porttree"].dbapi
			quiet_config = quiet_settings[root_config.root]
			quiet_config["O"] = os.path.dirname(portdb.findname(x.cpv))
			if not portage.digestcheck([], quiet_config, strict=True):
				return 1

		return os.EX_OK
5741
5742
	def _add_prefetchers(self):
		"""
		Queue background prefetchers for the merge list when
		parallel-fetch is enabled.
		"""

		if not self._parallel_fetch:
			return

		self._status_msg("Starting parallel fetch")

		prefetchers = self._prefetchers

		# In order to avoid "waiting for lock" messages
		# at the beginning, which annoy users, never
		# spawn a prefetcher for the first package.
		for pkg in self._mergelist[1:]:
			prefetcher = self._create_prefetcher(pkg)
			if prefetcher is not None:
				self._task_queues.fetch.add(prefetcher)
				prefetchers[pkg] = prefetcher
5761
5762
	def _create_prefetcher(self, pkg):
		"""
		@return: a prefetcher, or None if not applicable
		"""
		prefetcher = None

		if not isinstance(pkg, Package):
			# Satisfied blockers and similar entries have nothing to fetch.
			pass

		elif pkg.type_name == "ebuild":

			prefetcher = EbuildFetcher(background=True,
				config_pool=self._ConfigPool(pkg.root,
				self._allocate_config, self._deallocate_config),
				fetchonly=1, logfile=self._fetch_log,
				pkg=pkg, prefetch=True, scheduler=self._sched_iface)

		elif pkg.type_name == "binary" and \
			"--getbinpkg" in self.myopts and \
			pkg.root_config.trees["bintree"].isremote(pkg.cpv):

			prefetcher = BinpkgPrefetcher(background=True,
				pkg=pkg, scheduler=self._sched_iface)

		return prefetcher
5787
5788
	def _is_restart_scheduled(self):
		"""
		Check if the merge list contains a replacement
		for the current running instance, that will result
		in restart after merge.
		@rtype: bool
		@returns: True if a restart is scheduled, False otherwise.
		"""
		if self._opts_no_restart.intersection(self.myopts):
			return False

		mergelist = self._mergelist

		for i, pkg in enumerate(mergelist):
			# A portage upgrade only forces a restart when packages
			# remain after it in the list.
			if self._is_restart_necessary(pkg) and \
				i != len(mergelist) - 1:
				return True

		return False
5807
5808
	def _is_restart_necessary(self, pkg):
		"""
		@return: True if merging the given package
			requires restart, False otherwise.
		"""

		# Figure out if we need a restart.
		if pkg.root == self._running_root.root and \
			portage.match_from_list(
			portage.const.PORTAGE_PACKAGE_ATOM, [pkg]):
			if self._running_portage:
				# Only restart when the version actually changes.
				return pkg.cpv != self._running_portage.cpv
			return True
		return False
5822
5823
	def _restart_if_necessary(self, pkg):
		"""
		Use execv() to restart emerge. This happens
		if portage upgrades itself and there are
		remaining packages in the list.

		Does not return if a restart is performed (the process image is
		replaced via os.execv).
		"""

		if self._opts_no_restart.intersection(self.myopts):
			return

		if not self._is_restart_necessary(pkg):
			return

		# Nothing left to merge afterwards, so no restart is needed.
		if pkg == self._mergelist[-1]:
			return

		self._main_loop_cleanup()

		logger = self._logger
		pkg_count = self._pkg_count
		mtimedb = self._mtimedb
		bad_resume_opts = self._bad_resume_opts

		logger.log(" ::: completed emerge (%s of %s) %s to %s" % \
			(pkg_count.curval, pkg_count.maxval, pkg.cpv, pkg.root))

		logger.log(" *** RESTARTING " + \
			"emerge via exec() after change of " + \
			"portage version.")

		# Drop the just-merged portage entry from the resume list so the
		# new instance doesn't merge it again.
		mtimedb["resume"]["mergelist"].remove(list(pkg))
		mtimedb.commit()
		portage.run_exitfuncs()
		mynewargv = [sys.argv[0], "--resume"]
		resume_opts = self.myopts.copy()
		# For automatic resume, we need to prevent
		# any of bad_resume_opts from leaking in
		# via EMERGE_DEFAULT_OPTS.
		resume_opts["--ignore-default-opts"] = True
		for myopt, myarg in resume_opts.iteritems():
			if myopt not in bad_resume_opts:
				if myarg is True:
					mynewargv.append(myopt)
				else:
					mynewargv.append(myopt +"="+ str(myarg))
		# priority only needs to be adjusted on the first run
		os.environ["PORTAGE_NICENESS"] = "0"
		os.execv(mynewargv[0], mynewargv)
5871
5872
	def merge(self):
		"""
		Run the whole merge operation, retrying with a recalculated
		resume list when --keep-going is enabled, and report any
		failures at the end.

		@rtype: int
		@returns: os.EX_OK on success, otherwise a non-zero exit status.
		"""

		if "--resume" in self.myopts:
			# We're resuming.
			portage.writemsg_stdout(
				colorize("GOOD", "*** Resuming merge...\n"), noiselevel=-1)
			self._logger.log(" *** Resuming merge...")

		self._save_resume_list()

		try:
			self._background = self._background_mode()
		except self._unknown_internal_error:
			return 1

		for root in self.trees:
			root_config = self.trees[root]["root_config"]

			# Even for --pretend --fetch mode, PORTAGE_TMPDIR is required
			# since it might spawn pkg_nofetch which requires PORTAGE_BUILDDIR
			# for ensuring sane $PWD (bug #239560) and storing elog messages.
			tmpdir = root_config.settings.get("PORTAGE_TMPDIR", "")
			if not tmpdir or not os.path.isdir(tmpdir):
				msg = "The directory specified in your " + \
					"PORTAGE_TMPDIR variable, '%s', " % tmpdir + \
					"does not exist. Please create this " + \
					"directory or correct your PORTAGE_TMPDIR setting."
				msg = textwrap.wrap(msg, 70)
				out = portage.output.EOutput()
				for l in msg:
					out.eerror(l)
				return 1

			if self._background:
				root_config.settings.unlock()
				root_config.settings["PORTAGE_BACKGROUND"] = "1"
				root_config.settings.backup_changes("PORTAGE_BACKGROUND")
				root_config.settings.lock()

			self.pkgsettings[root] = portage.config(
				clone=root_config.settings)

		rval = self._generate_digests()
		if rval != os.EX_OK:
			return rval

		rval = self._check_manifests()
		if rval != os.EX_OK:
			return rval

		keep_going = "--keep-going" in self.myopts
		fetchonly = self._build_opts.fetchonly
		mtimedb = self._mtimedb
		failed_pkgs = self._failed_pkgs

		# Main merge loop: with --keep-going, drop failed packages from
		# the resume list and retry until nothing remains or a retry
		# is impossible.
		while True:
			rval = self._merge()
			if rval == os.EX_OK or fetchonly or not keep_going:
				break
			if "resume" not in mtimedb:
				break
			mergelist = self._mtimedb["resume"].get("mergelist")
			if not mergelist:
				break

			if not failed_pkgs:
				break

			for failed_pkg in failed_pkgs:
				mergelist.remove(list(failed_pkg.pkg))

			self._failed_pkgs_all.extend(failed_pkgs)
			del failed_pkgs[:]

			if not mergelist:
				break

			if not self._calc_resume_list():
				break

			clear_caches(self.trees)
			if not self._mergelist:
				break

			self._save_resume_list()
			self._pkg_count.curval = 0
			self._pkg_count.maxval = len([x for x in self._mergelist \
				if isinstance(x, Package) and x.operation == "merge"])
			self._status_display.maxval = self._pkg_count.maxval

		self._logger.log(" *** Finished. Cleaning up...")

		if failed_pkgs:
			self._failed_pkgs_all.extend(failed_pkgs)
			del failed_pkgs[:]

		background = self._background
		failure_log_shown = False
		if background and len(self._failed_pkgs_all) == 1:
			# If only one package failed then just show it's
			# whole log for easy viewing.
			# NOTE: dead locals (build_dir, log_paths) that duplicated
			# _locate_failure_log() internals have been removed here.
			failed_pkg = self._failed_pkgs_all[-1]
			log_file = None

			log_path = self._locate_failure_log(failed_pkg)
			if log_path is not None:
				try:
					log_file = open(log_path)
				except IOError:
					pass

			if log_file is not None:
				try:
					for line in log_file:
						writemsg_level(line, noiselevel=-1)
				finally:
					log_file.close()
				failure_log_shown = True

		# Dump mod_echo output now since it tends to flood the terminal.
		# This allows us to avoid having more important output, generated
		# later, from being swept away by the mod_echo output.
		mod_echo_output = _flush_elog_mod_echo()

		if background and not failure_log_shown and \
			self._failed_pkgs_all and \
			self._failed_pkgs_die_msgs and \
			not mod_echo_output:

			printer = portage.output.EOutput()
			for mysettings, key, logentries in self._failed_pkgs_die_msgs:
				root_msg = ""
				if mysettings["ROOT"] != "/":
					root_msg = " merged to %s" % mysettings["ROOT"]
				print
				printer.einfo("Error messages for package %s%s:" % \
					(colorize("INFORM", key), root_msg))
				print
				for phase in portage.const.EBUILD_PHASES:
					if phase not in logentries:
						continue
					for msgtype, msgcontent in logentries[phase]:
						if isinstance(msgcontent, basestring):
							msgcontent = [msgcontent]
						for line in msgcontent:
							printer.eerror(line.strip("\n"))

		if self._post_mod_echo_msgs:
			for msg in self._post_mod_echo_msgs:
				msg()

		# Final summary of all failed packages.
		if len(self._failed_pkgs_all) > 1 or \
			(self._failed_pkgs_all and "--keep-going" in self.myopts):
			if len(self._failed_pkgs_all) > 1:
				msg = "The following %d packages have " % \
					len(self._failed_pkgs_all) + \
					"failed to build or install:"
			else:
				msg = "The following package has " + \
					"failed to build or install:"
			prefix = bad(" * ")
			writemsg(prefix + "\n", noiselevel=-1)
			# Use the module-level textwrap import instead of a
			# redundant function-local "from textwrap import wrap".
			for line in textwrap.wrap(msg, 72):
				writemsg("%s%s\n" % (prefix, line), noiselevel=-1)
			writemsg(prefix + "\n", noiselevel=-1)
			for failed_pkg in self._failed_pkgs_all:
				writemsg("%s\t%s\n" % (prefix,
					colorize("INFORM", str(failed_pkg.pkg))),
					noiselevel=-1)
			writemsg(prefix + "\n", noiselevel=-1)

		return rval
6048
6049
	def _elog_listener(self, mysettings, key, logentries, fulltext):
		"""Collect ERROR-level elog entries for the final failure report."""
		errors = portage.elog.filter_loglevels(logentries, ["ERROR"])
		if not errors:
			return
		self._failed_pkgs_die_msgs.append((mysettings, key, errors))
6054
6055
	def _locate_failure_log(self, failed_pkg):
		"""
		Return the path of a non-empty build log for the given
		failed package, or None if no usable log exists.

		@param failed_pkg: a failed-package record with a build_log attribute
		@rtype: str or None
		"""
		# NOTE: removed unused locals (build_dir, log_file) from the
		# original implementation.
		for log_path in [failed_pkg.build_log]:
			if not log_path:
				continue

			try:
				log_size = os.stat(log_path).st_size
			except OSError:
				# Log file is missing or unreadable.
				continue

			# An empty log is useless for diagnosis.
			if log_size == 0:
				continue

			return log_path

		return None
6077
6078
	def _add_packages(self):
		"""
		Seed the scheduler's package queue with Package instances from
		the merge list; Blocker entries are intentionally skipped.
		"""
		self._pkg_queue.extend(
			task for task in self._mergelist if isinstance(task, Package))
6085
6086
	def _system_merge_started(self, merge):
		"""
		Add any unsatisfied runtime deps to self._unsatisfied_system_deps.
		"""
		graph = self._digraph
		if graph is None:
			return
		pkg = merge.merge.pkg

		# Skip this if $ROOT != / since it shouldn't matter if there
		# are unsatisfied system runtime deps in this case.
		if pkg.root != '/':
			return

		def ignore_non_runtime_or_satisfied(priority):
			"""
			Ignore non-runtime and satisfied runtime priorities.
			"""
			unsatisfied_runtime = isinstance(priority, DepPriority) and \
				not priority.satisfied and \
				(priority.runtime or priority.runtime_post)
			return not unsatisfied_runtime

		# When checking for unsatisfied runtime deps, only check
		# direct deps since indirect deps are checked when the
		# corresponding parent is merged.
		for dep in graph.child_nodes(pkg,
			ignore_priority=ignore_non_runtime_or_satisfied):
			if dep is pkg:
				continue
			if not isinstance(dep, Package) or \
				dep.operation == 'uninstall':
				continue
			if dep.operation == 'merge' and \
				dep not in self._completed_tasks:
				self._unsatisfied_system_deps.add(dep)
6126
6127
	def _merge_wait_exit_handler(self, task):
		"""Exit handler for merges that waited for build jobs to drain."""
		self._merge_wait_scheduled.remove(task)
		self._merge_exit(task)
6130
6131
	def _merge_exit(self, merge):
		"""Handle completion of a PackageMerge task and resume scheduling."""
		self._do_merge_exit(merge)
		self._deallocate_config(merge.merge.settings)
		merged_new_pkg = merge.returncode == os.EX_OK and \
			not merge.merge.pkg.installed
		if merged_new_pkg:
			self._status_display.curval += 1
		self._status_display.merges = len(self._task_queues.merge)
		self._schedule()
6139
6140
	def _do_merge_exit(self, merge):
		"""
		Record the outcome of a finished merge: log the failure, or mark
		the task complete and keep the on-disk resume list up to date.
		"""
		pkg = merge.merge.pkg
		if merge.returncode != os.EX_OK:
			settings = merge.merge.settings
			self._failed_pkgs.append(self._failed_pkg(
				build_dir=settings.get("PORTAGE_BUILDDIR"),
				build_log=settings.get("PORTAGE_LOG_FILE"),
				pkg=pkg,
				returncode=merge.returncode))
			self._failed_pkg_msg(self._failed_pkgs[-1], "install", "to")
			self._status_display.failed = len(self._failed_pkgs)
			return

		self._task_complete(pkg)

		replaced = merge.merge.pkg_to_replace
		if replaced is not None:
			# When a package is replaced, mark its uninstall
			# task complete (if any).
			self._task_complete(
				("installed", pkg.root, replaced.cpv, "uninstall"))

		if pkg.installed:
			return

		self._restart_if_necessary(pkg)

		# Call mtimedb.commit() after each merge so that
		# --resume still works after being interrupted
		# by reboot, sigkill or similar.
		mtimedb = self._mtimedb
		mtimedb["resume"]["mergelist"].remove(list(pkg))
		if not mtimedb["resume"]["mergelist"]:
			del mtimedb["resume"]
		mtimedb.commit()
6178
6179
	def _build_exit(self, build):
		"""
		Handle completion of a build job: queue its merge on success,
		otherwise record the failure; then resume scheduling.
		"""
		if build.returncode == os.EX_OK:
			self.curval += 1
			merge = PackageMerge(merge=build)
			is_deep_system_dep = not build.build_opts.buildpkgonly and \
				build.pkg in self._deep_system_deps
			if is_deep_system_dep:
				# Since dependencies on system packages are frequently
				# unspecified, merge them only when no builds are executing.
				self._merge_wait_queue.append(merge)
				merge.addStartListener(self._system_merge_started)
			else:
				merge.addExitListener(self._merge_exit)
				self._task_queues.merge.add(merge)
				self._status_display.merges = len(self._task_queues.merge)
		else:
			settings = build.settings
			self._failed_pkgs.append(self._failed_pkg(
				build_dir=settings.get("PORTAGE_BUILDDIR"),
				build_log=settings.get("PORTAGE_LOG_FILE"),
				pkg=build.pkg,
				returncode=build.returncode))
			self._failed_pkg_msg(self._failed_pkgs[-1], "emerge", "for")
			self._status_display.failed = len(self._failed_pkgs)
			self._deallocate_config(build.settings)
		self._jobs -= 1
		self._status_display.running = self._jobs
		self._schedule()
6209
6210
	def _extract_exit(self, build):
		"""Binary package extraction completion is handled like a build."""
		self._build_exit(build)
6212
6213
	def _task_complete(self, pkg):
		"""Record a finished task and allow package choice to retry."""
		self._unsatisfied_system_deps.discard(pkg)
		self._completed_tasks.add(pkg)
		self._choose_pkg_return_early = False
6217
6218
	def _merge(self):
		"""
		Run the main scheduling loop once and return the exit status of
		the last failed package, or os.EX_OK if nothing failed.

		@rtype: int
		"""
		# NOTE: removed the unused local alias "pkg_queue" from the
		# original implementation.
		self._add_prefetchers()
		self._add_packages()
		failed_pkgs = self._failed_pkgs
		portage.locks._quiet = self._background
		portage.elog._emerge_elog_listener = self._elog_listener
		rval = os.EX_OK

		try:
			self._main_loop()
		finally:
			# Always restore global state, even if the loop raised.
			self._main_loop_cleanup()
			portage.locks._quiet = False
			portage.elog._emerge_elog_listener = None
			if failed_pkgs:
				rval = failed_pkgs[-1].returncode

		return rval
6238
6239
	def _main_loop_cleanup(self):
		"""Reset all per-run scheduler state after the main loop exits."""
		del self._pkg_queue[:]
		self._task_queues.fetch.clear()
		self._completed_tasks.clear()
		self._unsatisfied_system_deps.clear()
		self._deep_system_deps.clear()
		self._choose_pkg_return_early = False
		self._digraph = None
		self._status_display.reset()
6248
6249
	def _choose_pkg(self):
		"""
		Choose a task that has all its dependencies satisfied, or
		return None when no safe choice is currently available.
		"""
		if self._choose_pkg_return_early:
			return None

		if self._digraph is None:
			# Without a digraph we cannot check dependencies; only allow
			# parallel picks when --nodeps explicitly permits them.
			nodeps_parallel = "--nodeps" in self.myopts and \
				(self._max_jobs is True or self._max_jobs > 1)
			if (self._jobs or self._task_queues.merge) and not nodeps_parallel:
				self._choose_pkg_return_early = True
				return None
			return self._pkg_queue.pop(0)

		if not (self._jobs or self._task_queues.merge):
			# Nothing is running, so the next queued task is safe.
			return self._pkg_queue.pop(0)

		self._prune_digraph()

		chosen_pkg = None
		later = set(self._pkg_queue)
		for candidate in self._pkg_queue:
			later.remove(candidate)
			if not self._dependent_on_scheduled_merges(candidate, later):
				chosen_pkg = candidate
				break

		if chosen_pkg is not None:
			self._pkg_queue.remove(chosen_pkg)
		else:
			# There's no point in searching for a package to
			# choose until at least one of the existing jobs
			# completes.
			self._choose_pkg_return_early = True

		return chosen_pkg
6288
6289
	def _dependent_on_scheduled_merges(self, pkg, later):
		"""
		Traverse the subgraph of the given package's deep dependencies
		to see if it contains any scheduled merges.
		@param pkg: a package to check dependencies for
		@type pkg: Package
		@param later: packages for which dependence should be ignored
			since they will be merged later than pkg anyway and therefore
			delaying the merge of pkg will not result in a more optimal
			merge order
		@type later: set
		@rtype: bool
		@returns: True if the package is dependent, False otherwise.
		"""
		graph = self._digraph
		completed_tasks = self._completed_tasks

		seen = set([pkg])
		direct_deps = graph.child_nodes(pkg)
		stack = list(direct_deps)
		direct_deps = frozenset(direct_deps)
		while stack:
			node = stack.pop()
			if node in seen:
				continue
			seen.add(node)
			# A node is harmless when it is already installed with
			# nothing to do, an uninstall that pkg does not directly
			# depend on, already completed, or scheduled after pkg.
			harmless = (node.installed and node.operation == "nomerge") or \
				(node.operation == "uninstall" and \
				node not in direct_deps) or \
				node in completed_tasks or \
				node in later
			if not harmless:
				return True
			stack.extend(graph.child_nodes(node))

		return False
6327
6328
	def _allocate_config(self, root):
		"""
		Allocate a unique config instance for a task in order
		to prevent interference between parallel tasks.
		"""
		pool = self._config_pool[root]
		if pool:
			temp_settings = pool.pop()
		else:
			temp_settings = portage.config(clone=self.pkgsettings[root])
		# Since config.setcpv() isn't guaranteed to call config.reset() due to
		# performance reasons, call it here to make sure all settings from the
		# previous package get flushed out (such as PORTAGE_LOG_FILE).
		temp_settings.reload()
		temp_settings.reset()
		return temp_settings
6343
6344
	def _deallocate_config(self, settings):
		"""Return a config instance to the per-root pool for reuse."""
		self._config_pool[settings["ROOT"]].append(settings)
6346
6347
	def _main_loop(self):
		"""Drive scheduling and event polling until all work finishes."""
		# Only allow 1 job max if a restart is scheduled
		# due to portage update.
		if self._is_restart_scheduled() or \
			self._opts_no_background.intersection(self.myopts):
			self._set_max_jobs(1)

		merge_queue = self._task_queues.merge

		# Schedule until no further work can be started.
		while self._schedule():
			if self._poll_event_handlers:
				self._poll_loop()

		# Drain jobs and merges that are still in flight.
		while True:
			self._schedule()
			if not (self._jobs or merge_queue):
				break
			if self._poll_event_handlers:
				self._poll_loop()
6367
6368
	def _keep_scheduling(self):
		"""Return True while packages remain and no failure forces a stop."""
		if not self._pkg_queue:
			return False
		# A failure stops scheduling unless we're only fetching.
		if self._failed_pkgs and not self._build_opts.fetchonly:
			return False
		return True
6371
6372
	def _schedule_tasks(self):
		"""
		One scheduling pass: promote waiting merges when idle, start new
		tasks, run each task queue, and report whether to keep going.
		"""
		# When the number of jobs drops to zero, process all waiting merges.
		if not self._jobs and self._merge_wait_queue:
			for waiting in self._merge_wait_queue:
				waiting.addExitListener(self._merge_wait_exit_handler)
				self._task_queues.merge.add(waiting)
			self._status_display.merges = len(self._task_queues.merge)
			self._merge_wait_scheduled.extend(self._merge_wait_queue)
			del self._merge_wait_queue[:]

		self._schedule_tasks_imp()
		self._status_display.display()

		state_change = 0
		for queue in self._task_queues.values():
			if queue.schedule():
				state_change += 1

		# Cancel prefetchers if they're the only reason
		# the main poll loop is still running.
		if self._failed_pkgs and not self._build_opts.fetchonly and \
			not (self._jobs or self._task_queues.merge) and \
			self._task_queues.fetch:
			self._task_queues.fetch.clear()
			state_change += 1

		if state_change:
			self._schedule_tasks_imp()
			self._status_display.display()

		return self._keep_scheduling()
6404
6405
	def _job_delay(self):
		"""
		@rtype: bool
		@returns: True if job scheduling should be delayed, False otherwise.
		"""
		if not (self._jobs and self._max_load is not None):
			return False

		# Delay grows with the number of running jobs, capped at the max.
		delay = min(self._job_delay_max,
			self._job_delay_factor * self._jobs ** self._job_delay_exp)
		return (time.time() - self._previous_job_start_time) < delay
6422
6423
	def _schedule_tasks_imp(self):
		"""
		Start as many new tasks as the scheduling policy allows.
		@rtype: bool
		@returns: True if state changed, False otherwise.
		"""
		# NOTE: the unreachable trailing "return bool(state_change)"
		# after the infinite loop has been removed; all exits happen
		# via the returns inside the loop.
		state_change = 0

		while True:

			if not self._keep_scheduling():
				return bool(state_change)

			# Hold off when an early-return was flagged, merges are
			# waiting for the jobs to drain, system deps are
			# unsatisfied, the job limit is reached, or the
			# load-based delay is active.
			if self._choose_pkg_return_early or \
				self._merge_wait_scheduled or \
				(self._jobs and self._unsatisfied_system_deps) or \
				not self._can_add_job() or \
				self._job_delay():
				return bool(state_change)

			pkg = self._choose_pkg()
			if pkg is None:
				return bool(state_change)

			state_change += 1

			if not pkg.installed:
				self._pkg_count.curval += 1

			task = self._task(pkg)

			if pkg.installed:
				# Already-installed packages go straight to the
				# merge queue.
				merge = PackageMerge(merge=task)
				merge.addExitListener(self._merge_exit)
				self._task_queues.merge.add(merge)

			elif pkg.built:
				# Prebuilt packages only need extraction.
				self._jobs += 1
				self._previous_job_start_time = time.time()
				self._status_display.running = self._jobs
				task.addExitListener(self._extract_exit)
				self._task_queues.jobs.add(task)

			else:
				# Source packages need a full build.
				self._jobs += 1
				self._previous_job_start_time = time.time()
				self._status_display.running = self._jobs
				task.addExitListener(self._build_exit)
				self._task_queues.jobs.add(task)
6474
6475
	def _task(self, pkg):
		"""
		Build a MergeListItem task for pkg, locating any installed
		package in the same slot that the merge will replace.
		"""
		pkg_to_replace = None
		if pkg.operation != "uninstall":
			vardb = pkg.root_config.trees["vartree"].dbapi
			installed_cpvs = vardb.match(pkg.slot_atom)
			if installed_cpvs:
				# The last match in the slot is the one being replaced.
				pkg_to_replace = self._pkg(installed_cpvs.pop(),
					"installed", pkg.root_config, installed=True)

		return MergeListItem(args_set=self._args_set,
			background=self._background, binpkg_opts=self._binpkg_opts,
			build_opts=self._build_opts,
			config_pool=self._ConfigPool(pkg.root,
			self._allocate_config, self._deallocate_config),
			emerge_opts=self.myopts,
			find_blockers=self._find_blockers(pkg), logger=self._logger,
			mtimedb=self._mtimedb, pkg=pkg, pkg_count=self._pkg_count.copy(),
			pkg_to_replace=pkg_to_replace,
			prefetcher=self._prefetchers.get(pkg),
			scheduler=self._sched_iface,
			settings=self._allocate_config(pkg.root),
			statusMessage=self._status_msg,
			world_atom=self._world_atom)
6502
6503
	def _failed_pkg_msg(self, failed_pkg, action, preposition):
		"""Show a one-line failure message, plus the log path if one exists."""
		pkg = failed_pkg.pkg
		msg = "%s to %s %s" % \
			(bad("Failed"), action, colorize("INFORM", pkg.cpv))
		if pkg.root != "/":
			msg += " %s %s" % (preposition, pkg.root)

		log_path = self._locate_failure_log(failed_pkg)
		if log_path is None:
			self._status_msg(msg)
		else:
			self._status_msg(msg + ", Log file:")
			self._status_msg(" '%s'" % (colorize("INFORM", log_path),))
6517
6518
	def _status_msg(self, msg):
		"""
		Display a brief status message (no newlines) in the status display.
		This is called by tasks to provide feedback to the user. This
		delegates the responsibility of generating \r and \n control
		characters, to guarantee that lines are created or erased when
		necessary and appropriate.

		@type msg: str
		@param msg: a brief status message (no newlines allowed)
		"""
		if not self._background:
			# Separate the message from any previous terminal output.
			writemsg_level("\n")
		self._status_display.displayMessage(msg)
6532
6533
	def _save_resume_list(self):
		"""
		Do this before verifying the ebuild Manifests since it might
		be possible for the user to use --resume --skipfirst get past
		a non-essential package with a broken digest.
		"""
		merge_items = [list(task) for task in self._mergelist
			if isinstance(task, Package) and task.operation == "merge"]
		self._mtimedb["resume"]["mergelist"] = merge_items
		self._mtimedb.commit()
6545
6546
	def _calc_resume_list(self):
6547
		"""
6548
		Use the current resume list to calculate a new one,
6549
		dropping any packages with unsatisfied deps.
6550
		@rtype: bool
6551
		@returns: True if successful, False otherwise.
6552
		"""
6553
		print colorize("GOOD", "*** Resuming merge...")
6554
6555
		if self._show_list():
6556
			if "--tree" in self.myopts:
6557
				portage.writemsg_stdout("\n" + \
6558
					darkgreen("These are the packages that " + \
6559
					"would be merged, in reverse order:\n\n"))
6560
6561
			else:
6562
				portage.writemsg_stdout("\n" + \
6563
					darkgreen("These are the packages that " + \
6564
					"would be merged, in order:\n\n"))
6565
6566
		show_spinner = "--quiet" not in self.myopts and \
6567
			"--nodeps" not in self.myopts
6568
6569
		if show_spinner:
6570
			print "Calculating dependencies  ",
6571
6572
		myparams = create_depgraph_params(self.myopts, None)
6573
		success = False
6574
		e = None
6575
		try:
6576
			success, mydepgraph, dropped_tasks = resume_depgraph(
6577
				self.settings, self.trees, self._mtimedb, self.myopts,
6578
				myparams, self._spinner)
6579
		except depgraph.UnsatisfiedResumeDep, exc:
6580
			# rename variable to avoid python-3.0 error:
6581
			# SyntaxError: can not delete variable 'e' referenced in nested
6582
			#              scope
6583
			e = exc
6584
			mydepgraph = e.depgraph
6585
			dropped_tasks = set()
6586
6587
		if show_spinner:
6588
			print "\b\b... done!"
6589
6590
		if e is not None:
6591
			def unsatisfied_resume_dep_msg():
6592
				mydepgraph.display_problems()
6593
				out = portage.output.EOutput()
6594
				out.eerror("One or more packages are either masked or " + \
6595
					"have missing dependencies:")
6596
				out.eerror("")
6597
				indent = "  "
6598
				show_parents = set()
6599
				for dep in e.value:
6600
					if dep.parent in show_parents:
6601
						continue
6602
					show_parents.add(dep.parent)
6603
					if dep.atom is None:
6604
						out.eerror(indent + "Masked package:")
6605
						out.eerror(2 * indent + str(dep.parent))
6606
						out.eerror("")
6607
					else:
6608
						out.eerror(indent + str(dep.atom) + " pulled in by:")
6609
						out.eerror(2 * indent + str(dep.parent))
6610
						out.eerror("")
6611
				msg = "The resume list contains packages " + \
6612
					"that are either masked or have " + \
6613
					"unsatisfied dependencies. " + \
6614
					"Please restart/continue " + \
6615
					"the operation manually, or use --skipfirst " + \
6616
					"to skip the first package in the list and " + \
6617
					"any other packages that may be " + \
6618
					"masked or have missing dependencies."
6619
				for line in textwrap.wrap(msg, 72):
6620
					out.eerror(line)
6621
			self._post_mod_echo_msgs.append(unsatisfied_resume_dep_msg)
6622
			return False
6623
6624
		if success and self._show_list():
6625
			mylist = mydepgraph.altlist()
6626
			if mylist:
6627
				if "--tree" in self.myopts:
6628
					mylist.reverse()
6629
				mydepgraph.display(mylist, favorites=self._favorites)
6630
6631
		if not success:
6632
			self._post_mod_echo_msgs.append(mydepgraph.display_problems)
6633
			return False
6634
		mydepgraph.display_problems()
6635
6636
		mylist = mydepgraph.altlist()
6637
		mydepgraph.break_refs(mylist)
6638
		mydepgraph.break_refs(dropped_tasks)
6639
		self._mergelist = mylist
6640
		self._set_digraph(mydepgraph.schedulerGraph())
6641
6642
		msg_width = 75
6643
		for task in dropped_tasks:
6644
			if not (isinstance(task, Package) and task.operation == "merge"):
6645
				continue
6646
			pkg = task
6647
			msg = "emerge --keep-going:" + \
6648
				" %s" % (pkg.cpv,)
6649
			if pkg.root != "/":
6650
				msg += " for %s" % (pkg.root,)
6651
			msg += " dropped due to unsatisfied dependency."
6652
			for line in textwrap.wrap(msg, msg_width):
6653
				eerror(line, phase="other", key=pkg.cpv)
6654
			settings = self.pkgsettings[pkg.root]
6655
			# Ensure that log collection from $T is disabled inside
6656
			# elog_process(), since any logs that might exist are
6657
			# not valid here.
6658
			settings.pop("T", None)
6659
			portage.elog.elog_process(pkg.cpv, settings)
6660
			self._failed_pkgs_all.append(self._failed_pkg(pkg=pkg))
6661
6662
		return True
6663
6664
	def _show_list(self):
		"""Return True if the merge list should be displayed to the user."""
		myopts = self.myopts
		return "--quiet" not in myopts and \
			("--ask" in myopts or "--tree" in myopts or \
			"--verbose" in myopts)
6671
6672
	def _world_atom(self, pkg):
		"""
		Add the package to the world file, but only if
		it's supposed to be added. Otherwise, do nothing.
		"""
		# Options implying a temporary or partial merge never touch world.
		if set(("--buildpkgonly", "--fetchonly",
			"--fetch-all-uri",
			"--oneshot", "--onlydeps",
			"--pretend")).intersection(self.myopts):
			return

		if pkg.root != self.target_root:
			return

		args_set = self._args_set
		if not args_set.findAtomForPackage(pkg):
			return

		logger = self._logger
		pkg_count = self._pkg_count
		root_config = pkg.root_config
		world_set = root_config.sets["world"]

		world_locked = hasattr(world_set, "lock")
		if world_locked:
			world_set.lock()

		try:
			if hasattr(world_set, "load"):
				world_set.load() # maybe it's changed on disk

			atom = create_world_atom(pkg, args_set, root_config)
			if atom:
				if hasattr(world_set, "add"):
					self._status_msg(('Recording %s in "world" ' + \
						'favorites file...') % atom)
					logger.log(" === (%s of %s) Updating world file (%s)" % \
						(pkg_count.curval, pkg_count.maxval, pkg.cpv))
					world_set.add(atom)
				else:
					writemsg_level('\n!!! Unable to record %s in "world"\n' % \
						(atom,), level=logging.WARN, noiselevel=-1)
		finally:
			if world_locked:
				world_set.unlock()
6718
6719
	def _pkg(self, cpv, type_name, root_config, installed=False):
		"""
		Get a package instance from the cache, or create a new
		one if necessary. Raises KeyError from aux_get if it
		failures for some reason (package does not exist or is
		corrupt).
		"""
		operation = "merge"
		if installed:
			operation = "nomerge"

		if self._digraph is not None:
			# Reuse existing instance when available.
			cached = self._digraph.get(
				(type_name, root_config.root, cpv, operation))
			if cached is not None:
				return cached

		tree_type = depgraph.pkg_tree_map[type_name]
		db = root_config.trees[tree_type].dbapi
		db_keys = list(self.trees[root_config.root][
			tree_type].dbapi._aux_cache_keys)
		metadata = izip(db_keys, db.aux_get(cpv, db_keys))
		pkg = Package(cpv=cpv, metadata=metadata,
			root_config=root_config, installed=installed)
		if type_name == "ebuild":
			# For ebuilds, USE and CHOST come from the current config
			# rather than from the tree metadata.
			settings = self.pkgsettings[root_config.root]
			settings.setcpv(pkg)
			pkg.metadata["USE"] = settings["PORTAGE_USE"]
			pkg.metadata['CHOST'] = settings.get('CHOST', '')

		return pkg
6751
6752
def chk_updated_info_files(root, infodirs, prev_mtimes, retval):
189
def chk_updated_info_files(root, infodirs, prev_mtimes, retval):
6753
190
6754
	if os.path.exists("/usr/bin/install-info"):
191
	if os.path.exists("/usr/bin/install-info"):
Lines 6958-6980 Link Here
6958
		print "Use " + colorize("GOOD", "emerge @preserved-rebuild") + " to rebuild packages using these libraries"
395
		print "Use " + colorize("GOOD", "emerge @preserved-rebuild") + " to rebuild packages using these libraries"
6959
396
6960
397
6961
def _flush_elog_mod_echo():
	"""
	Dump the mod_echo output now so that our other
	notifications are shown last.
	@rtype: bool
	@returns: True if messages were shown, False otherwise.
	"""
	try:
		from portage.elog import mod_echo
	except ImportError:
		# happens during downgrade to a version without the module
		return False
	messages_shown = bool(mod_echo._items)
	mod_echo.finalize()
	return messages_shown
6977
6978
def post_emerge(root_config, myopts, mtimedb, retval):
398
def post_emerge(root_config, myopts, mtimedb, retval):
6979
	"""
399
	"""
6980
	Misc. things to run at the end of a merge session.
400
	Misc. things to run at the end of a merge session.
Lines 7134-7167 Link Here
7134
	manager = NewsManager(portdb, vardb, NEWS_PATH, UNREAD_PATH)
554
	manager = NewsManager(portdb, vardb, NEWS_PATH, UNREAD_PATH)
7135
	return manager.getUnreadItems( repo_id, update=update )
555
	return manager.getUnreadItems( repo_id, update=update )
7136
556
7137
def insert_category_into_atom(atom, category):
	"""
	Insert "category/" immediately before the package-name portion of
	atom (the first word character), preserving any operator prefix.
	Returns None when atom contains no word character to anchor on.
	"""
	match = re.search(r'\w', atom)
	if match is None:
		return None
	pos = match.start()
	return atom[:pos] + "%s/" % category + atom[pos:]
7145
7146
def is_valid_package_atom(x):
	"""
	Return True if x is a valid package atom, temporarily inserting a
	dummy "cat/" category when x lacks one so validation can proceed.
	"""
	if "/" not in x:
		match = re.search(r'\w', x)
		if match:
			x = x[:match.start()] + "cat/" + x[match.start():]
	return portage.isvalidatom(x)
7152
7153
def show_blocker_docs_link():
7154
	print
7155
	print "For more information about " + bad("Blocked Packages") + ", please refer to the following"
7156
	print "section of the Gentoo Linux x86 Handbook (architecture is irrelevant):"
7157
	print
7158
	print "http://www.gentoo.org/doc/en/handbook/handbook-x86.xml?full=1#blocked"
7159
	print
7160
7161
def show_mask_docs():
7162
	print "For more information, see the MASKED PACKAGES section in the emerge"
7163
	print "man page or refer to the Gentoo Handbook."
7164
7165
def action_sync(settings, trees, mtimedb, myopts, myaction):
557
def action_sync(settings, trees, mtimedb, myopts, myaction):
7166
	xterm_titles = "notitles" not in settings.features
558
	xterm_titles = "notitles" not in settings.features
7167
	emergelog(xterm_titles, " === sync")
559
	emergelog(xterm_titles, " === sync")
Lines 9108-9187 Link Here
9108
	else:
2500
	else:
9109
		print "Number removed:       "+str(len(cleanlist))
2501
		print "Number removed:       "+str(len(cleanlist))
9110
2502
9111
def resume_depgraph(settings, trees, mtimedb, myopts, myparams, spinner):
	"""
	Construct a depgraph for the given resume list. This will raise
	PackageNotFound or depgraph.UnsatisfiedResumeDep when necessary.
	@rtype: tuple
	@returns: (success, depgraph, dropped_tasks)
	"""
	skip_masked = True
	skip_unsatisfied = True
	# NOTE: the resume mergelist is pruned in place below, so changes
	# propagate back into mtimedb["resume"].
	mergelist = mtimedb["resume"]["mergelist"]
	dropped_tasks = set()
	while True:
		mydepgraph = depgraph(settings, trees,
			myopts, myparams, spinner)
		try:
			success = mydepgraph.loadResumeCommand(mtimedb["resume"],
				skip_masked=skip_masked)
		except depgraph.UnsatisfiedResumeDep, e:
			if not skip_unsatisfied:
				raise

			graph = mydepgraph.digraph
			# Seed with the direct parents of the unsatisfied deps,
			# then walk upward collecting every merge/nomerge parent
			# whose own dependencies would become unsatisfied if the
			# packages gathered so far were dropped.
			unsatisfied_parents = dict((dep.parent, dep.parent) \
				for dep in e.value)
			traversed_nodes = set()
			unsatisfied_stack = list(unsatisfied_parents)
			while unsatisfied_stack:
				pkg = unsatisfied_stack.pop()
				if pkg in traversed_nodes:
					continue
				traversed_nodes.add(pkg)

				# If this package was pulled in by a parent
				# package scheduled for merge, removing this
				# package may cause the parent package's
				# dependency to become unsatisfied.
				for parent_node in graph.parent_nodes(pkg):
					if not isinstance(parent_node, Package) \
						or parent_node.operation not in ("merge", "nomerge"):
						continue
					unsatisfied = \
						graph.child_nodes(parent_node,
						ignore_priority=DepPrioritySatisfiedRange.ignore_soft)
					if pkg in unsatisfied:
						unsatisfied_parents[parent_node] = parent_node
						unsatisfied_stack.append(parent_node)

			# Drop every unsatisfied package from the resume list and
			# retry graph construction with the pruned list.
			pruned_mergelist = []
			for x in mergelist:
				if isinstance(x, list) and \
					tuple(x) not in unsatisfied_parents:
					pruned_mergelist.append(x)

			# If the mergelist doesn't shrink then this loop is infinite.
			if len(pruned_mergelist) == len(mergelist):
				# This happens if a package can't be dropped because
				# it's already installed, but it has unsatisfied PDEPEND.
				raise
			mergelist[:] = pruned_mergelist

			# Exclude installed packages that have been removed from the graph due
			# to failure to build/install runtime dependencies after the dependent
			# package has already been installed.
			dropped_tasks.update(pkg for pkg in \
				unsatisfied_parents if pkg.operation != "nomerge")
			mydepgraph.break_refs(unsatisfied_parents)

			# Release references before building the next depgraph.
			del e, graph, traversed_nodes, \
				unsatisfied_parents, unsatisfied_stack
			continue
		else:
			break
	return (success, mydepgraph, dropped_tasks)
9184
9185
def action_build(settings, trees, mtimedb,
2503
def action_build(settings, trees, mtimedb,
9186
	myopts, myaction, myfiles, spinner):
2504
	myopts, myaction, myfiles, spinner):
9187
2505
Lines 9847-9862 Link Here
9847
		settings = trees[myroot]["vartree"].settings
3165
		settings = trees[myroot]["vartree"].settings
9848
		settings.validate()
3166
		settings.validate()
9849
3167
9850
def clear_caches(trees):
	"""
	Drop memory-hungry caches held by the porttree/bintree/vartree
	dbapi objects for every configured root, then force a garbage
	collection pass.
	"""
	# The module-level "import gc" was removed from this file's header
	# by this patch, so import it locally to keep gc.collect() working.
	import gc
	for d in trees.itervalues():
		d["porttree"].dbapi.melt()
		d["porttree"].dbapi._aux_cache.clear()
		d["bintree"].dbapi._aux_cache.clear()
		d["bintree"].dbapi._clear_cache()
		d["vartree"].dbapi.linkmap._clear_cache()
	portage.dircache.clear()
	gc.collect()
9859
9860
def load_emerge_config(trees=None):
3168
def load_emerge_config(trees=None):
9861
	kwargs = {}
3169
	kwargs = {}
9862
	for k, envvar in (("config_root", "PORTAGE_CONFIGROOT"), ("target_root", "ROOT")):
3170
	for k, envvar in (("config_root", "PORTAGE_CONFIGROOT"), ("target_root", "ROOT")):
Lines 10255-10282 Link Here
10255
				msg += " for '%s'" % root
3563
				msg += " for '%s'" % root
10256
			writemsg_level(msg, level=logging.WARN, noiselevel=-1)
3564
			writemsg_level(msg, level=logging.WARN, noiselevel=-1)
10257
3565
10258
def ambiguous_package_name(arg, atoms, root_config, spinner, myopts):
	"""
	Inform the user that the short name 'arg' matches several
	category/package names ('atoms'). In --quiet mode just list the
	candidates; otherwise run a search so full package descriptions
	are shown for each candidate.
	"""

	if "--quiet" in myopts:
		print "!!! The short ebuild name \"%s\" is ambiguous. Please specify" % arg
		print "!!! one of the following fully-qualified ebuild names instead:\n"
		for cp in sorted(set(portage.dep_getkey(atom) for atom in atoms)):
			print "    " + colorize("INFORM", cp)
		return

	s = search(root_config, spinner, "--searchdesc" in myopts,
		"--quiet" not in myopts, "--usepkg" in myopts,
		"--usepkgonly" in myopts)
	# Insert a dummy "null" category so dep_getkey/catsplit can
	# extract the bare package name to use as the search key.
	null_cp = portage.dep_getkey(insert_category_into_atom(
		arg, "null"))
	cat, atom_pn = portage.catsplit(null_cp)
	s.searchkey = atom_pn
	for cp in sorted(set(portage.dep_getkey(atom) for atom in atoms)):
		s.addCP(cp)
	s.output()
	print "!!! The short ebuild name \"%s\" is ambiguous. Please specify" % arg
	print "!!! one of the above fully-qualified ebuild names instead.\n"
10279
10280
def profile_check(trees, myaction, myopts):
3566
def profile_check(trees, myaction, myopts):
10281
	if myaction in ("info", "sync"):
3567
	if myaction in ("info", "sync"):
10282
		return os.EX_OK
3568
		return os.EX_OK
(-)Scheduler.py (+1641 lines)
Line 0 Link Here
1
import logging
2
import os
3
import sys
4
import textwrap
5
import time
6
import weakref
7
from itertools import izip
8
9
try:
10
	import portage
11
except ImportError:
12
	from os import path as osp
13
	sys.path.insert(0, osp.join(osp.dirname(osp.dirname(osp.realpath(__file__))), "pym"))
14
	import portage
15
16
from portage.cache.mappings import slot_dict_class
17
from portage.elog.messages import eerror
18
from portage.output import colorize, create_color_func, darkgreen, red
19
bad = create_color_func("BAD")
20
from portage.sets.base import InternalPackageSet
21
from portage.util import writemsg, writemsg_level
22
23
from _emerge.BinpkgPrefetcher import BinpkgPrefetcher
24
from _emerge.Blocker import Blocker
25
from _emerge.BlockerDB import BlockerDB
26
from _emerge.clear_caches import clear_caches
27
from _emerge.create_depgraph_params import create_depgraph_params
28
from _emerge.create_world_atom import create_world_atom
29
from _emerge.DepPriority import DepPriority
30
from _emerge.EbuildFetcher import EbuildFetcher
31
from _emerge.EbuildPhase import EbuildPhase
32
from _emerge.emergelog import emergelog, _emerge_log_dir
33
from _emerge._find_deep_system_runtime_deps import _find_deep_system_runtime_deps
34
from _emerge._flush_elog_mod_echo import _flush_elog_mod_echo
35
from _emerge.JobStatusDisplay import JobStatusDisplay
36
from _emerge.MergeListItem import MergeListItem
37
from _emerge.Package import Package
38
from _emerge.PackageMerge import PackageMerge
39
from _emerge.PollScheduler import PollScheduler
40
from _emerge.RootConfig import RootConfig
41
from _emerge.SlotObject import SlotObject
42
from _emerge.SequentialTaskQueue import SequentialTaskQueue
43
from _emerge.show_invalid_depstring_notice import show_invalid_depstring_notice
44
45
import portage.proxy.lazyimport
46
import portage.proxy as proxy
47
proxy.lazyimport.lazyimport(globals(),
48
	'_emerge.depgraph:depgraph',
49
)
50
51
class Scheduler(PollScheduler):
52
53
	# Options under which installed blockers are not searched for
	# (see _find_blockers_with_lock).
	_opts_ignore_blockers = \
		frozenset(["--buildpkgonly",
		"--fetchonly", "--fetch-all-uri",
		"--nodeps", "--pretend"])

	# Options that force foreground mode (see _background_mode).
	_opts_no_background = \
		frozenset(["--pretend",
		"--fetchonly", "--fetch-all-uri"])

	# Options under which emerge is never re-exec'd after a portage
	# upgrade (see _restart_if_necessary).
	_opts_no_restart = frozenset(["--buildpkgonly",
		"--fetchonly", "--fetch-all-uri", "--pretend"])

	# Options filtered out of the argv used for automatic --resume.
	_bad_resume_opts = set(["--ask", "--changelog",
		"--resume", "--skipfirst"])

	# Shared log file that serializes parallel-fetch output.
	_fetch_log = os.path.join(_emerge_log_dir, 'emerge-fetch.log')

	# Callback bundle handed to tasks so they can interact with the
	# scheduler (see _sched_iface construction in __init__).
	class _iface_class(SlotObject):
		__slots__ = ("dblinkEbuildPhase", "dblinkDisplayMerge",
			"dblinkElog", "dblinkEmergeLog", "fetch", "register", "schedule",
			"scheduleSetup", "scheduleUnpack", "scheduleYield",
			"unregister")

	class _fetch_iface_class(SlotObject):
		__slots__ = ("log_file", "schedule")

	# Container with one SequentialTaskQueue per queue name.
	_task_queues_class = slot_dict_class(
		("merge", "jobs", "fetch", "unpack"), prefix="")

	# Boolean views of the corresponding --long-options (populated
	# from myopts in __init__).
	class _build_opts_class(SlotObject):
		__slots__ = ("buildpkg", "buildpkgonly",
			"fetch_all_uri", "fetchonly", "pretend")

	class _binpkg_opts_class(SlotObject):
		__slots__ = ("fetchonly", "getbinpkg", "pretend")

	class _pkg_count_class(SlotObject):
		__slots__ = ("curval", "maxval")

	# Thin wrapper around emergelog() that suppresses short messages
	# when xterm titles are disabled.
	class _emerge_log_class(SlotObject):
		__slots__ = ("xterm_titles",)

		def log(self, *pargs, **kwargs):
			if not self.xterm_titles:
				# Avoid interference with the scheduler's status display.
				kwargs.pop("short_msg", None)
			emergelog(self.xterm_titles, *pargs, **kwargs)

	# Record of one failed merge, kept for the final failure summary.
	class _failed_pkg(SlotObject):
		__slots__ = ("build_dir", "build_log", "pkg", "returncode")

	class _ConfigPool(object):
		"""Interface for a task to temporarily allocate a config
		instance from a pool. This allows a task to be constructed
		long before the config instance actually becomes needed, like
		when prefetchers are constructed for the whole merge list."""
		__slots__ = ("_root", "_allocate", "_deallocate")
		def __init__(self, root, allocate, deallocate):
			self._root = root
			self._allocate = allocate
			self._deallocate = deallocate
		def allocate(self):
			return self._allocate(self._root)
		def deallocate(self, settings):
			self._deallocate(settings)

	class _unknown_internal_error(portage.exception.PortageException):
		"""
		Used internally to terminate scheduling. The specific reason for
		the failure should have been dumped to stderr.
		"""
		def __init__(self, value=""):
			portage.exception.PortageException.__init__(self, value)
126
127
	def __init__(self, settings, trees, mtimedb, myopts,
		spinner, mergelist, favorites, digraph):
		"""
		@param settings: global config for the target root
		@param trees: per-root tree/dbapi container
		@param mtimedb: persistent state, including the resume list
		@param myopts: parsed emerge options
		@param mergelist: list of Package/Blocker tasks to process
		@param favorites: atoms destined for the world file
		@param digraph: dependency graph for the merge list
		"""
		PollScheduler.__init__(self)
		self.settings = settings
		self.target_root = settings["ROOT"]
		self.trees = trees
		self.myopts = myopts
		self._spinner = spinner
		self._mtimedb = mtimedb
		self._mergelist = mergelist
		self._favorites = favorites
		self._args_set = InternalPackageSet(favorites)
		# Translate slot names like "fetch_all_uri" into option
		# strings like "--fetch-all-uri" and record their presence.
		self._build_opts = self._build_opts_class()
		for k in self._build_opts.__slots__:
			setattr(self._build_opts, k, "--" + k.replace("_", "-") in myopts)
		self._binpkg_opts = self._binpkg_opts_class()
		for k in self._binpkg_opts.__slots__:
			setattr(self._binpkg_opts, k, "--" + k.replace("_", "-") in myopts)

		self.curval = 0
		self._logger = self._emerge_log_class()
		self._task_queues = self._task_queues_class()
		for k in self._task_queues.allowed_keys:
			setattr(self._task_queues, k,
				SequentialTaskQueue())

		# Holds merges that will wait to be executed when no builds are
		# executing. This is useful for system packages since dependencies
		# on system packages are frequently unspecified.
		self._merge_wait_queue = []
		# Holds merges that have been transfered from the merge_wait_queue to
		# the actual merge queue. They are removed from this list upon
		# completion. Other packages can start building only when this list is
		# empty.
		self._merge_wait_scheduled = []

		# Holds system packages and their deep runtime dependencies. Before
		# being merged, these packages go to merge_wait_queue, to be merged
		# when no other packages are building.
		self._deep_system_deps = set()

		# Holds packages to merge which will satisfy currently unsatisfied
		# deep runtime dependencies of system packages. If this is not empty
		# then no parallel builds will be spawned until it is empty. This
		# minimizes the possibility that a build will fail due to the system
		# being in a fragile state. For example, see bug #259954.
		self._unsatisfied_system_deps = set()

		self._status_display = JobStatusDisplay(
			xterm_titles=('notitles' not in settings.features))
		self._max_load = myopts.get("--load-average")
		max_jobs = myopts.get("--jobs")
		if max_jobs is None:
			max_jobs = 1
		self._set_max_jobs(max_jobs)

		# The root where the currently running
		# portage instance is installed.
		self._running_root = trees["/"]["root_config"]
		self.edebug = 0
		if settings.get("PORTAGE_DEBUG", "") == "1":
			self.edebug = 1
		self.pkgsettings = {}
		self._config_pool = {}
		self._blocker_db = {}
		for root in trees:
			self._config_pool[root] = []
			self._blocker_db[root] = BlockerDB(trees[root]["root_config"])

		fetch_iface = self._fetch_iface_class(log_file=self._fetch_log,
			schedule=self._schedule_fetch)
		self._sched_iface = self._iface_class(
			dblinkEbuildPhase=self._dblink_ebuild_phase,
			dblinkDisplayMerge=self._dblink_display_merge,
			dblinkElog=self._dblink_elog,
			dblinkEmergeLog=self._dblink_emerge_log,
			fetch=fetch_iface, register=self._register,
			schedule=self._schedule_wait,
			scheduleSetup=self._schedule_setup,
			scheduleUnpack=self._schedule_unpack,
			scheduleYield=self._schedule_yield,
			unregister=self._unregister)

		# Weak values so completed prefetchers are reclaimed promptly.
		self._prefetchers = weakref.WeakValueDictionary()
		self._pkg_queue = []
		self._completed_tasks = set()

		self._failed_pkgs = []
		self._failed_pkgs_all = []
		self._failed_pkgs_die_msgs = []
		self._post_mod_echo_msgs = []
		self._parallel_fetch = False
		merge_count = len([x for x in mergelist \
			if isinstance(x, Package) and x.operation == "merge"])
		self._pkg_count = self._pkg_count_class(
			curval=0, maxval=merge_count)
		self._status_display.maxval = self._pkg_count.maxval

		# The load average takes some time to respond when new
		# jobs are added, so we need to limit the rate of adding
		# new jobs.
		self._job_delay_max = 10
		self._job_delay_factor = 1.0
		self._job_delay_exp = 1.5
		self._previous_job_start_time = None

		self._set_digraph(digraph)

		# This is used to memoize the _choose_pkg() result when
		# no packages can be chosen until one of the existing
		# jobs completes.
		self._choose_pkg_return_early = False

		features = self.settings.features
		if "parallel-fetch" in features and \
			not ("--pretend" in self.myopts or \
			"--fetch-all-uri" in self.myopts or \
			"--fetchonly" in self.myopts):
			if "distlocks" not in features:
				portage.writemsg(red("!!!")+"\n", noiselevel=-1)
				portage.writemsg(red("!!!")+" parallel-fetching " + \
					"requires the distlocks feature enabled"+"\n",
					noiselevel=-1)
				portage.writemsg(red("!!!")+" you have it disabled, " + \
					"thus parallel-fetching is being disabled"+"\n",
					noiselevel=-1)
				portage.writemsg(red("!!!")+"\n", noiselevel=-1)
			elif len(mergelist) > 1:
				self._parallel_fetch = True

		if self._parallel_fetch:
			# Truncate any existing fetch log. Close the handle
			# explicitly instead of leaking it (the original relied on
			# refcounting to close the file).
			try:
				open(self._fetch_log, 'w').close()
			except EnvironmentError:
				pass

		# Remember the currently installed portage instance so that
		# _is_restart_necessary() can detect a version change.
		self._running_portage = None
		portage_match = self._running_root.trees["vartree"].dbapi.match(
			portage.const.PORTAGE_PACKAGE_ATOM)
		if portage_match:
			cpv = portage_match.pop()
			self._running_portage = self._pkg(cpv, "installed",
				self._running_root, installed=True)
272
	def _poll(self, timeout=None):
		# Give the scheduler a chance to start new tasks before
		# blocking in the base class poll loop.
		self._schedule()
		PollScheduler._poll(self, timeout=timeout)

	def _set_max_jobs(self, max_jobs):
		# Keep the job queue's concurrency limit in sync with ours.
		self._max_jobs = max_jobs
		self._task_queues.jobs.max_jobs = max_jobs
279
280
	def _background_mode(self):
		"""
		Check if background mode is enabled and adjust states as necessary.

		@rtype: bool
		@returns: True if background mode is enabled, False otherwise.
		"""
		# NOTE: _max_jobs may be the literal True (apparently meaning
		# "unlimited" -- confirm against option parsing) or an int.
		background = (self._max_jobs is True or \
			self._max_jobs > 1 or "--quiet" in self.myopts) and \
			not bool(self._opts_no_background.intersection(self.myopts))

		if background:
			interactive_tasks = self._get_interactive_tasks()
			if interactive_tasks:
				# Interactive packages need the terminal, so fall back
				# to foreground mode and serial jobs.
				background = False
				writemsg_level(">>> Sending package output to stdio due " + \
					"to interactive package(s):\n",
					level=logging.INFO, noiselevel=-1)
				msg = [""]
				for pkg in interactive_tasks:
					pkg_str = "  " + colorize("INFORM", str(pkg.cpv))
					if pkg.root != "/":
						pkg_str += " for " + pkg.root
					msg.append(pkg_str)
				msg.append("")
				writemsg_level("".join("%s\n" % (l,) for l in msg),
					level=logging.INFO, noiselevel=-1)
				if self._max_jobs is True or self._max_jobs > 1:
					self._set_max_jobs(1)
					writemsg_level(">>> Setting --jobs=1 due " + \
						"to the above interactive package(s)\n",
						level=logging.INFO, noiselevel=-1)

		self._status_display.quiet = \
			not background or \
			("--quiet" in self.myopts and \
			"--verbose" not in self.myopts)

		self._logger.xterm_titles = \
			"notitles" not in self.settings.features and \
			self._status_display.quiet

		return background

	def _get_interactive_tasks(self):
		"""
		Return the merge tasks whose PROPERTIES metadata contains
		"interactive". Raises _unknown_internal_error after showing a
		notice when a PROPERTIES string cannot be parsed.
		"""
		from portage import flatten
		from portage.dep import use_reduce, paren_reduce
		interactive_tasks = []
		for task in self._mergelist:
			if not (isinstance(task, Package) and \
				task.operation == "merge"):
				continue
			try:
				properties = flatten(use_reduce(paren_reduce(
					task.metadata["PROPERTIES"]), uselist=task.use.enabled))
			except portage.exception.InvalidDependString, e:
				show_invalid_depstring_notice(task,
					task.metadata["PROPERTIES"], str(e))
				raise self._unknown_internal_error()
			if "interactive" in properties:
				interactive_tasks.append(task)
		return interactive_tasks
342
343
	def _set_digraph(self, digraph):
		"""
		Keep a reference to the dependency graph and derive scheduling
		constraints from it. The graph is discarded entirely when it
		cannot influence scheduling (--nodeps, or no parallelism).
		"""
		if "--nodeps" in self.myopts or \
			(self._max_jobs is not True and self._max_jobs < 2):
			# save some memory
			self._digraph = None
			return

		self._digraph = digraph
		self._find_system_deps()
		self._prune_digraph()
		self._prevent_builddir_collisions()
354
355
	def _find_system_deps(self):
		"""
		Find system packages and their deep runtime dependencies. Before being
		merged, these packages go to merge_wait_queue, to be merged when no
		other packages are building.
		"""
		deps = self._deep_system_deps
		deps.clear()
		# Keep only the packages that are actually scheduled for merge.
		for pkg in _find_deep_system_runtime_deps(self._digraph):
			if pkg.operation == "merge":
				deps.add(pkg)
367
368
	def _prune_digraph(self):
		"""
		Repeatedly strip root nodes that are irrelevant for scheduling:
		non-Package entries, installed nomerge nodes, onlydeps nodes
		and tasks that have already completed. Removal can expose new
		irrelevant roots, so iterate until a fixed point is reached.
		"""
		graph = self._digraph
		completed_tasks = self._completed_tasks
		while True:
			irrelevant = set()
			for node in graph.root_nodes():
				if not isinstance(node, Package) or \
					(node.installed and node.operation == "nomerge") or \
					node.onlydeps or \
					node in completed_tasks:
					irrelevant.add(node)
			if not irrelevant:
				break
			graph.difference_update(irrelevant)
389
	def _prevent_builddir_collisions(self):
		"""
		When building stages, sometimes the same exact cpv needs to be merged
		to both $ROOTs. Add edges to the digraph in order to avoid collisions
		in the builddir. Currently, normal file locks would be inappropriate
		for this purpose since emerge holds all of it's build dir locks from
		the main process.
		"""
		# Map each cpv to the packages already seen with that cpv, and
		# serialize later duplicates behind the earlier ones.
		cpv_map = {}
		for pkg in self._mergelist:
			if not isinstance(pkg, Package):
				# a satisfied blocker
				continue
			if pkg.installed:
				continue
			if pkg.cpv not in cpv_map:
				cpv_map[pkg.cpv] = [pkg]
				continue
			for earlier_pkg in cpv_map[pkg.cpv]:
				self._digraph.add(earlier_pkg, pkg,
					priority=DepPriority(buildtime=True))
			cpv_map[pkg.cpv].append(pkg)

	class _pkg_failure(portage.exception.PortageException):
		"""
		An instance of this class is raised by unmerge() when
		an uninstallation fails.
		"""
		# Default exit status; overridden by the first positional arg.
		status = 1
		def __init__(self, *pargs):
			portage.exception.PortageException.__init__(self, pargs)
			if pargs:
				self.status = pargs[0]
422
423
	def _schedule_fetch(self, fetcher):
		"""
		Schedule a fetcher on the fetch queue, in order to
		serialize access to the fetch log.
		"""
		# addFront presumably gives explicit fetches priority over
		# queued prefetchers -- confirm against SequentialTaskQueue.
		self._task_queues.fetch.addFront(fetcher)

	def _schedule_setup(self, setup_phase):
		"""
		Schedule a setup phase on the merge queue, in order to
		serialize unsandboxed access to the live filesystem.
		"""
		self._task_queues.merge.addFront(setup_phase)
		self._schedule()

	def _schedule_unpack(self, unpack_phase):
		"""
		Schedule an unpack phase on the unpack queue, in order
		to serialize $DISTDIR access for live ebuilds.
		"""
		self._task_queues.unpack.add(unpack_phase)
444
445
	def _find_blockers(self, new_pkg):
		"""
		Returns a callable which should be called only when
		the vdb lock has been acquired.
		"""
		def get_blockers():
			return self._find_blockers_with_lock(new_pkg, acquire_lock=0)
		return get_blockers

	def _find_blockers_with_lock(self, new_pkg, acquire_lock=0):
		"""
		Return a list of dblink instances for installed packages that
		block new_pkg (or None when blockers are ignored due to the
		active options). Packages in the same slot or with an identical
		cpv are excluded, since merging new_pkg replaces them anyway.
		"""
		if self._opts_ignore_blockers.intersection(self.myopts):
			return None

		# Call gc.collect() here to avoid heap overflow that
		# triggers 'Cannot allocate memory' errors (reported
		# with python-2.5).
		import gc
		gc.collect()

		blocker_db = self._blocker_db[new_pkg.root]

		blocker_dblinks = []
		for blocking_pkg in blocker_db.findInstalledBlockers(
			new_pkg, acquire_lock=acquire_lock):
			if new_pkg.slot_atom == blocking_pkg.slot_atom:
				continue
			if new_pkg.cpv == blocking_pkg.cpv:
				continue
			blocker_dblinks.append(portage.dblink(
				blocking_pkg.category, blocking_pkg.pf, blocking_pkg.root,
				self.pkgsettings[blocking_pkg.root], treetype="vartree",
				vartree=self.trees[blocking_pkg.root]["vartree"]))

		gc.collect()

		return blocker_dblinks
481
482
	def _dblink_pkg(self, pkg_dblink):
		"""
		Build a Package instance corresponding to the given dblink,
		resolving its tree type and root configuration.
		"""
		cpv = pkg_dblink.mycpv
		type_name = RootConfig.tree_pkg_map[pkg_dblink.treetype]
		root_config = self.trees[pkg_dblink.myroot]["root_config"]
		installed = type_name == "installed"
		return self._pkg(cpv, type_name, root_config, installed=installed)

	def _append_to_log_path(self, log_path, msg):
		# Open in append mode so concurrent writers don't clobber each
		# other's output; try/finally guarantees the handle is closed.
		f = open(log_path, 'a')
		try:
			f.write(msg)
		finally:
			f.close()
495
496
	def _dblink_elog(self, pkg_dblink, phase, func, msgs):
		"""
		Deliver elog messages for a dblink, routing them to the build
		log instead of stdout when running in background mode.
		"""

		log_path = pkg_dblink.settings.get("PORTAGE_LOG_FILE")
		log_file = None
		out = sys.stdout
		background = self._background

		if background and log_path is not None:
			log_file = open(log_path, 'a')
			out = log_file

		try:
			for msg in msgs:
				func(msg, phase=phase, key=pkg_dblink.mycpv, out=out)
		finally:
			if log_file is not None:
				log_file.close()

	def _dblink_emerge_log(self, msg):
		# Forward dblink log requests to the shared emerge logger.
		self._logger.log(msg)

	def _dblink_display_merge(self, pkg_dblink, msg, level=0, noiselevel=0):
		"""
		Display a merge-time message, writing it to the build log when
		one exists and suppressing low-priority output in background
		mode.
		"""
		log_path = pkg_dblink.settings.get("PORTAGE_LOG_FILE")
		background = self._background

		if log_path is None:
			if not (background and level < logging.WARN):
				portage.util.writemsg_level(msg,
					level=level, noiselevel=noiselevel)
		else:
			if not background:
				portage.util.writemsg_level(msg,
					level=level, noiselevel=noiselevel)
			self._append_to_log_path(log_path, msg)

	def _dblink_ebuild_phase(self,
		pkg_dblink, pkg_dbapi, ebuild_path, phase):
		"""
		Using this callback for merge phases allows the scheduler
		to run while these phases execute asynchronously, and allows
		the scheduler control output handling.
		"""
		# NOTE(review): pkg_dbapi, ebuild_path and log_path are unused
		# here; they appear to exist only to satisfy the callback
		# signature expected by dblink.

		scheduler = self._sched_iface
		settings = pkg_dblink.settings
		pkg = self._dblink_pkg(pkg_dblink)
		background = self._background
		log_path = settings.get("PORTAGE_LOG_FILE")

		ebuild_phase = EbuildPhase(background=background,
			pkg=pkg, phase=phase, scheduler=scheduler,
			settings=settings, tree=pkg_dblink.treetype)
		ebuild_phase.start()
		ebuild_phase.wait()

		return ebuild_phase.returncode
552
553
	def _generate_digests(self):
		"""
		Generate digests if necessary for --digests or FEATURES=digest.
		In order to avoid interference, this must done before parallel
		tasks are started.

		@rtype: int
		@returns: os.EX_OK on success (or when digests are not
			needed), 1 on failure.
		"""

		if '--fetchonly' in self.myopts:
			return os.EX_OK

		digest = '--digest' in self.myopts
		if not digest:
			# Any root with FEATURES=digest triggers generation.
			for pkgsettings in self.pkgsettings.itervalues():
				if 'digest' in pkgsettings.features:
					digest = True
					break

		if not digest:
			return os.EX_OK

		for x in self._mergelist:
			if not isinstance(x, Package) or \
				x.type_name != 'ebuild' or \
				x.operation != 'merge':
				continue
			pkgsettings = self.pkgsettings[x.root]
			if '--digest' not in self.myopts and \
				'digest' not in pkgsettings.features:
				continue
			portdb = x.root_config.trees['porttree'].dbapi
			ebuild_path = portdb.findname(x.cpv)
			if not ebuild_path:
				writemsg_level(
					"!!! Could not locate ebuild for '%s'.\n" \
					% x.cpv, level=logging.ERROR, noiselevel=-1)
				return 1
			pkgsettings['O'] = os.path.dirname(ebuild_path)
			if not portage.digestgen([], pkgsettings, myportdb=portdb):
				writemsg_level(
					"!!! Unable to generate manifest for '%s'.\n" \
					% x.cpv, level=logging.ERROR, noiselevel=-1)
				return 1

		return os.EX_OK

	def _check_manifests(self):
		"""
		Verify the manifest of every ebuild in the merge list up front
		so failures surface before any merging starts. Skipped unless
		FEATURES=strict is active and fetching is not the only goal.

		@rtype: int
		@returns: os.EX_OK on success, 1 on the first digest failure.
		"""
		# Verify all the manifests now so that the user is notified of failure
		# as soon as possible.
		if "strict" not in self.settings.features or \
			"--fetchonly" in self.myopts or \
			"--fetch-all-uri" in self.myopts:
			return os.EX_OK

		shown_verifying_msg = False
		quiet_settings = {}
		# Clone per-root configs with PORTAGE_QUIET so digestcheck
		# doesn't spam output for every package.
		for myroot, pkgsettings in self.pkgsettings.iteritems():
			quiet_config = portage.config(clone=pkgsettings)
			quiet_config["PORTAGE_QUIET"] = "1"
			quiet_config.backup_changes("PORTAGE_QUIET")
			quiet_settings[myroot] = quiet_config
			del quiet_config

		for x in self._mergelist:
			if not isinstance(x, Package) or \
				x.type_name != "ebuild":
				continue

			if not shown_verifying_msg:
				shown_verifying_msg = True
				self._status_msg("Verifying ebuild manifests")

			root_config = x.root_config
			portdb = root_config.trees["porttree"].dbapi
			quiet_config = quiet_settings[root_config.root]
			quiet_config["O"] = os.path.dirname(portdb.findname(x.cpv))
			if not portage.digestcheck([], quiet_config, strict=True):
				return 1

		return os.EX_OK
632
633
	def _add_prefetchers(self):
		"""
		Queue background fetch tasks for the merge list when
		parallel-fetch is enabled. The original body re-tested
		self._parallel_fetch immediately after the guard clause and
		computed an unused 'getbinpkg' local; both are removed here.
		"""

		if not self._parallel_fetch:
			return

		self._status_msg("Starting parallel fetch")

		prefetchers = self._prefetchers

		# In order to avoid "waiting for lock" messages
		# at the beginning, which annoy users, never
		# spawn a prefetcher for the first package.
		for pkg in self._mergelist[1:]:
			prefetcher = self._create_prefetcher(pkg)
			if prefetcher is not None:
				self._task_queues.fetch.add(prefetcher)
				prefetchers[pkg] = prefetcher
653
	def _create_prefetcher(self, pkg):
		"""
		@return: a prefetcher, or None if not applicable
		"""
		prefetcher = None

		if not isinstance(pkg, Package):
			# Satisfied blockers and other non-package entries have
			# nothing to fetch.
			pass

		elif pkg.type_name == "ebuild":

			prefetcher = EbuildFetcher(background=True,
				config_pool=self._ConfigPool(pkg.root,
				self._allocate_config, self._deallocate_config),
				fetchonly=1, logfile=self._fetch_log,
				pkg=pkg, prefetch=True, scheduler=self._sched_iface)

		elif pkg.type_name == "binary" and \
			"--getbinpkg" in self.myopts and \
			pkg.root_config.trees["bintree"].isremote(pkg.cpv):

			# Only prefetch binary packages that actually live on a
			# remote binhost.
			prefetcher = BinpkgPrefetcher(background=True,
				pkg=pkg, scheduler=self._sched_iface)

		return prefetcher
678
679
	def _is_restart_scheduled(self):
		"""
		Check if the merge list contains a replacement
		for the current running instance, that will result
		in restart after merge.
		@rtype: bool
		@returns: True if a restart is scheduled, False otherwise.
		"""
		if self._opts_no_restart.intersection(self.myopts):
			return False

		# A restart only matters when packages remain after the
		# portage replacement, so the last entry is never a trigger.
		for pkg in self._mergelist[:-1]:
			if self._is_restart_necessary(pkg):
				return True

		return False
698
699
	def _is_restart_necessary(self, pkg):
		"""
		@return: True if merging the given package
			requires restart, False otherwise.
		"""

		# Figure out if we need a restart.
		if pkg.root == self._running_root.root and \
			portage.match_from_list(
			portage.const.PORTAGE_PACKAGE_ATOM, [pkg]):
			if self._running_portage:
				# Only restart when the version actually changes.
				return pkg.cpv != self._running_portage.cpv
			return True
		return False

	def _restart_if_necessary(self, pkg):
		"""
		Use execv() to restart emerge. This happens
		if portage upgrades itself and there are
		remaining packages in the list.
		"""

		if self._opts_no_restart.intersection(self.myopts):
			return

		if not self._is_restart_necessary(pkg):
			return

		if pkg == self._mergelist[-1]:
			# Nothing left to merge after this, so no restart needed.
			return

		self._main_loop_cleanup()

		logger = self._logger
		pkg_count = self._pkg_count
		mtimedb = self._mtimedb
		bad_resume_opts = self._bad_resume_opts

		logger.log(" ::: completed emerge (%s of %s) %s to %s" % \
			(pkg_count.curval, pkg_count.maxval, pkg.cpv, pkg.root))

		logger.log(" *** RESTARTING " + \
			"emerge via exec() after change of " + \
			"portage version.")

		# Persist the remaining work so --resume picks it up after
		# the exec.
		mtimedb["resume"]["mergelist"].remove(list(pkg))
		mtimedb.commit()
		portage.run_exitfuncs()
		mynewargv = [sys.argv[0], "--resume"]
		resume_opts = self.myopts.copy()
		# For automatic resume, we need to prevent
		# any of bad_resume_opts from leaking in
		# via EMERGE_DEFAULT_OPTS.
		resume_opts["--ignore-default-opts"] = True
		for myopt, myarg in resume_opts.iteritems():
			if myopt not in bad_resume_opts:
				if myarg is True:
					mynewargv.append(myopt)
				else:
					mynewargv.append(myopt +"="+ str(myarg))
		# priority only needs to be adjusted on the first run
		os.environ["PORTAGE_NICENESS"] = "0"
		os.execv(mynewargv[0], mynewargv)
762
763
	def merge(self):
		"""
		Top-level entry point: run the merge list, retrying with
		--keep-going when requested, and report accumulated failures
		at the end.

		@rtype: int
		@returns: os.EX_OK on success, 1 on setup failure, otherwise
			the returncode of the last failed package.
		"""
		if "--resume" in self.myopts:
			# We're resuming.
			portage.writemsg_stdout(
				colorize("GOOD", "*** Resuming merge...\n"), noiselevel=-1)
			self._logger.log(" *** Resuming merge...")

		self._save_resume_list()

		try:
			self._background = self._background_mode()
		except self._unknown_internal_error:
			return 1

		for root in self.trees:
			root_config = self.trees[root]["root_config"]

			# Even for --pretend --fetch mode, PORTAGE_TMPDIR is required
			# since it might spawn pkg_nofetch which requires PORTAGE_BUILDDIR
			# for ensuring sane $PWD (bug #239560) and storing elog messages.
			tmpdir = root_config.settings.get("PORTAGE_TMPDIR", "")
			if not tmpdir or not os.path.isdir(tmpdir):
				msg = "The directory specified in your " + \
					"PORTAGE_TMPDIR variable, '%s', " % tmpdir + \
					"does not exist. Please create this " + \
					"directory or correct your PORTAGE_TMPDIR setting."
				msg = textwrap.wrap(msg, 70)
				out = portage.output.EOutput()
				for l in msg:
					out.eerror(l)
				return 1

			if self._background:
				root_config.settings.unlock()
				root_config.settings["PORTAGE_BACKGROUND"] = "1"
				root_config.settings.backup_changes("PORTAGE_BACKGROUND")
				root_config.settings.lock()

			self.pkgsettings[root] = portage.config(
				clone=root_config.settings)

		rval = self._generate_digests()
		if rval != os.EX_OK:
			return rval

		rval = self._check_manifests()
		if rval != os.EX_OK:
			return rval

		keep_going = "--keep-going" in self.myopts
		fetchonly = self._build_opts.fetchonly
		mtimedb = self._mtimedb
		failed_pkgs = self._failed_pkgs

		# Main retry loop: with --keep-going, drop failed packages
		# from the resume list and try to merge the remainder.
		while True:
			rval = self._merge()
			if rval == os.EX_OK or fetchonly or not keep_going:
				break
			if "resume" not in mtimedb:
				break
			mergelist = self._mtimedb["resume"].get("mergelist")
			if not mergelist:
				break

			if not failed_pkgs:
				break

			for failed_pkg in failed_pkgs:
				mergelist.remove(list(failed_pkg.pkg))

			self._failed_pkgs_all.extend(failed_pkgs)
			del failed_pkgs[:]

			if not mergelist:
				break

			if not self._calc_resume_list():
				break

			clear_caches(self.trees)
			if not self._mergelist:
				break

			self._save_resume_list()
			self._pkg_count.curval = 0
			self._pkg_count.maxval = len([x for x in self._mergelist \
				if isinstance(x, Package) and x.operation == "merge"])
			self._status_display.maxval = self._pkg_count.maxval

		self._logger.log(" *** Finished. Cleaning up...")

		if failed_pkgs:
			self._failed_pkgs_all.extend(failed_pkgs)
			del failed_pkgs[:]

		background = self._background
		failure_log_shown = False
		if background and len(self._failed_pkgs_all) == 1:
			# If only one package failed then just show its
			# whole log for easy viewing.
			failed_pkg = self._failed_pkgs_all[-1]
			log_file = None

			log_path = self._locate_failure_log(failed_pkg)
			if log_path is not None:
				try:
					log_file = open(log_path)
				except IOError:
					pass

			if log_file is not None:
				try:
					for line in log_file:
						writemsg_level(line, noiselevel=-1)
				finally:
					log_file.close()
				failure_log_shown = True

		# Dump mod_echo output now since it tends to flood the terminal.
		# This allows us to avoid having more important output, generated
		# later, from being swept away by the mod_echo output.
		mod_echo_output = _flush_elog_mod_echo()

		if background and not failure_log_shown and \
			self._failed_pkgs_all and \
			self._failed_pkgs_die_msgs and \
			not mod_echo_output:

			printer = portage.output.EOutput()
			for mysettings, key, logentries in self._failed_pkgs_die_msgs:
				root_msg = ""
				if mysettings["ROOT"] != "/":
					root_msg = " merged to %s" % mysettings["ROOT"]
				print
				printer.einfo("Error messages for package %s%s:" % \
					(colorize("INFORM", key), root_msg))
				print
				for phase in portage.const.EBUILD_PHASES:
					if phase not in logentries:
						continue
					for msgtype, msgcontent in logentries[phase]:
						if isinstance(msgcontent, basestring):
							msgcontent = [msgcontent]
						for line in msgcontent:
							printer.eerror(line.strip("\n"))

		if self._post_mod_echo_msgs:
			for msg in self._post_mod_echo_msgs:
				msg()

		# Summarize all failures (only when more than one, or when
		# --keep-going collected at least one).
		if len(self._failed_pkgs_all) > 1 or \
			(self._failed_pkgs_all and "--keep-going" in self.myopts):
			if len(self._failed_pkgs_all) > 1:
				msg = "The following %d packages have " % \
					len(self._failed_pkgs_all) + \
					"failed to build or install:"
			else:
				msg = "The following package has " + \
					"failed to build or install:"
			prefix = bad(" * ")
			writemsg(prefix + "\n", noiselevel=-1)
			# textwrap is already imported at module level; the local
			# "from textwrap import wrap" was redundant.
			for line in textwrap.wrap(msg, 72):
				writemsg("%s%s\n" % (prefix, line), noiselevel=-1)
			writemsg(prefix + "\n", noiselevel=-1)
			for failed_pkg in self._failed_pkgs_all:
				writemsg("%s\t%s\n" % (prefix,
					colorize("INFORM", str(failed_pkg.pkg))),
					noiselevel=-1)
			writemsg(prefix + "\n", noiselevel=-1)

		return rval
939
940
	def _elog_listener(self, mysettings, key, logentries, fulltext):
		"""
		Global elog listener: collect ERROR-level entries so they can
		be re-shown in the failure summary after the merge finishes.
		"""
		error_entries = portage.elog.filter_loglevels(logentries, ["ERROR"])
		if not error_entries:
			return
		self._failed_pkgs_die_msgs.append((mysettings, key, error_entries))
945
946
	def _locate_failure_log(self, failed_pkg):
947
948
		build_dir = failed_pkg.build_dir
949
		log_file = None
950
951
		log_paths = [failed_pkg.build_log]
952
953
		for log_path in log_paths:
954
			if not log_path:
955
				continue
956
957
			try:
958
				log_size = os.stat(log_path).st_size
959
			except OSError:
960
				continue
961
962
			if log_size == 0:
963
				continue
964
965
			return log_path
966
967
		return None
968
969
	def _add_packages(self):
		"""Queue every Package entry from the merge list for scheduling."""
		queue = self._pkg_queue
		for entry in self._mergelist:
			if isinstance(entry, Package):
				queue.append(entry)
			elif isinstance(entry, Blocker):
				# Blockers carry no work of their own here; they were
				# handled during graph construction.
				pass
976
977
	def _system_merge_started(self, merge):
		"""
		Add any unsatisfied runtime deps to self._unsatisfied_system_deps.

		Called when a deferred system-package merge starts; records which
		direct runtime dependencies of the package are still waiting to
		be merged.
		"""
		graph = self._digraph
		if graph is None:
			# No dependency graph available (e.g. --nodeps);
			# nothing to inspect.
			return
		pkg = merge.merge.pkg

		# Skip this if $ROOT != / since it shouldn't matter if there
		# are unsatisfied system runtime deps in this case.
		if pkg.root != '/':
			return

		completed_tasks = self._completed_tasks
		unsatisfied = self._unsatisfied_system_deps

		def ignore_non_runtime_or_satisfied(priority):
			"""
			Ignore non-runtime and satisfied runtime priorities.
			"""
			if isinstance(priority, DepPriority) and \
				not priority.satisfied and \
				(priority.runtime or priority.runtime_post):
				return False
			return True

		# When checking for unsatisfied runtime deps, only check
		# direct deps since indirect deps are checked when the
		# corresponding parent is merged.
		for child in graph.child_nodes(pkg,
			ignore_priority=ignore_non_runtime_or_satisfied):
			if not isinstance(child, Package) or \
				child.operation == 'uninstall':
				continue
			if child is pkg:
				continue
			if child.operation == 'merge' and \
				child not in completed_tasks:
				unsatisfied.add(child)
1017
1018
	def _merge_wait_exit_handler(self, task):
		"""
		Exit listener for merges that were deferred until no builds were
		running: unregister the task, then run normal merge-exit handling.
		"""
		self._merge_wait_scheduled.remove(task)
		self._merge_exit(task)
1021
1022
	def _merge_exit(self, merge):
		"""
		Exit listener for PackageMerge tasks: record the result, return
		the task's config to the pool, refresh the status display, and
		schedule more work.
		"""
		self._do_merge_exit(merge)
		self._deallocate_config(merge.merge.settings)
		# Only count successful merges of packages that were not
		# already installed.
		if merge.returncode == os.EX_OK and \
			not merge.merge.pkg.installed:
			self._status_display.curval += 1
		self._status_display.merges = len(self._task_queues.merge)
		self._schedule()
1030
1031
	def _do_merge_exit(self, merge):
		"""
		Handle the outcome of a completed PackageMerge task: record a
		failure, or mark the task complete and keep the on-disk resume
		list in sync.
		"""
		pkg = merge.merge.pkg
		if merge.returncode != os.EX_OK:
			settings = merge.merge.settings
			build_dir = settings.get("PORTAGE_BUILDDIR")
			build_log = settings.get("PORTAGE_LOG_FILE")

			self._failed_pkgs.append(self._failed_pkg(
				build_dir=build_dir, build_log=build_log,
				pkg=pkg,
				returncode=merge.returncode))
			self._failed_pkg_msg(self._failed_pkgs[-1], "install", "to")

			self._status_display.failed = len(self._failed_pkgs)
			return

		self._task_complete(pkg)
		pkg_to_replace = merge.merge.pkg_to_replace
		if pkg_to_replace is not None:
			# When a package is replaced, mark its uninstall
			# task complete (if any).
			uninst_hash_key = \
				("installed", pkg.root, pkg_to_replace.cpv, "uninstall")
			self._task_complete(uninst_hash_key)

		if pkg.installed:
			return

		# A portage upgrade may require emerge to re-exec itself.
		self._restart_if_necessary(pkg)

		# Call mtimedb.commit() after each merge so that
		# --resume still works after being interrupted
		# by reboot, sigkill or similar.
		mtimedb = self._mtimedb
		mtimedb["resume"]["mergelist"].remove(list(pkg))
		if not mtimedb["resume"]["mergelist"]:
			del mtimedb["resume"]
		mtimedb.commit()
1069
1070
	def _build_exit(self, build):
		"""
		Exit listener for build jobs: on success, queue the resulting
		merge (deferred for deep system deps); on failure, record the
		failed package. Either way, free the job slot and reschedule.
		"""
		if build.returncode == os.EX_OK:
			self.curval += 1
			merge = PackageMerge(merge=build)
			if not build.build_opts.buildpkgonly and \
				build.pkg in self._deep_system_deps:
				# Since dependencies on system packages are frequently
				# unspecified, merge them only when no builds are executing.
				self._merge_wait_queue.append(merge)
				merge.addStartListener(self._system_merge_started)
			else:
				merge.addExitListener(self._merge_exit)
				self._task_queues.merge.add(merge)
				self._status_display.merges = len(self._task_queues.merge)
		else:
			settings = build.settings
			build_dir = settings.get("PORTAGE_BUILDDIR")
			build_log = settings.get("PORTAGE_LOG_FILE")

			self._failed_pkgs.append(self._failed_pkg(
				build_dir=build_dir, build_log=build_log,
				pkg=build.pkg,
				returncode=build.returncode))
			self._failed_pkg_msg(self._failed_pkgs[-1], "emerge", "for")

			self._status_display.failed = len(self._failed_pkgs)
			# On failure there is no follow-up merge, so the config
			# is released here.
			self._deallocate_config(build.settings)
		# The build job slot is free now regardless of the outcome.
		self._jobs -= 1
		self._status_display.running = self._jobs
		self._schedule()
1100
1101
	def _extract_exit(self, build):
1102
		self._build_exit(build)
1103
1104
	def _task_complete(self, pkg):
1105
		self._completed_tasks.add(pkg)
1106
		self._unsatisfied_system_deps.discard(pkg)
1107
		self._choose_pkg_return_early = False
1108
1109
	def _merge(self):
		"""
		Run one pass of the main scheduling loop.

		@rtype: int
		@returns: os.EX_OK on success, otherwise the returncode of
			the last failed package.
		"""
		self._add_prefetchers()
		self._add_packages()
		pkg_queue = self._pkg_queue
		failed_pkgs = self._failed_pkgs
		# Silence lock messages and capture ERROR elog entries for the
		# duration of the run.
		portage.locks._quiet = self._background
		portage.elog._emerge_elog_listener = self._elog_listener
		rval = os.EX_OK

		try:
			self._main_loop()
		finally:
			# Always restore global state, even when the loop raises.
			self._main_loop_cleanup()
			portage.locks._quiet = False
			portage.elog._emerge_elog_listener = None
			if failed_pkgs:
				rval = failed_pkgs[-1].returncode

		return rval
1129
1130
	def _main_loop_cleanup(self):
1131
		del self._pkg_queue[:]
1132
		self._completed_tasks.clear()
1133
		self._deep_system_deps.clear()
1134
		self._unsatisfied_system_deps.clear()
1135
		self._choose_pkg_return_early = False
1136
		self._status_display.reset()
1137
		self._digraph = None
1138
		self._task_queues.fetch.clear()
1139
1140
	def _choose_pkg(self):
		"""
		Choose a task that has all its dependencies satisfied.

		@returns: the chosen Package, or None when no package can be
			started until a running job completes.
		"""

		if self._choose_pkg_return_early:
			return None

		if self._digraph is None:
			# Without a graph (e.g. --nodeps) dependencies can't be
			# checked, so serialize unless parallel --nodeps was
			# explicitly requested.
			if (self._jobs or self._task_queues.merge) and \
				not ("--nodeps" in self.myopts and \
				(self._max_jobs is True or self._max_jobs > 1)):
				self._choose_pkg_return_early = True
				return None
			return self._pkg_queue.pop(0)

		if not (self._jobs or self._task_queues.merge):
			# Nothing is running, so the first queued package is safe.
			return self._pkg_queue.pop(0)

		self._prune_digraph()

		chosen_pkg = None
		# Packages queued after the candidate don't count as blockers,
		# since they'll be merged after it anyway.
		later = set(self._pkg_queue)
		for pkg in self._pkg_queue:
			later.remove(pkg)
			if not self._dependent_on_scheduled_merges(pkg, later):
				chosen_pkg = pkg
				break

		if chosen_pkg is not None:
			self._pkg_queue.remove(chosen_pkg)

		if chosen_pkg is None:
			# There's no point in searching for a package to
			# choose until at least one of the existing jobs
			# completes.
			self._choose_pkg_return_early = True

		return chosen_pkg
1179
1180
	def _dependent_on_scheduled_merges(self, pkg, later):
		"""
		Traverse the subgraph of the given package's deep dependencies
		to see if it contains any scheduled merges.
		@param pkg: a package to check dependencies for
		@type pkg: Package
		@param later: packages for which dependence should be ignored
			since they will be merged later than pkg anyway and therefore
			delaying the merge of pkg will not result in a more optimal
			merge order
		@type later: set
		@rtype: bool
		@returns: True if the package is dependent, False otherwise.
		"""

		graph = self._digraph
		completed_tasks = self._completed_tasks

		dependent = False
		# Depth-first traversal over pkg's dependency subgraph.
		traversed_nodes = set([pkg])
		direct_deps = graph.child_nodes(pkg)
		node_stack = direct_deps
		direct_deps = frozenset(direct_deps)
		while node_stack:
			node = node_stack.pop()
			if node in traversed_nodes:
				continue
			traversed_nodes.add(node)
			# A node makes pkg "dependent" unless it is already
			# installed with nothing to do, an indirect uninstall,
			# already completed, or scheduled after pkg anyway.
			if not ((node.installed and node.operation == "nomerge") or \
				(node.operation == "uninstall" and \
				node not in direct_deps) or \
				node in completed_tasks or \
				node in later):
				dependent = True
				break
			node_stack.extend(graph.child_nodes(node))

		return dependent
1218
1219
	def _allocate_config(self, root):
1220
		"""
1221
		Allocate a unique config instance for a task in order
1222
		to prevent interference between parallel tasks.
1223
		"""
1224
		if self._config_pool[root]:
1225
			temp_settings = self._config_pool[root].pop()
1226
		else:
1227
			temp_settings = portage.config(clone=self.pkgsettings[root])
1228
		# Since config.setcpv() isn't guaranteed to call config.reset() due to
1229
		# performance reasons, call it here to make sure all settings from the
1230
		# previous package get flushed out (such as PORTAGE_LOG_FILE).
1231
		temp_settings.reload()
1232
		temp_settings.reset()
1233
		return temp_settings
1234
1235
	def _deallocate_config(self, settings):
1236
		self._config_pool[settings["ROOT"]].append(settings)
1237
1238
	def _main_loop(self):
		"""
		Drive scheduling and the poll loop until all jobs and merges
		have finished.
		"""
		# Only allow 1 job max if a restart is scheduled
		# due to portage update.
		if self._is_restart_scheduled() or \
			self._opts_no_background.intersection(self.myopts):
			self._set_max_jobs(1)

		merge_queue = self._task_queues.merge

		while self._schedule():
			if self._poll_event_handlers:
				self._poll_loop()

		# Drain any remaining jobs/merges after scheduling has stopped
		# (e.g. after a failure without --keep-going).
		while True:
			self._schedule()
			if not (self._jobs or merge_queue):
				break
			if self._poll_event_handlers:
				self._poll_loop()
1258
1259
	def _keep_scheduling(self):
1260
		return bool(self._pkg_queue and \
1261
			not (self._failed_pkgs and not self._build_opts.fetchonly))
1262
1263
	def _schedule_tasks(self):
		"""
		Schedule pending work onto the task queues.

		@rtype: bool
		@returns: True if scheduling should continue, False otherwise.
		"""
		# When the number of jobs drops to zero, process all waiting merges.
		if not self._jobs and self._merge_wait_queue:
			for task in self._merge_wait_queue:
				task.addExitListener(self._merge_wait_exit_handler)
				self._task_queues.merge.add(task)
			self._status_display.merges = len(self._task_queues.merge)
			self._merge_wait_scheduled.extend(self._merge_wait_queue)
			del self._merge_wait_queue[:]

		self._schedule_tasks_imp()
		self._status_display.display()

		state_change = 0
		for q in self._task_queues.values():
			if q.schedule():
				state_change += 1

		# Cancel prefetchers if they're the only reason
		# the main poll loop is still running.
		if self._failed_pkgs and not self._build_opts.fetchonly and \
			not (self._jobs or self._task_queues.merge) and \
			self._task_queues.fetch:
			self._task_queues.fetch.clear()
			state_change += 1

		if state_change:
			# Queue state changed, so give newly eligible tasks another
			# chance to start before returning to the poll loop.
			self._schedule_tasks_imp()
			self._status_display.display()

		return self._keep_scheduling()
1295
1296
	def _job_delay(self):
1297
		"""
1298
		@rtype: bool
1299
		@returns: True if job scheduling should be delayed, False otherwise.
1300
		"""
1301
1302
		if self._jobs and self._max_load is not None:
1303
1304
			current_time = time.time()
1305
1306
			delay = self._job_delay_factor * self._jobs ** self._job_delay_exp
1307
			if delay > self._job_delay_max:
1308
				delay = self._job_delay_max
1309
			if (current_time - self._previous_job_start_time) < delay:
1310
				return True
1311
1312
		return False
1313
1314
	def _schedule_tasks_imp(self):
		"""
		Start as many new tasks as the current limits allow.
		@rtype: bool
		@returns: True if state changed, False otherwise.
		"""

		state_change = 0

		while True:

			if not self._keep_scheduling():
				return bool(state_change)

			# Stop starting work when: the last search found nothing,
			# a deferred system merge is waiting for quiescence, job
			# limits are reached, or load-control asks for a delay.
			if self._choose_pkg_return_early or \
				self._merge_wait_scheduled or \
				(self._jobs and self._unsatisfied_system_deps) or \
				not self._can_add_job() or \
				self._job_delay():
				return bool(state_change)

			pkg = self._choose_pkg()
			if pkg is None:
				return bool(state_change)

			state_change += 1

			if not pkg.installed:
				self._pkg_count.curval += 1

			task = self._task(pkg)

			if pkg.installed:
				# Already-installed packages go straight to the merge
				# queue; no build job slot is needed.
				merge = PackageMerge(merge=task)
				merge.addExitListener(self._merge_exit)
				self._task_queues.merge.add(merge)

			elif pkg.built:
				# Binary package: occupy a job slot for extraction.
				self._jobs += 1
				self._previous_job_start_time = time.time()
				self._status_display.running = self._jobs
				task.addExitListener(self._extract_exit)
				self._task_queues.jobs.add(task)

			else:
				# Source package: occupy a job slot for the build.
				self._jobs += 1
				self._previous_job_start_time = time.time()
				self._status_display.running = self._jobs
				task.addExitListener(self._build_exit)
				self._task_queues.jobs.add(task)

		# NOTE(review): unreachable -- the while loop above only ever
		# exits via one of the return statements.
		return bool(state_change)
1365
1366
	def _task(self, pkg):
		"""
		Build a MergeListItem task for pkg, resolving the currently
		installed package in the same slot (if any) as the package
		to be replaced.
		"""
		pkg_to_replace = None
		if pkg.operation != "uninstall":
			vardb = pkg.root_config.trees["vartree"].dbapi
			previous_cpv = vardb.match(pkg.slot_atom)
			if previous_cpv:
				# The last (highest) matching installed cpv is the one
				# being replaced.
				previous_cpv = previous_cpv.pop()
				pkg_to_replace = self._pkg(previous_cpv,
					"installed", pkg.root_config, installed=True)

		task = MergeListItem(args_set=self._args_set,
			background=self._background, binpkg_opts=self._binpkg_opts,
			build_opts=self._build_opts,
			config_pool=self._ConfigPool(pkg.root,
			self._allocate_config, self._deallocate_config),
			emerge_opts=self.myopts,
			find_blockers=self._find_blockers(pkg), logger=self._logger,
			mtimedb=self._mtimedb, pkg=pkg, pkg_count=self._pkg_count.copy(),
			pkg_to_replace=pkg_to_replace,
			prefetcher=self._prefetchers.get(pkg),
			scheduler=self._sched_iface,
			settings=self._allocate_config(pkg.root),
			statusMessage=self._status_msg,
			world_atom=self._world_atom)

		return task
1393
1394
	def _failed_pkg_msg(self, failed_pkg, action, preposition):
		"""
		Show a status message describing a failed package, including
		the log file location when one is available.

		@param failed_pkg: a _failed_pkg instance
		@param action: verb for the message, e.g. "install" or "emerge"
		@param preposition: joins the package with its root in the
			message, e.g. "to" or "for"
		"""
		pkg = failed_pkg.pkg
		msg = "%s to %s %s" % \
			(bad("Failed"), action, colorize("INFORM", pkg.cpv))
		if pkg.root != "/":
			msg += " %s %s" % (preposition, pkg.root)

		log_path = self._locate_failure_log(failed_pkg)
		if log_path is not None:
			msg += ", Log file:"
		self._status_msg(msg)

		if log_path is not None:
			self._status_msg(" '%s'" % (colorize("INFORM", log_path),))
1408
1409
	def _status_msg(self, msg):
		"""
		Display a brief status message (no newlines) in the status display.
		This is called by tasks to provide feedback to the user. This
		delegates the responsibility of generating carriage-return and
		newline control characters, to guarantee that lines are created
		or erased when necessary and appropriate.

		@type msg: str
		@param msg: a brief status message (no newlines allowed)
		"""
		if not self._background:
			# In the foreground, start the message on a fresh line.
			writemsg_level("\n")
		self._status_display.displayMessage(msg)
1423
1424
	def _save_resume_list(self):
1425
		"""
1426
		Do this before verifying the ebuild Manifests since it might
1427
		be possible for the user to use --resume --skipfirst get past
1428
		a non-essential package with a broken digest.
1429
		"""
1430
		mtimedb = self._mtimedb
1431
		mtimedb["resume"]["mergelist"] = [list(x) \
1432
			for x in self._mergelist \
1433
			if isinstance(x, Package) and x.operation == "merge"]
1434
1435
		mtimedb.commit()
1436
1437
	def _calc_resume_list(self):
1438
		"""
1439
		Use the current resume list to calculate a new one,
1440
		dropping any packages with unsatisfied deps.
1441
		@rtype: bool
1442
		@returns: True if successful, False otherwise.
1443
		"""
1444
		print colorize("GOOD", "*** Resuming merge...")
1445
1446
		if self._show_list():
1447
			if "--tree" in self.myopts:
1448
				portage.writemsg_stdout("\n" + \
1449
					darkgreen("These are the packages that " + \
1450
					"would be merged, in reverse order:\n\n"))
1451
1452
			else:
1453
				portage.writemsg_stdout("\n" + \
1454
					darkgreen("These are the packages that " + \
1455
					"would be merged, in order:\n\n"))
1456
1457
		show_spinner = "--quiet" not in self.myopts and \
1458
			"--nodeps" not in self.myopts
1459
1460
		if show_spinner:
1461
			print "Calculating dependencies  ",
1462
1463
		myparams = create_depgraph_params(self.myopts, None)
1464
		success = False
1465
		e = None
1466
		try:
1467
			success, mydepgraph, dropped_tasks = resume_depgraph(
1468
				self.settings, self.trees, self._mtimedb, self.myopts,
1469
				myparams, self._spinner)
1470
		except depgraph.UnsatisfiedResumeDep, exc:
1471
			# rename variable to avoid python-3.0 error:
1472
			# SyntaxError: can not delete variable 'e' referenced in nested
1473
			#              scope
1474
			e = exc
1475
			mydepgraph = e.depgraph
1476
			dropped_tasks = set()
1477
1478
		if show_spinner:
1479
			print "\b\b... done!"
1480
1481
		if e is not None:
1482
			def unsatisfied_resume_dep_msg():
1483
				mydepgraph.display_problems()
1484
				out = portage.output.EOutput()
1485
				out.eerror("One or more packages are either masked or " + \
1486
					"have missing dependencies:")
1487
				out.eerror("")
1488
				indent = "  "
1489
				show_parents = set()
1490
				for dep in e.value:
1491
					if dep.parent in show_parents:
1492
						continue
1493
					show_parents.add(dep.parent)
1494
					if dep.atom is None:
1495
						out.eerror(indent + "Masked package:")
1496
						out.eerror(2 * indent + str(dep.parent))
1497
						out.eerror("")
1498
					else:
1499
						out.eerror(indent + str(dep.atom) + " pulled in by:")
1500
						out.eerror(2 * indent + str(dep.parent))
1501
						out.eerror("")
1502
				msg = "The resume list contains packages " + \
1503
					"that are either masked or have " + \
1504
					"unsatisfied dependencies. " + \
1505
					"Please restart/continue " + \
1506
					"the operation manually, or use --skipfirst " + \
1507
					"to skip the first package in the list and " + \
1508
					"any other packages that may be " + \
1509
					"masked or have missing dependencies."
1510
				for line in textwrap.wrap(msg, 72):
1511
					out.eerror(line)
1512
			self._post_mod_echo_msgs.append(unsatisfied_resume_dep_msg)
1513
			return False
1514
1515
		if success and self._show_list():
1516
			mylist = mydepgraph.altlist()
1517
			if mylist:
1518
				if "--tree" in self.myopts:
1519
					mylist.reverse()
1520
				mydepgraph.display(mylist, favorites=self._favorites)
1521
1522
		if not success:
1523
			self._post_mod_echo_msgs.append(mydepgraph.display_problems)
1524
			return False
1525
		mydepgraph.display_problems()
1526
1527
		mylist = mydepgraph.altlist()
1528
		mydepgraph.break_refs(mylist)
1529
		mydepgraph.break_refs(dropped_tasks)
1530
		self._mergelist = mylist
1531
		self._set_digraph(mydepgraph.schedulerGraph())
1532
1533
		msg_width = 75
1534
		for task in dropped_tasks:
1535
			if not (isinstance(task, Package) and task.operation == "merge"):
1536
				continue
1537
			pkg = task
1538
			msg = "emerge --keep-going:" + \
1539
				" %s" % (pkg.cpv,)
1540
			if pkg.root != "/":
1541
				msg += " for %s" % (pkg.root,)
1542
			msg += " dropped due to unsatisfied dependency."
1543
			for line in textwrap.wrap(msg, msg_width):
1544
				eerror(line, phase="other", key=pkg.cpv)
1545
			settings = self.pkgsettings[pkg.root]
1546
			# Ensure that log collection from $T is disabled inside
1547
			# elog_process(), since any logs that might exist are
1548
			# not valid here.
1549
			settings.pop("T", None)
1550
			portage.elog.elog_process(pkg.cpv, settings)
1551
			self._failed_pkgs_all.append(self._failed_pkg(pkg=pkg))
1552
1553
		return True
1554
1555
	def _show_list(self):
1556
		myopts = self.myopts
1557
		if "--quiet" not in myopts and \
1558
			("--ask" in myopts or "--tree" in myopts or \
1559
			"--verbose" in myopts):
1560
			return True
1561
		return False
1562
1563
	def _world_atom(self, pkg):
		"""
		Add the package to the world file, but only if
		it's supposed to be added. Otherwise, do nothing.
		"""
		# Options that imply no permanent change to the world file.
		if set(("--buildpkgonly", "--fetchonly",
			"--fetch-all-uri",
			"--oneshot", "--onlydeps",
			"--pretend")).intersection(self.myopts):
			return

		# Only packages merged into the target root belong in its
		# world file.
		if pkg.root != self.target_root:
			return

		args_set = self._args_set
		if not args_set.findAtomForPackage(pkg):
			# The package wasn't requested via a command-line argument.
			return

		logger = self._logger
		pkg_count = self._pkg_count
		root_config = pkg.root_config
		world_set = root_config.sets["world"]
		world_locked = False
		if hasattr(world_set, "lock"):
			world_set.lock()
			world_locked = True

		try:
			if hasattr(world_set, "load"):
				world_set.load() # maybe it's changed on disk

			atom = create_world_atom(pkg, args_set, root_config)
			if atom:
				if hasattr(world_set, "add"):
					self._status_msg(('Recording %s in "world" ' + \
						'favorites file...') % atom)
					logger.log(" === (%s of %s) Updating world file (%s)" % \
						(pkg_count.curval, pkg_count.maxval, pkg.cpv))
					world_set.add(atom)
				else:
					writemsg_level('\n!!! Unable to record %s in "world"\n' % \
						(atom,), level=logging.WARN, noiselevel=-1)
		finally:
			if world_locked:
				world_set.unlock()
1609
1610
	def _pkg(self, cpv, type_name, root_config, installed=False):
		"""
		Get a package instance from the cache, or create a new
		one if necessary. Raises KeyError from aux_get if it
		fails for some reason (package does not exist or is
		corrupt).
		"""
		operation = "merge"
		if installed:
			operation = "nomerge"

		if self._digraph is not None:
			# Reuse existing instance when available.
			pkg = self._digraph.get(
				(type_name, root_config.root, cpv, operation))
			if pkg is not None:
				return pkg

		# Not in the graph; build a fresh instance from the package
		# database that corresponds to type_name.
		tree_type = depgraph.pkg_tree_map[type_name]
		db = root_config.trees[tree_type].dbapi
		db_keys = list(self.trees[root_config.root][
			tree_type].dbapi._aux_cache_keys)
		metadata = izip(db_keys, db.aux_get(cpv, db_keys))
		pkg = Package(cpv=cpv, metadata=metadata,
			root_config=root_config, installed=installed)
		if type_name == "ebuild":
			# Ebuilds need USE/CHOST computed from the current settings.
			settings = self.pkgsettings[root_config.root]
			settings.setcpv(pkg)
			pkg.metadata["USE"] = settings["PORTAGE_USE"]
			pkg.metadata['CHOST'] = settings.get('CHOST', '')

		return pkg
(-)clear_caches.py (+19 lines)
Line 0 Link Here
1
import gc
2
3
try:
4
	import portage
5
except ImportError:
6
	from os import path as osp
7
	import sys
8
	sys.path.insert(0, osp.join(osp.dirname(osp.dirname(osp.realpath(__file__))), "pym"))
9
	import portage
10
11
def clear_caches(trees):
	"""
	Drop cached package metadata so that a subsequent dependency
	calculation sees fresh on-disk state.

	@param trees: mapping of root to tree dictionaries containing
		"porttree", "bintree" and "vartree" entries
	"""
	for d in trees.itervalues():
		# NOTE(review): melt() presumably discards the portdbapi's
		# frozen/cached state -- confirm against portdbapi.
		d["porttree"].dbapi.melt()
		d["porttree"].dbapi._aux_cache.clear()
		d["bintree"].dbapi._aux_cache.clear()
		d["bintree"].dbapi._clear_cache()
		d["vartree"].dbapi.linkmap._clear_cache()
	# Flush portage's global directory listing cache too.
	portage.dircache.clear()
	# Reclaim memory from the dropped caches right away.
	gc.collect()
(-)_flush_elog_mod_echo.py (+17 lines)
Line 0 Link Here
1
def _flush_elog_mod_echo():
2
	"""
3
	Dump the mod_echo output now so that our other
4
	notifications are shown last.
5
	@rtype: bool
6
	@returns: True if messages were shown, False otherwise.
7
	"""
8
	messages_shown = False
9
	try:
10
		from portage.elog import mod_echo
11
	except ImportError:
12
		pass # happens during downgrade to a version without the module
13
	else:
14
		messages_shown = bool(mod_echo._items)
15
		mod_echo.finalize()
16
	return messages_shown
17
(-)depgraph.py (+4973 lines)
Line 0 Link Here
1
import gc
2
import os
3
import re
4
import sys
5
import textwrap
6
from itertools import chain, izip
7
8
try:
9
	import portage
10
except ImportError:
11
	from os import path as osp
12
	sys.path.insert(0, osp.join(osp.dirname(osp.dirname(osp.realpath(__file__))), "pym"))
13
	import portage
14
15
from portage import digraph
16
from portage.output import bold, blue, colorize, create_color_func, darkblue, \
17
	darkgreen, green, nc_len, red, teal, turquoise, yellow
18
bad = create_color_func("BAD")
19
from portage.sets import SETPREFIX
20
from portage.sets.base import InternalPackageSet
21
from portage.util import cmp_sort_key, writemsg
22
23
from _emerge.AtomArg import AtomArg
24
from _emerge.Blocker import Blocker
25
from _emerge.BlockerCache import BlockerCache
26
from _emerge.BlockerDepPriority import BlockerDepPriority
27
from _emerge.countdown import countdown
28
from _emerge.create_world_atom import create_world_atom
29
from _emerge.Dependency import Dependency
30
from _emerge.DependencyArg import DependencyArg
31
from _emerge.DepPriority import DepPriority
32
from _emerge.DepPriorityNormalRange import DepPriorityNormalRange
33
from _emerge.DepPrioritySatisfiedRange import DepPrioritySatisfiedRange
34
from _emerge.FakeVartree import FakeVartree
35
from _emerge._find_deep_system_runtime_deps import _find_deep_system_runtime_deps
36
from _emerge.format_size import format_size
37
from _emerge.is_valid_package_atom import is_valid_package_atom
38
from _emerge.Package import Package
39
from _emerge.PackageArg import PackageArg
40
from _emerge.PackageCounters import PackageCounters
41
from _emerge.PackageVirtualDbapi import PackageVirtualDbapi
42
from _emerge.RepoDisplay import RepoDisplay
43
from _emerge.RootConfig import RootConfig
44
from _emerge.search import search
45
from _emerge.SetArg import SetArg
46
from _emerge.show_invalid_depstring_notice import show_invalid_depstring_notice
47
from _emerge.UnmergeDepPriority import UnmergeDepPriority
48
from _emerge.visible import visible
49
50
import portage.proxy.lazyimport
51
import portage.proxy as proxy
52
proxy.lazyimport.lazyimport(globals(),
53
	'_emerge.Scheduler:Scheduler',
54
)
55
#from _emerge.Scheduler import Scheduler
56
class depgraph(object):
57
58
	pkg_tree_map = RootConfig.pkg_tree_map
59
60
	_dep_keys = ["DEPEND", "RDEPEND", "PDEPEND"]
61
62
	def __init__(self, settings, trees, myopts, myparams, spinner):
		"""
		Build the initial (empty) dependency graph state for every root.

		@param settings: global portage configuration
		@param trees: mapping of root -> {porttree, bintree, vartree, root_config}
		@param myopts: parsed emerge command line options
		@param myparams: depgraph behavior parameters (e.g. "deep", "empty")
		@param spinner: progress indicator updated during long operations
		"""
		self.settings = settings
		self.target_root = settings["ROOT"]
		self.myopts = myopts
		self.myparams = myparams
		# PORTAGE_DEBUG=1 enables extra dependency debugging output.
		self.edebug = 0
		if settings.get("PORTAGE_DEBUG", "") == "1":
			self.edebug = 1
		self.spinner = spinner
		self._running_root = trees["/"]["root_config"]
		self._opts_no_restart = Scheduler._opts_no_restart
		self.pkgsettings = {}
		# slot atom -> Package, for each Package added to the graph
		self._slot_pkg_map = {}
		# node -> reason it was selected for reinstallation
		self._reinstall_nodes = {}
		self.mydbapi = {}
		self.trees = {}
		self._trees_orig = trees
		self.roots = {}
		# Filtered view of preferred packages selected from the
		# available repositories.
		self._filtered_trees = {}
		# Installed packages plus new packages already added to the graph.
		self._graph_trees = {}
		# Cache of every Package instance created for this graph.
		self._pkg_cache = {}
		for root in trees:
			self.trees[root] = {}
			# This RootConfig references our FakeVartree rather than
			# the real vartree.
			self.roots[root] = RootConfig(
				trees[root]["vartree"].settings,
				self.trees[root],
				trees[root]["root_config"].setconfig)
			for tree_type in ("porttree", "bintree"):
				self.trees[root][tree_type] = trees[root][tree_type]
			self.trees[root]["vartree"] = \
				FakeVartree(trees[root]["root_config"],
					pkg_cache=self._pkg_cache)
			self.pkgsettings[root] = portage.config(
				clone=self.trees[root]["vartree"].settings)
			self._slot_pkg_map[root] = {}
			installed_db = self.trees[root]["vartree"].dbapi
			preload_installed_pkgs = "--nodeps" not in self.myopts and \
				"--buildpkgonly" not in self.myopts
			# This fakedbapi models the state the vdb will have after
			# the new packages have been installed.
			fakedb = PackageVirtualDbapi(installed_db.settings)
			if preload_installed_pkgs:
				for pkg in installed_db:
					self.spinner.update()
					# Triggers metadata updates via the FakeVartree.
					installed_db.aux_get(pkg.cpv, [])
					fakedb.cpv_inject(pkg)

			# The vardb state is now cached inside our FakeVartree, so
			# the real vartree cache won't be needed for a while. Make
			# some room on the heap by dropping the vardbapi caches.
			trees[root]["vartree"].dbapi._clear_cache()
			gc.collect()

			self.mydbapi[root] = fakedb

			def graph_tree():
				pass
			graph_tree.dbapi = fakedb
			self._graph_trees[root] = {}
			self._filtered_trees[root] = {}
			# Substitute the graph tree for the vartree in dep_check()
			# so that atom selections stay consistent with the package
			# selections that have already been made.
			self._graph_trees[root]["porttree"] = graph_tree
			self._graph_trees[root]["vartree"] = graph_tree

			def filtered_tree():
				pass
			filtered_tree.dbapi = self._dep_check_composite_db(self, root)
			self._filtered_trees[root]["porttree"] = filtered_tree

			# Passing graph_tree as the vartree here could improve atom
			# selections in some cases by preferring atoms for packages
			# already in the graph, but it can also trigger unresolvable
			# direct circular dependencies (e.g. gwydion-dylan depends
			# on itself or gwydion-dylan-bin; when gwydion-dylan is not
			# yet installed, gwydion-dylan-bin must be selected to avoid
			# the cycle). Instead, expose the graph via "graph_db" so
			# that graph packages stay distinguishable from other
			# available and installed packages, and pass the parent
			# package into self._select_atoms() calls so unresolvable
			# direct circular dependencies can be detected and avoided
			# when possible.
			self._filtered_trees[root]["graph_db"] = graph_tree.dbapi
			self._filtered_trees[root]["vartree"] = self.trees[root]["vartree"]

			dbs = []
			portdb = self.trees[root]["porttree"].dbapi
			bindb = self.trees[root]["bintree"].dbapi
			vardb = self.trees[root]["vartree"].dbapi
			# Each entry: (db, pkg_type, built, installed, db_keys)
			if "--usepkgonly" not in self.myopts:
				dbs.append((portdb, "ebuild", False, False,
					list(portdb._aux_cache_keys)))
			if "--usepkg" in self.myopts:
				dbs.append((bindb, "binary", True, False,
					list(bindb._aux_cache_keys)))
			dbs.append((vardb, "installed", True, True,
				list(trees[root]["vartree"].dbapi._aux_cache_keys)))
			self._filtered_trees[root]["dbs"] = dbs
			if "--usepkg" in self.myopts:
				self.trees[root]["bintree"].populate(
					"--getbinpkg" in self.myopts,
					"--getbinpkgonly" in self.myopts)
		del trees

		self.digraph = portage.digraph()
		# All package sets added to the graph.
		self._sets = {}
		# Atoms given as command line arguments.
		self._sets["args"] = InternalPackageSet()
		# Every atom from every set added to the graph, including the
		# atoms given as arguments.
		self._set_atoms = InternalPackageSet()
		self._atom_arg_map = {}
		# All nodes pulled in by self._set_atoms.
		self._set_nodes = set()
		# Blocker -> Uninstall edges only.
		self._blocker_uninstalls = digraph()
		# Package -> Blocker edges only.
		self._blocker_parents = digraph()
		# Irrelevant Package -> Blocker edges only.
		self._irrelevant_blockers = digraph()
		# Unsolvable Package -> Blocker edges only.
		self._unsolvable_blockers = digraph()
		# All Blocker -> Blocked Package edges.
		self._blocked_pkgs = digraph()
		# World packages protected from uninstallation that may not be
		# in the graph yet, when the graph is still incomplete.
		self._blocked_world_pkgs = {}
		self._slot_collision_info = {}
		# Slot collision nodes may not block other packages, since
		# blocker validation only accounts for one package per slot.
		self._slot_collision_nodes = set()
		self._parent_atoms = {}
		self._slot_conflict_parent_atoms = set()
		self._serialized_tasks_cache = None
		self._scheduler_graph = None
		self._displayed_list = None
		self._pprovided_args = []
		self._missing_args = []
		self._masked_installed = set()
		self._unsatisfied_deps_for_display = []
		self._unsatisfied_blockers_for_display = None
		self._circular_deps_for_display = None
		self._dep_stack = []
		self._dep_disjunctive_stack = []
		self._unsatisfied_deps = []
		self._initially_unsatisfied_deps = []
		self._ignored_deps = []
		self._required_set_names = set(["system", "world"])
		self._select_atoms = self._select_atoms_highest_available
		self._select_package = self._select_pkg_highest_available
		self._highest_pkg_cache = {}
230
231
	def _show_slot_collision_notice(self):
		"""Show an informational message advising the user to mask one of
		the conflicting packages. Some cases could be resolved
		automatically, but handling all of them would require support for
		backtracking (removal of nodes that have already been selected).
		"""

		if not self._slot_collision_info:
			return

		self._show_merge_list()

		out = []
		out.append("\n!!! Multiple package instances within a single " + \
			"package slot have been pulled\n")
		out.append("!!! into the dependency graph, resulting" + \
			" in a slot conflict:\n\n")
		indent = "  "
		# Cap the number of parents shown per node so the display
		# doesn't flood.
		max_parents = 3
		explanation_columns = 70
		explanations = 0
		for (slot_atom, root), slot_nodes \
			in self._slot_collision_info.iteritems():
			out.append(str(slot_atom))
			out.append("\n\n")

			for node in slot_nodes:
				out.append(indent)
				out.append(str(node))
				parent_atoms = self._parent_atoms.get(node)
				if not parent_atoms:
					out.append(" (no parents)\n")
					out.append("\n")
					continue

				# Conflict atoms are the most interesting, so when this
				# package was pulled in by any of them, show those alone.
				pruned = set()
				for parent_atom in parent_atoms:
					if len(pruned) >= max_parents:
						break
					if parent_atom in self._slot_conflict_parent_atoms:
						pruned.add(parent_atom)

				if not pruned:
					# No conflict atoms matched; build the pruned list
					# by preference: DependencyArg parents first, then
					# Package parents that are themselves involved in a
					# slot collision, then anything else.
					for parent_atom in parent_atoms:
						if len(pruned) >= max_parents:
							break
						parent, atom = parent_atom
						if isinstance(parent, DependencyArg):
							pruned.add(parent_atom)
					for parent_atom in parent_atoms:
						if len(pruned) >= max_parents:
							break
						parent, atom = parent_atom
						if isinstance(parent, Package) and \
							(parent.slot_atom, parent.root) \
							in self._slot_collision_info:
							pruned.add(parent_atom)
					for parent_atom in parent_atoms:
						if len(pruned) >= max_parents:
							break
						pruned.add(parent_atom)

				omitted_parents = len(parent_atoms) - len(pruned)
				out.append(" pulled in by\n")
				for parent_atom in pruned:
					parent, atom = parent_atom
					out.append(2*indent)
					if isinstance(parent,
						(PackageArg, AtomArg)):
						# For PackageArg and AtomArg parents the atom
						# attribute would be redundant.
						out.append(str(parent))
					else:
						# For SetArg and Package parents, show the
						# specific atom.
						out.append("%s required by %s" % (atom, parent))
					out.append("\n")
				if omitted_parents:
					out.append(2*indent)
					out.append("(and %d more)\n" % omitted_parents)
				out.append("\n")
			explanation = self._slot_conflict_explanation(slot_nodes)
			if explanation:
				explanations += 1
				out.append(indent + "Explanation:\n\n")
				for line in textwrap.wrap(explanation, explanation_columns):
					out.append(2*indent + line + "\n")
				out.append("\n")
		out.append("\n")
		sys.stderr.write("".join(out))
		sys.stderr.flush()

		# Skip the generic advice when every conflict already got a
		# specific explanation, or when --quiet was requested.
		if explanations == len(self._slot_collision_info) or \
			"--quiet" in self.myopts:
			return

		from formatter import AbstractFormatter, DumbWriter
		f = AbstractFormatter(DumbWriter(sys.stderr, maxcol=72))
		for chunk in (
			"It may be possible to solve this problem ",
			"by using package.mask to prevent one of ",
			"those packages from being selected. ",
			"However, it is also possible that conflicting ",
			"dependencies exist such that they are impossible to ",
			"satisfy simultaneously.  If such a conflict exists in ",
			"the dependencies of two different packages, then those ",
			"packages can not be installed simultaneously."):
			f.add_flowing_data(chunk)
		f.end_paragraph(1)

		for chunk in (
			"For more information, see MASKED PACKAGES ",
			"section in the emerge man page or refer ",
			"to the Gentoo Handbook."):
			f.add_flowing_data(chunk)
		f.end_paragraph(1)
		f.writer.flush()
359
360
	def _slot_conflict_explanation(self, slot_nodes):
		"""
		Try to generate a human readable explanation for a slot conflict
		caused by USE deps. There are a few cases to consider:

		1) New USE are correctly set but --newuse wasn't requested, so an
		   installed package with incorrect USE happened to get pulled
		   into the graph before the new one.

		2) New USE are incorrectly set but an installed package has
		   correct USE so it got pulled into the graph, and a new
		   instance also got pulled in due to --newuse or an upgrade.

		3) Multiple USE deps exist that can't be satisfied
		   simultaneously, and multiple package instances got pulled
		   into the same slot to satisfy the conflicting deps.

		Explanations and suggested courses of action are generated for
		cases 1 and 2 only; case 3 is too complex to give a useful
		suggestion.

		@rtype: str or None
		@returns: explanation text, or None when no suggestion applies
		"""

		if len(slot_nodes) != 2:
			# Suggestions are only implemented for conflicts between
			# exactly two packages.
			return None

		all_conflict_atoms = self._slot_conflict_parent_atoms
		matched_node = None
		matched_atoms = None
		unmatched_node = None
		for node in slot_nodes:
			parent_atoms = self._parent_atoms.get(node)
			if not parent_atoms:
				# Normally there are always parent atoms; if not,
				# something unexpected is happening and there's no
				# suggestion for that case.
				return None
			conflict_atoms = all_conflict_atoms.intersection(parent_atoms)
			for parent, atom in conflict_atoms:
				if not atom.use:
					# Suggestions are only implemented when every
					# conflict atom carries a USE dep.
					return None
			if conflict_atoms:
				if matched_node is not None:
					# Conflict atoms match more than one node, so
					# there's no suggestion.
					return None
				matched_node = node
				matched_atoms = conflict_atoms
			else:
				if unmatched_node is not None:
					# Neither node is matched by conflict atoms; no
					# suggestion for this case either.
					return None
				unmatched_node = node

		if matched_node is None or unmatched_node is None:
			# This shouldn't happen.
			return None

		if unmatched_node.installed and not matched_node.installed and \
			unmatched_node.cpv == matched_node.cpv:
			# Same version on both sides means --newuse should be all
			# that's needed; different versions indicate some other
			# problem.
			return "New USE are correctly set, but --newuse wasn't" + \
				" requested, so an installed package with incorrect USE " + \
				"happened to get pulled into the dependency graph. " + \
				"In order to solve " + \
				"this, either specify the --newuse option or explicitly " + \
				" reinstall '%s'." % matched_node.slot_atom

		if matched_node.installed and not unmatched_node.installed:
			atoms = sorted(set(atom for parent, atom in matched_atoms))
			explanation = ("New USE for '%s' are incorrectly set. " + \
				"In order to solve this, adjust USE to satisfy '%s'") % \
				(matched_node.slot_atom, atoms[0])
			# Join any additional atoms with commas and a final "and".
			if len(atoms) > 1:
				for atom in atoms[1:-1]:
					explanation += ", '%s'" % (atom,)
				if len(atoms) > 2:
					explanation += ","
				explanation += " and '%s'" % (atoms[-1],)
			explanation += "."
			return explanation

		return None
449
450
	def _process_slot_conflicts(self):
		"""
		Identify the specific parent atoms that lead to each slot
		conflict: the atoms which only match a subset of the packages
		that have been pulled into a given slot.
		"""
		for (slot_atom, root), slot_nodes \
			in self._slot_collision_info.iteritems():

			# Union of the parent atoms of every conflicting node.
			all_parent_atoms = set()
			for pkg in slot_nodes:
				parent_atoms = self._parent_atoms.get(pkg)
				if parent_atoms:
					all_parent_atoms.update(parent_atoms)

			for pkg in slot_nodes:
				parent_atoms = self._parent_atoms.setdefault(pkg, set())
				for parent_atom in all_parent_atoms:
					if parent_atom in parent_atoms:
						continue
					# Match via a package set since it honors PROVIDE
					# when necessary, unlike match_from_list.
					parent, atom = parent_atom
					atom_set = InternalPackageSet(
						initial_atoms=(atom,))
					if atom_set.findAtomForPackage(pkg):
						parent_atoms.add(parent_atom)
					else:
						# This atom doesn't match pkg, so it's one of
						# the atoms responsible for the conflict.
						self._slot_conflict_parent_atoms.add(parent_atom)
483
484
	def _reinstall_for_flags(self, forced_flags,
		orig_use, orig_iuse, cur_use, cur_iuse):
		"""Return the set of USE flags that trigger reinstallation, or
		None when no flag changes require one.

		@param forced_flags: profile-forced flags; IUSE changes in these
			never trigger a reinstall
		@param orig_use: USE of the installed instance
		@param orig_iuse: IUSE of the installed instance
		@param cur_use: USE of the candidate instance
		@param cur_iuse: IUSE of the candidate instance
		"""
		if "--newuse" in self.myopts:
			# Any unforced IUSE change, or any change in the enabled
			# subset of IUSE, triggers a reinstall.
			flags = set(orig_iuse.symmetric_difference(
				cur_iuse).difference(forced_flags))
			flags.update(orig_iuse.intersection(orig_use).symmetric_difference(
				cur_iuse.intersection(cur_use)))
			if flags:
				return flags
		elif "changed-use" == self.myopts.get("--reinstall"):
			# Only changes in the enabled subset of IUSE count here.
			flags = orig_iuse.intersection(orig_use).symmetric_difference(
				cur_iuse.intersection(cur_use))
			if flags:
				return flags
		return None
501
502
	def _create_graph(self, allow_unsatisfied=False):
		"""Drain the dependency stacks, adding nodes and edges to the
		graph until nothing is left to process.

		@returns: 1 on success, 0 on failure (matching the convention of
			the other graph-building methods)
		"""
		dep_stack = self._dep_stack
		disjunctive_stack = self._dep_disjunctive_stack
		while dep_stack or disjunctive_stack:
			self.spinner.update()
			while dep_stack:
				task = dep_stack.pop()
				if isinstance(task, Package):
					# A Package on the stack means its own dependencies
					# still need to be expanded.
					if not self._add_pkg_deps(task,
						allow_unsatisfied=allow_unsatisfied):
						return 0
					continue
				if not self._add_dep(task, allow_unsatisfied=allow_unsatisfied):
					return 0
			# Only evaluate deferred || ( ... ) choices once the plain
			# dependency stack is empty.
			if disjunctive_stack:
				if not self._pop_disjunction(allow_unsatisfied):
					return 0
		return 1
520
521
	def _add_dep(self, dep, allow_unsatisfied=False):
		"""Resolve a single Dependency and add the selected package (or
		a Blocker) to the graph.

		@returns: 1 on success, 0 on unresolvable failure
		"""
		debug = "--debug" in self.myopts
		buildpkgonly = "--buildpkgonly" in self.myopts
		nodeps = "--nodeps" in self.myopts
		empty = "empty" in self.myparams
		deep = "deep" in self.myparams
		update = "--update" in self.myopts and dep.depth <= 1

		if dep.blocker:
			if not buildpkgonly and \
				not nodeps and \
				dep.parent not in self._slot_collision_nodes:
				if dep.parent.onlydeps:
					# Blockers are safely ignored when the parent is
					# an --onlydeps node.
					return 1
				# The blocker applies to the root where the parent is
				# (or will be) installed.
				blocker = Blocker(atom=dep.atom,
					eapi=dep.parent.metadata["EAPI"],
					root=dep.parent.root)
				self._blocker_parents.add(blocker, dep.parent)
			return 1

		dep_pkg, existing_node = self._select_package(dep.root, dep.atom,
			onlydeps=dep.onlydeps)
		if not dep_pkg:
			if dep.priority.optional:
				# Possibly an unnecessary build-time dep pulled in by
				# --with-bdeps=y.
				return 1
			if allow_unsatisfied:
				self._unsatisfied_deps.append(dep)
				return 1
			self._unsatisfied_deps_for_display.append(
				((dep.root, dep.atom), {"myparent":dep.parent}))
			return 0

		# dep_check can return deps that shouldn't be processed any
		# further, so identify and discard them here. Discard as few as
		# possible, since each discarded dependency reduces the amount
		# of information available for merge order optimization.
		if dep.priority.satisfied and \
			not dep_pkg.installed and \
			not (existing_node or empty or deep or update):
			myarg = None
			if dep.root == self.target_root:
				try:
					myarg = self._iter_atoms_for_pkg(dep_pkg).next()
				except StopIteration:
					pass
				except portage.exception.InvalidDependString:
					if not dep_pkg.installed:
						# Shouldn't happen, since the package should
						# have been masked in that case.
						raise
			if not myarg:
				self._ignored_deps.append(dep)
				return 1

		if not self._add_pkg(dep_pkg, dep):
			return 0
		return 1
582
583
	def _add_pkg(self, pkg, dep):
584
		myparent = None
585
		priority = None
586
		depth = 0
587
		if dep is None:
588
			dep = Dependency()
589
		else:
590
			myparent = dep.parent
591
			priority = dep.priority
592
			depth = dep.depth
593
		if priority is None:
594
			priority = DepPriority()
595
		"""
596
		Fills the digraph with nodes comprised of packages to merge.
597
		mybigkey is the package spec of the package to merge.
598
		myparent is the package depending on mybigkey ( or None )
599
		addme = Should we add this package to the digraph or are we just looking at it's deps?
600
			Think --onlydeps, we need to ignore packages in that case.
601
		#stuff to add:
602
		#SLOT-aware emerge
603
		#IUSE-aware emerge -> USE DEP aware depgraph
604
		#"no downgrade" emerge
605
		"""
606
		# Ensure that the dependencies of the same package
607
		# are never processed more than once.
608
		previously_added = pkg in self.digraph
609
610
		# select the correct /var database that we'll be checking against
611
		vardbapi = self.trees[pkg.root]["vartree"].dbapi
612
		pkgsettings = self.pkgsettings[pkg.root]
613
614
		arg_atoms = None
615
		if True:
616
			try:
617
				arg_atoms = list(self._iter_atoms_for_pkg(pkg))
618
			except portage.exception.InvalidDependString, e:
619
				if not pkg.installed:
620
					show_invalid_depstring_notice(
621
						pkg, pkg.metadata["PROVIDE"], str(e))
622
					return 0
623
				del e
624
625
		if not pkg.onlydeps:
626
			if not pkg.installed and \
627
				"empty" not in self.myparams and \
628
				vardbapi.match(pkg.slot_atom):
629
				# Increase the priority of dependencies on packages that
630
				# are being rebuilt. This optimizes merge order so that
631
				# dependencies are rebuilt/updated as soon as possible,
632
				# which is needed especially when emerge is called by
633
				# revdep-rebuild since dependencies may be affected by ABI
634
				# breakage that has rendered them useless. Don't adjust
635
				# priority here when in "empty" mode since all packages
636
				# are being merged in that case.
637
				priority.rebuild = True
638
639
			existing_node = self._slot_pkg_map[pkg.root].get(pkg.slot_atom)
640
			slot_collision = False
641
			if existing_node:
642
				existing_node_matches = pkg.cpv == existing_node.cpv
643
				if existing_node_matches and \
644
					pkg != existing_node and \
645
					dep.atom is not None:
646
					# Use package set for matching since it will match via
647
					# PROVIDE when necessary, while match_from_list does not.
648
					atom_set = InternalPackageSet(initial_atoms=[dep.atom])
649
					if not atom_set.findAtomForPackage(existing_node):
650
						existing_node_matches = False
651
				if existing_node_matches:
652
					# The existing node can be reused.
653
					if arg_atoms:
654
						for parent_atom in arg_atoms:
655
							parent, atom = parent_atom
656
							self.digraph.add(existing_node, parent,
657
								priority=priority)
658
							self._add_parent_atom(existing_node, parent_atom)
659
					# If a direct circular dependency is not an unsatisfied
660
					# buildtime dependency then drop it here since otherwise
661
					# it can skew the merge order calculation in an unwanted
662
					# way.
663
					if existing_node != myparent or \
664
						(priority.buildtime and not priority.satisfied):
665
						self.digraph.addnode(existing_node, myparent,
666
							priority=priority)
667
						if dep.atom is not None and dep.parent is not None:
668
							self._add_parent_atom(existing_node,
669
								(dep.parent, dep.atom))
670
					return 1
671
				else:
672
673
					# A slot collision has occurred.  Sometimes this coincides
674
					# with unresolvable blockers, so the slot collision will be
675
					# shown later if there are no unresolvable blockers.
676
					self._add_slot_conflict(pkg)
677
					slot_collision = True
678
679
			if slot_collision:
680
				# Now add this node to the graph so that self.display()
681
				# can show use flags and --tree portage.output.  This node is
682
				# only being partially added to the graph.  It must not be
683
				# allowed to interfere with the other nodes that have been
684
				# added.  Do not overwrite data for existing nodes in
685
				# self.mydbapi since that data will be used for blocker
686
				# validation.
687
				# Even though the graph is now invalid, continue to process
688
				# dependencies so that things like --fetchonly can still
689
				# function despite collisions.
690
				pass
691
			elif not previously_added:
692
				self._slot_pkg_map[pkg.root][pkg.slot_atom] = pkg
693
				self.mydbapi[pkg.root].cpv_inject(pkg)
694
				self._filtered_trees[pkg.root]["porttree"].dbapi._clear_cache()
695
696
			if not pkg.installed:
697
				# Allow this package to satisfy old-style virtuals in case it
698
				# doesn't already. Any pre-existing providers will be preferred
699
				# over this one.
700
				try:
701
					pkgsettings.setinst(pkg.cpv, pkg.metadata)
702
					# For consistency, also update the global virtuals.
703
					settings = self.roots[pkg.root].settings
704
					settings.unlock()
705
					settings.setinst(pkg.cpv, pkg.metadata)
706
					settings.lock()
707
				except portage.exception.InvalidDependString, e:
708
					show_invalid_depstring_notice(
709
						pkg, pkg.metadata["PROVIDE"], str(e))
710
					del e
711
					return 0
712
713
		if arg_atoms:
714
			self._set_nodes.add(pkg)
715
716
		# Do this even when addme is False (--onlydeps) so that the
717
		# parent/child relationship is always known in case
718
		# self._show_slot_collision_notice() needs to be called later.
719
		self.digraph.add(pkg, myparent, priority=priority)
720
		if dep.atom is not None and dep.parent is not None:
721
			self._add_parent_atom(pkg, (dep.parent, dep.atom))
722
723
		if arg_atoms:
724
			for parent_atom in arg_atoms:
725
				parent, atom = parent_atom
726
				self.digraph.add(pkg, parent, priority=priority)
727
				self._add_parent_atom(pkg, parent_atom)
728
729
		""" This section determines whether we go deeper into dependencies or not.
730
		    We want to go deeper on a few occasions:
731
		    Installing package A, we need to make sure package A's deps are met.
732
		    emerge --deep <pkgspec>; we need to recursively check dependencies of pkgspec
733
		    If we are in --nodeps (no recursion) mode, we obviously only check 1 level of dependencies.
734
		"""
735
		dep_stack = self._dep_stack
736
		if "recurse" not in self.myparams:
737
			return 1
738
		elif pkg.installed and \
739
			"deep" not in self.myparams:
740
			dep_stack = self._ignored_deps
741
742
		self.spinner.update()
743
744
		if arg_atoms:
745
			depth = 0
746
		pkg.depth = depth
747
		if not previously_added:
748
			dep_stack.append(pkg)
749
		return 1
750
751
	def _add_parent_atom(self, pkg, parent_atom):
		"""Record a (parent, atom) pair that pulled pkg into the graph."""
		self._parent_atoms.setdefault(pkg, set()).add(parent_atom)
757
758
	def _add_slot_conflict(self, pkg):
		"""Register pkg as a slot-collision node, recording it together
		with the package that currently occupies its slot."""
		self._slot_collision_nodes.add(pkg)
		slot_key = (pkg.slot_atom, pkg.root)
		slot_nodes = self._slot_collision_info.get(slot_key)
		if slot_nodes is None:
			# First collision for this slot: seed the set with the
			# package that already holds the slot.
			slot_nodes = set([self._slot_pkg_map[pkg.root][pkg.slot_atom]])
			self._slot_collision_info[slot_key] = slot_nodes
		slot_nodes.add(pkg)
767
768
	def _add_pkg_deps(self, pkg, allow_unsatisfied=False):
769
770
		mytype = pkg.type_name
771
		myroot = pkg.root
772
		mykey = pkg.cpv
773
		metadata = pkg.metadata
774
		myuse = pkg.use.enabled
775
		jbigkey = pkg
776
		depth = pkg.depth + 1
777
		removal_action = "remove" in self.myparams
778
779
		edepend={}
780
		depkeys = ["DEPEND","RDEPEND","PDEPEND"]
781
		for k in depkeys:
782
			edepend[k] = metadata[k]
783
784
		if not pkg.built and \
785
			"--buildpkgonly" in self.myopts and \
786
			"deep" not in self.myparams and \
787
			"empty" not in self.myparams:
788
			edepend["RDEPEND"] = ""
789
			edepend["PDEPEND"] = ""
790
		bdeps_optional = False
791
792
		if pkg.built and not removal_action:
793
			if self.myopts.get("--with-bdeps", "n") == "y":
794
				# Pull in build time deps as requested, but marked them as
795
				# "optional" since they are not strictly required. This allows
796
				# more freedom in the merge order calculation for solving
797
				# circular dependencies. Don't convert to PDEPEND since that
798
				# could make --with-bdeps=y less effective if it is used to
799
				# adjust merge order to prevent built_with_use() calls from
800
				# failing.
801
				bdeps_optional = True
802
			else:
803
				# built packages do not have build time dependencies.
804
				edepend["DEPEND"] = ""
805
806
		if removal_action and self.myopts.get("--with-bdeps", "y") == "n":
807
			edepend["DEPEND"] = ""
808
809
		bdeps_root = "/"
810
		root_deps = self.myopts.get("--root-deps")
811
		if root_deps is not None:
812
			if root_deps is True:
813
				bdeps_root = myroot
814
			elif root_deps == "rdeps":
815
				edepend["DEPEND"] = ""
816
817
		deps = (
818
			(bdeps_root, edepend["DEPEND"],
819
				self._priority(buildtime=(not bdeps_optional),
820
				optional=bdeps_optional)),
821
			(myroot, edepend["RDEPEND"], self._priority(runtime=True)),
822
			(myroot, edepend["PDEPEND"], self._priority(runtime_post=True))
823
		)
824
825
		debug = "--debug" in self.myopts
826
		strict = mytype != "installed"
827
		try:
828
			if not strict:
829
				portage.dep._dep_check_strict = False
830
831
			for dep_root, dep_string, dep_priority in deps:
832
				if not dep_string:
833
					continue
834
				if debug:
835
					print
836
					print "Parent:   ", jbigkey
837
					print "Depstring:", dep_string
838
					print "Priority:", dep_priority
839
840
				try:
841
842
					dep_string = portage.dep.paren_normalize(
843
						portage.dep.use_reduce(
844
						portage.dep.paren_reduce(dep_string),
845
						uselist=pkg.use.enabled))
846
847
					dep_string = list(self._queue_disjunctive_deps(
848
						pkg, dep_root, dep_priority, dep_string))
849
850
				except portage.exception.InvalidDependString, e:
851
					if pkg.installed:
852
						del e
853
						continue
854
					show_invalid_depstring_notice(pkg, dep_string, str(e))
855
					return 0
856
857
				if not dep_string:
858
					continue
859
860
				dep_string = portage.dep.paren_enclose(dep_string)
861
862
				if not self._add_pkg_dep_string(
863
					pkg, dep_root, dep_priority, dep_string,
864
					allow_unsatisfied):
865
					return 0
866
867
		except portage.exception.AmbiguousPackageName, e:
868
			pkgs = e.args[0]
869
			portage.writemsg("\n\n!!! An atom in the dependencies " + \
870
				"is not fully-qualified. Multiple matches:\n\n", noiselevel=-1)
871
			for cpv in pkgs:
872
				portage.writemsg("    %s\n" % cpv, noiselevel=-1)
873
			portage.writemsg("\n", noiselevel=-1)
874
			if mytype == "binary":
875
				portage.writemsg(
876
					"!!! This binary package cannot be installed: '%s'\n" % \
877
					mykey, noiselevel=-1)
878
			elif mytype == "ebuild":
879
				portdb = self.roots[myroot].trees["porttree"].dbapi
880
				myebuild, mylocation = portdb.findname2(mykey)
881
				portage.writemsg("!!! This ebuild cannot be installed: " + \
882
					"'%s'\n" % myebuild, noiselevel=-1)
883
			portage.writemsg("!!! Please notify the package maintainer " + \
884
				"that atoms must be fully-qualified.\n", noiselevel=-1)
885
			return 0
886
		finally:
887
			portage.dep._dep_check_strict = True
888
		return 1
889
890
	def _add_pkg_dep_string(self, pkg, dep_root, dep_priority, dep_string,
		allow_unsatisfied):
		"""
		Parse a single dependency string of pkg and add each resulting
		atom to the graph as a Dependency edge (via self._add_dep).

		Returns 1 on success, 0 on failure. Invalid dependency strings
		or atoms belonging to already-installed packages are tolerated
		(reported but not fatal), since they cannot be fixed by the user
		at merge time.
		"""
		# Children are one level deeper in the graph than their parent.
		depth = pkg.depth + 1
		debug = "--debug" in self.myopts
		# Installed packages get relaxed dep-string validation.
		strict = pkg.type_name != "installed"

		if debug:
			print
			print "Parent:   ", pkg
			print "Depstring:", dep_string
			print "Priority:", dep_priority

		try:
			# Reduce the dep string to the concrete atoms chosen for the
			# current USE configuration.
			selected_atoms = self._select_atoms(dep_root,
				dep_string, myuse=pkg.use.enabled, parent=pkg,
				strict=strict, priority=dep_priority)
		except portage.exception.InvalidDependString, e:
			show_invalid_depstring_notice(pkg, dep_string, str(e))
			del e
			# An installed package with a broken depstring is not fatal.
			if pkg.installed:
				return 1
			return 0

		if debug:
			print "Candidates:", selected_atoms

		vardb = self.roots[dep_root].trees["vartree"].dbapi

		for atom in selected_atoms:
			try:

				atom = portage.dep.Atom(atom)

				mypriority = dep_priority.copy()
				# Mark the priority as satisfied when a non-blocker atom
				# already matches an installed package.
				if not atom.blocker and vardb.match(atom):
					mypriority.satisfied = True

				if not self._add_dep(Dependency(atom=atom,
					blocker=atom.blocker, depth=depth, parent=pkg,
					priority=mypriority, root=dep_root),
					allow_unsatisfied=allow_unsatisfied):
					return 0

			except portage.exception.InvalidAtom, e:
				show_invalid_depstring_notice(
					pkg, dep_string, str(e))
				del e
				# Again, tolerate broken atoms for installed packages.
				if not pkg.installed:
					return 0

		if debug:
			print "Exiting...", pkg

		return 1
	def _queue_disjunctive_deps(self, pkg, dep_root, dep_priority, dep_struct):
		"""
		Queue disjunctive (virtual and ||) deps in self._dep_disjunctive_stack.
		Yields non-disjunctive deps. Raises InvalidDependString when
		necessary.
		"""
		pos = 0
		while pos < len(dep_struct):
			node = dep_struct[pos]
			if isinstance(node, list):
				# Nested group: recurse and pass through its plain deps.
				for leaf in self._queue_disjunctive_deps(
					pkg, dep_root, dep_priority, node):
					yield leaf
			elif node == "||":
				# Any-of operator: defer it together with the choice
				# list that follows it, then skip past that list.
				self._queue_disjunction(pkg, dep_root, dep_priority,
					[ node, dep_struct[ pos + 1 ] ] )
				pos += 1
			else:
				try:
					parsed = portage.dep.Atom(node)
				except portage.exception.InvalidAtom:
					# Broken atoms are only fatal for packages that are
					# not already installed.
					if not pkg.installed:
						raise portage.exception.InvalidDependString(
							"invalid atom: '%s'" % node)
				else:
					# Note: Eventually this will check for PROPERTIES=virtual
					# or whatever other metadata gets implemented for this
					# purpose.
					if parsed.cp.startswith('virtual/'):
						# Virtuals are deferred just like || groups.
						self._queue_disjunction( pkg, dep_root,
							dep_priority, [ str(parsed) ] )
					else:
						yield str(parsed)
			pos += 1
	def _queue_disjunction(self, pkg, dep_root, dep_priority, dep_struct):
981
		self._dep_disjunctive_stack.append(
982
			(pkg, dep_root, dep_priority, dep_struct))
983
984
	def _pop_disjunction(self, allow_unsatisfied):
		"""
		Pop one disjunctive dep from self._dep_disjunctive_stack, and use it to
		populate self._dep_stack.
		"""
		task = self._dep_disjunctive_stack.pop()
		pkg, dep_root, dep_priority, dep_struct = task
		# Re-serialize the structure so it can go through the normal
		# dep-string handling path.
		dep_string = portage.dep.paren_enclose(dep_struct)
		added = self._add_pkg_dep_string(
			pkg, dep_root, dep_priority, dep_string, allow_unsatisfied)
		if added:
			return 1
		return 0
	def _priority(self, **kwargs):
		# Removal operations ("remove" in myparams) use unmerge
		# priorities; everything else gets ordinary dep priorities.
		if "remove" in self.myparams:
			return UnmergeDepPriority(**kwargs)
		return DepPriority(**kwargs)
	def _dep_expand(self, root_config, atom_without_category):
		"""
		@param root_config: a root config instance
		@type root_config: RootConfig
		@param atom_without_category: an atom without a category component
		@type atom_without_category: String
		@rtype: list
		@returns: a list of atoms containing categories (possibly empty)
		"""
		# Extract the package-name part by inserting a dummy category
		# and splitting it back off.
		null_cp = portage.dep_getkey(insert_category_into_atom(
			atom_without_category, "null"))
		atom_pn = portage.catsplit(null_cp)[1]

		# Gather every category, across all configured databases for
		# this root, that contains a package with this name.
		matched_categories = set()
		for db, pkg_type, built, installed, db_keys in \
			self._filtered_trees[root_config.root]["dbs"]:
			for category in db.categories:
				if db.cp_list("%s/%s" % (category, atom_pn)):
					matched_categories.add(category)

		return [insert_category_into_atom(atom_without_category, category)
			for category in matched_categories]
	def _have_new_virt(self, root, atom_cp):
1031
		ret = False
1032
		for db, pkg_type, built, installed, db_keys in \
1033
			self._filtered_trees[root]["dbs"]:
1034
			if db.cp_list(atom_cp):
1035
				ret = True
1036
				break
1037
		return ret
1038
1039
	def _iter_atoms_for_pkg(self, pkg):
		"""
		Yield (arg, atom) pairs for each command-line argument atom that
		matches the given package. Matches are skipped when a new-style
		virtual supersedes the atom's cat/pkg, or when a higher version
		of the package is visible in a different slot (that slot should
		win instead).
		"""
		# TODO: add multiple $ROOT support
		if pkg.root != self.target_root:
			return
		atom_arg_map = self._atom_arg_map
		root_config = self.roots[pkg.root]
		for atom in self._set_atoms.iterAtomsForPackage(pkg):
			atom_cp = portage.dep_getkey(atom)
			# Old-style virtual match that is now provided by a
			# new-style virtual: let the new-style virtual handle it.
			if atom_cp != pkg.cp and \
				self._have_new_virt(pkg.root, atom_cp):
				continue
			visible_pkgs = root_config.visible_pkgs.match_pkgs(atom)
			visible_pkgs.reverse() # descending order
			higher_slot = None
			for visible_pkg in visible_pkgs:
				if visible_pkg.cp != atom_cp:
					continue
				if pkg >= visible_pkg:
					# This is descending order, and we're not
					# interested in any versions <= pkg given.
					break
				if pkg.slot_atom != visible_pkg.slot_atom:
					higher_slot = visible_pkg
					break
			# A higher version exists in another slot; the atom should
			# be satisfied by that slot, not by this package.
			if higher_slot is not None:
				continue
			for arg in atom_arg_map[(atom, pkg.root)]:
				# PackageArgs refer to one exact package instance.
				if isinstance(arg, PackageArg) and \
					arg.package != pkg:
					continue
				yield arg, atom
	def select_files(self, myfiles):
		"""Given a list of .tbz2s, .ebuilds sets, and deps, create the
		appropriate depgraph and return a favorite list.

		Returns a (success, favorites) tuple where success is falsy on
		any resolution failure.
		"""
		debug = "--debug" in self.myopts
		root_config = self.roots[self.target_root]
		sets = root_config.sets
		getSetAtoms = root_config.setconfig.getSetAtoms
		myfavorites=[]
		myroot = self.target_root
		dbs = self._filtered_trees[myroot]["dbs"]
		vardb = self.trees[myroot]["vartree"].dbapi
		real_vardb = self._trees_orig[myroot]["vartree"].dbapi
		portdb = self.trees[myroot]["porttree"].dbapi
		bindb = self.trees[myroot]["bintree"].dbapi
		pkgsettings = self.pkgsettings[myroot]
		args = []
		onlydeps = "--onlydeps" in self.myopts
		lookup_owners = []
		# Phase 1: translate each command-line token into an *Arg object
		# (PackageArg, SetArg or AtomArg).
		for x in myfiles:
			ext = os.path.splitext(x)[1]
			# A .tbz2 binary package file given on the command line.
			if ext==".tbz2":
				if not os.path.exists(x):
					# Fall back to looking the file up inside PKGDIR.
					if os.path.exists(
						os.path.join(pkgsettings["PKGDIR"], "All", x)):
						x = os.path.join(pkgsettings["PKGDIR"], "All", x)
					elif os.path.exists(
						os.path.join(pkgsettings["PKGDIR"], x)):
						x = os.path.join(pkgsettings["PKGDIR"], x)
					else:
						print "\n\n!!! Binary package '"+str(x)+"' does not exist."
						print "!!! Please ensure the tbz2 exists as specified.\n"
						return 0, myfavorites
				mytbz2=portage.xpak.tbz2(x)
				mykey=mytbz2.getelements("CATEGORY")[0]+"/"+os.path.splitext(os.path.basename(x))[0]
				# The file must be the one the bintree would use.
				if os.path.realpath(x) != \
					os.path.realpath(self.trees[myroot]["bintree"].getname(mykey)):
					print colorize("BAD", "\n*** You need to adjust PKGDIR to emerge this package.\n")
					return 0, myfavorites
				db_keys = list(bindb._aux_cache_keys)
				metadata = izip(db_keys, bindb.aux_get(mykey, db_keys))
				pkg = Package(type_name="binary", root_config=root_config,
					cpv=mykey, built=True, metadata=metadata,
					onlydeps=onlydeps)
				self._pkg_cache[pkg] = pkg
				args.append(PackageArg(arg=x, package=pkg,
					root_config=root_config))
			# A raw .ebuild path; it must live inside a configured tree.
			elif ext==".ebuild":
				ebuild_path = portage.util.normalize_path(os.path.abspath(x))
				pkgdir = os.path.dirname(ebuild_path)
				tree_root = os.path.dirname(os.path.dirname(pkgdir))
				cp = pkgdir[len(tree_root)+1:]
				e = portage.exception.PackageNotFound(
					("%s is not in a valid portage tree " + \
					"hierarchy or does not exist") % x)
				if not portage.isvalidatom(cp):
					raise e
				cat = portage.catsplit(cp)[0]
				# Strip the ".ebuild" suffix (7 chars) to form the cpv.
				mykey = cat + "/" + os.path.basename(ebuild_path[:-7])
				if not portage.isvalidatom("="+mykey):
					raise e
				ebuild_path = portdb.findname(mykey)
				if ebuild_path:
					if ebuild_path != os.path.join(os.path.realpath(tree_root),
						cp, os.path.basename(ebuild_path)):
						print colorize("BAD", "\n*** You need to adjust PORTDIR or PORTDIR_OVERLAY to emerge this package.\n")
						return 0, myfavorites
					if mykey not in portdb.xmatch(
						"match-visible", portage.dep_getkey(mykey)):
						print colorize("BAD", "\n*** You are emerging a masked package. It is MUCH better to use")
						print colorize("BAD", "*** /etc/portage/package.* to accomplish this. See portage(5) man")
						print colorize("BAD", "*** page for details.")
						countdown(int(self.settings["EMERGE_WARNING_DELAY"]),
							"Continuing...")
				else:
					raise portage.exception.PackageNotFound(
						"%s is not in a valid portage tree hierarchy or does not exist" % x)
				db_keys = list(portdb._aux_cache_keys)
				metadata = izip(db_keys, portdb.aux_get(mykey, db_keys))
				pkg = Package(type_name="ebuild", root_config=root_config,
					cpv=mykey, metadata=metadata, onlydeps=onlydeps)
				pkgsettings.setcpv(pkg)
				pkg.metadata["USE"] = pkgsettings["PORTAGE_USE"]
				pkg.metadata['CHOST'] = pkgsettings.get('CHOST', '')
				self._pkg_cache[pkg] = pkg
				args.append(PackageArg(arg=x, package=pkg,
					root_config=root_config))
			# An absolute filesystem path: resolve to the owning package.
			elif x.startswith(os.path.sep):
				if not x.startswith(myroot):
					portage.writemsg(("\n\n!!! '%s' does not start with" + \
						" $ROOT.\n") % x, noiselevel=-1)
					return 0, []
				# Queue these up since it's most efficient to handle
				# multiple files in a single iter_owners() call.
				lookup_owners.append(x)
			# Otherwise: a set name or a package atom.
			else:
				if x in ("system", "world"):
					x = SETPREFIX + x
				if x.startswith(SETPREFIX):
					s = x[len(SETPREFIX):]
					if s not in sets:
						raise portage.exception.PackageSetNotFound(s)
					if s in self._sets:
						continue
					# Recursively expand sets so that containment tests in
					# self._get_parent_sets() properly match atoms in nested
					# sets (like if world contains system).
					expanded_set = InternalPackageSet(
						initial_atoms=getSetAtoms(s))
					self._sets[s] = expanded_set
					args.append(SetArg(arg=x, set=expanded_set,
						root_config=root_config))
					continue
				if not is_valid_package_atom(x):
					portage.writemsg("\n\n!!! '%s' is not a valid package atom.\n" % x,
						noiselevel=-1)
					portage.writemsg("!!! Please check ebuild(5) for full details.\n")
					portage.writemsg("!!! (Did you specify a version but forget to prefix with '='?)\n")
					return (0,[])
				# Don't expand categories or old-style virtuals here unless
				# necessary. Expansion of old-style virtuals here causes at
				# least the following problems:
				#   1) It's more difficult to determine which set(s) an atom
				#      came from, if any.
				#   2) It takes away freedom from the resolver to choose other
				#      possible expansions when necessary.
				if "/" in x:
					args.append(AtomArg(arg=x, atom=x,
						root_config=root_config))
					continue
				# No category given: expand across all known categories.
				expanded_atoms = self._dep_expand(root_config, x)
				installed_cp_set = set()
				for atom in expanded_atoms:
					atom_cp = portage.dep_getkey(atom)
					if vardb.cp_list(atom_cp):
						installed_cp_set.add(atom_cp)

				if len(installed_cp_set) > 1:
					non_virtual_cps = set()
					for atom_cp in installed_cp_set:
						if not atom_cp.startswith("virtual/"):
							non_virtual_cps.add(atom_cp)
					if len(non_virtual_cps) == 1:
						installed_cp_set = non_virtual_cps

				# Prefer the expansion matching the installed package.
				if len(expanded_atoms) > 1 and len(installed_cp_set) == 1:
					installed_cp = iter(installed_cp_set).next()
					expanded_atoms = [atom for atom in expanded_atoms \
						if portage.dep_getkey(atom) == installed_cp]

				if len(expanded_atoms) > 1:
					print
					print
					ambiguous_package_name(x, expanded_atoms, root_config,
						self.spinner, self.myopts)
					return False, myfavorites
				if expanded_atoms:
					atom = expanded_atoms[0]
				else:
					null_atom = insert_category_into_atom(x, "null")
					null_cp = portage.dep_getkey(null_atom)
					cat, atom_pn = portage.catsplit(null_cp)
					virts_p = root_config.settings.get_virts_p().get(atom_pn)
					if virts_p:
						# Allow the depgraph to choose which virtual.
						atom = insert_category_into_atom(x, "virtual")
					else:
						atom = insert_category_into_atom(x, "null")

				args.append(AtomArg(arg=x, atom=atom,
					root_config=root_config))

		# Phase 2: resolve queued filesystem paths to the installed
		# packages that own them.
		if lookup_owners:
			relative_paths = []
			search_for_multiple = False
			if len(lookup_owners) > 1:
				search_for_multiple = True

			for x in lookup_owners:
				if not search_for_multiple and os.path.isdir(x):
					search_for_multiple = True
				relative_paths.append(x[len(myroot):])

			owners = set()
			for pkg, relative_path in \
				real_vardb._owners.iter_owners(relative_paths):
				owners.add(pkg.mycpv)
				if not search_for_multiple:
					break

			if not owners:
				portage.writemsg(("\n\n!!! '%s' is not claimed " + \
					"by any package.\n") % lookup_owners[0], noiselevel=-1)
				return 0, []

			for cpv in owners:
				slot = vardb.aux_get(cpv, ["SLOT"])[0]
				if not slot:
					# portage now masks packages with missing slot, but it's
					# possible that one was installed by an older version
					atom = portage.cpv_getkey(cpv)
				else:
					atom = "%s:%s" % (portage.cpv_getkey(cpv), slot)
				args.append(AtomArg(arg=atom, atom=atom,
					root_config=root_config))

		if "--update" in self.myopts:
			# In some cases, the greedy slots behavior can pull in a slot that
			# the user would want to uninstall due to it being blocked by a
			# newer version in a different slot. Therefore, it's necessary to
			# detect and discard any that should be uninstalled. Each time
			# that arguments are updated, package selections are repeated in
			# order to ensure consistency with the current arguments:
			#
			#  1) Initialize args
			#  2) Select packages and generate initial greedy atoms
			#  3) Update args with greedy atoms
			#  4) Select packages and generate greedy atoms again, while
			#     accounting for any blockers between selected packages
			#  5) Update args with revised greedy atoms

			self._set_args(args)
			greedy_args = []
			for arg in args:
				greedy_args.append(arg)
				if not isinstance(arg, AtomArg):
					continue
				for atom in self._greedy_slots(arg.root_config, arg.atom):
					greedy_args.append(
						AtomArg(arg=arg.arg, atom=atom,
							root_config=arg.root_config))

			self._set_args(greedy_args)
			del greedy_args

			# Revise greedy atoms, accounting for any blockers
			# between selected packages.
			revised_greedy_args = []
			for arg in args:
				revised_greedy_args.append(arg)
				if not isinstance(arg, AtomArg):
					continue
				for atom in self._greedy_slots(arg.root_config, arg.atom,
					blocker_lookahead=True):
					revised_greedy_args.append(
						AtomArg(arg=arg.arg, atom=atom,
							root_config=arg.root_config))
			args = revised_greedy_args
			del revised_greedy_args

		self._set_args(args)

		# Favorites are deduplicated but returned as a list.
		myfavorites = set(myfavorites)
		for arg in args:
			if isinstance(arg, (AtomArg, PackageArg)):
				myfavorites.add(arg.atom)
			elif isinstance(arg, SetArg):
				myfavorites.add(arg.arg)
		myfavorites = list(myfavorites)

		pprovideddict = pkgsettings.pprovideddict
		if debug:
			portage.writemsg("\n", noiselevel=-1)
		# Order needs to be preserved since a feature of --nodeps
		# is to allow the user to force a specific merge order.
		args.reverse()
		# Phase 3: add each argument's packages to the dependency graph.
		while args:
			arg = args.pop()
			for atom in arg.set:
				self.spinner.update()
				dep = Dependency(atom=atom, onlydeps=onlydeps,
					root=myroot, parent=arg)
				atom_cp = portage.dep_getkey(atom)
				try:
					pprovided = pprovideddict.get(portage.dep_getkey(atom))
					if pprovided and portage.match_from_list(atom, pprovided):
						# A provided package has been specified on the command line.
						self._pprovided_args.append((arg, atom))
						continue
					if isinstance(arg, PackageArg):
						if not self._add_pkg(arg.package, dep) or \
							not self._create_graph():
							sys.stderr.write(("\n\n!!! Problem resolving " + \
								"dependencies for %s\n") % arg.arg)
							return 0, myfavorites
						continue
					if debug:
						portage.writemsg("      Arg: %s\n     Atom: %s\n" % \
							(arg, atom), noiselevel=-1)
					pkg, existing_node = self._select_package(
						myroot, atom, onlydeps=onlydeps)
					if not pkg:
						# Missing system/world atoms are collected for a
						# warning instead of aborting immediately.
						if not (isinstance(arg, SetArg) and \
							arg.name in ("system", "world")):
							self._unsatisfied_deps_for_display.append(
								((myroot, atom), {}))
							return 0, myfavorites
						self._missing_args.append((arg, atom))
						continue
					if atom_cp != pkg.cp:
						# For old-style virtuals, we need to repeat the
						# package.provided check against the selected package.
						expanded_atom = atom.replace(atom_cp, pkg.cp)
						pprovided = pprovideddict.get(pkg.cp)
						if pprovided and \
							portage.match_from_list(expanded_atom, pprovided):
							# A provided package has been
							# specified on the command line.
							self._pprovided_args.append((arg, atom))
							continue
					if pkg.installed and "selective" not in self.myparams:
						self._unsatisfied_deps_for_display.append(
							((myroot, atom), {}))
						# Previous behavior was to bail out in this case, but
						# since the dep is satisfied by the installed package,
						# it's more friendly to continue building the graph
						# and just show a warning message. Therefore, only bail
						# out here if the atom is not from either the system or
						# world set.
						if not (isinstance(arg, SetArg) and \
							arg.name in ("system", "world")):
							return 0, myfavorites

					# Add the selected package to the graph as soon as possible
					# so that later dep_check() calls can use it as feedback
					# for making more consistent atom selections.
					if not self._add_pkg(pkg, dep):
						if isinstance(arg, SetArg):
							sys.stderr.write(("\n\n!!! Problem resolving " + \
								"dependencies for %s from %s\n") % \
								(atom, arg.arg))
						else:
							sys.stderr.write(("\n\n!!! Problem resolving " + \
								"dependencies for %s\n") % atom)
						return 0, myfavorites

				except portage.exception.MissingSignature, e:
					portage.writemsg("\n\n!!! A missing gpg signature is preventing portage from calculating the\n")
					portage.writemsg("!!! required dependencies. This is a security feature enabled by the admin\n")
					portage.writemsg("!!! to aid in the detection of malicious intent.\n\n")
					portage.writemsg("!!! THIS IS A POSSIBLE INDICATION OF TAMPERED FILES -- CHECK CAREFULLY.\n")
					portage.writemsg("!!! Affected file: %s\n" % (e), noiselevel=-1)
					return 0, myfavorites
				except portage.exception.InvalidSignature, e:
					portage.writemsg("\n\n!!! An invalid gpg signature is preventing portage from calculating the\n")
					portage.writemsg("!!! required dependencies. This is a security feature enabled by the admin\n")
					portage.writemsg("!!! to aid in the detection of malicious intent.\n\n")
					portage.writemsg("!!! THIS IS A POSSIBLE INDICATION OF TAMPERED FILES -- CHECK CAREFULLY.\n")
					portage.writemsg("!!! Affected file: %s\n" % (e), noiselevel=-1)
					return 0, myfavorites
				except SystemExit, e:
					raise # Needed else can't exit
				except Exception, e:
					print >> sys.stderr, "\n\n!!! Problem in '%s' dependencies." % atom
					print >> sys.stderr, "!!!", str(e), getattr(e, "__module__", None)
					raise

		# Now that the root packages have been added to the graph,
		# process the dependencies.
		if not self._create_graph():
			return 0, myfavorites

		missing=0
		if "--usepkgonly" in self.myopts:
			for xs in self.digraph.all_nodes():
				if not isinstance(xs, Package):
					continue
				if len(xs) >= 4 and xs[0] != "binary" and xs[3] == "merge":
					if missing == 0:
						print
					missing += 1
					print "Missing binary for:",xs[2]

		try:
			self.altlist()
		except self._unknown_internal_error:
			return False, myfavorites

		# We're true here unless we are missing binaries.
		return (not missing,myfavorites)
	def _set_args(self, args):
		"""
		Create the "args" package set from atoms and packages given as
		arguments. This method can be called multiple times if necessary.
		The package selection cache is automatically invalidated, since
		arguments influence package selections.
		"""
		# Rebuild the "args" set from scratch on every call.
		args_set = self._sets["args"]
		args_set.clear()
		for arg in args:
			if not isinstance(arg, (AtomArg, PackageArg)):
				continue
			atom = arg.atom
			if atom in args_set:
				continue
			args_set.add(atom)

		# Rebuild the flattened view of all set atoms.
		self._set_atoms.clear()
		self._set_atoms.update(chain(*self._sets.itervalues()))
		atom_arg_map = self._atom_arg_map
		atom_arg_map.clear()
		for arg in args:
			for atom in arg.set:
				atom_key = (atom, arg.root_config.root)
				refs = atom_arg_map.get(atom_key)
				if refs is None:
					refs = []
					atom_arg_map[atom_key] = refs
				# Bug fix: this append must happen for every arg, not
				# only when the refs list was just created; otherwise
				# later args mapping to the same (atom, root) key were
				# silently dropped from the map.
				if arg not in refs:
					refs.append(arg)

		# Invalidate the package selection cache, since
		# arguments influence package selections.
		self._highest_pkg_cache.clear()
		for trees in self._filtered_trees.itervalues():
			trees["porttree"].dbapi._clear_cache()
	def _greedy_slots(self, root_config, atom, blocker_lookahead=False):
		"""
		Return a list of slot atoms corresponding to installed slots that
		differ from the slot of the highest visible match. When
		blocker_lookahead is True, slot atoms that would trigger a blocker
		conflict are automatically discarded, potentially allowing automatic
		uninstallation of older slots when appropriate.
		"""
		highest_pkg, in_graph = self._select_package(root_config.root, atom)
		if highest_pkg is None:
			return []
		vardb = root_config.trees["vartree"].dbapi
		# Collect the SLOTs of all installed versions of this package.
		slots = set()
		for cpv in vardb.match(atom):
			# don't mix new virtuals with old virtuals
			if portage.cpv_getkey(cpv) == highest_pkg.cp:
				slots.add(vardb.aux_get(cpv, ["SLOT"])[0])

		slots.add(highest_pkg.metadata["SLOT"])
		# Only the highest match's slot exists: nothing extra to pull in.
		if len(slots) == 1:
			return []
		greedy_pkgs = []
		slots.remove(highest_pkg.metadata["SLOT"])
		# For each other installed slot, pick its best candidate if it is
		# genuinely a lower version of the same package.
		while slots:
			slot = slots.pop()
			slot_atom = portage.dep.Atom("%s:%s" % (highest_pkg.cp, slot))
			pkg, in_graph = self._select_package(root_config.root, slot_atom)
			if pkg is not None and \
				pkg.cp == highest_pkg.cp and pkg < highest_pkg:
				greedy_pkgs.append(pkg)
		if not greedy_pkgs:
			return []
		if not blocker_lookahead:
			return [pkg.slot_atom for pkg in greedy_pkgs]

		# With blocker_lookahead, compute each candidate's blocker atoms
		# so that mutually-blocking slots can be discarded.
		blockers = {}
		blocker_dep_keys = ["DEPEND", "PDEPEND", "RDEPEND"]
		for pkg in greedy_pkgs + [highest_pkg]:
			dep_str = " ".join(pkg.metadata[k] for k in blocker_dep_keys)
			try:
				atoms = self._select_atoms(
					pkg.root, dep_str, pkg.use.enabled,
					parent=pkg, strict=True)
			except portage.exception.InvalidDependString:
				continue
			blocker_atoms = (x for x in atoms if x.blocker)
			blockers[pkg] = InternalPackageSet(initial_atoms=blocker_atoms)

		if highest_pkg not in blockers:
			return []

		# filter packages with invalid deps
		greedy_pkgs = [pkg for pkg in greedy_pkgs if pkg in blockers]

		# filter packages that conflict with highest_pkg
		greedy_pkgs = [pkg for pkg in greedy_pkgs if not \
			(blockers[highest_pkg].findAtomForPackage(pkg) or \
			blockers[pkg].findAtomForPackage(highest_pkg))]

		if not greedy_pkgs:
			return []

		# If two packages conflict, discard the lower version.
		discard_pkgs = set()
		greedy_pkgs.sort(reverse=True)
		for i in xrange(len(greedy_pkgs) - 1):
			pkg1 = greedy_pkgs[i]
			if pkg1 in discard_pkgs:
				continue
			for j in xrange(i + 1, len(greedy_pkgs)):
				pkg2 = greedy_pkgs[j]
				if pkg2 in discard_pkgs:
					continue
				if blockers[pkg1].findAtomForPackage(pkg2) or \
					blockers[pkg2].findAtomForPackage(pkg1):
					# pkg1 > pkg2
					discard_pkgs.add(pkg2)

		return [pkg.slot_atom for pkg in greedy_pkgs \
			if pkg not in discard_pkgs]
	def _select_atoms_from_graph(self, *pargs, **kwargs):
1570
		"""
1571
		Prefer atoms matching packages that have already been
1572
		added to the graph or those that are installed and have
1573
		not been scheduled for replacement.
1574
		"""
1575
		kwargs["trees"] = self._graph_trees
1576
		return self._select_atoms_highest_available(*pargs, **kwargs)
1577
1578
	def _select_atoms_highest_available(self, root, depstring,
		myuse=None, parent=None, strict=True, trees=None, priority=None):
		"""This will raise InvalidDependString if necessary. If trees is
		None then self._filtered_trees is used.

		Runs dep_check() on the given depstring and returns the list of
		selected atoms.
		"""
		pkgsettings = self.pkgsettings[root]
		if trees is None:
			trees = self._filtered_trees
		if not getattr(priority, "buildtime", False):
			# The parent should only be passed to dep_check() for buildtime
			# dependencies since that's the only case when it's appropriate
			# to trigger the circular dependency avoidance code which uses it.
			# It's important not to trigger the same circular dependency
			# avoidance code for runtime dependencies since it's not needed
			# and it can promote an incorrect package choice.
			parent = None
		# Fix: removed a dead "if True:" wrapper that added a spurious
		# indentation level around this whole section.
		try:
			if parent is not None:
				trees[root]["parent"] = parent
			if not strict:
				portage.dep._dep_check_strict = False
			mycheck = portage.dep_check(depstring, None,
				pkgsettings, myuse=myuse,
				myroot=root, trees=trees)
		finally:
			# Always restore global state, even when dep_check() raises.
			if parent is not None:
				trees[root].pop("parent")
			portage.dep._dep_check_strict = True
		if not mycheck[0]:
			raise portage.exception.InvalidDependString(mycheck[1])
		return mycheck[1]
	def _show_unsatisfied_dep(self, root, atom, myparent=None, arg=None):
		"""
		Print an explanation of why the given dependency atom could not
		be satisfied (masked candidates, missing/changed USE flags, or no
		ebuilds at all), followed by the chain of parent nodes that pulled
		the dependency in.

		root -- root under which the dependency was being resolved
		atom -- the unsatisfied dependency atom (string or Atom)
		myparent -- node whose dependency was unsatisfied; starting point
			for the parent-chain display at the bottom
		arg -- command line argument, if any, displayed in place of the
			atom itself
		"""
		atom = portage.dep.Atom(atom)
		atom_set = InternalPackageSet(initial_atoms=(atom,))
		# Build a variant of the atom without USE deps (slot preserved)
		# so that candidates that fail only on USE can still be matched
		# and reported with USE-change hints below.
		# NOTE(review): remove_slot() appears to strip USE deps along
		# with the slot here — confirm against portage.dep.
		atom_without_use = atom
		if atom.use:
			atom_without_use = portage.dep.remove_slot(atom)
			if atom.slot:
				atom_without_use += ":" + atom.slot
			atom_without_use = portage.dep.Atom(atom_without_use)
		xinfo = '"%s"' % atom
		if arg:
			xinfo='"%s"' % arg
		# Discard null/ from failed cpv_expand category expansion.
		xinfo = xinfo.replace("null/", "")
		masked_packages = []
		missing_use = []
		masked_pkg_instances = set()
		missing_licenses = []
		have_eapi_mask = False
		pkgsettings = self.pkgsettings[root]
		implicit_iuse = pkgsettings._get_implicit_iuse()
		root_config = self.roots[root]
		portdb = self.roots[root].trees["porttree"].dbapi
		dbs = self._filtered_trees[root]["dbs"]
		# Collect every non-installed candidate that would match the
		# atom if USE deps are ignored, recording mask reasons.
		for db, pkg_type, built, installed, db_keys in dbs:
			if installed:
				continue
			match = db.match
			if hasattr(db, "xmatch"):
				cpv_list = db.xmatch("match-all", atom_without_use)
			else:
				cpv_list = db.match(atom_without_use)
			# descending order
			cpv_list.reverse()
			for cpv in cpv_list:
				metadata, mreasons  = get_mask_info(root_config, cpv,
					pkgsettings, db, pkg_type, built, installed, db_keys)
				if metadata is not None:
					pkg = Package(built=built, cpv=cpv,
						installed=installed, metadata=metadata,
						root_config=root_config)
					if pkg.cp != atom.cp:
						# A cpv can be returned from dbapi.match() as an
						# old-style virtual match even in cases when the
						# package does not actually PROVIDE the virtual.
						# Filter out any such false matches here.
						if not atom_set.findAtomForPackage(pkg):
							continue
					if mreasons:
						masked_pkg_instances.add(pkg)
					if atom.use:
						missing_use.append(pkg)
						if not mreasons:
							continue
				masked_packages.append(
					(root_config, pkgsettings, cpv, metadata, mreasons))

		# For USE-dep failures, work out for each candidate whether the
		# required flags are simply disabled (fixable via USE change) or
		# absent from IUSE entirely.
		missing_use_reasons = []
		missing_iuse_reasons = []
		for pkg in missing_use:
			use = pkg.use.enabled
			iuse = implicit_iuse.union(re.escape(x) for x in pkg.iuse.all)
			iuse_re = re.compile("^(%s)$" % "|".join(iuse))
			missing_iuse = []
			for x in atom.use.required:
				if iuse_re.match(x) is None:
					missing_iuse.append(x)
			mreasons = []
			if missing_iuse:
				mreasons.append("Missing IUSE: %s" % " ".join(missing_iuse))
				missing_iuse_reasons.append((pkg, mreasons))
			else:
				need_enable = sorted(atom.use.enabled.difference(use))
				need_disable = sorted(atom.use.disabled.intersection(use))
				if need_enable or need_disable:
					changes = []
					changes.extend(colorize("red", "+" + x) \
						for x in need_enable)
					changes.extend(colorize("blue", "-" + x) \
						for x in need_disable)
					mreasons.append("Change USE: %s" % " ".join(changes))
					missing_use_reasons.append((pkg, mreasons))

		unmasked_use_reasons = [(pkg, mreasons) for (pkg, mreasons) \
			in missing_use_reasons if pkg not in masked_pkg_instances]

		unmasked_iuse_reasons = [(pkg, mreasons) for (pkg, mreasons) \
			in missing_iuse_reasons if pkg not in masked_pkg_instances]

		show_missing_use = False
		if unmasked_use_reasons:
			# Only show the latest version.
			show_missing_use = unmasked_use_reasons[:1]
		elif unmasked_iuse_reasons:
			if missing_use_reasons:
				# All packages with required IUSE are masked,
				# so display a normal masking message.
				pass
			else:
				show_missing_use = unmasked_iuse_reasons

		if show_missing_use:
			print "\nemerge: there are no ebuilds built with USE flags to satisfy "+green(xinfo)+"."
			print "!!! One of the following packages is required to complete your request:"
			for pkg, mreasons in show_missing_use:
				print "- "+pkg.cpv+" ("+", ".join(mreasons)+")"

		elif masked_packages:
			print "\n!!! " + \
				colorize("BAD", "All ebuilds that could satisfy ") + \
				colorize("INFORM", xinfo) + \
				colorize("BAD", " have been masked.")
			print "!!! One of the following masked packages is required to complete your request:"
			have_eapi_mask = show_masked_packages(masked_packages)
			if have_eapi_mask:
				print
				msg = ("The current version of portage supports " + \
					"EAPI '%s'. You must upgrade to a newer version" + \
					" of portage before EAPI masked packages can" + \
					" be installed.") % portage.const.EAPI
				from textwrap import wrap
				for line in wrap(msg, 75):
					print line
			print
			show_mask_docs()
		else:
			print "\nemerge: there are no ebuilds to satisfy "+green(xinfo)+"."

		# Show parent nodes and the argument that pulled them in.
		traversed_nodes = set()
		node = myparent
		msg = []
		while node is not None:
			traversed_nodes.add(node)
			msg.append('(dependency required by "%s" [%s])' % \
				(colorize('INFORM', str(node.cpv)), node.type_name))
			# When traversing to parents, prefer arguments over packages
			# since arguments are root nodes. Never traverse the same
			# package twice, in order to prevent an infinite loop.
			selected_parent = None
			for parent in self.digraph.parent_nodes(node):
				if isinstance(parent, DependencyArg):
					msg.append('(dependency required by "%s" [argument])' % \
						(colorize('INFORM', str(parent))))
					selected_parent = None
					break
				if parent not in traversed_nodes:
					selected_parent = parent
			node = selected_parent
		for line in msg:
			print line

		print
1764
1765
	def _select_pkg_highest_available(self, root, atom, onlydeps=False):
		"""
		Memoizing front-end for _select_pkg_highest_available_imp().
		Results are cached per (root, atom, onlydeps) key; a cached
		entry is refreshed in place once its package has actually
		been added to the graph. Newly selected visible packages are
		injected into root_config.visible_pkgs.
		"""
		key = (root, atom, onlydeps)
		cached = self._highest_pkg_cache.get(key)
		if cached is not None:
			cached_pkg, cached_existing = cached
			if not cached_pkg or cached_existing:
				return cached
			graph_pkg = self._slot_pkg_map[root].get(cached_pkg.slot_atom)
			if graph_pkg and graph_pkg == cached_pkg:
				# Update the cache to reflect that the
				# package has been added to the graph.
				cached = cached_pkg, cached_pkg
				self._highest_pkg_cache[key] = cached
			return cached

		result = self._select_pkg_highest_available_imp(
			root, atom, onlydeps=onlydeps)
		self._highest_pkg_cache[key] = result

		selected, _existing = result
		if selected is not None:
			settings = selected.root_config.settings
			keyword_masked = selected.installed and \
				settings._getMissingKeywords(selected.cpv, selected.metadata)
			if visible(settings, selected) and not keyword_masked:
				selected.root_config.visible_pkgs.cpv_inject(selected)
		return result
1787
1788
	def _select_pkg_highest_available_imp(self, root, atom, onlydeps=False):
		"""
		Select the best available package satisfying the given atom,
		scanning the filtered dbs for this root. Two passes are made:
		first looking for a matching node already in the graph
		(find_existing_node=True), then falling back to fresh matches.

		Returns a (pkg, existing_node) tuple; existing_node is the
		already-graphed package occupying pkg's slot, if any. Returns
		(None, None) when nothing acceptable matches.
		"""
		root_config = self.roots[root]
		pkgsettings = self.pkgsettings[root]
		dbs = self._filtered_trees[root]["dbs"]
		vardb = self.roots[root].trees["vartree"].dbapi
		portdb = self.roots[root].trees["porttree"].dbapi
		# List of acceptable packages, ordered by type preference.
		matched_packages = []
		highest_version = None
		if not isinstance(atom, portage.dep.Atom):
			atom = portage.dep.Atom(atom)
		atom_cp = atom.cp
		atom_set = InternalPackageSet(initial_atoms=(atom,))
		existing_node = None
		myeb = None
		usepkgonly = "--usepkgonly" in self.myopts
		empty = "empty" in self.myparams
		selective = "selective" in self.myparams
		reinstall = False
		noreplace = "--noreplace" in self.myopts
		# Behavior of the "selective" parameter depends on
		# whether or not a package matches an argument atom.
		# If an installed package provides an old-style
		# virtual that is no longer provided by an available
		# package, the installed package may match an argument
		# atom even though none of the available packages do.
		# Therefore, "selective" logic does not consider
		# whether or not an installed package matches an
		# argument atom. It only considers whether or not
		# available packages match argument atoms, which is
		# represented by the found_available_arg flag.
		found_available_arg = False
		for find_existing_node in True, False:
			if existing_node:
				break
			for db, pkg_type, built, installed, db_keys in dbs:
				if existing_node:
					break
				if installed and not find_existing_node:
					want_reinstall = reinstall or empty or \
						(found_available_arg and not selective)
					if want_reinstall and matched_packages:
						continue
				if hasattr(db, "xmatch"):
					cpv_list = db.xmatch("match-all", atom)
				else:
					cpv_list = db.match(atom)

				# USE=multislot can make an installed package appear as if
				# it doesn't satisfy a slot dependency. Rebuilding the ebuild
				# won't do any good as long as USE=multislot is enabled since
				# the newly built package still won't have the expected slot.
				# Therefore, assume that such SLOT dependencies are already
				# satisfied rather than forcing a rebuild.
				if installed and not cpv_list and atom.slot:
					for cpv in db.match(atom.cp):
						slot_available = False
						for other_db, other_type, other_built, \
							other_installed, other_keys in dbs:
							try:
								if atom.slot == \
									other_db.aux_get(cpv, ["SLOT"])[0]:
									slot_available = True
									break
							except KeyError:
								pass
						if not slot_available:
							continue
						inst_pkg = self._pkg(cpv, "installed",
							root_config, installed=installed)
						# Remove the slot from the atom and verify that
						# the package matches the resulting atom.
						atom_without_slot = portage.dep.remove_slot(atom)
						if atom.use:
							atom_without_slot += str(atom.use)
						atom_without_slot = portage.dep.Atom(atom_without_slot)
						if portage.match_from_list(
							atom_without_slot, [inst_pkg]):
							cpv_list = [inst_pkg.cpv]
						break

				if not cpv_list:
					continue
				pkg_status = "merge"
				if installed or onlydeps:
					pkg_status = "nomerge"
				# descending order
				cpv_list.reverse()
				for cpv in cpv_list:
					# Make --noreplace take precedence over --newuse.
					if not installed and noreplace and \
						cpv in vardb.match(atom):
						# If the installed version is masked, it may
						# be necessary to look at lower versions,
						# in case there is a visible downgrade.
						continue
					reinstall_for_flags = None
					cache_key = (pkg_type, root, cpv, pkg_status)
					calculated_use = True
					pkg = self._pkg_cache.get(cache_key)
					if pkg is None:
						calculated_use = False
						try:
							metadata = izip(db_keys, db.aux_get(cpv, db_keys))
						except KeyError:
							continue
						pkg = Package(built=built, cpv=cpv,
							installed=installed, metadata=metadata,
							onlydeps=onlydeps, root_config=root_config,
							type_name=pkg_type)
						metadata = pkg.metadata
						if not built:
							metadata['CHOST'] = pkgsettings.get('CHOST', '')
						if not built and ("?" in metadata["LICENSE"] or \
							"?" in metadata["PROVIDE"]):
							# This is avoided whenever possible because
							# it's expensive. It only needs to be done here
							# if it has an effect on visibility.
							pkgsettings.setcpv(pkg)
							metadata["USE"] = pkgsettings["PORTAGE_USE"]
							calculated_use = True
						self._pkg_cache[pkg] = pkg

					if not installed or (built and matched_packages):
						# Only enforce visibility on installed packages
						# if there is at least one other visible package
						# available. By filtering installed masked packages
						# here, packages that have been masked since they
						# were installed can be automatically downgraded
						# to an unmasked version.
						try:
							if not visible(pkgsettings, pkg):
								continue
						except portage.exception.InvalidDependString:
							if not installed:
								continue

						# Enable upgrade or downgrade to a version
						# with visible KEYWORDS when the installed
						# version is masked by KEYWORDS, but never
						# reinstall the same exact version only due
						# to a KEYWORDS mask.
						if built and matched_packages:

							different_version = None
							for avail_pkg in matched_packages:
								if not portage.dep.cpvequal(
									pkg.cpv, avail_pkg.cpv):
									different_version = avail_pkg
									break
							if different_version is not None:

								if installed and \
									pkgsettings._getMissingKeywords(
									pkg.cpv, pkg.metadata):
									continue

								# If the ebuild no longer exists or its
								# keywords have been dropped, reject built
								# instances (installed or binary).
								# If --usepkgonly is enabled, assume that
								# the ebuild status should be ignored.
								if not usepkgonly:
									try:
										pkg_eb = self._pkg(
											pkg.cpv, "ebuild", root_config)
									except portage.exception.PackageNotFound:
										continue
									else:
										if not visible(pkgsettings, pkg_eb):
											continue

					if not pkg.built and not calculated_use:
						# This is avoided whenever possible because
						# it's expensive.
						pkgsettings.setcpv(pkg)
						pkg.metadata["USE"] = pkgsettings["PORTAGE_USE"]

					if pkg.cp != atom.cp:
						# A cpv can be returned from dbapi.match() as an
						# old-style virtual match even in cases when the
						# package does not actually PROVIDE the virtual.
						# Filter out any such false matches here.
						if not atom_set.findAtomForPackage(pkg):
							continue

					myarg = None
					if root == self.target_root:
						try:
							# Ebuild USE must have been calculated prior
							# to this point, in case atoms have USE deps.
							myarg = self._iter_atoms_for_pkg(pkg).next()
						except StopIteration:
							pass
						except portage.exception.InvalidDependString:
							if not installed:
								# masked by corruption
								continue
					if not installed and myarg:
						found_available_arg = True

					# Reject candidates whose USE state conflicts with
					# the atom's USE deps (only meaningful for ebuilds,
					# where USE can still be influenced by config).
					if atom.use and not pkg.built:
						use = pkg.use.enabled
						if atom.use.enabled.difference(use):
							continue
						if atom.use.disabled.intersection(use):
							continue
					if pkg.cp == atom_cp:
						if highest_version is None:
							highest_version = pkg
						elif pkg > highest_version:
							highest_version = pkg
					# At this point, we've found the highest visible
					# match from the current repo. Any lower versions
					# from this repo are ignored, so the loop
					# will always end with a break statement below
					# this point.
					if find_existing_node:
						e_pkg = self._slot_pkg_map[root].get(pkg.slot_atom)
						if not e_pkg:
							break
						if portage.dep.match_from_list(atom, [e_pkg]):
							if highest_version and \
								e_pkg.cp == atom_cp and \
								e_pkg < highest_version and \
								e_pkg.slot_atom != highest_version.slot_atom:
								# There is a higher version available in a
								# different slot, so this existing node is
								# irrelevant.
								pass
							else:
								matched_packages.append(e_pkg)
								existing_node = e_pkg
						break
					# Compare built package to current config and
					# reject the built package if necessary.
					if built and not installed and \
						("--newuse" in self.myopts or \
						"--reinstall" in self.myopts):
						iuses = pkg.iuse.all
						old_use = pkg.use.enabled
						if myeb:
							pkgsettings.setcpv(myeb)
						else:
							pkgsettings.setcpv(pkg)
						now_use = pkgsettings["PORTAGE_USE"].split()
						forced_flags = set()
						forced_flags.update(pkgsettings.useforce)
						forced_flags.update(pkgsettings.usemask)
						cur_iuse = iuses
						if myeb and not usepkgonly:
							cur_iuse = myeb.iuse.all
						if self._reinstall_for_flags(forced_flags,
							old_use, iuses,
							now_use, cur_iuse):
							break
					# Compare current config to installed package
					# and do not reinstall if possible.
					if not installed and \
						("--newuse" in self.myopts or \
						"--reinstall" in self.myopts) and \
						cpv in vardb.match(atom):
						pkgsettings.setcpv(pkg)
						forced_flags = set()
						forced_flags.update(pkgsettings.useforce)
						forced_flags.update(pkgsettings.usemask)
						old_use = vardb.aux_get(cpv, ["USE"])[0].split()
						old_iuse = set(filter_iuse_defaults(
							vardb.aux_get(cpv, ["IUSE"])[0].split()))
						cur_use = pkg.use.enabled
						cur_iuse = pkg.iuse.all
						reinstall_for_flags = \
							self._reinstall_for_flags(
							forced_flags, old_use, old_iuse,
							cur_use, cur_iuse)
						if reinstall_for_flags:
							reinstall = True
					if not built:
						myeb = pkg
					matched_packages.append(pkg)
					if reinstall_for_flags:
						self._reinstall_nodes[pkg] = \
							reinstall_for_flags
					break

		if not matched_packages:
			return None, None

		if "--debug" in self.myopts:
			for pkg in matched_packages:
				portage.writemsg("%s %s\n" % \
					((pkg.type_name + ":").rjust(10), pkg.cpv), noiselevel=-1)

		# Filter out any old-style virtual matches if they are
		# mixed with new-style virtual matches.
		cp = portage.dep_getkey(atom)
		if len(matched_packages) > 1 and \
			"virtual" == portage.catsplit(cp)[0]:
			for pkg in matched_packages:
				if pkg.cp != cp:
					continue
				# Got a new-style virtual, so filter
				# out any old-style virtuals.
				matched_packages = [pkg for pkg in matched_packages \
					if pkg.cp == cp]
				break

		if len(matched_packages) > 1:
			bestmatch = portage.best(
				[pkg.cpv for pkg in matched_packages])
			matched_packages = [pkg for pkg in matched_packages \
				if portage.dep.cpvequal(pkg.cpv, bestmatch)]

		# ordered by type preference ("ebuild" type is the last resort)
		return  matched_packages[-1], existing_node
2103
2104
	def _select_pkg_from_graph(self, root, atom, onlydeps=False):
		"""
		Select packages that have already been added to the graph or
		those that are installed and have not been scheduled for
		replacement.
		"""
		graph_db = self._graph_trees[root]["porttree"].dbapi
		candidates = graph_db.match_pkgs(atom)
		if candidates:
			# The last entry is the highest match.
			best = candidates[-1]
			return best, self._slot_pkg_map[root].get(best.slot_atom)
		return None, None
2117
2118
	def _complete_graph(self):
		"""
		Add any deep dependencies of required sets (args, system, world) that
		have not been pulled into the graph yet. This ensures that the graph
		is consistent such that initially satisfied deep dependencies are not
		broken in the new graph. Initially unsatisfied dependencies are
		irrelevant since we only want to avoid breaking dependencies that are
		initially satisfied.

		Since this method can consume enough time to disturb users, it is
		currently only enabled by the --complete-graph option.

		Returns 1 on success (or when skipped), 0 if graph creation fails.
		"""
		if "--buildpkgonly" in self.myopts or \
			"recurse" not in self.myparams:
			return 1

		if "complete" not in self.myparams:
			# Skip this to avoid consuming enough time to disturb users.
			return 1

		# Put the depgraph into a mode that causes it to only
		# select packages that have already been added to the
		# graph or those that are installed and have not been
		# scheduled for replacement. Also, toggle the "deep"
		# parameter so that all dependencies are traversed and
		# accounted for.
		self._select_atoms = self._select_atoms_from_graph
		self._select_package = self._select_pkg_from_graph
		already_deep = "deep" in self.myparams
		if not already_deep:
			self.myparams.add("deep")

		for root in self.roots:
			required_set_names = self._required_set_names.copy()
			if root == self.target_root and \
				(already_deep or "empty" in self.myparams):
				required_set_names.difference_update(self._sets)
			if not required_set_names and not self._ignored_deps:
				continue
			root_config = self.roots[root]
			setconfig = root_config.setconfig
			args = []
			# Reuse existing SetArg instances when available.
			for arg in self.digraph.root_nodes():
				if not isinstance(arg, SetArg):
					continue
				if arg.root_config != root_config:
					continue
				if arg.name in required_set_names:
					args.append(arg)
					required_set_names.remove(arg.name)
			# Create new SetArg instances only when necessary.
			for s in required_set_names:
				expanded_set = InternalPackageSet(
					initial_atoms=setconfig.getSetAtoms(s))
				atom = SETPREFIX + s
				args.append(SetArg(arg=atom, set=expanded_set,
					root_config=root_config))
			vardb = root_config.trees["vartree"].dbapi
			# Queue every atom of every required set for (re)traversal.
			for arg in args:
				for atom in arg.set:
					self._dep_stack.append(
						Dependency(atom=atom, root=root, parent=arg))
			if self._ignored_deps:
				self._dep_stack.extend(self._ignored_deps)
				self._ignored_deps = []
			if not self._create_graph(allow_unsatisfied=True):
				return 0
			# Check the unsatisfied deps to see if any initially satisfied deps
			# will become unsatisfied due to an upgrade. Initially unsatisfied
			# deps are irrelevant since we only want to avoid breaking deps
			# that are initially satisfied.
			while self._unsatisfied_deps:
				dep = self._unsatisfied_deps.pop()
				matches = vardb.match_pkgs(dep.atom)
				if not matches:
					self._initially_unsatisfied_deps.append(dep)
					continue
				# A scheduled installation broke a deep dependency.
				# Add the installed package to the graph so that it
				# will be appropriately reported as a slot collision
				# (possibly solvable via backtracking).
				pkg = matches[-1] # highest match
				if not self._add_pkg(pkg, dep):
					return 0
				if not self._create_graph(allow_unsatisfied=True):
					return 0
		return 1
2206
2207
	def _pkg(self, cpv, type_name, root_config, installed=False):
		"""
		Get a package instance from the cache, or create a new
		one if necessary.

		cpv -- package category/name-version string
		type_name -- package type ("ebuild", etc.); keys
			self.pkg_tree_map to pick the backing tree/dbapi
		root_config -- configuration for the root being queried
		installed -- if True, the package is cached/looked up under
			the "nomerge" operation instead of "merge"

		Raises portage.exception.PackageNotFound (converted from the
		KeyError that aux_get raises) if the package does not exist
		or is corrupt.
		"""
		operation = "merge"
		if installed:
			operation = "nomerge"
		pkg = self._pkg_cache.get(
			(type_name, root_config.root, cpv, operation))
		if pkg is None:
			tree_type = self.pkg_tree_map[type_name]
			db = root_config.trees[tree_type].dbapi
			# Use the original (unfiltered) trees' aux cache keys so
			# that all cached metadata fields are fetched.
			db_keys = list(self._trees_orig[root_config.root][
				tree_type].dbapi._aux_cache_keys)
			try:
				# izip produces the (key, value) pairs lazily; Package
				# consumes them to build its metadata mapping.
				metadata = izip(db_keys, db.aux_get(cpv, db_keys))
			except KeyError:
				raise portage.exception.PackageNotFound(cpv)
			pkg = Package(cpv=cpv, metadata=metadata,
				root_config=root_config, installed=installed)
			if type_name == "ebuild":
				# For ebuilds, USE/CHOST come from the current config
				# rather than from stored metadata.
				settings = self.pkgsettings[root_config.root]
				settings.setcpv(pkg)
				pkg.metadata["USE"] = settings["PORTAGE_USE"]
				pkg.metadata['CHOST'] = settings.get('CHOST', '')
			self._pkg_cache[pkg] = pkg
		return pkg
2237
2238
	def validate_blockers(self):
2239
		"""Remove any blockers from the digraph that do not match any of the
2240
		packages within the graph.  If necessary, create hard deps to ensure
2241
		correct merge order such that mutually blocking packages are never
2242
		installed simultaneously."""
2243
2244
		if "--buildpkgonly" in self.myopts or \
2245
			"--nodeps" in self.myopts:
2246
			return True
2247
2248
		#if "deep" in self.myparams:
2249
		if True:
2250
			# Pull in blockers from all installed packages that haven't already
2251
			# been pulled into the depgraph.  This is not enabled by default
2252
			# due to the performance penalty that is incurred by all the
2253
			# additional dep_check calls that are required.
2254
2255
			dep_keys = ["DEPEND","RDEPEND","PDEPEND"]
2256
			for myroot in self.trees:
2257
				vardb = self.trees[myroot]["vartree"].dbapi
2258
				portdb = self.trees[myroot]["porttree"].dbapi
2259
				pkgsettings = self.pkgsettings[myroot]
2260
				final_db = self.mydbapi[myroot]
2261
2262
				blocker_cache = BlockerCache(myroot, vardb)
2263
				stale_cache = set(blocker_cache)
2264
				for pkg in vardb:
2265
					cpv = pkg.cpv
2266
					stale_cache.discard(cpv)
2267
					pkg_in_graph = self.digraph.contains(pkg)
2268
2269
					# Check for masked installed packages. Only warn about
2270
					# packages that are in the graph in order to avoid warning
2271
					# about those that will be automatically uninstalled during
2272
					# the merge process or by --depclean.
2273
					if pkg in final_db:
2274
						if pkg_in_graph and not visible(pkgsettings, pkg):
2275
							self._masked_installed.add(pkg)
2276
2277
					blocker_atoms = None
2278
					blockers = None
2279
					if pkg_in_graph:
2280
						blockers = []
2281
						try:
2282
							blockers.extend(
2283
								self._blocker_parents.child_nodes(pkg))
2284
						except KeyError:
2285
							pass
2286
						try:
2287
							blockers.extend(
2288
								self._irrelevant_blockers.child_nodes(pkg))
2289
						except KeyError:
2290
							pass
2291
					if blockers is not None:
2292
						blockers = set(str(blocker.atom) \
2293
							for blocker in blockers)
2294
2295
					# If this node has any blockers, create a "nomerge"
2296
					# node for it so that they can be enforced.
2297
					self.spinner.update()
2298
					blocker_data = blocker_cache.get(cpv)
2299
					if blocker_data is not None and \
2300
						blocker_data.counter != long(pkg.metadata["COUNTER"]):
2301
						blocker_data = None
2302
2303
					# If blocker data from the graph is available, use
2304
					# it to validate the cache and update the cache if
2305
					# it seems invalid.
2306
					if blocker_data is not None and \
2307
						blockers is not None:
2308
						if not blockers.symmetric_difference(
2309
							blocker_data.atoms):
2310
							continue
2311
						blocker_data = None
2312
2313
					if blocker_data is None and \
2314
						blockers is not None:
2315
						# Re-use the blockers from the graph.
2316
						blocker_atoms = sorted(blockers)
2317
						counter = long(pkg.metadata["COUNTER"])
2318
						blocker_data = \
2319
							blocker_cache.BlockerData(counter, blocker_atoms)
2320
						blocker_cache[pkg.cpv] = blocker_data
2321
						continue
2322
2323
					if blocker_data:
2324
						blocker_atoms = blocker_data.atoms
2325
					else:
2326
						# Use aux_get() to trigger FakeVartree global
2327
						# updates on *DEPEND when appropriate.
2328
						depstr = " ".join(vardb.aux_get(pkg.cpv, dep_keys))
2329
						# It is crucial to pass in final_db here in order to
2330
						# optimize dep_check calls by eliminating atoms via
2331
						# dep_wordreduce and dep_eval calls.
2332
						try:
2333
							portage.dep._dep_check_strict = False
2334
							try:
2335
								success, atoms = portage.dep_check(depstr,
2336
									final_db, pkgsettings, myuse=pkg.use.enabled,
2337
									trees=self._graph_trees, myroot=myroot)
2338
							except Exception, e:
2339
								if isinstance(e, SystemExit):
2340
									raise
2341
								# This is helpful, for example, if a ValueError
2342
								# is thrown from cpv_expand due to multiple
2343
								# matches (this can happen if an atom lacks a
2344
								# category).
2345
								show_invalid_depstring_notice(
2346
									pkg, depstr, str(e))
2347
								del e
2348
								raise
2349
						finally:
2350
							portage.dep._dep_check_strict = True
2351
						if not success:
2352
							replacement_pkg = final_db.match_pkgs(pkg.slot_atom)
2353
							if replacement_pkg and \
2354
								replacement_pkg[0].operation == "merge":
2355
								# This package is being replaced anyway, so
2356
								# ignore invalid dependencies so as not to
2357
								# annoy the user too much (otherwise they'd be
2358
								# forced to manually unmerge it first).
2359
								continue
2360
							show_invalid_depstring_notice(pkg, depstr, atoms)
2361
							return False
2362
						blocker_atoms = [myatom for myatom in atoms \
2363
							if myatom.startswith("!")]
2364
						blocker_atoms.sort()
2365
						counter = long(pkg.metadata["COUNTER"])
2366
						blocker_cache[cpv] = \
2367
							blocker_cache.BlockerData(counter, blocker_atoms)
2368
					if blocker_atoms:
2369
						try:
2370
							for atom in blocker_atoms:
2371
								blocker = Blocker(atom=portage.dep.Atom(atom),
2372
									eapi=pkg.metadata["EAPI"], root=myroot)
2373
								self._blocker_parents.add(blocker, pkg)
2374
						except portage.exception.InvalidAtom, e:
2375
							depstr = " ".join(vardb.aux_get(pkg.cpv, dep_keys))
2376
							show_invalid_depstring_notice(
2377
								pkg, depstr, "Invalid Atom: %s" % (e,))
2378
							return False
2379
				for cpv in stale_cache:
2380
					del blocker_cache[cpv]
2381
				blocker_cache.flush()
2382
				del blocker_cache
2383
2384
		# Discard any "uninstall" tasks scheduled by previous calls
2385
		# to this method, since those tasks may not make sense given
2386
		# the current graph state.
2387
		previous_uninstall_tasks = self._blocker_uninstalls.leaf_nodes()
2388
		if previous_uninstall_tasks:
2389
			self._blocker_uninstalls = digraph()
2390
			self.digraph.difference_update(previous_uninstall_tasks)
2391
2392
		for blocker in self._blocker_parents.leaf_nodes():
2393
			self.spinner.update()
2394
			root_config = self.roots[blocker.root]
2395
			virtuals = root_config.settings.getvirtuals()
2396
			myroot = blocker.root
2397
			initial_db = self.trees[myroot]["vartree"].dbapi
2398
			final_db = self.mydbapi[myroot]
2399
			
2400
			provider_virtual = False
2401
			if blocker.cp in virtuals and \
2402
				not self._have_new_virt(blocker.root, blocker.cp):
2403
				provider_virtual = True
2404
2405
			# Use this to check PROVIDE for each matched package
2406
			# when necessary.
2407
			atom_set = InternalPackageSet(
2408
				initial_atoms=[blocker.atom])
2409
2410
			if provider_virtual:
2411
				atoms = []
2412
				for provider_entry in virtuals[blocker.cp]:
2413
					provider_cp = \
2414
						portage.dep_getkey(provider_entry)
2415
					atoms.append(blocker.atom.replace(
2416
						blocker.cp, provider_cp))
2417
			else:
2418
				atoms = [blocker.atom]
2419
2420
			blocked_initial = set()
2421
			for atom in atoms:
2422
				for pkg in initial_db.match_pkgs(atom):
2423
					if atom_set.findAtomForPackage(pkg):
2424
						blocked_initial.add(pkg)
2425
2426
			blocked_final = set()
2427
			for atom in atoms:
2428
				for pkg in final_db.match_pkgs(atom):
2429
					if atom_set.findAtomForPackage(pkg):
2430
						blocked_final.add(pkg)
2431
2432
			if not blocked_initial and not blocked_final:
2433
				parent_pkgs = self._blocker_parents.parent_nodes(blocker)
2434
				self._blocker_parents.remove(blocker)
2435
				# Discard any parents that don't have any more blockers.
2436
				for pkg in parent_pkgs:
2437
					self._irrelevant_blockers.add(blocker, pkg)
2438
					if not self._blocker_parents.child_nodes(pkg):
2439
						self._blocker_parents.remove(pkg)
2440
				continue
2441
			for parent in self._blocker_parents.parent_nodes(blocker):
2442
				unresolved_blocks = False
2443
				depends_on_order = set()
2444
				for pkg in blocked_initial:
2445
					if pkg.slot_atom == parent.slot_atom:
2446
						# TODO: Support blocks within slots in cases where it
2447
						# might make sense.  For example, a new version might
2448
						# require that the old version be uninstalled at build
2449
						# time.
2450
						continue
2451
					if parent.installed:
2452
						# Two currently installed packages conflict with
2453
						# eachother. Ignore this case since the damage
2454
						# is already done and this would be likely to
2455
						# confuse users if displayed like a normal blocker.
2456
						continue
2457
2458
					self._blocked_pkgs.add(pkg, blocker)
2459
2460
					if parent.operation == "merge":
2461
						# Maybe the blocked package can be replaced or simply
2462
						# unmerged to resolve this block.
2463
						depends_on_order.add((pkg, parent))
2464
						continue
2465
					# None of the above blocker resolutions techniques apply,
2466
					# so apparently this one is unresolvable.
2467
					unresolved_blocks = True
2468
				for pkg in blocked_final:
2469
					if pkg.slot_atom == parent.slot_atom:
2470
						# TODO: Support blocks within slots.
2471
						continue
2472
					if parent.operation == "nomerge" and \
2473
						pkg.operation == "nomerge":
2474
						# This blocker will be handled the next time that a
2475
						# merge of either package is triggered.
2476
						continue
2477
2478
					self._blocked_pkgs.add(pkg, blocker)
2479
2480
					# Maybe the blocking package can be
2481
					# unmerged to resolve this block.
2482
					if parent.operation == "merge" and pkg.installed:
2483
						depends_on_order.add((pkg, parent))
2484
						continue
2485
					elif parent.operation == "nomerge":
2486
						depends_on_order.add((parent, pkg))
2487
						continue
2488
					# None of the above blocker resolutions techniques apply,
2489
					# so apparently this one is unresolvable.
2490
					unresolved_blocks = True
2491
2492
				# Make sure we don't unmerge any package that have been pulled
2493
				# into the graph.
2494
				if not unresolved_blocks and depends_on_order:
2495
					for inst_pkg, inst_task in depends_on_order:
2496
						if self.digraph.contains(inst_pkg) and \
2497
							self.digraph.parent_nodes(inst_pkg):
2498
							unresolved_blocks = True
2499
							break
2500
2501
				if not unresolved_blocks and depends_on_order:
2502
					for inst_pkg, inst_task in depends_on_order:
2503
						uninst_task = Package(built=inst_pkg.built,
2504
							cpv=inst_pkg.cpv, installed=inst_pkg.installed,
2505
							metadata=inst_pkg.metadata,
2506
							operation="uninstall",
2507
							root_config=inst_pkg.root_config,
2508
							type_name=inst_pkg.type_name)
2509
						self._pkg_cache[uninst_task] = uninst_task
2510
						# Enforce correct merge order with a hard dep.
2511
						self.digraph.addnode(uninst_task, inst_task,
2512
							priority=BlockerDepPriority.instance)
2513
						# Count references to this blocker so that it can be
2514
						# invalidated after nodes referencing it have been
2515
						# merged.
2516
						self._blocker_uninstalls.addnode(uninst_task, blocker)
2517
				if not unresolved_blocks and not depends_on_order:
2518
					self._irrelevant_blockers.add(blocker, parent)
2519
					self._blocker_parents.remove_edge(blocker, parent)
2520
					if not self._blocker_parents.parent_nodes(blocker):
2521
						self._blocker_parents.remove(blocker)
2522
					if not self._blocker_parents.child_nodes(parent):
2523
						self._blocker_parents.remove(parent)
2524
				if unresolved_blocks:
2525
					self._unsolvable_blockers.add(blocker, parent)
2526
2527
		return True
2528
2529
	def _accept_blocker_conflicts(self):
2530
		acceptable = False
2531
		for x in ("--buildpkgonly", "--fetchonly",
2532
			"--fetch-all-uri", "--nodeps"):
2533
			if x in self.myopts:
2534
				acceptable = True
2535
				break
2536
		return acceptable
2537
2538
	def _merge_order_bias(self, mygraph):
		"""
		Bias leaf node selection for an optimal merge order: uninstall
		tasks sink to the end, deep system runtime deps float to the
		front, and the remaining nodes are ordered from highest to
		lowest overall reference count.
		"""

		# Reference counts are loop-invariant, so compute them once up front.
		refcount = dict((node, len(mygraph.parent_nodes(node)))
			for node in mygraph.order)
		system_nodes = _find_deep_system_runtime_deps(mygraph)

		def cmp_merge_preference(lhs, rhs):

			lhs_uninst = lhs.operation == 'uninstall'
			rhs_uninst = rhs.operation == 'uninstall'
			if lhs_uninst or rhs_uninst:
				# Uninstalls compare equal to each other and sort
				# after every other kind of node.
				if lhs_uninst and rhs_uninst:
					return 0
				if lhs_uninst:
					return 1
				return -1

			lhs_sys = lhs in system_nodes
			rhs_sys = rhs in system_nodes
			if lhs_sys != rhs_sys:
				# Deep system runtime deps sort before other nodes.
				if lhs_sys:
					return -1
				return 1

			# Higher reference count sorts earlier.
			return refcount[rhs] - refcount[lhs]

		mygraph.order.sort(key=cmp_sort_key(cmp_merge_preference))
2571
2572
	def altlist(self, reversed=False):
		"""
		Return a copy of the serialized merge list, computing and
		caching it on first use. When *reversed* is true, the copy is
		returned in reverse order. NOTE: the parameter name shadows the
		builtin reversed(); it is kept for caller compatibility.
		"""
		while self._serialized_tasks_cache is None:
			self._resolve_conflicts()
			try:
				self._serialized_tasks_cache, self._scheduler_graph = \
					self._serialize_tasks()
			except self._serialize_tasks_retry:
				# Serialization loosened self.myparams; try again.
				pass

		if reversed:
			return self._serialized_tasks_cache[::-1]
		return self._serialized_tasks_cache[:]
2586
2587
	def schedulerGraph(self):
		"""
		Return the scheduler graph: identical to the normal digraph
		except that certain uninstall edges are reversed where
		conflicting packages must be temporarily installed
		simultaneously. The Scheduler uses it for its parallelization
		logic, so that temporary simultaneous installation of
		conflicting packages (especially !!atom blockers) is avoided
		except in the specific cases that require it.

		Note: this calls break_refs(), which mutates the internal
		Package instances, so this depgraph must not be used for any
		further calculations afterwards.
		"""
		graph = self._scheduler_graph
		if graph is None:
			# altlist() serializes the tasks and populates the cache.
			self.altlist()
			graph = self._scheduler_graph
		self.break_refs(graph.order)
		return graph
2605
2606
	def break_refs(self, nodes):
		"""
		Given a mergelist such as that returned from self.altlist(),
		sever any references from its nodes back to this depgraph, so
		callers can hold the packages without also keeping the depgraph
		alive on the heap.
		"""
		for node in nodes:
			if not hasattr(node, "root_config"):
				continue
			# The FakeVartree references the _package_cache, which in
			# turn references this depgraph. Swap in the original
			# RootConfig instance (which references the real vartree)
			# so the Package no longer pins the depgraph and
			# FakeVartree in memory.
			node.root_config = \
				self._trees_orig[node.root_config.root]["root_config"]
2623
2624
	def _resolve_conflicts(self):
		"""
		Prepare the dependency graph for task serialization: complete
		the graph, validate blockers, and process slot conflicts.

		Raises self._unknown_internal_error() if the graph cannot be
		completed or if blocker validation fails. The order matters:
		validate_blockers() is only attempted on a completed graph.
		"""
		if not self._complete_graph():
			raise self._unknown_internal_error()

		if not self.validate_blockers():
			raise self._unknown_internal_error()

		# Slot conflicts are handled last, and only when some exist.
		if self._slot_collision_info:
			self._process_slot_conflicts()
2633
2634
	def _serialize_tasks(self):
		"""
		Flatten self.digraph into an ordered merge list.

		Returns a (retlist, scheduler_graph) tuple: retlist is the
		serialized list of tasks (Package instances, plus Blocker
		instances for satisfied and unsolvable blockers), and
		scheduler_graph is a copy of the digraph in which
		parent -> uninstall edges have been reversed for uninstalls
		that must run after overlapping blocking packages are merged.

		Raises self._serialize_tasks_retry after adding "complete" to
		self.myparams (the caller must rebuild the graph and retry),
		and self._unknown_internal_error for unresolved blocker
		conflicts, slot collisions, or circular dependencies.
		"""

		if "--debug" in self.myopts:
			writemsg("\ndigraph:\n\n", noiselevel=-1)
			self.digraph.debug_print()
			writemsg("\n", noiselevel=-1)

		# Work on copies so that self.digraph itself is preserved.
		scheduler_graph = self.digraph.copy()

		if '--nodeps' in self.myopts:
			# Preserve the package order given on the command line.
			return ([node for node in scheduler_graph \
				if isinstance(node, Package) \
				and node.operation == 'merge'], scheduler_graph)

		mygraph=self.digraph.copy()
		# Prune "nomerge" root nodes if nothing depends on them, since
		# otherwise they slow down merge order calculation. Don't remove
		# non-root nodes since they help optimize merge order in some cases
		# such as revdep-rebuild.
		removed_nodes = set()
		while True:
			for node in mygraph.root_nodes():
				if not isinstance(node, Package) or \
					node.installed or node.onlydeps:
					removed_nodes.add(node)
			if removed_nodes:
				self.spinner.update()
				mygraph.difference_update(removed_nodes)
			if not removed_nodes:
				break
			removed_nodes.clear()
		self._merge_order_bias(mygraph)
		def cmp_circular_bias(n1, n2):
			"""
			RDEPEND is stronger than PDEPEND and this function
			measures such a strength bias within a circular
			dependency relationship.
			"""
			n1_n2_medium = n2 in mygraph.child_nodes(n1,
				ignore_priority=priority_range.ignore_medium_soft)
			n2_n1_medium = n1 in mygraph.child_nodes(n2,
				ignore_priority=priority_range.ignore_medium_soft)
			if n1_n2_medium == n2_n1_medium:
				return 0
			elif n1_n2_medium:
				return 1
			return -1
		myblocker_uninstalls = self._blocker_uninstalls.copy()
		retlist=[]
		# Contains uninstall tasks that have been scheduled to
		# occur after overlapping blockers have been installed.
		scheduled_uninstalls = set()
		# Contains any Uninstall tasks that have been ignored
		# in order to avoid the circular deps code path. These
		# correspond to blocker conflicts that could not be
		# resolved.
		ignored_uninstall_tasks = set()
		have_uninstall_task = False
		# Whether the graph was built with the "complete" parameter.
		complete = "complete" in self.myparams
		asap_nodes = []

		def get_nodes(**kwargs):
			"""
			Returns leaf nodes excluding Uninstall instances
			since those should be executed as late as possible.
			"""
			return [node for node in mygraph.leaf_nodes(**kwargs) \
				if isinstance(node, Package) and \
					(node.operation != "uninstall" or \
					node in scheduled_uninstalls)]

		# sys-apps/portage needs special treatment if ROOT="/"
		running_root = self._running_root.root
		from portage.const import PORTAGE_PACKAGE_ATOM
		runtime_deps = InternalPackageSet(
			initial_atoms=[PORTAGE_PACKAGE_ATOM])
		running_portage = self.trees[running_root]["vartree"].dbapi.match_pkgs(
			PORTAGE_PACKAGE_ATOM)
		replacement_portage = self.mydbapi[running_root].match_pkgs(
			PORTAGE_PACKAGE_ATOM)

		if running_portage:
			running_portage = running_portage[0]
		else:
			running_portage = None

		if replacement_portage:
			replacement_portage = replacement_portage[0]
		else:
			replacement_portage = None

		if replacement_portage == running_portage:
			replacement_portage = None

		if replacement_portage is not None:
			# update from running_portage to replacement_portage asap
			asap_nodes.append(replacement_portage)

		if running_portage is not None:
			# Collect portage's own runtime deps so they are never
			# uninstalled out from under the running instance below.
			try:
				portage_rdepend = self._select_atoms_highest_available(
					running_root, running_portage.metadata["RDEPEND"],
					myuse=running_portage.use.enabled,
					parent=running_portage, strict=False)
			except portage.exception.InvalidDependString, e:
				portage.writemsg("!!! Invalid RDEPEND in " + \
					"'%svar/db/pkg/%s/RDEPEND': %s\n" % \
					(running_root, running_portage.cpv, e), noiselevel=-1)
				del e
				portage_rdepend = []
			runtime_deps.update(atom for atom in portage_rdepend \
				if not atom.startswith("!"))

		def gather_deps(ignore_priority, mergeable_nodes,
			selected_nodes, node):
			"""
			Recursively gather a group of nodes that RDEPEND on
			eachother. This ensures that they are merged as a group
			and get their RDEPENDs satisfied as soon as possible.
			"""
			if node in selected_nodes:
				return True
			if node not in mergeable_nodes:
				return False
			if node == replacement_portage and \
				mygraph.child_nodes(node,
				ignore_priority=priority_range.ignore_medium_soft):
				# Make sure that portage always has all of it's
				# RDEPENDs installed first.
				return False
			selected_nodes.add(node)
			for child in mygraph.child_nodes(node,
				ignore_priority=ignore_priority):
				if not gather_deps(ignore_priority,
					mergeable_nodes, selected_nodes, child):
					return False
			return True

		# Like priority_range.ignore_medium, but also ignores
		# blocker dep edges.
		def ignore_uninst_or_med(priority):
			if priority is BlockerDepPriority.instance:
				return True
			return priority_range.ignore_medium(priority)

		# Like priority_range.ignore_medium_soft, but also ignores
		# blocker dep edges.
		def ignore_uninst_or_med_soft(priority):
			if priority is BlockerDepPriority.instance:
				return True
			return priority_range.ignore_medium_soft(priority)

		tree_mode = "--tree" in self.myopts
		# Tracks whether or not the current iteration should prefer asap_nodes
		# if available.  This is set to False when the previous iteration
		# failed to select any nodes.  It is reset whenever nodes are
		# successfully selected.
		prefer_asap = True

		# Controls whether or not the current iteration should drop edges that
		# are "satisfied" by installed packages, in order to solve circular
		# dependencies. The deep runtime dependencies of installed packages are
		# not checked in this case (bug #199856), so it must be avoided
		# whenever possible.
		drop_satisfied = False

		# State of variables for successive iterations that loosen the
		# criteria for node selection.
		#
		# iteration   prefer_asap   drop_satisfied
		# 1           True          False
		# 2           False         False
		# 3           False         True
		#
		# If no nodes are selected on the last iteration, it is due to
		# unresolved blockers or circular dependencies.

		while not mygraph.empty():
			self.spinner.update()
			selected_nodes = None
			ignore_priority = None
			# NOTE: priority_range is read by the nested closures above
			# (cmp_circular_bias, gather_deps, ignore_uninst_or_med*),
			# so rebinding it here changes their behavior too.
			if drop_satisfied or (prefer_asap and asap_nodes):
				priority_range = DepPrioritySatisfiedRange
			else:
				priority_range = DepPriorityNormalRange
			if prefer_asap and asap_nodes:
				# ASAP nodes are merged before their soft deps. Go ahead and
				# select root nodes here if necessary, since it's typical for
				# the parent to have been removed from the graph already.
				asap_nodes = [node for node in asap_nodes \
					if mygraph.contains(node)]
				for node in asap_nodes:
					if not mygraph.child_nodes(node,
						ignore_priority=priority_range.ignore_soft):
						selected_nodes = [node]
						asap_nodes.remove(node)
						break
			if not selected_nodes and \
				not (prefer_asap and asap_nodes):
				for i in xrange(priority_range.NONE,
					priority_range.MEDIUM_SOFT + 1):
					ignore_priority = priority_range.ignore_priority[i]
					nodes = get_nodes(ignore_priority=ignore_priority)
					if nodes:
						# If there is a mix of uninstall nodes with other
						# types, save the uninstall nodes for later since
						# sometimes a merge node will render an uninstall
						# node unnecessary (due to occupying the same slot),
						# and we want to avoid executing a separate uninstall
						# task in that case.
						if len(nodes) > 1:
							good_uninstalls = []
							with_some_uninstalls_excluded = []
							for node in nodes:
								if node.operation == "uninstall":
									slot_node = self.mydbapi[node.root
										].match_pkgs(node.slot_atom)
									if slot_node and \
										slot_node[0].operation == "merge":
										continue
									good_uninstalls.append(node)
								with_some_uninstalls_excluded.append(node)
							if good_uninstalls:
								nodes = good_uninstalls
							elif with_some_uninstalls_excluded:
								nodes = with_some_uninstalls_excluded
							else:
								nodes = nodes

						if ignore_priority is None and not tree_mode:
							# Greedily pop all of these nodes since no
							# relationship has been ignored. This optimization
							# destroys --tree output, so it's disabled in tree
							# mode.
							selected_nodes = nodes
						else:
							# For optimal merge order:
							#  * Only pop one node.
							#  * Removing a root node (node without a parent)
							#    will not produce a leaf node, so avoid it.
							#  * It's normal for a selected uninstall to be a
							#    root node, so don't check them for parents.
							for node in nodes:
								if node.operation == "uninstall" or \
									mygraph.parent_nodes(node):
									selected_nodes = [node]
									break

						if selected_nodes:
							break

			if not selected_nodes:
				# Try to gather a mutually-RDEPENDing group of nodes
				# that can be merged together as a unit.
				nodes = get_nodes(ignore_priority=priority_range.ignore_medium)
				if nodes:
					mergeable_nodes = set(nodes)
					if prefer_asap and asap_nodes:
						nodes = asap_nodes
					for i in xrange(priority_range.SOFT,
						priority_range.MEDIUM_SOFT + 1):
						ignore_priority = priority_range.ignore_priority[i]
						for node in nodes:
							if not mygraph.parent_nodes(node):
								continue
							selected_nodes = set()
							if gather_deps(ignore_priority,
								mergeable_nodes, selected_nodes, node):
								break
							else:
								selected_nodes = None
						if selected_nodes:
							break

					if prefer_asap and asap_nodes and not selected_nodes:
						# We failed to find any asap nodes to merge, so ignore
						# them for the next iteration.
						prefer_asap = False
						continue

			if selected_nodes and ignore_priority is not None:
				# Try to merge ignored medium_soft deps as soon as possible
				# if they're not satisfied by installed packages.
				for node in selected_nodes:
					children = set(mygraph.child_nodes(node))
					soft = children.difference(
						mygraph.child_nodes(node,
						ignore_priority=DepPrioritySatisfiedRange.ignore_soft))
					medium_soft = children.difference(
						mygraph.child_nodes(node,
							ignore_priority = \
							DepPrioritySatisfiedRange.ignore_medium_soft))
					medium_soft.difference_update(soft)
					for child in medium_soft:
						if child in selected_nodes:
							continue
						if child in asap_nodes:
							continue
						asap_nodes.append(child)

			if selected_nodes and len(selected_nodes) > 1:
				if not isinstance(selected_nodes, list):
					selected_nodes = list(selected_nodes)
				selected_nodes.sort(key=cmp_sort_key(cmp_circular_bias))

			if not selected_nodes and not myblocker_uninstalls.is_empty():
				# An Uninstall task needs to be executed in order to
				# avoid conflict if possible.

				if drop_satisfied:
					priority_range = DepPrioritySatisfiedRange
				else:
					priority_range = DepPriorityNormalRange

				mergeable_nodes = get_nodes(
					ignore_priority=ignore_uninst_or_med)

				min_parent_deps = None
				uninst_task = None
				for task in myblocker_uninstalls.leaf_nodes():
					# Do some sanity checks so that system or world packages
					# don't get uninstalled inappropriately here (only really
					# necessary when --complete-graph has not been enabled).

					if task in ignored_uninstall_tasks:
						continue

					if task in scheduled_uninstalls:
						# It's been scheduled but it hasn't
						# been executed yet due to dependence
						# on installation of blocking packages.
						continue

					root_config = self.roots[task.root]
					inst_pkg = self._pkg_cache[
						("installed", task.root, task.cpv, "nomerge")]

					if self.digraph.contains(inst_pkg):
						continue

					forbid_overlap = False
					heuristic_overlap = False
					for blocker in myblocker_uninstalls.parent_nodes(task):
						if blocker.eapi in ("0", "1"):
							heuristic_overlap = True
						elif blocker.atom.blocker.overlap.forbid:
							forbid_overlap = True
							break
					if forbid_overlap and running_root == task.root:
						continue

					if heuristic_overlap and running_root == task.root:
						# Never uninstall sys-apps/portage or it's essential
						# dependencies, except through replacement.
						try:
							runtime_dep_atoms = \
								list(runtime_deps.iterAtomsForPackage(task))
						except portage.exception.InvalidDependString, e:
							portage.writemsg("!!! Invalid PROVIDE in " + \
								"'%svar/db/pkg/%s/PROVIDE': %s\n" % \
								(task.root, task.cpv, e), noiselevel=-1)
							del e
							continue

						# Don't uninstall a runtime dep if it appears
						# to be the only suitable one installed.
						skip = False
						vardb = root_config.trees["vartree"].dbapi
						for atom in runtime_dep_atoms:
							other_version = None
							for pkg in vardb.match_pkgs(atom):
								if pkg.cpv == task.cpv and \
									pkg.metadata["COUNTER"] == \
									task.metadata["COUNTER"]:
									continue
								other_version = pkg
								break
							if other_version is None:
								skip = True
								break
						if skip:
							continue

						# For packages in the system set, don't take
						# any chances. If the conflict can't be resolved
						# by a normal replacement operation then abort.
						skip = False
						try:
							for atom in root_config.sets[
								"system"].iterAtomsForPackage(task):
								skip = True
								break
						except portage.exception.InvalidDependString, e:
							portage.writemsg("!!! Invalid PROVIDE in " + \
								"'%svar/db/pkg/%s/PROVIDE': %s\n" % \
								(task.root, task.cpv, e), noiselevel=-1)
							del e
							skip = True
						if skip:
							continue

					# Note that the world check isn't always
					# necessary since self._complete_graph() will
					# add all packages from the system and world sets to the
					# graph. This just allows unresolved conflicts to be
					# detected as early as possible, which makes it possible
					# to avoid calling self._complete_graph() when it is
					# unnecessary due to blockers triggering an abortion.
					if not complete:
						# For packages in the world set, go ahead an uninstall
						# when necessary, as long as the atom will be satisfied
						# in the final state.
						graph_db = self.mydbapi[task.root]
						skip = False
						try:
							for atom in root_config.sets[
								"world"].iterAtomsForPackage(task):
								satisfied = False
								for pkg in graph_db.match_pkgs(atom):
									if pkg == inst_pkg:
										continue
									satisfied = True
									break
								if not satisfied:
									skip = True
									self._blocked_world_pkgs[inst_pkg] = atom
									break
						except portage.exception.InvalidDependString, e:
							portage.writemsg("!!! Invalid PROVIDE in " + \
								"'%svar/db/pkg/%s/PROVIDE': %s\n" % \
								(task.root, task.cpv, e), noiselevel=-1)
							del e
							skip = True
						if skip:
							continue

					# Check the deps of parent nodes to ensure that
					# the chosen task produces a leaf node. Maybe
					# this can be optimized some more to make the
					# best possible choice, but the current algorithm
					# is simple and should be near optimal for most
					# common cases.
					mergeable_parent = False
					parent_deps = set()
					for parent in mygraph.parent_nodes(task):
						parent_deps.update(mygraph.child_nodes(parent,
							ignore_priority=priority_range.ignore_medium_soft))
						if parent in mergeable_nodes and \
							gather_deps(ignore_uninst_or_med_soft,
							mergeable_nodes, set(), parent):
							mergeable_parent = True

					if not mergeable_parent:
						continue

					# Prefer the task whose parents have the fewest
					# remaining deps, so it becomes a leaf soonest.
					parent_deps.remove(task)
					if min_parent_deps is None or \
						len(parent_deps) < min_parent_deps:
						min_parent_deps = len(parent_deps)
						uninst_task = task

				if uninst_task is not None:
					# The uninstall is performed only after blocking
					# packages have been merged on top of it. File
					# collisions between blocking packages are detected
					# and removed from the list of files to be uninstalled.
					scheduled_uninstalls.add(uninst_task)
					parent_nodes = mygraph.parent_nodes(uninst_task)

					# Reverse the parent -> uninstall edges since we want
					# to do the uninstall after blocking packages have
					# been merged on top of it.
					mygraph.remove(uninst_task)
					for blocked_pkg in parent_nodes:
						mygraph.add(blocked_pkg, uninst_task,
							priority=BlockerDepPriority.instance)
						scheduler_graph.remove_edge(uninst_task, blocked_pkg)
						scheduler_graph.add(blocked_pkg, uninst_task,
							priority=BlockerDepPriority.instance)

					# Reset the state variables for leaf node selection and
					# continue trying to select leaf nodes.
					prefer_asap = True
					drop_satisfied = False
					continue

			if not selected_nodes:
				# Only select root nodes as a last resort. This case should
				# only trigger when the graph is nearly empty and the only
				# remaining nodes are isolated (no parents or children). Since
				# the nodes must be isolated, ignore_priority is not needed.
				selected_nodes = get_nodes()

			if not selected_nodes and not drop_satisfied:
				drop_satisfied = True
				continue

			if not selected_nodes and not myblocker_uninstalls.is_empty():
				# If possible, drop an uninstall task here in order to avoid
				# the circular deps code path. The corresponding blocker will
				# still be counted as an unresolved conflict.
				uninst_task = None
				for node in myblocker_uninstalls.leaf_nodes():
					try:
						mygraph.remove(node)
					except KeyError:
						pass
					else:
						uninst_task = node
						ignored_uninstall_tasks.add(node)
						break

				if uninst_task is not None:
					# Reset the state variables for leaf node selection and
					# continue trying to select leaf nodes.
					prefer_asap = True
					drop_satisfied = False
					continue

			if not selected_nodes:
				self._circular_deps_for_display = mygraph
				raise self._unknown_internal_error()

			# At this point, we've succeeded in selecting one or more nodes, so
			# reset state variables for leaf node selection.
			prefer_asap = True
			drop_satisfied = False

			mygraph.difference_update(selected_nodes)

			for node in selected_nodes:
				if isinstance(node, Package) and \
					node.operation == "nomerge":
					continue

				# Handle interactions between blockers
				# and uninstallation tasks.
				solved_blockers = set()
				uninst_task = None
				if isinstance(node, Package) and \
					"uninstall" == node.operation:
					have_uninstall_task = True
					uninst_task = node
				else:
					vardb = self.trees[node.root]["vartree"].dbapi
					previous_cpv = vardb.match(node.slot_atom)
					if previous_cpv:
						# The package will be replaced by this one, so remove
						# the corresponding Uninstall task if necessary.
						previous_cpv = previous_cpv[0]
						uninst_task = \
							("installed", node.root, previous_cpv, "uninstall")
						try:
							mygraph.remove(uninst_task)
						except KeyError:
							pass

				if uninst_task is not None and \
					uninst_task not in ignored_uninstall_tasks and \
					myblocker_uninstalls.contains(uninst_task):
					blocker_nodes = myblocker_uninstalls.parent_nodes(uninst_task)
					myblocker_uninstalls.remove(uninst_task)
					# Discard any blockers that this Uninstall solves.
					for blocker in blocker_nodes:
						if not myblocker_uninstalls.child_nodes(blocker):
							myblocker_uninstalls.remove(blocker)
							solved_blockers.add(blocker)

				retlist.append(node)

				if (isinstance(node, Package) and \
					"uninstall" == node.operation) or \
					(uninst_task is not None and \
					uninst_task in scheduled_uninstalls):
					# Include satisfied blockers in the merge list
					# since the user might be interested and also
					# it serves as an indicator that blocking packages
					# will be temporarily installed simultaneously.
					for blocker in solved_blockers:
						retlist.append(Blocker(atom=blocker.atom,
							root=blocker.root, eapi=blocker.eapi,
							satisfied=True))

		# Any uninstalls left in the blocker graph were never executed,
		# so their blockers remain unsolved.
		unsolvable_blockers = set(self._unsolvable_blockers.leaf_nodes())
		for node in myblocker_uninstalls.root_nodes():
			unsolvable_blockers.add(node)

		for blocker in unsolvable_blockers:
			retlist.append(blocker)

		# If any Uninstall tasks need to be executed in order
		# to avoid a conflict, complete the graph with any
		# dependencies that may have been initially
		# neglected (to ensure that unsafe Uninstall tasks
		# are properly identified and blocked from execution).
		if have_uninstall_task and \
			not complete and \
			not unsolvable_blockers:
			self.myparams.add("complete")
			raise self._serialize_tasks_retry("")

		if unsolvable_blockers and \
			not self._accept_blocker_conflicts():
			self._unsatisfied_blockers_for_display = unsolvable_blockers
			self._serialized_tasks_cache = retlist[:]
			self._scheduler_graph = scheduler_graph
			raise self._unknown_internal_error()

		if self._slot_collision_info and \
			not self._accept_blocker_conflicts():
			self._serialized_tasks_cache = retlist[:]
			self._scheduler_graph = scheduler_graph
			raise self._unknown_internal_error()

		return retlist, scheduler_graph
3244
3245
	def _show_circular_deps(self, mygraph):
		"""
		Display a circular dependency panic: strip the graph down to the
		cycle participants and print them, in --tree mode, along with an
		explanatory error message.
		"""
		# Root nodes have no parents and therefore cannot belong to a
		# cycle, so repeatedly eliminating them reduces the noise level
		# to a minimum.
		while True:
			roots = mygraph.root_nodes(
				ignore_priority=DepPrioritySatisfiedRange.ignore_medium_soft)
			if not roots:
				break
			mygraph.difference_update(roots)

		# Compute a display order and show the USE flags that are
		# enabled on the cycle participants, in case that helps the
		# user decide to disable some of them.
		display_order = []
		tempgraph = mygraph.copy()
		while not tempgraph.empty():
			leaves = tempgraph.leaf_nodes()
			if leaves:
				node = leaves[0]
			else:
				node = tempgraph.order[0]
			display_order.append(node)
			tempgraph.remove(node)
		display_order.reverse()

		# Force --tree output (and drop conflicting verbosity options)
		# so that the cycle structure is visible.
		self.myopts.pop("--quiet", None)
		self.myopts.pop("--verbose", None)
		self.myopts["--tree"] = True
		portage.writemsg("\n\n", noiselevel=-1)
		self.display(display_order)

		prefix = colorize("BAD", " * ")
		portage.writemsg("\n", noiselevel=-1)
		portage.writemsg(prefix + "Error: circular dependencies:\n",
			noiselevel=-1)
		portage.writemsg("\n", noiselevel=-1)
		mygraph.debug_print()
		portage.writemsg("\n", noiselevel=-1)
		portage.writemsg(prefix + "Note that circular dependencies " + \
			"can often be avoided by temporarily\n", noiselevel=-1)
		portage.writemsg(prefix + "disabling USE flags that trigger " + \
			"optional dependencies.\n", noiselevel=-1)
3286
3287
	def _show_merge_list(self):
		"""
		Display the cached serialized task list, unless that exact
		list (in either forward or reversed order) has already been
		displayed.
		"""
		tasks = self._serialized_tasks_cache
		if tasks is None:
			return
		shown = self._displayed_list
		if shown and \
			(shown == tasks or shown == list(reversed(tasks))):
			# This exact merge list was already displayed.
			return
		display_list = tasks[:]
		if "--tree" in self.myopts:
			display_list.reverse()
		self.display(display_list)
3297
3298
	def _show_unsatisfied_blockers(self, blockers):
		"""
		Explain unsolvable blocker conflicts: re-display the merge
		list, print an error banner, and for each conflicting package
		show which parents pulled it into the graph (capped so the
		display is not flooded).
		"""
		self._show_merge_list()
		msg = "Error: The above package list contains " + \
			"packages which cannot be installed " + \
			"at the same time on the same system."
		prefix = colorize("BAD", " * ")
		from textwrap import wrap
		portage.writemsg("\n", noiselevel=-1)
		for line in wrap(msg, 70):
			portage.writemsg(prefix + line + "\n", noiselevel=-1)

		# Map each package involved in a blocker conflict to the set
		# of (parent, atom) pairs that pulled it in.  This helps
		# troubleshoot blockers that don't solve automatically, where
		# the reasons are not apparent from the merge list display.
		conflict_pkgs = {}
		for blocker in blockers:
			for pkg in chain(self._blocked_pkgs.child_nodes(blocker), \
				self._blocker_parents.parent_nodes(blocker)):
				parent_atoms = self._parent_atoms.get(pkg)
				if not parent_atoms:
					# Fall back to the world-set atom, if any.
					world_atom = self._blocked_world_pkgs.get(pkg)
					if world_atom is not None:
						parent_atoms = set([("@world", world_atom)])
				if parent_atoms:
					conflict_pkgs[pkg] = parent_atoms

		if conflict_pkgs:
			# Reduce noise: drop packages whose every parent is
			# itself a conflict package.
			redundant = set()
			for pkg, parent_atoms in conflict_pkgs.items():
				has_outside_parent = any(parent not in conflict_pkgs
					for parent, atom in parent_atoms)
				if not has_outside_parent:
					redundant.add(pkg)
			for pkg in redundant:
				del conflict_pkgs[pkg]

		if conflict_pkgs:
			indent = "  "
			# Max number of parents shown, to avoid flooding the display.
			max_parents = 3
			msg = ["\n"]
			for pkg, parent_atoms in conflict_pkgs.items():

				shown_parents = set()

				# Prefer parents that are not themselves involved in
				# a conflict, then fill up to the cap with the rest.
				for parent_atom in parent_atoms:
					if len(shown_parents) >= max_parents:
						break
					parent, atom = parent_atom
					if parent not in conflict_pkgs:
						shown_parents.add(parent_atom)

				for parent_atom in parent_atoms:
					if len(shown_parents) >= max_parents:
						break
					shown_parents.add(parent_atom)

				omitted_parents = len(parent_atoms) - len(shown_parents)
				msg.append(indent + "%s pulled in by\n" % pkg)

				for parent_atom in shown_parents:
					parent, atom = parent_atom
					msg.append(2*indent)
					if isinstance(parent,
						(PackageArg, AtomArg)):
						# For PackageArg and AtomArg types, it's
						# redundant to display the atom attribute.
						msg.append(str(parent))
					else:
						# Display the specific atom from SetArg or
						# Package types.
						msg.append("%s required by %s" % (atom, parent))
					msg.append("\n")

				if omitted_parents:
					msg.append(2*indent)
					msg.append("(and %d more)\n" % omitted_parents)

				msg.append("\n")

			sys.stderr.write("".join(msg))
			sys.stderr.flush()

		if "--quiet" not in self.myopts:
			show_blocker_docs_link()
3393
3394
	def display(self, mylist, favorites=[], verbosity=None):
		"""
		Render the merge list to stdout: one line per package or
		blocker, with status letters, USE-flag changes, fetch sizes
		and repository info (at verbosity 3), counters, and optional
		changelog output.

		@param mylist: merge list nodes (Package/Blocker instances,
			or (node, depth, ordered) tuples after tree expansion)
		@param favorites: atoms that may be added to the world set
			(NOTE(review): mutable default, but it is only read —
			passed to InternalPackageSet — never mutated)
		@param verbosity: 1 (quiet), 2 (normal) or 3 (verbose);
			derived from --quiet/--verbose when None
		@rtype: int
		@return: os.EX_OK on success, or 1 when an invalid
			dependency string notice was displayed
		"""

		# This is used to prevent display_problems() from
		# redundantly displaying this exact same merge list
		# again via _show_merge_list().
		self._displayed_list = mylist

		if verbosity is None:
			verbosity = ("--quiet" in self.myopts and 1 or \
				"--verbose" in self.myopts and 3 or 2)
		favorites_set = InternalPackageSet(favorites)
		oneshot = "--oneshot" in self.myopts or \
			"--onlydeps" in self.myopts
		columns = "--columns" in self.myopts
		changelogs=[]
		# p accumulates output lines / (myprint, verboseadd, repoadd)
		# tuples; blockers accumulates unsatisfied blocker lines that
		# are printed after the package list.
		p=[]
		blockers = []

		counters = PackageCounters()

		# In quiet mode the USE string builder is a no-op; otherwise
		# it colorizes each flag according to enabled/disabled/new/
		# removed state relative to the previously installed version.
		if verbosity == 1 and "--verbose" not in self.myopts:
			def create_use_string(*args):
				return ""
		else:
			def create_use_string(name, cur_iuse, iuse_forced, cur_use,
				old_iuse, old_use,
				is_new, reinst_flags,
				all_flags=(verbosity == 3 or "--quiet" in self.myopts),
				alphabetical=("--alphabetical" in self.myopts)):
				enabled = []
				if alphabetical:
					# With --alphabetical all three buckets alias the
					# same list so flags stay in sorted order.
					disabled = enabled
					removed = enabled
				else:
					disabled = []
					removed = []
				cur_iuse = set(cur_iuse)
				enabled_flags = cur_iuse.intersection(cur_use)
				removed_iuse = set(old_iuse).difference(cur_iuse)
				any_iuse = cur_iuse.union(old_iuse)
				any_iuse = list(any_iuse)
				any_iuse.sort()
				for flag in any_iuse:
					flag_str = None
					isEnabled = False
					reinst_flag = reinst_flags and flag in reinst_flags
					if flag in enabled_flags:
						isEnabled = True
						if is_new or flag in old_use and \
							(all_flags or reinst_flag):
							flag_str = red(flag)
						elif flag not in old_iuse:
							flag_str = yellow(flag) + "%*"
						elif flag not in old_use:
							flag_str = green(flag) + "*"
					elif flag in removed_iuse:
						if all_flags or reinst_flag:
							flag_str = yellow("-" + flag) + "%"
							if flag in old_use:
								flag_str += "*"
							flag_str = "(" + flag_str + ")"
							removed.append(flag_str)
						continue
					else:
						if is_new or flag in old_iuse and \
							flag not in old_use and \
							(all_flags or reinst_flag):
							flag_str = blue("-" + flag)
						elif flag not in old_iuse:
							flag_str = yellow("-" + flag)
							if flag not in iuse_forced:
								flag_str += "%"
						elif flag in old_use:
							flag_str = green("-" + flag) + "*"
					if flag_str:
						# Forced (use.force/use.mask) flags are shown
						# in parentheses.
						if flag in iuse_forced:
							flag_str = "(" + flag_str + ")"
						if isEnabled:
							enabled.append(flag_str)
						else:
							disabled.append(flag_str)

				if alphabetical:
					ret = " ".join(enabled)
				else:
					ret = " ".join(enabled + disabled + removed)
				if ret:
					ret = '%s="%s" ' % (name, ret)
				return ret

		repo_display = RepoDisplay(self.roots)

		tree_nodes = []
		display_list = []
		mygraph = self.digraph.copy()

		# If there are any Uninstall instances, add the corresponding
		# blockers to the digraph (useful for --tree display).

		executed_uninstalls = set(node for node in mylist \
			if isinstance(node, Package) and node.operation == "unmerge")

		for uninstall in self._blocker_uninstalls.leaf_nodes():
			uninstall_parents = \
				self._blocker_uninstalls.parent_nodes(uninstall)
			if not uninstall_parents:
				continue

			# Remove the corresponding "nomerge" node and substitute
			# the Uninstall node.
			inst_pkg = self._pkg_cache[
				("installed", uninstall.root, uninstall.cpv, "nomerge")]
			try:
				mygraph.remove(inst_pkg)
			except KeyError:
				pass

			try:
				inst_pkg_blockers = self._blocker_parents.child_nodes(inst_pkg)
			except KeyError:
				inst_pkg_blockers = []

			# Break the Package -> Uninstall edges.
			mygraph.remove(uninstall)

			# Resolution of a package's blockers
			# depends on its own uninstallation.
			for blocker in inst_pkg_blockers:
				mygraph.add(uninstall, blocker)

			# Expand Package -> Uninstall edges into
			# Package -> Blocker -> Uninstall edges.
			for blocker in uninstall_parents:
				mygraph.add(uninstall, blocker)
				for parent in self._blocker_parents.parent_nodes(blocker):
					if parent != inst_pkg:
						mygraph.add(blocker, parent)

			# If the uninstall task did not need to be executed because
			# of an upgrade, display Blocker -> Upgrade edges since the
			# corresponding Blocker -> Uninstall edges will not be shown.
			upgrade_node = \
				self._slot_pkg_map[uninstall.root].get(uninstall.slot_atom)
			if upgrade_node is not None and \
				uninstall not in executed_uninstalls:
				for blocker in uninstall_parents:
					mygraph.add(upgrade_node, blocker)

		# Expand mylist into (node, depth, ordered) tuples.  With
		# --tree, parents are walked to compute nesting depth;
		# otherwise everything stays at depth 0.
		unsatisfied_blockers = []
		i = 0
		depth = 0
		shown_edges = set()
		for x in mylist:
			if isinstance(x, Blocker) and not x.satisfied:
				unsatisfied_blockers.append(x)
				continue
			graph_key = x
			if "--tree" in self.myopts:
				depth = len(tree_nodes)
				while depth and graph_key not in \
					mygraph.child_nodes(tree_nodes[depth-1]):
						depth -= 1
				if depth:
					tree_nodes = tree_nodes[:depth]
					tree_nodes.append(graph_key)
					display_list.append((x, depth, True))
					shown_edges.add((graph_key, tree_nodes[depth-1]))
				else:
					traversed_nodes = set() # prevent endless circles
					traversed_nodes.add(graph_key)
					def add_parents(current_node, ordered):
						# Recursively prepend unordered parent rows so
						# the --tree display shows what pulled this
						# node in.
						parent_nodes = None
						# Do not traverse to parents if this node is
						# an argument or a direct member of a set that has
						# been specified as an argument (system or world).
						if current_node not in self._set_nodes:
							parent_nodes = mygraph.parent_nodes(current_node)
						if parent_nodes:
							child_nodes = set(mygraph.child_nodes(current_node))
							selected_parent = None
							# First, try to avoid a direct cycle.
							for node in parent_nodes:
								if not isinstance(node, (Blocker, Package)):
									continue
								if node not in traversed_nodes and \
									node not in child_nodes:
									edge = (current_node, node)
									if edge in shown_edges:
										continue
									selected_parent = node
									break
							if not selected_parent:
								# A direct cycle is unavoidable.
								for node in parent_nodes:
									if not isinstance(node, (Blocker, Package)):
										continue
									if node not in traversed_nodes:
										edge = (current_node, node)
										if edge in shown_edges:
											continue
										selected_parent = node
										break
							if selected_parent:
								shown_edges.add((current_node, selected_parent))
								traversed_nodes.add(selected_parent)
								add_parents(selected_parent, False)
						display_list.append((current_node,
							len(tree_nodes), ordered))
						tree_nodes.append(current_node)
					tree_nodes = []
					add_parents(graph_key, True)
			else:
				display_list.append((x, depth, True))
		mylist = display_list
		for x in unsatisfied_blockers:
			mylist.append((x, 0, True))

		# Prune duplicate / uninteresting "nomerge" rows, scanning
		# bottom-up so deletions don't disturb unvisited indices.
		last_merge_depth = 0
		for i in xrange(len(mylist)-1,-1,-1):
			graph_key, depth, ordered = mylist[i]
			if not ordered and depth == 0 and i > 0 \
				and graph_key == mylist[i-1][0] and \
				mylist[i-1][1] == 0:
				# An ordered node got a consecutive duplicate when the tree was
				# being filled in.
				del mylist[i]
				continue
			if ordered and graph_key[-1] != "nomerge":
				last_merge_depth = depth
				continue
			if depth >= last_merge_depth or \
				i < len(mylist) - 1 and \
				depth >= mylist[i+1][1]:
					del mylist[i]

		from portage import flatten
		from portage.dep import use_reduce, paren_reduce
		# files to fetch list - avoids counting a same file twice
		# in size display (verbose mode)
		myfetchlist=[]

		# Use this set to detect when all the "repoadd" strings are "[0]"
		# and disable the entire repo display in this case.
		repoadd_set = set()

		# Main rendering loop: one iteration per display row.
		for mylist_index in xrange(len(mylist)):
			x, depth, ordered = mylist[mylist_index]
			pkg_type = x[0]
			myroot = x[1]
			pkg_key = x[2]
			portdb = self.trees[myroot]["porttree"].dbapi
			bindb  = self.trees[myroot]["bintree"].dbapi
			vardb = self.trees[myroot]["vartree"].dbapi
			vartree = self.trees[myroot]["vartree"]
			pkgsettings = self.pkgsettings[myroot]

			fetch=" "
			indent = " " * depth

			if isinstance(x, Blocker):
				# Blocker row: "b" (satisfied) or "B" (unsatisfied).
				if x.satisfied:
					blocker_style = "PKG_BLOCKER_SATISFIED"
					addl = "%s  %s  " % (colorize(blocker_style, "b"), fetch)
				else:
					blocker_style = "PKG_BLOCKER"
					addl = "%s  %s  " % (colorize(blocker_style, "B"), fetch)
				if ordered:
					counters.blocks += 1
					if x.satisfied:
						counters.blocks_satisfied += 1
				resolved = portage.key_expand(
					str(x.atom).lstrip("!"), mydb=vardb, settings=pkgsettings)
				if "--columns" in self.myopts and "--quiet" in self.myopts:
					addl += " " + colorize(blocker_style, resolved)
				else:
					addl = "[%s %s] %s%s" % \
						(colorize(blocker_style, "blocks"),
						addl, indent, colorize(blocker_style, resolved))
				block_parents = self._blocker_parents.parent_nodes(x)
				block_parents = set([pnode[2] for pnode in block_parents])
				block_parents = ", ".join(block_parents)
				if resolved!=x[2]:
					addl += colorize(blocker_style,
						" (\"%s\" is blocking %s)") % \
						(str(x.atom).lstrip("!"), block_parents)
				else:
					addl += colorize(blocker_style,
						" (is blocking %s)") % block_parents
				if isinstance(x, Blocker) and x.satisfied:
					if columns:
						continue
					p.append(addl)
				else:
					blockers.append(addl)
			else:
				# Package row.
				pkg_status = x[3]
				pkg_merge = ordered and pkg_status == "merge"
				if not pkg_merge and pkg_status == "merge":
					pkg_status = "nomerge"
				built = pkg_type != "ebuild"
				installed = pkg_type == "installed"
				pkg = x
				metadata = pkg.metadata
				ebuild_path = None
				repo_name = metadata["repository"]
				if pkg_type == "ebuild":
					ebuild_path = portdb.findname(pkg_key)
					if not ebuild_path: # shouldn't happen
						raise portage.exception.PackageNotFound(pkg_key)
					# .../category/pkg/pkg-ver.ebuild -> repo root
					repo_path_real = os.path.dirname(os.path.dirname(
						os.path.dirname(ebuild_path)))
				else:
					repo_path_real = portdb.getRepositoryPath(repo_name)
				pkg_use = list(pkg.use.enabled)
				try:
					restrict = flatten(use_reduce(paren_reduce(
						pkg.metadata["RESTRICT"]), uselist=pkg_use))
				except portage.exception.InvalidDependString, e:
					if not pkg.installed:
						show_invalid_depstring_notice(x,
							pkg.metadata["RESTRICT"], str(e))
						del e
						return 1
					restrict = []
				# RESTRICT=fetch indicator: "F" (files missing) or
				# "f" (all files already fetched).
				if "ebuild" == pkg_type and x[3] != "nomerge" and \
					"fetch" in restrict:
					fetch = red("F")
					if ordered:
						counters.restrict_fetch += 1
					if portdb.fetch_check(pkg_key, pkg_use):
						fetch = green("f")
						if ordered:
							counters.restrict_fetch_satisfied += 1

				#we need to use "--emptytree" testing here rather than "empty" param testing because "empty"
				#param is used for -u, where you still *do* want to see when something is being upgraded.
				myoldbest = []
				myinslotlist = None
				installed_versions = vardb.match(portage.cpv_getkey(pkg_key))
				if vardb.cpv_exists(pkg_key):
					# Exact version already installed: reinstall ("R")
					# or uninstall.
					addl="  "+yellow("R")+fetch+"  "
					if ordered:
						if pkg_merge:
							counters.reinst += 1
						elif pkg_status == "uninstall":
							counters.uninst += 1
				# filter out old-style virtual matches
				elif installed_versions and \
					portage.cpv_getkey(installed_versions[0]) == \
					portage.cpv_getkey(pkg_key):
					myinslotlist = vardb.match(pkg.slot_atom)
					# If this is the first install of a new-style virtual, we
					# need to filter out old-style virtual matches.
					if myinslotlist and \
						portage.cpv_getkey(myinslotlist[0]) != \
						portage.cpv_getkey(pkg_key):
						myinslotlist = None
					if myinslotlist:
						myoldbest = myinslotlist[:]
						addl = "   " + fetch
						if not portage.dep.cpvequal(pkg_key,
							portage.best([pkg_key] + myoldbest)):
							# Downgrade in slot
							addl += turquoise("U")+blue("D")
							if ordered:
								counters.downgrades += 1
						else:
							# Update in slot
							addl += turquoise("U") + " "
							if ordered:
								counters.upgrades += 1
					else:
						# New slot, mark it new.
						addl = " " + green("NS") + fetch + "  "
						myoldbest = vardb.match(portage.cpv_getkey(pkg_key))
						if ordered:
							counters.newslot += 1

					if "--changelog" in self.myopts:
						inst_matches = vardb.match(pkg.slot_atom)
						if inst_matches:
							changelogs.extend(self.calc_changelog(
								portdb.findname(pkg_key),
								inst_matches[0], pkg_key))
				else:
					# Not installed at all: brand new ("N").
					addl = " " + green("N") + " " + fetch + "  "
					if ordered:
						counters.new += 1

				verboseadd = ""
				repoadd = None

				if True:
					# USE flag display
					forced_flags = set()
					pkgsettings.setcpv(pkg) # for package.use.{mask,force}
					forced_flags.update(pkgsettings.useforce)
					forced_flags.update(pkgsettings.usemask)

					cur_use = [flag for flag in pkg.use.enabled \
						if flag in pkg.iuse.all]
					cur_iuse = sorted(pkg.iuse.all)

					# Compare against the installed version in the
					# same slot when one exists, else against pkg
					# itself (which only matters for reinstalls).
					if myoldbest and myinslotlist:
						previous_cpv = myoldbest[0]
					else:
						previous_cpv = pkg.cpv
					if vardb.cpv_exists(previous_cpv):
						old_iuse, old_use = vardb.aux_get(
								previous_cpv, ["IUSE", "USE"])
						old_iuse = list(set(
							filter_iuse_defaults(old_iuse.split())))
						old_iuse.sort()
						old_use = old_use.split()
						is_new = False
					else:
						old_iuse = []
						old_use = []
						is_new = True

					old_use = [flag for flag in old_use if flag in old_iuse]

					use_expand = pkgsettings["USE_EXPAND"].lower().split()
					use_expand.sort()
					use_expand.reverse()
					use_expand_hidden = \
						pkgsettings["USE_EXPAND_HIDDEN"].lower().split()

					def map_to_use_expand(myvals, forcedFlags=False,
						removeHidden=True):
						# Split a flat flag list into per-USE_EXPAND
						# buckets plus a plain "USE" bucket; myvals is
						# consumed destructively for matched flags.
						ret = {}
						forced = {}
						for exp in use_expand:
							ret[exp] = []
							forced[exp] = set()
							for val in myvals[:]:
								if val.startswith(exp.lower()+"_"):
									if val in forced_flags:
										forced[exp].add(val[len(exp)+1:])
									ret[exp].append(val[len(exp)+1:])
									myvals.remove(val)
						ret["USE"] = myvals
						forced["USE"] = [val for val in myvals \
							if val in forced_flags]
						if removeHidden:
							for exp in use_expand_hidden:
								ret.pop(exp, None)
						if forcedFlags:
							return ret, forced
						return ret

					# Prevent USE_EXPAND_HIDDEN flags from being hidden if they
					# are the only thing that triggered reinstallation.
					reinst_flags_map = {}
					reinstall_for_flags = self._reinstall_nodes.get(pkg)
					reinst_expand_map = None
					if reinstall_for_flags:
						reinst_flags_map = map_to_use_expand(
							list(reinstall_for_flags), removeHidden=False)
						for k in list(reinst_flags_map):
							if not reinst_flags_map[k]:
								del reinst_flags_map[k]
						if not reinst_flags_map.get("USE"):
							reinst_expand_map = reinst_flags_map.copy()
							reinst_expand_map.pop("USE", None)
					if reinst_expand_map and \
						not set(reinst_expand_map).difference(
						use_expand_hidden):
						use_expand_hidden = \
							set(use_expand_hidden).difference(
							reinst_expand_map)

					cur_iuse_map, iuse_forced = \
						map_to_use_expand(cur_iuse, forcedFlags=True)
					cur_use_map = map_to_use_expand(cur_use)
					old_iuse_map = map_to_use_expand(old_iuse)
					old_use_map = map_to_use_expand(old_use)

					use_expand.sort()
					use_expand.insert(0, "USE")

					for key in use_expand:
						if key in use_expand_hidden:
							continue
						verboseadd += create_use_string(key.upper(),
							cur_iuse_map[key], iuse_forced[key],
							cur_use_map[key], old_iuse_map[key],
							old_use_map[key], is_new,
							reinst_flags_map.get(key))

				if verbosity == 3:
					# size verbose
					mysize=0
					if pkg_type == "ebuild" and pkg_merge:
						try:
							myfilesdict = portdb.getfetchsizes(pkg_key,
								useflags=pkg_use, debug=self.edebug)
						except portage.exception.InvalidDependString, e:
							src_uri = portdb.aux_get(pkg_key, ["SRC_URI"])[0]
							show_invalid_depstring_notice(x, src_uri, str(e))
							del e
							return 1
						if myfilesdict is None:
							myfilesdict="[empty/missing/bad digest]"
						else:
							# Only count each distfile once across the
							# whole merge list (see myfetchlist above).
							for myfetchfile in myfilesdict:
								if myfetchfile not in myfetchlist:
									mysize+=myfilesdict[myfetchfile]
									myfetchlist.append(myfetchfile)
							if ordered:
								counters.totalsize += mysize
						verboseadd += format_size(mysize)

					# overlay verbose
					# assign index for a previous version in the same slot
					has_previous = False
					repo_name_prev = None
					slot_atom = "%s:%s" % (portage.dep_getkey(pkg_key),
						metadata["SLOT"])
					slot_matches = vardb.match(slot_atom)
					if slot_matches:
						has_previous = True
						repo_name_prev = vardb.aux_get(slot_matches[0],
							["repository"])[0]

					# now use the data to generate output
					if pkg.installed or not has_previous:
						repoadd = repo_display.repoStr(repo_path_real)
					else:
						repo_path_prev = None
						if repo_name_prev:
							repo_path_prev = portdb.getRepositoryPath(
								repo_name_prev)
						if repo_path_prev == repo_path_real:
							repoadd = repo_display.repoStr(repo_path_real)
						else:
							# Repository changed between the installed
							# and the new version: show "old=>new".
							repoadd = "%s=>%s" % (
								repo_display.repoStr(repo_path_prev),
								repo_display.repoStr(repo_path_real))
					if repoadd:
						repoadd_set.add(repoadd)

				# xs = [cp, version, revision-suffix]; "-r0" is
				# suppressed from the display.
				xs = [portage.cpv_getkey(pkg_key)] + \
					list(portage.catpkgsplit(pkg_key)[2:])
				if xs[2] == "r0":
					xs[2] = ""
				else:
					xs[2] = "-" + xs[2]

				mywidth = 130
				if "COLUMNWIDTH" in self.settings:
					try:
						mywidth = int(self.settings["COLUMNWIDTH"])
					except ValueError, e:
						portage.writemsg("!!! %s\n" % str(e), noiselevel=-1)
						portage.writemsg(
							"!!! Unable to parse COLUMNWIDTH='%s'\n" % \
							self.settings["COLUMNWIDTH"], noiselevel=-1)
						del e
				oldlp = mywidth - 30
				newlp = oldlp - 30

				# Convert myoldbest from a list to a string.
				if not myoldbest:
					myoldbest = ""
				else:
					for pos, key in enumerate(myoldbest):
						key = portage.catpkgsplit(key)[2] + \
							"-" + portage.catpkgsplit(key)[3]
						if key[-3:] == "-r0":
							key = key[:-3]
						myoldbest[pos] = key
					myoldbest = blue("["+", ".join(myoldbest)+"]")

				pkg_cp = xs[0]
				root_config = self.roots[myroot]
				system_set = root_config.sets["system"]
				world_set  = root_config.sets["world"]

				# Determine system/world membership for colorization,
				# possibly adding the package to world as a side
				# effect (create_world_atom).
				pkg_system = False
				pkg_world = False
				try:
					pkg_system = system_set.findAtomForPackage(pkg)
					pkg_world  = world_set.findAtomForPackage(pkg)
					if not (oneshot or pkg_world) and \
						myroot == self.target_root and \
						favorites_set.findAtomForPackage(pkg):
						# Maybe it will be added to world now.
						if create_world_atom(pkg, favorites_set, root_config):
							pkg_world = True
				except portage.exception.InvalidDependString:
					# This is reported elsewhere if relevant.
					pass

				def pkgprint(pkg_str):
					# Colorize according to merge/uninstall/nomerge
					# state and system/world membership.
					if pkg_merge:
						if pkg_system:
							return colorize("PKG_MERGE_SYSTEM", pkg_str)
						elif pkg_world:
							return colorize("PKG_MERGE_WORLD", pkg_str)
						else:
							return colorize("PKG_MERGE", pkg_str)
					elif pkg_status == "uninstall":
						return colorize("PKG_UNINSTALL", pkg_str)
					else:
						if pkg_system:
							return colorize("PKG_NOMERGE_SYSTEM", pkg_str)
						elif pkg_world:
							return colorize("PKG_NOMERGE_WORLD", pkg_str)
						else:
							return colorize("PKG_NOMERGE", pkg_str)

				try:
					properties = flatten(use_reduce(paren_reduce(
						pkg.metadata["PROPERTIES"]), uselist=pkg.use.enabled))
				except portage.exception.InvalidDependString, e:
					if not pkg.installed:
						show_invalid_depstring_notice(pkg,
							pkg.metadata["PROPERTIES"], str(e))
						del e
						return 1
					properties = []
				interactive = "interactive" in properties
				if interactive and pkg.operation == "merge":
					addl = colorize("WARN", "I") + addl[1:]
					if ordered:
						counters.interactive += 1

				# Assemble the final line; the branches differ only in
				# layout (--columns/--quiet) and whether a non-default
				# ROOT ("to <root>") must be shown.
				if x[1]!="/":
					if myoldbest:
						myoldbest +=" "
					if "--columns" in self.myopts:
						if "--quiet" in self.myopts:
							myprint=addl+" "+indent+pkgprint(pkg_cp)
							myprint=myprint+darkblue(" "+xs[1]+xs[2])+" "
							myprint=myprint+myoldbest
							myprint=myprint+darkgreen("to "+x[1])
							verboseadd = None
						else:
							if not pkg_merge:
								myprint = "[%s] %s%s" % \
									(pkgprint(pkg_status.ljust(13)),
									indent, pkgprint(pkg.cp))
							else:
								myprint = "[%s %s] %s%s" % \
									(pkgprint(pkg.type_name), addl,
									indent, pkgprint(pkg.cp))
							if (newlp-nc_len(myprint)) > 0:
								myprint=myprint+(" "*(newlp-nc_len(myprint)))
							myprint=myprint+"["+darkblue(xs[1]+xs[2])+"] "
							if (oldlp-nc_len(myprint)) > 0:
								myprint=myprint+" "*(oldlp-nc_len(myprint))
							myprint=myprint+myoldbest
							myprint += darkgreen("to " + pkg.root)
					else:
						if not pkg_merge:
							myprint = "[%s] " % pkgprint(pkg_status.ljust(13))
						else:
							myprint = "[%s %s] " % (pkgprint(pkg_type), addl)
						myprint += indent + pkgprint(pkg_key) + " " + \
							myoldbest + darkgreen("to " + myroot)
				else:
					if "--columns" in self.myopts:
						if "--quiet" in self.myopts:
							myprint=addl+" "+indent+pkgprint(pkg_cp)
							myprint=myprint+" "+green(xs[1]+xs[2])+" "
							myprint=myprint+myoldbest
							verboseadd = None
						else:
							if not pkg_merge:
								myprint = "[%s] %s%s" % \
									(pkgprint(pkg_status.ljust(13)),
									indent, pkgprint(pkg.cp))
							else:
								myprint = "[%s %s] %s%s" % \
									(pkgprint(pkg.type_name), addl,
									indent, pkgprint(pkg.cp))
							if (newlp-nc_len(myprint)) > 0:
								myprint=myprint+(" "*(newlp-nc_len(myprint)))
							myprint=myprint+green(" ["+xs[1]+xs[2]+"] ")
							if (oldlp-nc_len(myprint)) > 0:
								myprint=myprint+(" "*(oldlp-nc_len(myprint)))
							myprint += myoldbest
					else:
						if not pkg_merge:
							myprint = "[%s] %s%s %s" % \
								(pkgprint(pkg_status.ljust(13)),
								indent, pkgprint(pkg.cpv),
								myoldbest)
						else:
							myprint = "[%s %s] %s%s %s" % \
								(pkgprint(pkg_type), addl, indent,
								pkgprint(pkg.cpv), myoldbest)

				if columns and pkg.operation == "uninstall":
					continue
				p.append((myprint, verboseadd, repoadd))

				# Warn about the mid-merge portage self-reload when a
				# new portage version appears before the end of the
				# list.  (NOTE(review): "--quiet" is tested twice in
				# this condition — redundant but harmless.)
				if "--tree" not in self.myopts and \
					"--quiet" not in self.myopts and \
					not self._opts_no_restart.intersection(self.myopts) and \
					pkg.root == self._running_root.root and \
					portage.match_from_list(
					portage.const.PORTAGE_PACKAGE_ATOM, [pkg]) and \
					not vardb.cpv_exists(pkg.cpv) and \
					"--quiet" not in self.myopts:
						if mylist_index < len(mylist) - 1:
							p.append(colorize("WARN", "*** Portage will stop merging at this point and reload itself,"))
							p.append(colorize("WARN", "    then resume the merge."))

		# Flush the accumulated rows, appending USE/size and repo
		# annotations where present.
		out = sys.stdout
		show_repos = repoadd_set and repoadd_set != set(["0"])

		for x in p:
			if isinstance(x, basestring):
				out.write("%s\n" % (x,))
				continue

			myprint, verboseadd, repoadd = x

			if verboseadd:
				myprint += " " + verboseadd

			if show_repos and repoadd:
				myprint += " " + teal("[%s]" % repoadd)

			out.write("%s\n" % (myprint,))

		for x in blockers:
			print x

		if verbosity == 3:
			print
			print counters
			if show_repos:
				sys.stdout.write(str(repo_display))

		if "--changelog" in self.myopts:
			print
			for revision,text in changelogs:
				print bold('*'+revision)
				sys.stdout.write(text)

		sys.stdout.flush()
		return os.EX_OK
4139
4140
	def display_problems(self):
		"""
		Report dependency graph problems (slot collisions, masked
		installed packages, world file issues, etc).

		display() calls this internally so the problems appear after
		the merge list, where they are most likely to be seen; callers
		that are not going to call display() should invoke this method
		themselves so the user is still notified of graph problems.

		All output goes to stderr, except for unsatisfied dependencies
		which go to stdout for parsing by programs such as autounmask.
		"""

		# show_masked_packages() normally writes to stdout (programs
		# such as autounmask parse that output when emerge bails out),
		# but when it runs for installed packages here the message is
		# really a warning, so temporarily route stdout through stderr.
		# TODO: Fix the output code so there's a cleaner way to
		# redirect everything to stderr.
		sys.stdout.flush()
		sys.stderr.flush()
		saved_stdout = sys.stdout
		sys.stdout = sys.stderr
		try:
			self._display_problems()
		finally:
			sys.stdout = saved_stdout
			sys.stdout.flush()
			sys.stderr.flush()

		# Unsatisfied deps go to stdout for parsers like autounmask.
		for pargs, kwargs in self._unsatisfied_deps_for_display:
			self._show_unsatisfied_dep(*pargs, **kwargs)
4174
4175
	def _display_problems(self):
		"""
		Write accumulated graph problems to the current output streams:
		circular deps, unsatisfied blockers or slot collisions, world
		file problems, package.provided conflicts, and masked installed
		packages. Called by display_problems(), which handles the
		stdout/stderr redirection around this method.
		"""
		if self._circular_deps_for_display is not None:
			self._show_circular_deps(
				self._circular_deps_for_display)

		# The user is only notified of a slot conflict if
		# there are no unresolvable blocker conflicts.
		if self._unsatisfied_blockers_for_display is not None:
			self._show_unsatisfied_blockers(
				self._unsatisfied_blockers_for_display)
		else:
			self._show_slot_collision_notice()

		# TODO: Add generic support for "set problem" handlers so that
		# the below warnings aren't special cases for world only.

		if self._missing_args:
			world_problems = False
			if "world" in self._sets:
				# Filter out indirect members of world (from nested sets)
				# since only direct members of world are desired here.
				world_set = self.roots[self.target_root].sets["world"]
				for arg, atom in self._missing_args:
					if arg.name == "world" and atom in world_set:
						world_problems = True
						break

			if world_problems:
				sys.stderr.write("\n!!! Problems have been " + \
					"detected with your world file\n")
				sys.stderr.write("!!! Please run " + \
					green("emaint --check world")+"\n\n")

		if self._missing_args:
			sys.stderr.write("\n" + colorize("BAD", "!!!") + \
				" Ebuilds for the following packages are either all\n")
			sys.stderr.write(colorize("BAD", "!!!") + \
				" masked or don't exist:\n")
			sys.stderr.write(" ".join(str(atom) for arg, atom in \
				self._missing_args) + "\n")

		if self._pprovided_args:
			# Group the offending (arg, atom) pairs by the set (or plain
			# "args") that pulled them in, so each can be reported once
			# with all of its referrers.
			arg_refs = {}
			for arg, atom in self._pprovided_args:
				if isinstance(arg, SetArg):
					parent = arg.name
					arg_atom = (atom, atom)
				else:
					parent = "args"
					arg_atom = (arg.arg, atom)
				refs = arg_refs.setdefault(arg_atom, [])
				if parent not in refs:
					refs.append(parent)
			msg = []
			msg.append(bad("\nWARNING: "))
			if len(self._pprovided_args) > 1:
				msg.append("Requested packages will not be " + \
					"merged because they are listed in\n")
			else:
				msg.append("A requested package will not be " + \
					"merged because it is listed in\n")
			msg.append("package.provided:\n\n")
			problems_sets = set()
			for (arg, atom), refs in arg_refs.iteritems():
				ref_string = ""
				if refs:
					problems_sets.update(refs)
					refs.sort()
					ref_string = ", ".join(["'%s'" % name for name in refs])
					ref_string = " pulled in by " + ref_string
				msg.append("  %s%s\n" % (colorize("INFORM", str(arg)), ref_string))
			msg.append("\n")
			if "world" in problems_sets:
				msg.append("This problem can be solved in one of the following ways:\n\n")
				msg.append("  A) Use emaint to clean offending packages from world (if not installed).\n")
				msg.append("  B) Uninstall offending packages (cleans them from world).\n")
				msg.append("  C) Remove offending entries from package.provided.\n\n")
				msg.append("The best course of action depends on the reason that an offending\n")
				msg.append("package.provided entry exists.\n\n")
			sys.stderr.write("".join(msg))

		# Collect masking reasons for installed packages that are now
		# masked, then report them all at once below.
		masked_packages = []
		for pkg in self._masked_installed:
			root_config = pkg.root_config
			pkgsettings = self.pkgsettings[pkg.root]
			mreasons = get_masking_status(pkg, pkgsettings, root_config)
			masked_packages.append((root_config, pkgsettings,
				pkg.cpv, pkg.metadata, mreasons))
		if masked_packages:
			sys.stderr.write("\n" + colorize("BAD", "!!!") + \
				" The following installed packages are masked:\n")
			show_masked_packages(masked_packages)
			show_mask_docs()
			print
4269
4270
	def calc_changelog(self,ebuildpath,current,next):
		"""
		Return the ChangeLog entries between the installed version and
		the version about to be merged.

		@param ebuildpath: path to the ebuild being merged; its
			directory is expected to contain the ChangeLog file
		@param current: cpv of the currently installed version
		@param next: cpv of the version about to be merged
		@rtype: list
		@returns: (version, text) tuples for the relevant ChangeLog
			entries, or [] if nothing can be shown
		"""
		if ebuildpath is None or not os.path.exists(ebuildpath):
			return []
		# Reduce both cpvs to version(-revision) strings and strip a
		# redundant -r0 suffix so they match ChangeLog section headers.
		current = '-'.join(portage.catpkgsplit(current)[1:])
		if current.endswith('-r0'):
			current = current[:-3]
		next = '-'.join(portage.catpkgsplit(next)[1:])
		if next.endswith('-r0'):
			next = next[:-3]
		changelogpath = os.path.join(os.path.split(ebuildpath)[0],'ChangeLog')
		try:
			# Close the file explicitly instead of leaking the handle.
			changelog_file = open(changelogpath)
			try:
				changelog = changelog_file.read()
			finally:
				changelog_file.close()
		except EnvironmentError:
			# A missing or unreadable ChangeLog is non-fatal: show
			# nothing. Catching only EnvironmentError avoids swallowing
			# KeyboardInterrupt and the like, which the previous bare
			# except (with a SystemExit re-raise workaround) did.
			return []
		divisions = self.find_changelog_tags(changelog)
		# Skip entries for all revisions above the one we are about to emerge.
		for i in range(len(divisions)):
			if divisions[i][0]==next:
				divisions = divisions[i:]
				break
		# Find out how many entries we are going to display: everything
		# newer than the installed revision.
		for i in range(len(divisions)):
			if divisions[i][0]==current:
				divisions = divisions[:i]
				break
		else:
			# Couldn't find the current revision in the list; display nothing.
			return []
		return divisions
4303
4304
	def find_changelog_tags(self,changelog):
		"""
		Split a ChangeLog into per-version sections.

		@param changelog: full text of a ChangeLog file
		@rtype: list
		@returns: (version, text) tuples in file order, where version
			has any trailing '.ebuild' and '-r0' suffixes stripped
		"""
		divs = []
		release = None
		# Compile the section-header pattern once instead of re-scanning
		# the uncompiled pattern on every loop iteration.
		header_re = re.compile(
			r'^\*\ ?([-a-zA-Z0-9_.+]*)(?:\ .*)?\n', re.M)
		while 1:
			match = header_re.search(changelog)
			if match is None:
				# No further headers: the remainder belongs to the
				# last release seen (if any).
				if release is not None:
					divs.append((release,changelog))
				return divs
			if release is not None:
				# Text up to this header belongs to the previous release.
				divs.append((release,changelog[:match.start()]))
			changelog = changelog[match.end():]
			release = match.group(1)
			if release.endswith('.ebuild'):
				release = release[:-7]
			if release.endswith('-r0'):
				release = release[:-3]
4321
4322
	def saveNomergeFavorites(self):
		"""Find atoms in favorites that are not in the mergelist and add them
		to the world file if necessary.

		This is skipped entirely for modes that should not modify the
		world file (pretend/fetch/oneshot/etc). Nested sets referenced
		by the arguments are also recorded (as set:<name> entries)."""
		for x in ("--buildpkgonly", "--fetchonly", "--fetch-all-uri",
			"--oneshot", "--onlydeps", "--pretend"):
			if x in self.myopts:
				return
		root_config = self.roots[self.target_root]
		world_set = root_config.sets["world"]

		# Lock the world file while we read-modify-write it, when the
		# set implementation supports locking.
		world_locked = False
		if hasattr(world_set, "lock"):
			world_set.lock()
			world_locked = True

		if hasattr(world_set, "load"):
			world_set.load() # maybe it's changed on disk

		args_set = self._sets["args"]
		portdb = self.trees[self.target_root]["porttree"].dbapi
		added_favorites = set()
		# Only "nomerge" nodes are considered here; merged packages are
		# handled elsewhere.
		for x in self._set_nodes:
			pkg_type, root, pkg_key, pkg_status = x
			if pkg_status != "nomerge":
				continue

			try:
				myfavkey = create_world_atom(x, args_set, root_config)
				if myfavkey:
					if myfavkey in added_favorites:
						continue
					added_favorites.add(myfavkey)
			except portage.exception.InvalidDependString, e:
				# Bad PROVIDE metadata in an installed package: warn
				# and keep going rather than aborting the save.
				writemsg("\n\n!!! '%s' has invalid PROVIDE: %s\n" % \
					(pkg_key, str(e)), noiselevel=-1)
				writemsg("!!! see '%s'\n\n" % os.path.join(
					root, portage.VDB_PATH, pkg_key, "PROVIDE"), noiselevel=-1)
				del e
		# Also record any named sets used as arguments (except the
		# special "args"/"world" sets and non-world-candidate sets).
		all_added = []
		for k in self._sets:
			if k in ("args", "world") or not root_config.sets[k].world_candidate:
				continue
			s = SETPREFIX + k
			if s in world_set:
				continue
			all_added.append(SETPREFIX + k)
		all_added.extend(added_favorites)
		all_added.sort()
		for a in all_added:
			print ">>> Recording %s in \"world\" favorites file..." % \
				colorize("INFORM", str(a))
		if all_added:
			world_set.update(all_added)

		if world_locked:
			world_set.unlock()
4378
4379
	def loadResumeCommand(self, resume_data, skip_masked=True,
		skip_missing=True):
		"""
		Add a resume command to the graph and validate it in the process.  This
		will raise a PackageNotFound exception if a package is not available.

		@param resume_data: the "resume" structure from mtimedb; its
			"mergelist" is expected to hold [pkg_type, root, cpv, action]
			entries
		@param skip_masked: if True, newly-masked packages are collected
			and reported via an UnsatisfiedResumeDep exception instead of
			being queued for display
		@param skip_missing: if True, silently drop mergelist entries
			whose metadata can no longer be fetched
		@rtype: bool
		@returns: True if the resume list was successfully loaded into
			the graph, False on invalid input or graph-creation failure
		"""

		if not isinstance(resume_data, dict):
			return False

		mergelist = resume_data.get("mergelist")
		if not isinstance(mergelist, list):
			mergelist = []

		fakedb = self.mydbapi
		trees = self.trees
		serialized_tasks = []
		masked_tasks = []
		for x in mergelist:
			# Ignore malformed entries rather than failing the resume.
			if not (isinstance(x, list) and len(x) == 4):
				continue
			pkg_type, myroot, pkg_key, action = x
			if pkg_type not in self.pkg_tree_map:
				continue
			if action != "merge":
				continue
			tree_type = self.pkg_tree_map[pkg_type]
			mydb = trees[myroot][tree_type].dbapi
			db_keys = list(self._trees_orig[myroot][
				tree_type].dbapi._aux_cache_keys)
			try:
				metadata = izip(db_keys, mydb.aux_get(pkg_key, db_keys))
			except KeyError:
				# It does not exist or it is corrupt.
				if action == "uninstall":
					continue
				if skip_missing:
					# TODO: log these somewhere
					continue
				raise portage.exception.PackageNotFound(pkg_key)
			installed = action == "uninstall"
			built = pkg_type != "ebuild"
			root_config = self.roots[myroot]
			pkg = Package(built=built, cpv=pkg_key,
				installed=installed, metadata=metadata,
				operation=action, root_config=root_config,
				type_name=pkg_type)
			if pkg_type == "ebuild":
				# Re-evaluate USE/CHOST for ebuilds against the current
				# configuration instead of trusting stale resume data.
				pkgsettings = self.pkgsettings[myroot]
				pkgsettings.setcpv(pkg)
				pkg.metadata["USE"] = pkgsettings["PORTAGE_USE"]
				pkg.metadata['CHOST'] = pkgsettings.get('CHOST', '')
			self._pkg_cache[pkg] = pkg

			root_config = self.roots[pkg.root]
			if "merge" == pkg.operation and \
				not visible(root_config.settings, pkg):
				if skip_masked:
					masked_tasks.append(Dependency(root=pkg.root, parent=pkg))
				else:
					self._unsatisfied_deps_for_display.append(
						((pkg.root, "="+pkg.cpv), {"myparent":None}))

			fakedb[myroot].cpv_inject(pkg)
			serialized_tasks.append(pkg)
			self.spinner.update()

		if self._unsatisfied_deps_for_display:
			return False

		if not serialized_tasks or "--nodeps" in self.myopts:
			# Nothing to re-resolve: reuse the task list as-is.
			self._serialized_tasks_cache = serialized_tasks
			self._scheduler_graph = self.digraph
		else:
			self._select_package = self._select_pkg_from_graph
			self.myparams.add("selective")
			# Always traverse deep dependencies in order to account for
			# potentially unsatisfied dependencies of installed packages.
			# This is necessary for correct --keep-going or --resume operation
			# in case a package from a group of circularly dependent packages
			# fails. In this case, a package which has recently been installed
			# may have an unsatisfied circular dependency (pulled in by
			# PDEPEND, for example). So, even though a package is already
			# installed, it may not have all of it's dependencies satisfied, so
			# it may not be usable. If such a package is in the subgraph of
			# deep depenedencies of a scheduled build, that build needs to
			# be cancelled. In order for this type of situation to be
			# recognized, deep traversal of dependencies is required.
			self.myparams.add("deep")

			favorites = resume_data.get("favorites")
			args_set = self._sets["args"]
			if isinstance(favorites, list):
				args = self._load_favorites(favorites)
			else:
				args = []

			for task in serialized_tasks:
				if isinstance(task, Package) and \
					task.operation == "merge":
					if not self._add_pkg(task, None):
						return False

			# Packages for argument atoms need to be explicitly
			# added via _add_pkg() so that they are included in the
			# digraph (needed at least for --tree display).
			for arg in args:
				for atom in arg.set:
					pkg, existing_node = self._select_package(
						arg.root_config.root, atom)
					if existing_node is None and \
						pkg is not None:
						if not self._add_pkg(pkg, Dependency(atom=atom,
							root=pkg.root, parent=arg)):
							return False

			# Allow unsatisfied deps here to avoid showing a masking
			# message for an unsatisfied dep that isn't necessarily
			# masked.
			if not self._create_graph(allow_unsatisfied=True):
				return False

			unsatisfied_deps = []
			for dep in self._unsatisfied_deps:
				if not isinstance(dep.parent, Package):
					continue
				if dep.parent.operation == "merge":
					unsatisfied_deps.append(dep)
					continue

				# For unsatisfied deps of installed packages, only account for
				# them if they are in the subgraph of dependencies of a package
				# which is scheduled to be installed.
				unsatisfied_install = False
				traversed = set()
				dep_stack = self.digraph.parent_nodes(dep.parent)
				while dep_stack:
					node = dep_stack.pop()
					if not isinstance(node, Package):
						continue
					if node.operation == "merge":
						unsatisfied_install = True
						break
					if node in traversed:
						continue
					traversed.add(node)
					dep_stack.extend(self.digraph.parent_nodes(node))

				if unsatisfied_install:
					unsatisfied_deps.append(dep)

			if masked_tasks or unsatisfied_deps:
				# This probably means that a required package
				# was dropped via --skipfirst. It makes the
				# resume list invalid, so convert it to a
				# UnsatisfiedResumeDep exception.
				raise self.UnsatisfiedResumeDep(self,
					masked_tasks + unsatisfied_deps)
			self._serialized_tasks_cache = None
			try:
				self.altlist()
			except self._unknown_internal_error:
				return False

		return True
4544
4545
	def _load_favorites(self, favorites):
		"""
		Rebuild DependencyArg instances from a favorites list saved by
		a previous select_files() call, so that Package instances can
		be matched with DependencyArg instances during graph creation.
		"""
		root_config = self.roots[self.target_root]
		get_set_atoms = root_config.setconfig.getSetAtoms
		known_sets = root_config.sets
		dep_args = []
		for fav in favorites:
			if not isinstance(fav, basestring):
				continue
			if fav in ("system", "world"):
				fav = SETPREFIX + fav
			if not fav.startswith(SETPREFIX):
				# Plain atom argument.
				if portage.isvalidatom(fav):
					dep_args.append(AtomArg(arg=fav, atom=fav,
						root_config=root_config))
				continue
			set_name = fav[len(SETPREFIX):]
			if set_name not in known_sets or set_name in self._sets:
				continue
			# Recursively expand sets so that containment tests in
			# self._get_parent_sets() properly match atoms in nested
			# sets (like if world contains system).
			expanded = InternalPackageSet(
				initial_atoms=get_set_atoms(set_name))
			self._sets[set_name] = expanded
			dep_args.append(SetArg(arg=fav, set=expanded,
				root_config=root_config))

		self._set_args(dep_args)
		return dep_args
4585
4586
	class UnsatisfiedResumeDep(portage.exception.PortageException):
		"""
		A dependency of a resume list is not installed. This
		can occur when a required package is dropped from the
		merge list via --skipfirst.
		"""
		def __init__(self, depgraph, value):
			# value is stored by PortageException and read back as
			# e.value by resume_depgraph() (a list of Dependency
			# objects collected in loadResumeCommand()).
			portage.exception.PortageException.__init__(self, value)
			# Keep a reference to the depgraph so handlers can inspect
			# its digraph when pruning the mergelist.
			self.depgraph = depgraph
4595
4596
	class _internal_exception(portage.exception.PortageException):
		"""
		Base class for exceptions the depgraph uses internally for
		control flow (see the subclasses below); these are not meant
		to escape to callers.
		"""
		def __init__(self, value=""):
			portage.exception.PortageException.__init__(self, value)
4599
4600
	class _unknown_internal_error(_internal_exception):
		"""
		Used by the depgraph internally to terminate graph creation.
		The specific reason for the failure should already have been
		dumped to stderr; unfortunately, the exact reason for the
		failure may not be known.
		"""
4607
4608
	class _serialize_tasks_retry(_internal_exception):
		"""
		Raised by the _serialize_tasks() method when it needs to be
		called again for some reason. The only case it's currently
		used for is when neglected dependencies need to be added to
		the graph in order to avoid making a potentially unsafe
		decision.
		"""
4615
4616
	class _dep_check_composite_db(portage.dbapi):
		"""
		A dbapi-like interface that is optimized for use in dep_check() calls.
		This is built on top of the existing depgraph package selection logic.
		Some packages that have been added to the graph may be masked from this
		view in order to influence the atom preference selection that occurs
		via dep_check().
		"""
		def __init__(self, depgraph, root):
			portage.dbapi.__init__(self)
			self._depgraph = depgraph
			self._root = root
			# Per-atom match() results; invalidated via _clear_cache().
			self._match_cache = {}
			# Maps cpv -> Package for cpvs returned by match(), so
			# aux_get() can serve metadata without another lookup.
			self._cpv_pkg_map = {}

		def _clear_cache(self):
			# Drop both caches together so they stay consistent.
			self._match_cache.clear()
			self._cpv_pkg_map.clear()

		def match(self, atom):
			"""
			Return the list of visible cpvs matching atom, using the
			depgraph's package selection (highest available plus any
			other matching slots already known). Results are cached
			per atom; a copy is returned so callers can't mutate the
			cache.
			"""
			ret = self._match_cache.get(atom)
			if ret is not None:
				return ret[:]
			orig_atom = atom
			if "/" not in atom:
				# Old installed packages may use category-less atoms.
				atom = self._dep_expand(atom)
			pkg, existing = self._depgraph._select_package(self._root, atom)
			if not pkg:
				ret = []
			else:
				# Return the highest available from select_package() as well as
				# any matching slots in the graph db.
				slots = set()
				slots.add(pkg.metadata["SLOT"])
				atom_cp = portage.dep_getkey(atom)
				if pkg.cp.startswith("virtual/"):
					# For new-style virtual lookahead that occurs inside
					# dep_check(), examine all slots. This is needed
					# so that newer slots will not unnecessarily be pulled in
					# when a satisfying lower slot is already installed. For
					# example, if virtual/jdk-1.4 is satisfied via kaffe then
					# there's no need to pull in a newer slot to satisfy a
					# virtual/jdk dependency.
					for db, pkg_type, built, installed, db_keys in \
						self._depgraph._filtered_trees[self._root]["dbs"]:
						for cpv in db.match(atom):
							if portage.cpv_getkey(cpv) != pkg.cp:
								continue
							slots.add(db.aux_get(cpv, ["SLOT"])[0])
				ret = []
				if self._visible(pkg):
					self._cpv_pkg_map[pkg.cpv] = pkg
					ret.append(pkg.cpv)
				# The selected package's slot was handled above; now try
				# one candidate per remaining slot.
				slots.remove(pkg.metadata["SLOT"])
				while slots:
					slot_atom = "%s:%s" % (atom_cp, slots.pop())
					pkg, existing = self._depgraph._select_package(
						self._root, slot_atom)
					if not pkg:
						continue
					if not self._visible(pkg):
						continue
					self._cpv_pkg_map[pkg.cpv] = pkg
					ret.append(pkg.cpv)
				if ret:
					self._cpv_sort_ascending(ret)
			self._match_cache[orig_atom] = ret
			return ret[:]

		def _visible(self, pkg):
			"""
			Decide whether pkg should be exposed through this view.
			Installed packages are hidden when not selected by an
			argument (outside selective mode) or no longer visible;
			packages that would conflict with the slot already chosen
			in the graph are hidden as well.
			"""
			if pkg.installed and "selective" not in self._depgraph.myparams:
				try:
					arg = self._depgraph._iter_atoms_for_pkg(pkg).next()
				except (StopIteration, portage.exception.InvalidDependString):
					arg = None
				if arg:
					return False
			if pkg.installed:
				try:
					if not visible(
						self._depgraph.pkgsettings[pkg.root], pkg):
						return False
				except portage.exception.InvalidDependString:
					pass
			in_graph = self._depgraph._slot_pkg_map[
				self._root].get(pkg.slot_atom)
			if in_graph is None:
				# Mask choices for packages which are not the highest visible
				# version within their slot (since they usually trigger slot
				# conflicts).
				highest_visible, in_graph = self._depgraph._select_package(
					self._root, pkg.slot_atom)
				if pkg != highest_visible:
					return False
			elif in_graph != pkg:
				# Mask choices for packages that would trigger a slot
				# conflict with a previously selected package.
				return False
			return True

		def _dep_expand(self, atom):
			"""
			This is only needed for old installed packages that may
			contain atoms that are not fully qualified with a specific
			category. Emulate the cpv_expand() function that's used by
			dbapi.match() in cases like this. If there are multiple
			matches, it's often due to a new-style virtual that has
			been added, so try to filter those out to avoid raising
			a ValueError.
			"""
			root_config = self._depgraph.roots[self._root]
			orig_atom = atom
			expanded_atoms = self._depgraph._dep_expand(root_config, atom)
			if len(expanded_atoms) > 1:
				# Prefer the single non-virtual match when the ambiguity
				# is caused by added new-style virtuals.
				non_virtual_atoms = []
				for x in expanded_atoms:
					if not portage.dep_getkey(x).startswith("virtual/"):
						non_virtual_atoms.append(x)
				if len(non_virtual_atoms) == 1:
					expanded_atoms = non_virtual_atoms
			if len(expanded_atoms) > 1:
				# compatible with portage.cpv_expand()
				raise portage.exception.AmbiguousPackageName(
					[portage.dep_getkey(x) for x in expanded_atoms])
			if expanded_atoms:
				atom = expanded_atoms[0]
			else:
				# Nothing known matches: pick the "virtual" category if
				# a matching PROVIDE exists, otherwise fall back to the
				# "null" category.
				null_atom = insert_category_into_atom(atom, "null")
				null_cp = portage.dep_getkey(null_atom)
				cat, atom_pn = portage.catsplit(null_cp)
				virts_p = root_config.settings.get_virts_p().get(atom_pn)
				if virts_p:
					# Allow the resolver to choose which virtual.
					atom = insert_category_into_atom(atom, "virtual")
				else:
					atom = insert_category_into_atom(atom, "null")
			return atom

		def aux_get(self, cpv, wants):
			"""
			Serve metadata for a cpv previously returned by match();
			missing keys yield empty strings.
			"""
			metadata = self._cpv_pkg_map[cpv].metadata
			return [metadata.get(x, "") for x in wants]
4757
4758
4759
def ambiguous_package_name(arg, atoms, root_config, spinner, myopts):
	"""
	Inform the user that the short ebuild name `arg` matched several
	packages (the cp keys of `atoms`).

	With --quiet, just list the fully-qualified names; otherwise run a
	search to display descriptive output for each candidate first.
	"""

	if "--quiet" in myopts:
		print "!!! The short ebuild name \"%s\" is ambiguous. Please specify" % arg
		print "!!! one of the following fully-qualified ebuild names instead:\n"
		for cp in sorted(set(portage.dep_getkey(atom) for atom in atoms)):
			print "    " + colorize("INFORM", cp)
		return

	s = search(root_config, spinner, "--searchdesc" in myopts,
		"--quiet" not in myopts, "--usepkg" in myopts,
		"--usepkgonly" in myopts)
	# Derive the bare package name (PN) from the argument so the search
	# object can use it as its key.
	null_cp = portage.dep_getkey(insert_category_into_atom(
		arg, "null"))
	cat, atom_pn = portage.catsplit(null_cp)
	s.searchkey = atom_pn
	for cp in sorted(set(portage.dep_getkey(atom) for atom in atoms)):
		s.addCP(cp)
	s.output()
	print "!!! The short ebuild name \"%s\" is ambiguous. Please specify" % arg
	print "!!! one of the above fully-qualified ebuild names instead.\n"
4780
4781
def insert_category_into_atom(atom, category):
	"""
	Prefix the package name inside an atom with "<category>/",
	preserving any leading operator characters, e.g.
	">=foo-1.0" -> ">=virtual/foo-1.0".

	Returns None when the atom contains no word character to
	anchor the insertion on.
	"""
	anchor = re.search(r'\w', atom)
	if anchor is None:
		return None
	pos = anchor.start()
	return atom[:pos] + "%s/" % category + atom[pos:]
4789
4790
def resume_depgraph(settings, trees, mtimedb, myopts, myparams, spinner):
	"""
	Construct a depgraph for the given resume list. This will raise
	PackageNotFound or depgraph.UnsatisfiedResumeDep when necessary.

	On UnsatisfiedResumeDep, the mergelist is pruned of the offending
	packages (and any merge/nomerge parents whose deps would become
	unsatisfied by the removal) and graph construction is retried.
	@rtype: tuple
	@returns: (success, depgraph, dropped_tasks)
	"""
	skip_masked = True
	skip_unsatisfied = True
	mergelist = mtimedb["resume"]["mergelist"]
	dropped_tasks = set()
	while True:
		mydepgraph = depgraph(settings, trees,
			myopts, myparams, spinner)
		try:
			success = mydepgraph.loadResumeCommand(mtimedb["resume"],
				skip_masked=skip_masked)
		except depgraph.UnsatisfiedResumeDep, e:
			if not skip_unsatisfied:
				raise

			graph = mydepgraph.digraph
			# e.value holds the unsatisfied Dependency objects; start
			# from their parent packages and expand upward.
			unsatisfied_parents = dict((dep.parent, dep.parent) \
				for dep in e.value)
			traversed_nodes = set()
			unsatisfied_stack = list(unsatisfied_parents)
			while unsatisfied_stack:
				pkg = unsatisfied_stack.pop()
				if pkg in traversed_nodes:
					continue
				traversed_nodes.add(pkg)

				# If this package was pulled in by a parent
				# package scheduled for merge, removing this
				# package may cause the parent package's
				# dependency to become unsatisfied.
				for parent_node in graph.parent_nodes(pkg):
					if not isinstance(parent_node, Package) \
						or parent_node.operation not in ("merge", "nomerge"):
						continue
					unsatisfied = \
						graph.child_nodes(parent_node,
						ignore_priority=DepPrioritySatisfiedRange.ignore_soft)
					if pkg in unsatisfied:
						unsatisfied_parents[parent_node] = parent_node
						unsatisfied_stack.append(parent_node)

			# Drop mergelist entries whose package is unsatisfied.
			# NOTE(review): this relies on Package instances hashing/
			# comparing equal to their serialized tuple form — confirm
			# against the Package class.
			pruned_mergelist = []
			for x in mergelist:
				if isinstance(x, list) and \
					tuple(x) not in unsatisfied_parents:
					pruned_mergelist.append(x)

			# If the mergelist doesn't shrink then this loop is infinite.
			if len(pruned_mergelist) == len(mergelist):
				# This happens if a package can't be dropped because
				# it's already installed, but it has unsatisfied PDEPEND.
				raise
			mergelist[:] = pruned_mergelist

			# Exclude installed packages that have been removed from the graph due
			# to failure to build/install runtime dependencies after the dependent
			# package has already been installed.
			dropped_tasks.update(pkg for pkg in \
				unsatisfied_parents if pkg.operation != "nomerge")
			mydepgraph.break_refs(unsatisfied_parents)

			del e, graph, traversed_nodes, \
				unsatisfied_parents, unsatisfied_stack
			continue
		else:
			break
	return (success, mydepgraph, dropped_tasks)
4863
4864
def get_mask_info(root_config, cpv, pkgsettings,
	db, pkg_type, built, installed, db_keys):
	"""
	Fetch metadata for cpv from db and compute its masking reasons.

	@rtype: tuple
	@returns: (metadata, mreasons) where metadata is a dict of the
		requested db_keys (None if aux_get fails) and mreasons is a
		list of human-readable masking reasons
	"""
	# (The unused eapi_masked flag that used to live here was removed.)
	try:
		metadata = dict(izip(db_keys,
			db.aux_get(cpv, db_keys)))
	except KeyError:
		# The package does not exist or its metadata is corrupt.
		metadata = None
	if metadata and not built:
		# For ebuilds, evaluate USE/CHOST against the current config
		# so the masking checks see effective values.
		pkgsettings.setcpv(cpv, mydb=metadata)
		metadata["USE"] = pkgsettings["PORTAGE_USE"]
		metadata['CHOST'] = pkgsettings.get('CHOST', '')
	if metadata is None:
		mreasons = ["corruption"]
	else:
		eapi = metadata['EAPI']
		if eapi[:1] == '-':
			# A leading '-' marks an EAPI that was flagged as
			# unsupported during metadata generation; strip it
			# before the support check.
			eapi = eapi[1:]
		if not portage.eapi_is_supported(eapi):
			mreasons = ['EAPI %s' % eapi]
		else:
			pkg = Package(type_name=pkg_type, root_config=root_config,
				cpv=cpv, built=built, installed=installed, metadata=metadata)
			mreasons = get_masking_status(pkg, pkgsettings, root_config)
	return metadata, mreasons
4889
4890
def show_masked_packages(masked_packages):
	"""
	Print each masked package with its masking reasons, plus any
	relevant package.mask comment and the location of missing
	license files.

	@param masked_packages: list of (root_config, pkgsettings, cpv,
		metadata, mreasons) tuples
	@rtype: bool
	@returns: True if at least one package was masked due to an
		unsupported EAPI
	"""
	shown_licenses = set()
	shown_comments = set()
	# Maybe there is both an ebuild and a binary. Only
	# show one of them to avoid redundant appearance.
	shown_cpvs = set()
	have_eapi_mask = False
	for (root_config, pkgsettings, cpv,
		metadata, mreasons) in masked_packages:
		if cpv in shown_cpvs:
			continue
		shown_cpvs.add(cpv)
		comment, filename = None, None
		if "package.mask" in mreasons:
			comment, filename = \
				portage.getmaskingreason(
				cpv, metadata=metadata,
				settings=pkgsettings,
				portdb=root_config.trees["porttree"].dbapi,
				return_location=True)
		missing_licenses = []
		if metadata:
			if not portage.eapi_is_supported(metadata["EAPI"]):
				have_eapi_mask = True
			try:
				missing_licenses = \
					pkgsettings._getMissingLicenses(
						cpv, metadata)
			except portage.exception.InvalidDependString:
				# This will have already been reported
				# above via mreasons.
				pass

		print "- "+cpv+" (masked by: "+", ".join(mreasons)+")"
		# Show each package.mask comment only once even if several
		# packages share it.
		if comment and comment not in shown_comments:
			print filename+":"
			print comment
			shown_comments.add(comment)
		portdb = root_config.trees["porttree"].dbapi
		for l in missing_licenses:
			l_path = portdb.findLicensePath(l)
			if l in shown_licenses:
				continue
			msg = ("A copy of the '%s' license" + \
			" is located at '%s'.") % (l, l_path)
			print msg
			print
			shown_licenses.add(l)
	return have_eapi_mask
4939
4940
def show_mask_docs():
	"""Print a pointer to the documentation about masked packages."""
	print "For more information, see the MASKED PACKAGES section in the emerge"
	print "man page or refer to the Gentoo Handbook."
4943
4944
def filter_iuse_defaults(iuse):
	"""
	Yield IUSE flag names with any leading '+' or '-' default
	indicator stripped.

	@param iuse: iterable of IUSE tokens (e.g. ["+foo", "-bar", "baz"])
	@returns: generator of plain flag names
	"""
	for flag in iuse:
		# str.startswith accepts a tuple, so one call covers both markers.
		if flag.startswith(("+", "-")):
			yield flag[1:]
		else:
			yield flag
4950
4951
def show_blocker_docs_link():
	"""Print a pointer to the handbook section about blocked packages."""
	print
	print "For more information about " + bad("Blocked Packages") + ", please refer to the following"
	print "section of the Gentoo Linux x86 Handbook (architecture is irrelevant):"
	print
	print "http://www.gentoo.org/doc/en/handbook/handbook-x86.xml?full=1#blocked"
	print
4958
4959
def get_masking_status(pkg, pkgsettings, root_config):
	"""Return a list of reasons why pkg is masked (empty if unmasked).

	Starts from portage.getmaskingstatus() and appends two extra checks:
	a CHOST that is not accepted (for packages not yet installed) and an
	undefined SLOT.
	"""
	portdb = root_config.trees["porttree"].dbapi
	reasons = portage.getmaskingstatus(
		pkg, settings=pkgsettings, portdb=portdb)

	# Installed packages keep whatever CHOST they were built with; only
	# candidate (not-installed) packages are checked against ACCEPT_CHOST.
	if not pkg.installed and \
		not pkgsettings._accept_chost(pkg.cpv, pkg.metadata):
		reasons.append("CHOST: %s" % pkg.metadata["CHOST"])

	# An empty SLOT is invalid metadata.
	if not pkg.metadata["SLOT"]:
		reasons.append("invalid: SLOT is undefined")

	return reasons
(-)create_depgraph_params.py (+33 lines)
Line 0 Link Here
1
def create_depgraph_params(myopts, myaction):
	"""Translate emerge command-line options into depgraph parameter flags.

	Flags produced:
	  selective: exclude a package if it is already merged
	  recurse:   follow dependencies
	  deep:      follow dependencies of already merged packages
	  empty:     pretend nothing is merged
	  complete:  completely account for all known dependencies
	  remove:    build a graph for use in removing packages
	"""
	params = set(["recurse"])

	# Package removal always uses a fixed parameter set.
	if myaction == "remove":
		params.update(["remove", "complete"])
		return params

	# Any of these options implies selective (re)calculation.
	for opt in ("--update", "--newuse", "--reinstall", "--noreplace"):
		if opt in myopts:
			params.add("selective")
			break

	# --emptytree overrides selective: everything is treated as unmerged.
	if "--emptytree" in myopts:
		params.add("empty")
		params.discard("selective")

	if "--nodeps" in myopts:
		params.discard("recurse")

	if "--deep" in myopts:
		params.add("deep")

	if "--complete-graph" in myopts:
		params.add("complete")

	return params
(-)is_valid_package_atom.py (+16 lines)
Line 0 Link Here
1
import re
2
3
try:
4
	import portage
5
except ImportError:
6
	from os import path as osp
7
	import sys
8
	sys.path.insert(0, osp.join(osp.dirname(osp.dirname(osp.realpath(__file__))), "pym"))
9
	import portage
10
11
def is_valid_package_atom(x):
	"""Return whether x is a valid package atom, tolerating a missing category.

	When the atom has no category ("/" absent), a dummy "cat/" is spliced in
	just before the first word character so that version/operator prefixes
	such as ">=" survive, then portage validates the result.
	"""
	if "/" not in x:
		m = re.search(r'\w', x)
		if m is not None:
			pos = m.start()
			x = "%scat/%s" % (x[:pos], x[pos:])
	return portage.isvalidatom(x)
(-)create_world_atom.py (+91 lines)
Line 0 Link Here
1
try:
2
	import portage
3
except ImportError:
4
	from os import path as osp
5
	import sys
6
	sys.path.insert(0, osp.join(osp.dirname(osp.dirname(osp.realpath(__file__))), "pym"))
7
	import portage
8
9
def create_world_atom(pkg, args_set, root_config):
	"""Create a new atom for the world file if one does not exist.  If the
	argument atom is precise enough to identify a specific slot then a slot
	atom will be returned. Atoms that are in the system set may also be stored
	in world since system atoms can only match one slot while world atoms can
	be greedy with respect to slots.  Unslotted system packages will not be
	stored in world."""

	# Find the argument atom that matched this package; no atom means the
	# package was not requested by name, so nothing goes into world.
	arg_atom = args_set.findAtomForPackage(pkg)
	if not arg_atom:
		return None
	cp = portage.dep_getkey(arg_atom)
	# Default to the bare category/package atom; may be upgraded to a slot
	# atom below when the argument pins a single slot.
	new_world_atom = cp
	sets = root_config.sets
	portdb = root_config.trees["porttree"].dbapi
	vardb = root_config.trees["vartree"].dbapi
	# A package counts as "slotted" when more than one SLOT is visible, or
	# its single SLOT is something other than the default "0".
	available_slots = set(portdb.aux_get(cpv, ["SLOT"])[0] \
		for cpv in portdb.match(cp))
	slotted = len(available_slots) > 1 or \
		(len(available_slots) == 1 and "0" not in available_slots)
	if not slotted:
		# check the vdb in case this is multislot
		available_slots = set(vardb.aux_get(cpv, ["SLOT"])[0] \
			for cpv in vardb.match(cp))
		slotted = len(available_slots) > 1 or \
			(len(available_slots) == 1 and "0" not in available_slots)
	if slotted and arg_atom != cp:
		# If the user gave a specific atom, store it as a
		# slot atom in the world file.
		slot_atom = pkg.slot_atom

		# For USE=multislot, there are a couple of cases to
		# handle here:
		#
		# 1) SLOT="0", but the real SLOT spontaneously changed to some
		#    unknown value, so just record an unslotted atom.
		#
		# 2) SLOT comes from an installed package and there is no
		#    matching SLOT in the portage tree.
		#
		# Make sure that the slot atom is available in either the
		# portdb or the vardb, since otherwise the user certainly
		# doesn't want the SLOT atom recorded in the world file
		# (case 1 above).  If it's only available in the vardb,
		# the user may be trying to prevent a USE=multislot
		# package from being removed by --depclean (case 2 above).

		mydb = portdb
		if not portdb.match(slot_atom):
			# SLOT seems to come from an installed multislot package
			mydb = vardb
		# If there is no installed package matching the SLOT atom,
		# it probably changed SLOT spontaneously due to USE=multislot,
		# so just record an unslotted atom.
		if vardb.match(slot_atom):
			# Now verify that the argument is precise
			# enough to identify a specific slot.
			matches = mydb.match(arg_atom)
			matched_slots = set()
			for cpv in matches:
				matched_slots.add(mydb.aux_get(cpv, ["SLOT"])[0])
			if len(matched_slots) == 1:
				new_world_atom = slot_atom

	if new_world_atom == sets["world"].findAtomForPackage(pkg):
		# Both atoms would be identical, so there's nothing to add.
		return None
	if not slotted:
		# Unlike world atoms, system atoms are not greedy for slots, so they
		# can't be safely excluded from world if they are slotted.
		system_atom = sets["system"].findAtomForPackage(pkg)
		if system_atom:
			# Non-virtual system packages need no world entry at all.
			if not portage.dep_getkey(system_atom).startswith("virtual/"):
				return None
			# System virtuals aren't safe to exclude from world since they can
			# match multiple old-style virtuals but only one of them will be
			# pulled in by update or depclean.
			providers = portdb.mysettings.getvirtuals().get(
				portage.dep_getkey(system_atom))
			if providers and len(providers) == 1 and providers[0] == cp:
				return None
	return new_world_atom
91
(-)_find_deep_system_runtime_deps.py (+35 lines)
Line 0 Link Here
1
from _emerge.DepPriority import DepPriority
2
from _emerge.Package import Package
3
4
def _find_deep_system_runtime_deps(graph):
	"""Return the set of system packages together with every package they
	pull in transitively through runtime dependency edges of the graph."""

	def _skip_non_runtime(priority):
		# Edge filter: traverse only runtime / runtime_post DepPriority
		# edges; returning True tells child_nodes() to ignore the edge.
		if isinstance(priority, DepPriority) and \
			(priority.runtime or priority.runtime_post):
			return False
		return True

	def _is_candidate(node):
		# Only Package nodes not scheduled for uninstall participate.
		return isinstance(node, Package) and node.operation != 'uninstall'

	# Seed the walk with every system-set package in the graph.
	pending = [node for node in graph
		if _is_candidate(node) and
		node.root_config.sets['system'].findAtomForPackage(node)]

	found = set()
	while pending:
		node = pending.pop()
		if node in found:
			continue
		found.add(node)
		for child in graph.child_nodes(node,
			ignore_priority=_skip_non_runtime):
			if _is_candidate(child):
				pending.append(child)

	return found
35

Return to bug 275047