Lines 9-16
 import signal
 import sys
 import textwrap
-import weakref
-import gc
 import os, stat
 import platform
 
Lines 23-35
 
 from portage import digraph
 from portage.const import NEWS_LIB_PATH
-from portage.cache.mappings import slot_dict_class
 
 import _emerge.help
 import portage.xpak, commands, errno, re, socket, time
-from portage.output import blue, bold, colorize, darkblue, darkgreen, green, \
-    nc_len, red, teal, turquoise, \
-    xtermTitleReset, yellow
+from portage.output import blue, bold, colorize, darkgreen, \
+    red, xtermTitleReset, yellow
 from portage.output import create_color_func
 good = create_color_func("GOOD")
 bad = create_color_func("BAD")
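The two helpers kept above wrap portage.output.colorize with a fixed color class from color.map. A minimal usage sketch (the message strings are made up for illustration):

    from portage.output import create_color_func

    good = create_color_func("GOOD")
    bad = create_color_func("BAD")

    # Each helper takes a plain string and returns it wrapped in the ANSI
    # escape codes that the GOOD/BAD color classes resolve to.
    print(good("update complete") + " / " + bad("package is masked"))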
Lines 42-48
 import portage.exception
 from portage.cache.cache_errors import CacheError
 from portage.data import secpass
-from portage.elog.messages import eerror
 from portage.util import normalize_path as normpath
 from portage.util import cmp_sort_key, writemsg, writemsg_level
 from portage.sets import load_default_config, SETPREFIX
Lines 50-96
 
 from itertools import chain, izip
 
-from _emerge.SlotObject import SlotObject
-from _emerge.DepPriority import DepPriority
-from _emerge.BlockerDepPriority import BlockerDepPriority
-from _emerge.UnmergeDepPriority import UnmergeDepPriority
-from _emerge.DepPriorityNormalRange import DepPriorityNormalRange
+from _emerge.clear_caches import clear_caches
+from _emerge.countdown import countdown
+from _emerge.create_depgraph_params import create_depgraph_params
+from _emerge.Dependency import Dependency
+from _emerge.depgraph import depgraph, resume_depgraph
 from _emerge.DepPrioritySatisfiedRange import DepPrioritySatisfiedRange
+from _emerge.emergelog import emergelog
+from _emerge._flush_elog_mod_echo import _flush_elog_mod_echo
+from _emerge.is_valid_package_atom import is_valid_package_atom
+from _emerge.MetadataRegen import MetadataRegen
 from _emerge.Package import Package
-from _emerge.Blocker import Blocker
-from _emerge.BlockerDB import BlockerDB
-from _emerge.EbuildFetcher import EbuildFetcher
-from _emerge.EbuildPhase import EbuildPhase
-from _emerge.BinpkgPrefetcher import BinpkgPrefetcher
-from _emerge.PackageMerge import PackageMerge
-from _emerge.DependencyArg import DependencyArg
-from _emerge.AtomArg import AtomArg
-from _emerge.PackageArg import PackageArg
-from _emerge.SetArg import SetArg
-from _emerge.Dependency import Dependency
-from _emerge.BlockerCache import BlockerCache
-from _emerge.PackageVirtualDbapi import PackageVirtualDbapi
-from _emerge.RepoDisplay import RepoDisplay
-from _emerge.UseFlagDisplay import UseFlagDisplay
-from _emerge.SequentialTaskQueue import SequentialTaskQueue
 from _emerge.ProgressHandler import ProgressHandler
-from _emerge.stdout_spinner import stdout_spinner
-from _emerge.JobStatusDisplay import JobStatusDisplay
-from _emerge.PollScheduler import PollScheduler
-from _emerge.search import search
-from _emerge.visible import visible
-from _emerge.emergelog import emergelog, _emerge_log_dir
-from _emerge.userquery import userquery
-from _emerge.countdown import countdown
-from _emerge.unmerge import unmerge
-from _emerge.MergeListItem import MergeListItem
-from _emerge.MetadataRegen import MetadataRegen
 from _emerge.RootConfig import RootConfig
-from _emerge.format_size import format_size
-from _emerge.PackageCounters import PackageCounters
-from _emerge.FakeVartree import FakeVartree
+from _emerge.Scheduler import Scheduler
+from _emerge.search import search
+from _emerge.SetArg import SetArg
 from _emerge.show_invalid_depstring_notice import show_invalid_depstring_notice
+from _emerge.stdout_spinner import stdout_spinner
+from _emerge.unmerge import unmerge
+from _emerge.UnmergeDepPriority import UnmergeDepPriority
+from _emerge.UseFlagDisplay import UseFlagDisplay
+from _emerge.userquery import userquery
 
+
 actions = frozenset([
 "clean", "config", "depclean",
 "info", "list-sets", "metadata",
Lines 208-6754
 
     return "Portage " + portage.VERSION +" ("+profilever+", "+gccver+", "+libcver+", "+unameout+")"
 
-def create_depgraph_params(myopts, myaction):
-    #configure emerge engine parameters
-    #
-    # self: include _this_ package regardless of if it is merged.
-    # selective: exclude the package if it is merged
-    # recurse: go into the dependencies
-    # deep: go into the dependencies of already merged packages
-    # empty: pretend nothing is merged
-    # complete: completely account for all known dependencies
-    # remove: build graph for use in removing packages
-    myparams = set(["recurse"])
-
-    if myaction == "remove":
-        myparams.add("remove")
-        myparams.add("complete")
-        return myparams
-
-    if "--update" in myopts or \
-        "--newuse" in myopts or \
-        "--reinstall" in myopts or \
-        "--noreplace" in myopts:
-        myparams.add("selective")
-    if "--emptytree" in myopts:
-        myparams.add("empty")
-        myparams.discard("selective")
-    if "--nodeps" in myopts:
-        myparams.discard("recurse")
-    if "--deep" in myopts:
-        myparams.add("deep")
-    if "--complete-graph" in myopts:
-        myparams.add("complete")
-    return myparams
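The option-to-parameter mapping removed here now lives in _emerge.create_depgraph_params. A small illustrative sketch, assuming that module is importable and using made-up option dicts:

    from _emerge.create_depgraph_params import create_depgraph_params

    # --update and --deep imply a selective, deep traversal on top of the
    # default "recurse" parameter.
    params = create_depgraph_params({"--update": True, "--deep": True}, "")
    assert {"recurse", "selective", "deep"} <= params

    # --emptytree pretends nothing is installed, so "selective" is dropped.
    params = create_depgraph_params({"--emptytree": True}, "")
    assert "empty" in params and "selective" not in params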
-
-def create_world_atom(pkg, args_set, root_config):
-    """Create a new atom for the world file if one does not exist. If the
-    argument atom is precise enough to identify a specific slot then a slot
-    atom will be returned. Atoms that are in the system set may also be stored
-    in world since system atoms can only match one slot while world atoms can
-    be greedy with respect to slots. Unslotted system packages will not be
-    stored in world."""
-
-    arg_atom = args_set.findAtomForPackage(pkg)
-    if not arg_atom:
-        return None
-    cp = portage.dep_getkey(arg_atom)
-    new_world_atom = cp
-    sets = root_config.sets
-    portdb = root_config.trees["porttree"].dbapi
-    vardb = root_config.trees["vartree"].dbapi
-    available_slots = set(portdb.aux_get(cpv, ["SLOT"])[0] \
-        for cpv in portdb.match(cp))
-    slotted = len(available_slots) > 1 or \
-        (len(available_slots) == 1 and "0" not in available_slots)
-    if not slotted:
-        # check the vdb in case this is multislot
-        available_slots = set(vardb.aux_get(cpv, ["SLOT"])[0] \
-            for cpv in vardb.match(cp))
-        slotted = len(available_slots) > 1 or \
-            (len(available_slots) == 1 and "0" not in available_slots)
-    if slotted and arg_atom != cp:
-        # If the user gave a specific atom, store it as a
-        # slot atom in the world file.
-        slot_atom = pkg.slot_atom
-
-        # For USE=multislot, there are a couple of cases to
-        # handle here:
-        #
-        # 1) SLOT="0", but the real SLOT spontaneously changed to some
-        # unknown value, so just record an unslotted atom.
-        #
-        # 2) SLOT comes from an installed package and there is no
-        # matching SLOT in the portage tree.
-        #
-        # Make sure that the slot atom is available in either the
-        # portdb or the vardb, since otherwise the user certainly
-        # doesn't want the SLOT atom recorded in the world file
-        # (case 1 above). If it's only available in the vardb,
-        # the user may be trying to prevent a USE=multislot
-        # package from being removed by --depclean (case 2 above).
-
-        mydb = portdb
-        if not portdb.match(slot_atom):
-            # SLOT seems to come from an installed multislot package
-            mydb = vardb
-        # If there is no installed package matching the SLOT atom,
-        # it probably changed SLOT spontaneously due to USE=multislot,
-        # so just record an unslotted atom.
-        if vardb.match(slot_atom):
-            # Now verify that the argument is precise
-            # enough to identify a specific slot.
-            matches = mydb.match(arg_atom)
-            matched_slots = set()
-            for cpv in matches:
-                matched_slots.add(mydb.aux_get(cpv, ["SLOT"])[0])
-            if len(matched_slots) == 1:
-                new_world_atom = slot_atom
-
-    if new_world_atom == sets["world"].findAtomForPackage(pkg):
-        # Both atoms would be identical, so there's nothing to add.
-        return None
-    if not slotted:
-        # Unlike world atoms, system atoms are not greedy for slots, so they
-        # can't be safely excluded from world if they are slotted.
-        system_atom = sets["system"].findAtomForPackage(pkg)
-        if system_atom:
-            if not portage.dep_getkey(system_atom).startswith("virtual/"):
-                return None
-            # System virtuals aren't safe to exclude from world since they can
-            # match multiple old-style virtuals but only one of them will be
-            # pulled in by update or depclean.
-            providers = portdb.mysettings.getvirtuals().get(
-                portage.dep_getkey(system_atom))
-            if providers and len(providers) == 1 and providers[0] == cp:
-                return None
-    return new_world_atom
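The "slotted" test above treats a package as slot-sensitive when its visible versions span more than one SLOT, or a single SLOT other than the default "0". A self-contained sketch of that predicate; _is_slotted is a hypothetical helper name, not part of the patch:

    def _is_slotted(available_slots):
        # Mirrors the check in create_world_atom: multiple SLOT values, or a
        # single SLOT that is not "0", means slot atoms are worth recording.
        return len(available_slots) > 1 or \
            (len(available_slots) == 1 and "0" not in available_slots)

    assert not _is_slotted({"0"})     # plain unslotted package
    assert _is_slotted({"0", "2"})    # multiple slots, e.g. parallel GCC versions
    assert _is_slotted({"8.0"})       # single non-default slot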
-
-def filter_iuse_defaults(iuse):
-    for flag in iuse:
-        if flag.startswith("+") or flag.startswith("-"):
-            yield flag[1:]
-        else:
-            yield flag
-
-def _find_deep_system_runtime_deps(graph):
-    deep_system_deps = set()
-    node_stack = []
-    for node in graph:
-        if not isinstance(node, Package) or \
-            node.operation == 'uninstall':
-            continue
-        if node.root_config.sets['system'].findAtomForPackage(node):
-            node_stack.append(node)
-
-    def ignore_priority(priority):
-        """
-        Ignore non-runtime priorities.
-        """
-        if isinstance(priority, DepPriority) and \
-            (priority.runtime or priority.runtime_post):
-            return False
-        return True
-
-    while node_stack:
-        node = node_stack.pop()
-        if node in deep_system_deps:
-            continue
-        deep_system_deps.add(node)
-        for child in graph.child_nodes(node, ignore_priority=ignore_priority):
-            if not isinstance(child, Package) or \
-                child.operation == 'uninstall':
-                continue
-            node_stack.append(child)
-
-    return deep_system_deps
-
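The traversal above is a plain stack-based depth-first walk over runtime edges, seeded with the system-set packages already in the graph. A minimal stand-alone sketch with a toy adjacency dict; the function and package names are made up and the Package/DepPriority filtering is omitted:

    def runtime_closure(children, seeds):
        # children maps a node to its runtime-dependency children; seeds are
        # the starting nodes. Returns every node reachable over those edges.
        seen = set()
        stack = list(seeds)
        while stack:
            node = stack.pop()
            if node in seen:
                continue
            seen.add(node)
            stack.extend(children.get(node, ()))
        return seen

    deps = {"baselayout": ["openrc"], "openrc": ["sysvinit"]}
    assert runtime_closure(deps, ["baselayout"]) == \
        {"baselayout", "openrc", "sysvinit"}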
-def get_masking_status(pkg, pkgsettings, root_config):
-
-    mreasons = portage.getmaskingstatus(
-        pkg, settings=pkgsettings,
-        portdb=root_config.trees["porttree"].dbapi)
-
-    if not pkg.installed:
-        if not pkgsettings._accept_chost(pkg.cpv, pkg.metadata):
-            mreasons.append("CHOST: %s" % \
-                pkg.metadata["CHOST"])
-
-    if not pkg.metadata["SLOT"]:
-        mreasons.append("invalid: SLOT is undefined")
-
-    return mreasons
-
-def get_mask_info(root_config, cpv, pkgsettings,
-    db, pkg_type, built, installed, db_keys):
-    eapi_masked = False
-    try:
-        metadata = dict(izip(db_keys,
-            db.aux_get(cpv, db_keys)))
-    except KeyError:
-        metadata = None
-    if metadata and not built:
-        pkgsettings.setcpv(cpv, mydb=metadata)
-        metadata["USE"] = pkgsettings["PORTAGE_USE"]
-        metadata['CHOST'] = pkgsettings.get('CHOST', '')
-    if metadata is None:
-        mreasons = ["corruption"]
-    else:
-        eapi = metadata['EAPI']
-        if eapi[:1] == '-':
-            eapi = eapi[1:]
-        if not portage.eapi_is_supported(eapi):
-            mreasons = ['EAPI %s' % eapi]
-        else:
-            pkg = Package(type_name=pkg_type, root_config=root_config,
-                cpv=cpv, built=built, installed=installed, metadata=metadata)
-            mreasons = get_masking_status(pkg, pkgsettings, root_config)
-    return metadata, mreasons
-
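The EAPI handling above strips a leading "-" before the support check; that prefix appears to be how the metadata cache flags an EAPI it could not handle. A small hedged sketch of that normalization, with a made-up helper name and an example supported set rather than portage's real check:

    def eapi_mask_reason(eapi, supported=("0", "1", "2")):
        # Strip the cache's "-" marker first, as get_mask_info does, then
        # report an "EAPI <x>" mask reason if the EAPI is unsupported.
        if eapi.startswith("-"):
            eapi = eapi[1:]
        return None if eapi in supported else "EAPI %s" % eapi

    assert eapi_mask_reason("2") is None
    assert eapi_mask_reason("-3") == "EAPI 3"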
-def show_masked_packages(masked_packages):
-    shown_licenses = set()
-    shown_comments = set()
-    # Maybe there is both an ebuild and a binary. Only
-    # show one of them to avoid redundant appearance.
-    shown_cpvs = set()
-    have_eapi_mask = False
-    for (root_config, pkgsettings, cpv,
-        metadata, mreasons) in masked_packages:
-        if cpv in shown_cpvs:
-            continue
-        shown_cpvs.add(cpv)
-        comment, filename = None, None
-        if "package.mask" in mreasons:
-            comment, filename = \
-                portage.getmaskingreason(
-                cpv, metadata=metadata,
-                settings=pkgsettings,
-                portdb=root_config.trees["porttree"].dbapi,
-                return_location=True)
-        missing_licenses = []
-        if metadata:
-            if not portage.eapi_is_supported(metadata["EAPI"]):
-                have_eapi_mask = True
-            try:
-                missing_licenses = \
-                    pkgsettings._getMissingLicenses(
-                        cpv, metadata)
-            except portage.exception.InvalidDependString:
-                # This will have already been reported
-                # above via mreasons.
-                pass
-
-        print "- "+cpv+" (masked by: "+", ".join(mreasons)+")"
-        if comment and comment not in shown_comments:
-            print filename+":"
-            print comment
-            shown_comments.add(comment)
-        portdb = root_config.trees["porttree"].dbapi
-        for l in missing_licenses:
-            l_path = portdb.findLicensePath(l)
-            if l in shown_licenses:
-                continue
-            msg = ("A copy of the '%s' license" + \
-                " is located at '%s'.") % (l, l_path)
-            print msg
-            print
-            shown_licenses.add(l)
-    return have_eapi_mask
-
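Each masked_packages entry is a (root_config, pkgsettings, cpv, metadata, mreasons) tuple; only cpv and mreasons drive the summary line. A made-up stand-in showing the output format produced by the print statement above:

    cpv = "app-misc/foo-1.0"                      # hypothetical package
    mreasons = ["package.mask", "~amd64 keyword"] # hypothetical reasons
    print("- " + cpv + " (masked by: " + ", ".join(mreasons) + ")")
    # -> - app-misc/foo-1.0 (masked by: package.mask, ~amd64 keyword)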
458 |
class depgraph(object): |
459 |
|
460 |
pkg_tree_map = RootConfig.pkg_tree_map |
461 |
|
462 |
_dep_keys = ["DEPEND", "RDEPEND", "PDEPEND"] |
463 |
|
464 |
def __init__(self, settings, trees, myopts, myparams, spinner): |
465 |
self.settings = settings |
466 |
self.target_root = settings["ROOT"] |
467 |
self.myopts = myopts |
468 |
self.myparams = myparams |
469 |
self.edebug = 0 |
470 |
if settings.get("PORTAGE_DEBUG", "") == "1": |
471 |
self.edebug = 1 |
472 |
self.spinner = spinner |
473 |
self._running_root = trees["/"]["root_config"] |
474 |
self._opts_no_restart = Scheduler._opts_no_restart |
475 |
self.pkgsettings = {} |
476 |
# Maps slot atom to package for each Package added to the graph. |
477 |
self._slot_pkg_map = {} |
478 |
# Maps nodes to the reasons they were selected for reinstallation. |
479 |
self._reinstall_nodes = {} |
480 |
self.mydbapi = {} |
481 |
self.trees = {} |
482 |
self._trees_orig = trees |
483 |
self.roots = {} |
484 |
# Contains a filtered view of preferred packages that are selected |
485 |
# from available repositories. |
486 |
self._filtered_trees = {} |
487 |
# Contains installed packages and new packages that have been added |
488 |
# to the graph. |
489 |
self._graph_trees = {} |
490 |
# All Package instances |
491 |
self._pkg_cache = {} |
492 |
for myroot in trees: |
493 |
self.trees[myroot] = {} |
494 |
# Create a RootConfig instance that references |
495 |
# the FakeVartree instead of the real one. |
496 |
self.roots[myroot] = RootConfig( |
497 |
trees[myroot]["vartree"].settings, |
498 |
self.trees[myroot], |
499 |
trees[myroot]["root_config"].setconfig) |
500 |
for tree in ("porttree", "bintree"): |
501 |
self.trees[myroot][tree] = trees[myroot][tree] |
502 |
self.trees[myroot]["vartree"] = \ |
503 |
FakeVartree(trees[myroot]["root_config"], |
504 |
pkg_cache=self._pkg_cache) |
505 |
self.pkgsettings[myroot] = portage.config( |
506 |
clone=self.trees[myroot]["vartree"].settings) |
507 |
self._slot_pkg_map[myroot] = {} |
508 |
vardb = self.trees[myroot]["vartree"].dbapi |
509 |
preload_installed_pkgs = "--nodeps" not in self.myopts and \ |
510 |
"--buildpkgonly" not in self.myopts |
511 |
# This fakedbapi instance will model the state that the vdb will |
512 |
# have after new packages have been installed. |
513 |
fakedb = PackageVirtualDbapi(vardb.settings) |
514 |
if preload_installed_pkgs: |
515 |
for pkg in vardb: |
516 |
self.spinner.update() |
517 |
# This triggers metadata updates via FakeVartree. |
518 |
vardb.aux_get(pkg.cpv, []) |
519 |
fakedb.cpv_inject(pkg) |
520 |
|
521 |
# Now that the vardb state is cached in our FakeVartree, |
522 |
# we won't be needing the real vartree cache for awhile. |
523 |
# To make some room on the heap, clear the vardbapi |
524 |
# caches. |
525 |
trees[myroot]["vartree"].dbapi._clear_cache() |
526 |
gc.collect() |
527 |
|
528 |
self.mydbapi[myroot] = fakedb |
529 |
def graph_tree(): |
530 |
pass |
531 |
graph_tree.dbapi = fakedb |
532 |
self._graph_trees[myroot] = {} |
533 |
self._filtered_trees[myroot] = {} |
534 |
# Substitute the graph tree for the vartree in dep_check() since we |
535 |
# want atom selections to be consistent with package selections |
536 |
# have already been made. |
537 |
self._graph_trees[myroot]["porttree"] = graph_tree |
538 |
self._graph_trees[myroot]["vartree"] = graph_tree |
539 |
def filtered_tree(): |
540 |
pass |
541 |
filtered_tree.dbapi = self._dep_check_composite_db(self, myroot) |
542 |
self._filtered_trees[myroot]["porttree"] = filtered_tree |
543 |
|
544 |
# Passing in graph_tree as the vartree here could lead to better |
545 |
# atom selections in some cases by causing atoms for packages that |
546 |
# have been added to the graph to be preferred over other choices. |
547 |
# However, it can trigger atom selections that result in |
548 |
# unresolvable direct circular dependencies. For example, this |
549 |
# happens with gwydion-dylan which depends on either itself or |
550 |
# gwydion-dylan-bin. In case gwydion-dylan is not yet installed, |
551 |
# gwydion-dylan-bin needs to be selected in order to avoid a |
552 |
# an unresolvable direct circular dependency. |
553 |
# |
554 |
# To solve the problem described above, pass in "graph_db" so that |
555 |
# packages that have been added to the graph are distinguishable |
556 |
# from other available packages and installed packages. Also, pass |
557 |
# the parent package into self._select_atoms() calls so that |
558 |
# unresolvable direct circular dependencies can be detected and |
559 |
# avoided when possible. |
560 |
self._filtered_trees[myroot]["graph_db"] = graph_tree.dbapi |
561 |
self._filtered_trees[myroot]["vartree"] = self.trees[myroot]["vartree"] |
562 |
|
563 |
dbs = [] |
564 |
portdb = self.trees[myroot]["porttree"].dbapi |
565 |
bindb = self.trees[myroot]["bintree"].dbapi |
566 |
vardb = self.trees[myroot]["vartree"].dbapi |
567 |
# (db, pkg_type, built, installed, db_keys) |
568 |
if "--usepkgonly" not in self.myopts: |
569 |
db_keys = list(portdb._aux_cache_keys) |
570 |
dbs.append((portdb, "ebuild", False, False, db_keys)) |
571 |
if "--usepkg" in self.myopts: |
572 |
db_keys = list(bindb._aux_cache_keys) |
573 |
dbs.append((bindb, "binary", True, False, db_keys)) |
574 |
db_keys = list(trees[myroot]["vartree"].dbapi._aux_cache_keys) |
575 |
dbs.append((vardb, "installed", True, True, db_keys)) |
576 |
self._filtered_trees[myroot]["dbs"] = dbs |
577 |
if "--usepkg" in self.myopts: |
578 |
self.trees[myroot]["bintree"].populate( |
579 |
"--getbinpkg" in self.myopts, |
580 |
"--getbinpkgonly" in self.myopts) |
581 |
del trees |
582 |
|
583 |
self.digraph=portage.digraph() |
584 |
# contains all sets added to the graph |
585 |
self._sets = {} |
586 |
# contains atoms given as arguments |
587 |
self._sets["args"] = InternalPackageSet() |
588 |
# contains all atoms from all sets added to the graph, including |
589 |
# atoms given as arguments |
590 |
self._set_atoms = InternalPackageSet() |
591 |
self._atom_arg_map = {} |
592 |
# contains all nodes pulled in by self._set_atoms |
593 |
self._set_nodes = set() |
594 |
# Contains only Blocker -> Uninstall edges |
595 |
self._blocker_uninstalls = digraph() |
596 |
# Contains only Package -> Blocker edges |
597 |
self._blocker_parents = digraph() |
598 |
# Contains only irrelevant Package -> Blocker edges |
599 |
self._irrelevant_blockers = digraph() |
600 |
# Contains only unsolvable Package -> Blocker edges |
601 |
self._unsolvable_blockers = digraph() |
602 |
# Contains all Blocker -> Blocked Package edges |
603 |
self._blocked_pkgs = digraph() |
604 |
# Contains world packages that have been protected from |
605 |
# uninstallation but may not have been added to the graph |
606 |
# if the graph is not complete yet. |
607 |
self._blocked_world_pkgs = {} |
608 |
self._slot_collision_info = {} |
609 |
# Slot collision nodes are not allowed to block other packages since |
610 |
# blocker validation is only able to account for one package per slot. |
611 |
self._slot_collision_nodes = set() |
612 |
self._parent_atoms = {} |
613 |
self._slot_conflict_parent_atoms = set() |
614 |
self._serialized_tasks_cache = None |
615 |
self._scheduler_graph = None |
616 |
self._displayed_list = None |
617 |
self._pprovided_args = [] |
618 |
self._missing_args = [] |
619 |
self._masked_installed = set() |
620 |
self._unsatisfied_deps_for_display = [] |
621 |
self._unsatisfied_blockers_for_display = None |
622 |
self._circular_deps_for_display = None |
623 |
self._dep_stack = [] |
624 |
self._dep_disjunctive_stack = [] |
625 |
self._unsatisfied_deps = [] |
626 |
self._initially_unsatisfied_deps = [] |
627 |
self._ignored_deps = [] |
628 |
self._required_set_names = set(["system", "world"]) |
629 |
self._select_atoms = self._select_atoms_highest_available |
630 |
self._select_package = self._select_pkg_highest_available |
631 |
self._highest_pkg_cache = {} |
632 |
|
633 |
def _show_slot_collision_notice(self): |
634 |
"""Show an informational message advising the user to mask one of the |
635 |
the packages. In some cases it may be possible to resolve this |
636 |
automatically, but support for backtracking (removal nodes that have |
637 |
already been selected) will be required in order to handle all possible |
638 |
cases. |
639 |
""" |
640 |
|
641 |
if not self._slot_collision_info: |
642 |
return |
643 |
|
644 |
self._show_merge_list() |
645 |
|
646 |
msg = [] |
647 |
msg.append("\n!!! Multiple package instances within a single " + \ |
648 |
"package slot have been pulled\n") |
649 |
msg.append("!!! into the dependency graph, resulting" + \ |
650 |
" in a slot conflict:\n\n") |
651 |
indent = " " |
652 |
# Max number of parents shown, to avoid flooding the display. |
653 |
max_parents = 3 |
654 |
explanation_columns = 70 |
655 |
explanations = 0 |
656 |
for (slot_atom, root), slot_nodes \ |
657 |
in self._slot_collision_info.iteritems(): |
658 |
msg.append(str(slot_atom)) |
659 |
msg.append("\n\n") |
660 |
|
661 |
for node in slot_nodes: |
662 |
msg.append(indent) |
663 |
msg.append(str(node)) |
664 |
parent_atoms = self._parent_atoms.get(node) |
665 |
if parent_atoms: |
666 |
pruned_list = set() |
667 |
# Prefer conflict atoms over others. |
668 |
for parent_atom in parent_atoms: |
669 |
if len(pruned_list) >= max_parents: |
670 |
break |
671 |
if parent_atom in self._slot_conflict_parent_atoms: |
672 |
pruned_list.add(parent_atom) |
673 |
|
674 |
# If this package was pulled in by conflict atoms then |
675 |
# show those alone since those are the most interesting. |
676 |
if not pruned_list: |
677 |
# When generating the pruned list, prefer instances |
678 |
# of DependencyArg over instances of Package. |
679 |
for parent_atom in parent_atoms: |
680 |
if len(pruned_list) >= max_parents: |
681 |
break |
682 |
parent, atom = parent_atom |
683 |
if isinstance(parent, DependencyArg): |
684 |
pruned_list.add(parent_atom) |
685 |
# Prefer Packages instances that themselves have been |
686 |
# pulled into collision slots. |
687 |
for parent_atom in parent_atoms: |
688 |
if len(pruned_list) >= max_parents: |
689 |
break |
690 |
parent, atom = parent_atom |
691 |
if isinstance(parent, Package) and \ |
692 |
(parent.slot_atom, parent.root) \ |
693 |
in self._slot_collision_info: |
694 |
pruned_list.add(parent_atom) |
695 |
for parent_atom in parent_atoms: |
696 |
if len(pruned_list) >= max_parents: |
697 |
break |
698 |
pruned_list.add(parent_atom) |
699 |
omitted_parents = len(parent_atoms) - len(pruned_list) |
700 |
parent_atoms = pruned_list |
701 |
msg.append(" pulled in by\n") |
702 |
for parent_atom in parent_atoms: |
703 |
parent, atom = parent_atom |
704 |
msg.append(2*indent) |
705 |
if isinstance(parent, |
706 |
(PackageArg, AtomArg)): |
707 |
# For PackageArg and AtomArg types, it's |
708 |
# redundant to display the atom attribute. |
709 |
msg.append(str(parent)) |
710 |
else: |
711 |
# Display the specific atom from SetArg or |
712 |
# Package types. |
713 |
msg.append("%s required by %s" % (atom, parent)) |
714 |
msg.append("\n") |
715 |
if omitted_parents: |
716 |
msg.append(2*indent) |
717 |
msg.append("(and %d more)\n" % omitted_parents) |
718 |
else: |
719 |
msg.append(" (no parents)\n") |
720 |
msg.append("\n") |
721 |
explanation = self._slot_conflict_explanation(slot_nodes) |
722 |
if explanation: |
723 |
explanations += 1 |
724 |
msg.append(indent + "Explanation:\n\n") |
725 |
for line in textwrap.wrap(explanation, explanation_columns): |
726 |
msg.append(2*indent + line + "\n") |
727 |
msg.append("\n") |
728 |
msg.append("\n") |
729 |
sys.stderr.write("".join(msg)) |
730 |
sys.stderr.flush() |
731 |
|
732 |
explanations_for_all = explanations == len(self._slot_collision_info) |
733 |
|
734 |
if explanations_for_all or "--quiet" in self.myopts: |
735 |
return |
736 |
|
737 |
msg = [] |
738 |
msg.append("It may be possible to solve this problem ") |
739 |
msg.append("by using package.mask to prevent one of ") |
740 |
msg.append("those packages from being selected. ") |
741 |
msg.append("However, it is also possible that conflicting ") |
742 |
msg.append("dependencies exist such that they are impossible to ") |
743 |
msg.append("satisfy simultaneously. If such a conflict exists in ") |
744 |
msg.append("the dependencies of two different packages, then those ") |
745 |
msg.append("packages can not be installed simultaneously.") |
746 |
|
747 |
from formatter import AbstractFormatter, DumbWriter |
748 |
f = AbstractFormatter(DumbWriter(sys.stderr, maxcol=72)) |
749 |
for x in msg: |
750 |
f.add_flowing_data(x) |
751 |
f.end_paragraph(1) |
752 |
|
753 |
msg = [] |
754 |
msg.append("For more information, see MASKED PACKAGES ") |
755 |
msg.append("section in the emerge man page or refer ") |
756 |
msg.append("to the Gentoo Handbook.") |
757 |
for x in msg: |
758 |
f.add_flowing_data(x) |
759 |
f.end_paragraph(1) |
760 |
f.writer.flush() |
761 |
|
762 |
def _slot_conflict_explanation(self, slot_nodes): |
763 |
""" |
764 |
When a slot conflict occurs due to USE deps, there are a few |
765 |
different cases to consider: |
766 |
|
767 |
1) New USE are correctly set but --newuse wasn't requested so an |
768 |
installed package with incorrect USE happened to get pulled |
769 |
into graph before the new one. |
770 |
|
771 |
2) New USE are incorrectly set but an installed package has correct |
772 |
USE so it got pulled into the graph, and a new instance also got |
773 |
pulled in due to --newuse or an upgrade. |
774 |
|
775 |
3) Multiple USE deps exist that can't be satisfied simultaneously, |
776 |
and multiple package instances got pulled into the same slot to |
777 |
satisfy the conflicting deps. |
778 |
|
779 |
Currently, explanations and suggested courses of action are generated |
780 |
for cases 1 and 2. Case 3 is too complex to give a useful suggestion. |
781 |
""" |
782 |
|
783 |
if len(slot_nodes) != 2: |
784 |
# Suggestions are only implemented for |
785 |
# conflicts between two packages. |
786 |
return None |
787 |
|
788 |
all_conflict_atoms = self._slot_conflict_parent_atoms |
789 |
matched_node = None |
790 |
matched_atoms = None |
791 |
unmatched_node = None |
792 |
for node in slot_nodes: |
793 |
parent_atoms = self._parent_atoms.get(node) |
794 |
if not parent_atoms: |
795 |
# Normally, there are always parent atoms. If there are |
796 |
# none then something unexpected is happening and there's |
797 |
# currently no suggestion for this case. |
798 |
return None |
799 |
conflict_atoms = all_conflict_atoms.intersection(parent_atoms) |
800 |
for parent_atom in conflict_atoms: |
801 |
parent, atom = parent_atom |
802 |
if not atom.use: |
803 |
# Suggestions are currently only implemented for cases |
804 |
# in which all conflict atoms have USE deps. |
805 |
return None |
806 |
if conflict_atoms: |
807 |
if matched_node is not None: |
808 |
# If conflict atoms match multiple nodes |
809 |
# then there's no suggestion. |
810 |
return None |
811 |
matched_node = node |
812 |
matched_atoms = conflict_atoms |
813 |
else: |
814 |
if unmatched_node is not None: |
815 |
# Neither node is matched by conflict atoms, and |
816 |
# there is no suggestion for this case. |
817 |
return None |
818 |
unmatched_node = node |
819 |
|
820 |
if matched_node is None or unmatched_node is None: |
821 |
# This shouldn't happen. |
822 |
return None |
823 |
|
824 |
if unmatched_node.installed and not matched_node.installed and \ |
825 |
unmatched_node.cpv == matched_node.cpv: |
826 |
# If the conflicting packages are the same version then |
827 |
# --newuse should be all that's needed. If they are different |
828 |
# versions then there's some other problem. |
829 |
return "New USE are correctly set, but --newuse wasn't" + \ |
830 |
" requested, so an installed package with incorrect USE " + \ |
831 |
"happened to get pulled into the dependency graph. " + \ |
832 |
"In order to solve " + \ |
833 |
"this, either specify the --newuse option or explicitly " + \ |
834 |
" reinstall '%s'." % matched_node.slot_atom |
835 |
|
836 |
if matched_node.installed and not unmatched_node.installed: |
837 |
atoms = sorted(set(atom for parent, atom in matched_atoms)) |
838 |
explanation = ("New USE for '%s' are incorrectly set. " + \ |
839 |
"In order to solve this, adjust USE to satisfy '%s'") % \ |
840 |
(matched_node.slot_atom, atoms[0]) |
841 |
if len(atoms) > 1: |
842 |
for atom in atoms[1:-1]: |
843 |
explanation += ", '%s'" % (atom,) |
844 |
if len(atoms) > 2: |
845 |
explanation += "," |
846 |
explanation += " and '%s'" % (atoms[-1],) |
847 |
explanation += "." |
848 |
return explanation |
849 |
|
850 |
return None |
851 |
|
852 |
def _process_slot_conflicts(self): |
853 |
""" |
854 |
Process slot conflict data to identify specific atoms which |
855 |
lead to conflict. These atoms only match a subset of the |
856 |
packages that have been pulled into a given slot. |
857 |
""" |
858 |
for (slot_atom, root), slot_nodes \ |
859 |
in self._slot_collision_info.iteritems(): |
860 |
|
861 |
all_parent_atoms = set() |
862 |
for pkg in slot_nodes: |
863 |
parent_atoms = self._parent_atoms.get(pkg) |
864 |
if not parent_atoms: |
865 |
continue |
866 |
all_parent_atoms.update(parent_atoms) |
867 |
|
868 |
for pkg in slot_nodes: |
869 |
parent_atoms = self._parent_atoms.get(pkg) |
870 |
if parent_atoms is None: |
871 |
parent_atoms = set() |
872 |
self._parent_atoms[pkg] = parent_atoms |
873 |
for parent_atom in all_parent_atoms: |
874 |
if parent_atom in parent_atoms: |
875 |
continue |
876 |
# Use package set for matching since it will match via |
877 |
# PROVIDE when necessary, while match_from_list does not. |
878 |
parent, atom = parent_atom |
879 |
atom_set = InternalPackageSet( |
880 |
initial_atoms=(atom,)) |
881 |
if atom_set.findAtomForPackage(pkg): |
882 |
parent_atoms.add(parent_atom) |
883 |
else: |
884 |
self._slot_conflict_parent_atoms.add(parent_atom) |
885 |
|
886 |
def _reinstall_for_flags(self, forced_flags, |
887 |
orig_use, orig_iuse, cur_use, cur_iuse): |
888 |
"""Return a set of flags that trigger reinstallation, or None if there |
889 |
are no such flags.""" |
890 |
if "--newuse" in self.myopts: |
891 |
flags = set(orig_iuse.symmetric_difference( |
892 |
cur_iuse).difference(forced_flags)) |
893 |
flags.update(orig_iuse.intersection(orig_use).symmetric_difference( |
894 |
cur_iuse.intersection(cur_use))) |
895 |
if flags: |
896 |
return flags |
897 |
elif "changed-use" == self.myopts.get("--reinstall"): |
898 |
flags = orig_iuse.intersection(orig_use).symmetric_difference( |
899 |
cur_iuse.intersection(cur_use)) |
900 |
if flags: |
901 |
return flags |
902 |
return None |
903 |
|
904 |
def _create_graph(self, allow_unsatisfied=False): |
905 |
dep_stack = self._dep_stack |
906 |
dep_disjunctive_stack = self._dep_disjunctive_stack |
907 |
while dep_stack or dep_disjunctive_stack: |
908 |
self.spinner.update() |
909 |
while dep_stack: |
910 |
dep = dep_stack.pop() |
911 |
if isinstance(dep, Package): |
912 |
if not self._add_pkg_deps(dep, |
913 |
allow_unsatisfied=allow_unsatisfied): |
914 |
return 0 |
915 |
continue |
916 |
if not self._add_dep(dep, allow_unsatisfied=allow_unsatisfied): |
917 |
return 0 |
918 |
if dep_disjunctive_stack: |
919 |
if not self._pop_disjunction(allow_unsatisfied): |
920 |
return 0 |
921 |
return 1 |
922 |
|
923 |
def _add_dep(self, dep, allow_unsatisfied=False): |
924 |
debug = "--debug" in self.myopts |
925 |
buildpkgonly = "--buildpkgonly" in self.myopts |
926 |
nodeps = "--nodeps" in self.myopts |
927 |
empty = "empty" in self.myparams |
928 |
deep = "deep" in self.myparams |
929 |
update = "--update" in self.myopts and dep.depth <= 1 |
930 |
if dep.blocker: |
931 |
if not buildpkgonly and \ |
932 |
not nodeps and \ |
933 |
dep.parent not in self._slot_collision_nodes: |
934 |
if dep.parent.onlydeps: |
935 |
# It's safe to ignore blockers if the |
936 |
# parent is an --onlydeps node. |
937 |
return 1 |
938 |
# The blocker applies to the root where |
939 |
# the parent is or will be installed. |
940 |
blocker = Blocker(atom=dep.atom, |
941 |
eapi=dep.parent.metadata["EAPI"], |
942 |
root=dep.parent.root) |
943 |
self._blocker_parents.add(blocker, dep.parent) |
944 |
return 1 |
945 |
dep_pkg, existing_node = self._select_package(dep.root, dep.atom, |
946 |
onlydeps=dep.onlydeps) |
947 |
if not dep_pkg: |
948 |
if dep.priority.optional: |
949 |
# This could be an unecessary build-time dep |
950 |
# pulled in by --with-bdeps=y. |
951 |
return 1 |
952 |
if allow_unsatisfied: |
953 |
self._unsatisfied_deps.append(dep) |
954 |
return 1 |
955 |
self._unsatisfied_deps_for_display.append( |
956 |
((dep.root, dep.atom), {"myparent":dep.parent})) |
957 |
return 0 |
958 |
# In some cases, dep_check will return deps that shouldn't |
959 |
# be proccessed any further, so they are identified and |
960 |
# discarded here. Try to discard as few as possible since |
961 |
# discarded dependencies reduce the amount of information |
962 |
# available for optimization of merge order. |
963 |
if dep.priority.satisfied and \ |
964 |
not dep_pkg.installed and \ |
965 |
not (existing_node or empty or deep or update): |
966 |
myarg = None |
967 |
if dep.root == self.target_root: |
968 |
try: |
969 |
myarg = self._iter_atoms_for_pkg(dep_pkg).next() |
970 |
except StopIteration: |
971 |
pass |
972 |
except portage.exception.InvalidDependString: |
973 |
if not dep_pkg.installed: |
974 |
# This shouldn't happen since the package |
975 |
# should have been masked. |
976 |
raise |
977 |
if not myarg: |
978 |
self._ignored_deps.append(dep) |
979 |
return 1 |
980 |
|
981 |
if not self._add_pkg(dep_pkg, dep): |
982 |
return 0 |
983 |
return 1 |
984 |
|
985 |
def _add_pkg(self, pkg, dep): |
986 |
myparent = None |
987 |
priority = None |
988 |
depth = 0 |
989 |
if dep is None: |
990 |
dep = Dependency() |
991 |
else: |
992 |
myparent = dep.parent |
993 |
priority = dep.priority |
994 |
depth = dep.depth |
995 |
if priority is None: |
996 |
priority = DepPriority() |
997 |
""" |
998 |
Fills the digraph with nodes comprised of packages to merge. |
999 |
mybigkey is the package spec of the package to merge. |
1000 |
myparent is the package depending on mybigkey ( or None ) |
1001 |
addme = Should we add this package to the digraph or are we just looking at it's deps? |
1002 |
Think --onlydeps, we need to ignore packages in that case. |
1003 |
#stuff to add: |
1004 |
#SLOT-aware emerge |
1005 |
#IUSE-aware emerge -> USE DEP aware depgraph |
1006 |
#"no downgrade" emerge |
1007 |
""" |
1008 |
# Ensure that the dependencies of the same package |
1009 |
# are never processed more than once. |
1010 |
previously_added = pkg in self.digraph |
1011 |
|
1012 |
# select the correct /var database that we'll be checking against |
1013 |
vardbapi = self.trees[pkg.root]["vartree"].dbapi |
1014 |
pkgsettings = self.pkgsettings[pkg.root] |
1015 |
|
1016 |
arg_atoms = None |
1017 |
if True: |
1018 |
try: |
1019 |
arg_atoms = list(self._iter_atoms_for_pkg(pkg)) |
1020 |
except portage.exception.InvalidDependString, e: |
1021 |
if not pkg.installed: |
1022 |
show_invalid_depstring_notice( |
1023 |
pkg, pkg.metadata["PROVIDE"], str(e)) |
1024 |
return 0 |
1025 |
del e |
1026 |
|
1027 |
if not pkg.onlydeps: |
1028 |
if not pkg.installed and \ |
1029 |
"empty" not in self.myparams and \ |
1030 |
vardbapi.match(pkg.slot_atom): |
1031 |
# Increase the priority of dependencies on packages that |
1032 |
# are being rebuilt. This optimizes merge order so that |
1033 |
# dependencies are rebuilt/updated as soon as possible, |
1034 |
# which is needed especially when emerge is called by |
1035 |
# revdep-rebuild since dependencies may be affected by ABI |
1036 |
# breakage that has rendered them useless. Don't adjust |
1037 |
# priority here when in "empty" mode since all packages |
1038 |
# are being merged in that case. |
1039 |
priority.rebuild = True |
1040 |
|
1041 |
existing_node = self._slot_pkg_map[pkg.root].get(pkg.slot_atom) |
1042 |
slot_collision = False |
1043 |
if existing_node: |
1044 |
existing_node_matches = pkg.cpv == existing_node.cpv |
1045 |
if existing_node_matches and \ |
1046 |
pkg != existing_node and \ |
1047 |
dep.atom is not None: |
1048 |
# Use package set for matching since it will match via |
1049 |
# PROVIDE when necessary, while match_from_list does not. |
1050 |
atom_set = InternalPackageSet(initial_atoms=[dep.atom]) |
1051 |
if not atom_set.findAtomForPackage(existing_node): |
1052 |
existing_node_matches = False |
1053 |
if existing_node_matches: |
1054 |
# The existing node can be reused. |
1055 |
if arg_atoms: |
1056 |
for parent_atom in arg_atoms: |
1057 |
parent, atom = parent_atom |
1058 |
self.digraph.add(existing_node, parent, |
1059 |
priority=priority) |
1060 |
self._add_parent_atom(existing_node, parent_atom) |
1061 |
# If a direct circular dependency is not an unsatisfied |
1062 |
# buildtime dependency then drop it here since otherwise |
1063 |
# it can skew the merge order calculation in an unwanted |
1064 |
# way. |
1065 |
if existing_node != myparent or \ |
1066 |
(priority.buildtime and not priority.satisfied): |
1067 |
self.digraph.addnode(existing_node, myparent, |
1068 |
priority=priority) |
1069 |
if dep.atom is not None and dep.parent is not None: |
1070 |
self._add_parent_atom(existing_node, |
1071 |
(dep.parent, dep.atom)) |
1072 |
return 1 |
1073 |
else: |
1074 |
|
1075 |
# A slot collision has occurred. Sometimes this coincides |
1076 |
# with unresolvable blockers, so the slot collision will be |
1077 |
# shown later if there are no unresolvable blockers. |
1078 |
self._add_slot_conflict(pkg) |
1079 |
slot_collision = True |
1080 |
|
1081 |
if slot_collision: |
1082 |
# Now add this node to the graph so that self.display() |
1083 |
# can show use flags and --tree portage.output. This node is |
1084 |
# only being partially added to the graph. It must not be |
1085 |
# allowed to interfere with the other nodes that have been |
1086 |
# added. Do not overwrite data for existing nodes in |
1087 |
# self.mydbapi since that data will be used for blocker |
1088 |
# validation. |
1089 |
# Even though the graph is now invalid, continue to process |
1090 |
# dependencies so that things like --fetchonly can still |
1091 |
# function despite collisions. |
1092 |
pass |
1093 |
elif not previously_added: |
1094 |
self._slot_pkg_map[pkg.root][pkg.slot_atom] = pkg |
1095 |
self.mydbapi[pkg.root].cpv_inject(pkg) |
1096 |
self._filtered_trees[pkg.root]["porttree"].dbapi._clear_cache() |
1097 |
|
1098 |
if not pkg.installed: |
1099 |
# Allow this package to satisfy old-style virtuals in case it |
1100 |
# doesn't already. Any pre-existing providers will be preferred |
1101 |
# over this one. |
1102 |
try: |
1103 |
pkgsettings.setinst(pkg.cpv, pkg.metadata) |
1104 |
# For consistency, also update the global virtuals. |
1105 |
settings = self.roots[pkg.root].settings |
1106 |
settings.unlock() |
1107 |
settings.setinst(pkg.cpv, pkg.metadata) |
1108 |
settings.lock() |
1109 |
except portage.exception.InvalidDependString, e: |
1110 |
show_invalid_depstring_notice( |
1111 |
pkg, pkg.metadata["PROVIDE"], str(e)) |
1112 |
del e |
1113 |
return 0 |
1114 |
|
1115 |
if arg_atoms: |
1116 |
self._set_nodes.add(pkg) |
1117 |
|
1118 |
# Do this even when addme is False (--onlydeps) so that the |
1119 |
# parent/child relationship is always known in case |
1120 |
# self._show_slot_collision_notice() needs to be called later. |
1121 |
self.digraph.add(pkg, myparent, priority=priority) |
1122 |
if dep.atom is not None and dep.parent is not None: |
1123 |
self._add_parent_atom(pkg, (dep.parent, dep.atom)) |
1124 |
|
1125 |
if arg_atoms: |
1126 |
for parent_atom in arg_atoms: |
1127 |
parent, atom = parent_atom |
1128 |
self.digraph.add(pkg, parent, priority=priority) |
1129 |
self._add_parent_atom(pkg, parent_atom) |
1130 |
|
1131 |
""" This section determines whether we go deeper into dependencies or not. |
1132 |
We want to go deeper on a few occasions: |
1133 |
Installing package A, we need to make sure package A's deps are met. |
1134 |
emerge --deep <pkgspec>; we need to recursively check dependencies of pkgspec |
1135 |
If we are in --nodeps (no recursion) mode, we obviously only check 1 level of dependencies. |
1136 |
""" |
1137 |
dep_stack = self._dep_stack |
1138 |
if "recurse" not in self.myparams: |
1139 |
return 1 |
1140 |
elif pkg.installed and \ |
1141 |
"deep" not in self.myparams: |
1142 |
dep_stack = self._ignored_deps |
1143 |
|
1144 |
self.spinner.update() |
1145 |
|
1146 |
if arg_atoms: |
1147 |
depth = 0 |
1148 |
pkg.depth = depth |
1149 |
if not previously_added: |
1150 |
dep_stack.append(pkg) |
1151 |
return 1 |
1152 |
|
1153 |
def _add_parent_atom(self, pkg, parent_atom): |
1154 |
parent_atoms = self._parent_atoms.get(pkg) |
1155 |
if parent_atoms is None: |
1156 |
parent_atoms = set() |
1157 |
self._parent_atoms[pkg] = parent_atoms |
1158 |
parent_atoms.add(parent_atom) |
1159 |
|
1160 |
def _add_slot_conflict(self, pkg): |
1161 |
self._slot_collision_nodes.add(pkg) |
1162 |
slot_key = (pkg.slot_atom, pkg.root) |
1163 |
slot_nodes = self._slot_collision_info.get(slot_key) |
1164 |
if slot_nodes is None: |
1165 |
slot_nodes = set() |
1166 |
slot_nodes.add(self._slot_pkg_map[pkg.root][pkg.slot_atom]) |
1167 |
self._slot_collision_info[slot_key] = slot_nodes |
1168 |
slot_nodes.add(pkg) |
1169 |
|
1170 |
def _add_pkg_deps(self, pkg, allow_unsatisfied=False): |
1171 |
|
1172 |
mytype = pkg.type_name |
1173 |
myroot = pkg.root |
1174 |
mykey = pkg.cpv |
1175 |
metadata = pkg.metadata |
1176 |
myuse = pkg.use.enabled |
1177 |
jbigkey = pkg |
1178 |
depth = pkg.depth + 1 |
1179 |
removal_action = "remove" in self.myparams |
1180 |
|
1181 |
edepend={} |
1182 |
depkeys = ["DEPEND","RDEPEND","PDEPEND"] |
1183 |
for k in depkeys: |
1184 |
edepend[k] = metadata[k] |
1185 |
|
1186 |
if not pkg.built and \ |
1187 |
"--buildpkgonly" in self.myopts and \ |
1188 |
"deep" not in self.myparams and \ |
1189 |
"empty" not in self.myparams: |
1190 |
edepend["RDEPEND"] = "" |
1191 |
edepend["PDEPEND"] = "" |
1192 |
bdeps_optional = False |
1193 |
|
1194 |
if pkg.built and not removal_action: |
1195 |
if self.myopts.get("--with-bdeps", "n") == "y": |
1196 |
# Pull in build time deps as requested, but marked them as |
1197 |
# "optional" since they are not strictly required. This allows |
1198 |
# more freedom in the merge order calculation for solving |
1199 |
# circular dependencies. Don't convert to PDEPEND since that |
1200 |
# could make --with-bdeps=y less effective if it is used to |
1201 |
# adjust merge order to prevent built_with_use() calls from |
1202 |
# failing. |
1203 |
bdeps_optional = True |
1204 |
else: |
1205 |
# built packages do not have build time dependencies. |
1206 |
edepend["DEPEND"] = "" |
1207 |
|
1208 |
if removal_action and self.myopts.get("--with-bdeps", "y") == "n": |
1209 |
edepend["DEPEND"] = "" |
1210 |
|
1211 |
bdeps_root = "/" |
1212 |
root_deps = self.myopts.get("--root-deps") |
1213 |
if root_deps is not None: |
1214 |
if root_deps is True: |
1215 |
bdeps_root = myroot |
1216 |
elif root_deps == "rdeps": |
1217 |
edepend["DEPEND"] = "" |
1218 |
|
1219 |
deps = ( |
1220 |
(bdeps_root, edepend["DEPEND"], |
1221 |
self._priority(buildtime=(not bdeps_optional), |
1222 |
optional=bdeps_optional)), |
1223 |
(myroot, edepend["RDEPEND"], self._priority(runtime=True)), |
1224 |
(myroot, edepend["PDEPEND"], self._priority(runtime_post=True)) |
1225 |
) |
1226 |
|
1227 |
debug = "--debug" in self.myopts |
1228 |
strict = mytype != "installed" |
1229 |
try: |
1230 |
if not strict: |
1231 |
portage.dep._dep_check_strict = False |
1232 |
|
1233 |
for dep_root, dep_string, dep_priority in deps: |
1234 |
if not dep_string: |
1235 |
continue |
1236 |
if debug: |
1237 |
print |
1238 |
print "Parent: ", jbigkey |
1239 |
print "Depstring:", dep_string |
1240 |
print "Priority:", dep_priority |
1241 |
|
1242 |
try: |
1243 |
|
1244 |
dep_string = portage.dep.paren_normalize( |
1245 |
portage.dep.use_reduce( |
1246 |
portage.dep.paren_reduce(dep_string), |
1247 |
uselist=pkg.use.enabled)) |
1248 |
|
1249 |
dep_string = list(self._queue_disjunctive_deps( |
1250 |
pkg, dep_root, dep_priority, dep_string)) |
1251 |
|
1252 |
except portage.exception.InvalidDependString, e: |
1253 |
if pkg.installed: |
1254 |
del e |
1255 |
continue |
1256 |
show_invalid_depstring_notice(pkg, dep_string, str(e)) |
1257 |
return 0 |
1258 |
|
1259 |
if not dep_string: |
1260 |
continue |
1261 |
|
1262 |
dep_string = portage.dep.paren_enclose(dep_string) |
1263 |
|
1264 |
if not self._add_pkg_dep_string( |
1265 |
pkg, dep_root, dep_priority, dep_string, |
1266 |
allow_unsatisfied): |
1267 |
return 0 |
1268 |
|
1269 |
except portage.exception.AmbiguousPackageName, e: |
1270 |
pkgs = e.args[0] |
1271 |
portage.writemsg("\n\n!!! An atom in the dependencies " + \ |
1272 |
"is not fully-qualified. Multiple matches:\n\n", noiselevel=-1) |
1273 |
for cpv in pkgs: |
1274 |
portage.writemsg(" %s\n" % cpv, noiselevel=-1) |
1275 |
portage.writemsg("\n", noiselevel=-1) |
1276 |
if mytype == "binary": |
1277 |
portage.writemsg( |
1278 |
"!!! This binary package cannot be installed: '%s'\n" % \ |
1279 |
mykey, noiselevel=-1) |
1280 |
elif mytype == "ebuild": |
1281 |
portdb = self.roots[myroot].trees["porttree"].dbapi |
1282 |
myebuild, mylocation = portdb.findname2(mykey) |
1283 |
portage.writemsg("!!! This ebuild cannot be installed: " + \ |
1284 |
"'%s'\n" % myebuild, noiselevel=-1) |
1285 |
portage.writemsg("!!! Please notify the package maintainer " + \ |
1286 |
"that atoms must be fully-qualified.\n", noiselevel=-1) |
1287 |
return 0 |
1288 |
finally: |
1289 |
portage.dep._dep_check_strict = True |
1290 |
return 1 |
1291 |
|
1292 |
def _add_pkg_dep_string(self, pkg, dep_root, dep_priority, dep_string, |
1293 |
allow_unsatisfied): |
1294 |
depth = pkg.depth + 1 |
1295 |
debug = "--debug" in self.myopts |
1296 |
strict = pkg.type_name != "installed" |
1297 |
|
1298 |
if debug: |
1299 |
print |
1300 |
print "Parent: ", pkg |
1301 |
print "Depstring:", dep_string |
1302 |
print "Priority:", dep_priority |
1303 |
|
1304 |
try: |
1305 |
selected_atoms = self._select_atoms(dep_root, |
1306 |
dep_string, myuse=pkg.use.enabled, parent=pkg, |
1307 |
strict=strict, priority=dep_priority) |
1308 |
except portage.exception.InvalidDependString, e: |
1309 |
show_invalid_depstring_notice(pkg, dep_string, str(e)) |
1310 |
del e |
1311 |
if pkg.installed: |
1312 |
return 1 |
1313 |
return 0 |
1314 |
|
1315 |
if debug: |
1316 |
print "Candidates:", selected_atoms |
1317 |
|
1318 |
vardb = self.roots[dep_root].trees["vartree"].dbapi |
1319 |
|
1320 |
for atom in selected_atoms: |
1321 |
try: |
1322 |
|
1323 |
atom = portage.dep.Atom(atom) |
1324 |
|
1325 |
mypriority = dep_priority.copy() |
1326 |
if not atom.blocker and vardb.match(atom): |
1327 |
mypriority.satisfied = True |
1328 |
|
1329 |
if not self._add_dep(Dependency(atom=atom, |
1330 |
blocker=atom.blocker, depth=depth, parent=pkg, |
1331 |
priority=mypriority, root=dep_root), |
1332 |
allow_unsatisfied=allow_unsatisfied): |
1333 |
return 0 |
1334 |
|
1335 |
except portage.exception.InvalidAtom, e: |
1336 |
show_invalid_depstring_notice( |
1337 |
pkg, dep_string, str(e)) |
1338 |
del e |
1339 |
if not pkg.installed: |
1340 |
return 0 |
1341 |
|
1342 |
if debug: |
1343 |
print "Exiting...", pkg |
1344 |
|
1345 |
return 1 |
1346 |
|
1347 |
def _queue_disjunctive_deps(self, pkg, dep_root, dep_priority, dep_struct): |
1348 |
""" |
1349 |
Queue disjunctive (virtual and ||) deps in self._dep_disjunctive_stack. |
1350 |
Yields non-disjunctive deps. Raises InvalidDependString when |
1351 |
necessary. |
1352 |
""" |
1353 |
i = 0 |
1354 |
while i < len(dep_struct): |
1355 |
x = dep_struct[i] |
1356 |
if isinstance(x, list): |
1357 |
for y in self._queue_disjunctive_deps( |
1358 |
pkg, dep_root, dep_priority, x): |
1359 |
yield y |
1360 |
elif x == "||": |
1361 |
self._queue_disjunction(pkg, dep_root, dep_priority, |
1362 |
[ x, dep_struct[ i + 1 ] ] ) |
1363 |
i += 1 |
1364 |
else: |
1365 |
try: |
1366 |
x = portage.dep.Atom(x) |
1367 |
except portage.exception.InvalidAtom: |
1368 |
if not pkg.installed: |
1369 |
raise portage.exception.InvalidDependString( |
1370 |
"invalid atom: '%s'" % x) |
1371 |
else: |
1372 |
# Note: Eventually this will check for PROPERTIES=virtual |
1373 |
# or whatever other metadata gets implemented for this |
1374 |
# purpose. |
1375 |
if x.cp.startswith('virtual/'): |
1376 |
self._queue_disjunction( pkg, dep_root, |
1377 |
dep_priority, [ str(x) ] ) |
1378 |
else: |
1379 |
yield str(x) |
1380 |
i += 1 |
1381 |
|
1382 |
def _queue_disjunction(self, pkg, dep_root, dep_priority, dep_struct): |
1383 |
self._dep_disjunctive_stack.append( |
1384 |
(pkg, dep_root, dep_priority, dep_struct)) |
1385 |
|
1386 |
def _pop_disjunction(self, allow_unsatisfied): |
1387 |
""" |
1388 |
Pop one disjunctive dep from self._dep_disjunctive_stack, and use it to |
1389 |
populate self._dep_stack. |
1390 |
""" |
1391 |
pkg, dep_root, dep_priority, dep_struct = \ |
1392 |
self._dep_disjunctive_stack.pop() |
1393 |
dep_string = portage.dep.paren_enclose(dep_struct) |
1394 |
if not self._add_pkg_dep_string( |
1395 |
pkg, dep_root, dep_priority, dep_string, allow_unsatisfied): |
1396 |
return 0 |
1397 |
return 1 |
1398 |
|
1399 |
def _priority(self, **kwargs): |
1400 |
if "remove" in self.myparams: |
1401 |
priority_constructor = UnmergeDepPriority |
1402 |
else: |
1403 |
priority_constructor = DepPriority |
1404 |
return priority_constructor(**kwargs) |
1405 |
|
1406 |
def _dep_expand(self, root_config, atom_without_category): |
1407 |
""" |
1408 |
@param root_config: a root config instance |
1409 |
@type root_config: RootConfig |
1410 |
@param atom_without_category: an atom without a category component |
1411 |
@type atom_without_category: String |
1412 |
@rtype: list |
1413 |
@returns: a list of atoms containing categories (possibly empty) |
1414 |
""" |
1415 |
null_cp = portage.dep_getkey(insert_category_into_atom( |
1416 |
atom_without_category, "null")) |
1417 |
cat, atom_pn = portage.catsplit(null_cp) |
1418 |
|
1419 |
dbs = self._filtered_trees[root_config.root]["dbs"] |
1420 |
categories = set() |
1421 |
for db, pkg_type, built, installed, db_keys in dbs: |
1422 |
for cat in db.categories: |
1423 |
if db.cp_list("%s/%s" % (cat, atom_pn)): |
1424 |
categories.add(cat) |
1425 |
|
1426 |
deps = [] |
1427 |
for cat in categories: |
1428 |
deps.append(insert_category_into_atom( |
1429 |
atom_without_category, cat)) |
1430 |
return deps |
1431 |
|
1432 |
def _have_new_virt(self, root, atom_cp): |
1433 |
ret = False |
1434 |
for db, pkg_type, built, installed, db_keys in \ |
1435 |
self._filtered_trees[root]["dbs"]: |
1436 |
if db.cp_list(atom_cp): |
1437 |
ret = True |
1438 |
break |
1439 |
return ret |
1440 |
|
1441 |
def _iter_atoms_for_pkg(self, pkg): |
1442 |
# TODO: add multiple $ROOT support |
1443 |
if pkg.root != self.target_root: |
1444 |
return |
1445 |
atom_arg_map = self._atom_arg_map |
1446 |
root_config = self.roots[pkg.root] |
1447 |
for atom in self._set_atoms.iterAtomsForPackage(pkg): |
1448 |
atom_cp = portage.dep_getkey(atom) |
1449 |
if atom_cp != pkg.cp and \ |
1450 |
self._have_new_virt(pkg.root, atom_cp): |
1451 |
continue |
1452 |
visible_pkgs = root_config.visible_pkgs.match_pkgs(atom) |
1453 |
visible_pkgs.reverse() # descending order |
1454 |
higher_slot = None |
1455 |
for visible_pkg in visible_pkgs: |
1456 |
if visible_pkg.cp != atom_cp: |
1457 |
continue |
1458 |
if pkg >= visible_pkg: |
1459 |
# This is descending order, and we're not |
1460 |
# interested in any versions <= pkg given. |
1461 |
break |
1462 |
if pkg.slot_atom != visible_pkg.slot_atom: |
1463 |
higher_slot = visible_pkg |
1464 |
break |
1465 |
if higher_slot is not None: |
1466 |
continue |
1467 |
for arg in atom_arg_map[(atom, pkg.root)]: |
1468 |
if isinstance(arg, PackageArg) and \ |
1469 |
arg.package != pkg: |
1470 |
continue |
1471 |
yield arg, atom |
1472 |
|
1473 |
def select_files(self, myfiles): |
1474 |
"""Given a list of .tbz2s, .ebuilds sets, and deps, create the |
1475 |
appropriate depgraph and return a favorite list.""" |
1476 |
debug = "--debug" in self.myopts |
1477 |
root_config = self.roots[self.target_root] |
1478 |
sets = root_config.sets |
1479 |
getSetAtoms = root_config.setconfig.getSetAtoms |
1480 |
myfavorites=[] |
1481 |
myroot = self.target_root |
1482 |
dbs = self._filtered_trees[myroot]["dbs"] |
1483 |
vardb = self.trees[myroot]["vartree"].dbapi |
1484 |
real_vardb = self._trees_orig[myroot]["vartree"].dbapi |
1485 |
portdb = self.trees[myroot]["porttree"].dbapi |
1486 |
bindb = self.trees[myroot]["bintree"].dbapi |
1487 |
pkgsettings = self.pkgsettings[myroot] |
1488 |
args = [] |
1489 |
onlydeps = "--onlydeps" in self.myopts |
1490 |
lookup_owners = [] |
1491 |
for x in myfiles: |
1492 |
ext = os.path.splitext(x)[1] |
1493 |
if ext==".tbz2": |
1494 |
if not os.path.exists(x): |
1495 |
if os.path.exists( |
1496 |
os.path.join(pkgsettings["PKGDIR"], "All", x)): |
1497 |
x = os.path.join(pkgsettings["PKGDIR"], "All", x) |
1498 |
elif os.path.exists( |
1499 |
os.path.join(pkgsettings["PKGDIR"], x)): |
1500 |
x = os.path.join(pkgsettings["PKGDIR"], x) |
1501 |
else: |
1502 |
print "\n\n!!! Binary package '"+str(x)+"' does not exist." |
1503 |
print "!!! Please ensure the tbz2 exists as specified.\n" |
1504 |
return 0, myfavorites |
1505 |
mytbz2=portage.xpak.tbz2(x) |
1506 |
mykey=mytbz2.getelements("CATEGORY")[0]+"/"+os.path.splitext(os.path.basename(x))[0] |
1507 |
if os.path.realpath(x) != \ |
1508 |
os.path.realpath(self.trees[myroot]["bintree"].getname(mykey)): |
1509 |
print colorize("BAD", "\n*** You need to adjust PKGDIR to emerge this package.\n") |
1510 |
return 0, myfavorites |
1511 |
db_keys = list(bindb._aux_cache_keys) |
1512 |
metadata = izip(db_keys, bindb.aux_get(mykey, db_keys)) |
1513 |
pkg = Package(type_name="binary", root_config=root_config, |
1514 |
cpv=mykey, built=True, metadata=metadata, |
1515 |
onlydeps=onlydeps) |
1516 |
self._pkg_cache[pkg] = pkg |
1517 |
args.append(PackageArg(arg=x, package=pkg, |
1518 |
root_config=root_config)) |
1519 |
elif ext==".ebuild": |
1520 |
ebuild_path = portage.util.normalize_path(os.path.abspath(x)) |
1521 |
pkgdir = os.path.dirname(ebuild_path) |
1522 |
tree_root = os.path.dirname(os.path.dirname(pkgdir)) |
1523 |
cp = pkgdir[len(tree_root)+1:] |
1524 |
e = portage.exception.PackageNotFound( |
1525 |
("%s is not in a valid portage tree " + \ |
1526 |
"hierarchy or does not exist") % x) |
1527 |
if not portage.isvalidatom(cp): |
1528 |
raise e |
1529 |
cat = portage.catsplit(cp)[0] |
1530 |
mykey = cat + "/" + os.path.basename(ebuild_path[:-7]) |
1531 |
if not portage.isvalidatom("="+mykey): |
1532 |
raise e |
1533 |
ebuild_path = portdb.findname(mykey) |
1534 |
if ebuild_path: |
1535 |
if ebuild_path != os.path.join(os.path.realpath(tree_root), |
1536 |
cp, os.path.basename(ebuild_path)): |
1537 |
print colorize("BAD", "\n*** You need to adjust PORTDIR or PORTDIR_OVERLAY to emerge this package.\n") |
1538 |
return 0, myfavorites |
1539 |
if mykey not in portdb.xmatch( |
1540 |
"match-visible", portage.dep_getkey(mykey)): |
1541 |
print colorize("BAD", "\n*** You are emerging a masked package. It is MUCH better to use") |
1542 |
print colorize("BAD", "*** /etc/portage/package.* to accomplish this. See portage(5) man") |
1543 |
print colorize("BAD", "*** page for details.") |
1544 |
countdown(int(self.settings["EMERGE_WARNING_DELAY"]), |
1545 |
"Continuing...") |
1546 |
else: |
1547 |
raise portage.exception.PackageNotFound( |
1548 |
"%s is not in a valid portage tree hierarchy or does not exist" % x) |
1549 |
db_keys = list(portdb._aux_cache_keys) |
1550 |
metadata = izip(db_keys, portdb.aux_get(mykey, db_keys)) |
1551 |
pkg = Package(type_name="ebuild", root_config=root_config, |
1552 |
cpv=mykey, metadata=metadata, onlydeps=onlydeps) |
1553 |
pkgsettings.setcpv(pkg) |
1554 |
pkg.metadata["USE"] = pkgsettings["PORTAGE_USE"] |
1555 |
pkg.metadata['CHOST'] = pkgsettings.get('CHOST', '') |
1556 |
self._pkg_cache[pkg] = pkg |
1557 |
args.append(PackageArg(arg=x, package=pkg, |
1558 |
root_config=root_config)) |
1559 |
elif x.startswith(os.path.sep): |
1560 |
if not x.startswith(myroot): |
1561 |
portage.writemsg(("\n\n!!! '%s' does not start with" + \ |
1562 |
" $ROOT.\n") % x, noiselevel=-1) |
1563 |
return 0, [] |
1564 |
# Queue these up since it's most efficient to handle |
1565 |
# multiple files in a single iter_owners() call. |
1566 |
lookup_owners.append(x) |
1567 |
else: |
1568 |
if x in ("system", "world"): |
1569 |
x = SETPREFIX + x |
1570 |
if x.startswith(SETPREFIX): |
1571 |
s = x[len(SETPREFIX):] |
1572 |
if s not in sets: |
1573 |
raise portage.exception.PackageSetNotFound(s) |
1574 |
if s in self._sets: |
1575 |
continue |
1576 |
# Recursively expand sets so that containment tests in |
1577 |
# self._get_parent_sets() properly match atoms in nested |
1578 |
# sets (like if world contains system). |
1579 |
expanded_set = InternalPackageSet( |
1580 |
initial_atoms=getSetAtoms(s)) |
1581 |
self._sets[s] = expanded_set |
1582 |
args.append(SetArg(arg=x, set=expanded_set, |
1583 |
root_config=root_config)) |
1584 |
continue |
1585 |
if not is_valid_package_atom(x): |
1586 |
portage.writemsg("\n\n!!! '%s' is not a valid package atom.\n" % x, |
1587 |
noiselevel=-1) |
1588 |
portage.writemsg("!!! Please check ebuild(5) for full details.\n") |
1589 |
portage.writemsg("!!! (Did you specify a version but forget to prefix with '='?)\n") |
1590 |
return (0,[]) |
1591 |
# Don't expand categories or old-style virtuals here unless |
1592 |
# necessary. Expansion of old-style virtuals here causes at |
1593 |
# least the following problems: |
1594 |
# 1) It's more difficult to determine which set(s) an atom |
1595 |
# came from, if any. |
1596 |
# 2) It takes away freedom from the resolver to choose other |
1597 |
# possible expansions when necessary. |
1598 |
if "/" in x: |
1599 |
args.append(AtomArg(arg=x, atom=x, |
1600 |
root_config=root_config)) |
1601 |
continue |
1602 |
expanded_atoms = self._dep_expand(root_config, x) |
1603 |
installed_cp_set = set() |
1604 |
for atom in expanded_atoms: |
1605 |
atom_cp = portage.dep_getkey(atom) |
1606 |
if vardb.cp_list(atom_cp): |
1607 |
installed_cp_set.add(atom_cp) |
1608 |
|
1609 |
if len(installed_cp_set) > 1: |
1610 |
non_virtual_cps = set() |
1611 |
for atom_cp in installed_cp_set: |
1612 |
if not atom_cp.startswith("virtual/"): |
1613 |
non_virtual_cps.add(atom_cp) |
1614 |
if len(non_virtual_cps) == 1: |
1615 |
installed_cp_set = non_virtual_cps |
1616 |
|
1617 |
if len(expanded_atoms) > 1 and len(installed_cp_set) == 1: |
1618 |
installed_cp = iter(installed_cp_set).next() |
1619 |
expanded_atoms = [atom for atom in expanded_atoms \ |
1620 |
if portage.dep_getkey(atom) == installed_cp] |
1621 |
|
1622 |
if len(expanded_atoms) > 1: |
1623 |
print |
1624 |
print |
1625 |
ambiguous_package_name(x, expanded_atoms, root_config, |
1626 |
self.spinner, self.myopts) |
1627 |
return False, myfavorites |
1628 |
if expanded_atoms: |
1629 |
atom = expanded_atoms[0] |
1630 |
else: |
1631 |
null_atom = insert_category_into_atom(x, "null") |
1632 |
null_cp = portage.dep_getkey(null_atom) |
1633 |
cat, atom_pn = portage.catsplit(null_cp) |
1634 |
virts_p = root_config.settings.get_virts_p().get(atom_pn) |
1635 |
if virts_p: |
1636 |
# Allow the depgraph to choose which virtual. |
1637 |
atom = insert_category_into_atom(x, "virtual") |
1638 |
else: |
1639 |
atom = insert_category_into_atom(x, "null") |
1640 |
|
1641 |
args.append(AtomArg(arg=x, atom=atom, |
1642 |
root_config=root_config)) |
1643 |
|
1644 |
if lookup_owners: |
1645 |
relative_paths = [] |
1646 |
search_for_multiple = False |
1647 |
if len(lookup_owners) > 1: |
1648 |
search_for_multiple = True |
1649 |
|
1650 |
for x in lookup_owners: |
1651 |
if not search_for_multiple and os.path.isdir(x): |
1652 |
search_for_multiple = True |
1653 |
relative_paths.append(x[len(myroot):]) |
1654 |
|
1655 |
owners = set() |
1656 |
for pkg, relative_path in \ |
1657 |
real_vardb._owners.iter_owners(relative_paths): |
1658 |
owners.add(pkg.mycpv) |
1659 |
if not search_for_multiple: |
1660 |
break |
1661 |
|
1662 |
if not owners: |
1663 |
portage.writemsg(("\n\n!!! '%s' is not claimed " + \ |
1664 |
"by any package.\n") % lookup_owners[0], noiselevel=-1) |
1665 |
return 0, [] |
1666 |
|
1667 |
for cpv in owners: |
1668 |
slot = vardb.aux_get(cpv, ["SLOT"])[0] |
1669 |
if not slot: |
1670 |
# portage now masks packages with missing slot, but it's |
1671 |
# possible that one was installed by an older version |
1672 |
atom = portage.cpv_getkey(cpv) |
1673 |
else: |
1674 |
atom = "%s:%s" % (portage.cpv_getkey(cpv), slot) |
1675 |
args.append(AtomArg(arg=atom, atom=atom, |
1676 |
root_config=root_config)) |
1677 |
|
1678 |
if "--update" in self.myopts: |
1679 |
# In some cases, the greedy slots behavior can pull in a slot that |
1680 |
# the user would want to uninstall due to it being blocked by a |
1681 |
# newer version in a different slot. Therefore, it's necessary to |
1682 |
# detect and discard any that should be uninstalled. Each time |
1683 |
# that arguments are updated, package selections are repeated in |
1684 |
# order to ensure consistency with the current arguments: |
1685 |
# |
1686 |
# 1) Initialize args |
1687 |
# 2) Select packages and generate initial greedy atoms |
1688 |
# 3) Update args with greedy atoms |
1689 |
# 4) Select packages and generate greedy atoms again, while |
1690 |
# accounting for any blockers between selected packages |
1691 |
# 5) Update args with revised greedy atoms |
1692 |
|
1693 |
self._set_args(args) |
1694 |
greedy_args = [] |
1695 |
for arg in args: |
1696 |
greedy_args.append(arg) |
1697 |
if not isinstance(arg, AtomArg): |
1698 |
continue |
1699 |
for atom in self._greedy_slots(arg.root_config, arg.atom): |
1700 |
greedy_args.append( |
1701 |
AtomArg(arg=arg.arg, atom=atom, |
1702 |
root_config=arg.root_config)) |
1703 |
|
1704 |
self._set_args(greedy_args) |
1705 |
del greedy_args |
1706 |
|
1707 |
# Revise greedy atoms, accounting for any blockers |
1708 |
# between selected packages. |
1709 |
revised_greedy_args = [] |
1710 |
for arg in args: |
1711 |
revised_greedy_args.append(arg) |
1712 |
if not isinstance(arg, AtomArg): |
1713 |
continue |
1714 |
for atom in self._greedy_slots(arg.root_config, arg.atom, |
1715 |
blocker_lookahead=True): |
1716 |
revised_greedy_args.append( |
1717 |
AtomArg(arg=arg.arg, atom=atom, |
1718 |
root_config=arg.root_config)) |
1719 |
args = revised_greedy_args |
1720 |
del revised_greedy_args |
1721 |
|
1722 |
self._set_args(args) |
1723 |
|
1724 |
myfavorites = set(myfavorites) |
1725 |
for arg in args: |
1726 |
if isinstance(arg, (AtomArg, PackageArg)): |
1727 |
myfavorites.add(arg.atom) |
1728 |
elif isinstance(arg, SetArg): |
1729 |
myfavorites.add(arg.arg) |
1730 |
myfavorites = list(myfavorites) |
1731 |
|
1732 |
pprovideddict = pkgsettings.pprovideddict |
1733 |
if debug: |
1734 |
portage.writemsg("\n", noiselevel=-1) |
1735 |
# Order needs to be preserved since a feature of --nodeps |
1736 |
# is to allow the user to force a specific merge order. |
1737 |
args.reverse() |
1738 |
while args: |
1739 |
arg = args.pop() |
1740 |
for atom in arg.set: |
1741 |
self.spinner.update() |
1742 |
dep = Dependency(atom=atom, onlydeps=onlydeps, |
1743 |
root=myroot, parent=arg) |
1744 |
atom_cp = portage.dep_getkey(atom) |
1745 |
try: |
1746 |
pprovided = pprovideddict.get(portage.dep_getkey(atom)) |
1747 |
if pprovided and portage.match_from_list(atom, pprovided): |
1748 |
# A provided package has been specified on the command line. |
1749 |
self._pprovided_args.append((arg, atom)) |
1750 |
continue |
1751 |
if isinstance(arg, PackageArg): |
1752 |
if not self._add_pkg(arg.package, dep) or \ |
1753 |
not self._create_graph(): |
1754 |
sys.stderr.write(("\n\n!!! Problem resolving " + \ |
1755 |
"dependencies for %s\n") % arg.arg) |
1756 |
return 0, myfavorites |
1757 |
continue |
1758 |
if debug: |
1759 |
portage.writemsg(" Arg: %s\n Atom: %s\n" % \ |
1760 |
(arg, atom), noiselevel=-1) |
1761 |
pkg, existing_node = self._select_package( |
1762 |
myroot, atom, onlydeps=onlydeps) |
1763 |
if not pkg: |
1764 |
if not (isinstance(arg, SetArg) and \ |
1765 |
arg.name in ("system", "world")): |
1766 |
self._unsatisfied_deps_for_display.append( |
1767 |
((myroot, atom), {})) |
1768 |
return 0, myfavorites |
1769 |
self._missing_args.append((arg, atom)) |
1770 |
continue |
1771 |
if atom_cp != pkg.cp: |
1772 |
# For old-style virtuals, we need to repeat the |
1773 |
# package.provided check against the selected package. |
1774 |
expanded_atom = atom.replace(atom_cp, pkg.cp) |
1775 |
pprovided = pprovideddict.get(pkg.cp) |
1776 |
if pprovided and \ |
1777 |
portage.match_from_list(expanded_atom, pprovided): |
1778 |
# A provided package has been |
1779 |
# specified on the command line. |
1780 |
self._pprovided_args.append((arg, atom)) |
1781 |
continue |
1782 |
if pkg.installed and "selective" not in self.myparams: |
1783 |
self._unsatisfied_deps_for_display.append( |
1784 |
((myroot, atom), {})) |
1785 |
# Previous behavior was to bail out in this case, but |
1786 |
# since the dep is satisfied by the installed package, |
1787 |
# it's more friendly to continue building the graph |
1788 |
# and just show a warning message. Therefore, only bail |
1789 |
# out here if the atom is not from either the system or |
1790 |
# world set. |
1791 |
if not (isinstance(arg, SetArg) and \ |
1792 |
arg.name in ("system", "world")): |
1793 |
return 0, myfavorites |
1794 |
|
1795 |
# Add the selected package to the graph as soon as possible |
1796 |
# so that later dep_check() calls can use it as feedback |
1797 |
# for making more consistent atom selections. |
1798 |
if not self._add_pkg(pkg, dep): |
1799 |
if isinstance(arg, SetArg): |
1800 |
sys.stderr.write(("\n\n!!! Problem resolving " + \ |
1801 |
"dependencies for %s from %s\n") % \ |
1802 |
(atom, arg.arg)) |
1803 |
else: |
1804 |
sys.stderr.write(("\n\n!!! Problem resolving " + \ |
1805 |
"dependencies for %s\n") % atom) |
1806 |
return 0, myfavorites |
1807 |
|
1808 |
except portage.exception.MissingSignature, e: |
1809 |
portage.writemsg("\n\n!!! A missing gpg signature is preventing portage from calculating the\n") |
1810 |
portage.writemsg("!!! required dependencies. This is a security feature enabled by the admin\n") |
1811 |
portage.writemsg("!!! to aid in the detection of malicious intent.\n\n") |
1812 |
portage.writemsg("!!! THIS IS A POSSIBLE INDICATION OF TAMPERED FILES -- CHECK CAREFULLY.\n") |
1813 |
portage.writemsg("!!! Affected file: %s\n" % (e), noiselevel=-1) |
1814 |
return 0, myfavorites |
1815 |
except portage.exception.InvalidSignature, e: |
1816 |
portage.writemsg("\n\n!!! An invalid gpg signature is preventing portage from calculating the\n") |
1817 |
portage.writemsg("!!! required dependencies. This is a security feature enabled by the admin\n") |
1818 |
portage.writemsg("!!! to aid in the detection of malicious intent.\n\n") |
1819 |
portage.writemsg("!!! THIS IS A POSSIBLE INDICATION OF TAMPERED FILES -- CHECK CAREFULLY.\n") |
1820 |
portage.writemsg("!!! Affected file: %s\n" % (e), noiselevel=-1) |
1821 |
return 0, myfavorites |
1822 |
except SystemExit, e: |
1823 |
raise # Needed else can't exit |
1824 |
except Exception, e: |
1825 |
print >> sys.stderr, "\n\n!!! Problem in '%s' dependencies." % atom |
1826 |
print >> sys.stderr, "!!!", str(e), getattr(e, "__module__", None) |
1827 |
raise |
1828 |
|
1829 |
# Now that the root packages have been added to the graph, |
1830 |
# process the dependencies. |
1831 |
if not self._create_graph(): |
1832 |
return 0, myfavorites |
1833 |
|
1834 |
missing=0 |
1835 |
if "--usepkgonly" in self.myopts: |
1836 |
for xs in self.digraph.all_nodes(): |
1837 |
if not isinstance(xs, Package): |
1838 |
continue |
1839 |
if len(xs) >= 4 and xs[0] != "binary" and xs[3] == "merge": |
1840 |
if missing == 0: |
1841 |
print |
1842 |
missing += 1 |
1843 |
print "Missing binary for:",xs[2] |
1844 |
|
1845 |
try: |
1846 |
self.altlist() |
1847 |
except self._unknown_internal_error: |
1848 |
return False, myfavorites |
1849 |
|
1850 |
# We're true here unless we are missing binaries. |
1851 |
return (not missing,myfavorites) |
1852 |
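# Illustrative sketch only (not part of the patch): a rough, standalone
# stand-in for the argument triage that select_files() performs above.
# The file names, set prefix default and return labels are hypothetical.
import os

def classify_arg_sketch(x, set_prefix="@"):
	"""Classify a command-line argument the way select_files() does."""
	ext = os.path.splitext(x)[1]
	if ext == ".tbz2":
		return "binary package file"
	if ext == ".ebuild":
		return "ebuild file"
	if x.startswith(os.path.sep):
		return "path owned by an installed package"
	if x in ("system", "world") or x.startswith(set_prefix):
		return "package set"
	return "package atom"

# classify_arg_sketch("world") == "package set"
# classify_arg_sketch("/usr/bin/python") == "path owned by an installed package"
# classify_arg_sketch("app-editors/vim") == "package atom"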
|
1853 |
def _set_args(self, args): |
1854 |
""" |
1855 |
Create the "args" package set from atoms and packages given as |
1856 |
arguments. This method can be called multiple times if necessary. |
1857 |
The package selection cache is automatically invalidated, since |
1858 |
arguments influence package selections. |
1859 |
""" |
1860 |
args_set = self._sets["args"] |
1861 |
args_set.clear() |
1862 |
for arg in args: |
1863 |
if not isinstance(arg, (AtomArg, PackageArg)): |
1864 |
continue |
1865 |
atom = arg.atom |
1866 |
if atom in args_set: |
1867 |
continue |
1868 |
args_set.add(atom) |
1869 |
|
1870 |
self._set_atoms.clear() |
1871 |
self._set_atoms.update(chain(*self._sets.itervalues())) |
1872 |
atom_arg_map = self._atom_arg_map |
1873 |
atom_arg_map.clear() |
1874 |
for arg in args: |
1875 |
for atom in arg.set: |
1876 |
atom_key = (atom, arg.root_config.root) |
1877 |
refs = atom_arg_map.get(atom_key) |
1878 |
if refs is None: |
1879 |
refs = [] |
1880 |
atom_arg_map[atom_key] = refs |
1881 |
if arg not in refs: |
1882 |
refs.append(arg) |
1883 |
|
1884 |
# Invalidate the package selection cache, since |
1885 |
# arguments influence package selections. |
1886 |
self._highest_pkg_cache.clear() |
1887 |
for trees in self._filtered_trees.itervalues(): |
1888 |
trees["porttree"].dbapi._clear_cache() |
1889 |
|
1890 |
def _greedy_slots(self, root_config, atom, blocker_lookahead=False): |
1891 |
""" |
1892 |
Return a list of slot atoms corresponding to installed slots that |
1893 |
differ from the slot of the highest visible match. When |
1894 |
blocker_lookahead is True, slot atoms that would trigger a blocker |
1895 |
conflict are automatically discarded, potentially allowing automatic |
1896 |
uninstallation of older slots when appropriate. |
1897 |
""" |
1898 |
highest_pkg, in_graph = self._select_package(root_config.root, atom) |
1899 |
if highest_pkg is None: |
1900 |
return [] |
1901 |
vardb = root_config.trees["vartree"].dbapi |
1902 |
slots = set() |
1903 |
for cpv in vardb.match(atom): |
1904 |
# don't mix new virtuals with old virtuals |
1905 |
if portage.cpv_getkey(cpv) == highest_pkg.cp: |
1906 |
slots.add(vardb.aux_get(cpv, ["SLOT"])[0]) |
1907 |
|
1908 |
slots.add(highest_pkg.metadata["SLOT"]) |
1909 |
if len(slots) == 1: |
1910 |
return [] |
1911 |
greedy_pkgs = [] |
1912 |
slots.remove(highest_pkg.metadata["SLOT"]) |
1913 |
while slots: |
1914 |
slot = slots.pop() |
1915 |
slot_atom = portage.dep.Atom("%s:%s" % (highest_pkg.cp, slot)) |
1916 |
pkg, in_graph = self._select_package(root_config.root, slot_atom) |
1917 |
if pkg is not None and \ |
1918 |
pkg.cp == highest_pkg.cp and pkg < highest_pkg: |
1919 |
greedy_pkgs.append(pkg) |
1920 |
if not greedy_pkgs: |
1921 |
return [] |
1922 |
if not blocker_lookahead: |
1923 |
return [pkg.slot_atom for pkg in greedy_pkgs] |
1924 |
|
1925 |
blockers = {} |
1926 |
blocker_dep_keys = ["DEPEND", "PDEPEND", "RDEPEND"] |
1927 |
for pkg in greedy_pkgs + [highest_pkg]: |
1928 |
dep_str = " ".join(pkg.metadata[k] for k in blocker_dep_keys) |
1929 |
try: |
1930 |
atoms = self._select_atoms( |
1931 |
pkg.root, dep_str, pkg.use.enabled, |
1932 |
parent=pkg, strict=True) |
1933 |
except portage.exception.InvalidDependString: |
1934 |
continue |
1935 |
blocker_atoms = (x for x in atoms if x.blocker) |
1936 |
blockers[pkg] = InternalPackageSet(initial_atoms=blocker_atoms) |
1937 |
|
1938 |
if highest_pkg not in blockers: |
1939 |
return [] |
1940 |
|
1941 |
# filter packages with invalid deps |
1942 |
greedy_pkgs = [pkg for pkg in greedy_pkgs if pkg in blockers] |
1943 |
|
1944 |
# filter packages that conflict with highest_pkg |
1945 |
greedy_pkgs = [pkg for pkg in greedy_pkgs if not \ |
1946 |
(blockers[highest_pkg].findAtomForPackage(pkg) or \ |
1947 |
blockers[pkg].findAtomForPackage(highest_pkg))] |
1948 |
|
1949 |
if not greedy_pkgs: |
1950 |
return [] |
1951 |
|
1952 |
# If two packages conflict, discard the lower version. |
1953 |
discard_pkgs = set() |
1954 |
greedy_pkgs.sort(reverse=True) |
1955 |
for i in xrange(len(greedy_pkgs) - 1): |
1956 |
pkg1 = greedy_pkgs[i] |
1957 |
if pkg1 in discard_pkgs: |
1958 |
continue |
1959 |
for j in xrange(i + 1, len(greedy_pkgs)): |
1960 |
pkg2 = greedy_pkgs[j] |
1961 |
if pkg2 in discard_pkgs: |
1962 |
continue |
1963 |
if blockers[pkg1].findAtomForPackage(pkg2) or \ |
1964 |
blockers[pkg2].findAtomForPackage(pkg1): |
1965 |
# pkg1 > pkg2 |
1966 |
discard_pkgs.add(pkg2) |
1967 |
|
1968 |
return [pkg.slot_atom for pkg in greedy_pkgs \ |
1969 |
if pkg not in discard_pkgs] |
1970 |
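# Illustrative sketch only (not part of the patch): the effect of
# _greedy_slots() without blocker lookahead, reduced to plain data.
# The package name and slots below are hypothetical.
def greedy_slot_atoms_sketch(cp, installed_slots, highest_visible_slot):
	"""Return slot atoms for installed slots other than the slot
	occupied by the highest visible match."""
	return ["%s:%s" % (cp, slot)
		for slot in sorted(installed_slots) if slot != highest_visible_slot]

# With slots 2.4 and 2.6 installed and 2.6 as the highest visible match:
# greedy_slot_atoms_sketch("dev-lang/python", ["2.4", "2.6"], "2.6")
# returns ["dev-lang/python:2.4"].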
|
1971 |
def _select_atoms_from_graph(self, *pargs, **kwargs): |
1972 |
""" |
1973 |
Prefer atoms matching packages that have already been |
1974 |
added to the graph or those that are installed and have |
1975 |
not been scheduled for replacement. |
1976 |
""" |
1977 |
kwargs["trees"] = self._graph_trees |
1978 |
return self._select_atoms_highest_available(*pargs, **kwargs) |
1979 |
|
1980 |
def _select_atoms_highest_available(self, root, depstring, |
1981 |
myuse=None, parent=None, strict=True, trees=None, priority=None): |
1982 |
"""This will raise InvalidDependString if necessary. If trees is |
1983 |
None then self._filtered_trees is used.""" |
1984 |
pkgsettings = self.pkgsettings[root] |
1985 |
if trees is None: |
1986 |
trees = self._filtered_trees |
1987 |
if not getattr(priority, "buildtime", False): |
1988 |
# The parent should only be passed to dep_check() for buildtime |
1989 |
# dependencies since that's the only case when it's appropriate |
1990 |
# to trigger the circular dependency avoidance code which uses it. |
1991 |
# It's important not to trigger the same circular dependency |
1992 |
# avoidance code for runtime dependencies since it's not needed |
1993 |
# and it can promote an incorrect package choice. |
1994 |
parent = None |
1995 |
if True: |
1996 |
try: |
1997 |
if parent is not None: |
1998 |
trees[root]["parent"] = parent |
1999 |
if not strict: |
2000 |
portage.dep._dep_check_strict = False |
2001 |
mycheck = portage.dep_check(depstring, None, |
2002 |
pkgsettings, myuse=myuse, |
2003 |
myroot=root, trees=trees) |
2004 |
finally: |
2005 |
if parent is not None: |
2006 |
trees[root].pop("parent") |
2007 |
portage.dep._dep_check_strict = True |
2008 |
if not mycheck[0]: |
2009 |
raise portage.exception.InvalidDependString(mycheck[1]) |
2010 |
selected_atoms = mycheck[1] |
2011 |
return selected_atoms |
2012 |
|
2013 |
def _show_unsatisfied_dep(self, root, atom, myparent=None, arg=None): |
2014 |
atom = portage.dep.Atom(atom) |
2015 |
atom_set = InternalPackageSet(initial_atoms=(atom,)) |
2016 |
atom_without_use = atom |
2017 |
if atom.use: |
2018 |
atom_without_use = portage.dep.remove_slot(atom) |
2019 |
if atom.slot: |
2020 |
atom_without_use += ":" + atom.slot |
2021 |
atom_without_use = portage.dep.Atom(atom_without_use) |
2022 |
xinfo = '"%s"' % atom |
2023 |
if arg: |
2024 |
xinfo='"%s"' % arg |
2025 |
# Discard null/ from failed cpv_expand category expansion. |
2026 |
xinfo = xinfo.replace("null/", "") |
2027 |
masked_packages = [] |
2028 |
missing_use = [] |
2029 |
masked_pkg_instances = set() |
2030 |
missing_licenses = [] |
2031 |
have_eapi_mask = False |
2032 |
pkgsettings = self.pkgsettings[root] |
2033 |
implicit_iuse = pkgsettings._get_implicit_iuse() |
2034 |
root_config = self.roots[root] |
2035 |
portdb = self.roots[root].trees["porttree"].dbapi |
2036 |
dbs = self._filtered_trees[root]["dbs"] |
2037 |
for db, pkg_type, built, installed, db_keys in dbs: |
2038 |
if installed: |
2039 |
continue |
2040 |
match = db.match |
2041 |
if hasattr(db, "xmatch"): |
2042 |
cpv_list = db.xmatch("match-all", atom_without_use) |
2043 |
else: |
2044 |
cpv_list = db.match(atom_without_use) |
2045 |
# descending order |
2046 |
cpv_list.reverse() |
2047 |
for cpv in cpv_list: |
2048 |
metadata, mreasons = get_mask_info(root_config, cpv, |
2049 |
pkgsettings, db, pkg_type, built, installed, db_keys) |
2050 |
if metadata is not None: |
2051 |
pkg = Package(built=built, cpv=cpv, |
2052 |
installed=installed, metadata=metadata, |
2053 |
root_config=root_config) |
2054 |
if pkg.cp != atom.cp: |
2055 |
# A cpv can be returned from dbapi.match() as an |
2056 |
# old-style virtual match even in cases when the |
2057 |
# package does not actually PROVIDE the virtual. |
2058 |
# Filter out any such false matches here. |
2059 |
if not atom_set.findAtomForPackage(pkg): |
2060 |
continue |
2061 |
if mreasons: |
2062 |
masked_pkg_instances.add(pkg) |
2063 |
if atom.use: |
2064 |
missing_use.append(pkg) |
2065 |
if not mreasons: |
2066 |
continue |
2067 |
masked_packages.append( |
2068 |
(root_config, pkgsettings, cpv, metadata, mreasons)) |
2069 |
|
2070 |
missing_use_reasons = [] |
2071 |
missing_iuse_reasons = [] |
2072 |
for pkg in missing_use: |
2073 |
use = pkg.use.enabled |
2074 |
iuse = implicit_iuse.union(re.escape(x) for x in pkg.iuse.all) |
2075 |
iuse_re = re.compile("^(%s)$" % "|".join(iuse)) |
2076 |
missing_iuse = [] |
2077 |
for x in atom.use.required: |
2078 |
if iuse_re.match(x) is None: |
2079 |
missing_iuse.append(x) |
2080 |
mreasons = [] |
2081 |
if missing_iuse: |
2082 |
mreasons.append("Missing IUSE: %s" % " ".join(missing_iuse)) |
2083 |
missing_iuse_reasons.append((pkg, mreasons)) |
2084 |
else: |
2085 |
need_enable = sorted(atom.use.enabled.difference(use)) |
2086 |
need_disable = sorted(atom.use.disabled.intersection(use)) |
2087 |
if need_enable or need_disable: |
2088 |
changes = [] |
2089 |
changes.extend(colorize("red", "+" + x) \ |
2090 |
for x in need_enable) |
2091 |
changes.extend(colorize("blue", "-" + x) \ |
2092 |
for x in need_disable) |
2093 |
mreasons.append("Change USE: %s" % " ".join(changes)) |
2094 |
missing_use_reasons.append((pkg, mreasons)) |
2095 |
|
2096 |
unmasked_use_reasons = [(pkg, mreasons) for (pkg, mreasons) \ |
2097 |
in missing_use_reasons if pkg not in masked_pkg_instances] |
2098 |
|
2099 |
unmasked_iuse_reasons = [(pkg, mreasons) for (pkg, mreasons) \ |
2100 |
in missing_iuse_reasons if pkg not in masked_pkg_instances] |
2101 |
|
2102 |
show_missing_use = False |
2103 |
if unmasked_use_reasons: |
2104 |
# Only show the latest version. |
2105 |
show_missing_use = unmasked_use_reasons[:1] |
2106 |
elif unmasked_iuse_reasons: |
2107 |
if missing_use_reasons: |
2108 |
# All packages with required IUSE are masked, |
2109 |
# so display a normal masking message. |
2110 |
pass |
2111 |
else: |
2112 |
show_missing_use = unmasked_iuse_reasons |
2113 |
|
2114 |
if show_missing_use: |
2115 |
print "\nemerge: there are no ebuilds built with USE flags to satisfy "+green(xinfo)+"." |
2116 |
print "!!! One of the following packages is required to complete your request:" |
2117 |
for pkg, mreasons in show_missing_use: |
2118 |
print "- "+pkg.cpv+" ("+", ".join(mreasons)+")" |
2119 |
|
2120 |
elif masked_packages: |
2121 |
print "\n!!! " + \ |
2122 |
colorize("BAD", "All ebuilds that could satisfy ") + \ |
2123 |
colorize("INFORM", xinfo) + \ |
2124 |
colorize("BAD", " have been masked.") |
2125 |
print "!!! One of the following masked packages is required to complete your request:" |
2126 |
have_eapi_mask = show_masked_packages(masked_packages) |
2127 |
if have_eapi_mask: |
2128 |
print |
2129 |
msg = ("The current version of portage supports " + \ |
2130 |
"EAPI '%s'. You must upgrade to a newer version" + \ |
2131 |
" of portage before EAPI masked packages can" + \ |
2132 |
" be installed.") % portage.const.EAPI |
2133 |
from textwrap import wrap |
2134 |
for line in wrap(msg, 75): |
2135 |
print line |
2136 |
print |
2137 |
show_mask_docs() |
2138 |
else: |
2139 |
print "\nemerge: there are no ebuilds to satisfy "+green(xinfo)+"." |
2140 |
|
2141 |
# Show parent nodes and the argument that pulled them in. |
2142 |
traversed_nodes = set() |
2143 |
node = myparent |
2144 |
msg = [] |
2145 |
while node is not None: |
2146 |
traversed_nodes.add(node) |
2147 |
msg.append('(dependency required by "%s" [%s])' % \ |
2148 |
(colorize('INFORM', str(node.cpv)), node.type_name)) |
2149 |
# When traversing to parents, prefer arguments over packages |
2150 |
# since arguments are root nodes. Never traverse the same |
2151 |
# package twice, in order to prevent an infinite loop. |
2152 |
selected_parent = None |
2153 |
for parent in self.digraph.parent_nodes(node): |
2154 |
if isinstance(parent, DependencyArg): |
2155 |
msg.append('(dependency required by "%s" [argument])' % \ |
2156 |
(colorize('INFORM', str(parent)))) |
2157 |
selected_parent = None |
2158 |
break |
2159 |
if parent not in traversed_nodes: |
2160 |
selected_parent = parent |
2161 |
node = selected_parent |
2162 |
for line in msg: |
2163 |
print line |
2164 |
|
2165 |
print |
2166 |
|
2167 |
def _select_pkg_highest_available(self, root, atom, onlydeps=False): |
2168 |
cache_key = (root, atom, onlydeps) |
2169 |
ret = self._highest_pkg_cache.get(cache_key) |
2170 |
if ret is not None: |
2171 |
pkg, existing = ret |
2172 |
if pkg and not existing: |
2173 |
existing = self._slot_pkg_map[root].get(pkg.slot_atom) |
2174 |
if existing and existing == pkg: |
2175 |
# Update the cache to reflect that the |
2176 |
# package has been added to the graph. |
2177 |
ret = pkg, pkg |
2178 |
self._highest_pkg_cache[cache_key] = ret |
2179 |
return ret |
2180 |
ret = self._select_pkg_highest_available_imp(root, atom, onlydeps=onlydeps) |
2181 |
self._highest_pkg_cache[cache_key] = ret |
2182 |
pkg, existing = ret |
2183 |
if pkg is not None: |
2184 |
settings = pkg.root_config.settings |
2185 |
if visible(settings, pkg) and not (pkg.installed and \ |
2186 |
settings._getMissingKeywords(pkg.cpv, pkg.metadata)): |
2187 |
pkg.root_config.visible_pkgs.cpv_inject(pkg) |
2188 |
return ret |
2189 |
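# Illustrative sketch only (not part of the patch): the caching pattern
# used above, reduced to a generic memoizer keyed on (root, atom,
# onlydeps).  Like _highest_pkg_cache, the cache has to be cleared
# whenever anything that influences selection changes (see _set_args).
def memoize_selection_sketch(select_func):
	cache = {}
	def wrapper(root, atom, onlydeps=False):
		key = (root, atom, onlydeps)
		if key in cache:
			return cache[key]
		result = select_func(root, atom, onlydeps)
		cache[key] = result
		return result
	wrapper.clear = cache.clear
	return wrapper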
|
2190 |
def _select_pkg_highest_available_imp(self, root, atom, onlydeps=False): |
2191 |
root_config = self.roots[root] |
2192 |
pkgsettings = self.pkgsettings[root] |
2193 |
dbs = self._filtered_trees[root]["dbs"] |
2194 |
vardb = self.roots[root].trees["vartree"].dbapi |
2195 |
portdb = self.roots[root].trees["porttree"].dbapi |
2196 |
# List of acceptable packages, ordered by type preference. |
2197 |
matched_packages = [] |
2198 |
highest_version = None |
2199 |
if not isinstance(atom, portage.dep.Atom): |
2200 |
atom = portage.dep.Atom(atom) |
2201 |
atom_cp = atom.cp |
2202 |
atom_set = InternalPackageSet(initial_atoms=(atom,)) |
2203 |
existing_node = None |
2204 |
myeb = None |
2205 |
usepkgonly = "--usepkgonly" in self.myopts |
2206 |
empty = "empty" in self.myparams |
2207 |
selective = "selective" in self.myparams |
2208 |
reinstall = False |
2209 |
noreplace = "--noreplace" in self.myopts |
2210 |
# Behavior of the "selective" parameter depends on |
2211 |
# whether or not a package matches an argument atom. |
2212 |
# If an installed package provides an old-style |
2213 |
# virtual that is no longer provided by an available |
2214 |
# package, the installed package may match an argument |
2215 |
# atom even though none of the available packages do. |
2216 |
# Therefore, "selective" logic does not consider |
2217 |
# whether or not an installed package matches an |
2218 |
# argument atom. It only considers whether or not |
2219 |
# available packages match argument atoms, which is |
2220 |
# represented by the found_available_arg flag. |
2221 |
found_available_arg = False |
2222 |
for find_existing_node in True, False: |
2223 |
if existing_node: |
2224 |
break |
2225 |
for db, pkg_type, built, installed, db_keys in dbs: |
2226 |
if existing_node: |
2227 |
break |
2228 |
if installed and not find_existing_node: |
2229 |
want_reinstall = reinstall or empty or \ |
2230 |
(found_available_arg and not selective) |
2231 |
if want_reinstall and matched_packages: |
2232 |
continue |
2233 |
if hasattr(db, "xmatch"): |
2234 |
cpv_list = db.xmatch("match-all", atom) |
2235 |
else: |
2236 |
cpv_list = db.match(atom) |
2237 |
|
2238 |
# USE=multislot can make an installed package appear as if |
2239 |
# it doesn't satisfy a slot dependency. Rebuilding the ebuild |
2240 |
# won't do any good as long as USE=multislot is enabled since |
2241 |
# the newly built package still won't have the expected slot. |
2242 |
# Therefore, assume that such SLOT dependencies are already |
2243 |
# satisfied rather than forcing a rebuild. |
2244 |
if installed and not cpv_list and atom.slot: |
2245 |
for cpv in db.match(atom.cp): |
2246 |
slot_available = False |
2247 |
for other_db, other_type, other_built, \ |
2248 |
other_installed, other_keys in dbs: |
2249 |
try: |
2250 |
if atom.slot == \ |
2251 |
other_db.aux_get(cpv, ["SLOT"])[0]: |
2252 |
slot_available = True |
2253 |
break |
2254 |
except KeyError: |
2255 |
pass |
2256 |
if not slot_available: |
2257 |
continue |
2258 |
inst_pkg = self._pkg(cpv, "installed", |
2259 |
root_config, installed=installed) |
2260 |
# Remove the slot from the atom and verify that |
2261 |
# the package matches the resulting atom. |
2262 |
atom_without_slot = portage.dep.remove_slot(atom) |
2263 |
if atom.use: |
2264 |
atom_without_slot += str(atom.use) |
2265 |
atom_without_slot = portage.dep.Atom(atom_without_slot) |
2266 |
if portage.match_from_list( |
2267 |
atom_without_slot, [inst_pkg]): |
2268 |
cpv_list = [inst_pkg.cpv] |
2269 |
break |
2270 |
|
2271 |
if not cpv_list: |
2272 |
continue |
2273 |
pkg_status = "merge" |
2274 |
if installed or onlydeps: |
2275 |
pkg_status = "nomerge" |
2276 |
# descending order |
2277 |
cpv_list.reverse() |
2278 |
for cpv in cpv_list: |
2279 |
# Make --noreplace take precedence over --newuse. |
2280 |
if not installed and noreplace and \ |
2281 |
cpv in vardb.match(atom): |
2282 |
# If the installed version is masked, it may |
2283 |
# be necessary to look at lower versions, |
2284 |
# in case there is a visible downgrade. |
2285 |
continue |
2286 |
reinstall_for_flags = None |
2287 |
cache_key = (pkg_type, root, cpv, pkg_status) |
2288 |
calculated_use = True |
2289 |
pkg = self._pkg_cache.get(cache_key) |
2290 |
if pkg is None: |
2291 |
calculated_use = False |
2292 |
try: |
2293 |
metadata = izip(db_keys, db.aux_get(cpv, db_keys)) |
2294 |
except KeyError: |
2295 |
continue |
2296 |
pkg = Package(built=built, cpv=cpv, |
2297 |
installed=installed, metadata=metadata, |
2298 |
onlydeps=onlydeps, root_config=root_config, |
2299 |
type_name=pkg_type) |
2300 |
metadata = pkg.metadata |
2301 |
if not built: |
2302 |
metadata['CHOST'] = pkgsettings.get('CHOST', '') |
2303 |
if not built and ("?" in metadata["LICENSE"] or \ |
2304 |
"?" in metadata["PROVIDE"]): |
2305 |
# This is avoided whenever possible because |
2306 |
# it's expensive. It only needs to be done here |
2307 |
# if it has an effect on visibility. |
2308 |
pkgsettings.setcpv(pkg) |
2309 |
metadata["USE"] = pkgsettings["PORTAGE_USE"] |
2310 |
calculated_use = True |
2311 |
self._pkg_cache[pkg] = pkg |
2312 |
|
2313 |
if not installed or (built and matched_packages): |
2314 |
# Only enforce visibility on installed packages |
2315 |
# if there is at least one other visible package |
2316 |
# available. By filtering installed masked packages |
2317 |
# here, packages that have been masked since they |
2318 |
# were installed can be automatically downgraded |
2319 |
# to an unmasked version. |
2320 |
try: |
2321 |
if not visible(pkgsettings, pkg): |
2322 |
continue |
2323 |
except portage.exception.InvalidDependString: |
2324 |
if not installed: |
2325 |
continue |
2326 |
|
2327 |
# Enable upgrade or downgrade to a version |
2328 |
# with visible KEYWORDS when the installed |
2329 |
# version is masked by KEYWORDS, but never |
2330 |
# reinstall the same exact version only due |
2331 |
# to a KEYWORDS mask. |
2332 |
if built and matched_packages: |
2333 |
|
2334 |
different_version = None |
2335 |
for avail_pkg in matched_packages: |
2336 |
if not portage.dep.cpvequal( |
2337 |
pkg.cpv, avail_pkg.cpv): |
2338 |
different_version = avail_pkg |
2339 |
break |
2340 |
if different_version is not None: |
2341 |
|
2342 |
if installed and \ |
2343 |
pkgsettings._getMissingKeywords( |
2344 |
pkg.cpv, pkg.metadata): |
2345 |
continue |
2346 |
|
2347 |
# If the ebuild no longer exists or its |
2348 |
# keywords have been dropped, reject built |
2349 |
# instances (installed or binary). |
2350 |
# If --usepkgonly is enabled, assume that |
2351 |
# the ebuild status should be ignored. |
2352 |
if not usepkgonly: |
2353 |
try: |
2354 |
pkg_eb = self._pkg( |
2355 |
pkg.cpv, "ebuild", root_config) |
2356 |
except portage.exception.PackageNotFound: |
2357 |
continue |
2358 |
else: |
2359 |
if not visible(pkgsettings, pkg_eb): |
2360 |
continue |
2361 |
|
2362 |
if not pkg.built and not calculated_use: |
2363 |
# This is avoided whenever possible because |
2364 |
# it's expensive. |
2365 |
pkgsettings.setcpv(pkg) |
2366 |
pkg.metadata["USE"] = pkgsettings["PORTAGE_USE"] |
2367 |
|
2368 |
if pkg.cp != atom.cp: |
2369 |
# A cpv can be returned from dbapi.match() as an |
2370 |
# old-style virtual match even in cases when the |
2371 |
# package does not actually PROVIDE the virtual. |
2372 |
# Filter out any such false matches here. |
2373 |
if not atom_set.findAtomForPackage(pkg): |
2374 |
continue |
2375 |
|
2376 |
myarg = None |
2377 |
if root == self.target_root: |
2378 |
try: |
2379 |
# Ebuild USE must have been calculated prior |
2380 |
# to this point, in case atoms have USE deps. |
2381 |
myarg = self._iter_atoms_for_pkg(pkg).next() |
2382 |
except StopIteration: |
2383 |
pass |
2384 |
except portage.exception.InvalidDependString: |
2385 |
if not installed: |
2386 |
# masked by corruption |
2387 |
continue |
2388 |
if not installed and myarg: |
2389 |
found_available_arg = True |
2390 |
|
2391 |
if atom.use and not pkg.built: |
2392 |
use = pkg.use.enabled |
2393 |
if atom.use.enabled.difference(use): |
2394 |
continue |
2395 |
if atom.use.disabled.intersection(use): |
2396 |
continue |
2397 |
if pkg.cp == atom_cp: |
2398 |
if highest_version is None: |
2399 |
highest_version = pkg |
2400 |
elif pkg > highest_version: |
2401 |
highest_version = pkg |
2402 |
# At this point, we've found the highest visible |
2403 |
# match from the current repo. Any lower versions |
2404 |
# from this repo are ignored, so the loop |
2405 |
# will always end with a break statement below |
2406 |
# this point. |
2407 |
if find_existing_node: |
2408 |
e_pkg = self._slot_pkg_map[root].get(pkg.slot_atom) |
2409 |
if not e_pkg: |
2410 |
break |
2411 |
if portage.dep.match_from_list(atom, [e_pkg]): |
2412 |
if highest_version and \ |
2413 |
e_pkg.cp == atom_cp and \ |
2414 |
e_pkg < highest_version and \ |
2415 |
e_pkg.slot_atom != highest_version.slot_atom: |
2416 |
# There is a higher version available in a |
2417 |
# different slot, so this existing node is |
2418 |
# irrelevant. |
2419 |
pass |
2420 |
else: |
2421 |
matched_packages.append(e_pkg) |
2422 |
existing_node = e_pkg |
2423 |
break |
2424 |
# Compare built package to current config and |
2425 |
# reject the built package if necessary. |
2426 |
if built and not installed and \ |
2427 |
("--newuse" in self.myopts or \ |
2428 |
"--reinstall" in self.myopts): |
2429 |
iuses = pkg.iuse.all |
2430 |
old_use = pkg.use.enabled |
2431 |
if myeb: |
2432 |
pkgsettings.setcpv(myeb) |
2433 |
else: |
2434 |
pkgsettings.setcpv(pkg) |
2435 |
now_use = pkgsettings["PORTAGE_USE"].split() |
2436 |
forced_flags = set() |
2437 |
forced_flags.update(pkgsettings.useforce) |
2438 |
forced_flags.update(pkgsettings.usemask) |
2439 |
cur_iuse = iuses |
2440 |
if myeb and not usepkgonly: |
2441 |
cur_iuse = myeb.iuse.all |
2442 |
if self._reinstall_for_flags(forced_flags, |
2443 |
old_use, iuses, |
2444 |
now_use, cur_iuse): |
2445 |
break |
2446 |
# Compare current config to installed package |
2447 |
# and do not reinstall if possible. |
2448 |
if not installed and \ |
2449 |
("--newuse" in self.myopts or \ |
2450 |
"--reinstall" in self.myopts) and \ |
2451 |
cpv in vardb.match(atom): |
2452 |
pkgsettings.setcpv(pkg) |
2453 |
forced_flags = set() |
2454 |
forced_flags.update(pkgsettings.useforce) |
2455 |
forced_flags.update(pkgsettings.usemask) |
2456 |
old_use = vardb.aux_get(cpv, ["USE"])[0].split() |
2457 |
old_iuse = set(filter_iuse_defaults( |
2458 |
vardb.aux_get(cpv, ["IUSE"])[0].split())) |
2459 |
cur_use = pkg.use.enabled |
2460 |
cur_iuse = pkg.iuse.all |
2461 |
reinstall_for_flags = \ |
2462 |
self._reinstall_for_flags( |
2463 |
forced_flags, old_use, old_iuse, |
2464 |
cur_use, cur_iuse) |
2465 |
if reinstall_for_flags: |
2466 |
reinstall = True |
2467 |
if not built: |
2468 |
myeb = pkg |
2469 |
matched_packages.append(pkg) |
2470 |
if reinstall_for_flags: |
2471 |
self._reinstall_nodes[pkg] = \ |
2472 |
reinstall_for_flags |
2473 |
break |
2474 |
|
2475 |
if not matched_packages: |
2476 |
return None, None |
2477 |
|
2478 |
if "--debug" in self.myopts: |
2479 |
for pkg in matched_packages: |
2480 |
portage.writemsg("%s %s\n" % \ |
2481 |
((pkg.type_name + ":").rjust(10), pkg.cpv), noiselevel=-1) |
2482 |
|
2483 |
# Filter out any old-style virtual matches if they are |
2484 |
# mixed with new-style virtual matches. |
2485 |
cp = portage.dep_getkey(atom) |
2486 |
if len(matched_packages) > 1 and \ |
2487 |
"virtual" == portage.catsplit(cp)[0]: |
2488 |
for pkg in matched_packages: |
2489 |
if pkg.cp != cp: |
2490 |
continue |
2491 |
# Got a new-style virtual, so filter |
2492 |
# out any old-style virtuals. |
2493 |
matched_packages = [pkg for pkg in matched_packages \ |
2494 |
if pkg.cp == cp] |
2495 |
break |
2496 |
|
2497 |
if len(matched_packages) > 1: |
2498 |
bestmatch = portage.best( |
2499 |
[pkg.cpv for pkg in matched_packages]) |
2500 |
matched_packages = [pkg for pkg in matched_packages \ |
2501 |
if portage.dep.cpvequal(pkg.cpv, bestmatch)] |
2502 |
|
2503 |
# ordered by type preference ("ebuild" type is the last resort) |
2504 |
return matched_packages[-1], existing_node |
2505 |
|
2506 |
def _select_pkg_from_graph(self, root, atom, onlydeps=False): |
2507 |
""" |
2508 |
Select packages that have already been added to the graph or |
2509 |
those that are installed and have not been scheduled for |
2510 |
replacement. |
2511 |
""" |
2512 |
graph_db = self._graph_trees[root]["porttree"].dbapi |
2513 |
matches = graph_db.match_pkgs(atom) |
2514 |
if not matches: |
2515 |
return None, None |
2516 |
pkg = matches[-1] # highest match |
2517 |
in_graph = self._slot_pkg_map[root].get(pkg.slot_atom) |
2518 |
return pkg, in_graph |
2519 |
|
2520 |
def _complete_graph(self): |
2521 |
""" |
2522 |
Add any deep dependencies of required sets (args, system, world) that |
2523 |
have not been pulled into the graph yet. This ensures that the graph |
2524 |
is consistent such that initially satisfied deep dependencies are not |
2525 |
broken in the new graph. Initially unsatisfied dependencies are |
2526 |
irrelevant since we only want to avoid breaking dependencies that are |
2527 |
initially satisfied. |
2528 |
|
2529 |
Since this method can consume enough time to disturb users, it is |
2530 |
currently only enabled by the --complete-graph option. |
2531 |
""" |
2532 |
if "--buildpkgonly" in self.myopts or \ |
2533 |
"recurse" not in self.myparams: |
2534 |
return 1 |
2535 |
|
2536 |
if "complete" not in self.myparams: |
2537 |
# Skip this to avoid consuming enough time to disturb users. |
2538 |
return 1 |
2539 |
|
2540 |
# Put the depgraph into a mode that causes it to only |
2541 |
# select packages that have already been added to the |
2542 |
# graph or those that are installed and have not been |
2543 |
# scheduled for replacement. Also, toggle the "deep" |
2544 |
# parameter so that all dependencies are traversed and |
2545 |
# accounted for. |
2546 |
self._select_atoms = self._select_atoms_from_graph |
2547 |
self._select_package = self._select_pkg_from_graph |
2548 |
already_deep = "deep" in self.myparams |
2549 |
if not already_deep: |
2550 |
self.myparams.add("deep") |
2551 |
|
2552 |
for root in self.roots: |
2553 |
required_set_names = self._required_set_names.copy() |
2554 |
if root == self.target_root and \ |
2555 |
(already_deep or "empty" in self.myparams): |
2556 |
required_set_names.difference_update(self._sets) |
2557 |
if not required_set_names and not self._ignored_deps: |
2558 |
continue |
2559 |
root_config = self.roots[root] |
2560 |
setconfig = root_config.setconfig |
2561 |
args = [] |
2562 |
# Reuse existing SetArg instances when available. |
2563 |
for arg in self.digraph.root_nodes(): |
2564 |
if not isinstance(arg, SetArg): |
2565 |
continue |
2566 |
if arg.root_config != root_config: |
2567 |
continue |
2568 |
if arg.name in required_set_names: |
2569 |
args.append(arg) |
2570 |
required_set_names.remove(arg.name) |
2571 |
# Create new SetArg instances only when necessary. |
2572 |
for s in required_set_names: |
2573 |
expanded_set = InternalPackageSet( |
2574 |
initial_atoms=setconfig.getSetAtoms(s)) |
2575 |
atom = SETPREFIX + s |
2576 |
args.append(SetArg(arg=atom, set=expanded_set, |
2577 |
root_config=root_config)) |
2578 |
vardb = root_config.trees["vartree"].dbapi |
2579 |
for arg in args: |
2580 |
for atom in arg.set: |
2581 |
self._dep_stack.append( |
2582 |
Dependency(atom=atom, root=root, parent=arg)) |
2583 |
if self._ignored_deps: |
2584 |
self._dep_stack.extend(self._ignored_deps) |
2585 |
self._ignored_deps = [] |
2586 |
if not self._create_graph(allow_unsatisfied=True): |
2587 |
return 0 |
2588 |
# Check the unsatisfied deps to see if any initially satisfied deps |
2589 |
# will become unsatisfied due to an upgrade. Initially unsatisfied |
2590 |
# deps are irrelevant since we only want to avoid breaking deps |
2591 |
# that are initially satisfied. |
2592 |
while self._unsatisfied_deps: |
2593 |
dep = self._unsatisfied_deps.pop() |
2594 |
matches = vardb.match_pkgs(dep.atom) |
2595 |
if not matches: |
2596 |
self._initially_unsatisfied_deps.append(dep) |
2597 |
continue |
2598 |
# A scheduled installation broke a deep dependency. |
2599 |
# Add the installed package to the graph so that it |
2600 |
# will be appropriately reported as a slot collision |
2601 |
# (possibly solvable via backtracking). |
2602 |
pkg = matches[-1] # highest match |
2603 |
if not self._add_pkg(pkg, dep): |
2604 |
return 0 |
2605 |
if not self._create_graph(allow_unsatisfied=True): |
2606 |
return 0 |
2607 |
return 1 |
2608 |
|
2609 |
def _pkg(self, cpv, type_name, root_config, installed=False): |
2610 |
""" |
2611 |
Get a package instance from the cache, or create a new |
2612 |
one if necessary. Raises PackageNotFound if aux_get |
2613 |
fails for some reason (package does not exist or is |
2614 |
corrupt). |
2615 |
""" |
2616 |
operation = "merge" |
2617 |
if installed: |
2618 |
operation = "nomerge" |
2619 |
pkg = self._pkg_cache.get( |
2620 |
(type_name, root_config.root, cpv, operation)) |
2621 |
if pkg is None: |
2622 |
tree_type = self.pkg_tree_map[type_name] |
2623 |
db = root_config.trees[tree_type].dbapi |
2624 |
db_keys = list(self._trees_orig[root_config.root][ |
2625 |
tree_type].dbapi._aux_cache_keys) |
2626 |
try: |
2627 |
metadata = izip(db_keys, db.aux_get(cpv, db_keys)) |
2628 |
except KeyError: |
2629 |
raise portage.exception.PackageNotFound(cpv) |
2630 |
pkg = Package(cpv=cpv, metadata=metadata, |
2631 |
root_config=root_config, installed=installed) |
2632 |
if type_name == "ebuild": |
2633 |
settings = self.pkgsettings[root_config.root] |
2634 |
settings.setcpv(pkg) |
2635 |
pkg.metadata["USE"] = settings["PORTAGE_USE"] |
2636 |
pkg.metadata['CHOST'] = settings.get('CHOST', '') |
2637 |
self._pkg_cache[pkg] = pkg |
2638 |
return pkg |
2639 |
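# Illustrative sketch only (not part of the patch): the get-or-create
# pattern used by _pkg() above, with a KeyError from the backing
# database translated into a dedicated "not found" exception.  The
# class and function names are hypothetical.
class PackageNotFoundSketch(Exception):
	pass

def pkg_metadata_sketch(cache, aux_get, cpv, keys):
	if cpv in cache:
		return cache[cpv]
	try:
		metadata = dict(zip(keys, aux_get(cpv, keys)))
	except KeyError:
		raise PackageNotFoundSketch(cpv)
	cache[cpv] = metadata
	return metadata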
|
2640 |
def validate_blockers(self): |
2641 |
"""Remove any blockers from the digraph that do not match any of the |
2642 |
packages within the graph. If necessary, create hard deps to ensure |
2643 |
correct merge order such that mutually blocking packages are never |
2644 |
installed simultaneously.""" |
2645 |
|
2646 |
if "--buildpkgonly" in self.myopts or \ |
2647 |
"--nodeps" in self.myopts: |
2648 |
return True |
2649 |
|
2650 |
#if "deep" in self.myparams: |
2651 |
if True: |
2652 |
# Pull in blockers from all installed packages that haven't already |
2653 |
# been pulled into the depgraph. This is not enabled by default |
2654 |
# due to the performance penalty that is incurred by all the |
2655 |
# additional dep_check calls that are required. |
2656 |
|
2657 |
dep_keys = ["DEPEND","RDEPEND","PDEPEND"] |
2658 |
for myroot in self.trees: |
2659 |
vardb = self.trees[myroot]["vartree"].dbapi |
2660 |
portdb = self.trees[myroot]["porttree"].dbapi |
2661 |
pkgsettings = self.pkgsettings[myroot] |
2662 |
final_db = self.mydbapi[myroot] |
2663 |
|
2664 |
blocker_cache = BlockerCache(myroot, vardb) |
2665 |
stale_cache = set(blocker_cache) |
2666 |
for pkg in vardb: |
2667 |
cpv = pkg.cpv |
2668 |
stale_cache.discard(cpv) |
2669 |
pkg_in_graph = self.digraph.contains(pkg) |
2670 |
|
2671 |
# Check for masked installed packages. Only warn about |
2672 |
# packages that are in the graph in order to avoid warning |
2673 |
# about those that will be automatically uninstalled during |
2674 |
# the merge process or by --depclean. |
2675 |
if pkg in final_db: |
2676 |
if pkg_in_graph and not visible(pkgsettings, pkg): |
2677 |
self._masked_installed.add(pkg) |
2678 |
|
2679 |
blocker_atoms = None |
2680 |
blockers = None |
2681 |
if pkg_in_graph: |
2682 |
blockers = [] |
2683 |
try: |
2684 |
blockers.extend( |
2685 |
self._blocker_parents.child_nodes(pkg)) |
2686 |
except KeyError: |
2687 |
pass |
2688 |
try: |
2689 |
blockers.extend( |
2690 |
self._irrelevant_blockers.child_nodes(pkg)) |
2691 |
except KeyError: |
2692 |
pass |
2693 |
if blockers is not None: |
2694 |
blockers = set(str(blocker.atom) \ |
2695 |
for blocker in blockers) |
2696 |
|
2697 |
# If this node has any blockers, create a "nomerge" |
2698 |
# node for it so that they can be enforced. |
2699 |
self.spinner.update() |
2700 |
blocker_data = blocker_cache.get(cpv) |
2701 |
if blocker_data is not None and \ |
2702 |
blocker_data.counter != long(pkg.metadata["COUNTER"]): |
2703 |
blocker_data = None |
2704 |
|
2705 |
# If blocker data from the graph is available, use |
2706 |
# it to validate the cache and update the cache if |
2707 |
# it seems invalid. |
2708 |
if blocker_data is not None and \ |
2709 |
blockers is not None: |
2710 |
if not blockers.symmetric_difference( |
2711 |
blocker_data.atoms): |
2712 |
continue |
2713 |
blocker_data = None |
2714 |
|
2715 |
if blocker_data is None and \ |
2716 |
blockers is not None: |
2717 |
# Re-use the blockers from the graph. |
2718 |
blocker_atoms = sorted(blockers) |
2719 |
counter = long(pkg.metadata["COUNTER"]) |
2720 |
blocker_data = \ |
2721 |
blocker_cache.BlockerData(counter, blocker_atoms) |
2722 |
blocker_cache[pkg.cpv] = blocker_data |
2723 |
continue |
2724 |
|
2725 |
if blocker_data: |
2726 |
blocker_atoms = blocker_data.atoms |
2727 |
else: |
2728 |
# Use aux_get() to trigger FakeVartree global |
2729 |
# updates on *DEPEND when appropriate. |
2730 |
depstr = " ".join(vardb.aux_get(pkg.cpv, dep_keys)) |
2731 |
# It is crucial to pass in final_db here in order to |
2732 |
# optimize dep_check calls by eliminating atoms via |
2733 |
# dep_wordreduce and dep_eval calls. |
2734 |
try: |
2735 |
portage.dep._dep_check_strict = False |
2736 |
try: |
2737 |
success, atoms = portage.dep_check(depstr, |
2738 |
final_db, pkgsettings, myuse=pkg.use.enabled, |
2739 |
trees=self._graph_trees, myroot=myroot) |
2740 |
except Exception, e: |
2741 |
if isinstance(e, SystemExit): |
2742 |
raise |
2743 |
# This is helpful, for example, if a ValueError |
2744 |
# is thrown from cpv_expand due to multiple |
2745 |
# matches (this can happen if an atom lacks a |
2746 |
# category). |
2747 |
show_invalid_depstring_notice( |
2748 |
pkg, depstr, str(e)) |
2749 |
del e |
2750 |
raise |
2751 |
finally: |
2752 |
portage.dep._dep_check_strict = True |
2753 |
if not success: |
2754 |
replacement_pkg = final_db.match_pkgs(pkg.slot_atom) |
2755 |
if replacement_pkg and \ |
2756 |
replacement_pkg[0].operation == "merge": |
2757 |
# This package is being replaced anyway, so |
2758 |
# ignore invalid dependencies so as not to |
2759 |
# annoy the user too much (otherwise they'd be |
2760 |
# forced to manually unmerge it first). |
2761 |
continue |
2762 |
show_invalid_depstring_notice(pkg, depstr, atoms) |
2763 |
return False |
2764 |
blocker_atoms = [myatom for myatom in atoms \ |
2765 |
if myatom.startswith("!")] |
2766 |
blocker_atoms.sort() |
2767 |
counter = long(pkg.metadata["COUNTER"]) |
2768 |
blocker_cache[cpv] = \ |
2769 |
blocker_cache.BlockerData(counter, blocker_atoms) |
2770 |
if blocker_atoms: |
2771 |
try: |
2772 |
for atom in blocker_atoms: |
2773 |
blocker = Blocker(atom=portage.dep.Atom(atom), |
2774 |
eapi=pkg.metadata["EAPI"], root=myroot) |
2775 |
self._blocker_parents.add(blocker, pkg) |
2776 |
except portage.exception.InvalidAtom, e: |
2777 |
depstr = " ".join(vardb.aux_get(pkg.cpv, dep_keys)) |
2778 |
show_invalid_depstring_notice( |
2779 |
pkg, depstr, "Invalid Atom: %s" % (e,)) |
2780 |
return False |
2781 |
for cpv in stale_cache: |
2782 |
del blocker_cache[cpv] |
2783 |
blocker_cache.flush() |
2784 |
del blocker_cache |
2785 |
|
2786 |
# Discard any "uninstall" tasks scheduled by previous calls |
2787 |
# to this method, since those tasks may not make sense given |
2788 |
# the current graph state. |
2789 |
previous_uninstall_tasks = self._blocker_uninstalls.leaf_nodes() |
2790 |
if previous_uninstall_tasks: |
2791 |
self._blocker_uninstalls = digraph() |
2792 |
self.digraph.difference_update(previous_uninstall_tasks) |
2793 |
|
2794 |
for blocker in self._blocker_parents.leaf_nodes(): |
2795 |
self.spinner.update() |
2796 |
root_config = self.roots[blocker.root] |
2797 |
virtuals = root_config.settings.getvirtuals() |
2798 |
myroot = blocker.root |
2799 |
initial_db = self.trees[myroot]["vartree"].dbapi |
2800 |
final_db = self.mydbapi[myroot] |
2801 |
|
2802 |
provider_virtual = False |
2803 |
if blocker.cp in virtuals and \ |
2804 |
not self._have_new_virt(blocker.root, blocker.cp): |
2805 |
provider_virtual = True |
2806 |
|
2807 |
# Use this to check PROVIDE for each matched package |
2808 |
# when necessary. |
2809 |
atom_set = InternalPackageSet( |
2810 |
initial_atoms=[blocker.atom]) |
2811 |
|
2812 |
if provider_virtual: |
2813 |
atoms = [] |
2814 |
for provider_entry in virtuals[blocker.cp]: |
2815 |
provider_cp = \ |
2816 |
portage.dep_getkey(provider_entry) |
2817 |
atoms.append(blocker.atom.replace( |
2818 |
blocker.cp, provider_cp)) |
2819 |
else: |
2820 |
atoms = [blocker.atom] |
2821 |
|
2822 |
blocked_initial = set() |
2823 |
for atom in atoms: |
2824 |
for pkg in initial_db.match_pkgs(atom): |
2825 |
if atom_set.findAtomForPackage(pkg): |
2826 |
blocked_initial.add(pkg) |
2827 |
|
2828 |
blocked_final = set() |
2829 |
for atom in atoms: |
2830 |
for pkg in final_db.match_pkgs(atom): |
2831 |
if atom_set.findAtomForPackage(pkg): |
2832 |
blocked_final.add(pkg) |
2833 |
|
2834 |
if not blocked_initial and not blocked_final: |
2835 |
parent_pkgs = self._blocker_parents.parent_nodes(blocker) |
2836 |
self._blocker_parents.remove(blocker) |
2837 |
# Discard any parents that don't have any more blockers. |
2838 |
for pkg in parent_pkgs: |
2839 |
self._irrelevant_blockers.add(blocker, pkg) |
2840 |
if not self._blocker_parents.child_nodes(pkg): |
2841 |
self._blocker_parents.remove(pkg) |
2842 |
continue |
2843 |
for parent in self._blocker_parents.parent_nodes(blocker): |
2844 |
unresolved_blocks = False |
2845 |
depends_on_order = set() |
2846 |
for pkg in blocked_initial: |
2847 |
if pkg.slot_atom == parent.slot_atom: |
2848 |
# TODO: Support blocks within slots in cases where it |
2849 |
# might make sense. For example, a new version might |
2850 |
# require that the old version be uninstalled at build |
2851 |
# time. |
2852 |
continue |
2853 |
if parent.installed: |
2854 |
# Two currently installed packages conflict with |
2855 |
# each other. Ignore this case since the damage |
2856 |
# is already done and this would be likely to |
2857 |
# confuse users if displayed like a normal blocker. |
2858 |
continue |
2859 |
|
2860 |
self._blocked_pkgs.add(pkg, blocker) |
2861 |
|
2862 |
if parent.operation == "merge": |
2863 |
# Maybe the blocked package can be replaced or simply |
2864 |
# unmerged to resolve this block. |
2865 |
depends_on_order.add((pkg, parent)) |
2866 |
continue |
2867 |
# None of the above blocker resolution techniques apply, |
2868 |
# so apparently this one is unresolvable. |
2869 |
unresolved_blocks = True |
2870 |
for pkg in blocked_final: |
2871 |
if pkg.slot_atom == parent.slot_atom: |
2872 |
# TODO: Support blocks within slots. |
2873 |
continue |
2874 |
if parent.operation == "nomerge" and \ |
2875 |
pkg.operation == "nomerge": |
2876 |
# This blocker will be handled the next time that a |
2877 |
# merge of either package is triggered. |
2878 |
continue |
2879 |
|
2880 |
self._blocked_pkgs.add(pkg, blocker) |
2881 |
|
2882 |
# Maybe the blocking package can be |
2883 |
# unmerged to resolve this block. |
2884 |
if parent.operation == "merge" and pkg.installed: |
2885 |
depends_on_order.add((pkg, parent)) |
2886 |
continue |
2887 |
elif parent.operation == "nomerge": |
2888 |
depends_on_order.add((parent, pkg)) |
2889 |
continue |
2890 |
# None of the above blocker resolution techniques apply, |
2891 |
# so apparently this one is unresolvable. |
2892 |
unresolved_blocks = True |
2893 |
|
2894 |
# Make sure we don't unmerge any packages that have been pulled |
2895 |
# into the graph. |
2896 |
if not unresolved_blocks and depends_on_order: |
2897 |
for inst_pkg, inst_task in depends_on_order: |
2898 |
if self.digraph.contains(inst_pkg) and \ |
2899 |
self.digraph.parent_nodes(inst_pkg): |
2900 |
unresolved_blocks = True |
2901 |
break |
2902 |
|
2903 |
if not unresolved_blocks and depends_on_order: |
2904 |
for inst_pkg, inst_task in depends_on_order: |
2905 |
uninst_task = Package(built=inst_pkg.built, |
2906 |
cpv=inst_pkg.cpv, installed=inst_pkg.installed, |
2907 |
metadata=inst_pkg.metadata, |
2908 |
operation="uninstall", |
2909 |
root_config=inst_pkg.root_config, |
2910 |
type_name=inst_pkg.type_name) |
2911 |
self._pkg_cache[uninst_task] = uninst_task |
2912 |
# Enforce correct merge order with a hard dep. |
2913 |
self.digraph.addnode(uninst_task, inst_task, |
2914 |
priority=BlockerDepPriority.instance) |
2915 |
# Count references to this blocker so that it can be |
2916 |
# invalidated after nodes referencing it have been |
2917 |
# merged. |
2918 |
self._blocker_uninstalls.addnode(uninst_task, blocker) |
2919 |
if not unresolved_blocks and not depends_on_order: |
2920 |
self._irrelevant_blockers.add(blocker, parent) |
2921 |
self._blocker_parents.remove_edge(blocker, parent) |
2922 |
if not self._blocker_parents.parent_nodes(blocker): |
2923 |
self._blocker_parents.remove(blocker) |
2924 |
if not self._blocker_parents.child_nodes(parent): |
2925 |
self._blocker_parents.remove(parent) |
2926 |
if unresolved_blocks: |
2927 |
self._unsolvable_blockers.add(blocker, parent) |
2928 |
|
2929 |
return True |
2930 |
|
2931 |
def _accept_blocker_conflicts(self): |
2932 |
acceptable = False |
2933 |
for x in ("--buildpkgonly", "--fetchonly", |
2934 |
"--fetch-all-uri", "--nodeps"): |
2935 |
if x in self.myopts: |
2936 |
acceptable = True |
2937 |
break |
2938 |
return acceptable |
2939 |
|
2940 |
def _merge_order_bias(self, mygraph): |
2941 |
""" |
2942 |
For optimal leaf node selection, promote deep system runtime deps and |
2943 |
order nodes from highest to lowest overall reference count. |
2944 |
""" |
2945 |
|
2946 |
node_info = {} |
2947 |
for node in mygraph.order: |
2948 |
node_info[node] = len(mygraph.parent_nodes(node)) |
2949 |
deep_system_deps = _find_deep_system_runtime_deps(mygraph) |
2950 |
|
2951 |
def cmp_merge_preference(node1, node2): |
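# Sort helper: uninstall tasks sort last, deep system runtime deps sort
# first, and remaining nodes are ordered from highest to lowest
# parent-reference count.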
2952 |
|
2953 |
if node1.operation == 'uninstall': |
2954 |
if node2.operation == 'uninstall': |
2955 |
return 0 |
2956 |
return 1 |
2957 |
|
2958 |
if node2.operation == 'uninstall': |
2959 |
if node1.operation == 'uninstall': |
2960 |
return 0 |
2961 |
return -1 |
2962 |
|
2963 |
node1_sys = node1 in deep_system_deps |
2964 |
node2_sys = node2 in deep_system_deps |
2965 |
if node1_sys != node2_sys: |
2966 |
if node1_sys: |
2967 |
return -1 |
2968 |
return 1 |
2969 |
|
2970 |
return node_info[node2] - node_info[node1] |
2971 |
|
2972 |
mygraph.order.sort(key=cmp_sort_key(cmp_merge_preference)) |
2973 |
|
2974 |
def altlist(self, reversed=False): |
2975 |
|
2976 |
while self._serialized_tasks_cache is None: |
2977 |
self._resolve_conflicts() |
2978 |
try: |
2979 |
self._serialized_tasks_cache, self._scheduler_graph = \ |
2980 |
self._serialize_tasks() |
2981 |
except self._serialize_tasks_retry: |
2982 |
pass |
2983 |
|
2984 |
retlist = self._serialized_tasks_cache[:] |
2985 |
if reversed: |
2986 |
retlist.reverse() |
2987 |
return retlist |
2988 |
|
2989 |
def schedulerGraph(self): |
2990 |
""" |
2991 |
The scheduler graph is identical to the normal one except that |
2992 |
uninstall edges are reversed in specific cases that require |
2993 |
conflicting packages to be temporarily installed simultaneously. |
2994 |
This is intended for use by the Scheduler in it's parallelization |
2995 |
logic. It ensures that temporary simultaneous installation of |
2996 |
conflicting packages is avoided when appropriate (especially for |
2997 |
!!atom blockers), but allowed in specific cases that require it. |
2998 |
|
2999 |
Note that this method calls break_refs() which alters the state of |
3000 |
internal Package instances such that this depgraph instance should |
3001 |
not be used to perform any more calculations. |
3002 |
""" |
3003 |
if self._scheduler_graph is None: |
3004 |
self.altlist() |
3005 |
self.break_refs(self._scheduler_graph.order) |
3006 |
return self._scheduler_graph |
3007 |
|
3008 |
def break_refs(self, nodes): |
3009 |
""" |
3010 |
Take a mergelist like that returned from self.altlist() and |
3011 |
break any references that lead back to the depgraph. This is |
3012 |
useful if you want to hold references to packages without |
3013 |
also holding the depgraph on the heap. |
3014 |
""" |
3015 |
for node in nodes: |
3016 |
if hasattr(node, "root_config"): |
3017 |
# The FakeVartree references the _package_cache which |
3018 |
# references the depgraph. So that Package instances don't |
3019 |
# hold the depgraph and FakeVartree on the heap, replace |
3020 |
# the RootConfig that references the FakeVartree with the |
3021 |
# original RootConfig instance which references the actual |
3022 |
# vartree. |
3023 |
node.root_config = \ |
3024 |
self._trees_orig[node.root_config.root]["root_config"] |
3025 |
|
3026 |
def _resolve_conflicts(self): |
3027 |
if not self._complete_graph(): |
3028 |
raise self._unknown_internal_error() |
3029 |
|
3030 |
if not self.validate_blockers(): |
3031 |
raise self._unknown_internal_error() |
3032 |
|
3033 |
if self._slot_collision_info: |
3034 |
self._process_slot_conflicts() |
3035 |
|
3036 |
def _serialize_tasks(self): |
3037 |
|
3038 |
if "--debug" in self.myopts: |
3039 |
writemsg("\ndigraph:\n\n", noiselevel=-1) |
3040 |
self.digraph.debug_print() |
3041 |
writemsg("\n", noiselevel=-1) |
3042 |
|
3043 |
scheduler_graph = self.digraph.copy() |
3044 |
|
3045 |
if '--nodeps' in self.myopts: |
3046 |
# Preserve the package order given on the command line. |
3047 |
return ([node for node in scheduler_graph \ |
3048 |
if isinstance(node, Package) \ |
3049 |
and node.operation == 'merge'], scheduler_graph) |
3050 |
|
3051 |
mygraph=self.digraph.copy() |
3052 |
# Prune "nomerge" root nodes if nothing depends on them, since |
3053 |
# otherwise they slow down merge order calculation. Don't remove |
3054 |
# non-root nodes since they help optimize merge order in some cases |
3055 |
# such as revdep-rebuild. |
3056 |
removed_nodes = set() |
3057 |
while True: |
3058 |
for node in mygraph.root_nodes(): |
3059 |
if not isinstance(node, Package) or \ |
3060 |
node.installed or node.onlydeps: |
3061 |
removed_nodes.add(node) |
3062 |
if removed_nodes: |
3063 |
self.spinner.update() |
3064 |
mygraph.difference_update(removed_nodes) |
3065 |
if not removed_nodes: |
3066 |
break |
3067 |
removed_nodes.clear() |
3068 |
self._merge_order_bias(mygraph) |
3069 |
def cmp_circular_bias(n1, n2): |
3070 |
""" |
3071 |
RDEPEND is stronger than PDEPEND and this function |
3072 |
measures such a strength bias within a circular |
3073 |
dependency relationship. |
3074 |
""" |
3075 |
n1_n2_medium = n2 in mygraph.child_nodes(n1, |
3076 |
ignore_priority=priority_range.ignore_medium_soft) |
3077 |
n2_n1_medium = n1 in mygraph.child_nodes(n2, |
3078 |
ignore_priority=priority_range.ignore_medium_soft) |
3079 |
if n1_n2_medium == n2_n1_medium: |
3080 |
return 0 |
3081 |
elif n1_n2_medium: |
3082 |
return 1 |
3083 |
return -1 |
3084 |
myblocker_uninstalls = self._blocker_uninstalls.copy() |
3085 |
retlist=[] |
3086 |
# Contains uninstall tasks that have been scheduled to |
3087 |
# occur after overlapping blockers have been installed. |
3088 |
scheduled_uninstalls = set() |
3089 |
# Contains any Uninstall tasks that have been ignored |
3090 |
# in order to avoid the circular deps code path. These |
3091 |
# correspond to blocker conflicts that could not be |
3092 |
# resolved. |
3093 |
ignored_uninstall_tasks = set() |
3094 |
have_uninstall_task = False |
3095 |
complete = "complete" in self.myparams |
3096 |
asap_nodes = [] |
3097 |
|
3098 |
def get_nodes(**kwargs): |
3099 |
""" |
3100 |
Returns leaf nodes excluding Uninstall instances |
3101 |
since those should be executed as late as possible. |
3102 |
""" |
3103 |
return [node for node in mygraph.leaf_nodes(**kwargs) \ |
3104 |
if isinstance(node, Package) and \ |
3105 |
(node.operation != "uninstall" or \ |
3106 |
node in scheduled_uninstalls)] |
3107 |
|
3108 |
# sys-apps/portage needs special treatment if ROOT="/" |
3109 |
running_root = self._running_root.root |
3110 |
from portage.const import PORTAGE_PACKAGE_ATOM |
3111 |
runtime_deps = InternalPackageSet( |
3112 |
initial_atoms=[PORTAGE_PACKAGE_ATOM]) |
3113 |
running_portage = self.trees[running_root]["vartree"].dbapi.match_pkgs( |
3114 |
PORTAGE_PACKAGE_ATOM) |
3115 |
replacement_portage = self.mydbapi[running_root].match_pkgs( |
3116 |
PORTAGE_PACKAGE_ATOM) |
3117 |
|
3118 |
if running_portage: |
3119 |
running_portage = running_portage[0] |
3120 |
else: |
3121 |
running_portage = None |
3122 |
|
3123 |
if replacement_portage: |
3124 |
replacement_portage = replacement_portage[0] |
3125 |
else: |
3126 |
replacement_portage = None |
3127 |
|
3128 |
if replacement_portage == running_portage: |
3129 |
replacement_portage = None |
3130 |
|
3131 |
if replacement_portage is not None: |
3132 |
# update from running_portage to replacement_portage asap |
3133 |
asap_nodes.append(replacement_portage) |
3134 |
|
3135 |
if running_portage is not None: |
3136 |
try: |
3137 |
portage_rdepend = self._select_atoms_highest_available( |
3138 |
running_root, running_portage.metadata["RDEPEND"], |
3139 |
myuse=running_portage.use.enabled, |
3140 |
parent=running_portage, strict=False) |
3141 |
except portage.exception.InvalidDependString, e: |
3142 |
portage.writemsg("!!! Invalid RDEPEND in " + \ |
3143 |
"'%svar/db/pkg/%s/RDEPEND': %s\n" % \ |
3144 |
(running_root, running_portage.cpv, e), noiselevel=-1) |
3145 |
del e |
3146 |
portage_rdepend = [] |
3147 |
runtime_deps.update(atom for atom in portage_rdepend \ |
3148 |
if not atom.startswith("!")) |
3149 |
|
3150 |
def gather_deps(ignore_priority, mergeable_nodes, |
3151 |
selected_nodes, node): |
3152 |
""" |
3153 |
Recursively gather a group of nodes that RDEPEND on |
3154 |
eachother. This ensures that they are merged as a group |
3155 |
and get their RDEPENDs satisfied as soon as possible. |
3156 |
""" |
3157 |
if node in selected_nodes: |
3158 |
return True |
3159 |
if node not in mergeable_nodes: |
3160 |
return False |
3161 |
if node == replacement_portage and \ |
3162 |
mygraph.child_nodes(node, |
3163 |
ignore_priority=priority_range.ignore_medium_soft): |
3164 |
# Make sure that portage always has all of its
# RDEPENDs installed first.
3166 |
return False |
3167 |
selected_nodes.add(node) |
3168 |
for child in mygraph.child_nodes(node, |
3169 |
ignore_priority=ignore_priority): |
3170 |
if not gather_deps(ignore_priority, |
3171 |
mergeable_nodes, selected_nodes, child): |
3172 |
return False |
3173 |
return True |
3174 |
|
3175 |
def ignore_uninst_or_med(priority): |
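# Like priority_range.ignore_medium, but also ignore blocker-uninstall edges.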
3176 |
if priority is BlockerDepPriority.instance: |
3177 |
return True |
3178 |
return priority_range.ignore_medium(priority) |
3179 |
|
3180 |
def ignore_uninst_or_med_soft(priority): |
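# Like priority_range.ignore_medium_soft, but also ignore blocker-uninstall edges.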
3181 |
if priority is BlockerDepPriority.instance: |
3182 |
return True |
3183 |
return priority_range.ignore_medium_soft(priority) |
3184 |
|
3185 |
tree_mode = "--tree" in self.myopts |
3186 |
# Tracks whether or not the current iteration should prefer asap_nodes |
3187 |
# if available. This is set to False when the previous iteration |
3188 |
# failed to select any nodes. It is reset whenever nodes are |
3189 |
# successfully selected. |
3190 |
prefer_asap = True |
3191 |
|
3192 |
# Controls whether or not the current iteration should drop edges that |
3193 |
# are "satisfied" by installed packages, in order to solve circular |
3194 |
# dependencies. The deep runtime dependencies of installed packages are |
3195 |
# not checked in this case (bug #199856), so it must be avoided |
3196 |
# whenever possible. |
3197 |
drop_satisfied = False |
3198 |
|
3199 |
# State of variables for successive iterations that loosen the
# criteria for node selection.
#
# iteration   prefer_asap   drop_satisfied
#     1          True           False
#     2          False          False
#     3          False          True
#
# If no nodes are selected on the last iteration, it is due to
# unresolved blockers or circular dependencies.
3209 |
|
3210 |
while not mygraph.empty(): |
3211 |
self.spinner.update() |
3212 |
selected_nodes = None |
3213 |
ignore_priority = None |
3214 |
if drop_satisfied or (prefer_asap and asap_nodes): |
3215 |
priority_range = DepPrioritySatisfiedRange |
3216 |
else: |
3217 |
priority_range = DepPriorityNormalRange |
3218 |
if prefer_asap and asap_nodes: |
3219 |
# ASAP nodes are merged before their soft deps. Go ahead and |
3220 |
# select root nodes here if necessary, since it's typical for |
3221 |
# the parent to have been removed from the graph already. |
3222 |
asap_nodes = [node for node in asap_nodes \ |
3223 |
if mygraph.contains(node)] |
3224 |
for node in asap_nodes: |
3225 |
if not mygraph.child_nodes(node, |
3226 |
ignore_priority=priority_range.ignore_soft): |
3227 |
selected_nodes = [node] |
3228 |
asap_nodes.remove(node) |
3229 |
break |
3230 |
if not selected_nodes and \ |
3231 |
not (prefer_asap and asap_nodes): |
3232 |
for i in xrange(priority_range.NONE, |
3233 |
priority_range.MEDIUM_SOFT + 1): |
3234 |
ignore_priority = priority_range.ignore_priority[i] |
3235 |
nodes = get_nodes(ignore_priority=ignore_priority) |
3236 |
if nodes: |
3237 |
# If there is a mix of uninstall nodes with other |
3238 |
# types, save the uninstall nodes for later since |
3239 |
# sometimes a merge node will render an uninstall |
3240 |
# node unnecessary (due to occupying the same slot), |
3241 |
# and we want to avoid executing a separate uninstall |
3242 |
# task in that case. |
3243 |
if len(nodes) > 1: |
3244 |
good_uninstalls = [] |
3245 |
with_some_uninstalls_excluded = [] |
3246 |
for node in nodes: |
3247 |
if node.operation == "uninstall": |
3248 |
slot_node = self.mydbapi[node.root |
3249 |
].match_pkgs(node.slot_atom) |
3250 |
if slot_node and \ |
3251 |
slot_node[0].operation == "merge": |
3252 |
continue |
3253 |
good_uninstalls.append(node) |
3254 |
with_some_uninstalls_excluded.append(node) |
3255 |
if good_uninstalls: |
3256 |
nodes = good_uninstalls |
3257 |
elif with_some_uninstalls_excluded: |
3258 |
nodes = with_some_uninstalls_excluded |
3259 |
else: |
3260 |
nodes = nodes |
3261 |
|
3262 |
if ignore_priority is None and not tree_mode: |
3263 |
# Greedily pop all of these nodes since no |
3264 |
# relationship has been ignored. This optimization |
3265 |
# destroys --tree output, so it's disabled in tree |
3266 |
# mode. |
3267 |
selected_nodes = nodes |
3268 |
else: |
3269 |
# For optimal merge order: |
3270 |
# * Only pop one node. |
3271 |
# * Removing a root node (node without a parent) |
3272 |
# will not produce a leaf node, so avoid it. |
3273 |
# * It's normal for a selected uninstall to be a |
3274 |
# root node, so don't check them for parents. |
3275 |
for node in nodes: |
3276 |
if node.operation == "uninstall" or \ |
3277 |
mygraph.parent_nodes(node): |
3278 |
selected_nodes = [node] |
3279 |
break |
3280 |
|
3281 |
if selected_nodes: |
3282 |
break |
3283 |
|
3284 |
if not selected_nodes: |
3285 |
nodes = get_nodes(ignore_priority=priority_range.ignore_medium) |
3286 |
if nodes: |
3287 |
mergeable_nodes = set(nodes) |
3288 |
if prefer_asap and asap_nodes: |
3289 |
nodes = asap_nodes |
3290 |
for i in xrange(priority_range.SOFT, |
3291 |
priority_range.MEDIUM_SOFT + 1): |
3292 |
ignore_priority = priority_range.ignore_priority[i] |
3293 |
for node in nodes: |
3294 |
if not mygraph.parent_nodes(node): |
3295 |
continue |
3296 |
selected_nodes = set() |
3297 |
if gather_deps(ignore_priority, |
3298 |
mergeable_nodes, selected_nodes, node): |
3299 |
break |
3300 |
else: |
3301 |
selected_nodes = None |
3302 |
if selected_nodes: |
3303 |
break |
3304 |
|
3305 |
if prefer_asap and asap_nodes and not selected_nodes: |
3306 |
# We failed to find any asap nodes to merge, so ignore |
3307 |
# them for the next iteration. |
3308 |
prefer_asap = False |
3309 |
continue |
3310 |
|
3311 |
if selected_nodes and ignore_priority is not None: |
3312 |
# Try to merge ignored medium_soft deps as soon as possible |
3313 |
# if they're not satisfied by installed packages. |
3314 |
for node in selected_nodes: |
3315 |
children = set(mygraph.child_nodes(node)) |
3316 |
soft = children.difference( |
3317 |
mygraph.child_nodes(node, |
3318 |
ignore_priority=DepPrioritySatisfiedRange.ignore_soft)) |
3319 |
medium_soft = children.difference( |
3320 |
mygraph.child_nodes(node, |
3321 |
ignore_priority = \ |
3322 |
DepPrioritySatisfiedRange.ignore_medium_soft)) |
3323 |
medium_soft.difference_update(soft) |
3324 |
for child in medium_soft: |
3325 |
if child in selected_nodes: |
3326 |
continue |
3327 |
if child in asap_nodes: |
3328 |
continue |
3329 |
asap_nodes.append(child) |
3330 |
|
3331 |
if selected_nodes and len(selected_nodes) > 1: |
3332 |
if not isinstance(selected_nodes, list): |
3333 |
selected_nodes = list(selected_nodes) |
3334 |
selected_nodes.sort(key=cmp_sort_key(cmp_circular_bias)) |
3335 |
|
3336 |
if not selected_nodes and not myblocker_uninstalls.is_empty(): |
3337 |
# An Uninstall task needs to be executed in order to
# avoid a conflict, if possible.
3339 |
|
3340 |
if drop_satisfied: |
3341 |
priority_range = DepPrioritySatisfiedRange |
3342 |
else: |
3343 |
priority_range = DepPriorityNormalRange |
3344 |
|
3345 |
mergeable_nodes = get_nodes( |
3346 |
ignore_priority=ignore_uninst_or_med) |
3347 |
|
3348 |
min_parent_deps = None |
3349 |
uninst_task = None |
3350 |
for task in myblocker_uninstalls.leaf_nodes(): |
3351 |
# Do some sanity checks so that system or world packages |
3352 |
# don't get uninstalled inappropriately here (only really |
3353 |
# necessary when --complete-graph has not been enabled). |
3354 |
|
3355 |
if task in ignored_uninstall_tasks: |
3356 |
continue |
3357 |
|
3358 |
if task in scheduled_uninstalls: |
3359 |
# It's been scheduled but it hasn't |
3360 |
# been executed yet due to dependence |
3361 |
# on installation of blocking packages. |
3362 |
continue |
3363 |
|
3364 |
root_config = self.roots[task.root] |
3365 |
inst_pkg = self._pkg_cache[ |
3366 |
("installed", task.root, task.cpv, "nomerge")] |
3367 |
|
3368 |
if self.digraph.contains(inst_pkg): |
3369 |
continue |
3370 |
|
3371 |
forbid_overlap = False |
3372 |
heuristic_overlap = False |
3373 |
for blocker in myblocker_uninstalls.parent_nodes(task): |
3374 |
if blocker.eapi in ("0", "1"): |
3375 |
heuristic_overlap = True |
3376 |
elif blocker.atom.blocker.overlap.forbid: |
3377 |
forbid_overlap = True |
3378 |
break |
3379 |
if forbid_overlap and running_root == task.root: |
3380 |
continue |
3381 |
|
3382 |
if heuristic_overlap and running_root == task.root: |
3383 |
# Never uninstall sys-apps/portage or its essential
# dependencies, except through replacement.
3385 |
try: |
3386 |
runtime_dep_atoms = \ |
3387 |
list(runtime_deps.iterAtomsForPackage(task)) |
3388 |
except portage.exception.InvalidDependString, e: |
3389 |
portage.writemsg("!!! Invalid PROVIDE in " + \ |
3390 |
"'%svar/db/pkg/%s/PROVIDE': %s\n" % \ |
3391 |
(task.root, task.cpv, e), noiselevel=-1) |
3392 |
del e |
3393 |
continue |
3394 |
|
3395 |
# Don't uninstall a runtime dep if it appears |
3396 |
# to be the only suitable one installed. |
3397 |
skip = False |
3398 |
vardb = root_config.trees["vartree"].dbapi |
3399 |
for atom in runtime_dep_atoms: |
3400 |
other_version = None |
3401 |
for pkg in vardb.match_pkgs(atom): |
3402 |
if pkg.cpv == task.cpv and \ |
3403 |
pkg.metadata["COUNTER"] == \ |
3404 |
task.metadata["COUNTER"]: |
3405 |
continue |
3406 |
other_version = pkg |
3407 |
break |
3408 |
if other_version is None: |
3409 |
skip = True |
3410 |
break |
3411 |
if skip: |
3412 |
continue |
3413 |
|
3414 |
# For packages in the system set, don't take |
3415 |
# any chances. If the conflict can't be resolved |
3416 |
# by a normal replacement operation then abort. |
3417 |
skip = False |
3418 |
try: |
3419 |
for atom in root_config.sets[ |
3420 |
"system"].iterAtomsForPackage(task): |
3421 |
skip = True |
3422 |
break |
3423 |
except portage.exception.InvalidDependString, e: |
3424 |
portage.writemsg("!!! Invalid PROVIDE in " + \ |
3425 |
"'%svar/db/pkg/%s/PROVIDE': %s\n" % \ |
3426 |
(task.root, task.cpv, e), noiselevel=-1) |
3427 |
del e |
3428 |
skip = True |
3429 |
if skip: |
3430 |
continue |
3431 |
|
3432 |
# Note that the world check isn't always
# necessary since self._complete_graph() will
# add all packages from the system and world sets to the
# graph. This just allows unresolved conflicts to be
# detected as early as possible, which makes it possible
# to avoid calling self._complete_graph() when it is
# unnecessary due to blockers triggering an abort.
if not complete:
# For packages in the world set, go ahead and uninstall
# when necessary, as long as the atom will be satisfied
# in the final state.
3443 |
graph_db = self.mydbapi[task.root] |
3444 |
skip = False |
3445 |
try: |
3446 |
for atom in root_config.sets[ |
3447 |
"world"].iterAtomsForPackage(task): |
3448 |
satisfied = False |
3449 |
for pkg in graph_db.match_pkgs(atom): |
3450 |
if pkg == inst_pkg: |
3451 |
continue |
3452 |
satisfied = True |
3453 |
break |
3454 |
if not satisfied: |
3455 |
skip = True |
3456 |
self._blocked_world_pkgs[inst_pkg] = atom |
3457 |
break |
3458 |
except portage.exception.InvalidDependString, e: |
3459 |
portage.writemsg("!!! Invalid PROVIDE in " + \ |
3460 |
"'%svar/db/pkg/%s/PROVIDE': %s\n" % \ |
3461 |
(task.root, task.cpv, e), noiselevel=-1) |
3462 |
del e |
3463 |
skip = True |
3464 |
if skip: |
3465 |
continue |
3466 |
|
3467 |
# Check the deps of parent nodes to ensure that |
3468 |
# the chosen task produces a leaf node. Maybe |
3469 |
# this can be optimized some more to make the |
3470 |
# best possible choice, but the current algorithm |
3471 |
# is simple and should be near optimal for most |
3472 |
# common cases. |
3473 |
mergeable_parent = False |
3474 |
parent_deps = set() |
3475 |
for parent in mygraph.parent_nodes(task): |
3476 |
parent_deps.update(mygraph.child_nodes(parent, |
3477 |
ignore_priority=priority_range.ignore_medium_soft)) |
3478 |
if parent in mergeable_nodes and \ |
3479 |
gather_deps(ignore_uninst_or_med_soft, |
3480 |
mergeable_nodes, set(), parent): |
3481 |
mergeable_parent = True |
3482 |
|
3483 |
if not mergeable_parent: |
3484 |
continue |
3485 |
|
3486 |
parent_deps.remove(task) |
3487 |
if min_parent_deps is None or \ |
3488 |
len(parent_deps) < min_parent_deps: |
3489 |
min_parent_deps = len(parent_deps) |
3490 |
uninst_task = task |
3491 |
|
3492 |
if uninst_task is not None: |
3493 |
# The uninstall is performed only after blocking |
3494 |
# packages have been merged on top of it. File |
3495 |
# collisions between blocking packages are detected |
3496 |
# and removed from the list of files to be uninstalled. |
3497 |
scheduled_uninstalls.add(uninst_task) |
3498 |
parent_nodes = mygraph.parent_nodes(uninst_task) |
3499 |
|
3500 |
# Reverse the parent -> uninstall edges since we want |
3501 |
# to do the uninstall after blocking packages have |
3502 |
# been merged on top of it. |
3503 |
mygraph.remove(uninst_task) |
3504 |
for blocked_pkg in parent_nodes: |
3505 |
mygraph.add(blocked_pkg, uninst_task, |
3506 |
priority=BlockerDepPriority.instance) |
3507 |
scheduler_graph.remove_edge(uninst_task, blocked_pkg) |
3508 |
scheduler_graph.add(blocked_pkg, uninst_task, |
3509 |
priority=BlockerDepPriority.instance) |
3510 |
|
3511 |
# Reset the state variables for leaf node selection and |
3512 |
# continue trying to select leaf nodes. |
3513 |
prefer_asap = True |
3514 |
drop_satisfied = False |
3515 |
continue |
3516 |
|
3517 |
if not selected_nodes: |
3518 |
# Only select root nodes as a last resort. This case should |
3519 |
# only trigger when the graph is nearly empty and the only |
3520 |
# remaining nodes are isolated (no parents or children). Since |
3521 |
# the nodes must be isolated, ignore_priority is not needed. |
3522 |
selected_nodes = get_nodes() |
3523 |
|
3524 |
if not selected_nodes and not drop_satisfied: |
3525 |
drop_satisfied = True |
3526 |
continue |
3527 |
|
3528 |
if not selected_nodes and not myblocker_uninstalls.is_empty(): |
3529 |
# If possible, drop an uninstall task here in order to avoid |
3530 |
# the circular deps code path. The corresponding blocker will |
3531 |
# still be counted as an unresolved conflict. |
3532 |
uninst_task = None |
3533 |
for node in myblocker_uninstalls.leaf_nodes(): |
3534 |
try: |
3535 |
mygraph.remove(node) |
3536 |
except KeyError: |
3537 |
pass |
3538 |
else: |
3539 |
uninst_task = node |
3540 |
ignored_uninstall_tasks.add(node) |
3541 |
break |
3542 |
|
3543 |
if uninst_task is not None: |
3544 |
# Reset the state variables for leaf node selection and |
3545 |
# continue trying to select leaf nodes. |
3546 |
prefer_asap = True |
3547 |
drop_satisfied = False |
3548 |
continue |
3549 |
|
3550 |
if not selected_nodes: |
3551 |
self._circular_deps_for_display = mygraph |
3552 |
raise self._unknown_internal_error() |
3553 |
|
3554 |
# At this point, we've succeeded in selecting one or more nodes, so |
3555 |
# reset state variables for leaf node selection. |
3556 |
prefer_asap = True |
3557 |
drop_satisfied = False |
3558 |
|
3559 |
mygraph.difference_update(selected_nodes) |
3560 |
|
3561 |
for node in selected_nodes: |
3562 |
if isinstance(node, Package) and \ |
3563 |
node.operation == "nomerge": |
3564 |
continue |
3565 |
|
3566 |
# Handle interactions between blockers |
3567 |
# and uninstallation tasks. |
3568 |
solved_blockers = set() |
3569 |
uninst_task = None |
3570 |
if isinstance(node, Package) and \ |
3571 |
"uninstall" == node.operation: |
3572 |
have_uninstall_task = True |
3573 |
uninst_task = node |
3574 |
else: |
3575 |
vardb = self.trees[node.root]["vartree"].dbapi |
3576 |
previous_cpv = vardb.match(node.slot_atom) |
3577 |
if previous_cpv: |
3578 |
# The package will be replaced by this one, so remove |
3579 |
# the corresponding Uninstall task if necessary. |
3580 |
previous_cpv = previous_cpv[0] |
3581 |
uninst_task = \ |
3582 |
("installed", node.root, previous_cpv, "uninstall") |
3583 |
try: |
3584 |
mygraph.remove(uninst_task) |
3585 |
except KeyError: |
3586 |
pass |
3587 |
|
3588 |
if uninst_task is not None and \ |
3589 |
uninst_task not in ignored_uninstall_tasks and \ |
3590 |
myblocker_uninstalls.contains(uninst_task): |
3591 |
blocker_nodes = myblocker_uninstalls.parent_nodes(uninst_task) |
3592 |
myblocker_uninstalls.remove(uninst_task) |
3593 |
# Discard any blockers that this Uninstall solves. |
3594 |
for blocker in blocker_nodes: |
3595 |
if not myblocker_uninstalls.child_nodes(blocker): |
3596 |
myblocker_uninstalls.remove(blocker) |
3597 |
solved_blockers.add(blocker) |
3598 |
|
3599 |
retlist.append(node) |
3600 |
|
3601 |
if (isinstance(node, Package) and \ |
3602 |
"uninstall" == node.operation) or \ |
3603 |
(uninst_task is not None and \ |
3604 |
uninst_task in scheduled_uninstalls): |
3605 |
# Include satisfied blockers in the merge list
# since the user might be interested, and also because
# it serves as an indicator that blocking packages
# will be temporarily installed simultaneously.
3609 |
for blocker in solved_blockers: |
3610 |
retlist.append(Blocker(atom=blocker.atom, |
3611 |
root=blocker.root, eapi=blocker.eapi, |
3612 |
satisfied=True)) |
3613 |
|
3614 |
unsolvable_blockers = set(self._unsolvable_blockers.leaf_nodes()) |
3615 |
for node in myblocker_uninstalls.root_nodes(): |
3616 |
unsolvable_blockers.add(node) |
3617 |
|
3618 |
for blocker in unsolvable_blockers: |
3619 |
retlist.append(blocker) |
3620 |
|
3621 |
# If any Uninstall tasks need to be executed in order |
3622 |
# to avoid a conflict, complete the graph with any |
3623 |
# dependencies that may have been initially |
3624 |
# neglected (to ensure that unsafe Uninstall tasks |
3625 |
# are properly identified and blocked from execution). |
3626 |
if have_uninstall_task and \ |
3627 |
not complete and \ |
3628 |
not unsolvable_blockers: |
3629 |
self.myparams.add("complete") |
3630 |
raise self._serialize_tasks_retry("") |
3631 |
|
3632 |
if unsolvable_blockers and \ |
3633 |
not self._accept_blocker_conflicts(): |
3634 |
self._unsatisfied_blockers_for_display = unsolvable_blockers |
3635 |
self._serialized_tasks_cache = retlist[:] |
3636 |
self._scheduler_graph = scheduler_graph |
3637 |
raise self._unknown_internal_error() |
3638 |
|
3639 |
if self._slot_collision_info and \ |
3640 |
not self._accept_blocker_conflicts(): |
3641 |
self._serialized_tasks_cache = retlist[:] |
3642 |
self._scheduler_graph = scheduler_graph |
3643 |
raise self._unknown_internal_error() |
3644 |
|
3645 |
return retlist, scheduler_graph |
3646 |
|
3647 |
def _show_circular_deps(self, mygraph): |
3648 |
# No leaf nodes are available, so we have a circular
# dependency panic situation. Reduce the noise level to a
# minimum via repeated elimination of root nodes since they
# have no parents and thus cannot be part of a cycle.
3652 |
while True: |
3653 |
root_nodes = mygraph.root_nodes( |
3654 |
ignore_priority=DepPrioritySatisfiedRange.ignore_medium_soft) |
3655 |
if not root_nodes: |
3656 |
break |
3657 |
mygraph.difference_update(root_nodes) |
3658 |
# Display the USE flags that are enabled on nodes that are part |
3659 |
# of dependency cycles in case that helps the user decide to |
3660 |
# disable some of them. |
3661 |
display_order = [] |
3662 |
tempgraph = mygraph.copy() |
3663 |
while not tempgraph.empty(): |
3664 |
nodes = tempgraph.leaf_nodes() |
3665 |
if not nodes: |
3666 |
node = tempgraph.order[0] |
3667 |
else: |
3668 |
node = nodes[0] |
3669 |
display_order.append(node) |
3670 |
tempgraph.remove(node) |
3671 |
display_order.reverse() |
3672 |
self.myopts.pop("--quiet", None) |
3673 |
self.myopts.pop("--verbose", None) |
3674 |
self.myopts["--tree"] = True |
3675 |
portage.writemsg("\n\n", noiselevel=-1) |
3676 |
self.display(display_order) |
3677 |
prefix = colorize("BAD", " * ") |
3678 |
portage.writemsg("\n", noiselevel=-1) |
3679 |
portage.writemsg(prefix + "Error: circular dependencies:\n", |
3680 |
noiselevel=-1) |
3681 |
portage.writemsg("\n", noiselevel=-1) |
3682 |
mygraph.debug_print() |
3683 |
portage.writemsg("\n", noiselevel=-1) |
3684 |
portage.writemsg(prefix + "Note that circular dependencies " + \ |
3685 |
"can often be avoided by temporarily\n", noiselevel=-1) |
3686 |
portage.writemsg(prefix + "disabling USE flags that trigger " + \ |
3687 |
"optional dependencies.\n", noiselevel=-1) |
3688 |
|
3689 |
def _show_merge_list(self): |
3690 |
if self._serialized_tasks_cache is not None and \ |
3691 |
not (self._displayed_list and \ |
3692 |
(self._displayed_list == self._serialized_tasks_cache or \ |
3693 |
self._displayed_list == \ |
3694 |
list(reversed(self._serialized_tasks_cache)))): |
3695 |
display_list = self._serialized_tasks_cache[:] |
3696 |
if "--tree" in self.myopts: |
3697 |
display_list.reverse() |
3698 |
self.display(display_list) |
3699 |
|
3700 |
def _show_unsatisfied_blockers(self, blockers): |
3701 |
self._show_merge_list() |
3702 |
msg = "Error: The above package list contains " + \ |
3703 |
"packages which cannot be installed " + \ |
3704 |
"at the same time on the same system." |
3705 |
prefix = colorize("BAD", " * ") |
3706 |
from textwrap import wrap |
3707 |
portage.writemsg("\n", noiselevel=-1) |
3708 |
for line in wrap(msg, 70): |
3709 |
portage.writemsg(prefix + line + "\n", noiselevel=-1) |
3710 |
|
3711 |
# Display the conflicting packages along with the packages
# that pulled them in. This is helpful for troubleshooting
# cases in which blockers are not resolved automatically and
# the reasons are not apparent from the normal merge list
# display.
3716 |
|
3717 |
conflict_pkgs = {} |
3718 |
for blocker in blockers: |
3719 |
for pkg in chain(self._blocked_pkgs.child_nodes(blocker), \ |
3720 |
self._blocker_parents.parent_nodes(blocker)): |
3721 |
parent_atoms = self._parent_atoms.get(pkg) |
3722 |
if not parent_atoms: |
3723 |
atom = self._blocked_world_pkgs.get(pkg) |
3724 |
if atom is not None: |
3725 |
parent_atoms = set([("@world", atom)]) |
3726 |
if parent_atoms: |
3727 |
conflict_pkgs[pkg] = parent_atoms |
3728 |
|
3729 |
if conflict_pkgs: |
3730 |
# Reduce noise by pruning packages that are only |
3731 |
# pulled in by other conflict packages. |
3732 |
pruned_pkgs = set() |
3733 |
for pkg, parent_atoms in conflict_pkgs.iteritems(): |
3734 |
relevant_parent = False |
3735 |
for parent, atom in parent_atoms: |
3736 |
if parent not in conflict_pkgs: |
3737 |
relevant_parent = True |
3738 |
break |
3739 |
if not relevant_parent: |
3740 |
pruned_pkgs.add(pkg) |
3741 |
for pkg in pruned_pkgs: |
3742 |
del conflict_pkgs[pkg] |
3743 |
|
3744 |
if conflict_pkgs: |
3745 |
msg = [] |
3746 |
msg.append("\n") |
3747 |
indent = " " |
3748 |
# Max number of parents shown, to avoid flooding the display. |
3749 |
max_parents = 3 |
3750 |
for pkg, parent_atoms in conflict_pkgs.iteritems(): |
3751 |
|
3752 |
pruned_list = set() |
3753 |
|
3754 |
# Prefer packages that are not directly involved in a conflict. |
3755 |
for parent_atom in parent_atoms: |
3756 |
if len(pruned_list) >= max_parents: |
3757 |
break |
3758 |
parent, atom = parent_atom |
3759 |
if parent not in conflict_pkgs: |
3760 |
pruned_list.add(parent_atom) |
3761 |
|
3762 |
for parent_atom in parent_atoms: |
3763 |
if len(pruned_list) >= max_parents: |
3764 |
break |
3765 |
pruned_list.add(parent_atom) |
3766 |
|
3767 |
omitted_parents = len(parent_atoms) - len(pruned_list) |
3768 |
msg.append(indent + "%s pulled in by\n" % pkg) |
3769 |
|
3770 |
for parent_atom in pruned_list: |
3771 |
parent, atom = parent_atom |
3772 |
msg.append(2*indent) |
3773 |
if isinstance(parent, |
3774 |
(PackageArg, AtomArg)): |
3775 |
# For PackageArg and AtomArg types, it's |
3776 |
# redundant to display the atom attribute. |
3777 |
msg.append(str(parent)) |
3778 |
else: |
3779 |
# Display the specific atom from SetArg or |
3780 |
# Package types. |
3781 |
msg.append("%s required by %s" % (atom, parent)) |
3782 |
msg.append("\n") |
3783 |
|
3784 |
if omitted_parents: |
3785 |
msg.append(2*indent) |
3786 |
msg.append("(and %d more)\n" % omitted_parents) |
3787 |
|
3788 |
msg.append("\n") |
3789 |
|
3790 |
sys.stderr.write("".join(msg)) |
3791 |
sys.stderr.flush() |
3792 |
|
3793 |
if "--quiet" not in self.myopts: |
3794 |
show_blocker_docs_link() |
3795 |
|
3796 |
def display(self, mylist, favorites=[], verbosity=None): |
3797 |
|
3798 |
# This is used to prevent display_problems() from |
3799 |
# redundantly displaying this exact same merge list |
3800 |
# again via _show_merge_list(). |
3801 |
self._displayed_list = mylist |
3802 |
|
3803 |
if verbosity is None: |
3804 |
verbosity = ("--quiet" in self.myopts and 1 or \ |
3805 |
"--verbose" in self.myopts and 3 or 2) |
3806 |
favorites_set = InternalPackageSet(favorites) |
3807 |
oneshot = "--oneshot" in self.myopts or \ |
3808 |
"--onlydeps" in self.myopts |
3809 |
columns = "--columns" in self.myopts |
3810 |
changelogs=[] |
3811 |
p=[] |
3812 |
blockers = [] |
3813 |
|
3814 |
counters = PackageCounters() |
3815 |
|
3816 |
if verbosity == 1 and "--verbose" not in self.myopts: |
3817 |
def create_use_string(*args): |
3818 |
return "" |
3819 |
else: |
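# Build the colorized USE flag string shown next to each package: "%" marks
# flags added to or removed from IUSE, "*" marks flags whose state changed,
# and forced/masked flags are wrapped in parentheses.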
3820 |
def create_use_string(name, cur_iuse, iuse_forced, cur_use, |
3821 |
old_iuse, old_use, |
3822 |
is_new, reinst_flags, |
3823 |
all_flags=(verbosity == 3 or "--quiet" in self.myopts), |
3824 |
alphabetical=("--alphabetical" in self.myopts)): |
3825 |
enabled = [] |
3826 |
if alphabetical: |
3827 |
disabled = enabled |
3828 |
removed = enabled |
3829 |
else: |
3830 |
disabled = [] |
3831 |
removed = [] |
3832 |
cur_iuse = set(cur_iuse) |
3833 |
enabled_flags = cur_iuse.intersection(cur_use) |
3834 |
removed_iuse = set(old_iuse).difference(cur_iuse) |
3835 |
any_iuse = cur_iuse.union(old_iuse) |
3836 |
any_iuse = list(any_iuse) |
3837 |
any_iuse.sort() |
3838 |
for flag in any_iuse: |
3839 |
flag_str = None |
3840 |
isEnabled = False |
3841 |
reinst_flag = reinst_flags and flag in reinst_flags |
3842 |
if flag in enabled_flags: |
3843 |
isEnabled = True |
3844 |
if is_new or flag in old_use and \ |
3845 |
(all_flags or reinst_flag): |
3846 |
flag_str = red(flag) |
3847 |
elif flag not in old_iuse: |
3848 |
flag_str = yellow(flag) + "%*" |
3849 |
elif flag not in old_use: |
3850 |
flag_str = green(flag) + "*" |
3851 |
elif flag in removed_iuse: |
3852 |
if all_flags or reinst_flag: |
3853 |
flag_str = yellow("-" + flag) + "%" |
3854 |
if flag in old_use: |
3855 |
flag_str += "*" |
3856 |
flag_str = "(" + flag_str + ")" |
3857 |
removed.append(flag_str) |
3858 |
continue |
3859 |
else: |
3860 |
if is_new or flag in old_iuse and \ |
3861 |
flag not in old_use and \ |
3862 |
(all_flags or reinst_flag): |
3863 |
flag_str = blue("-" + flag) |
3864 |
elif flag not in old_iuse: |
3865 |
flag_str = yellow("-" + flag) |
3866 |
if flag not in iuse_forced: |
3867 |
flag_str += "%" |
3868 |
elif flag in old_use: |
3869 |
flag_str = green("-" + flag) + "*" |
3870 |
if flag_str: |
3871 |
if flag in iuse_forced: |
3872 |
flag_str = "(" + flag_str + ")" |
3873 |
if isEnabled: |
3874 |
enabled.append(flag_str) |
3875 |
else: |
3876 |
disabled.append(flag_str) |
3877 |
|
3878 |
if alphabetical: |
3879 |
ret = " ".join(enabled) |
3880 |
else: |
3881 |
ret = " ".join(enabled + disabled + removed) |
3882 |
if ret: |
3883 |
ret = '%s="%s" ' % (name, ret) |
3884 |
return ret |
3885 |
|
3886 |
repo_display = RepoDisplay(self.roots) |
3887 |
|
3888 |
tree_nodes = [] |
3889 |
display_list = [] |
3890 |
mygraph = self.digraph.copy() |
3891 |
|
3892 |
# If there are any Uninstall instances, add the corresponding |
3893 |
# blockers to the digraph (useful for --tree display). |
3894 |
|
3895 |
executed_uninstalls = set(node for node in mylist \ |
3896 |
if isinstance(node, Package) and node.operation == "unmerge") |
3897 |
|
3898 |
for uninstall in self._blocker_uninstalls.leaf_nodes(): |
3899 |
uninstall_parents = \ |
3900 |
self._blocker_uninstalls.parent_nodes(uninstall) |
3901 |
if not uninstall_parents: |
3902 |
continue |
3903 |
|
3904 |
# Remove the corresponding "nomerge" node and substitute |
3905 |
# the Uninstall node. |
3906 |
inst_pkg = self._pkg_cache[ |
3907 |
("installed", uninstall.root, uninstall.cpv, "nomerge")] |
3908 |
try: |
3909 |
mygraph.remove(inst_pkg) |
3910 |
except KeyError: |
3911 |
pass |
3912 |
|
3913 |
try: |
3914 |
inst_pkg_blockers = self._blocker_parents.child_nodes(inst_pkg) |
3915 |
except KeyError: |
3916 |
inst_pkg_blockers = [] |
3917 |
|
3918 |
# Break the Package -> Uninstall edges. |
3919 |
mygraph.remove(uninstall) |
3920 |
|
3921 |
# Resolution of a package's blockers
# depends on its own uninstallation.
3923 |
for blocker in inst_pkg_blockers: |
3924 |
mygraph.add(uninstall, blocker) |
3925 |
|
3926 |
# Expand Package -> Uninstall edges into |
3927 |
# Package -> Blocker -> Uninstall edges. |
3928 |
for blocker in uninstall_parents: |
3929 |
mygraph.add(uninstall, blocker) |
3930 |
for parent in self._blocker_parents.parent_nodes(blocker): |
3931 |
if parent != inst_pkg: |
3932 |
mygraph.add(blocker, parent) |
3933 |
|
3934 |
# If the uninstall task did not need to be executed because |
3935 |
# of an upgrade, display Blocker -> Upgrade edges since the |
3936 |
# corresponding Blocker -> Uninstall edges will not be shown. |
3937 |
upgrade_node = \ |
3938 |
self._slot_pkg_map[uninstall.root].get(uninstall.slot_atom) |
3939 |
if upgrade_node is not None and \ |
3940 |
uninstall not in executed_uninstalls: |
3941 |
for blocker in uninstall_parents: |
3942 |
mygraph.add(upgrade_node, blocker) |
3943 |
|
3944 |
unsatisfied_blockers = [] |
3945 |
i = 0 |
3946 |
depth = 0 |
3947 |
shown_edges = set() |
3948 |
for x in mylist: |
3949 |
if isinstance(x, Blocker) and not x.satisfied: |
3950 |
unsatisfied_blockers.append(x) |
3951 |
continue |
3952 |
graph_key = x |
3953 |
if "--tree" in self.myopts: |
3954 |
depth = len(tree_nodes) |
3955 |
while depth and graph_key not in \ |
3956 |
mygraph.child_nodes(tree_nodes[depth-1]): |
3957 |
depth -= 1 |
3958 |
if depth: |
3959 |
tree_nodes = tree_nodes[:depth] |
3960 |
tree_nodes.append(graph_key) |
3961 |
display_list.append((x, depth, True)) |
3962 |
shown_edges.add((graph_key, tree_nodes[depth-1])) |
3963 |
else: |
3964 |
traversed_nodes = set() # prevent endless circles |
3965 |
traversed_nodes.add(graph_key) |
3966 |
def add_parents(current_node, ordered): |
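# Recursively select one parent chain for current_node (avoiding cycles via
# traversed_nodes) and append it to display_list for --tree output.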
3967 |
parent_nodes = None |
3968 |
# Do not traverse to parents if this node is
# an argument or a direct member of a set that has
# been specified as an argument (system or world).
3971 |
if current_node not in self._set_nodes: |
3972 |
parent_nodes = mygraph.parent_nodes(current_node) |
3973 |
if parent_nodes: |
3974 |
child_nodes = set(mygraph.child_nodes(current_node)) |
3975 |
selected_parent = None |
3976 |
# First, try to avoid a direct cycle. |
3977 |
for node in parent_nodes: |
3978 |
if not isinstance(node, (Blocker, Package)): |
3979 |
continue |
3980 |
if node not in traversed_nodes and \ |
3981 |
node not in child_nodes: |
3982 |
edge = (current_node, node) |
3983 |
if edge in shown_edges: |
3984 |
continue |
3985 |
selected_parent = node |
3986 |
break |
3987 |
if not selected_parent: |
3988 |
# A direct cycle is unavoidable. |
3989 |
for node in parent_nodes: |
3990 |
if not isinstance(node, (Blocker, Package)): |
3991 |
continue |
3992 |
if node not in traversed_nodes: |
3993 |
edge = (current_node, node) |
3994 |
if edge in shown_edges: |
3995 |
continue |
3996 |
selected_parent = node |
3997 |
break |
3998 |
if selected_parent: |
3999 |
shown_edges.add((current_node, selected_parent)) |
4000 |
traversed_nodes.add(selected_parent) |
4001 |
add_parents(selected_parent, False) |
4002 |
display_list.append((current_node, |
4003 |
len(tree_nodes), ordered)) |
4004 |
tree_nodes.append(current_node) |
4005 |
tree_nodes = [] |
4006 |
add_parents(graph_key, True) |
4007 |
else: |
4008 |
display_list.append((x, depth, True)) |
4009 |
mylist = display_list |
4010 |
for x in unsatisfied_blockers: |
4011 |
mylist.append((x, 0, True)) |
4012 |
|
4013 |
last_merge_depth = 0 |
4014 |
for i in xrange(len(mylist)-1,-1,-1): |
4015 |
graph_key, depth, ordered = mylist[i] |
4016 |
if not ordered and depth == 0 and i > 0 \ |
4017 |
and graph_key == mylist[i-1][0] and \ |
4018 |
mylist[i-1][1] == 0: |
4019 |
# An ordered node got a consecutive duplicate when the tree was |
4020 |
# being filled in. |
4021 |
del mylist[i] |
4022 |
continue |
4023 |
if ordered and graph_key[-1] != "nomerge": |
4024 |
last_merge_depth = depth |
4025 |
continue |
4026 |
if depth >= last_merge_depth or \ |
4027 |
i < len(mylist) - 1 and \ |
4028 |
depth >= mylist[i+1][1]: |
4029 |
del mylist[i] |
4030 |
|
4031 |
from portage import flatten |
4032 |
from portage.dep import use_reduce, paren_reduce |
4033 |
# files to fetch list - avoids counting the same file twice
# in size display (verbose mode)
4035 |
myfetchlist=[] |
4036 |
|
4037 |
# Use this set to detect when all the "repoadd" strings are "[0]" |
4038 |
# and disable the entire repo display in this case. |
4039 |
repoadd_set = set() |
4040 |
|
4041 |
for mylist_index in xrange(len(mylist)): |
4042 |
x, depth, ordered = mylist[mylist_index] |
4043 |
pkg_type = x[0] |
4044 |
myroot = x[1] |
4045 |
pkg_key = x[2] |
4046 |
portdb = self.trees[myroot]["porttree"].dbapi |
4047 |
bindb = self.trees[myroot]["bintree"].dbapi |
4048 |
vardb = self.trees[myroot]["vartree"].dbapi |
4049 |
vartree = self.trees[myroot]["vartree"] |
4050 |
pkgsettings = self.pkgsettings[myroot] |
4051 |
|
4052 |
fetch=" " |
4053 |
indent = " " * depth |
4054 |
|
4055 |
if isinstance(x, Blocker): |
4056 |
if x.satisfied: |
4057 |
blocker_style = "PKG_BLOCKER_SATISFIED" |
4058 |
addl = "%s %s " % (colorize(blocker_style, "b"), fetch) |
4059 |
else: |
4060 |
blocker_style = "PKG_BLOCKER" |
4061 |
addl = "%s %s " % (colorize(blocker_style, "B"), fetch) |
4062 |
if ordered: |
4063 |
counters.blocks += 1 |
4064 |
if x.satisfied: |
4065 |
counters.blocks_satisfied += 1 |
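# Resolve the blocker atom (leading "!" stripped) against the
# installed-package database for display purposes.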
4066 |
resolved = portage.key_expand( |
4067 |
str(x.atom).lstrip("!"), mydb=vardb, settings=pkgsettings) |
4068 |
if "--columns" in self.myopts and "--quiet" in self.myopts: |
4069 |
addl += " " + colorize(blocker_style, resolved) |
4070 |
else: |
4071 |
addl = "[%s %s] %s%s" % \ |
4072 |
(colorize(blocker_style, "blocks"), |
4073 |
addl, indent, colorize(blocker_style, resolved)) |
4074 |
block_parents = self._blocker_parents.parent_nodes(x) |
4075 |
block_parents = set([pnode[2] for pnode in block_parents]) |
4076 |
block_parents = ", ".join(block_parents) |
4077 |
if resolved!=x[2]: |
4078 |
addl += colorize(blocker_style, |
4079 |
" (\"%s\" is blocking %s)") % \ |
4080 |
(str(x.atom).lstrip("!"), block_parents) |
4081 |
else: |
4082 |
addl += colorize(blocker_style, |
4083 |
" (is blocking %s)") % block_parents |
4084 |
if isinstance(x, Blocker) and x.satisfied: |
4085 |
if columns: |
4086 |
continue |
4087 |
p.append(addl) |
4088 |
else: |
4089 |
blockers.append(addl) |
4090 |
else: |
4091 |
pkg_status = x[3] |
4092 |
pkg_merge = ordered and pkg_status == "merge" |
4093 |
if not pkg_merge and pkg_status == "merge": |
4094 |
pkg_status = "nomerge" |
4095 |
built = pkg_type != "ebuild" |
4096 |
installed = pkg_type == "installed" |
4097 |
pkg = x |
4098 |
metadata = pkg.metadata |
4099 |
ebuild_path = None |
4100 |
repo_name = metadata["repository"] |
4101 |
if pkg_type == "ebuild": |
4102 |
ebuild_path = portdb.findname(pkg_key) |
4103 |
if not ebuild_path: # shouldn't happen |
4104 |
raise portage.exception.PackageNotFound(pkg_key) |
4105 |
repo_path_real = os.path.dirname(os.path.dirname( |
4106 |
os.path.dirname(ebuild_path))) |
4107 |
else: |
4108 |
repo_path_real = portdb.getRepositoryPath(repo_name) |
4109 |
pkg_use = list(pkg.use.enabled) |
4110 |
try: |
4111 |
restrict = flatten(use_reduce(paren_reduce( |
4112 |
pkg.metadata["RESTRICT"]), uselist=pkg_use)) |
4113 |
except portage.exception.InvalidDependString, e: |
4114 |
if not pkg.installed: |
4115 |
show_invalid_depstring_notice(x, |
4116 |
pkg.metadata["RESTRICT"], str(e)) |
4117 |
del e |
4118 |
return 1 |
4119 |
restrict = [] |
4120 |
if "ebuild" == pkg_type and x[3] != "nomerge" and \ |
4121 |
"fetch" in restrict: |
4122 |
fetch = red("F") |
4123 |
if ordered: |
4124 |
counters.restrict_fetch += 1 |
4125 |
if portdb.fetch_check(pkg_key, pkg_use): |
4126 |
fetch = green("f") |
4127 |
if ordered: |
4128 |
counters.restrict_fetch_satisfied += 1 |
4129 |
|
4130 |
# We need to use "--emptytree" testing here rather than "empty" param testing
# because the "empty" param is used for -u, where you still *do* want to see
# when something is being upgraded.
4132 |
myoldbest = [] |
4133 |
myinslotlist = None |
4134 |
installed_versions = vardb.match(portage.cpv_getkey(pkg_key)) |
4135 |
if vardb.cpv_exists(pkg_key): |
4136 |
addl=" "+yellow("R")+fetch+" " |
4137 |
if ordered: |
4138 |
if pkg_merge: |
4139 |
counters.reinst += 1 |
4140 |
elif pkg_status == "uninstall": |
4141 |
counters.uninst += 1 |
4142 |
# filter out old-style virtual matches |
4143 |
elif installed_versions and \ |
4144 |
portage.cpv_getkey(installed_versions[0]) == \ |
4145 |
portage.cpv_getkey(pkg_key): |
4146 |
myinslotlist = vardb.match(pkg.slot_atom) |
4147 |
# If this is the first install of a new-style virtual, we |
4148 |
# need to filter out old-style virtual matches. |
4149 |
if myinslotlist and \ |
4150 |
portage.cpv_getkey(myinslotlist[0]) != \ |
4151 |
portage.cpv_getkey(pkg_key): |
4152 |
myinslotlist = None |
4153 |
if myinslotlist: |
4154 |
myoldbest = myinslotlist[:] |
4155 |
addl = " " + fetch |
4156 |
if not portage.dep.cpvequal(pkg_key, |
4157 |
portage.best([pkg_key] + myoldbest)): |
4158 |
# Downgrade in slot |
4159 |
addl += turquoise("U")+blue("D") |
4160 |
if ordered: |
4161 |
counters.downgrades += 1 |
4162 |
else: |
4163 |
# Update in slot |
4164 |
addl += turquoise("U") + " " |
4165 |
if ordered: |
4166 |
counters.upgrades += 1 |
4167 |
else: |
4168 |
# New slot, mark it new. |
4169 |
addl = " " + green("NS") + fetch + " " |
4170 |
myoldbest = vardb.match(portage.cpv_getkey(pkg_key)) |
4171 |
if ordered: |
4172 |
counters.newslot += 1 |
4173 |
|
4174 |
if "--changelog" in self.myopts: |
4175 |
inst_matches = vardb.match(pkg.slot_atom) |
4176 |
if inst_matches: |
4177 |
changelogs.extend(self.calc_changelog( |
4178 |
portdb.findname(pkg_key), |
4179 |
inst_matches[0], pkg_key)) |
4180 |
else: |
4181 |
addl = " " + green("N") + " " + fetch + " " |
4182 |
if ordered: |
4183 |
counters.new += 1 |
4184 |
|
4185 |
verboseadd = "" |
4186 |
repoadd = None |
4187 |
|
4188 |
if True: |
4189 |
# USE flag display |
4190 |
forced_flags = set() |
4191 |
pkgsettings.setcpv(pkg) # for package.use.{mask,force} |
4192 |
forced_flags.update(pkgsettings.useforce) |
4193 |
forced_flags.update(pkgsettings.usemask) |
4194 |
|
4195 |
cur_use = [flag for flag in pkg.use.enabled \ |
4196 |
if flag in pkg.iuse.all] |
4197 |
cur_iuse = sorted(pkg.iuse.all) |
4198 |
|
4199 |
if myoldbest and myinslotlist: |
4200 |
previous_cpv = myoldbest[0] |
4201 |
else: |
4202 |
previous_cpv = pkg.cpv |
4203 |
if vardb.cpv_exists(previous_cpv): |
4204 |
old_iuse, old_use = vardb.aux_get( |
4205 |
previous_cpv, ["IUSE", "USE"]) |
4206 |
old_iuse = list(set( |
4207 |
filter_iuse_defaults(old_iuse.split()))) |
4208 |
old_iuse.sort() |
4209 |
old_use = old_use.split() |
4210 |
is_new = False |
4211 |
else: |
4212 |
old_iuse = [] |
4213 |
old_use = [] |
4214 |
is_new = True |
4215 |
|
4216 |
old_use = [flag for flag in old_use if flag in old_iuse] |
4217 |
|
4218 |
use_expand = pkgsettings["USE_EXPAND"].lower().split() |
4219 |
use_expand.sort() |
4220 |
use_expand.reverse() |
4221 |
use_expand_hidden = \ |
4222 |
pkgsettings["USE_EXPAND_HIDDEN"].lower().split() |
4223 |
|
4224 |
def map_to_use_expand(myvals, forcedFlags=False, |
4225 |
removeHidden=True): |
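# Split a flat list of USE flags into USE_EXPAND groups plus plain USE,
# optionally reporting which flags are forced and dropping hidden groups.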
4226 |
ret = {} |
4227 |
forced = {} |
4228 |
for exp in use_expand: |
4229 |
ret[exp] = [] |
4230 |
forced[exp] = set() |
4231 |
for val in myvals[:]: |
4232 |
if val.startswith(exp.lower()+"_"): |
4233 |
if val in forced_flags: |
4234 |
forced[exp].add(val[len(exp)+1:]) |
4235 |
ret[exp].append(val[len(exp)+1:]) |
4236 |
myvals.remove(val) |
4237 |
ret["USE"] = myvals |
4238 |
forced["USE"] = [val for val in myvals \ |
4239 |
if val in forced_flags] |
4240 |
if removeHidden: |
4241 |
for exp in use_expand_hidden: |
4242 |
ret.pop(exp, None) |
4243 |
if forcedFlags: |
4244 |
return ret, forced |
4245 |
return ret |
4246 |
|
4247 |
# Prevent USE_EXPAND_HIDDEN flags from being hidden if they |
4248 |
# are the only thing that triggered reinstallation. |
4249 |
reinst_flags_map = {} |
4250 |
reinstall_for_flags = self._reinstall_nodes.get(pkg) |
4251 |
reinst_expand_map = None |
4252 |
if reinstall_for_flags: |
4253 |
reinst_flags_map = map_to_use_expand( |
4254 |
list(reinstall_for_flags), removeHidden=False) |
4255 |
for k in list(reinst_flags_map): |
4256 |
if not reinst_flags_map[k]: |
4257 |
del reinst_flags_map[k] |
4258 |
if not reinst_flags_map.get("USE"): |
4259 |
reinst_expand_map = reinst_flags_map.copy() |
4260 |
reinst_expand_map.pop("USE", None) |
4261 |
if reinst_expand_map and \ |
4262 |
not set(reinst_expand_map).difference( |
4263 |
use_expand_hidden): |
4264 |
use_expand_hidden = \ |
4265 |
set(use_expand_hidden).difference( |
4266 |
reinst_expand_map) |
4267 |
|
4268 |
cur_iuse_map, iuse_forced = \ |
4269 |
map_to_use_expand(cur_iuse, forcedFlags=True) |
4270 |
cur_use_map = map_to_use_expand(cur_use) |
4271 |
old_iuse_map = map_to_use_expand(old_iuse) |
4272 |
old_use_map = map_to_use_expand(old_use) |
4273 |
|
4274 |
use_expand.sort() |
4275 |
use_expand.insert(0, "USE") |
4276 |
|
4277 |
for key in use_expand: |
4278 |
if key in use_expand_hidden: |
4279 |
continue |
4280 |
verboseadd += create_use_string(key.upper(), |
4281 |
cur_iuse_map[key], iuse_forced[key], |
4282 |
cur_use_map[key], old_iuse_map[key], |
4283 |
old_use_map[key], is_new, |
4284 |
reinst_flags_map.get(key)) |
4285 |
|
4286 |
if verbosity == 3: |
4287 |
# size verbose |
4288 |
mysize=0 |
4289 |
if pkg_type == "ebuild" and pkg_merge: |
4290 |
try: |
4291 |
myfilesdict = portdb.getfetchsizes(pkg_key, |
4292 |
useflags=pkg_use, debug=self.edebug) |
4293 |
except portage.exception.InvalidDependString, e: |
4294 |
src_uri = portdb.aux_get(pkg_key, ["SRC_URI"])[0] |
4295 |
show_invalid_depstring_notice(x, src_uri, str(e)) |
4296 |
del e |
4297 |
return 1 |
4298 |
if myfilesdict is None: |
4299 |
myfilesdict="[empty/missing/bad digest]" |
4300 |
else: |
4301 |
for myfetchfile in myfilesdict: |
4302 |
if myfetchfile not in myfetchlist: |
4303 |
mysize+=myfilesdict[myfetchfile] |
4304 |
myfetchlist.append(myfetchfile) |
4305 |
if ordered: |
4306 |
counters.totalsize += mysize |
4307 |
verboseadd += format_size(mysize) |
4308 |
|
4309 |
# overlay verbose |
4310 |
# assign index for a previous version in the same slot |
4311 |
has_previous = False |
4312 |
repo_name_prev = None |
4313 |
slot_atom = "%s:%s" % (portage.dep_getkey(pkg_key), |
4314 |
metadata["SLOT"]) |
4315 |
slot_matches = vardb.match(slot_atom) |
4316 |
if slot_matches: |
4317 |
has_previous = True |
4318 |
repo_name_prev = vardb.aux_get(slot_matches[0], |
4319 |
["repository"])[0] |
4320 |
|
4321 |
# now use the data to generate output |
4322 |
if pkg.installed or not has_previous: |
4323 |
repoadd = repo_display.repoStr(repo_path_real) |
4324 |
else: |
4325 |
repo_path_prev = None |
4326 |
if repo_name_prev: |
4327 |
repo_path_prev = portdb.getRepositoryPath( |
4328 |
repo_name_prev) |
4329 |
if repo_path_prev == repo_path_real: |
4330 |
repoadd = repo_display.repoStr(repo_path_real) |
4331 |
else: |
4332 |
repoadd = "%s=>%s" % ( |
4333 |
repo_display.repoStr(repo_path_prev), |
4334 |
repo_display.repoStr(repo_path_real)) |
4335 |
if repoadd: |
4336 |
repoadd_set.add(repoadd) |
4337 |
|
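# Split the cpv into [cp, version, revision] for display; an "-r0" revision
# is shown as empty.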
4338 |
xs = [portage.cpv_getkey(pkg_key)] + \ |
4339 |
list(portage.catpkgsplit(pkg_key)[2:]) |
4340 |
if xs[2] == "r0": |
4341 |
xs[2] = "" |
4342 |
else: |
4343 |
xs[2] = "-" + xs[2] |
4344 |
|
4345 |
mywidth = 130 |
4346 |
if "COLUMNWIDTH" in self.settings: |
4347 |
try: |
4348 |
mywidth = int(self.settings["COLUMNWIDTH"]) |
4349 |
except ValueError, e: |
4350 |
portage.writemsg("!!! %s\n" % str(e), noiselevel=-1) |
4351 |
portage.writemsg( |
4352 |
"!!! Unable to parse COLUMNWIDTH='%s'\n" % \ |
4353 |
self.settings["COLUMNWIDTH"], noiselevel=-1) |
4354 |
del e |
4355 |
oldlp = mywidth - 30 |
4356 |
newlp = oldlp - 30 |
4357 |
|
4358 |
# Convert myoldbest from a list to a string. |
4359 |
if not myoldbest: |
4360 |
myoldbest = "" |
4361 |
else: |
4362 |
for pos, key in enumerate(myoldbest): |
4363 |
key = portage.catpkgsplit(key)[2] + \ |
4364 |
"-" + portage.catpkgsplit(key)[3] |
4365 |
if key[-3:] == "-r0": |
4366 |
key = key[:-3] |
4367 |
myoldbest[pos] = key |
4368 |
myoldbest = blue("["+", ".join(myoldbest)+"]") |
4369 |
|
4370 |
pkg_cp = xs[0] |
4371 |
root_config = self.roots[myroot] |
4372 |
system_set = root_config.sets["system"] |
4373 |
world_set = root_config.sets["world"] |
4374 |
|
4375 |
pkg_system = False |
4376 |
pkg_world = False |
4377 |
try: |
4378 |
pkg_system = system_set.findAtomForPackage(pkg) |
4379 |
pkg_world = world_set.findAtomForPackage(pkg) |
4380 |
if not (oneshot or pkg_world) and \ |
4381 |
myroot == self.target_root and \ |
4382 |
favorites_set.findAtomForPackage(pkg): |
4383 |
# Maybe it will be added to world now. |
4384 |
if create_world_atom(pkg, favorites_set, root_config): |
4385 |
pkg_world = True |
4386 |
except portage.exception.InvalidDependString: |
4387 |
# This is reported elsewhere if relevant. |
4388 |
pass |
4389 |
|
4390 |
def pkgprint(pkg_str): |
4391 |
if pkg_merge: |
4392 |
if pkg_system: |
4393 |
return colorize("PKG_MERGE_SYSTEM", pkg_str) |
4394 |
elif pkg_world: |
4395 |
return colorize("PKG_MERGE_WORLD", pkg_str) |
4396 |
else: |
4397 |
return colorize("PKG_MERGE", pkg_str) |
4398 |
elif pkg_status == "uninstall": |
4399 |
return colorize("PKG_UNINSTALL", pkg_str) |
4400 |
else: |
4401 |
if pkg_system: |
4402 |
return colorize("PKG_NOMERGE_SYSTEM", pkg_str) |
4403 |
elif pkg_world: |
4404 |
return colorize("PKG_NOMERGE_WORLD", pkg_str) |
4405 |
else: |
4406 |
return colorize("PKG_NOMERGE", pkg_str) |
4407 |
|
4408 |
try: |
4409 |
properties = flatten(use_reduce(paren_reduce( |
4410 |
pkg.metadata["PROPERTIES"]), uselist=pkg.use.enabled)) |
4411 |
except portage.exception.InvalidDependString, e: |
4412 |
if not pkg.installed: |
4413 |
show_invalid_depstring_notice(pkg, |
4414 |
pkg.metadata["PROPERTIES"], str(e)) |
4415 |
del e |
4416 |
return 1 |
4417 |
properties = [] |
4418 |
interactive = "interactive" in properties |
4419 |
if interactive and pkg.operation == "merge": |
4420 |
addl = colorize("WARN", "I") + addl[1:] |
4421 |
if ordered: |
4422 |
counters.interactive += 1 |
4423 |
|
4424 |
if x[1]!="/": |
4425 |
if myoldbest: |
4426 |
myoldbest +=" " |
4427 |
if "--columns" in self.myopts: |
4428 |
if "--quiet" in self.myopts: |
4429 |
myprint=addl+" "+indent+pkgprint(pkg_cp) |
4430 |
myprint=myprint+darkblue(" "+xs[1]+xs[2])+" " |
4431 |
myprint=myprint+myoldbest |
4432 |
myprint=myprint+darkgreen("to "+x[1]) |
4433 |
verboseadd = None |
4434 |
else: |
4435 |
if not pkg_merge: |
4436 |
myprint = "[%s] %s%s" % \ |
4437 |
(pkgprint(pkg_status.ljust(13)), |
4438 |
indent, pkgprint(pkg.cp)) |
4439 |
else: |
4440 |
myprint = "[%s %s] %s%s" % \ |
4441 |
(pkgprint(pkg.type_name), addl, |
4442 |
indent, pkgprint(pkg.cp)) |
4443 |
if (newlp-nc_len(myprint)) > 0: |
4444 |
myprint=myprint+(" "*(newlp-nc_len(myprint))) |
4445 |
myprint=myprint+"["+darkblue(xs[1]+xs[2])+"] " |
4446 |
if (oldlp-nc_len(myprint)) > 0: |
4447 |
myprint=myprint+" "*(oldlp-nc_len(myprint)) |
4448 |
myprint=myprint+myoldbest |
4449 |
myprint += darkgreen("to " + pkg.root) |
4450 |
else: |
4451 |
if not pkg_merge: |
4452 |
myprint = "[%s] " % pkgprint(pkg_status.ljust(13)) |
4453 |
else: |
4454 |
myprint = "[%s %s] " % (pkgprint(pkg_type), addl) |
4455 |
myprint += indent + pkgprint(pkg_key) + " " + \ |
4456 |
myoldbest + darkgreen("to " + myroot) |
4457 |
else: |
4458 |
if "--columns" in self.myopts: |
4459 |
if "--quiet" in self.myopts: |
4460 |
myprint=addl+" "+indent+pkgprint(pkg_cp) |
4461 |
myprint=myprint+" "+green(xs[1]+xs[2])+" " |
4462 |
myprint=myprint+myoldbest |
4463 |
verboseadd = None |
4464 |
else: |
4465 |
if not pkg_merge: |
4466 |
myprint = "[%s] %s%s" % \ |
4467 |
(pkgprint(pkg_status.ljust(13)), |
4468 |
indent, pkgprint(pkg.cp)) |
4469 |
else: |
4470 |
myprint = "[%s %s] %s%s" % \ |
4471 |
(pkgprint(pkg.type_name), addl, |
4472 |
indent, pkgprint(pkg.cp)) |
4473 |
if (newlp-nc_len(myprint)) > 0: |
4474 |
myprint=myprint+(" "*(newlp-nc_len(myprint))) |
4475 |
myprint=myprint+green(" ["+xs[1]+xs[2]+"] ") |
4476 |
if (oldlp-nc_len(myprint)) > 0: |
4477 |
myprint=myprint+(" "*(oldlp-nc_len(myprint))) |
4478 |
myprint += myoldbest |
4479 |
else: |
4480 |
if not pkg_merge: |
4481 |
myprint = "[%s] %s%s %s" % \ |
4482 |
(pkgprint(pkg_status.ljust(13)), |
4483 |
indent, pkgprint(pkg.cpv), |
4484 |
myoldbest) |
4485 |
else: |
4486 |
myprint = "[%s %s] %s%s %s" % \ |
4487 |
(pkgprint(pkg_type), addl, indent, |
4488 |
pkgprint(pkg.cpv), myoldbest) |
4489 |
|
4490 |
if columns and pkg.operation == "uninstall": |
4491 |
continue |
4492 |
p.append((myprint, verboseadd, repoadd)) |
4493 |
|
4494 |
if "--tree" not in self.myopts and \ |
4495 |
"--quiet" not in self.myopts and \ |
4496 |
not self._opts_no_restart.intersection(self.myopts) and \ |
4497 |
pkg.root == self._running_root.root and \ |
4498 |
portage.match_from_list( |
4499 |
portage.const.PORTAGE_PACKAGE_ATOM, [pkg]) and \ |
4500 |
not vardb.cpv_exists(pkg.cpv) and \ |
4501 |
"--quiet" not in self.myopts: |
4502 |
if mylist_index < len(mylist) - 1: |
4503 |
p.append(colorize("WARN", "*** Portage will stop merging at this point and reload itself,")) |
4504 |
p.append(colorize("WARN", " then resume the merge.")) |
4505 |
|
4506 |
out = sys.stdout |
4507 |
show_repos = repoadd_set and repoadd_set != set(["0"]) |
4508 |
|
4509 |
for x in p: |
4510 |
if isinstance(x, basestring): |
4511 |
out.write("%s\n" % (x,)) |
4512 |
continue |
4513 |
|
4514 |
myprint, verboseadd, repoadd = x |
4515 |
|
4516 |
if verboseadd: |
4517 |
myprint += " " + verboseadd |
4518 |
|
4519 |
if show_repos and repoadd: |
4520 |
myprint += " " + teal("[%s]" % repoadd) |
4521 |
|
4522 |
out.write("%s\n" % (myprint,)) |
4523 |
|
4524 |
for x in blockers: |
4525 |
print x |
4526 |
|
4527 |
if verbosity == 3: |
4528 |
print |
4529 |
print counters |
4530 |
if show_repos: |
4531 |
sys.stdout.write(str(repo_display)) |
4532 |
|
4533 |
if "--changelog" in self.myopts: |
4534 |
print |
4535 |
for revision,text in changelogs: |
4536 |
print bold('*'+revision) |
4537 |
sys.stdout.write(text) |
4538 |
|
4539 |
sys.stdout.flush() |
4540 |
return os.EX_OK |
4541 |
|
4542 |
def display_problems(self): |
4543 |
""" |
4544 |
Display problems with the dependency graph such as slot collisions. |
4545 |
This is called internally by display() to show the problems _after_ |
4546 |
the merge list where it is most likely to be seen, but if display() |
4547 |
is not going to be called then this method should be called explicitly |
4548 |
to ensure that the user is notified of problems with the graph. |
4549 |
|
4550 |
All output goes to stderr, except for unsatisfied dependencies which |
4551 |
go to stdout for parsing by programs such as autounmask. |
4552 |
""" |
4553 |
|
4554 |
# Note that show_masked_packages() sends its output to |
4555 |
# stdout, and some programs such as autounmask parse the |
4556 |
# output in cases when emerge bails out. However, when |
4557 |
# show_masked_packages() is called for installed packages |
4558 |
# here, the message is a warning that is more appropriate |
4559 |
# to send to stderr, so temporarily redirect stdout to |
4560 |
# stderr. TODO: Fix output code so there's a cleaner way |
4561 |
# to redirect everything to stderr. |
4562 |
sys.stdout.flush() |
4563 |
sys.stderr.flush() |
4564 |
stdout = sys.stdout |
4565 |
try: |
4566 |
sys.stdout = sys.stderr |
4567 |
self._display_problems() |
4568 |
finally: |
4569 |
sys.stdout = stdout |
4570 |
sys.stdout.flush() |
4571 |
sys.stderr.flush() |
4572 |
|
4573 |
# This goes to stdout for parsing by programs like autounmask. |
4574 |
for pargs, kwargs in self._unsatisfied_deps_for_display: |
4575 |
self._show_unsatisfied_dep(*pargs, **kwargs) |
4576 |
|
4577 |
def _display_problems(self): |
4578 |
if self._circular_deps_for_display is not None: |
4579 |
self._show_circular_deps( |
4580 |
self._circular_deps_for_display) |
4581 |
|
4582 |
# The user is only notified of a slot conflict if |
4583 |
# there are no unresolvable blocker conflicts. |
4584 |
if self._unsatisfied_blockers_for_display is not None: |
4585 |
self._show_unsatisfied_blockers( |
4586 |
self._unsatisfied_blockers_for_display) |
4587 |
else: |
4588 |
self._show_slot_collision_notice() |
4589 |
|
4590 |
# TODO: Add generic support for "set problem" handlers so that |
4591 |
# the below warnings aren't special cases for world only. |
4592 |
|
4593 |
if self._missing_args: |
4594 |
world_problems = False |
4595 |
if "world" in self._sets: |
4596 |
# Filter out indirect members of world (from nested sets) |
4597 |
# since only direct members of world are desired here. |
4598 |
world_set = self.roots[self.target_root].sets["world"] |
4599 |
for arg, atom in self._missing_args: |
4600 |
if arg.name == "world" and atom in world_set: |
4601 |
world_problems = True |
4602 |
break |
4603 |
|
4604 |
if world_problems: |
4605 |
sys.stderr.write("\n!!! Problems have been " + \ |
4606 |
"detected with your world file\n") |
4607 |
sys.stderr.write("!!! Please run " + \ |
4608 |
green("emaint --check world")+"\n\n") |
4609 |
|
4610 |
if self._missing_args: |
4611 |
sys.stderr.write("\n" + colorize("BAD", "!!!") + \ |
4612 |
" Ebuilds for the following packages are either all\n") |
4613 |
sys.stderr.write(colorize("BAD", "!!!") + \ |
4614 |
" masked or don't exist:\n") |
4615 |
sys.stderr.write(" ".join(str(atom) for arg, atom in \ |
4616 |
self._missing_args) + "\n") |
4617 |
|
4618 |
if self._pprovided_args: |
4619 |
arg_refs = {} |
4620 |
for arg, atom in self._pprovided_args: |
4621 |
if isinstance(arg, SetArg): |
4622 |
parent = arg.name |
4623 |
arg_atom = (atom, atom) |
4624 |
else: |
4625 |
parent = "args" |
4626 |
arg_atom = (arg.arg, atom) |
4627 |
refs = arg_refs.setdefault(arg_atom, []) |
4628 |
if parent not in refs: |
4629 |
refs.append(parent) |
4630 |
msg = [] |
4631 |
msg.append(bad("\nWARNING: ")) |
4632 |
if len(self._pprovided_args) > 1: |
4633 |
msg.append("Requested packages will not be " + \ |
4634 |
"merged because they are listed in\n") |
4635 |
else: |
4636 |
msg.append("A requested package will not be " + \ |
4637 |
"merged because it is listed in\n") |
4638 |
msg.append("package.provided:\n\n") |
4639 |
problems_sets = set() |
4640 |
for (arg, atom), refs in arg_refs.iteritems(): |
4641 |
ref_string = "" |
4642 |
if refs: |
4643 |
problems_sets.update(refs) |
4644 |
refs.sort() |
4645 |
ref_string = ", ".join(["'%s'" % name for name in refs]) |
4646 |
ref_string = " pulled in by " + ref_string |
4647 |
msg.append(" %s%s\n" % (colorize("INFORM", str(arg)), ref_string)) |
4648 |
msg.append("\n") |
4649 |
if "world" in problems_sets: |
4650 |
msg.append("This problem can be solved in one of the following ways:\n\n") |
4651 |
msg.append(" A) Use emaint to clean offending packages from world (if not installed).\n") |
4652 |
msg.append(" B) Uninstall offending packages (cleans them from world).\n") |
4653 |
msg.append(" C) Remove offending entries from package.provided.\n\n") |
4654 |
msg.append("The best course of action depends on the reason that an offending\n") |
4655 |
msg.append("package.provided entry exists.\n\n") |
4656 |
sys.stderr.write("".join(msg)) |
4657 |
|
4658 |
masked_packages = [] |
4659 |
for pkg in self._masked_installed: |
4660 |
root_config = pkg.root_config |
4661 |
pkgsettings = self.pkgsettings[pkg.root] |
4662 |
mreasons = get_masking_status(pkg, pkgsettings, root_config) |
4663 |
masked_packages.append((root_config, pkgsettings, |
4664 |
pkg.cpv, pkg.metadata, mreasons)) |
4665 |
if masked_packages: |
4666 |
sys.stderr.write("\n" + colorize("BAD", "!!!") + \ |
4667 |
" The following installed packages are masked:\n") |
4668 |
show_masked_packages(masked_packages) |
4669 |
show_mask_docs() |
4670 |
print |
4671 |
|
4672 |
def calc_changelog(self,ebuildpath,current,next): |
4673 |
if ebuildpath == None or not os.path.exists(ebuildpath): |
4674 |
return [] |
4675 |
current = '-'.join(portage.catpkgsplit(current)[1:]) |
4676 |
if current.endswith('-r0'): |
4677 |
current = current[:-3] |
4678 |
next = '-'.join(portage.catpkgsplit(next)[1:]) |
4679 |
if next.endswith('-r0'): |
4680 |
next = next[:-3] |
4681 |
changelogpath = os.path.join(os.path.split(ebuildpath)[0],'ChangeLog') |
4682 |
try: |
4683 |
changelog = open(changelogpath).read() |
4684 |
except SystemExit, e: |
4685 |
raise # Needed else can't exit |
4686 |
except: |
4687 |
return [] |
4688 |
divisions = self.find_changelog_tags(changelog) |
4689 |
#print 'XX from',current,'to',next |
4690 |
#for div,text in divisions: print 'XX',div |
4691 |
# skip entries for all revisions above the one we are about to emerge |
4692 |
for i in range(len(divisions)): |
4693 |
if divisions[i][0]==next: |
4694 |
divisions = divisions[i:] |
4695 |
break |
4696 |
# find out how many entries we are going to display |
4697 |
for i in range(len(divisions)): |
4698 |
if divisions[i][0]==current: |
4699 |
divisions = divisions[:i] |
4700 |
break |
4701 |
else: |
4702 |
# couldn't find the current revision in the list; display nothing |
4703 |
return [] |
4704 |
return divisions |
4705 |
|
4706 |
def find_changelog_tags(self,changelog): |
4707 |
divs = [] |
4708 |
release = None |
4709 |
while 1: |
4710 |
match = re.search(r'^\*\ ?([-a-zA-Z0-9_.+]*)(?:\ .*)?\n',changelog,re.M) |
4711 |
if match is None: |
4712 |
if release is not None: |
4713 |
divs.append((release,changelog)) |
4714 |
return divs |
4715 |
if release is not None: |
4716 |
divs.append((release,changelog[:match.start()])) |
4717 |
changelog = changelog[match.end():] |
4718 |
release = match.group(1) |
4719 |
if release.endswith('.ebuild'): |
4720 |
release = release[:-7] |
4721 |
if release.endswith('-r0'): |
4722 |
release = release[:-3] |
4723 |
|
4724 |
def saveNomergeFavorites(self): |
4725 |
"""Find atoms in favorites that are not in the mergelist and add them |
4726 |
to the world file if necessary.""" |
4727 |
for x in ("--buildpkgonly", "--fetchonly", "--fetch-all-uri", |
4728 |
"--oneshot", "--onlydeps", "--pretend"): |
4729 |
if x in self.myopts: |
4730 |
return |
4731 |
root_config = self.roots[self.target_root] |
4732 |
world_set = root_config.sets["world"] |
4733 |
|
4734 |
world_locked = False |
4735 |
if hasattr(world_set, "lock"): |
4736 |
world_set.lock() |
4737 |
world_locked = True |
4738 |
|
4739 |
if hasattr(world_set, "load"): |
4740 |
world_set.load() # maybe it's changed on disk |
4741 |
|
4742 |
args_set = self._sets["args"] |
4743 |
portdb = self.trees[self.target_root]["porttree"].dbapi |
4744 |
added_favorites = set() |
4745 |
for x in self._set_nodes: |
4746 |
pkg_type, root, pkg_key, pkg_status = x |
4747 |
if pkg_status != "nomerge": |
4748 |
continue |
4749 |
|
4750 |
try: |
4751 |
myfavkey = create_world_atom(x, args_set, root_config) |
4752 |
if myfavkey: |
4753 |
if myfavkey in added_favorites: |
4754 |
continue |
4755 |
added_favorites.add(myfavkey) |
4756 |
except portage.exception.InvalidDependString, e: |
4757 |
writemsg("\n\n!!! '%s' has invalid PROVIDE: %s\n" % \ |
4758 |
(pkg_key, str(e)), noiselevel=-1) |
4759 |
writemsg("!!! see '%s'\n\n" % os.path.join( |
4760 |
root, portage.VDB_PATH, pkg_key, "PROVIDE"), noiselevel=-1) |
4761 |
del e |
4762 |
all_added = [] |
4763 |
for k in self._sets: |
4764 |
if k in ("args", "world") or not root_config.sets[k].world_candidate: |
4765 |
continue |
4766 |
s = SETPREFIX + k |
4767 |
if s in world_set: |
4768 |
continue |
4769 |
all_added.append(SETPREFIX + k) |
4770 |
all_added.extend(added_favorites) |
4771 |
all_added.sort() |
4772 |
for a in all_added: |
4773 |
print ">>> Recording %s in \"world\" favorites file..." % \ |
4774 |
colorize("INFORM", str(a)) |
4775 |
if all_added: |
4776 |
world_set.update(all_added) |
4777 |
|
4778 |
if world_locked: |
4779 |
world_set.unlock() |
4780 |
|
4781 |
def loadResumeCommand(self, resume_data, skip_masked=True, |
4782 |
skip_missing=True): |
4783 |
""" |
4784 |
Add a resume command to the graph and validate it in the process. This |
4785 |
will raise a PackageNotFound exception if a package is not available. |
4786 |
""" |
4787 |
|
4788 |
if not isinstance(resume_data, dict): |
4789 |
return False |
4790 |
|
4791 |
mergelist = resume_data.get("mergelist") |
4792 |
if not isinstance(mergelist, list): |
4793 |
mergelist = [] |
4794 |
|
4795 |
fakedb = self.mydbapi |
4796 |
trees = self.trees |
4797 |
serialized_tasks = [] |
4798 |
masked_tasks = [] |
4799 |
for x in mergelist: |
4800 |
if not (isinstance(x, list) and len(x) == 4): |
4801 |
continue |
4802 |
pkg_type, myroot, pkg_key, action = x |
4803 |
if pkg_type not in self.pkg_tree_map: |
4804 |
continue |
4805 |
if action != "merge": |
4806 |
continue |
4807 |
tree_type = self.pkg_tree_map[pkg_type] |
4808 |
mydb = trees[myroot][tree_type].dbapi |
4809 |
db_keys = list(self._trees_orig[myroot][ |
4810 |
tree_type].dbapi._aux_cache_keys) |
4811 |
try: |
4812 |
metadata = izip(db_keys, mydb.aux_get(pkg_key, db_keys)) |
4813 |
except KeyError: |
4814 |
# It does not exist or it is corrupt. |
4815 |
if action == "uninstall": |
4816 |
continue |
4817 |
if skip_missing: |
4818 |
# TODO: log these somewhere |
4819 |
continue |
4820 |
raise portage.exception.PackageNotFound(pkg_key) |
4821 |
installed = action == "uninstall" |
4822 |
built = pkg_type != "ebuild" |
4823 |
root_config = self.roots[myroot] |
4824 |
pkg = Package(built=built, cpv=pkg_key, |
4825 |
installed=installed, metadata=metadata, |
4826 |
operation=action, root_config=root_config, |
4827 |
type_name=pkg_type) |
4828 |
if pkg_type == "ebuild": |
4829 |
pkgsettings = self.pkgsettings[myroot] |
4830 |
pkgsettings.setcpv(pkg) |
4831 |
pkg.metadata["USE"] = pkgsettings["PORTAGE_USE"] |
4832 |
pkg.metadata['CHOST'] = pkgsettings.get('CHOST', '') |
4833 |
self._pkg_cache[pkg] = pkg |
4834 |
|
4835 |
root_config = self.roots[pkg.root] |
4836 |
if "merge" == pkg.operation and \ |
4837 |
not visible(root_config.settings, pkg): |
4838 |
if skip_masked: |
4839 |
masked_tasks.append(Dependency(root=pkg.root, parent=pkg)) |
4840 |
else: |
4841 |
self._unsatisfied_deps_for_display.append( |
4842 |
((pkg.root, "="+pkg.cpv), {"myparent":None})) |
4843 |
|
4844 |
fakedb[myroot].cpv_inject(pkg) |
4845 |
serialized_tasks.append(pkg) |
4846 |
self.spinner.update() |
4847 |
|
4848 |
if self._unsatisfied_deps_for_display: |
4849 |
return False |
4850 |
|
4851 |
if not serialized_tasks or "--nodeps" in self.myopts: |
4852 |
self._serialized_tasks_cache = serialized_tasks |
4853 |
self._scheduler_graph = self.digraph |
4854 |
else: |
4855 |
self._select_package = self._select_pkg_from_graph |
4856 |
self.myparams.add("selective") |
4857 |
# Always traverse deep dependencies in order to account for |
4858 |
# potentially unsatisfied dependencies of installed packages. |
4859 |
# This is necessary for correct --keep-going or --resume operation |
4860 |
# in case a package from a group of circularly dependent packages |
4861 |
# fails. In this case, a package which has recently been installed |
4862 |
# may have an unsatisfied circular dependency (pulled in by |
4863 |
# PDEPEND, for example). So, even though a package is already |
4864 |
# installed, it may not have all of its dependencies satisfied, so |
4865 |
# it may not be usable. If such a package is in the subgraph of |
4866 |
# deep dependencies of a scheduled build, that build needs to |
4867 |
# be cancelled. In order for this type of situation to be |
4868 |
# recognized, deep traversal of dependencies is required. |
4869 |
self.myparams.add("deep") |
4870 |
|
4871 |
favorites = resume_data.get("favorites") |
4872 |
args_set = self._sets["args"] |
4873 |
if isinstance(favorites, list): |
4874 |
args = self._load_favorites(favorites) |
4875 |
else: |
4876 |
args = [] |
4877 |
|
4878 |
for task in serialized_tasks: |
4879 |
if isinstance(task, Package) and \ |
4880 |
task.operation == "merge": |
4881 |
if not self._add_pkg(task, None): |
4882 |
return False |
4883 |
|
4884 |
# Packages for argument atoms need to be explicitly |
4885 |
# added via _add_pkg() so that they are included in the |
4886 |
# digraph (needed at least for --tree display). |
4887 |
for arg in args: |
4888 |
for atom in arg.set: |
4889 |
pkg, existing_node = self._select_package( |
4890 |
arg.root_config.root, atom) |
4891 |
if existing_node is None and \ |
4892 |
pkg is not None: |
4893 |
if not self._add_pkg(pkg, Dependency(atom=atom, |
4894 |
root=pkg.root, parent=arg)): |
4895 |
return False |
4896 |
|
4897 |
# Allow unsatisfied deps here to avoid showing a masking |
4898 |
# message for an unsatisfied dep that isn't necessarily |
4899 |
# masked. |
4900 |
if not self._create_graph(allow_unsatisfied=True): |
4901 |
return False |
4902 |
|
4903 |
unsatisfied_deps = [] |
4904 |
for dep in self._unsatisfied_deps: |
4905 |
if not isinstance(dep.parent, Package): |
4906 |
continue |
4907 |
if dep.parent.operation == "merge": |
4908 |
unsatisfied_deps.append(dep) |
4909 |
continue |
4910 |
|
4911 |
# For unsatisfied deps of installed packages, only account for |
4912 |
# them if they are in the subgraph of dependencies of a package |
4913 |
# which is scheduled to be installed. |
4914 |
unsatisfied_install = False |
4915 |
traversed = set() |
4916 |
dep_stack = self.digraph.parent_nodes(dep.parent) |
4917 |
while dep_stack: |
4918 |
node = dep_stack.pop() |
4919 |
if not isinstance(node, Package): |
4920 |
continue |
4921 |
if node.operation == "merge": |
4922 |
unsatisfied_install = True |
4923 |
break |
4924 |
if node in traversed: |
4925 |
continue |
4926 |
traversed.add(node) |
4927 |
dep_stack.extend(self.digraph.parent_nodes(node)) |
4928 |
|
4929 |
if unsatisfied_install: |
4930 |
unsatisfied_deps.append(dep) |
4931 |
|
4932 |
if masked_tasks or unsatisfied_deps: |
4933 |
# This probably means that a required package |
4934 |
# was dropped via --skipfirst. It makes the |
4935 |
# resume list invalid, so convert it to a |
4936 |
# UnsatisfiedResumeDep exception. |
4937 |
raise self.UnsatisfiedResumeDep(self, |
4938 |
masked_tasks + unsatisfied_deps) |
4939 |
self._serialized_tasks_cache = None |
4940 |
try: |
4941 |
self.altlist() |
4942 |
except self._unknown_internal_error: |
4943 |
return False |
4944 |
|
4945 |
return True |
4946 |
|
4947 |
def _load_favorites(self, favorites): |
4948 |
""" |
4949 |
Use a list of favorites to resume state from a |
4950 |
previous select_files() call. This creates similar |
4951 |
DependencyArg instances to those that would have |
4952 |
been created by the original select_files() call. |
4953 |
This allows Package instances to be matched with |
4954 |
DependencyArg instances during graph creation. |
4955 |
""" |
4956 |
root_config = self.roots[self.target_root] |
4957 |
getSetAtoms = root_config.setconfig.getSetAtoms |
4958 |
sets = root_config.sets |
4959 |
args = [] |
4960 |
for x in favorites: |
4961 |
if not isinstance(x, basestring): |
4962 |
continue |
4963 |
if x in ("system", "world"): |
4964 |
x = SETPREFIX + x |
4965 |
if x.startswith(SETPREFIX): |
4966 |
s = x[len(SETPREFIX):] |
4967 |
if s not in sets: |
4968 |
continue |
4969 |
if s in self._sets: |
4970 |
continue |
4971 |
# Recursively expand sets so that containment tests in |
4972 |
# self._get_parent_sets() properly match atoms in nested |
4973 |
# sets (like if world contains system). |
4974 |
expanded_set = InternalPackageSet( |
4975 |
initial_atoms=getSetAtoms(s)) |
4976 |
self._sets[s] = expanded_set |
4977 |
args.append(SetArg(arg=x, set=expanded_set, |
4978 |
root_config=root_config)) |
4979 |
else: |
4980 |
if not portage.isvalidatom(x): |
4981 |
continue |
4982 |
args.append(AtomArg(arg=x, atom=x, |
4983 |
root_config=root_config)) |
4984 |
|
4985 |
self._set_args(args) |
4986 |
return args |
4987 |
|
4988 |
class UnsatisfiedResumeDep(portage.exception.PortageException): |
4989 |
""" |
4990 |
A dependency of a resume list is not installed. This |
4991 |
can occur when a required package is dropped from the |
4992 |
merge list via --skipfirst. |
4993 |
""" |
4994 |
def __init__(self, depgraph, value): |
4995 |
portage.exception.PortageException.__init__(self, value) |
4996 |
self.depgraph = depgraph |
4997 |
|
4998 |
class _internal_exception(portage.exception.PortageException): |
4999 |
def __init__(self, value=""): |
5000 |
portage.exception.PortageException.__init__(self, value) |
5001 |
|
5002 |
class _unknown_internal_error(_internal_exception): |
5003 |
""" |
5004 |
Used by the depgraph internally to terminate graph creation. |
5005 |
The specific reason for the failure should have been dumped |
5006 |
to stderr, unfortunately, the exact reason for the failure |
5007 |
may not be known. |
5008 |
""" |
5009 |
|
5010 |
class _serialize_tasks_retry(_internal_exception): |
5011 |
""" |
5012 |
This is raised by the _serialize_tasks() method when it needs to |
5013 |
be called again for some reason. The only case that it's currently |
5014 |
used for is when neglected dependencies need to be added to the |
5015 |
graph in order to avoid making a potentially unsafe decision. |
5016 |
""" |
5017 |
|
5018 |
class _dep_check_composite_db(portage.dbapi): |
5019 |
""" |
5020 |
A dbapi-like interface that is optimized for use in dep_check() calls. |
5021 |
This is built on top of the existing depgraph package selection logic. |
5022 |
Some packages that have been added to the graph may be masked from this |
5023 |
view in order to influence the atom preference selection that occurs |
5024 |
via dep_check(). |
5025 |
""" |
5026 |
def __init__(self, depgraph, root): |
5027 |
portage.dbapi.__init__(self) |
5028 |
self._depgraph = depgraph |
5029 |
self._root = root |
5030 |
self._match_cache = {} |
5031 |
self._cpv_pkg_map = {} |
5032 |
|
5033 |
def _clear_cache(self): |
5034 |
self._match_cache.clear() |
5035 |
self._cpv_pkg_map.clear() |
5036 |
|
5037 |
def match(self, atom): |
5038 |
ret = self._match_cache.get(atom) |
5039 |
if ret is not None: |
5040 |
return ret[:] |
5041 |
orig_atom = atom |
5042 |
if "/" not in atom: |
5043 |
atom = self._dep_expand(atom) |
5044 |
pkg, existing = self._depgraph._select_package(self._root, atom) |
5045 |
if not pkg: |
5046 |
ret = [] |
5047 |
else: |
5048 |
# Return the highest available from select_package() as well as |
5049 |
# any matching slots in the graph db. |
5050 |
slots = set() |
5051 |
slots.add(pkg.metadata["SLOT"]) |
5052 |
atom_cp = portage.dep_getkey(atom) |
5053 |
if pkg.cp.startswith("virtual/"): |
5054 |
# For new-style virtual lookahead that occurs inside |
5055 |
# dep_check(), examine all slots. This is needed |
5056 |
# so that newer slots will not unnecessarily be pulled in |
5057 |
# when a satisfying lower slot is already installed. For |
5058 |
# example, if virtual/jdk-1.4 is satisfied via kaffe then |
5059 |
# there's no need to pull in a newer slot to satisfy a |
5060 |
# virtual/jdk dependency. |
5061 |
for db, pkg_type, built, installed, db_keys in \ |
5062 |
self._depgraph._filtered_trees[self._root]["dbs"]: |
5063 |
for cpv in db.match(atom): |
5064 |
if portage.cpv_getkey(cpv) != pkg.cp: |
5065 |
continue |
5066 |
slots.add(db.aux_get(cpv, ["SLOT"])[0]) |
5067 |
ret = [] |
5068 |
if self._visible(pkg): |
5069 |
self._cpv_pkg_map[pkg.cpv] = pkg |
5070 |
ret.append(pkg.cpv) |
5071 |
slots.remove(pkg.metadata["SLOT"]) |
5072 |
while slots: |
5073 |
slot_atom = "%s:%s" % (atom_cp, slots.pop()) |
5074 |
pkg, existing = self._depgraph._select_package( |
5075 |
self._root, slot_atom) |
5076 |
if not pkg: |
5077 |
continue |
5078 |
if not self._visible(pkg): |
5079 |
continue |
5080 |
self._cpv_pkg_map[pkg.cpv] = pkg |
5081 |
ret.append(pkg.cpv) |
5082 |
if ret: |
5083 |
self._cpv_sort_ascending(ret) |
5084 |
self._match_cache[orig_atom] = ret |
5085 |
return ret[:] |
5086 |
|
5087 |
def _visible(self, pkg): |
5088 |
if pkg.installed and "selective" not in self._depgraph.myparams: |
5089 |
try: |
5090 |
arg = self._depgraph._iter_atoms_for_pkg(pkg).next() |
5091 |
except (StopIteration, portage.exception.InvalidDependString): |
5092 |
arg = None |
5093 |
if arg: |
5094 |
return False |
5095 |
if pkg.installed: |
5096 |
try: |
5097 |
if not visible( |
5098 |
self._depgraph.pkgsettings[pkg.root], pkg): |
5099 |
return False |
5100 |
except portage.exception.InvalidDependString: |
5101 |
pass |
5102 |
in_graph = self._depgraph._slot_pkg_map[ |
5103 |
self._root].get(pkg.slot_atom) |
5104 |
if in_graph is None: |
5105 |
# Mask choices for packages which are not the highest visible |
5106 |
# version within their slot (since they usually trigger slot |
5107 |
# conflicts). |
5108 |
highest_visible, in_graph = self._depgraph._select_package( |
5109 |
self._root, pkg.slot_atom) |
5110 |
if pkg != highest_visible: |
5111 |
return False |
5112 |
elif in_graph != pkg: |
5113 |
# Mask choices for packages that would trigger a slot |
5114 |
# conflict with a previously selected package. |
5115 |
return False |
5116 |
return True |
5117 |
|
5118 |
def _dep_expand(self, atom): |
5119 |
""" |
5120 |
This is only needed for old installed packages that may |
5121 |
contain atoms that are not fully qualified with a specific |
5122 |
category. Emulate the cpv_expand() function that's used by |
5123 |
dbapi.match() in cases like this. If there are multiple |
5124 |
matches, it's often due to a new-style virtual that has |
5125 |
been added, so try to filter those out to avoid raising |
5126 |
a ValueError. |
5127 |
""" |
5128 |
root_config = self._depgraph.roots[self._root] |
5129 |
orig_atom = atom |
5130 |
expanded_atoms = self._depgraph._dep_expand(root_config, atom) |
5131 |
if len(expanded_atoms) > 1: |
5132 |
non_virtual_atoms = [] |
5133 |
for x in expanded_atoms: |
5134 |
if not portage.dep_getkey(x).startswith("virtual/"): |
5135 |
non_virtual_atoms.append(x) |
5136 |
if len(non_virtual_atoms) == 1: |
5137 |
expanded_atoms = non_virtual_atoms |
5138 |
if len(expanded_atoms) > 1: |
5139 |
# compatible with portage.cpv_expand() |
5140 |
raise portage.exception.AmbiguousPackageName( |
5141 |
[portage.dep_getkey(x) for x in expanded_atoms]) |
5142 |
if expanded_atoms: |
5143 |
atom = expanded_atoms[0] |
5144 |
else: |
5145 |
null_atom = insert_category_into_atom(atom, "null") |
5146 |
null_cp = portage.dep_getkey(null_atom) |
5147 |
cat, atom_pn = portage.catsplit(null_cp) |
5148 |
virts_p = root_config.settings.get_virts_p().get(atom_pn) |
5149 |
if virts_p: |
5150 |
# Allow the resolver to choose which virtual. |
5151 |
atom = insert_category_into_atom(atom, "virtual") |
5152 |
else: |
5153 |
atom = insert_category_into_atom(atom, "null") |
5154 |
return atom |
5155 |
|
5156 |
def aux_get(self, cpv, wants): |
5157 |
metadata = self._cpv_pkg_map[cpv].metadata |
5158 |
return [metadata.get(x, "") for x in wants] |
5159 |
|
5160 |
class Scheduler(PollScheduler): |
5161 |
|
5162 |
_opts_ignore_blockers = \ |
5163 |
frozenset(["--buildpkgonly", |
5164 |
"--fetchonly", "--fetch-all-uri", |
5165 |
"--nodeps", "--pretend"]) |
5166 |
|
5167 |
_opts_no_background = \ |
5168 |
frozenset(["--pretend", |
5169 |
"--fetchonly", "--fetch-all-uri"]) |
5170 |
|
5171 |
_opts_no_restart = frozenset(["--buildpkgonly", |
5172 |
"--fetchonly", "--fetch-all-uri", "--pretend"]) |
5173 |
|
5174 |
_bad_resume_opts = set(["--ask", "--changelog", |
5175 |
"--resume", "--skipfirst"]) |
5176 |
|
5177 |
_fetch_log = os.path.join(_emerge_log_dir, 'emerge-fetch.log') |
5178 |
|
5179 |
class _iface_class(SlotObject): |
5180 |
__slots__ = ("dblinkEbuildPhase", "dblinkDisplayMerge", |
5181 |
"dblinkElog", "dblinkEmergeLog", "fetch", "register", "schedule", |
5182 |
"scheduleSetup", "scheduleUnpack", "scheduleYield", |
5183 |
"unregister") |
5184 |
|
5185 |
class _fetch_iface_class(SlotObject): |
5186 |
__slots__ = ("log_file", "schedule") |
5187 |
|
5188 |
_task_queues_class = slot_dict_class( |
5189 |
("merge", "jobs", "fetch", "unpack"), prefix="") |
5190 |
|
5191 |
class _build_opts_class(SlotObject): |
5192 |
__slots__ = ("buildpkg", "buildpkgonly", |
5193 |
"fetch_all_uri", "fetchonly", "pretend") |
5194 |
|
5195 |
class _binpkg_opts_class(SlotObject): |
5196 |
__slots__ = ("fetchonly", "getbinpkg", "pretend") |
5197 |
|
5198 |
class _pkg_count_class(SlotObject): |
5199 |
__slots__ = ("curval", "maxval") |
5200 |
|
5201 |
class _emerge_log_class(SlotObject): |
5202 |
__slots__ = ("xterm_titles",) |
5203 |
|
5204 |
def log(self, *pargs, **kwargs): |
5205 |
if not self.xterm_titles: |
5206 |
# Avoid interference with the scheduler's status display. |
5207 |
kwargs.pop("short_msg", None) |
5208 |
emergelog(self.xterm_titles, *pargs, **kwargs) |
5209 |
|
5210 |
class _failed_pkg(SlotObject): |
5211 |
__slots__ = ("build_dir", "build_log", "pkg", "returncode") |
5212 |
|
5213 |
class _ConfigPool(object): |
5214 |
"""Interface for a task to temporarily allocate a config |
5215 |
instance from a pool. This allows a task to be constructed |
5216 |
long before the config instance actually becomes needed, like |
5217 |
when prefetchers are constructed for the whole merge list.""" |
5218 |
__slots__ = ("_root", "_allocate", "_deallocate") |
5219 |
def __init__(self, root, allocate, deallocate): |
5220 |
self._root = root |
5221 |
self._allocate = allocate |
5222 |
self._deallocate = deallocate |
5223 |
def allocate(self): |
5224 |
return self._allocate(self._root) |
5225 |
def deallocate(self, settings): |
5226 |
self._deallocate(settings) |
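# Rough usage sketch (illustrative): a task such as a prefetcher |
# holds a _ConfigPool and only borrows a config object for the time |
# it actually runs: |
#   settings = config_pool.allocate() |
#   try: |
#       ...  # use settings |
#   finally: |
#       config_pool.deallocate(settings) |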
5227 |
|
5228 |
class _unknown_internal_error(portage.exception.PortageException): |
5229 |
""" |
5230 |
Used internally to terminate scheduling. The specific reason for |
5231 |
the failure should have been dumped to stderr. |
5232 |
""" |
5233 |
def __init__(self, value=""): |
5234 |
portage.exception.PortageException.__init__(self, value) |
5235 |
|
5236 |
def __init__(self, settings, trees, mtimedb, myopts, |
5237 |
spinner, mergelist, favorites, digraph): |
5238 |
PollScheduler.__init__(self) |
5239 |
self.settings = settings |
5240 |
self.target_root = settings["ROOT"] |
5241 |
self.trees = trees |
5242 |
self.myopts = myopts |
5243 |
self._spinner = spinner |
5244 |
self._mtimedb = mtimedb |
5245 |
self._mergelist = mergelist |
5246 |
self._favorites = favorites |
5247 |
self._args_set = InternalPackageSet(favorites) |
5248 |
self._build_opts = self._build_opts_class() |
5249 |
for k in self._build_opts.__slots__: |
5250 |
setattr(self._build_opts, k, "--" + k.replace("_", "-") in myopts) |
5251 |
self._binpkg_opts = self._binpkg_opts_class() |
5252 |
for k in self._binpkg_opts.__slots__: |
5253 |
setattr(self._binpkg_opts, k, "--" + k.replace("_", "-") in myopts) |
5254 |
|
5255 |
self.curval = 0 |
5256 |
self._logger = self._emerge_log_class() |
5257 |
self._task_queues = self._task_queues_class() |
5258 |
for k in self._task_queues.allowed_keys: |
5259 |
setattr(self._task_queues, k, |
5260 |
SequentialTaskQueue()) |
5261 |
|
5262 |
# Holds merges that will wait to be executed when no builds are |
5263 |
# executing. This is useful for system packages since dependencies |
5264 |
# on system packages are frequently unspecified. |
5265 |
self._merge_wait_queue = [] |
5266 |
# Holds merges that have been transferred from the merge_wait_queue to |
5267 |
# the actual merge queue. They are removed from this list upon |
5268 |
# completion. Other packages can start building only when this list is |
5269 |
# empty. |
5270 |
self._merge_wait_scheduled = [] |
5271 |
|
5272 |
# Holds system packages and their deep runtime dependencies. Before |
5273 |
# being merged, these packages go to merge_wait_queue, to be merged |
5274 |
# when no other packages are building. |
5275 |
self._deep_system_deps = set() |
5276 |
|
5277 |
# Holds packages to merge which will satisfy currently unsatisfied |
5278 |
# deep runtime dependencies of system packages. If this is not empty |
5279 |
# then no parallel builds will be spawned until it is empty. This |
5280 |
# minimizes the possibility that a build will fail due to the system |
5281 |
# being in a fragile state. For example, see bug #259954. |
5282 |
self._unsatisfied_system_deps = set() |
5283 |
|
5284 |
self._status_display = JobStatusDisplay( |
5285 |
xterm_titles=('notitles' not in settings.features)) |
5286 |
self._max_load = myopts.get("--load-average") |
5287 |
max_jobs = myopts.get("--jobs") |
5288 |
if max_jobs is None: |
5289 |
max_jobs = 1 |
5290 |
self._set_max_jobs(max_jobs) |
5291 |
|
5292 |
# The root where the currently running |
5293 |
# portage instance is installed. |
5294 |
self._running_root = trees["/"]["root_config"] |
5295 |
self.edebug = 0 |
5296 |
if settings.get("PORTAGE_DEBUG", "") == "1": |
5297 |
self.edebug = 1 |
5298 |
self.pkgsettings = {} |
5299 |
self._config_pool = {} |
5300 |
self._blocker_db = {} |
5301 |
for root in trees: |
5302 |
self._config_pool[root] = [] |
5303 |
self._blocker_db[root] = BlockerDB(trees[root]["root_config"]) |
5304 |
|
5305 |
fetch_iface = self._fetch_iface_class(log_file=self._fetch_log, |
5306 |
schedule=self._schedule_fetch) |
5307 |
self._sched_iface = self._iface_class( |
5308 |
dblinkEbuildPhase=self._dblink_ebuild_phase, |
5309 |
dblinkDisplayMerge=self._dblink_display_merge, |
5310 |
dblinkElog=self._dblink_elog, |
5311 |
dblinkEmergeLog=self._dblink_emerge_log, |
5312 |
fetch=fetch_iface, register=self._register, |
5313 |
schedule=self._schedule_wait, |
5314 |
scheduleSetup=self._schedule_setup, |
5315 |
scheduleUnpack=self._schedule_unpack, |
5316 |
scheduleYield=self._schedule_yield, |
5317 |
unregister=self._unregister) |
5318 |
|
5319 |
self._prefetchers = weakref.WeakValueDictionary() |
5320 |
self._pkg_queue = [] |
5321 |
self._completed_tasks = set() |
5322 |
|
5323 |
self._failed_pkgs = [] |
5324 |
self._failed_pkgs_all = [] |
5325 |
self._failed_pkgs_die_msgs = [] |
5326 |
self._post_mod_echo_msgs = [] |
5327 |
self._parallel_fetch = False |
5328 |
merge_count = len([x for x in mergelist \ |
5329 |
if isinstance(x, Package) and x.operation == "merge"]) |
5330 |
self._pkg_count = self._pkg_count_class( |
5331 |
curval=0, maxval=merge_count) |
5332 |
self._status_display.maxval = self._pkg_count.maxval |
5333 |
|
5334 |
# The load average takes some time to respond when new |
5335 |
# jobs are added, so we need to limit the rate of adding |
5336 |
# new jobs. |
5337 |
self._job_delay_max = 10 |
5338 |
self._job_delay_factor = 1.0 |
5339 |
self._job_delay_exp = 1.5 |
5340 |
self._previous_job_start_time = None |
5341 |
|
5342 |
self._set_digraph(digraph) |
5343 |
|
5344 |
# This is used to memoize the _choose_pkg() result when |
5345 |
# no packages can be chosen until one of the existing |
5346 |
# jobs completes. |
5347 |
self._choose_pkg_return_early = False |
5348 |
|
5349 |
features = self.settings.features |
5350 |
if "parallel-fetch" in features and \ |
5351 |
not ("--pretend" in self.myopts or \ |
5352 |
"--fetch-all-uri" in self.myopts or \ |
5353 |
"--fetchonly" in self.myopts): |
5354 |
if "distlocks" not in features: |
5355 |
portage.writemsg(red("!!!")+"\n", noiselevel=-1) |
5356 |
portage.writemsg(red("!!!")+" parallel-fetching " + \ |
5357 |
"requires the distlocks feature enabled"+"\n", |
5358 |
noiselevel=-1) |
5359 |
portage.writemsg(red("!!!")+" you have it disabled, " + \ |
5360 |
"thus parallel-fetching is being disabled"+"\n", |
5361 |
noiselevel=-1) |
5362 |
portage.writemsg(red("!!!")+"\n", noiselevel=-1) |
5363 |
elif len(mergelist) > 1: |
5364 |
self._parallel_fetch = True |
5365 |
|
5366 |
if self._parallel_fetch: |
5367 |
# clear out existing fetch log if it exists |
5368 |
try: |
5369 |
open(self._fetch_log, 'w') |
5370 |
except EnvironmentError: |
5371 |
pass |
5372 |
|
5373 |
self._running_portage = None |
5374 |
portage_match = self._running_root.trees["vartree"].dbapi.match( |
5375 |
portage.const.PORTAGE_PACKAGE_ATOM) |
5376 |
if portage_match: |
5377 |
cpv = portage_match.pop() |
5378 |
self._running_portage = self._pkg(cpv, "installed", |
5379 |
self._running_root, installed=True) |
5380 |
|
5381 |
def _poll(self, timeout=None): |
5382 |
self._schedule() |
5383 |
PollScheduler._poll(self, timeout=timeout) |
5384 |
|
5385 |
def _set_max_jobs(self, max_jobs): |
5386 |
self._max_jobs = max_jobs |
5387 |
self._task_queues.jobs.max_jobs = max_jobs |
5388 |
|
5389 |
def _background_mode(self): |
5390 |
""" |
5391 |
Check if background mode is enabled and adjust states as necessary. |
5392 |
|
5393 |
@rtype: bool |
5394 |
@returns: True if background mode is enabled, False otherwise. |
5395 |
""" |
5396 |
background = (self._max_jobs is True or \ |
5397 |
self._max_jobs > 1 or "--quiet" in self.myopts) and \ |
5398 |
not bool(self._opts_no_background.intersection(self.myopts)) |
5399 |
|
5400 |
if background: |
5401 |
interactive_tasks = self._get_interactive_tasks() |
5402 |
if interactive_tasks: |
5403 |
background = False |
5404 |
writemsg_level(">>> Sending package output to stdio due " + \ |
5405 |
"to interactive package(s):\n", |
5406 |
level=logging.INFO, noiselevel=-1) |
5407 |
msg = [""] |
5408 |
for pkg in interactive_tasks: |
5409 |
pkg_str = " " + colorize("INFORM", str(pkg.cpv)) |
5410 |
if pkg.root != "/": |
5411 |
pkg_str += " for " + pkg.root |
5412 |
msg.append(pkg_str) |
5413 |
msg.append("") |
5414 |
writemsg_level("".join("%s\n" % (l,) for l in msg), |
5415 |
level=logging.INFO, noiselevel=-1) |
5416 |
if self._max_jobs is True or self._max_jobs > 1: |
5417 |
self._set_max_jobs(1) |
5418 |
writemsg_level(">>> Setting --jobs=1 due " + \ |
5419 |
"to the above interactive package(s)\n", |
5420 |
level=logging.INFO, noiselevel=-1) |
5421 |
|
5422 |
self._status_display.quiet = \ |
5423 |
not background or \ |
5424 |
("--quiet" in self.myopts and \ |
5425 |
"--verbose" not in self.myopts) |
5426 |
|
5427 |
self._logger.xterm_titles = \ |
5428 |
"notitles" not in self.settings.features and \ |
5429 |
self._status_display.quiet |
5430 |
|
5431 |
return background |
5432 |
|
5433 |
def _get_interactive_tasks(self): |
5434 |
from portage import flatten |
5435 |
from portage.dep import use_reduce, paren_reduce |
5436 |
interactive_tasks = [] |
5437 |
for task in self._mergelist: |
5438 |
if not (isinstance(task, Package) and \ |
5439 |
task.operation == "merge"): |
5440 |
continue |
5441 |
try: |
5442 |
properties = flatten(use_reduce(paren_reduce( |
5443 |
task.metadata["PROPERTIES"]), uselist=task.use.enabled)) |
5444 |
except portage.exception.InvalidDependString, e: |
5445 |
show_invalid_depstring_notice(task, |
5446 |
task.metadata["PROPERTIES"], str(e)) |
5447 |
raise self._unknown_internal_error() |
5448 |
if "interactive" in properties: |
5449 |
interactive_tasks.append(task) |
5450 |
return interactive_tasks |
5451 |
|
5452 |
def _set_digraph(self, digraph): |
5453 |
if "--nodeps" in self.myopts or \ |
5454 |
(self._max_jobs is not True and self._max_jobs < 2): |
5455 |
# save some memory |
5456 |
self._digraph = None |
5457 |
return |
5458 |
|
5459 |
self._digraph = digraph |
5460 |
self._find_system_deps() |
5461 |
self._prune_digraph() |
5462 |
self._prevent_builddir_collisions() |
5463 |
|
5464 |
def _find_system_deps(self): |
5465 |
""" |
5466 |
Find system packages and their deep runtime dependencies. Before being |
5467 |
merged, these packages go to merge_wait_queue, to be merged when no |
5468 |
other packages are building. |
5469 |
""" |
5470 |
deep_system_deps = self._deep_system_deps |
5471 |
deep_system_deps.clear() |
5472 |
deep_system_deps.update( |
5473 |
_find_deep_system_runtime_deps(self._digraph)) |
5474 |
deep_system_deps.difference_update([pkg for pkg in \ |
5475 |
deep_system_deps if pkg.operation != "merge"]) |
5476 |
|
5477 |
def _prune_digraph(self): |
5478 |
""" |
5479 |
Prune any root nodes that are irrelevant. |
5480 |
""" |
5481 |
|
5482 |
graph = self._digraph |
5483 |
completed_tasks = self._completed_tasks |
5484 |
removed_nodes = set() |
5485 |
while True: |
5486 |
for node in graph.root_nodes(): |
5487 |
if not isinstance(node, Package) or \ |
5488 |
(node.installed and node.operation == "nomerge") or \ |
5489 |
node.onlydeps or \ |
5490 |
node in completed_tasks: |
5491 |
removed_nodes.add(node) |
5492 |
if removed_nodes: |
5493 |
graph.difference_update(removed_nodes) |
5494 |
if not removed_nodes: |
5495 |
break |
5496 |
removed_nodes.clear() |
5497 |
|
5498 |
def _prevent_builddir_collisions(self): |
5499 |
""" |
5500 |
When building stages, sometimes the same exact cpv needs to be merged |
5501 |
to both $ROOTs. Add edges to the digraph in order to avoid collisions |
5502 |
in the builddir. Currently, normal file locks would be inappropriate |
5503 |
for this purpose since emerge holds all of its build dir locks from |
5504 |
the main process. |
5505 |
""" |
5506 |
cpv_map = {} |
5507 |
for pkg in self._mergelist: |
5508 |
if not isinstance(pkg, Package): |
5509 |
# a satisfied blocker |
5510 |
continue |
5511 |
if pkg.installed: |
5512 |
continue |
5513 |
if pkg.cpv not in cpv_map: |
5514 |
cpv_map[pkg.cpv] = [pkg] |
5515 |
continue |
5516 |
for earlier_pkg in cpv_map[pkg.cpv]: |
5517 |
self._digraph.add(earlier_pkg, pkg, |
5518 |
priority=DepPriority(buildtime=True)) |
5519 |
cpv_map[pkg.cpv].append(pkg) |
5520 |
|
5521 |
class _pkg_failure(portage.exception.PortageException): |
5522 |
""" |
5523 |
An instance of this class is raised by unmerge() when |
5524 |
an uninstallation fails. |
5525 |
""" |
5526 |
status = 1 |
5527 |
def __init__(self, *pargs): |
5528 |
portage.exception.PortageException.__init__(self, pargs) |
5529 |
if pargs: |
5530 |
self.status = pargs[0] |
5531 |
|
5532 |
def _schedule_fetch(self, fetcher): |
5533 |
""" |
5534 |
Schedule a fetcher on the fetch queue, in order to |
5535 |
serialize access to the fetch log. |
5536 |
""" |
5537 |
self._task_queues.fetch.addFront(fetcher) |
5538 |
|
5539 |
def _schedule_setup(self, setup_phase): |
5540 |
""" |
5541 |
Schedule a setup phase on the merge queue, in order to |
5542 |
serialize unsandboxed access to the live filesystem. |
5543 |
""" |
5544 |
self._task_queues.merge.addFront(setup_phase) |
5545 |
self._schedule() |
5546 |
|
5547 |
def _schedule_unpack(self, unpack_phase): |
5548 |
""" |
5549 |
Schedule an unpack phase on the unpack queue, in order |
5550 |
to serialize $DISTDIR access for live ebuilds. |
5551 |
""" |
5552 |
self._task_queues.unpack.add(unpack_phase) |
5553 |
|
5554 |
def _find_blockers(self, new_pkg): |
5555 |
""" |
5556 |
Returns a callable which should be called only when |
5557 |
the vdb lock has been acquired. |
5558 |
""" |
5559 |
def get_blockers(): |
5560 |
return self._find_blockers_with_lock(new_pkg, acquire_lock=0) |
5561 |
return get_blockers |
5562 |
|
5563 |
def _find_blockers_with_lock(self, new_pkg, acquire_lock=0): |
5564 |
if self._opts_ignore_blockers.intersection(self.myopts): |
5565 |
return None |
5566 |
|
5567 |
# Call gc.collect() here to avoid heap overflow that |
5568 |
# triggers 'Cannot allocate memory' errors (reported |
5569 |
# with python-2.5). |
5570 |
import gc |
5571 |
gc.collect() |
5572 |
|
5573 |
blocker_db = self._blocker_db[new_pkg.root] |
5574 |
|
5575 |
blocker_dblinks = [] |
5576 |
for blocking_pkg in blocker_db.findInstalledBlockers( |
5577 |
new_pkg, acquire_lock=acquire_lock): |
5578 |
if new_pkg.slot_atom == blocking_pkg.slot_atom: |
5579 |
continue |
5580 |
if new_pkg.cpv == blocking_pkg.cpv: |
5581 |
continue |
5582 |
blocker_dblinks.append(portage.dblink( |
5583 |
blocking_pkg.category, blocking_pkg.pf, blocking_pkg.root, |
5584 |
self.pkgsettings[blocking_pkg.root], treetype="vartree", |
5585 |
vartree=self.trees[blocking_pkg.root]["vartree"])) |
5586 |
|
5587 |
gc.collect() |
5588 |
|
5589 |
return blocker_dblinks |
5590 |
|
5591 |
def _dblink_pkg(self, pkg_dblink): |
5592 |
cpv = pkg_dblink.mycpv |
5593 |
type_name = RootConfig.tree_pkg_map[pkg_dblink.treetype] |
5594 |
root_config = self.trees[pkg_dblink.myroot]["root_config"] |
5595 |
installed = type_name == "installed" |
5596 |
return self._pkg(cpv, type_name, root_config, installed=installed) |
5597 |
|
5598 |
def _append_to_log_path(self, log_path, msg): |
5599 |
f = open(log_path, 'a') |
5600 |
try: |
5601 |
f.write(msg) |
5602 |
finally: |
5603 |
f.close() |
5604 |
|
5605 |
def _dblink_elog(self, pkg_dblink, phase, func, msgs): |
5606 |
|
5607 |
log_path = pkg_dblink.settings.get("PORTAGE_LOG_FILE") |
5608 |
log_file = None |
5609 |
out = sys.stdout |
5610 |
background = self._background |
5611 |
|
5612 |
if background and log_path is not None: |
5613 |
log_file = open(log_path, 'a') |
5614 |
out = log_file |
5615 |
|
5616 |
try: |
5617 |
for msg in msgs: |
5618 |
func(msg, phase=phase, key=pkg_dblink.mycpv, out=out) |
5619 |
finally: |
5620 |
if log_file is not None: |
5621 |
log_file.close() |
5622 |
|
5623 |
def _dblink_emerge_log(self, msg): |
5624 |
self._logger.log(msg) |
5625 |
|
5626 |
def _dblink_display_merge(self, pkg_dblink, msg, level=0, noiselevel=0): |
5627 |
log_path = pkg_dblink.settings.get("PORTAGE_LOG_FILE") |
5628 |
background = self._background |
5629 |
|
5630 |
if log_path is None: |
5631 |
if not (background and level < logging.WARN): |
5632 |
portage.util.writemsg_level(msg, |
5633 |
level=level, noiselevel=noiselevel) |
5634 |
else: |
5635 |
if not background: |
5636 |
portage.util.writemsg_level(msg, |
5637 |
level=level, noiselevel=noiselevel) |
5638 |
self._append_to_log_path(log_path, msg) |
5639 |
|
5640 |
def _dblink_ebuild_phase(self, |
5641 |
pkg_dblink, pkg_dbapi, ebuild_path, phase): |
5642 |
""" |
5643 |
Using this callback for merge phases allows the scheduler |
5644 |
to run while these phases execute asynchronously, and allows |
5645 |
the scheduler to control output handling. |
5646 |
""" |
5647 |
|
5648 |
scheduler = self._sched_iface |
5649 |
settings = pkg_dblink.settings |
5650 |
pkg = self._dblink_pkg(pkg_dblink) |
5651 |
background = self._background |
5652 |
log_path = settings.get("PORTAGE_LOG_FILE") |
5653 |
|
5654 |
ebuild_phase = EbuildPhase(background=background, |
5655 |
pkg=pkg, phase=phase, scheduler=scheduler, |
5656 |
settings=settings, tree=pkg_dblink.treetype) |
5657 |
ebuild_phase.start() |
5658 |
ebuild_phase.wait() |
5659 |
|
5660 |
return ebuild_phase.returncode |
5661 |
|
5662 |
def _generate_digests(self): |
5663 |
""" |
5664 |
Generate digests if necessary for --digests or FEATURES=digest. |
5665 |
In order to avoid interference, this must be done before parallel |
5666 |
tasks are started. |
5667 |
""" |
5668 |
|
5669 |
if '--fetchonly' in self.myopts: |
5670 |
return os.EX_OK |
5671 |
|
5672 |
digest = '--digest' in self.myopts |
5673 |
if not digest: |
5674 |
for pkgsettings in self.pkgsettings.itervalues(): |
5675 |
if 'digest' in pkgsettings.features: |
5676 |
digest = True |
5677 |
break |
5678 |
|
5679 |
if not digest: |
5680 |
return os.EX_OK |
5681 |
|
5682 |
for x in self._mergelist: |
5683 |
if not isinstance(x, Package) or \ |
5684 |
x.type_name != 'ebuild' or \ |
5685 |
x.operation != 'merge': |
5686 |
continue |
5687 |
pkgsettings = self.pkgsettings[x.root] |
5688 |
if '--digest' not in self.myopts and \ |
5689 |
'digest' not in pkgsettings.features: |
5690 |
continue |
5691 |
portdb = x.root_config.trees['porttree'].dbapi |
5692 |
ebuild_path = portdb.findname(x.cpv) |
5693 |
if not ebuild_path: |
5694 |
writemsg_level( |
5695 |
"!!! Could not locate ebuild for '%s'.\n" \ |
5696 |
% x.cpv, level=logging.ERROR, noiselevel=-1) |
5697 |
return 1 |
5698 |
pkgsettings['O'] = os.path.dirname(ebuild_path) |
5699 |
if not portage.digestgen([], pkgsettings, myportdb=portdb): |
5700 |
writemsg_level( |
5701 |
"!!! Unable to generate manifest for '%s'.\n" \ |
5702 |
% x.cpv, level=logging.ERROR, noiselevel=-1) |
5703 |
return 1 |
5704 |
|
5705 |
return os.EX_OK |
5706 |
|
5707 |
def _check_manifests(self): |
5708 |
# Verify all the manifests now so that the user is notified of failure |
5709 |
# as soon as possible. |
5710 |
if "strict" not in self.settings.features or \ |
5711 |
"--fetchonly" in self.myopts or \ |
5712 |
"--fetch-all-uri" in self.myopts: |
5713 |
return os.EX_OK |
5714 |
|
5715 |
shown_verifying_msg = False |
5716 |
quiet_settings = {} |
5717 |
for myroot, pkgsettings in self.pkgsettings.iteritems(): |
5718 |
quiet_config = portage.config(clone=pkgsettings) |
5719 |
quiet_config["PORTAGE_QUIET"] = "1" |
5720 |
quiet_config.backup_changes("PORTAGE_QUIET") |
5721 |
quiet_settings[myroot] = quiet_config |
5722 |
del quiet_config |
5723 |
|
5724 |
for x in self._mergelist: |
5725 |
if not isinstance(x, Package) or \ |
5726 |
x.type_name != "ebuild": |
5727 |
continue |
5728 |
|
5729 |
if not shown_verifying_msg: |
5730 |
shown_verifying_msg = True |
5731 |
self._status_msg("Verifying ebuild manifests") |
5732 |
|
5733 |
root_config = x.root_config |
5734 |
portdb = root_config.trees["porttree"].dbapi |
5735 |
quiet_config = quiet_settings[root_config.root] |
5736 |
quiet_config["O"] = os.path.dirname(portdb.findname(x.cpv)) |
5737 |
if not portage.digestcheck([], quiet_config, strict=True): |
5738 |
return 1 |
5739 |
|
5740 |
return os.EX_OK |
5741 |
|
5742 |
def _add_prefetchers(self): |
5743 |
|
5744 |
if not self._parallel_fetch: |
5745 |
return |
5746 |
|
5747 |
if self._parallel_fetch: |
5748 |
self._status_msg("Starting parallel fetch") |
5749 |
|
5750 |
prefetchers = self._prefetchers |
5751 |
getbinpkg = "--getbinpkg" in self.myopts |
5752 |
|
5753 |
# In order to avoid "waiting for lock" messages |
5754 |
# at the beginning, which annoy users, never |
5755 |
# spawn a prefetcher for the first package. |
5756 |
for pkg in self._mergelist[1:]: |
5757 |
prefetcher = self._create_prefetcher(pkg) |
5758 |
if prefetcher is not None: |
5759 |
self._task_queues.fetch.add(prefetcher) |
5760 |
prefetchers[pkg] = prefetcher |
5761 |
|
5762 |
def _create_prefetcher(self, pkg): |
5763 |
""" |
5764 |
@return: a prefetcher, or None if not applicable |
5765 |
""" |
5766 |
prefetcher = None |
5767 |
|
5768 |
if not isinstance(pkg, Package): |
5769 |
pass |
5770 |
|
5771 |
elif pkg.type_name == "ebuild": |
5772 |
|
5773 |
prefetcher = EbuildFetcher(background=True, |
5774 |
config_pool=self._ConfigPool(pkg.root, |
5775 |
self._allocate_config, self._deallocate_config), |
5776 |
fetchonly=1, logfile=self._fetch_log, |
5777 |
pkg=pkg, prefetch=True, scheduler=self._sched_iface) |
5778 |
|
5779 |
elif pkg.type_name == "binary" and \ |
5780 |
"--getbinpkg" in self.myopts and \ |
5781 |
pkg.root_config.trees["bintree"].isremote(pkg.cpv): |
5782 |
|
5783 |
prefetcher = BinpkgPrefetcher(background=True, |
5784 |
pkg=pkg, scheduler=self._sched_iface) |
5785 |
|
5786 |
return prefetcher |
5787 |
|
5788 |
def _is_restart_scheduled(self): |
5789 |
""" |
5790 |
Check if the merge list contains a replacement |
5791 |
for the current running instance, that will result |
5792 |
in restart after merge. |
5793 |
@rtype: bool |
5794 |
@returns: True if a restart is scheduled, False otherwise. |
5795 |
""" |
5796 |
if self._opts_no_restart.intersection(self.myopts): |
5797 |
return False |
5798 |
|
5799 |
mergelist = self._mergelist |
5800 |
|
5801 |
for i, pkg in enumerate(mergelist): |
5802 |
if self._is_restart_necessary(pkg) and \ |
5803 |
i != len(mergelist) - 1: |
5804 |
return True |
5805 |
|
5806 |
return False |
5807 |
|
5808 |
def _is_restart_necessary(self, pkg): |
5809 |
""" |
5810 |
@return: True if merging the given package |
5811 |
requires restart, False otherwise. |
5812 |
""" |
5813 |
|
5814 |
# Figure out if we need a restart. |
5815 |
if pkg.root == self._running_root.root and \ |
5816 |
portage.match_from_list( |
5817 |
portage.const.PORTAGE_PACKAGE_ATOM, [pkg]): |
5818 |
if self._running_portage: |
5819 |
return pkg.cpv != self._running_portage.cpv |
5820 |
return True |
5821 |
return False |
5822 |
|
5823 |
def _restart_if_necessary(self, pkg): |
5824 |
""" |
5825 |
Use execv() to restart emerge. This happens |
5826 |
if portage upgrades itself and there are |
5827 |
remaining packages in the list. |
5828 |
""" |
5829 |
|
5830 |
if self._opts_no_restart.intersection(self.myopts): |
5831 |
return |
5832 |
|
5833 |
if not self._is_restart_necessary(pkg): |
5834 |
return |
5835 |
|
5836 |
if pkg == self._mergelist[-1]: |
5837 |
return |
5838 |
|
5839 |
self._main_loop_cleanup() |
5840 |
|
5841 |
logger = self._logger |
5842 |
pkg_count = self._pkg_count |
5843 |
mtimedb = self._mtimedb |
5844 |
bad_resume_opts = self._bad_resume_opts |
5845 |
|
5846 |
logger.log(" ::: completed emerge (%s of %s) %s to %s" % \ |
5847 |
(pkg_count.curval, pkg_count.maxval, pkg.cpv, pkg.root)) |
5848 |
|
5849 |
logger.log(" *** RESTARTING " + \ |
5850 |
"emerge via exec() after change of " + \ |
5851 |
"portage version.") |
5852 |
|
5853 |
mtimedb["resume"]["mergelist"].remove(list(pkg)) |
5854 |
mtimedb.commit() |
5855 |
portage.run_exitfuncs() |
5856 |
mynewargv = [sys.argv[0], "--resume"] |
5857 |
resume_opts = self.myopts.copy() |
5858 |
# For automatic resume, we need to prevent |
5859 |
# any of bad_resume_opts from leaking in |
5860 |
# via EMERGE_DEFAULT_OPTS. |
5861 |
resume_opts["--ignore-default-opts"] = True |
5862 |
for myopt, myarg in resume_opts.iteritems(): |
5863 |
if myopt not in bad_resume_opts: |
5864 |
if myarg is True: |
5865 |
mynewargv.append(myopt) |
5866 |
else: |
5867 |
mynewargv.append(myopt +"="+ str(myarg)) |
5868 |
# priority only needs to be adjusted on the first run |
5869 |
os.environ["PORTAGE_NICENESS"] = "0" |
5870 |
os.execv(mynewargv[0], mynewargv) |
5871 |
|
5872 |
def merge(self): |
5873 |
|
5874 |
if "--resume" in self.myopts: |
5875 |
# We're resuming. |
5876 |
portage.writemsg_stdout( |
5877 |
colorize("GOOD", "*** Resuming merge...\n"), noiselevel=-1) |
5878 |
self._logger.log(" *** Resuming merge...") |
5879 |
|
5880 |
self._save_resume_list() |
5881 |
|
5882 |
try: |
5883 |
self._background = self._background_mode() |
5884 |
except self._unknown_internal_error: |
5885 |
return 1 |
5886 |
|
5887 |
for root in self.trees: |
5888 |
root_config = self.trees[root]["root_config"] |
5889 |
|
5890 |
# Even for --pretend --fetch mode, PORTAGE_TMPDIR is required |
5891 |
# since it might spawn pkg_nofetch which requires PORTAGE_BUILDDIR |
5892 |
# for ensuring sane $PWD (bug #239560) and storing elog messages. |
5893 |
tmpdir = root_config.settings.get("PORTAGE_TMPDIR", "") |
5894 |
if not tmpdir or not os.path.isdir(tmpdir): |
5895 |
msg = "The directory specified in your " + \ |
5896 |
"PORTAGE_TMPDIR variable, '%s', " % tmpdir + \ |
5897 |
"does not exist. Please create this " + \ |
5898 |
"directory or correct your PORTAGE_TMPDIR setting." |
5899 |
msg = textwrap.wrap(msg, 70) |
5900 |
out = portage.output.EOutput() |
5901 |
for l in msg: |
5902 |
out.eerror(l) |
5903 |
return 1 |
5904 |
|
5905 |
if self._background: |
5906 |
root_config.settings.unlock() |
5907 |
root_config.settings["PORTAGE_BACKGROUND"] = "1" |
5908 |
root_config.settings.backup_changes("PORTAGE_BACKGROUND") |
5909 |
root_config.settings.lock() |
5910 |
|
5911 |
self.pkgsettings[root] = portage.config( |
5912 |
clone=root_config.settings) |
5913 |
|
5914 |
rval = self._generate_digests() |
5915 |
if rval != os.EX_OK: |
5916 |
return rval |
5917 |
|
5918 |
rval = self._check_manifests() |
5919 |
if rval != os.EX_OK: |
5920 |
return rval |
5921 |
|
5922 |
keep_going = "--keep-going" in self.myopts |
5923 |
fetchonly = self._build_opts.fetchonly |
5924 |
mtimedb = self._mtimedb |
5925 |
failed_pkgs = self._failed_pkgs |
5926 |
|
5927 |
while True: |
5928 |
rval = self._merge() |
5929 |
if rval == os.EX_OK or fetchonly or not keep_going: |
5930 |
break |
5931 |
if "resume" not in mtimedb: |
5932 |
break |
5933 |
mergelist = self._mtimedb["resume"].get("mergelist") |
5934 |
if not mergelist: |
5935 |
break |
5936 |
|
5937 |
if not failed_pkgs: |
5938 |
break |
5939 |
|
5940 |
for failed_pkg in failed_pkgs: |
5941 |
mergelist.remove(list(failed_pkg.pkg)) |
5942 |
|
5943 |
self._failed_pkgs_all.extend(failed_pkgs) |
5944 |
del failed_pkgs[:] |
5945 |
|
5946 |
if not mergelist: |
5947 |
break |
5948 |
|
5949 |
if not self._calc_resume_list(): |
5950 |
break |
5951 |
|
5952 |
clear_caches(self.trees) |
5953 |
if not self._mergelist: |
5954 |
break |
5955 |
|
5956 |
self._save_resume_list() |
5957 |
self._pkg_count.curval = 0 |
5958 |
self._pkg_count.maxval = len([x for x in self._mergelist \ |
5959 |
if isinstance(x, Package) and x.operation == "merge"]) |
5960 |
self._status_display.maxval = self._pkg_count.maxval |
5961 |
|
5962 |
self._logger.log(" *** Finished. Cleaning up...") |
5963 |
|
5964 |
if failed_pkgs: |
5965 |
self._failed_pkgs_all.extend(failed_pkgs) |
5966 |
del failed_pkgs[:] |
5967 |
|
5968 |
background = self._background |
5969 |
failure_log_shown = False |
5970 |
if background and len(self._failed_pkgs_all) == 1: |
5971 |
# If only one package failed then just show its |
5972 |
# whole log for easy viewing. |
5973 |
failed_pkg = self._failed_pkgs_all[-1] |
5974 |
build_dir = failed_pkg.build_dir |
5975 |
log_file = None |
5976 |
|
5977 |
log_paths = [failed_pkg.build_log] |
5978 |
|
5979 |
log_path = self._locate_failure_log(failed_pkg) |
5980 |
if log_path is not None: |
5981 |
try: |
5982 |
log_file = open(log_path) |
5983 |
except IOError: |
5984 |
pass |
5985 |
|
5986 |
if log_file is not None: |
5987 |
try: |
5988 |
for line in log_file: |
5989 |
writemsg_level(line, noiselevel=-1) |
5990 |
finally: |
5991 |
log_file.close() |
5992 |
failure_log_shown = True |
5993 |
|
5994 |
# Dump mod_echo output now since it tends to flood the terminal. |
5995 |
# This allows us to avoid having more important output, generated |
5996 |
# later, swept away by the mod_echo output. |
5997 |
mod_echo_output = _flush_elog_mod_echo() |
5998 |
|
5999 |
if background and not failure_log_shown and \ |
6000 |
self._failed_pkgs_all and \ |
6001 |
self._failed_pkgs_die_msgs and \ |
6002 |
not mod_echo_output: |
6003 |
|
6004 |
printer = portage.output.EOutput() |
6005 |
for mysettings, key, logentries in self._failed_pkgs_die_msgs: |
6006 |
root_msg = "" |
6007 |
if mysettings["ROOT"] != "/": |
6008 |
root_msg = " merged to %s" % mysettings["ROOT"] |
6009 |
print |
6010 |
printer.einfo("Error messages for package %s%s:" % \ |
6011 |
(colorize("INFORM", key), root_msg)) |
6012 |
print |
6013 |
for phase in portage.const.EBUILD_PHASES: |
6014 |
if phase not in logentries: |
6015 |
continue |
6016 |
for msgtype, msgcontent in logentries[phase]: |
6017 |
if isinstance(msgcontent, basestring): |
6018 |
msgcontent = [msgcontent] |
6019 |
for line in msgcontent: |
6020 |
printer.eerror(line.strip("\n")) |
6021 |
|
6022 |
if self._post_mod_echo_msgs: |
6023 |
for msg in self._post_mod_echo_msgs: |
6024 |
msg() |
6025 |
|
6026 |
if len(self._failed_pkgs_all) > 1 or \ |
6027 |
(self._failed_pkgs_all and "--keep-going" in self.myopts): |
6028 |
if len(self._failed_pkgs_all) > 1: |
6029 |
msg = "The following %d packages have " % \ |
6030 |
len(self._failed_pkgs_all) + \ |
6031 |
"failed to build or install:" |
6032 |
else: |
6033 |
msg = "The following package has " + \ |
6034 |
"failed to build or install:" |
6035 |
prefix = bad(" * ") |
6036 |
writemsg(prefix + "\n", noiselevel=-1) |
6037 |
from textwrap import wrap |
6038 |
for line in wrap(msg, 72): |
6039 |
writemsg("%s%s\n" % (prefix, line), noiselevel=-1) |
6040 |
writemsg(prefix + "\n", noiselevel=-1) |
6041 |
for failed_pkg in self._failed_pkgs_all: |
6042 |
writemsg("%s\t%s\n" % (prefix, |
6043 |
colorize("INFORM", str(failed_pkg.pkg))), |
6044 |
noiselevel=-1) |
6045 |
writemsg(prefix + "\n", noiselevel=-1) |
6046 |
|
6047 |
return rval |
6048 |
|
6049 |
def _elog_listener(self, mysettings, key, logentries, fulltext): |
6050 |
errors = portage.elog.filter_loglevels(logentries, ["ERROR"]) |
6051 |
if errors: |
6052 |
self._failed_pkgs_die_msgs.append( |
6053 |
(mysettings, key, errors)) |
6054 |
|
6055 |
def _locate_failure_log(self, failed_pkg): |
6056 |
|
6057 |
build_dir = failed_pkg.build_dir |
6058 |
log_file = None |
6059 |
|
6060 |
log_paths = [failed_pkg.build_log] |
6061 |
|
6062 |
for log_path in log_paths: |
6063 |
if not log_path: |
6064 |
continue |
6065 |
|
6066 |
try: |
6067 |
log_size = os.stat(log_path).st_size |
6068 |
except OSError: |
6069 |
continue |
6070 |
|
6071 |
if log_size == 0: |
6072 |
continue |
6073 |
|
6074 |
return log_path |
6075 |
|
6076 |
return None |
6077 |
|
6078 |
def _add_packages(self): |
6079 |
pkg_queue = self._pkg_queue |
6080 |
for pkg in self._mergelist: |
6081 |
if isinstance(pkg, Package): |
6082 |
pkg_queue.append(pkg) |
6083 |
elif isinstance(pkg, Blocker): |
6084 |
pass |
6085 |
|
6086 |
def _system_merge_started(self, merge): |
6087 |
""" |
6088 |
Add any unsatisfied runtime deps to self._unsatisfied_system_deps. |
6089 |
""" |
6090 |
graph = self._digraph |
6091 |
if graph is None: |
6092 |
return |
6093 |
pkg = merge.merge.pkg |
6094 |
|
6095 |
# Skip this if $ROOT != / since it shouldn't matter if there |
6096 |
# are unsatisfied system runtime deps in this case. |
6097 |
if pkg.root != '/': |
6098 |
return |
6099 |
|
6100 |
completed_tasks = self._completed_tasks |
6101 |
unsatisfied = self._unsatisfied_system_deps |
6102 |
|
6103 |
def ignore_non_runtime_or_satisfied(priority): |
6104 |
""" |
6105 |
Ignore non-runtime and satisfied runtime priorities. |
6106 |
""" |
6107 |
if isinstance(priority, DepPriority) and \ |
6108 |
not priority.satisfied and \ |
6109 |
(priority.runtime or priority.runtime_post): |
6110 |
return False |
6111 |
return True |
6112 |
|
6113 |
# When checking for unsatisfied runtime deps, only check |
6114 |
# direct deps since indirect deps are checked when the |
6115 |
# corresponding parent is merged. |
6116 |
for child in graph.child_nodes(pkg, |
6117 |
ignore_priority=ignore_non_runtime_or_satisfied): |
6118 |
if not isinstance(child, Package) or \ |
6119 |
child.operation == 'uninstall': |
6120 |
continue |
6121 |
if child is pkg: |
6122 |
continue |
6123 |
if child.operation == 'merge' and \ |
6124 |
child not in completed_tasks: |
6125 |
unsatisfied.add(child) |
6126 |
|
6127 |
def _merge_wait_exit_handler(self, task): |
6128 |
self._merge_wait_scheduled.remove(task) |
6129 |
self._merge_exit(task) |
6130 |
|
6131 |
def _merge_exit(self, merge): |
6132 |
self._do_merge_exit(merge) |
6133 |
self._deallocate_config(merge.merge.settings) |
6134 |
if merge.returncode == os.EX_OK and \ |
6135 |
not merge.merge.pkg.installed: |
6136 |
self._status_display.curval += 1 |
6137 |
self._status_display.merges = len(self._task_queues.merge) |
6138 |
self._schedule() |
6139 |
|
6140 |
def _do_merge_exit(self, merge): |
6141 |
pkg = merge.merge.pkg |
6142 |
if merge.returncode != os.EX_OK: |
6143 |
settings = merge.merge.settings |
6144 |
build_dir = settings.get("PORTAGE_BUILDDIR") |
6145 |
build_log = settings.get("PORTAGE_LOG_FILE") |
6146 |
|
6147 |
self._failed_pkgs.append(self._failed_pkg( |
6148 |
build_dir=build_dir, build_log=build_log, |
6149 |
pkg=pkg, |
6150 |
returncode=merge.returncode)) |
6151 |
self._failed_pkg_msg(self._failed_pkgs[-1], "install", "to") |
6152 |
|
6153 |
self._status_display.failed = len(self._failed_pkgs) |
6154 |
return |
6155 |
|
6156 |
self._task_complete(pkg) |
6157 |
pkg_to_replace = merge.merge.pkg_to_replace |
6158 |
if pkg_to_replace is not None: |
6159 |
# When a package is replaced, mark its uninstall |
6160 |
# task complete (if any). |
6161 |
uninst_hash_key = \ |
6162 |
("installed", pkg.root, pkg_to_replace.cpv, "uninstall") |
6163 |
self._task_complete(uninst_hash_key) |
6164 |
|
6165 |
if pkg.installed: |
6166 |
return |
6167 |
|
6168 |
self._restart_if_necessary(pkg) |
6169 |
|
6170 |
# Call mtimedb.commit() after each merge so that |
6171 |
# --resume still works after being interrupted |
6172 |
# by reboot, sigkill or similar. |
6173 |
mtimedb = self._mtimedb |
6174 |
mtimedb["resume"]["mergelist"].remove(list(pkg)) |
6175 |
if not mtimedb["resume"]["mergelist"]: |
6176 |
del mtimedb["resume"] |
6177 |
mtimedb.commit() |
6178 |
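# For reference (a sketch of the assumed on-disk format): each entry in
# mtimedb["resume"]["mergelist"] is the package's hash key serialized as a
# list, e.g. something like
#   ["ebuild", "/", "app-misc/foo-1.0", "merge"]
# which is why list(pkg) is used for removal here and in
# _restart_if_necessary() above.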
|
6179 |
def _build_exit(self, build): |
6180 |
if build.returncode == os.EX_OK: |
6181 |
self.curval += 1 |
6182 |
merge = PackageMerge(merge=build) |
6183 |
if not build.build_opts.buildpkgonly and \ |
6184 |
build.pkg in self._deep_system_deps: |
6185 |
# Since dependencies on system packages are frequently |
6186 |
# unspecified, merge them only when no builds are executing. |
6187 |
self._merge_wait_queue.append(merge) |
6188 |
merge.addStartListener(self._system_merge_started) |
6189 |
else: |
6190 |
merge.addExitListener(self._merge_exit) |
6191 |
self._task_queues.merge.add(merge) |
6192 |
self._status_display.merges = len(self._task_queues.merge) |
6193 |
else: |
6194 |
settings = build.settings |
6195 |
build_dir = settings.get("PORTAGE_BUILDDIR") |
6196 |
build_log = settings.get("PORTAGE_LOG_FILE") |
6197 |
|
6198 |
self._failed_pkgs.append(self._failed_pkg( |
6199 |
build_dir=build_dir, build_log=build_log, |
6200 |
pkg=build.pkg, |
6201 |
returncode=build.returncode)) |
6202 |
self._failed_pkg_msg(self._failed_pkgs[-1], "emerge", "for") |
6203 |
|
6204 |
self._status_display.failed = len(self._failed_pkgs) |
6205 |
self._deallocate_config(build.settings) |
6206 |
self._jobs -= 1 |
6207 |
self._status_display.running = self._jobs |
6208 |
self._schedule() |
6209 |
|
6210 |
def _extract_exit(self, build): |
6211 |
self._build_exit(build) |
6212 |
|
6213 |
def _task_complete(self, pkg): |
6214 |
self._completed_tasks.add(pkg) |
6215 |
self._unsatisfied_system_deps.discard(pkg) |
6216 |
self._choose_pkg_return_early = False |
6217 |
|
6218 |
def _merge(self): |
6219 |
|
6220 |
self._add_prefetchers() |
6221 |
self._add_packages() |
6222 |
pkg_queue = self._pkg_queue |
6223 |
failed_pkgs = self._failed_pkgs |
6224 |
portage.locks._quiet = self._background |
6225 |
portage.elog._emerge_elog_listener = self._elog_listener |
6226 |
rval = os.EX_OK |
6227 |
|
6228 |
try: |
6229 |
self._main_loop() |
6230 |
finally: |
6231 |
self._main_loop_cleanup() |
6232 |
portage.locks._quiet = False |
6233 |
portage.elog._emerge_elog_listener = None |
6234 |
if failed_pkgs: |
6235 |
rval = failed_pkgs[-1].returncode |
6236 |
|
6237 |
return rval |
6238 |
|
6239 |
def _main_loop_cleanup(self): |
6240 |
del self._pkg_queue[:] |
6241 |
self._completed_tasks.clear() |
6242 |
self._deep_system_deps.clear() |
6243 |
self._unsatisfied_system_deps.clear() |
6244 |
self._choose_pkg_return_early = False |
6245 |
self._status_display.reset() |
6246 |
self._digraph = None |
6247 |
self._task_queues.fetch.clear() |
6248 |
|
6249 |
def _choose_pkg(self): |
6250 |
""" |
6251 |
Choose a task that has all its dependencies satisfied. |
6252 |
""" |
6253 |
|
6254 |
if self._choose_pkg_return_early: |
6255 |
return None |
6256 |
|
6257 |
if self._digraph is None: |
6258 |
if (self._jobs or self._task_queues.merge) and \ |
6259 |
not ("--nodeps" in self.myopts and \ |
6260 |
(self._max_jobs is True or self._max_jobs > 1)): |
6261 |
self._choose_pkg_return_early = True |
6262 |
return None |
6263 |
return self._pkg_queue.pop(0) |
6264 |
|
6265 |
if not (self._jobs or self._task_queues.merge): |
6266 |
return self._pkg_queue.pop(0) |
6267 |
|
6268 |
self._prune_digraph() |
6269 |
|
6270 |
chosen_pkg = None |
6271 |
later = set(self._pkg_queue) |
6272 |
for pkg in self._pkg_queue: |
6273 |
later.remove(pkg) |
6274 |
if not self._dependent_on_scheduled_merges(pkg, later): |
6275 |
chosen_pkg = pkg |
6276 |
break |
6277 |
|
6278 |
if chosen_pkg is not None: |
6279 |
self._pkg_queue.remove(chosen_pkg) |
6280 |
|
6281 |
if chosen_pkg is None: |
6282 |
# There's no point in searching for a package to |
6283 |
# choose until at least one of the existing jobs |
6284 |
# completes. |
6285 |
self._choose_pkg_return_early = True |
6286 |
|
6287 |
return chosen_pkg |
6288 |
|
6289 |
def _dependent_on_scheduled_merges(self, pkg, later): |
6290 |
""" |
6291 |
Traverse the subgraph of the given package's deep dependencies |
6292 |
to see if it contains any scheduled merges. |
6293 |
@param pkg: a package to check dependencies for |
6294 |
@type pkg: Package |
6295 |
@param later: packages for which dependence should be ignored |
6296 |
since they will be merged later than pkg anyway and therefore |
6297 |
delaying the merge of pkg will not result in a more optimal |
6298 |
merge order |
6299 |
@type later: set |
6300 |
@rtype: bool |
6301 |
@returns: True if the package is dependent, False otherwise. |
6302 |
""" |
6303 |
|
6304 |
graph = self._digraph |
6305 |
completed_tasks = self._completed_tasks |
6306 |
|
6307 |
dependent = False |
6308 |
traversed_nodes = set([pkg]) |
6309 |
direct_deps = graph.child_nodes(pkg) |
6310 |
node_stack = direct_deps |
6311 |
direct_deps = frozenset(direct_deps) |
6312 |
while node_stack: |
6313 |
node = node_stack.pop() |
6314 |
if node in traversed_nodes: |
6315 |
continue |
6316 |
traversed_nodes.add(node) |
6317 |
if not ((node.installed and node.operation == "nomerge") or \ |
6318 |
(node.operation == "uninstall" and \ |
6319 |
node not in direct_deps) or \ |
6320 |
node in completed_tasks or \ |
6321 |
node in later): |
6322 |
dependent = True |
6323 |
break |
6324 |
node_stack.extend(graph.child_nodes(node)) |
6325 |
|
6326 |
return dependent |
6327 |
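# Rough example of the traversal above (package names are made up): if A
# depends on B and B depends on C, and C is still scheduled for merge
# (not installed, not completed, not in the `later` set), then
# _dependent_on_scheduled_merges(A, later) returns True and A is deferred
# until C finishes; uninstall nodes that are not direct deps are skipped.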
|
6328 |
def _allocate_config(self, root): |
6329 |
""" |
6330 |
Allocate a unique config instance for a task in order |
6331 |
to prevent interference between parallel tasks. |
6332 |
""" |
6333 |
if self._config_pool[root]: |
6334 |
temp_settings = self._config_pool[root].pop() |
6335 |
else: |
6336 |
temp_settings = portage.config(clone=self.pkgsettings[root]) |
6337 |
# Since config.setcpv() isn't guaranteed to call config.reset() for |
6338 |
# performance reasons, call it here to make sure all settings from the |
6339 |
# previous package get flushed out (such as PORTAGE_LOG_FILE). |
6340 |
temp_settings.reload() |
6341 |
temp_settings.reset() |
6342 |
return temp_settings |
6343 |
|
6344 |
def _deallocate_config(self, settings): |
6345 |
self._config_pool[settings["ROOT"]].append(settings) |
6346 |
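# Typical usage of the pool (as seen in _task() and _merge_exit()):
#   settings = self._allocate_config(pkg.root)   # borrow a config clone
#   ...run the build/merge with these settings...
#   self._deallocate_config(settings)            # return it to the pool
# Reusing clones avoids the cost of portage.config(clone=...) per task,
# while the reload()/reset() calls above flush per-package state.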
|
6347 |
def _main_loop(self): |
6348 |
|
6349 |
# Only allow 1 job max if a restart is scheduled |
6350 |
# due to portage update. |
6351 |
if self._is_restart_scheduled() or \ |
6352 |
self._opts_no_background.intersection(self.myopts): |
6353 |
self._set_max_jobs(1) |
6354 |
|
6355 |
merge_queue = self._task_queues.merge |
6356 |
|
6357 |
while self._schedule(): |
6358 |
if self._poll_event_handlers: |
6359 |
self._poll_loop() |
6360 |
|
6361 |
while True: |
6362 |
self._schedule() |
6363 |
if not (self._jobs or merge_queue): |
6364 |
break |
6365 |
if self._poll_event_handlers: |
6366 |
self._poll_loop() |
6367 |
|
6368 |
def _keep_scheduling(self): |
6369 |
return bool(self._pkg_queue and \ |
6370 |
not (self._failed_pkgs and not self._build_opts.fetchonly)) |
6371 |
|
6372 |
def _schedule_tasks(self): |
6373 |
|
6374 |
# When the number of jobs drops to zero, process all waiting merges. |
6375 |
if not self._jobs and self._merge_wait_queue: |
6376 |
for task in self._merge_wait_queue: |
6377 |
task.addExitListener(self._merge_wait_exit_handler) |
6378 |
self._task_queues.merge.add(task) |
6379 |
self._status_display.merges = len(self._task_queues.merge) |
6380 |
self._merge_wait_scheduled.extend(self._merge_wait_queue) |
6381 |
del self._merge_wait_queue[:] |
6382 |
|
6383 |
self._schedule_tasks_imp() |
6384 |
self._status_display.display() |
6385 |
|
6386 |
state_change = 0 |
6387 |
for q in self._task_queues.values(): |
6388 |
if q.schedule(): |
6389 |
state_change += 1 |
6390 |
|
6391 |
# Cancel prefetchers if they're the only reason |
6392 |
# the main poll loop is still running. |
6393 |
if self._failed_pkgs and not self._build_opts.fetchonly and \ |
6394 |
not (self._jobs or self._task_queues.merge) and \ |
6395 |
self._task_queues.fetch: |
6396 |
self._task_queues.fetch.clear() |
6397 |
state_change += 1 |
6398 |
|
6399 |
if state_change: |
6400 |
self._schedule_tasks_imp() |
6401 |
self._status_display.display() |
6402 |
|
6403 |
return self._keep_scheduling() |
6404 |
|
6405 |
def _job_delay(self): |
6406 |
""" |
6407 |
@rtype: bool |
6408 |
@returns: True if job scheduling should be delayed, False otherwise. |
6409 |
""" |
6410 |
|
6411 |
if self._jobs and self._max_load is not None: |
6412 |
|
6413 |
current_time = time.time() |
6414 |
|
6415 |
delay = self._job_delay_factor * self._jobs ** self._job_delay_exp |
6416 |
if delay > self._job_delay_max: |
6417 |
delay = self._job_delay_max |
6418 |
if (current_time - self._previous_job_start_time) < delay: |
6419 |
return True |
6420 |
|
6421 |
return False |
6422 |
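# Sketch of the backoff computed above, assuming the class defaults are
# something like _job_delay_factor = 0.1, _job_delay_exp = 1.5 and
# _job_delay_max = 5 (values not shown in this hunk):
#   jobs =  1  ->  0.1 * 1**1.5  = 0.1s
#   jobs =  4  ->  0.1 * 4**1.5  = 0.8s
#   jobs = 16  ->  0.1 * 16**1.5 = 6.4s, capped to 5s
# so new jobs are spaced out progressively as load builds up, but only
# when a load limit (self._max_load) is in effect.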
|
6423 |
def _schedule_tasks_imp(self): |
6424 |
""" |
6425 |
@rtype: bool |
6426 |
@returns: True if state changed, False otherwise. |
6427 |
""" |
6428 |
|
6429 |
state_change = 0 |
6430 |
|
6431 |
while True: |
6432 |
|
6433 |
if not self._keep_scheduling(): |
6434 |
return bool(state_change) |
6435 |
|
6436 |
if self._choose_pkg_return_early or \ |
6437 |
self._merge_wait_scheduled or \ |
6438 |
(self._jobs and self._unsatisfied_system_deps) or \ |
6439 |
not self._can_add_job() or \ |
6440 |
self._job_delay(): |
6441 |
return bool(state_change) |
6442 |
|
6443 |
pkg = self._choose_pkg() |
6444 |
if pkg is None: |
6445 |
return bool(state_change) |
6446 |
|
6447 |
state_change += 1 |
6448 |
|
6449 |
if not pkg.installed: |
6450 |
self._pkg_count.curval += 1 |
6451 |
|
6452 |
task = self._task(pkg) |
6453 |
|
6454 |
if pkg.installed: |
6455 |
merge = PackageMerge(merge=task) |
6456 |
merge.addExitListener(self._merge_exit) |
6457 |
self._task_queues.merge.add(merge) |
6458 |
|
6459 |
elif pkg.built: |
6460 |
self._jobs += 1 |
6461 |
self._previous_job_start_time = time.time() |
6462 |
self._status_display.running = self._jobs |
6463 |
task.addExitListener(self._extract_exit) |
6464 |
self._task_queues.jobs.add(task) |
6465 |
|
6466 |
else: |
6467 |
self._jobs += 1 |
6468 |
self._previous_job_start_time = time.time() |
6469 |
self._status_display.running = self._jobs |
6470 |
task.addExitListener(self._build_exit) |
6471 |
self._task_queues.jobs.add(task) |
6472 |
|
6473 |
return bool(state_change) |
6474 |
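# Summary of the dispatch above: packages that are already installed are
# wrapped in a PackageMerge and placed directly on the merge queue,
# packages with a prebuilt binary go to the job queue with _extract_exit
# as the exit hook, and everything else is built from source with
# _build_exit, which later wraps the result in a PackageMerge for the
# merge queue.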
|
6475 |
def _task(self, pkg): |
6476 |
|
6477 |
pkg_to_replace = None |
6478 |
if pkg.operation != "uninstall": |
6479 |
vardb = pkg.root_config.trees["vartree"].dbapi |
6480 |
previous_cpv = vardb.match(pkg.slot_atom) |
6481 |
if previous_cpv: |
6482 |
previous_cpv = previous_cpv.pop() |
6483 |
pkg_to_replace = self._pkg(previous_cpv, |
6484 |
"installed", pkg.root_config, installed=True) |
6485 |
|
6486 |
task = MergeListItem(args_set=self._args_set, |
6487 |
background=self._background, binpkg_opts=self._binpkg_opts, |
6488 |
build_opts=self._build_opts, |
6489 |
config_pool=self._ConfigPool(pkg.root, |
6490 |
self._allocate_config, self._deallocate_config), |
6491 |
emerge_opts=self.myopts, |
6492 |
find_blockers=self._find_blockers(pkg), logger=self._logger, |
6493 |
mtimedb=self._mtimedb, pkg=pkg, pkg_count=self._pkg_count.copy(), |
6494 |
pkg_to_replace=pkg_to_replace, |
6495 |
prefetcher=self._prefetchers.get(pkg), |
6496 |
scheduler=self._sched_iface, |
6497 |
settings=self._allocate_config(pkg.root), |
6498 |
statusMessage=self._status_msg, |
6499 |
world_atom=self._world_atom) |
6500 |
|
6501 |
return task |
6502 |
|
6503 |
def _failed_pkg_msg(self, failed_pkg, action, preposition): |
6504 |
pkg = failed_pkg.pkg |
6505 |
msg = "%s to %s %s" % \ |
6506 |
(bad("Failed"), action, colorize("INFORM", pkg.cpv)) |
6507 |
if pkg.root != "/": |
6508 |
msg += " %s %s" % (preposition, pkg.root) |
6509 |
|
6510 |
log_path = self._locate_failure_log(failed_pkg) |
6511 |
if log_path is not None: |
6512 |
msg += ", Log file:" |
6513 |
self._status_msg(msg) |
6514 |
|
6515 |
if log_path is not None: |
6516 |
self._status_msg(" '%s'" % (colorize("INFORM", log_path),)) |
6517 |
|
6518 |
def _status_msg(self, msg): |
6519 |
""" |
6520 |
Display a brief status message (no newlines) in the status display. |
6521 |
This is called by tasks to provide feedback to the user. This |
6522 |
delegates the responsibility of generating \r and \n control characters |
6523 |
to guarantee that lines are created or erased when necessary and |
6524 |
appropriate. |
6525 |
|
6526 |
@type msg: str |
6527 |
@param msg: a brief status message (no newlines allowed) |
6528 |
""" |
6529 |
if not self._background: |
6530 |
writemsg_level("\n") |
6531 |
self._status_display.displayMessage(msg) |
6532 |
|
6533 |
def _save_resume_list(self): |
6534 |
""" |
6535 |
Do this before verifying the ebuild Manifests since it might |
6536 |
be possible for the user to use --resume --skipfirst to get past |
6537 |
a non-essential package with a broken digest. |
6538 |
""" |
6539 |
mtimedb = self._mtimedb |
6540 |
mtimedb["resume"]["mergelist"] = [list(x) \ |
6541 |
for x in self._mergelist \ |
6542 |
if isinstance(x, Package) and x.operation == "merge"] |
6543 |
|
6544 |
mtimedb.commit() |
6545 |
|
6546 |
def _calc_resume_list(self): |
6547 |
""" |
6548 |
Use the current resume list to calculate a new one, |
6549 |
dropping any packages with unsatisfied deps. |
6550 |
@rtype: bool |
6551 |
@returns: True if successful, False otherwise. |
6552 |
""" |
6553 |
print colorize("GOOD", "*** Resuming merge...") |
6554 |
|
6555 |
if self._show_list(): |
6556 |
if "--tree" in self.myopts: |
6557 |
portage.writemsg_stdout("\n" + \ |
6558 |
darkgreen("These are the packages that " + \ |
6559 |
"would be merged, in reverse order:\n\n")) |
6560 |
|
6561 |
else: |
6562 |
portage.writemsg_stdout("\n" + \ |
6563 |
darkgreen("These are the packages that " + \ |
6564 |
"would be merged, in order:\n\n")) |
6565 |
|
6566 |
show_spinner = "--quiet" not in self.myopts and \ |
6567 |
"--nodeps" not in self.myopts |
6568 |
|
6569 |
if show_spinner: |
6570 |
print "Calculating dependencies ", |
6571 |
|
6572 |
myparams = create_depgraph_params(self.myopts, None) |
6573 |
success = False |
6574 |
e = None |
6575 |
try: |
6576 |
success, mydepgraph, dropped_tasks = resume_depgraph( |
6577 |
self.settings, self.trees, self._mtimedb, self.myopts, |
6578 |
myparams, self._spinner) |
6579 |
except depgraph.UnsatisfiedResumeDep, exc: |
6580 |
# rename variable to avoid python-3.0 error: |
6581 |
# SyntaxError: can not delete variable 'e' referenced in nested |
6582 |
# scope |
6583 |
e = exc |
6584 |
mydepgraph = e.depgraph |
6585 |
dropped_tasks = set() |
6586 |
|
6587 |
if show_spinner: |
6588 |
print "\b\b... done!" |
6589 |
|
6590 |
if e is not None: |
6591 |
def unsatisfied_resume_dep_msg(): |
6592 |
mydepgraph.display_problems() |
6593 |
out = portage.output.EOutput() |
6594 |
out.eerror("One or more packages are either masked or " + \ |
6595 |
"have missing dependencies:") |
6596 |
out.eerror("") |
6597 |
indent = " " |
6598 |
show_parents = set() |
6599 |
for dep in e.value: |
6600 |
if dep.parent in show_parents: |
6601 |
continue |
6602 |
show_parents.add(dep.parent) |
6603 |
if dep.atom is None: |
6604 |
out.eerror(indent + "Masked package:") |
6605 |
out.eerror(2 * indent + str(dep.parent)) |
6606 |
out.eerror("") |
6607 |
else: |
6608 |
out.eerror(indent + str(dep.atom) + " pulled in by:") |
6609 |
out.eerror(2 * indent + str(dep.parent)) |
6610 |
out.eerror("") |
6611 |
msg = "The resume list contains packages " + \ |
6612 |
"that are either masked or have " + \ |
6613 |
"unsatisfied dependencies. " + \ |
6614 |
"Please restart/continue " + \ |
6615 |
"the operation manually, or use --skipfirst " + \ |
6616 |
"to skip the first package in the list and " + \ |
6617 |
"any other packages that may be " + \ |
6618 |
"masked or have missing dependencies." |
6619 |
for line in textwrap.wrap(msg, 72): |
6620 |
out.eerror(line) |
6621 |
self._post_mod_echo_msgs.append(unsatisfied_resume_dep_msg) |
6622 |
return False |
6623 |
|
6624 |
if success and self._show_list(): |
6625 |
mylist = mydepgraph.altlist() |
6626 |
if mylist: |
6627 |
if "--tree" in self.myopts: |
6628 |
mylist.reverse() |
6629 |
mydepgraph.display(mylist, favorites=self._favorites) |
6630 |
|
6631 |
if not success: |
6632 |
self._post_mod_echo_msgs.append(mydepgraph.display_problems) |
6633 |
return False |
6634 |
mydepgraph.display_problems() |
6635 |
|
6636 |
mylist = mydepgraph.altlist() |
6637 |
mydepgraph.break_refs(mylist) |
6638 |
mydepgraph.break_refs(dropped_tasks) |
6639 |
self._mergelist = mylist |
6640 |
self._set_digraph(mydepgraph.schedulerGraph()) |
6641 |
|
6642 |
msg_width = 75 |
6643 |
for task in dropped_tasks: |
6644 |
if not (isinstance(task, Package) and task.operation == "merge"): |
6645 |
continue |
6646 |
pkg = task |
6647 |
msg = "emerge --keep-going:" + \ |
6648 |
" %s" % (pkg.cpv,) |
6649 |
if pkg.root != "/": |
6650 |
msg += " for %s" % (pkg.root,) |
6651 |
msg += " dropped due to unsatisfied dependency." |
6652 |
for line in textwrap.wrap(msg, msg_width): |
6653 |
eerror(line, phase="other", key=pkg.cpv) |
6654 |
settings = self.pkgsettings[pkg.root] |
6655 |
# Ensure that log collection from $T is disabled inside |
6656 |
# elog_process(), since any logs that might exist are |
6657 |
# not valid here. |
6658 |
settings.pop("T", None) |
6659 |
portage.elog.elog_process(pkg.cpv, settings) |
6660 |
self._failed_pkgs_all.append(self._failed_pkg(pkg=pkg)) |
6661 |
|
6662 |
return True |
6663 |
|
6664 |
def _show_list(self): |
6665 |
myopts = self.myopts |
6666 |
if "--quiet" not in myopts and \ |
6667 |
("--ask" in myopts or "--tree" in myopts or \ |
6668 |
"--verbose" in myopts): |
6669 |
return True |
6670 |
return False |
6671 |
|
6672 |
def _world_atom(self, pkg): |
6673 |
""" |
6674 |
Add the package to the world file, but only if |
6675 |
it's supposed to be added. Otherwise, do nothing. |
6676 |
""" |
6677 |
|
6678 |
if set(("--buildpkgonly", "--fetchonly", |
6679 |
"--fetch-all-uri", |
6680 |
"--oneshot", "--onlydeps", |
6681 |
"--pretend")).intersection(self.myopts): |
6682 |
return |
6683 |
|
6684 |
if pkg.root != self.target_root: |
6685 |
return |
6686 |
|
6687 |
args_set = self._args_set |
6688 |
if not args_set.findAtomForPackage(pkg): |
6689 |
return |
6690 |
|
6691 |
logger = self._logger |
6692 |
pkg_count = self._pkg_count |
6693 |
root_config = pkg.root_config |
6694 |
world_set = root_config.sets["world"] |
6695 |
world_locked = False |
6696 |
if hasattr(world_set, "lock"): |
6697 |
world_set.lock() |
6698 |
world_locked = True |
6699 |
|
6700 |
try: |
6701 |
if hasattr(world_set, "load"): |
6702 |
world_set.load() # maybe it's changed on disk |
6703 |
|
6704 |
atom = create_world_atom(pkg, args_set, root_config) |
6705 |
if atom: |
6706 |
if hasattr(world_set, "add"): |
6707 |
self._status_msg(('Recording %s in "world" ' + \ |
6708 |
'favorites file...') % atom) |
6709 |
logger.log(" === (%s of %s) Updating world file (%s)" % \ |
6710 |
(pkg_count.curval, pkg_count.maxval, pkg.cpv)) |
6711 |
world_set.add(atom) |
6712 |
else: |
6713 |
writemsg_level('\n!!! Unable to record %s in "world"\n' % \ |
6714 |
(atom,), level=logging.WARN, noiselevel=-1) |
6715 |
finally: |
6716 |
if world_locked: |
6717 |
world_set.unlock() |
6718 |
|
6719 |
def _pkg(self, cpv, type_name, root_config, installed=False): |
6720 |
""" |
6721 |
Get a package instance from the cache, or create a new |
6722 |
one if necessary. Raises KeyError from aux_get if it |
6723 |
fails for some reason (package does not exist or is |
6724 |
corrupt). |
6725 |
""" |
6726 |
operation = "merge" |
6727 |
if installed: |
6728 |
operation = "nomerge" |
6729 |
|
6730 |
if self._digraph is not None: |
6731 |
# Reuse existing instance when available. |
6732 |
pkg = self._digraph.get( |
6733 |
(type_name, root_config.root, cpv, operation)) |
6734 |
if pkg is not None: |
6735 |
return pkg |
6736 |
|
6737 |
tree_type = depgraph.pkg_tree_map[type_name] |
6738 |
db = root_config.trees[tree_type].dbapi |
6739 |
db_keys = list(self.trees[root_config.root][ |
6740 |
tree_type].dbapi._aux_cache_keys) |
6741 |
metadata = izip(db_keys, db.aux_get(cpv, db_keys)) |
6742 |
pkg = Package(cpv=cpv, metadata=metadata, |
6743 |
root_config=root_config, installed=installed) |
6744 |
if type_name == "ebuild": |
6745 |
settings = self.pkgsettings[root_config.root] |
6746 |
settings.setcpv(pkg) |
6747 |
pkg.metadata["USE"] = settings["PORTAGE_USE"] |
6748 |
pkg.metadata['CHOST'] = settings.get('CHOST', '') |
6749 |
|
6750 |
return pkg |
6751 |
|
def chk_updated_info_files(root, infodirs, prev_mtimes, retval):

if os.path.exists("/usr/bin/install-info"):

Lines 6958-6980

print "Use " + colorize("GOOD", "emerge @preserved-rebuild") + " to rebuild packages using these libraries"


def _flush_elog_mod_echo():
"""
Dump the mod_echo output now so that our other
notifications are shown last.
@rtype: bool
@returns: True if messages were shown, False otherwise.
"""
messages_shown = False
try:
from portage.elog import mod_echo
except ImportError:
pass # happens during downgrade to a version without the module
else:
messages_shown = bool(mod_echo._items)
mod_echo.finalize()
return messages_shown

def post_emerge(root_config, myopts, mtimedb, retval):
"""
Misc. things to run at the end of a merge session.
Lines 7134-7167

manager = NewsManager(portdb, vardb, NEWS_PATH, UNREAD_PATH)
return manager.getUnreadItems( repo_id, update=update )

def insert_category_into_atom(atom, category):
alphanum = re.search(r'\w', atom)
if alphanum:
ret = atom[:alphanum.start()] + "%s/" % category + \
atom[alphanum.start():]
else:
ret = None
return ret

def is_valid_package_atom(x):
if "/" not in x:
alphanum = re.search(r'\w', x)
if alphanum:
x = x[:alphanum.start()] + "cat/" + x[alphanum.start():]
return portage.isvalidatom(x)
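# Rough examples for the two helpers above (invented inputs):
#   insert_category_into_atom(">=portage-2.1.6", "null")
#       -> ">=null/portage-2.1.6"
#   is_valid_package_atom("portage")
#       -> portage.isvalidatom("cat/portage"), i.e. True
# A dummy "cat/" (or "null/") category is spliced in only so that
# category-less command-line arguments can be run through the normal
# atom validation and search code.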

def show_blocker_docs_link():
print
print "For more information about " + bad("Blocked Packages") + ", please refer to the following"
print "section of the Gentoo Linux x86 Handbook (architecture is irrelevant):"
print
print "http://www.gentoo.org/doc/en/handbook/handbook-x86.xml?full=1#blocked"
print

def show_mask_docs():
print "For more information, see the MASKED PACKAGES section in the emerge"
print "man page or refer to the Gentoo Handbook."

def action_sync(settings, trees, mtimedb, myopts, myaction):
xterm_titles = "notitles" not in settings.features
emergelog(xterm_titles, " === sync")
Lines 9108-9187

else:
print "Number removed: "+str(len(cleanlist))

def resume_depgraph(settings, trees, mtimedb, myopts, myparams, spinner):
"""
Construct a depgraph for the given resume list. This will raise
PackageNotFound or depgraph.UnsatisfiedResumeDep when necessary.
@rtype: tuple
@returns: (success, depgraph, dropped_tasks)
"""
skip_masked = True
skip_unsatisfied = True
mergelist = mtimedb["resume"]["mergelist"]
dropped_tasks = set()
while True:
mydepgraph = depgraph(settings, trees,
myopts, myparams, spinner)
try:
success = mydepgraph.loadResumeCommand(mtimedb["resume"],
skip_masked=skip_masked)
except depgraph.UnsatisfiedResumeDep, e:
if not skip_unsatisfied:
raise

graph = mydepgraph.digraph
unsatisfied_parents = dict((dep.parent, dep.parent) \
for dep in e.value)
traversed_nodes = set()
unsatisfied_stack = list(unsatisfied_parents)
while unsatisfied_stack:
pkg = unsatisfied_stack.pop()
if pkg in traversed_nodes:
continue
traversed_nodes.add(pkg)

# If this package was pulled in by a parent
# package scheduled for merge, removing this
# package may cause the parent package's
# dependency to become unsatisfied.
for parent_node in graph.parent_nodes(pkg):
if not isinstance(parent_node, Package) \
or parent_node.operation not in ("merge", "nomerge"):
continue
unsatisfied = \
graph.child_nodes(parent_node,
ignore_priority=DepPrioritySatisfiedRange.ignore_soft)
if pkg in unsatisfied:
unsatisfied_parents[parent_node] = parent_node
unsatisfied_stack.append(parent_node)

pruned_mergelist = []
for x in mergelist:
if isinstance(x, list) and \
tuple(x) not in unsatisfied_parents:
pruned_mergelist.append(x)

# If the mergelist doesn't shrink then this loop is infinite.
if len(pruned_mergelist) == len(mergelist):
# This happens if a package can't be dropped because
# it's already installed, but it has unsatisfied PDEPEND.
raise
mergelist[:] = pruned_mergelist

# Exclude installed packages that have been removed from the graph due
# to failure to build/install runtime dependencies after the dependent
# package has already been installed.
dropped_tasks.update(pkg for pkg in \
unsatisfied_parents if pkg.operation != "nomerge")
mydepgraph.break_refs(unsatisfied_parents)

del e, graph, traversed_nodes, \
unsatisfied_parents, unsatisfied_stack
continue
else:
break
return (success, mydepgraph, dropped_tasks)
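# Note on the pruning loop above: resume mergelist entries are plain
# lists (e.g. ["ebuild", "/", "app-misc/foo-1.0", "merge"], package name
# invented here), so tuple(x) is what gets compared against the
# unsatisfied parent packages; entries corresponding to packages whose
# dependencies can no longer be satisfied are dropped, and the guard on
# len(pruned_mergelist) re-raises UnsatisfiedResumeDep instead of looping
# forever when nothing can be removed.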

def action_build(settings, trees, mtimedb,
myopts, myaction, myfiles, spinner):

Lines 9847-9862

settings = trees[myroot]["vartree"].settings
settings.validate()

def clear_caches(trees):
for d in trees.itervalues():
d["porttree"].dbapi.melt()
d["porttree"].dbapi._aux_cache.clear()
d["bintree"].dbapi._aux_cache.clear()
d["bintree"].dbapi._clear_cache()
d["vartree"].dbapi.linkmap._clear_cache()
portage.dircache.clear()
gc.collect()

def load_emerge_config(trees=None):
kwargs = {}
for k, envvar in (("config_root", "PORTAGE_CONFIGROOT"), ("target_root", "ROOT")):

Lines 10255-10282

msg += " for '%s'" % root
writemsg_level(msg, level=logging.WARN, noiselevel=-1)

def ambiguous_package_name(arg, atoms, root_config, spinner, myopts):

if "--quiet" in myopts:
print "!!! The short ebuild name \"%s\" is ambiguous. Please specify" % arg
print "!!! one of the following fully-qualified ebuild names instead:\n"
for cp in sorted(set(portage.dep_getkey(atom) for atom in atoms)):
print " " + colorize("INFORM", cp)
return

s = search(root_config, spinner, "--searchdesc" in myopts,
"--quiet" not in myopts, "--usepkg" in myopts,
"--usepkgonly" in myopts)
null_cp = portage.dep_getkey(insert_category_into_atom(
arg, "null"))
cat, atom_pn = portage.catsplit(null_cp)
s.searchkey = atom_pn
for cp in sorted(set(portage.dep_getkey(atom) for atom in atoms)):
s.addCP(cp)
s.output()
print "!!! The short ebuild name \"%s\" is ambiguous. Please specify" % arg
print "!!! one of the above fully-qualified ebuild names instead.\n"

def profile_check(trees, myaction, myopts):
if myaction in ("info", "sync"):
return os.EX_OK